Add vendored dependencies for the affinity router and the
affinity routing daemon.

Change-Id: Icda72c3594ef7f8f0bc0c33dc03087a4c25529ca
diff --git a/vendor/github.com/beorn7/perks/LICENSE b/vendor/github.com/beorn7/perks/LICENSE
new file mode 100644
index 0000000..339177b
--- /dev/null
+++ b/vendor/github.com/beorn7/perks/LICENSE
@@ -0,0 +1,20 @@
+Copyright (C) 2013 Blake Mizerany
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/beorn7/perks/quantile/exampledata.txt b/vendor/github.com/beorn7/perks/quantile/exampledata.txt
new file mode 100644
index 0000000..1602287
--- /dev/null
+++ b/vendor/github.com/beorn7/perks/quantile/exampledata.txt
@@ -0,0 +1,2388 @@
+8
+5
+26
+12
+5
+235
+13
+6
+28
+30
+3
+3
+3
+3
+5
+2
+33
+7
+2
+4
+7
+12
+14
+5
+8
+3
+10
+4
+5
+3
+6
+6
+209
+20
+3
+10
+14
+3
+4
+6
+8
+5
+11
+7
+3
+2
+3
+3
+212
+5
+222
+4
+10
+10
+5
+6
+3
+8
+3
+10
+254
+220
+2
+3
+5
+24
+5
+4
+222
+7
+3
+3
+223
+8
+15
+12
+14
+14
+3
+2
+2
+3
+13
+3
+11
+4
+4
+6
+5
+7
+13
+5
+3
+5
+2
+5
+3
+5
+2
+7
+15
+17
+14
+3
+6
+6
+3
+17
+5
+4
+7
+6
+4
+4
+8
+6
+8
+3
+9
+3
+6
+3
+4
+5
+3
+3
+660
+4
+6
+10
+3
+6
+3
+2
+5
+13
+2
+4
+4
+10
+4
+8
+4
+3
+7
+9
+9
+3
+10
+37
+3
+13
+4
+12
+3
+6
+10
+8
+5
+21
+2
+3
+8
+3
+2
+3
+3
+4
+12
+2
+4
+8
+8
+4
+3
+2
+20
+1
+6
+32
+2
+11
+6
+18
+3
+8
+11
+3
+212
+3
+4
+2
+6
+7
+12
+11
+3
+2
+16
+10
+6
+4
+6
+3
+2
+7
+3
+2
+2
+2
+2
+5
+6
+4
+3
+10
+3
+4
+6
+5
+3
+4
+4
+5
+6
+4
+3
+4
+4
+5
+7
+5
+5
+3
+2
+7
+2
+4
+12
+4
+5
+6
+2
+4
+4
+8
+4
+15
+13
+7
+16
+5
+3
+23
+5
+5
+7
+3
+2
+9
+8
+7
+5
+8
+11
+4
+10
+76
+4
+47
+4
+3
+2
+7
+4
+2
+3
+37
+10
+4
+2
+20
+5
+4
+4
+10
+10
+4
+3
+7
+23
+240
+7
+13
+5
+5
+3
+3
+2
+5
+4
+2
+8
+7
+19
+2
+23
+8
+7
+2
+5
+3
+8
+3
+8
+13
+5
+5
+5
+2
+3
+23
+4
+9
+8
+4
+3
+3
+5
+220
+2
+3
+4
+6
+14
+3
+53
+6
+2
+5
+18
+6
+3
+219
+6
+5
+2
+5
+3
+6
+5
+15
+4
+3
+17
+3
+2
+4
+7
+2
+3
+3
+4
+4
+3
+2
+664
+6
+3
+23
+5
+5
+16
+5
+8
+2
+4
+2
+24
+12
+3
+2
+3
+5
+8
+3
+5
+4
+3
+14
+3
+5
+8
+2
+3
+7
+9
+4
+2
+3
+6
+8
+4
+3
+4
+6
+5
+3
+3
+6
+3
+19
+4
+4
+6
+3
+6
+3
+5
+22
+5
+4
+4
+3
+8
+11
+4
+9
+7
+6
+13
+4
+4
+4
+6
+17
+9
+3
+3
+3
+4
+3
+221
+5
+11
+3
+4
+2
+12
+6
+3
+5
+7
+5
+7
+4
+9
+7
+14
+37
+19
+217
+16
+3
+5
+2
+2
+7
+19
+7
+6
+7
+4
+24
+5
+11
+4
+7
+7
+9
+13
+3
+4
+3
+6
+28
+4
+4
+5
+5
+2
+5
+6
+4
+4
+6
+10
+5
+4
+3
+2
+3
+3
+6
+5
+5
+4
+3
+2
+3
+7
+4
+6
+18
+16
+8
+16
+4
+5
+8
+6
+9
+13
+1545
+6
+215
+6
+5
+6
+3
+45
+31
+5
+2
+2
+4
+3
+3
+2
+5
+4
+3
+5
+7
+7
+4
+5
+8
+5
+4
+749
+2
+31
+9
+11
+2
+11
+5
+4
+4
+7
+9
+11
+4
+5
+4
+7
+3
+4
+6
+2
+15
+3
+4
+3
+4
+3
+5
+2
+13
+5
+5
+3
+3
+23
+4
+4
+5
+7
+4
+13
+2
+4
+3
+4
+2
+6
+2
+7
+3
+5
+5
+3
+29
+5
+4
+4
+3
+10
+2
+3
+79
+16
+6
+6
+7
+7
+3
+5
+5
+7
+4
+3
+7
+9
+5
+6
+5
+9
+6
+3
+6
+4
+17
+2
+10
+9
+3
+6
+2
+3
+21
+22
+5
+11
+4
+2
+17
+2
+224
+2
+14
+3
+4
+4
+2
+4
+4
+4
+4
+5
+3
+4
+4
+10
+2
+6
+3
+3
+5
+7
+2
+7
+5
+6
+3
+218
+2
+2
+5
+2
+6
+3
+5
+222
+14
+6
+33
+3
+2
+5
+3
+3
+3
+9
+5
+3
+3
+2
+7
+4
+3
+4
+3
+5
+6
+5
+26
+4
+13
+9
+7
+3
+221
+3
+3
+4
+4
+4
+4
+2
+18
+5
+3
+7
+9
+6
+8
+3
+10
+3
+11
+9
+5
+4
+17
+5
+5
+6
+6
+3
+2
+4
+12
+17
+6
+7
+218
+4
+2
+4
+10
+3
+5
+15
+3
+9
+4
+3
+3
+6
+29
+3
+3
+4
+5
+5
+3
+8
+5
+6
+6
+7
+5
+3
+5
+3
+29
+2
+31
+5
+15
+24
+16
+5
+207
+4
+3
+3
+2
+15
+4
+4
+13
+5
+5
+4
+6
+10
+2
+7
+8
+4
+6
+20
+5
+3
+4
+3
+12
+12
+5
+17
+7
+3
+3
+3
+6
+10
+3
+5
+25
+80
+4
+9
+3
+2
+11
+3
+3
+2
+3
+8
+7
+5
+5
+19
+5
+3
+3
+12
+11
+2
+6
+5
+5
+5
+3
+3
+3
+4
+209
+14
+3
+2
+5
+19
+4
+4
+3
+4
+14
+5
+6
+4
+13
+9
+7
+4
+7
+10
+2
+9
+5
+7
+2
+8
+4
+6
+5
+5
+222
+8
+7
+12
+5
+216
+3
+4
+4
+6
+3
+14
+8
+7
+13
+4
+3
+3
+3
+3
+17
+5
+4
+3
+33
+6
+6
+33
+7
+5
+3
+8
+7
+5
+2
+9
+4
+2
+233
+24
+7
+4
+8
+10
+3
+4
+15
+2
+16
+3
+3
+13
+12
+7
+5
+4
+207
+4
+2
+4
+27
+15
+2
+5
+2
+25
+6
+5
+5
+6
+13
+6
+18
+6
+4
+12
+225
+10
+7
+5
+2
+2
+11
+4
+14
+21
+8
+10
+3
+5
+4
+232
+2
+5
+5
+3
+7
+17
+11
+6
+6
+23
+4
+6
+3
+5
+4
+2
+17
+3
+6
+5
+8
+3
+2
+2
+14
+9
+4
+4
+2
+5
+5
+3
+7
+6
+12
+6
+10
+3
+6
+2
+2
+19
+5
+4
+4
+9
+2
+4
+13
+3
+5
+6
+3
+6
+5
+4
+9
+6
+3
+5
+7
+3
+6
+6
+4
+3
+10
+6
+3
+221
+3
+5
+3
+6
+4
+8
+5
+3
+6
+4
+4
+2
+54
+5
+6
+11
+3
+3
+4
+4
+4
+3
+7
+3
+11
+11
+7
+10
+6
+13
+223
+213
+15
+231
+7
+3
+7
+228
+2
+3
+4
+4
+5
+6
+7
+4
+13
+3
+4
+5
+3
+6
+4
+6
+7
+2
+4
+3
+4
+3
+3
+6
+3
+7
+3
+5
+18
+5
+6
+8
+10
+3
+3
+3
+2
+4
+2
+4
+4
+5
+6
+6
+4
+10
+13
+3
+12
+5
+12
+16
+8
+4
+19
+11
+2
+4
+5
+6
+8
+5
+6
+4
+18
+10
+4
+2
+216
+6
+6
+6
+2
+4
+12
+8
+3
+11
+5
+6
+14
+5
+3
+13
+4
+5
+4
+5
+3
+28
+6
+3
+7
+219
+3
+9
+7
+3
+10
+6
+3
+4
+19
+5
+7
+11
+6
+15
+19
+4
+13
+11
+3
+7
+5
+10
+2
+8
+11
+2
+6
+4
+6
+24
+6
+3
+3
+3
+3
+6
+18
+4
+11
+4
+2
+5
+10
+8
+3
+9
+5
+3
+4
+5
+6
+2
+5
+7
+4
+4
+14
+6
+4
+4
+5
+5
+7
+2
+4
+3
+7
+3
+3
+6
+4
+5
+4
+4
+4
+3
+3
+3
+3
+8
+14
+2
+3
+5
+3
+2
+4
+5
+3
+7
+3
+3
+18
+3
+4
+4
+5
+7
+3
+3
+3
+13
+5
+4
+8
+211
+5
+5
+3
+5
+2
+5
+4
+2
+655
+6
+3
+5
+11
+2
+5
+3
+12
+9
+15
+11
+5
+12
+217
+2
+6
+17
+3
+3
+207
+5
+5
+4
+5
+9
+3
+2
+8
+5
+4
+3
+2
+5
+12
+4
+14
+5
+4
+2
+13
+5
+8
+4
+225
+4
+3
+4
+5
+4
+3
+3
+6
+23
+9
+2
+6
+7
+233
+4
+4
+6
+18
+3
+4
+6
+3
+4
+4
+2
+3
+7
+4
+13
+227
+4
+3
+5
+4
+2
+12
+9
+17
+3
+7
+14
+6
+4
+5
+21
+4
+8
+9
+2
+9
+25
+16
+3
+6
+4
+7
+8
+5
+2
+3
+5
+4
+3
+3
+5
+3
+3
+3
+2
+3
+19
+2
+4
+3
+4
+2
+3
+4
+4
+2
+4
+3
+3
+3
+2
+6
+3
+17
+5
+6
+4
+3
+13
+5
+3
+3
+3
+4
+9
+4
+2
+14
+12
+4
+5
+24
+4
+3
+37
+12
+11
+21
+3
+4
+3
+13
+4
+2
+3
+15
+4
+11
+4
+4
+3
+8
+3
+4
+4
+12
+8
+5
+3
+3
+4
+2
+220
+3
+5
+223
+3
+3
+3
+10
+3
+15
+4
+241
+9
+7
+3
+6
+6
+23
+4
+13
+7
+3
+4
+7
+4
+9
+3
+3
+4
+10
+5
+5
+1
+5
+24
+2
+4
+5
+5
+6
+14
+3
+8
+2
+3
+5
+13
+13
+3
+5
+2
+3
+15
+3
+4
+2
+10
+4
+4
+4
+5
+5
+3
+5
+3
+4
+7
+4
+27
+3
+6
+4
+15
+3
+5
+6
+6
+5
+4
+8
+3
+9
+2
+6
+3
+4
+3
+7
+4
+18
+3
+11
+3
+3
+8
+9
+7
+24
+3
+219
+7
+10
+4
+5
+9
+12
+2
+5
+4
+4
+4
+3
+3
+19
+5
+8
+16
+8
+6
+22
+3
+23
+3
+242
+9
+4
+3
+3
+5
+7
+3
+3
+5
+8
+3
+7
+5
+14
+8
+10
+3
+4
+3
+7
+4
+6
+7
+4
+10
+4
+3
+11
+3
+7
+10
+3
+13
+6
+8
+12
+10
+5
+7
+9
+3
+4
+7
+7
+10
+8
+30
+9
+19
+4
+3
+19
+15
+4
+13
+3
+215
+223
+4
+7
+4
+8
+17
+16
+3
+7
+6
+5
+5
+4
+12
+3
+7
+4
+4
+13
+4
+5
+2
+5
+6
+5
+6
+6
+7
+10
+18
+23
+9
+3
+3
+6
+5
+2
+4
+2
+7
+3
+3
+2
+5
+5
+14
+10
+224
+6
+3
+4
+3
+7
+5
+9
+3
+6
+4
+2
+5
+11
+4
+3
+3
+2
+8
+4
+7
+4
+10
+7
+3
+3
+18
+18
+17
+3
+3
+3
+4
+5
+3
+3
+4
+12
+7
+3
+11
+13
+5
+4
+7
+13
+5
+4
+11
+3
+12
+3
+6
+4
+4
+21
+4
+6
+9
+5
+3
+10
+8
+4
+6
+4
+4
+6
+5
+4
+8
+6
+4
+6
+4
+4
+5
+9
+6
+3
+4
+2
+9
+3
+18
+2
+4
+3
+13
+3
+6
+6
+8
+7
+9
+3
+2
+16
+3
+4
+6
+3
+2
+33
+22
+14
+4
+9
+12
+4
+5
+6
+3
+23
+9
+4
+3
+5
+5
+3
+4
+5
+3
+5
+3
+10
+4
+5
+5
+8
+4
+4
+6
+8
+5
+4
+3
+4
+6
+3
+3
+3
+5
+9
+12
+6
+5
+9
+3
+5
+3
+2
+2
+2
+18
+3
+2
+21
+2
+5
+4
+6
+4
+5
+10
+3
+9
+3
+2
+10
+7
+3
+6
+6
+4
+4
+8
+12
+7
+3
+7
+3
+3
+9
+3
+4
+5
+4
+4
+5
+5
+10
+15
+4
+4
+14
+6
+227
+3
+14
+5
+216
+22
+5
+4
+2
+2
+6
+3
+4
+2
+9
+9
+4
+3
+28
+13
+11
+4
+5
+3
+3
+2
+3
+3
+5
+3
+4
+3
+5
+23
+26
+3
+4
+5
+6
+4
+6
+3
+5
+5
+3
+4
+3
+2
+2
+2
+7
+14
+3
+6
+7
+17
+2
+2
+15
+14
+16
+4
+6
+7
+13
+6
+4
+5
+6
+16
+3
+3
+28
+3
+6
+15
+3
+9
+2
+4
+6
+3
+3
+22
+4
+12
+6
+7
+2
+5
+4
+10
+3
+16
+6
+9
+2
+5
+12
+7
+5
+5
+5
+5
+2
+11
+9
+17
+4
+3
+11
+7
+3
+5
+15
+4
+3
+4
+211
+8
+7
+5
+4
+7
+6
+7
+6
+3
+6
+5
+6
+5
+3
+4
+4
+26
+4
+6
+10
+4
+4
+3
+2
+3
+3
+4
+5
+9
+3
+9
+4
+4
+5
+5
+8
+2
+4
+2
+3
+8
+4
+11
+19
+5
+8
+6
+3
+5
+6
+12
+3
+2
+4
+16
+12
+3
+4
+4
+8
+6
+5
+6
+6
+219
+8
+222
+6
+16
+3
+13
+19
+5
+4
+3
+11
+6
+10
+4
+7
+7
+12
+5
+3
+3
+5
+6
+10
+3
+8
+2
+5
+4
+7
+2
+4
+4
+2
+12
+9
+6
+4
+2
+40
+2
+4
+10
+4
+223
+4
+2
+20
+6
+7
+24
+5
+4
+5
+2
+20
+16
+6
+5
+13
+2
+3
+3
+19
+3
+2
+4
+5
+6
+7
+11
+12
+5
+6
+7
+7
+3
+5
+3
+5
+3
+14
+3
+4
+4
+2
+11
+1
+7
+3
+9
+6
+11
+12
+5
+8
+6
+221
+4
+2
+12
+4
+3
+15
+4
+5
+226
+7
+218
+7
+5
+4
+5
+18
+4
+5
+9
+4
+4
+2
+9
+18
+18
+9
+5
+6
+6
+3
+3
+7
+3
+5
+4
+4
+4
+12
+3
+6
+31
+5
+4
+7
+3
+6
+5
+6
+5
+11
+2
+2
+11
+11
+6
+7
+5
+8
+7
+10
+5
+23
+7
+4
+3
+5
+34
+2
+5
+23
+7
+3
+6
+8
+4
+4
+4
+2
+5
+3
+8
+5
+4
+8
+25
+2
+3
+17
+8
+3
+4
+8
+7
+3
+15
+6
+5
+7
+21
+9
+5
+6
+6
+5
+3
+2
+3
+10
+3
+6
+3
+14
+7
+4
+4
+8
+7
+8
+2
+6
+12
+4
+213
+6
+5
+21
+8
+2
+5
+23
+3
+11
+2
+3
+6
+25
+2
+3
+6
+7
+6
+6
+4
+4
+6
+3
+17
+9
+7
+6
+4
+3
+10
+7
+2
+3
+3
+3
+11
+8
+3
+7
+6
+4
+14
+36
+3
+4
+3
+3
+22
+13
+21
+4
+2
+7
+4
+4
+17
+15
+3
+7
+11
+2
+4
+7
+6
+209
+6
+3
+2
+2
+24
+4
+9
+4
+3
+3
+3
+29
+2
+2
+4
+3
+3
+5
+4
+6
+3
+3
+2
+4
diff --git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go
new file mode 100644
index 0000000..d7d14f8
--- /dev/null
+++ b/vendor/github.com/beorn7/perks/quantile/stream.go
@@ -0,0 +1,316 @@
+// Package quantile computes approximate quantiles over an unbounded data
+// stream within low memory and CPU bounds.
+//
+// A small amount of accuracy is traded to achieve the above properties.
+//
+// Multiple streams can be merged before calling Query to generate a single set
+// of results. This is meaningful when the streams represent the same type of
+// data. See Merge and Samples.
+//
+// For more detailed information about the algorithm used, see:
+//
+// Effective Computation of Biased Quantiles over Data Streams
+//
+// http://www.cs.rutgers.edu/~muthu/bquant.pdf
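+//
+// A minimal usage sketch (the values and targets are illustrative; NewTargeted,
+// Insert, and Query are defined below):
+//
+//	s := NewTargeted(map[float64]float64{0.50: 0.005, 0.99: 0.001})
+//	for _, v := range []float64{10, 12, 131, 7} {
+//		s.Insert(v)
+//	}
+//	p99 := s.Query(0.99) // approximate 99th percentile of the stream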
+package quantile
+
+import (
+	"math"
+	"sort"
+)
+
+// Sample holds an observed value and meta information for compression. JSON
+// tags have been added for convenience.
+type Sample struct {
+	Value float64 `json:",string"`
+	Width float64 `json:",string"`
+	Delta float64 `json:",string"`
+}
+
+// Samples represents a slice of samples. It implements sort.Interface.
+type Samples []Sample
+
+func (a Samples) Len() int           { return len(a) }
+func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
+func (a Samples) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+
+type invariant func(s *stream, r float64) float64
+
+// NewLowBiased returns an initialized Stream for low-biased quantiles
+// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
+// error guarantees can still be given even for the lower ranks of the data
+// distribution.
+//
+// The provided epsilon is a relative error, i.e. the true quantile of a value
+// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
+// properties.
+func NewLowBiased(epsilon float64) *Stream {
+	ƒ := func(s *stream, r float64) float64 {
+		return 2 * epsilon * r
+	}
+	return newStream(ƒ)
+}
+
+// NewHighBiased returns an initialized Stream for high-biased quantiles
+// (e.g. 0.95, 0.99) where the needed quantiles are not known a priori, but
+// error guarantees can still be given even for the higher ranks of the data
+// distribution.
+//
+// The provided epsilon is a relative error, i.e. the true quantile of a value
+// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
+// properties.
+func NewHighBiased(epsilon float64) *Stream {
+	ƒ := func(s *stream, r float64) float64 {
+		return 2 * epsilon * (s.n - r)
+	}
+	return newStream(ƒ)
+}
+
+// NewTargeted returns an initialized Stream concerned with a particular set of
+// quantile values that are supplied a priori. Knowing these a priori reduces
+// space and computation time. The targets map maps the desired quantiles to
+// their absolute errors, i.e. the true quantile of a value returned by a query
+// is guaranteed to be within (Quantile±Epsilon).
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
+func NewTargeted(targetMap map[float64]float64) *Stream {
+	// Convert map to slice to avoid slow iterations on a map.
+	// ƒ is called on the hot path, so converting the map to a slice
+	// beforehand results in significant CPU savings.
+	targets := targetMapToSlice(targetMap)
+
+	ƒ := func(s *stream, r float64) float64 {
+		var m = math.MaxFloat64
+		var f float64
+		for _, t := range targets {
+			if t.quantile*s.n <= r {
+				f = (2 * t.epsilon * r) / t.quantile
+			} else {
+				f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile)
+			}
+			if f < m {
+				m = f
+			}
+		}
+		return m
+	}
+	return newStream(ƒ)
+}
+
+type target struct {
+	quantile float64
+	epsilon  float64
+}
+
+func targetMapToSlice(targetMap map[float64]float64) []target {
+	targets := make([]target, 0, len(targetMap))
+
+	for quantile, epsilon := range targetMap {
+		t := target{
+			quantile: quantile,
+			epsilon:  epsilon,
+		}
+		targets = append(targets, t)
+	}
+
+	return targets
+}
+
+// Stream computes quantiles for a stream of float64s. It is not thread-safe by
+// design. Take care when using it across multiple goroutines.
+type Stream struct {
+	*stream
+	b      Samples
+	sorted bool
+}
+
+func newStream(ƒ invariant) *Stream {
+	x := &stream{ƒ: ƒ}
+	return &Stream{x, make(Samples, 0, 500), true}
+}
+
+// Insert inserts v into the stream.
+func (s *Stream) Insert(v float64) {
+	s.insert(Sample{Value: v, Width: 1})
+}
+
+func (s *Stream) insert(sample Sample) {
+	s.b = append(s.b, sample)
+	s.sorted = false
+	if len(s.b) == cap(s.b) {
+		s.flush()
+	}
+}
+
+// Query returns the computed qth percentile value. If s was created with
+// NewTargeted, and q is not in the set of quantiles provided a priori, Query
+// will return an unspecified result.
+func (s *Stream) Query(q float64) float64 {
+	if !s.flushed() {
+		// Fast path when there hasn't been enough data for a flush;
+		// this also yields better accuracy for small sets of data.
+		l := len(s.b)
+		if l == 0 {
+			return 0
+		}
+		i := int(math.Ceil(float64(l) * q))
+		if i > 0 {
+			i -= 1
+		}
+		s.maybeSort()
+		return s.b[i].Value
+	}
+	s.flush()
+	return s.stream.query(q)
+}
+
+// Merge merges samples into the underlying stream's samples. This is handy when
+// merging multiple streams from separate threads, database shards, etc.
+//
+// ATTENTION: This method is broken and does not yield correct results. The
+// underlying algorithm is not capable of merging streams correctly.
+func (s *Stream) Merge(samples Samples) {
+	sort.Sort(samples)
+	s.stream.merge(samples)
+}
+
+// Reset reinitializes and clears the list, reusing the samples buffer memory.
+func (s *Stream) Reset() {
+	s.stream.reset()
+	s.b = s.b[:0]
+}
+
+// Samples returns stream samples held by s.
+func (s *Stream) Samples() Samples {
+	if !s.flushed() {
+		return s.b
+	}
+	s.flush()
+	return s.stream.samples()
+}
+
+// Count returns the total number of samples observed in the stream
+// since initialization.
+func (s *Stream) Count() int {
+	return len(s.b) + s.stream.count()
+}
+
+func (s *Stream) flush() {
+	s.maybeSort()
+	s.stream.merge(s.b)
+	s.b = s.b[:0]
+}
+
+func (s *Stream) maybeSort() {
+	if !s.sorted {
+		s.sorted = true
+		sort.Sort(s.b)
+	}
+}
+
+func (s *Stream) flushed() bool {
+	return len(s.stream.l) > 0
+}
+
+type stream struct {
+	n float64
+	l []Sample
+	ƒ invariant
+}
+
+func (s *stream) reset() {
+	s.l = s.l[:0]
+	s.n = 0
+}
+
+func (s *stream) insert(v float64) {
+	s.merge(Samples{{v, 1, 0}})
+}
+
+func (s *stream) merge(samples Samples) {
+	// TODO(beorn7): This tries to merge not only individual samples, but
+	// whole summaries. The paper doesn't mention merging summaries at
+// all. Unit tests show that the merging is inaccurate. Find out how to
+	// do merges properly.
+	var r float64
+	i := 0
+	for _, sample := range samples {
+		for ; i < len(s.l); i++ {
+			c := s.l[i]
+			if c.Value > sample.Value {
+				// Insert at position i.
+				s.l = append(s.l, Sample{})
+				copy(s.l[i+1:], s.l[i:])
+				s.l[i] = Sample{
+					sample.Value,
+					sample.Width,
+					math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
+					// TODO(beorn7): How to calculate delta correctly?
+				}
+				i++
+				goto inserted
+			}
+			r += c.Width
+		}
+		s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
+		i++
+	inserted:
+		s.n += sample.Width
+		r += sample.Width
+	}
+	s.compress()
+}
+
+func (s *stream) count() int {
+	return int(s.n)
+}
+
+func (s *stream) query(q float64) float64 {
+	t := math.Ceil(q * s.n)
+	t += math.Ceil(s.ƒ(s, t) / 2)
+	p := s.l[0]
+	var r float64
+	for _, c := range s.l[1:] {
+		r += p.Width
+		if r+c.Width+c.Delta > t {
+			return p.Value
+		}
+		p = c
+	}
+	return p.Value
+}
+
+func (s *stream) compress() {
+	if len(s.l) < 2 {
+		return
+	}
+	x := s.l[len(s.l)-1]
+	xi := len(s.l) - 1
+	r := s.n - 1 - x.Width
+
+	for i := len(s.l) - 2; i >= 0; i-- {
+		c := s.l[i]
+		if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
+			x.Width += c.Width
+			s.l[xi] = x
+			// Remove element at i.
+			copy(s.l[i:], s.l[i+1:])
+			s.l = s.l[:len(s.l)-1]
+			xi -= 1
+		} else {
+			x = c
+			xi = i
+		}
+		r -= c.Width
+	}
+}
+
+func (s *stream) samples() Samples {
+	samples := make(Samples, len(s.l))
+	copy(samples, s.l)
+	return samples
+}
diff --git a/vendor/github.com/coreos/bbolt/.gitignore b/vendor/github.com/coreos/bbolt/.gitignore
new file mode 100644
index 0000000..c7bd2b7
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/.gitignore
@@ -0,0 +1,4 @@
+*.prof
+*.test
+*.swp
+/bin/
diff --git a/vendor/github.com/coreos/bbolt/LICENSE b/vendor/github.com/coreos/bbolt/LICENSE
new file mode 100644
index 0000000..004e77f
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Ben Johnson
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/coreos/bbolt/Makefile b/vendor/github.com/coreos/bbolt/Makefile
new file mode 100644
index 0000000..e035e63
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/Makefile
@@ -0,0 +1,18 @@
+BRANCH=`git rev-parse --abbrev-ref HEAD`
+COMMIT=`git rev-parse --short HEAD`
+GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)"
+
+default: test
+
+race:
+	@go test -v -race -test.run="TestSimulate_(100op|1000op)"
+
+# go get github.com/kisielk/errcheck
+errcheck:
+	@errcheck -ignorepkg=bytes -ignore=os:Remove github.com/boltdb/bolt
+
+test: 
+	@go test -v -cover .
+	@go test -v ./cmd/bolt
+
+.PHONY: default race errcheck test
diff --git a/vendor/github.com/coreos/bbolt/README.md b/vendor/github.com/coreos/bbolt/README.md
new file mode 100644
index 0000000..8523e33
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/README.md
@@ -0,0 +1,852 @@
+Bolt [![Coverage Status](https://coveralls.io/repos/boltdb/bolt/badge.svg?branch=master)](https://coveralls.io/r/boltdb/bolt?branch=master) [![GoDoc](https://godoc.org/github.com/boltdb/bolt?status.svg)](https://godoc.org/github.com/boltdb/bolt) ![Version](https://img.shields.io/badge/version-1.2.1-green.svg)
+====
+
+Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas]
+[LMDB project][lmdb]. The goal of the project is to provide a simple,
+fast, and reliable database for projects that don't require a full database
+server such as Postgres or MySQL.
+
+Since Bolt is meant to be used as such a low-level piece of functionality,
+simplicity is key. The API will be small and only focus on getting values
+and setting values. That's it.
+
+[hyc_symas]: https://twitter.com/hyc_symas
+[lmdb]: http://symas.com/mdb/
+
+## Project Status
+
+Bolt is stable and the API is fixed. Full unit test coverage and randomized
+black box testing are used to ensure database consistency and thread safety.
+Bolt is currently in high-load production environments serving databases as
+large as 1TB. Many companies such as Shopify and Heroku use Bolt-backed
+services every day.
+
+## Table of Contents
+
+- [Getting Started](#getting-started)
+  - [Installing](#installing)
+  - [Opening a database](#opening-a-database)
+  - [Transactions](#transactions)
+    - [Read-write transactions](#read-write-transactions)
+    - [Read-only transactions](#read-only-transactions)
+    - [Batch read-write transactions](#batch-read-write-transactions)
+    - [Managing transactions manually](#managing-transactions-manually)
+  - [Using buckets](#using-buckets)
+  - [Using key/value pairs](#using-keyvalue-pairs)
+  - [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket)
+  - [Iterating over keys](#iterating-over-keys)
+    - [Prefix scans](#prefix-scans)
+    - [Range scans](#range-scans)
+    - [ForEach()](#foreach)
+  - [Nested buckets](#nested-buckets)
+  - [Database backups](#database-backups)
+  - [Statistics](#statistics)
+  - [Read-Only Mode](#read-only-mode)
+  - [Mobile Use (iOS/Android)](#mobile-use-iosandroid)
+- [Resources](#resources)
+- [Comparison with other databases](#comparison-with-other-databases)
+  - [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases)
+  - [LevelDB, RocksDB](#leveldb-rocksdb)
+  - [LMDB](#lmdb)
+- [Caveats & Limitations](#caveats--limitations)
+- [Reading the Source](#reading-the-source)
+- [Other Projects Using Bolt](#other-projects-using-bolt)
+
+## Getting Started
+
+### Installing
+
+To start using Bolt, install Go and run `go get`:
+
+```sh
+$ go get github.com/boltdb/bolt/...
+```
+
+This will retrieve the library and install the `bolt` command line utility into
+your `$GOBIN` path.
+
+
+### Opening a database
+
+The top-level object in Bolt is a `DB`. It is stored as a single file on
+your disk and represents a consistent snapshot of your data.
+
+To open your database, simply use the `bolt.Open()` function:
+
+```go
+package main
+
+import (
+	"log"
+
+	"github.com/boltdb/bolt"
+)
+
+func main() {
+	// Open the my.db data file in your current directory.
+	// It will be created if it doesn't exist.
+	db, err := bolt.Open("my.db", 0600, nil)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer db.Close()
+
+	...
+}
+```
+
+Please note that Bolt obtains a file lock on the data file so multiple processes
+cannot open the same database at the same time. Opening an already open Bolt
+database will cause it to hang until the other process closes it. To prevent
+an indefinite wait you can pass a timeout option to the `Open()` function:
+
+```go
+db, err := bolt.Open("my.db", 0600, &bolt.Options{Timeout: 1 * time.Second})
+```
+
+
+### Transactions
+
+Bolt allows only one read-write transaction at a time but allows as many
+read-only transactions as you want at a time. Each transaction has a consistent
+view of the data as it existed when the transaction started.
+
+Individual transactions and all objects created from them (e.g. buckets, keys)
+are not thread safe. To work with data in multiple goroutines you must start
+a transaction for each one or use locking to ensure only one goroutine accesses
+a transaction at a time. Creating a transaction from the `DB` is thread safe.
+
+Read-only transactions and read-write transactions should not depend on one
+another and generally shouldn't be opened simultaneously in the same goroutine.
+This can cause a deadlock as the read-write transaction needs to periodically
+re-map the data file but it cannot do so while a read-only transaction is open.
+
+
+#### Read-write transactions
+
+To start a read-write transaction, you can use the `DB.Update()` function:
+
+```go
+err := db.Update(func(tx *bolt.Tx) error {
+	...
+	return nil
+})
+```
+
+Inside the closure, you have a consistent view of the database. You commit the
+transaction by returning `nil` at the end. You can also roll back the transaction
+at any point by returning an error. All database operations are allowed inside
+a read-write transaction.
+
+Always check the return error as it will report any disk failures that can cause
+your transaction to not complete. If you return an error within your closure
+it will be passed through.
+
+
+#### Read-only transactions
+
+To start a read-only transaction, you can use the `DB.View()` function:
+
+```go
+err := db.View(func(tx *bolt.Tx) error {
+	...
+	return nil
+})
+```
+
+You also get a consistent view of the database within this closure, however,
+no mutating operations are allowed within a read-only transaction. You can only
+retrieve buckets, retrieve values, and copy the database within a read-only
+transaction.
+
+
+#### Batch read-write transactions
+
+Each `DB.Update()` waits for disk to commit the writes. This overhead
+can be minimized by combining multiple updates with the `DB.Batch()`
+function:
+
+```go
+err := db.Batch(func(tx *bolt.Tx) error {
+	...
+	return nil
+})
+```
+
+Concurrent Batch calls are opportunistically combined into larger
+transactions. Batch is only useful when there are multiple goroutines
+calling it.
+
+The trade-off is that `Batch` can call the given
+function multiple times, if parts of the transaction fail. The
+function must be idempotent and side effects must take effect only
+after a successful return from `DB.Batch()`.
+
+For example: don't display messages from inside the function, instead
+set variables in the enclosing scope:
+
+```go
+var id uint64
+err := db.Batch(func(tx *bolt.Tx) error {
+	// Find last key in bucket, decode as bigendian uint64, increment
+	// by one, encode back to []byte, and add new key.
+	...
+	id = newValue
+	return nil
+})
+if err != nil {
+	return ...
+}
+fmt.Printf("Allocated ID %d\n", id)
+```
+
+
+#### Managing transactions manually
+
+The `DB.View()` and `DB.Update()` functions are wrappers around the `DB.Begin()`
+function. These helper functions will start the transaction, execute a function,
+and then safely close your transaction if an error is returned. This is the
+recommended way to use Bolt transactions.
+
+However, sometimes you may want to manually start and end your transactions.
+You can use the `DB.Begin()` function directly but **please** be sure to close
+the transaction.
+
+```go
+// Start a writable transaction.
+tx, err := db.Begin(true)
+if err != nil {
+    return err
+}
+defer tx.Rollback()
+
+// Use the transaction...
+_, err = tx.CreateBucket([]byte("MyBucket"))
+if err != nil {
+    return err
+}
+
+// Commit the transaction and check for error.
+if err := tx.Commit(); err != nil {
+    return err
+}
+```
+
+The first argument to `DB.Begin()` is a boolean stating if the transaction
+should be writable.
+
+
+### Using buckets
+
+Buckets are collections of key/value pairs within the database. All keys in a
+bucket must be unique. You can create a bucket using the `DB.CreateBucket()`
+function:
+
+```go
+db.Update(func(tx *bolt.Tx) error {
+	b, err := tx.CreateBucket([]byte("MyBucket"))
+	if err != nil {
+		return fmt.Errorf("create bucket: %s", err)
+	}
+	return nil
+})
+```
+
+You can also create a bucket only if it doesn't exist by using the
+`Tx.CreateBucketIfNotExists()` function. It's a common pattern to call this
+function for all your top-level buckets after you open your database so you can
+guarantee that they exist for future transactions.
+
+To delete a bucket, simply call the `Tx.DeleteBucket()` function.
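+
+For example, a minimal sketch (the bucket name is illustrative):
+
+```go
+db.Update(func(tx *bolt.Tx) error {
+	return tx.DeleteBucket([]byte("MyBucket"))
+})
+```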
+
+
+### Using key/value pairs
+
+To save a key/value pair to a bucket, use the `Bucket.Put()` function:
+
+```go
+db.Update(func(tx *bolt.Tx) error {
+	b := tx.Bucket([]byte("MyBucket"))
+	err := b.Put([]byte("answer"), []byte("42"))
+	return err
+})
+```
+
+This will set the value of the `"answer"` key to `"42"` in the `MyBucket`
+bucket. To retrieve this value, we can use the `Bucket.Get()` function:
+
+```go
+db.View(func(tx *bolt.Tx) error {
+	b := tx.Bucket([]byte("MyBucket"))
+	v := b.Get([]byte("answer"))
+	fmt.Printf("The answer is: %s\n", v)
+	return nil
+})
+```
+
+The `Get()` function does not return an error because its operation is
+guaranteed to work (unless there is some kind of system failure). If the key
+exists then it will return its byte slice value. If it doesn't exist then it
+will return `nil`. Note that you can set a zero-length value for a key, which
+is different from the key not existing.
+
+Use the `Bucket.Delete()` function to delete a key from the bucket.
+
+Please note that values returned from `Get()` are only valid while the
+transaction is open. If you need to use a value outside of the transaction
+then you must use `copy()` to copy it to another byte slice.
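+
+For example, a minimal sketch of copying a value out of the transaction (the
+bucket and key names are illustrative):
+
+```go
+var answer []byte
+db.View(func(tx *bolt.Tx) error {
+	v := tx.Bucket([]byte("MyBucket")).Get([]byte("answer"))
+	// v is only valid inside this transaction, so copy it out.
+	answer = make([]byte, len(v))
+	copy(answer, v)
+	return nil
+})
+// answer remains valid after the transaction has closed.
+```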
+
+
+### Autoincrementing integer for the bucket
+By using the `NextSequence()` function, you can let Bolt determine a sequence
+which can be used as the unique identifier for your key/value pairs. See the
+example below.
+
+```go
+// CreateUser saves u to the store. The new user ID is set on u once the data is persisted.
+func (s *Store) CreateUser(u *User) error {
+    return s.db.Update(func(tx *bolt.Tx) error {
+        // Retrieve the users bucket.
+        // This should be created when the DB is first opened.
+        b := tx.Bucket([]byte("users"))
+
+        // Generate ID for the user.
+        // This returns an error only if the Tx is closed or not writeable.
+        // That can't happen in an Update() call so I ignore the error check.
+        id, _ := b.NextSequence()
+        u.ID = int(id)
+
+        // Marshal user data into bytes.
+        buf, err := json.Marshal(u)
+        if err != nil {
+            return err
+        }
+
+        // Persist bytes to users bucket.
+        return b.Put(itob(u.ID), buf)
+    })
+}
+
+// itob returns an 8-byte big endian representation of v.
+func itob(v int) []byte {
+    b := make([]byte, 8)
+    binary.BigEndian.PutUint64(b, uint64(v))
+    return b
+}
+
+type User struct {
+    ID int
+    ...
+}
+```
+
+### Iterating over keys
+
+Bolt stores its keys in byte-sorted order within a bucket. This makes sequential
+iteration over these keys extremely fast. To iterate over keys we'll use a
+`Cursor`:
+
+```go
+db.View(func(tx *bolt.Tx) error {
+	// Assume bucket exists and has keys
+	b := tx.Bucket([]byte("MyBucket"))
+
+	c := b.Cursor()
+
+	for k, v := c.First(); k != nil; k, v = c.Next() {
+		fmt.Printf("key=%s, value=%s\n", k, v)
+	}
+
+	return nil
+})
+```
+
+The cursor allows you to move to a specific point in the list of keys and move
+forward or backward through the keys one at a time.
+
+The following functions are available on the cursor:
+
+```
+First()  Move to the first key.
+Last()   Move to the last key.
+Seek()   Move to a specific key.
+Next()   Move to the next key.
+Prev()   Move to the previous key.
+```
+
+Each of those functions has a return signature of `(key []byte, value []byte)`.
+When you have iterated to the end of the cursor then `Next()` will return a
+`nil` key.  You must seek to a position using `First()`, `Last()`, or `Seek()`
+before calling `Next()` or `Prev()`. If you do not seek to a position then
+these functions will return a `nil` key.
+
+During iteration, if the key is non-`nil` but the value is `nil`, that means
+the key refers to a bucket rather than a value.  Use `Bucket.Bucket()` to
+access the sub-bucket.
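+
+For example, a sketch that distinguishes the two cases inside the cursor loop
+shown above:
+
+```go
+for k, v := c.First(); k != nil; k, v = c.Next() {
+	if v == nil {
+		fmt.Printf("key=%s refers to a sub-bucket\n", k)
+	} else {
+		fmt.Printf("key=%s, value=%s\n", k, v)
+	}
+}
+```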
+
+
+#### Prefix scans
+
+To iterate over a key prefix, you can combine `Seek()` and `bytes.HasPrefix()`:
+
+```go
+db.View(func(tx *bolt.Tx) error {
+	// Assume bucket exists and has keys
+	c := tx.Bucket([]byte("MyBucket")).Cursor()
+
+	prefix := []byte("1234")
+	for k, v := c.Seek(prefix); bytes.HasPrefix(k, prefix); k, v = c.Next() {
+		fmt.Printf("key=%s, value=%s\n", k, v)
+	}
+
+	return nil
+})
+```
+
+#### Range scans
+
+Another common use case is scanning over a range such as a time range. If you
+use a sortable time encoding such as RFC3339 then you can query a specific
+date range like this:
+
+```go
+db.View(func(tx *bolt.Tx) error {
+	// Assume our events bucket exists and has RFC3339 encoded time keys.
+	c := tx.Bucket([]byte("Events")).Cursor()
+
+	// Our time range spans the 90's decade.
+	min := []byte("1990-01-01T00:00:00Z")
+	max := []byte("2000-01-01T00:00:00Z")
+
+	// Iterate over the 90's.
+	for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) <= 0; k, v = c.Next() {
+		fmt.Printf("%s: %s\n", k, v)
+	}
+
+	return nil
+})
+```
+
+Note that, while RFC3339 is sortable, the Golang implementation of RFC3339Nano does not use a fixed number of digits after the decimal point and is therefore not sortable.
+
+
+#### ForEach()
+
+You can also use the function `ForEach()` if you know you'll be iterating over
+all the keys in a bucket:
+
+```go
+db.View(func(tx *bolt.Tx) error {
+	// Assume bucket exists and has keys
+	b := tx.Bucket([]byte("MyBucket"))
+
+	b.ForEach(func(k, v []byte) error {
+		fmt.Printf("key=%s, value=%s\n", k, v)
+		return nil
+	})
+	return nil
+})
+```
+
+
+### Nested buckets
+
+You can also store a bucket in a key to create nested buckets. The API is the
+same as the bucket management API on the `DB` object:
+
+```go
+func (*Bucket) CreateBucket(key []byte) (*Bucket, error)
+func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error)
+func (*Bucket) DeleteBucket(key []byte) error
+```
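+
+For example, a minimal sketch that nests a per-user sub-bucket inside a
+top-level bucket (the bucket and key names are illustrative):
+
+```go
+db.Update(func(tx *bolt.Tx) error {
+	users, err := tx.CreateBucketIfNotExists([]byte("Users"))
+	if err != nil {
+		return err
+	}
+	alice, err := users.CreateBucketIfNotExists([]byte("alice"))
+	if err != nil {
+		return err
+	}
+	return alice.Put([]byte("email"), []byte("alice@example.com"))
+})
+```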
+
+
+### Database backups
+
+Bolt is a single file so it's easy to back up. You can use the `Tx.WriteTo()`
+function to write a consistent view of the database to a writer. If you call
+this from a read-only transaction, it will perform a hot backup and not block
+your other database reads and writes.
+
+By default, it will use a regular file handle which will utilize the operating
+system's page cache. See the [`Tx`](https://godoc.org/github.com/boltdb/bolt#Tx)
+documentation for information about optimizing for larger-than-RAM datasets.
+
+One common use case is to back up over HTTP so you can use tools like `cURL` to
+do database backups:
+
+```go
+func BackupHandleFunc(w http.ResponseWriter, req *http.Request) {
+	err := db.View(func(tx *bolt.Tx) error {
+		w.Header().Set("Content-Type", "application/octet-stream")
+		w.Header().Set("Content-Disposition", `attachment; filename="my.db"`)
+		w.Header().Set("Content-Length", strconv.Itoa(int(tx.Size())))
+		_, err := tx.WriteTo(w)
+		return err
+	})
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+	}
+}
+```
+
+Then you can back up using this command:
+
+```sh
+$ curl http://localhost/backup > my.db
+```
+
+Or you can open your browser to `http://localhost/backup` and it will download
+automatically.
+
+If you want to back up to another file you can use the `Tx.CopyFile()` helper
+function.
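+
+For example, a minimal sketch (the destination path and file mode are
+illustrative):
+
+```go
+err := db.View(func(tx *bolt.Tx) error {
+	return tx.CopyFile("/tmp/my.db.backup", 0600)
+})
+```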
+
+
+### Statistics
+
+The database keeps a running count of many of the internal operations it
+performs so you can better understand what's going on. By grabbing a snapshot
+of these stats at two points in time we can see what operations were performed
+in that time range.
+
+For example, we could start a goroutine to log stats every 10 seconds:
+
+```go
+go func() {
+	// Grab the initial stats.
+	prev := db.Stats()
+
+	for {
+		// Wait for 10s.
+		time.Sleep(10 * time.Second)
+
+		// Grab the current stats and diff them.
+		stats := db.Stats()
+		diff := stats.Sub(&prev)
+
+		// Encode stats to JSON and print to STDERR.
+		json.NewEncoder(os.Stderr).Encode(diff)
+
+		// Save stats for the next loop.
+		prev = stats
+	}
+}()
+```
+
+It's also useful to pipe these stats to a service such as statsd for monitoring
+or to provide an HTTP endpoint that will perform a fixed-length sample.
+
+
+### Read-Only Mode
+
+Sometimes it is useful to create a shared, read-only Bolt database. To do this,
+set the `Options.ReadOnly` flag when opening your database. Read-only mode
+uses a shared lock to allow multiple processes to read from the database but
+it will block any processes from opening the database in read-write mode.
+
+```go
+db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true})
+if err != nil {
+	log.Fatal(err)
+}
+```
+
+### Mobile Use (iOS/Android)
+
+Bolt is able to run on mobile devices by leveraging the binding feature of the
+[gomobile](https://github.com/golang/mobile) tool. Create a struct that will
+contain your database logic and a reference to a `*bolt.DB` with an initializing
+constructor that takes in a filepath where the database file will be stored.
+Neither Android nor iOS requires extra permissions or cleanup from using this method.
+
+```go
+func NewBoltDB(filepath string) *BoltDB {
+	db, err := bolt.Open(filepath+"/demo.db", 0600, nil)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	return &BoltDB{db}
+}
+
+type BoltDB struct {
+	db *bolt.DB
+	...
+}
+
+func (b *BoltDB) Path() string {
+	return b.db.Path()
+}
+
+func (b *BoltDB) Close() {
+	b.db.Close()
+}
+```
+
+Database logic should be defined as methods on this wrapper struct.
+
+To initialize this struct from the native language, use the snippets below
+(both platforms now sync their local storage to the cloud; these snippets
+disable that functionality for the database file):
+
+#### Android
+
+```java
+String path;
+if (android.os.Build.VERSION.SDK_INT >=android.os.Build.VERSION_CODES.LOLLIPOP){
+    path = getNoBackupFilesDir().getAbsolutePath();
+} else{
+    path = getFilesDir().getAbsolutePath();
+}
+Boltmobiledemo.BoltDB boltDB = Boltmobiledemo.NewBoltDB(path);
+```
+
+#### iOS
+
+```objc
+- (void)demo {
+    NSString* path = [NSSearchPathForDirectoriesInDomains(NSLibraryDirectory,
+                                                          NSUserDomainMask,
+                                                          YES) objectAtIndex:0];
+	GoBoltmobiledemoBoltDB * demo = GoBoltmobiledemoNewBoltDB(path);
+	[self addSkipBackupAttributeToItemAtPath:demo.path];
+	//Some DB Logic would go here
+	[demo close];
+}
+
+- (BOOL)addSkipBackupAttributeToItemAtPath:(NSString *) filePathString
+{
+    NSURL* URL= [NSURL fileURLWithPath: filePathString];
+    assert([[NSFileManager defaultManager] fileExistsAtPath: [URL path]]);
+
+    NSError *error = nil;
+    BOOL success = [URL setResourceValue: [NSNumber numberWithBool: YES]
+                                  forKey: NSURLIsExcludedFromBackupKey error: &error];
+    if(!success){
+        NSLog(@"Error excluding %@ from backup %@", [URL lastPathComponent], error);
+    }
+    return success;
+}
+
+```
+
+## Resources
+
+For more information on getting started with Bolt, check out the following articles:
+
+* [Intro to BoltDB: Painless Performant Persistence](http://npf.io/2014/07/intro-to-boltdb-painless-performant-persistence/) by [Nate Finch](https://github.com/natefinch).
+* [Bolt -- an embedded key/value database for Go](https://www.progville.com/go/bolt-embedded-db-golang/) by Progville
+
+
+## Comparison with other databases
+
+### Postgres, MySQL, & other relational databases
+
+Relational databases structure data into rows and are only accessible through
+the use of SQL. This approach provides flexibility in how you store and query
+your data but also incurs overhead in parsing and planning SQL statements. Bolt
+accesses all data by a byte slice key. This makes Bolt fast to read and write
+data by key but provides no built-in support for joining values together.
+
+Most relational databases (with the exception of SQLite) are standalone servers
+that run separately from your application. This gives your systems
+flexibility to connect multiple application servers to a single database
+server but also adds overhead in serializing and transporting data over the
+network. Bolt runs as a library included in your application so all data access
+has to go through your application's process. This brings data closer to your
+application but limits multi-process access to the data.
+
+
+### LevelDB, RocksDB
+
+LevelDB and its derivatives (RocksDB, HyperLevelDB) are similar to Bolt in that
+they are libraries bundled into the application, however, their underlying
+structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes
+random writes by using a write ahead log and multi-tiered, sorted files called
+SSTables. Bolt uses a B+tree internally and only a single file. Both approaches
+have trade-offs.
+
+If you require a high random write throughput (>10,000 w/sec) or you need to use
+spinning disks then LevelDB could be a good choice. If your application is
+read-heavy or does a lot of range scans then Bolt could be a good choice.
+
+One other important consideration is that LevelDB does not have transactions.
+It supports batch writing of key/value pairs and it supports read snapshots
+but it will not give you the ability to do a compare-and-swap operation safely.
+Bolt supports fully serializable ACID transactions.
+
+
+### LMDB
+
+Bolt was originally a port of LMDB so it is architecturally similar. Both use
+a B+tree, have ACID semantics with fully serializable transactions, and support
+lock-free MVCC using a single writer and multiple readers.
+
+The two projects have somewhat diverged. LMDB heavily focuses on raw performance
+while Bolt has focused on simplicity and ease of use. For example, LMDB allows
+several unsafe actions such as direct writes for the sake of performance. Bolt
+opts to disallow actions which can leave the database in a corrupted state. The
+only exception to this in Bolt is `DB.NoSync`.
+
+There are also a few differences in API. LMDB requires a maximum mmap size when
+opening an `mdb_env` whereas Bolt will handle incremental mmap resizing
+automatically. LMDB overloads the getter and setter functions with multiple
+flags whereas Bolt splits these specialized cases into their own functions.
+
+
+## Caveats & Limitations
+
+It's important to pick the right tool for the job and Bolt is no exception.
+Here are a few things to note when evaluating and using Bolt:
+
+* Bolt is good for read intensive workloads. Sequential write performance is
+  also fast but random writes can be slow. You can use `DB.Batch()` or add a
+  write-ahead log to help mitigate this issue.
+
+* Bolt uses a B+tree internally so there can be a lot of random page access.
+  SSDs provide a significant performance boost over spinning disks.
+
+* Try to avoid long running read transactions. Bolt uses copy-on-write so
+  old pages cannot be reclaimed while an old transaction is using them.
+
+* Byte slices returned from Bolt are only valid during a transaction. Once the
+  transaction has been committed or rolled back then the memory they point to
+  can be reused by a new page or can be unmapped from virtual memory and you'll
+  see an `unexpected fault address` panic when accessing it.
+
+* Be careful when using `Bucket.FillPercent`. Setting a high fill percent for
+  buckets that have random inserts will cause your database to have very poor
+  page utilization.
+
+* Use larger buckets in general. Smaller buckets cause poor page utilization
+  once they become larger than the page size (typically 4KB).
+
+* Bulk loading a lot of random writes into a new bucket can be slow as the
+  page will not split until the transaction is committed. Randomly inserting
+  more than 100,000 key/value pairs into a single new bucket in a single
+  transaction is not advised.
+
+* Bolt uses a memory-mapped file so the underlying operating system handles the
+  caching of the data. Typically, the OS will cache as much of the file as it
+  can in memory and will release memory as needed to other processes. This means
+  that Bolt can show very high memory usage when working with large databases.
+  However, this is expected and the OS will release memory as needed. Bolt can
+  handle databases much larger than the available physical RAM, provided its
+  memory-map fits in the process virtual address space. This may be problematic
+  on 32-bit systems.
+
+* The data structures in the Bolt database are memory mapped so the data file
+  will be endian specific. This means that you cannot copy a Bolt file from a
+  little endian machine to a big endian machine and have it work. For most
+  users this is not a concern since most modern CPUs are little endian.
+
+* Because of the way pages are laid out on disk, Bolt cannot truncate data files
+  and return free pages back to the disk. Instead, Bolt maintains a free list
+  of unused pages within its data file. These free pages can be reused by later
+  transactions. This works well for many use cases as databases generally tend
+  to grow. However, it's important to note that deleting large chunks of data
+  will not allow you to reclaim that space on disk.
+
+  For more information on page allocation, [see this comment][page-allocation].
+
+[page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638
+
+
+## Reading the Source
+
+Bolt is a relatively small code base (<3KLOC) for an embedded, serializable,
+transactional key/value database so it can be a good starting point for people
+interested in how databases work.
+
+The best places to start are the main entry points into Bolt:
+
+- `Open()` - Initializes the reference to the database. It's responsible for
+  creating the database if it doesn't exist, obtaining an exclusive lock on the
+  file, reading the meta pages, & memory-mapping the file.
+
+- `DB.Begin()` - Starts a read-only or read-write transaction depending on the
+  value of the `writable` argument. This requires briefly obtaining the "meta"
+  lock to keep track of open transactions. Only one read-write transaction can
+  exist at a time so the "rwlock" is acquired during the life of a read-write
+  transaction.
+
+- `Bucket.Put()` - Writes a key/value pair into a bucket. After validating the
+  arguments, a cursor is used to traverse the B+tree to the page and position
+  where the key & value will be written. Once the position is found, the bucket
+  materializes the underlying page and the page's parent pages into memory as
+  "nodes". These nodes are where mutations occur during read-write transactions.
+  These changes get flushed to disk during commit.
+
+- `Bucket.Get()` - Retrieves a key/value pair from a bucket. This uses a cursor
+  to move to the page & position of a key/value pair. During a read-only
+  transaction, the key and value data is returned as a direct reference to the
+  underlying mmap file so there's no allocation overhead. For read-write
+  transactions, this data may reference the mmap file or one of the in-memory
+  node values.
+
+- `Cursor` - This object is simply for traversing the B+tree of on-disk pages
+  or in-memory nodes. It can seek to a specific key, move to the first or last
+  value, or it can move forward or backward. The cursor handles the movement up
+  and down the B+tree transparently to the end user.
+
+- `Tx.Commit()` - Converts the in-memory dirty nodes and the list of free pages
+  into pages to be written to disk. Writing to disk then occurs in two phases.
+  First, the dirty pages are written to disk and an `fsync()` occurs. Second, a
+  new meta page with an incremented transaction ID is written and another
+  `fsync()` occurs. This two-phase write ensures that partially written data
+  pages are ignored in the event of a crash since the meta page pointing to them
+  is never written. Partially written meta pages are invalidated because they
+  are written with a checksum.
+
+If you have additional notes that could be helpful for others, please submit
+them via pull request.
+
+
+## Other Projects Using Bolt
+
+Below is a list of public, open source projects that use Bolt:
+
+* [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files.
+* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard.
+* [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside.
+* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb.
+* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics.
+* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects.
+* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday.
+* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations.
+* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite.
+* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin".
+* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka.
+* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed.
+* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt.
+* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site.
+* [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage.
+* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters.
+* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend.
+* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend.
+* [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server.
+* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read.
+* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics.
+* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data.
+* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system.
+* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware.
+* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs.
+* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems.
+* [stow](https://github.com/djherbis/stow) -  a persistence manager for objects
+  backed by boltdb.
+* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining
+  simple tx and key scans.
+* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets.
+* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service
+* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service.
+* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners.
+* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores.
+* [Storm](https://github.com/asdine/storm) - Simple and powerful ORM for BoltDB.
+* [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB.
+* [SimpleBolt](https://github.com/xyproto/simplebolt) - A simple way to use BoltDB. Deals mainly with strings.
+* [Algernon](https://github.com/xyproto/algernon) - A HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend.
+* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files.
+* [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Go that uses BoltDB for persistent key/value storage and the high-performance HTTPRouter for routing.
+
+If you are using Bolt in a project please send a pull request to add it to the list.
diff --git a/vendor/github.com/coreos/bbolt/appveyor.yml b/vendor/github.com/coreos/bbolt/appveyor.yml
new file mode 100644
index 0000000..6e26e94
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/appveyor.yml
@@ -0,0 +1,18 @@
+version: "{build}"
+
+os: Windows Server 2012 R2
+
+clone_folder: c:\gopath\src\github.com\boltdb\bolt
+
+environment:
+  GOPATH: c:\gopath
+
+install:
+  - echo %PATH%
+  - echo %GOPATH%
+  - go version
+  - go env
+  - go get -v -t ./...
+
+build_script:
+  - go test -v ./...
diff --git a/vendor/github.com/coreos/bbolt/bolt_386.go b/vendor/github.com/coreos/bbolt/bolt_386.go
new file mode 100644
index 0000000..e659bfb
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/bolt_386.go
@@ -0,0 +1,7 @@
+package bolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0x7FFFFFFF // 2GB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0xFFFFFFF
diff --git a/vendor/github.com/coreos/bbolt/bolt_amd64.go b/vendor/github.com/coreos/bbolt/bolt_amd64.go
new file mode 100644
index 0000000..cca6b7e
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/bolt_amd64.go
@@ -0,0 +1,7 @@
+package bolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0xFFFFFFFFFFFF // 256TB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0x7FFFFFFF
diff --git a/vendor/github.com/coreos/bbolt/bolt_arm.go b/vendor/github.com/coreos/bbolt/bolt_arm.go
new file mode 100644
index 0000000..e659bfb
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/bolt_arm.go
@@ -0,0 +1,7 @@
+package bolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0x7FFFFFFF // 2GB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0xFFFFFFF
diff --git a/vendor/github.com/coreos/bbolt/bolt_arm64.go b/vendor/github.com/coreos/bbolt/bolt_arm64.go
new file mode 100644
index 0000000..6d23093
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/bolt_arm64.go
@@ -0,0 +1,9 @@
+// +build arm64
+
+package bolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0xFFFFFFFFFFFF // 256TB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0x7FFFFFFF
diff --git a/vendor/github.com/coreos/bbolt/bolt_linux.go b/vendor/github.com/coreos/bbolt/bolt_linux.go
new file mode 100644
index 0000000..2b67666
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/bolt_linux.go
@@ -0,0 +1,10 @@
+package bolt
+
+import (
+	"syscall"
+)
+
+// fdatasync flushes written data to a file descriptor.
+func fdatasync(db *DB) error {
+	return syscall.Fdatasync(int(db.file.Fd()))
+}
diff --git a/vendor/github.com/coreos/bbolt/bolt_openbsd.go b/vendor/github.com/coreos/bbolt/bolt_openbsd.go
new file mode 100644
index 0000000..7058c3d
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/bolt_openbsd.go
@@ -0,0 +1,27 @@
+package bolt
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+const (
+	msAsync      = 1 << iota // perform asynchronous writes
+	msSync                   // perform synchronous writes
+	msInvalidate             // invalidate cached data
+)
+
+func msync(db *DB) error {
+	_, _, errno := syscall.Syscall(syscall.SYS_MSYNC, uintptr(unsafe.Pointer(db.data)), uintptr(db.datasz), msInvalidate)
+	if errno != 0 {
+		return errno
+	}
+	return nil
+}
+
+func fdatasync(db *DB) error {
+	if db.data != nil {
+		return msync(db)
+	}
+	return db.file.Sync()
+}
diff --git a/vendor/github.com/coreos/bbolt/bolt_ppc.go b/vendor/github.com/coreos/bbolt/bolt_ppc.go
new file mode 100644
index 0000000..645ddc3
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/bolt_ppc.go
@@ -0,0 +1,9 @@
+// +build ppc
+
+package bolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0x7FFFFFFF // 2GB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0xFFFFFFF // 256MB
diff --git a/vendor/github.com/coreos/bbolt/bolt_ppc64.go b/vendor/github.com/coreos/bbolt/bolt_ppc64.go
new file mode 100644
index 0000000..2dc6be0
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/bolt_ppc64.go
@@ -0,0 +1,9 @@
+// +build ppc64
+
+package bolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0xFFFFFFFFFFFF // 256TB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0x7FFFFFFF // 2GB
diff --git a/vendor/github.com/coreos/bbolt/bolt_ppc64le.go b/vendor/github.com/coreos/bbolt/bolt_ppc64le.go
new file mode 100644
index 0000000..8351e12
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/bolt_ppc64le.go
@@ -0,0 +1,9 @@
+// +build ppc64le
+
+package bolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0xFFFFFFFFFFFF // 256TB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0x7FFFFFFF // 2GB
diff --git a/vendor/github.com/coreos/bbolt/bolt_s390x.go b/vendor/github.com/coreos/bbolt/bolt_s390x.go
new file mode 100644
index 0000000..f4dd26b
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/bolt_s390x.go
@@ -0,0 +1,9 @@
+// +build s390x
+
+package bolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0xFFFFFFFFFFFF // 256TB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0x7FFFFFFF // 2GB
diff --git a/vendor/github.com/coreos/bbolt/bolt_unix.go b/vendor/github.com/coreos/bbolt/bolt_unix.go
new file mode 100644
index 0000000..cad62dd
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/bolt_unix.go
@@ -0,0 +1,89 @@
+// +build !windows,!plan9,!solaris
+
+package bolt
+
+import (
+	"fmt"
+	"os"
+	"syscall"
+	"time"
+	"unsafe"
+)
+
+// flock acquires an advisory lock on a file descriptor.
+func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
+	var t time.Time
+	for {
+		// If we're beyond our timeout then return an error.
+		// This can only occur after we've attempted a flock once.
+		if t.IsZero() {
+			t = time.Now()
+		} else if timeout > 0 && time.Since(t) > timeout {
+			return ErrTimeout
+		}
+		flag := syscall.LOCK_SH
+		if exclusive {
+			flag = syscall.LOCK_EX
+		}
+
+		// Attempt to obtain the shared or exclusive lock without blocking.
+		err := syscall.Flock(int(db.file.Fd()), flag|syscall.LOCK_NB)
+		if err == nil {
+			return nil
+		} else if err != syscall.EWOULDBLOCK {
+			return err
+		}
+
+		// Wait for a bit and try again.
+		time.Sleep(50 * time.Millisecond)
+	}
+}
+
+// funlock releases an advisory lock on a file descriptor.
+func funlock(db *DB) error {
+	return syscall.Flock(int(db.file.Fd()), syscall.LOCK_UN)
+}
+
+// mmap memory maps a DB's data file.
+func mmap(db *DB, sz int) error {
+	// Map the data file to memory.
+	b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
+	if err != nil {
+		return err
+	}
+
+	// Advise the kernel that the mmap is accessed randomly.
+	if err := madvise(b, syscall.MADV_RANDOM); err != nil {
+		return fmt.Errorf("madvise: %s", err)
+	}
+
+	// Save the original byte slice and convert to a byte array pointer.
+	db.dataref = b
+	db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
+	db.datasz = sz
+	return nil
+}
+
+// munmap unmaps a DB's data file from memory.
+func munmap(db *DB) error {
+	// Ignore the unmap if we have no mapped data.
+	if db.dataref == nil {
+		return nil
+	}
+
+	// Unmap using the original byte slice.
+	err := syscall.Munmap(db.dataref)
+	db.dataref = nil
+	db.data = nil
+	db.datasz = 0
+	return err
+}
+
+// NOTE: This function is copied from stdlib because it is not available on darwin.
+func madvise(b []byte, advice int) (err error) {
+	_, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), uintptr(advice))
+	if e1 != 0 {
+		err = e1
+	}
+	return
+}
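The loop in flock above is the standard non-blocking advisory-lock pattern: request LOCK_SH or LOCK_EX together with LOCK_NB, and if the lock is held elsewhere, sleep briefly and retry until the timeout expires. A minimal standalone sketch of the same pattern, assuming a plain *os.File and a hypothetical errLockTimeout sentinel in place of Bolt's *DB and ErrTimeout:

package example

import (
	"errors"
	"os"
	"syscall"
	"time"
)

// errLockTimeout is a hypothetical stand-in for Bolt's ErrTimeout.
var errLockTimeout = errors.New("flock: timeout")

// lockWithTimeout mirrors the vendored flock loop for an arbitrary *os.File.
func lockWithTimeout(f *os.File, exclusive bool, timeout time.Duration) error {
	flag := syscall.LOCK_SH
	if exclusive {
		flag = syscall.LOCK_EX
	}
	deadline := time.Now().Add(timeout)
	for {
		// LOCK_NB makes the call fail with EWOULDBLOCK instead of blocking.
		err := syscall.Flock(int(f.Fd()), flag|syscall.LOCK_NB)
		if err == nil {
			return nil
		} else if err != syscall.EWOULDBLOCK {
			return err
		}
		if timeout > 0 && time.Now().After(deadline) {
			return errLockTimeout
		}
		time.Sleep(50 * time.Millisecond) // lock held elsewhere; back off and retry
	}
}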
diff --git a/vendor/github.com/coreos/bbolt/bolt_unix_solaris.go b/vendor/github.com/coreos/bbolt/bolt_unix_solaris.go
new file mode 100644
index 0000000..307bf2b
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/bolt_unix_solaris.go
@@ -0,0 +1,90 @@
+package bolt
+
+import (
+	"fmt"
+	"os"
+	"syscall"
+	"time"
+	"unsafe"
+
+	"golang.org/x/sys/unix"
+)
+
+// flock acquires an advisory lock on a file descriptor.
+func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
+	var t time.Time
+	for {
+		// If we're beyond our timeout then return an error.
+		// This can only occur after we've attempted a flock once.
+		if t.IsZero() {
+			t = time.Now()
+		} else if timeout > 0 && time.Since(t) > timeout {
+			return ErrTimeout
+		}
+		var lock syscall.Flock_t
+		lock.Start = 0
+		lock.Len = 0
+		lock.Pid = 0
+		lock.Whence = 0
+		if exclusive {
+			lock.Type = syscall.F_WRLCK
+		} else {
+			lock.Type = syscall.F_RDLCK
+		}
+		err := syscall.FcntlFlock(db.file.Fd(), syscall.F_SETLK, &lock)
+		if err == nil {
+			return nil
+		} else if err != syscall.EAGAIN {
+			return err
+		}
+
+		// Wait for a bit and try again.
+		time.Sleep(50 * time.Millisecond)
+	}
+}
+
+// funlock releases an advisory lock on a file descriptor.
+func funlock(db *DB) error {
+	var lock syscall.Flock_t
+	lock.Start = 0
+	lock.Len = 0
+	lock.Type = syscall.F_UNLCK
+	lock.Whence = 0
+	return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock)
+}
+
+// mmap memory maps a DB's data file.
+func mmap(db *DB, sz int) error {
+	// Map the data file to memory.
+	b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
+	if err != nil {
+		return err
+	}
+
+	// Advise the kernel that the mmap is accessed randomly.
+	if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil {
+		return fmt.Errorf("madvise: %s", err)
+	}
+
+	// Save the original byte slice and convert to a byte array pointer.
+	db.dataref = b
+	db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
+	db.datasz = sz
+	return nil
+}
+
+// munmap unmaps a DB's data file from memory.
+func munmap(db *DB) error {
+	// Ignore the unmap if we have no mapped data.
+	if db.dataref == nil {
+		return nil
+	}
+
+	// Unmap using the original byte slice.
+	err := unix.Munmap(db.dataref)
+	db.dataref = nil
+	db.data = nil
+	db.datasz = 0
+	return err
+}
diff --git a/vendor/github.com/coreos/bbolt/bolt_windows.go b/vendor/github.com/coreos/bbolt/bolt_windows.go
new file mode 100644
index 0000000..d538e6a
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/bolt_windows.go
@@ -0,0 +1,144 @@
+package bolt
+
+import (
+	"fmt"
+	"os"
+	"syscall"
+	"time"
+	"unsafe"
+)
+
+// LockFileEx code derived from golang build filemutex_windows.go @ v1.5.1
+var (
+	modkernel32      = syscall.NewLazyDLL("kernel32.dll")
+	procLockFileEx   = modkernel32.NewProc("LockFileEx")
+	procUnlockFileEx = modkernel32.NewProc("UnlockFileEx")
+)
+
+const (
+	lockExt = ".lock"
+
+	// see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx
+	flagLockExclusive       = 2
+	flagLockFailImmediately = 1
+
+	// see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx
+	errLockViolation syscall.Errno = 0x21
+)
+
+func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
+	r, _, err := procLockFileEx.Call(uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)))
+	if r == 0 {
+		return err
+	}
+	return nil
+}
+
+func unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
+	r, _, err := procUnlockFileEx.Call(uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0)
+	if r == 0 {
+		return err
+	}
+	return nil
+}
+
+// fdatasync flushes written data to a file descriptor.
+func fdatasync(db *DB) error {
+	return db.file.Sync()
+}
+
+// flock acquires an advisory lock on a file descriptor.
+func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
+	// Create a separate lock file on windows because a process
+	// cannot share an exclusive lock on the same file. This is
+	// needed during Tx.WriteTo().
+	f, err := os.OpenFile(db.path+lockExt, os.O_CREATE, mode)
+	if err != nil {
+		return err
+	}
+	db.lockfile = f
+
+	var t time.Time
+	for {
+		// If we're beyond our timeout then return an error.
+		// This can only occur after we've attempted a flock once.
+		if t.IsZero() {
+			t = time.Now()
+		} else if timeout > 0 && time.Since(t) > timeout {
+			return ErrTimeout
+		}
+
+		var flag uint32 = flagLockFailImmediately
+		if exclusive {
+			flag |= flagLockExclusive
+		}
+
+		err := lockFileEx(syscall.Handle(db.lockfile.Fd()), flag, 0, 1, 0, &syscall.Overlapped{})
+		if err == nil {
+			return nil
+		} else if err != errLockViolation {
+			return err
+		}
+
+		// Wait for a bit and try again.
+		time.Sleep(50 * time.Millisecond)
+	}
+}
+
+// funlock releases an advisory lock on a file descriptor.
+func funlock(db *DB) error {
+	err := unlockFileEx(syscall.Handle(db.lockfile.Fd()), 0, 1, 0, &syscall.Overlapped{})
+	db.lockfile.Close()
+	os.Remove(db.path + lockExt)
+	return err
+}
+
+// mmap memory maps a DB's data file.
+// Based on: https://github.com/edsrzf/mmap-go
+func mmap(db *DB, sz int) error {
+	if !db.readOnly {
+		// Truncate the database to the size of the mmap.
+		if err := db.file.Truncate(int64(sz)); err != nil {
+			return fmt.Errorf("truncate: %s", err)
+		}
+	}
+
+	// Open a file mapping handle. CreateFileMapping takes the 64-bit
+	// maximum size split into its high and low 32-bit halves.
+	sizehi := uint32(sz >> 32)
+	sizelo := uint32(sz & 0xffffffff)
+	h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizehi, sizelo, nil)
+	if h == 0 {
+		return os.NewSyscallError("CreateFileMapping", errno)
+	}
+
+	// Create the memory map.
+	addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(sz))
+	if addr == 0 {
+		return os.NewSyscallError("MapViewOfFile", errno)
+	}
+
+	// Close mapping handle.
+	if err := syscall.CloseHandle(syscall.Handle(h)); err != nil {
+		return os.NewSyscallError("CloseHandle", err)
+	}
+
+	// Convert to a byte array.
+	db.data = ((*[maxMapSize]byte)(unsafe.Pointer(addr)))
+	db.datasz = sz
+
+	return nil
+}
+
+// munmap unmaps a pointer from a file.
+// Based on: https://github.com/edsrzf/mmap-go
+func munmap(db *DB) error {
+	if db.data == nil {
+		return nil
+	}
+
+	addr := (uintptr)(unsafe.Pointer(&db.data[0]))
+	if err := syscall.UnmapViewOfFile(addr); err != nil {
+		return os.NewSyscallError("UnmapViewOfFile", err)
+	}
+	return nil
+}
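CreateFileMapping takes the 64-bit maximum mapping size as two 32-bit halves, high word first, which is why mmap above splits sz before the call. A quick worked example of that split, using an assumed size of 5GB plus 123 bytes:

package main

import "fmt"

func main() {
	sz := int64(5<<30 + 123)          // assumed example size: 5GB + 123 bytes
	sizehi := uint32(sz >> 32)        // upper 32 bits -> 1
	sizelo := uint32(sz & 0xffffffff) // lower 32 bits -> 1<<30 + 123
	fmt.Println(sizehi, sizelo)       // prints: 1 1073741947
}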
diff --git a/vendor/github.com/coreos/bbolt/boltsync_unix.go b/vendor/github.com/coreos/bbolt/boltsync_unix.go
new file mode 100644
index 0000000..f504425
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/boltsync_unix.go
@@ -0,0 +1,8 @@
+// +build !windows,!plan9,!linux,!openbsd
+
+package bolt
+
+// fdatasync flushes written data to a file descriptor.
+func fdatasync(db *DB) error {
+	return db.file.Sync()
+}
diff --git a/vendor/github.com/coreos/bbolt/bucket.go b/vendor/github.com/coreos/bbolt/bucket.go
new file mode 100644
index 0000000..d2f8c52
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/bucket.go
@@ -0,0 +1,748 @@
+package bolt
+
+import (
+	"bytes"
+	"fmt"
+	"unsafe"
+)
+
+const (
+	// MaxKeySize is the maximum length of a key, in bytes.
+	MaxKeySize = 32768
+
+	// MaxValueSize is the maximum length of a value, in bytes.
+	MaxValueSize = (1 << 31) - 2
+)
+
+const (
+	maxUint = ^uint(0)
+	minUint = 0
+	maxInt  = int(^uint(0) >> 1)
+	minInt  = -maxInt - 1
+)
+
+const bucketHeaderSize = int(unsafe.Sizeof(bucket{}))
+
+const (
+	minFillPercent = 0.1
+	maxFillPercent = 1.0
+)
+
+// DefaultFillPercent is the percentage that split pages are filled.
+// This value can be changed by setting Bucket.FillPercent.
+const DefaultFillPercent = 0.5
+
+// Bucket represents a collection of key/value pairs inside the database.
+type Bucket struct {
+	*bucket
+	tx       *Tx                // the associated transaction
+	buckets  map[string]*Bucket // subbucket cache
+	page     *page              // inline page reference
+	rootNode *node              // materialized node for the root page.
+	nodes    map[pgid]*node     // node cache
+
+	// Sets the threshold for filling nodes when they split. By default,
+	// the bucket will fill to 50% but it can be useful to increase this
+	// amount if you know that your write workloads are mostly append-only.
+	//
+	// This value is not persisted across transactions, so it must be set in every Tx.
+	FillPercent float64
+}
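Because FillPercent is not persisted, callers who want fuller pages for append-mostly workloads must set it inside each write transaction. A minimal sketch, assuming the vendored import path (the package name is still bolt) and a hypothetical "events" bucket:

package example

import bolt "github.com/coreos/bbolt"

func appendTight(db *bolt.DB) error {
	return db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("events"))
		if err != nil {
			return err
		}
		b.FillPercent = 0.9 // pack split pages tighter; workload is append-mostly
		return b.Put([]byte("2018-01-01T00:00:00Z"), []byte("payload"))
	})
}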
+
+// bucket represents the on-file representation of a bucket.
+// This is stored as the "value" of a bucket key. If the bucket is small enough,
+// then its root page can be stored inline in the "value", after the bucket
+// header. In the case of inline buckets, the "root" will be 0.
+type bucket struct {
+	root     pgid   // page id of the bucket's root-level page
+	sequence uint64 // monotonically incrementing, used by NextSequence()
+}
+
+// newBucket returns a new bucket associated with a transaction.
+func newBucket(tx *Tx) Bucket {
+	var b = Bucket{tx: tx, FillPercent: DefaultFillPercent}
+	if tx.writable {
+		b.buckets = make(map[string]*Bucket)
+		b.nodes = make(map[pgid]*node)
+	}
+	return b
+}
+
+// Tx returns the tx of the bucket.
+func (b *Bucket) Tx() *Tx {
+	return b.tx
+}
+
+// Root returns the root of the bucket.
+func (b *Bucket) Root() pgid {
+	return b.root
+}
+
+// Writable returns whether the bucket is writable.
+func (b *Bucket) Writable() bool {
+	return b.tx.writable
+}
+
+// Cursor creates a cursor associated with the bucket.
+// The cursor is only valid as long as the transaction is open.
+// Do not use a cursor after the transaction is closed.
+func (b *Bucket) Cursor() *Cursor {
+	// Update transaction statistics.
+	b.tx.stats.CursorCount++
+
+	// Allocate and return a cursor.
+	return &Cursor{
+		bucket: b,
+		stack:  make([]elemRef, 0),
+	}
+}
+
+// Bucket retrieves a nested bucket by name.
+// Returns nil if the bucket does not exist.
+// The bucket instance is only valid for the lifetime of the transaction.
+func (b *Bucket) Bucket(name []byte) *Bucket {
+	if b.buckets != nil {
+		if child := b.buckets[string(name)]; child != nil {
+			return child
+		}
+	}
+
+	// Move cursor to key.
+	c := b.Cursor()
+	k, v, flags := c.seek(name)
+
+	// Return nil if the key doesn't exist or it is not a bucket.
+	if !bytes.Equal(name, k) || (flags&bucketLeafFlag) == 0 {
+		return nil
+	}
+
+	// Otherwise create a bucket and cache it.
+	var child = b.openBucket(v)
+	if b.buckets != nil {
+		b.buckets[string(name)] = child
+	}
+
+	return child
+}
+
+// openBucket re-interprets a sub-bucket value from a parent page into a Bucket.
+func (b *Bucket) openBucket(value []byte) *Bucket {
+	var child = newBucket(b.tx)
+
+	// If this is a writable transaction then we need to copy the bucket entry.
+	// Read-only transactions can point directly at the mmap entry.
+	if b.tx.writable {
+		child.bucket = &bucket{}
+		*child.bucket = *(*bucket)(unsafe.Pointer(&value[0]))
+	} else {
+		child.bucket = (*bucket)(unsafe.Pointer(&value[0]))
+	}
+
+	// Save a reference to the inline page if the bucket is inline.
+	if child.root == 0 {
+		child.page = (*page)(unsafe.Pointer(&value[bucketHeaderSize]))
+	}
+
+	return &child
+}
+
+// CreateBucket creates a new bucket at the given key and returns the new bucket.
+// Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long.
+// The bucket instance is only valid for the lifetime of the transaction.
+func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) {
+	if b.tx.db == nil {
+		return nil, ErrTxClosed
+	} else if !b.tx.writable {
+		return nil, ErrTxNotWritable
+	} else if len(key) == 0 {
+		return nil, ErrBucketNameRequired
+	}
+
+	// Move cursor to correct position.
+	c := b.Cursor()
+	k, _, flags := c.seek(key)
+
+	// Return an error if there is an existing key.
+	if bytes.Equal(key, k) {
+		if (flags & bucketLeafFlag) != 0 {
+			return nil, ErrBucketExists
+		} else {
+			return nil, ErrIncompatibleValue
+		}
+	}
+
+	// Create empty, inline bucket.
+	var bucket = Bucket{
+		bucket:      &bucket{},
+		rootNode:    &node{isLeaf: true},
+		FillPercent: DefaultFillPercent,
+	}
+	var value = bucket.write()
+
+	// Insert into node.
+	key = cloneBytes(key)
+	c.node().put(key, key, value, 0, bucketLeafFlag)
+
+	// Since subbuckets are not allowed on inline buckets, we need to
+	// dereference the inline page, if it exists. This will cause the bucket
+	// to be treated as a regular, non-inline bucket for the rest of the tx.
+	b.page = nil
+
+	return b.Bucket(key), nil
+}
+
+// CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it.
+// Returns an error if the bucket name is blank, or if the bucket name is too long.
+// The bucket instance is only valid for the lifetime of the transaction.
+func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) {
+	child, err := b.CreateBucket(key)
+	if err == ErrBucketExists {
+		return b.Bucket(key), nil
+	} else if err != nil {
+		return nil, err
+	}
+	return child, nil
+}
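CreateBucket and CreateBucketIfNotExists are the usual entry points for setting up storage inside a write transaction. A minimal usage sketch, assuming a hypothetical "widgets" bucket:

package example

import bolt "github.com/coreos/bbolt"

func putWidget(db *bolt.DB) error {
	return db.Update(func(tx *bolt.Tx) error {
		// Idempotent: returns the existing bucket on ErrBucketExists.
		b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
		if err != nil {
			return err
		}
		return b.Put([]byte("foo"), []byte("bar"))
	})
}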
+
+// DeleteBucket deletes a bucket at the given key.
+// Returns an error if the bucket does not exist, or if the key represents a non-bucket value.
+func (b *Bucket) DeleteBucket(key []byte) error {
+	if b.tx.db == nil {
+		return ErrTxClosed
+	} else if !b.Writable() {
+		return ErrTxNotWritable
+	}
+
+	// Move cursor to correct position.
+	c := b.Cursor()
+	k, _, flags := c.seek(key)
+
+	// Return an error if bucket doesn't exist or is not a bucket.
+	if !bytes.Equal(key, k) {
+		return ErrBucketNotFound
+	} else if (flags & bucketLeafFlag) == 0 {
+		return ErrIncompatibleValue
+	}
+
+	// Recursively delete all child buckets.
+	child := b.Bucket(key)
+	err := child.ForEach(func(k, v []byte) error {
+		if v == nil {
+			if err := child.DeleteBucket(k); err != nil {
+				return fmt.Errorf("delete bucket: %s", err)
+			}
+		}
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+
+	// Remove cached copy.
+	delete(b.buckets, string(key))
+
+	// Release all bucket pages to freelist.
+	child.nodes = nil
+	child.rootNode = nil
+	child.free()
+
+	// Delete the node if we have a matching key.
+	c.node().del(key)
+
+	return nil
+}
+
+// Get retrieves the value for a key in the bucket.
+// Returns a nil value if the key does not exist or if the key is a nested bucket.
+// The returned value is only valid for the life of the transaction.
+func (b *Bucket) Get(key []byte) []byte {
+	k, v, flags := b.Cursor().seek(key)
+
+	// Return nil if this is a bucket.
+	if (flags & bucketLeafFlag) != 0 {
+		return nil
+	}
+
+	// If our target node isn't the same key as what's passed in then return nil.
+	if !bytes.Equal(key, k) {
+		return nil
+	}
+	return v
+}
+
+// Put sets the value for a key in the bucket.
+// If the key exists then its previous value will be overwritten.
+// Supplied value must remain valid for the life of the transaction.
+// Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large.
+func (b *Bucket) Put(key []byte, value []byte) error {
+	if b.tx.db == nil {
+		return ErrTxClosed
+	} else if !b.Writable() {
+		return ErrTxNotWritable
+	} else if len(key) == 0 {
+		return ErrKeyRequired
+	} else if len(key) > MaxKeySize {
+		return ErrKeyTooLarge
+	} else if int64(len(value)) > MaxValueSize {
+		return ErrValueTooLarge
+	}
+
+	// Move cursor to correct position.
+	c := b.Cursor()
+	k, _, flags := c.seek(key)
+
+	// Return an error if there is an existing key with a bucket value.
+	if bytes.Equal(key, k) && (flags&bucketLeafFlag) != 0 {
+		return ErrIncompatibleValue
+	}
+
+	// Insert into node.
+	key = cloneBytes(key)
+	c.node().put(key, key, value, 0, 0)
+
+	return nil
+}
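As the doc comments note, slices returned by Get (and handed to Put) are only valid for the life of the transaction, so values that outlive it must be copied, just as cloneBytes below copies keys. A sketch, reusing the hypothetical "widgets" bucket:

package example

import bolt "github.com/coreos/bbolt"

func readWidget(db *bolt.DB) ([]byte, error) {
	var out []byte
	err := db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("widgets"))
		if b == nil {
			return bolt.ErrBucketNotFound
		}
		// Copy the value out: its backing memory is only valid inside View.
		if v := b.Get([]byte("foo")); v != nil {
			out = append([]byte(nil), v...)
		}
		return nil
	})
	return out, err
}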
+
+// Delete removes a key from the bucket.
+// If the key does not exist then nothing is done and a nil error is returned.
+// Returns an error if the bucket was created from a read-only transaction.
+func (b *Bucket) Delete(key []byte) error {
+	if b.tx.db == nil {
+		return ErrTxClosed
+	} else if !b.Writable() {
+		return ErrTxNotWritable
+	}
+
+	// Move cursor to correct position.
+	c := b.Cursor()
+	_, _, flags := c.seek(key)
+
+	// Return an error if there is an existing bucket value.
+	if (flags & bucketLeafFlag) != 0 {
+		return ErrIncompatibleValue
+	}
+
+	// Delete the node if we have a matching key.
+	c.node().del(key)
+
+	return nil
+}
+
+// NextSequence returns an autoincrementing integer for the bucket.
+func (b *Bucket) NextSequence() (uint64, error) {
+	if b.tx.db == nil {
+		return 0, ErrTxClosed
+	} else if !b.Writable() {
+		return 0, ErrTxNotWritable
+	}
+
+	// Materialize the root node if it hasn't been already so that the
+	// bucket will be saved during commit.
+	if b.rootNode == nil {
+		_ = b.node(b.root, nil)
+	}
+
+	// Increment and return the sequence.
+	b.bucket.sequence++
+	return b.bucket.sequence, nil
+}
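NextSequence increments the counter stored in the bucket header, which makes it a convenient source of auto-incrementing record IDs inside a write transaction. A sketch, assuming a hypothetical "events" bucket:

package example

import (
	"encoding/binary"

	bolt "github.com/coreos/bbolt"
)

func appendEvent(db *bolt.DB, payload []byte) error {
	return db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("events"))
		if err != nil {
			return err
		}
		id, err := b.NextSequence() // persisted in the bucket header
		if err != nil {
			return err
		}
		key := make([]byte, 8)
		binary.BigEndian.PutUint64(key, id) // big-endian keeps keys numerically ordered
		return b.Put(key, payload)
	})
}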
+
+// ForEach executes a function for each key/value pair in a bucket.
+// If the provided function returns an error then the iteration is stopped and
+// the error is returned to the caller. The provided function must not modify
+// the bucket; this will result in undefined behavior.
+func (b *Bucket) ForEach(fn func(k, v []byte) error) error {
+	if b.tx.db == nil {
+		return ErrTxClosed
+	}
+	c := b.Cursor()
+	for k, v := c.First(); k != nil; k, v = c.Next() {
+		if err := fn(k, v); err != nil {
+			return err
+		}
+	}
+	return nil
+}
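ForEach visits keys in sorted order; nested buckets show up with a nil value, the same convention the Cursor type documents. A read-only iteration sketch:

package example

import (
	"fmt"

	bolt "github.com/coreos/bbolt"
)

func dumpBucket(db *bolt.DB, name []byte) error {
	return db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket(name)
		if b == nil {
			return bolt.ErrBucketNotFound
		}
		return b.ForEach(func(k, v []byte) error {
			if v == nil {
				fmt.Printf("%s/ (sub-bucket)\n", k) // nested buckets have nil values
				return nil
			}
			fmt.Printf("%s=%s\n", k, v)
			return nil // returning an error would stop the iteration
		})
	})
}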
+
+// Stats returns stats on a bucket.
+func (b *Bucket) Stats() BucketStats {
+	var s, subStats BucketStats
+	pageSize := b.tx.db.pageSize
+	s.BucketN += 1
+	if b.root == 0 {
+		s.InlineBucketN += 1
+	}
+	b.forEachPage(func(p *page, depth int) {
+		if (p.flags & leafPageFlag) != 0 {
+			s.KeyN += int(p.count)
+
+			// used totals the used bytes for the page
+			used := pageHeaderSize
+
+			if p.count != 0 {
+				// If page has any elements, add all element headers.
+				used += leafPageElementSize * int(p.count-1)
+
+				// Add all element key, value sizes.
+				// The computation takes advantage of the fact that the position
+				// of the last element's key/value equals the total of the sizes
+				// of all previous elements' keys and values.
+				// It also includes the last element's header.
+				lastElement := p.leafPageElement(p.count - 1)
+				used += int(lastElement.pos + lastElement.ksize + lastElement.vsize)
+			}
+
+			if b.root == 0 {
+				// For inlined bucket just update the inline stats
+				s.InlineBucketInuse += used
+			} else {
+				// For non-inlined bucket update all the leaf stats
+				s.LeafPageN++
+				s.LeafInuse += used
+				s.LeafOverflowN += int(p.overflow)
+
+				// Collect stats from sub-buckets.
+				// Do that by iterating over all element headers
+				// looking for the ones with the bucketLeafFlag.
+				for i := uint16(0); i < p.count; i++ {
+					e := p.leafPageElement(i)
+					if (e.flags & bucketLeafFlag) != 0 {
+						// For any bucket element, open the element value
+						// and recursively call Stats on the contained bucket.
+						subStats.Add(b.openBucket(e.value()).Stats())
+					}
+				}
+			}
+		} else if (p.flags & branchPageFlag) != 0 {
+			s.BranchPageN++
+			lastElement := p.branchPageElement(p.count - 1)
+
+			// used totals the used bytes for the page
+			// Add header and all element headers.
+			used := pageHeaderSize + (branchPageElementSize * int(p.count-1))
+
+			// Add size of all keys and values.
+			// Again, use the fact that the last element's position equals the
+			// total of the key and value sizes of all previous elements.
+			used += int(lastElement.pos + lastElement.ksize)
+			s.BranchInuse += used
+			s.BranchOverflowN += int(p.overflow)
+		}
+
+		// Keep track of maximum page depth.
+		if depth+1 > s.Depth {
+			s.Depth = (depth + 1)
+		}
+	})
+
+	// Alloc stats can be computed from page counts and pageSize.
+	s.BranchAlloc = (s.BranchPageN + s.BranchOverflowN) * pageSize
+	s.LeafAlloc = (s.LeafPageN + s.LeafOverflowN) * pageSize
+
+	// Add the max depth of sub-buckets to get total nested depth.
+	s.Depth += subStats.Depth
+	// Add the stats for all sub-buckets
+	s.Add(subStats)
+	return s
+}
+
+// forEachPage iterates over every page in a bucket, including inline pages.
+func (b *Bucket) forEachPage(fn func(*page, int)) {
+	// If we have an inline page then just use that.
+	if b.page != nil {
+		fn(b.page, 0)
+		return
+	}
+
+	// Otherwise traverse the page hierarchy.
+	b.tx.forEachPage(b.root, 0, fn)
+}
+
+// forEachPageNode iterates over every page (or node) in a bucket.
+// This also includes inline pages.
+func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) {
+	// If we have an inline page or root node then just use that.
+	if b.page != nil {
+		fn(b.page, nil, 0)
+		return
+	}
+	b._forEachPageNode(b.root, 0, fn)
+}
+
+func (b *Bucket) _forEachPageNode(pgid pgid, depth int, fn func(*page, *node, int)) {
+	var p, n = b.pageNode(pgid)
+
+	// Execute function.
+	fn(p, n, depth)
+
+	// Recursively loop over children.
+	if p != nil {
+		if (p.flags & branchPageFlag) != 0 {
+			for i := 0; i < int(p.count); i++ {
+				elem := p.branchPageElement(uint16(i))
+				b._forEachPageNode(elem.pgid, depth+1, fn)
+			}
+		}
+	} else {
+		if !n.isLeaf {
+			for _, inode := range n.inodes {
+				b._forEachPageNode(inode.pgid, depth+1, fn)
+			}
+		}
+	}
+}
+
+// spill writes all the nodes for this bucket to dirty pages.
+func (b *Bucket) spill() error {
+	// Spill all child buckets first.
+	for name, child := range b.buckets {
+		// If the child bucket is small enough and it has no child buckets then
+		// write it inline into the parent bucket's page. Otherwise spill it
+		// like a normal bucket and make the parent value a pointer to the page.
+		var value []byte
+		if child.inlineable() {
+			child.free()
+			value = child.write()
+		} else {
+			if err := child.spill(); err != nil {
+				return err
+			}
+
+			// Update the child bucket header in this bucket.
+			value = make([]byte, unsafe.Sizeof(bucket{}))
+			var bucket = (*bucket)(unsafe.Pointer(&value[0]))
+			*bucket = *child.bucket
+		}
+
+		// Skip writing the bucket if there are no materialized nodes.
+		if child.rootNode == nil {
+			continue
+		}
+
+		// Update parent node.
+		var c = b.Cursor()
+		k, _, flags := c.seek([]byte(name))
+		if !bytes.Equal([]byte(name), k) {
+			panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k))
+		}
+		if flags&bucketLeafFlag == 0 {
+			panic(fmt.Sprintf("unexpected bucket header flag: %x", flags))
+		}
+		c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag)
+	}
+
+	// Ignore if there's not a materialized root node.
+	if b.rootNode == nil {
+		return nil
+	}
+
+	// Spill nodes.
+	if err := b.rootNode.spill(); err != nil {
+		return err
+	}
+	b.rootNode = b.rootNode.root()
+
+	// Update the root node for this bucket.
+	if b.rootNode.pgid >= b.tx.meta.pgid {
+		panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid))
+	}
+	b.root = b.rootNode.pgid
+
+	return nil
+}
+
+// inlineable returns true if a bucket is small enough to be written inline
+// and if it contains no subbuckets. Otherwise returns false.
+func (b *Bucket) inlineable() bool {
+	var n = b.rootNode
+
+	// Bucket must only contain a single leaf node.
+	if n == nil || !n.isLeaf {
+		return false
+	}
+
+	// Bucket is not inlineable if it contains subbuckets or if it goes beyond
+	// our threshold for inline bucket size.
+	var size = pageHeaderSize
+	for _, inode := range n.inodes {
+		size += leafPageElementSize + len(inode.key) + len(inode.value)
+
+		if inode.flags&bucketLeafFlag != 0 {
+			return false
+		} else if size > b.maxInlineBucketSize() {
+			return false
+		}
+	}
+
+	return true
+}
+
+// Returns the maximum total size of a bucket to make it a candidate for inlining.
+func (b *Bucket) maxInlineBucketSize() int {
+	return b.tx.db.pageSize / 4
+}
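For example, with the common 4KB OS page size the inline threshold is 4096/4 = 1024 bytes, so a bucket stays inline only while its serialized size (page header plus element headers, keys, and values) is at most 1KB and it contains no sub-buckets.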
+
+// write allocates and writes a bucket to a byte slice.
+func (b *Bucket) write() []byte {
+	// Allocate the appropriate size.
+	var n = b.rootNode
+	var value = make([]byte, bucketHeaderSize+n.size())
+
+	// Write a bucket header.
+	var bucket = (*bucket)(unsafe.Pointer(&value[0]))
+	*bucket = *b.bucket
+
+	// Convert byte slice to a fake page and write the root node.
+	var p = (*page)(unsafe.Pointer(&value[bucketHeaderSize]))
+	n.write(p)
+
+	return value
+}
+
+// rebalance attempts to balance all nodes.
+func (b *Bucket) rebalance() {
+	for _, n := range b.nodes {
+		n.rebalance()
+	}
+	for _, child := range b.buckets {
+		child.rebalance()
+	}
+}
+
+// node creates a node from a page and associates it with a given parent.
+func (b *Bucket) node(pgid pgid, parent *node) *node {
+	_assert(b.nodes != nil, "nodes map expected")
+
+	// Retrieve node if it's already been created.
+	if n := b.nodes[pgid]; n != nil {
+		return n
+	}
+
+	// Otherwise create a node and cache it.
+	n := &node{bucket: b, parent: parent}
+	if parent == nil {
+		b.rootNode = n
+	} else {
+		parent.children = append(parent.children, n)
+	}
+
+	// Use the inline page if this is an inline bucket.
+	var p = b.page
+	if p == nil {
+		p = b.tx.page(pgid)
+	}
+
+	// Read the page into the node and cache it.
+	n.read(p)
+	b.nodes[pgid] = n
+
+	// Update statistics.
+	b.tx.stats.NodeCount++
+
+	return n
+}
+
+// free recursively frees all pages in the bucket.
+func (b *Bucket) free() {
+	if b.root == 0 {
+		return
+	}
+
+	var tx = b.tx
+	b.forEachPageNode(func(p *page, n *node, _ int) {
+		if p != nil {
+			tx.db.freelist.free(tx.meta.txid, p)
+		} else {
+			n.free()
+		}
+	})
+	b.root = 0
+}
+
+// dereference removes all references to the old mmap.
+func (b *Bucket) dereference() {
+	if b.rootNode != nil {
+		b.rootNode.root().dereference()
+	}
+
+	for _, child := range b.buckets {
+		child.dereference()
+	}
+}
+
+// pageNode returns the in-memory node, if it exists.
+// Otherwise returns the underlying page.
+func (b *Bucket) pageNode(id pgid) (*page, *node) {
+	// Inline buckets have a fake page embedded in their value so treat them
+	// differently. We'll return the rootNode (if available) or the fake page.
+	if b.root == 0 {
+		if id != 0 {
+			panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id))
+		}
+		if b.rootNode != nil {
+			return nil, b.rootNode
+		}
+		return b.page, nil
+	}
+
+	// Check the node cache for non-inline buckets.
+	if b.nodes != nil {
+		if n := b.nodes[id]; n != nil {
+			return nil, n
+		}
+	}
+
+	// Finally lookup the page from the transaction if no node is materialized.
+	return b.tx.page(id), nil
+}
+
+// BucketStats records statistics about resources used by a bucket.
+type BucketStats struct {
+	// Page count statistics.
+	BranchPageN     int // number of logical branch pages
+	BranchOverflowN int // number of physical branch overflow pages
+	LeafPageN       int // number of logical leaf pages
+	LeafOverflowN   int // number of physical leaf overflow pages
+
+	// Tree statistics.
+	KeyN  int // number of keys/value pairs
+	Depth int // number of levels in B+tree
+
+	// Page size utilization.
+	BranchAlloc int // bytes allocated for physical branch pages
+	BranchInuse int // bytes actually used for branch data
+	LeafAlloc   int // bytes allocated for physical leaf pages
+	LeafInuse   int // bytes actually used for leaf data
+
+	// Bucket statistics
+	BucketN           int // total number of buckets including the top bucket
+	InlineBucketN     int // total number of inlined buckets
+	InlineBucketInuse int // bytes used for inlined buckets (also accounted for in LeafInuse)
+}
+
+func (s *BucketStats) Add(other BucketStats) {
+	s.BranchPageN += other.BranchPageN
+	s.BranchOverflowN += other.BranchOverflowN
+	s.LeafPageN += other.LeafPageN
+	s.LeafOverflowN += other.LeafOverflowN
+	s.KeyN += other.KeyN
+	if s.Depth < other.Depth {
+		s.Depth = other.Depth
+	}
+	s.BranchAlloc += other.BranchAlloc
+	s.BranchInuse += other.BranchInuse
+	s.LeafAlloc += other.LeafAlloc
+	s.LeafInuse += other.LeafInuse
+
+	s.BucketN += other.BucketN
+	s.InlineBucketN += other.InlineBucketN
+	s.InlineBucketInuse += other.InlineBucketInuse
+}
+
+// cloneBytes returns a copy of a given slice.
+func cloneBytes(v []byte) []byte {
+	var clone = make([]byte, len(v))
+	copy(clone, v)
+	return clone
+}
diff --git a/vendor/github.com/coreos/bbolt/cursor.go b/vendor/github.com/coreos/bbolt/cursor.go
new file mode 100644
index 0000000..1be9f35
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/cursor.go
@@ -0,0 +1,400 @@
+package bolt
+
+import (
+	"bytes"
+	"fmt"
+	"sort"
+)
+
+// Cursor represents an iterator that can traverse over all key/value pairs in a bucket in sorted order.
+// Cursors see nested buckets with value == nil.
+// Cursors can be obtained from a transaction and are valid as long as the transaction is open.
+//
+// Keys and values returned from the cursor are only valid for the life of the transaction.
+//
+// Changing data while traversing with a cursor may cause it to be invalidated
+// and return unexpected keys and/or values. You must reposition your cursor
+// after mutating data.
+type Cursor struct {
+	bucket *Bucket
+	stack  []elemRef
+}
+
+// Bucket returns the bucket that this cursor was created from.
+func (c *Cursor) Bucket() *Bucket {
+	return c.bucket
+}
+
+// First moves the cursor to the first item in the bucket and returns its key and value.
+// If the bucket is empty then a nil key and value are returned.
+// The returned key and value are only valid for the life of the transaction.
+func (c *Cursor) First() (key []byte, value []byte) {
+	_assert(c.bucket.tx.db != nil, "tx closed")
+	c.stack = c.stack[:0]
+	p, n := c.bucket.pageNode(c.bucket.root)
+	c.stack = append(c.stack, elemRef{page: p, node: n, index: 0})
+	c.first()
+
+	// If we land on an empty page then move to the next value.
+	// https://github.com/boltdb/bolt/issues/450
+	if c.stack[len(c.stack)-1].count() == 0 {
+		c.next()
+	}
+
+	k, v, flags := c.keyValue()
+	if (flags & uint32(bucketLeafFlag)) != 0 {
+		return k, nil
+	}
+	return k, v
+}
+
+// Last moves the cursor to the last item in the bucket and returns its key and value.
+// If the bucket is empty then a nil key and value are returned.
+// The returned key and value are only valid for the life of the transaction.
+func (c *Cursor) Last() (key []byte, value []byte) {
+	_assert(c.bucket.tx.db != nil, "tx closed")
+	c.stack = c.stack[:0]
+	p, n := c.bucket.pageNode(c.bucket.root)
+	ref := elemRef{page: p, node: n}
+	ref.index = ref.count() - 1
+	c.stack = append(c.stack, ref)
+	c.last()
+	k, v, flags := c.keyValue()
+	if (flags & uint32(bucketLeafFlag)) != 0 {
+		return k, nil
+	}
+	return k, v
+}
+
+// Next moves the cursor to the next item in the bucket and returns its key and value.
+// If the cursor is at the end of the bucket then a nil key and value are returned.
+// The returned key and value are only valid for the life of the transaction.
+func (c *Cursor) Next() (key []byte, value []byte) {
+	_assert(c.bucket.tx.db != nil, "tx closed")
+	k, v, flags := c.next()
+	if (flags & uint32(bucketLeafFlag)) != 0 {
+		return k, nil
+	}
+	return k, v
+}
+
+// Prev moves the cursor to the previous item in the bucket and returns its key and value.
+// If the cursor is at the beginning of the bucket then a nil key and value are returned.
+// The returned key and value are only valid for the life of the transaction.
+func (c *Cursor) Prev() (key []byte, value []byte) {
+	_assert(c.bucket.tx.db != nil, "tx closed")
+
+	// Attempt to move back one element until we're successful.
+	// Move up the stack as we hit the beginning of each page in our stack.
+	for i := len(c.stack) - 1; i >= 0; i-- {
+		elem := &c.stack[i]
+		if elem.index > 0 {
+			elem.index--
+			break
+		}
+		c.stack = c.stack[:i]
+	}
+
+	// If we've hit the end then return nil.
+	if len(c.stack) == 0 {
+		return nil, nil
+	}
+
+	// Move down the stack to find the last element of the last leaf under this branch.
+	c.last()
+	k, v, flags := c.keyValue()
+	if (flags & uint32(bucketLeafFlag)) != 0 {
+		return k, nil
+	}
+	return k, v
+}
+
+// Seek moves the cursor to a given key and returns it.
+// If the key does not exist then the next key is used. If no keys
+// follow, a nil key is returned.
+// The returned key and value are only valid for the life of the transaction.
+func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) {
+	k, v, flags := c.seek(seek)
+
+	// If we ended up after the last element of a page then move to the next one.
+	if ref := &c.stack[len(c.stack)-1]; ref.index >= ref.count() {
+		k, v, flags = c.next()
+	}
+
+	if k == nil {
+		return nil, nil
+	} else if (flags & uint32(bucketLeafFlag)) != 0 {
+		return k, nil
+	}
+	return k, v
+}
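Seek positions the cursor at the first key at or after the given key, which combines naturally with Next for prefix and range scans. A sketch of the common prefix-scan pattern:

package example

import (
	"bytes"
	"fmt"

	bolt "github.com/coreos/bbolt"
)

func scanPrefix(db *bolt.DB, bucket, prefix []byte) error {
	return db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket(bucket)
		if b == nil {
			return bolt.ErrBucketNotFound
		}
		c := b.Cursor()
		// Seek lands on the first key >= prefix; stop once the prefix no longer matches.
		for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() {
			fmt.Printf("%s=%s\n", k, v)
		}
		return nil
	})
}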
+
+// Delete removes the current key/value under the cursor from the bucket.
+// Delete fails if current key/value is a bucket or if the transaction is not writable.
+func (c *Cursor) Delete() error {
+	if c.bucket.tx.db == nil {
+		return ErrTxClosed
+	} else if !c.bucket.Writable() {
+		return ErrTxNotWritable
+	}
+
+	key, _, flags := c.keyValue()
+	// Return an error if current value is a bucket.
+	if (flags & bucketLeafFlag) != 0 {
+		return ErrIncompatibleValue
+	}
+	c.node().del(key)
+
+	return nil
+}
+
+// seek moves the cursor to a given key and returns it.
+// If the key does not exist then the next key is used.
+func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) {
+	_assert(c.bucket.tx.db != nil, "tx closed")
+
+	// Start from root page/node and traverse to correct page.
+	c.stack = c.stack[:0]
+	c.search(seek, c.bucket.root)
+	ref := &c.stack[len(c.stack)-1]
+
+	// If the cursor is pointing to the end of page/node then return nil.
+	if ref.index >= ref.count() {
+		return nil, nil, 0
+	}
+
+	// Return the key/value; callers use the returned flags to detect buckets.
+	return c.keyValue()
+}
+
+// first moves the cursor to the first leaf element under the last page in the stack.
+func (c *Cursor) first() {
+	for {
+		// Exit when we hit a leaf page.
+		var ref = &c.stack[len(c.stack)-1]
+		if ref.isLeaf() {
+			break
+		}
+
+		// Keep adding pages pointing to the first element to the stack.
+		var pgid pgid
+		if ref.node != nil {
+			pgid = ref.node.inodes[ref.index].pgid
+		} else {
+			pgid = ref.page.branchPageElement(uint16(ref.index)).pgid
+		}
+		p, n := c.bucket.pageNode(pgid)
+		c.stack = append(c.stack, elemRef{page: p, node: n, index: 0})
+	}
+}
+
+// last moves the cursor to the last leaf element under the last page in the stack.
+func (c *Cursor) last() {
+	for {
+		// Exit when we hit a leaf page.
+		ref := &c.stack[len(c.stack)-1]
+		if ref.isLeaf() {
+			break
+		}
+
+		// Keep adding pages pointing to the last element in the stack.
+		var pgid pgid
+		if ref.node != nil {
+			pgid = ref.node.inodes[ref.index].pgid
+		} else {
+			pgid = ref.page.branchPageElement(uint16(ref.index)).pgid
+		}
+		p, n := c.bucket.pageNode(pgid)
+
+		var nextRef = elemRef{page: p, node: n}
+		nextRef.index = nextRef.count() - 1
+		c.stack = append(c.stack, nextRef)
+	}
+}
+
+// next moves to the next leaf element and returns the key and value.
+// If the cursor is at the last leaf element then it stays there and returns nil.
+func (c *Cursor) next() (key []byte, value []byte, flags uint32) {
+	for {
+		// Attempt to move over one element until we're successful.
+		// Move up the stack as we hit the end of each page in our stack.
+		var i int
+		for i = len(c.stack) - 1; i >= 0; i-- {
+			elem := &c.stack[i]
+			if elem.index < elem.count()-1 {
+				elem.index++
+				break
+			}
+		}
+
+		// If we've hit the root page then stop and return. This will leave the
+		// cursor on the last element of the last page.
+		if i == -1 {
+			return nil, nil, 0
+		}
+
+		// Otherwise start from where we left off in the stack and find the
+		// first element of the first leaf page.
+		c.stack = c.stack[:i+1]
+		c.first()
+
+		// If this is an empty page then restart and move back up the stack.
+		// https://github.com/boltdb/bolt/issues/450
+		if c.stack[len(c.stack)-1].count() == 0 {
+			continue
+		}
+
+		return c.keyValue()
+	}
+}
+
+// search recursively performs a binary search against a given page/node until it finds a given key.
+func (c *Cursor) search(key []byte, pgid pgid) {
+	p, n := c.bucket.pageNode(pgid)
+	if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 {
+		panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags))
+	}
+	e := elemRef{page: p, node: n}
+	c.stack = append(c.stack, e)
+
+	// If we're on a leaf page/node then find the specific node.
+	if e.isLeaf() {
+		c.nsearch(key)
+		return
+	}
+
+	if n != nil {
+		c.searchNode(key, n)
+		return
+	}
+	c.searchPage(key, p)
+}
+
+func (c *Cursor) searchNode(key []byte, n *node) {
+	var exact bool
+	index := sort.Search(len(n.inodes), func(i int) bool {
+		// TODO(benbjohnson): Optimize this range search. It's a bit hacky right now.
+		// sort.Search() finds the lowest index where f() != -1 but we need the highest index.
+		ret := bytes.Compare(n.inodes[i].key, key)
+		if ret == 0 {
+			exact = true
+		}
+		return ret != -1
+	})
+	if !exact && index > 0 {
+		index--
+	}
+	c.stack[len(c.stack)-1].index = index
+
+	// Recursively search to the next page.
+	c.search(key, n.inodes[index].pgid)
+}
+
+func (c *Cursor) searchPage(key []byte, p *page) {
+	// Binary search for the correct range.
+	inodes := p.branchPageElements()
+
+	var exact bool
+	index := sort.Search(int(p.count), func(i int) bool {
+		// TODO(benbjohnson): Optimize this range search. It's a bit hacky right now.
+		// sort.Search() finds the lowest index where f() != -1 but we need the highest index.
+		ret := bytes.Compare(inodes[i].key(), key)
+		if ret == 0 {
+			exact = true
+		}
+		return ret != -1
+	})
+	if !exact && index > 0 {
+		index--
+	}
+	c.stack[len(c.stack)-1].index = index
+
+	// Recursively search to the next page.
+	c.search(key, inodes[index].pgid)
+}
+
+// nsearch searches the leaf node on the top of the stack for a key.
+func (c *Cursor) nsearch(key []byte) {
+	e := &c.stack[len(c.stack)-1]
+	p, n := e.page, e.node
+
+	// If we have a node then search its inodes.
+	if n != nil {
+		index := sort.Search(len(n.inodes), func(i int) bool {
+			return bytes.Compare(n.inodes[i].key, key) != -1
+		})
+		e.index = index
+		return
+	}
+
+	// If we have a page then search its leaf elements.
+	inodes := p.leafPageElements()
+	index := sort.Search(int(p.count), func(i int) bool {
+		return bytes.Compare(inodes[i].key(), key) != -1
+	})
+	e.index = index
+}
+
+// keyValue returns the key and value of the current leaf element.
+func (c *Cursor) keyValue() ([]byte, []byte, uint32) {
+	ref := &c.stack[len(c.stack)-1]
+	if ref.count() == 0 || ref.index >= ref.count() {
+		return nil, nil, 0
+	}
+
+	// Retrieve value from node.
+	if ref.node != nil {
+		inode := &ref.node.inodes[ref.index]
+		return inode.key, inode.value, inode.flags
+	}
+
+	// Or retrieve value from page.
+	elem := ref.page.leafPageElement(uint16(ref.index))
+	return elem.key(), elem.value(), elem.flags
+}
+
+// node returns the node that the cursor is currently positioned on.
+func (c *Cursor) node() *node {
+	_assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack")
+
+	// If the top of the stack is a leaf node then just return it.
+	if ref := &c.stack[len(c.stack)-1]; ref.node != nil && ref.isLeaf() {
+		return ref.node
+	}
+
+	// Start from root and traverse down the hierarchy.
+	var n = c.stack[0].node
+	if n == nil {
+		n = c.bucket.node(c.stack[0].page.id, nil)
+	}
+	for _, ref := range c.stack[:len(c.stack)-1] {
+		_assert(!n.isLeaf, "expected branch node")
+		n = n.childAt(int(ref.index))
+	}
+	_assert(n.isLeaf, "expected leaf node")
+	return n
+}
+
+// elemRef represents a reference to an element on a given page/node.
+type elemRef struct {
+	page  *page
+	node  *node
+	index int
+}
+
+// isLeaf returns whether the ref is pointing at a leaf page/node.
+func (r *elemRef) isLeaf() bool {
+	if r.node != nil {
+		return r.node.isLeaf
+	}
+	return (r.page.flags & leafPageFlag) != 0
+}
+
+// count returns the number of inodes or page elements.
+func (r *elemRef) count() int {
+	if r.node != nil {
+		return len(r.node.inodes)
+	}
+	return int(r.page.count)
+}
diff --git a/vendor/github.com/coreos/bbolt/db.go b/vendor/github.com/coreos/bbolt/db.go
new file mode 100644
index 0000000..1223493
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/db.go
@@ -0,0 +1,1036 @@
+package bolt
+
+import (
+	"errors"
+	"fmt"
+	"hash/fnv"
+	"log"
+	"os"
+	"runtime"
+	"runtime/debug"
+	"strings"
+	"sync"
+	"time"
+	"unsafe"
+)
+
+// The largest step that can be taken when remapping the mmap.
+const maxMmapStep = 1 << 30 // 1GB
+
+// The data file format version.
+const version = 2
+
+// Represents a marker value to indicate that a file is a Bolt DB.
+const magic uint32 = 0xED0CDAED
+
+// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when
+// syncing changes to a file.  This is required as some operating systems,
+// such as OpenBSD, do not have a unified buffer cache (UBC) and writes
+// must be synchronized using the msync(2) syscall.
+const IgnoreNoSync = runtime.GOOS == "openbsd"
+
+// Default values if not set in a DB instance.
+const (
+	DefaultMaxBatchSize  int = 1000
+	DefaultMaxBatchDelay     = 10 * time.Millisecond
+	DefaultAllocSize         = 16 * 1024 * 1024
+)
+
+// default page size for db is set to the OS page size.
+var defaultPageSize = os.Getpagesize()
+
+// DB represents a collection of buckets persisted to a file on disk.
+// All data access is performed through transactions which can be obtained through the DB.
+// All the functions on DB will return ErrDatabaseNotOpen if accessed before Open() is called.
+type DB struct {
+	// When enabled, the database will perform a Check() after every commit.
+	// A panic is issued if the database is in an inconsistent state. This
+	// flag has a large performance impact so it should only be used for
+	// debugging purposes.
+	StrictMode bool
+
+	// Setting the NoSync flag will cause the database to skip fsync()
+	// calls after each commit. This can be useful when bulk loading data
+	// into a database and you can restart the bulk load in the event of
+	// a system failure or database corruption. Do not set this flag for
+	// normal use.
+	//
+	// If the package global IgnoreNoSync constant is true, this value is
+	// ignored.  See the comment on that constant for more details.
+	//
+	// THIS IS UNSAFE. PLEASE USE WITH CAUTION.
+	NoSync bool
+
+	// When true, skips the truncate call when growing the database.
+	// Setting this to true is only safe on non-ext3/ext4 systems.
+	// Skipping truncation avoids preallocation of hard drive space and
+	// bypasses a truncate() and fsync() syscall on remapping.
+	//
+	// https://github.com/boltdb/bolt/issues/284
+	NoGrowSync bool
+
+	// If you want to read the entire database fast, you can set MmapFlags to
+	// syscall.MAP_POPULATE on Linux 2.6.23+ for sequential read-ahead.
+	MmapFlags int
+
+	// MaxBatchSize is the maximum size of a batch. Default value is
+	// copied from DefaultMaxBatchSize in Open.
+	//
+	// If <=0, disables batching.
+	//
+	// Do not change concurrently with calls to Batch.
+	MaxBatchSize int
+
+	// MaxBatchDelay is the maximum delay before a batch starts.
+	// Default value is copied from DefaultMaxBatchDelay in Open.
+	//
+	// If <=0, effectively disables batching.
+	//
+	// Do not change concurrently with calls to Batch.
+	MaxBatchDelay time.Duration
+
+	// AllocSize is the amount of space allocated when the database
+	// needs to create new pages. This is done to amortize the cost
+	// of truncate() and fsync() when growing the data file.
+	AllocSize int
+
+	path     string
+	file     *os.File
+	lockfile *os.File // windows only
+	dataref  []byte   // mmap'ed readonly, write throws SEGV
+	data     *[maxMapSize]byte
+	datasz   int
+	filesz   int // current on disk file size
+	meta0    *meta
+	meta1    *meta
+	pageSize int
+	opened   bool
+	rwtx     *Tx
+	txs      []*Tx
+	freelist *freelist
+	stats    Stats
+
+	pagePool sync.Pool
+
+	batchMu sync.Mutex
+	batch   *batch
+
+	rwlock   sync.Mutex   // Allows only one writer at a time.
+	metalock sync.Mutex   // Protects meta page access.
+	mmaplock sync.RWMutex // Protects mmap access during remapping.
+	statlock sync.RWMutex // Protects stats access.
+
+	ops struct {
+		writeAt func(b []byte, off int64) (n int, err error)
+	}
+
+	// Read only mode.
+	// When true, Update() and Begin(true) return ErrDatabaseReadOnly immediately.
+	readOnly bool
+}
+
+// Path returns the path to currently open database file.
+func (db *DB) Path() string {
+	return db.path
+}
+
+// GoString returns the Go string representation of the database.
+func (db *DB) GoString() string {
+	return fmt.Sprintf("bolt.DB{path:%q}", db.path)
+}
+
+// String returns the string representation of the database.
+func (db *DB) String() string {
+	return fmt.Sprintf("DB<%q>", db.path)
+}
+
+// Open creates and opens a database at the given path.
+// If the file does not exist then it will be created automatically.
+// Passing in nil options will cause Bolt to open the database with the default options.
+func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
+	var db = &DB{opened: true}
+
+	// Set default options if no options are provided.
+	if options == nil {
+		options = DefaultOptions
+	}
+	db.NoGrowSync = options.NoGrowSync
+	db.MmapFlags = options.MmapFlags
+
+	// Set default values for later DB operations.
+	db.MaxBatchSize = DefaultMaxBatchSize
+	db.MaxBatchDelay = DefaultMaxBatchDelay
+	db.AllocSize = DefaultAllocSize
+
+	flag := os.O_RDWR
+	if options.ReadOnly {
+		flag = os.O_RDONLY
+		db.readOnly = true
+	}
+
+	// Open data file and separate sync handler for metadata writes.
+	db.path = path
+	var err error
+	if db.file, err = os.OpenFile(db.path, flag|os.O_CREATE, mode); err != nil {
+		_ = db.close()
+		return nil, err
+	}
+
+	// Lock file so that other processes using Bolt in read-write mode cannot
+	// use the database at the same time. This would cause corruption since
+	// the two processes would write meta pages and free pages separately.
+	// The database file is locked exclusively (only one process can grab the lock)
+	// if !options.ReadOnly.
+	// The database file is locked using the shared lock (more than one process may
+	// hold a lock at the same time) otherwise (options.ReadOnly is set).
+	if err := flock(db, mode, !db.readOnly, options.Timeout); err != nil {
+		_ = db.close()
+		return nil, err
+	}
+
+	// Default values for test hooks
+	db.ops.writeAt = db.file.WriteAt
+
+	// Initialize the database if it doesn't exist.
+	if info, err := db.file.Stat(); err != nil {
+		return nil, err
+	} else if info.Size() == 0 {
+		// Initialize new files with meta pages.
+		if err := db.init(); err != nil {
+			return nil, err
+		}
+	} else {
+		// Read the first meta page to determine the page size.
+		var buf [0x1000]byte
+		if _, err := db.file.ReadAt(buf[:], 0); err == nil {
+			m := db.pageInBuffer(buf[:], 0).meta()
+			if err := m.validate(); err != nil {
+				// If we can't read the page size, we can assume it's the same
+				// as the OS -- since that's how the page size was chosen in the
+				// first place.
+				//
+				// If the first page is invalid and this OS uses a different
+				// page size than what the database was created with then we
+				// are out of luck and cannot access the database.
+				db.pageSize = os.Getpagesize()
+			} else {
+				db.pageSize = int(m.pageSize)
+			}
+		}
+	}
+
+	// Initialize page pool.
+	db.pagePool = sync.Pool{
+		New: func() interface{} {
+			return make([]byte, db.pageSize)
+		},
+	}
+
+	// Memory map the data file.
+	if err := db.mmap(options.InitialMmapSize); err != nil {
+		_ = db.close()
+		return nil, err
+	}
+
+	// Read in the freelist.
+	db.freelist = newFreelist()
+	db.freelist.read(db.page(db.meta().freelist))
+
+	// Mark the database as opened and return.
+	return db, nil
+}
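Open acquires the file lock described above, and with a zero Options.Timeout the flock loop retries forever, so a second read-write process blocks indefinitely. Passing a timeout turns that into an ErrTimeout instead. A minimal sketch, using an assumed path:

package main

import (
	"log"
	"time"

	bolt "github.com/coreos/bbolt"
)

func main() {
	// Fail after one second instead of blocking forever if another
	// process already holds the exclusive lock on the file.
	db, err := bolt.Open("/tmp/example.db", 0600, &bolt.Options{Timeout: time.Second})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}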
+
+// mmap opens the underlying memory-mapped file and initializes the meta references.
+// minsz is the minimum size that the new mmap can be.
+func (db *DB) mmap(minsz int) error {
+	db.mmaplock.Lock()
+	defer db.mmaplock.Unlock()
+
+	info, err := db.file.Stat()
+	if err != nil {
+		return fmt.Errorf("mmap stat error: %s", err)
+	} else if int(info.Size()) < db.pageSize*2 {
+		return fmt.Errorf("file size too small")
+	}
+
+	// Ensure the size is at least the minimum size.
+	var size = int(info.Size())
+	if size < minsz {
+		size = minsz
+	}
+	size, err = db.mmapSize(size)
+	if err != nil {
+		return err
+	}
+
+	// Dereference all mmap references before unmapping.
+	if db.rwtx != nil {
+		db.rwtx.root.dereference()
+	}
+
+	// Unmap existing data before continuing.
+	if err := db.munmap(); err != nil {
+		return err
+	}
+
+	// Memory-map the data file as a byte slice.
+	if err := mmap(db, size); err != nil {
+		return err
+	}
+
+	// Save references to the meta pages.
+	db.meta0 = db.page(0).meta()
+	db.meta1 = db.page(1).meta()
+
+	// Validate the meta pages. We only return an error if both meta pages fail
+	// validation, since meta0 failing validation means that it wasn't saved
+	// properly -- but we can recover using meta1. And vice-versa.
+	err0 := db.meta0.validate()
+	err1 := db.meta1.validate()
+	if err0 != nil && err1 != nil {
+		return err0
+	}
+
+	return nil
+}
+
+// munmap unmaps the data file from memory.
+func (db *DB) munmap() error {
+	if err := munmap(db); err != nil {
+		return fmt.Errorf("unmap error: " + err.Error())
+	}
+	return nil
+}
+
+// mmapSize determines the appropriate size for the mmap given the current size
+// of the database. The minimum size is 32KB and doubles until it reaches 1GB.
+// Returns an error if the new mmap size is greater than the max allowed.
+func (db *DB) mmapSize(size int) (int, error) {
+	// Double the size from 32KB until 1GB.
+	for i := uint(15); i <= 30; i++ {
+		if size <= 1<<i {
+			return 1 << i, nil
+		}
+	}
+
+	// Verify the requested size is not above the maximum allowed.
+	if size > maxMapSize {
+		return 0, fmt.Errorf("mmap too large")
+	}
+
+	// If larger than 1GB then grow by 1GB at a time.
+	sz := int64(size)
+	if remainder := sz % int64(maxMmapStep); remainder > 0 {
+		sz += int64(maxMmapStep) - remainder
+	}
+
+	// Ensure that the mmap size is a multiple of the page size.
+	// This should always be true since we grow in whole 1GB steps.
+	pageSize := int64(db.pageSize)
+	if (sz % pageSize) != 0 {
+		sz = ((sz / pageSize) + 1) * pageSize
+	}
+
+	// If we've exceeded the max size then only grow up to the max size.
+	if sz > maxMapSize {
+		sz = maxMapSize
+	}
+
+	return int(sz), nil
+}
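Worked examples of mmapSize: a 45KB data file maps at 64KB (the next power of two at or above the 32KB floor), a 900MB file at 1GB, and a 1.5GB file at 2GB (rounded up to the next whole 1GB step).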
+
+// init creates a new database file and initializes its meta pages.
+func (db *DB) init() error {
+	// Set the page size to the OS page size.
+	db.pageSize = os.Getpagesize()
+
+	// Allocate a buffer covering the first four pages and create the two meta pages in it.
+	buf := make([]byte, db.pageSize*4)
+	for i := 0; i < 2; i++ {
+		p := db.pageInBuffer(buf[:], pgid(i))
+		p.id = pgid(i)
+		p.flags = metaPageFlag
+
+		// Initialize the meta page.
+		m := p.meta()
+		m.magic = magic
+		m.version = version
+		m.pageSize = uint32(db.pageSize)
+		m.freelist = 2
+		m.root = bucket{root: 3}
+		m.pgid = 4
+		m.txid = txid(i)
+		m.checksum = m.sum64()
+	}
+
+	// Write an empty freelist at page 3.
+	p := db.pageInBuffer(buf[:], pgid(2))
+	p.id = pgid(2)
+	p.flags = freelistPageFlag
+	p.count = 0
+
+	// Write an empty leaf page at page 4.
+	p = db.pageInBuffer(buf[:], pgid(3))
+	p.id = pgid(3)
+	p.flags = leafPageFlag
+	p.count = 0
+
+	// Write the buffer to our data file.
+	if _, err := db.ops.writeAt(buf, 0); err != nil {
+		return err
+	}
+	if err := fdatasync(db); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// Close releases all database resources.
+// All transactions must be closed before closing the database.
+func (db *DB) Close() error {
+	db.rwlock.Lock()
+	defer db.rwlock.Unlock()
+
+	db.metalock.Lock()
+	defer db.metalock.Unlock()
+
+	db.mmaplock.RLock()
+	defer db.mmaplock.RUnlock()
+
+	return db.close()
+}
+
+func (db *DB) close() error {
+	if !db.opened {
+		return nil
+	}
+
+	db.opened = false
+
+	db.freelist = nil
+
+	// Clear ops.
+	db.ops.writeAt = nil
+
+	// Close the mmap.
+	if err := db.munmap(); err != nil {
+		return err
+	}
+
+	// Close file handles.
+	if db.file != nil {
+		// No need to unlock read-only file.
+		if !db.readOnly {
+			// Unlock the file.
+			if err := funlock(db); err != nil {
+				log.Printf("bolt.Close(): funlock error: %s", err)
+			}
+		}
+
+		// Close the file descriptor.
+		if err := db.file.Close(); err != nil {
+			return fmt.Errorf("db file close: %s", err)
+		}
+		db.file = nil
+	}
+
+	db.path = ""
+	return nil
+}
+
+// Begin starts a new transaction.
+// Multiple read-only transactions can be used concurrently but only one
+// write transaction can be used at a time. Starting multiple write transactions
+// will cause the calls to block and be serialized until the current write
+// transaction finishes.
+//
+// Transactions should not be dependent on one another. Opening a read
+// transaction and a write transaction in the same goroutine can cause the
+// writer to deadlock because the database periodically needs to re-mmap itself
+// as it grows and it cannot do that while a read transaction is open.
+//
+// If a long running read transaction (for example, a snapshot transaction) is
+// needed, you might want to set DB.InitialMmapSize to a large enough value
+// to avoid potential blocking of write transactions.
+//
+// IMPORTANT: You must close read-only transactions after you are finished or
+// else the database will not reclaim old pages.
+func (db *DB) Begin(writable bool) (*Tx, error) {
+	if writable {
+		return db.beginRWTx()
+	}
+	return db.beginTx()
+}
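+
+// A minimal sketch of manual transaction management with Begin; the
+// "widgets" bucket is illustrative and error handling is abbreviated:
+/*
+	tx, err := db.Begin(true)
+	if err != nil {
+		return err
+	}
+	defer tx.Rollback() // returns ErrTxClosed after a successful Commit; safe to ignore
+
+	if _, err := tx.CreateBucketIfNotExists([]byte("widgets")); err != nil {
+		return err
+	}
+	return tx.Commit()
+*/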
+
+func (db *DB) beginTx() (*Tx, error) {
+	// Lock the meta pages while we initialize the transaction. We obtain
+	// the meta lock before the mmap lock because that's the order that the
+	// write transaction will obtain them.
+	db.metalock.Lock()
+
+	// Obtain a read-only lock on the mmap. When the mmap is remapped it will
+	// obtain a write lock so all transactions must finish before it can be
+	// remapped.
+	db.mmaplock.RLock()
+
+	// Exit if the database is not open yet.
+	if !db.opened {
+		db.mmaplock.RUnlock()
+		db.metalock.Unlock()
+		return nil, ErrDatabaseNotOpen
+	}
+
+	// Create a transaction associated with the database.
+	t := &Tx{}
+	t.init(db)
+
+	// Keep track of transaction until it closes.
+	db.txs = append(db.txs, t)
+	n := len(db.txs)
+
+	// Unlock the meta pages.
+	db.metalock.Unlock()
+
+	// Update the transaction stats.
+	db.statlock.Lock()
+	db.stats.TxN++
+	db.stats.OpenTxN = n
+	db.statlock.Unlock()
+
+	return t, nil
+}
+
+func (db *DB) beginRWTx() (*Tx, error) {
+	// If the database was opened with Options.ReadOnly, return an error.
+	if db.readOnly {
+		return nil, ErrDatabaseReadOnly
+	}
+
+	// Obtain writer lock. This is released by the transaction when it closes.
+	// This enforces only one writer transaction at a time.
+	db.rwlock.Lock()
+
+	// Once we have the writer lock then we can lock the meta pages so that
+	// we can set up the transaction.
+	db.metalock.Lock()
+	defer db.metalock.Unlock()
+
+	// Exit if the database is not open yet.
+	if !db.opened {
+		db.rwlock.Unlock()
+		return nil, ErrDatabaseNotOpen
+	}
+
+	// Create a transaction associated with the database.
+	t := &Tx{writable: true}
+	t.init(db)
+	db.rwtx = t
+
+	// Free any pages associated with closed read-only transactions.
+	var minid txid = 0xFFFFFFFFFFFFFFFF
+	for _, t := range db.txs {
+		if t.meta.txid < minid {
+			minid = t.meta.txid
+		}
+	}
+	if minid > 0 {
+		db.freelist.release(minid - 1)
+	}
+
+	return t, nil
+}
+
+// removeTx removes a transaction from the database.
+func (db *DB) removeTx(tx *Tx) {
+	// Release the read lock on the mmap.
+	db.mmaplock.RUnlock()
+
+	// Use the meta lock to restrict access to the DB object.
+	db.metalock.Lock()
+
+	// Remove the transaction.
+	for i, t := range db.txs {
+		if t == tx {
+			db.txs = append(db.txs[:i], db.txs[i+1:]...)
+			break
+		}
+	}
+	n := len(db.txs)
+
+	// Unlock the meta pages.
+	db.metalock.Unlock()
+
+	// Merge statistics.
+	db.statlock.Lock()
+	db.stats.OpenTxN = n
+	db.stats.TxStats.add(&tx.stats)
+	db.statlock.Unlock()
+}
+
+// Update executes a function within the context of a read-write managed transaction.
+// If no error is returned from the function then the transaction is committed.
+// If an error is returned then the entire transaction is rolled back.
+// Any error that is returned from the function or returned from the commit is
+// returned from the Update() method.
+//
+// Attempting to manually commit or rollback within the function will cause a panic.
+func (db *DB) Update(fn func(*Tx) error) error {
+	t, err := db.Begin(true)
+	if err != nil {
+		return err
+	}
+
+	// Make sure the transaction rolls back in the event of a panic.
+	defer func() {
+		if t.db != nil {
+			t.rollback()
+		}
+	}()
+
+	// Mark as a managed tx so that the inner function cannot manually commit.
+	t.managed = true
+
+	// If an error is returned from the function then rollback and return error.
+	err = fn(t)
+	t.managed = false
+	if err != nil {
+		_ = t.Rollback()
+		return err
+	}
+
+	return t.Commit()
+}
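+
+// A short usage sketch for Update; the bucket and key names are illustrative:
+/*
+	err := db.Update(func(tx *bolt.Tx) error {
+		b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
+		if err != nil {
+			return err // the whole transaction is rolled back
+		}
+		return b.Put([]byte("foo"), []byte("bar"))
+	})
+*/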
+
+// View executes a function within the context of a managed read-only transaction.
+// Any error that is returned from the function is returned from the View() method.
+//
+// Attempting to manually rollback within the function will cause a panic.
+func (db *DB) View(fn func(*Tx) error) error {
+	t, err := db.Begin(false)
+	if err != nil {
+		return err
+	}
+
+	// Make sure the transaction rolls back in the event of a panic.
+	defer func() {
+		if t.db != nil {
+			t.rollback()
+		}
+	}()
+
+	// Mark as a managed tx so that the inner function cannot manually rollback.
+	t.managed = true
+
+	// If an error is returned from the function then pass it through.
+	err = fn(t)
+	t.managed = false
+	if err != nil {
+		_ = t.Rollback()
+		return err
+	}
+
+	if err := t.Rollback(); err != nil {
+		return err
+	}
+
+	return nil
+}
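+
+// A short usage sketch for View; the bucket and key are illustrative. Copy
+// the value if it is needed after the transaction ends:
+/*
+	err := db.View(func(tx *bolt.Tx) error {
+		b := tx.Bucket([]byte("widgets"))
+		if b == nil {
+			return bolt.ErrBucketNotFound
+		}
+		v := b.Get([]byte("foo")) // nil if the key does not exist
+		fmt.Printf("foo=%s\n", v)
+		return nil
+	})
+*/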
+
+// Batch calls fn as part of a batch. It behaves similarly to Update,
+// except:
+//
+// 1. concurrent Batch calls can be combined into a single Bolt
+// transaction.
+//
+// 2. the function passed to Batch may be called multiple times,
+// regardless of whether it returns an error or not.
+//
+// This means that Batch function side effects must be idempotent and
+// take permanent effect only after a successful return is seen in the
+// caller.
+//
+// The maximum batch size and delay can be adjusted with DB.MaxBatchSize
+// and DB.MaxBatchDelay, respectively.
+//
+// Batch is only useful when there are multiple goroutines calling it.
+func (db *DB) Batch(fn func(*Tx) error) error {
+	errCh := make(chan error, 1)
+
+	db.batchMu.Lock()
+	if db.batch == nil || len(db.batch.calls) >= db.MaxBatchSize {
+		// There is no existing batch, or the existing batch is full; start a new one.
+		db.batch = &batch{
+			db: db,
+		}
+		db.batch.timer = time.AfterFunc(db.MaxBatchDelay, db.batch.trigger)
+	}
+	db.batch.calls = append(db.batch.calls, call{fn: fn, err: errCh})
+	if len(db.batch.calls) >= db.MaxBatchSize {
+		// Wake up the batch; it's ready to run.
+		go db.batch.trigger()
+	}
+	db.batchMu.Unlock()
+
+	err := <-errCh
+	if err == trySolo {
+		err = db.Update(fn)
+	}
+	return err
+}
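+
+// A sketch of calling Batch from many goroutines so bolt can coalesce the
+// writes into fewer transactions. The bucket is assumed to already exist,
+// and Put is idempotent, which Batch requires:
+/*
+	var wg sync.WaitGroup
+	for i := 0; i < 100; i++ {
+		wg.Add(1)
+		go func(i int) {
+			defer wg.Done()
+			key := []byte(fmt.Sprintf("key-%d", i))
+			_ = db.Batch(func(tx *bolt.Tx) error {
+				return tx.Bucket([]byte("widgets")).Put(key, []byte("v"))
+			})
+		}(i)
+	}
+	wg.Wait()
+*/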
+
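+// call is one queued Batch participant: the function to run and the channel
+// on which its result is reported back to the blocked Batch caller.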
+type call struct {
+	fn  func(*Tx) error
+	err chan<- error
+}
+
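+// batch collects concurrent Batch calls so they can be run inside a single
+// read-write transaction.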
+type batch struct {
+	db    *DB
+	timer *time.Timer
+	start sync.Once
+	calls []call
+}
+
+// trigger runs the batch if it hasn't already been run.
+func (b *batch) trigger() {
+	b.start.Do(b.run)
+}
+
+// run performs the transactions in the batch and communicates results
+// back to DB.Batch.
+func (b *batch) run() {
+	b.db.batchMu.Lock()
+	b.timer.Stop()
+	// Make sure no new work is added to this batch, but don't break
+	// other batches.
+	if b.db.batch == b {
+		b.db.batch = nil
+	}
+	b.db.batchMu.Unlock()
+
+retry:
+	for len(b.calls) > 0 {
+		var failIdx = -1
+		err := b.db.Update(func(tx *Tx) error {
+			for i, c := range b.calls {
+				if err := safelyCall(c.fn, tx); err != nil {
+					failIdx = i
+					return err
+				}
+			}
+			return nil
+		})
+
+		if failIdx >= 0 {
+			// Take the failing transaction out of the batch. It's
+			// safe to shorten b.calls here because db.batch no longer
+			// points to us, so no new calls can be appended to it.
+			c := b.calls[failIdx]
+			b.calls[failIdx], b.calls = b.calls[len(b.calls)-1], b.calls[:len(b.calls)-1]
+			// Tell the submitter to re-run it solo; continue with the rest of the batch.
+			c.err <- trySolo
+			continue retry
+		}
+
+		// pass success, or bolt internal errors, to all callers
+		for _, c := range b.calls {
+			if c.err != nil {
+				c.err <- err
+			}
+		}
+		break retry
+	}
+}
+
+// trySolo is a special sentinel error value used for signaling that a
+// transaction function should be re-run. It should never be seen by
+// callers.
+var trySolo = errors.New("batch function returned an error and should be re-run solo")
+
+type panicked struct {
+	reason interface{}
+}
+
+func (p panicked) Error() string {
+	if err, ok := p.reason.(error); ok {
+		return err.Error()
+	}
+	return fmt.Sprintf("panic: %v", p.reason)
+}
+
+func safelyCall(fn func(*Tx) error, tx *Tx) (err error) {
+	defer func() {
+		if p := recover(); p != nil {
+			err = panicked{p}
+		}
+	}()
+	return fn(tx)
+}
+
+// Sync executes fdatasync() against the database file handle.
+//
+// This is not necessary under normal operation; however, if you use NoSync
+// then it allows you to force the database file to sync against the disk.
+func (db *DB) Sync() error { return fdatasync(db) }
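+
+// A sketch of the bulk-load pattern this supports: disable syncing during a
+// large import, then force one sync at the end. This trades durability of
+// the in-flight writes for speed; loadChunk is a hypothetical helper.
+/*
+	db.NoSync = true
+	for _, chunk := range chunks {
+		err := db.Update(func(tx *bolt.Tx) error {
+			return loadChunk(tx, chunk)
+		})
+		if err != nil {
+			return err
+		}
+	}
+	db.NoSync = false
+	return db.Sync()
+*/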
+
+// Stats retrieves ongoing performance stats for the database.
+// This is only updated when a transaction closes.
+func (db *DB) Stats() Stats {
+	db.statlock.RLock()
+	defer db.statlock.RUnlock()
+	return db.stats
+}
+
+// Info is for internal access to the raw data bytes from the C cursor. Use
+// it carefully, or not at all.
+func (db *DB) Info() *Info {
+	return &Info{uintptr(unsafe.Pointer(&db.data[0])), db.pageSize}
+}
+
+// page retrieves a page reference from the mmap based on the current page size.
+func (db *DB) page(id pgid) *page {
+	pos := id * pgid(db.pageSize)
+	return (*page)(unsafe.Pointer(&db.data[pos]))
+}
+
+// pageInBuffer retrieves a page reference from a given byte array based on the current page size.
+func (db *DB) pageInBuffer(b []byte, id pgid) *page {
+	return (*page)(unsafe.Pointer(&b[id*pgid(db.pageSize)]))
+}
+
+// meta retrieves the current meta page reference.
+func (db *DB) meta() *meta {
+	// We have to return the meta with the highest txid which doesn't fail
+	// validation. Otherwise, we can cause errors when in fact the database is
+	// in a consistent state. metaA is the one with the higher txid.
+	metaA := db.meta0
+	metaB := db.meta1
+	if db.meta1.txid > db.meta0.txid {
+		metaA = db.meta1
+		metaB = db.meta0
+	}
+
+	// Use the higher meta page if valid. Otherwise fall back to the previous one, if valid.
+	if err := metaA.validate(); err == nil {
+		return metaA
+	} else if err := metaB.validate(); err == nil {
+		return metaB
+	}
+
+	// This should never be reached, because both meta1 and meta0 were validated
+	// on mmap() and we do fsync() on every write.
+	panic("bolt.DB.meta(): invalid meta pages")
+}
+
+// allocate returns a contiguous block of memory starting at a given page.
+func (db *DB) allocate(count int) (*page, error) {
+	// Allocate a temporary buffer for the page.
+	var buf []byte
+	if count == 1 {
+		buf = db.pagePool.Get().([]byte)
+	} else {
+		buf = make([]byte, count*db.pageSize)
+	}
+	p := (*page)(unsafe.Pointer(&buf[0]))
+	p.overflow = uint32(count - 1)
+
+	// Use pages from the freelist if they are available.
+	if p.id = db.freelist.allocate(count); p.id != 0 {
+		return p, nil
+	}
+
+	// Resize mmap() if we're at the end.
+	p.id = db.rwtx.meta.pgid
+	var minsz = int((p.id+pgid(count))+1) * db.pageSize
+	if minsz >= db.datasz {
+		if err := db.mmap(minsz); err != nil {
+			return nil, fmt.Errorf("mmap allocate error: %s", err)
+		}
+	}
+
+	// Move the page id high water mark.
+	db.rwtx.meta.pgid += pgid(count)
+
+	return p, nil
+}
+
+// grow grows the size of the database to the given sz.
+func (db *DB) grow(sz int) error {
+	// Ignore if the new size is less than available file size.
+	if sz <= db.filesz {
+		return nil
+	}
+
+	// If the data is smaller than the alloc size then only allocate what's needed.
+	// Once it goes over the allocation size then allocate in chunks.
+	if db.datasz < db.AllocSize {
+		sz = db.datasz
+	} else {
+		sz += db.AllocSize
+	}
+
+	// Truncate and fsync to ensure file size metadata is flushed.
+	// https://github.com/boltdb/bolt/issues/284
+	if !db.NoGrowSync && !db.readOnly {
+		if runtime.GOOS != "windows" {
+			if err := db.file.Truncate(int64(sz)); err != nil {
+				return fmt.Errorf("file resize error: %s", err)
+			}
+		}
+		if err := db.file.Sync(); err != nil {
+			return fmt.Errorf("file sync error: %s", err)
+		}
+	}
+
+	db.filesz = sz
+	return nil
+}
+
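+// IsReadOnly returns whether the database was opened with Options.ReadOnly.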
+func (db *DB) IsReadOnly() bool {
+	return db.readOnly
+}
+
+// Options represents the options that can be set when opening a database.
+type Options struct {
+	// Timeout is the amount of time to wait to obtain a file lock.
+	// When set to zero it will wait indefinitely. This option is only
+	// available on Darwin and Linux.
+	Timeout time.Duration
+
+	// Sets the DB.NoGrowSync flag before memory mapping the file.
+	NoGrowSync bool
+
+	// Open the database in read-only mode. Uses flock(..., LOCK_SH|LOCK_NB) to
+	// grab a shared lock (UNIX).
+	ReadOnly bool
+
+	// Sets the DB.MmapFlags flag before memory mapping the file.
+	MmapFlags int
+
+	// InitialMmapSize is the initial mmap size of the database
+	// in bytes. Read transactions won't block write transactions
+	// if InitialMmapSize is large enough to hold the database mmap
+	// size. (See DB.Begin for more information.)
+	//
+	// If <=0, the initial map size is 0.
+	// If InitialMmapSize is smaller than the previous database size,
+	// it has no effect.
+	InitialMmapSize int
+}
+
+// DefaultOptions represents the options used if nil options are passed into Open().
+// No timeout is used which will cause Bolt to wait indefinitely for a lock.
+var DefaultOptions = &Options{
+	Timeout:    0,
+	NoGrowSync: false,
+}
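+
+// A sketch of opening a database with explicit options; the path, timeout,
+// and mmap size are illustrative values:
+/*
+	db, err := bolt.Open("my.db", 0600, &bolt.Options{
+		Timeout:         1 * time.Second,
+		InitialMmapSize: 1 << 30, // 1GB, so long-lived readers rarely block the writer
+	})
+*/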
+
+// Stats represents statistics about the database.
+type Stats struct {
+	// Freelist stats
+	FreePageN     int // total number of free pages on the freelist
+	PendingPageN  int // total number of pending pages on the freelist
+	FreeAlloc     int // total bytes allocated in free pages
+	FreelistInuse int // total bytes used by the freelist
+
+	// Transaction stats
+	TxN     int // total number of started read transactions
+	OpenTxN int // number of currently open read transactions
+
+	TxStats TxStats // global, ongoing stats.
+}
+
+// Sub calculates and returns the difference between two sets of database stats.
+// This is useful when obtaining stats at two different points in time and
+// you need the performance counters that occurred within that time span.
+func (s *Stats) Sub(other *Stats) Stats {
+	if other == nil {
+		return *s
+	}
+	var diff Stats
+	diff.FreePageN = s.FreePageN
+	diff.PendingPageN = s.PendingPageN
+	diff.FreeAlloc = s.FreeAlloc
+	diff.FreelistInuse = s.FreelistInuse
+	diff.TxN = s.TxN - other.TxN
+	diff.TxStats = s.TxStats.Sub(&other.TxStats)
+	return diff
+}
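+
+// A sketch of the intended usage: sample Stats periodically and diff the
+// samples to get the counters for just that window; the interval is
+// illustrative.
+/*
+	prev := db.Stats()
+	for {
+		time.Sleep(10 * time.Second)
+		cur := db.Stats()
+		diff := cur.Sub(&prev)
+		fmt.Printf("transactions started in window: %d\n", diff.TxN)
+		prev = cur
+	}
+*/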
+
+func (s *Stats) add(other *Stats) {
+	s.TxStats.add(&other.TxStats)
+}
+
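+// Info holds a pointer to the start of the memory-mapped data along with the
+// page size, for low-level integrations such as the C cursor.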
+type Info struct {
+	Data     uintptr
+	PageSize int
+}
+
+type meta struct {
+	magic    uint32
+	version  uint32
+	pageSize uint32
+	flags    uint32
+	root     bucket
+	freelist pgid
+	pgid     pgid
+	txid     txid
+	checksum uint64
+}
+
+// validate checks the marker bytes and version of the meta page to ensure it matches this binary.
+func (m *meta) validate() error {
+	if m.magic != magic {
+		return ErrInvalid
+	} else if m.version != version {
+		return ErrVersionMismatch
+	} else if m.checksum != 0 && m.checksum != m.sum64() {
+		return ErrChecksum
+	}
+	return nil
+}
+
+// copy copies one meta object to another.
+func (m *meta) copy(dest *meta) {
+	*dest = *m
+}
+
+// write writes the meta onto a page.
+func (m *meta) write(p *page) {
+	if m.root.root >= m.pgid {
+		panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid))
+	} else if m.freelist >= m.pgid {
+		panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid))
+	}
+
+	// Page id is either going to be 0 or 1 which we can determine by the transaction ID.
+	p.id = pgid(m.txid % 2)
+	p.flags |= metaPageFlag
+
+	// Calculate the checksum.
+	m.checksum = m.sum64()
+
+	m.copy(p.meta())
+}
+
+// generates the checksum for the meta.
+func (m *meta) sum64() uint64 {
+	var h = fnv.New64a()
+	_, _ = h.Write((*[unsafe.Offsetof(meta{}.checksum)]byte)(unsafe.Pointer(m))[:])
+	return h.Sum64()
+}
+
+// _assert will panic with a given formatted message if the given condition is false.
+func _assert(condition bool, msg string, v ...interface{}) {
+	if !condition {
+		panic(fmt.Sprintf("assertion failed: "+msg, v...))
+	}
+}
+
+func warn(v ...interface{})              { fmt.Fprintln(os.Stderr, v...) }
+func warnf(msg string, v ...interface{}) { fmt.Fprintf(os.Stderr, msg+"\n", v...) }
+
+func printstack() {
+	stack := strings.Join(strings.Split(string(debug.Stack()), "\n")[2:], "\n")
+	fmt.Fprintln(os.Stderr, stack)
+}
diff --git a/vendor/github.com/coreos/bbolt/doc.go b/vendor/github.com/coreos/bbolt/doc.go
new file mode 100644
index 0000000..cc93784
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/doc.go
@@ -0,0 +1,44 @@
+/*
+Package bolt implements a low-level key/value store in pure Go. It supports
+fully serializable transactions, ACID semantics, and lock-free MVCC with
+multiple readers and a single writer. Bolt can be used for projects that
+want a simple data store without the need to add large dependencies such as
+Postgres or MySQL.
+
+Bolt is a single-level, zero-copy, B+tree data store. This means that Bolt is
+optimized for fast read access and does not require recovery in the event of a
+system crash. Transactions which have not finished committing will simply be
+rolled back in the event of a crash.
+
+The design of Bolt is based on Howard Chu's LMDB database project.
+
+Bolt currently works on Windows, Mac OS X, and Linux.
+
+
+Basics
+
+There are only a few types in Bolt: DB, Bucket, Tx, and Cursor. The DB is
+a collection of buckets and is represented by a single file on disk. A bucket is
+a collection of unique keys that are associated with values.
+
+Transactions provide either read-only or read-write access to the database.
+Read-only transactions can retrieve key/value pairs and can use Cursors to
+iterate over the dataset sequentially. Read-write transactions can create and
+delete buckets and can insert and remove keys. Only one read-write transaction
+is allowed at a time.
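+
+A minimal write sketch (error handling abbreviated; the file and bucket
+names are illustrative):
+
+	db, err := bolt.Open("my.db", 0600, nil)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer db.Close()
+
+	err = db.Update(func(tx *bolt.Tx) error {
+		b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
+		if err != nil {
+			return err
+		}
+		return b.Put([]byte("foo"), []byte("bar"))
+	})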
+
+
+Caveats
+
+The database uses a read-only, memory-mapped data file to ensure that
+applications cannot corrupt the database; however, this means that keys and
+values returned from Bolt cannot be changed. Writing to a read-only byte slice
+will cause Go to panic.
+
+Keys and values retrieved from the database are only valid for the life of
+the transaction. When used outside the transaction, these byte slices can
+point to different data or can point to invalid memory which will cause a panic.
+
+
+*/
+package bolt
diff --git a/vendor/github.com/coreos/bbolt/errors.go b/vendor/github.com/coreos/bbolt/errors.go
new file mode 100644
index 0000000..a3620a3
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/errors.go
@@ -0,0 +1,71 @@
+package bolt
+
+import "errors"
+
+// These errors can be returned when opening or calling methods on a DB.
+var (
+	// ErrDatabaseNotOpen is returned when a DB instance is accessed before it
+	// is opened or after it is closed.
+	ErrDatabaseNotOpen = errors.New("database not open")
+
+	// ErrDatabaseOpen is returned when opening a database that is
+	// already open.
+	ErrDatabaseOpen = errors.New("database already open")
+
+	// ErrInvalid is returned when both meta pages on a database are invalid.
+	// This typically occurs when a file is not a bolt database.
+	ErrInvalid = errors.New("invalid database")
+
+	// ErrVersionMismatch is returned when the data file was created with a
+	// different version of Bolt.
+	ErrVersionMismatch = errors.New("version mismatch")
+
+	// ErrChecksum is returned when either meta page checksum does not match.
+	ErrChecksum = errors.New("checksum error")
+
+	// ErrTimeout is returned when a database cannot obtain an exclusive lock
+	// on the data file after the timeout passed to Open().
+	ErrTimeout = errors.New("timeout")
+)
+
+// These errors can occur when beginning or committing a Tx.
+var (
+	// ErrTxNotWritable is returned when performing a write operation on a
+	// read-only transaction.
+	ErrTxNotWritable = errors.New("tx not writable")
+
+	// ErrTxClosed is returned when committing or rolling back a transaction
+	// that has already been committed or rolled back.
+	ErrTxClosed = errors.New("tx closed")
+
+	// ErrDatabaseReadOnly is returned when a mutating transaction is started on a
+	// read-only database.
+	ErrDatabaseReadOnly = errors.New("database is in read-only mode")
+)
+
+// These errors can occur when putting or deleting a value or a bucket.
+var (
+	// ErrBucketNotFound is returned when trying to access a bucket that has
+	// not been created yet.
+	ErrBucketNotFound = errors.New("bucket not found")
+
+	// ErrBucketExists is returned when creating a bucket that already exists.
+	ErrBucketExists = errors.New("bucket already exists")
+
+	// ErrBucketNameRequired is returned when creating a bucket with a blank name.
+	ErrBucketNameRequired = errors.New("bucket name required")
+
+	// ErrKeyRequired is returned when inserting a zero-length key.
+	ErrKeyRequired = errors.New("key required")
+
+	// ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize.
+	ErrKeyTooLarge = errors.New("key too large")
+
+	// ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize.
+	ErrValueTooLarge = errors.New("value too large")
+
+	// ErrIncompatibleValue is returned when trying to create or delete a bucket
+	// on an existing non-bucket key or when trying to create or delete a
+	// non-bucket key on an existing bucket key.
+	ErrIncompatibleValue = errors.New("incompatible value")
+)
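+
+// A sketch of checking these sentinel values from application code; the
+// bucket name is illustrative:
+/*
+	err := db.Update(func(tx *bolt.Tx) error {
+		_, err := tx.CreateBucket([]byte("widgets"))
+		return err
+	})
+	if err == bolt.ErrBucketExists {
+		// The bucket was created on an earlier run; not a failure here.
+	}
+*/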
diff --git a/vendor/github.com/coreos/bbolt/freelist.go b/vendor/github.com/coreos/bbolt/freelist.go
new file mode 100644
index 0000000..1b7ba91
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/freelist.go
@@ -0,0 +1,248 @@
+package bolt
+
+import (
+	"fmt"
+	"sort"
+	"unsafe"
+)
+
+// freelist represents a list of all pages that are available for allocation.
+// It also tracks pages that have been freed but are still in use by open transactions.
+type freelist struct {
+	ids     []pgid          // all free and available free page ids.
+	pending map[txid][]pgid // mapping of soon-to-be free page ids by tx.
+	cache   map[pgid]bool   // fast lookup of all free and pending page ids.
+}
+
+// newFreelist returns an empty, initialized freelist.
+func newFreelist() *freelist {
+	return &freelist{
+		pending: make(map[txid][]pgid),
+		cache:   make(map[pgid]bool),
+	}
+}
+
+// size returns the size of the page after serialization.
+func (f *freelist) size() int {
+	return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * f.count())
+}
+
+// count returns the total count of free and pending pages on the freelist.
+func (f *freelist) count() int {
+	return f.free_count() + f.pending_count()
+}
+
+// free_count returns the count of free pages.
+func (f *freelist) free_count() int {
+	return len(f.ids)
+}
+
+// pending_count returns the count of pending pages.
+func (f *freelist) pending_count() int {
+	var count int
+	for _, list := range f.pending {
+		count += len(list)
+	}
+	return count
+}
+
+// all returns a list of all free ids and all pending ids in one sorted list.
+func (f *freelist) all() []pgid {
+	m := make(pgids, 0)
+
+	for _, list := range f.pending {
+		m = append(m, list...)
+	}
+
+	sort.Sort(m)
+	return pgids(f.ids).merge(m)
+}
+
+// allocate returns the starting page id of a contiguous list of pages of a given size.
+// If a contiguous block cannot be found then 0 is returned.
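+// For example, with f.ids = [3 4 5 9], allocate(2) finds the contiguous run
+// starting at 3, shrinks f.ids to [5 9], drops 3 and 4 from the cache, and
+// returns 3.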
+func (f *freelist) allocate(n int) pgid {
+	if len(f.ids) == 0 {
+		return 0
+	}
+
+	var initial, previd pgid
+	for i, id := range f.ids {
+		if id <= 1 {
+			panic(fmt.Sprintf("invalid page allocation: %d", id))
+		}
+
+		// Reset initial page if this is not contiguous.
+		if previd == 0 || id-previd != 1 {
+			initial = id
+		}
+
+		// If we found a contiguous block then remove it and return it.
+		if (id-initial)+1 == pgid(n) {
+			// If we're allocating off the beginning then take the fast path
+			// and just adjust the existing slice. This will use extra memory
+			// temporarily but the append() in free() will realloc the slice
+			// as is necessary.
+			if (i + 1) == n {
+				f.ids = f.ids[i+1:]
+			} else {
+				copy(f.ids[i-n+1:], f.ids[i+1:])
+				f.ids = f.ids[:len(f.ids)-n]
+			}
+
+			// Remove from the free cache.
+			for i := pgid(0); i < pgid(n); i++ {
+				delete(f.cache, initial+i)
+			}
+
+			return initial
+		}
+
+		previd = id
+	}
+	return 0
+}
+
+// free releases a page and its overflow for a given transaction id.
+// If the page is already free then a panic will occur.
+func (f *freelist) free(txid txid, p *page) {
+	if p.id <= 1 {
+		panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id))
+	}
+
+	// Free page and all its overflow pages.
+	var ids = f.pending[txid]
+	for id := p.id; id <= p.id+pgid(p.overflow); id++ {
+		// Verify that page is not already free.
+		if f.cache[id] {
+			panic(fmt.Sprintf("page %d already freed", id))
+		}
+
+		// Add to the freelist and cache.
+		ids = append(ids, id)
+		f.cache[id] = true
+	}
+	f.pending[txid] = ids
+}
+
+// release moves all page ids for a transaction id (or older) to the freelist.
+func (f *freelist) release(txid txid) {
+	m := make(pgids, 0)
+	for tid, ids := range f.pending {
+		if tid <= txid {
+			// Move transaction's pending pages to the available freelist.
+			// Don't remove from the cache since the page is still free.
+			m = append(m, ids...)
+			delete(f.pending, tid)
+		}
+	}
+	sort.Sort(m)
+	f.ids = pgids(f.ids).merge(m)
+}
+
+// rollback removes the pages from a given pending tx.
+func (f *freelist) rollback(txid txid) {
+	// Remove page ids from cache.
+	for _, id := range f.pending[txid] {
+		delete(f.cache, id)
+	}
+
+	// Remove pages from pending list.
+	delete(f.pending, txid)
+}
+
+// freed returns whether a given page is in the free list.
+func (f *freelist) freed(pgid pgid) bool {
+	return f.cache[pgid]
+}
+
+// read initializes the freelist from a freelist page.
+func (f *freelist) read(p *page) {
+	// If the page.count is at the max uint16 value (64k) then it's considered
+	// an overflow and the size of the freelist is stored as the first element.
+	idx, count := 0, int(p.count)
+	if count == 0xFFFF {
+		idx = 1
+		count = int(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0])
+	}
+
+	// Copy the list of page ids from the freelist.
+	if count == 0 {
+		f.ids = nil
+	} else {
+		ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx : idx+count]
+		f.ids = make([]pgid, len(ids))
+		copy(f.ids, ids)
+
+		// Make sure they're sorted.
+		sort.Sort(pgids(f.ids))
+	}
+
+	// Rebuild the page cache.
+	f.reindex()
+}
+
+// write writes the page ids onto a freelist page. All free and pending ids are
+// saved to disk since in the event of a program crash, all pending ids will
+// become free.
+func (f *freelist) write(p *page) error {
+	// Combine the old free pgids and pgids waiting on an open transaction.
+	ids := f.all()
+
+	// Update the header flag.
+	p.flags |= freelistPageFlag
+
+	// The page.count can only hold up to 64k elements so if we overflow that
+	// number then we handle it by putting the size in the first element.
+	if len(ids) == 0 {
+		p.count = uint16(len(ids))
+	} else if len(ids) < 0xFFFF {
+		p.count = uint16(len(ids))
+		copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:], ids)
+	} else {
+		p.count = 0xFFFF
+		((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(len(ids))
+		copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:], ids)
+	}
+
+	return nil
+}
+
+// reload reads the freelist from a page and filters out pending items.
+func (f *freelist) reload(p *page) {
+	f.read(p)
+
+	// Build a cache of only pending pages.
+	pcache := make(map[pgid]bool)
+	for _, pendingIDs := range f.pending {
+		for _, pendingID := range pendingIDs {
+			pcache[pendingID] = true
+		}
+	}
+
+	// Check each page in the freelist and build a new available freelist
+	// with any pages not in the pending lists.
+	var a []pgid
+	for _, id := range f.ids {
+		if !pcache[id] {
+			a = append(a, id)
+		}
+	}
+	f.ids = a
+
+	// Once the available list is rebuilt then rebuild the free cache so that
+	// it includes the available and pending free pages.
+	f.reindex()
+}
+
+// reindex rebuilds the free cache based on available and pending free lists.
+func (f *freelist) reindex() {
+	f.cache = make(map[pgid]bool)
+	for _, id := range f.ids {
+		f.cache[id] = true
+	}
+	for _, pendingIDs := range f.pending {
+		for _, pendingID := range pendingIDs {
+			f.cache[pendingID] = true
+		}
+	}
+}
diff --git a/vendor/github.com/coreos/bbolt/node.go b/vendor/github.com/coreos/bbolt/node.go
new file mode 100644
index 0000000..159318b
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/node.go
@@ -0,0 +1,604 @@
+package bolt
+
+import (
+	"bytes"
+	"fmt"
+	"sort"
+	"unsafe"
+)
+
+// node represents an in-memory, deserialized page.
+type node struct {
+	bucket     *Bucket
+	isLeaf     bool
+	unbalanced bool
+	spilled    bool
+	key        []byte
+	pgid       pgid
+	parent     *node
+	children   nodes
+	inodes     inodes
+}
+
+// root returns the top-level node this node is attached to.
+func (n *node) root() *node {
+	if n.parent == nil {
+		return n
+	}
+	return n.parent.root()
+}
+
+// minKeys returns the minimum number of inodes this node should have.
+func (n *node) minKeys() int {
+	if n.isLeaf {
+		return 1
+	}
+	return 2
+}
+
+// size returns the size of the node after serialization.
+func (n *node) size() int {
+	sz, elsz := pageHeaderSize, n.pageElementSize()
+	for i := 0; i < len(n.inodes); i++ {
+		item := &n.inodes[i]
+		sz += elsz + len(item.key) + len(item.value)
+	}
+	return sz
+}
+
+// sizeLessThan returns true if the node is less than a given size.
+// This is an optimization to avoid calculating a large node when we only need
+// to know if it fits inside a certain page size.
+func (n *node) sizeLessThan(v int) bool {
+	sz, elsz := pageHeaderSize, n.pageElementSize()
+	for i := 0; i < len(n.inodes); i++ {
+		item := &n.inodes[i]
+		sz += elsz + len(item.key) + len(item.value)
+		if sz >= v {
+			return false
+		}
+	}
+	return true
+}
+
+// pageElementSize returns the size of each page element based on the type of node.
+func (n *node) pageElementSize() int {
+	if n.isLeaf {
+		return leafPageElementSize
+	}
+	return branchPageElementSize
+}
+
+// childAt returns the child node at a given index.
+func (n *node) childAt(index int) *node {
+	if n.isLeaf {
+		panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index))
+	}
+	return n.bucket.node(n.inodes[index].pgid, n)
+}
+
+// childIndex returns the index of a given child node.
+func (n *node) childIndex(child *node) int {
+	index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, child.key) != -1 })
+	return index
+}
+
+// numChildren returns the number of children.
+func (n *node) numChildren() int {
+	return len(n.inodes)
+}
+
+// nextSibling returns the next node with the same parent.
+func (n *node) nextSibling() *node {
+	if n.parent == nil {
+		return nil
+	}
+	index := n.parent.childIndex(n)
+	if index >= n.parent.numChildren()-1 {
+		return nil
+	}
+	return n.parent.childAt(index + 1)
+}
+
+// prevSibling returns the previous node with the same parent.
+func (n *node) prevSibling() *node {
+	if n.parent == nil {
+		return nil
+	}
+	index := n.parent.childIndex(n)
+	if index == 0 {
+		return nil
+	}
+	return n.parent.childAt(index - 1)
+}
+
+// put inserts a key/value.
+func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) {
+	if pgid >= n.bucket.tx.meta.pgid {
+		panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid))
+	} else if len(oldKey) <= 0 {
+		panic("put: zero-length old key")
+	} else if len(newKey) <= 0 {
+		panic("put: zero-length new key")
+	}
+
+	// Find insertion index.
+	index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 })
+
+	// Add capacity and shift nodes if we don't have an exact match and need to insert.
+	exact := (len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey))
+	if !exact {
+		n.inodes = append(n.inodes, inode{})
+		copy(n.inodes[index+1:], n.inodes[index:])
+	}
+
+	inode := &n.inodes[index]
+	inode.flags = flags
+	inode.key = newKey
+	inode.value = value
+	inode.pgid = pgid
+	_assert(len(inode.key) > 0, "put: zero-length inode key")
+}
+
+// del removes a key from the node.
+func (n *node) del(key []byte) {
+	// Find index of key.
+	index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 })
+
+	// Exit if the key isn't found.
+	if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].key, key) {
+		return
+	}
+
+	// Delete inode from the node.
+	n.inodes = append(n.inodes[:index], n.inodes[index+1:]...)
+
+	// Mark the node as needing rebalancing.
+	n.unbalanced = true
+}
+
+// read initializes the node from a page.
+func (n *node) read(p *page) {
+	n.pgid = p.id
+	n.isLeaf = ((p.flags & leafPageFlag) != 0)
+	n.inodes = make(inodes, int(p.count))
+
+	for i := 0; i < int(p.count); i++ {
+		inode := &n.inodes[i]
+		if n.isLeaf {
+			elem := p.leafPageElement(uint16(i))
+			inode.flags = elem.flags
+			inode.key = elem.key()
+			inode.value = elem.value()
+		} else {
+			elem := p.branchPageElement(uint16(i))
+			inode.pgid = elem.pgid
+			inode.key = elem.key()
+		}
+		_assert(len(inode.key) > 0, "read: zero-length inode key")
+	}
+
+	// Save first key so we can find the node in the parent when we spill.
+	if len(n.inodes) > 0 {
+		n.key = n.inodes[0].key
+		_assert(len(n.key) > 0, "read: zero-length node key")
+	} else {
+		n.key = nil
+	}
+}
+
+// write writes the items onto one or more pages.
+func (n *node) write(p *page) {
+	// Initialize page.
+	if n.isLeaf {
+		p.flags |= leafPageFlag
+	} else {
+		p.flags |= branchPageFlag
+	}
+
+	if len(n.inodes) >= 0xFFFF {
+		panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id))
+	}
+	p.count = uint16(len(n.inodes))
+
+	// Stop here if there are no items to write.
+	if p.count == 0 {
+		return
+	}
+
+	// Loop over each item and write it to the page.
+	b := (*[maxAllocSize]byte)(unsafe.Pointer(&p.ptr))[n.pageElementSize()*len(n.inodes):]
+	for i, item := range n.inodes {
+		_assert(len(item.key) > 0, "write: zero-length inode key")
+
+		// Write the page element.
+		if n.isLeaf {
+			elem := p.leafPageElement(uint16(i))
+			elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))
+			elem.flags = item.flags
+			elem.ksize = uint32(len(item.key))
+			elem.vsize = uint32(len(item.value))
+		} else {
+			elem := p.branchPageElement(uint16(i))
+			elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))
+			elem.ksize = uint32(len(item.key))
+			elem.pgid = item.pgid
+			_assert(elem.pgid != p.id, "write: circular dependency occurred")
+		}
+
+		// If the length of key+value is larger than the max allocation size
+		// then we need to reallocate the byte array pointer.
+		//
+		// See: https://github.com/boltdb/bolt/pull/335
+		klen, vlen := len(item.key), len(item.value)
+		if len(b) < klen+vlen {
+			b = (*[maxAllocSize]byte)(unsafe.Pointer(&b[0]))[:]
+		}
+
+		// Write data for the element to the end of the page.
+		copy(b[0:], item.key)
+		b = b[klen:]
+		copy(b[0:], item.value)
+		b = b[vlen:]
+	}
+
+	// DEBUG ONLY: n.dump()
+}
+
+// split breaks up a node into multiple smaller nodes, if appropriate.
+// This should only be called from the spill() function.
+func (n *node) split(pageSize int) []*node {
+	var nodes []*node
+
+	node := n
+	for {
+		// Split node into two.
+		a, b := node.splitTwo(pageSize)
+		nodes = append(nodes, a)
+
+		// If we can't split then exit the loop.
+		if b == nil {
+			break
+		}
+
+		// Set node to b so it gets split on the next iteration.
+		node = b
+	}
+
+	return nodes
+}
+
+// splitTwo breaks up a node into two smaller nodes, if appropriate.
+// This should only be called from the split() function.
+func (n *node) splitTwo(pageSize int) (*node, *node) {
+	// Ignore the split if the page doesn't have at least enough nodes for
+	// two pages or if the nodes can fit in a single page.
+	if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) {
+		return n, nil
+	}
+
+	// Determine the threshold before starting a new node.
+	var fillPercent = n.bucket.FillPercent
+	if fillPercent < minFillPercent {
+		fillPercent = minFillPercent
+	} else if fillPercent > maxFillPercent {
+		fillPercent = maxFillPercent
+	}
+	threshold := int(float64(pageSize) * fillPercent)
+
+	// Determine split position and sizes of the two pages.
+	splitIndex, _ := n.splitIndex(threshold)
+
+	// Split node into two separate nodes.
+	// If there's no parent then we'll need to create one.
+	if n.parent == nil {
+		n.parent = &node{bucket: n.bucket, children: []*node{n}}
+	}
+
+	// Create a new node and add it to the parent.
+	next := &node{bucket: n.bucket, isLeaf: n.isLeaf, parent: n.parent}
+	n.parent.children = append(n.parent.children, next)
+
+	// Split inodes across two nodes.
+	next.inodes = n.inodes[splitIndex:]
+	n.inodes = n.inodes[:splitIndex]
+
+	// Update the statistics.
+	n.bucket.tx.stats.Split++
+
+	return n, next
+}
+
+// splitIndex finds the position where a page will fill a given threshold.
+// It returns the index as well as the size of the first page.
+// This should only be called from split().
+func (n *node) splitIndex(threshold int) (index, sz int) {
+	sz = pageHeaderSize
+
+	// Loop until we only have the minimum number of keys required for the second page.
+	for i := 0; i < len(n.inodes)-minKeysPerPage; i++ {
+		index = i
+		inode := n.inodes[i]
+		elsize := n.pageElementSize() + len(inode.key) + len(inode.value)
+
+		// If we have at least the minimum number of keys and adding another
+		// node would put us over the threshold then exit and return.
+		if i >= minKeysPerPage && sz+elsize > threshold {
+			break
+		}
+
+		// Add the element size to the total size.
+		sz += elsize
+	}
+
+	return
+}
+
+// spill writes the nodes to dirty pages and splits nodes as it goes.
+// Returns an error if dirty pages cannot be allocated.
+func (n *node) spill() error {
+	var tx = n.bucket.tx
+	if n.spilled {
+		return nil
+	}
+
+	// Spill child nodes first. Child nodes can materialize sibling nodes in
+	// the case of split-merge so we cannot use a range loop. We have to check
+	// the children size on every loop iteration.
+	sort.Sort(n.children)
+	for i := 0; i < len(n.children); i++ {
+		if err := n.children[i].spill(); err != nil {
+			return err
+		}
+	}
+
+	// We no longer need the child list because it's only used for spill tracking.
+	n.children = nil
+
+	// Split nodes into appropriate sizes. The first node will always be n.
+	var nodes = n.split(tx.db.pageSize)
+	for _, node := range nodes {
+		// Add node's page to the freelist if it's not new.
+		if node.pgid > 0 {
+			tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid))
+			node.pgid = 0
+		}
+
+		// Allocate contiguous space for the node.
+		p, err := tx.allocate((node.size() / tx.db.pageSize) + 1)
+		if err != nil {
+			return err
+		}
+
+		// Write the node.
+		if p.id >= tx.meta.pgid {
+			panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid))
+		}
+		node.pgid = p.id
+		node.write(p)
+		node.spilled = true
+
+		// Insert into parent inodes.
+		if node.parent != nil {
+			var key = node.key
+			if key == nil {
+				key = node.inodes[0].key
+			}
+
+			node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0)
+			node.key = node.inodes[0].key
+			_assert(len(node.key) > 0, "spill: zero-length node key")
+		}
+
+		// Update the statistics.
+		tx.stats.Spill++
+	}
+
+	// If the root node split and created a new root then we need to spill that
+	// as well. We'll clear out the children to make sure it doesn't try to respill.
+	if n.parent != nil && n.parent.pgid == 0 {
+		n.children = nil
+		return n.parent.spill()
+	}
+
+	return nil
+}
+
+// rebalance attempts to combine the node with sibling nodes if the node fill
+// size is below a threshold or if there are not enough keys.
+func (n *node) rebalance() {
+	if !n.unbalanced {
+		return
+	}
+	n.unbalanced = false
+
+	// Update statistics.
+	n.bucket.tx.stats.Rebalance++
+
+	// Ignore if node is above threshold (25%) and has enough keys.
+	var threshold = n.bucket.tx.db.pageSize / 4
+	if n.size() > threshold && len(n.inodes) > n.minKeys() {
+		return
+	}
+
+	// Root node has special handling.
+	if n.parent == nil {
+		// If root node is a branch and only has one node then collapse it.
+		if !n.isLeaf && len(n.inodes) == 1 {
+			// Move root's child up.
+			child := n.bucket.node(n.inodes[0].pgid, n)
+			n.isLeaf = child.isLeaf
+			n.inodes = child.inodes[:]
+			n.children = child.children
+
+			// Reparent all child nodes being moved.
+			for _, inode := range n.inodes {
+				if child, ok := n.bucket.nodes[inode.pgid]; ok {
+					child.parent = n
+				}
+			}
+
+			// Remove old child.
+			child.parent = nil
+			delete(n.bucket.nodes, child.pgid)
+			child.free()
+		}
+
+		return
+	}
+
+	// If node has no keys then just remove it.
+	if n.numChildren() == 0 {
+		n.parent.del(n.key)
+		n.parent.removeChild(n)
+		delete(n.bucket.nodes, n.pgid)
+		n.free()
+		n.parent.rebalance()
+		return
+	}
+
+	_assert(n.parent.numChildren() > 1, "parent must have at least 2 children")
+
+	// Destination node is right sibling if idx == 0, otherwise left sibling.
+	var target *node
+	var useNextSibling = (n.parent.childIndex(n) == 0)
+	if useNextSibling {
+		target = n.nextSibling()
+	} else {
+		target = n.prevSibling()
+	}
+
+	// If both this node and the target node are too small then merge them.
+	if useNextSibling {
+		// Reparent all child nodes being moved.
+		for _, inode := range target.inodes {
+			if child, ok := n.bucket.nodes[inode.pgid]; ok {
+				child.parent.removeChild(child)
+				child.parent = n
+				child.parent.children = append(child.parent.children, child)
+			}
+		}
+
+		// Copy over inodes from target and remove target.
+		n.inodes = append(n.inodes, target.inodes...)
+		n.parent.del(target.key)
+		n.parent.removeChild(target)
+		delete(n.bucket.nodes, target.pgid)
+		target.free()
+	} else {
+		// Reparent all child nodes being moved.
+		for _, inode := range n.inodes {
+			if child, ok := n.bucket.nodes[inode.pgid]; ok {
+				child.parent.removeChild(child)
+				child.parent = target
+				child.parent.children = append(child.parent.children, child)
+			}
+		}
+
+		// Copy over inodes to target and remove node.
+		target.inodes = append(target.inodes, n.inodes...)
+		n.parent.del(n.key)
+		n.parent.removeChild(n)
+		delete(n.bucket.nodes, n.pgid)
+		n.free()
+	}
+
+	// Either this node or the target node was deleted from the parent so rebalance it.
+	n.parent.rebalance()
+}
+
+// removeChild removes a node from the list of in-memory children.
+// This does not affect the inodes.
+func (n *node) removeChild(target *node) {
+	for i, child := range n.children {
+		if child == target {
+			n.children = append(n.children[:i], n.children[i+1:]...)
+			return
+		}
+	}
+}
+
+// dereference causes the node to copy all its inode key/value references to heap memory.
+// This is required when the mmap is reallocated so inodes are not pointing to stale data.
+func (n *node) dereference() {
+	if n.key != nil {
+		key := make([]byte, len(n.key))
+		copy(key, n.key)
+		n.key = key
+		_assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node")
+	}
+
+	for i := range n.inodes {
+		inode := &n.inodes[i]
+
+		key := make([]byte, len(inode.key))
+		copy(key, inode.key)
+		inode.key = key
+		_assert(len(inode.key) > 0, "dereference: zero-length inode key")
+
+		value := make([]byte, len(inode.value))
+		copy(value, inode.value)
+		inode.value = value
+	}
+
+	// Recursively dereference children.
+	for _, child := range n.children {
+		child.dereference()
+	}
+
+	// Update statistics.
+	n.bucket.tx.stats.NodeDeref++
+}
+
+// free adds the node's underlying page to the freelist.
+func (n *node) free() {
+	if n.pgid != 0 {
+		n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid))
+		n.pgid = 0
+	}
+}
+
+// dump writes the contents of the node to STDERR for debugging purposes.
+/*
+func (n *node) dump() {
+	// Write node header.
+	var typ = "branch"
+	if n.isLeaf {
+		typ = "leaf"
+	}
+	warnf("[NODE %d {type=%s count=%d}]", n.pgid, typ, len(n.inodes))
+
+	// Write out abbreviated version of each item.
+	for _, item := range n.inodes {
+		if n.isLeaf {
+			if item.flags&bucketLeafFlag != 0 {
+				bucket := (*bucket)(unsafe.Pointer(&item.value[0]))
+				warnf("+L %08x -> (bucket root=%d)", trunc(item.key, 4), bucket.root)
+			} else {
+				warnf("+L %08x -> %08x", trunc(item.key, 4), trunc(item.value, 4))
+			}
+		} else {
+			warnf("+B %08x -> pgid=%d", trunc(item.key, 4), item.pgid)
+		}
+	}
+	warn("")
+}
+*/
+
+type nodes []*node
+
+func (s nodes) Len() int           { return len(s) }
+func (s nodes) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+func (s nodes) Less(i, j int) bool { return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 }
+
+// inode represents an internal node inside of a node.
+// It can be used to point to elements in a page or point
+// to an element which hasn't been added to a page yet.
+type inode struct {
+	flags uint32
+	pgid  pgid
+	key   []byte
+	value []byte
+}
+
+type inodes []inode
diff --git a/vendor/github.com/coreos/bbolt/page.go b/vendor/github.com/coreos/bbolt/page.go
new file mode 100644
index 0000000..7651a6b
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/page.go
@@ -0,0 +1,178 @@
+package bolt
+
+import (
+	"fmt"
+	"os"
+	"sort"
+	"unsafe"
+)
+
+const pageHeaderSize = int(unsafe.Offsetof(((*page)(nil)).ptr))
+
+const minKeysPerPage = 2
+
+const branchPageElementSize = int(unsafe.Sizeof(branchPageElement{}))
+const leafPageElementSize = int(unsafe.Sizeof(leafPageElement{}))
+
+const (
+	branchPageFlag   = 0x01
+	leafPageFlag     = 0x02
+	metaPageFlag     = 0x04
+	freelistPageFlag = 0x10
+)
+
+const (
+	bucketLeafFlag = 0x01
+)
+
+type pgid uint64
+
+type page struct {
+	id       pgid
+	flags    uint16
+	count    uint16
+	overflow uint32
+	ptr      uintptr
+}
+
+// typ returns a human readable page type string used for debugging.
+func (p *page) typ() string {
+	if (p.flags & branchPageFlag) != 0 {
+		return "branch"
+	} else if (p.flags & leafPageFlag) != 0 {
+		return "leaf"
+	} else if (p.flags & metaPageFlag) != 0 {
+		return "meta"
+	} else if (p.flags & freelistPageFlag) != 0 {
+		return "freelist"
+	}
+	return fmt.Sprintf("unknown<%02x>", p.flags)
+}
+
+// meta returns a pointer to the metadata section of the page.
+func (p *page) meta() *meta {
+	return (*meta)(unsafe.Pointer(&p.ptr))
+}
+
+// leafPageElement retrieves the leaf node by index
+func (p *page) leafPageElement(index uint16) *leafPageElement {
+	n := &((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[index]
+	return n
+}
+
+// leafPageElements retrieves a list of leaf nodes.
+func (p *page) leafPageElements() []leafPageElement {
+	if p.count == 0 {
+		return nil
+	}
+	return ((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[:]
+}
+
+// branchPageElement retrieves the branch node by index
+func (p *page) branchPageElement(index uint16) *branchPageElement {
+	return &((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[index]
+}
+
+// branchPageElements retrieves a list of branch nodes.
+func (p *page) branchPageElements() []branchPageElement {
+	if p.count == 0 {
+		return nil
+	}
+	return ((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[:]
+}
+
+// hexdump writes n bytes of the page to STDERR as hex output.
+func (p *page) hexdump(n int) {
+	buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:n]
+	fmt.Fprintf(os.Stderr, "%x\n", buf)
+}
+
+type pages []*page
+
+func (s pages) Len() int           { return len(s) }
+func (s pages) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+func (s pages) Less(i, j int) bool { return s[i].id < s[j].id }
+
+// branchPageElement represents a node on a branch page.
+type branchPageElement struct {
+	pos   uint32
+	ksize uint32
+	pgid  pgid
+}
+
+// key returns a byte slice of the node key.
+func (n *branchPageElement) key() []byte {
+	buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
+	return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize]
+}
+
+// leafPageElement represents a node on a leaf page.
+type leafPageElement struct {
+	flags uint32
+	pos   uint32
+	ksize uint32
+	vsize uint32
+}
+
+// key returns a byte slice of the node key.
+func (n *leafPageElement) key() []byte {
+	buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
+	return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize:n.ksize]
+}
+
+// value returns a byte slice of the node value.
+func (n *leafPageElement) value() []byte {
+	buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
+	return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos+n.ksize]))[:n.vsize:n.vsize]
+}
+
+// PageInfo represents human readable information about a page.
+type PageInfo struct {
+	ID            int
+	Type          string
+	Count         int
+	OverflowCount int
+}
+
+type pgids []pgid
+
+func (s pgids) Len() int           { return len(s) }
+func (s pgids) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+func (s pgids) Less(i, j int) bool { return s[i] < s[j] }
+
+// merge returns the sorted union of a and b.
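+// For example, pgids{1, 4, 5}.merge(pgids{2, 3, 6}) alternates runs from the
+// two inputs and returns [1 2 3 4 5 6].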
+func (a pgids) merge(b pgids) pgids {
+	// Return the opposite slice if one is nil.
+	if len(a) == 0 {
+		return b
+	} else if len(b) == 0 {
+		return a
+	}
+
+	// Create a list to hold all elements from both lists.
+	merged := make(pgids, 0, len(a)+len(b))
+
+	// Assign lead to the slice with a lower starting value, follow to the higher value.
+	lead, follow := a, b
+	if b[0] < a[0] {
+		lead, follow = b, a
+	}
+
+	// Continue while there are elements in the lead.
+	for len(lead) > 0 {
+		// Merge largest prefix of lead that is ahead of follow[0].
+		n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] })
+		merged = append(merged, lead[:n]...)
+		if n >= len(lead) {
+			break
+		}
+
+		// Swap lead and follow.
+		lead, follow = follow, lead[n:]
+	}
+
+	// Append what's left in follow.
+	merged = append(merged, follow...)
+
+	return merged
+}
diff --git a/vendor/github.com/coreos/bbolt/tx.go b/vendor/github.com/coreos/bbolt/tx.go
new file mode 100644
index 0000000..1cfb4cd
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/tx.go
@@ -0,0 +1,682 @@
+package bolt
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"sort"
+	"strings"
+	"time"
+	"unsafe"
+)
+
+// txid represents the internal transaction identifier.
+type txid uint64
+
+// Tx represents a read-only or read/write transaction on the database.
+// Read-only transactions can be used for retrieving values for keys and creating cursors.
+// Read/write transactions can create and remove buckets and create and remove keys.
+//
+// IMPORTANT: You must commit or rollback transactions when you are done with
+// them. Pages cannot be reclaimed by the writer until no more transactions
+// are using them. A long running read transaction can cause the database to
+// quickly grow.
+type Tx struct {
+	writable       bool
+	managed        bool
+	db             *DB
+	meta           *meta
+	root           Bucket
+	pages          map[pgid]*page
+	stats          TxStats
+	commitHandlers []func()
+
+	// WriteFlag specifies the flag for write-related methods like WriteTo().
+	// Tx opens the database file with the specified flag to copy the data.
+	//
+	// By default, the flag is unset, which works well for mostly in-memory
+	// workloads. For databases that are much larger than available RAM,
+	// set the flag to syscall.O_DIRECT to avoid trashing the page cache.
+	WriteFlag int
+}
+
+// init initializes the transaction.
+func (tx *Tx) init(db *DB) {
+	tx.db = db
+	tx.pages = nil
+
+	// Copy the meta page since it can be changed by the writer.
+	tx.meta = &meta{}
+	db.meta().copy(tx.meta)
+
+	// Copy over the root bucket.
+	tx.root = newBucket(tx)
+	tx.root.bucket = &bucket{}
+	*tx.root.bucket = tx.meta.root
+
+	// Increment the transaction id and add a page cache for writable transactions.
+	if tx.writable {
+		tx.pages = make(map[pgid]*page)
+		tx.meta.txid += txid(1)
+	}
+}
+
+// ID returns the transaction id.
+func (tx *Tx) ID() int {
+	return int(tx.meta.txid)
+}
+
+// DB returns a reference to the database that created the transaction.
+func (tx *Tx) DB() *DB {
+	return tx.db
+}
+
+// Size returns current database size in bytes as seen by this transaction.
+func (tx *Tx) Size() int64 {
+	return int64(tx.meta.pgid) * int64(tx.db.pageSize)
+}
+
+// Writable returns whether the transaction can perform write operations.
+func (tx *Tx) Writable() bool {
+	return tx.writable
+}
+
+// Cursor creates a cursor associated with the root bucket.
+// All items in the cursor will return a nil value because all root bucket keys point to buckets.
+// The cursor is only valid as long as the transaction is open.
+// Do not use a cursor after the transaction is closed.
+func (tx *Tx) Cursor() *Cursor {
+	return tx.root.Cursor()
+}
+
+// Stats retrieves a copy of the current transaction statistics.
+func (tx *Tx) Stats() TxStats {
+	return tx.stats
+}
+
+// Bucket retrieves a bucket by name.
+// Returns nil if the bucket does not exist.
+// The bucket instance is only valid for the lifetime of the transaction.
+func (tx *Tx) Bucket(name []byte) *Bucket {
+	return tx.root.Bucket(name)
+}
+
+// CreateBucket creates a new bucket.
+// Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long.
+// The bucket instance is only valid for the lifetime of the transaction.
+func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) {
+	return tx.root.CreateBucket(name)
+}
+
+// CreateBucketIfNotExists creates a new bucket if it doesn't already exist.
+// Returns an error if the bucket name is blank, or if the bucket name is too long.
+// The bucket instance is only valid for the lifetime of the transaction.
+func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) {
+	return tx.root.CreateBucketIfNotExists(name)
+}
+
+// DeleteBucket deletes a bucket.
+// Returns an error if the bucket cannot be found or if the key represents a non-bucket value.
+func (tx *Tx) DeleteBucket(name []byte) error {
+	return tx.root.DeleteBucket(name)
+}
+
+// ForEach executes a function for each bucket in the root.
+// If the provided function returns an error then the iteration is stopped and
+// the error is returned to the caller.
+func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error {
+	return tx.root.ForEach(func(k, v []byte) error {
+		if err := fn(k, tx.root.Bucket(k)); err != nil {
+			return err
+		}
+		return nil
+	})
+}
+
+// OnCommit adds a handler function to be executed after the transaction successfully commits.
+func (tx *Tx) OnCommit(fn func()) {
+	tx.commitHandlers = append(tx.commitHandlers, fn)
+}
+
+// Commit writes all changes to disk and updates the meta page.
+// Returns an error if a disk write error occurs, or if Commit is
+// called on a read-only transaction.
+func (tx *Tx) Commit() error {
+	_assert(!tx.managed, "managed tx commit not allowed")
+	if tx.db == nil {
+		return ErrTxClosed
+	} else if !tx.writable {
+		return ErrTxNotWritable
+	}
+
+	// TODO(benbjohnson): Use vectorized I/O to write out dirty pages.
+
+	// Rebalance nodes which have had deletions.
+	var startTime = time.Now()
+	tx.root.rebalance()
+	if tx.stats.Rebalance > 0 {
+		tx.stats.RebalanceTime += time.Since(startTime)
+	}
+
+	// Spill data onto dirty pages.
+	startTime = time.Now()
+	if err := tx.root.spill(); err != nil {
+		tx.rollback()
+		return err
+	}
+	tx.stats.SpillTime += time.Since(startTime)
+
+	// Free the old root bucket.
+	tx.meta.root.root = tx.root.root
+
+	opgid := tx.meta.pgid
+
+	// Free the freelist and allocate new pages for it. This will overestimate
+	// the size of the freelist but not underestimate the size (which would be bad).
+	tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist))
+	p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1)
+	if err != nil {
+		tx.rollback()
+		return err
+	}
+	if err := tx.db.freelist.write(p); err != nil {
+		tx.rollback()
+		return err
+	}
+	tx.meta.freelist = p.id
+
+	// If the high water mark has moved up then attempt to grow the database.
+	if tx.meta.pgid > opgid {
+		if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil {
+			tx.rollback()
+			return err
+		}
+	}
+
+	// Write dirty pages to disk.
+	startTime = time.Now()
+	if err := tx.write(); err != nil {
+		tx.rollback()
+		return err
+	}
+
+	// If strict mode is enabled then perform a consistency check.
+	// All consistency errors found are reported in the panic.
+	if tx.db.StrictMode {
+		ch := tx.Check()
+		var errs []string
+		for {
+			err, ok := <-ch
+			if !ok {
+				break
+			}
+			errs = append(errs, err.Error())
+		}
+		if len(errs) > 0 {
+			panic("check fail: " + strings.Join(errs, "\n"))
+		}
+	}
+
+	// Write meta to disk.
+	if err := tx.writeMeta(); err != nil {
+		tx.rollback()
+		return err
+	}
+	tx.stats.WriteTime += time.Since(startTime)
+
+	// Finalize the transaction.
+	tx.close()
+
+	// Execute commit handlers now that the locks have been removed.
+	for _, fn := range tx.commitHandlers {
+		fn()
+	}
+
+	return nil
+}
+
+// Rollback closes the transaction and ignores all previous updates. Read-only
+// transactions must be rolled back and not committed.
+func (tx *Tx) Rollback() error {
+	_assert(!tx.managed, "managed tx rollback not allowed")
+	if tx.db == nil {
+		return ErrTxClosed
+	}
+	tx.rollback()
+	return nil
+}
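+
+// A minimal sketch (assumption, not upstream code) of the manual transaction
+// lifecycle that Commit and Rollback support; db.Begin(true) opens a
+// writable transaction:
+//
+//	tx, err := db.Begin(true)
+//	if err != nil {
+//		return err
+//	}
+//	defer tx.Rollback() // harmless after a successful Commit
+//	if _, err := tx.CreateBucketIfNotExists([]byte("widgets")); err != nil {
+//		return err
+//	}
+//	return tx.Commit()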
+
+func (tx *Tx) rollback() {
+	if tx.db == nil {
+		return
+	}
+	if tx.writable {
+		tx.db.freelist.rollback(tx.meta.txid)
+		tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist))
+	}
+	tx.close()
+}
+
+func (tx *Tx) close() {
+	if tx.db == nil {
+		return
+	}
+	if tx.writable {
+		// Grab freelist stats.
+		var freelistFreeN = tx.db.freelist.free_count()
+		var freelistPendingN = tx.db.freelist.pending_count()
+		var freelistAlloc = tx.db.freelist.size()
+
+		// Remove transaction ref & writer lock.
+		tx.db.rwtx = nil
+		tx.db.rwlock.Unlock()
+
+		// Merge statistics.
+		tx.db.statlock.Lock()
+		tx.db.stats.FreePageN = freelistFreeN
+		tx.db.stats.PendingPageN = freelistPendingN
+		tx.db.stats.FreeAlloc = (freelistFreeN + freelistPendingN) * tx.db.pageSize
+		tx.db.stats.FreelistInuse = freelistAlloc
+		tx.db.stats.TxStats.add(&tx.stats)
+		tx.db.statlock.Unlock()
+	} else {
+		tx.db.removeTx(tx)
+	}
+
+	// Clear all references.
+	tx.db = nil
+	tx.meta = nil
+	tx.root = Bucket{tx: tx}
+	tx.pages = nil
+}
+
+// Copy writes the entire database to a writer.
+// This function exists for backwards compatibility. Use WriteTo() instead.
+func (tx *Tx) Copy(w io.Writer) error {
+	_, err := tx.WriteTo(w)
+	return err
+}
+
+// WriteTo writes the entire database to a writer.
+// If err == nil then exactly tx.Size() bytes will be written into the writer.
+func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
+	// Attempt to open reader with WriteFlag
+	f, err := os.OpenFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0)
+	if err != nil {
+		return 0, err
+	}
+	defer func() { _ = f.Close() }()
+
+	// Generate a meta page. We use the same page data for both meta pages.
+	buf := make([]byte, tx.db.pageSize)
+	page := (*page)(unsafe.Pointer(&buf[0]))
+	page.flags = metaPageFlag
+	*page.meta() = *tx.meta
+
+	// Write meta 0.
+	page.id = 0
+	page.meta().checksum = page.meta().sum64()
+	nn, err := w.Write(buf)
+	n += int64(nn)
+	if err != nil {
+		return n, fmt.Errorf("meta 0 copy: %s", err)
+	}
+
+	// Write meta 1 with a lower transaction id.
+	page.id = 1
+	page.meta().txid -= 1
+	page.meta().checksum = page.meta().sum64()
+	nn, err = w.Write(buf)
+	n += int64(nn)
+	if err != nil {
+		return n, fmt.Errorf("meta 1 copy: %s", err)
+	}
+
+	// Move past the meta pages in the file.
+	if _, err := f.Seek(int64(tx.db.pageSize*2), os.SEEK_SET); err != nil {
+		return n, fmt.Errorf("seek: %s", err)
+	}
+
+	// Copy data pages.
+	wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2))
+	n += wn
+	if err != nil {
+		return n, err
+	}
+
+	return n, f.Close()
+}
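+
+// A minimal sketch (assumption, not upstream code) of streaming a hot backup
+// over HTTP with WriteTo; the handler wiring, the "my.db" filename, and the
+// captured db variable are illustrative:
+//
+//	func BackupHandleFunc(w http.ResponseWriter, req *http.Request) {
+//		err := db.View(func(tx *bolt.Tx) error {
+//			w.Header().Set("Content-Type", "application/octet-stream")
+//			w.Header().Set("Content-Disposition", `attachment; filename="my.db"`)
+//			w.Header().Set("Content-Length", strconv.FormatInt(tx.Size(), 10))
+//			_, err := tx.WriteTo(w)
+//			return err
+//		})
+//		if err != nil {
+//			http.Error(w, err.Error(), http.StatusInternalServerError)
+//		}
+//	}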
+
+// CopyFile copies the entire database to file at the given path.
+// A reader transaction is maintained during the copy so it is safe to continue
+// using the database while a copy is in progress.
+func (tx *Tx) CopyFile(path string, mode os.FileMode) error {
+	f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode)
+	if err != nil {
+		return err
+	}
+
+	err = tx.Copy(f)
+	if err != nil {
+		_ = f.Close()
+		return err
+	}
+	return f.Close()
+}
+
+// Check performs several consistency checks on the database for this transaction.
+// An error is returned if any inconsistency is found.
+//
+// It can be safely run concurrently on a writable transaction. However, this
+// incurs a high cost for large databases and databases with a lot of subbuckets
+// because of caching. Running the check on a read-only transaction removes this
+// overhead, but it is then not safe to execute other writer transactions at the
+// same time.
+func (tx *Tx) Check() <-chan error {
+	ch := make(chan error)
+	go tx.check(ch)
+	return ch
+}
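+
+// A minimal sketch (assumption, not upstream code) of draining the Check
+// channel; ranging over the channel ends once check closes it:
+//
+//	_ = db.View(func(tx *bolt.Tx) error {
+//		for err := range tx.Check() {
+//			log.Println("consistency error:", err)
+//		}
+//		return nil
+//	})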
+
+func (tx *Tx) check(ch chan error) {
+	// Check if any pages are double freed.
+	freed := make(map[pgid]bool)
+	for _, id := range tx.db.freelist.all() {
+		if freed[id] {
+			ch <- fmt.Errorf("page %d: already freed", id)
+		}
+		freed[id] = true
+	}
+
+	// Track every reachable page.
+	reachable := make(map[pgid]*page)
+	reachable[0] = tx.page(0) // meta0
+	reachable[1] = tx.page(1) // meta1
+	for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ {
+		reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist)
+	}
+
+	// Recursively check buckets.
+	tx.checkBucket(&tx.root, reachable, freed, ch)
+
+	// Ensure all pages below high water mark are either reachable or freed.
+	for i := pgid(0); i < tx.meta.pgid; i++ {
+		_, isReachable := reachable[i]
+		if !isReachable && !freed[i] {
+			ch <- fmt.Errorf("page %d: unreachable unfreed", int(i))
+		}
+	}
+
+	// Close the channel to signal completion.
+	close(ch)
+}
+
+func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, ch chan error) {
+	// Ignore inline buckets.
+	if b.root == 0 {
+		return
+	}
+
+	// Check every page used by this bucket.
+	b.tx.forEachPage(b.root, 0, func(p *page, _ int) {
+		if p.id > tx.meta.pgid {
+			ch <- fmt.Errorf("page %d: out of bounds: %d", int(p.id), int(b.tx.meta.pgid))
+		}
+
+		// Ensure each page is only referenced once.
+		for i := pgid(0); i <= pgid(p.overflow); i++ {
+			var id = p.id + i
+			if _, ok := reachable[id]; ok {
+				ch <- fmt.Errorf("page %d: multiple references", int(id))
+			}
+			reachable[id] = p
+		}
+
+		// We should only encounter un-freed leaf and branch pages.
+		if freed[p.id] {
+			ch <- fmt.Errorf("page %d: reachable freed", int(p.id))
+		} else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 {
+			ch <- fmt.Errorf("page %d: invalid type: %s", int(p.id), p.typ())
+		}
+	})
+
+	// Check each bucket within this bucket.
+	_ = b.ForEach(func(k, v []byte) error {
+		if child := b.Bucket(k); child != nil {
+			tx.checkBucket(child, reachable, freed, ch)
+		}
+		return nil
+	})
+}
+
+// allocate returns a contiguous block of memory starting at a given page.
+func (tx *Tx) allocate(count int) (*page, error) {
+	p, err := tx.db.allocate(count)
+	if err != nil {
+		return nil, err
+	}
+
+	// Save to our page cache.
+	tx.pages[p.id] = p
+
+	// Update statistics.
+	tx.stats.PageCount++
+	tx.stats.PageAlloc += count * tx.db.pageSize
+
+	return p, nil
+}
+
+// write writes any dirty pages to disk.
+func (tx *Tx) write() error {
+	// Sort pages by id.
+	pages := make(pages, 0, len(tx.pages))
+	for _, p := range tx.pages {
+		pages = append(pages, p)
+	}
+	// Clear out page cache early.
+	tx.pages = make(map[pgid]*page)
+	sort.Sort(pages)
+
+	// Write pages to disk in order.
+	for _, p := range pages {
+		size := (int(p.overflow) + 1) * tx.db.pageSize
+		offset := int64(p.id) * int64(tx.db.pageSize)
+
+		// Write out page in "max allocation" sized chunks.
+		ptr := (*[maxAllocSize]byte)(unsafe.Pointer(p))
+		for {
+			// Limit our write to our max allocation size.
+			sz := size
+			if sz > maxAllocSize-1 {
+				sz = maxAllocSize - 1
+			}
+
+			// Write chunk to disk.
+			buf := ptr[:sz]
+			if _, err := tx.db.ops.writeAt(buf, offset); err != nil {
+				return err
+			}
+
+			// Update statistics.
+			tx.stats.Write++
+
+			// Exit inner for loop if we've written all the chunks.
+			size -= sz
+			if size == 0 {
+				break
+			}
+
+			// Otherwise move offset forward and move pointer to next chunk.
+			offset += int64(sz)
+			ptr = (*[maxAllocSize]byte)(unsafe.Pointer(&ptr[sz]))
+		}
+	}
+
+	// Ignore file sync if flag is set on DB.
+	if !tx.db.NoSync || IgnoreNoSync {
+		if err := fdatasync(tx.db); err != nil {
+			return err
+		}
+	}
+
+	// Put small pages back to page pool.
+	for _, p := range pages {
+		// Ignore pages that span more than one page.
+		// These are allocated using make() instead of the page pool.
+		if int(p.overflow) != 0 {
+			continue
+		}
+
+		buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:tx.db.pageSize]
+
+		// See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1
+		for i := range buf {
+			buf[i] = 0
+		}
+		tx.db.pagePool.Put(buf)
+	}
+
+	return nil
+}
+
+// writeMeta writes the meta to the disk.
+func (tx *Tx) writeMeta() error {
+	// Create a temporary buffer for the meta page.
+	buf := make([]byte, tx.db.pageSize)
+	p := tx.db.pageInBuffer(buf, 0)
+	tx.meta.write(p)
+
+	// Write the meta page to file.
+	if _, err := tx.db.ops.writeAt(buf, int64(p.id)*int64(tx.db.pageSize)); err != nil {
+		return err
+	}
+	if !tx.db.NoSync || IgnoreNoSync {
+		if err := fdatasync(tx.db); err != nil {
+			return err
+		}
+	}
+
+	// Update statistics.
+	tx.stats.Write++
+
+	return nil
+}
+
+// page returns a reference to the page with a given id.
+// If the page has been written to, a temporary buffered page is returned.
+func (tx *Tx) page(id pgid) *page {
+	// Check the dirty pages first.
+	if tx.pages != nil {
+		if p, ok := tx.pages[id]; ok {
+			return p
+		}
+	}
+
+	// Otherwise return directly from the mmap.
+	return tx.db.page(id)
+}
+
+// forEachPage recursively iterates over every page reachable from a given page and executes a function.
+func (tx *Tx) forEachPage(pgid pgid, depth int, fn func(*page, int)) {
+	p := tx.page(pgid)
+
+	// Execute function.
+	fn(p, depth)
+
+	// Recursively loop over children.
+	if (p.flags & branchPageFlag) != 0 {
+		for i := 0; i < int(p.count); i++ {
+			elem := p.branchPageElement(uint16(i))
+			tx.forEachPage(elem.pgid, depth+1, fn)
+		}
+	}
+}
+
+// Page returns page information for a given page number.
+// This is only safe for concurrent use when used by a writable transaction.
+func (tx *Tx) Page(id int) (*PageInfo, error) {
+	if tx.db == nil {
+		return nil, ErrTxClosed
+	} else if pgid(id) >= tx.meta.pgid {
+		return nil, nil
+	}
+
+	// Build the page info.
+	p := tx.db.page(pgid(id))
+	info := &PageInfo{
+		ID:            id,
+		Count:         int(p.count),
+		OverflowCount: int(p.overflow),
+	}
+
+	// Determine the type (or if it's free).
+	if tx.db.freelist.freed(pgid(id)) {
+		info.Type = "free"
+	} else {
+		info.Type = p.typ()
+	}
+
+	return info, nil
+}
+
+// TxStats represents statistics about the actions performed by the transaction.
+type TxStats struct {
+	// Page statistics.
+	PageCount int // number of page allocations
+	PageAlloc int // total bytes allocated
+
+	// Cursor statistics.
+	CursorCount int // number of cursors created
+
+	// Node statistics.
+	NodeCount int // number of node allocations
+	NodeDeref int // number of node dereferences
+
+	// Rebalance statistics.
+	Rebalance     int           // number of node rebalances
+	RebalanceTime time.Duration // total time spent rebalancing
+
+	// Split/Spill statistics.
+	Split     int           // number of nodes split
+	Spill     int           // number of nodes spilled
+	SpillTime time.Duration // total time spent spilling
+
+	// Write statistics.
+	Write     int           // number of writes performed
+	WriteTime time.Duration // total time spent writing to disk
+}
+
+func (s *TxStats) add(other *TxStats) {
+	s.PageCount += other.PageCount
+	s.PageAlloc += other.PageAlloc
+	s.CursorCount += other.CursorCount
+	s.NodeCount += other.NodeCount
+	s.NodeDeref += other.NodeDeref
+	s.Rebalance += other.Rebalance
+	s.RebalanceTime += other.RebalanceTime
+	s.Split += other.Split
+	s.Spill += other.Spill
+	s.SpillTime += other.SpillTime
+	s.Write += other.Write
+	s.WriteTime += other.WriteTime
+}
+
+// Sub calculates and returns the difference between two sets of transaction stats.
+// This is useful when obtaining stats at two different points in time and
+// you need the performance counters that occurred within that time span.
+func (s *TxStats) Sub(other *TxStats) TxStats {
+	var diff TxStats
+	diff.PageCount = s.PageCount - other.PageCount
+	diff.PageAlloc = s.PageAlloc - other.PageAlloc
+	diff.CursorCount = s.CursorCount - other.CursorCount
+	diff.NodeCount = s.NodeCount - other.NodeCount
+	diff.NodeDeref = s.NodeDeref - other.NodeDeref
+	diff.Rebalance = s.Rebalance - other.Rebalance
+	diff.RebalanceTime = s.RebalanceTime - other.RebalanceTime
+	diff.Split = s.Split - other.Split
+	diff.Spill = s.Spill - other.Spill
+	diff.SpillTime = s.SpillTime - other.SpillTime
+	diff.Write = s.Write - other.Write
+	diff.WriteTime = s.WriteTime - other.WriteTime
+	return diff
+}
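+
+// A minimal sketch (assumption, not upstream code) of using Sub to measure
+// activity over an interval; the ten-second window is illustrative:
+//
+//	prev := db.Stats()
+//	time.Sleep(10 * time.Second)
+//	stats := db.Stats()
+//	diff := stats.TxStats.Sub(&prev.TxStats)
+//	fmt.Printf("writes in window: %d\n", diff.Write)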
diff --git a/vendor/github.com/coreos/etcd/.dockerignore b/vendor/github.com/coreos/etcd/.dockerignore
new file mode 100644
index 0000000..6b8710a
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/.dockerignore
@@ -0,0 +1 @@
+.git
diff --git a/vendor/github.com/coreos/etcd/.gitignore b/vendor/github.com/coreos/etcd/.gitignore
new file mode 100644
index 0000000..b055a98
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/.gitignore
@@ -0,0 +1,21 @@
+/agent-*
+/coverage
+/covdir
+/docs
+/vendor
+/gopath
+/gopath.proto
+/go-bindata
+/release
+/machine*
+/bin
+.vagrant
+*.etcd
+*.log
+/etcd
+*.swp
+/hack/insta-discovery/.env
+*.test
+hack/tls-setup/certs
+.idea
+*.bak
diff --git a/vendor/github.com/coreos/etcd/.godir b/vendor/github.com/coreos/etcd/.godir
new file mode 100644
index 0000000..00ff6aa
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/.godir
@@ -0,0 +1 @@
+github.com/coreos/etcd
diff --git a/vendor/github.com/coreos/etcd/.header b/vendor/github.com/coreos/etcd/.header
new file mode 100644
index 0000000..0446af6
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/.header
@@ -0,0 +1,13 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
diff --git a/vendor/github.com/coreos/etcd/.travis.yml b/vendor/github.com/coreos/etcd/.travis.yml
new file mode 100644
index 0000000..bbe4af7
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/.travis.yml
@@ -0,0 +1,73 @@
+language: go
+go_import_path: github.com/coreos/etcd
+
+sudo: required
+
+services: docker
+
+go:
+- 1.10.7
+
+notifications:
+  on_success: never
+  on_failure: never
+
+env:
+  matrix:
+  - TARGET=linux-amd64-integration
+  - TARGET=linux-amd64-functional
+  - TARGET=linux-amd64-unit
+  - TARGET=all-build
+  - TARGET=linux-386-unit
+
+matrix:
+  fast_finish: true
+  allow_failures:
+  - go: 1.10.7
+    env: TARGET=linux-386-unit
+  exclude:
+  - go: tip
+    env: TARGET=linux-386-unit
+
+before_install:
+- if [[ $TRAVIS_GO_VERSION == 1.* ]]; then docker pull gcr.io/etcd-development/etcd-test:go${TRAVIS_GO_VERSION}; fi
+
+install:
+- pushd cmd/etcd && go get -t -v ./... && popd
+
+script:
+ - echo "TRAVIS_GO_VERSION=${TRAVIS_GO_VERSION}"
+ - >
+    case "${TARGET}" in
+      linux-amd64-integration)
+        docker run --rm \
+          --volume=`pwd`:/go/src/github.com/coreos/etcd gcr.io/etcd-development/etcd-test:go${TRAVIS_GO_VERSION} \
+          /bin/bash -c "GOARCH=amd64 PASSES='integration' ./test"
+        ;;
+      linux-amd64-functional)
+        docker run --rm \
+          --volume=`pwd`:/go/src/github.com/coreos/etcd gcr.io/etcd-development/etcd-test:go${TRAVIS_GO_VERSION} \
+          /bin/bash -c "./build && GOARCH=amd64 PASSES='functional' ./test"
+        ;;
+      linux-amd64-unit)
+        docker run --rm \
+          --volume=`pwd`:/go/src/github.com/coreos/etcd gcr.io/etcd-development/etcd-test:go${TRAVIS_GO_VERSION} \
+          /bin/bash -c "GOARCH=amd64 PASSES='unit' ./test"
+        ;;
+      all-build)
+        docker run --rm \
+          --volume=`pwd`:/go/src/github.com/coreos/etcd gcr.io/etcd-development/etcd-test:go${TRAVIS_GO_VERSION} \
+          /bin/bash -c "GOARCH=amd64 PASSES='build' ./test \
+            && GOARCH=386 PASSES='build' ./test \
+            && GO_BUILD_FLAGS='-v' GOOS=darwin GOARCH=amd64 ./build \
+            && GO_BUILD_FLAGS='-v' GOOS=windows GOARCH=amd64 ./build \
+            && GO_BUILD_FLAGS='-v' GOARCH=arm ./build \
+            && GO_BUILD_FLAGS='-v' GOARCH=arm64 ./build \
+            && GO_BUILD_FLAGS='-v' GOARCH=ppc64le ./build"
+        ;;
+      linux-386-unit)
+        docker run --rm \
+          --volume=`pwd`:/go/src/github.com/coreos/etcd gcr.io/etcd-development/etcd-test:go${TRAVIS_GO_VERSION} \
+          /bin/bash -c "GOARCH=386 PASSES='unit' ./test"
+        ;;
+    esac
diff --git a/vendor/github.com/coreos/etcd/.words b/vendor/github.com/coreos/etcd/.words
new file mode 100644
index 0000000..31fffef
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/.words
@@ -0,0 +1,44 @@
+DefaultMaxRequestBytes
+ErrCodeEnhanceYourCalm
+ErrTimeout
+GoAway
+KeepAlive
+Keepalive
+MiB
+ResourceExhausted
+RPC
+RPCs
+TODO
+backoff
+blackhole
+blackholed
+cancelable
+cancelation
+cluster_proxy
+defragment
+defragmenting
+etcd
+gRPC
+goroutine
+goroutines
+healthcheck
+iff
+inflight
+keepalive
+keepalives
+keyspace
+linearization
+localhost
+mutex
+prefetching
+protobuf
+prometheus
+rafthttp
+repin
+serializable
+teardown
+too_many_pings
+uncontended
+unprefixed
+unlisting
+
diff --git a/vendor/github.com/coreos/etcd/CHANGELOG.md b/vendor/github.com/coreos/etcd/CHANGELOG.md
new file mode 100644
index 0000000..603e501
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/CHANGELOG.md
@@ -0,0 +1,746 @@
+## [v3.3.0](https://github.com/coreos/etcd/releases/tag/v3.3.0)
+
+See [code changes](https://github.com/coreos/etcd/compare/v3.2.0...v3.3.0) and [v3.3 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_3.md) for any breaking changes.
+
+### Improved
+
+- Use [`coreos/bbolt`](https://github.com/coreos/bbolt/releases) to replace [`boltdb/bolt`](https://github.com/boltdb/bolt#project-status).
+  - Fix [etcd database size grows until `mvcc: database space exceeded`](https://github.com/coreos/etcd/issues/8009).
+- [Reduce memory allocation](https://github.com/coreos/etcd/pull/8428) on [Range operations](https://github.com/coreos/etcd/pull/8475).
+- [Rate limit](https://github.com/coreos/etcd/pull/8099) and [randomize](https://github.com/coreos/etcd/pull/8101) lease revoke on restart or leader elections.
+  - Prevent [spikes in Raft proposal rate](https://github.com/coreos/etcd/issues/8096).
+- Support `clientv3` balancer failover under [network faults/partitions](https://github.com/coreos/etcd/issues/8711).
+- Better warning on [mismatched `--initial-cluster`](https://github.com/coreos/etcd/pull/8083) flag.
+
+### Changed(Breaking Changes)
+
+- Require [Go 1.9+](https://github.com/coreos/etcd/issues/6174).
+  - Compile with *Go 1.9.2*.
+  - Deprecate [`golang.org/x/net/context`](https://github.com/coreos/etcd/pull/8511).
+- Require [`google.golang.org/grpc`](https://github.com/grpc/grpc-go/releases) [**`v1.7.4`**](https://github.com/grpc/grpc-go/releases/tag/v1.7.4) or [**`v1.7.5+`**](https://github.com/grpc/grpc-go/releases/tag/v1.7.5):
+  - Deprecate [`metadata.Incoming/OutgoingContext`](https://github.com/coreos/etcd/pull/7896).
+  - Deprecate `grpclog.Logger`, upgrade to [`grpclog.LoggerV2`](https://github.com/coreos/etcd/pull/8533).
+  - Deprecate [`grpc.ErrClientConnTimeout`](https://github.com/coreos/etcd/pull/8505) errors in `clientv3`.
+  - Use [`MaxRecvMsgSize` and `MaxSendMsgSize`](https://github.com/coreos/etcd/pull/8437) to limit message size, in etcd server.
+- Upgrade [`github.com/grpc-ecosystem/grpc-gateway`](https://github.com/grpc-ecosystem/grpc-gateway/releases) `v1.2.2` to `v1.3.0`.
+- Translate [gRPC status error in v3 client `Snapshot` API](https://github.com/coreos/etcd/pull/9038).
+- Upgrade [`github.com/ugorji/go/codec`](https://github.com/ugorji/go) for v2 `client`.
+  - [Regenerated](https://github.com/coreos/etcd/pull/8721) v2 `client` source code with latest `ugorji/go/codec`.
+- Fix [`/health` endpoint JSON output](https://github.com/coreos/etcd/pull/8312).
+- v3 `etcdctl` [`lease timetolive LEASE_ID`](https://github.com/coreos/etcd/issues/9028) on expired lease now prints [`lease LEASE_ID already expired`](https://github.com/coreos/etcd/pull/9047).
+  - <=3.2 prints `lease LEASE_ID granted with TTL(0s), remaining(-1s)`.
+
+### Added(`etcd`)
+
+- Add [`--experimental-enable-v2v3`](https://github.com/coreos/etcd/pull/8407) flag to [emulate v2 API with v3](https://github.com/coreos/etcd/issues/6925).
+- Add [`--experimental-corrupt-check-time`](https://github.com/coreos/etcd/pull/8420) flag to [raise corrupt alarm monitoring](https://github.com/coreos/etcd/issues/7125).
+- Add [`--experimental-initial-corrupt-check`](https://github.com/coreos/etcd/pull/8554) flag to [check database hash before serving client/peer traffic](https://github.com/coreos/etcd/issues/8313).
+- Add [`--max-txn-ops`](https://github.com/coreos/etcd/pull/7976) flag to [configure maximum number operations in transaction](https://github.com/coreos/etcd/issues/7826).
+- Add [`--max-request-bytes`](https://github.com/coreos/etcd/pull/7968) flag to [configure maximum client request size](https://github.com/coreos/etcd/issues/7923).
+  - If not configured, it defaults to 1.5 MiB.
+- Add [`--client-crl-file`, `--peer-crl-file`](https://github.com/coreos/etcd/pull/8124) flags for [Certificate revocation list](https://github.com/coreos/etcd/issues/4034).
+- Add [`--peer-require-cn`](https://github.com/coreos/etcd/pull/8616) flag to support [CN-based auth for inter-peer connection](https://github.com/coreos/etcd/issues/8262).
+- Add [`--listen-metrics-urls`](https://github.com/coreos/etcd/pull/8242) flag for additional `/metrics` endpoints.
+  - Support [additional (non) TLS `/metrics` endpoints for a TLS-enabled cluster](https://github.com/coreos/etcd/pull/8282).
+  - e.g. `--listen-metrics-urls=https://localhost:2378,http://localhost:9379` to serve `/metrics` in secure port 2378 and insecure port 9379.
+  - Useful for [bypassing critical APIs when monitoring etcd](https://github.com/coreos/etcd/issues/8060).
+- Add [`--auto-compaction-mode`](https://github.com/coreos/etcd/pull/8123) flag to [support revision-based compaction](https://github.com/coreos/etcd/issues/8098).
+- Change `--auto-compaction-retention` flag to [accept string values](https://github.com/coreos/etcd/pull/8563) with [finer granularity](https://github.com/coreos/etcd/issues/8503).
+- Add [`--grpc-keepalive-min-time`, `--grpc-keepalive-interval`, `--grpc-keepalive-timeout`](https://github.com/coreos/etcd/pull/8535) flags to configure server-side keepalive policies.
+- Serve [`/health` endpoint as unhealthy](https://github.com/coreos/etcd/pull/8272) when [alarm is raised](https://github.com/coreos/etcd/issues/8207).
+- Provide [error information in `/health`](https://github.com/coreos/etcd/pull/8312).
+  - e.g. `{"health":false,"errors":["NOSPACE"]}`.
+- Move [logging setup to embed package](https://github.com/coreos/etcd/pull/8810)
+  - Disable gRPC server log by default.
+- Use [monotonic time in Go 1.9](https://github.com/coreos/etcd/pull/8507) for `lease` package.
+- Warn on [empty hosts in advertise URLs](https://github.com/coreos/etcd/pull/8384).
+  - Address [advertise client URLs accepts empty hosts](https://github.com/coreos/etcd/issues/8379).
+  - etcd `v3.4` will exit on this error.
+    - e.g. `--advertise-client-urls=http://:2379`.
+- Warn on [shadowed environment variables](https://github.com/coreos/etcd/pull/8385).
+  - Address [error on shadowed environment variables](https://github.com/coreos/etcd/issues/8380).
+  - etcd `v3.4` will exit on this error.
+
+### Added(API)
+
+- Support [ranges in transaction comparisons](https://github.com/coreos/etcd/pull/8025) for [disconnected linearized reads](https://github.com/coreos/etcd/issues/7924).
+- Add [nested transactions](https://github.com/coreos/etcd/pull/8102) to extend [proxy use cases](https://github.com/coreos/etcd/issues/7857).
+- Add [lease comparison target in transaction](https://github.com/coreos/etcd/pull/8324).
+- Add [lease list](https://github.com/coreos/etcd/pull/8358).
+- Add [hash by revision](https://github.com/coreos/etcd/pull/8263) for [better corruption checking against boltdb](https://github.com/coreos/etcd/issues/8016).
+
+### Added(`etcd/clientv3`)
+
+- Add [health balancer](https://github.com/coreos/etcd/pull/8545) to fix [watch API hangs](https://github.com/coreos/etcd/issues/7247), improve [endpoint switch under network faults](https://github.com/coreos/etcd/issues/7941).
+- [Refactor balancer](https://github.com/coreos/etcd/pull/8840) and add [client-side keepalive pings](https://github.com/coreos/etcd/pull/8199) to handle [network partitions](https://github.com/coreos/etcd/issues/8711).
+- Add [`MaxCallSendMsgSize` and `MaxCallRecvMsgSize`](https://github.com/coreos/etcd/pull/9047) fields to [`clientv3.Config`](https://godoc.org/github.com/coreos/etcd/clientv3#Config).
+  - Fix [exceeded response size limit error in client-side](https://github.com/coreos/etcd/issues/9043).
+  - Address [kubernetes#51099](https://github.com/kubernetes/kubernetes/issues/51099).
+  - `MaxCallSendMsgSize` default value is 2 MiB, if not configured.
+  - `MaxCallRecvMsgSize` default value is `math.MaxInt32`, if not configured.
+- Accept [`Compare_LEASE`](https://github.com/coreos/etcd/pull/8324) in [`clientv3.Compare`](https://godoc.org/github.com/coreos/etcd/clientv3#Compare).
+- Add [`LeaseValue` helper](https://github.com/coreos/etcd/pull/8488) to `Cmp` `LeaseID` values in `Txn`.
+- Add [`MoveLeader`](https://github.com/coreos/etcd/pull/8153) to `Maintenance`.
+- Add [`HashKV`](https://github.com/coreos/etcd/pull/8351) to `Maintenance`.
+- Add [`Leases`](https://github.com/coreos/etcd/pull/8358) to `Lease`.
+- Add [`clientv3/ordering`](https://github.com/coreos/etcd/pull/8092) to enforce [ordering in serialized requests](https://github.com/coreos/etcd/issues/7623).
+
+### Added(v2 `etcdctl`)
+
+- Add [`backup --with-v3`](https://github.com/coreos/etcd/pull/8479) flag.
+
+### Added(v3 `etcdctl`)
+
+- Add [`--discovery-srv`](https://github.com/coreos/etcd/pull/8462) flag.
+- Add [`--keepalive-time`, `--keepalive-timeout`](https://github.com/coreos/etcd/pull/8663) flags.
+- Add [`lease list`](https://github.com/coreos/etcd/pull/8358) command.
+- Add [`lease keep-alive --once`](https://github.com/coreos/etcd/pull/8775) flag.
+- Make [`lease timetolive LEASE_ID`](https://github.com/coreos/etcd/issues/9028) on expired lease print [`lease LEASE_ID already expired`](https://github.com/coreos/etcd/pull/9047).
+  - <=3.2 prints `lease LEASE_ID granted with TTL(0s), remaining(-1s)`.
+- Add [`defrag --data-dir`](https://github.com/coreos/etcd/pull/8367) flag.
+- Add [`move-leader`](https://github.com/coreos/etcd/pull/8153) command.
+- Add [`endpoint hashkv`](https://github.com/coreos/etcd/pull/8351) command.
+- Add [`endpoint --cluster`](https://github.com/coreos/etcd/pull/8143) flag, equivalent to [v2 `etcdctl cluster-health`](https://github.com/coreos/etcd/issues/8117).
+- Make `endpoint health` command terminate with [non-zero exit code on unhealthy status](https://github.com/coreos/etcd/pull/8342).
+- Add [`lock --ttl`](https://github.com/coreos/etcd/pull/8370) flag.
+- Support [`watch [key] [range_end] -- [exec-command…]`](https://github.com/coreos/etcd/pull/8919), equivalent to [v2 `etcdctl exec-watch`](https://github.com/coreos/etcd/issues/8814).
+- Enable [`clientv3.WithRequireLeader(context.Context)` for `watch`](https://github.com/coreos/etcd/pull/8672) command.
+- Print [`"del"` instead of `"delete"`](https://github.com/coreos/etcd/pull/8297) in `txn` interactive mode.
+- Print [`ETCD_INITIAL_ADVERTISE_PEER_URLS` in `member add`](https://github.com/coreos/etcd/pull/8332).
+
+### Added(metrics)
+
+- Add [`etcd --listen-metrics-urls`](https://github.com/coreos/etcd/pull/8242) flag for additional `/metrics` endpoints.
+  - Useful for [bypassing critical APIs when monitoring etcd](https://github.com/coreos/etcd/issues/8060).
+- Add [`etcd_server_version`](https://github.com/coreos/etcd/pull/8960) Prometheus metric.
+  - To replace [Kubernetes `etcd-version-monitor`](https://github.com/coreos/etcd/issues/8948).
+- Add [`etcd_debugging_mvcc_db_compaction_keys_total`](https://github.com/coreos/etcd/pull/8280) Prometheus metric.
+- Add [`etcd_debugging_server_lease_expired_total`](https://github.com/coreos/etcd/pull/8064) Prometheus metric.
+  - To improve [lease revoke monitoring](https://github.com/coreos/etcd/issues/8050).
+- Document [Prometheus 2.0 rules](https://github.com/coreos/etcd/pull/8879).
+- Initialize gRPC server [metrics with zero values](https://github.com/coreos/etcd/pull/8878).
+
+### Added(`grpc-proxy`)
+
+- Add [`grpc-proxy start --experimental-leasing-prefix`](https://github.com/coreos/etcd/pull/8341) flag:
+  - For disconnected linearized reads.
+  - Based on [V system leasing](https://github.com/coreos/etcd/issues/6065).
+  - See ["Disconnected consistent reads with etcd" blog post](https://coreos.com/blog/coreos-labs-disconnected-consistent-reads-with-etcd).
+- Add [`grpc-proxy start --experimental-serializable-ordering`](https://github.com/coreos/etcd/pull/8315) flag.
+  - To ensure serializable reads have monotonically increasing store revisions across endpoints.
+- Add [`grpc-proxy start --metrics-addr`](https://github.com/coreos/etcd/pull/8242) flag for an additional `/metrics` endpoint.
+  - Set `--metrics-addr=http://[HOST]:9379` to serve `/metrics` in insecure port 9379.
+- Serve [`/health` endpoint in grpc-proxy](https://github.com/coreos/etcd/pull/8322).
+- Add [`grpc-proxy start --debug`](https://github.com/coreos/etcd/pull/8994) flag.
+
+### Added(gRPC gateway)
+
+- Replace [gRPC gateway](https://github.com/grpc-ecosystem/grpc-gateway) endpoint with [`/v3beta`](https://github.com/coreos/etcd/pull/8880).
+  - To deprecate [`/v3alpha`](https://github.com/coreos/etcd/issues/8125) in `v3.4`.
+- Support ["authorization" token](https://github.com/coreos/etcd/pull/7999).
+- Support [websocket for bi-directional streams](https://github.com/coreos/etcd/pull/8257).
+  - Fix [`Watch` API with gRPC gateway](https://github.com/coreos/etcd/issues/8237).
+- Upgrade gRPC gateway to [v1.3.0](https://github.com/coreos/etcd/issues/8838).
+
+### Added(`etcd/raft`)
+
+- Add [non-voting member](https://github.com/coreos/etcd/pull/8751).
+  - To implement [Raft thesis 4.2.1 Catching up new servers](https://github.com/coreos/etcd/issues/8568).
+  - `Learner` node does not vote or promote itself.
+
+### Added/Fixed(Security/Auth)
+
+- Add [CRL based connection rejection](https://github.com/coreos/etcd/pull/8124) to manage [revoked certs](https://github.com/coreos/etcd/issues/4034).
+- Document [TLS authentication changes](https://github.com/coreos/etcd/pull/8895):
+  - [Server accepts connections if IP matches, without checking DNS entries](https://github.com/coreos/etcd/pull/8223). For instance, if the peer cert contains IP addresses and DNS names in the Subject Alternative Name (SAN) field, and the remote IP address matches one of those IP addresses, the server accepts the connection without further checking the DNS names.
+  - [Server supports reverse-lookup on wildcard DNS `SAN`](https://github.com/coreos/etcd/pull/8281). For instance, if the peer cert contains only DNS names (no IP addresses) in the Subject Alternative Name (SAN) field, the server first reverse-lookups the remote IP address to get a list of names mapping to that address (e.g. `nslookup IPADDR`), then accepts the connection if any of those names matches the peer cert's DNS names (either exactly or by wildcard). If none matches, the server forward-lookups each DNS entry in the peer cert (e.g. looks up `example.default.svc` when the entry is `*.example.default.svc`), and accepts the connection only when the host's resolved addresses include the peer's remote IP address.
+- Add [`etcd --peer-require-cn`](https://github.com/coreos/etcd/pull/8616) flag.
+  - To support [CommonName(CN) based auth](https://github.com/coreos/etcd/issues/8262) for inter peer connection.
+- [Swap priority](https://github.com/coreos/etcd/pull/8594) of cert CommonName(CN) and username + password.
+  - To address ["username and password specified in the request should take priority over CN in the cert"](https://github.com/coreos/etcd/issues/8584).
+- Protect [lease revoke with auth](https://github.com/coreos/etcd/pull/8031).
+- Provide user's role on [auth permission error](https://github.com/coreos/etcd/pull/8164).
+- Fix [auth store panic with disabled token](https://github.com/coreos/etcd/pull/8695).
+- Update `golang.org/x/crypto/bcrypt` (see [golang/crypto@6c586e1](https://github.com/golang/crypto/commit/6c586e17d90a7d08bbbc4069984180dce3b04117)).
+
+### Fixed(v2)
+
+- [Fail-over v2 client](https://github.com/coreos/etcd/pull/8519) to next endpoint on [oneshot failure](https://github.com/coreos/etcd/issues/8515).
+- [Put back `/v2/machines`](https://github.com/coreos/etcd/pull/8062) endpoint for python-etcd wrapper.
+
+### Fixed(v3)
+
+- Fix [range/put/delete operation metrics](https://github.com/coreos/etcd/pull/8054) with transaction:
+  - `etcd_debugging_mvcc_range_total`
+  - `etcd_debugging_mvcc_put_total`
+  - `etcd_debugging_mvcc_delete_total`
+  - `etcd_debugging_mvcc_txn_total`
+- Fix [`etcd_debugging_mvcc_keys_total`](https://github.com/coreos/etcd/pull/8390) on restore.
+- Fix [`etcd_debugging_mvcc_db_total_size_in_bytes`](https://github.com/coreos/etcd/pull/8120) on restore.
+  - Also change to [`prometheus.NewGaugeFunc`](https://github.com/coreos/etcd/pull/8150).
+- Fix [backend database in-memory index corruption](https://github.com/coreos/etcd/pull/8127) issue on restore (only 3.2.0 is affected).
+- Fix [watch restore from snapshot](https://github.com/coreos/etcd/pull/8427).
+- Fix ["put at-most-once" in `clientv3`](https://github.com/coreos/etcd/pull/8335).
+- Handle [empty key permission](https://github.com/coreos/etcd/pull/8514) in `etcdctl`.
+- [Fix server crash](https://github.com/coreos/etcd/pull/8010) on [invalid transaction request from gRPC gateway](https://github.com/coreos/etcd/issues/7889).
+- Fix [`clientv3.WatchResponse.Canceled`](https://github.com/coreos/etcd/pull/8283) on [compacted watch request](https://github.com/coreos/etcd/issues/8231).
+- Handle [WAL renaming failure on Windows](https://github.com/coreos/etcd/pull/8286).
+- Make [peer dial timeout longer](https://github.com/coreos/etcd/pull/8599).
+  - See [coreos/etcd-operator#1300](https://github.com/coreos/etcd-operator/issues/1300) for more detail.
+- Make server [wait up to request time-out](https://github.com/coreos/etcd/pull/8267) with [pending RPCs](https://github.com/coreos/etcd/issues/8224).
+- Fix [`grpc.Server` panic on `GracefulStop`](https://github.com/coreos/etcd/pull/8987) with [TLS-enabled server](https://github.com/coreos/etcd/issues/8916).
+- Fix ["multiple peer URLs cannot start" issue](https://github.com/coreos/etcd/issues/8383).
+- Fix server-side auth so [concurrent auth operations do not return old revision error](https://github.com/coreos/etcd/pull/8442).
+- Fix [`concurrency/stm` `Put` with serializable snapshot](https://github.com/coreos/etcd/pull/8439).
+  - Use store revision from first fetch to resolve write conflicts instead of modified revision.
+- Fix [`grpc-proxy` Snapshot API error handling](https://github.com/coreos/etcd/commit/dbd16d52fbf81e5fd806d21ff5e9148d5bf203ab).
+- Fix [`grpc-proxy` KV API `PrevKv` flag handling](https://github.com/coreos/etcd/pull/8366).
+- Fix [`grpc-proxy` KV API `KeysOnly` flag handling](https://github.com/coreos/etcd/pull/8552).
+- Upgrade [`coreos/go-systemd`](https://github.com/coreos/go-systemd/releases) to `v15` (see https://github.com/coreos/go-systemd/releases/tag/v15).
+
+### Other
+
+- Support previous two minor versions (see our [new release policy](https://github.com/coreos/etcd/pull/8805)).
+- `v3.3.x` is the last release cycle that supports `ACI`:
+  - AppC was [officially suspended](https://github.com/appc/spec#-disclaimer-), as of late 2016.
+  - [`acbuild`](https://github.com/containers/build#this-project-is-currently-unmaintained) is not maintained anymore.
+  - `*.aci` files won't be available from etcd `v3.4` release.
+- Add container registry [`gcr.io/etcd-development/etcd`](https://gcr.io/etcd-development/etcd).
+  - [quay.io/coreos/etcd](https://quay.io/coreos/etcd) is still supported as secondary.
+
+
+## [v3.2.12](https://github.com/coreos/etcd/releases/tag/v3.2.12) (2017-12-20)
+
+See [code changes](https://github.com/coreos/etcd/compare/v3.2.11...v3.2.12) and [v3.2 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_2.md) for any breaking changes.
+
+### Fixed
+
+- Fix [error message of `Revision` compactor](https://github.com/coreos/etcd/pull/8999) in server-side.
+
+### Added(`etcd/clientv3`,`etcdctl/v3`)
+
+- Add [`MaxCallSendMsgSize` and `MaxCallRecvMsgSize`](https://github.com/coreos/etcd/pull/9047) fields to [`clientv3.Config`](https://godoc.org/github.com/coreos/etcd/clientv3#Config).
+  - Fix [exceeded response size limit error in client-side](https://github.com/coreos/etcd/issues/9043).
+  - Address [kubernetes#51099](https://github.com/kubernetes/kubernetes/issues/51099).
+  - `MaxCallSendMsgSize` default value is 2 MiB, if not configured.
+  - `MaxCallRecvMsgSize` default value is `math.MaxInt32`, if not configured.
+
+### Other
+
+- Pin [grpc v1.7.5](https://github.com/grpc/grpc-go/releases/tag/v1.7.5), [grpc-gateway v1.3.0](https://github.com/grpc-ecosystem/grpc-gateway/releases/tag/v1.3.0).
+  - No code change, just to be explicit about recommended versions.
+
+
+## [v3.2.11](https://github.com/coreos/etcd/releases/tag/v3.2.11) (2017-12-05)
+
+See [code changes](https://github.com/coreos/etcd/compare/v3.2.10...v3.2.11) and [v3.2 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_2.md) for any breaking changes.
+
+### Fixed
+
+- Fix racey grpc-go's server handler transport `WriteStatus` call to prevent [TLS-enabled etcd server crash](https://github.com/coreos/etcd/issues/8904):
+  - Upgrade [`google.golang.org/grpc`](https://github.com/grpc/grpc-go/releases) `v1.7.3` to `v1.7.4`.
+  - Add [gRPC RPC failure warnings](https://github.com/coreos/etcd/pull/8939) to help debug such issues in the future.
+- Remove `--listen-metrics-urls` flag in monitoring document (not released in `v3.2.x`, planned for `v3.3.x`).
+
+### Added
+
+- Provide [more cert details](https://github.com/coreos/etcd/pull/8952/files) on TLS handshake failures.
+
+
+## [v3.1.11](https://github.com/coreos/etcd/releases/tag/v3.1.11) (2017-11-28)
+
+See [code changes](https://github.com/coreos/etcd/compare/v3.1.10...v3.1.11) and [v3.2 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_2.md) for any breaking changes.
+
+### Fixed
+
+- [#8411](https://github.com/coreos/etcd/issues/8411),[#8806](https://github.com/coreos/etcd/pull/8806) mvcc: fix watch restore from snapshot
+- [#8009](https://github.com/coreos/etcd/issues/8009),[#8902](https://github.com/coreos/etcd/pull/8902) backport coreos/bbolt v1.3.1-coreos.5
+
+
+## [v3.2.10](https://github.com/coreos/etcd/releases/tag/v3.2.10) (2017-11-16)
+
+See [code changes](https://github.com/coreos/etcd/compare/v3.2.9...v3.2.10) and [v3.2 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_2.md) for any breaking changes.
+
+### Fixed
+
+- Replace backend key-value database `boltdb/bolt` with [`coreos/bbolt`](https://github.com/coreos/bbolt/releases) to address [backend database size issue](https://github.com/coreos/etcd/issues/8009).
+- Fix `clientv3` balancer to handle [network partitions](https://github.com/coreos/etcd/issues/8711):
+  - Upgrade [`google.golang.org/grpc`](https://github.com/grpc/grpc-go/releases) `v1.2.1` to `v1.7.3`.
+  - Upgrade [`github.com/grpc-ecosystem/grpc-gateway`](https://github.com/grpc-ecosystem/grpc-gateway/releases) `v1.2` to `v1.3`.
+- Revert [discovery SRV auth `ServerName` with `*.{ROOT_DOMAIN}`](https://github.com/coreos/etcd/pull/8651) to support non-wildcard subject alternative names in the certs (see [issue #8445](https://github.com/coreos/etcd/issues/8445) for more contexts).
+  - For instance, `etcd --discovery-srv=etcd.local` will only authenticate peers/clients when the provided certs have root domain `etcd.local` (**not `*.etcd.local`**) as an entry in Subject Alternative Name (SAN) field.
+
+
+## [v3.2.9](https://github.com/coreos/etcd/releases/tag/v3.2.9) (2017-10-06)
+
+See [code changes](https://github.com/coreos/etcd/compare/v3.2.8...v3.2.9) and [v3.2 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_2.md) for any breaking changes.
+
+### Fixed(Security)
+
+- Compile with [Go 1.8.4](https://groups.google.com/d/msg/golang-nuts/sHfMg4gZNps/a-HDgDDDAAAJ).
+- Update `golang.org/x/crypto/bcrypt` (see [golang/crypto@6c586e1](https://github.com/golang/crypto/commit/6c586e17d90a7d08bbbc4069984180dce3b04117)).
+- Fix discovery SRV bootstrapping to [authenticate `ServerName` with `*.{ROOT_DOMAIN}`](https://github.com/coreos/etcd/pull/8651), in order to support sub-domain wildcard matching (see [issue #8445](https://github.com/coreos/etcd/issues/8445) for more contexts).
+  - For instance, `etcd --discovery-srv=etcd.local` will only authenticate peers/clients when the provided certs have root domain `*.etcd.local` as an entry in Subject Alternative Name (SAN) field.
+
+
+## [v3.2.8](https://github.com/coreos/etcd/releases/tag/v3.2.8) (2017-09-29)
+
+See [code changes](https://github.com/coreos/etcd/compare/v3.2.7...v3.2.8) and [v3.2 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_2.md) for any breaking changes.
+
+### Fixed
+
+- Fix v2 client failover to next endpoint on mutable operation.
+- Fix grpc-proxy to respect `KeysOnly` flag.
+
+
+## [v3.2.7](https://github.com/coreos/etcd/releases/tag/v3.2.7) (2017-09-01)
+
+See [code changes](https://github.com/coreos/etcd/compare/v3.2.6...v3.2.7) and [v3.2 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_2.md) for any breaking changes.
+
+### Fixed
+
+- Fix server-side auth so concurrent auth operations do not return old revision error.
+- Fix concurrency/stm Put with serializable snapshot
+  - Use store revision from first fetch to resolve write conflicts instead of modified revision.
+
+
+## [v3.2.6](https://github.com/coreos/etcd/releases/tag/v3.2.6) (2017-08-21)
+
+See [code changes](https://github.com/coreos/etcd/compare/v3.2.5...v3.2.6).
+
+### Fixed
+
+- Fix watch restore from snapshot.
+- Fix `etcd_debugging_mvcc_keys_total` inconsistency.
+- Fix multiple URLs for `--listen-peer-urls` flag.
+- Add `--enable-pprof` flag to etcd configuration file format.
+
+
+## [v3.2.5](https://github.com/coreos/etcd/releases/tag/v3.2.5) (2017-08-04)
+
+See [code changes](https://github.com/coreos/etcd/compare/v3.2.4...v3.2.5) and [v3.2 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_2.md) for any breaking changes.
+
+### Changed
+
+- Use reverse lookup to match wildcard DNS SAN.
+- Return non-zero exit code on unhealthy `endpoint health`.
+
+### Fixed
+
+- Fix unreachable /metrics endpoint when `--enable-v2=false`.
+- Fix grpc-proxy to respect `PrevKv` flag.
+
+### Added
+
+- Add container registry `gcr.io/etcd-development/etcd`.
+
+
+## [v3.2.4](https://github.com/coreos/etcd/releases/tag/v3.2.4) (2017-07-19)
+
+See [code changes](https://github.com/coreos/etcd/compare/v3.2.3...v3.2.4) and [v3.2 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_2.md) for any breaking changes.
+
+### Fixed
+
+- Do not block on active client stream when stopping server
+- Fix gRPC proxy Snapshot RPC error handling
+
+
+## [v3.2.3](https://github.com/coreos/etcd/releases/tag/v3.2.3) (2017-07-14)
+
+See [code changes](https://github.com/coreos/etcd/compare/v3.2.2...v3.2.3) and [v3.2 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_2.md) for any breaking changes.
+
+### Fixed
+
+- Let clients establish unlimited streams
+
+### Added
+
+- Tag docker images with minor versions
+  - e.g. `docker pull quay.io/coreos/etcd:v3.2` to fetch latest v3.2 versions
+
+
+## [v3.1.10](https://github.com/coreos/etcd/releases/tag/v3.1.10) (2017-07-14)
+
+See [code changes](https://github.com/coreos/etcd/compare/v3.1.9...v3.1.10) and [v3.1 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_1.md) for any breaking changes.
+
+### Changed
+
+- Compile with Go 1.8.3 to fix panic on `net/http.CloseNotify`
+
+### Added
+
+- Tag docker images with minor versions.
+  - e.g. `docker pull quay.io/coreos/etcd:v3.1` to fetch latest v3.1 versions.
+
+
+## [v3.2.2](https://github.com/coreos/etcd/releases/tag/v3.2.2) (2017-07-07)
+
+See [code changes](https://github.com/coreos/etcd/compare/v3.2.1...v3.2.2) and [v3.2 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_2.md) for any breaking changes.
+
+### Improved
+
+- Rate-limit lease revoke on expiration.
+- Extend leases on promote to avoid queueing effect on lease expiration.
+
+### Fixed
+
+- Use user-provided listen address to connect to gRPC gateway:
+  - `net.Listener` rewrites IPv4 0.0.0.0 to IPv6 [::], breaking IPv6 disabled hosts.
+  - Only v3.2.0, v3.2.1 are affected.
+- Accept connection with matched IP SAN but no DNS match.
+  - Don't check DNS entries in certs if there's a matching IP.
+- Fix 'tools/benchmark' watch command.
+
+
+## [v3.2.1](https://github.com/coreos/etcd/releases/tag/v3.2.1) (2017-06-23)
+
+See [code changes](https://github.com/coreos/etcd/compare/v3.2.0...v3.2.1) and [v3.2 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_2.md) for any breaking changes.
+
+### Fixed
+
+- Fix backend database in-memory index corruption issue on restore (only 3.2.0 is affected).
+- Fix gRPC gateway Txn marshaling issue.
+- Fix backend database size debugging metrics.
+
+
+## [v3.2.0](https://github.com/coreos/etcd/releases/tag/v3.2.0) (2017-06-09)
+
+See [code changes](https://github.com/coreos/etcd/compare/v3.1.0...v3.2.0) and [v3.2 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_2.md) for any breaking changes.
+
+### Improved
+
+- Improve backend read concurrency.
+
+### Added
+
+- Embedded etcd
+  - `Etcd.Peers` field is now `[]*peerListener`.
+- RPCs
+  - Add Election, Lock service.
+- Native client etcdserver/api/v3client
+  - client "embedded" in the server.
+- gRPC proxy
+  - Proxy endpoint discovery.
+  - Namespaces.
+  - Coalesce lease requests.
+- v3 client
+  - STM prefetching.
+  - Add namespace feature.
+  - Add `ErrOldCluster` with server version checking.
+  - Translate `WithPrefix()` into `WithFromKey()` for empty key.
+- v3 etcdctl
+  - Add `check perf` command.
+  - Add `--from-key` flag to role grant-permission command.
+  - `lock` command takes an optional command to execute.
+- etcd flags
+  - Add `--enable-v2` flag to configure v2 backend (enabled by default).
+  - Add `--auth-token` flag.
+- `etcd gateway`
+  - Support DNS SRV priority.
+- Auth
+  - Support Watch API.
+  - JWT tokens.
+- Logging, monitoring
+  - Server warns large snapshot operations.
+  - Add `etcd_debugging_server_lease_expired_total` metrics.
+- Security
+  - Deny incoming peer certs with wrong IP SAN.
+  - Resolve TLS `DNSNames` when SAN checking.
+  - Reload TLS certificates on every client connection.
+- Release
+  - Annotate acbuild with supports-systemd-notify.
+  - Add `nsswitch.conf` to Docker container image.
+  - Add ppc64le, arm64(experimental) builds.
+  - Compile with `Go 1.8.3`.
+
+### Changed
+
+- v3 client
+  - `LeaseTimeToLive` returns TTL=-1 resp on lease not found.
+  - `clientv3.NewFromConfigFile` is moved to `clientv3/yaml.NewConfig`.
+  - concurrency package's elections updated to match RPC interfaces.
+  - let client dial endpoints not in the balancer.
+- Dependencies
+  - Update [`google.golang.org/grpc`](https://github.com/grpc/grpc-go/releases) to `v1.2.1`.
+  - Update [`github.com/grpc-ecosystem/grpc-gateway`](https://github.com/grpc-ecosystem/grpc-gateway/releases) to `v1.2.0`.
+
+### Fixed
+
+- Allow v2 snapshot over 512MB.
+
+
+## [v3.1.9](https://github.com/coreos/etcd/releases/tag/v3.1.9) (2017-06-09)
+
+See [code changes](https://github.com/coreos/etcd/compare/v3.1.8...v3.1.9) and [v3.1 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_1.md) for any breaking changes.
+
+### Fixed
+
+- Allow v2 snapshot over 512MB.
+
+
+## [v3.1.8](https://github.com/coreos/etcd/releases/tag/v3.1.8) (2017-05-19)
+
+See [code changes](https://github.com/coreos/etcd/compare/v3.1.7...v3.1.8) and [v3.1 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_1.md) for any breaking changes.
+
+
+## [v3.1.7](https://github.com/coreos/etcd/releases/tag/v3.1.7) (2017-04-28)
+
+See [code changes](https://github.com/coreos/etcd/compare/v3.1.6...v3.1.7) and [v3.1 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_1.md) for any breaking changes.
+
+
+## [v3.1.6](https://github.com/coreos/etcd/releases/tag/v3.1.6) (2017-04-19)
+
+See [code changes](https://github.com/coreos/etcd/compare/v3.1.5...v3.1.6) and [v3.1 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_1.md) for any breaking changes.
+
+### Changed
+
+- Remove auth check in Status API.
+
+### Fixed
+
+- Fill in Auth API response header.
+
+
+## [v3.1.5](https://github.com/coreos/etcd/releases/tag/v3.1.5) (2017-03-27)
+
+See [code changes](https://github.com/coreos/etcd/compare/v3.1.4...v3.1.5) and [v3.1 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_1.md) for any breaking changes.
+
+### Added
+
+- Add `/etc/nsswitch.conf` file to alpine-based Docker image.
+
+### Fixed
+
+- Fix raft memory leak issue.
+- Fix Windows file path issues.
+
+
+## [v3.1.4](https://github.com/coreos/etcd/releases/tag/v3.1.4) (2017-03-22)
+
+See [code changes](https://github.com/coreos/etcd/compare/v3.1.3...v3.1.4) and [v3.1 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_1.md) for any breaking changes.
+
+
+## [v3.1.3](https://github.com/coreos/etcd/releases/tag/v3.1.3) (2017-03-10)
+
+See [code changes](https://github.com/coreos/etcd/compare/v3.1.2...v3.1.3) and [v3.1 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_1.md) for any breaking changes.
+
+### Changed
+
+- Use machine default host when advertise URLs are default values (`localhost:2379,2380`) AND the listen URL is `0.0.0.0`.
+
+### Fixed
+
+- Fix `etcd gateway` schema handling in DNS discovery.
+- Fix sd_notify behaviors in `gateway`, `grpc-proxy`.
+
+
+## [v3.1.2](https://github.com/coreos/etcd/releases/tag/v3.1.2) (2017-02-24)
+
+See [code changes](https://github.com/coreos/etcd/compare/v3.1.1...v3.1.2) and [v3.1 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_1.md) for any breaking changes.
+
+### Changed
+
+- Use IPv4 default host, by default (when IPv4 and IPv6 are available).
+
+### Fixed
+
+- Fix `etcd gateway` with multiple endpoints.
+
+
+## [v3.1.1](https://github.com/coreos/etcd/releases/tag/v3.1.1) (2017-02-17)
+
+See [code changes](https://github.com/coreos/etcd/compare/v3.1.0...v3.1.1) and [v3.1 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_1.md) for any breaking changes.
+
+### Changed
+
+- Compile with `Go 1.7.5`.
+
+
+## [v2.3.8](https://github.com/coreos/etcd/releases/tag/v2.3.8) (2017-02-17)
+
+See [code changes](https://github.com/coreos/etcd/compare/v2.3.7...v2.3.8).
+
+### Changed
+
+- Compile with `Go 1.7.5`.
+
+
+## [v3.1.0](https://github.com/coreos/etcd/releases/tag/v3.1.0) (2017-01-20)
+
+See [code changes](https://github.com/coreos/etcd/compare/v3.0.0...v3.1.0) and [v3.1 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_1.md) for any breaking changes.
+
+### Improved
+
+- Faster linearizable reads (implements Raft read-index).
+- v3 authentication API is now stable.
+
+### Added
+
+- Automatic leadership transfer when leader steps down.
+- etcd flags
+  - `--strict-reconfig-check` flag is set by default.
+  - Add `--log-output` flag.
+  - Add `--metrics` flag.
+- v3 client
+  - Add `SetEndpoints` method; update endpoints at runtime.
+  - Add `Sync` method; auto-update endpoints at runtime.
+  - Add `Lease TimeToLive` API; fetch lease information.
+  - replace Config.Logger field with global logger.
+  - Get API responses are sorted in ascending order by default.
+- v3 etcdctl
+  - Add `lease timetolive` command.
+  - Add `--print-value-only` flag to get command.
+  - Add `--dest-prefix` flag to make-mirror command.
+  - `get` command responses are sorted in ascending order by default.
+- `recipes` now conform to sessions defined in `clientv3/concurrency`.
+- ACI has symlinks to `/usr/local/bin/etcd*`.
+- Experimental gRPC proxy feature.
+
+### Changed
+
+- Deprecated following gRPC metrics in favor of [go-grpc-prometheus](https://github.com/grpc-ecosystem/go-grpc-prometheus):
+  - `etcd_grpc_requests_total`
+  - `etcd_grpc_requests_failed_total`
+  - `etcd_grpc_active_streams`
+  - `etcd_grpc_unary_requests_duration_seconds`
+- etcd uses default route IP if advertise URL is not given.
+- Cluster rejects removing members if quorum will be lost.
+- SRV records (e.g., infra1.example.com) must match the discovery domain (i.e., example.com) if no custom certificate authority is given.
+  - `TLSConfig.ServerName` is ignored with user-provided certificates for backwards compatibility; to be deprecated.
+  - For example, `etcd --discovery-srv=example.com` will only authenticate peers/clients when the provided certs have root domain `example.com` as an entry in Subject Alternative Name (SAN) field.
+- Discovery now has upper limit for waiting on retries.
+- Warn on binding listeners through domain names; to be deprecated.
+
+
+## [v3.0.16](https://github.com/coreos/etcd/releases/tag/v3.0.16) (2016-11-13)
+
+See [code changes](https://github.com/coreos/etcd/compare/v3.0.15...v3.0.16) and [v3.0 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_0.md) for any breaking changes.
+
+
+## [v3.0.15](https://github.com/coreos/etcd/releases/tag/v3.0.15) (2016-11-11)
+
+See [code changes](https://github.com/coreos/etcd/compare/v3.0.14...v3.0.15) and [v3.0 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_0.md) for any breaking changes.
+
+### Fixed
+
+- Fix cancel watch request with wrong range end.
+
+
+## [v3.0.14](https://github.com/coreos/etcd/releases/tag/v3.0.14) (2016-11-04)
+
+See [code changes](https://github.com/coreos/etcd/compare/v3.0.13...v3.0.14) and [v3.0 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_0.md) for any breaking changes.
+
+### Added
+
+- v3 `etcdctl migrate` command now supports `--no-ttl` flag to discard keys on transform.
+
+
+## [v3.0.13](https://github.com/coreos/etcd/releases/tag/v3.0.13) (2016-10-24)
+
+See [code changes](https://github.com/coreos/etcd/compare/v3.0.12...v3.0.13) and [v3.0 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_0.md) for any breaking changes.
+
+
+## [v3.0.12](https://github.com/coreos/etcd/releases/tag/v3.0.12) (2016-10-07)
+
+See [code changes](https://github.com/coreos/etcd/compare/v3.0.11...v3.0.12) and [v3.0 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_0.md) for any breaking changes.
+
+
+## [v3.0.11](https://github.com/coreos/etcd/releases/tag/v3.0.11) (2016-10-07)
+
+See [code changes](https://github.com/coreos/etcd/compare/v3.0.10...v3.0.11) and [v3.0 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_0.md) for any breaking changes.
+
+### Added
+
+- Server can return the previous key-value pair on request (see the sketch after this list)
+  - `clientv3.WithPrevKV` option
+  - v3 etcdctl `put,watch,del --prev-kv` flag
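+
+A minimal Go sketch of the new client option; the key and values are illustrative and error handling is trimmed:
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+	"log"
+	"time"
+
+	"github.com/coreos/etcd/clientv3"
+)
+
+func main() {
+	cli, err := clientv3.New(clientv3.Config{
+		Endpoints:   []string{"127.0.0.1:2379"},
+		DialTimeout: 5 * time.Second,
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer cli.Close()
+
+	ctx := context.Background()
+	cli.Put(ctx, "mykey", "old") // seed a value (error handling elided)
+
+	// WithPrevKV asks the server to return the overwritten key-value.
+	resp, err := cli.Put(ctx, "mykey", "new", clientv3.WithPrevKV())
+	if err != nil {
+		log.Fatal(err)
+	}
+	if resp.PrevKv != nil {
+		fmt.Printf("previous value: %s\n", resp.PrevKv.Value)
+	}
+}
+```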
+
+
+## [v3.0.10](https://github.com/coreos/etcd/releases/tag/v3.0.10) (2016-09-23)
+
+See [code changes](https://github.com/coreos/etcd/compare/v3.0.9...v3.0.10) and [v3.0 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_0.md) for any breaking changes.
+
+
+## [v3.0.9](https://github.com/coreos/etcd/releases/tag/v3.0.9) (2016-09-15)
+
+See [code changes](https://github.com/coreos/etcd/compare/v3.0.8...v3.0.9) and [v3.0 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_0.md) for any breaking changes.
+
+### Added
+
+- Warn on domain names in listen URLs (v3.2 will reject domain names).
+
+
+## [v3.0.8](https://github.com/coreos/etcd/releases/tag/v3.0.8) (2016-09-09)
+
+See [code changes](https://github.com/coreos/etcd/compare/v3.0.7...v3.0.8) and [v3.0 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_0.md) for any breaking changes.
+
+### Changed
+
+- Allow only IP addresses in listen URLs (domain names are rejected).
+
+
+## [v3.0.7](https://github.com/coreos/etcd/releases/tag/v3.0.7) (2016-08-31)
+
+See [code changes](https://github.com/coreos/etcd/compare/v3.0.6...v3.0.7) and [v3.0 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_0.md) for any breaking changes.
+
+### Changed
+
+- SRV records only allow A records (RFC 2052).
+
+
+## [v3.0.6](https://github.com/coreos/etcd/releases/tag/v3.0.6) (2016-08-19)
+
+See [code changes](https://github.com/coreos/etcd/compare/v3.0.5...v3.0.6) and [v3.0 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_0.md) for any breaking changes.
+
+
+## [v3.0.5](https://github.com/coreos/etcd/releases/tag/v3.0.5) (2016-08-19)
+
+See [code changes](https://github.com/coreos/etcd/compare/v3.0.4...v3.0.5) and [v3.0 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_0.md) for any breaking changes.
+
+### Changed
+
+- SRV records (e.g., infra1.example.com) must match the discovery domain (i.e., example.com) if no custom certificate authority is given.
+
+
+## [v3.0.4](https://github.com/coreos/etcd/releases/tag/v3.0.4) (2016-07-27)
+
+See [code changes](https://github.com/coreos/etcd/compare/v3.0.3...v3.0.4) and [v3.0 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_0.md) for any breaking changes.
+
+### Changed
+
+- v2 auth can now use common name from TLS certificate when `--client-cert-auth` is enabled.
+
+### Added
+
+- v2 `etcdctl ls` command now supports `--output=json`.
+- Add `/var/lib/etcd` directory to the official etcd Docker image.
+
+
+## [v3.0.3](https://github.com/coreos/etcd/releases/tag/v3.0.3) (2016-07-15)
+
+See [code changes](https://github.com/coreos/etcd/compare/v3.0.2...v3.0.3) and [v3.0 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_0.md) for any breaking changes.
+
+### Changed
+
+- Revert Dockerfile to use `CMD`, instead of `ENTRYPOINT`, to support running `etcdctl`.
+  - Docker commands for v3.0.2 won't work without specifying executable binary paths.
+- v3 etcdctl default endpoints are now `127.0.0.1:2379`.
+
+
+## [v3.0.2](https://github.com/coreos/etcd/releases/tag/v3.0.2) (2016-07-08)
+
+See [code changes](https://github.com/coreos/etcd/compare/v3.0.1...v3.0.2) and [v3.0 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_0.md) for any breaking changes.
+
+### Changed
+
+- Dockerfile uses `ENTRYPOINT`, instead of `CMD`, to run etcd without specifying the binary path.
+
+
+## [v3.0.1](https://github.com/coreos/etcd/releases/tag/v3.0.1) (2016-07-01)
+
+See [code changes](https://github.com/coreos/etcd/compare/v3.0.0...v3.0.1) and [v3.0 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_0.md) for any breaking changes.
+
+
+## [v3.0.0](https://github.com/coreos/etcd/releases/tag/v3.0.0) (2016-06-30)
+
+See [code changes](https://github.com/coreos/etcd/compare/v2.3.0...v3.0.0) and [v3.0 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_0.md) for any breaking changes.
diff --git a/vendor/github.com/coreos/etcd/CODE_OF_CONDUCT.md b/vendor/github.com/coreos/etcd/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000..c0c20dd
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/CODE_OF_CONDUCT.md
@@ -0,0 +1,63 @@
+## CoreOS Community Code of Conduct
+
+### Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of
+fostering an open and welcoming community, we pledge to respect all people who
+contribute through reporting issues, posting feature requests, updating
+documentation, submitting pull requests or patches, and other activities.
+
+We are committed to making participation in this project a harassment-free
+experience for everyone, regardless of level of experience, gender, gender
+identity and expression, sexual orientation, disability, personal appearance,
+body size, race, ethnicity, age, religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery
+* Personal attacks
+* Trolling or insulting/derogatory comments
+* Public or private harassment
+* Publishing others' private information, such as physical or electronic addresses, without explicit permission
+* Other unethical or unprofessional conduct.
+
+Project maintainers have the right and responsibility to remove, edit, or
+reject comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct. By adopting this Code of Conduct,
+project maintainers commit themselves to fairly and consistently applying these
+principles to every aspect of managing this project. Project maintainers who do
+not follow or enforce the Code of Conduct may be permanently removed from the
+project team.
+
+This code of conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting a project maintainer, Brandon Philips
+<brandon.philips@coreos.com>, and/or Meghan Schofield
+<meghan.schofield@coreos.com>.
+
+This Code of Conduct is adapted from the Contributor Covenant
+(http://contributor-covenant.org), version 1.2.0, available at
+http://contributor-covenant.org/version/1/2/0/
+
+### CoreOS Events Code of Conduct
+
+CoreOS events are working conferences intended for professional networking and
+collaboration in the CoreOS community. Attendees are expected to behave
+according to professional standards and in accordance with their employer’s
+policies on appropriate workplace behavior.
+
+While at CoreOS events or related social networking opportunities, attendees
+should not engage in discriminatory or offensive speech or actions including
+but not limited to gender, sexuality, race, age, disability, or religion.
+Speakers should be especially aware of these concerns.
+
+CoreOS does not condone any statements by speakers contrary to these standards.
+CoreOS reserves the right to deny entrance and/or eject from an event (without
+refund) any individual found to be engaging in discriminatory or offensive
+speech or actions.
+
+Please bring any concerns to the immediate attention of designated on-site
+staff, Brandon Philips <brandon.philips@coreos.com>, and/or Meghan Schofield
+<meghan.schofield@coreos.com>.
diff --git a/vendor/github.com/coreos/etcd/CONTRIBUTING.md b/vendor/github.com/coreos/etcd/CONTRIBUTING.md
new file mode 100644
index 0000000..31cef1f
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/CONTRIBUTING.md
@@ -0,0 +1,62 @@
+# How to contribute
+
+etcd is Apache 2.0 licensed and accepts contributions via GitHub pull requests. This document outlines some of the conventions on commit message formatting, contact points for developers, and other resources to help get contributions into etcd.
+
+## Email and chat
+
+- Email: [etcd-dev](https://groups.google.com/forum/?hl=en#!forum/etcd-dev)
+- IRC: #[etcd](irc://irc.freenode.org:6667/#etcd) channel on freenode.org
+
+## Getting started
+
+- Fork the repository on GitHub
+- Read the README.md for build instructions
+
+## Reporting bugs and creating issues
+
+Reporting bugs is one of the best ways to contribute. However, a good bug report has some very specific qualities, so please read over our short document on [reporting bugs](https://github.com/coreos/etcd/blob/master/Documentation/reporting_bugs.md) before submitting a bug report. This document might contain links to known issues, another good reason to take a look there before reporting a bug.
+
+## Contribution flow
+
+This is a rough outline of what a contributor's workflow looks like:
+
+- Create a topic branch from where to base the contribution. This is usually master.
+- Make commits of logical units.
+- Make sure commit messages are in the proper format (see below).
+- Push changes in a topic branch to a personal fork of the repository.
+- Submit a pull request to coreos/etcd.
+- The PR must receive an LGTM from two maintainers listed in the MAINTAINERS file.
+
+Thanks for contributing!
+
+### Code style
+
+etcd follows the coding style suggested by the Go community. See the [style doc](https://github.com/golang/go/wiki/CodeReviewComments) for details.
+
+Please follow this style to make etcd easy to review, maintain and develop.
+
+### Format of the commit message
+
+We follow a rough convention for commit messages that is designed to answer two
+questions: what changed and why. The subject line should feature the what and
+the body of the commit should describe the why.
+
+```
+scripts: add the test-cluster command
+
+this uses tmux to set up a test cluster that can easily be killed and started for debugging.
+
+Fixes #38
+```
+
+The format can be described more formally as follows:
+
+```
+<subsystem>: <what changed>
+<BLANK LINE>
+<why this change was made>
+<BLANK LINE>
+<footer>
+```
+
+The first line is the subject and should be no longer than 70 characters, the second line is always blank, and other lines should be wrapped at 80 characters. This keeps the message easy to read on GitHub as well as in various git tools.
diff --git a/vendor/github.com/coreos/etcd/DCO b/vendor/github.com/coreos/etcd/DCO
new file mode 100644
index 0000000..716561d
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/DCO
@@ -0,0 +1,36 @@
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+660 York Street, Suite 102,
+San Francisco, CA 94110 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+    have the right to submit it under the open source license
+    indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+    of my knowledge, is covered under an appropriate open source
+    license and I have the right under that license to submit that
+    work with modifications, whether created in whole or in part
+    by me, under the same open source license (unless I am
+    permitted to submit under a different license), as indicated
+    in the file; or
+
+(c) The contribution was provided directly to me by some other
+    person who certified (a), (b) or (c) and I have not modified
+    it.
+
+(d) I understand and agree that this project and the contribution
+    are public and that a record of the contribution (including all
+    personal information I submit with it, including my sign-off) is
+    maintained indefinitely and may be redistributed consistent with
+    this project or the open source license(s) involved.
diff --git a/vendor/github.com/coreos/etcd/Dockerfile b/vendor/github.com/coreos/etcd/Dockerfile
new file mode 100644
index 0000000..c653734
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/Dockerfile
@@ -0,0 +1,6 @@
+FROM golang
+ADD . /go/src/github.com/coreos/etcd
+ADD cmd/vendor /go/src/github.com/coreos/etcd/vendor
+RUN go install github.com/coreos/etcd
+EXPOSE 2379 2380
+ENTRYPOINT ["etcd"]
diff --git a/vendor/github.com/coreos/etcd/Dockerfile-functional-tester b/vendor/github.com/coreos/etcd/Dockerfile-functional-tester
new file mode 100644
index 0000000..cfd8086
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/Dockerfile-functional-tester
@@ -0,0 +1,53 @@
+FROM ubuntu:17.10
+
+RUN rm /bin/sh && ln -s /bin/bash /bin/sh
+RUN echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections
+
+RUN apt-get -y update \
+  && apt-get -y install \
+  build-essential \
+  gcc \
+  apt-utils \
+  pkg-config \
+  software-properties-common \
+  apt-transport-https \
+  libssl-dev \
+  sudo \
+  bash \
+  curl \
+  wget \
+  tar \
+  git \
+  && apt-get -y update \
+  && apt-get -y upgrade \
+  && apt-get -y autoremove \
+  && apt-get -y autoclean
+
+ENV GOROOT /usr/local/go
+ENV GOPATH /go
+ENV PATH ${GOPATH}/bin:${GOROOT}/bin:${PATH}
+ENV GO_VERSION REPLACE_ME_GO_VERSION
+ENV GO_DOWNLOAD_URL https://storage.googleapis.com/golang
+RUN rm -rf ${GOROOT} \
+  && curl -s ${GO_DOWNLOAD_URL}/go${GO_VERSION}.linux-amd64.tar.gz | tar -v -C /usr/local/ -xz \
+  && mkdir -p ${GOPATH}/src ${GOPATH}/bin \
+  && go version
+
+RUN mkdir -p ${GOPATH}/src/github.com/coreos/etcd
+ADD . ${GOPATH}/src/github.com/coreos/etcd
+
+RUN go get -v github.com/coreos/gofail \
+  && pushd ${GOPATH}/src/github.com/coreos/etcd \
+  && GO_BUILD_FLAGS="-v" ./build \
+  && cp ./bin/etcd /etcd \
+  && cp ./bin/etcdctl /etcdctl \
+  && GO_BUILD_FLAGS="-v" FAILPOINTS=1 ./build \
+  && cp ./bin/etcd /etcd-failpoints \
+  && ./tools/functional-tester/build \
+  && cp ./bin/etcd-agent /etcd-agent \
+  && cp ./bin/etcd-tester /etcd-tester \
+  && cp ./bin/etcd-runner /etcd-runner \
+  && go build -v -o /benchmark ./cmd/tools/benchmark \
+  && go build -v -o /etcd-test-proxy ./cmd/tools/etcd-test-proxy \
+  && popd \
+  && rm -rf ${GOPATH}/src/github.com/coreos/etcd
diff --git a/vendor/github.com/coreos/etcd/Dockerfile-release b/vendor/github.com/coreos/etcd/Dockerfile-release
new file mode 100644
index 0000000..736445f
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/Dockerfile-release
@@ -0,0 +1,17 @@
+FROM alpine:latest
+
+ADD etcd /usr/local/bin/
+ADD etcdctl /usr/local/bin/
+RUN mkdir -p /var/etcd/
+RUN mkdir -p /var/lib/etcd/
+
+# Alpine Linux doesn't use pam, which means that there is no /etc/nsswitch.conf,
+# but Go relies on /etc/nsswitch.conf to determine the order of DNS resolution
+# (see https://github.com/golang/go/commit/9dee7771f561cf6aee081c0af6658cc81fac3918).
+# To fix this, we create /etc/nsswitch.conf containing the following line:
+RUN echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf
+
+EXPOSE 2379 2380
+
+# Define default command.
+CMD ["/usr/local/bin/etcd"]
diff --git a/vendor/github.com/coreos/etcd/Dockerfile-release.arm64 b/vendor/github.com/coreos/etcd/Dockerfile-release.arm64
new file mode 100644
index 0000000..d8816e5
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/Dockerfile-release.arm64
@@ -0,0 +1,11 @@
+FROM aarch64/ubuntu:16.04
+
+ADD etcd /usr/local/bin/
+ADD etcdctl /usr/local/bin/
+ADD var/etcd /var/etcd
+ADD var/lib/etcd /var/lib/etcd
+
+EXPOSE 2379 2380
+
+# Define default command.
+CMD ["/usr/local/bin/etcd"]
diff --git a/vendor/github.com/coreos/etcd/Dockerfile-release.ppc64le b/vendor/github.com/coreos/etcd/Dockerfile-release.ppc64le
new file mode 100644
index 0000000..2fb02c4
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/Dockerfile-release.ppc64le
@@ -0,0 +1,11 @@
+FROM ppc64le/ubuntu:16.04
+
+ADD etcd /usr/local/bin/
+ADD etcdctl /usr/local/bin/
+ADD var/etcd /var/etcd
+ADD var/lib/etcd /var/lib/etcd
+
+EXPOSE 2379 2380
+
+# Define default command.
+CMD ["/usr/local/bin/etcd"]
diff --git a/vendor/github.com/coreos/etcd/Dockerfile-test b/vendor/github.com/coreos/etcd/Dockerfile-test
new file mode 100644
index 0000000..dea3ab0
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/Dockerfile-test
@@ -0,0 +1,58 @@
+FROM ubuntu:16.10
+
+RUN rm /bin/sh && ln -s /bin/bash /bin/sh
+RUN echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections
+
+RUN apt-get -y update \
+  && apt-get -y install \
+  build-essential \
+  gcc \
+  apt-utils \
+  pkg-config \
+  software-properties-common \
+  apt-transport-https \
+  libssl-dev \
+  sudo \
+  bash \
+  curl \
+  wget \
+  tar \
+  git \
+  netcat \
+  libaspell-dev \
+  libhunspell-dev \
+  hunspell-en-us \
+  aspell-en \
+  shellcheck \
+  && apt-get -y update \
+  && apt-get -y upgrade \
+  && apt-get -y autoremove \
+  && apt-get -y autoclean
+
+ENV GOROOT /usr/local/go
+ENV GOPATH /go
+ENV PATH ${GOPATH}/bin:${GOROOT}/bin:${PATH}
+ENV GO_VERSION REPLACE_ME_GO_VERSION
+ENV GO_DOWNLOAD_URL https://storage.googleapis.com/golang
+RUN rm -rf ${GOROOT} \
+  && curl -s ${GO_DOWNLOAD_URL}/go${GO_VERSION}.linux-amd64.tar.gz | tar -v -C /usr/local/ -xz \
+  && mkdir -p ${GOPATH}/src ${GOPATH}/bin \
+  && go version
+
+RUN mkdir -p ${GOPATH}/src/github.com/coreos/etcd
+WORKDIR ${GOPATH}/src/github.com/coreos/etcd
+
+ADD ./scripts/install-marker.sh /tmp/install-marker.sh
+
+RUN go get -v -u -tags spell github.com/chzchzchz/goword \
+  && go get -v -u github.com/coreos/license-bill-of-materials \
+  && go get -v -u honnef.co/go/tools/cmd/gosimple \
+  && go get -v -u honnef.co/go/tools/cmd/unused \
+  && go get -v -u honnef.co/go/tools/cmd/staticcheck \
+  && go get -v -u github.com/gyuho/gocovmerge \
+  && go get -v -u github.com/gordonklaus/ineffassign \
+  && go get -v -u github.com/alexkohler/nakedret \
+  && /tmp/install-marker.sh amd64 \
+  && rm -f /tmp/install-marker.sh \
+  && curl -s https://codecov.io/bash >/codecov \
+  && chmod 700 /codecov
diff --git a/vendor/github.com/coreos/etcd/MAINTAINERS b/vendor/github.com/coreos/etcd/MAINTAINERS
new file mode 100644
index 0000000..9983e3c
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/MAINTAINERS
@@ -0,0 +1,8 @@
+Anthony Romano <anthony.romano@coreos.com> (@heyitsanthony) pkg:*
+Brandon Philips <brandon.philips@coreos.com> (@philips) pkg:*
+Fanmin Shi <fanmin.shi@coreos.com> (@fanminshi) pkg:*
+Gyu-Ho Lee <gyu_ho.lee@coreos.com> (@gyuho) pkg:*
+Xiang Li <xiang.li@coreos.com> (@xiang90) pkg:*
+
+Ben Darnell <ben@cockroachlabs.com> (@bdarnell) pkg:github.com/coreos/etcd/raft
+Hitoshi Mitake <mitake.hitoshi@lab.ntt.co.jp> (@mitake) pkg:github.com/coreos/etcd/auth
diff --git a/vendor/github.com/coreos/etcd/Makefile b/vendor/github.com/coreos/etcd/Makefile
new file mode 100644
index 0000000..a8eceef
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/Makefile
@@ -0,0 +1,517 @@
+# run from repository root
+
+
+
+# Example:
+#   make build
+#   make clean
+#   make docker-clean
+#   make docker-start
+#   make docker-kill
+#   make docker-remove
+
+.PHONY: build
+build:
+	GO_BUILD_FLAGS="-v" ./build
+	./bin/etcd --version
+	ETCDCTL_API=3 ./bin/etcdctl version
+
+clean:
+	rm -f ./codecov
+	rm -rf ./agent-*
+	rm -rf ./covdir
+	rm -f ./*.coverprofile
+	rm -f ./*.log
+	rm -f ./bin/Dockerfile-release
+	rm -rf ./bin/*.etcd
+	rm -rf ./default.etcd
+	rm -rf ./tests/e2e/default.etcd
+	rm -rf ./gopath
+	rm -rf ./gopath.proto
+	rm -rf ./release
+	rm -f ./snapshot/localhost:*
+	rm -f ./integration/127.0.0.1:* ./integration/localhost:*
+	rm -f ./clientv3/integration/127.0.0.1:* ./clientv3/integration/localhost:*
+	rm -f ./clientv3/ordering/127.0.0.1:* ./clientv3/ordering/localhost:*
+
+docker-clean:
+	docker images
+	docker image prune --force
+
+docker-start:
+	service docker restart
+
+docker-kill:
+	docker kill `docker ps -q` || true
+
+docker-remove:
+	docker rm --force `docker ps -a -q` || true
+	docker rmi --force `docker images -q` || true
+
+
+
+GO_VERSION ?= 1.10.3
+ETCD_VERSION ?= $(shell git rev-parse --short HEAD || echo "GitNotFound")
+
+TEST_SUFFIX = $(shell date +%s | base64 | head -c 15)
+TEST_OPTS ?= PASSES='unit'
+
+TMP_DIR_MOUNT_FLAG = --mount type=tmpfs,destination=/tmp
+ifdef HOST_TMP_DIR
+	TMP_DIR_MOUNT_FLAG = --mount type=bind,source=$(HOST_TMP_DIR),destination=/tmp
+endif
+
+
+
+# Example:
+#   GO_VERSION=1.8.7 make build-docker-test
+#   make build-docker-test
+#
+#   gcloud docker -- login -u _json_key -p "$(cat /etc/gcp-key-etcd-development.json)" https://gcr.io
+#   GO_VERSION=1.8.7 make push-docker-test
+#   make push-docker-test
+#
+#   gsutil -m acl ch -u allUsers:R -r gs://artifacts.etcd-development.appspot.com
+#   make pull-docker-test
+
+build-docker-test:
+	$(info GO_VERSION: $(GO_VERSION))
+	@sed -i.bak 's|REPLACE_ME_GO_VERSION|$(GO_VERSION)|g' ./tests/Dockerfile
+	docker build \
+	  --tag gcr.io/etcd-development/etcd-test:go$(GO_VERSION) \
+	  --file ./tests/Dockerfile .
+	@mv ./tests/Dockerfile.bak ./tests/Dockerfile
+
+push-docker-test:
+	$(info GO_VERSION: $(GO_VERSION))
+	gcloud docker -- push gcr.io/etcd-development/etcd-test:go$(GO_VERSION)
+
+pull-docker-test:
+	$(info GO_VERSION: $(GO_VERSION))
+	docker pull gcr.io/etcd-development/etcd-test:go$(GO_VERSION)
+
+
+
+# Example:
+#   make build-docker-test
+#   make compile-with-docker-test
+#   make compile-setup-gopath-with-docker-test
+
+compile-with-docker-test:
+	$(info GO_VERSION: $(GO_VERSION))
+	docker run \
+	  --rm \
+	  --mount type=bind,source=`pwd`,destination=/go/src/github.com/coreos/etcd \
+	  gcr.io/etcd-development/etcd-test:go$(GO_VERSION) \
+	  /bin/bash -c "GO_BUILD_FLAGS=-v ./build && ./bin/etcd --version"
+
+compile-setup-gopath-with-docker-test:
+	$(info GO_VERSION: $(GO_VERSION))
+	docker run \
+	  --rm \
+	  --mount type=bind,source=`pwd`,destination=/etcd \
+	  gcr.io/etcd-development/etcd-test:go$(GO_VERSION) \
+	  /bin/bash -c "cd /etcd && ETCD_SETUP_GOPATH=1 GO_BUILD_FLAGS=-v ./build && ./bin/etcd --version && rm -rf ./gopath"
+
+
+
+# Example:
+#
+# Local machine:
+#   TEST_OPTS="PASSES='fmt'" make test
+#   TEST_OPTS="PASSES='fmt bom dep build unit'" make test
+#   TEST_OPTS="PASSES='build unit release integration_e2e functional'" make test
+#   TEST_OPTS="PASSES='build grpcproxy'" make test
+#
+# Example (test with docker):
+#   make pull-docker-test
+#   TEST_OPTS="PASSES='fmt'" make docker-test
+#   TEST_OPTS="VERBOSE=2 PASSES='unit'" make docker-test
+#
+# Travis CI (test with docker):
+#   TEST_OPTS="PASSES='fmt bom dep build unit'" make docker-test
+#
+# Semaphore CI (test with docker):
+#   TEST_OPTS="PASSES='build unit release integration_e2e functional'" make docker-test
+#   HOST_TMP_DIR=/tmp TEST_OPTS="PASSES='build unit release integration_e2e functional'" make docker-test
+#   TEST_OPTS="GOARCH=386 PASSES='build unit integration_e2e'" make docker-test
+#
+# grpc-proxy tests (test with docker):
+#   TEST_OPTS="PASSES='build grpcproxy'" make docker-test
+#   HOST_TMP_DIR=/tmp TEST_OPTS="PASSES='build grpcproxy'" make docker-test
+
+.PHONY: test
+test:
+	$(info TEST_OPTS: $(TEST_OPTS))
+	$(info log-file: test-$(TEST_SUFFIX).log)
+	$(TEST_OPTS) ./test 2>&1 | tee test-$(TEST_SUFFIX).log
+	! egrep "(--- FAIL:|panic: test timed out|appears to have leaked)" -B50 -A10 test-$(TEST_SUFFIX).log
+
+docker-test:
+	$(info GO_VERSION: $(GO_VERSION))
+	$(info ETCD_VERSION: $(ETCD_VERSION))
+	$(info TEST_OPTS: $(TEST_OPTS))
+	$(info log-file: test-$(TEST_SUFFIX).log)
+	$(info HOST_TMP_DIR: $(HOST_TMP_DIR))
+	$(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG))
+	docker run \
+	  --rm \
+	  $(TMP_DIR_MOUNT_FLAG) \
+	  --mount type=bind,source=`pwd`,destination=/go/src/github.com/coreos/etcd \
+	  gcr.io/etcd-development/etcd-test:go$(GO_VERSION) \
+	  /bin/bash -c "$(TEST_OPTS) ./test 2>&1 | tee test-$(TEST_SUFFIX).log"
+	! egrep "(--- FAIL:|panic: test timed out|appears to have leaked)" -B50 -A10 test-$(TEST_SUFFIX).log
+
+docker-test-coverage:
+	$(info GO_VERSION: $(GO_VERSION))
+	$(info ETCD_VERSION: $(ETCD_VERSION))
+	$(info log-file: docker-test-coverage-$(TEST_SUFFIX).log)
+	$(info HOST_TMP_DIR: $(HOST_TMP_DIR))
+	$(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG))
+	docker run \
+	  --rm \
+	  $(TMP_DIR_MOUNT_FLAG) \
+	  --mount type=bind,source=`pwd`,destination=/go/src/github.com/coreos/etcd \
+	  gcr.io/etcd-development/etcd-test:go$(GO_VERSION) \
+	  /bin/bash -c "COVERDIR=covdir PASSES='build build_cov cov' ./test 2>&1 | tee docker-test-coverage-$(TEST_SUFFIX).log && /codecov -t 6040de41-c073-4d6f-bbf8-d89256ef31e1"
+	! egrep "(--- FAIL:|panic: test timed out|appears to have leaked)" -B50 -A10 docker-test-coverage-$(TEST_SUFFIX).log
+
+
+
+# Example:
+#   make compile-with-docker-test
+#   ETCD_VERSION=v3-test make build-docker-release-master
+#   ETCD_VERSION=v3-test make push-docker-release-master
+#   gsutil -m acl ch -u allUsers:R -r gs://artifacts.etcd-development.appspot.com
+
+build-docker-release-master:
+	$(info ETCD_VERSION: $(ETCD_VERSION))
+	cp ./Dockerfile-release ./bin/Dockerfile-release
+	docker build \
+	  --tag gcr.io/etcd-development/etcd:$(ETCD_VERSION) \
+	  --file ./bin/Dockerfile-release \
+	  ./bin
+	rm -f ./bin/Dockerfile-release
+
+	docker run \
+	  --rm \
+	  gcr.io/etcd-development/etcd:$(ETCD_VERSION) \
+	  /bin/sh -c "/usr/local/bin/etcd --version && ETCDCTL_API=3 /usr/local/bin/etcdctl version"
+
+push-docker-release-master:
+	$(info ETCD_VERSION: $(ETCD_VERSION))
+	gcloud docker -- push gcr.io/etcd-development/etcd:$(ETCD_VERSION)
+
+
+
+# Example:
+#   make build-docker-test
+#   make compile-with-docker-test
+#   make build-docker-static-ip-test
+#
+#   gcloud docker -- login -u _json_key -p "$(cat /etc/gcp-key-etcd-development.json)" https://gcr.io
+#   make push-docker-static-ip-test
+#
+#   gsutil -m acl ch -u allUsers:R -r gs://artifacts.etcd-development.appspot.com
+#   make pull-docker-static-ip-test
+#
+#   make docker-static-ip-test-certs-run
+#   make docker-static-ip-test-certs-metrics-proxy-run
+
+build-docker-static-ip-test:
+	$(info GO_VERSION: $(GO_VERSION))
+	@sed -i.bak 's|REPLACE_ME_GO_VERSION|$(GO_VERSION)|g' ./tests/docker-static-ip/Dockerfile
+	docker build \
+	  --tag gcr.io/etcd-development/etcd-static-ip-test:go$(GO_VERSION) \
+	  --file ./tests/docker-static-ip/Dockerfile \
+	  ./tests/docker-static-ip
+	@mv ./tests/docker-static-ip/Dockerfile.bak ./tests/docker-static-ip/Dockerfile
+
+push-docker-static-ip-test:
+	$(info GO_VERSION: $(GO_VERSION))
+	gcloud docker -- push gcr.io/etcd-development/etcd-static-ip-test:go$(GO_VERSION)
+
+pull-docker-static-ip-test:
+	$(info GO_VERSION: $(GO_VERSION))
+	docker pull gcr.io/etcd-development/etcd-static-ip-test:go$(GO_VERSION)
+
+docker-static-ip-test-certs-run:
+	$(info GO_VERSION: $(GO_VERSION))
+	$(info HOST_TMP_DIR: $(HOST_TMP_DIR))
+	$(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG))
+	docker run \
+	  --rm \
+	  --tty \
+	  $(TMP_DIR_MOUNT_FLAG) \
+	  --mount type=bind,source=`pwd`/bin,destination=/etcd \
+	  --mount type=bind,source=`pwd`/tests/docker-static-ip/certs,destination=/certs \
+	  gcr.io/etcd-development/etcd-static-ip-test:go$(GO_VERSION) \
+	  /bin/bash -c "cd /etcd && /certs/run.sh && rm -rf m*.etcd"
+
+docker-static-ip-test-certs-metrics-proxy-run:
+	$(info GO_VERSION: $(GO_VERSION))
+	$(info HOST_TMP_DIR: $(HOST_TMP_DIR))
+	$(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG))
+	docker run \
+	  --rm \
+	  --tty \
+	  $(TMP_DIR_MOUNT_FLAG) \
+	  --mount type=bind,source=`pwd`/bin,destination=/etcd \
+	  --mount type=bind,source=`pwd`/tests/docker-static-ip/certs-metrics-proxy,destination=/certs-metrics-proxy \
+	  gcr.io/etcd-development/etcd-static-ip-test:go$(GO_VERSION) \
+	  /bin/bash -c "cd /etcd && /certs-metrics-proxy/run.sh && rm -rf m*.etcd"
+
+
+
+# Example:
+#   make build-docker-test
+#   make compile-with-docker-test
+#   make build-docker-dns-test
+#
+#   gcloud docker -- login -u _json_key -p "$(cat /etc/gcp-key-etcd-development.json)" https://gcr.io
+#   make push-docker-dns-test
+#
+#   gsutil -m acl ch -u allUsers:R -r gs://artifacts.etcd-development.appspot.com
+#   make pull-docker-dns-test
+#
+#   make docker-dns-test-insecure-run
+#   make docker-dns-test-certs-run
+#   make docker-dns-test-certs-gateway-run
+#   make docker-dns-test-certs-wildcard-run
+#   make docker-dns-test-certs-common-name-auth-run
+#   make docker-dns-test-certs-common-name-multi-run
+
+build-docker-dns-test:
+	$(info GO_VERSION: $(GO_VERSION))
+	@sed -i.bak 's|REPLACE_ME_GO_VERSION|$(GO_VERSION)|g' ./tests/docker-dns/Dockerfile
+	docker build \
+	  --tag gcr.io/etcd-development/etcd-dns-test:go$(GO_VERSION) \
+	  --file ./tests/docker-dns/Dockerfile \
+	  ./tests/docker-dns
+	@mv ./tests/docker-dns/Dockerfile.bak ./tests/docker-dns/Dockerfile
+
+	docker run \
+	  --rm \
+	  --dns 127.0.0.1 \
+	  gcr.io/etcd-development/etcd-dns-test:go$(GO_VERSION) \
+	  /bin/bash -c "/etc/init.d/bind9 start && cat /dev/null >/etc/hosts && dig etcd.local"
+
+push-docker-dns-test:
+	$(info GO_VERSION: $(GO_VERSION))
+	gcloud docker -- push gcr.io/etcd-development/etcd-dns-test:go$(GO_VERSION)
+
+pull-docker-dns-test:
+	$(info GO_VERSION: $(GO_VERSION))
+	docker pull gcr.io/etcd-development/etcd-dns-test:go$(GO_VERSION)
+
+docker-dns-test-insecure-run:
+	$(info GO_VERSION: $(GO_VERSION))
+	$(info HOST_TMP_DIR: $(HOST_TMP_DIR))
+	$(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG))
+	docker run \
+	  --rm \
+	  --tty \
+	  --dns 127.0.0.1 \
+	  $(TMP_DIR_MOUNT_FLAG) \
+	  --mount type=bind,source=`pwd`/bin,destination=/etcd \
+	  --mount type=bind,source=`pwd`/tests/docker-dns/insecure,destination=/insecure \
+	  gcr.io/etcd-development/etcd-dns-test:go$(GO_VERSION) \
+	  /bin/bash -c "cd /etcd && /insecure/run.sh && rm -rf m*.etcd"
+
+docker-dns-test-certs-run:
+	$(info GO_VERSION: $(GO_VERSION))
+	$(info HOST_TMP_DIR: $(HOST_TMP_DIR))
+	$(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG))
+	docker run \
+	  --rm \
+	  --tty \
+	  --dns 127.0.0.1 \
+	  $(TMP_DIR_MOUNT_FLAG) \
+	  --mount type=bind,source=`pwd`/bin,destination=/etcd \
+	  --mount type=bind,source=`pwd`/tests/docker-dns/certs,destination=/certs \
+	  gcr.io/etcd-development/etcd-dns-test:go$(GO_VERSION) \
+	  /bin/bash -c "cd /etcd && /certs/run.sh && rm -rf m*.etcd"
+
+docker-dns-test-certs-gateway-run:
+	$(info GO_VERSION: $(GO_VERSION))
+	$(info HOST_TMP_DIR: $(HOST_TMP_DIR))
+	$(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG))
+	docker run \
+	  --rm \
+	  --tty \
+	  --dns 127.0.0.1 \
+	  $(TMP_DIR_MOUNT_FLAG) \
+	  --mount type=bind,source=`pwd`/bin,destination=/etcd \
+	  --mount type=bind,source=`pwd`/tests/docker-dns/certs-gateway,destination=/certs-gateway \
+	  gcr.io/etcd-development/etcd-dns-test:go$(GO_VERSION) \
+	  /bin/bash -c "cd /etcd && /certs-gateway/run.sh && rm -rf m*.etcd"
+
+docker-dns-test-certs-wildcard-run:
+	$(info GO_VERSION: $(GO_VERSION))
+	$(info HOST_TMP_DIR: $(HOST_TMP_DIR))
+	$(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG))
+	docker run \
+	  --rm \
+	  --tty \
+	  --dns 127.0.0.1 \
+	  $(TMP_DIR_MOUNT_FLAG) \
+	  --mount type=bind,source=`pwd`/bin,destination=/etcd \
+	  --mount type=bind,source=`pwd`/tests/docker-dns/certs-wildcard,destination=/certs-wildcard \
+	  gcr.io/etcd-development/etcd-dns-test:go$(GO_VERSION) \
+	  /bin/bash -c "cd /etcd && /certs-wildcard/run.sh && rm -rf m*.etcd"
+
+docker-dns-test-certs-common-name-auth-run:
+	$(info GO_VERSION: $(GO_VERSION))
+	$(info HOST_TMP_DIR: $(HOST_TMP_DIR))
+	$(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG))
+	docker run \
+	  --rm \
+	  --tty \
+	  --dns 127.0.0.1 \
+	  $(TMP_DIR_MOUNT_FLAG) \
+	  --mount type=bind,source=`pwd`/bin,destination=/etcd \
+	  --mount type=bind,source=`pwd`/tests/docker-dns/certs-common-name-auth,destination=/certs-common-name-auth \
+	  gcr.io/etcd-development/etcd-dns-test:go$(GO_VERSION) \
+	  /bin/bash -c "cd /etcd && /certs-common-name-auth/run.sh && rm -rf m*.etcd"
+
+docker-dns-test-certs-common-name-multi-run:
+	$(info GO_VERSION: $(GO_VERSION))
+	$(info HOST_TMP_DIR: $(HOST_TMP_DIR))
+	$(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG))
+	docker run \
+	  --rm \
+	  --tty \
+	  --dns 127.0.0.1 \
+	  $(TMP_DIR_MOUNT_FLAG) \
+	  --mount type=bind,source=`pwd`/bin,destination=/etcd \
+	  --mount type=bind,source=`pwd`/tests/docker-dns/certs-common-name-multi,destination=/certs-common-name-multi \
+	  gcr.io/etcd-development/etcd-dns-test:go$(GO_VERSION) \
+	  /bin/bash -c "cd /etcd && /certs-common-name-multi/run.sh && rm -rf m*.etcd"
+
+
+
+# Example:
+#   make build-docker-test
+#   make compile-with-docker-test
+#   make build-docker-dns-srv-test
+#   gcloud docker -- login -u _json_key -p "$(cat /etc/gcp-key-etcd-development.json)" https://gcr.io
+#   make push-docker-dns-srv-test
+#   gsutil -m acl ch -u allUsers:R -r gs://artifacts.etcd-development.appspot.com
+#   make pull-docker-dns-srv-test
+#   make docker-dns-srv-test-certs-run
+#   make docker-dns-srv-test-certs-gateway-run
+#   make docker-dns-srv-test-certs-wildcard-run
+
+build-docker-dns-srv-test:
+	$(info GO_VERSION: $(GO_VERSION))
+	@sed -i.bak 's|REPLACE_ME_GO_VERSION|$(GO_VERSION)|g' ./tests/docker-dns-srv/Dockerfile
+	docker build \
+	  --tag gcr.io/etcd-development/etcd-dns-srv-test:go$(GO_VERSION) \
+	  --file ./tests/docker-dns-srv/Dockerfile \
+	  ./tests/docker-dns-srv
+	@mv ./tests/docker-dns-srv/Dockerfile.bak ./tests/docker-dns-srv/Dockerfile
+
+	docker run \
+	  --rm \
+	  --dns 127.0.0.1 \
+	  gcr.io/etcd-development/etcd-dns-srv-test:go$(GO_VERSION) \
+	  /bin/bash -c "/etc/init.d/bind9 start && cat /dev/null >/etc/hosts && dig +noall +answer SRV _etcd-client-ssl._tcp.etcd.local && dig +noall +answer SRV _etcd-server-ssl._tcp.etcd.local && dig +noall +answer m1.etcd.local m2.etcd.local m3.etcd.local"
+
+push-docker-dns-srv-test:
+	$(info GO_VERSION: $(GO_VERSION))
+	gcloud docker -- push gcr.io/etcd-development/etcd-dns-srv-test:go$(GO_VERSION)
+
+pull-docker-dns-srv-test:
+	$(info GO_VERSION: $(GO_VERSION))
+	docker pull gcr.io/etcd-development/etcd-dns-srv-test:go$(GO_VERSION)
+
+docker-dns-srv-test-certs-run:
+	$(info GO_VERSION: $(GO_VERSION))
+	$(info HOST_TMP_DIR: $(HOST_TMP_DIR))
+	$(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG))
+	docker run \
+	  --rm \
+	  --tty \
+	  --dns 127.0.0.1 \
+	  $(TMP_DIR_MOUNT_FLAG) \
+	  --mount type=bind,source=`pwd`/bin,destination=/etcd \
+	  --mount type=bind,source=`pwd`/tests/docker-dns-srv/certs,destination=/certs \
+	  gcr.io/etcd-development/etcd-dns-srv-test:go$(GO_VERSION) \
+	  /bin/bash -c "cd /etcd && /certs/run.sh && rm -rf m*.etcd"
+
+docker-dns-srv-test-certs-gateway-run:
+	$(info GO_VERSION: $(GO_VERSION))
+	$(info HOST_TMP_DIR: $(HOST_TMP_DIR))
+	$(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG))
+	docker run \
+	  --rm \
+	  --tty \
+	  --dns 127.0.0.1 \
+	  $(TMP_DIR_MOUNT_FLAG) \
+	  --mount type=bind,source=`pwd`/bin,destination=/etcd \
+	  --mount type=bind,source=`pwd`/tests/docker-dns-srv/certs-gateway,destination=/certs-gateway \
+	  gcr.io/etcd-development/etcd-dns-srv-test:go$(GO_VERSION) \
+	  /bin/bash -c "cd /etcd && /certs-gateway/run.sh && rm -rf m*.etcd"
+
+docker-dns-srv-test-certs-wildcard-run:
+	$(info GO_VERSION: $(GO_VERSION))
+	$(info HOST_TMP_DIR: $(HOST_TMP_DIR))
+	$(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG))
+	docker run \
+	  --rm \
+	  --tty \
+	  --dns 127.0.0.1 \
+	  $(TMP_DIR_MOUNT_FLAG) \
+	  --mount type=bind,source=`pwd`/bin,destination=/etcd \
+	  --mount type=bind,source=`pwd`/tests/docker-dns-srv/certs-wildcard,destination=/certs-wildcard \
+	  gcr.io/etcd-development/etcd-dns-srv-test:go$(GO_VERSION) \
+	  /bin/bash -c "cd /etcd && /certs-wildcard/run.sh && rm -rf m*.etcd"
+
+
+
+# Example:
+#   make build-functional
+#   make build-docker-functional
+#   make push-docker-functional
+#   make pull-docker-functional
+
+build-functional:
+	$(info GO_VERSION: $(GO_VERSION))
+	$(info ETCD_VERSION: $(ETCD_VERSION))
+	./functional/build
+	./bin/etcd-agent -help || true && \
+	  ./bin/etcd-proxy -help || true && \
+	  ./bin/etcd-runner --help || true && \
+	  ./bin/etcd-tester -help || true
+
+build-docker-functional:
+	$(info GO_VERSION: $(GO_VERSION))
+	$(info ETCD_VERSION: $(ETCD_VERSION))
+	@sed -i.bak 's|REPLACE_ME_GO_VERSION|$(GO_VERSION)|g' ./functional/Dockerfile
+	docker build \
+	  --tag gcr.io/etcd-development/etcd-functional:go$(GO_VERSION) \
+	  --file ./functional/Dockerfile \
+	  .
+	@mv ./functional/Dockerfile.bak ./functional/Dockerfile
+
+	docker run \
+	  --rm \
+	  gcr.io/etcd-development/etcd-functional:go$(GO_VERSION) \
+	  /bin/bash -c "./bin/etcd --version && \
+	   ./bin/etcd-failpoints --version && \
+	   ETCDCTL_API=3 ./bin/etcdctl version && \
+	   ./bin/etcd-agent -help || true && \
+	   ./bin/etcd-proxy -help || true && \
+	   ./bin/etcd-runner --help || true && \
+	   ./bin/etcd-tester -help || true && \
+	   ./bin/benchmark --help || true"
+
+push-docker-functional:
+	$(info GO_VERSION: $(GO_VERSION))
+	$(info ETCD_VERSION: $(ETCD_VERSION))
+	gcloud docker -- push gcr.io/etcd-development/etcd-functional:go$(GO_VERSION)
+
+pull-docker-functional:
+	$(info GO_VERSION: $(GO_VERSION))
+	$(info ETCD_VERSION: $(ETCD_VERSION))
+	docker pull gcr.io/etcd-development/etcd-functional:go$(GO_VERSION)
diff --git a/vendor/github.com/coreos/etcd/Procfile b/vendor/github.com/coreos/etcd/Procfile
new file mode 100644
index 0000000..868967c
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/Procfile
@@ -0,0 +1,5 @@
+# Run with goreman (install it with `go get github.com/mattn/goreman`)
+etcd1: bin/etcd --name infra1 --listen-client-urls http://127.0.0.1:2379 --advertise-client-urls http://127.0.0.1:2379 --listen-peer-urls http://127.0.0.1:12380 --initial-advertise-peer-urls http://127.0.0.1:12380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --enable-pprof
+etcd2: bin/etcd --name infra2 --listen-client-urls http://127.0.0.1:22379 --advertise-client-urls http://127.0.0.1:22379 --listen-peer-urls http://127.0.0.1:22380 --initial-advertise-peer-urls http://127.0.0.1:22380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --enable-pprof
+etcd3: bin/etcd --name infra3 --listen-client-urls http://127.0.0.1:32379 --advertise-client-urls http://127.0.0.1:32379 --listen-peer-urls http://127.0.0.1:32380 --initial-advertise-peer-urls http://127.0.0.1:32380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --enable-pprof
+#proxy: bin/etcd grpc-proxy start --endpoints=127.0.0.1:2379,127.0.0.1:22379,127.0.0.1:32379 --listen-addr=127.0.0.1:23790 --advertise-client-url=127.0.0.1:23790 --enable-pprof
diff --git a/vendor/github.com/coreos/etcd/Procfile.v2 b/vendor/github.com/coreos/etcd/Procfile.v2
new file mode 100644
index 0000000..41dd49f
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/Procfile.v2
@@ -0,0 +1,6 @@
+# Run with goreman (install it with `go get github.com/mattn/goreman`)
+etcd1: bin/etcd --name infra1 --listen-client-urls http://127.0.0.1:2379 --advertise-client-urls http://127.0.0.1:2379 --listen-peer-urls http://127.0.0.1:12380 --initial-advertise-peer-urls http://127.0.0.1:12380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --enable-pprof
+etcd2: bin/etcd --name infra2 --listen-client-urls http://127.0.0.1:22379 --advertise-client-urls http://127.0.0.1:22379 --listen-peer-urls http://127.0.0.1:22380 --initial-advertise-peer-urls http://127.0.0.1:22380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --enable-pprof
+etcd3: bin/etcd --name infra3 --listen-client-urls http://127.0.0.1:32379 --advertise-client-urls http://127.0.0.1:32379 --listen-peer-urls http://127.0.0.1:32380 --initial-advertise-peer-urls http://127.0.0.1:32380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --enable-pprof
+# in future, use proxy to listen on 2379
+#proxy: bin/etcd --name infra-proxy1 --proxy=on --listen-client-urls http://127.0.0.1:2378 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --enable-pprof
diff --git a/vendor/github.com/coreos/etcd/README.md b/vendor/github.com/coreos/etcd/README.md
new file mode 100644
index 0000000..2b55901
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/README.md
@@ -0,0 +1,161 @@
+# etcd
+
+[![Go Report Card](https://goreportcard.com/badge/github.com/coreos/etcd?style=flat-square)](https://goreportcard.com/report/github.com/coreos/etcd)
+[![Coverage](https://codecov.io/gh/coreos/etcd/branch/master/graph/badge.svg)](https://codecov.io/gh/coreos/etcd)
+[![Build Status Travis](https://img.shields.io/travis/coreos/etcdlabs.svg?style=flat-square&&branch=master)](https://travis-ci.org/coreos/etcd)
+[![Build Status Semaphore](https://semaphoreci.com/api/v1/coreos/etcd/branches/master/shields_badge.svg)](https://semaphoreci.com/coreos/etcd)
+[![Godoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/coreos/etcd)
+[![Releases](https://img.shields.io/github/release/coreos/etcd/all.svg?style=flat-square)](https://github.com/coreos/etcd/releases)
+[![LICENSE](https://img.shields.io/github/license/coreos/etcd.svg?style=flat-square)](https://github.com/coreos/etcd/blob/master/LICENSE)
+
+**Note**: The `master` branch may be in an *unstable or even broken state* during development. Please use [releases][github-release] instead of the `master` branch in order to get stable binaries.
+
+*The etcd v2 [documentation](Documentation/v2/README.md) has moved.*
+
+![etcd Logo](logos/etcd-horizontal-color.png)
+
+etcd is a distributed, reliable key-value store for the most critical data of a distributed system, with a focus on being:
+
+* *Simple*: well-defined, user-facing API (gRPC)
+* *Secure*: automatic TLS with optional client cert authentication
+* *Fast*: benchmarked at 10,000 writes/sec
+* *Reliable*: properly distributed using Raft
+
+etcd is written in Go and uses the [Raft][raft] consensus algorithm to manage a highly-available replicated log.
+
+etcd is used [in production by many companies](./Documentation/production-users.md), and the development team stands behind it in critical deployment scenarios, where etcd is frequently teamed with applications such as [Kubernetes][k8s], [fleet][fleet], [locksmith][locksmith], [vulcand][vulcand], [Doorman][doorman], and many others. Reliability is further ensured by rigorous [testing][etcd-tests].
+
+See [etcdctl][etcdctl] for a simple command line client.
+
+[raft]: https://raft.github.io/
+[k8s]: http://kubernetes.io/
+[doorman]: https://github.com/youtube/doorman
+[fleet]: https://github.com/coreos/fleet
+[locksmith]: https://github.com/coreos/locksmith
+[vulcand]: https://github.com/vulcand/vulcand
+[etcdctl]: https://github.com/coreos/etcd/tree/master/etcdctl
+[etcd-tests]: http://dash.etcd.io
+
+## Community meetings
+
+etcd contributors and maintainers have bi-weekly meetings at 11:00 AM (USA Pacific) on Tuesdays. There is an [iCalendar][rfc5545] format for the meetings [here](meeting.ics). Anyone is welcome to join via [Zoom][zoom] or audio-only: +1 669 900 6833. An initial agenda will be posted to the [shared Google docs][shared-meeting-notes] a day before each meeting, and everyone is welcome to suggest additional topics or other agendas.
+
+[rfc5545]: https://tools.ietf.org/html/rfc5545
+[zoom]: https://coreos.zoom.us/j/854793406
+[shared-meeting-notes]: https://docs.google.com/document/d/1DbVXOHvd9scFsSmL2oNg4YGOHJdXqtx583DmeVWrB_M/edit#
+
+## Getting started
+
+### Getting etcd
+
+The easiest way to get etcd is to use one of the pre-built release binaries which are available for OSX, Linux, Windows, [rkt][rkt], and Docker. Instructions for using these binaries are on the [GitHub releases page][github-release].
+
+For those wanting to try the very latest version, [build the latest version of etcd][dl-build] from the `master` branch. This first needs [*Go*](https://golang.org/) installed (version 1.9+ is required). All development occurs on `master`, including new features and bug fixes. Bug fixes are first targeted at `master` and subsequently ported to release branches, as described in the [branch management][branch-management] guide.
+
+[rkt]: https://github.com/rkt/rkt/releases/
+[github-release]: https://github.com/coreos/etcd/releases/
+[branch-management]: ./Documentation/branch_management.md
+[dl-build]: ./Documentation/dl_build.md#build-the-latest-version
+
+### Running etcd
+
+First start a single-member cluster of etcd.
+
+If etcd is installed using the [pre-built release binaries][github-release], run it from the installation location as below:
+
+```sh
+/tmp/etcd-download-test/etcd
+```
+
+If the binary is moved onto the system path, the etcd command can be run directly:
+
+```sh
+mv /tmp/etcd-download-test/etcd /usr/local/bin/
+
+etcd
+```
+
+If etcd is [built from the master branch][dl-build], run it as below:
+
+```sh
+./bin/etcd
+```
+
+This will bring up etcd listening on port 2379 for client communication and on port 2380 for server-to-server communication.
+
+Next, let's set a single key, and then retrieve it:
+
+```sh
+ETCDCTL_API=3 etcdctl put mykey "this is awesome"
+ETCDCTL_API=3 etcdctl get mykey
+```
+
+That's it! etcd is now running and serving client requests. For more:
+
+- [Animated quick demo][demo-gif]
+- [Interactive etcd playground][etcd-play]
+
+[demo-gif]: ./Documentation/demo.md
+[etcd-play]: http://play.etcd.io/
+
+### etcd TCP ports
+
+The [official etcd ports][iana-ports] are 2379 for client requests, and 2380 for peer communication.
+
+[iana-ports]: http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.txt
+
+### Running a local etcd cluster
+
+First install [goreman](https://github.com/mattn/goreman), which manages Procfile-based applications.
+
+Our [Procfile script](./Procfile) will set up a local example cluster. Start it with:
+
+```sh
+goreman start
+```
+
+This will bring up 3 etcd members (`infra1`, `infra2`, and `infra3`) and an etcd `grpc-proxy`, which run locally and compose a cluster.
+
+Every cluster member and proxy accepts key-value reads and writes.
+
+### Running etcd on Kubernetes
+
+To run an etcd cluster on Kubernetes, try [etcd operator](https://github.com/coreos/etcd-operator).
+
+### Next steps
+
+Now it's time to dig into the full etcd API and other guides.
+
+- Read the full [documentation][fulldoc].
+- Explore the full gRPC [API][api].
+- Set up a [multi-machine cluster][clustering].
+- Learn the [config format, env variables and flags][configuration].
+- Find [language bindings and tools][integrations].
+- Use TLS to [secure an etcd cluster][security].
+- [Tune etcd][tuning].
+
+[fulldoc]: ./Documentation/docs.md
+[api]: ./Documentation/dev-guide/api_reference_v3.md
+[clustering]: ./Documentation/op-guide/clustering.md
+[configuration]: ./Documentation/op-guide/configuration.md
+[integrations]: ./Documentation/integrations.md
+[security]: ./Documentation/op-guide/security.md
+[tuning]: ./Documentation/tuning.md
+
+## Contact
+
+- Mailing list: [etcd-dev](https://groups.google.com/forum/?hl=en#!forum/etcd-dev)
+- IRC: #[etcd](irc://irc.freenode.org:6667/#etcd) on freenode.org
+- Planning/Roadmap: [milestones](https://github.com/coreos/etcd/milestones), [roadmap](./ROADMAP.md)
+- Bugs: [issues](https://github.com/coreos/etcd/issues)
+
+## Contributing
+
+See [CONTRIBUTING](CONTRIBUTING.md) for details on submitting patches and the contribution workflow.
+
+## Reporting bugs
+
+See [reporting bugs](Documentation/reporting_bugs.md) for details about reporting any issues.
+
+## License
+
+etcd is under the Apache 2.0 license. See the [LICENSE](LICENSE) file for details.
diff --git a/vendor/github.com/coreos/etcd/ROADMAP.md b/vendor/github.com/coreos/etcd/ROADMAP.md
new file mode 100644
index 0000000..f7ae890
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/ROADMAP.md
@@ -0,0 +1,23 @@
+#  etcd roadmap
+
+**work in progress**
+
+This document defines a high level roadmap for etcd development.
+
+The dates below should not be considered authoritative, but rather indicative of the projected timeline of the project. The [milestones defined in GitHub](https://github.com/coreos/etcd/milestones) represent the most up-to-date and issue-for-issue plans.
+
+etcd 3.2 is our current stable branch. The roadmap below outlines new features that will be added to etcd and, while subject to change, defines what the next stable release will look like.
+
+### etcd 3.2 (2017-May)
+- Stable scalable proxy
+- Proxy-as-client interface passthrough
+- Lock service
+- Namespacing proxy
+- TLS Common Name and JWT token based authentication
+- Read-modify-write V3 Put
+- Improved watch performance
+- Support non-blocking concurrent read
+
+### etcd 3.3 (?)
+- TBD
+
diff --git a/vendor/github.com/coreos/etcd/V2Procfile b/vendor/github.com/coreos/etcd/V2Procfile
new file mode 100644
index 0000000..925910f
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/V2Procfile
@@ -0,0 +1,5 @@
+# Run with goreman (install it with `go get github.com/mattn/goreman`)
+etcd1: bin/etcd --name infra1 --listen-client-urls http://127.0.0.1:12379 --advertise-client-urls http://127.0.0.1:12379 --listen-peer-urls http://127.0.0.1:12380 --initial-advertise-peer-urls http://127.0.0.1:12380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --enable-pprof
+etcd2: bin/etcd --name infra2 --listen-client-urls http://127.0.0.1:22379 --advertise-client-urls http://127.0.0.1:22379 --listen-peer-urls http://127.0.0.1:22380 --initial-advertise-peer-urls http://127.0.0.1:22380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --enable-pprof
+etcd3: bin/etcd --name infra3 --listen-client-urls http://127.0.0.1:32379 --advertise-client-urls http://127.0.0.1:32379 --listen-peer-urls http://127.0.0.1:32380 --initial-advertise-peer-urls http://127.0.0.1:32380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --enable-pprof
+proxy: bin/etcd --name infra-proxy1 --proxy=on --listen-client-urls http://127.0.0.1:2379 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --enable-pprof
diff --git a/vendor/github.com/coreos/etcd/alarm/alarms.go b/vendor/github.com/coreos/etcd/alarm/alarms.go
new file mode 100644
index 0000000..4f0ebe9
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/alarm/alarms.go
@@ -0,0 +1,152 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package alarm manages health status alarms in etcd.
+package alarm
+
+import (
+	"sync"
+
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+	"github.com/coreos/etcd/mvcc/backend"
+	"github.com/coreos/etcd/pkg/types"
+	"github.com/coreos/pkg/capnslog"
+)
+
+var (
+	alarmBucketName = []byte("alarm")
+	plog            = capnslog.NewPackageLogger("github.com/coreos/etcd", "alarm")
+)
+
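+// BackendGetter provides access to the backend that persists alarms.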
+type BackendGetter interface {
+	Backend() backend.Backend
+}
+
+type alarmSet map[types.ID]*pb.AlarmMember
+
+// AlarmStore persists alarms to the backend.
+type AlarmStore struct {
+	mu    sync.Mutex
+	types map[pb.AlarmType]alarmSet
+
+	bg BackendGetter
+}
+
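+// NewAlarmStore creates an AlarmStore and restores any alarms persisted in the backend.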
+func NewAlarmStore(bg BackendGetter) (*AlarmStore, error) {
+	ret := &AlarmStore{types: make(map[pb.AlarmType]alarmSet), bg: bg}
+	err := ret.restore()
+	return ret, err
+}
+
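+// Activate raises the given alarm for the member and persists it; if the
+// alarm is already raised, the existing record is returned unchanged.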
+func (a *AlarmStore) Activate(id types.ID, at pb.AlarmType) *pb.AlarmMember {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+
+	newAlarm := &pb.AlarmMember{MemberID: uint64(id), Alarm: at}
+	if m := a.addToMap(newAlarm); m != newAlarm {
+		return m
+	}
+
+	v, err := newAlarm.Marshal()
+	if err != nil {
+		plog.Panicf("failed to marshal alarm member")
+	}
+
+	b := a.bg.Backend()
+	b.BatchTx().Lock()
+	b.BatchTx().UnsafePut(alarmBucketName, v, nil)
+	b.BatchTx().Unlock()
+
+	return newAlarm
+}
+
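+// Deactivate clears the given alarm for the member and removes it from the
+// backend; it returns nil if the alarm was not raised.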
+func (a *AlarmStore) Deactivate(id types.ID, at pb.AlarmType) *pb.AlarmMember {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+
+	t := a.types[at]
+	if t == nil {
+		t = make(alarmSet)
+		a.types[at] = t
+	}
+	m := t[id]
+	if m == nil {
+		return nil
+	}
+
+	delete(t, id)
+
+	v, err := m.Marshal()
+	if err != nil {
+		plog.Panicf("failed to marshal alarm member")
+	}
+
+	b := a.bg.Backend()
+	b.BatchTx().Lock()
+	b.BatchTx().UnsafeDelete(alarmBucketName, v)
+	b.BatchTx().Unlock()
+
+	return m
+}
+
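+// Get returns all members with the given alarm type; AlarmType_NONE matches
+// alarms of every type.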
+func (a *AlarmStore) Get(at pb.AlarmType) (ret []*pb.AlarmMember) {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+	if at == pb.AlarmType_NONE {
+		for _, t := range a.types {
+			for _, m := range t {
+				ret = append(ret, m)
+			}
+		}
+		return ret
+	}
+	for _, m := range a.types[at] {
+		ret = append(ret, m)
+	}
+	return ret
+}
+
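+// restore reloads persisted alarms from the backend into the in-memory map.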
+func (a *AlarmStore) restore() error {
+	b := a.bg.Backend()
+	tx := b.BatchTx()
+
+	tx.Lock()
+	tx.UnsafeCreateBucket(alarmBucketName)
+	err := tx.UnsafeForEach(alarmBucketName, func(k, v []byte) error {
+		var m pb.AlarmMember
+		if err := m.Unmarshal(k); err != nil {
+			return err
+		}
+		a.addToMap(&m)
+		return nil
+	})
+	tx.Unlock()
+
+	b.ForceCommit()
+	return err
+}
+
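+// addToMap records the alarm unless the member already has one of the same
+// type, in which case the existing record is returned.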
+func (a *AlarmStore) addToMap(newAlarm *pb.AlarmMember) *pb.AlarmMember {
+	t := a.types[newAlarm.Alarm]
+	if t == nil {
+		t = make(alarmSet)
+		a.types[newAlarm.Alarm] = t
+	}
+	m := t[types.ID(newAlarm.MemberID)]
+	if m != nil {
+		return m
+	}
+	t[types.ID(newAlarm.MemberID)] = newAlarm
+	return newAlarm
+}
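
The alarm store above mirrors an in-memory map (alarm type → member ID → alarm) into the backend's "alarm" bucket, so alarms survive restarts via restore(). Below is a minimal usage sketch assuming etcd's mvcc/backend package; the staticBackend helper, the database path, and the member ID are illustrative and not part of the vendored code:

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/alarm"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"github.com/coreos/etcd/mvcc/backend"
	"github.com/coreos/etcd/pkg/types"
)

// staticBackend satisfies alarm.BackendGetter with a fixed backend.
type staticBackend struct{ be backend.Backend }

func (s staticBackend) Backend() backend.Backend { return s.be }

func main() {
	be := backend.NewDefaultBackend("alarms.db") // illustrative path
	defer be.Close()

	as, err := alarm.NewAlarmStore(staticBackend{be})
	if err != nil {
		panic(err)
	}

	memberID := types.ID(1) // hypothetical member
	// Activate is idempotent: a second call returns the existing alarm.
	as.Activate(memberID, pb.AlarmType_NOSPACE)

	// AlarmType_NONE lists alarms of every type.
	for _, m := range as.Get(pb.AlarmType_NONE) {
		fmt.Printf("member %x: %s\n", m.MemberID, m.Alarm)
	}

	as.Deactivate(memberID, pb.AlarmType_NOSPACE)
}
```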
diff --git a/vendor/github.com/coreos/etcd/auth/doc.go b/vendor/github.com/coreos/etcd/auth/doc.go
new file mode 100644
index 0000000..72741a1
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/auth/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package auth provides client role authentication for accessing keys in etcd.
+package auth
diff --git a/vendor/github.com/coreos/etcd/auth/jwt.go b/vendor/github.com/coreos/etcd/auth/jwt.go
new file mode 100644
index 0000000..99b2d6b
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/auth/jwt.go
@@ -0,0 +1,139 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package auth
+
+import (
+	"context"
+	"crypto/rsa"
+	"io/ioutil"
+
+	jwt "github.com/dgrijalva/jwt-go"
+)
+
+type tokenJWT struct {
+	signMethod string
+	signKey    *rsa.PrivateKey
+	verifyKey  *rsa.PublicKey
+}
+
+func (t *tokenJWT) enable()                         {}
+func (t *tokenJWT) disable()                        {}
+func (t *tokenJWT) invalidateUser(string)           {}
+func (t *tokenJWT) genTokenPrefix() (string, error) { return "", nil }
+
+func (t *tokenJWT) info(ctx context.Context, token string, rev uint64) (*AuthInfo, bool) {
+	// rev isn't used in JWT; it is only used by the simple token provider
+	var (
+		username string
+		revision uint64
+	)
+
+	parsed, err := jwt.Parse(token, func(token *jwt.Token) (interface{}, error) {
+		return t.verifyKey, nil
+	})
+
+	switch err.(type) {
+	case nil:
+		if !parsed.Valid {
+			plog.Warningf("invalid jwt token: %s", token)
+			return nil, false
+		}
+
+		claims := parsed.Claims.(jwt.MapClaims)
+
+		username = claims["username"].(string)
+		revision = uint64(claims["revision"].(float64))
+	default:
+		plog.Warningf("failed to parse jwt token: %s", err)
+		return nil, false
+	}
+
+	return &AuthInfo{Username: username, Revision: revision}, true
+}
+
+func (t *tokenJWT) assign(ctx context.Context, username string, revision uint64) (string, error) {
+	// Future work: let a jwt token include permission information would be useful for
+	// permission checking in proxy side.
+	tk := jwt.NewWithClaims(jwt.GetSigningMethod(t.signMethod),
+		jwt.MapClaims{
+			"username": username,
+			"revision": revision,
+		})
+
+	token, err := tk.SignedString(t.signKey)
+	if err != nil {
+		plog.Debugf("failed to sign jwt token: %s", err)
+		return "", err
+	}
+
+	plog.Debugf("jwt token: %s", token)
+
+	return token, err
+}
+
+func prepareOpts(opts map[string]string) (jwtSignMethod, jwtPubKeyPath, jwtPrivKeyPath string, err error) {
+	for k, v := range opts {
+		switch k {
+		case "sign-method":
+			jwtSignMethod = v
+		case "pub-key":
+			jwtPubKeyPath = v
+		case "priv-key":
+			jwtPrivKeyPath = v
+		default:
+			plog.Errorf("unknown token specific option: %s", k)
+			return "", "", "", ErrInvalidAuthOpts
+		}
+	}
+	if len(jwtSignMethod) == 0 {
+		return "", "", "", ErrInvalidAuthOpts
+	}
+	return jwtSignMethod, jwtPubKeyPath, jwtPrivKeyPath, nil
+}
+
+func newTokenProviderJWT(opts map[string]string) (*tokenJWT, error) {
+	jwtSignMethod, jwtPubKeyPath, jwtPrivKeyPath, err := prepareOpts(opts)
+	if err != nil {
+		return nil, ErrInvalidAuthOpts
+	}
+
+	t := &tokenJWT{}
+
+	t.signMethod = jwtSignMethod
+
+	verifyBytes, err := ioutil.ReadFile(jwtPubKeyPath)
+	if err != nil {
+		plog.Errorf("failed to read public key (%s) for jwt: %s", jwtPubKeyPath, err)
+		return nil, err
+	}
+	t.verifyKey, err = jwt.ParseRSAPublicKeyFromPEM(verifyBytes)
+	if err != nil {
+		plog.Errorf("failed to parse public key (%s): %s", jwtPubKeyPath, err)
+		return nil, err
+	}
+
+	signBytes, err := ioutil.ReadFile(jwtPrivKeyPath)
+	if err != nil {
+		plog.Errorf("failed to read private key (%s) for jwt: %s", jwtPrivKeyPath, err)
+		return nil, err
+	}
+	t.signKey, err = jwt.ParseRSAPrivateKeyFromPEM(signBytes)
+	if err != nil {
+		plog.Errorf("failed to parse private key (%s): %s", jwtPrivKeyPath, err)
+		return nil, err
+	}
+
+	return t, nil
+}
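
For reference, prepareOpts above accepts exactly three option keys. A sketch of constructing the provider directly follows; it would have to live inside package auth, since the constructor is unexported, and the buildJWTProvider helper and PEM file paths are hypothetical:

```go
package auth

import "log"

// buildJWTProvider is a hypothetical helper; the PEM file paths are
// illustrative and must point at a matching RSA key pair.
func buildJWTProvider() *tokenJWT {
	opts := map[string]string{
		"sign-method": "RS256",
		"pub-key":     "/etc/etcd/jwt_RS256.pub", // used by info() to verify tokens
		"priv-key":    "/etc/etcd/jwt_RS256.key", // used by assign() to sign tokens
	}
	t, err := newTokenProviderJWT(opts)
	if err != nil {
		// an unknown key or a missing sign-method yields ErrInvalidAuthOpts
		log.Fatalf("jwt provider: %v", err)
	}
	return t
}
```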
diff --git a/vendor/github.com/coreos/etcd/auth/nop.go b/vendor/github.com/coreos/etcd/auth/nop.go
new file mode 100644
index 0000000..d437874
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/auth/nop.go
@@ -0,0 +1,35 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package auth
+
+import (
+	"context"
+)
+
+type tokenNop struct{}
+
+func (t *tokenNop) enable()                         {}
+func (t *tokenNop) disable()                        {}
+func (t *tokenNop) invalidateUser(string)           {}
+func (t *tokenNop) genTokenPrefix() (string, error) { return "", nil }
+func (t *tokenNop) info(ctx context.Context, token string, rev uint64) (*AuthInfo, bool) {
+	return nil, false
+}
+func (t *tokenNop) assign(ctx context.Context, username string, revision uint64) (string, error) {
+	return "", ErrAuthFailed
+}
+func newTokenProviderNop() (*tokenNop, error) {
+	return &tokenNop{}, nil
+}
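
tokenNop completes the set of providers. All three satisfy the TokenProvider contract declared in store.go further below; compile-time assertions to that effect (not present in the original source) would read:

```go
package auth

// Compile-time checks that every provider implements TokenProvider.
var (
	_ TokenProvider = (*tokenSimple)(nil)
	_ TokenProvider = (*tokenJWT)(nil)
	_ TokenProvider = (*tokenNop)(nil)
)
```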
diff --git a/vendor/github.com/coreos/etcd/auth/range_perm_cache.go b/vendor/github.com/coreos/etcd/auth/range_perm_cache.go
new file mode 100644
index 0000000..691b65b
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/auth/range_perm_cache.go
@@ -0,0 +1,133 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package auth
+
+import (
+	"github.com/coreos/etcd/auth/authpb"
+	"github.com/coreos/etcd/mvcc/backend"
+	"github.com/coreos/etcd/pkg/adt"
+)
+
+func getMergedPerms(tx backend.BatchTx, userName string) *unifiedRangePermissions {
+	user := getUser(tx, userName)
+	if user == nil {
+		plog.Errorf("invalid user name %s", userName)
+		return nil
+	}
+
+	readPerms := &adt.IntervalTree{}
+	writePerms := &adt.IntervalTree{}
+
+	for _, roleName := range user.Roles {
+		role := getRole(tx, roleName)
+		if role == nil {
+			continue
+		}
+
+		for _, perm := range role.KeyPermission {
+			var ivl adt.Interval
+			var rangeEnd []byte
+
+			if len(perm.RangeEnd) != 1 || perm.RangeEnd[0] != 0 {
+				rangeEnd = perm.RangeEnd
+			}
+
+			if len(perm.RangeEnd) != 0 {
+				ivl = adt.NewBytesAffineInterval(perm.Key, rangeEnd)
+			} else {
+				ivl = adt.NewBytesAffinePoint(perm.Key)
+			}
+
+			switch perm.PermType {
+			case authpb.READWRITE:
+				readPerms.Insert(ivl, struct{}{})
+				writePerms.Insert(ivl, struct{}{})
+
+			case authpb.READ:
+				readPerms.Insert(ivl, struct{}{})
+
+			case authpb.WRITE:
+				writePerms.Insert(ivl, struct{}{})
+			}
+		}
+	}
+
+	return &unifiedRangePermissions{
+		readPerms:  readPerms,
+		writePerms: writePerms,
+	}
+}
+
+func checkKeyInterval(cachedPerms *unifiedRangePermissions, key, rangeEnd []byte, permtyp authpb.Permission_Type) bool {
+	if len(rangeEnd) == 1 && rangeEnd[0] == 0 {
+		rangeEnd = nil
+	}
+
+	ivl := adt.NewBytesAffineInterval(key, rangeEnd)
+	switch permtyp {
+	case authpb.READ:
+		return cachedPerms.readPerms.Contains(ivl)
+	case authpb.WRITE:
+		return cachedPerms.writePerms.Contains(ivl)
+	default:
+		plog.Panicf("unknown auth type: %v", permtyp)
+	}
+	return false
+}
+
+func checkKeyPoint(cachedPerms *unifiedRangePermissions, key []byte, permtyp authpb.Permission_Type) bool {
+	pt := adt.NewBytesAffinePoint(key)
+	switch permtyp {
+	case authpb.READ:
+		return cachedPerms.readPerms.Intersects(pt)
+	case authpb.WRITE:
+		return cachedPerms.writePerms.Intersects(pt)
+	default:
+		plog.Panicf("unknown auth type: %v", permtyp)
+	}
+	return false
+}
+
+func (as *authStore) isRangeOpPermitted(tx backend.BatchTx, userName string, key, rangeEnd []byte, permtyp authpb.Permission_Type) bool {
+	// assumption: tx is Lock()ed
+	_, ok := as.rangePermCache[userName]
+	if !ok {
+		perms := getMergedPerms(tx, userName)
+		if perms == nil {
+			plog.Errorf("failed to create a unified permission of user %s", userName)
+			return false
+		}
+		as.rangePermCache[userName] = perms
+	}
+
+	if len(rangeEnd) == 0 {
+		return checkKeyPoint(as.rangePermCache[userName], key, permtyp)
+	}
+
+	return checkKeyInterval(as.rangePermCache[userName], key, rangeEnd, permtyp)
+}
+
+func (as *authStore) clearCachedPerm() {
+	as.rangePermCache = make(map[string]*unifiedRangePermissions)
+}
+
+func (as *authStore) invalidateCachedPerm(userName string) {
+	delete(as.rangePermCache, userName)
+}
+
+type unifiedRangePermissions struct {
+	readPerms  *adt.IntervalTree
+	writePerms *adt.IntervalTree
+}
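
The distinction between checkKeyInterval and checkKeyPoint above is Contains versus Intersects on the interval tree: a range operation must be fully covered by granted intervals, while a single-key operation only needs to touch one. A self-contained sketch of those semantics using the same pkg/adt calls; the keys are illustrative:

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/pkg/adt"
)

func main() {
	readPerms := &adt.IntervalTree{}

	// Grant read over the half-open range ["a", "c").
	readPerms.Insert(adt.NewBytesAffineInterval([]byte("a"), []byte("c")), struct{}{})

	// A sub-range is fully covered, so a range read over ["a", "b") passes.
	fmt.Println(readPerms.Contains(adt.NewBytesAffineInterval([]byte("a"), []byte("b")))) // true

	// A single key only needs to intersect some granted interval.
	fmt.Println(readPerms.Intersects(adt.NewBytesAffinePoint([]byte("b")))) // true

	// Keys at or past the range end are not covered.
	fmt.Println(readPerms.Intersects(adt.NewBytesAffinePoint([]byte("c")))) // false
}
```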
diff --git a/vendor/github.com/coreos/etcd/auth/simple_token.go b/vendor/github.com/coreos/etcd/auth/simple_token.go
new file mode 100644
index 0000000..ac55ad7
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/auth/simple_token.go
@@ -0,0 +1,223 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package auth
+
+// CAUTION: This random-number-based token mechanism is only for testing purposes;
+// the JWT-based mechanism (jwt.go) should be preferred outside of tests.
+
+import (
+	"context"
+	"crypto/rand"
+	"fmt"
+	"math/big"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+)
+
+const (
+	letters                  = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
+	defaultSimpleTokenLength = 16
+)
+
+// vars (not consts) so that tests can override them
+var (
+	simpleTokenTTL           = 5 * time.Minute
+	simpleTokenTTLResolution = 1 * time.Second
+)
+
+type simpleTokenTTLKeeper struct {
+	tokens          map[string]time.Time
+	donec           chan struct{}
+	stopc           chan struct{}
+	deleteTokenFunc func(string)
+	mu              *sync.Mutex
+}
+
+func (tm *simpleTokenTTLKeeper) stop() {
+	select {
+	case tm.stopc <- struct{}{}:
+	case <-tm.donec:
+	}
+	<-tm.donec
+}
+
+func (tm *simpleTokenTTLKeeper) addSimpleToken(token string) {
+	tm.tokens[token] = time.Now().Add(simpleTokenTTL)
+}
+
+func (tm *simpleTokenTTLKeeper) resetSimpleToken(token string) {
+	if _, ok := tm.tokens[token]; ok {
+		tm.tokens[token] = time.Now().Add(simpleTokenTTL)
+	}
+}
+
+func (tm *simpleTokenTTLKeeper) deleteSimpleToken(token string) {
+	delete(tm.tokens, token)
+}
+
+func (tm *simpleTokenTTLKeeper) run() {
+	tokenTicker := time.NewTicker(simpleTokenTTLResolution)
+	defer func() {
+		tokenTicker.Stop()
+		close(tm.donec)
+	}()
+	for {
+		select {
+		case <-tokenTicker.C:
+			nowtime := time.Now()
+			tm.mu.Lock()
+			for t, tokenendtime := range tm.tokens {
+				if nowtime.After(tokenendtime) {
+					tm.deleteTokenFunc(t)
+					delete(tm.tokens, t)
+				}
+			}
+			tm.mu.Unlock()
+		case <-tm.stopc:
+			return
+		}
+	}
+}
+
+type tokenSimple struct {
+	indexWaiter       func(uint64) <-chan struct{}
+	simpleTokenKeeper *simpleTokenTTLKeeper
+	simpleTokensMu    sync.Mutex
+	simpleTokens      map[string]string // token -> username
+}
+
+func (t *tokenSimple) genTokenPrefix() (string, error) {
+	ret := make([]byte, defaultSimpleTokenLength)
+
+	for i := 0; i < defaultSimpleTokenLength; i++ {
+		bInt, err := rand.Int(rand.Reader, big.NewInt(int64(len(letters))))
+		if err != nil {
+			return "", err
+		}
+
+		ret[i] = letters[bInt.Int64()]
+	}
+
+	return string(ret), nil
+}
+
+func (t *tokenSimple) assignSimpleTokenToUser(username, token string) {
+	t.simpleTokensMu.Lock()
+	defer t.simpleTokensMu.Unlock()
+	if t.simpleTokenKeeper == nil {
+		return
+	}
+
+	_, ok := t.simpleTokens[token]
+	if ok {
+		plog.Panicf("token %s is alredy used", token)
+	}
+
+	t.simpleTokens[token] = username
+	t.simpleTokenKeeper.addSimpleToken(token)
+}
+
+func (t *tokenSimple) invalidateUser(username string) {
+	if t.simpleTokenKeeper == nil {
+		return
+	}
+	t.simpleTokensMu.Lock()
+	for token, name := range t.simpleTokens {
+		if strings.Compare(name, username) == 0 {
+			delete(t.simpleTokens, token)
+			t.simpleTokenKeeper.deleteSimpleToken(token)
+		}
+	}
+	t.simpleTokensMu.Unlock()
+}
+
+func (t *tokenSimple) enable() {
+	delf := func(tk string) {
+		if username, ok := t.simpleTokens[tk]; ok {
+			plog.Infof("deleting token %s for user %s", tk, username)
+			delete(t.simpleTokens, tk)
+		}
+	}
+	t.simpleTokenKeeper = &simpleTokenTTLKeeper{
+		tokens:          make(map[string]time.Time),
+		donec:           make(chan struct{}),
+		stopc:           make(chan struct{}),
+		deleteTokenFunc: delf,
+		mu:              &t.simpleTokensMu,
+	}
+	go t.simpleTokenKeeper.run()
+}
+
+func (t *tokenSimple) disable() {
+	t.simpleTokensMu.Lock()
+	tk := t.simpleTokenKeeper
+	t.simpleTokenKeeper = nil
+	t.simpleTokens = make(map[string]string) // invalidate all tokens
+	t.simpleTokensMu.Unlock()
+	if tk != nil {
+		tk.stop()
+	}
+}
+
+func (t *tokenSimple) info(ctx context.Context, token string, revision uint64) (*AuthInfo, bool) {
+	if !t.isValidSimpleToken(ctx, token) {
+		return nil, false
+	}
+	t.simpleTokensMu.Lock()
+	username, ok := t.simpleTokens[token]
+	if ok && t.simpleTokenKeeper != nil {
+		t.simpleTokenKeeper.resetSimpleToken(token)
+	}
+	t.simpleTokensMu.Unlock()
+	return &AuthInfo{Username: username, Revision: revision}, ok
+}
+
+func (t *tokenSimple) assign(ctx context.Context, username string, rev uint64) (string, error) {
+	// rev isn't used by the simple token provider; it is only used in JWT
+	index := ctx.Value(AuthenticateParamIndex{}).(uint64)
+	simpleTokenPrefix := ctx.Value(AuthenticateParamSimpleTokenPrefix{}).(string)
+	token := fmt.Sprintf("%s.%d", simpleTokenPrefix, index)
+	t.assignSimpleTokenToUser(username, token)
+
+	return token, nil
+}
+
+func (t *tokenSimple) isValidSimpleToken(ctx context.Context, token string) bool {
+	splitted := strings.Split(token, ".")
+	if len(splitted) != 2 {
+		return false
+	}
+	index, err := strconv.Atoi(splitted[1])
+	if err != nil {
+		return false
+	}
+
+	select {
+	case <-t.indexWaiter(uint64(index)):
+		return true
+	case <-ctx.Done():
+	}
+
+	return false
+}
+
+func newTokenProviderSimple(indexWaiter func(uint64) <-chan struct{}) *tokenSimple {
+	return &tokenSimple{
+		simpleTokens: make(map[string]string),
+		indexWaiter:  indexWaiter,
+	}
+}
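
Concretely, a simple token is "<prefix>.<index>": assign joins the random prefix and the raft index stored in the context, and isValidSimpleToken re-splits the token and waits on indexWaiter until that index has been applied. A hypothetical walk-through inside package auth, using an always-ready waiter in place of etcdserver's applied-index wait:

```go
package auth

import (
	"context"
	"fmt"
)

// exampleSimpleToken is a hypothetical walk-through of assign/info; the
// always-ready index waiter stands in for etcdserver's wait machinery.
func exampleSimpleToken() {
	ready := make(chan struct{})
	close(ready)
	t := newTokenProviderSimple(func(index uint64) <-chan struct{} { return ready })
	t.enable() // starts the TTL keeper; tokens expire after simpleTokenTTL

	ctx := context.WithValue(context.Background(), AuthenticateParamIndex{}, uint64(1))
	prefix, _ := t.genTokenPrefix()
	ctx = context.WithValue(ctx, AuthenticateParamSimpleTokenPrefix{}, prefix)

	token, _ := t.assign(ctx, "root", 0) // e.g. "abcdEFGHijklMNOP.1"
	ai, ok := t.info(ctx, token, 0)
	fmt.Println(ai.Username, ok) // "root true"

	t.disable()
}
```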
diff --git a/vendor/github.com/coreos/etcd/auth/store.go b/vendor/github.com/coreos/etcd/auth/store.go
new file mode 100644
index 0000000..d676cb5
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/auth/store.go
@@ -0,0 +1,1136 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package auth
+
+import (
+	"bytes"
+	"context"
+	"encoding/binary"
+	"errors"
+	"sort"
+	"strings"
+	"sync"
+	"sync/atomic"
+
+	"github.com/coreos/etcd/auth/authpb"
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+	"github.com/coreos/etcd/mvcc/backend"
+
+	"github.com/coreos/pkg/capnslog"
+	"golang.org/x/crypto/bcrypt"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/peer"
+)
+
+var (
+	enableFlagKey = []byte("authEnabled")
+	authEnabled   = []byte{1}
+	authDisabled  = []byte{0}
+
+	revisionKey = []byte("authRevision")
+
+	authBucketName      = []byte("auth")
+	authUsersBucketName = []byte("authUsers")
+	authRolesBucketName = []byte("authRoles")
+
+	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "auth")
+
+	ErrRootUserNotExist     = errors.New("auth: root user does not exist")
+	ErrRootRoleNotExist     = errors.New("auth: root user does not have root role")
+	ErrUserAlreadyExist     = errors.New("auth: user already exists")
+	ErrUserEmpty            = errors.New("auth: user name is empty")
+	ErrUserNotFound         = errors.New("auth: user not found")
+	ErrRoleAlreadyExist     = errors.New("auth: role already exists")
+	ErrRoleNotFound         = errors.New("auth: role not found")
+	ErrAuthFailed           = errors.New("auth: authentication failed, invalid user ID or password")
+	ErrPermissionDenied     = errors.New("auth: permission denied")
+	ErrRoleNotGranted       = errors.New("auth: role is not granted to the user")
+	ErrPermissionNotGranted = errors.New("auth: permission is not granted to the role")
+	ErrAuthNotEnabled       = errors.New("auth: authentication is not enabled")
+	ErrAuthOldRevision      = errors.New("auth: revision in header is old")
+	ErrInvalidAuthToken     = errors.New("auth: invalid auth token")
+	ErrInvalidAuthOpts      = errors.New("auth: invalid auth options")
+	ErrInvalidAuthMgmt      = errors.New("auth: invalid auth management")
+
+	// BcryptCost is the algorithm cost / strength for hashing auth passwords
+	BcryptCost = bcrypt.DefaultCost
+)
+
+const (
+	rootUser = "root"
+	rootRole = "root"
+
+	tokenTypeSimple = "simple"
+	tokenTypeJWT    = "jwt"
+
+	revBytesLen = 8
+)
+
+type AuthInfo struct {
+	Username string
+	Revision uint64
+}
+
+// AuthenticateParamIndex is used for a key of context in the parameters of Authenticate()
+type AuthenticateParamIndex struct{}
+
+// AuthenticateParamSimpleTokenPrefix is used for a key of context in the parameters of Authenticate()
+type AuthenticateParamSimpleTokenPrefix struct{}
+
+type AuthStore interface {
+	// AuthEnable turns on the authentication feature
+	AuthEnable() error
+
+	// AuthDisable turns off the authentication feature
+	AuthDisable()
+
+	// Authenticate does authentication based on given user name and password
+	Authenticate(ctx context.Context, username, password string) (*pb.AuthenticateResponse, error)
+
+	// Recover recovers the state of auth store from the given backend
+	Recover(b backend.Backend)
+
+	// UserAdd adds a new user
+	UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error)
+
+	// UserDelete deletes a user
+	UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error)
+
+	// UserChangePassword changes a password of a user
+	UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error)
+
+	// UserGrantRole grants a role to the user
+	UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error)
+
+	// UserGet gets the detailed information of a user
+	UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error)
+
+	// UserRevokeRole revokes a role of a user
+	UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error)
+
+	// RoleAdd adds a new role
+	RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error)
+
+	// RoleGrantPermission grants a permission to a role
+	RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error)
+
+	// RoleGet gets the detailed information of a role
+	RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error)
+
+	// RoleRevokePermission revokes a permission from a role
+	RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error)
+
+	// RoleDelete deletes a role
+	RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error)
+
+	// UserList gets a list of all users
+	UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error)
+
+	// RoleList gets a list of all roles
+	RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error)
+
+	// IsPutPermitted checks put permission of the user
+	IsPutPermitted(authInfo *AuthInfo, key []byte) error
+
+	// IsRangePermitted checks range permission of the user
+	IsRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error
+
+	// IsDeleteRangePermitted checks delete-range permission of the user
+	IsDeleteRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error
+
+	// IsAdminPermitted checks admin permission of the user
+	IsAdminPermitted(authInfo *AuthInfo) error
+
+	// GenTokenPrefix produces a random string in the case of a simple token;
+	// in the case of JWT, it produces an empty string
+	GenTokenPrefix() (string, error)
+
+	// Revision gets current revision of authStore
+	Revision() uint64
+
+	// CheckPassword checks whether a given pair of username and password is correct
+	CheckPassword(username, password string) (uint64, error)
+
+	// Close does cleanup of AuthStore
+	Close() error
+
+	// AuthInfoFromCtx gets AuthInfo from gRPC's context
+	AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error)
+
+	// AuthInfoFromTLS gets AuthInfo from TLS info of gRPC's context
+	AuthInfoFromTLS(ctx context.Context) *AuthInfo
+
+	// WithRoot generates and installs a token that can be used as a root credential
+	WithRoot(ctx context.Context) context.Context
+
+	// HasRole checks that user has role
+	HasRole(user, role string) bool
+}
+
+type TokenProvider interface {
+	info(ctx context.Context, token string, revision uint64) (*AuthInfo, bool)
+	assign(ctx context.Context, username string, revision uint64) (string, error)
+	enable()
+	disable()
+
+	invalidateUser(string)
+	genTokenPrefix() (string, error)
+}
+
+type authStore struct {
+	// atomic operations; need 64-bit align, or 32-bit tests will crash
+	revision uint64
+
+	be        backend.Backend
+	enabled   bool
+	enabledMu sync.RWMutex
+
+	rangePermCache map[string]*unifiedRangePermissions // username -> unifiedRangePermissions
+
+	tokenProvider TokenProvider
+}
+
+func (as *authStore) AuthEnable() error {
+	as.enabledMu.Lock()
+	defer as.enabledMu.Unlock()
+	if as.enabled {
+		plog.Noticef("Authentication already enabled")
+		return nil
+	}
+	b := as.be
+	tx := b.BatchTx()
+	tx.Lock()
+	defer func() {
+		tx.Unlock()
+		b.ForceCommit()
+	}()
+
+	u := getUser(tx, rootUser)
+	if u == nil {
+		return ErrRootUserNotExist
+	}
+
+	if !hasRootRole(u) {
+		return ErrRootRoleNotExist
+	}
+
+	tx.UnsafePut(authBucketName, enableFlagKey, authEnabled)
+
+	as.enabled = true
+	as.tokenProvider.enable()
+
+	as.rangePermCache = make(map[string]*unifiedRangePermissions)
+
+	as.setRevision(getRevision(tx))
+
+	plog.Noticef("Authentication enabled")
+
+	return nil
+}
+
+func (as *authStore) AuthDisable() {
+	as.enabledMu.Lock()
+	defer as.enabledMu.Unlock()
+	if !as.enabled {
+		return
+	}
+	b := as.be
+	tx := b.BatchTx()
+	tx.Lock()
+	tx.UnsafePut(authBucketName, enableFlagKey, authDisabled)
+	as.commitRevision(tx)
+	tx.Unlock()
+	b.ForceCommit()
+
+	as.enabled = false
+	as.tokenProvider.disable()
+
+	plog.Noticef("Authentication disabled")
+}
+
+func (as *authStore) Close() error {
+	as.enabledMu.Lock()
+	defer as.enabledMu.Unlock()
+	if !as.enabled {
+		return nil
+	}
+	as.tokenProvider.disable()
+	return nil
+}
+
+func (as *authStore) Authenticate(ctx context.Context, username, password string) (*pb.AuthenticateResponse, error) {
+	if !as.isAuthEnabled() {
+		return nil, ErrAuthNotEnabled
+	}
+
+	tx := as.be.BatchTx()
+	tx.Lock()
+	defer tx.Unlock()
+
+	user := getUser(tx, username)
+	if user == nil {
+		return nil, ErrAuthFailed
+	}
+
+	// Password checking is already performed in the API layer, so it is not repeated here.
+	// Staleness of the password can be detected with OCC in the API layer, too.
+
+	token, err := as.tokenProvider.assign(ctx, username, as.Revision())
+	if err != nil {
+		return nil, err
+	}
+
+	plog.Debugf("authorized %s, token is %s", username, token)
+	return &pb.AuthenticateResponse{Token: token}, nil
+}
+
+func (as *authStore) CheckPassword(username, password string) (uint64, error) {
+	if !as.isAuthEnabled() {
+		return 0, ErrAuthNotEnabled
+	}
+
+	tx := as.be.BatchTx()
+	tx.Lock()
+	defer tx.Unlock()
+
+	user := getUser(tx, username)
+	if user == nil {
+		return 0, ErrAuthFailed
+	}
+
+	if bcrypt.CompareHashAndPassword(user.Password, []byte(password)) != nil {
+		plog.Noticef("authentication failed, invalid password for user %s", username)
+		return 0, ErrAuthFailed
+	}
+
+	return getRevision(tx), nil
+}
+
+func (as *authStore) Recover(be backend.Backend) {
+	enabled := false
+	as.be = be
+	tx := be.BatchTx()
+	tx.Lock()
+	_, vs := tx.UnsafeRange(authBucketName, enableFlagKey, nil, 0)
+	if len(vs) == 1 {
+		if bytes.Equal(vs[0], authEnabled) {
+			enabled = true
+		}
+	}
+
+	as.setRevision(getRevision(tx))
+
+	tx.Unlock()
+
+	as.enabledMu.Lock()
+	as.enabled = enabled
+	as.enabledMu.Unlock()
+}
+
+func (as *authStore) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) {
+	if len(r.Name) == 0 {
+		return nil, ErrUserEmpty
+	}
+
+	hashed, err := bcrypt.GenerateFromPassword([]byte(r.Password), BcryptCost)
+	if err != nil {
+		plog.Errorf("failed to hash password: %s", err)
+		return nil, err
+	}
+
+	tx := as.be.BatchTx()
+	tx.Lock()
+	defer tx.Unlock()
+
+	user := getUser(tx, r.Name)
+	if user != nil {
+		return nil, ErrUserAlreadyExist
+	}
+
+	newUser := &authpb.User{
+		Name:     []byte(r.Name),
+		Password: hashed,
+	}
+
+	putUser(tx, newUser)
+
+	as.commitRevision(tx)
+
+	plog.Noticef("added a new user: %s", r.Name)
+
+	return &pb.AuthUserAddResponse{}, nil
+}
+
+func (as *authStore) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) {
+	if as.enabled && strings.Compare(r.Name, rootUser) == 0 {
+		plog.Errorf("the user root must not be deleted")
+		return nil, ErrInvalidAuthMgmt
+	}
+
+	tx := as.be.BatchTx()
+	tx.Lock()
+	defer tx.Unlock()
+
+	user := getUser(tx, r.Name)
+	if user == nil {
+		return nil, ErrUserNotFound
+	}
+
+	delUser(tx, r.Name)
+
+	as.commitRevision(tx)
+
+	as.invalidateCachedPerm(r.Name)
+	as.tokenProvider.invalidateUser(r.Name)
+
+	plog.Noticef("deleted a user: %s", r.Name)
+
+	return &pb.AuthUserDeleteResponse{}, nil
+}
+
+func (as *authStore) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) {
+	// TODO(mitake): measure the cost of bcrypt.GenerateFromPassword()
+	// If the cost is too high, we should move the encryption to outside of the raft
+	hashed, err := bcrypt.GenerateFromPassword([]byte(r.Password), BcryptCost)
+	if err != nil {
+		plog.Errorf("failed to hash password: %s", err)
+		return nil, err
+	}
+
+	tx := as.be.BatchTx()
+	tx.Lock()
+	defer tx.Unlock()
+
+	user := getUser(tx, r.Name)
+	if user == nil {
+		return nil, ErrUserNotFound
+	}
+
+	updatedUser := &authpb.User{
+		Name:     []byte(r.Name),
+		Roles:    user.Roles,
+		Password: hashed,
+	}
+
+	putUser(tx, updatedUser)
+
+	as.commitRevision(tx)
+
+	as.invalidateCachedPerm(r.Name)
+	as.tokenProvider.invalidateUser(r.Name)
+
+	plog.Noticef("changed a password of a user: %s", r.Name)
+
+	return &pb.AuthUserChangePasswordResponse{}, nil
+}
+
+func (as *authStore) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) {
+	tx := as.be.BatchTx()
+	tx.Lock()
+	defer tx.Unlock()
+
+	user := getUser(tx, r.User)
+	if user == nil {
+		return nil, ErrUserNotFound
+	}
+
+	if r.Role != rootRole {
+		role := getRole(tx, r.Role)
+		if role == nil {
+			return nil, ErrRoleNotFound
+		}
+	}
+
+	idx := sort.SearchStrings(user.Roles, r.Role)
+	if idx < len(user.Roles) && strings.Compare(user.Roles[idx], r.Role) == 0 {
+		plog.Warningf("user %s is already granted role %s", r.User, r.Role)
+		return &pb.AuthUserGrantRoleResponse{}, nil
+	}
+
+	user.Roles = append(user.Roles, r.Role)
+	sort.Strings(user.Roles)
+
+	putUser(tx, user)
+
+	as.invalidateCachedPerm(r.User)
+
+	as.commitRevision(tx)
+
+	plog.Noticef("granted role %s to user %s", r.Role, r.User)
+	return &pb.AuthUserGrantRoleResponse{}, nil
+}
+
+func (as *authStore) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
+	tx := as.be.BatchTx()
+	tx.Lock()
+	user := getUser(tx, r.Name)
+	tx.Unlock()
+
+	if user == nil {
+		return nil, ErrUserNotFound
+	}
+
+	var resp pb.AuthUserGetResponse
+	resp.Roles = append(resp.Roles, user.Roles...)
+	return &resp, nil
+}
+
+func (as *authStore) UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) {
+	tx := as.be.BatchTx()
+	tx.Lock()
+	users := getAllUsers(tx)
+	tx.Unlock()
+
+	resp := &pb.AuthUserListResponse{Users: make([]string, len(users))}
+	for i := range users {
+		resp.Users[i] = string(users[i].Name)
+	}
+	return resp, nil
+}
+
+func (as *authStore) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) {
+	if as.enabled && strings.Compare(r.Name, rootUser) == 0 && strings.Compare(r.Role, rootRole) == 0 {
+		plog.Errorf("the role root must not be revoked from the user root")
+		return nil, ErrInvalidAuthMgmt
+	}
+
+	tx := as.be.BatchTx()
+	tx.Lock()
+	defer tx.Unlock()
+
+	user := getUser(tx, r.Name)
+	if user == nil {
+		return nil, ErrUserNotFound
+	}
+
+	updatedUser := &authpb.User{
+		Name:     user.Name,
+		Password: user.Password,
+	}
+
+	for _, role := range user.Roles {
+		if strings.Compare(role, r.Role) != 0 {
+			updatedUser.Roles = append(updatedUser.Roles, role)
+		}
+	}
+
+	if len(updatedUser.Roles) == len(user.Roles) {
+		return nil, ErrRoleNotGranted
+	}
+
+	putUser(tx, updatedUser)
+
+	as.invalidateCachedPerm(r.Name)
+
+	as.commitRevision(tx)
+
+	plog.Noticef("revoked role %s from user %s", r.Role, r.Name)
+	return &pb.AuthUserRevokeRoleResponse{}, nil
+}
+
+func (as *authStore) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
+	tx := as.be.BatchTx()
+	tx.Lock()
+	defer tx.Unlock()
+
+	var resp pb.AuthRoleGetResponse
+
+	role := getRole(tx, r.Role)
+	if role == nil {
+		return nil, ErrRoleNotFound
+	}
+	resp.Perm = append(resp.Perm, role.KeyPermission...)
+	return &resp, nil
+}
+
+func (as *authStore) RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) {
+	tx := as.be.BatchTx()
+	tx.Lock()
+	roles := getAllRoles(tx)
+	tx.Unlock()
+
+	resp := &pb.AuthRoleListResponse{Roles: make([]string, len(roles))}
+	for i := range roles {
+		resp.Roles[i] = string(roles[i].Name)
+	}
+	return resp, nil
+}
+
+func (as *authStore) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) {
+	tx := as.be.BatchTx()
+	tx.Lock()
+	defer tx.Unlock()
+
+	role := getRole(tx, r.Role)
+	if role == nil {
+		return nil, ErrRoleNotFound
+	}
+
+	updatedRole := &authpb.Role{
+		Name: role.Name,
+	}
+
+	for _, perm := range role.KeyPermission {
+		if !bytes.Equal(perm.Key, []byte(r.Key)) || !bytes.Equal(perm.RangeEnd, []byte(r.RangeEnd)) {
+			updatedRole.KeyPermission = append(updatedRole.KeyPermission, perm)
+		}
+	}
+
+	if len(role.KeyPermission) == len(updatedRole.KeyPermission) {
+		return nil, ErrPermissionNotGranted
+	}
+
+	putRole(tx, updatedRole)
+
+	// TODO(mitake): currently single role update invalidates every cache
+	// It should be optimized.
+	as.clearCachedPerm()
+
+	as.commitRevision(tx)
+
+	plog.Noticef("revoked key %s from role %s", r.Key, r.Role)
+	return &pb.AuthRoleRevokePermissionResponse{}, nil
+}
+
+func (as *authStore) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) {
+	if as.enabled && strings.Compare(r.Role, rootRole) == 0 {
+		plog.Errorf("the role root must not be deleted")
+		return nil, ErrInvalidAuthMgmt
+	}
+
+	tx := as.be.BatchTx()
+	tx.Lock()
+	defer tx.Unlock()
+
+	role := getRole(tx, r.Role)
+	if role == nil {
+		return nil, ErrRoleNotFound
+	}
+
+	delRole(tx, r.Role)
+
+	users := getAllUsers(tx)
+	for _, user := range users {
+		updatedUser := &authpb.User{
+			Name:     user.Name,
+			Password: user.Password,
+		}
+
+		for _, role := range user.Roles {
+			if strings.Compare(role, r.Role) != 0 {
+				updatedUser.Roles = append(updatedUser.Roles, role)
+			}
+		}
+
+		if len(updatedUser.Roles) == len(user.Roles) {
+			continue
+		}
+
+		putUser(tx, updatedUser)
+
+		as.invalidateCachedPerm(string(user.Name))
+	}
+
+	as.commitRevision(tx)
+
+	plog.Noticef("deleted role %s", r.Role)
+	return &pb.AuthRoleDeleteResponse{}, nil
+}
+
+func (as *authStore) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) {
+	tx := as.be.BatchTx()
+	tx.Lock()
+	defer tx.Unlock()
+
+	role := getRole(tx, r.Name)
+	if role != nil {
+		return nil, ErrRoleAlreadyExist
+	}
+
+	newRole := &authpb.Role{
+		Name: []byte(r.Name),
+	}
+
+	putRole(tx, newRole)
+
+	as.commitRevision(tx)
+
+	plog.Noticef("Role %s is created", r.Name)
+
+	return &pb.AuthRoleAddResponse{}, nil
+}
+
+func (as *authStore) authInfoFromToken(ctx context.Context, token string) (*AuthInfo, bool) {
+	return as.tokenProvider.info(ctx, token, as.Revision())
+}
+
+type permSlice []*authpb.Permission
+
+func (perms permSlice) Len() int {
+	return len(perms)
+}
+
+func (perms permSlice) Less(i, j int) bool {
+	return bytes.Compare(perms[i].Key, perms[j].Key) < 0
+}
+
+func (perms permSlice) Swap(i, j int) {
+	perms[i], perms[j] = perms[j], perms[i]
+}
+
+func (as *authStore) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) {
+	tx := as.be.BatchTx()
+	tx.Lock()
+	defer tx.Unlock()
+
+	role := getRole(tx, r.Name)
+	if role == nil {
+		return nil, ErrRoleNotFound
+	}
+
+	idx := sort.Search(len(role.KeyPermission), func(i int) bool {
+		return bytes.Compare(role.KeyPermission[i].Key, []byte(r.Perm.Key)) >= 0
+	})
+
+	if idx < len(role.KeyPermission) && bytes.Equal(role.KeyPermission[idx].Key, r.Perm.Key) && bytes.Equal(role.KeyPermission[idx].RangeEnd, r.Perm.RangeEnd) {
+		// update existing permission
+		role.KeyPermission[idx].PermType = r.Perm.PermType
+	} else {
+		// append new permission to the role
+		newPerm := &authpb.Permission{
+			Key:      []byte(r.Perm.Key),
+			RangeEnd: []byte(r.Perm.RangeEnd),
+			PermType: r.Perm.PermType,
+		}
+
+		role.KeyPermission = append(role.KeyPermission, newPerm)
+		sort.Sort(permSlice(role.KeyPermission))
+	}
+
+	putRole(tx, role)
+
+	// TODO(mitake): currently single role update invalidates every cache
+	// It should be optimized.
+	as.clearCachedPerm()
+
+	as.commitRevision(tx)
+
+	plog.Noticef("role %s's permission of key %s is updated as %s", r.Name, r.Perm.Key, authpb.Permission_Type_name[int32(r.Perm.PermType)])
+
+	return &pb.AuthRoleGrantPermissionResponse{}, nil
+}
+
+func (as *authStore) isOpPermitted(userName string, revision uint64, key, rangeEnd []byte, permTyp authpb.Permission_Type) error {
+	// TODO(mitake): this function would be costly so we need a caching mechanism
+	if !as.isAuthEnabled() {
+		return nil
+	}
+
+	// only gets rev == 0 when passed AuthInfo{}; no user given
+	if revision == 0 {
+		return ErrUserEmpty
+	}
+
+	if revision < as.Revision() {
+		return ErrAuthOldRevision
+	}
+
+	tx := as.be.BatchTx()
+	tx.Lock()
+	defer tx.Unlock()
+
+	user := getUser(tx, userName)
+	if user == nil {
+		plog.Errorf("invalid user name %s for permission checking", userName)
+		return ErrPermissionDenied
+	}
+
+	// root role should have permission on all ranges
+	if hasRootRole(user) {
+		return nil
+	}
+
+	if as.isRangeOpPermitted(tx, userName, key, rangeEnd, permTyp) {
+		return nil
+	}
+
+	return ErrPermissionDenied
+}
+
+func (as *authStore) IsPutPermitted(authInfo *AuthInfo, key []byte) error {
+	return as.isOpPermitted(authInfo.Username, authInfo.Revision, key, nil, authpb.WRITE)
+}
+
+func (as *authStore) IsRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error {
+	return as.isOpPermitted(authInfo.Username, authInfo.Revision, key, rangeEnd, authpb.READ)
+}
+
+func (as *authStore) IsDeleteRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error {
+	return as.isOpPermitted(authInfo.Username, authInfo.Revision, key, rangeEnd, authpb.WRITE)
+}
+
+func (as *authStore) IsAdminPermitted(authInfo *AuthInfo) error {
+	if !as.isAuthEnabled() {
+		return nil
+	}
+	if authInfo == nil {
+		return ErrUserEmpty
+	}
+
+	tx := as.be.BatchTx()
+	tx.Lock()
+	u := getUser(tx, authInfo.Username)
+	tx.Unlock()
+
+	if u == nil {
+		return ErrUserNotFound
+	}
+
+	if !hasRootRole(u) {
+		return ErrPermissionDenied
+	}
+
+	return nil
+}
+
+func getUser(tx backend.BatchTx, username string) *authpb.User {
+	_, vs := tx.UnsafeRange(authUsersBucketName, []byte(username), nil, 0)
+	if len(vs) == 0 {
+		return nil
+	}
+
+	user := &authpb.User{}
+	err := user.Unmarshal(vs[0])
+	if err != nil {
+		plog.Panicf("failed to unmarshal user struct (name: %s): %s", username, err)
+	}
+	return user
+}
+
+func getAllUsers(tx backend.BatchTx) []*authpb.User {
+	_, vs := tx.UnsafeRange(authUsersBucketName, []byte{0}, []byte{0xff}, -1)
+	if len(vs) == 0 {
+		return nil
+	}
+
+	users := make([]*authpb.User, len(vs))
+	for i := range vs {
+		user := &authpb.User{}
+		err := user.Unmarshal(vs[i])
+		if err != nil {
+			plog.Panicf("failed to unmarshal user struct: %s", err)
+		}
+		users[i] = user
+	}
+	return users
+}
+
+func putUser(tx backend.BatchTx, user *authpb.User) {
+	b, err := user.Marshal()
+	if err != nil {
+		plog.Panicf("failed to marshal user struct (name: %s): %s", user.Name, err)
+	}
+	tx.UnsafePut(authUsersBucketName, user.Name, b)
+}
+
+func delUser(tx backend.BatchTx, username string) {
+	tx.UnsafeDelete(authUsersBucketName, []byte(username))
+}
+
+func getRole(tx backend.BatchTx, rolename string) *authpb.Role {
+	_, vs := tx.UnsafeRange(authRolesBucketName, []byte(rolename), nil, 0)
+	if len(vs) == 0 {
+		return nil
+	}
+
+	role := &authpb.Role{}
+	err := role.Unmarshal(vs[0])
+	if err != nil {
+		plog.Panicf("failed to unmarshal role struct (name: %s): %s", rolename, err)
+	}
+	return role
+}
+
+func getAllRoles(tx backend.BatchTx) []*authpb.Role {
+	_, vs := tx.UnsafeRange(authRolesBucketName, []byte{0}, []byte{0xff}, -1)
+	if len(vs) == 0 {
+		return nil
+	}
+
+	roles := make([]*authpb.Role, len(vs))
+	for i := range vs {
+		role := &authpb.Role{}
+		err := role.Unmarshal(vs[i])
+		if err != nil {
+			plog.Panicf("failed to unmarshal role struct: %s", err)
+		}
+		roles[i] = role
+	}
+	return roles
+}
+
+func putRole(tx backend.BatchTx, role *authpb.Role) {
+	b, err := role.Marshal()
+	if err != nil {
+		plog.Panicf("failed to marshal role struct (name: %s): %s", role.Name, err)
+	}
+
+	tx.UnsafePut(authRolesBucketName, []byte(role.Name), b)
+}
+
+func delRole(tx backend.BatchTx, rolename string) {
+	tx.UnsafeDelete(authRolesBucketName, []byte(rolename))
+}
+
+func (as *authStore) isAuthEnabled() bool {
+	as.enabledMu.RLock()
+	defer as.enabledMu.RUnlock()
+	return as.enabled
+}
+
+func NewAuthStore(be backend.Backend, tp TokenProvider) *authStore {
+	tx := be.BatchTx()
+	tx.Lock()
+
+	tx.UnsafeCreateBucket(authBucketName)
+	tx.UnsafeCreateBucket(authUsersBucketName)
+	tx.UnsafeCreateBucket(authRolesBucketName)
+
+	enabled := false
+	_, vs := tx.UnsafeRange(authBucketName, enableFlagKey, nil, 0)
+	if len(vs) == 1 {
+		if bytes.Equal(vs[0], authEnabled) {
+			enabled = true
+		}
+	}
+
+	as := &authStore{
+		be:             be,
+		revision:       getRevision(tx),
+		enabled:        enabled,
+		rangePermCache: make(map[string]*unifiedRangePermissions),
+		tokenProvider:  tp,
+	}
+
+	if enabled {
+		as.tokenProvider.enable()
+	}
+
+	if as.Revision() == 0 {
+		as.commitRevision(tx)
+	}
+
+	tx.Unlock()
+	be.ForceCommit()
+
+	return as
+}
+
+func hasRootRole(u *authpb.User) bool {
+	// u.Roles is sorted in UserGrantRole(), so we can use binary search.
+	idx := sort.SearchStrings(u.Roles, rootRole)
+	return idx != len(u.Roles) && u.Roles[idx] == rootRole
+}
+
+func (as *authStore) commitRevision(tx backend.BatchTx) {
+	atomic.AddUint64(&as.revision, 1)
+	revBytes := make([]byte, revBytesLen)
+	binary.BigEndian.PutUint64(revBytes, as.Revision())
+	tx.UnsafePut(authBucketName, revisionKey, revBytes)
+}
+
+func getRevision(tx backend.BatchTx) uint64 {
+	_, vs := tx.UnsafeRange(authBucketName, []byte(revisionKey), nil, 0)
+	if len(vs) != 1 {
+		// this can happen in the initialization phase
+		return 0
+	}
+
+	return binary.BigEndian.Uint64(vs[0])
+}
+
+func (as *authStore) setRevision(rev uint64) {
+	atomic.StoreUint64(&as.revision, rev)
+}
+
+func (as *authStore) Revision() uint64 {
+	return atomic.LoadUint64(&as.revision)
+}
+
+func (as *authStore) AuthInfoFromTLS(ctx context.Context) *AuthInfo {
+	peer, ok := peer.FromContext(ctx)
+	if !ok || peer == nil || peer.AuthInfo == nil {
+		return nil
+	}
+
+	tlsInfo := peer.AuthInfo.(credentials.TLSInfo)
+	for _, chains := range tlsInfo.State.VerifiedChains {
+		for _, chain := range chains {
+			cn := chain.Subject.CommonName
+			plog.Debugf("found common name %s", cn)
+
+			ai := &AuthInfo{
+				Username: cn,
+				Revision: as.Revision(),
+			}
+			md, ok := metadata.FromIncomingContext(ctx)
+			if !ok {
+				return nil
+			}
+
+			// A gRPC-gateway proxy request to the etcd server includes a Grpcgateway-Accept
+			// header. The proxy uses the etcd client's server certificate, so if that
+			// certificate has a CommonName we must never use it for authentication.
+			if gw := md["grpcgateway-accept"]; len(gw) > 0 {
+				plog.Warningf("ignoring common name in gRPC-gateway proxy request %s", ai.Username)
+				return nil
+			}
+			return ai
+		}
+	}
+
+	return nil
+}
+
+func (as *authStore) AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) {
+	md, ok := metadata.FromIncomingContext(ctx)
+	if !ok {
+		return nil, nil
+	}
+
+	// TODO(mitake|hexfusion): review unifying key names
+	ts, ok := md["token"]
+	if !ok {
+		ts, ok = md["authorization"]
+	}
+	if !ok {
+		return nil, nil
+	}
+
+	token := ts[0]
+	authInfo, uok := as.authInfoFromToken(ctx, token)
+	if !uok {
+		plog.Warningf("invalid auth token: %s", token)
+		return nil, ErrInvalidAuthToken
+	}
+
+	return authInfo, nil
+}
+
+func (as *authStore) GenTokenPrefix() (string, error) {
+	return as.tokenProvider.genTokenPrefix()
+}
+
+func decomposeOpts(optstr string) (string, map[string]string, error) {
+	opts := strings.Split(optstr, ",")
+	tokenType := opts[0]
+
+	typeSpecificOpts := make(map[string]string)
+	for i := 1; i < len(opts); i++ {
+		pair := strings.Split(opts[i], "=")
+
+		if len(pair) != 2 {
+			plog.Errorf("invalid token specific option: %s", optstr)
+			return "", nil, ErrInvalidAuthOpts
+		}
+
+		if _, ok := typeSpecificOpts[pair[0]]; ok {
+			plog.Errorf("invalid token specific option, duplicated parameters (%s): %s", pair[0], optstr)
+			return "", nil, ErrInvalidAuthOpts
+		}
+
+		typeSpecificOpts[pair[0]] = pair[1]
+	}
+
+	return tokenType, typeSpecificOpts, nil
+
+}
+
+func NewTokenProvider(tokenOpts string, indexWaiter func(uint64) <-chan struct{}) (TokenProvider, error) {
+	tokenType, typeSpecificOpts, err := decomposeOpts(tokenOpts)
+	if err != nil {
+		return nil, ErrInvalidAuthOpts
+	}
+
+	switch tokenType {
+	case tokenTypeSimple:
+		plog.Warningf("simple token is not cryptographically signed")
+		return newTokenProviderSimple(indexWaiter), nil
+
+	case tokenTypeJWT:
+		return newTokenProviderJWT(typeSpecificOpts)
+
+	case "":
+		return newTokenProviderNop()
+	default:
+		plog.Errorf("unknown token type: %s", tokenType)
+		return nil, ErrInvalidAuthOpts
+	}
+}
+
+func (as *authStore) WithRoot(ctx context.Context) context.Context {
+	if !as.isAuthEnabled() {
+		return ctx
+	}
+
+	var ctxForAssign context.Context
+	if ts, ok := as.tokenProvider.(*tokenSimple); ok && ts != nil {
+		ctx1 := context.WithValue(ctx, AuthenticateParamIndex{}, uint64(0))
+		prefix, err := ts.genTokenPrefix()
+		if err != nil {
+			plog.Errorf("failed to generate prefix of internally used token")
+			return ctx
+		}
+		ctxForAssign = context.WithValue(ctx1, AuthenticateParamSimpleTokenPrefix{}, prefix)
+	} else {
+		ctxForAssign = ctx
+	}
+
+	token, err := as.tokenProvider.assign(ctxForAssign, "root", as.Revision())
+	if err != nil {
+		// this must not happen
+		plog.Errorf("failed to assign token for lease revoking: %s", err)
+		return ctx
+	}
+
+	mdMap := map[string]string{
+		"token": token,
+	}
+	tokenMD := metadata.New(mdMap)
+
+	// use "mdIncomingKey{}" since it's called from local etcdserver
+	return metadata.NewIncomingContext(ctx, tokenMD)
+}
+
+func (as *authStore) HasRole(user, role string) bool {
+	tx := as.be.BatchTx()
+	tx.Lock()
+	u := getUser(tx, user)
+	tx.Unlock()
+
+	if u == nil {
+		plog.Warningf("tried to check user %s has role %s, but user %s doesn't exist", user, role, user)
+		return false
+	}
+
+	for _, r := range u.Roles {
+		if role == r {
+			return true
+		}
+	}
+
+	return false
+}
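
Tying the pieces together: NewTokenProvider selects a provider from the --auth-token option string, and NewAuthStore wires it to the backend. A minimal sketch assuming mvcc/backend for storage; the database path and the always-ready index waiter (etcdserver's real waiter blocks until the raft index is applied) are illustrative:

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/auth"
	"github.com/coreos/etcd/mvcc/backend"
)

func main() {
	be := backend.NewDefaultBackend("auth.db") // illustrative path
	defer be.Close()

	ready := make(chan struct{})
	close(ready) // stand-in for etcdserver's applied-index wait

	// "simple" picks the random-token provider; a string such as
	// "jwt,pub-key=...,priv-key=...,sign-method=RS256" picks JWT instead.
	tp, err := auth.NewTokenProvider("simple", func(index uint64) <-chan struct{} { return ready })
	if err != nil {
		panic(err)
	}

	as := auth.NewAuthStore(be, tp)
	defer as.Close()

	// AuthEnable fails until a root user holding the root role exists.
	if err := as.AuthEnable(); err != nil {
		fmt.Println(err) // auth: root user does not exist
	}
}
```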
diff --git a/vendor/github.com/coreos/etcd/bill-of-materials.json b/vendor/github.com/coreos/etcd/bill-of-materials.json
new file mode 100644
index 0000000..98e2822
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/bill-of-materials.json
@@ -0,0 +1,451 @@
+[
+	{
+		"project": "bitbucket.org/ww/goautoneg",
+		"licenses": [
+			{
+				"type": "BSD 3-clause \"New\" or \"Revised\" License",
+				"confidence": 1
+			}
+		]
+	},
+	{
+		"project": "github.com/beorn7/perks/quantile",
+		"licenses": [
+			{
+				"type": "MIT License",
+				"confidence": 0.9891304347826086
+			}
+		]
+	},
+	{
+		"project": "github.com/bgentry/speakeasy",
+		"licenses": [
+			{
+				"type": "MIT License",
+				"confidence": 0.9441624365482234
+			}
+		]
+	},
+	{
+		"project": "github.com/coreos/bbolt",
+		"licenses": [
+			{
+				"type": "MIT License",
+				"confidence": 1
+			}
+		]
+	},
+	{
+		"project": "github.com/coreos/etcd",
+		"licenses": [
+			{
+				"type": "Apache License 2.0",
+				"confidence": 1
+			}
+		]
+	},
+	{
+		"project": "github.com/coreos/go-semver/semver",
+		"licenses": [
+			{
+				"type": "Apache License 2.0",
+				"confidence": 1
+			}
+		]
+	},
+	{
+		"project": "github.com/coreos/go-systemd",
+		"licenses": [
+			{
+				"type": "Apache License 2.0",
+				"confidence": 0.9966703662597114
+			}
+		]
+	},
+	{
+		"project": "github.com/coreos/pkg",
+		"licenses": [
+			{
+				"type": "Apache License 2.0",
+				"confidence": 1
+			}
+		]
+	},
+	{
+		"project": "github.com/cpuguy83/go-md2man/md2man",
+		"licenses": [
+			{
+				"type": "MIT License",
+				"confidence": 1
+			}
+		]
+	},
+	{
+		"project": "github.com/dgrijalva/jwt-go",
+		"licenses": [
+			{
+				"type": "MIT License",
+				"confidence": 0.9891304347826086
+			}
+		]
+	},
+	{
+		"project": "github.com/dustin/go-humanize",
+		"licenses": [
+			{
+				"type": "MIT License",
+				"confidence": 0.96875
+			}
+		]
+	},
+	{
+		"project": "github.com/ghodss/yaml",
+		"licenses": [
+			{
+				"type": "MIT License and BSD 3-clause \"New\" or \"Revised\" License",
+				"confidence": 1
+			}
+		]
+	},
+	{
+		"project": "github.com/gogo/protobuf",
+		"licenses": [
+			{
+				"type": "BSD 3-clause \"New\" or \"Revised\" License",
+				"confidence": 0.9090909090909091
+			}
+		]
+	},
+	{
+		"project": "github.com/golang/groupcache/lru",
+		"licenses": [
+			{
+				"type": "Apache License 2.0",
+				"confidence": 0.9966703662597114
+			}
+		]
+	},
+	{
+		"project": "github.com/golang/protobuf",
+		"licenses": [
+			{
+				"type": "BSD 3-clause \"New\" or \"Revised\" License",
+				"confidence": 0.92
+			}
+		]
+	},
+	{
+		"project": "github.com/google/btree",
+		"licenses": [
+			{
+				"type": "Apache License 2.0",
+				"confidence": 1
+			}
+		]
+	},
+	{
+		"project": "github.com/gorilla/websocket",
+		"licenses": [
+			{
+				"type": "BSD 2-clause \"Simplified\" License",
+				"confidence": 0.9852216748768473
+			}
+		]
+	},
+	{
+		"project": "github.com/grpc-ecosystem/go-grpc-prometheus",
+		"licenses": [
+			{
+				"type": "Apache License 2.0",
+				"confidence": 1
+			}
+		]
+	},
+	{
+		"project": "github.com/grpc-ecosystem/grpc-gateway",
+		"licenses": [
+			{
+				"type": "BSD 3-clause \"New\" or \"Revised\" License",
+				"confidence": 0.979253112033195
+			}
+		]
+	},
+	{
+		"project": "github.com/inconshreveable/mousetrap",
+		"licenses": [
+			{
+				"type": "MIT License and BSD 3-clause \"New\" or \"Revised\" License",
+				"confidence": 1
+			},
+			{
+				"type": "Apache License 2.0",
+				"confidence": 1
+			}
+		]
+	},
+	{
+		"project": "github.com/jonboulle/clockwork",
+		"licenses": [
+			{
+				"type": "Apache License 2.0",
+				"confidence": 1
+			}
+		]
+	},
+	{
+		"project": "github.com/mattn/go-runewidth",
+		"licenses": [
+			{
+				"type": "MIT License",
+				"confidence": 1
+			}
+		]
+	},
+	{
+		"project": "github.com/matttproud/golang_protobuf_extensions/pbutil",
+		"licenses": [
+			{
+				"type": "Apache License 2.0",
+				"confidence": 1
+			}
+		]
+	},
+	{
+		"project": "github.com/olekukonko/tablewriter",
+		"licenses": [
+			{
+				"type": "MIT License",
+				"confidence": 0.9891304347826086
+			}
+		]
+	},
+	{
+		"project": "github.com/prometheus/client_golang/prometheus",
+		"licenses": [
+			{
+				"type": "Apache License 2.0",
+				"confidence": 1
+			}
+		]
+	},
+	{
+		"project": "github.com/prometheus/client_model/go",
+		"licenses": [
+			{
+				"type": "Apache License 2.0",
+				"confidence": 1
+			}
+		]
+	},
+	{
+		"project": "github.com/prometheus/common",
+		"licenses": [
+			{
+				"type": "Apache License 2.0",
+				"confidence": 1
+			}
+		]
+	},
+	{
+		"project": "github.com/prometheus/procfs",
+		"licenses": [
+			{
+				"type": "Apache License 2.0",
+				"confidence": 1
+			}
+		]
+	},
+	{
+		"project": "github.com/russross/blackfriday",
+		"licenses": [
+			{
+				"type": "BSD 2-clause \"Simplified\" License",
+				"confidence": 0.9626168224299065
+			}
+		]
+	},
+	{
+		"project": "github.com/sirupsen/logrus",
+		"licenses": [
+			{
+				"type": "MIT License",
+				"confidence": 1
+			}
+		]
+	},
+	{
+		"project": "github.com/soheilhy/cmux",
+		"licenses": [
+			{
+				"type": "Apache License 2.0",
+				"confidence": 1
+			}
+		]
+	},
+	{
+		"project": "github.com/spf13/cobra",
+		"licenses": [
+			{
+				"type": "Apache License 2.0",
+				"confidence": 0.9573241061130334
+			}
+		]
+	},
+	{
+		"project": "github.com/spf13/pflag",
+		"licenses": [
+			{
+				"type": "BSD 3-clause \"New\" or \"Revised\" License",
+				"confidence": 0.9663865546218487
+			}
+		]
+	},
+	{
+		"project": "github.com/tmc/grpc-websocket-proxy/wsproxy",
+		"licenses": [
+			{
+				"type": "MIT License",
+				"confidence": 0.9891304347826086
+			}
+		]
+	},
+	{
+		"project": "github.com/ugorji/go/codec",
+		"licenses": [
+			{
+				"type": "MIT License",
+				"confidence": 0.9946524064171123
+			}
+		]
+	},
+	{
+		"project": "github.com/urfave/cli",
+		"licenses": [
+			{
+				"type": "MIT License",
+				"confidence": 1
+			}
+		]
+	},
+	{
+		"project": "github.com/xiang90/probing",
+		"licenses": [
+			{
+				"type": "MIT License",
+				"confidence": 1
+			}
+		]
+	},
+	{
+		"project": "go.uber.org/atomic",
+		"licenses": [
+			{
+				"type": "MIT License",
+				"confidence": 0.9891304347826086
+			}
+		]
+	},
+	{
+		"project": "go.uber.org/multierr",
+		"licenses": [
+			{
+				"type": "MIT License",
+				"confidence": 0.9891304347826086
+			}
+		]
+	},
+	{
+		"project": "go.uber.org/zap",
+		"licenses": [
+			{
+				"type": "MIT License",
+				"confidence": 0.9891304347826086
+			}
+		]
+	},
+	{
+		"project": "golang.org/x/crypto",
+		"licenses": [
+			{
+				"type": "BSD 3-clause \"New\" or \"Revised\" License",
+				"confidence": 0.9663865546218487
+			}
+		]
+	},
+	{
+		"project": "golang.org/x/net",
+		"licenses": [
+			{
+				"type": "BSD 3-clause \"New\" or \"Revised\" License",
+				"confidence": 0.9663865546218487
+			}
+		]
+	},
+	{
+		"project": "golang.org/x/sys/unix",
+		"licenses": [
+			{
+				"type": "BSD 3-clause \"New\" or \"Revised\" License",
+				"confidence": 0.9663865546218487
+			}
+		]
+	},
+	{
+		"project": "golang.org/x/text",
+		"licenses": [
+			{
+				"type": "BSD 3-clause \"New\" or \"Revised\" License",
+				"confidence": 0.9663865546218487
+			}
+		]
+	},
+	{
+		"project": "golang.org/x/time/rate",
+		"licenses": [
+			{
+				"type": "BSD 3-clause \"New\" or \"Revised\" License",
+				"confidence": 0.9663865546218487
+			}
+		]
+	},
+	{
+		"project": "google.golang.org/genproto/googleapis/rpc/status",
+		"licenses": [
+			{
+				"type": "Apache License 2.0",
+				"confidence": 1
+			}
+		]
+	},
+	{
+		"project": "google.golang.org/grpc",
+		"licenses": [
+			{
+				"type": "Apache License 2.0",
+				"confidence": 1
+			}
+		]
+	},
+	{
+		"project": "gopkg.in/cheggaaa/pb.v1",
+		"licenses": [
+			{
+				"type": "BSD 3-clause \"New\" or \"Revised\" License",
+				"confidence": 0.9916666666666667
+			}
+		]
+	},
+	{
+		"project": "gopkg.in/yaml.v2",
+		"licenses": [
+			{
+				"type": "The Unlicense",
+				"confidence": 0.35294117647058826
+			},
+			{
+				"type": "MIT License",
+				"confidence": 0.8975609756097561
+			}
+		]
+	}
+]
diff --git a/vendor/github.com/coreos/etcd/bill-of-materials.override.json b/vendor/github.com/coreos/etcd/bill-of-materials.override.json
new file mode 100644
index 0000000..34de90e
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/bill-of-materials.override.json
@@ -0,0 +1,26 @@
+[
+	{
+		"project": "bitbucket.org/ww/goautoneg",
+		"licenses": [
+			{
+				"type": "BSD 3-clause \"New\" or \"Revised\" License"
+			}
+		]
+	},
+	{
+		"project": "github.com/ghodss/yaml",
+		"licenses": [
+			{
+				"type": "MIT License and BSD 3-clause \"New\" or \"Revised\" License"
+			}
+		]
+	},
+	{
+		"project": "github.com/inconshreveable/mousetrap",
+		"licenses": [
+			{
+				"type": "Apache License 2.0"
+			}
+		]
+	}
+]
diff --git a/vendor/github.com/coreos/etcd/build b/vendor/github.com/coreos/etcd/build
new file mode 100755
index 0000000..b233d32
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/build
@@ -0,0 +1,66 @@
+#!/bin/sh -e
+
+# set some environment variables
+ORG_PATH="github.com/coreos"
+REPO_PATH="${ORG_PATH}/etcd"
+
+GIT_SHA=$(git rev-parse --short HEAD || echo "GitNotFound")
+if [ ! -z "$FAILPOINTS" ]; then
+	GIT_SHA="$GIT_SHA"-FAILPOINTS
+fi
+
+# Set GO_LDFLAGS="-s" for building without symbols for debugging.
+GO_LDFLAGS="$GO_LDFLAGS -X ${REPO_PATH}/cmd/vendor/${REPO_PATH}/version.GitSHA=${GIT_SHA}"
+
+# enable/disable failpoints
+toggle_failpoints() {
+	mode="$1"
+	if which gofail >/dev/null 2>&1; then
+		gofail "$mode" etcdserver/ mvcc/backend/
+	elif [ "$mode" != "disable" ]; then
+		echo "FAILPOINTS set but gofail not found"
+		exit 1
+	fi
+}
+
+toggle_failpoints_default() {
+	mode="disable"
+	if [ ! -z "$FAILPOINTS" ]; then mode="enable"; fi
+	toggle_failpoints "$mode"
+}
+
+etcd_build() {
+	out="bin"
+	if [ -n "${BINDIR}" ]; then out="${BINDIR}"; fi
+	toggle_failpoints_default
+	# Static compilation is useful when etcd is run in a container. $GO_BUILD_FLAGS is OK
+
+	# shellcheck disable=SC2086
+	CGO_ENABLED=0 go build $GO_BUILD_FLAGS -installsuffix cgo -ldflags "$GO_LDFLAGS" -o "${out}/etcd" ${REPO_PATH}/cmd/etcd || return
+	# shellcheck disable=SC2086
+	CGO_ENABLED=0 go build $GO_BUILD_FLAGS -installsuffix cgo -ldflags "$GO_LDFLAGS" -o "${out}/etcdctl" ${REPO_PATH}/cmd/etcdctl || return
+}
+
+etcd_setup_gopath() {
+	d=$(dirname "$0")
+	CDIR=$(cd "$d" && pwd)
+	cd "$CDIR"
+	etcdGOPATH="${CDIR}/gopath"
+	# preserve old gopath to support building with unvendored tooling deps (e.g., gofail)
+	if [ -n "$GOPATH" ]; then
+		GOPATH=":$GOPATH"
+	fi
+	export GOPATH=${etcdGOPATH}$GOPATH
+	rm -rf "${etcdGOPATH}/src"
+	mkdir -p "${etcdGOPATH}"
+	ln -s "${CDIR}/cmd/vendor" "${etcdGOPATH}/src"
+}
+
+toggle_failpoints_default
+
+# only build when called directly, not sourced
+if echo "$0" | grep "build$" >/dev/null; then
+	# force new gopath so builds outside of gopath work
+	etcd_setup_gopath
+	etcd_build
+fi
diff --git a/vendor/github.com/coreos/etcd/build.bat b/vendor/github.com/coreos/etcd/build.bat
new file mode 100755
index 0000000..ff9b209
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/build.bat
@@ -0,0 +1 @@
+powershell -ExecutionPolicy Bypass -File build.ps1
diff --git a/vendor/github.com/coreos/etcd/build.ps1 b/vendor/github.com/coreos/etcd/build.ps1
new file mode 100644
index 0000000..455d37d
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/build.ps1
@@ -0,0 +1,81 @@
+$ORG_PATH="github.com/coreos"
+$REPO_PATH="$ORG_PATH/etcd"
+$PWD = $((Get-Item -Path ".\" -Verbose).FullName)
+$FSROOT = $((Get-Location).Drive.Name+":")
+$FSYS = $((Get-WMIObject win32_logicaldisk -filter "DeviceID = '$FSROOT'").filesystem)
+
+if ($FSYS.StartsWith("FAT","CurrentCultureIgnoreCase")) {
+	echo "Error: Cannot build etcd using the $FSYS filesystem (use NTFS instead)"
+	exit 1
+}
+
+# Set $Env:GO_LDFLAGS="-s" for building without symbols.
+$GIT_SHA="$(git rev-parse --short HEAD)"
+$GO_LDFLAGS="$Env:GO_LDFLAGS -X $REPO_PATH/cmd/vendor/$REPO_PATH/version.GitSHA=$GIT_SHA"
+
+# rebuild symlinks
+git ls-files -s cmd | select-string -pattern 120000 | ForEach {
+	$l = $_.ToString()
+	$lnkname = $l.Split('	')[1]
+	$target = "$(git log -p HEAD -- $lnkname | select -last 2 | select -first 1)"
+	$target = $target.SubString(1,$target.Length-1).Replace("/","\")
+	$lnkname = $lnkname.Replace("/","\")
+
+	$terms = $lnkname.Split("\")
+	$dirname = $terms[0..($terms.length-2)] -join "\"
+	$lnkname = "$PWD\$lnkname"
+	$targetAbs = "$((Get-Item -Path "$dirname\$target").FullName)"
+	$targetAbs = $targetAbs.Replace("/", "\")
+
+	if (test-path -pathtype container "$targetAbs") {
+		if (Test-Path "$lnkname") {
+			if ((Get-Item "$lnkname") -is [System.IO.DirectoryInfo]) {
+				# rd so deleting junction doesn't take files with it
+				cmd /c rd  "$lnkname"
+			}
+		}
+		if (Test-Path "$lnkname") {
+			if (!((Get-Item "$lnkname") -is [System.IO.DirectoryInfo])) {
+				cmd /c del /A /F  "$lnkname"
+			}
+		}
+		cmd /c mklink /J  "$lnkname"   "$targetAbs"  ">NUL"
+	} else {
+		# Remove file with symlink data (first run)
+		if (Test-Path "$lnkname") {
+			cmd /c del /A /F  "$lnkname"
+		}
+		cmd /c mklink /H  "$lnkname"   "$targetAbs"  ">NUL"
+	}
+}
+
+if (-not $env:GOPATH) {
+	$orgpath="$PWD\gopath\src\" + $ORG_PATH.Replace("/", "\")
+	if (Test-Path "$orgpath\etcd") {
+		if ((Get-Item "$orgpath\etcd") -is [System.IO.DirectoryInfo]) {
+			# rd so deleting junction doesn't take files with it
+			cmd /c rd  "$orgpath\etcd"
+		}
+	}
+	if (Test-Path "$orgpath") {
+		if ((Get-Item "$orgpath") -is [System.IO.DirectoryInfo]) {
+			# rd so deleting junction doesn't take files with it
+			cmd /c rd  "$orgpath"
+		}
+	}
+	if (Test-Path "$orgpath") {
+		if (!((Get-Item "$orgpath") -is [System.IO.DirectoryInfo])) {
+			# Remove file with symlink data (first run)
+			cmd /c del /A /F  "$orgpath"
+		}
+	}
+	cmd /c mkdir  "$orgpath"
+	cmd /c mklink /J  "$orgpath\etcd"   "$PWD"  ">NUL"
+	$env:GOPATH = "$PWD\gopath"
+}
+
+# Static compilation is useful when etcd is run in a container
+$env:CGO_ENABLED = 0
+$env:GO15VENDOREXPERIMENT = 1
+go build -a -installsuffix cgo -ldflags $GO_LDFLAGS -o bin\etcd.exe "$REPO_PATH\cmd\etcd"
+go build -a -installsuffix cgo -ldflags $GO_LDFLAGS -o bin\etcdctl.exe "$REPO_PATH\cmd\etcdctl"
diff --git a/vendor/github.com/coreos/etcd/client/README.md b/vendor/github.com/coreos/etcd/client/README.md
new file mode 100644
index 0000000..2be731e
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/client/README.md
@@ -0,0 +1,117 @@
+# etcd/client
+
+etcd/client is the Go client library for etcd.
+
+[![GoDoc](https://godoc.org/github.com/coreos/etcd/client?status.png)](https://godoc.org/github.com/coreos/etcd/client)
+
+etcd uses the `cmd/vendor` directory to store external dependencies, which are
+compiled into the etcd release binaries. `client` can be imported without
+vendoring. For full compatibility, it is recommended that builds vendor
+etcd's packages, using tools like godep, as described in
+[vendor directories](https://golang.org/cmd/go/#hdr-Vendor_Directories).
+For more detail, please read the [Go vendor design](https://golang.org/s/go15vendor).
+
+## Install
+
+```bash
+go get github.com/coreos/etcd/client
+```
+
+## Usage
+
+```go
+package main
+
+import (
+	"log"
+	"time"
+	"context"
+
+	"github.com/coreos/etcd/client"
+)
+
+func main() {
+	cfg := client.Config{
+		Endpoints:               []string{"http://127.0.0.1:2379"},
+		Transport:               client.DefaultTransport,
+		// set timeout per request to fail fast when the target endpoint is unavailable
+		HeaderTimeoutPerRequest: time.Second,
+	}
+	c, err := client.New(cfg)
+	if err != nil {
+		log.Fatal(err)
+	}
+	kapi := client.NewKeysAPI(c)
+	// set "/foo" key with "bar" value
+	log.Print("Setting '/foo' key with 'bar' value")
+	resp, err := kapi.Set(context.Background(), "/foo", "bar", nil)
+	if err != nil {
+		log.Fatal(err)
+	} else {
+		// print common key info
+		log.Printf("Set is done. Metadata is %q\n", resp)
+	}
+	// get "/foo" key's value
+	log.Print("Getting '/foo' key value")
+	resp, err = kapi.Get(context.Background(), "/foo", nil)
+	if err != nil {
+		log.Fatal(err)
+	} else {
+		// print common key info
+		log.Printf("Get is done. Metadata is %q\n", resp)
+		// print value
+		log.Printf("%q key has %q value\n", resp.Node.Key, resp.Node.Value)
+	}
+}
+```
+
+## Error Handling
+
+The etcd client might return three types of errors.
+
+- context error
+
+Each API call takes a `context` as its first parameter. A context can be canceled or have an attached deadline. If the context is canceled or reaches its deadline, the corresponding context error will be returned no matter what internal errors the API call has already encountered.
+
+- cluster error
+
+Each API call tries to send a request to the cluster endpoints one by one until it successfully gets a response. If a request to an endpoint fails, due to exceeding the per-request timeout or connection issues, the error is added to a list of errors. If all possible endpoints fail, a cluster error that includes all encountered errors will be returned.
+
+- response error
+
+If the response received from the cluster is invalid, a plain string error will be returned. For example, it might be an invalid JSON error.
+
+Here is example code for handling client errors:
+
+```go
+cfg := client.Config{Endpoints: []string{"http://etcd1:2379","http://etcd2:2379","http://etcd3:2379"}}
+c, err := client.New(cfg)
+if err != nil {
+	log.Fatal(err)
+}
+
+kapi := client.NewKeysAPI(c)
+resp, err := kapi.Set(ctx, "test", "bar", nil)
+if err != nil {
+	if err == context.Canceled {
+		// ctx is canceled by another routine
+	} else if err == context.DeadlineExceeded {
+		// ctx is attached with a deadline and it exceeded
+	} else if cerr, ok := err.(*client.ClusterError); ok {
+		// process (cerr.Errors)
+	} else {
+		// bad cluster endpoints, which are not etcd servers
+	}
+}
+```
+
+
+## Caveat
+
+1. etcd/client prefers to use the same endpoint as long as that endpoint continues to work well. This saves socket resources and improves efficiency for both the client and the server. This preference doesn't remove consistency from the data consumed by the client because data replicated to each etcd member has already passed through the consensus process.
+
+2. etcd/client does round-robin rotation on other available endpoints if the preferred endpoint isn't functioning properly. For example, if the member that etcd/client connects to is hard killed, etcd/client will fail on the first attempt with the killed member, and succeed on the second attempt with another member. If it fails to talk to all available endpoints, it will return all of the errors it encountered.
+
+3. The default etcd/client currently cannot handle the case where the remote server is SIGSTOPed. The TCP keepalive mechanism doesn't help in this scenario because the operating system may still send TCP keepalive packets. Over time we'd like to improve this functionality, but solving this issue isn't a high priority because a real-life case in which a server is stopped, but the connection is kept alive, hasn't been brought to our attention.
+
+4. etcd/client cannot detect whether a member is healthy when using watches or non-quorum read requests. If the member is isolated from the cluster, etcd/client may retrieve outdated data. Instead, users can either issue quorum read requests (see the sketch below) or monitor the /health endpoint for member health information.
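+
+For reference, a minimal quorum-read sketch (assuming an existing `KeysAPI` named `kapi`):
+
+```go
+// quorum reads go through consensus, avoiding stale data from isolated members
+resp, err := kapi.Get(context.Background(), "/foo", &client.GetOptions{Quorum: true})
+if err != nil {
+	// handle error
+}
+```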
diff --git a/vendor/github.com/coreos/etcd/client/auth_role.go b/vendor/github.com/coreos/etcd/client/auth_role.go
new file mode 100644
index 0000000..b6ba7e1
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/client/auth_role.go
@@ -0,0 +1,236 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"net/http"
+	"net/url"
+)
+
+type Role struct {
+	Role        string       `json:"role"`
+	Permissions Permissions  `json:"permissions"`
+	Grant       *Permissions `json:"grant,omitempty"`
+	Revoke      *Permissions `json:"revoke,omitempty"`
+}
+
+type Permissions struct {
+	KV rwPermission `json:"kv"`
+}
+
+type rwPermission struct {
+	Read  []string `json:"read"`
+	Write []string `json:"write"`
+}
+
+type PermissionType int
+
+const (
+	ReadPermission PermissionType = iota
+	WritePermission
+	ReadWritePermission
+)
+
+// NewAuthRoleAPI constructs a new AuthRoleAPI that uses HTTP to
+// interact with etcd's role creation and modification features.
+func NewAuthRoleAPI(c Client) AuthRoleAPI {
+	return &httpAuthRoleAPI{
+		client: c,
+	}
+}
+
+type AuthRoleAPI interface {
+	// AddRole adds a role.
+	AddRole(ctx context.Context, role string) error
+
+	// RemoveRole removes a role.
+	RemoveRole(ctx context.Context, role string) error
+
+	// GetRole retrieves role details.
+	GetRole(ctx context.Context, role string) (*Role, error)
+
+	// GrantRoleKV grants a role some permission prefixes for the KV store.
+	GrantRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error)
+
+	// RevokeRoleKV revokes some permission prefixes for a role on the KV store.
+	RevokeRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error)
+
+	// ListRoles lists roles.
+	ListRoles(ctx context.Context) ([]string, error)
+}
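+
+// A minimal usage sketch for AuthRoleAPI (illustrative; assumes an existing
+// Client c, a context ctx, and an example role name "ops"):
+//
+//	roleAPI := NewAuthRoleAPI(c)
+//	if err := roleAPI.AddRole(ctx, "ops"); err != nil {
+//		// handle error
+//	}
+//	role, err := roleAPI.GrantRoleKV(ctx, "ops", []string{"/ops"}, ReadWritePermission)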
+
+type httpAuthRoleAPI struct {
+	client httpClient
+}
+
+type authRoleAPIAction struct {
+	verb string
+	name string
+	role *Role
+}
+
+type authRoleAPIList struct{}
+
+func (list *authRoleAPIList) HTTPRequest(ep url.URL) *http.Request {
+	u := v2AuthURL(ep, "roles", "")
+	req, _ := http.NewRequest("GET", u.String(), nil)
+	req.Header.Set("Content-Type", "application/json")
+	return req
+}
+
+func (l *authRoleAPIAction) HTTPRequest(ep url.URL) *http.Request {
+	u := v2AuthURL(ep, "roles", l.name)
+	if l.role == nil {
+		req, _ := http.NewRequest(l.verb, u.String(), nil)
+		return req
+	}
+	b, err := json.Marshal(l.role)
+	if err != nil {
+		panic(err)
+	}
+	body := bytes.NewReader(b)
+	req, _ := http.NewRequest(l.verb, u.String(), body)
+	req.Header.Set("Content-Type", "application/json")
+	return req
+}
+
+func (r *httpAuthRoleAPI) ListRoles(ctx context.Context) ([]string, error) {
+	resp, body, err := r.client.Do(ctx, &authRoleAPIList{})
+	if err != nil {
+		return nil, err
+	}
+	if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
+		return nil, err
+	}
+	var roleList struct {
+		Roles []Role `json:"roles"`
+	}
+	if err = json.Unmarshal(body, &roleList); err != nil {
+		return nil, err
+	}
+	ret := make([]string, 0, len(roleList.Roles))
+	for _, r := range roleList.Roles {
+		ret = append(ret, r.Role)
+	}
+	return ret, nil
+}
+
+func (r *httpAuthRoleAPI) AddRole(ctx context.Context, rolename string) error {
+	role := &Role{
+		Role: rolename,
+	}
+	return r.addRemoveRole(ctx, &authRoleAPIAction{
+		verb: "PUT",
+		name: rolename,
+		role: role,
+	})
+}
+
+func (r *httpAuthRoleAPI) RemoveRole(ctx context.Context, rolename string) error {
+	return r.addRemoveRole(ctx, &authRoleAPIAction{
+		verb: "DELETE",
+		name: rolename,
+	})
+}
+
+func (r *httpAuthRoleAPI) addRemoveRole(ctx context.Context, req *authRoleAPIAction) error {
+	resp, body, err := r.client.Do(ctx, req)
+	if err != nil {
+		return err
+	}
+	if err := assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
+		var sec authError
+		err := json.Unmarshal(body, &sec)
+		if err != nil {
+			return err
+		}
+		return sec
+	}
+	return nil
+}
+
+func (r *httpAuthRoleAPI) GetRole(ctx context.Context, rolename string) (*Role, error) {
+	return r.modRole(ctx, &authRoleAPIAction{
+		verb: "GET",
+		name: rolename,
+	})
+}
+
+func buildRWPermission(prefixes []string, permType PermissionType) rwPermission {
+	var out rwPermission
+	switch permType {
+	case ReadPermission:
+		out.Read = prefixes
+	case WritePermission:
+		out.Write = prefixes
+	case ReadWritePermission:
+		out.Read = prefixes
+		out.Write = prefixes
+	}
+	return out
+}
+
+func (r *httpAuthRoleAPI) GrantRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) {
+	rwp := buildRWPermission(prefixes, permType)
+	role := &Role{
+		Role: rolename,
+		Grant: &Permissions{
+			KV: rwp,
+		},
+	}
+	return r.modRole(ctx, &authRoleAPIAction{
+		verb: "PUT",
+		name: rolename,
+		role: role,
+	})
+}
+
+func (r *httpAuthRoleAPI) RevokeRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) {
+	rwp := buildRWPermission(prefixes, permType)
+	role := &Role{
+		Role: rolename,
+		Revoke: &Permissions{
+			KV: rwp,
+		},
+	}
+	return r.modRole(ctx, &authRoleAPIAction{
+		verb: "PUT",
+		name: rolename,
+		role: role,
+	})
+}
+
+func (r *httpAuthRoleAPI) modRole(ctx context.Context, req *authRoleAPIAction) (*Role, error) {
+	resp, body, err := r.client.Do(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+	if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
+		var sec authError
+		err = json.Unmarshal(body, &sec)
+		if err != nil {
+			return nil, err
+		}
+		return nil, sec
+	}
+	var role Role
+	if err = json.Unmarshal(body, &role); err != nil {
+		return nil, err
+	}
+	return &role, nil
+}
diff --git a/vendor/github.com/coreos/etcd/client/auth_user.go b/vendor/github.com/coreos/etcd/client/auth_user.go
new file mode 100644
index 0000000..8e7e2ef
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/client/auth_user.go
@@ -0,0 +1,319 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"net/http"
+	"net/url"
+	"path"
+)
+
+var (
+	defaultV2AuthPrefix = "/v2/auth"
+)
+
+type User struct {
+	User     string   `json:"user"`
+	Password string   `json:"password,omitempty"`
+	Roles    []string `json:"roles"`
+	Grant    []string `json:"grant,omitempty"`
+	Revoke   []string `json:"revoke,omitempty"`
+}
+
+// userListEntry is the user representation given by the server for ListUsers
+type userListEntry struct {
+	User  string `json:"user"`
+	Roles []Role `json:"roles"`
+}
+
+type UserRoles struct {
+	User  string `json:"user"`
+	Roles []Role `json:"roles"`
+}
+
+func v2AuthURL(ep url.URL, action string, name string) *url.URL {
+	if name != "" {
+		ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action, name)
+		return &ep
+	}
+	ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action)
+	return &ep
+}
+
+// NewAuthAPI constructs a new AuthAPI that uses HTTP to
+// interact with etcd's general auth features.
+func NewAuthAPI(c Client) AuthAPI {
+	return &httpAuthAPI{
+		client: c,
+	}
+}
+
+type AuthAPI interface {
+	// Enable auth.
+	Enable(ctx context.Context) error
+
+	// Disable auth.
+	Disable(ctx context.Context) error
+}
+
+type httpAuthAPI struct {
+	client httpClient
+}
+
+func (s *httpAuthAPI) Enable(ctx context.Context) error {
+	return s.enableDisable(ctx, &authAPIAction{"PUT"})
+}
+
+func (s *httpAuthAPI) Disable(ctx context.Context) error {
+	return s.enableDisable(ctx, &authAPIAction{"DELETE"})
+}
+
+func (s *httpAuthAPI) enableDisable(ctx context.Context, req httpAction) error {
+	resp, body, err := s.client.Do(ctx, req)
+	if err != nil {
+		return err
+	}
+	if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
+		var sec authError
+		err = json.Unmarshal(body, &sec)
+		if err != nil {
+			return err
+		}
+		return sec
+	}
+	return nil
+}
+
+type authAPIAction struct {
+	verb string
+}
+
+func (l *authAPIAction) HTTPRequest(ep url.URL) *http.Request {
+	u := v2AuthURL(ep, "enable", "")
+	req, _ := http.NewRequest(l.verb, u.String(), nil)
+	return req
+}
+
+type authError struct {
+	Message string `json:"message"`
+	Code    int    `json:"-"`
+}
+
+func (e authError) Error() string {
+	return e.Message
+}
+
+// NewAuthUserAPI constructs a new AuthUserAPI that uses HTTP to
+// interact with etcd's user creation and modification features.
+func NewAuthUserAPI(c Client) AuthUserAPI {
+	return &httpAuthUserAPI{
+		client: c,
+	}
+}
+
+type AuthUserAPI interface {
+	// AddUser adds a user.
+	AddUser(ctx context.Context, username string, password string) error
+
+	// RemoveUser removes a user.
+	RemoveUser(ctx context.Context, username string) error
+
+	// GetUser retrieves user details.
+	GetUser(ctx context.Context, username string) (*User, error)
+
+	// GrantUser grants a user some permission roles.
+	GrantUser(ctx context.Context, username string, roles []string) (*User, error)
+
+	// RevokeUser revokes some permission roles from a user.
+	RevokeUser(ctx context.Context, username string, roles []string) (*User, error)
+
+	// ChangePassword changes the user's password.
+	ChangePassword(ctx context.Context, username string, password string) (*User, error)
+
+	// ListUsers lists the users.
+	ListUsers(ctx context.Context) ([]string, error)
+}
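+
+// A minimal usage sketch for AuthUserAPI (illustrative; assumes an existing
+// Client c, a context ctx, and example credentials):
+//
+//	userAPI := NewAuthUserAPI(c)
+//	if err := userAPI.AddUser(ctx, "alice", "secret"); err != nil {
+//		// handle error
+//	}
+//	users, err := userAPI.ListUsers(ctx)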
+
+type httpAuthUserAPI struct {
+	client httpClient
+}
+
+type authUserAPIAction struct {
+	verb     string
+	username string
+	user     *User
+}
+
+type authUserAPIList struct{}
+
+func (list *authUserAPIList) HTTPRequest(ep url.URL) *http.Request {
+	u := v2AuthURL(ep, "users", "")
+	req, _ := http.NewRequest("GET", u.String(), nil)
+	req.Header.Set("Content-Type", "application/json")
+	return req
+}
+
+func (l *authUserAPIAction) HTTPRequest(ep url.URL) *http.Request {
+	u := v2AuthURL(ep, "users", l.username)
+	if l.user == nil {
+		req, _ := http.NewRequest(l.verb, u.String(), nil)
+		return req
+	}
+	b, err := json.Marshal(l.user)
+	if err != nil {
+		panic(err)
+	}
+	body := bytes.NewReader(b)
+	req, _ := http.NewRequest(l.verb, u.String(), body)
+	req.Header.Set("Content-Type", "application/json")
+	return req
+}
+
+func (u *httpAuthUserAPI) ListUsers(ctx context.Context) ([]string, error) {
+	resp, body, err := u.client.Do(ctx, &authUserAPIList{})
+	if err != nil {
+		return nil, err
+	}
+	if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
+		var sec authError
+		err = json.Unmarshal(body, &sec)
+		if err != nil {
+			return nil, err
+		}
+		return nil, sec
+	}
+
+	var userList struct {
+		Users []userListEntry `json:"users"`
+	}
+
+	if err = json.Unmarshal(body, &userList); err != nil {
+		return nil, err
+	}
+
+	ret := make([]string, 0, len(userList.Users))
+	for _, u := range userList.Users {
+		ret = append(ret, u.User)
+	}
+	return ret, nil
+}
+
+func (u *httpAuthUserAPI) AddUser(ctx context.Context, username string, password string) error {
+	user := &User{
+		User:     username,
+		Password: password,
+	}
+	return u.addRemoveUser(ctx, &authUserAPIAction{
+		verb:     "PUT",
+		username: username,
+		user:     user,
+	})
+}
+
+func (u *httpAuthUserAPI) RemoveUser(ctx context.Context, username string) error {
+	return u.addRemoveUser(ctx, &authUserAPIAction{
+		verb:     "DELETE",
+		username: username,
+	})
+}
+
+func (u *httpAuthUserAPI) addRemoveUser(ctx context.Context, req *authUserAPIAction) error {
+	resp, body, err := u.client.Do(ctx, req)
+	if err != nil {
+		return err
+	}
+	if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
+		var sec authError
+		err = json.Unmarshal(body, &sec)
+		if err != nil {
+			return err
+		}
+		return sec
+	}
+	return nil
+}
+
+func (u *httpAuthUserAPI) GetUser(ctx context.Context, username string) (*User, error) {
+	return u.modUser(ctx, &authUserAPIAction{
+		verb:     "GET",
+		username: username,
+	})
+}
+
+func (u *httpAuthUserAPI) GrantUser(ctx context.Context, username string, roles []string) (*User, error) {
+	user := &User{
+		User:  username,
+		Grant: roles,
+	}
+	return u.modUser(ctx, &authUserAPIAction{
+		verb:     "PUT",
+		username: username,
+		user:     user,
+	})
+}
+
+func (u *httpAuthUserAPI) RevokeUser(ctx context.Context, username string, roles []string) (*User, error) {
+	user := &User{
+		User:   username,
+		Revoke: roles,
+	}
+	return u.modUser(ctx, &authUserAPIAction{
+		verb:     "PUT",
+		username: username,
+		user:     user,
+	})
+}
+
+func (u *httpAuthUserAPI) ChangePassword(ctx context.Context, username string, password string) (*User, error) {
+	user := &User{
+		User:     username,
+		Password: password,
+	}
+	return u.modUser(ctx, &authUserAPIAction{
+		verb:     "PUT",
+		username: username,
+		user:     user,
+	})
+}
+
+func (u *httpAuthUserAPI) modUser(ctx context.Context, req *authUserAPIAction) (*User, error) {
+	resp, body, err := u.client.Do(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+	if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
+		var sec authError
+		err = json.Unmarshal(body, &sec)
+		if err != nil {
+			return nil, err
+		}
+		return nil, sec
+	}
+	var user User
+	if err = json.Unmarshal(body, &user); err != nil {
+		var userR UserRoles
+		if urerr := json.Unmarshal(body, &userR); urerr != nil {
+			return nil, err
+		}
+		user.User = userR.User
+		for _, r := range userR.Roles {
+			user.Roles = append(user.Roles, r.Role)
+		}
+	}
+	return &user, nil
+}
diff --git a/vendor/github.com/coreos/etcd/client/cancelreq.go b/vendor/github.com/coreos/etcd/client/cancelreq.go
new file mode 100644
index 0000000..76d1f04
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/client/cancelreq.go
@@ -0,0 +1,18 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// borrowed from golang/net/context/ctxhttp/cancelreq.go
+
+package client
+
+import "net/http"
+
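+// requestCanceler returns a function that cancels req by closing the channel
+// assigned to req.Cancel. The CancelableTransport argument is unused by this
+// implementation but is kept so callers can pass their transport uniformly.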
+func requestCanceler(tr CancelableTransport, req *http.Request) func() {
+	ch := make(chan struct{})
+	req.Cancel = ch
+
+	return func() {
+		close(ch)
+	}
+}
diff --git a/vendor/github.com/coreos/etcd/client/client.go b/vendor/github.com/coreos/etcd/client/client.go
new file mode 100644
index 0000000..e687450
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/client/client.go
@@ -0,0 +1,710 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"math/rand"
+	"net"
+	"net/http"
+	"net/url"
+	"sort"
+	"strconv"
+	"sync"
+	"time"
+
+	"github.com/coreos/etcd/version"
+)
+
+var (
+	ErrNoEndpoints           = errors.New("client: no endpoints available")
+	ErrTooManyRedirects      = errors.New("client: too many redirects")
+	ErrClusterUnavailable    = errors.New("client: etcd cluster is unavailable or misconfigured")
+	ErrNoLeaderEndpoint      = errors.New("client: no leader endpoint available")
+	errTooManyRedirectChecks = errors.New("client: too many redirect checks")
+
+	// oneShotCtxValue is set on a context using WithValue(&oneShotCtxValue) so
+	// that Do() will not retry a request
+	oneShotCtxValue interface{}
+)
+
+var DefaultRequestTimeout = 5 * time.Second
+
+var DefaultTransport CancelableTransport = &http.Transport{
+	Proxy: http.ProxyFromEnvironment,
+	Dial: (&net.Dialer{
+		Timeout:   30 * time.Second,
+		KeepAlive: 30 * time.Second,
+	}).Dial,
+	TLSHandshakeTimeout: 10 * time.Second,
+}
+
+type EndpointSelectionMode int
+
+const (
+	// EndpointSelectionRandom is the default value of the 'SelectionMode'.
+	// As the name implies, the client object will pick a node from the members
+	// of the cluster in a random fashion. If the cluster has three members, A, B,
+	// and C, the client picks any node from its three members as its request
+	// destination.
+	EndpointSelectionRandom EndpointSelectionMode = iota
+
+	// If 'SelectionMode' is set to 'EndpointSelectionPrioritizeLeader',
+	// requests are sent directly to the cluster leader. This reduces
+	// forwarding roundtrips compared to making requests to etcd followers
+	// who then forward them to the cluster leader. In the event of a leader
+	// failure, however, clients configured this way cannot prioritize among
+	// the remaining etcd followers. Therefore, when a client sets 'SelectionMode'
+	// to 'EndpointSelectionPrioritizeLeader', it must use 'client.AutoSync()' to
+	// maintain its knowledge of the current cluster state; see the sketch
+	// after this block.
+	EndpointSelectionPrioritizeLeader
+)
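+
+// For example (illustrative), a client that prioritizes the leader combines
+// SelectionMode with a periodic AutoSync:
+//
+//	cfg := Config{
+//		Endpoints:     []string{"http://node1:2379", "http://node2:2379"},
+//		SelectionMode: EndpointSelectionPrioritizeLeader,
+//	}
+//	c, err := New(cfg)
+//	if err != nil {
+//		// handle error
+//	}
+//	go c.AutoSync(context.Background(), 10*time.Second)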
+
+type Config struct {
+	// Endpoints defines a set of URLs (schemes, hosts and ports only)
+	// that can be used to communicate with a logical etcd cluster. For
+	// example, a three-node cluster could be provided like so:
+	//
+	// 	Endpoints: []string{
+	//		"http://node1.example.com:2379",
+	//		"http://node2.example.com:2379",
+	//		"http://node3.example.com:2379",
+	//	}
+	//
+	// If multiple endpoints are provided, the Client will attempt to
+	// use them all in the event that one or more of them are unusable.
+	//
+	// If Client.Sync is ever called, the Client may cache an alternate
+	// set of endpoints to continue operation.
+	Endpoints []string
+
+	// Transport is used by the Client to drive HTTP requests. If not
+	// provided, DefaultTransport will be used.
+	Transport CancelableTransport
+
+	// CheckRedirect specifies the policy for handling HTTP redirects.
+	// If CheckRedirect is not nil, the Client calls it before
+	// following an HTTP redirect. The sole argument is the number of
+	// requests that have already been made. If CheckRedirect returns
+	// an error, Client.Do will not make any further requests and will
+	// return the error to the caller.
+	//
+	// If CheckRedirect is nil, the Client uses its default policy,
+	// which is to stop after 10 consecutive requests.
+	CheckRedirect CheckRedirectFunc
+
+	// Username specifies the user credential to add as an authorization header
+	Username string
+
+	// Password is the password for the specified user to add as an authorization header
+	// to the request.
+	Password string
+
+	// HeaderTimeoutPerRequest specifies the time limit to wait for response
+	// header in a single request made by the Client. The timeout includes
+	// connection time, any redirects, and header wait time.
+	//
+	// For a non-watch GET request, the server returns the response body
+	// immediately. For PUT/POST/DELETE requests, the server will attempt to
+	// commit the request before responding, which is expected to take
+	// `100ms + 2 * RTT`. For a watch request, the server returns the header
+	// immediately to notify the Client that the watch has started. But if the
+	// server is behind some kind of proxy, the response header may be cached
+	// at the proxy, and the Client cannot rely on this behavior.
+	//
+	// In particular, wait requests ignore this timeout.
+	//
+	// One API call may send multiple requests to different etcd servers until it
+	// succeeds. Use the context of the API call to specify the overall timeout.
+	//
+	// A HeaderTimeoutPerRequest of zero means no timeout.
+	HeaderTimeoutPerRequest time.Duration
+
+	// SelectionMode is an EndpointSelectionMode enum that specifies the
+	// policy for choosing the etcd cluster node to which requests are sent.
+	SelectionMode EndpointSelectionMode
+}
+
+func (cfg *Config) transport() CancelableTransport {
+	if cfg.Transport == nil {
+		return DefaultTransport
+	}
+	return cfg.Transport
+}
+
+func (cfg *Config) checkRedirect() CheckRedirectFunc {
+	if cfg.CheckRedirect == nil {
+		return DefaultCheckRedirect
+	}
+	return cfg.CheckRedirect
+}
+
+// CancelableTransport mimics net/http.Transport, but requires that
+// the object also support request cancellation.
+type CancelableTransport interface {
+	http.RoundTripper
+	CancelRequest(req *http.Request)
+}
+
+type CheckRedirectFunc func(via int) error
+
+// DefaultCheckRedirect follows up to 10 redirects, but no more.
+var DefaultCheckRedirect CheckRedirectFunc = func(via int) error {
+	if via > 10 {
+		return ErrTooManyRedirects
+	}
+	return nil
+}
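+
+// A stricter policy can be supplied via Config.CheckRedirect (illustrative):
+//
+//	cfg.CheckRedirect = func(via int) error {
+//		if via > 2 {
+//			return ErrTooManyRedirects
+//		}
+//		return nil
+//	}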
+
+type Client interface {
+	// Sync updates the internal cache of the etcd cluster's membership.
+	Sync(context.Context) error
+
+	// AutoSync periodically calls Sync() at the given interval.
+	// The recommended sync interval is 10 seconds to 1 minute: frequent
+	// enough for the client to catch up with cluster changes in time,
+	// without adding significant overhead to the server.
+	//
+	// The example to use it:
+	//
+	//  for {
+	//      err := client.AutoSync(ctx, 10*time.Second)
+	//      if err == context.DeadlineExceeded || err == context.Canceled {
+	//          break
+	//      }
+	//      log.Print(err)
+	//  }
+	AutoSync(context.Context, time.Duration) error
+
+	// Endpoints returns a copy of the current set of API endpoints used
+	// by Client to resolve HTTP requests. If Sync has ever been called,
+	// this may differ from the initial Endpoints provided in the Config.
+	Endpoints() []string
+
+	// SetEndpoints sets the set of API endpoints used by Client to resolve
+	// HTTP requests. If the given endpoints are not valid, an error will be
+	// returned.
+	SetEndpoints(eps []string) error
+
+	// GetVersion retrieves the current etcd server and cluster version
+	GetVersion(ctx context.Context) (*version.Versions, error)
+
+	httpClient
+}
+
+func New(cfg Config) (Client, error) {
+	c := &httpClusterClient{
+		clientFactory: newHTTPClientFactory(cfg.transport(), cfg.checkRedirect(), cfg.HeaderTimeoutPerRequest),
+		rand:          rand.New(rand.NewSource(int64(time.Now().Nanosecond()))),
+		selectionMode: cfg.SelectionMode,
+	}
+	if cfg.Username != "" {
+		c.credentials = &credentials{
+			username: cfg.Username,
+			password: cfg.Password,
+		}
+	}
+	if err := c.SetEndpoints(cfg.Endpoints); err != nil {
+		return nil, err
+	}
+	return c, nil
+}
+
+type httpClient interface {
+	Do(context.Context, httpAction) (*http.Response, []byte, error)
+}
+
+func newHTTPClientFactory(tr CancelableTransport, cr CheckRedirectFunc, headerTimeout time.Duration) httpClientFactory {
+	return func(ep url.URL) httpClient {
+		return &redirectFollowingHTTPClient{
+			checkRedirect: cr,
+			client: &simpleHTTPClient{
+				transport:     tr,
+				endpoint:      ep,
+				headerTimeout: headerTimeout,
+			},
+		}
+	}
+}
+
+type credentials struct {
+	username string
+	password string
+}
+
+type httpClientFactory func(url.URL) httpClient
+
+type httpAction interface {
+	HTTPRequest(url.URL) *http.Request
+}
+
+type httpClusterClient struct {
+	clientFactory httpClientFactory
+	endpoints     []url.URL
+	pinned        int
+	credentials   *credentials
+	sync.RWMutex
+	rand          *rand.Rand
+	selectionMode EndpointSelectionMode
+}
+
+func (c *httpClusterClient) getLeaderEndpoint(ctx context.Context, eps []url.URL) (string, error) {
+	ceps := make([]url.URL, len(eps))
+	copy(ceps, eps)
+
+	// To perform a lookup on the new endpoint list without using the current
+	// client, we'll copy it
+	clientCopy := &httpClusterClient{
+		clientFactory: c.clientFactory,
+		credentials:   c.credentials,
+		rand:          c.rand,
+
+		pinned:    0,
+		endpoints: ceps,
+	}
+
+	mAPI := NewMembersAPI(clientCopy)
+	leader, err := mAPI.Leader(ctx)
+	if err != nil {
+		return "", err
+	}
+	if len(leader.ClientURLs) == 0 {
+		return "", ErrNoLeaderEndpoint
+	}
+
+	return leader.ClientURLs[0], nil // TODO: how to handle multiple client URLs?
+}
+
+func (c *httpClusterClient) parseEndpoints(eps []string) ([]url.URL, error) {
+	if len(eps) == 0 {
+		return []url.URL{}, ErrNoEndpoints
+	}
+
+	neps := make([]url.URL, len(eps))
+	for i, ep := range eps {
+		u, err := url.Parse(ep)
+		if err != nil {
+			return []url.URL{}, err
+		}
+		neps[i] = *u
+	}
+	return neps, nil
+}
+
+func (c *httpClusterClient) SetEndpoints(eps []string) error {
+	neps, err := c.parseEndpoints(eps)
+	if err != nil {
+		return err
+	}
+
+	c.Lock()
+	defer c.Unlock()
+
+	c.endpoints = shuffleEndpoints(c.rand, neps)
+	// We're not doing anything for PrioritizeLeader here: without a context
+	// we can't call getLeaderEndpoint. However, if you're using
+	// PrioritizeLeader, you've already been told to call Sync() regularly;
+	// there we do have a ctx and can determine the leader. PrioritizeLeader
+	// is also quite a loose guarantee, so this is acceptable.
+	c.pinned = 0
+
+	return nil
+}
+
+func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
+	action := act
+	c.RLock()
+	leps := len(c.endpoints)
+	eps := make([]url.URL, leps)
+	n := copy(eps, c.endpoints)
+	pinned := c.pinned
+
+	if c.credentials != nil {
+		action = &authedAction{
+			act:         act,
+			credentials: *c.credentials,
+		}
+	}
+	c.RUnlock()
+
+	if leps == 0 {
+		return nil, nil, ErrNoEndpoints
+	}
+
+	if leps != n {
+		return nil, nil, errors.New("unable to pick endpoint: copy failed")
+	}
+
+	var resp *http.Response
+	var body []byte
+	var err error
+	cerr := &ClusterError{}
+	isOneShot := ctx.Value(&oneShotCtxValue) != nil
+
+	for i := pinned; i < leps+pinned; i++ {
+		k := i % leps
+		hc := c.clientFactory(eps[k])
+		resp, body, err = hc.Do(ctx, action)
+		if err != nil {
+			cerr.Errors = append(cerr.Errors, err)
+			if err == ctx.Err() {
+				return nil, nil, ctx.Err()
+			}
+			if err == context.Canceled || err == context.DeadlineExceeded {
+				return nil, nil, err
+			}
+		} else if resp.StatusCode/100 == 5 {
+			switch resp.StatusCode {
+			case http.StatusInternalServerError, http.StatusServiceUnavailable:
+				// TODO: make sure this is a no leader response
+				cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s has no leader", eps[k].String()))
+			default:
+				cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s returns server error [%s]", eps[k].String(), http.StatusText(resp.StatusCode)))
+			}
+			err = cerr.Errors[0]
+		}
+		if err != nil {
+			if !isOneShot {
+				continue
+			}
+			c.Lock()
+			c.pinned = (k + 1) % leps
+			c.Unlock()
+			return nil, nil, err
+		}
+		if k != pinned {
+			c.Lock()
+			c.pinned = k
+			c.Unlock()
+		}
+		return resp, body, nil
+	}
+
+	return nil, nil, cerr
+}
+
+func (c *httpClusterClient) Endpoints() []string {
+	c.RLock()
+	defer c.RUnlock()
+
+	eps := make([]string, len(c.endpoints))
+	for i, ep := range c.endpoints {
+		eps[i] = ep.String()
+	}
+
+	return eps
+}
+
+func (c *httpClusterClient) Sync(ctx context.Context) error {
+	mAPI := NewMembersAPI(c)
+	ms, err := mAPI.List(ctx)
+	if err != nil {
+		return err
+	}
+
+	var eps []string
+	for _, m := range ms {
+		eps = append(eps, m.ClientURLs...)
+	}
+
+	neps, err := c.parseEndpoints(eps)
+	if err != nil {
+		return err
+	}
+
+	npin := 0
+
+	switch c.selectionMode {
+	case EndpointSelectionRandom:
+		c.RLock()
+		eq := endpointsEqual(c.endpoints, neps)
+		c.RUnlock()
+
+		if eq {
+			return nil
+		}
+		// When items in the endpoint list change, we choose a new pin
+		neps = shuffleEndpoints(c.rand, neps)
+	case EndpointSelectionPrioritizeLeader:
+		nle, err := c.getLeaderEndpoint(ctx, neps)
+		if err != nil {
+			return ErrNoLeaderEndpoint
+		}
+
+		for i, n := range neps {
+			if n.String() == nle {
+				npin = i
+				break
+			}
+		}
+	default:
+		return fmt.Errorf("invalid endpoint selection mode: %d", c.selectionMode)
+	}
+
+	c.Lock()
+	defer c.Unlock()
+	c.endpoints = neps
+	c.pinned = npin
+
+	return nil
+}
+
+func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration) error {
+	ticker := time.NewTicker(interval)
+	defer ticker.Stop()
+	for {
+		err := c.Sync(ctx)
+		if err != nil {
+			return err
+		}
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		case <-ticker.C:
+		}
+	}
+}
+
+func (c *httpClusterClient) GetVersion(ctx context.Context) (*version.Versions, error) {
+	act := &getAction{Prefix: "/version"}
+
+	resp, body, err := c.Do(ctx, act)
+	if err != nil {
+		return nil, err
+	}
+
+	switch resp.StatusCode {
+	case http.StatusOK:
+		if len(body) == 0 {
+			return nil, ErrEmptyBody
+		}
+		var vresp version.Versions
+		if err := json.Unmarshal(body, &vresp); err != nil {
+			return nil, ErrInvalidJSON
+		}
+		return &vresp, nil
+	default:
+		var etcdErr Error
+		if err := json.Unmarshal(body, &etcdErr); err != nil {
+			return nil, ErrInvalidJSON
+		}
+		return nil, etcdErr
+	}
+}
+
+type roundTripResponse struct {
+	resp *http.Response
+	err  error
+}
+
+type simpleHTTPClient struct {
+	transport     CancelableTransport
+	endpoint      url.URL
+	headerTimeout time.Duration
+}
+
+func (c *simpleHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
+	req := act.HTTPRequest(c.endpoint)
+
+	if err := printcURL(req); err != nil {
+		return nil, nil, err
+	}
+
+	isWait := false
+	if req != nil && req.URL != nil {
+		ws := req.URL.Query().Get("wait")
+		if len(ws) != 0 {
+			var err error
+			isWait, err = strconv.ParseBool(ws)
+			if err != nil {
+				return nil, nil, fmt.Errorf("wrong wait value %s (%v for %+v)", ws, err, req)
+			}
+		}
+	}
+
+	var hctx context.Context
+	var hcancel context.CancelFunc
+	if !isWait && c.headerTimeout > 0 {
+		hctx, hcancel = context.WithTimeout(ctx, c.headerTimeout)
+	} else {
+		hctx, hcancel = context.WithCancel(ctx)
+	}
+	defer hcancel()
+
+	reqcancel := requestCanceler(c.transport, req)
+
+	rtchan := make(chan roundTripResponse, 1)
+	go func() {
+		resp, err := c.transport.RoundTrip(req)
+		rtchan <- roundTripResponse{resp: resp, err: err}
+		close(rtchan)
+	}()
+
+	var resp *http.Response
+	var err error
+
+	select {
+	case rtresp := <-rtchan:
+		resp, err = rtresp.resp, rtresp.err
+	case <-hctx.Done():
+		// cancel and wait for request to actually exit before continuing
+		reqcancel()
+		rtresp := <-rtchan
+		resp = rtresp.resp
+		switch {
+		case ctx.Err() != nil:
+			err = ctx.Err()
+		case hctx.Err() != nil:
+			err = fmt.Errorf("client: endpoint %s exceeded header timeout", c.endpoint.String())
+		default:
+			panic("failed to get error from context")
+		}
+	}
+
+	// always check for resp nil-ness to deal with possible
+	// race conditions between channels above
+	defer func() {
+		if resp != nil {
+			resp.Body.Close()
+		}
+	}()
+
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var body []byte
+	done := make(chan struct{})
+	go func() {
+		body, err = ioutil.ReadAll(resp.Body)
+		done <- struct{}{}
+	}()
+
+	select {
+	case <-ctx.Done():
+		resp.Body.Close()
+		<-done
+		return nil, nil, ctx.Err()
+	case <-done:
+	}
+
+	return resp, body, err
+}
+
+type authedAction struct {
+	act         httpAction
+	credentials credentials
+}
+
+func (a *authedAction) HTTPRequest(url url.URL) *http.Request {
+	r := a.act.HTTPRequest(url)
+	r.SetBasicAuth(a.credentials.username, a.credentials.password)
+	return r
+}
+
+type redirectFollowingHTTPClient struct {
+	client        httpClient
+	checkRedirect CheckRedirectFunc
+}
+
+func (r *redirectFollowingHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
+	next := act
+	for i := 0; i < 100; i++ {
+		if i > 0 {
+			if err := r.checkRedirect(i); err != nil {
+				return nil, nil, err
+			}
+		}
+		resp, body, err := r.client.Do(ctx, next)
+		if err != nil {
+			return nil, nil, err
+		}
+		if resp.StatusCode/100 == 3 {
+			hdr := resp.Header.Get("Location")
+			if hdr == "" {
+				return nil, nil, fmt.Errorf("Location header not set")
+			}
+			loc, err := url.Parse(hdr)
+			if err != nil {
+				return nil, nil, fmt.Errorf("Location header not valid URL: %s", hdr)
+			}
+			next = &redirectedHTTPAction{
+				action:   act,
+				location: *loc,
+			}
+			continue
+		}
+		return resp, body, nil
+	}
+
+	return nil, nil, errTooManyRedirectChecks
+}
+
+type redirectedHTTPAction struct {
+	action   httpAction
+	location url.URL
+}
+
+func (r *redirectedHTTPAction) HTTPRequest(ep url.URL) *http.Request {
+	orig := r.action.HTTPRequest(ep)
+	orig.URL = &r.location
+	return orig
+}
+
+func shuffleEndpoints(r *rand.Rand, eps []url.URL) []url.URL {
+	// copied from rand.Rand.Perm in Go 1.9 and earlier
+	n := len(eps)
+	p := make([]int, n)
+	for i := 0; i < n; i++ {
+		j := r.Intn(i + 1)
+		p[i] = p[j]
+		p[j] = i
+	}
+	neps := make([]url.URL, n)
+	for i, k := range p {
+		neps[i] = eps[k]
+	}
+	return neps
+}
+
+func endpointsEqual(left, right []url.URL) bool {
+	if len(left) != len(right) {
+		return false
+	}
+
+	sLeft := make([]string, len(left))
+	sRight := make([]string, len(right))
+	for i, l := range left {
+		sLeft[i] = l.String()
+	}
+	for i, r := range right {
+		sRight[i] = r.String()
+	}
+
+	sort.Strings(sLeft)
+	sort.Strings(sRight)
+	for i := range sLeft {
+		if sLeft[i] != sRight[i] {
+			return false
+		}
+	}
+	return true
+}
diff --git a/vendor/github.com/coreos/etcd/client/cluster_error.go b/vendor/github.com/coreos/etcd/client/cluster_error.go
new file mode 100644
index 0000000..34618cd
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/client/cluster_error.go
@@ -0,0 +1,37 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import "fmt"
+
+type ClusterError struct {
+	Errors []error
+}
+
+func (ce *ClusterError) Error() string {
+	s := ErrClusterUnavailable.Error()
+	for i, e := range ce.Errors {
+		s += fmt.Sprintf("; error #%d: %s\n", i, e)
+	}
+	return s
+}
+
+func (ce *ClusterError) Detail() string {
+	s := ""
+	for i, e := range ce.Errors {
+		s += fmt.Sprintf("error #%d: %s\n", i, e)
+	}
+	return s
+}
diff --git a/vendor/github.com/coreos/etcd/client/curl.go b/vendor/github.com/coreos/etcd/client/curl.go
new file mode 100644
index 0000000..c8bc9fb
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/client/curl.go
@@ -0,0 +1,70 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"os"
+)
+
+var (
+	cURLDebug = false
+)
+
+func EnablecURLDebug() {
+	cURLDebug = true
+}
+
+func DisablecURLDebug() {
+	cURLDebug = false
+}
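+
+// A typical debugging session (illustrative) calls EnablecURLDebug() before
+// issuing requests, inspects the cURL equivalents printed to stderr, and then
+// calls DisablecURLDebug() to silence the output again.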
+
+// printcURL prints the cURL equivalent request to stderr.
+// It returns an error if the body of the request cannot
+// be read.
+// The caller MUST cancel the request if there is an error.
+func printcURL(req *http.Request) error {
+	if !cURLDebug {
+		return nil
+	}
+	var (
+		command string
+		b       []byte
+		err     error
+	)
+
+	if req.URL != nil {
+		command = fmt.Sprintf("curl -X %s %s", req.Method, req.URL.String())
+	}
+
+	if req.Body != nil {
+		b, err = ioutil.ReadAll(req.Body)
+		if err != nil {
+			return err
+		}
+		command += fmt.Sprintf(" -d %q", string(b))
+	}
+
+	fmt.Fprintf(os.Stderr, "cURL Command: %s\n", command)
+
+	// reset body
+	body := bytes.NewBuffer(b)
+	req.Body = ioutil.NopCloser(body)
+
+	return nil
+}
diff --git a/vendor/github.com/coreos/etcd/client/discover.go b/vendor/github.com/coreos/etcd/client/discover.go
new file mode 100644
index 0000000..442e35f
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/client/discover.go
@@ -0,0 +1,40 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import (
+	"github.com/coreos/etcd/pkg/srv"
+)
+
+// Discoverer is an interface that wraps the Discover method.
+type Discoverer interface {
+	// Discover looks up the etcd servers for the domain.
+	Discover(domain string) ([]string, error)
+}
+
+type srvDiscover struct{}
+
+// NewSRVDiscover constructs a new Discoverer that uses the stdlib to look up SRV records.
+func NewSRVDiscover() Discoverer {
+	return &srvDiscover{}
+}
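+
+// A minimal usage sketch (illustrative; assumes SRV records are published
+// for the hypothetical domain "example.com"):
+//
+//	d := NewSRVDiscover()
+//	eps, err := d.Discover("example.com")
+//	if err != nil {
+//		// handle error
+//	}
+//	cfg := Config{Endpoints: eps}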
+
+func (d *srvDiscover) Discover(domain string) ([]string, error) {
+	srvs, err := srv.GetClient("etcd-client", domain)
+	if err != nil {
+		return nil, err
+	}
+	return srvs.Endpoints, nil
+}
diff --git a/vendor/github.com/coreos/etcd/client/doc.go b/vendor/github.com/coreos/etcd/client/doc.go
new file mode 100644
index 0000000..ad4eca4
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/client/doc.go
@@ -0,0 +1,73 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package client provides bindings for the etcd APIs.
+
+Create a Config and exchange it for a Client:
+
+	import (
+		"net/http"
+		"context"
+
+		"github.com/coreos/etcd/client"
+	)
+
+	cfg := client.Config{
+		Endpoints: []string{"http://127.0.0.1:2379"},
+		Transport: client.DefaultTransport,
+	}
+
+	c, err := client.New(cfg)
+	if err != nil {
+		// handle error
+	}
+
+Clients are safe for concurrent use by multiple goroutines.
+
+Create a KeysAPI using the Client, then use it to interact with etcd:
+
+	kAPI := client.NewKeysAPI(c)
+
+	// create a new key /foo with the value "bar"
+	_, err = kAPI.Create(context.Background(), "/foo", "bar")
+	if err != nil {
+		// handle error
+	}
+
+	// delete the newly created key only if the value is still "bar"
+	_, err = kAPI.Delete(context.Background(), "/foo", &client.DeleteOptions{PrevValue: "bar"})
+	if err != nil {
+		// handle error
+	}
+
+Use a custom context to set timeouts on your operations:
+
+	import "time"
+
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+
+	// set a new key, ignoring its previous state
+	_, err := kAPI.Set(ctx, "/ping", "pong", nil)
+	if err != nil {
+		if err == context.DeadlineExceeded {
+			// request took longer than 5s
+		} else {
+			// handle error
+		}
+	}
+
+*/
+package client
diff --git a/vendor/github.com/coreos/etcd/client/keys.generated.go b/vendor/github.com/coreos/etcd/client/keys.generated.go
new file mode 100644
index 0000000..237fdbe
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/client/keys.generated.go
@@ -0,0 +1,5218 @@
+// ************************************************************
+// DO NOT EDIT.
+// THIS FILE IS AUTO-GENERATED BY codecgen.
+// ************************************************************
+
+package client
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"runtime"
+	time "time"
+
+	codec1978 "github.com/ugorji/go/codec"
+)
+
+const (
+	// ----- content types ----
+	codecSelferC_UTF87612 = 1
+	codecSelferC_RAW7612  = 0
+	// ----- value types used ----
+	codecSelferValueTypeArray7612 = 10
+	codecSelferValueTypeMap7612   = 9
+	// ----- containerStateValues ----
+	codecSelfer_containerMapKey7612    = 2
+	codecSelfer_containerMapValue7612  = 3
+	codecSelfer_containerMapEnd7612    = 4
+	codecSelfer_containerArrayElem7612 = 6
+	codecSelfer_containerArrayEnd7612  = 7
+)
+
+var (
+	codecSelferBitsize7612                         = uint8(reflect.TypeOf(uint(0)).Bits())
+	codecSelferOnlyMapOrArrayEncodeToStructErr7612 = errors.New(`only encoded map or array can be decoded into a struct`)
+)
+
+type codecSelfer7612 struct{}
+
+func init() {
+	if codec1978.GenVersion != 8 {
+		_, file, _, _ := runtime.Caller(0)
+		err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v",
+			8, codec1978.GenVersion, file)
+		panic(err)
+	}
+	if false { // reference the types, but skip this branch at build/run time
+		var v0 time.Duration
+		_ = v0
+	}
+}
+
+func (x *Error) CodecEncodeSelf(e *codec1978.Encoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperEncoder(e)
+	_, _, _ = h, z, r
+	if x == nil {
+		r.EncodeNil()
+	} else {
+		yym1 := z.EncBinary()
+		_ = yym1
+		if false {
+		} else if z.HasExtensions() && z.EncExt(x) {
+		} else {
+			yysep2 := !z.EncBinary()
+			yy2arr2 := z.EncBasicHandle().StructToArray
+			_, _ = yysep2, yy2arr2
+			const yyr2 bool = false
+			if yyr2 || yy2arr2 {
+				r.WriteArrayStart(4)
+			} else {
+				r.WriteMapStart(4)
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				yym4 := z.EncBinary()
+				_ = yym4
+				if false {
+				} else {
+					r.EncodeInt(int64(x.Code))
+				}
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("errorCode"))
+				r.WriteMapElemValue()
+				yym5 := z.EncBinary()
+				_ = yym5
+				if false {
+				} else {
+					r.EncodeInt(int64(x.Code))
+				}
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				yym7 := z.EncBinary()
+				_ = yym7
+				if false {
+				} else {
+					r.EncodeString(codecSelferC_UTF87612, string(x.Message))
+				}
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("message"))
+				r.WriteMapElemValue()
+				yym8 := z.EncBinary()
+				_ = yym8
+				if false {
+				} else {
+					r.EncodeString(codecSelferC_UTF87612, string(x.Message))
+				}
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				yym10 := z.EncBinary()
+				_ = yym10
+				if false {
+				} else {
+					r.EncodeString(codecSelferC_UTF87612, string(x.Cause))
+				}
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("cause"))
+				r.WriteMapElemValue()
+				yym11 := z.EncBinary()
+				_ = yym11
+				if false {
+				} else {
+					r.EncodeString(codecSelferC_UTF87612, string(x.Cause))
+				}
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				yym13 := z.EncBinary()
+				_ = yym13
+				if false {
+				} else {
+					r.EncodeUint(uint64(x.Index))
+				}
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("index"))
+				r.WriteMapElemValue()
+				yym14 := z.EncBinary()
+				_ = yym14
+				if false {
+				} else {
+					r.EncodeUint(uint64(x.Index))
+				}
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayEnd()
+			} else {
+				r.WriteMapEnd()
+			}
+		}
+	}
+}
+
+func (x *Error) CodecDecodeSelf(d *codec1978.Decoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperDecoder(d)
+	_, _, _ = h, z, r
+	yym1 := z.DecBinary()
+	_ = yym1
+	if false {
+	} else if z.HasExtensions() && z.DecExt(x) {
+	} else {
+		yyct2 := r.ContainerType()
+		if yyct2 == codecSelferValueTypeMap7612 {
+			yyl2 := r.ReadMapStart()
+			if yyl2 == 0 {
+				r.ReadMapEnd()
+			} else {
+				x.codecDecodeSelfFromMap(yyl2, d)
+			}
+		} else if yyct2 == codecSelferValueTypeArray7612 {
+			yyl2 := r.ReadArrayStart()
+			if yyl2 == 0 {
+				r.ReadArrayEnd()
+			} else {
+				x.codecDecodeSelfFromArray(yyl2, d)
+			}
+		} else {
+			panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612)
+		}
+	}
+}
+
+func (x *Error) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperDecoder(d)
+	_, _, _ = h, z, r
+	var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+	_ = yys3Slc
+	var yyhl3 bool = l >= 0
+	for yyj3 := 0; ; yyj3++ {
+		if yyhl3 {
+			if yyj3 >= l {
+				break
+			}
+		} else {
+			if r.CheckBreak() {
+				break
+			}
+		}
+		r.ReadMapElemKey()
+		yys3Slc = r.DecodeStringAsBytes()
+		yys3 := string(yys3Slc)
+		r.ReadMapElemValue()
+		switch yys3 {
+		case "errorCode":
+			if r.TryDecodeAsNil() {
+				x.Code = 0
+			} else {
+				yyv4 := &x.Code
+				yym5 := z.DecBinary()
+				_ = yym5
+				if false {
+				} else {
+					*((*int)(yyv4)) = int(r.DecodeInt(codecSelferBitsize7612))
+				}
+			}
+		case "message":
+			if r.TryDecodeAsNil() {
+				x.Message = ""
+			} else {
+				yyv6 := &x.Message
+				yym7 := z.DecBinary()
+				_ = yym7
+				if false {
+				} else {
+					*((*string)(yyv6)) = r.DecodeString()
+				}
+			}
+		case "cause":
+			if r.TryDecodeAsNil() {
+				x.Cause = ""
+			} else {
+				yyv8 := &x.Cause
+				yym9 := z.DecBinary()
+				_ = yym9
+				if false {
+				} else {
+					*((*string)(yyv8)) = r.DecodeString()
+				}
+			}
+		case "index":
+			if r.TryDecodeAsNil() {
+				x.Index = 0
+			} else {
+				yyv10 := &x.Index
+				yym11 := z.DecBinary()
+				_ = yym11
+				if false {
+				} else {
+					*((*uint64)(yyv10)) = uint64(r.DecodeUint(64))
+				}
+			}
+		default:
+			z.DecStructFieldNotFound(-1, yys3)
+		} // end switch yys3
+	} // end for yyj3
+	r.ReadMapEnd()
+}
+
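+// The array-form decoder reads the same fields positionally. A short array
+// returns early and leaves the remaining fields untouched, while trailing
+// elements beyond the known fields are drained via DecStructFieldNotFound.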
+func (x *Error) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperDecoder(d)
+	_, _, _ = h, z, r
+	var yyj12 int
+	var yyb12 bool
+	var yyhl12 bool = l >= 0
+	yyj12++
+	if yyhl12 {
+		yyb12 = yyj12 > l
+	} else {
+		yyb12 = r.CheckBreak()
+	}
+	if yyb12 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.Code = 0
+	} else {
+		yyv13 := &x.Code
+		yym14 := z.DecBinary()
+		_ = yym14
+		if false {
+		} else {
+			*((*int)(yyv13)) = int(r.DecodeInt(codecSelferBitsize7612))
+		}
+	}
+	yyj12++
+	if yyhl12 {
+		yyb12 = yyj12 > l
+	} else {
+		yyb12 = r.CheckBreak()
+	}
+	if yyb12 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.Message = ""
+	} else {
+		yyv15 := &x.Message
+		yym16 := z.DecBinary()
+		_ = yym16
+		if false {
+		} else {
+			*((*string)(yyv15)) = r.DecodeString()
+		}
+	}
+	yyj12++
+	if yyhl12 {
+		yyb12 = yyj12 > l
+	} else {
+		yyb12 = r.CheckBreak()
+	}
+	if yyb12 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.Cause = ""
+	} else {
+		yyv17 := &x.Cause
+		yym18 := z.DecBinary()
+		_ = yym18
+		if false {
+		} else {
+			*((*string)(yyv17)) = r.DecodeString()
+		}
+	}
+	yyj12++
+	if yyhl12 {
+		yyb12 = yyj12 > l
+	} else {
+		yyb12 = r.CheckBreak()
+	}
+	if yyb12 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.Index = 0
+	} else {
+		yyv19 := &x.Index
+		yym20 := z.DecBinary()
+		_ = yym20
+		if false {
+		} else {
+			*((*uint64)(yyv19)) = uint64(r.DecodeUint(64))
+		}
+	}
+	for {
+		yyj12++
+		if yyhl12 {
+			yyb12 = yyj12 > l
+		} else {
+			yyb12 = r.CheckBreak()
+		}
+		if yyb12 {
+			break
+		}
+		r.ReadArrayElem()
+		z.DecStructFieldNotFound(yyj12-1, "")
+	}
+	r.ReadArrayEnd()
+}
+
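+// PrevExistType is a plain string alias, so it round-trips as a bare string
+// rather than as a map or array container.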
+func (x PrevExistType) CodecEncodeSelf(e *codec1978.Encoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperEncoder(e)
+	_, _, _ = h, z, r
+	yym1 := z.EncBinary()
+	_ = yym1
+	if false {
+	} else if z.HasExtensions() && z.EncExt(x) {
+	} else {
+		r.EncodeString(codecSelferC_UTF87612, string(x))
+	}
+}
+
+func (x *PrevExistType) CodecDecodeSelf(d *codec1978.Decoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperDecoder(d)
+	_, _, _ = h, z, r
+	yym1 := z.DecBinary()
+	_ = yym1
+	if false {
+	} else if z.HasExtensions() && z.DecExt(x) {
+	} else {
+		*((*string)(x)) = r.DecodeString()
+	}
+}
+
+func (x *WatcherOptions) CodecEncodeSelf(e *codec1978.Encoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperEncoder(e)
+	_, _, _ = h, z, r
+	if x == nil {
+		r.EncodeNil()
+	} else {
+		yym1 := z.EncBinary()
+		_ = yym1
+		if false {
+		} else if z.HasExtensions() && z.EncExt(x) {
+		} else {
+			yysep2 := !z.EncBinary()
+			yy2arr2 := z.EncBasicHandle().StructToArray
+			_, _ = yysep2, yy2arr2
+			const yyr2 bool = false
+			if yyr2 || yy2arr2 {
+				r.WriteArrayStart(2)
+			} else {
+				r.WriteMapStart(2)
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				yym4 := z.EncBinary()
+				_ = yym4
+				if false {
+				} else {
+					r.EncodeUint(uint64(x.AfterIndex))
+				}
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("AfterIndex"))
+				r.WriteMapElemValue()
+				yym5 := z.EncBinary()
+				_ = yym5
+				if false {
+				} else {
+					r.EncodeUint(uint64(x.AfterIndex))
+				}
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				yym7 := z.EncBinary()
+				_ = yym7
+				if false {
+				} else {
+					r.EncodeBool(bool(x.Recursive))
+				}
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("Recursive"))
+				r.WriteMapElemValue()
+				yym8 := z.EncBinary()
+				_ = yym8
+				if false {
+				} else {
+					r.EncodeBool(bool(x.Recursive))
+				}
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayEnd()
+			} else {
+				r.WriteMapEnd()
+			}
+		}
+	}
+}
+
+func (x *WatcherOptions) CodecDecodeSelf(d *codec1978.Decoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperDecoder(d)
+	_, _, _ = h, z, r
+	yym1 := z.DecBinary()
+	_ = yym1
+	if false {
+	} else if z.HasExtensions() && z.DecExt(x) {
+	} else {
+		yyct2 := r.ContainerType()
+		if yyct2 == codecSelferValueTypeMap7612 {
+			yyl2 := r.ReadMapStart()
+			if yyl2 == 0 {
+				r.ReadMapEnd()
+			} else {
+				x.codecDecodeSelfFromMap(yyl2, d)
+			}
+		} else if yyct2 == codecSelferValueTypeArray7612 {
+			yyl2 := r.ReadArrayStart()
+			if yyl2 == 0 {
+				r.ReadArrayEnd()
+			} else {
+				x.codecDecodeSelfFromArray(yyl2, d)
+			}
+		} else {
+			panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612)
+		}
+	}
+}
+
+func (x *WatcherOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperDecoder(d)
+	_, _, _ = h, z, r
+	var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+	_ = yys3Slc
+	var yyhl3 bool = l >= 0
+	for yyj3 := 0; ; yyj3++ {
+		if yyhl3 {
+			if yyj3 >= l {
+				break
+			}
+		} else {
+			if r.CheckBreak() {
+				break
+			}
+		}
+		r.ReadMapElemKey()
+		yys3Slc = r.DecodeStringAsBytes()
+		yys3 := string(yys3Slc)
+		r.ReadMapElemValue()
+		switch yys3 {
+		case "AfterIndex":
+			if r.TryDecodeAsNil() {
+				x.AfterIndex = 0
+			} else {
+				yyv4 := &x.AfterIndex
+				yym5 := z.DecBinary()
+				_ = yym5
+				if false {
+				} else {
+					*((*uint64)(yyv4)) = uint64(r.DecodeUint(64))
+				}
+			}
+		case "Recursive":
+			if r.TryDecodeAsNil() {
+				x.Recursive = false
+			} else {
+				yyv6 := &x.Recursive
+				yym7 := z.DecBinary()
+				_ = yym7
+				if false {
+				} else {
+					*((*bool)(yyv6)) = r.DecodeBool()
+				}
+			}
+		default:
+			z.DecStructFieldNotFound(-1, yys3)
+		} // end switch yys3
+	} // end for yyj3
+	r.ReadMapEnd()
+}
+
+func (x *WatcherOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperDecoder(d)
+	_, _, _ = h, z, r
+	var yyj8 int
+	var yyb8 bool
+	var yyhl8 bool = l >= 0
+	yyj8++
+	if yyhl8 {
+		yyb8 = yyj8 > l
+	} else {
+		yyb8 = r.CheckBreak()
+	}
+	if yyb8 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.AfterIndex = 0
+	} else {
+		yyv9 := &x.AfterIndex
+		yym10 := z.DecBinary()
+		_ = yym10
+		if false {
+		} else {
+			*((*uint64)(yyv9)) = uint64(r.DecodeUint(64))
+		}
+	}
+	yyj8++
+	if yyhl8 {
+		yyb8 = yyj8 > l
+	} else {
+		yyb8 = r.CheckBreak()
+	}
+	if yyb8 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.Recursive = false
+	} else {
+		yyv11 := &x.Recursive
+		yym12 := z.DecBinary()
+		_ = yym12
+		if false {
+		} else {
+			*((*bool)(yyv11)) = r.DecodeBool()
+		}
+	}
+	for {
+		yyj8++
+		if yyhl8 {
+			yyb8 = yyj8 > l
+		} else {
+			yyb8 = r.CheckBreak()
+		}
+		if yyb8 {
+			break
+		}
+		r.ReadArrayElem()
+		z.DecStructFieldNotFound(yyj8-1, "")
+	}
+	r.ReadArrayEnd()
+}
+
+func (x *CreateInOrderOptions) CodecEncodeSelf(e *codec1978.Encoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperEncoder(e)
+	_, _, _ = h, z, r
+	if x == nil {
+		r.EncodeNil()
+	} else {
+		yym1 := z.EncBinary()
+		_ = yym1
+		if false {
+		} else if z.HasExtensions() && z.EncExt(x) {
+		} else {
+			yysep2 := !z.EncBinary()
+			yy2arr2 := z.EncBasicHandle().StructToArray
+			_, _ = yysep2, yy2arr2
+			const yyr2 bool = false
+			if yyr2 || yy2arr2 {
+				r.WriteArrayStart(1)
+			} else {
+				r.WriteMapStart(1)
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				yym4 := z.EncBinary()
+				_ = yym4
+				if false {
+				} else if z.HasExtensions() && z.EncExt(x.TTL) {
+				} else {
+					r.EncodeInt(int64(x.TTL))
+				}
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("TTL"))
+				r.WriteMapElemValue()
+				yym5 := z.EncBinary()
+				_ = yym5
+				if false {
+				} else if z.HasExtensions() && z.EncExt(x.TTL) {
+				} else {
+					r.EncodeInt(int64(x.TTL))
+				}
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayEnd()
+			} else {
+				r.WriteMapEnd()
+			}
+		}
+	}
+}
+
+func (x *CreateInOrderOptions) CodecDecodeSelf(d *codec1978.Decoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperDecoder(d)
+	_, _, _ = h, z, r
+	yym1 := z.DecBinary()
+	_ = yym1
+	if false {
+	} else if z.HasExtensions() && z.DecExt(x) {
+	} else {
+		yyct2 := r.ContainerType()
+		if yyct2 == codecSelferValueTypeMap7612 {
+			yyl2 := r.ReadMapStart()
+			if yyl2 == 0 {
+				r.ReadMapEnd()
+			} else {
+				x.codecDecodeSelfFromMap(yyl2, d)
+			}
+		} else if yyct2 == codecSelferValueTypeArray7612 {
+			yyl2 := r.ReadArrayStart()
+			if yyl2 == 0 {
+				r.ReadArrayEnd()
+			} else {
+				x.codecDecodeSelfFromArray(yyl2, d)
+			}
+		} else {
+			panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612)
+		}
+	}
+}
+
+func (x *CreateInOrderOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperDecoder(d)
+	_, _, _ = h, z, r
+	var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+	_ = yys3Slc
+	var yyhl3 bool = l >= 0
+	for yyj3 := 0; ; yyj3++ {
+		if yyhl3 {
+			if yyj3 >= l {
+				break
+			}
+		} else {
+			if r.CheckBreak() {
+				break
+			}
+		}
+		r.ReadMapElemKey()
+		yys3Slc = r.DecodeStringAsBytes()
+		yys3 := string(yys3Slc)
+		r.ReadMapElemValue()
+		switch yys3 {
+		case "TTL":
+			if r.TryDecodeAsNil() {
+				x.TTL = 0
+			} else {
+				yyv4 := &x.TTL
+				yym5 := z.DecBinary()
+				_ = yym5
+				if false {
+				} else if z.HasExtensions() && z.DecExt(yyv4) {
+				} else {
+					*((*int64)(yyv4)) = int64(r.DecodeInt(64))
+				}
+			}
+		default:
+			z.DecStructFieldNotFound(-1, yys3)
+		} // end switch yys3
+	} // end for yyj3
+	r.ReadMapEnd()
+}
+
+func (x *CreateInOrderOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperDecoder(d)
+	_, _, _ = h, z, r
+	var yyj6 int
+	var yyb6 bool
+	var yyhl6 bool = l >= 0
+	yyj6++
+	if yyhl6 {
+		yyb6 = yyj6 > l
+	} else {
+		yyb6 = r.CheckBreak()
+	}
+	if yyb6 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.TTL = 0
+	} else {
+		yyv7 := &x.TTL
+		yym8 := z.DecBinary()
+		_ = yym8
+		if false {
+		} else if z.HasExtensions() && z.DecExt(yyv7) {
+		} else {
+			*((*int64)(yyv7)) = int64(r.DecodeInt(64))
+		}
+	}
+	for {
+		yyj6++
+		if yyhl6 {
+			yyb6 = yyj6 > l
+		} else {
+			yyb6 = r.CheckBreak()
+		}
+		if yyb6 {
+			break
+		}
+		r.ReadArrayElem()
+		z.DecStructFieldNotFound(yyj6-1, "")
+	}
+	r.ReadArrayEnd()
+}
+
+func (x *SetOptions) CodecEncodeSelf(e *codec1978.Encoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperEncoder(e)
+	_, _, _ = h, z, r
+	if x == nil {
+		r.EncodeNil()
+	} else {
+		yym1 := z.EncBinary()
+		_ = yym1
+		if false {
+		} else if z.HasExtensions() && z.EncExt(x) {
+		} else {
+			yysep2 := !z.EncBinary()
+			yy2arr2 := z.EncBasicHandle().StructToArray
+			_, _ = yysep2, yy2arr2
+			const yyr2 bool = false
+			if yyr2 || yy2arr2 {
+				r.WriteArrayStart(7)
+			} else {
+				r.WriteMapStart(7)
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				yym4 := z.EncBinary()
+				_ = yym4
+				if false {
+				} else {
+					r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue))
+				}
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("PrevValue"))
+				r.WriteMapElemValue()
+				yym5 := z.EncBinary()
+				_ = yym5
+				if false {
+				} else {
+					r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue))
+				}
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				yym7 := z.EncBinary()
+				_ = yym7
+				if false {
+				} else {
+					r.EncodeUint(uint64(x.PrevIndex))
+				}
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("PrevIndex"))
+				r.WriteMapElemValue()
+				yym8 := z.EncBinary()
+				_ = yym8
+				if false {
+				} else {
+					r.EncodeUint(uint64(x.PrevIndex))
+				}
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				x.PrevExist.CodecEncodeSelf(e)
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("PrevExist"))
+				r.WriteMapElemValue()
+				x.PrevExist.CodecEncodeSelf(e)
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				yym13 := z.EncBinary()
+				_ = yym13
+				if false {
+				} else if z.HasExtensions() && z.EncExt(x.TTL) {
+				} else {
+					r.EncodeInt(int64(x.TTL))
+				}
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("TTL"))
+				r.WriteMapElemValue()
+				yym14 := z.EncBinary()
+				_ = yym14
+				if false {
+				} else if z.HasExtensions() && z.EncExt(x.TTL) {
+				} else {
+					r.EncodeInt(int64(x.TTL))
+				}
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				yym16 := z.EncBinary()
+				_ = yym16
+				if false {
+				} else {
+					r.EncodeBool(bool(x.Refresh))
+				}
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("Refresh"))
+				r.WriteMapElemValue()
+				yym17 := z.EncBinary()
+				_ = yym17
+				if false {
+				} else {
+					r.EncodeBool(bool(x.Refresh))
+				}
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				yym19 := z.EncBinary()
+				_ = yym19
+				if false {
+				} else {
+					r.EncodeBool(bool(x.Dir))
+				}
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("Dir"))
+				r.WriteMapElemValue()
+				yym20 := z.EncBinary()
+				_ = yym20
+				if false {
+				} else {
+					r.EncodeBool(bool(x.Dir))
+				}
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				yym22 := z.EncBinary()
+				_ = yym22
+				if false {
+				} else {
+					r.EncodeBool(bool(x.NoValueOnSuccess))
+				}
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("NoValueOnSuccess"))
+				r.WriteMapElemValue()
+				yym23 := z.EncBinary()
+				_ = yym23
+				if false {
+				} else {
+					r.EncodeBool(bool(x.NoValueOnSuccess))
+				}
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayEnd()
+			} else {
+				r.WriteMapEnd()
+			}
+		}
+	}
+}
+
+func (x *SetOptions) CodecDecodeSelf(d *codec1978.Decoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperDecoder(d)
+	_, _, _ = h, z, r
+	yym1 := z.DecBinary()
+	_ = yym1
+	if false {
+	} else if z.HasExtensions() && z.DecExt(x) {
+	} else {
+		yyct2 := r.ContainerType()
+		if yyct2 == codecSelferValueTypeMap7612 {
+			yyl2 := r.ReadMapStart()
+			if yyl2 == 0 {
+				r.ReadMapEnd()
+			} else {
+				x.codecDecodeSelfFromMap(yyl2, d)
+			}
+		} else if yyct2 == codecSelferValueTypeArray7612 {
+			yyl2 := r.ReadArrayStart()
+			if yyl2 == 0 {
+				r.ReadArrayEnd()
+			} else {
+				x.codecDecodeSelfFromArray(yyl2, d)
+			}
+		} else {
+			panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612)
+		}
+	}
+}
+
+func (x *SetOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperDecoder(d)
+	_, _, _ = h, z, r
+	var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+	_ = yys3Slc
+	var yyhl3 bool = l >= 0
+	for yyj3 := 0; ; yyj3++ {
+		if yyhl3 {
+			if yyj3 >= l {
+				break
+			}
+		} else {
+			if r.CheckBreak() {
+				break
+			}
+		}
+		r.ReadMapElemKey()
+		yys3Slc = r.DecodeStringAsBytes()
+		yys3 := string(yys3Slc)
+		r.ReadMapElemValue()
+		switch yys3 {
+		case "PrevValue":
+			if r.TryDecodeAsNil() {
+				x.PrevValue = ""
+			} else {
+				yyv4 := &x.PrevValue
+				yym5 := z.DecBinary()
+				_ = yym5
+				if false {
+				} else {
+					*((*string)(yyv4)) = r.DecodeString()
+				}
+			}
+		case "PrevIndex":
+			if r.TryDecodeAsNil() {
+				x.PrevIndex = 0
+			} else {
+				yyv6 := &x.PrevIndex
+				yym7 := z.DecBinary()
+				_ = yym7
+				if false {
+				} else {
+					*((*uint64)(yyv6)) = uint64(r.DecodeUint(64))
+				}
+			}
+		case "PrevExist":
+			if r.TryDecodeAsNil() {
+				x.PrevExist = ""
+			} else {
+				yyv8 := &x.PrevExist
+				yyv8.CodecDecodeSelf(d)
+			}
+		case "TTL":
+			if r.TryDecodeAsNil() {
+				x.TTL = 0
+			} else {
+				yyv9 := &x.TTL
+				yym10 := z.DecBinary()
+				_ = yym10
+				if false {
+				} else if z.HasExtensions() && z.DecExt(yyv9) {
+				} else {
+					*((*int64)(yyv9)) = int64(r.DecodeInt(64))
+				}
+			}
+		case "Refresh":
+			if r.TryDecodeAsNil() {
+				x.Refresh = false
+			} else {
+				yyv11 := &x.Refresh
+				yym12 := z.DecBinary()
+				_ = yym12
+				if false {
+				} else {
+					*((*bool)(yyv11)) = r.DecodeBool()
+				}
+			}
+		case "Dir":
+			if r.TryDecodeAsNil() {
+				x.Dir = false
+			} else {
+				yyv13 := &x.Dir
+				yym14 := z.DecBinary()
+				_ = yym14
+				if false {
+				} else {
+					*((*bool)(yyv13)) = r.DecodeBool()
+				}
+			}
+		case "NoValueOnSuccess":
+			if r.TryDecodeAsNil() {
+				x.NoValueOnSuccess = false
+			} else {
+				yyv15 := &x.NoValueOnSuccess
+				yym16 := z.DecBinary()
+				_ = yym16
+				if false {
+				} else {
+					*((*bool)(yyv15)) = r.DecodeBool()
+				}
+			}
+		default:
+			z.DecStructFieldNotFound(-1, yys3)
+		} // end switch yys3
+	} // end for yyj3
+	r.ReadMapEnd()
+}
+
+func (x *SetOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperDecoder(d)
+	_, _, _ = h, z, r
+	var yyj17 int
+	var yyb17 bool
+	var yyhl17 bool = l >= 0
+	yyj17++
+	if yyhl17 {
+		yyb17 = yyj17 > l
+	} else {
+		yyb17 = r.CheckBreak()
+	}
+	if yyb17 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.PrevValue = ""
+	} else {
+		yyv18 := &x.PrevValue
+		yym19 := z.DecBinary()
+		_ = yym19
+		if false {
+		} else {
+			*((*string)(yyv18)) = r.DecodeString()
+		}
+	}
+	yyj17++
+	if yyhl17 {
+		yyb17 = yyj17 > l
+	} else {
+		yyb17 = r.CheckBreak()
+	}
+	if yyb17 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.PrevIndex = 0
+	} else {
+		yyv20 := &x.PrevIndex
+		yym21 := z.DecBinary()
+		_ = yym21
+		if false {
+		} else {
+			*((*uint64)(yyv20)) = uint64(r.DecodeUint(64))
+		}
+	}
+	yyj17++
+	if yyhl17 {
+		yyb17 = yyj17 > l
+	} else {
+		yyb17 = r.CheckBreak()
+	}
+	if yyb17 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.PrevExist = ""
+	} else {
+		yyv22 := &x.PrevExist
+		yyv22.CodecDecodeSelf(d)
+	}
+	yyj17++
+	if yyhl17 {
+		yyb17 = yyj17 > l
+	} else {
+		yyb17 = r.CheckBreak()
+	}
+	if yyb17 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.TTL = 0
+	} else {
+		yyv23 := &x.TTL
+		yym24 := z.DecBinary()
+		_ = yym24
+		if false {
+		} else if z.HasExtensions() && z.DecExt(yyv23) {
+		} else {
+			*((*int64)(yyv23)) = int64(r.DecodeInt(64))
+		}
+	}
+	yyj17++
+	if yyhl17 {
+		yyb17 = yyj17 > l
+	} else {
+		yyb17 = r.CheckBreak()
+	}
+	if yyb17 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.Refresh = false
+	} else {
+		yyv25 := &x.Refresh
+		yym26 := z.DecBinary()
+		_ = yym26
+		if false {
+		} else {
+			*((*bool)(yyv25)) = r.DecodeBool()
+		}
+	}
+	yyj17++
+	if yyhl17 {
+		yyb17 = yyj17 > l
+	} else {
+		yyb17 = r.CheckBreak()
+	}
+	if yyb17 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.Dir = false
+	} else {
+		yyv27 := &x.Dir
+		yym28 := z.DecBinary()
+		_ = yym28
+		if false {
+		} else {
+			*((*bool)(yyv27)) = r.DecodeBool()
+		}
+	}
+	yyj17++
+	if yyhl17 {
+		yyb17 = yyj17 > l
+	} else {
+		yyb17 = r.CheckBreak()
+	}
+	if yyb17 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.NoValueOnSuccess = false
+	} else {
+		yyv29 := &x.NoValueOnSuccess
+		yym30 := z.DecBinary()
+		_ = yym30
+		if false {
+		} else {
+			*((*bool)(yyv29)) = r.DecodeBool()
+		}
+	}
+	for {
+		yyj17++
+		if yyhl17 {
+			yyb17 = yyj17 > l
+		} else {
+			yyb17 = r.CheckBreak()
+		}
+		if yyb17 {
+			break
+		}
+		r.ReadArrayElem()
+		z.DecStructFieldNotFound(yyj17-1, "")
+	}
+	r.ReadArrayEnd()
+}
+
+func (x *GetOptions) CodecEncodeSelf(e *codec1978.Encoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperEncoder(e)
+	_, _, _ = h, z, r
+	if x == nil {
+		r.EncodeNil()
+	} else {
+		yym1 := z.EncBinary()
+		_ = yym1
+		if false {
+		} else if z.HasExtensions() && z.EncExt(x) {
+		} else {
+			yysep2 := !z.EncBinary()
+			yy2arr2 := z.EncBasicHandle().StructToArray
+			_, _ = yysep2, yy2arr2
+			const yyr2 bool = false
+			if yyr2 || yy2arr2 {
+				r.WriteArrayStart(3)
+			} else {
+				r.WriteMapStart(3)
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				yym4 := z.EncBinary()
+				_ = yym4
+				if false {
+				} else {
+					r.EncodeBool(bool(x.Recursive))
+				}
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("Recursive"))
+				r.WriteMapElemValue()
+				yym5 := z.EncBinary()
+				_ = yym5
+				if false {
+				} else {
+					r.EncodeBool(bool(x.Recursive))
+				}
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				yym7 := z.EncBinary()
+				_ = yym7
+				if false {
+				} else {
+					r.EncodeBool(bool(x.Sort))
+				}
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("Sort"))
+				r.WriteMapElemValue()
+				yym8 := z.EncBinary()
+				_ = yym8
+				if false {
+				} else {
+					r.EncodeBool(bool(x.Sort))
+				}
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				yym10 := z.EncBinary()
+				_ = yym10
+				if false {
+				} else {
+					r.EncodeBool(bool(x.Quorum))
+				}
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("Quorum"))
+				r.WriteMapElemValue()
+				yym11 := z.EncBinary()
+				_ = yym11
+				if false {
+				} else {
+					r.EncodeBool(bool(x.Quorum))
+				}
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayEnd()
+			} else {
+				r.WriteMapEnd()
+			}
+		}
+	}
+}
+
+func (x *GetOptions) CodecDecodeSelf(d *codec1978.Decoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperDecoder(d)
+	_, _, _ = h, z, r
+	yym1 := z.DecBinary()
+	_ = yym1
+	if false {
+	} else if z.HasExtensions() && z.DecExt(x) {
+	} else {
+		yyct2 := r.ContainerType()
+		if yyct2 == codecSelferValueTypeMap7612 {
+			yyl2 := r.ReadMapStart()
+			if yyl2 == 0 {
+				r.ReadMapEnd()
+			} else {
+				x.codecDecodeSelfFromMap(yyl2, d)
+			}
+		} else if yyct2 == codecSelferValueTypeArray7612 {
+			yyl2 := r.ReadArrayStart()
+			if yyl2 == 0 {
+				r.ReadArrayEnd()
+			} else {
+				x.codecDecodeSelfFromArray(yyl2, d)
+			}
+		} else {
+			panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612)
+		}
+	}
+}
+
+func (x *GetOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperDecoder(d)
+	_, _, _ = h, z, r
+	var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+	_ = yys3Slc
+	var yyhl3 bool = l >= 0
+	for yyj3 := 0; ; yyj3++ {
+		if yyhl3 {
+			if yyj3 >= l {
+				break
+			}
+		} else {
+			if r.CheckBreak() {
+				break
+			}
+		}
+		r.ReadMapElemKey()
+		yys3Slc = r.DecodeStringAsBytes()
+		yys3 := string(yys3Slc)
+		r.ReadMapElemValue()
+		switch yys3 {
+		case "Recursive":
+			if r.TryDecodeAsNil() {
+				x.Recursive = false
+			} else {
+				yyv4 := &x.Recursive
+				yym5 := z.DecBinary()
+				_ = yym5
+				if false {
+				} else {
+					*((*bool)(yyv4)) = r.DecodeBool()
+				}
+			}
+		case "Sort":
+			if r.TryDecodeAsNil() {
+				x.Sort = false
+			} else {
+				yyv6 := &x.Sort
+				yym7 := z.DecBinary()
+				_ = yym7
+				if false {
+				} else {
+					*((*bool)(yyv6)) = r.DecodeBool()
+				}
+			}
+		case "Quorum":
+			if r.TryDecodeAsNil() {
+				x.Quorum = false
+			} else {
+				yyv8 := &x.Quorum
+				yym9 := z.DecBinary()
+				_ = yym9
+				if false {
+				} else {
+					*((*bool)(yyv8)) = r.DecodeBool()
+				}
+			}
+		default:
+			z.DecStructFieldNotFound(-1, yys3)
+		} // end switch yys3
+	} // end for yyj3
+	r.ReadMapEnd()
+}
+
+func (x *GetOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperDecoder(d)
+	_, _, _ = h, z, r
+	var yyj10 int
+	var yyb10 bool
+	var yyhl10 bool = l >= 0
+	yyj10++
+	if yyhl10 {
+		yyb10 = yyj10 > l
+	} else {
+		yyb10 = r.CheckBreak()
+	}
+	if yyb10 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.Recursive = false
+	} else {
+		yyv11 := &x.Recursive
+		yym12 := z.DecBinary()
+		_ = yym12
+		if false {
+		} else {
+			*((*bool)(yyv11)) = r.DecodeBool()
+		}
+	}
+	yyj10++
+	if yyhl10 {
+		yyb10 = yyj10 > l
+	} else {
+		yyb10 = r.CheckBreak()
+	}
+	if yyb10 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.Sort = false
+	} else {
+		yyv13 := &x.Sort
+		yym14 := z.DecBinary()
+		_ = yym14
+		if false {
+		} else {
+			*((*bool)(yyv13)) = r.DecodeBool()
+		}
+	}
+	yyj10++
+	if yyhl10 {
+		yyb10 = yyj10 > l
+	} else {
+		yyb10 = r.CheckBreak()
+	}
+	if yyb10 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.Quorum = false
+	} else {
+		yyv15 := &x.Quorum
+		yym16 := z.DecBinary()
+		_ = yym16
+		if false {
+		} else {
+			*((*bool)(yyv15)) = r.DecodeBool()
+		}
+	}
+	for {
+		yyj10++
+		if yyhl10 {
+			yyb10 = yyj10 > l
+		} else {
+			yyb10 = r.CheckBreak()
+		}
+		if yyb10 {
+			break
+		}
+		r.ReadArrayElem()
+		z.DecStructFieldNotFound(yyj10-1, "")
+	}
+	r.ReadArrayEnd()
+}
+
+func (x *DeleteOptions) CodecEncodeSelf(e *codec1978.Encoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperEncoder(e)
+	_, _, _ = h, z, r
+	if x == nil {
+		r.EncodeNil()
+	} else {
+		yym1 := z.EncBinary()
+		_ = yym1
+		if false {
+		} else if z.HasExtensions() && z.EncExt(x) {
+		} else {
+			yysep2 := !z.EncBinary()
+			yy2arr2 := z.EncBasicHandle().StructToArray
+			_, _ = yysep2, yy2arr2
+			const yyr2 bool = false
+			if yyr2 || yy2arr2 {
+				r.WriteArrayStart(4)
+			} else {
+				r.WriteMapStart(4)
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				yym4 := z.EncBinary()
+				_ = yym4
+				if false {
+				} else {
+					r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue))
+				}
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("PrevValue"))
+				r.WriteMapElemValue()
+				yym5 := z.EncBinary()
+				_ = yym5
+				if false {
+				} else {
+					r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue))
+				}
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				yym7 := z.EncBinary()
+				_ = yym7
+				if false {
+				} else {
+					r.EncodeUint(uint64(x.PrevIndex))
+				}
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("PrevIndex"))
+				r.WriteMapElemValue()
+				yym8 := z.EncBinary()
+				_ = yym8
+				if false {
+				} else {
+					r.EncodeUint(uint64(x.PrevIndex))
+				}
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				yym10 := z.EncBinary()
+				_ = yym10
+				if false {
+				} else {
+					r.EncodeBool(bool(x.Recursive))
+				}
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("Recursive"))
+				r.WriteMapElemValue()
+				yym11 := z.EncBinary()
+				_ = yym11
+				if false {
+				} else {
+					r.EncodeBool(bool(x.Recursive))
+				}
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				yym13 := z.EncBinary()
+				_ = yym13
+				if false {
+				} else {
+					r.EncodeBool(bool(x.Dir))
+				}
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("Dir"))
+				r.WriteMapElemValue()
+				yym14 := z.EncBinary()
+				_ = yym14
+				if false {
+				} else {
+					r.EncodeBool(bool(x.Dir))
+				}
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayEnd()
+			} else {
+				r.WriteMapEnd()
+			}
+		}
+	}
+}
+
+func (x *DeleteOptions) CodecDecodeSelf(d *codec1978.Decoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperDecoder(d)
+	_, _, _ = h, z, r
+	yym1 := z.DecBinary()
+	_ = yym1
+	if false {
+	} else if z.HasExtensions() && z.DecExt(x) {
+	} else {
+		yyct2 := r.ContainerType()
+		if yyct2 == codecSelferValueTypeMap7612 {
+			yyl2 := r.ReadMapStart()
+			if yyl2 == 0 {
+				r.ReadMapEnd()
+			} else {
+				x.codecDecodeSelfFromMap(yyl2, d)
+			}
+		} else if yyct2 == codecSelferValueTypeArray7612 {
+			yyl2 := r.ReadArrayStart()
+			if yyl2 == 0 {
+				r.ReadArrayEnd()
+			} else {
+				x.codecDecodeSelfFromArray(yyl2, d)
+			}
+		} else {
+			panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612)
+		}
+	}
+}
+
+func (x *DeleteOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperDecoder(d)
+	_, _, _ = h, z, r
+	var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+	_ = yys3Slc
+	var yyhl3 bool = l >= 0
+	for yyj3 := 0; ; yyj3++ {
+		if yyhl3 {
+			if yyj3 >= l {
+				break
+			}
+		} else {
+			if r.CheckBreak() {
+				break
+			}
+		}
+		r.ReadMapElemKey()
+		yys3Slc = r.DecodeStringAsBytes()
+		yys3 := string(yys3Slc)
+		r.ReadMapElemValue()
+		switch yys3 {
+		case "PrevValue":
+			if r.TryDecodeAsNil() {
+				x.PrevValue = ""
+			} else {
+				yyv4 := &x.PrevValue
+				yym5 := z.DecBinary()
+				_ = yym5
+				if false {
+				} else {
+					*((*string)(yyv4)) = r.DecodeString()
+				}
+			}
+		case "PrevIndex":
+			if r.TryDecodeAsNil() {
+				x.PrevIndex = 0
+			} else {
+				yyv6 := &x.PrevIndex
+				yym7 := z.DecBinary()
+				_ = yym7
+				if false {
+				} else {
+					*((*uint64)(yyv6)) = uint64(r.DecodeUint(64))
+				}
+			}
+		case "Recursive":
+			if r.TryDecodeAsNil() {
+				x.Recursive = false
+			} else {
+				yyv8 := &x.Recursive
+				yym9 := z.DecBinary()
+				_ = yym9
+				if false {
+				} else {
+					*((*bool)(yyv8)) = r.DecodeBool()
+				}
+			}
+		case "Dir":
+			if r.TryDecodeAsNil() {
+				x.Dir = false
+			} else {
+				yyv10 := &x.Dir
+				yym11 := z.DecBinary()
+				_ = yym11
+				if false {
+				} else {
+					*((*bool)(yyv10)) = r.DecodeBool()
+				}
+			}
+		default:
+			z.DecStructFieldNotFound(-1, yys3)
+		} // end switch yys3
+	} // end for yyj3
+	r.ReadMapEnd()
+}
+
+func (x *DeleteOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperDecoder(d)
+	_, _, _ = h, z, r
+	var yyj12 int
+	var yyb12 bool
+	var yyhl12 bool = l >= 0
+	yyj12++
+	if yyhl12 {
+		yyb12 = yyj12 > l
+	} else {
+		yyb12 = r.CheckBreak()
+	}
+	if yyb12 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.PrevValue = ""
+	} else {
+		yyv13 := &x.PrevValue
+		yym14 := z.DecBinary()
+		_ = yym14
+		if false {
+		} else {
+			*((*string)(yyv13)) = r.DecodeString()
+		}
+	}
+	yyj12++
+	if yyhl12 {
+		yyb12 = yyj12 > l
+	} else {
+		yyb12 = r.CheckBreak()
+	}
+	if yyb12 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.PrevIndex = 0
+	} else {
+		yyv15 := &x.PrevIndex
+		yym16 := z.DecBinary()
+		_ = yym16
+		if false {
+		} else {
+			*((*uint64)(yyv15)) = uint64(r.DecodeUint(64))
+		}
+	}
+	yyj12++
+	if yyhl12 {
+		yyb12 = yyj12 > l
+	} else {
+		yyb12 = r.CheckBreak()
+	}
+	if yyb12 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.Recursive = false
+	} else {
+		yyv17 := &x.Recursive
+		yym18 := z.DecBinary()
+		_ = yym18
+		if false {
+		} else {
+			*((*bool)(yyv17)) = r.DecodeBool()
+		}
+	}
+	yyj12++
+	if yyhl12 {
+		yyb12 = yyj12 > l
+	} else {
+		yyb12 = r.CheckBreak()
+	}
+	if yyb12 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.Dir = false
+	} else {
+		yyv19 := &x.Dir
+		yym20 := z.DecBinary()
+		_ = yym20
+		if false {
+		} else {
+			*((*bool)(yyv19)) = r.DecodeBool()
+		}
+	}
+	for {
+		yyj12++
+		if yyhl12 {
+			yyb12 = yyj12 > l
+		} else {
+			yyb12 = r.CheckBreak()
+		}
+		if yyb12 {
+			break
+		}
+		r.ReadArrayElem()
+		z.DecStructFieldNotFound(yyj12-1, "")
+	}
+	r.ReadArrayEnd()
+}
+
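+// Response carries two optional pointer fields. The encoder records their
+// nilness up front (yyn6/yyn9) so that an unset Node or PrevNode is written
+// as an explicit nil in both the array and the map form.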
+func (x *Response) CodecEncodeSelf(e *codec1978.Encoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperEncoder(e)
+	_, _, _ = h, z, r
+	if x == nil {
+		r.EncodeNil()
+	} else {
+		yym1 := z.EncBinary()
+		_ = yym1
+		if false {
+		} else if z.HasExtensions() && z.EncExt(x) {
+		} else {
+			yysep2 := !z.EncBinary()
+			yy2arr2 := z.EncBasicHandle().StructToArray
+			_, _ = yysep2, yy2arr2
+			const yyr2 bool = false
+			if yyr2 || yy2arr2 {
+				r.WriteArrayStart(3)
+			} else {
+				r.WriteMapStart(3)
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				yym4 := z.EncBinary()
+				_ = yym4
+				if false {
+				} else {
+					r.EncodeString(codecSelferC_UTF87612, string(x.Action))
+				}
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("action"))
+				r.WriteMapElemValue()
+				yym5 := z.EncBinary()
+				_ = yym5
+				if false {
+				} else {
+					r.EncodeString(codecSelferC_UTF87612, string(x.Action))
+				}
+			}
+			var yyn6 bool
+			if x.Node == nil {
+				yyn6 = true
+				goto LABEL6
+			}
+		LABEL6:
+			if yyr2 || yy2arr2 {
+				if yyn6 {
+					r.WriteArrayElem()
+					r.EncodeNil()
+				} else {
+					r.WriteArrayElem()
+					if x.Node == nil {
+						r.EncodeNil()
+					} else {
+						x.Node.CodecEncodeSelf(e)
+					}
+				}
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("node"))
+				r.WriteMapElemValue()
+				if yyn6 {
+					r.EncodeNil()
+				} else {
+					if x.Node == nil {
+						r.EncodeNil()
+					} else {
+						x.Node.CodecEncodeSelf(e)
+					}
+				}
+			}
+			var yyn9 bool
+			if x.PrevNode == nil {
+				yyn9 = true
+				goto LABEL9
+			}
+		LABEL9:
+			if yyr2 || yy2arr2 {
+				if yyn9 {
+					r.WriteArrayElem()
+					r.EncodeNil()
+				} else {
+					r.WriteArrayElem()
+					if x.PrevNode == nil {
+						r.EncodeNil()
+					} else {
+						x.PrevNode.CodecEncodeSelf(e)
+					}
+				}
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("prevNode"))
+				r.WriteMapElemValue()
+				if yyn9 {
+					r.EncodeNil()
+				} else {
+					if x.PrevNode == nil {
+						r.EncodeNil()
+					} else {
+						x.PrevNode.CodecEncodeSelf(e)
+					}
+				}
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayEnd()
+			} else {
+				r.WriteMapEnd()
+			}
+		}
+	}
+}
+
+func (x *Response) CodecDecodeSelf(d *codec1978.Decoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperDecoder(d)
+	_, _, _ = h, z, r
+	yym1 := z.DecBinary()
+	_ = yym1
+	if false {
+	} else if z.HasExtensions() && z.DecExt(x) {
+	} else {
+		yyct2 := r.ContainerType()
+		if yyct2 == codecSelferValueTypeMap7612 {
+			yyl2 := r.ReadMapStart()
+			if yyl2 == 0 {
+				r.ReadMapEnd()
+			} else {
+				x.codecDecodeSelfFromMap(yyl2, d)
+			}
+		} else if yyct2 == codecSelferValueTypeArray7612 {
+			yyl2 := r.ReadArrayStart()
+			if yyl2 == 0 {
+				r.ReadArrayEnd()
+			} else {
+				x.codecDecodeSelfFromArray(yyl2, d)
+			}
+		} else {
+			panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612)
+		}
+	}
+}
+
+func (x *Response) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperDecoder(d)
+	_, _, _ = h, z, r
+	var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+	_ = yys3Slc
+	var yyhl3 bool = l >= 0
+	for yyj3 := 0; ; yyj3++ {
+		if yyhl3 {
+			if yyj3 >= l {
+				break
+			}
+		} else {
+			if r.CheckBreak() {
+				break
+			}
+		}
+		r.ReadMapElemKey()
+		yys3Slc = r.DecodeStringAsBytes()
+		yys3 := string(yys3Slc)
+		r.ReadMapElemValue()
+		switch yys3 {
+		case "action":
+			if r.TryDecodeAsNil() {
+				x.Action = ""
+			} else {
+				yyv4 := &x.Action
+				yym5 := z.DecBinary()
+				_ = yym5
+				if false {
+				} else {
+					*((*string)(yyv4)) = r.DecodeString()
+				}
+			}
+		case "node":
+			if x.Node == nil {
+				x.Node = new(Node)
+			}
+			if r.TryDecodeAsNil() {
+				if x.Node != nil {
+					x.Node = nil
+				}
+			} else {
+				if x.Node == nil {
+					x.Node = new(Node)
+				}
+				x.Node.CodecDecodeSelf(d)
+			}
+		case "prevNode":
+			if x.PrevNode == nil {
+				x.PrevNode = new(Node)
+			}
+			if r.TryDecodeAsNil() {
+				if x.PrevNode != nil {
+					x.PrevNode = nil
+				}
+			} else {
+				if x.PrevNode == nil {
+					x.PrevNode = new(Node)
+				}
+				x.PrevNode.CodecDecodeSelf(d)
+			}
+		default:
+			z.DecStructFieldNotFound(-1, yys3)
+		} // end switch yys3
+	} // end for yyj3
+	r.ReadMapEnd()
+}
+
+func (x *Response) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperDecoder(d)
+	_, _, _ = h, z, r
+	var yyj8 int
+	var yyb8 bool
+	var yyhl8 bool = l >= 0
+	yyj8++
+	if yyhl8 {
+		yyb8 = yyj8 > l
+	} else {
+		yyb8 = r.CheckBreak()
+	}
+	if yyb8 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.Action = ""
+	} else {
+		yyv9 := &x.Action
+		yym10 := z.DecBinary()
+		_ = yym10
+		if false {
+		} else {
+			*((*string)(yyv9)) = r.DecodeString()
+		}
+	}
+	if x.Node == nil {
+		x.Node = new(Node)
+	}
+	yyj8++
+	if yyhl8 {
+		yyb8 = yyj8 > l
+	} else {
+		yyb8 = r.CheckBreak()
+	}
+	if yyb8 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		if x.Node != nil {
+			x.Node = nil
+		}
+	} else {
+		if x.Node == nil {
+			x.Node = new(Node)
+		}
+		x.Node.CodecDecodeSelf(d)
+	}
+	if x.PrevNode == nil {
+		x.PrevNode = new(Node)
+	}
+	yyj8++
+	if yyhl8 {
+		yyb8 = yyj8 > l
+	} else {
+		yyb8 = r.CheckBreak()
+	}
+	if yyb8 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		if x.PrevNode != nil {
+			x.PrevNode = nil
+		}
+	} else {
+		if x.PrevNode == nil {
+			x.PrevNode = new(Node)
+		}
+		x.PrevNode.CodecDecodeSelf(d)
+	}
+	for {
+		yyj8++
+		if yyhl8 {
+			yyb8 = yyj8 > l
+		} else {
+			yyb8 = r.CheckBreak()
+		}
+		if yyb8 {
+			break
+		}
+		r.ReadArrayElem()
+		z.DecStructFieldNotFound(yyj8-1, "")
+	}
+	r.ReadArrayEnd()
+}
+
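+// Node is the only type in this group with omitempty semantics: the yyq2
+// bitmap marks "dir", "expiration" and "ttl" as present only when non-zero.
+// The map form sizes itself from that bitmap and omits absent keys, whereas
+// the array form always writes all 8 slots, padding absent fields with zero
+// values (or nil for the expiration pointer).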
+func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperEncoder(e)
+	_, _, _ = h, z, r
+	if x == nil {
+		r.EncodeNil()
+	} else {
+		yym1 := z.EncBinary()
+		_ = yym1
+		if false {
+		} else if z.HasExtensions() && z.EncExt(x) {
+		} else {
+			yysep2 := !z.EncBinary()
+			yy2arr2 := z.EncBasicHandle().StructToArray
+			var yyq2 [8]bool
+			_ = yyq2
+			_, _ = yysep2, yy2arr2
+			const yyr2 bool = false
+			yyq2[1] = x.Dir != false
+			yyq2[6] = x.Expiration != nil
+			yyq2[7] = x.TTL != 0
+			if yyr2 || yy2arr2 {
+				r.WriteArrayStart(8)
+			} else {
+				var yynn2 = 5
+				for _, b := range yyq2 {
+					if b {
+						yynn2++
+					}
+				}
+				r.WriteMapStart(yynn2)
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				yym4 := z.EncBinary()
+				_ = yym4
+				if false {
+				} else {
+					r.EncodeString(codecSelferC_UTF87612, string(x.Key))
+				}
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("key"))
+				r.WriteMapElemValue()
+				yym5 := z.EncBinary()
+				_ = yym5
+				if false {
+				} else {
+					r.EncodeString(codecSelferC_UTF87612, string(x.Key))
+				}
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				if yyq2[1] {
+					yym7 := z.EncBinary()
+					_ = yym7
+					if false {
+					} else {
+						r.EncodeBool(bool(x.Dir))
+					}
+				} else {
+					r.EncodeBool(false)
+				}
+			} else {
+				if yyq2[1] {
+					r.WriteMapElemKey()
+					r.EncodeString(codecSelferC_UTF87612, string("dir"))
+					r.WriteMapElemValue()
+					yym8 := z.EncBinary()
+					_ = yym8
+					if false {
+					} else {
+						r.EncodeBool(bool(x.Dir))
+					}
+				}
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				yym10 := z.EncBinary()
+				_ = yym10
+				if false {
+				} else {
+					r.EncodeString(codecSelferC_UTF87612, string(x.Value))
+				}
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("value"))
+				r.WriteMapElemValue()
+				yym11 := z.EncBinary()
+				_ = yym11
+				if false {
+				} else {
+					r.EncodeString(codecSelferC_UTF87612, string(x.Value))
+				}
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				if x.Nodes == nil {
+					r.EncodeNil()
+				} else {
+					x.Nodes.CodecEncodeSelf(e)
+				}
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("nodes"))
+				r.WriteMapElemValue()
+				if x.Nodes == nil {
+					r.EncodeNil()
+				} else {
+					x.Nodes.CodecEncodeSelf(e)
+				}
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				yym16 := z.EncBinary()
+				_ = yym16
+				if false {
+				} else {
+					r.EncodeUint(uint64(x.CreatedIndex))
+				}
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("createdIndex"))
+				r.WriteMapElemValue()
+				yym17 := z.EncBinary()
+				_ = yym17
+				if false {
+				} else {
+					r.EncodeUint(uint64(x.CreatedIndex))
+				}
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				yym19 := z.EncBinary()
+				_ = yym19
+				if false {
+				} else {
+					r.EncodeUint(uint64(x.ModifiedIndex))
+				}
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("modifiedIndex"))
+				r.WriteMapElemValue()
+				yym20 := z.EncBinary()
+				_ = yym20
+				if false {
+				} else {
+					r.EncodeUint(uint64(x.ModifiedIndex))
+				}
+			}
+			var yyn21 bool
+			if x.Expiration == nil {
+				yyn21 = true
+				goto LABEL21
+			}
+		LABEL21:
+			if yyr2 || yy2arr2 {
+				if yyn21 {
+					r.WriteArrayElem()
+					r.EncodeNil()
+				} else {
+					r.WriteArrayElem()
+					if yyq2[6] {
+						if x.Expiration == nil {
+							r.EncodeNil()
+						} else {
+							yym22 := z.EncBinary()
+							_ = yym22
+							if false {
+							} else if yym23 := z.TimeRtidIfBinc(); yym23 != 0 {
+								r.EncodeBuiltin(yym23, x.Expiration)
+							} else if z.HasExtensions() && z.EncExt(x.Expiration) {
+							} else if yym22 {
+								z.EncBinaryMarshal(x.Expiration)
+							} else if !yym22 && z.IsJSONHandle() {
+								z.EncJSONMarshal(x.Expiration)
+							} else {
+								z.EncFallback(x.Expiration)
+							}
+						}
+					} else {
+						r.EncodeNil()
+					}
+				}
+			} else {
+				if yyq2[6] {
+					r.WriteMapElemKey()
+					r.EncodeString(codecSelferC_UTF87612, string("expiration"))
+					r.WriteMapElemValue()
+					if yyn21 {
+						r.EncodeNil()
+					} else {
+						if x.Expiration == nil {
+							r.EncodeNil()
+						} else {
+							yym24 := z.EncBinary()
+							_ = yym24
+							if false {
+							} else if yym25 := z.TimeRtidIfBinc(); yym25 != 0 {
+								r.EncodeBuiltin(yym25, x.Expiration)
+							} else if z.HasExtensions() && z.EncExt(x.Expiration) {
+							} else if yym24 {
+								z.EncBinaryMarshal(x.Expiration)
+							} else if !yym24 && z.IsJSONHandle() {
+								z.EncJSONMarshal(x.Expiration)
+							} else {
+								z.EncFallback(x.Expiration)
+							}
+						}
+					}
+				}
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				if yyq2[7] {
+					yym27 := z.EncBinary()
+					_ = yym27
+					if false {
+					} else {
+						r.EncodeInt(int64(x.TTL))
+					}
+				} else {
+					r.EncodeInt(0)
+				}
+			} else {
+				if yyq2[7] {
+					r.WriteMapElemKey()
+					r.EncodeString(codecSelferC_UTF87612, string("ttl"))
+					r.WriteMapElemValue()
+					yym28 := z.EncBinary()
+					_ = yym28
+					if false {
+					} else {
+						r.EncodeInt(int64(x.TTL))
+					}
+				}
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayEnd()
+			} else {
+				r.WriteMapEnd()
+			}
+		}
+	}
+}
+
+func (x *Node) CodecDecodeSelf(d *codec1978.Decoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperDecoder(d)
+	_, _, _ = h, z, r
+	yym1 := z.DecBinary()
+	_ = yym1
+	if false {
+	} else if z.HasExtensions() && z.DecExt(x) {
+	} else {
+		yyct2 := r.ContainerType()
+		if yyct2 == codecSelferValueTypeMap7612 {
+			yyl2 := r.ReadMapStart()
+			if yyl2 == 0 {
+				r.ReadMapEnd()
+			} else {
+				x.codecDecodeSelfFromMap(yyl2, d)
+			}
+		} else if yyct2 == codecSelferValueTypeArray7612 {
+			yyl2 := r.ReadArrayStart()
+			if yyl2 == 0 {
+				r.ReadArrayEnd()
+			} else {
+				x.codecDecodeSelfFromArray(yyl2, d)
+			}
+		} else {
+			panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612)
+		}
+	}
+}
+
+func (x *Node) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperDecoder(d)
+	_, _, _ = h, z, r
+	var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+	_ = yys3Slc
+	var yyhl3 bool = l >= 0
+	for yyj3 := 0; ; yyj3++ {
+		if yyhl3 {
+			if yyj3 >= l {
+				break
+			}
+		} else {
+			if r.CheckBreak() {
+				break
+			}
+		}
+		r.ReadMapElemKey()
+		yys3Slc = r.DecodeStringAsBytes()
+		yys3 := string(yys3Slc)
+		r.ReadMapElemValue()
+		switch yys3 {
+		case "key":
+			if r.TryDecodeAsNil() {
+				x.Key = ""
+			} else {
+				yyv4 := &x.Key
+				yym5 := z.DecBinary()
+				_ = yym5
+				if false {
+				} else {
+					*((*string)(yyv4)) = r.DecodeString()
+				}
+			}
+		case "dir":
+			if r.TryDecodeAsNil() {
+				x.Dir = false
+			} else {
+				yyv6 := &x.Dir
+				yym7 := z.DecBinary()
+				_ = yym7
+				if false {
+				} else {
+					*((*bool)(yyv6)) = r.DecodeBool()
+				}
+			}
+		case "value":
+			if r.TryDecodeAsNil() {
+				x.Value = ""
+			} else {
+				yyv8 := &x.Value
+				yym9 := z.DecBinary()
+				_ = yym9
+				if false {
+				} else {
+					*((*string)(yyv8)) = r.DecodeString()
+				}
+			}
+		case "nodes":
+			if r.TryDecodeAsNil() {
+				x.Nodes = nil
+			} else {
+				yyv10 := &x.Nodes
+				yyv10.CodecDecodeSelf(d)
+			}
+		case "createdIndex":
+			if r.TryDecodeAsNil() {
+				x.CreatedIndex = 0
+			} else {
+				yyv11 := &x.CreatedIndex
+				yym12 := z.DecBinary()
+				_ = yym12
+				if false {
+				} else {
+					*((*uint64)(yyv11)) = uint64(r.DecodeUint(64))
+				}
+			}
+		case "modifiedIndex":
+			if r.TryDecodeAsNil() {
+				x.ModifiedIndex = 0
+			} else {
+				yyv13 := &x.ModifiedIndex
+				yym14 := z.DecBinary()
+				_ = yym14
+				if false {
+				} else {
+					*((*uint64)(yyv13)) = uint64(r.DecodeUint(64))
+				}
+			}
+		case "expiration":
+			if x.Expiration == nil {
+				x.Expiration = new(time.Time)
+			}
+			if r.TryDecodeAsNil() {
+				if x.Expiration != nil {
+					x.Expiration = nil
+				}
+			} else {
+				if x.Expiration == nil {
+					x.Expiration = new(time.Time)
+				}
+				yym16 := z.DecBinary()
+				_ = yym16
+				if false {
+				} else if yym17 := z.TimeRtidIfBinc(); yym17 != 0 {
+					r.DecodeBuiltin(yym17, x.Expiration)
+				} else if z.HasExtensions() && z.DecExt(x.Expiration) {
+				} else if yym16 {
+					z.DecBinaryUnmarshal(x.Expiration)
+				} else if !yym16 && z.IsJSONHandle() {
+					z.DecJSONUnmarshal(x.Expiration)
+				} else {
+					z.DecFallback(x.Expiration, false)
+				}
+			}
+		case "ttl":
+			if r.TryDecodeAsNil() {
+				x.TTL = 0
+			} else {
+				yyv18 := &x.TTL
+				yym19 := z.DecBinary()
+				_ = yym19
+				if false {
+				} else {
+					*((*int64)(yyv18)) = int64(r.DecodeInt(64))
+				}
+			}
+		default:
+			z.DecStructFieldNotFound(-1, yys3)
+		} // end switch yys3
+	} // end for yyj3
+	r.ReadMapEnd()
+}
+
+func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperDecoder(d)
+	_, _, _ = h, z, r
+	var yyj20 int
+	var yyb20 bool
+	var yyhl20 bool = l >= 0
+	yyj20++
+	if yyhl20 {
+		yyb20 = yyj20 > l
+	} else {
+		yyb20 = r.CheckBreak()
+	}
+	if yyb20 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.Key = ""
+	} else {
+		yyv21 := &x.Key
+		yym22 := z.DecBinary()
+		_ = yym22
+		if false {
+		} else {
+			*((*string)(yyv21)) = r.DecodeString()
+		}
+	}
+	yyj20++
+	if yyhl20 {
+		yyb20 = yyj20 > l
+	} else {
+		yyb20 = r.CheckBreak()
+	}
+	if yyb20 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.Dir = false
+	} else {
+		yyv23 := &x.Dir
+		yym24 := z.DecBinary()
+		_ = yym24
+		if false {
+		} else {
+			*((*bool)(yyv23)) = r.DecodeBool()
+		}
+	}
+	yyj20++
+	if yyhl20 {
+		yyb20 = yyj20 > l
+	} else {
+		yyb20 = r.CheckBreak()
+	}
+	if yyb20 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.Value = ""
+	} else {
+		yyv25 := &x.Value
+		yym26 := z.DecBinary()
+		_ = yym26
+		if false {
+		} else {
+			*((*string)(yyv25)) = r.DecodeString()
+		}
+	}
+	yyj20++
+	if yyhl20 {
+		yyb20 = yyj20 > l
+	} else {
+		yyb20 = r.CheckBreak()
+	}
+	if yyb20 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.Nodes = nil
+	} else {
+		yyv27 := &x.Nodes
+		yyv27.CodecDecodeSelf(d)
+	}
+	yyj20++
+	if yyhl20 {
+		yyb20 = yyj20 > l
+	} else {
+		yyb20 = r.CheckBreak()
+	}
+	if yyb20 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.CreatedIndex = 0
+	} else {
+		yyv28 := &x.CreatedIndex
+		yym29 := z.DecBinary()
+		_ = yym29
+		if false {
+		} else {
+			*((*uint64)(yyv28)) = uint64(r.DecodeUint(64))
+		}
+	}
+	yyj20++
+	if yyhl20 {
+		yyb20 = yyj20 > l
+	} else {
+		yyb20 = r.CheckBreak()
+	}
+	if yyb20 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.ModifiedIndex = 0
+	} else {
+		yyv30 := &x.ModifiedIndex
+		yym31 := z.DecBinary()
+		_ = yym31
+		if false {
+		} else {
+			*((*uint64)(yyv30)) = uint64(r.DecodeUint(64))
+		}
+	}
+	if x.Expiration == nil {
+		x.Expiration = new(time.Time)
+	}
+	yyj20++
+	if yyhl20 {
+		yyb20 = yyj20 > l
+	} else {
+		yyb20 = r.CheckBreak()
+	}
+	if yyb20 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		if x.Expiration != nil {
+			x.Expiration = nil
+		}
+	} else {
+		if x.Expiration == nil {
+			x.Expiration = new(time.Time)
+		}
+		yym33 := z.DecBinary()
+		_ = yym33
+		if false {
+		} else if yym34 := z.TimeRtidIfBinc(); yym34 != 0 {
+			r.DecodeBuiltin(yym34, x.Expiration)
+		} else if z.HasExtensions() && z.DecExt(x.Expiration) {
+		} else if yym33 {
+			z.DecBinaryUnmarshal(x.Expiration)
+		} else if !yym33 && z.IsJSONHandle() {
+			z.DecJSONUnmarshal(x.Expiration)
+		} else {
+			z.DecFallback(x.Expiration, false)
+		}
+	}
+	yyj20++
+	if yyhl20 {
+		yyb20 = yyj20 > l
+	} else {
+		yyb20 = r.CheckBreak()
+	}
+	if yyb20 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.TTL = 0
+	} else {
+		yyv35 := &x.TTL
+		yym36 := z.DecBinary()
+		_ = yym36
+		if false {
+		} else {
+			*((*int64)(yyv35)) = int64(r.DecodeInt(64))
+		}
+	}
+	for {
+		yyj20++
+		if yyhl20 {
+			yyb20 = yyj20 > l
+		} else {
+			yyb20 = r.CheckBreak()
+		}
+		if yyb20 {
+			break
+		}
+		r.ReadArrayElem()
+		z.DecStructFieldNotFound(yyj20-1, "")
+	}
+	r.ReadArrayEnd()
+}
+
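+// Nodes is a slice type, so its codecs are thin wrappers that delegate to the
+// generated slice helpers encNodes/decNodes instead of walking struct fields.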
+func (x Nodes) CodecEncodeSelf(e *codec1978.Encoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperEncoder(e)
+	_, _, _ = h, z, r
+	if x == nil {
+		r.EncodeNil()
+	} else {
+		yym1 := z.EncBinary()
+		_ = yym1
+		if false {
+		} else if z.HasExtensions() && z.EncExt(x) {
+		} else {
+			h.encNodes((Nodes)(x), e)
+		}
+	}
+}
+
+func (x *Nodes) CodecDecodeSelf(d *codec1978.Decoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperDecoder(d)
+	_, _, _ = h, z, r
+	yym1 := z.DecBinary()
+	_ = yym1
+	if false {
+	} else if z.HasExtensions() && z.DecExt(x) {
+	} else {
+		h.decNodes((*Nodes)(x), d)
+	}
+}
+
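+// httpKeysAPI and httpWatcher expose no encodable (exported) fields, so their
+// generated codecs emit and accept empty containers; the decode paths exist
+// only to drain whatever the peer sent.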
+func (x *httpKeysAPI) CodecEncodeSelf(e *codec1978.Encoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperEncoder(e)
+	_, _, _ = h, z, r
+	if x == nil {
+		r.EncodeNil()
+	} else {
+		yym1 := z.EncBinary()
+		_ = yym1
+		if false {
+		} else if z.HasExtensions() && z.EncExt(x) {
+		} else {
+			yysep2 := !z.EncBinary()
+			yy2arr2 := z.EncBasicHandle().StructToArray
+			_, _ = yysep2, yy2arr2
+			const yyr2 bool = false
+			if yyr2 || yy2arr2 {
+				r.WriteArrayStart(0)
+			} else {
+				r.WriteMapStart(0)
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayEnd()
+			} else {
+				r.WriteMapEnd()
+			}
+		}
+	}
+}
+
+func (x *httpKeysAPI) CodecDecodeSelf(d *codec1978.Decoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperDecoder(d)
+	_, _, _ = h, z, r
+	yym1 := z.DecBinary()
+	_ = yym1
+	if false {
+	} else if z.HasExtensions() && z.DecExt(x) {
+	} else {
+		yyct2 := r.ContainerType()
+		if yyct2 == codecSelferValueTypeMap7612 {
+			yyl2 := r.ReadMapStart()
+			if yyl2 == 0 {
+				r.ReadMapEnd()
+			} else {
+				x.codecDecodeSelfFromMap(yyl2, d)
+			}
+		} else if yyct2 == codecSelferValueTypeArray7612 {
+			yyl2 := r.ReadArrayStart()
+			if yyl2 == 0 {
+				r.ReadArrayEnd()
+			} else {
+				x.codecDecodeSelfFromArray(yyl2, d)
+			}
+		} else {
+			panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612)
+		}
+	}
+}
+
+func (x *httpKeysAPI) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperDecoder(d)
+	_, _, _ = h, z, r
+	var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+	_ = yys3Slc
+	var yyhl3 bool = l >= 0
+	for yyj3 := 0; ; yyj3++ {
+		if yyhl3 {
+			if yyj3 >= l {
+				break
+			}
+		} else {
+			if r.CheckBreak() {
+				break
+			}
+		}
+		r.ReadMapElemKey()
+		yys3Slc = r.DecodeStringAsBytes()
+		yys3 := string(yys3Slc)
+		r.ReadMapElemValue()
+		switch yys3 {
+		default:
+			z.DecStructFieldNotFound(-1, yys3)
+		} // end switch yys3
+	} // end for yyj3
+	r.ReadMapEnd()
+}
+
+func (x *httpKeysAPI) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperDecoder(d)
+	_, _, _ = h, z, r
+	var yyj4 int
+	var yyb4 bool
+	var yyhl4 bool = l >= 0
+	for {
+		yyj4++
+		if yyhl4 {
+			yyb4 = yyj4 > l
+		} else {
+			yyb4 = r.CheckBreak()
+		}
+		if yyb4 {
+			break
+		}
+		r.ReadArrayElem()
+		z.DecStructFieldNotFound(yyj4-1, "")
+	}
+	r.ReadArrayEnd()
+}
+
+func (x *httpWatcher) CodecEncodeSelf(e *codec1978.Encoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperEncoder(e)
+	_, _, _ = h, z, r
+	if x == nil {
+		r.EncodeNil()
+	} else {
+		yym1 := z.EncBinary()
+		_ = yym1
+		if false {
+		} else if z.HasExtensions() && z.EncExt(x) {
+		} else {
+			yysep2 := !z.EncBinary()
+			yy2arr2 := z.EncBasicHandle().StructToArray
+			_, _ = yysep2, yy2arr2
+			const yyr2 bool = false
+			if yyr2 || yy2arr2 {
+				r.WriteArrayStart(0)
+			} else {
+				r.WriteMapStart(0)
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayEnd()
+			} else {
+				r.WriteMapEnd()
+			}
+		}
+	}
+}
+
+func (x *httpWatcher) CodecDecodeSelf(d *codec1978.Decoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperDecoder(d)
+	_, _, _ = h, z, r
+	yym1 := z.DecBinary()
+	_ = yym1
+	if false {
+	} else if z.HasExtensions() && z.DecExt(x) {
+	} else {
+		yyct2 := r.ContainerType()
+		if yyct2 == codecSelferValueTypeMap7612 {
+			yyl2 := r.ReadMapStart()
+			if yyl2 == 0 {
+				r.ReadMapEnd()
+			} else {
+				x.codecDecodeSelfFromMap(yyl2, d)
+			}
+		} else if yyct2 == codecSelferValueTypeArray7612 {
+			yyl2 := r.ReadArrayStart()
+			if yyl2 == 0 {
+				r.ReadArrayEnd()
+			} else {
+				x.codecDecodeSelfFromArray(yyl2, d)
+			}
+		} else {
+			panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612)
+		}
+	}
+}
+
+func (x *httpWatcher) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperDecoder(d)
+	_, _, _ = h, z, r
+	var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+	_ = yys3Slc
+	var yyhl3 bool = l >= 0
+	for yyj3 := 0; ; yyj3++ {
+		if yyhl3 {
+			if yyj3 >= l {
+				break
+			}
+		} else {
+			if r.CheckBreak() {
+				break
+			}
+		}
+		r.ReadMapElemKey()
+		yys3Slc = r.DecodeStringAsBytes()
+		yys3 := string(yys3Slc)
+		r.ReadMapElemValue()
+		switch yys3 {
+		default:
+			z.DecStructFieldNotFound(-1, yys3)
+		} // end switch yys3
+	} // end for yyj3
+	r.ReadMapEnd()
+}
+
+func (x *httpWatcher) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperDecoder(d)
+	_, _, _ = h, z, r
+	var yyj4 int
+	var yyb4 bool
+	var yyhl4 bool = l >= 0
+	for {
+		yyj4++
+		if yyhl4 {
+			yyb4 = yyj4 > l
+		} else {
+			yyb4 = r.CheckBreak()
+		}
+		if yyb4 {
+			break
+		}
+		r.ReadArrayElem()
+		z.DecStructFieldNotFound(yyj4-1, "")
+	}
+	r.ReadArrayEnd()
+}
+
+func (x *getAction) CodecEncodeSelf(e *codec1978.Encoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperEncoder(e)
+	_, _, _ = h, z, r
+	if x == nil {
+		r.EncodeNil()
+	} else {
+		yym1 := z.EncBinary()
+		_ = yym1
+		if false {
+		} else if z.HasExtensions() && z.EncExt(x) {
+		} else {
+			yysep2 := !z.EncBinary()
+			yy2arr2 := z.EncBasicHandle().StructToArray
+			_, _ = yysep2, yy2arr2
+			const yyr2 bool = false
+			if yyr2 || yy2arr2 {
+				r.WriteArrayStart(5)
+			} else {
+				r.WriteMapStart(5)
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				yym4 := z.EncBinary()
+				_ = yym4
+				if false {
+				} else {
+					r.EncodeString(codecSelferC_UTF87612, string(x.Prefix))
+				}
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("Prefix"))
+				r.WriteMapElemValue()
+				yym5 := z.EncBinary()
+				_ = yym5
+				if false {
+				} else {
+					r.EncodeString(codecSelferC_UTF87612, string(x.Prefix))
+				}
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				yym7 := z.EncBinary()
+				_ = yym7
+				if false {
+				} else {
+					r.EncodeString(codecSelferC_UTF87612, string(x.Key))
+				}
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("Key"))
+				r.WriteMapElemValue()
+				yym8 := z.EncBinary()
+				_ = yym8
+				if false {
+				} else {
+					r.EncodeString(codecSelferC_UTF87612, string(x.Key))
+				}
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				yym10 := z.EncBinary()
+				_ = yym10
+				if false {
+				} else {
+					r.EncodeBool(bool(x.Recursive))
+				}
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("Recursive"))
+				r.WriteMapElemValue()
+				yym11 := z.EncBinary()
+				_ = yym11
+				if false {
+				} else {
+					r.EncodeBool(bool(x.Recursive))
+				}
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				yym13 := z.EncBinary()
+				_ = yym13
+				if false {
+				} else {
+					r.EncodeBool(bool(x.Sorted))
+				}
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("Sorted"))
+				r.WriteMapElemValue()
+				yym14 := z.EncBinary()
+				_ = yym14
+				if false {
+				} else {
+					r.EncodeBool(bool(x.Sorted))
+				}
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				yym16 := z.EncBinary()
+				_ = yym16
+				if false {
+				} else {
+					r.EncodeBool(bool(x.Quorum))
+				}
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("Quorum"))
+				r.WriteMapElemValue()
+				yym17 := z.EncBinary()
+				_ = yym17
+				if false {
+				} else {
+					r.EncodeBool(bool(x.Quorum))
+				}
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayEnd()
+			} else {
+				r.WriteMapEnd()
+			}
+		}
+	}
+}
+
+func (x *getAction) CodecDecodeSelf(d *codec1978.Decoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperDecoder(d)
+	_, _, _ = h, z, r
+	yym1 := z.DecBinary()
+	_ = yym1
+	if false {
+	} else if z.HasExtensions() && z.DecExt(x) {
+	} else {
+		yyct2 := r.ContainerType()
+		if yyct2 == codecSelferValueTypeMap7612 {
+			yyl2 := r.ReadMapStart()
+			if yyl2 == 0 {
+				r.ReadMapEnd()
+			} else {
+				x.codecDecodeSelfFromMap(yyl2, d)
+			}
+		} else if yyct2 == codecSelferValueTypeArray7612 {
+			yyl2 := r.ReadArrayStart()
+			if yyl2 == 0 {
+				r.ReadArrayEnd()
+			} else {
+				x.codecDecodeSelfFromArray(yyl2, d)
+			}
+		} else {
+			panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612)
+		}
+	}
+}
+
+func (x *getAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperDecoder(d)
+	_, _, _ = h, z, r
+	var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+	_ = yys3Slc
+	var yyhl3 bool = l >= 0
+	for yyj3 := 0; ; yyj3++ {
+		if yyhl3 {
+			if yyj3 >= l {
+				break
+			}
+		} else {
+			if r.CheckBreak() {
+				break
+			}
+		}
+		r.ReadMapElemKey()
+		yys3Slc = r.DecodeStringAsBytes()
+		yys3 := string(yys3Slc)
+		r.ReadMapElemValue()
+		switch yys3 {
+		case "Prefix":
+			if r.TryDecodeAsNil() {
+				x.Prefix = ""
+			} else {
+				yyv4 := &x.Prefix
+				yym5 := z.DecBinary()
+				_ = yym5
+				if false {
+				} else {
+					*((*string)(yyv4)) = r.DecodeString()
+				}
+			}
+		case "Key":
+			if r.TryDecodeAsNil() {
+				x.Key = ""
+			} else {
+				yyv6 := &x.Key
+				yym7 := z.DecBinary()
+				_ = yym7
+				if false {
+				} else {
+					*((*string)(yyv6)) = r.DecodeString()
+				}
+			}
+		case "Recursive":
+			if r.TryDecodeAsNil() {
+				x.Recursive = false
+			} else {
+				yyv8 := &x.Recursive
+				yym9 := z.DecBinary()
+				_ = yym9
+				if false {
+				} else {
+					*((*bool)(yyv8)) = r.DecodeBool()
+				}
+			}
+		case "Sorted":
+			if r.TryDecodeAsNil() {
+				x.Sorted = false
+			} else {
+				yyv10 := &x.Sorted
+				yym11 := z.DecBinary()
+				_ = yym11
+				if false {
+				} else {
+					*((*bool)(yyv10)) = r.DecodeBool()
+				}
+			}
+		case "Quorum":
+			if r.TryDecodeAsNil() {
+				x.Quorum = false
+			} else {
+				yyv12 := &x.Quorum
+				yym13 := z.DecBinary()
+				_ = yym13
+				if false {
+				} else {
+					*((*bool)(yyv12)) = r.DecodeBool()
+				}
+			}
+		default:
+			z.DecStructFieldNotFound(-1, yys3)
+		} // end switch yys3
+	} // end for yyj3
+	r.ReadMapEnd()
+}
+
+func (x *getAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperDecoder(d)
+	_, _, _ = h, z, r
+	var yyj14 int
+	var yyb14 bool
+	var yyhl14 bool = l >= 0
+	yyj14++
+	if yyhl14 {
+		yyb14 = yyj14 > l
+	} else {
+		yyb14 = r.CheckBreak()
+	}
+	if yyb14 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.Prefix = ""
+	} else {
+		yyv15 := &x.Prefix
+		yym16 := z.DecBinary()
+		_ = yym16
+		if false {
+		} else {
+			*((*string)(yyv15)) = r.DecodeString()
+		}
+	}
+	yyj14++
+	if yyhl14 {
+		yyb14 = yyj14 > l
+	} else {
+		yyb14 = r.CheckBreak()
+	}
+	if yyb14 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.Key = ""
+	} else {
+		yyv17 := &x.Key
+		yym18 := z.DecBinary()
+		_ = yym18
+		if false {
+		} else {
+			*((*string)(yyv17)) = r.DecodeString()
+		}
+	}
+	yyj14++
+	if yyhl14 {
+		yyb14 = yyj14 > l
+	} else {
+		yyb14 = r.CheckBreak()
+	}
+	if yyb14 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.Recursive = false
+	} else {
+		yyv19 := &x.Recursive
+		yym20 := z.DecBinary()
+		_ = yym20
+		if false {
+		} else {
+			*((*bool)(yyv19)) = r.DecodeBool()
+		}
+	}
+	yyj14++
+	if yyhl14 {
+		yyb14 = yyj14 > l
+	} else {
+		yyb14 = r.CheckBreak()
+	}
+	if yyb14 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.Sorted = false
+	} else {
+		yyv21 := &x.Sorted
+		yym22 := z.DecBinary()
+		_ = yym22
+		if false {
+		} else {
+			*((*bool)(yyv21)) = r.DecodeBool()
+		}
+	}
+	yyj14++
+	if yyhl14 {
+		yyb14 = yyj14 > l
+	} else {
+		yyb14 = r.CheckBreak()
+	}
+	if yyb14 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.Quorum = false
+	} else {
+		yyv23 := &x.Quorum
+		yym24 := z.DecBinary()
+		_ = yym24
+		if false {
+		} else {
+			*((*bool)(yyv23)) = r.DecodeBool()
+		}
+	}
+	for {
+		yyj14++
+		if yyhl14 {
+			yyb14 = yyj14 > l
+		} else {
+			yyb14 = r.CheckBreak()
+		}
+		if yyb14 {
+			break
+		}
+		r.ReadArrayElem()
+		z.DecStructFieldNotFound(yyj14-1, "")
+	}
+	r.ReadArrayEnd()
+}
+
+func (x *waitAction) CodecEncodeSelf(e *codec1978.Encoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperEncoder(e)
+	_, _, _ = h, z, r
+	if x == nil {
+		r.EncodeNil()
+	} else {
+		yym1 := z.EncBinary()
+		_ = yym1
+		if false {
+		} else if z.HasExtensions() && z.EncExt(x) {
+		} else {
+			yysep2 := !z.EncBinary()
+			yy2arr2 := z.EncBasicHandle().StructToArray
+			_, _ = yysep2, yy2arr2
+			const yyr2 bool = false
+			if yyr2 || yy2arr2 {
+				r.WriteArrayStart(4)
+			} else {
+				r.WriteMapStart(4)
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				yym4 := z.EncBinary()
+				_ = yym4
+				if false {
+				} else {
+					r.EncodeString(codecSelferC_UTF87612, string(x.Prefix))
+				}
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("Prefix"))
+				r.WriteMapElemValue()
+				yym5 := z.EncBinary()
+				_ = yym5
+				if false {
+				} else {
+					r.EncodeString(codecSelferC_UTF87612, string(x.Prefix))
+				}
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				yym7 := z.EncBinary()
+				_ = yym7
+				if false {
+				} else {
+					r.EncodeString(codecSelferC_UTF87612, string(x.Key))
+				}
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("Key"))
+				r.WriteMapElemValue()
+				yym8 := z.EncBinary()
+				_ = yym8
+				if false {
+				} else {
+					r.EncodeString(codecSelferC_UTF87612, string(x.Key))
+				}
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				yym10 := z.EncBinary()
+				_ = yym10
+				if false {
+				} else {
+					r.EncodeUint(uint64(x.WaitIndex))
+				}
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("WaitIndex"))
+				r.WriteMapElemValue()
+				yym11 := z.EncBinary()
+				_ = yym11
+				if false {
+				} else {
+					r.EncodeUint(uint64(x.WaitIndex))
+				}
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				yym13 := z.EncBinary()
+				_ = yym13
+				if false {
+				} else {
+					r.EncodeBool(bool(x.Recursive))
+				}
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("Recursive"))
+				r.WriteMapElemValue()
+				yym14 := z.EncBinary()
+				_ = yym14
+				if false {
+				} else {
+					r.EncodeBool(bool(x.Recursive))
+				}
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayEnd()
+			} else {
+				r.WriteMapEnd()
+			}
+		}
+	}
+}
+
+func (x *waitAction) CodecDecodeSelf(d *codec1978.Decoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperDecoder(d)
+	_, _, _ = h, z, r
+	yym1 := z.DecBinary()
+	_ = yym1
+	if false {
+	} else if z.HasExtensions() && z.DecExt(x) {
+	} else {
+		yyct2 := r.ContainerType()
+		if yyct2 == codecSelferValueTypeMap7612 {
+			yyl2 := r.ReadMapStart()
+			if yyl2 == 0 {
+				r.ReadMapEnd()
+			} else {
+				x.codecDecodeSelfFromMap(yyl2, d)
+			}
+		} else if yyct2 == codecSelferValueTypeArray7612 {
+			yyl2 := r.ReadArrayStart()
+			if yyl2 == 0 {
+				r.ReadArrayEnd()
+			} else {
+				x.codecDecodeSelfFromArray(yyl2, d)
+			}
+		} else {
+			panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612)
+		}
+	}
+}
+
+func (x *waitAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperDecoder(d)
+	_, _, _ = h, z, r
+	var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+	_ = yys3Slc
+	var yyhl3 bool = l >= 0
+	for yyj3 := 0; ; yyj3++ {
+		if yyhl3 {
+			if yyj3 >= l {
+				break
+			}
+		} else {
+			if r.CheckBreak() {
+				break
+			}
+		}
+		r.ReadMapElemKey()
+		yys3Slc = r.DecodeStringAsBytes()
+		yys3 := string(yys3Slc)
+		r.ReadMapElemValue()
+		switch yys3 {
+		case "Prefix":
+			if r.TryDecodeAsNil() {
+				x.Prefix = ""
+			} else {
+				yyv4 := &x.Prefix
+				yym5 := z.DecBinary()
+				_ = yym5
+				if false {
+				} else {
+					*((*string)(yyv4)) = r.DecodeString()
+				}
+			}
+		case "Key":
+			if r.TryDecodeAsNil() {
+				x.Key = ""
+			} else {
+				yyv6 := &x.Key
+				yym7 := z.DecBinary()
+				_ = yym7
+				if false {
+				} else {
+					*((*string)(yyv6)) = r.DecodeString()
+				}
+			}
+		case "WaitIndex":
+			if r.TryDecodeAsNil() {
+				x.WaitIndex = 0
+			} else {
+				yyv8 := &x.WaitIndex
+				yym9 := z.DecBinary()
+				_ = yym9
+				if false {
+				} else {
+					*((*uint64)(yyv8)) = uint64(r.DecodeUint(64))
+				}
+			}
+		case "Recursive":
+			if r.TryDecodeAsNil() {
+				x.Recursive = false
+			} else {
+				yyv10 := &x.Recursive
+				yym11 := z.DecBinary()
+				_ = yym11
+				if false {
+				} else {
+					*((*bool)(yyv10)) = r.DecodeBool()
+				}
+			}
+		default:
+			z.DecStructFieldNotFound(-1, yys3)
+		} // end switch yys3
+	} // end for yyj3
+	r.ReadMapEnd()
+}
+
+func (x *waitAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperDecoder(d)
+	_, _, _ = h, z, r
+	var yyj12 int
+	var yyb12 bool
+	var yyhl12 bool = l >= 0
+	yyj12++
+	if yyhl12 {
+		yyb12 = yyj12 > l
+	} else {
+		yyb12 = r.CheckBreak()
+	}
+	if yyb12 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.Prefix = ""
+	} else {
+		yyv13 := &x.Prefix
+		yym14 := z.DecBinary()
+		_ = yym14
+		if false {
+		} else {
+			*((*string)(yyv13)) = r.DecodeString()
+		}
+	}
+	yyj12++
+	if yyhl12 {
+		yyb12 = yyj12 > l
+	} else {
+		yyb12 = r.CheckBreak()
+	}
+	if yyb12 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.Key = ""
+	} else {
+		yyv15 := &x.Key
+		yym16 := z.DecBinary()
+		_ = yym16
+		if false {
+		} else {
+			*((*string)(yyv15)) = r.DecodeString()
+		}
+	}
+	yyj12++
+	if yyhl12 {
+		yyb12 = yyj12 > l
+	} else {
+		yyb12 = r.CheckBreak()
+	}
+	if yyb12 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.WaitIndex = 0
+	} else {
+		yyv17 := &x.WaitIndex
+		yym18 := z.DecBinary()
+		_ = yym18
+		if false {
+		} else {
+			*((*uint64)(yyv17)) = uint64(r.DecodeUint(64))
+		}
+	}
+	yyj12++
+	if yyhl12 {
+		yyb12 = yyj12 > l
+	} else {
+		yyb12 = r.CheckBreak()
+	}
+	if yyb12 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.Recursive = false
+	} else {
+		yyv19 := &x.Recursive
+		yym20 := z.DecBinary()
+		_ = yym20
+		if false {
+		} else {
+			*((*bool)(yyv19)) = r.DecodeBool()
+		}
+	}
+	for {
+		yyj12++
+		if yyhl12 {
+			yyb12 = yyj12 > l
+		} else {
+			yyb12 = r.CheckBreak()
+		}
+		if yyb12 {
+			break
+		}
+		r.ReadArrayElem()
+		z.DecStructFieldNotFound(yyj12-1, "")
+	}
+	r.ReadArrayEnd()
+}
+
+func (x *setAction) CodecEncodeSelf(e *codec1978.Encoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperEncoder(e)
+	_, _, _ = h, z, r
+	if x == nil {
+		r.EncodeNil()
+	} else {
+		yym1 := z.EncBinary()
+		_ = yym1
+		if false {
+		} else if z.HasExtensions() && z.EncExt(x) {
+		} else {
+			yysep2 := !z.EncBinary()
+			yy2arr2 := z.EncBasicHandle().StructToArray
+			_, _ = yysep2, yy2arr2
+			const yyr2 bool = false
+			if yyr2 || yy2arr2 {
+				r.WriteArrayStart(10)
+			} else {
+				r.WriteMapStart(10)
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				yym4 := z.EncBinary()
+				_ = yym4
+				if false {
+				} else {
+					r.EncodeString(codecSelferC_UTF87612, string(x.Prefix))
+				}
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("Prefix"))
+				r.WriteMapElemValue()
+				yym5 := z.EncBinary()
+				_ = yym5
+				if false {
+				} else {
+					r.EncodeString(codecSelferC_UTF87612, string(x.Prefix))
+				}
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				yym7 := z.EncBinary()
+				_ = yym7
+				if false {
+				} else {
+					r.EncodeString(codecSelferC_UTF87612, string(x.Key))
+				}
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("Key"))
+				r.WriteMapElemValue()
+				yym8 := z.EncBinary()
+				_ = yym8
+				if false {
+				} else {
+					r.EncodeString(codecSelferC_UTF87612, string(x.Key))
+				}
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				yym10 := z.EncBinary()
+				_ = yym10
+				if false {
+				} else {
+					r.EncodeString(codecSelferC_UTF87612, string(x.Value))
+				}
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("Value"))
+				r.WriteMapElemValue()
+				yym11 := z.EncBinary()
+				_ = yym11
+				if false {
+				} else {
+					r.EncodeString(codecSelferC_UTF87612, string(x.Value))
+				}
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				yym13 := z.EncBinary()
+				_ = yym13
+				if false {
+				} else {
+					r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue))
+				}
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("PrevValue"))
+				r.WriteMapElemValue()
+				yym14 := z.EncBinary()
+				_ = yym14
+				if false {
+				} else {
+					r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue))
+				}
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				yym16 := z.EncBinary()
+				_ = yym16
+				if false {
+				} else {
+					r.EncodeUint(uint64(x.PrevIndex))
+				}
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("PrevIndex"))
+				r.WriteMapElemValue()
+				yym17 := z.EncBinary()
+				_ = yym17
+				if false {
+				} else {
+					r.EncodeUint(uint64(x.PrevIndex))
+				}
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				x.PrevExist.CodecEncodeSelf(e)
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("PrevExist"))
+				r.WriteMapElemValue()
+				x.PrevExist.CodecEncodeSelf(e)
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				yym22 := z.EncBinary()
+				_ = yym22
+				if false {
+				} else if z.HasExtensions() && z.EncExt(x.TTL) {
+				} else {
+					r.EncodeInt(int64(x.TTL))
+				}
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("TTL"))
+				r.WriteMapElemValue()
+				yym23 := z.EncBinary()
+				_ = yym23
+				if false {
+				} else if z.HasExtensions() && z.EncExt(x.TTL) {
+				} else {
+					r.EncodeInt(int64(x.TTL))
+				}
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				yym25 := z.EncBinary()
+				_ = yym25
+				if false {
+				} else {
+					r.EncodeBool(bool(x.Refresh))
+				}
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("Refresh"))
+				r.WriteMapElemValue()
+				yym26 := z.EncBinary()
+				_ = yym26
+				if false {
+				} else {
+					r.EncodeBool(bool(x.Refresh))
+				}
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				yym28 := z.EncBinary()
+				_ = yym28
+				if false {
+				} else {
+					r.EncodeBool(bool(x.Dir))
+				}
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("Dir"))
+				r.WriteMapElemValue()
+				yym29 := z.EncBinary()
+				_ = yym29
+				if false {
+				} else {
+					r.EncodeBool(bool(x.Dir))
+				}
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				yym31 := z.EncBinary()
+				_ = yym31
+				if false {
+				} else {
+					r.EncodeBool(bool(x.NoValueOnSuccess))
+				}
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("NoValueOnSuccess"))
+				r.WriteMapElemValue()
+				yym32 := z.EncBinary()
+				_ = yym32
+				if false {
+				} else {
+					r.EncodeBool(bool(x.NoValueOnSuccess))
+				}
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayEnd()
+			} else {
+				r.WriteMapEnd()
+			}
+		}
+	}
+}
+
+func (x *setAction) CodecDecodeSelf(d *codec1978.Decoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperDecoder(d)
+	_, _, _ = h, z, r
+	yym1 := z.DecBinary()
+	_ = yym1
+	if false {
+	} else if z.HasExtensions() && z.DecExt(x) {
+	} else {
+		yyct2 := r.ContainerType()
+		if yyct2 == codecSelferValueTypeMap7612 {
+			yyl2 := r.ReadMapStart()
+			if yyl2 == 0 {
+				r.ReadMapEnd()
+			} else {
+				x.codecDecodeSelfFromMap(yyl2, d)
+			}
+		} else if yyct2 == codecSelferValueTypeArray7612 {
+			yyl2 := r.ReadArrayStart()
+			if yyl2 == 0 {
+				r.ReadArrayEnd()
+			} else {
+				x.codecDecodeSelfFromArray(yyl2, d)
+			}
+		} else {
+			panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612)
+		}
+	}
+}
+
+func (x *setAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperDecoder(d)
+	_, _, _ = h, z, r
+	var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+	_ = yys3Slc
+	var yyhl3 bool = l >= 0
+	for yyj3 := 0; ; yyj3++ {
+		if yyhl3 {
+			if yyj3 >= l {
+				break
+			}
+		} else {
+			if r.CheckBreak() {
+				break
+			}
+		}
+		r.ReadMapElemKey()
+		yys3Slc = r.DecodeStringAsBytes()
+		yys3 := string(yys3Slc)
+		r.ReadMapElemValue()
+		switch yys3 {
+		case "Prefix":
+			if r.TryDecodeAsNil() {
+				x.Prefix = ""
+			} else {
+				yyv4 := &x.Prefix
+				yym5 := z.DecBinary()
+				_ = yym5
+				if false {
+				} else {
+					*((*string)(yyv4)) = r.DecodeString()
+				}
+			}
+		case "Key":
+			if r.TryDecodeAsNil() {
+				x.Key = ""
+			} else {
+				yyv6 := &x.Key
+				yym7 := z.DecBinary()
+				_ = yym7
+				if false {
+				} else {
+					*((*string)(yyv6)) = r.DecodeString()
+				}
+			}
+		case "Value":
+			if r.TryDecodeAsNil() {
+				x.Value = ""
+			} else {
+				yyv8 := &x.Value
+				yym9 := z.DecBinary()
+				_ = yym9
+				if false {
+				} else {
+					*((*string)(yyv8)) = r.DecodeString()
+				}
+			}
+		case "PrevValue":
+			if r.TryDecodeAsNil() {
+				x.PrevValue = ""
+			} else {
+				yyv10 := &x.PrevValue
+				yym11 := z.DecBinary()
+				_ = yym11
+				if false {
+				} else {
+					*((*string)(yyv10)) = r.DecodeString()
+				}
+			}
+		case "PrevIndex":
+			if r.TryDecodeAsNil() {
+				x.PrevIndex = 0
+			} else {
+				yyv12 := &x.PrevIndex
+				yym13 := z.DecBinary()
+				_ = yym13
+				if false {
+				} else {
+					*((*uint64)(yyv12)) = uint64(r.DecodeUint(64))
+				}
+			}
+		case "PrevExist":
+			if r.TryDecodeAsNil() {
+				x.PrevExist = ""
+			} else {
+				yyv14 := &x.PrevExist
+				yyv14.CodecDecodeSelf(d)
+			}
+		case "TTL":
+			if r.TryDecodeAsNil() {
+				x.TTL = 0
+			} else {
+				yyv15 := &x.TTL
+				yym16 := z.DecBinary()
+				_ = yym16
+				if false {
+				} else if z.HasExtensions() && z.DecExt(yyv15) {
+				} else {
+					*((*int64)(yyv15)) = int64(r.DecodeInt(64))
+				}
+			}
+		case "Refresh":
+			if r.TryDecodeAsNil() {
+				x.Refresh = false
+			} else {
+				yyv17 := &x.Refresh
+				yym18 := z.DecBinary()
+				_ = yym18
+				if false {
+				} else {
+					*((*bool)(yyv17)) = r.DecodeBool()
+				}
+			}
+		case "Dir":
+			if r.TryDecodeAsNil() {
+				x.Dir = false
+			} else {
+				yyv19 := &x.Dir
+				yym20 := z.DecBinary()
+				_ = yym20
+				if false {
+				} else {
+					*((*bool)(yyv19)) = r.DecodeBool()
+				}
+			}
+		case "NoValueOnSuccess":
+			if r.TryDecodeAsNil() {
+				x.NoValueOnSuccess = false
+			} else {
+				yyv21 := &x.NoValueOnSuccess
+				yym22 := z.DecBinary()
+				_ = yym22
+				if false {
+				} else {
+					*((*bool)(yyv21)) = r.DecodeBool()
+				}
+			}
+		default:
+			z.DecStructFieldNotFound(-1, yys3)
+		} // end switch yys3
+	} // end for yyj3
+	r.ReadMapEnd()
+}
+
+func (x *setAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperDecoder(d)
+	_, _, _ = h, z, r
+	var yyj23 int
+	var yyb23 bool
+	var yyhl23 bool = l >= 0
+	yyj23++
+	if yyhl23 {
+		yyb23 = yyj23 > l
+	} else {
+		yyb23 = r.CheckBreak()
+	}
+	if yyb23 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.Prefix = ""
+	} else {
+		yyv24 := &x.Prefix
+		yym25 := z.DecBinary()
+		_ = yym25
+		if false {
+		} else {
+			*((*string)(yyv24)) = r.DecodeString()
+		}
+	}
+	yyj23++
+	if yyhl23 {
+		yyb23 = yyj23 > l
+	} else {
+		yyb23 = r.CheckBreak()
+	}
+	if yyb23 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.Key = ""
+	} else {
+		yyv26 := &x.Key
+		yym27 := z.DecBinary()
+		_ = yym27
+		if false {
+		} else {
+			*((*string)(yyv26)) = r.DecodeString()
+		}
+	}
+	yyj23++
+	if yyhl23 {
+		yyb23 = yyj23 > l
+	} else {
+		yyb23 = r.CheckBreak()
+	}
+	if yyb23 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.Value = ""
+	} else {
+		yyv28 := &x.Value
+		yym29 := z.DecBinary()
+		_ = yym29
+		if false {
+		} else {
+			*((*string)(yyv28)) = r.DecodeString()
+		}
+	}
+	yyj23++
+	if yyhl23 {
+		yyb23 = yyj23 > l
+	} else {
+		yyb23 = r.CheckBreak()
+	}
+	if yyb23 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.PrevValue = ""
+	} else {
+		yyv30 := &x.PrevValue
+		yym31 := z.DecBinary()
+		_ = yym31
+		if false {
+		} else {
+			*((*string)(yyv30)) = r.DecodeString()
+		}
+	}
+	yyj23++
+	if yyhl23 {
+		yyb23 = yyj23 > l
+	} else {
+		yyb23 = r.CheckBreak()
+	}
+	if yyb23 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.PrevIndex = 0
+	} else {
+		yyv32 := &x.PrevIndex
+		yym33 := z.DecBinary()
+		_ = yym33
+		if false {
+		} else {
+			*((*uint64)(yyv32)) = uint64(r.DecodeUint(64))
+		}
+	}
+	yyj23++
+	if yyhl23 {
+		yyb23 = yyj23 > l
+	} else {
+		yyb23 = r.CheckBreak()
+	}
+	if yyb23 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.PrevExist = ""
+	} else {
+		yyv34 := &x.PrevExist
+		yyv34.CodecDecodeSelf(d)
+	}
+	yyj23++
+	if yyhl23 {
+		yyb23 = yyj23 > l
+	} else {
+		yyb23 = r.CheckBreak()
+	}
+	if yyb23 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.TTL = 0
+	} else {
+		yyv35 := &x.TTL
+		yym36 := z.DecBinary()
+		_ = yym36
+		if false {
+		} else if z.HasExtensions() && z.DecExt(yyv35) {
+		} else {
+			*((*int64)(yyv35)) = int64(r.DecodeInt(64))
+		}
+	}
+	yyj23++
+	if yyhl23 {
+		yyb23 = yyj23 > l
+	} else {
+		yyb23 = r.CheckBreak()
+	}
+	if yyb23 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.Refresh = false
+	} else {
+		yyv37 := &x.Refresh
+		yym38 := z.DecBinary()
+		_ = yym38
+		if false {
+		} else {
+			*((*bool)(yyv37)) = r.DecodeBool()
+		}
+	}
+	yyj23++
+	if yyhl23 {
+		yyb23 = yyj23 > l
+	} else {
+		yyb23 = r.CheckBreak()
+	}
+	if yyb23 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.Dir = false
+	} else {
+		yyv39 := &x.Dir
+		yym40 := z.DecBinary()
+		_ = yym40
+		if false {
+		} else {
+			*((*bool)(yyv39)) = r.DecodeBool()
+		}
+	}
+	yyj23++
+	if yyhl23 {
+		yyb23 = yyj23 > l
+	} else {
+		yyb23 = r.CheckBreak()
+	}
+	if yyb23 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.NoValueOnSuccess = false
+	} else {
+		yyv41 := &x.NoValueOnSuccess
+		yym42 := z.DecBinary()
+		_ = yym42
+		if false {
+		} else {
+			*((*bool)(yyv41)) = r.DecodeBool()
+		}
+	}
+	for {
+		yyj23++
+		if yyhl23 {
+			yyb23 = yyj23 > l
+		} else {
+			yyb23 = r.CheckBreak()
+		}
+		if yyb23 {
+			break
+		}
+		r.ReadArrayElem()
+		z.DecStructFieldNotFound(yyj23-1, "")
+	}
+	r.ReadArrayEnd()
+}
+
+func (x *deleteAction) CodecEncodeSelf(e *codec1978.Encoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperEncoder(e)
+	_, _, _ = h, z, r
+	if x == nil {
+		r.EncodeNil()
+	} else {
+		yym1 := z.EncBinary()
+		_ = yym1
+		if false {
+		} else if z.HasExtensions() && z.EncExt(x) {
+		} else {
+			yysep2 := !z.EncBinary()
+			yy2arr2 := z.EncBasicHandle().StructToArray
+			_, _ = yysep2, yy2arr2
+			const yyr2 bool = false
+			if yyr2 || yy2arr2 {
+				r.WriteArrayStart(6)
+			} else {
+				r.WriteMapStart(6)
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				yym4 := z.EncBinary()
+				_ = yym4
+				if false {
+				} else {
+					r.EncodeString(codecSelferC_UTF87612, string(x.Prefix))
+				}
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("Prefix"))
+				r.WriteMapElemValue()
+				yym5 := z.EncBinary()
+				_ = yym5
+				if false {
+				} else {
+					r.EncodeString(codecSelferC_UTF87612, string(x.Prefix))
+				}
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				yym7 := z.EncBinary()
+				_ = yym7
+				if false {
+				} else {
+					r.EncodeString(codecSelferC_UTF87612, string(x.Key))
+				}
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("Key"))
+				r.WriteMapElemValue()
+				yym8 := z.EncBinary()
+				_ = yym8
+				if false {
+				} else {
+					r.EncodeString(codecSelferC_UTF87612, string(x.Key))
+				}
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				yym10 := z.EncBinary()
+				_ = yym10
+				if false {
+				} else {
+					r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue))
+				}
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("PrevValue"))
+				r.WriteMapElemValue()
+				yym11 := z.EncBinary()
+				_ = yym11
+				if false {
+				} else {
+					r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue))
+				}
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				yym13 := z.EncBinary()
+				_ = yym13
+				if false {
+				} else {
+					r.EncodeUint(uint64(x.PrevIndex))
+				}
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("PrevIndex"))
+				r.WriteMapElemValue()
+				yym14 := z.EncBinary()
+				_ = yym14
+				if false {
+				} else {
+					r.EncodeUint(uint64(x.PrevIndex))
+				}
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				yym16 := z.EncBinary()
+				_ = yym16
+				if false {
+				} else {
+					r.EncodeBool(bool(x.Dir))
+				}
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("Dir"))
+				r.WriteMapElemValue()
+				yym17 := z.EncBinary()
+				_ = yym17
+				if false {
+				} else {
+					r.EncodeBool(bool(x.Dir))
+				}
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				yym19 := z.EncBinary()
+				_ = yym19
+				if false {
+				} else {
+					r.EncodeBool(bool(x.Recursive))
+				}
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("Recursive"))
+				r.WriteMapElemValue()
+				yym20 := z.EncBinary()
+				_ = yym20
+				if false {
+				} else {
+					r.EncodeBool(bool(x.Recursive))
+				}
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayEnd()
+			} else {
+				r.WriteMapEnd()
+			}
+		}
+	}
+}
+
+func (x *deleteAction) CodecDecodeSelf(d *codec1978.Decoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperDecoder(d)
+	_, _, _ = h, z, r
+	yym1 := z.DecBinary()
+	_ = yym1
+	if false {
+	} else if z.HasExtensions() && z.DecExt(x) {
+	} else {
+		yyct2 := r.ContainerType()
+		if yyct2 == codecSelferValueTypeMap7612 {
+			yyl2 := r.ReadMapStart()
+			if yyl2 == 0 {
+				r.ReadMapEnd()
+			} else {
+				x.codecDecodeSelfFromMap(yyl2, d)
+			}
+		} else if yyct2 == codecSelferValueTypeArray7612 {
+			yyl2 := r.ReadArrayStart()
+			if yyl2 == 0 {
+				r.ReadArrayEnd()
+			} else {
+				x.codecDecodeSelfFromArray(yyl2, d)
+			}
+		} else {
+			panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612)
+		}
+	}
+}
+
+func (x *deleteAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperDecoder(d)
+	_, _, _ = h, z, r
+	var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+	_ = yys3Slc
+	var yyhl3 bool = l >= 0
+	for yyj3 := 0; ; yyj3++ {
+		if yyhl3 {
+			if yyj3 >= l {
+				break
+			}
+		} else {
+			if r.CheckBreak() {
+				break
+			}
+		}
+		r.ReadMapElemKey()
+		yys3Slc = r.DecodeStringAsBytes()
+		yys3 := string(yys3Slc)
+		r.ReadMapElemValue()
+		switch yys3 {
+		case "Prefix":
+			if r.TryDecodeAsNil() {
+				x.Prefix = ""
+			} else {
+				yyv4 := &x.Prefix
+				yym5 := z.DecBinary()
+				_ = yym5
+				if false {
+				} else {
+					*((*string)(yyv4)) = r.DecodeString()
+				}
+			}
+		case "Key":
+			if r.TryDecodeAsNil() {
+				x.Key = ""
+			} else {
+				yyv6 := &x.Key
+				yym7 := z.DecBinary()
+				_ = yym7
+				if false {
+				} else {
+					*((*string)(yyv6)) = r.DecodeString()
+				}
+			}
+		case "PrevValue":
+			if r.TryDecodeAsNil() {
+				x.PrevValue = ""
+			} else {
+				yyv8 := &x.PrevValue
+				yym9 := z.DecBinary()
+				_ = yym9
+				if false {
+				} else {
+					*((*string)(yyv8)) = r.DecodeString()
+				}
+			}
+		case "PrevIndex":
+			if r.TryDecodeAsNil() {
+				x.PrevIndex = 0
+			} else {
+				yyv10 := &x.PrevIndex
+				yym11 := z.DecBinary()
+				_ = yym11
+				if false {
+				} else {
+					*((*uint64)(yyv10)) = uint64(r.DecodeUint(64))
+				}
+			}
+		case "Dir":
+			if r.TryDecodeAsNil() {
+				x.Dir = false
+			} else {
+				yyv12 := &x.Dir
+				yym13 := z.DecBinary()
+				_ = yym13
+				if false {
+				} else {
+					*((*bool)(yyv12)) = r.DecodeBool()
+				}
+			}
+		case "Recursive":
+			if r.TryDecodeAsNil() {
+				x.Recursive = false
+			} else {
+				yyv14 := &x.Recursive
+				yym15 := z.DecBinary()
+				_ = yym15
+				if false {
+				} else {
+					*((*bool)(yyv14)) = r.DecodeBool()
+				}
+			}
+		default:
+			z.DecStructFieldNotFound(-1, yys3)
+		} // end switch yys3
+	} // end for yyj3
+	r.ReadMapEnd()
+}
+
+func (x *deleteAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperDecoder(d)
+	_, _, _ = h, z, r
+	var yyj16 int
+	var yyb16 bool
+	var yyhl16 bool = l >= 0
+	yyj16++
+	if yyhl16 {
+		yyb16 = yyj16 > l
+	} else {
+		yyb16 = r.CheckBreak()
+	}
+	if yyb16 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.Prefix = ""
+	} else {
+		yyv17 := &x.Prefix
+		yym18 := z.DecBinary()
+		_ = yym18
+		if false {
+		} else {
+			*((*string)(yyv17)) = r.DecodeString()
+		}
+	}
+	yyj16++
+	if yyhl16 {
+		yyb16 = yyj16 > l
+	} else {
+		yyb16 = r.CheckBreak()
+	}
+	if yyb16 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.Key = ""
+	} else {
+		yyv19 := &x.Key
+		yym20 := z.DecBinary()
+		_ = yym20
+		if false {
+		} else {
+			*((*string)(yyv19)) = r.DecodeString()
+		}
+	}
+	yyj16++
+	if yyhl16 {
+		yyb16 = yyj16 > l
+	} else {
+		yyb16 = r.CheckBreak()
+	}
+	if yyb16 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.PrevValue = ""
+	} else {
+		yyv21 := &x.PrevValue
+		yym22 := z.DecBinary()
+		_ = yym22
+		if false {
+		} else {
+			*((*string)(yyv21)) = r.DecodeString()
+		}
+	}
+	yyj16++
+	if yyhl16 {
+		yyb16 = yyj16 > l
+	} else {
+		yyb16 = r.CheckBreak()
+	}
+	if yyb16 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.PrevIndex = 0
+	} else {
+		yyv23 := &x.PrevIndex
+		yym24 := z.DecBinary()
+		_ = yym24
+		if false {
+		} else {
+			*((*uint64)(yyv23)) = uint64(r.DecodeUint(64))
+		}
+	}
+	yyj16++
+	if yyhl16 {
+		yyb16 = yyj16 > l
+	} else {
+		yyb16 = r.CheckBreak()
+	}
+	if yyb16 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.Dir = false
+	} else {
+		yyv25 := &x.Dir
+		yym26 := z.DecBinary()
+		_ = yym26
+		if false {
+		} else {
+			*((*bool)(yyv25)) = r.DecodeBool()
+		}
+	}
+	yyj16++
+	if yyhl16 {
+		yyb16 = yyj16 > l
+	} else {
+		yyb16 = r.CheckBreak()
+	}
+	if yyb16 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.Recursive = false
+	} else {
+		yyv27 := &x.Recursive
+		yym28 := z.DecBinary()
+		_ = yym28
+		if false {
+		} else {
+			*((*bool)(yyv27)) = r.DecodeBool()
+		}
+	}
+	for {
+		yyj16++
+		if yyhl16 {
+			yyb16 = yyj16 > l
+		} else {
+			yyb16 = r.CheckBreak()
+		}
+		if yyb16 {
+			break
+		}
+		r.ReadArrayElem()
+		z.DecStructFieldNotFound(yyj16-1, "")
+	}
+	r.ReadArrayEnd()
+}
+
+func (x *createInOrderAction) CodecEncodeSelf(e *codec1978.Encoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperEncoder(e)
+	_, _, _ = h, z, r
+	if x == nil {
+		r.EncodeNil()
+	} else {
+		yym1 := z.EncBinary()
+		_ = yym1
+		if false {
+		} else if z.HasExtensions() && z.EncExt(x) {
+		} else {
+			yysep2 := !z.EncBinary()
+			yy2arr2 := z.EncBasicHandle().StructToArray
+			_, _ = yysep2, yy2arr2
+			const yyr2 bool = false
+			if yyr2 || yy2arr2 {
+				r.WriteArrayStart(4)
+			} else {
+				r.WriteMapStart(4)
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				yym4 := z.EncBinary()
+				_ = yym4
+				if false {
+				} else {
+					r.EncodeString(codecSelferC_UTF87612, string(x.Prefix))
+				}
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("Prefix"))
+				r.WriteMapElemValue()
+				yym5 := z.EncBinary()
+				_ = yym5
+				if false {
+				} else {
+					r.EncodeString(codecSelferC_UTF87612, string(x.Prefix))
+				}
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				yym7 := z.EncBinary()
+				_ = yym7
+				if false {
+				} else {
+					r.EncodeString(codecSelferC_UTF87612, string(x.Dir))
+				}
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("Dir"))
+				r.WriteMapElemValue()
+				yym8 := z.EncBinary()
+				_ = yym8
+				if false {
+				} else {
+					r.EncodeString(codecSelferC_UTF87612, string(x.Dir))
+				}
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				yym10 := z.EncBinary()
+				_ = yym10
+				if false {
+				} else {
+					r.EncodeString(codecSelferC_UTF87612, string(x.Value))
+				}
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("Value"))
+				r.WriteMapElemValue()
+				yym11 := z.EncBinary()
+				_ = yym11
+				if false {
+				} else {
+					r.EncodeString(codecSelferC_UTF87612, string(x.Value))
+				}
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayElem()
+				yym13 := z.EncBinary()
+				_ = yym13
+				if false {
+				} else if z.HasExtensions() && z.EncExt(x.TTL) {
+				} else {
+					r.EncodeInt(int64(x.TTL))
+				}
+			} else {
+				r.WriteMapElemKey()
+				r.EncodeString(codecSelferC_UTF87612, string("TTL"))
+				r.WriteMapElemValue()
+				yym14 := z.EncBinary()
+				_ = yym14
+				if false {
+				} else if z.HasExtensions() && z.EncExt(x.TTL) {
+				} else {
+					r.EncodeInt(int64(x.TTL))
+				}
+			}
+			if yyr2 || yy2arr2 {
+				r.WriteArrayEnd()
+			} else {
+				r.WriteMapEnd()
+			}
+		}
+	}
+}
+
+func (x *createInOrderAction) CodecDecodeSelf(d *codec1978.Decoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperDecoder(d)
+	_, _, _ = h, z, r
+	yym1 := z.DecBinary()
+	_ = yym1
+	if false {
+	} else if z.HasExtensions() && z.DecExt(x) {
+	} else {
+		yyct2 := r.ContainerType()
+		if yyct2 == codecSelferValueTypeMap7612 {
+			yyl2 := r.ReadMapStart()
+			if yyl2 == 0 {
+				r.ReadMapEnd()
+			} else {
+				x.codecDecodeSelfFromMap(yyl2, d)
+			}
+		} else if yyct2 == codecSelferValueTypeArray7612 {
+			yyl2 := r.ReadArrayStart()
+			if yyl2 == 0 {
+				r.ReadArrayEnd()
+			} else {
+				x.codecDecodeSelfFromArray(yyl2, d)
+			}
+		} else {
+			panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612)
+		}
+	}
+}
+
+func (x *createInOrderAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperDecoder(d)
+	_, _, _ = h, z, r
+	var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+	_ = yys3Slc
+	var yyhl3 bool = l >= 0
+	for yyj3 := 0; ; yyj3++ {
+		if yyhl3 {
+			if yyj3 >= l {
+				break
+			}
+		} else {
+			if r.CheckBreak() {
+				break
+			}
+		}
+		r.ReadMapElemKey()
+		yys3Slc = r.DecodeStringAsBytes()
+		yys3 := string(yys3Slc)
+		r.ReadMapElemValue()
+		switch yys3 {
+		case "Prefix":
+			if r.TryDecodeAsNil() {
+				x.Prefix = ""
+			} else {
+				yyv4 := &x.Prefix
+				yym5 := z.DecBinary()
+				_ = yym5
+				if false {
+				} else {
+					*((*string)(yyv4)) = r.DecodeString()
+				}
+			}
+		case "Dir":
+			if r.TryDecodeAsNil() {
+				x.Dir = ""
+			} else {
+				yyv6 := &x.Dir
+				yym7 := z.DecBinary()
+				_ = yym7
+				if false {
+				} else {
+					*((*string)(yyv6)) = r.DecodeString()
+				}
+			}
+		case "Value":
+			if r.TryDecodeAsNil() {
+				x.Value = ""
+			} else {
+				yyv8 := &x.Value
+				yym9 := z.DecBinary()
+				_ = yym9
+				if false {
+				} else {
+					*((*string)(yyv8)) = r.DecodeString()
+				}
+			}
+		case "TTL":
+			if r.TryDecodeAsNil() {
+				x.TTL = 0
+			} else {
+				yyv10 := &x.TTL
+				yym11 := z.DecBinary()
+				_ = yym11
+				if false {
+				} else if z.HasExtensions() && z.DecExt(yyv10) {
+				} else {
+					*((*int64)(yyv10)) = int64(r.DecodeInt(64))
+				}
+			}
+		default:
+			z.DecStructFieldNotFound(-1, yys3)
+		} // end switch yys3
+	} // end for yyj3
+	r.ReadMapEnd()
+}
+
+func (x *createInOrderAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperDecoder(d)
+	_, _, _ = h, z, r
+	var yyj12 int
+	var yyb12 bool
+	var yyhl12 bool = l >= 0
+	yyj12++
+	if yyhl12 {
+		yyb12 = yyj12 > l
+	} else {
+		yyb12 = r.CheckBreak()
+	}
+	if yyb12 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.Prefix = ""
+	} else {
+		yyv13 := &x.Prefix
+		yym14 := z.DecBinary()
+		_ = yym14
+		if false {
+		} else {
+			*((*string)(yyv13)) = r.DecodeString()
+		}
+	}
+	yyj12++
+	if yyhl12 {
+		yyb12 = yyj12 > l
+	} else {
+		yyb12 = r.CheckBreak()
+	}
+	if yyb12 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.Dir = ""
+	} else {
+		yyv15 := &x.Dir
+		yym16 := z.DecBinary()
+		_ = yym16
+		if false {
+		} else {
+			*((*string)(yyv15)) = r.DecodeString()
+		}
+	}
+	yyj12++
+	if yyhl12 {
+		yyb12 = yyj12 > l
+	} else {
+		yyb12 = r.CheckBreak()
+	}
+	if yyb12 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.Value = ""
+	} else {
+		yyv17 := &x.Value
+		yym18 := z.DecBinary()
+		_ = yym18
+		if false {
+		} else {
+			*((*string)(yyv17)) = r.DecodeString()
+		}
+	}
+	yyj12++
+	if yyhl12 {
+		yyb12 = yyj12 > l
+	} else {
+		yyb12 = r.CheckBreak()
+	}
+	if yyb12 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.TTL = 0
+	} else {
+		yyv19 := &x.TTL
+		yym20 := z.DecBinary()
+		_ = yym20
+		if false {
+		} else if z.HasExtensions() && z.DecExt(yyv19) {
+		} else {
+			*((*int64)(yyv19)) = int64(r.DecodeInt(64))
+		}
+	}
+	for {
+		yyj12++
+		if yyhl12 {
+			yyb12 = yyj12 > l
+		} else {
+			yyb12 = r.CheckBreak()
+		}
+		if yyb12 {
+			break
+		}
+		r.ReadArrayElem()
+		z.DecStructFieldNotFound(yyj12-1, "")
+	}
+	r.ReadArrayEnd()
+}
+
+func (x codecSelfer7612) encNodes(v Nodes, e *codec1978.Encoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperEncoder(e)
+	_, _, _ = h, z, r
+	r.WriteArrayStart(len(v))
+	for _, yyv1 := range v {
+		r.WriteArrayElem()
+		if yyv1 == nil {
+			r.EncodeNil()
+		} else {
+			yyv1.CodecEncodeSelf(e)
+		}
+	}
+	r.WriteArrayEnd()
+}
+
+func (x codecSelfer7612) decNodes(v *Nodes, d *codec1978.Decoder) {
+	var h codecSelfer7612
+	z, r := codec1978.GenHelperDecoder(d)
+	_, _, _ = h, z, r
+
+	yyv1 := *v
+	yyh1, yyl1 := z.DecSliceHelperStart()
+	var yyc1 bool
+	_ = yyc1
+	if yyl1 == 0 {
+		if yyv1 == nil {
+			yyv1 = []*Node{}
+			yyc1 = true
+		} else if len(yyv1) != 0 {
+			yyv1 = yyv1[:0]
+			yyc1 = true
+		}
+	} else {
+		yyhl1 := yyl1 > 0
+		var yyrl1 int
+		_ = yyrl1
+		if yyhl1 {
+			if yyl1 > cap(yyv1) {
+				yyrl1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8)
+				if yyrl1 <= cap(yyv1) {
+					yyv1 = yyv1[:yyrl1]
+				} else {
+					yyv1 = make([]*Node, yyrl1)
+				}
+				yyc1 = true
+			} else if yyl1 != len(yyv1) {
+				yyv1 = yyv1[:yyl1]
+				yyc1 = true
+			}
+		}
+		var yyj1 int
+		// var yydn1 bool
+		for ; (yyhl1 && yyj1 < yyl1) || !(yyhl1 || r.CheckBreak()); yyj1++ {
+			if yyj1 == 0 && len(yyv1) == 0 {
+				if yyhl1 {
+					yyrl1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8)
+				} else {
+					yyrl1 = 8
+				}
+				yyv1 = make([]*Node, yyrl1)
+				yyc1 = true
+			}
+			yyh1.ElemContainerState(yyj1)
+			// yydn1 = r.TryDecodeAsNil()
+
+			// if indefinite, etc, then expand the slice if necessary
+			var yydb1 bool
+			if yyj1 >= len(yyv1) {
+				yyv1 = append(yyv1, nil)
+				yyc1 = true
+
+			}
+			if yydb1 {
+				z.DecSwallow()
+			} else {
+				if r.TryDecodeAsNil() {
+					if yyv1[yyj1] != nil {
+						*yyv1[yyj1] = Node{}
+					}
+				} else {
+					if yyv1[yyj1] == nil {
+						yyv1[yyj1] = new(Node)
+					}
+					yyw2 := yyv1[yyj1]
+					yyw2.CodecDecodeSelf(d)
+				}
+
+			}
+
+		}
+		if yyj1 < len(yyv1) {
+			yyv1 = yyv1[:yyj1]
+			yyc1 = true
+		} else if yyj1 == 0 && yyv1 == nil {
+			yyv1 = make([]*Node, 0)
+			yyc1 = true
+		}
+	}
+	yyh1.End()
+	if yyc1 {
+		*v = yyv1
+	}
+
+}
diff --git a/vendor/github.com/coreos/etcd/client/keys.go b/vendor/github.com/coreos/etcd/client/keys.go
new file mode 100644
index 0000000..8b9fd3f
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/client/keys.go
@@ -0,0 +1,681 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+//go:generate codecgen -d 1819 -r "Node|Response|Nodes" -o keys.generated.go keys.go
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/coreos/etcd/pkg/pathutil"
+	"github.com/ugorji/go/codec"
+)
+
+const (
+	ErrorCodeKeyNotFound  = 100
+	ErrorCodeTestFailed   = 101
+	ErrorCodeNotFile      = 102
+	ErrorCodeNotDir       = 104
+	ErrorCodeNodeExist    = 105
+	ErrorCodeRootROnly    = 107
+	ErrorCodeDirNotEmpty  = 108
+	ErrorCodeUnauthorized = 110
+
+	ErrorCodePrevValueRequired = 201
+	ErrorCodeTTLNaN            = 202
+	ErrorCodeIndexNaN          = 203
+	ErrorCodeInvalidField      = 209
+	ErrorCodeInvalidForm       = 210
+
+	ErrorCodeRaftInternal = 300
+	ErrorCodeLeaderElect  = 301
+
+	ErrorCodeWatcherCleared    = 400
+	ErrorCodeEventIndexCleared = 401
+)
+
+type Error struct {
+	Code    int    `json:"errorCode"`
+	Message string `json:"message"`
+	Cause   string `json:"cause"`
+	Index   uint64 `json:"index"`
+}
+
+func (e Error) Error() string {
+	return fmt.Sprintf("%v: %v (%v) [%v]", e.Code, e.Message, e.Cause, e.Index)
+}
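
Editorial note: these codes mirror the etcd v2 HTTP error payload, and the server returns them wrapped in the Error type above. As an illustration (not part of the upstream file), a caller can distinguish a missing key from other failures by type-asserting the returned error; the helper below is a hypothetical sketch written from a consumer's point of view (import "github.com/coreos/etcd/client"):

func isKeyMissing(err error) bool {
	// Errors produced by this package are client.Error values, so a
	// plain type assertion recovers the structured code.
	cerr, ok := err.(client.Error)
	return ok && cerr.Code == client.ErrorCodeKeyNotFound
}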
+
+var (
+	ErrInvalidJSON = errors.New("client: response is invalid json. The endpoint is probably not a valid etcd cluster endpoint.")
+	ErrEmptyBody   = errors.New("client: response body is empty")
+)
+
+// PrevExistType is used to define an existence condition when setting
+// or deleting Nodes.
+type PrevExistType string
+
+const (
+	PrevIgnore  = PrevExistType("")
+	PrevExist   = PrevExistType("true")
+	PrevNoExist = PrevExistType("false")
+)
+
+var (
+	defaultV2KeysPrefix = "/v2/keys"
+)
+
+// NewKeysAPI builds a KeysAPI that interacts with etcd's key-value
+// API over HTTP.
+func NewKeysAPI(c Client) KeysAPI {
+	return NewKeysAPIWithPrefix(c, defaultV2KeysPrefix)
+}
+
+// NewKeysAPIWithPrefix acts like NewKeysAPI, but allows the caller
+// to provide a custom base URL path. This should only be used in
+// very rare cases.
+func NewKeysAPIWithPrefix(c Client, p string) KeysAPI {
+	return &httpKeysAPI{
+		client: c,
+		prefix: p,
+	}
+}
+
+type KeysAPI interface {
+	// Get retrieves a set of Nodes from etcd.
+	Get(ctx context.Context, key string, opts *GetOptions) (*Response, error)
+
+	// Set assigns a new value to a Node identified by a given key. The caller
+	// may define a set of conditions in the SetOptions. If SetOptions.Dir=true
+	// then value is ignored.
+	Set(ctx context.Context, key, value string, opts *SetOptions) (*Response, error)
+
+	// Delete removes a Node identified by the given key, optionally destroying
+	// all of its children as well. The caller may define a set of required
+	// conditions in a DeleteOptions object.
+	Delete(ctx context.Context, key string, opts *DeleteOptions) (*Response, error)
+
+	// Create is an alias for Set w/ PrevExist=false
+	Create(ctx context.Context, key, value string) (*Response, error)
+
+	// CreateInOrder is used to atomically create in-order keys within the given directory.
+	CreateInOrder(ctx context.Context, dir, value string, opts *CreateInOrderOptions) (*Response, error)
+
+	// Update is an alias for Set w/ PrevExist=true
+	Update(ctx context.Context, key, value string) (*Response, error)
+
+	// Watcher builds a new Watcher targeted at a specific Node identified
+	// by the given key. The Watcher may be configured at creation time
+	// through a WatcherOptions object. The returned Watcher is designed
+	// to emit events that happen to a Node, and optionally to its children.
+	Watcher(key string, opts *WatcherOptions) Watcher
+}
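
Editorial note: for orientation, a minimal consumer-side sketch of this interface, assuming an etcd v2 server at a hypothetical endpoint; error handling is abbreviated and the key and value are made up:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/client"
)

func main() {
	c, err := client.New(client.Config{
		Endpoints:               []string{"http://127.0.0.1:2379"}, // hypothetical endpoint
		HeaderTimeoutPerRequest: time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	kapi := client.NewKeysAPI(c)

	// Set writes a value; a nil *SetOptions means no preconditions.
	if _, err := kapi.Set(context.Background(), "/example/foo", "bar", nil); err != nil {
		log.Fatal(err)
	}
	// Get reads it back; Quorum: true asks for a linearizable read.
	resp, err := kapi.Get(context.Background(), "/example/foo", &client.GetOptions{Quorum: true})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.Node.Key, "=", resp.Node.Value)
}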
+
+type WatcherOptions struct {
+	// AfterIndex defines the index after-which the Watcher should
+	// start emitting events. For example, if a value of 5 is
+	// provided, the first event will have an index >= 6.
+	//
+	// Setting AfterIndex to 0 (default) means that the Watcher
+	// should start watching for events starting at the current
+	// index, whatever that may be.
+	AfterIndex uint64
+
+	// Recursive specifies whether or not the Watcher should emit
+	// events that occur in children of the given keyspace. If set
+	// to false (default), events will be limited to those that
+	// occur for the exact key.
+	Recursive bool
+}
+
+type CreateInOrderOptions struct {
+	// TTL defines a period of time after-which the Node should
+	// expire and no longer exist. Values <= 0 are ignored. Given
+	// that the zero-value is ignored, TTL cannot be used to set
+	// a TTL of 0.
+	TTL time.Duration
+}
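
Editorial note: in-order creation is the building block for queue-like patterns. A minimal sketch, with a hypothetical directory and `kapi`/`ctx` assumed from a setup like the one above:

// Each call appends a child with a server-assigned, strictly
// increasing key such as /example/queue/00000000000000000042.
resp, err := kapi.CreateInOrder(ctx, "/example/queue", "job-1", nil)
if err == nil {
	fmt.Println("enqueued as", resp.Node.Key)
}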
+
+type SetOptions struct {
+	// PrevValue specifies what the current value of the Node must
+	// be in order for the Set operation to succeed.
+	//
+	// Leaving this field empty means that the caller wishes to
+	// ignore the current value of the Node. This cannot be used
+	// to compare the Node's current value to an empty string.
+	//
+	// PrevValue is ignored if Dir=true
+	PrevValue string
+
+	// PrevIndex indicates what the current ModifiedIndex of the
+	// Node must be in order for the Set operation to succeed.
+	//
+	// If PrevIndex is set to 0 (default), no comparison is made.
+	PrevIndex uint64
+
+	// PrevExist specifies whether the Node must currently exist
+	// (PrevExist) or not (PrevNoExist). If the caller does not
+	// care about existence, set PrevExist to PrevIgnore, or simply
+	// leave it unset.
+	PrevExist PrevExistType
+
+	// TTL defines a period of time after-which the Node should
+	// expire and no longer exist. Values <= 0 are ignored. Given
+	// that the zero-value is ignored, TTL cannot be used to set
+	// a TTL of 0.
+	TTL time.Duration
+
+	// Refresh, when set to true, allows a TTL value to be updated
+	// without firing a watch or changing the node value. A value
+	// must not be provided when refreshing a key.
+	Refresh bool
+
+	// Dir specifies whether or not this Node should be created as a directory.
+	Dir bool
+
+	// NoValueOnSuccess specifies whether the response should omit the current
+	// value of the Node on success. If set, the response will only contain
+	// the current value when the request fails.
+	NoValueOnSuccess bool
+}
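
Editorial note: to make the precondition fields concrete, two hedged sketches, with hypothetical key and values and `kapi`/`ctx` as above:

// Compare-and-swap: succeeds only while the stored value is still "old";
// otherwise the server answers with ErrorCodeTestFailed (101).
_, err := kapi.Set(ctx, "/example/foo", "new", &client.SetOptions{
	PrevValue: "old",
})

// Create-only: succeeds only if the key does not exist yet; this is
// exactly what the Create alias does.
_, err = kapi.Set(ctx, "/example/foo", "new", &client.SetOptions{
	PrevExist: client.PrevNoExist,
})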
+
+type GetOptions struct {
+	// Recursive defines whether or not all children of the Node
+	// should be returned.
+	Recursive bool
+
+	// Sort instructs the server whether or not to sort the Nodes.
+	// If true, the Nodes are sorted alphabetically by key in
+	// ascending order (A to z). If false (default), the Nodes will
+	// not be sorted and the ordering used should not be considered
+	// predictable.
+	Sort bool
+
+	// Quorum specifies whether the Get should return the latest committed
+	// value that has been applied by a quorum of members, which ensures
+	// external consistency (linearizability).
+	Quorum bool
+}
+
+type DeleteOptions struct {
+	// PrevValue specifies what the current value of the Node must
+	// be in order for the Delete operation to succeed.
+	//
+	// Leaving this field empty means that the caller wishes to
+	// ignore the current value of the Node. This cannot be used
+	// to compare the Node's current value to an empty string.
+	PrevValue string
+
+	// PrevIndex indicates what the current ModifiedIndex of the
+	// Node must be in order for the Delete operation to succeed.
+	//
+	// If PrevIndex is set to 0 (default), no comparison is made.
+	PrevIndex uint64
+
+	// Recursive defines whether or not all children of the Node
+	// should be deleted. If set to true, all children of the Node
+	// identified by the given key will be deleted. If left unset
+	// or explicitly set to false, only a single Node will be
+	// deleted.
+	Recursive bool
+
+	// Dir specifies whether or not this Node should be removed as a directory.
+	Dir bool
+}
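
Editorial note: a short sketch of a recursive directory delete with these options (hypothetical key; `kapi` and `ctx` as above):

// Remove /example and everything beneath it in one call.
_, err := kapi.Delete(ctx, "/example", &client.DeleteOptions{
	Dir:       true,
	Recursive: true,
})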
+
+type Watcher interface {
+	// Next blocks until an etcd event occurs, then returns a Response
+	// representing that event. The behavior of Next depends on the
+	// WatcherOptions used to construct the Watcher. Next is designed to
+	// be called repeatedly, each time blocking until a subsequent event
+	// is available.
+	//
+	// If the provided context is cancelled, Next will return a non-nil
+	// error. Any other failures encountered while waiting for the next
+	// event (connection issues, deserialization failures, etc) will
+	// also result in a non-nil error.
+	Next(context.Context) (*Response, error)
+}
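
Editorial note: a minimal watch loop against this interface might look as follows (hypothetical key; `kapi` as above). Next blocks, so it is normally driven from its own goroutine:

w := kapi.Watcher("/example", &client.WatcherOptions{
	AfterIndex: 0,    // 0 = start from the current index
	Recursive:  true, // also report events under /example
})
for {
	resp, err := w.Next(context.Background())
	if err != nil {
		break // e.g. the context was cancelled
	}
	fmt.Println(resp.Action, resp.Node.Key, resp.Node.Value)
}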
+
+type Response struct {
+	// Action is the name of the operation that occurred. Possible values
+	// include get, set, delete, update, create, compareAndSwap,
+	// compareAndDelete and expire.
+	Action string `json:"action"`
+
+	// Node represents the state of the relevant etcd Node.
+	Node *Node `json:"node"`
+
+	// PrevNode represents the previous state of the Node. PrevNode is non-nil
+	// only if the Node existed before the action occurred and the action
+	// caused a change to the Node.
+	PrevNode *Node `json:"prevNode"`
+
+	// Index holds the cluster-level index at the time the Response was generated.
+	// This index is not tied to the Node(s) contained in this Response.
+	Index uint64 `json:"-"`
+
+	// ClusterID holds the cluster-level ID reported by the server.  This
+	// should be different for different etcd clusters.
+	ClusterID string `json:"-"`
+}
+
+type Node struct {
+	// Key represents the unique location of this Node (e.g. "/foo/bar").
+	Key string `json:"key"`
+
+	// Dir reports whether this Node describes a directory.
+	Dir bool `json:"dir,omitempty"`
+
+	// Value is the current data stored on this Node. If this Node
+	// is a directory, Value will be empty.
+	Value string `json:"value"`
+
+	// Nodes holds the children of this Node, only if this Node is a directory.
+	// This slice will be arbitrarily deep (children, grandchildren, great-
+	// grandchildren, etc.) if a recursive Get or Watch request were made.
+	Nodes Nodes `json:"nodes"`
+
+	// CreatedIndex is the etcd index at-which this Node was created.
+	CreatedIndex uint64 `json:"createdIndex"`
+
+	// ModifiedIndex is the etcd index at-which this Node was last modified.
+	ModifiedIndex uint64 `json:"modifiedIndex"`
+
+	// Expiration is the server side expiration time of the key.
+	Expiration *time.Time `json:"expiration,omitempty"`
+
+	// TTL is the time to live of the key in second.
+	TTL int64 `json:"ttl,omitempty"`
+}
+
+func (n *Node) String() string {
+	return fmt.Sprintf("{Key: %s, CreatedIndex: %d, ModifiedIndex: %d, TTL: %d}", n.Key, n.CreatedIndex, n.ModifiedIndex, n.TTL)
+}
+
+// TTLDuration returns the Node's TTL as a time.Duration object.
+func (n *Node) TTLDuration() time.Duration {
+	return time.Duration(n.TTL) * time.Second
+}
+
+type Nodes []*Node
+
+// interfaces for sorting
+
+func (ns Nodes) Len() int           { return len(ns) }
+func (ns Nodes) Less(i, j int) bool { return ns[i].Key < ns[j].Key }
+func (ns Nodes) Swap(i, j int)      { ns[i], ns[j] = ns[j], ns[i] }
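
Editorial note: since Nodes satisfies sort.Interface, a directory listing can also be ordered client-side when GetOptions.Sort was left false; a one-line sketch, assuming `resp` came from a directory Get:

sort.Sort(resp.Node.Nodes) // ascending by Key (import "sort")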
+
+type httpKeysAPI struct {
+	client httpClient
+	prefix string
+}
+
+func (k *httpKeysAPI) Set(ctx context.Context, key, val string, opts *SetOptions) (*Response, error) {
+	act := &setAction{
+		Prefix: k.prefix,
+		Key:    key,
+		Value:  val,
+	}
+
+	if opts != nil {
+		act.PrevValue = opts.PrevValue
+		act.PrevIndex = opts.PrevIndex
+		act.PrevExist = opts.PrevExist
+		act.TTL = opts.TTL
+		act.Refresh = opts.Refresh
+		act.Dir = opts.Dir
+		act.NoValueOnSuccess = opts.NoValueOnSuccess
+	}
+
+	doCtx := ctx
+	if act.PrevExist == PrevNoExist {
+		doCtx = context.WithValue(doCtx, &oneShotCtxValue, &oneShotCtxValue)
+	}
+	resp, body, err := k.client.Do(doCtx, act)
+	if err != nil {
+		return nil, err
+	}
+
+	return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body)
+}
+
+func (k *httpKeysAPI) Create(ctx context.Context, key, val string) (*Response, error) {
+	return k.Set(ctx, key, val, &SetOptions{PrevExist: PrevNoExist})
+}
+
+func (k *httpKeysAPI) CreateInOrder(ctx context.Context, dir, val string, opts *CreateInOrderOptions) (*Response, error) {
+	act := &createInOrderAction{
+		Prefix: k.prefix,
+		Dir:    dir,
+		Value:  val,
+	}
+
+	if opts != nil {
+		act.TTL = opts.TTL
+	}
+
+	resp, body, err := k.client.Do(ctx, act)
+	if err != nil {
+		return nil, err
+	}
+
+	return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body)
+}
+
+func (k *httpKeysAPI) Update(ctx context.Context, key, val string) (*Response, error) {
+	return k.Set(ctx, key, val, &SetOptions{PrevExist: PrevExist})
+}
+
+func (k *httpKeysAPI) Delete(ctx context.Context, key string, opts *DeleteOptions) (*Response, error) {
+	act := &deleteAction{
+		Prefix: k.prefix,
+		Key:    key,
+	}
+
+	if opts != nil {
+		act.PrevValue = opts.PrevValue
+		act.PrevIndex = opts.PrevIndex
+		act.Dir = opts.Dir
+		act.Recursive = opts.Recursive
+	}
+
+	doCtx := context.WithValue(ctx, &oneShotCtxValue, &oneShotCtxValue)
+	resp, body, err := k.client.Do(doCtx, act)
+	if err != nil {
+		return nil, err
+	}
+
+	return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body)
+}
+
+func (k *httpKeysAPI) Get(ctx context.Context, key string, opts *GetOptions) (*Response, error) {
+	act := &getAction{
+		Prefix: k.prefix,
+		Key:    key,
+	}
+
+	if opts != nil {
+		act.Recursive = opts.Recursive
+		act.Sorted = opts.Sort
+		act.Quorum = opts.Quorum
+	}
+
+	resp, body, err := k.client.Do(ctx, act)
+	if err != nil {
+		return nil, err
+	}
+
+	return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body)
+}
+
+func (k *httpKeysAPI) Watcher(key string, opts *WatcherOptions) Watcher {
+	act := waitAction{
+		Prefix: k.prefix,
+		Key:    key,
+	}
+
+	if opts != nil {
+		act.Recursive = opts.Recursive
+		if opts.AfterIndex > 0 {
+			act.WaitIndex = opts.AfterIndex + 1
+		}
+	}
+
+	return &httpWatcher{
+		client:   k.client,
+		nextWait: act,
+	}
+}
+
+type httpWatcher struct {
+	client   httpClient
+	nextWait waitAction
+}
+
+func (hw *httpWatcher) Next(ctx context.Context) (*Response, error) {
+	for {
+		httpresp, body, err := hw.client.Do(ctx, &hw.nextWait)
+		if err != nil {
+			return nil, err
+		}
+
+		resp, err := unmarshalHTTPResponse(httpresp.StatusCode, httpresp.Header, body)
+		if err != nil {
+			if err == ErrEmptyBody {
+				continue
+			}
+			return nil, err
+		}
+
+		hw.nextWait.WaitIndex = resp.Node.ModifiedIndex + 1
+		return resp, nil
+	}
+}
+
+// v2KeysURL forms a URL representing the location of a key.
+// The endpoint argument represents the base URL of an etcd
+// server. The prefix is the path needed to route from the
+// provided endpoint's path to the root of the keys API
+// (typically "/v2/keys").
+func v2KeysURL(ep url.URL, prefix, key string) *url.URL {
+	// We concatenate all parts together manually. We cannot use
+	// path.Join because it does not preserve trailing slashes.
+	// We call CanonicalURLPath to further clean up the path.
+	if prefix != "" && prefix[0] != '/' {
+		prefix = "/" + prefix
+	}
+	if key != "" && key[0] != '/' {
+		key = "/" + key
+	}
+	ep.Path = pathutil.CanonicalURLPath(ep.Path + prefix + key)
+	return &ep
+}
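+
+// For example (illustrative values), an endpoint of http://10.0.0.1:2379 with
+// prefix "/v2/keys" and key "foo/bar" yields http://10.0.0.1:2379/v2/keys/foo/bar.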
+
+type getAction struct {
+	Prefix    string
+	Key       string
+	Recursive bool
+	Sorted    bool
+	Quorum    bool
+}
+
+func (g *getAction) HTTPRequest(ep url.URL) *http.Request {
+	u := v2KeysURL(ep, g.Prefix, g.Key)
+
+	params := u.Query()
+	params.Set("recursive", strconv.FormatBool(g.Recursive))
+	params.Set("sorted", strconv.FormatBool(g.Sorted))
+	params.Set("quorum", strconv.FormatBool(g.Quorum))
+	u.RawQuery = params.Encode()
+
+	req, _ := http.NewRequest("GET", u.String(), nil)
+	return req
+}
+
+type waitAction struct {
+	Prefix    string
+	Key       string
+	WaitIndex uint64
+	Recursive bool
+}
+
+func (w *waitAction) HTTPRequest(ep url.URL) *http.Request {
+	u := v2KeysURL(ep, w.Prefix, w.Key)
+
+	params := u.Query()
+	params.Set("wait", "true")
+	params.Set("waitIndex", strconv.FormatUint(w.WaitIndex, 10))
+	params.Set("recursive", strconv.FormatBool(w.Recursive))
+	u.RawQuery = params.Encode()
+
+	req, _ := http.NewRequest("GET", u.String(), nil)
+	return req
+}
+
+type setAction struct {
+	Prefix           string
+	Key              string
+	Value            string
+	PrevValue        string
+	PrevIndex        uint64
+	PrevExist        PrevExistType
+	TTL              time.Duration
+	Refresh          bool
+	Dir              bool
+	NoValueOnSuccess bool
+}
+
+func (a *setAction) HTTPRequest(ep url.URL) *http.Request {
+	u := v2KeysURL(ep, a.Prefix, a.Key)
+
+	params := u.Query()
+	form := url.Values{}
+
+	// we're either creating a directory or setting a key
+	if a.Dir {
+		params.Set("dir", strconv.FormatBool(a.Dir))
+	} else {
+		// These options are only valid for setting a key
+		if a.PrevValue != "" {
+			params.Set("prevValue", a.PrevValue)
+		}
+		form.Add("value", a.Value)
+	}
+
+	// Options which apply to both setting a key and creating a dir
+	if a.PrevIndex != 0 {
+		params.Set("prevIndex", strconv.FormatUint(a.PrevIndex, 10))
+	}
+	if a.PrevExist != PrevIgnore {
+		params.Set("prevExist", string(a.PrevExist))
+	}
+	if a.TTL > 0 {
+		form.Add("ttl", strconv.FormatUint(uint64(a.TTL.Seconds()), 10))
+	}
+
+	if a.Refresh {
+		form.Add("refresh", "true")
+	}
+	if a.NoValueOnSuccess {
+		params.Set("noValueOnSuccess", strconv.FormatBool(a.NoValueOnSuccess))
+	}
+
+	u.RawQuery = params.Encode()
+	body := strings.NewReader(form.Encode())
+
+	req, _ := http.NewRequest("PUT", u.String(), body)
+	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+
+	return req
+}
+
+type deleteAction struct {
+	Prefix    string
+	Key       string
+	PrevValue string
+	PrevIndex uint64
+	Dir       bool
+	Recursive bool
+}
+
+func (a *deleteAction) HTTPRequest(ep url.URL) *http.Request {
+	u := v2KeysURL(ep, a.Prefix, a.Key)
+
+	params := u.Query()
+	if a.PrevValue != "" {
+		params.Set("prevValue", a.PrevValue)
+	}
+	if a.PrevIndex != 0 {
+		params.Set("prevIndex", strconv.FormatUint(a.PrevIndex, 10))
+	}
+	if a.Dir {
+		params.Set("dir", "true")
+	}
+	if a.Recursive {
+		params.Set("recursive", "true")
+	}
+	u.RawQuery = params.Encode()
+
+	req, _ := http.NewRequest("DELETE", u.String(), nil)
+	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+
+	return req
+}
+
+type createInOrderAction struct {
+	Prefix string
+	Dir    string
+	Value  string
+	TTL    time.Duration
+}
+
+func (a *createInOrderAction) HTTPRequest(ep url.URL) *http.Request {
+	u := v2KeysURL(ep, a.Prefix, a.Dir)
+
+	form := url.Values{}
+	form.Add("value", a.Value)
+	if a.TTL > 0 {
+		form.Add("ttl", strconv.FormatUint(uint64(a.TTL.Seconds()), 10))
+	}
+	body := strings.NewReader(form.Encode())
+
+	req, _ := http.NewRequest("POST", u.String(), body)
+	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+	return req
+}
+
+func unmarshalHTTPResponse(code int, header http.Header, body []byte) (res *Response, err error) {
+	switch code {
+	case http.StatusOK, http.StatusCreated:
+		if len(body) == 0 {
+			return nil, ErrEmptyBody
+		}
+		res, err = unmarshalSuccessfulKeysResponse(header, body)
+	default:
+		err = unmarshalFailedKeysResponse(body)
+	}
+	return res, err
+}
+
+func unmarshalSuccessfulKeysResponse(header http.Header, body []byte) (*Response, error) {
+	var res Response
+	err := codec.NewDecoderBytes(body, new(codec.JsonHandle)).Decode(&res)
+	if err != nil {
+		return nil, ErrInvalidJSON
+	}
+	if header.Get("X-Etcd-Index") != "" {
+		res.Index, err = strconv.ParseUint(header.Get("X-Etcd-Index"), 10, 64)
+		if err != nil {
+			return nil, err
+		}
+	}
+	res.ClusterID = header.Get("X-Etcd-Cluster-ID")
+	return &res, nil
+}
+
+func unmarshalFailedKeysResponse(body []byte) error {
+	var etcdErr Error
+	if err := json.Unmarshal(body, &etcdErr); err != nil {
+		return ErrInvalidJSON
+	}
+	return etcdErr
+}
diff --git a/vendor/github.com/coreos/etcd/client/members.go b/vendor/github.com/coreos/etcd/client/members.go
new file mode 100644
index 0000000..aafa3d1
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/client/members.go
@@ -0,0 +1,303 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"net/url"
+	"path"
+
+	"github.com/coreos/etcd/pkg/types"
+)
+
+var (
+	defaultV2MembersPrefix = "/v2/members"
+	defaultLeaderSuffix    = "/leader"
+)
+
+type Member struct {
+	// ID is the unique identifier of this Member.
+	ID string `json:"id"`
+
+	// Name is a human-readable, non-unique identifier of this Member.
+	Name string `json:"name"`
+
+	// PeerURLs represents the HTTP(S) endpoints this Member uses to
+	// participate in etcd's consensus protocol.
+	PeerURLs []string `json:"peerURLs"`
+
+	// ClientURLs represents the HTTP(S) endpoints on which this Member
+	// serves its client-facing APIs.
+	ClientURLs []string `json:"clientURLs"`
+}
+
+type memberCollection []Member
+
+func (c *memberCollection) UnmarshalJSON(data []byte) error {
+	d := struct {
+		Members []Member
+	}{}
+
+	if err := json.Unmarshal(data, &d); err != nil {
+		return err
+	}
+
+	if d.Members == nil {
+		*c = make([]Member, 0)
+		return nil
+	}
+
+	*c = d.Members
+	return nil
+}
+
+type memberCreateOrUpdateRequest struct {
+	PeerURLs types.URLs
+}
+
+func (m *memberCreateOrUpdateRequest) MarshalJSON() ([]byte, error) {
+	s := struct {
+		PeerURLs []string `json:"peerURLs"`
+	}{
+		PeerURLs: make([]string, len(m.PeerURLs)),
+	}
+
+	for i, u := range m.PeerURLs {
+		s.PeerURLs[i] = u.String()
+	}
+
+	return json.Marshal(&s)
+}
+
+// NewMembersAPI constructs a new MembersAPI that uses HTTP to
+// interact with etcd's membership API.
+func NewMembersAPI(c Client) MembersAPI {
+	return &httpMembersAPI{
+		client: c,
+	}
+}
+
+type MembersAPI interface {
+	// List enumerates the current cluster membership.
+	List(ctx context.Context) ([]Member, error)
+
+	// Add instructs etcd to accept a new Member into the cluster.
+	Add(ctx context.Context, peerURL string) (*Member, error)
+
+	// Remove removes an existing Member from the cluster.
+	Remove(ctx context.Context, mID string) error
+
+	// Update instructs etcd to update an existing Member in the cluster.
+	Update(ctx context.Context, mID string, peerURLs []string) error
+
+	// Leader returns the current leader of the cluster.
+	Leader(ctx context.Context) (*Member, error)
+}
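+
+// A minimal usage sketch (assumes c is a v2 Client built with client.New):
+//
+//	mAPI := NewMembersAPI(c)
+//	members, err := mAPI.List(context.Background())
+//	if err != nil {
+//		// handle error
+//	}
+//	for _, m := range members {
+//		fmt.Println(m.Name, m.PeerURLs)
+//	}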
+
+type httpMembersAPI struct {
+	client httpClient
+}
+
+func (m *httpMembersAPI) List(ctx context.Context) ([]Member, error) {
+	req := &membersAPIActionList{}
+	resp, body, err := m.client.Do(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
+		return nil, err
+	}
+
+	var mCollection memberCollection
+	if err := json.Unmarshal(body, &mCollection); err != nil {
+		return nil, err
+	}
+
+	return []Member(mCollection), nil
+}
+
+func (m *httpMembersAPI) Add(ctx context.Context, peerURL string) (*Member, error) {
+	urls, err := types.NewURLs([]string{peerURL})
+	if err != nil {
+		return nil, err
+	}
+
+	req := &membersAPIActionAdd{peerURLs: urls}
+	resp, body, err := m.client.Do(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := assertStatusCode(resp.StatusCode, http.StatusCreated, http.StatusConflict); err != nil {
+		return nil, err
+	}
+
+	if resp.StatusCode != http.StatusCreated {
+		var merr membersError
+		if err := json.Unmarshal(body, &merr); err != nil {
+			return nil, err
+		}
+		return nil, merr
+	}
+
+	var memb Member
+	if err := json.Unmarshal(body, &memb); err != nil {
+		return nil, err
+	}
+
+	return &memb, nil
+}
+
+func (m *httpMembersAPI) Update(ctx context.Context, memberID string, peerURLs []string) error {
+	urls, err := types.NewURLs(peerURLs)
+	if err != nil {
+		return err
+	}
+
+	req := &membersAPIActionUpdate{peerURLs: urls, memberID: memberID}
+	resp, body, err := m.client.Do(ctx, req)
+	if err != nil {
+		return err
+	}
+
+	if err := assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusNotFound, http.StatusConflict); err != nil {
+		return err
+	}
+
+	if resp.StatusCode != http.StatusNoContent {
+		var merr membersError
+		if err := json.Unmarshal(body, &merr); err != nil {
+			return err
+		}
+		return merr
+	}
+
+	return nil
+}
+
+func (m *httpMembersAPI) Remove(ctx context.Context, memberID string) error {
+	req := &membersAPIActionRemove{memberID: memberID}
+	resp, _, err := m.client.Do(ctx, req)
+	if err != nil {
+		return err
+	}
+
+	return assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusGone)
+}
+
+func (m *httpMembersAPI) Leader(ctx context.Context) (*Member, error) {
+	req := &membersAPIActionLeader{}
+	resp, body, err := m.client.Do(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
+		return nil, err
+	}
+
+	var leader Member
+	if err := json.Unmarshal(body, &leader); err != nil {
+		return nil, err
+	}
+
+	return &leader, nil
+}
+
+type membersAPIActionList struct{}
+
+func (l *membersAPIActionList) HTTPRequest(ep url.URL) *http.Request {
+	u := v2MembersURL(ep)
+	req, _ := http.NewRequest("GET", u.String(), nil)
+	return req
+}
+
+type membersAPIActionRemove struct {
+	memberID string
+}
+
+func (d *membersAPIActionRemove) HTTPRequest(ep url.URL) *http.Request {
+	u := v2MembersURL(ep)
+	u.Path = path.Join(u.Path, d.memberID)
+	req, _ := http.NewRequest("DELETE", u.String(), nil)
+	return req
+}
+
+type membersAPIActionAdd struct {
+	peerURLs types.URLs
+}
+
+func (a *membersAPIActionAdd) HTTPRequest(ep url.URL) *http.Request {
+	u := v2MembersURL(ep)
+	m := memberCreateOrUpdateRequest{PeerURLs: a.peerURLs}
+	b, _ := json.Marshal(&m)
+	req, _ := http.NewRequest("POST", u.String(), bytes.NewReader(b))
+	req.Header.Set("Content-Type", "application/json")
+	return req
+}
+
+type membersAPIActionUpdate struct {
+	memberID string
+	peerURLs types.URLs
+}
+
+func (a *membersAPIActionUpdate) HTTPRequest(ep url.URL) *http.Request {
+	u := v2MembersURL(ep)
+	m := memberCreateOrUpdateRequest{PeerURLs: a.peerURLs}
+	u.Path = path.Join(u.Path, a.memberID)
+	b, _ := json.Marshal(&m)
+	req, _ := http.NewRequest("PUT", u.String(), bytes.NewReader(b))
+	req.Header.Set("Content-Type", "application/json")
+	return req
+}
+
+func assertStatusCode(got int, want ...int) (err error) {
+	for _, w := range want {
+		if w == got {
+			return nil
+		}
+	}
+	return fmt.Errorf("unexpected status code %d", got)
+}
+
+type membersAPIActionLeader struct{}
+
+func (l *membersAPIActionLeader) HTTPRequest(ep url.URL) *http.Request {
+	u := v2MembersURL(ep)
+	u.Path = path.Join(u.Path, defaultLeaderSuffix)
+	req, _ := http.NewRequest("GET", u.String(), nil)
+	return req
+}
+
+// v2MembersURL adds the necessary path to the provided endpoint
+// to route requests to the default v2 members API.
+func v2MembersURL(ep url.URL) *url.URL {
+	ep.Path = path.Join(ep.Path, defaultV2MembersPrefix)
+	return &ep
+}
+
+type membersError struct {
+	Message string `json:"message"`
+	Code    int    `json:"-"`
+}
+
+func (e membersError) Error() string {
+	return e.Message
+}
diff --git a/vendor/github.com/coreos/etcd/client/util.go b/vendor/github.com/coreos/etcd/client/util.go
new file mode 100644
index 0000000..15a8bab
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/client/util.go
@@ -0,0 +1,53 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import (
+	"regexp"
+)
+
+var (
+	roleNotFoundRegExp *regexp.Regexp
+	userNotFoundRegExp *regexp.Regexp
+)
+
+func init() {
+	roleNotFoundRegExp = regexp.MustCompile("auth: Role .* does not exist.")
+	userNotFoundRegExp = regexp.MustCompile("auth: User .* does not exist.")
+}
+
+// IsKeyNotFound returns true if the error code is ErrorCodeKeyNotFound.
+func IsKeyNotFound(err error) bool {
+	if cErr, ok := err.(Error); ok {
+		return cErr.Code == ErrorCodeKeyNotFound
+	}
+	return false
+}
+
+// IsRoleNotFound returns true if the error means a role was not found in the v2 API.
+func IsRoleNotFound(err error) bool {
+	if ae, ok := err.(authError); ok {
+		return roleNotFoundRegExp.MatchString(ae.Message)
+	}
+	return false
+}
+
+// IsUserNotFound returns true if the error means a user was not found in the v2 API.
+func IsUserNotFound(err error) bool {
+	if ae, ok := err.(authError); ok {
+		return userNotFoundRegExp.MatchString(ae.Message)
+	}
+	return false
+}
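+
+// A minimal usage sketch (assumes kapi is a KeysAPI and ctx a context.Context):
+//
+//	resp, err := kapi.Get(ctx, "/foo", nil)
+//	if IsKeyNotFound(err) {
+//		// the key has not been created yet
+//	}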
diff --git a/vendor/github.com/coreos/etcd/clientv3/README.md b/vendor/github.com/coreos/etcd/clientv3/README.md
new file mode 100644
index 0000000..376bfba
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/README.md
@@ -0,0 +1,85 @@
+# etcd/clientv3
+
+[![Godoc](https://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/coreos/etcd/clientv3)
+
+`etcd/clientv3` is the official Go etcd client for v3.
+
+## Install
+
+```bash
+go get github.com/coreos/etcd/clientv3
+```
+
+## Get started
+
+Create a client using `clientv3.New`:
+
+```go
+cli, err := clientv3.New(clientv3.Config{
+	Endpoints:   []string{"localhost:2379", "localhost:22379", "localhost:32379"},
+	DialTimeout: 5 * time.Second,
+})
+if err != nil {
+	// handle error!
+}
+defer cli.Close()
+```
+
+etcd v3 uses [`gRPC`](http://www.grpc.io) for remote procedure calls, and `clientv3` uses
+[`grpc-go`](https://github.com/grpc/grpc-go) to connect to etcd. Make sure to close the client after using it;
+if the client is not closed, the connection will leak goroutines. To specify a client request timeout,
+pass `context.WithTimeout` to APIs:
+
+```go
+ctx, cancel := context.WithTimeout(context.Background(), timeout)
+resp, err := cli.Put(ctx, "sample_key", "sample_value")
+cancel()
+if err != nil {
+    // handle error!
+}
+// use the response
+```
+
+etcd uses the `cmd/vendor` directory to store external dependencies, which are
+compiled into etcd release binaries. `client` can be imported without
+vendoring. For full compatibility, it is recommended that builds vendor
+etcd's vendored packages, using tools like godep, as in
+[vendor directories](https://golang.org/cmd/go/#hdr-Vendor_Directories).
+For more detail, please read the [Go vendor design](https://golang.org/s/go15vendor).
+
+## Error Handling
+
+The etcd client returns two types of errors:
+
+1. context error: canceled or deadline exceeded.
+2. gRPC error: see [api/v3rpc/rpctypes](https://godoc.org/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes).
+
+Here is example code for handling client errors:
+
+```go
+resp, err := cli.Put(ctx, "", "")
+if err != nil {
+	switch err {
+	case context.Canceled:
+		log.Fatalf("ctx is canceled by another routine: %v", err)
+	case context.DeadlineExceeded:
+		log.Fatalf("ctx is attached with a deadline is exceeded: %v", err)
+	case rpctypes.ErrEmptyKey:
+		log.Fatalf("client-side error: %v", err)
+	default:
+		log.Fatalf("bad cluster endpoints, which are not etcd servers: %v", err)
+	}
+}
+```
+
+## Metrics
+
+The etcd client optionally exposes RPC metrics through [go-grpc-prometheus](https://github.com/grpc-ecosystem/go-grpc-prometheus). See the [examples](https://github.com/coreos/etcd/blob/master/clientv3/example_metrics_test.go).
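+
+A minimal sketch of wiring the interceptors through `DialOptions` (assuming the
+`grpc` and `grpc_prometheus` import aliases for `google.golang.org/grpc` and
+[go-grpc-prometheus](https://github.com/grpc-ecosystem/go-grpc-prometheus)):
+
+```go
+cli, err := clientv3.New(clientv3.Config{
+	Endpoints: []string{"localhost:2379"},
+	DialOptions: []grpc.DialOption{
+		grpc.WithUnaryInterceptor(grpc_prometheus.UnaryClientInterceptor),
+		grpc.WithStreamInterceptor(grpc_prometheus.StreamClientInterceptor),
+	},
+})
+if err != nil {
+	// handle error!
+}
+defer cli.Close()
+```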
+
+## Namespacing
+
+The [namespace](https://godoc.org/github.com/coreos/etcd/clientv3/namespace) package provides `clientv3` interface wrappers to transparently isolate client requests to a user-defined prefix.
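+
+As a sketch (assuming `cli` is an existing client and `my-prefix/` is a
+hypothetical prefix), the wrappers can replace the client's embedded interfaces:
+
+```go
+cli.KV = namespace.NewKV(cli.KV, "my-prefix/")
+cli.Watcher = namespace.NewWatcher(cli.Watcher, "my-prefix/")
+cli.Lease = namespace.NewLease(cli.Lease, "my-prefix/")
+// cli.Put(ctx, "abc", "123") now writes the key "my-prefix/abc"
+```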
+
+## Examples
+
+More code examples can be found at [GoDoc](https://godoc.org/github.com/coreos/etcd/clientv3).
diff --git a/vendor/github.com/coreos/etcd/clientv3/auth.go b/vendor/github.com/coreos/etcd/clientv3/auth.go
new file mode 100644
index 0000000..7545bb6
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/auth.go
@@ -0,0 +1,233 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"github.com/coreos/etcd/auth/authpb"
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+
+	"google.golang.org/grpc"
+)
+
+type (
+	AuthEnableResponse               pb.AuthEnableResponse
+	AuthDisableResponse              pb.AuthDisableResponse
+	AuthenticateResponse             pb.AuthenticateResponse
+	AuthUserAddResponse              pb.AuthUserAddResponse
+	AuthUserDeleteResponse           pb.AuthUserDeleteResponse
+	AuthUserChangePasswordResponse   pb.AuthUserChangePasswordResponse
+	AuthUserGrantRoleResponse        pb.AuthUserGrantRoleResponse
+	AuthUserGetResponse              pb.AuthUserGetResponse
+	AuthUserRevokeRoleResponse       pb.AuthUserRevokeRoleResponse
+	AuthRoleAddResponse              pb.AuthRoleAddResponse
+	AuthRoleGrantPermissionResponse  pb.AuthRoleGrantPermissionResponse
+	AuthRoleGetResponse              pb.AuthRoleGetResponse
+	AuthRoleRevokePermissionResponse pb.AuthRoleRevokePermissionResponse
+	AuthRoleDeleteResponse           pb.AuthRoleDeleteResponse
+	AuthUserListResponse             pb.AuthUserListResponse
+	AuthRoleListResponse             pb.AuthRoleListResponse
+
+	PermissionType authpb.Permission_Type
+	Permission     authpb.Permission
+)
+
+const (
+	PermRead      = authpb.READ
+	PermWrite     = authpb.WRITE
+	PermReadWrite = authpb.READWRITE
+)
+
+type Auth interface {
+	// AuthEnable enables auth of an etcd cluster.
+	AuthEnable(ctx context.Context) (*AuthEnableResponse, error)
+
+	// AuthDisable disables auth of an etcd cluster.
+	AuthDisable(ctx context.Context) (*AuthDisableResponse, error)
+
+	// UserAdd adds a new user to an etcd cluster.
+	UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error)
+
+	// UserDelete deletes a user from an etcd cluster.
+	UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error)
+
+	// UserChangePassword changes a password of a user.
+	UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error)
+
+	// UserGrantRole grants a role to a user.
+	UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error)
+
+	// UserGet gets detailed information about a user.
+	UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error)
+
+	// UserList gets a list of all users.
+	UserList(ctx context.Context) (*AuthUserListResponse, error)
+
+	// UserRevokeRole revokes a role of a user.
+	UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error)
+
+	// RoleAdd adds a new role to an etcd cluster.
+	RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error)
+
+	// RoleGrantPermission grants a permission to a role.
+	RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error)
+
+	// RoleGet gets detailed information about a role.
+	RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error)
+
+	// RoleList gets a list of all roles.
+	RoleList(ctx context.Context) (*AuthRoleListResponse, error)
+
+	// RoleRevokePermission revokes a permission from a role.
+	RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error)
+
+	// RoleDelete deletes a role.
+	RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error)
+}
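+
+// A minimal usage sketch (assumes cli is a *Client, which embeds Auth, and
+// that "r" and "u" are hypothetical role and user names):
+//
+//	if _, err := cli.RoleAdd(ctx, "r"); err != nil {
+//		// handle error
+//	}
+//	if _, err := cli.UserAdd(ctx, "u", "password"); err != nil {
+//		// handle error
+//	}
+//	if _, err := cli.UserGrantRole(ctx, "u", "r"); err != nil {
+//		// handle error
+//	}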
+
+type auth struct {
+	remote   pb.AuthClient
+	callOpts []grpc.CallOption
+}
+
+func NewAuth(c *Client) Auth {
+	api := &auth{remote: RetryAuthClient(c)}
+	if c != nil {
+		api.callOpts = c.callOpts
+	}
+	return api
+}
+
+func (auth *auth) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) {
+	resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, auth.callOpts...)
+	return (*AuthEnableResponse)(resp), toErr(ctx, err)
+}
+
+func (auth *auth) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) {
+	resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, auth.callOpts...)
+	return (*AuthDisableResponse)(resp), toErr(ctx, err)
+}
+
+func (auth *auth) UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) {
+	resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password}, auth.callOpts...)
+	return (*AuthUserAddResponse)(resp), toErr(ctx, err)
+}
+
+func (auth *auth) UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) {
+	resp, err := auth.remote.UserDelete(ctx, &pb.AuthUserDeleteRequest{Name: name}, auth.callOpts...)
+	return (*AuthUserDeleteResponse)(resp), toErr(ctx, err)
+}
+
+func (auth *auth) UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) {
+	resp, err := auth.remote.UserChangePassword(ctx, &pb.AuthUserChangePasswordRequest{Name: name, Password: password}, auth.callOpts...)
+	return (*AuthUserChangePasswordResponse)(resp), toErr(ctx, err)
+}
+
+func (auth *auth) UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) {
+	resp, err := auth.remote.UserGrantRole(ctx, &pb.AuthUserGrantRoleRequest{User: user, Role: role}, auth.callOpts...)
+	return (*AuthUserGrantRoleResponse)(resp), toErr(ctx, err)
+}
+
+func (auth *auth) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) {
+	resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, auth.callOpts...)
+	return (*AuthUserGetResponse)(resp), toErr(ctx, err)
+}
+
+func (auth *auth) UserList(ctx context.Context) (*AuthUserListResponse, error) {
+	resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, auth.callOpts...)
+	return (*AuthUserListResponse)(resp), toErr(ctx, err)
+}
+
+func (auth *auth) UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) {
+	resp, err := auth.remote.UserRevokeRole(ctx, &pb.AuthUserRevokeRoleRequest{Name: name, Role: role}, auth.callOpts...)
+	return (*AuthUserRevokeRoleResponse)(resp), toErr(ctx, err)
+}
+
+func (auth *auth) RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) {
+	resp, err := auth.remote.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: name}, auth.callOpts...)
+	return (*AuthRoleAddResponse)(resp), toErr(ctx, err)
+}
+
+func (auth *auth) RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error) {
+	perm := &authpb.Permission{
+		Key:      []byte(key),
+		RangeEnd: []byte(rangeEnd),
+		PermType: authpb.Permission_Type(permType),
+	}
+	resp, err := auth.remote.RoleGrantPermission(ctx, &pb.AuthRoleGrantPermissionRequest{Name: name, Perm: perm}, auth.callOpts...)
+	return (*AuthRoleGrantPermissionResponse)(resp), toErr(ctx, err)
+}
+
+func (auth *auth) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) {
+	resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, auth.callOpts...)
+	return (*AuthRoleGetResponse)(resp), toErr(ctx, err)
+}
+
+func (auth *auth) RoleList(ctx context.Context) (*AuthRoleListResponse, error) {
+	resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, auth.callOpts...)
+	return (*AuthRoleListResponse)(resp), toErr(ctx, err)
+}
+
+func (auth *auth) RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) {
+	resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: key, RangeEnd: rangeEnd}, auth.callOpts...)
+	return (*AuthRoleRevokePermissionResponse)(resp), toErr(ctx, err)
+}
+
+func (auth *auth) RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) {
+	resp, err := auth.remote.RoleDelete(ctx, &pb.AuthRoleDeleteRequest{Role: role}, auth.callOpts...)
+	return (*AuthRoleDeleteResponse)(resp), toErr(ctx, err)
+}
+
+func StrToPermissionType(s string) (PermissionType, error) {
+	val, ok := authpb.Permission_Type_value[strings.ToUpper(s)]
+	if ok {
+		return PermissionType(val), nil
+	}
+	return PermissionType(-1), fmt.Errorf("invalid permission type: %s", s)
+}
+
+type authenticator struct {
+	conn     *grpc.ClientConn // conn in-use
+	remote   pb.AuthClient
+	callOpts []grpc.CallOption
+}
+
+func (auth *authenticator) authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error) {
+	resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}, auth.callOpts...)
+	return (*AuthenticateResponse)(resp), toErr(ctx, err)
+}
+
+func (auth *authenticator) close() {
+	auth.conn.Close()
+}
+
+func newAuthenticator(endpoint string, opts []grpc.DialOption, c *Client) (*authenticator, error) {
+	conn, err := grpc.Dial(endpoint, opts...)
+	if err != nil {
+		return nil, err
+	}
+
+	api := &authenticator{
+		conn:   conn,
+		remote: pb.NewAuthClient(conn),
+	}
+	if c != nil {
+		api.callOpts = c.callOpts
+	}
+	return api, nil
+}
diff --git a/vendor/github.com/coreos/etcd/clientv3/client.go b/vendor/github.com/coreos/etcd/clientv3/client.go
new file mode 100644
index 0000000..7132807
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/client.go
@@ -0,0 +1,576 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	"context"
+	"crypto/tls"
+	"errors"
+	"fmt"
+	"net"
+	"net/url"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/keepalive"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/status"
+)
+
+var (
+	ErrNoAvailableEndpoints = errors.New("etcdclient: no available endpoints")
+	ErrOldCluster           = errors.New("etcdclient: old cluster version")
+)
+
+// Client provides and manages an etcd v3 client session.
+type Client struct {
+	Cluster
+	KV
+	Lease
+	Watcher
+	Auth
+	Maintenance
+
+	conn     *grpc.ClientConn
+	dialerrc chan error
+
+	cfg      Config
+	creds    *credentials.TransportCredentials
+	balancer *healthBalancer
+	mu       *sync.Mutex
+
+	ctx    context.Context
+	cancel context.CancelFunc
+
+	// Username is a user name for authentication.
+	Username string
+	// Password is a password for authentication.
+	Password string
+	// tokenCred is an instance of WithPerRPCCredentials()'s argument
+	tokenCred *authTokenCredential
+
+	callOpts []grpc.CallOption
+}
+
+// New creates a new etcdv3 client from a given configuration.
+func New(cfg Config) (*Client, error) {
+	if len(cfg.Endpoints) == 0 {
+		return nil, ErrNoAvailableEndpoints
+	}
+
+	return newClient(&cfg)
+}
+
+// NewCtxClient creates a client with a context but no underlying grpc
+// connection. This is useful for embedded cases that override the
+// service interface implementations and do not need connection management.
+func NewCtxClient(ctx context.Context) *Client {
+	cctx, cancel := context.WithCancel(ctx)
+	return &Client{ctx: cctx, cancel: cancel}
+}
+
+// NewFromURL creates a new etcdv3 client from a URL.
+func NewFromURL(url string) (*Client, error) {
+	return New(Config{Endpoints: []string{url}})
+}
+
+// Close shuts down the client's etcd connections.
+func (c *Client) Close() error {
+	c.cancel()
+	c.Watcher.Close()
+	c.Lease.Close()
+	if c.conn != nil {
+		return toErr(c.ctx, c.conn.Close())
+	}
+	return c.ctx.Err()
+}
+
+// Ctx is a context for "out of band" messages (e.g., for sending
+// "clean up" message when another context is canceled). It is
+// canceled on client Close().
+func (c *Client) Ctx() context.Context { return c.ctx }
+
+// Endpoints lists the registered endpoints for the client.
+func (c *Client) Endpoints() (eps []string) {
+	// copy the slice; protect original endpoints from being changed
+	eps = make([]string, len(c.cfg.Endpoints))
+	copy(eps, c.cfg.Endpoints)
+	return
+}
+
+// SetEndpoints updates the client's endpoints.
+func (c *Client) SetEndpoints(eps ...string) {
+	c.mu.Lock()
+	c.cfg.Endpoints = eps
+	c.mu.Unlock()
+	c.balancer.updateAddrs(eps...)
+
+	// updating notifyCh can trigger new connections;
+	// we need to update addrs if all connections are down
+	// or addrs does not include pinAddr.
+	c.balancer.mu.RLock()
+	update := !hasAddr(c.balancer.addrs, c.balancer.pinAddr)
+	c.balancer.mu.RUnlock()
+	if update {
+		select {
+		case c.balancer.updateAddrsC <- notifyNext:
+		case <-c.balancer.stopc:
+		}
+	}
+}
+
+// Sync synchronizes the client's endpoints with the known endpoints from the etcd membership.
+func (c *Client) Sync(ctx context.Context) error {
+	mresp, err := c.MemberList(ctx)
+	if err != nil {
+		return err
+	}
+	var eps []string
+	for _, m := range mresp.Members {
+		eps = append(eps, m.ClientURLs...)
+	}
+	c.SetEndpoints(eps...)
+	return nil
+}
+
+func (c *Client) autoSync() {
+	if c.cfg.AutoSyncInterval == time.Duration(0) {
+		return
+	}
+
+	for {
+		select {
+		case <-c.ctx.Done():
+			return
+		case <-time.After(c.cfg.AutoSyncInterval):
+			ctx, cancel := context.WithTimeout(c.ctx, 5*time.Second)
+			err := c.Sync(ctx)
+			cancel()
+			if err != nil && err != c.ctx.Err() {
+				logger.Println("Auto sync endpoints failed:", err)
+			}
+		}
+	}
+}
+
+type authTokenCredential struct {
+	token   string
+	tokenMu *sync.RWMutex
+}
+
+func (cred authTokenCredential) RequireTransportSecurity() bool {
+	return false
+}
+
+func (cred authTokenCredential) GetRequestMetadata(ctx context.Context, s ...string) (map[string]string, error) {
+	cred.tokenMu.RLock()
+	defer cred.tokenMu.RUnlock()
+	return map[string]string{
+		"token": cred.token,
+	}, nil
+}
+
+func parseEndpoint(endpoint string) (proto string, host string, scheme string) {
+	proto = "tcp"
+	host = endpoint
+	url, uerr := url.Parse(endpoint)
+	if uerr != nil || !strings.Contains(endpoint, "://") {
+		return proto, host, scheme
+	}
+	scheme = url.Scheme
+
+	// strip scheme:// prefix since grpc dials by host
+	host = url.Host
+	switch url.Scheme {
+	case "http", "https":
+	case "unix", "unixs":
+		proto = "unix"
+		host = url.Host + url.Path
+	default:
+		proto, host = "", ""
+	}
+	return proto, host, scheme
+}
+
+func (c *Client) processCreds(scheme string) (creds *credentials.TransportCredentials) {
+	creds = c.creds
+	switch scheme {
+	case "unix":
+	case "http":
+		creds = nil
+	case "https", "unixs":
+		if creds != nil {
+			break
+		}
+		tlsconfig := &tls.Config{}
+		emptyCreds := credentials.NewTLS(tlsconfig)
+		creds = &emptyCreds
+	default:
+		creds = nil
+	}
+	return creds
+}
+
+// dialSetupOpts gives the dial opts prior to any authentication
+func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) (opts []grpc.DialOption) {
+	if c.cfg.DialTimeout > 0 {
+		opts = []grpc.DialOption{grpc.WithTimeout(c.cfg.DialTimeout)}
+	}
+	if c.cfg.DialKeepAliveTime > 0 {
+		params := keepalive.ClientParameters{
+			Time:    c.cfg.DialKeepAliveTime,
+			Timeout: c.cfg.DialKeepAliveTimeout,
+		}
+		opts = append(opts, grpc.WithKeepaliveParams(params))
+	}
+	opts = append(opts, dopts...)
+
+	f := func(host string, t time.Duration) (net.Conn, error) {
+		proto, host, _ := parseEndpoint(c.balancer.endpoint(host))
+		if host == "" && endpoint != "" {
+			// dialing an endpoint not in the balancer; use
+			// endpoint passed into dial
+			proto, host, _ = parseEndpoint(endpoint)
+		}
+		if proto == "" {
+			return nil, fmt.Errorf("unknown scheme for %q", host)
+		}
+		select {
+		case <-c.ctx.Done():
+			return nil, c.ctx.Err()
+		default:
+		}
+		dialer := &net.Dialer{Timeout: t}
+		conn, err := dialer.DialContext(c.ctx, proto, host)
+		if err != nil {
+			select {
+			case c.dialerrc <- err:
+			default:
+			}
+		}
+		return conn, err
+	}
+	opts = append(opts, grpc.WithDialer(f))
+
+	creds := c.creds
+	if _, _, scheme := parseEndpoint(endpoint); len(scheme) != 0 {
+		creds = c.processCreds(scheme)
+	}
+	if creds != nil {
+		opts = append(opts, grpc.WithTransportCredentials(*creds))
+	} else {
+		opts = append(opts, grpc.WithInsecure())
+	}
+
+	return opts
+}
+
+// Dial connects to a single endpoint using the client's config.
+func (c *Client) Dial(endpoint string) (*grpc.ClientConn, error) {
+	return c.dial(endpoint)
+}
+
+func (c *Client) getToken(ctx context.Context) error {
+	var err error // return the last error in case of failure
+	var auth *authenticator
+
+	for i := 0; i < len(c.cfg.Endpoints); i++ {
+		endpoint := c.cfg.Endpoints[i]
+		host := getHost(endpoint)
+		// use dial options without dopts to avoid reusing the client balancer
+		auth, err = newAuthenticator(host, c.dialSetupOpts(endpoint), c)
+		if err != nil {
+			continue
+		}
+		defer auth.close()
+
+		var resp *AuthenticateResponse
+		resp, err = auth.authenticate(ctx, c.Username, c.Password)
+		if err != nil {
+			continue
+		}
+
+		c.tokenCred.tokenMu.Lock()
+		c.tokenCred.token = resp.Token
+		c.tokenCred.tokenMu.Unlock()
+
+		return nil
+	}
+
+	return err
+}
+
+func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
+	opts := c.dialSetupOpts(endpoint, dopts...)
+	host := getHost(endpoint)
+	if c.Username != "" && c.Password != "" {
+		c.tokenCred = &authTokenCredential{
+			tokenMu: &sync.RWMutex{},
+		}
+
+		ctx := c.ctx
+		if c.cfg.DialTimeout > 0 {
+			cctx, cancel := context.WithTimeout(ctx, c.cfg.DialTimeout)
+			defer cancel()
+			ctx = cctx
+		}
+
+		err := c.getToken(ctx)
+		if err != nil {
+			if toErr(ctx, err) != rpctypes.ErrAuthNotEnabled {
+				if err == ctx.Err() && ctx.Err() != c.ctx.Err() {
+					err = context.DeadlineExceeded
+				}
+				return nil, err
+			}
+		} else {
+			opts = append(opts, grpc.WithPerRPCCredentials(c.tokenCred))
+		}
+	}
+
+	opts = append(opts, c.cfg.DialOptions...)
+
+	conn, err := grpc.DialContext(c.ctx, host, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return conn, nil
+}
+
+// WithRequireLeader requires client requests to only succeed
+// when the cluster has a leader.
+func WithRequireLeader(ctx context.Context) context.Context {
+	md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
+	return metadata.NewOutgoingContext(ctx, md)
+}
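+
+// A minimal usage sketch (assumes cli is a *Client): requests made with this
+// context fail fast when the cluster has no leader instead of waiting.
+//
+//	gctx := WithRequireLeader(context.Background())
+//	wch := cli.Watch(gctx, "foo")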
+
+func newClient(cfg *Config) (*Client, error) {
+	if cfg == nil {
+		cfg = &Config{}
+	}
+	var creds *credentials.TransportCredentials
+	if cfg.TLS != nil {
+		c := credentials.NewTLS(cfg.TLS)
+		creds = &c
+	}
+
+	// use a temporary skeleton client to bootstrap first connection
+	baseCtx := context.TODO()
+	if cfg.Context != nil {
+		baseCtx = cfg.Context
+	}
+
+	ctx, cancel := context.WithCancel(baseCtx)
+	client := &Client{
+		conn:     nil,
+		dialerrc: make(chan error, 1),
+		cfg:      *cfg,
+		creds:    creds,
+		ctx:      ctx,
+		cancel:   cancel,
+		mu:       new(sync.Mutex),
+		callOpts: defaultCallOpts,
+	}
+	if cfg.Username != "" && cfg.Password != "" {
+		client.Username = cfg.Username
+		client.Password = cfg.Password
+	}
+	if cfg.MaxCallSendMsgSize > 0 || cfg.MaxCallRecvMsgSize > 0 {
+		if cfg.MaxCallRecvMsgSize > 0 && cfg.MaxCallSendMsgSize > cfg.MaxCallRecvMsgSize {
+			return nil, fmt.Errorf("gRPC message recv limit (%d bytes) must be greater than send limit (%d bytes)", cfg.MaxCallRecvMsgSize, cfg.MaxCallSendMsgSize)
+		}
+		callOpts := []grpc.CallOption{
+			defaultFailFast,
+			defaultMaxCallSendMsgSize,
+			defaultMaxCallRecvMsgSize,
+		}
+		if cfg.MaxCallSendMsgSize > 0 {
+			callOpts[1] = grpc.MaxCallSendMsgSize(cfg.MaxCallSendMsgSize)
+		}
+		if cfg.MaxCallRecvMsgSize > 0 {
+			callOpts[2] = grpc.MaxCallRecvMsgSize(cfg.MaxCallRecvMsgSize)
+		}
+		client.callOpts = callOpts
+	}
+
+	client.balancer = newHealthBalancer(cfg.Endpoints, cfg.DialTimeout, func(ep string) (bool, error) {
+		return grpcHealthCheck(client, ep)
+	})
+
+	// use Endpoints[0] so that, for https:// endpoints without any TLS config given,
+	// grpc will assume the certificate server name is the endpoint host.
+	conn, err := client.dial(cfg.Endpoints[0], grpc.WithBalancer(client.balancer))
+	if err != nil {
+		client.cancel()
+		client.balancer.Close()
+		return nil, err
+	}
+	client.conn = conn
+
+	// wait for a connection
+	if cfg.DialTimeout > 0 {
+		hasConn := false
+		waitc := time.After(cfg.DialTimeout)
+		select {
+		case <-client.balancer.ready():
+			hasConn = true
+		case <-ctx.Done():
+		case <-waitc:
+		}
+		if !hasConn {
+			err := context.DeadlineExceeded
+			select {
+			case err = <-client.dialerrc:
+			default:
+			}
+			client.cancel()
+			client.balancer.Close()
+			conn.Close()
+			return nil, err
+		}
+	}
+
+	client.Cluster = NewCluster(client)
+	client.KV = NewKV(client)
+	client.Lease = NewLease(client)
+	client.Watcher = NewWatcher(client)
+	client.Auth = NewAuth(client)
+	client.Maintenance = NewMaintenance(client)
+
+	if cfg.RejectOldCluster {
+		if err := client.checkVersion(); err != nil {
+			client.Close()
+			return nil, err
+		}
+	}
+
+	go client.autoSync()
+	return client, nil
+}
+
+func (c *Client) checkVersion() (err error) {
+	var wg sync.WaitGroup
+	errc := make(chan error, len(c.cfg.Endpoints))
+	ctx, cancel := context.WithCancel(c.ctx)
+	if c.cfg.DialTimeout > 0 {
+		ctx, cancel = context.WithTimeout(ctx, c.cfg.DialTimeout)
+	}
+	wg.Add(len(c.cfg.Endpoints))
+	for _, ep := range c.cfg.Endpoints {
+		// if cluster is current, any endpoint gives a recent version
+		go func(e string) {
+			defer wg.Done()
+			resp, rerr := c.Status(ctx, e)
+			if rerr != nil {
+				errc <- rerr
+				return
+			}
+			vs := strings.Split(resp.Version, ".")
+			maj, min := 0, 0
+			if len(vs) >= 2 {
+				maj, _ = strconv.Atoi(vs[0])
+				min, rerr = strconv.Atoi(vs[1])
+			}
+			if maj < 3 || (maj == 3 && min < 2) {
+				rerr = ErrOldCluster
+			}
+			errc <- rerr
+		}(ep)
+	}
+	// wait for success
+	for i := 0; i < len(c.cfg.Endpoints); i++ {
+		if err = <-errc; err == nil {
+			break
+		}
+	}
+	cancel()
+	wg.Wait()
+	return err
+}
+
+// ActiveConnection returns the current in-use connection
+func (c *Client) ActiveConnection() *grpc.ClientConn { return c.conn }
+
+// isHaltErr returns true if the given error and context indicate no forward
+// progress can be made, even after reconnecting.
+func isHaltErr(ctx context.Context, err error) bool {
+	if ctx != nil && ctx.Err() != nil {
+		return true
+	}
+	if err == nil {
+		return false
+	}
+	ev, _ := status.FromError(err)
+	// Unavailable codes mean the system will be right back.
+	// (e.g., can't connect, lost leader)
+	// Treat Internal codes as if something failed, leaving the
+	// system in an inconsistent state, but retrying could make progress.
+	// (e.g., failed in middle of send, corrupted frame)
+	// TODO: are permanent Internal errors possible from grpc?
+	return ev.Code() != codes.Unavailable && ev.Code() != codes.Internal
+}
+
+// isUnavailableErr returns true if the given error is an unavailable error
+func isUnavailableErr(ctx context.Context, err error) bool {
+	if ctx != nil && ctx.Err() != nil {
+		return false
+	}
+	if err == nil {
+		return false
+	}
+	ev, _ := status.FromError(err)
+	// Unavailable codes mean the system will be right back.
+	// (e.g., can't connect, lost leader)
+	return ev.Code() == codes.Unavailable
+}
+
+func toErr(ctx context.Context, err error) error {
+	if err == nil {
+		return nil
+	}
+	err = rpctypes.Error(err)
+	if _, ok := err.(rpctypes.EtcdError); ok {
+		return err
+	}
+	ev, _ := status.FromError(err)
+	code := ev.Code()
+	switch code {
+	case codes.DeadlineExceeded:
+		fallthrough
+	case codes.Canceled:
+		if ctx.Err() != nil {
+			err = ctx.Err()
+		}
+	case codes.Unavailable:
+	case codes.FailedPrecondition:
+		err = grpc.ErrClientConnClosing
+	}
+	return err
+}
+
+func canceledByCaller(stopCtx context.Context, err error) bool {
+	if stopCtx.Err() == nil || err == nil {
+		return false
+	}
+
+	return err == context.Canceled || err == context.DeadlineExceeded
+}
diff --git a/vendor/github.com/coreos/etcd/clientv3/cluster.go b/vendor/github.com/coreos/etcd/clientv3/cluster.go
new file mode 100644
index 0000000..785672b
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/cluster.go
@@ -0,0 +1,114 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	"context"
+
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+	"github.com/coreos/etcd/pkg/types"
+
+	"google.golang.org/grpc"
+)
+
+type (
+	Member               pb.Member
+	MemberListResponse   pb.MemberListResponse
+	MemberAddResponse    pb.MemberAddResponse
+	MemberRemoveResponse pb.MemberRemoveResponse
+	MemberUpdateResponse pb.MemberUpdateResponse
+)
+
+type Cluster interface {
+	// MemberList lists the current cluster membership.
+	MemberList(ctx context.Context) (*MemberListResponse, error)
+
+	// MemberAdd adds a new member into the cluster.
+	MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error)
+
+	// MemberRemove removes an existing member from the cluster.
+	MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error)
+
+	// MemberUpdate updates the peer addresses of the member.
+	MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error)
+}
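+
+// A minimal usage sketch (assumes cli is a *Client, which embeds Cluster):
+//
+//	resp, err := cli.MemberList(ctx)
+//	if err != nil {
+//		// handle error
+//	}
+//	for _, m := range resp.Members {
+//		fmt.Println(m.Name, m.PeerURLs)
+//	}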
+
+type cluster struct {
+	remote   pb.ClusterClient
+	callOpts []grpc.CallOption
+}
+
+func NewCluster(c *Client) Cluster {
+	api := &cluster{remote: RetryClusterClient(c)}
+	if c != nil {
+		api.callOpts = c.callOpts
+	}
+	return api
+}
+
+func NewClusterFromClusterClient(remote pb.ClusterClient, c *Client) Cluster {
+	api := &cluster{remote: remote}
+	if c != nil {
+		api.callOpts = c.callOpts
+	}
+	return api
+}
+
+func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) {
+	// fail-fast before panic in rafthttp
+	if _, err := types.NewURLs(peerAddrs); err != nil {
+		return nil, err
+	}
+
+	r := &pb.MemberAddRequest{PeerURLs: peerAddrs}
+	resp, err := c.remote.MemberAdd(ctx, r, c.callOpts...)
+	if err != nil {
+		return nil, toErr(ctx, err)
+	}
+	return (*MemberAddResponse)(resp), nil
+}
+
+func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) {
+	r := &pb.MemberRemoveRequest{ID: id}
+	resp, err := c.remote.MemberRemove(ctx, r, c.callOpts...)
+	if err != nil {
+		return nil, toErr(ctx, err)
+	}
+	return (*MemberRemoveResponse)(resp), nil
+}
+
+func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) {
+	// fail-fast before panic in rafthttp
+	if _, err := types.NewURLs(peerAddrs); err != nil {
+		return nil, err
+	}
+
+	// it is safe to retry on update.
+	r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs}
+	resp, err := c.remote.MemberUpdate(ctx, r, c.callOpts...)
+	if err == nil {
+		return (*MemberUpdateResponse)(resp), nil
+	}
+	return nil, toErr(ctx, err)
+}
+
+func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) {
+	// it is safe to retry on list.
+	resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{}, c.callOpts...)
+	if err == nil {
+		return (*MemberListResponse)(resp), nil
+	}
+	return nil, toErr(ctx, err)
+}
diff --git a/vendor/github.com/coreos/etcd/clientv3/compact_op.go b/vendor/github.com/coreos/etcd/clientv3/compact_op.go
new file mode 100644
index 0000000..41e80c1
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/compact_op.go
@@ -0,0 +1,51 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+)
+
+// CompactOp represents a compact operation.
+type CompactOp struct {
+	revision int64
+	physical bool
+}
+
+// CompactOption configures compact operation.
+type CompactOption func(*CompactOp)
+
+func (op *CompactOp) applyCompactOpts(opts []CompactOption) {
+	for _, opt := range opts {
+		opt(op)
+	}
+}
+
+// OpCompact wraps a slice of CompactOptions to create a CompactOp.
+func OpCompact(rev int64, opts ...CompactOption) CompactOp {
+	ret := CompactOp{revision: rev}
+	ret.applyCompactOpts(opts)
+	return ret
+}
+
+func (op CompactOp) toRequest() *pb.CompactionRequest {
+	return &pb.CompactionRequest{Revision: op.revision, Physical: op.physical}
+}
+
+// WithCompactPhysical makes Compact wait until all compacted entries are
+// removed from the etcd server's storage.
+func WithCompactPhysical() CompactOption {
+	return func(op *CompactOp) { op.physical = true }
+}
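+
+// A minimal usage sketch (assumes cli is a *Client; the embedded KV's Compact
+// method accepts these options):
+//
+//	// compact up to revision 100 and wait for physical removal
+//	_, err := cli.Compact(ctx, 100, WithCompactPhysical())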
diff --git a/vendor/github.com/coreos/etcd/clientv3/compare.go b/vendor/github.com/coreos/etcd/clientv3/compare.go
new file mode 100644
index 0000000..b5f0a25
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/compare.go
@@ -0,0 +1,140 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+)
+
+type CompareTarget int
+type CompareResult int
+
+const (
+	CompareVersion CompareTarget = iota
+	CompareCreated
+	CompareModified
+	CompareValue
+)
+
+type Cmp pb.Compare
+
+func Compare(cmp Cmp, result string, v interface{}) Cmp {
+	var r pb.Compare_CompareResult
+
+	switch result {
+	case "=":
+		r = pb.Compare_EQUAL
+	case "!=":
+		r = pb.Compare_NOT_EQUAL
+	case ">":
+		r = pb.Compare_GREATER
+	case "<":
+		r = pb.Compare_LESS
+	default:
+		panic("Unknown result op")
+	}
+
+	cmp.Result = r
+	switch cmp.Target {
+	case pb.Compare_VALUE:
+		val, ok := v.(string)
+		if !ok {
+			panic("bad compare value")
+		}
+		cmp.TargetUnion = &pb.Compare_Value{Value: []byte(val)}
+	case pb.Compare_VERSION:
+		cmp.TargetUnion = &pb.Compare_Version{Version: mustInt64(v)}
+	case pb.Compare_CREATE:
+		cmp.TargetUnion = &pb.Compare_CreateRevision{CreateRevision: mustInt64(v)}
+	case pb.Compare_MOD:
+		cmp.TargetUnion = &pb.Compare_ModRevision{ModRevision: mustInt64(v)}
+	case pb.Compare_LEASE:
+		cmp.TargetUnion = &pb.Compare_Lease{Lease: mustInt64orLeaseID(v)}
+	default:
+		panic("Unknown compare type")
+	}
+	return cmp
+}
+
+func Value(key string) Cmp {
+	return Cmp{Key: []byte(key), Target: pb.Compare_VALUE}
+}
+
+func Version(key string) Cmp {
+	return Cmp{Key: []byte(key), Target: pb.Compare_VERSION}
+}
+
+func CreateRevision(key string) Cmp {
+	return Cmp{Key: []byte(key), Target: pb.Compare_CREATE}
+}
+
+func ModRevision(key string) Cmp {
+	return Cmp{Key: []byte(key), Target: pb.Compare_MOD}
+}
+
+// LeaseValue compares a key's LeaseID to a value of your choosing. The empty
+// LeaseID is 0, otherwise known as `NoLease`.
+func LeaseValue(key string) Cmp {
+	return Cmp{Key: []byte(key), Target: pb.Compare_LEASE}
+}
+
+// KeyBytes returns the byte slice holding the comparison key.
+func (cmp *Cmp) KeyBytes() []byte { return cmp.Key }
+
+// WithKeyBytes sets the byte slice for the comparison key.
+func (cmp *Cmp) WithKeyBytes(key []byte) { cmp.Key = key }
+
+// ValueBytes returns the byte slice holding the comparison value, if any.
+func (cmp *Cmp) ValueBytes() []byte {
+	if tu, ok := cmp.TargetUnion.(*pb.Compare_Value); ok {
+		return tu.Value
+	}
+	return nil
+}
+
+// WithValueBytes sets the byte slice for the comparison's value.
+func (cmp *Cmp) WithValueBytes(v []byte) { cmp.TargetUnion.(*pb.Compare_Value).Value = v }
+
+// WithRange sets the comparison to scan the range [key, end).
+func (cmp Cmp) WithRange(end string) Cmp {
+	cmp.RangeEnd = []byte(end)
+	return cmp
+}
+
+// WithPrefix sets the comparison to scan all keys prefixed by the key.
+func (cmp Cmp) WithPrefix() Cmp {
+	cmp.RangeEnd = getPrefix(cmp.Key)
+	return cmp
+}
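+
+// A minimal usage sketch of a comparison guarding a transaction (assumes cli
+// is a *Client, ctx a context.Context, and "k" a hypothetical key):
+//
+//	resp, err := cli.Txn(ctx).
+//		If(Compare(Value("k"), "=", "old")).
+//		Then(OpPut("k", "new")).
+//		Else(OpGet("k")).
+//		Commit()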
+
+// mustInt64 panics if val isn't an int or int64. It returns an int64 otherwise.
+func mustInt64(val interface{}) int64 {
+	if v, ok := val.(int64); ok {
+		return v
+	}
+	if v, ok := val.(int); ok {
+		return int64(v)
+	}
+	panic("bad value")
+}
+
+// mustInt64orLeaseID panics if val isn't a LeaseID, int or int64. It returns an
+// int64 otherwise.
+func mustInt64orLeaseID(val interface{}) int64 {
+	if v, ok := val.(LeaseID); ok {
+		return int64(v)
+	}
+	return mustInt64(val)
+}
diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/doc.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/doc.go
new file mode 100644
index 0000000..dcdbf51
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/concurrency/doc.go
@@ -0,0 +1,17 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package concurrency implements concurrency operations on top of
+// etcd such as distributed locks, barriers, and elections.
+package concurrency
diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/election.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/election.go
new file mode 100644
index 0000000..e18a0ed
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/concurrency/election.go
@@ -0,0 +1,245 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package concurrency
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	v3 "github.com/coreos/etcd/clientv3"
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+	"github.com/coreos/etcd/mvcc/mvccpb"
+)
+
+var (
+	ErrElectionNotLeader = errors.New("election: not leader")
+	ErrElectionNoLeader  = errors.New("election: no leader")
+)
+
+type Election struct {
+	session *Session
+
+	keyPrefix string
+
+	leaderKey     string
+	leaderRev     int64
+	leaderSession *Session
+	hdr           *pb.ResponseHeader
+}
+
+// NewElection returns a new election on a given key prefix.
+func NewElection(s *Session, pfx string) *Election {
+	return &Election{session: s, keyPrefix: pfx + "/"}
+}
+
+// ResumeElection initializes an election with a known leader.
+func ResumeElection(s *Session, pfx string, leaderKey string, leaderRev int64) *Election {
+	return &Election{
+		session:       s,
+		keyPrefix:     pfx,
+		leaderKey:     leaderKey,
+		leaderRev:     leaderRev,
+		leaderSession: s,
+	}
+}
+
+// Campaign puts a value as eligible for the election. It blocks until
+// it is elected, an error occurs, or the context is cancelled.
+func (e *Election) Campaign(ctx context.Context, val string) error {
+	s := e.session
+	client := e.session.Client()
+
+	k := fmt.Sprintf("%s%x", e.keyPrefix, s.Lease())
+	txn := client.Txn(ctx).If(v3.Compare(v3.CreateRevision(k), "=", 0))
+	txn = txn.Then(v3.OpPut(k, val, v3.WithLease(s.Lease())))
+	txn = txn.Else(v3.OpGet(k))
+	resp, err := txn.Commit()
+	if err != nil {
+		return err
+	}
+	e.leaderKey, e.leaderRev, e.leaderSession = k, resp.Header.Revision, s
+	if !resp.Succeeded {
+		kv := resp.Responses[0].GetResponseRange().Kvs[0]
+		e.leaderRev = kv.CreateRevision
+		if string(kv.Value) != val {
+			if err = e.Proclaim(ctx, val); err != nil {
+				e.Resign(ctx)
+				return err
+			}
+		}
+	}
+
+	_, err = waitDeletes(ctx, client, e.keyPrefix, e.leaderRev-1)
+	if err != nil {
+		// clean up in case of context cancel
+		select {
+		case <-ctx.Done():
+			e.Resign(client.Ctx())
+		default:
+			e.leaderSession = nil
+		}
+		return err
+	}
+	e.hdr = resp.Header
+
+	return nil
+}
+
+// Proclaim lets the leader announce a new value without another election.
+func (e *Election) Proclaim(ctx context.Context, val string) error {
+	if e.leaderSession == nil {
+		return ErrElectionNotLeader
+	}
+	client := e.session.Client()
+	cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev)
+	txn := client.Txn(ctx).If(cmp)
+	txn = txn.Then(v3.OpPut(e.leaderKey, val, v3.WithLease(e.leaderSession.Lease())))
+	tresp, terr := txn.Commit()
+	if terr != nil {
+		return terr
+	}
+	if !tresp.Succeeded {
+		e.leaderKey = ""
+		return ErrElectionNotLeader
+	}
+
+	e.hdr = tresp.Header
+	return nil
+}
+
+// Resign lets a leader start a new election.
+func (e *Election) Resign(ctx context.Context) (err error) {
+	if e.leaderSession == nil {
+		return nil
+	}
+	client := e.session.Client()
+	cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev)
+	resp, err := client.Txn(ctx).If(cmp).Then(v3.OpDelete(e.leaderKey)).Commit()
+	if err == nil {
+		e.hdr = resp.Header
+	}
+	e.leaderKey = ""
+	e.leaderSession = nil
+	return err
+}
+
+// Leader returns the leader value for the current election.
+func (e *Election) Leader(ctx context.Context) (*v3.GetResponse, error) {
+	client := e.session.Client()
+	resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...)
+	if err != nil {
+		return nil, err
+	} else if len(resp.Kvs) == 0 {
+		// no leader currently elected
+		return nil, ErrElectionNoLeader
+	}
+	return resp, nil
+}
+
+// Observe returns a channel that reliably observes ordered leader proposals
+// as GetResponse values on every current elected leader key. It will not
+// necessarily fetch all historical leader updates, but will always post the
+// most recent leader value.
+//
+// The channel closes when the context is canceled or the underlying watcher
+// is otherwise disrupted.
+func (e *Election) Observe(ctx context.Context) <-chan v3.GetResponse {
+	retc := make(chan v3.GetResponse)
+	go e.observe(ctx, retc)
+	return retc
+}
+
+func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) {
+	client := e.session.Client()
+
+	defer close(ch)
+	for {
+		resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...)
+		if err != nil {
+			return
+		}
+
+		var kv *mvccpb.KeyValue
+		var hdr *pb.ResponseHeader
+
+		if len(resp.Kvs) == 0 {
+			cctx, cancel := context.WithCancel(ctx)
+			// wait for first key put on prefix
+			opts := []v3.OpOption{v3.WithRev(resp.Header.Revision), v3.WithPrefix()}
+			wch := client.Watch(cctx, e.keyPrefix, opts...)
+			for kv == nil {
+				wr, ok := <-wch
+				if !ok || wr.Err() != nil {
+					cancel()
+					return
+				}
+				// only accept puts; a delete will make observe() spin
+				for _, ev := range wr.Events {
+					if ev.Type == mvccpb.PUT {
+						hdr, kv = &wr.Header, ev.Kv
+					// the batch may span multiple revisions and hdr.rev is the last one;
+					// pin it to this kv's revision in case the batch has multiple Puts
+						hdr.Revision = kv.ModRevision
+						break
+					}
+				}
+			}
+			cancel()
+		} else {
+			hdr, kv = resp.Header, resp.Kvs[0]
+		}
+
+		select {
+		case ch <- v3.GetResponse{Header: hdr, Kvs: []*mvccpb.KeyValue{kv}}:
+		case <-ctx.Done():
+			return
+		}
+
+		cctx, cancel := context.WithCancel(ctx)
+		wch := client.Watch(cctx, string(kv.Key), v3.WithRev(hdr.Revision+1))
+		keyDeleted := false
+		for !keyDeleted {
+			wr, ok := <-wch
+			if !ok {
+				cancel()
+				return
+			}
+			for _, ev := range wr.Events {
+				if ev.Type == mvccpb.DELETE {
+					keyDeleted = true
+					break
+				}
+				resp.Header = &wr.Header
+				resp.Kvs = []*mvccpb.KeyValue{ev.Kv}
+				select {
+				case ch <- *resp:
+				case <-cctx.Done():
+					cancel()
+					return
+				}
+			}
+		}
+		cancel()
+	}
+}
+
+// Key returns the leader key if elected, empty string otherwise.
+func (e *Election) Key() string { return e.leaderKey }
+
+// Rev returns the leader key's creation revision, if elected.
+func (e *Election) Rev() int64 { return e.leaderRev }
+
+// Header is the response header from the last successful election proposal.
+func (e *Election) Header() *pb.ResponseHeader { return e.hdr }
diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/key.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/key.go
new file mode 100644
index 0000000..4b6e399
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/concurrency/key.go
@@ -0,0 +1,65 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package concurrency
+
+import (
+	"context"
+	"fmt"
+
+	v3 "github.com/coreos/etcd/clientv3"
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+	"github.com/coreos/etcd/mvcc/mvccpb"
+)
+
+func waitDelete(ctx context.Context, client *v3.Client, key string, rev int64) error {
+	cctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	var wr v3.WatchResponse
+	wch := client.Watch(cctx, key, v3.WithRev(rev))
+	for wr = range wch {
+		for _, ev := range wr.Events {
+			if ev.Type == mvccpb.DELETE {
+				return nil
+			}
+		}
+	}
+	if err := wr.Err(); err != nil {
+		return err
+	}
+	if err := ctx.Err(); err != nil {
+		return err
+	}
+	return fmt.Errorf("lost watcher waiting for delete")
+}
+
+// waitDeletes efficiently waits until all keys matching the prefix and no
+// greater than the create revision are deleted.
+func waitDeletes(ctx context.Context, client *v3.Client, pfx string, maxCreateRev int64) (*pb.ResponseHeader, error) {
+	getOpts := append(v3.WithLastCreate(), v3.WithMaxCreateRev(maxCreateRev))
+	for {
+		resp, err := client.Get(ctx, pfx, getOpts...)
+		if err != nil {
+			return nil, err
+		}
+		if len(resp.Kvs) == 0 {
+			return resp.Header, nil
+		}
+		lastKey := string(resp.Kvs[0].Key)
+		if err = waitDelete(ctx, client, lastKey, resp.Header.Revision); err != nil {
+			return nil, err
+		}
+	}
+}
diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/mutex.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/mutex.go
new file mode 100644
index 0000000..77b3582
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/concurrency/mutex.go
@@ -0,0 +1,117 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package concurrency
+
+import (
+	"context"
+	"fmt"
+	"sync"
+
+	v3 "github.com/coreos/etcd/clientv3"
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+)
+
+// Mutex implements the sync Locker interface with etcd.
+type Mutex struct {
+	s *Session
+
+	pfx   string
+	myKey string
+	myRev int64
+	hdr   *pb.ResponseHeader
+}
+
+func NewMutex(s *Session, pfx string) *Mutex {
+	return &Mutex{s, pfx + "/", "", -1, nil}
+}
+
+// Lock locks the mutex with a cancelable context. If the context is canceled
+// while trying to acquire the lock, the mutex tries to clean its stale lock entry.
+func (m *Mutex) Lock(ctx context.Context) error {
+	s := m.s
+	client := m.s.Client()
+
+	m.myKey = fmt.Sprintf("%s%x", m.pfx, s.Lease())
+	cmp := v3.Compare(v3.CreateRevision(m.myKey), "=", 0)
+	// put self in lock waiters via myKey; oldest waiter holds lock
+	put := v3.OpPut(m.myKey, "", v3.WithLease(s.Lease()))
+	// reuse key in case this session already holds the lock
+	get := v3.OpGet(m.myKey)
+	// fetch current holder to complete uncontended path with only one RPC
+	getOwner := v3.OpGet(m.pfx, v3.WithFirstCreate()...)
+	resp, err := client.Txn(ctx).If(cmp).Then(put, getOwner).Else(get, getOwner).Commit()
+	if err != nil {
+		return err
+	}
+	m.myRev = resp.Header.Revision
+	if !resp.Succeeded {
+		m.myRev = resp.Responses[0].GetResponseRange().Kvs[0].CreateRevision
+	}
+	// if no key exists on the prefix, or our key has the minimum create
+	// revision, the lock is already held
+	ownerKey := resp.Responses[1].GetResponseRange().Kvs
+	if len(ownerKey) == 0 || ownerKey[0].CreateRevision == m.myRev {
+		m.hdr = resp.Header
+		return nil
+	}
+
+	// wait for deletion revisions prior to myKey
+	hdr, werr := waitDeletes(ctx, client, m.pfx, m.myRev-1)
+	// release lock key if wait failed
+	if werr != nil {
+		m.Unlock(client.Ctx())
+	} else {
+		m.hdr = hdr
+	}
+	return werr
+}
+
+func (m *Mutex) Unlock(ctx context.Context) error {
+	client := m.s.Client()
+	if _, err := client.Delete(ctx, m.myKey); err != nil {
+		return err
+	}
+	m.myKey = "\x00"
+	m.myRev = -1
+	return nil
+}
+
+func (m *Mutex) IsOwner() v3.Cmp {
+	return v3.Compare(v3.CreateRevision(m.myKey), "=", m.myRev)
+}
+
+func (m *Mutex) Key() string { return m.myKey }
+
+// Header is the response header received from etcd on acquiring the lock.
+func (m *Mutex) Header() *pb.ResponseHeader { return m.hdr }
+
+type lockerMutex struct{ *Mutex }
+
+func (lm *lockerMutex) Lock() {
+	client := lm.s.Client()
+	if err := lm.Mutex.Lock(client.Ctx()); err != nil {
+		panic(err)
+	}
+}
+func (lm *lockerMutex) Unlock() {
+	client := lm.s.Client()
+	if err := lm.Mutex.Unlock(client.Ctx()); err != nil {
+		panic(err)
+	}
+}
+
+// NewLocker creates a sync.Locker backed by an etcd mutex.
+func NewLocker(s *Session, pfx string) sync.Locker {
+	return &lockerMutex{NewMutex(s, pfx)}
+}
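+
+// A usage sketch for NewLocker (added for illustration, not part of the
+// upstream file). It assumes an existing *concurrency.Session s. Note the
+// returned sync.Locker panics on etcd errors, the trade-off for matching the
+// context-free sync.Locker interface.
+//
+//	l := concurrency.NewLocker(s, "/my-lock")
+//	l.Lock()
+//	// critical section, serialized across every process using the same prefix
+//	l.Unlock()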
diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/session.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/session.go
new file mode 100644
index 0000000..c399d64
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/concurrency/session.go
@@ -0,0 +1,141 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package concurrency
+
+import (
+	"context"
+	"time"
+
+	v3 "github.com/coreos/etcd/clientv3"
+)
+
+const defaultSessionTTL = 60
+
+// Session represents a lease kept alive for the lifetime of a client.
+// Fault-tolerant applications may use sessions to reason about liveness.
+type Session struct {
+	client *v3.Client
+	opts   *sessionOptions
+	id     v3.LeaseID
+
+	cancel context.CancelFunc
+	donec  <-chan struct{}
+}
+
+// NewSession gets the leased session for a client.
+func NewSession(client *v3.Client, opts ...SessionOption) (*Session, error) {
+	ops := &sessionOptions{ttl: defaultSessionTTL, ctx: client.Ctx()}
+	for _, opt := range opts {
+		opt(ops)
+	}
+
+	id := ops.leaseID
+	if id == v3.NoLease {
+		resp, err := client.Grant(ops.ctx, int64(ops.ttl))
+		if err != nil {
+			return nil, err
+		}
+		id = v3.LeaseID(resp.ID)
+	}
+
+	ctx, cancel := context.WithCancel(ops.ctx)
+	keepAlive, err := client.KeepAlive(ctx, id)
+	if err != nil || keepAlive == nil {
+		cancel()
+		return nil, err
+	}
+
+	donec := make(chan struct{})
+	s := &Session{client: client, opts: ops, id: id, cancel: cancel, donec: donec}
+
+	// keep the lease alive until client error or cancelled context
+	go func() {
+		defer close(donec)
+		for range keepAlive {
+			// eat messages until keep alive channel closes
+		}
+	}()
+
+	return s, nil
+}
+
+// Client is the etcd client that is attached to the session.
+func (s *Session) Client() *v3.Client {
+	return s.client
+}
+
+// Lease is the lease ID for keys bound to the session.
+func (s *Session) Lease() v3.LeaseID { return s.id }
+
+// Done returns a channel that closes when the lease is orphaned, expires, or
+// is otherwise no longer being refreshed.
+func (s *Session) Done() <-chan struct{} { return s.donec }
+
+// Orphan ends the refresh for the session lease. This is useful
+// in case the state of the client connection is indeterminate (revoke
+// would fail) or when transferring lease ownership.
+func (s *Session) Orphan() {
+	s.cancel()
+	<-s.donec
+}
+
+// Close orphans the session and revokes the session lease.
+func (s *Session) Close() error {
+	s.Orphan()
+	// if revoke takes longer than the ttl, lease is expired anyway
+	ctx, cancel := context.WithTimeout(s.opts.ctx, time.Duration(s.opts.ttl)*time.Second)
+	_, err := s.client.Revoke(ctx, s.id)
+	cancel()
+	return err
+}
+
+type sessionOptions struct {
+	ttl     int
+	leaseID v3.LeaseID
+	ctx     context.Context
+}
+
+// SessionOption configures Session.
+type SessionOption func(*sessionOptions)
+
+// WithTTL configures the session's TTL in seconds.
+// If TTL is <= 0, the default 60 seconds TTL will be used.
+func WithTTL(ttl int) SessionOption {
+	return func(so *sessionOptions) {
+		if ttl > 0 {
+			so.ttl = ttl
+		}
+	}
+}
+
+// WithLease specifies the existing leaseID to be used for the session.
+// This is useful in process restart scenario, for example, to reclaim
+// leadership from an election prior to restart.
+func WithLease(leaseID v3.LeaseID) SessionOption {
+	return func(so *sessionOptions) {
+		so.leaseID = leaseID
+	}
+}
+
+// WithContext assigns a context to the session instead of defaulting to
+// using the client context. This is useful for canceling NewSession and
+// Close operations immediately without having to close the client. If the
+// context is canceled before Close() completes, the session's lease will be
+// abandoned and left to expire instead of being revoked.
+func WithContext(ctx context.Context) SessionOption {
+	return func(so *sessionOptions) {
+		so.ctx = ctx
+	}
+}
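+
+// A usage sketch for Session (added for illustration, not part of the
+// upstream file). It assumes an existing *clientv3.Client cli; the 10-second
+// TTL is an arbitrary choice.
+//
+//	s, err := concurrency.NewSession(cli, concurrency.WithTTL(10))
+//	if err != nil {
+//		// handle error
+//	}
+//	go func() {
+//		<-s.Done() // fires when the lease stops being refreshed
+//		// trigger failover or shutdown logic here
+//	}()
+//	defer s.Close() // revokes the lease, releasing any keys bound to it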
diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/stm.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/stm.go
new file mode 100644
index 0000000..d11023e
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/concurrency/stm.go
@@ -0,0 +1,387 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package concurrency
+
+import (
+	"context"
+	"math"
+
+	v3 "github.com/coreos/etcd/clientv3"
+)
+
+// STM is an interface for software transactional memory.
+type STM interface {
+	// Get returns the value for a key and inserts the key in the txn's read set.
+	// If Get fails, it aborts the transaction with an error, never returning.
+	Get(key ...string) string
+	// Put adds a value for a key to the write set.
+	Put(key, val string, opts ...v3.OpOption)
+	// Rev returns the revision of a key in the read set.
+	Rev(key string) int64
+	// Del deletes a key.
+	Del(key string)
+
+	// commit attempts to apply the txn's changes to the server.
+	commit() *v3.TxnResponse
+	reset()
+}
+
+// Isolation is an enumeration of transactional isolation levels which
+// describes how transactions should interfere and conflict.
+type Isolation int
+
+const (
+	// SerializableSnapshot provides serializable isolation and also checks
+	// for write conflicts.
+	SerializableSnapshot Isolation = iota
+	// Serializable reads within the same transaction attempt return data
+	// from the revision of the first read.
+	Serializable
+	// RepeatableReads reads within the same transaction attempt always
+	// return the same data.
+	RepeatableReads
+	// ReadCommitted reads keys from any committed revision.
+	ReadCommitted
+)
+
+// stmError safely passes STM errors through panic to the STM error channel.
+type stmError struct{ err error }
+
+type stmOptions struct {
+	iso      Isolation
+	ctx      context.Context
+	prefetch []string
+}
+
+type stmOption func(*stmOptions)
+
+// WithIsolation specifies the transaction isolation level.
+func WithIsolation(lvl Isolation) stmOption {
+	return func(so *stmOptions) { so.iso = lvl }
+}
+
+// WithAbortContext specifies the context for permanently aborting the transaction.
+func WithAbortContext(ctx context.Context) stmOption {
+	return func(so *stmOptions) { so.ctx = ctx }
+}
+
+// WithPrefetch is a hint to prefetch a list of keys before trying to apply.
+// If an STM transaction will unconditionally fetch a set of keys, prefetching
+// those keys will save the round-trip cost from requesting each key one by one
+// with Get().
+func WithPrefetch(keys ...string) stmOption {
+	return func(so *stmOptions) { so.prefetch = append(so.prefetch, keys...) }
+}
+
+// NewSTM initiates a new STM instance, using serializable snapshot isolation by default.
+func NewSTM(c *v3.Client, apply func(STM) error, so ...stmOption) (*v3.TxnResponse, error) {
+	opts := &stmOptions{ctx: c.Ctx()}
+	for _, f := range so {
+		f(opts)
+	}
+	if len(opts.prefetch) != 0 {
+		f := apply
+		apply = func(s STM) error {
+			s.Get(opts.prefetch...)
+			return f(s)
+		}
+	}
+	return runSTM(mkSTM(c, opts), apply)
+}
+
+func mkSTM(c *v3.Client, opts *stmOptions) STM {
+	switch opts.iso {
+	case SerializableSnapshot:
+		s := &stmSerializable{
+			stm:      stm{client: c, ctx: opts.ctx},
+			prefetch: make(map[string]*v3.GetResponse),
+		}
+		s.conflicts = func() []v3.Cmp {
+			return append(s.rset.cmps(), s.wset.cmps(s.rset.first()+1)...)
+		}
+		return s
+	case Serializable:
+		s := &stmSerializable{
+			stm:      stm{client: c, ctx: opts.ctx},
+			prefetch: make(map[string]*v3.GetResponse),
+		}
+		s.conflicts = func() []v3.Cmp { return s.rset.cmps() }
+		return s
+	case RepeatableReads:
+		s := &stm{client: c, ctx: opts.ctx, getOpts: []v3.OpOption{v3.WithSerializable()}}
+		s.conflicts = func() []v3.Cmp { return s.rset.cmps() }
+		return s
+	case ReadCommitted:
+		s := &stm{client: c, ctx: opts.ctx, getOpts: []v3.OpOption{v3.WithSerializable()}}
+		s.conflicts = func() []v3.Cmp { return nil }
+		return s
+	default:
+		panic("unsupported stm")
+	}
+}
+
+type stmResponse struct {
+	resp *v3.TxnResponse
+	err  error
+}
+
+func runSTM(s STM, apply func(STM) error) (*v3.TxnResponse, error) {
+	outc := make(chan stmResponse, 1)
+	go func() {
+		defer func() {
+			if r := recover(); r != nil {
+				e, ok := r.(stmError)
+				if !ok {
+					// client apply panicked
+					panic(r)
+				}
+				outc <- stmResponse{nil, e.err}
+			}
+		}()
+		var out stmResponse
+		for {
+			s.reset()
+			if out.err = apply(s); out.err != nil {
+				break
+			}
+			if out.resp = s.commit(); out.resp != nil {
+				break
+			}
+		}
+		outc <- out
+	}()
+	r := <-outc
+	return r.resp, r.err
+}
+
+// stm implements repeatable-read software transactional memory over etcd
+type stm struct {
+	client *v3.Client
+	ctx    context.Context
+	// rset holds read key values and revisions
+	rset readSet
+	// wset holds overwritten keys and their values
+	wset writeSet
+	// getOpts are the opts used for gets
+	getOpts []v3.OpOption
+	// conflicts computes the current conflicts on the txn
+	conflicts func() []v3.Cmp
+}
+
+type stmPut struct {
+	val string
+	op  v3.Op
+}
+
+type readSet map[string]*v3.GetResponse
+
+func (rs readSet) add(keys []string, txnresp *v3.TxnResponse) {
+	for i, resp := range txnresp.Responses {
+		rs[keys[i]] = (*v3.GetResponse)(resp.GetResponseRange())
+	}
+}
+
+// first returns the store revision from the first fetch
+func (rs readSet) first() int64 {
+	ret := int64(math.MaxInt64 - 1)
+	for _, resp := range rs {
+		if rev := resp.Header.Revision; rev < ret {
+			ret = rev
+		}
+	}
+	return ret
+}
+
+// cmps guards the txn from updates to read set
+func (rs readSet) cmps() []v3.Cmp {
+	cmps := make([]v3.Cmp, 0, len(rs))
+	for k, rk := range rs {
+		cmps = append(cmps, isKeyCurrent(k, rk))
+	}
+	return cmps
+}
+
+type writeSet map[string]stmPut
+
+func (ws writeSet) get(keys ...string) *stmPut {
+	for _, key := range keys {
+		if wv, ok := ws[key]; ok {
+			return &wv
+		}
+	}
+	return nil
+}
+
+// cmps returns a cmp list testing no writes have happened past rev
+func (ws writeSet) cmps(rev int64) []v3.Cmp {
+	cmps := make([]v3.Cmp, 0, len(ws))
+	for key := range ws {
+		cmps = append(cmps, v3.Compare(v3.ModRevision(key), "<", rev))
+	}
+	return cmps
+}
+
+// puts is the list of ops for all pending writes
+func (ws writeSet) puts() []v3.Op {
+	puts := make([]v3.Op, 0, len(ws))
+	for _, v := range ws {
+		puts = append(puts, v.op)
+	}
+	return puts
+}
+
+func (s *stm) Get(keys ...string) string {
+	if wv := s.wset.get(keys...); wv != nil {
+		return wv.val
+	}
+	return respToValue(s.fetch(keys...))
+}
+
+func (s *stm) Put(key, val string, opts ...v3.OpOption) {
+	s.wset[key] = stmPut{val, v3.OpPut(key, val, opts...)}
+}
+
+func (s *stm) Del(key string) { s.wset[key] = stmPut{"", v3.OpDelete(key)} }
+
+func (s *stm) Rev(key string) int64 {
+	if resp := s.fetch(key); resp != nil && len(resp.Kvs) != 0 {
+		return resp.Kvs[0].ModRevision
+	}
+	return 0
+}
+
+func (s *stm) commit() *v3.TxnResponse {
+	txnresp, err := s.client.Txn(s.ctx).If(s.conflicts()...).Then(s.wset.puts()...).Commit()
+	if err != nil {
+		panic(stmError{err})
+	}
+	if txnresp.Succeeded {
+		return txnresp
+	}
+	return nil
+}
+
+func (s *stm) fetch(keys ...string) *v3.GetResponse {
+	if len(keys) == 0 {
+		return nil
+	}
+	ops := make([]v3.Op, len(keys))
+	for i, key := range keys {
+		if resp, ok := s.rset[key]; ok {
+			return resp
+		}
+		ops[i] = v3.OpGet(key, s.getOpts...)
+	}
+	txnresp, err := s.client.Txn(s.ctx).Then(ops...).Commit()
+	if err != nil {
+		panic(stmError{err})
+	}
+	s.rset.add(keys, txnresp)
+	return (*v3.GetResponse)(txnresp.Responses[0].GetResponseRange())
+}
+
+func (s *stm) reset() {
+	s.rset = make(map[string]*v3.GetResponse)
+	s.wset = make(map[string]stmPut)
+}
+
+type stmSerializable struct {
+	stm
+	prefetch map[string]*v3.GetResponse
+}
+
+func (s *stmSerializable) Get(keys ...string) string {
+	if wv := s.wset.get(keys...); wv != nil {
+		return wv.val
+	}
+	firstRead := len(s.rset) == 0
+	for _, key := range keys {
+		if resp, ok := s.prefetch[key]; ok {
+			delete(s.prefetch, key)
+			s.rset[key] = resp
+		}
+	}
+	resp := s.stm.fetch(keys...)
+	if firstRead {
+		// txn's base revision is defined by the first read
+		s.getOpts = []v3.OpOption{
+			v3.WithRev(resp.Header.Revision),
+			v3.WithSerializable(),
+		}
+	}
+	return respToValue(resp)
+}
+
+func (s *stmSerializable) Rev(key string) int64 {
+	s.Get(key)
+	return s.stm.Rev(key)
+}
+
+func (s *stmSerializable) gets() ([]string, []v3.Op) {
+	keys := make([]string, 0, len(s.rset))
+	ops := make([]v3.Op, 0, len(s.rset))
+	for k := range s.rset {
+		keys = append(keys, k)
+		ops = append(ops, v3.OpGet(k))
+	}
+	return keys, ops
+}
+
+func (s *stmSerializable) commit() *v3.TxnResponse {
+	keys, getops := s.gets()
+	txn := s.client.Txn(s.ctx).If(s.conflicts()...).Then(s.wset.puts()...)
+	// use Else to prefetch keys in case of conflict to save a round trip
+	txnresp, err := txn.Else(getops...).Commit()
+	if err != nil {
+		panic(stmError{err})
+	}
+	if txnresp.Succeeded {
+		return txnresp
+	}
+	// load prefetch with Else data
+	s.rset.add(keys, txnresp)
+	s.prefetch = s.rset
+	s.getOpts = nil
+	return nil
+}
+
+func isKeyCurrent(k string, r *v3.GetResponse) v3.Cmp {
+	if len(r.Kvs) != 0 {
+		return v3.Compare(v3.ModRevision(k), "=", r.Kvs[0].ModRevision)
+	}
+	return v3.Compare(v3.ModRevision(k), "=", 0)
+}
+
+func respToValue(resp *v3.GetResponse) string {
+	if resp == nil || len(resp.Kvs) == 0 {
+		return ""
+	}
+	return string(resp.Kvs[0].Value)
+}
+
+// NewSTMRepeatable is deprecated.
+func NewSTMRepeatable(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) {
+	return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(RepeatableReads))
+}
+
+// NewSTMSerializable is deprecated.
+func NewSTMSerializable(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) {
+	return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(Serializable))
+}
+
+// NewSTMReadCommitted is deprecated.
+func NewSTMReadCommitted(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) {
+	return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(ReadCommitted))
+}
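+
+// A usage sketch for NewSTM (added for illustration, not part of the upstream
+// file). It assumes an existing *clientv3.Client cli and two existing keys;
+// the apply function may be retried on conflict, so it must be free of side
+// effects outside of STM operations.
+//
+//	exchange := func(stm concurrency.STM) error {
+//		a := stm.Get("accounts/alice")
+//		b := stm.Get("accounts/bob")
+//		// swap the two values; committed atomically or retried on conflict
+//		stm.Put("accounts/alice", b)
+//		stm.Put("accounts/bob", a)
+//		return nil
+//	}
+//	if _, err := concurrency.NewSTM(cli, exchange); err != nil {
+//		// handle error
+//	}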
diff --git a/vendor/github.com/coreos/etcd/clientv3/config.go b/vendor/github.com/coreos/etcd/clientv3/config.go
new file mode 100644
index 0000000..79d6e2a
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/config.go
@@ -0,0 +1,75 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	"context"
+	"crypto/tls"
+	"time"
+
+	"google.golang.org/grpc"
+)
+
+type Config struct {
+	// Endpoints is a list of URLs.
+	Endpoints []string `json:"endpoints"`
+
+	// AutoSyncInterval is the interval at which to update endpoints with the
+	// cluster's latest members. 0 disables auto-sync. By default auto-sync is disabled.
+	AutoSyncInterval time.Duration `json:"auto-sync-interval"`
+
+	// DialTimeout is the timeout for failing to establish a connection.
+	DialTimeout time.Duration `json:"dial-timeout"`
+
+	// DialKeepAliveTime is the time after which the client pings the server to
+	// see if the transport is alive.
+	DialKeepAliveTime time.Duration `json:"dial-keep-alive-time"`
+
+	// DialKeepAliveTimeout is the time that the client waits for a response for the
+	// keep-alive probe. If the response is not received in this time, the connection is closed.
+	DialKeepAliveTimeout time.Duration `json:"dial-keep-alive-timeout"`
+
+	// MaxCallSendMsgSize is the client-side request send limit in bytes.
+	// If 0, it defaults to 2.0 MiB (2 * 1024 * 1024).
+	// Make sure that "MaxCallSendMsgSize" < server-side default send/recv limit.
+	// ("--max-request-bytes" flag to etcd or "embed.Config.MaxRequestBytes").
+	MaxCallSendMsgSize int
+
+	// MaxCallRecvMsgSize is the client-side response receive limit.
+	// If 0, it defaults to "math.MaxInt32", because range response can
+	// easily exceed request send limits.
+	// Make sure that "MaxCallRecvMsgSize" >= server-side default send/recv limit.
+	// ("--max-request-bytes" flag to etcd or "embed.Config.MaxRequestBytes").
+	MaxCallRecvMsgSize int
+
+	// TLS holds the client secure credentials, if any.
+	TLS *tls.Config
+
+	// Username is a user name for authentication.
+	Username string `json:"username"`
+
+	// Password is a password for authentication.
+	Password string `json:"password"`
+
+	// RejectOldCluster when set will refuse to create a client against an outdated cluster.
+	RejectOldCluster bool `json:"reject-old-cluster"`
+
+	// DialOptions is a list of dial options for the grpc client (e.g., for interceptors).
+	DialOptions []grpc.DialOption
+
+	// Context is the default client context; it can be used to cancel grpc dial out and
+	// other operations that do not have an explicit context.
+	Context context.Context
+}
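+
+// A construction sketch (added for illustration, not part of the upstream
+// file); the endpoints and durations below are placeholders, not recommended
+// production values.
+//
+//	cfg := clientv3.Config{
+//		Endpoints:            []string{"10.0.0.1:2379", "10.0.0.2:2379"},
+//		DialTimeout:          5 * time.Second,
+//		DialKeepAliveTime:    30 * time.Second,
+//		DialKeepAliveTimeout: 10 * time.Second,
+//	}
+//	cli, err := clientv3.New(cfg)
+//	if err != nil {
+//		// handle error
+//	}
+//	defer cli.Close()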
diff --git a/vendor/github.com/coreos/etcd/clientv3/doc.go b/vendor/github.com/coreos/etcd/clientv3/doc.go
new file mode 100644
index 0000000..717fbe4
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/doc.go
@@ -0,0 +1,97 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package clientv3 implements the official Go etcd client for v3.
+//
+// Create client using `clientv3.New`:
+//
+//	// expect dial time-out on ipv4 blackhole
+//	_, err := clientv3.New(clientv3.Config{
+//		Endpoints:   []string{"http://254.0.0.1:12345"},
+//		DialTimeout: 2 * time.Second,
+//	})
+//
+//	// etcd clientv3 >= v3.2.10, grpc/grpc-go >= v1.7.3
+//	if err == context.DeadlineExceeded {
+//		// handle errors
+//	}
+//
+//	// etcd clientv3 <= v3.2.9, grpc/grpc-go <= v1.2.1
+//	if err == grpc.ErrClientConnTimeout {
+//		// handle errors
+//	}
+//
+//	cli, err := clientv3.New(clientv3.Config{
+//		Endpoints:   []string{"localhost:2379", "localhost:22379", "localhost:32379"},
+//		DialTimeout: 5 * time.Second,
+//	})
+//	if err != nil {
+//		// handle error!
+//	}
+//	defer cli.Close()
+//
+// Make sure to close the client after using it. If the client is not closed, the
+// connection will leak goroutines.
+//
+// To specify a client request timeout, wrap the context with context.WithTimeout:
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), timeout)
+//	resp, err := kvc.Put(ctx, "sample_key", "sample_value")
+//	cancel()
+//	if err != nil {
+//	    // handle error!
+//	}
+//	// use the response
+//
+// The Client has internal state (watchers and leases), so Clients should be reused instead of created as needed.
+// Clients are safe for concurrent use by multiple goroutines.
+//
+// etcd client returns 3 types of errors:
+//
+//  1. context error: canceled or deadline exceeded.
+//  2. gRPC status error: e.g. when the clock drifts on the server side before the client's context deadline is exceeded.
+//  3. gRPC error: see https://github.com/coreos/etcd/blob/master/etcdserver/api/v3rpc/rpctypes/error.go
+//
+// Here is the example code to handle client errors:
+//
+//	resp, err := kvc.Put(ctx, "", "")
+//	if err != nil {
+//		if err == context.Canceled {
+//			// ctx is canceled by another routine
+//		} else if err == context.DeadlineExceeded {
+//			// ctx is attached with a deadline and it exceeded
+//		} else if ev, ok := status.FromError(err); ok {
+//			code := ev.Code()
+//			if code == codes.DeadlineExceeded {
+//				// server-side context might have timed-out first (due to clock skew)
+//				// while original client-side context is not timed-out yet
+//			}
+//		} else if verr, ok := err.(*v3rpc.ErrEmptyKey); ok {
+//			// process (verr.Errors)
+//		} else {
+//			// bad cluster endpoints, which are not etcd servers
+//		}
+//	}
+//
+//	go func() { cli.Close() }()
+//	_, err := kvc.Get(ctx, "a")
+//	if err != nil {
+//		if err == context.Canceled {
+//			// grpc balancer calls 'Get' with an inflight client.Close
+//		} else if err == grpc.ErrClientConnClosing {
+//			// grpc balancer calls 'Get' after client.Close.
+//		}
+//	}
+//
+package clientv3
diff --git a/vendor/github.com/coreos/etcd/clientv3/health_balancer.go b/vendor/github.com/coreos/etcd/clientv3/health_balancer.go
new file mode 100644
index 0000000..5918cba
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/health_balancer.go
@@ -0,0 +1,609 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	"context"
+	"errors"
+	"net/url"
+	"strings"
+	"sync"
+	"time"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	healthpb "google.golang.org/grpc/health/grpc_health_v1"
+	"google.golang.org/grpc/status"
+)
+
+const (
+	minHealthRetryDuration = 3 * time.Second
+	unknownService         = "unknown service grpc.health.v1.Health"
+)
+
+// ErrNoAddrAvilable is returned by Get() when the balancer does not have
+// any active connection to endpoints at the time.
+// This error is returned only when opts.BlockingWait is true.
+var ErrNoAddrAvilable = status.Error(codes.Unavailable, "there is no address available")
+
+type healthCheckFunc func(ep string) (bool, error)
+
+type notifyMsg int
+
+const (
+	notifyReset notifyMsg = iota
+	notifyNext
+)
+
+// healthBalancer does the bare minimum to expose multiple eps
+// to the grpc reconnection code path
+type healthBalancer struct {
+	// addrs are the client's endpoint addresses for grpc
+	addrs []grpc.Address
+
+	// eps holds the raw endpoints from the client
+	eps []string
+
+	// notifyCh notifies grpc of the set of addresses for connecting
+	notifyCh chan []grpc.Address
+
+	// readyc closes once the first connection is up
+	readyc    chan struct{}
+	readyOnce sync.Once
+
+	// healthCheck checks an endpoint's health.
+	healthCheck        healthCheckFunc
+	healthCheckTimeout time.Duration
+
+	unhealthyMu        sync.RWMutex
+	unhealthyHostPorts map[string]time.Time
+
+	// mu protects all fields below.
+	mu sync.RWMutex
+
+	// upc closes when pinAddr transitions from empty to non-empty or the balancer closes.
+	upc chan struct{}
+
+	// downc closes when grpc calls down() on pinAddr
+	downc chan struct{}
+
+	// stopc is closed to signal updateNotifyLoop should stop.
+	stopc    chan struct{}
+	stopOnce sync.Once
+	wg       sync.WaitGroup
+
+	// donec closes when all goroutines are exited
+	donec chan struct{}
+
+	// updateAddrsC notifies updateNotifyLoop to update addrs.
+	updateAddrsC chan notifyMsg
+
+	// grpc issues TLS cert checks using the string passed into dial so
+	// that string must be the host. To recover the full scheme://host URL,
+	// have a map from hosts to the original endpoint.
+	hostPort2ep map[string]string
+
+	// pinAddr is the currently pinned address; set to the empty string on
+	// initialization and shutdown.
+	pinAddr string
+
+	closed bool
+}
+
+func newHealthBalancer(eps []string, timeout time.Duration, hc healthCheckFunc) *healthBalancer {
+	notifyCh := make(chan []grpc.Address)
+	addrs := eps2addrs(eps)
+	hb := &healthBalancer{
+		addrs:              addrs,
+		eps:                eps,
+		notifyCh:           notifyCh,
+		readyc:             make(chan struct{}),
+		healthCheck:        hc,
+		unhealthyHostPorts: make(map[string]time.Time),
+		upc:                make(chan struct{}),
+		stopc:              make(chan struct{}),
+		downc:              make(chan struct{}),
+		donec:              make(chan struct{}),
+		updateAddrsC:       make(chan notifyMsg),
+		hostPort2ep:        getHostPort2ep(eps),
+	}
+	if timeout < minHealthRetryDuration {
+		timeout = minHealthRetryDuration
+	}
+	hb.healthCheckTimeout = timeout
+
+	close(hb.downc)
+	go hb.updateNotifyLoop()
+	hb.wg.Add(1)
+	go func() {
+		defer hb.wg.Done()
+		hb.updateUnhealthy()
+	}()
+	return hb
+}
+
+func (b *healthBalancer) Start(target string, config grpc.BalancerConfig) error { return nil }
+
+func (b *healthBalancer) ConnectNotify() <-chan struct{} {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	return b.upc
+}
+
+func (b *healthBalancer) ready() <-chan struct{} { return b.readyc }
+
+func (b *healthBalancer) endpoint(hostPort string) string {
+	b.mu.RLock()
+	defer b.mu.RUnlock()
+	return b.hostPort2ep[hostPort]
+}
+
+func (b *healthBalancer) pinned() string {
+	b.mu.RLock()
+	defer b.mu.RUnlock()
+	return b.pinAddr
+}
+
+func (b *healthBalancer) hostPortError(hostPort string, err error) {
+	if b.endpoint(hostPort) == "" {
+		logger.Lvl(4).Infof("clientv3/balancer: %q is stale (skip marking as unhealthy on %q)", hostPort, err.Error())
+		return
+	}
+
+	b.unhealthyMu.Lock()
+	b.unhealthyHostPorts[hostPort] = time.Now()
+	b.unhealthyMu.Unlock()
+	logger.Lvl(4).Infof("clientv3/balancer: %q is marked unhealthy (%q)", hostPort, err.Error())
+}
+
+func (b *healthBalancer) removeUnhealthy(hostPort, msg string) {
+	if b.endpoint(hostPort) == "" {
+		logger.Lvl(4).Infof("clientv3/balancer: %q was not in unhealthy (%q)", hostPort, msg)
+		return
+	}
+
+	b.unhealthyMu.Lock()
+	delete(b.unhealthyHostPorts, hostPort)
+	b.unhealthyMu.Unlock()
+	logger.Lvl(4).Infof("clientv3/balancer: %q is removed from unhealthy (%q)", hostPort, msg)
+}
+
+func (b *healthBalancer) countUnhealthy() (count int) {
+	b.unhealthyMu.RLock()
+	count = len(b.unhealthyHostPorts)
+	b.unhealthyMu.RUnlock()
+	return count
+}
+
+func (b *healthBalancer) isUnhealthy(hostPort string) (unhealthy bool) {
+	b.unhealthyMu.RLock()
+	_, unhealthy = b.unhealthyHostPorts[hostPort]
+	b.unhealthyMu.RUnlock()
+	return unhealthy
+}
+
+func (b *healthBalancer) cleanupUnhealthy() {
+	b.unhealthyMu.Lock()
+	for k, v := range b.unhealthyHostPorts {
+		if time.Since(v) > b.healthCheckTimeout {
+			delete(b.unhealthyHostPorts, k)
+			logger.Lvl(4).Infof("clientv3/balancer: removed %q from unhealthy after %v", k, b.healthCheckTimeout)
+		}
+	}
+	b.unhealthyMu.Unlock()
+}
+
+func (b *healthBalancer) liveAddrs() ([]grpc.Address, map[string]struct{}) {
+	unhealthyCnt := b.countUnhealthy()
+
+	b.mu.RLock()
+	defer b.mu.RUnlock()
+
+	hbAddrs := b.addrs
+	if len(b.addrs) == 1 || unhealthyCnt == 0 || unhealthyCnt == len(b.addrs) {
+		liveHostPorts := make(map[string]struct{}, len(b.hostPort2ep))
+		for k := range b.hostPort2ep {
+			liveHostPorts[k] = struct{}{}
+		}
+		return hbAddrs, liveHostPorts
+	}
+
+	addrs := make([]grpc.Address, 0, len(b.addrs)-unhealthyCnt)
+	liveHostPorts := make(map[string]struct{}, len(addrs))
+	for _, addr := range b.addrs {
+		if !b.isUnhealthy(addr.Addr) {
+			addrs = append(addrs, addr)
+			liveHostPorts[addr.Addr] = struct{}{}
+		}
+	}
+	return addrs, liveHostPorts
+}
+
+func (b *healthBalancer) updateUnhealthy() {
+	for {
+		select {
+		case <-time.After(b.healthCheckTimeout):
+			b.cleanupUnhealthy()
+			pinned := b.pinned()
+			if pinned == "" || b.isUnhealthy(pinned) {
+				select {
+				case b.updateAddrsC <- notifyNext:
+				case <-b.stopc:
+					return
+				}
+			}
+		case <-b.stopc:
+			return
+		}
+	}
+}
+
+func (b *healthBalancer) updateAddrs(eps ...string) {
+	np := getHostPort2ep(eps)
+
+	b.mu.Lock()
+	defer b.mu.Unlock()
+
+	match := len(np) == len(b.hostPort2ep)
+	if match {
+		for k, v := range np {
+			if b.hostPort2ep[k] != v {
+				match = false
+				break
+			}
+		}
+	}
+	if match {
+		// same endpoints, so no need to update address
+		return
+	}
+
+	b.hostPort2ep = np
+	b.addrs, b.eps = eps2addrs(eps), eps
+
+	b.unhealthyMu.Lock()
+	b.unhealthyHostPorts = make(map[string]time.Time)
+	b.unhealthyMu.Unlock()
+}
+
+func (b *healthBalancer) next() {
+	b.mu.RLock()
+	downc := b.downc
+	b.mu.RUnlock()
+	select {
+	case b.updateAddrsC <- notifyNext:
+	case <-b.stopc:
+	}
+	// wait until disconnect so new RPCs are not issued on old connection
+	select {
+	case <-downc:
+	case <-b.stopc:
+	}
+}
+
+func (b *healthBalancer) updateNotifyLoop() {
+	defer close(b.donec)
+
+	for {
+		b.mu.RLock()
+		upc, downc, addr := b.upc, b.downc, b.pinAddr
+		b.mu.RUnlock()
+		// downc or upc should be closed
+		select {
+		case <-downc:
+			downc = nil
+		default:
+		}
+		select {
+		case <-upc:
+			upc = nil
+		default:
+		}
+		switch {
+		case downc == nil && upc == nil:
+			// stale
+			select {
+			case <-b.stopc:
+				return
+			default:
+			}
+		case downc == nil:
+			b.notifyAddrs(notifyReset)
+			select {
+			case <-upc:
+			case msg := <-b.updateAddrsC:
+				b.notifyAddrs(msg)
+			case <-b.stopc:
+				return
+			}
+		case upc == nil:
+			select {
+			// close connections that are not the pinned address
+			case b.notifyCh <- []grpc.Address{{Addr: addr}}:
+			case <-downc:
+			case <-b.stopc:
+				return
+			}
+			select {
+			case <-downc:
+				b.notifyAddrs(notifyReset)
+			case msg := <-b.updateAddrsC:
+				b.notifyAddrs(msg)
+			case <-b.stopc:
+				return
+			}
+		}
+	}
+}
+
+func (b *healthBalancer) notifyAddrs(msg notifyMsg) {
+	if msg == notifyNext {
+		select {
+		case b.notifyCh <- []grpc.Address{}:
+		case <-b.stopc:
+			return
+		}
+	}
+	b.mu.RLock()
+	pinAddr := b.pinAddr
+	downc := b.downc
+	b.mu.RUnlock()
+	addrs, hostPorts := b.liveAddrs()
+
+	var waitDown bool
+	if pinAddr != "" {
+		_, ok := hostPorts[pinAddr]
+		waitDown = !ok
+	}
+
+	select {
+	case b.notifyCh <- addrs:
+		if waitDown {
+			select {
+			case <-downc:
+			case <-b.stopc:
+			}
+		}
+	case <-b.stopc:
+	}
+}
+
+func (b *healthBalancer) Up(addr grpc.Address) func(error) {
+	if !b.mayPin(addr) {
+		return func(err error) {}
+	}
+
+	b.mu.Lock()
+	defer b.mu.Unlock()
+
+	// gRPC might call Up after it called Close. We add this check
+	// to "fix" it up at the application layer. Otherwise, it will
+	// panic if b.upc is already closed.
+	if b.closed {
+		return func(err error) {}
+	}
+
+	// gRPC might call Up on a stale address.
+	// Prevent updating pinAddr with a stale address.
+	if !hasAddr(b.addrs, addr.Addr) {
+		return func(err error) {}
+	}
+
+	if b.pinAddr != "" {
+		logger.Lvl(4).Infof("clientv3/balancer: %q is up but not pinned (already pinned %q)", addr.Addr, b.pinAddr)
+		return func(err error) {}
+	}
+
+	// notify waiting Get()s and pin first connected address
+	close(b.upc)
+	b.downc = make(chan struct{})
+	b.pinAddr = addr.Addr
+	logger.Lvl(4).Infof("clientv3/balancer: pin %q", addr.Addr)
+
+	// notify client that a connection is up
+	b.readyOnce.Do(func() { close(b.readyc) })
+
+	return func(err error) {
+		// If connected to a black hole endpoint or a killed server, the gRPC ping
+		// timeout will induce a network I/O error and gRPC retries until success;
+		// finding a healthy endpoint on retry could take several timeouts and redials.
+		// To avoid wasting retries, gray-list unhealthy endpoints.
+		b.hostPortError(addr.Addr, err)
+
+		b.mu.Lock()
+		b.upc = make(chan struct{})
+		close(b.downc)
+		b.pinAddr = ""
+		b.mu.Unlock()
+		logger.Lvl(4).Infof("clientv3/balancer: unpin %q (%q)", addr.Addr, err.Error())
+	}
+}
+
+func (b *healthBalancer) mayPin(addr grpc.Address) bool {
+	if b.endpoint(addr.Addr) == "" { // stale host:port
+		return false
+	}
+
+	b.unhealthyMu.RLock()
+	unhealthyCnt := len(b.unhealthyHostPorts)
+	failedTime, bad := b.unhealthyHostPorts[addr.Addr]
+	b.unhealthyMu.RUnlock()
+
+	b.mu.RLock()
+	skip := len(b.addrs) == 1 || unhealthyCnt == 0 || len(b.addrs) == unhealthyCnt
+	b.mu.RUnlock()
+	if skip || !bad {
+		return true
+	}
+
+	// prevent an isolated member's endpoint from being infinitely retried, as follows:
+	//   1. keepalive pings detect GoAway with http2.ErrCodeEnhanceYourCalm
+	//   2. balancer 'Up' unpins with grpc: failed with network I/O error
+	//   3. grpc-healthcheck still reports SERVING, thus it retries to pin
+	// instead, return before the grpc-healthcheck if it failed within the healthcheck timeout
+	if elapsed := time.Since(failedTime); elapsed < b.healthCheckTimeout {
+		logger.Lvl(4).Infof("clientv3/balancer: %q is up but not pinned (failed %v ago, require minimum %v after failure)", addr.Addr, elapsed, b.healthCheckTimeout)
+		return false
+	}
+
+	if ok, _ := b.healthCheck(addr.Addr); ok {
+		b.removeUnhealthy(addr.Addr, "health check success")
+		return true
+	}
+
+	b.hostPortError(addr.Addr, errors.New("health check failed"))
+	return false
+}
+
+func (b *healthBalancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (grpc.Address, func(), error) {
+	var (
+		addr   string
+		closed bool
+	)
+
+	// If opts.BlockingWait is false (for fail-fast RPCs), it should return
+	// an address it has notified via Notify immediately instead of blocking.
+	if !opts.BlockingWait {
+		b.mu.RLock()
+		closed = b.closed
+		addr = b.pinAddr
+		b.mu.RUnlock()
+		if closed {
+			return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
+		}
+		if addr == "" {
+			return grpc.Address{Addr: ""}, nil, ErrNoAddrAvilable
+		}
+		return grpc.Address{Addr: addr}, func() {}, nil
+	}
+
+	for {
+		b.mu.RLock()
+		ch := b.upc
+		b.mu.RUnlock()
+		select {
+		case <-ch:
+		case <-b.donec:
+			return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
+		case <-ctx.Done():
+			return grpc.Address{Addr: ""}, nil, ctx.Err()
+		}
+		b.mu.RLock()
+		closed = b.closed
+		addr = b.pinAddr
+		b.mu.RUnlock()
+		// Close(), which sets b.closed = true, can be called before Get(); Get() must exit if the balancer is closed.
+		if closed {
+			return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
+		}
+		if addr != "" {
+			break
+		}
+	}
+	return grpc.Address{Addr: addr}, func() {}, nil
+}
+
+func (b *healthBalancer) Notify() <-chan []grpc.Address { return b.notifyCh }
+
+func (b *healthBalancer) Close() error {
+	b.mu.Lock()
+	// In case gRPC calls close twice. TODO: remove this check
+	// when we are sure that gRPC won't call close twice.
+	if b.closed {
+		b.mu.Unlock()
+		<-b.donec
+		return nil
+	}
+	b.closed = true
+	b.stopOnce.Do(func() { close(b.stopc) })
+	b.pinAddr = ""
+
+	// In the following scenario:
+	//	1. upc is not closed; no pinned address
+	//	2. client issues an RPC, calling invoke(), which calls Get(), enters the for loop, and blocks
+	//	3. client.conn.Close() calls balancer.Close(); closed = true
+	//	4. the for loop in Get() never exits since ctx is the context passed in by the client and may not be canceled
+	// we must close upc so Get() exits from blocking on upc
+	select {
+	case <-b.upc:
+	default:
+		// terminate all waiting Get()s
+		close(b.upc)
+	}
+
+	b.mu.Unlock()
+	b.wg.Wait()
+
+	// wait for updateNotifyLoop to finish
+	<-b.donec
+	close(b.notifyCh)
+
+	return nil
+}
+
+func grpcHealthCheck(client *Client, ep string) (bool, error) {
+	conn, err := client.dial(ep)
+	if err != nil {
+		return false, err
+	}
+	defer conn.Close()
+	cli := healthpb.NewHealthClient(conn)
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+	resp, err := cli.Check(ctx, &healthpb.HealthCheckRequest{})
+	cancel()
+	if err != nil {
+		if s, ok := status.FromError(err); ok && s.Code() == codes.Unavailable {
+			if s.Message() == unknownService { // etcd < v3.3.0
+				return true, nil
+			}
+		}
+		return false, err
+	}
+	return resp.Status == healthpb.HealthCheckResponse_SERVING, nil
+}
+
+func hasAddr(addrs []grpc.Address, targetAddr string) bool {
+	for _, addr := range addrs {
+		if targetAddr == addr.Addr {
+			return true
+		}
+	}
+	return false
+}
+
+func getHost(ep string) string {
+	url, uerr := url.Parse(ep)
+	if uerr != nil || !strings.Contains(ep, "://") {
+		return ep
+	}
+	return url.Host
+}
+
+func eps2addrs(eps []string) []grpc.Address {
+	addrs := make([]grpc.Address, len(eps))
+	for i := range eps {
+		addrs[i].Addr = getHost(eps[i])
+	}
+	return addrs
+}
+
+func getHostPort2ep(eps []string) map[string]string {
+	hm := make(map[string]string, len(eps))
+	for i := range eps {
+		_, host, _ := parseEndpoint(eps[i])
+		hm[host] = eps[i]
+	}
+	return hm
+}
diff --git a/vendor/github.com/coreos/etcd/clientv3/kv.go b/vendor/github.com/coreos/etcd/clientv3/kv.go
new file mode 100644
index 0000000..5a7469b
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/kv.go
@@ -0,0 +1,177 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	"context"
+
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+
+	"google.golang.org/grpc"
+)
+
+type (
+	CompactResponse pb.CompactionResponse
+	PutResponse     pb.PutResponse
+	GetResponse     pb.RangeResponse
+	DeleteResponse  pb.DeleteRangeResponse
+	TxnResponse     pb.TxnResponse
+)
+
+type KV interface {
+	// Put puts a key-value pair into etcd.
+	// Note that key and value can be plain byte arrays; a string is
+	// an immutable representation of such a byte array.
+	// To get a string of bytes, do string([]byte{0x10, 0x20}).
+	Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error)
+
+	// Get retrieves keys.
+	// By default, Get will return the value for "key", if any.
+	// When passed WithRange(end), Get will return the keys in the range [key, end).
+	// When passed WithFromKey(), Get returns keys greater than or equal to key.
+	// When passed WithRev(rev) with rev > 0, Get retrieves keys at the given revision;
+	// if the required revision is compacted, the request will fail with ErrCompacted.
+	// When passed WithLimit(limit), the number of returned keys is bounded by limit.
+	// When passed WithSort(), the keys will be sorted.
+	Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error)
+
+	// Delete deletes a key or, with WithRange(end), the range [key, end).
+	Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error)
+
+	// Compact compacts etcd KV history before the given rev.
+	Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error)
+
+	// Do applies a single Op on KV without a transaction.
+	// Do is useful when creating arbitrary operations to be issued at a
+	// later time; the user can range over the operations, calling Do to
+	// execute them. Get/Put/Delete, on the other hand, are best suited
+	// for when the operation should be issued at the time of declaration.
+	Do(ctx context.Context, op Op) (OpResponse, error)
+
+	// Txn creates a transaction.
+	Txn(ctx context.Context) Txn
+}
+
+type OpResponse struct {
+	put *PutResponse
+	get *GetResponse
+	del *DeleteResponse
+	txn *TxnResponse
+}
+
+func (op OpResponse) Put() *PutResponse    { return op.put }
+func (op OpResponse) Get() *GetResponse    { return op.get }
+func (op OpResponse) Del() *DeleteResponse { return op.del }
+func (op OpResponse) Txn() *TxnResponse    { return op.txn }
+
+func (resp *PutResponse) OpResponse() OpResponse {
+	return OpResponse{put: resp}
+}
+func (resp *GetResponse) OpResponse() OpResponse {
+	return OpResponse{get: resp}
+}
+func (resp *DeleteResponse) OpResponse() OpResponse {
+	return OpResponse{del: resp}
+}
+func (resp *TxnResponse) OpResponse() OpResponse {
+	return OpResponse{txn: resp}
+}
+
+type kv struct {
+	remote   pb.KVClient
+	callOpts []grpc.CallOption
+}
+
+func NewKV(c *Client) KV {
+	api := &kv{remote: RetryKVClient(c)}
+	if c != nil {
+		api.callOpts = c.callOpts
+	}
+	return api
+}
+
+func NewKVFromKVClient(remote pb.KVClient, c *Client) KV {
+	api := &kv{remote: remote}
+	if c != nil {
+		api.callOpts = c.callOpts
+	}
+	return api
+}
+
+func (kv *kv) Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) {
+	r, err := kv.Do(ctx, OpPut(key, val, opts...))
+	return r.put, toErr(ctx, err)
+}
+
+func (kv *kv) Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error) {
+	r, err := kv.Do(ctx, OpGet(key, opts...))
+	return r.get, toErr(ctx, err)
+}
+
+func (kv *kv) Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error) {
+	r, err := kv.Do(ctx, OpDelete(key, opts...))
+	return r.del, toErr(ctx, err)
+}
+
+func (kv *kv) Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) {
+	resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest(), kv.callOpts...)
+	if err != nil {
+		return nil, toErr(ctx, err)
+	}
+	return (*CompactResponse)(resp), err
+}
+
+func (kv *kv) Txn(ctx context.Context) Txn {
+	return &txn{
+		kv:       kv,
+		ctx:      ctx,
+		callOpts: kv.callOpts,
+	}
+}
+
+func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) {
+	var err error
+	switch op.t {
+	case tRange:
+		var resp *pb.RangeResponse
+		resp, err = kv.remote.Range(ctx, op.toRangeRequest(), kv.callOpts...)
+		if err == nil {
+			return OpResponse{get: (*GetResponse)(resp)}, nil
+		}
+	case tPut:
+		var resp *pb.PutResponse
+		r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease}
+		resp, err = kv.remote.Put(ctx, r, kv.callOpts...)
+		if err == nil {
+			return OpResponse{put: (*PutResponse)(resp)}, nil
+		}
+	case tDeleteRange:
+		var resp *pb.DeleteRangeResponse
+		r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV}
+		resp, err = kv.remote.DeleteRange(ctx, r, kv.callOpts...)
+		if err == nil {
+			return OpResponse{del: (*DeleteResponse)(resp)}, nil
+		}
+	case tTxn:
+		var resp *pb.TxnResponse
+		resp, err = kv.remote.Txn(ctx, op.toTxnRequest(), kv.callOpts...)
+		if err == nil {
+			return OpResponse{txn: (*TxnResponse)(resp)}, nil
+		}
+	default:
+		panic("Unknown op")
+	}
+	return OpResponse{}, toErr(ctx, err)
+}
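+
+// A usage sketch for KV.Do (added for illustration, not part of the upstream
+// file). It assumes an existing *clientv3.Client cli; building the Ops up
+// front and ranging over them mirrors the deferred-execution pattern the Do
+// documentation above describes.
+//
+//	ops := []clientv3.Op{
+//		clientv3.OpPut("k1", "v1"),
+//		clientv3.OpGet("k1"),
+//		clientv3.OpDelete("k1"),
+//	}
+//	for _, op := range ops {
+//		if _, err := cli.Do(context.TODO(), op); err != nil {
+//			// handle error
+//		}
+//	}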
diff --git a/vendor/github.com/coreos/etcd/clientv3/lease.go b/vendor/github.com/coreos/etcd/clientv3/lease.go
new file mode 100644
index 0000000..3729cf3
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/lease.go
@@ -0,0 +1,588 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	"context"
+	"sync"
+	"time"
+
+	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/metadata"
+)
+
+type (
+	LeaseRevokeResponse pb.LeaseRevokeResponse
+	LeaseID             int64
+)
+
+// LeaseGrantResponse wraps the protobuf message LeaseGrantResponse.
+type LeaseGrantResponse struct {
+	*pb.ResponseHeader
+	ID    LeaseID
+	TTL   int64
+	Error string
+}
+
+// LeaseKeepAliveResponse wraps the protobuf message LeaseKeepAliveResponse.
+type LeaseKeepAliveResponse struct {
+	*pb.ResponseHeader
+	ID  LeaseID
+	TTL int64
+}
+
+// LeaseTimeToLiveResponse wraps the protobuf message LeaseTimeToLiveResponse.
+type LeaseTimeToLiveResponse struct {
+	*pb.ResponseHeader
+	ID LeaseID `json:"id"`
+
+	// TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds. An expired lease returns -1.
+	TTL int64 `json:"ttl"`
+
+	// GrantedTTL is the initial granted time in seconds upon lease creation/renewal.
+	GrantedTTL int64 `json:"granted-ttl"`
+
+	// Keys is the list of keys attached to this lease.
+	Keys [][]byte `json:"keys"`
+}
+
+// LeaseStatus represents a lease status.
+type LeaseStatus struct {
+	ID LeaseID `json:"id"`
+	// TODO: TTL int64
+}
+
+// LeaseLeasesResponse wraps the protobuf message LeaseLeasesResponse.
+type LeaseLeasesResponse struct {
+	*pb.ResponseHeader
+	Leases []LeaseStatus `json:"leases"`
+}
+
+const (
+	// defaultTTL is the assumed lease TTL used for the first keepalive
+	// deadline before the actual TTL is known to the client.
+	defaultTTL = 5 * time.Second
+	// NoLease is a lease ID for the absence of a lease.
+	NoLease LeaseID = 0
+
+	// retryConnWait is how long to wait before retrying a request after an error
+	retryConnWait = 500 * time.Millisecond
+)
+
+// LeaseResponseChSize is the size of the buffer that stores unsent lease responses.
+// WARNING: DO NOT UPDATE.
+// Only for testing purposes.
+var LeaseResponseChSize = 16
+
+// ErrKeepAliveHalted is returned if client keep alive loop halts with an unexpected error.
+//
+// This usually means that automatic lease renewal via KeepAlive is broken, but KeepAliveOnce will still work as expected.
+type ErrKeepAliveHalted struct {
+	Reason error
+}
+
+func (e ErrKeepAliveHalted) Error() string {
+	s := "etcdclient: leases keep alive halted"
+	if e.Reason != nil {
+		s += ": " + e.Reason.Error()
+	}
+	return s
+}
+
+type Lease interface {
+	// Grant creates a new lease.
+	Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error)
+
+	// Revoke revokes the given lease.
+	Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error)
+
+	// TimeToLive retrieves the lease information of the given lease ID.
+	TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error)
+
+	// Leases retrieves all leases.
+	Leases(ctx context.Context) (*LeaseLeasesResponse, error)
+
+	// KeepAlive keeps the given lease alive forever. If the keepalive responses
+	// posted to the channel are not consumed promptly, the lease client will
+	// continue sending keep alive requests to the etcd server at least every
+	// second until the latest response is consumed.
+	//
+	// The returned "LeaseKeepAliveResponse" channel closes if the underlying
+	// keep alive stream is interrupted in some way the client cannot handle
+	// itself, or if the given context "ctx" is canceled or times out. A
+	// "LeaseKeepAliveResponse" received from this closed channel is nil.
+	//
+	// If the client keep alive loop halts with an unexpected error (e.g.
+	// "etcdserver: no leader") or is canceled by the caller (e.g.
+	// context.Canceled), the error is returned. Otherwise, it retries.
+	//
+	// TODO(v4.0): post errors to last keep alive message before closing
+	// (see https://github.com/coreos/etcd/pull/7866)
+	KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error)
+
+	// KeepAliveOnce renews the lease once. The response corresponds to the
+	// first message from calling KeepAlive. If the response has a recoverable
+	// error, KeepAliveOnce will retry the RPC with a new keep alive message.
+	//
+	// In most cases, KeepAlive should be used instead of KeepAliveOnce.
+	KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error)
+
+	// Close releases all resources Lease keeps for efficient communication
+	// with the etcd server.
+	Close() error
+}
+
+type lessor struct {
+	mu sync.Mutex // guards all fields
+
+	// donec is closed and loopErr is set when recvKeepAliveLoop stops
+	donec   chan struct{}
+	loopErr error
+
+	remote pb.LeaseClient
+
+	stream       pb.Lease_LeaseKeepAliveClient
+	streamCancel context.CancelFunc
+
+	stopCtx    context.Context
+	stopCancel context.CancelFunc
+
+	keepAlives map[LeaseID]*keepAlive
+
+	// firstKeepAliveTimeout is the timeout for the first keepalive request
+	// before the actual TTL is known to the lease client
+	firstKeepAliveTimeout time.Duration
+
+	// firstKeepAliveOnce ensures the stream starts only after the first KeepAlive call.
+	firstKeepAliveOnce sync.Once
+
+	callOpts []grpc.CallOption
+}
+
+// keepAlive multiplexes a keepalive for a lease over multiple channels
+type keepAlive struct {
+	chs  []chan<- *LeaseKeepAliveResponse
+	ctxs []context.Context
+	// deadline is the time at which the keep alive channels close if no response arrives
+	deadline time.Time
+	// nextKeepAlive is when to send the next keep alive message
+	nextKeepAlive time.Time
+	// donec is closed on lease revoke, expiration, or cancel.
+	donec chan struct{}
+}
+
+func NewLease(c *Client) Lease {
+	return NewLeaseFromLeaseClient(RetryLeaseClient(c), c, c.cfg.DialTimeout+time.Second)
+}
+
+func NewLeaseFromLeaseClient(remote pb.LeaseClient, c *Client, keepAliveTimeout time.Duration) Lease {
+	l := &lessor{
+		donec:                 make(chan struct{}),
+		keepAlives:            make(map[LeaseID]*keepAlive),
+		remote:                remote,
+		firstKeepAliveTimeout: keepAliveTimeout,
+	}
+	if l.firstKeepAliveTimeout == time.Second {
+		l.firstKeepAliveTimeout = defaultTTL
+	}
+	if c != nil {
+		l.callOpts = c.callOpts
+	}
+	reqLeaderCtx := WithRequireLeader(context.Background())
+	l.stopCtx, l.stopCancel = context.WithCancel(reqLeaderCtx)
+	return l
+}
+
+func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) {
+	r := &pb.LeaseGrantRequest{TTL: ttl}
+	resp, err := l.remote.LeaseGrant(ctx, r, l.callOpts...)
+	if err == nil {
+		gresp := &LeaseGrantResponse{
+			ResponseHeader: resp.GetHeader(),
+			ID:             LeaseID(resp.ID),
+			TTL:            resp.TTL,
+			Error:          resp.Error,
+		}
+		return gresp, nil
+	}
+	return nil, toErr(ctx, err)
+}
+
+func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) {
+	r := &pb.LeaseRevokeRequest{ID: int64(id)}
+	resp, err := l.remote.LeaseRevoke(ctx, r, l.callOpts...)
+	if err == nil {
+		return (*LeaseRevokeResponse)(resp), nil
+	}
+	return nil, toErr(ctx, err)
+}
+
+func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) {
+	r := toLeaseTimeToLiveRequest(id, opts...)
+	resp, err := l.remote.LeaseTimeToLive(ctx, r, l.callOpts...)
+	if err == nil {
+		gresp := &LeaseTimeToLiveResponse{
+			ResponseHeader: resp.GetHeader(),
+			ID:             LeaseID(resp.ID),
+			TTL:            resp.TTL,
+			GrantedTTL:     resp.GrantedTTL,
+			Keys:           resp.Keys,
+		}
+		return gresp, nil
+	}
+	return nil, toErr(ctx, err)
+}
+
+func (l *lessor) Leases(ctx context.Context) (*LeaseLeasesResponse, error) {
+	resp, err := l.remote.LeaseLeases(ctx, &pb.LeaseLeasesRequest{}, l.callOpts...)
+	if err == nil {
+		leases := make([]LeaseStatus, len(resp.Leases))
+		for i := range resp.Leases {
+			leases[i] = LeaseStatus{ID: LeaseID(resp.Leases[i].ID)}
+		}
+		return &LeaseLeasesResponse{ResponseHeader: resp.GetHeader(), Leases: leases}, nil
+	}
+	return nil, toErr(ctx, err)
+}
+
+func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) {
+	ch := make(chan *LeaseKeepAliveResponse, LeaseResponseChSize)
+
+	l.mu.Lock()
+	// ensure that recvKeepAliveLoop is still running
+	select {
+	case <-l.donec:
+		err := l.loopErr
+		l.mu.Unlock()
+		close(ch)
+		return ch, ErrKeepAliveHalted{Reason: err}
+	default:
+	}
+	ka, ok := l.keepAlives[id]
+	if !ok {
+		// create fresh keep alive
+		ka = &keepAlive{
+			chs:           []chan<- *LeaseKeepAliveResponse{ch},
+			ctxs:          []context.Context{ctx},
+			deadline:      time.Now().Add(l.firstKeepAliveTimeout),
+			nextKeepAlive: time.Now(),
+			donec:         make(chan struct{}),
+		}
+		l.keepAlives[id] = ka
+	} else {
+		// add channel and context to existing keep alive
+		ka.ctxs = append(ka.ctxs, ctx)
+		ka.chs = append(ka.chs, ch)
+	}
+	l.mu.Unlock()
+
+	go l.keepAliveCtxCloser(id, ctx, ka.donec)
+	l.firstKeepAliveOnce.Do(func() {
+		go l.recvKeepAliveLoop()
+		go l.deadlineLoop()
+	})
+
+	return ch, nil
+}
+
+func (l *lessor) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) {
+	for {
+		resp, err := l.keepAliveOnce(ctx, id)
+		if err == nil {
+			if resp.TTL <= 0 {
+				err = rpctypes.ErrLeaseNotFound
+			}
+			return resp, err
+		}
+		if isHaltErr(ctx, err) {
+			return nil, toErr(ctx, err)
+		}
+	}
+}
+
+func (l *lessor) Close() error {
+	l.stopCancel()
+	// close for synchronous teardown if the stream goroutines never launched
+	l.firstKeepAliveOnce.Do(func() { close(l.donec) })
+	<-l.donec
+	return nil
+}
+
+func (l *lessor) keepAliveCtxCloser(id LeaseID, ctx context.Context, donec <-chan struct{}) {
+	select {
+	case <-donec:
+		return
+	case <-l.donec:
+		return
+	case <-ctx.Done():
+	}
+
+	l.mu.Lock()
+	defer l.mu.Unlock()
+
+	ka, ok := l.keepAlives[id]
+	if !ok {
+		return
+	}
+
+	// close channel and remove context if still associated with keep alive
+	for i, c := range ka.ctxs {
+		if c == ctx {
+			close(ka.chs[i])
+			ka.ctxs = append(ka.ctxs[:i], ka.ctxs[i+1:]...)
+			ka.chs = append(ka.chs[:i], ka.chs[i+1:]...)
+			break
+		}
+	}
+	// remove the keepalive if there are no more listeners
+	if len(ka.chs) == 0 {
+		delete(l.keepAlives, id)
+	}
+}
+
+// closeRequireLeader scans keepAlives for contexts that require a leader
+// and closes the associated channels.
+func (l *lessor) closeRequireLeader() {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	for _, ka := range l.keepAlives {
+		reqIdxs := 0
+		// find all require-leader channels; close them and mark as nil
+		for i, ctx := range ka.ctxs {
+			md, ok := metadata.FromOutgoingContext(ctx)
+			if !ok {
+				continue
+			}
+			ks := md[rpctypes.MetadataRequireLeaderKey]
+			if len(ks) < 1 || ks[0] != rpctypes.MetadataHasLeader {
+				continue
+			}
+			close(ka.chs[i])
+			ka.chs[i] = nil
+			reqIdxs++
+		}
+		if reqIdxs == 0 {
+			continue
+		}
+		// remove all channels that required a leader from keepalive
+		newChs := make([]chan<- *LeaseKeepAliveResponse, len(ka.chs)-reqIdxs)
+		newCtxs := make([]context.Context, len(newChs))
+		newIdx := 0
+		for i := range ka.chs {
+			if ka.chs[i] == nil {
+				continue
+			}
+			newChs[newIdx], newCtxs[newIdx] = ka.chs[i], ka.ctxs[i]
+			newIdx++
+		}
+		ka.chs, ka.ctxs = newChs, newCtxs
+	}
+}
+
+func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) {
+	cctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	stream, err := l.remote.LeaseKeepAlive(cctx, l.callOpts...)
+	if err != nil {
+		return nil, toErr(ctx, err)
+	}
+
+	err = stream.Send(&pb.LeaseKeepAliveRequest{ID: int64(id)})
+	if err != nil {
+		return nil, toErr(ctx, err)
+	}
+
+	resp, rerr := stream.Recv()
+	if rerr != nil {
+		return nil, toErr(ctx, rerr)
+	}
+
+	karesp := &LeaseKeepAliveResponse{
+		ResponseHeader: resp.GetHeader(),
+		ID:             LeaseID(resp.ID),
+		TTL:            resp.TTL,
+	}
+	return karesp, nil
+}
+
+func (l *lessor) recvKeepAliveLoop() (gerr error) {
+	defer func() {
+		l.mu.Lock()
+		close(l.donec)
+		l.loopErr = gerr
+		for _, ka := range l.keepAlives {
+			ka.close()
+		}
+		l.keepAlives = make(map[LeaseID]*keepAlive)
+		l.mu.Unlock()
+	}()
+
+	for {
+		stream, err := l.resetRecv()
+		if err != nil {
+			if canceledByCaller(l.stopCtx, err) {
+				return err
+			}
+		} else {
+			for {
+				resp, err := stream.Recv()
+				if err != nil {
+					if canceledByCaller(l.stopCtx, err) {
+						return err
+					}
+
+					if toErr(l.stopCtx, err) == rpctypes.ErrNoLeader {
+						l.closeRequireLeader()
+					}
+					break
+				}
+
+				l.recvKeepAlive(resp)
+			}
+		}
+
+		select {
+		case <-time.After(retryConnWait):
+			continue
+		case <-l.stopCtx.Done():
+			return l.stopCtx.Err()
+		}
+	}
+}
+
+// resetRecv opens a new lease stream and starts sending keep alive requests.
+func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) {
+	sctx, cancel := context.WithCancel(l.stopCtx)
+	stream, err := l.remote.LeaseKeepAlive(sctx, l.callOpts...)
+	if err != nil {
+		cancel()
+		return nil, err
+	}
+
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	if l.stream != nil && l.streamCancel != nil {
+		l.streamCancel()
+	}
+
+	l.streamCancel = cancel
+	l.stream = stream
+
+	go l.sendKeepAliveLoop(stream)
+	return stream, nil
+}
+
+// recvKeepAlive updates a lease based on its LeaseKeepAliveResponse
+func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) {
+	karesp := &LeaseKeepAliveResponse{
+		ResponseHeader: resp.GetHeader(),
+		ID:             LeaseID(resp.ID),
+		TTL:            resp.TTL,
+	}
+
+	l.mu.Lock()
+	defer l.mu.Unlock()
+
+	ka, ok := l.keepAlives[karesp.ID]
+	if !ok {
+		return
+	}
+
+	if karesp.TTL <= 0 {
+		// lease expired; close all keep alive channels
+		delete(l.keepAlives, karesp.ID)
+		ka.close()
+		return
+	}
+
+	// send update to all channels
+	nextKeepAlive := time.Now().Add((time.Duration(karesp.TTL) * time.Second) / 3)
+	ka.deadline = time.Now().Add(time.Duration(karesp.TTL) * time.Second)
+	for _, ch := range ka.chs {
+		select {
+		case ch <- karesp:
+		default:
+		}
+		// still advance in order to rate-limit keep-alive sends
+		ka.nextKeepAlive = nextKeepAlive
+	}
+}
+
+// deadlineLoop reaps any keep alive channels that have not received a response
+// within the lease TTL
+func (l *lessor) deadlineLoop() {
+	for {
+		select {
+		case <-time.After(time.Second):
+		case <-l.donec:
+			return
+		}
+		now := time.Now()
+		l.mu.Lock()
+		for id, ka := range l.keepAlives {
+			if ka.deadline.Before(now) {
+				// waited too long for response; lease may be expired
+				ka.close()
+				delete(l.keepAlives, id)
+			}
+		}
+		l.mu.Unlock()
+	}
+}
+
+// sendKeepAliveLoop sends keep alive requests for the lifetime of the given stream.
+func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) {
+	for {
+		var tosend []LeaseID
+
+		now := time.Now()
+		l.mu.Lock()
+		for id, ka := range l.keepAlives {
+			if ka.nextKeepAlive.Before(now) {
+				tosend = append(tosend, id)
+			}
+		}
+		l.mu.Unlock()
+
+		for _, id := range tosend {
+			r := &pb.LeaseKeepAliveRequest{ID: int64(id)}
+			if err := stream.Send(r); err != nil {
+				// TODO do something with this error?
+				return
+			}
+		}
+
+		select {
+		case <-time.After(500 * time.Millisecond):
+		case <-stream.Context().Done():
+			return
+		case <-l.donec:
+			return
+		case <-l.stopCtx.Done():
+			return
+		}
+	}
+}
+
+func (ka *keepAlive) close() {
+	close(ka.donec)
+	for _, ch := range ka.chs {
+		close(ch)
+	}
+}
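
A hedged sketch of the typical Grant/KeepAlive flow through the Lease interface above, assuming an existing *clientv3.Client named cli (the key name is illustrative):

	lease := clientv3.NewLease(cli)
	defer lease.Close()

	grant, err := lease.Grant(context.Background(), 10) // 10-second TTL
	if err != nil {
		panic(err)
	}
	// Attach a key to the lease so it disappears if renewal stops.
	if _, err = cli.Put(context.Background(), "workers/w1", "alive",
		clientv3.WithLease(grant.ID)); err != nil {
		panic(err)
	}

	kaCh, err := lease.KeepAlive(context.Background(), grant.ID)
	if err != nil {
		panic(err)
	}
	for ka := range kaCh {
		// Consuming responses lets recvKeepAlive advance nextKeepAlive;
		// once the buffered channel fills, further responses are dropped.
		fmt.Println("remaining TTL:", ka.TTL)
	}
	// Channel closed: the lease expired, was revoked, or the loop halted.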
diff --git a/vendor/github.com/coreos/etcd/clientv3/leasing/cache.go b/vendor/github.com/coreos/etcd/clientv3/leasing/cache.go
new file mode 100644
index 0000000..6903a78
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/leasing/cache.go
@@ -0,0 +1,306 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package leasing
+
+import (
+	"context"
+	"strings"
+	"sync"
+	"time"
+
+	v3 "github.com/coreos/etcd/clientv3"
+	v3pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+	"github.com/coreos/etcd/mvcc/mvccpb"
+)
+
+const revokeBackoff = 2 * time.Second
+
+type leaseCache struct {
+	mu      sync.RWMutex
+	entries map[string]*leaseKey
+	revokes map[string]time.Time
+	header  *v3pb.ResponseHeader
+}
+
+type leaseKey struct {
+	response *v3.GetResponse
+	// rev is the leasing key revision.
+	rev   int64
+	waitc chan struct{}
+}
+
+func (lc *leaseCache) Rev(key string) int64 {
+	lc.mu.RLock()
+	defer lc.mu.RUnlock()
+	if li := lc.entries[key]; li != nil {
+		return li.rev
+	}
+	return 0
+}
+
+func (lc *leaseCache) Lock(key string) (chan<- struct{}, int64) {
+	lc.mu.Lock()
+	defer lc.mu.Unlock()
+	if li := lc.entries[key]; li != nil {
+		li.waitc = make(chan struct{})
+		return li.waitc, li.rev
+	}
+	return nil, 0
+}
+
+func (lc *leaseCache) LockRange(begin, end string) (ret []chan<- struct{}) {
+	lc.mu.Lock()
+	defer lc.mu.Unlock()
+	for k, li := range lc.entries {
+		if inRange(k, begin, end) {
+			li.waitc = make(chan struct{})
+			ret = append(ret, li.waitc)
+		}
+	}
+	return ret
+}
+
+func inRange(k, begin, end string) bool {
+	if strings.Compare(k, begin) < 0 {
+		return false
+	}
+	if end != "\x00" && strings.Compare(k, end) >= 0 {
+		return false
+	}
+	return true
+}
+
+func (lc *leaseCache) LockWriteOps(ops []v3.Op) (ret []chan<- struct{}) {
+	for _, op := range ops {
+		if op.IsGet() {
+			continue
+		}
+		key := string(op.KeyBytes())
+		if end := string(op.RangeBytes()); end == "" {
+			if wc, _ := lc.Lock(key); wc != nil {
+				ret = append(ret, wc)
+			}
+		} else {
+			for k := range lc.entries {
+				if !inRange(k, key, end) {
+					continue
+				}
+				if wc, _ := lc.Lock(k); wc != nil {
+					ret = append(ret, wc)
+				}
+			}
+		}
+	}
+	return ret
+}
+
+func (lc *leaseCache) NotifyOps(ops []v3.Op) (wcs []<-chan struct{}) {
+	for _, op := range ops {
+		if op.IsGet() {
+			if _, wc := lc.notify(string(op.KeyBytes())); wc != nil {
+				wcs = append(wcs, wc)
+			}
+		}
+	}
+	return wcs
+}
+
+func (lc *leaseCache) MayAcquire(key string) bool {
+	lc.mu.RLock()
+	lr, ok := lc.revokes[key]
+	lc.mu.RUnlock()
+	return !ok || time.Since(lr) > revokeBackoff
+}
+
+func (lc *leaseCache) Add(key string, resp *v3.GetResponse, op v3.Op) *v3.GetResponse {
+	lk := &leaseKey{resp, resp.Header.Revision, closedCh}
+	lc.mu.Lock()
+	if lc.header == nil || lc.header.Revision < resp.Header.Revision {
+		lc.header = resp.Header
+	}
+	lc.entries[key] = lk
+	ret := lk.get(op)
+	lc.mu.Unlock()
+	return ret
+}
+
+func (lc *leaseCache) Update(key, val []byte, respHeader *v3pb.ResponseHeader) {
+	li := lc.entries[string(key)]
+	if li == nil {
+		return
+	}
+	cacheResp := li.response
+	if len(cacheResp.Kvs) == 0 {
+		kv := &mvccpb.KeyValue{
+			Key:            key,
+			CreateRevision: respHeader.Revision,
+		}
+		cacheResp.Kvs = append(cacheResp.Kvs, kv)
+		cacheResp.Count = 1
+	}
+	cacheResp.Kvs[0].Version++
+	if cacheResp.Kvs[0].ModRevision < respHeader.Revision {
+		cacheResp.Header = respHeader
+		cacheResp.Kvs[0].ModRevision = respHeader.Revision
+		cacheResp.Kvs[0].Value = val
+	}
+}
+
+func (lc *leaseCache) Delete(key string, hdr *v3pb.ResponseHeader) {
+	lc.mu.Lock()
+	defer lc.mu.Unlock()
+	lc.delete(key, hdr)
+}
+
+func (lc *leaseCache) delete(key string, hdr *v3pb.ResponseHeader) {
+	if li := lc.entries[key]; li != nil && hdr.Revision >= li.response.Header.Revision {
+		li.response.Kvs = nil
+		li.response.Header = copyHeader(hdr)
+	}
+}
+
+func (lc *leaseCache) Evict(key string) (rev int64) {
+	lc.mu.Lock()
+	defer lc.mu.Unlock()
+	if li := lc.entries[key]; li != nil {
+		rev = li.rev
+		delete(lc.entries, key)
+		lc.revokes[key] = time.Now()
+	}
+	return rev
+}
+
+func (lc *leaseCache) EvictRange(key, end string) {
+	lc.mu.Lock()
+	defer lc.mu.Unlock()
+	for k := range lc.entries {
+		if inRange(k, key, end) {
+			delete(lc.entries, k)
+			lc.revokes[k] = time.Now()
+		}
+	}
+}
+
+func isBadOp(op v3.Op) bool { return op.Rev() > 0 || len(op.RangeBytes()) > 0 }
+
+func (lc *leaseCache) Get(ctx context.Context, op v3.Op) (*v3.GetResponse, bool) {
+	if isBadOp(op) {
+		return nil, false
+	}
+	key := string(op.KeyBytes())
+	li, wc := lc.notify(key)
+	if li == nil {
+		return nil, true
+	}
+	select {
+	case <-wc:
+	case <-ctx.Done():
+		return nil, true
+	}
+	lc.mu.RLock()
+	lk := *li
+	ret := lk.get(op)
+	lc.mu.RUnlock()
+	return ret, true
+}
+
+func (lk *leaseKey) get(op v3.Op) *v3.GetResponse {
+	ret := *lk.response
+	ret.Header = copyHeader(ret.Header)
+	empty := len(ret.Kvs) == 0 || op.IsCountOnly()
+	empty = empty || (op.MinModRev() > ret.Kvs[0].ModRevision)
+	empty = empty || (op.MaxModRev() != 0 && op.MaxModRev() < ret.Kvs[0].ModRevision)
+	empty = empty || (op.MinCreateRev() > ret.Kvs[0].CreateRevision)
+	empty = empty || (op.MaxCreateRev() != 0 && op.MaxCreateRev() < ret.Kvs[0].CreateRevision)
+	if empty {
+		ret.Kvs = nil
+	} else {
+		kv := *ret.Kvs[0]
+		kv.Key = make([]byte, len(kv.Key))
+		copy(kv.Key, ret.Kvs[0].Key)
+		if !op.IsKeysOnly() {
+			kv.Value = make([]byte, len(kv.Value))
+			copy(kv.Value, ret.Kvs[0].Value)
+		}
+		ret.Kvs = []*mvccpb.KeyValue{&kv}
+	}
+	return &ret
+}
+
+func (lc *leaseCache) notify(key string) (*leaseKey, <-chan struct{}) {
+	lc.mu.RLock()
+	defer lc.mu.RUnlock()
+	if li := lc.entries[key]; li != nil {
+		return li, li.waitc
+	}
+	return nil, nil
+}
+
+func (lc *leaseCache) clearOldRevokes(ctx context.Context) {
+	for {
+		select {
+		case <-ctx.Done():
+			return
+		case <-time.After(time.Second):
+			lc.mu.Lock()
+			for k, lr := range lc.revokes {
+				if time.Since(lr) > revokeBackoff {
+					delete(lc.revokes, k)
+				}
+			}
+			lc.mu.Unlock()
+		}
+	}
+}
+
+func (lc *leaseCache) evalCmp(cmps []v3.Cmp) (cmpVal bool, ok bool) {
+	for _, cmp := range cmps {
+		if len(cmp.RangeEnd) > 0 {
+			return false, false
+		}
+		lk := lc.entries[string(cmp.Key)]
+		if lk == nil {
+			return false, false
+		}
+		if !evalCmp(lk.response, cmp) {
+			return false, true
+		}
+	}
+	return true, true
+}
+
+func (lc *leaseCache) evalOps(ops []v3.Op) ([]*v3pb.ResponseOp, bool) {
+	resps := make([]*v3pb.ResponseOp, len(ops))
+	for i, op := range ops {
+		if !op.IsGet() || isBadOp(op) {
+			// TODO: support read-only Txn
+			return nil, false
+		}
+		lk := lc.entries[string(op.KeyBytes())]
+		if lk == nil {
+			return nil, false
+		}
+		resp := lk.get(op)
+		if resp == nil {
+			return nil, false
+		}
+		resps[i] = &v3pb.ResponseOp{
+			Response: &v3pb.ResponseOp_ResponseRange{
+				(*v3pb.RangeResponse)(resp),
+			},
+		}
+	}
+	return resps, true
+}
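
The range checks above follow etcd's half-open interval convention. A test-style sketch (hypothetical, for this package's test file) of what inRange guarantees:

	func TestInRangeConvention(t *testing.T) {
		// Keys match over [begin, end); end == "\x00" means every key >= begin.
		if !inRange("abc", "abc", "abd") {
			t.Fatal("begin should be inclusive")
		}
		if inRange("abd", "abc", "abd") {
			t.Fatal("end should be exclusive")
		}
		if !inRange("zzz", "abc", "\x00") {
			t.Fatal(`end "\x00" should be open-ended`)
		}
	}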
diff --git a/vendor/github.com/coreos/etcd/clientv3/leasing/doc.go b/vendor/github.com/coreos/etcd/clientv3/leasing/doc.go
new file mode 100644
index 0000000..fc97fc8
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/leasing/doc.go
@@ -0,0 +1,46 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package leasing serves linearizable reads from a local cache by acquiring
+// exclusive write access to keys through a client-side leasing protocol. This
+// leasing layer can either directly wrap the etcd client or it can be exposed
+// through the etcd grpc proxy server, granting multiple clients write access.
+//
+// First, create a leasing KV from a clientv3.Client 'cli':
+//
+//     lkv, err := leasing.NewKV(cli, "leasing-prefix")
+//     if err != nil {
+//         // handle error
+//     }
+//
+// A range request for a key "abc" tries to acquire a leasing key so it can cache the range's
+// key locally. On the server, the leasing key is stored to "leasing-prefix/abc":
+//
+//     resp, err := lkv.Get(context.TODO(), "abc")
+//
+// Future linearized read requests using 'lkv' will be served locally for the lease's lifetime:
+//
+//     resp, err = lkv.Get(context.TODO(), "abc")
+//
+// If another leasing client writes to a leased key, then the owner relinquishes its exclusive
+// access, permitting the writer to modify the key:
+//
+//     lkv2, err := leasing.NewKV(cli, "leasing-prefix")
+//     if err != nil {
+//         // handle error
+//     }
+//     lkv2.Put(context.TODO(), "abc", "456")
+//     resp, err = lkv.Get(context.TODO(), "abc")
+//
+package leasing
diff --git a/vendor/github.com/coreos/etcd/clientv3/leasing/kv.go b/vendor/github.com/coreos/etcd/clientv3/leasing/kv.go
new file mode 100644
index 0000000..5a5e231
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/leasing/kv.go
@@ -0,0 +1,479 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package leasing
+
+import (
+	"context"
+	"strings"
+	"sync"
+	"time"
+
+	v3 "github.com/coreos/etcd/clientv3"
+	"github.com/coreos/etcd/clientv3/concurrency"
+	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+	"github.com/coreos/etcd/mvcc/mvccpb"
+
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+type leasingKV struct {
+	cl     *v3.Client
+	kv     v3.KV
+	pfx    string
+	leases leaseCache
+
+	ctx    context.Context
+	cancel context.CancelFunc
+	wg     sync.WaitGroup
+
+	sessionOpts []concurrency.SessionOption
+	session     *concurrency.Session
+	sessionc    chan struct{}
+}
+
+var closedCh chan struct{}
+
+func init() {
+	closedCh = make(chan struct{})
+	close(closedCh)
+}
+
+// NewKV wraps a KV instance so that all requests are wired through a leasing protocol.
+func NewKV(cl *v3.Client, pfx string, opts ...concurrency.SessionOption) (v3.KV, func(), error) {
+	cctx, cancel := context.WithCancel(cl.Ctx())
+	lkv := &leasingKV{
+		cl:          cl,
+		kv:          cl.KV,
+		pfx:         pfx,
+		leases:      leaseCache{revokes: make(map[string]time.Time)},
+		ctx:         cctx,
+		cancel:      cancel,
+		sessionOpts: opts,
+		sessionc:    make(chan struct{}),
+	}
+	lkv.wg.Add(2)
+	go func() {
+		defer lkv.wg.Done()
+		lkv.monitorSession()
+	}()
+	go func() {
+		defer lkv.wg.Done()
+		lkv.leases.clearOldRevokes(cctx)
+	}()
+	return lkv, lkv.Close, lkv.waitSession(cctx)
+}
+
+func (lkv *leasingKV) Close() {
+	lkv.cancel()
+	lkv.wg.Wait()
+}
+
+func (lkv *leasingKV) Get(ctx context.Context, key string, opts ...v3.OpOption) (*v3.GetResponse, error) {
+	return lkv.get(ctx, v3.OpGet(key, opts...))
+}
+
+func (lkv *leasingKV) Put(ctx context.Context, key, val string, opts ...v3.OpOption) (*v3.PutResponse, error) {
+	return lkv.put(ctx, v3.OpPut(key, val, opts...))
+}
+
+func (lkv *leasingKV) Delete(ctx context.Context, key string, opts ...v3.OpOption) (*v3.DeleteResponse, error) {
+	return lkv.delete(ctx, v3.OpDelete(key, opts...))
+}
+
+func (lkv *leasingKV) Do(ctx context.Context, op v3.Op) (v3.OpResponse, error) {
+	switch {
+	case op.IsGet():
+		resp, err := lkv.get(ctx, op)
+		return resp.OpResponse(), err
+	case op.IsPut():
+		resp, err := lkv.put(ctx, op)
+		return resp.OpResponse(), err
+	case op.IsDelete():
+		resp, err := lkv.delete(ctx, op)
+		return resp.OpResponse(), err
+	case op.IsTxn():
+		cmps, thenOps, elseOps := op.Txn()
+		resp, err := lkv.Txn(ctx).If(cmps...).Then(thenOps...).Else(elseOps...).Commit()
+		return resp.OpResponse(), err
+	}
+	return v3.OpResponse{}, nil
+}
+
+func (lkv *leasingKV) Compact(ctx context.Context, rev int64, opts ...v3.CompactOption) (*v3.CompactResponse, error) {
+	return lkv.kv.Compact(ctx, rev, opts...)
+}
+
+func (lkv *leasingKV) Txn(ctx context.Context) v3.Txn {
+	return &txnLeasing{Txn: lkv.kv.Txn(ctx), lkv: lkv, ctx: ctx}
+}
+
+func (lkv *leasingKV) monitorSession() {
+	for lkv.ctx.Err() == nil {
+		if lkv.session != nil {
+			select {
+			case <-lkv.session.Done():
+			case <-lkv.ctx.Done():
+				return
+			}
+		}
+		lkv.leases.mu.Lock()
+		select {
+		case <-lkv.sessionc:
+			lkv.sessionc = make(chan struct{})
+		default:
+		}
+		lkv.leases.entries = make(map[string]*leaseKey)
+		lkv.leases.mu.Unlock()
+
+		s, err := concurrency.NewSession(lkv.cl, lkv.sessionOpts...)
+		if err != nil {
+			continue
+		}
+
+		lkv.leases.mu.Lock()
+		lkv.session = s
+		close(lkv.sessionc)
+		lkv.leases.mu.Unlock()
+	}
+}
+
+func (lkv *leasingKV) monitorLease(ctx context.Context, key string, rev int64) {
+	cctx, cancel := context.WithCancel(lkv.ctx)
+	defer cancel()
+	for cctx.Err() == nil {
+		if rev == 0 {
+			resp, err := lkv.kv.Get(ctx, lkv.pfx+key)
+			if err != nil {
+				continue
+			}
+			rev = resp.Header.Revision
+			if len(resp.Kvs) == 0 || string(resp.Kvs[0].Value) == "REVOKE" {
+				lkv.rescind(cctx, key, rev)
+				return
+			}
+		}
+		wch := lkv.cl.Watch(cctx, lkv.pfx+key, v3.WithRev(rev+1))
+		for resp := range wch {
+			for _, ev := range resp.Events {
+				if string(ev.Kv.Value) != "REVOKE" {
+					continue
+				}
+				if v3.LeaseID(ev.Kv.Lease) == lkv.leaseID() {
+					lkv.rescind(cctx, key, ev.Kv.ModRevision)
+				}
+				return
+			}
+		}
+		rev = 0
+	}
+}
+
+// rescind releases a lease from this client.
+func (lkv *leasingKV) rescind(ctx context.Context, key string, rev int64) {
+	if lkv.leases.Evict(key) > rev {
+		return
+	}
+	cmp := v3.Compare(v3.CreateRevision(lkv.pfx+key), "<", rev)
+	op := v3.OpDelete(lkv.pfx + key)
+	for ctx.Err() == nil {
+		if _, err := lkv.kv.Txn(ctx).If(cmp).Then(op).Commit(); err == nil {
+			return
+		}
+	}
+}
+
+func (lkv *leasingKV) waitRescind(ctx context.Context, key string, rev int64) error {
+	cctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+	wch := lkv.cl.Watch(cctx, lkv.pfx+key, v3.WithRev(rev+1))
+	for resp := range wch {
+		for _, ev := range resp.Events {
+			if ev.Type == v3.EventTypeDelete {
+				return ctx.Err()
+			}
+		}
+	}
+	return ctx.Err()
+}
+
+func (lkv *leasingKV) tryModifyOp(ctx context.Context, op v3.Op) (*v3.TxnResponse, chan<- struct{}, error) {
+	key := string(op.KeyBytes())
+	wc, rev := lkv.leases.Lock(key)
+	cmp := v3.Compare(v3.CreateRevision(lkv.pfx+key), "<", rev+1)
+	resp, err := lkv.kv.Txn(ctx).If(cmp).Then(op).Commit()
+	switch {
+	case err != nil:
+		lkv.leases.Evict(key)
+		fallthrough
+	case !resp.Succeeded:
+		if wc != nil {
+			close(wc)
+		}
+		return nil, nil, err
+	}
+	return resp, wc, nil
+}
+
+func (lkv *leasingKV) put(ctx context.Context, op v3.Op) (pr *v3.PutResponse, err error) {
+	if err := lkv.waitSession(ctx); err != nil {
+		return nil, err
+	}
+	for ctx.Err() == nil {
+		resp, wc, err := lkv.tryModifyOp(ctx, op)
+		if err != nil || wc == nil {
+			resp, err = lkv.revoke(ctx, string(op.KeyBytes()), op)
+		}
+		if err != nil {
+			return nil, err
+		}
+		if resp.Succeeded {
+			lkv.leases.mu.Lock()
+			lkv.leases.Update(op.KeyBytes(), op.ValueBytes(), resp.Header)
+			lkv.leases.mu.Unlock()
+			pr = (*v3.PutResponse)(resp.Responses[0].GetResponsePut())
+			pr.Header = resp.Header
+		}
+		if wc != nil {
+			close(wc)
+		}
+		if resp.Succeeded {
+			return pr, nil
+		}
+	}
+	return nil, ctx.Err()
+}
+
+func (lkv *leasingKV) acquire(ctx context.Context, key string, op v3.Op) (*v3.TxnResponse, error) {
+	for ctx.Err() == nil {
+		if err := lkv.waitSession(ctx); err != nil {
+			return nil, err
+		}
+		lcmp := v3.Cmp{Key: []byte(key), Target: pb.Compare_LEASE}
+		resp, err := lkv.kv.Txn(ctx).If(
+			v3.Compare(v3.CreateRevision(lkv.pfx+key), "=", 0),
+			v3.Compare(lcmp, "=", 0)).
+			Then(
+				op,
+				v3.OpPut(lkv.pfx+key, "", v3.WithLease(lkv.leaseID()))).
+			Else(
+				op,
+				v3.OpGet(lkv.pfx+key),
+			).Commit()
+		if err == nil {
+			if !resp.Succeeded {
+				kvs := resp.Responses[1].GetResponseRange().Kvs
+				// if the txn failed because this client is already the owner, the lease is acquired
+				resp.Succeeded = len(kvs) > 0 && v3.LeaseID(kvs[0].Lease) == lkv.leaseID()
+			}
+			return resp, nil
+		}
+		// retry if transient error
+		if _, ok := err.(rpctypes.EtcdError); ok {
+			return nil, err
+		}
+		if ev, _ := status.FromError(err); ev.Code() != codes.Unavailable {
+			return nil, err
+		}
+	}
+	return nil, ctx.Err()
+}
+
+func (lkv *leasingKV) get(ctx context.Context, op v3.Op) (*v3.GetResponse, error) {
+	do := func() (*v3.GetResponse, error) {
+		r, err := lkv.kv.Do(ctx, op)
+		return r.Get(), err
+	}
+	if !lkv.readySession() {
+		return do()
+	}
+
+	if resp, ok := lkv.leases.Get(ctx, op); resp != nil {
+		return resp, nil
+	} else if !ok || op.IsSerializable() {
+		// must be handled by server or can skip linearization
+		return do()
+	}
+
+	key := string(op.KeyBytes())
+	if !lkv.leases.MayAcquire(key) {
+		resp, err := lkv.kv.Do(ctx, op)
+		return resp.Get(), err
+	}
+
+	resp, err := lkv.acquire(ctx, key, v3.OpGet(key))
+	if err != nil {
+		return nil, err
+	}
+	getResp := (*v3.GetResponse)(resp.Responses[0].GetResponseRange())
+	getResp.Header = resp.Header
+	if resp.Succeeded {
+		getResp = lkv.leases.Add(key, getResp, op)
+		lkv.wg.Add(1)
+		go func() {
+			defer lkv.wg.Done()
+			lkv.monitorLease(ctx, key, resp.Header.Revision)
+		}()
+	}
+	return getResp, nil
+}
+
+func (lkv *leasingKV) deleteRangeRPC(ctx context.Context, maxLeaseRev int64, key, end string) (*v3.DeleteResponse, error) {
+	lkey, lend := lkv.pfx+key, lkv.pfx+end
+	resp, err := lkv.kv.Txn(ctx).If(
+		v3.Compare(v3.CreateRevision(lkey).WithRange(lend), "<", maxLeaseRev+1),
+	).Then(
+		v3.OpGet(key, v3.WithRange(end), v3.WithKeysOnly()),
+		v3.OpDelete(key, v3.WithRange(end)),
+	).Commit()
+	if err != nil {
+		lkv.leases.EvictRange(key, end)
+		return nil, err
+	}
+	if !resp.Succeeded {
+		return nil, nil
+	}
+	for _, kv := range resp.Responses[0].GetResponseRange().Kvs {
+		lkv.leases.Delete(string(kv.Key), resp.Header)
+	}
+	delResp := (*v3.DeleteResponse)(resp.Responses[1].GetResponseDeleteRange())
+	delResp.Header = resp.Header
+	return delResp, nil
+}
+
+func (lkv *leasingKV) deleteRange(ctx context.Context, op v3.Op) (*v3.DeleteResponse, error) {
+	key, end := string(op.KeyBytes()), string(op.RangeBytes())
+	for ctx.Err() == nil {
+		maxLeaseRev, err := lkv.revokeRange(ctx, key, end)
+		if err != nil {
+			return nil, err
+		}
+		wcs := lkv.leases.LockRange(key, end)
+		delResp, err := lkv.deleteRangeRPC(ctx, maxLeaseRev, key, end)
+		closeAll(wcs)
+		if err != nil || delResp != nil {
+			return delResp, err
+		}
+	}
+	return nil, ctx.Err()
+}
+
+func (lkv *leasingKV) delete(ctx context.Context, op v3.Op) (dr *v3.DeleteResponse, err error) {
+	if err := lkv.waitSession(ctx); err != nil {
+		return nil, err
+	}
+	if len(op.RangeBytes()) > 0 {
+		return lkv.deleteRange(ctx, op)
+	}
+	key := string(op.KeyBytes())
+	for ctx.Err() == nil {
+		resp, wc, err := lkv.tryModifyOp(ctx, op)
+		if err != nil || wc == nil {
+			resp, err = lkv.revoke(ctx, key, op)
+		}
+		if err != nil {
+			// don't know if delete was processed
+			lkv.leases.Evict(key)
+			return nil, err
+		}
+		if resp.Succeeded {
+			dr = (*v3.DeleteResponse)(resp.Responses[0].GetResponseDeleteRange())
+			dr.Header = resp.Header
+			lkv.leases.Delete(key, dr.Header)
+		}
+		if wc != nil {
+			close(wc)
+		}
+		if resp.Succeeded {
+			return dr, nil
+		}
+	}
+	return nil, ctx.Err()
+}
+
+func (lkv *leasingKV) revoke(ctx context.Context, key string, op v3.Op) (*v3.TxnResponse, error) {
+	rev := lkv.leases.Rev(key)
+	txn := lkv.kv.Txn(ctx).If(v3.Compare(v3.CreateRevision(lkv.pfx+key), "<", rev+1)).Then(op)
+	resp, err := txn.Else(v3.OpPut(lkv.pfx+key, "REVOKE", v3.WithIgnoreLease())).Commit()
+	if err != nil || resp.Succeeded {
+		return resp, err
+	}
+	return resp, lkv.waitRescind(ctx, key, resp.Header.Revision)
+}
+
+func (lkv *leasingKV) revokeRange(ctx context.Context, begin, end string) (int64, error) {
+	lkey, lend := lkv.pfx+begin, ""
+	if len(end) > 0 {
+		lend = lkv.pfx + end
+	}
+	leaseKeys, err := lkv.kv.Get(ctx, lkey, v3.WithRange(lend))
+	if err != nil {
+		return 0, err
+	}
+	return lkv.revokeLeaseKvs(ctx, leaseKeys.Kvs)
+}
+
+func (lkv *leasingKV) revokeLeaseKvs(ctx context.Context, kvs []*mvccpb.KeyValue) (int64, error) {
+	maxLeaseRev := int64(0)
+	for _, kv := range kvs {
+		if rev := kv.CreateRevision; rev > maxLeaseRev {
+			maxLeaseRev = rev
+		}
+		if v3.LeaseID(kv.Lease) == lkv.leaseID() {
+			// don't revoke own keys
+			continue
+		}
+		key := strings.TrimPrefix(string(kv.Key), lkv.pfx)
+		if _, err := lkv.revoke(ctx, key, v3.OpGet(key)); err != nil {
+			return 0, err
+		}
+	}
+	return maxLeaseRev, nil
+}
+
+func (lkv *leasingKV) waitSession(ctx context.Context) error {
+	lkv.leases.mu.RLock()
+	sessionc := lkv.sessionc
+	lkv.leases.mu.RUnlock()
+	select {
+	case <-sessionc:
+		return nil
+	case <-lkv.ctx.Done():
+		return lkv.ctx.Err()
+	case <-ctx.Done():
+		return ctx.Err()
+	}
+}
+
+func (lkv *leasingKV) readySession() bool {
+	lkv.leases.mu.RLock()
+	defer lkv.leases.mu.RUnlock()
+	if lkv.session == nil {
+		return false
+	}
+	select {
+	case <-lkv.session.Done():
+	default:
+		return true
+	}
+	return false
+}
+
+func (lkv *leasingKV) leaseID() v3.LeaseID {
+	lkv.leases.mu.RLock()
+	defer lkv.leases.mu.RUnlock()
+	return lkv.session.Lease()
+}
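
A brief sketch of wiring this leasing KV in front of a client; cli is an existing *clientv3.Client, and the prefix and key names are illustrative:

	lkv, closeLKV, err := leasing.NewKV(cli, "leases/")
	if err != nil {
		panic(err)
	}
	defer closeLKV()

	// The first Get acquires the leasing key "leases/config" (see acquire);
	// later linearizable Gets for "config" are served from leaseCache.
	if _, err = lkv.Get(context.TODO(), "config"); err != nil {
		panic(err)
	}
	resp, err := lkv.Get(context.TODO(), "config") // answered locally
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.Kvs)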
diff --git a/vendor/github.com/coreos/etcd/clientv3/leasing/txn.go b/vendor/github.com/coreos/etcd/clientv3/leasing/txn.go
new file mode 100644
index 0000000..da5b83a
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/leasing/txn.go
@@ -0,0 +1,223 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package leasing
+
+import (
+	"context"
+	"strings"
+
+	v3 "github.com/coreos/etcd/clientv3"
+	v3pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+)
+
+type txnLeasing struct {
+	v3.Txn
+	lkv  *leasingKV
+	ctx  context.Context
+	cs   []v3.Cmp
+	opst []v3.Op
+	opse []v3.Op
+}
+
+func (txn *txnLeasing) If(cs ...v3.Cmp) v3.Txn {
+	txn.cs = append(txn.cs, cs...)
+	txn.Txn = txn.Txn.If(cs...)
+	return txn
+}
+
+func (txn *txnLeasing) Then(ops ...v3.Op) v3.Txn {
+	txn.opst = append(txn.opst, ops...)
+	txn.Txn = txn.Txn.Then(ops...)
+	return txn
+}
+
+func (txn *txnLeasing) Else(ops ...v3.Op) v3.Txn {
+	txn.opse = append(txn.opse, ops...)
+	txn.Txn = txn.Txn.Else(ops...)
+	return txn
+}
+
+func (txn *txnLeasing) Commit() (*v3.TxnResponse, error) {
+	if resp, err := txn.eval(); resp != nil || err != nil {
+		return resp, err
+	}
+	return txn.serverTxn()
+}
+
+func (txn *txnLeasing) eval() (*v3.TxnResponse, error) {
+	// TODO: wait on keys in comparisons
+	thenOps, elseOps := gatherOps(txn.opst), gatherOps(txn.opse)
+	ops := make([]v3.Op, 0, len(thenOps)+len(elseOps))
+	ops = append(ops, thenOps...)
+	ops = append(ops, elseOps...)
+
+	for _, ch := range txn.lkv.leases.NotifyOps(ops) {
+		select {
+		case <-ch:
+		case <-txn.ctx.Done():
+			return nil, txn.ctx.Err()
+		}
+	}
+
+	txn.lkv.leases.mu.RLock()
+	defer txn.lkv.leases.mu.RUnlock()
+	succeeded, ok := txn.lkv.leases.evalCmp(txn.cs)
+	if !ok || txn.lkv.leases.header == nil {
+		return nil, nil
+	}
+	if ops = txn.opst; !succeeded {
+		ops = txn.opse
+	}
+
+	resps, ok := txn.lkv.leases.evalOps(ops)
+	if !ok {
+		return nil, nil
+	}
+	return &v3.TxnResponse{copyHeader(txn.lkv.leases.header), succeeded, resps}, nil
+}
+
+// fallback computes the ops to fetch all possible conflicting
+// leasing keys for a list of ops.
+func (txn *txnLeasing) fallback(ops []v3.Op) (fbOps []v3.Op) {
+	for _, op := range ops {
+		if op.IsGet() {
+			continue
+		}
+		lkey, lend := txn.lkv.pfx+string(op.KeyBytes()), ""
+		if len(op.RangeBytes()) > 0 {
+			lend = txn.lkv.pfx + string(op.RangeBytes())
+		}
+		fbOps = append(fbOps, v3.OpGet(lkey, v3.WithRange(lend)))
+	}
+	return fbOps
+}
+
+func (txn *txnLeasing) guardKeys(ops []v3.Op) (cmps []v3.Cmp) {
+	seen := make(map[string]bool)
+	for _, op := range ops {
+		key := string(op.KeyBytes())
+		if op.IsGet() || len(op.RangeBytes()) != 0 || seen[key] {
+			continue
+		}
+		rev := txn.lkv.leases.Rev(key)
+		cmps = append(cmps, v3.Compare(v3.CreateRevision(txn.lkv.pfx+key), "<", rev+1))
+		seen[key] = true
+	}
+	return cmps
+}
+
+func (txn *txnLeasing) guardRanges(ops []v3.Op) (cmps []v3.Cmp, err error) {
+	for _, op := range ops {
+		if op.IsGet() || len(op.RangeBytes()) == 0 {
+			continue
+		}
+
+		key, end := string(op.KeyBytes()), string(op.RangeBytes())
+		maxRevLK, err := txn.lkv.revokeRange(txn.ctx, key, end)
+		if err != nil {
+			return nil, err
+		}
+
+		opts := append(v3.WithLastRev(), v3.WithRange(end))
+		getResp, err := txn.lkv.kv.Get(txn.ctx, key, opts...)
+		if err != nil {
+			return nil, err
+		}
+		maxModRev := int64(0)
+		if len(getResp.Kvs) > 0 {
+			maxModRev = getResp.Kvs[0].ModRevision
+		}
+
+		noKeyUpdate := v3.Compare(v3.ModRevision(key).WithRange(end), "<", maxModRev+1)
+		noLeaseUpdate := v3.Compare(
+			v3.CreateRevision(txn.lkv.pfx+key).WithRange(txn.lkv.pfx+end),
+			"<",
+			maxRevLK+1)
+		cmps = append(cmps, noKeyUpdate, noLeaseUpdate)
+	}
+	return cmps, nil
+}
+
+func (txn *txnLeasing) guard(ops []v3.Op) ([]v3.Cmp, error) {
+	cmps := txn.guardKeys(ops)
+	rangeCmps, err := txn.guardRanges(ops)
+	return append(cmps, rangeCmps...), err
+}
+
+func (txn *txnLeasing) commitToCache(txnResp *v3pb.TxnResponse, userTxn v3.Op) {
+	ops := gatherResponseOps(txnResp.Responses, []v3.Op{userTxn})
+	txn.lkv.leases.mu.Lock()
+	for _, op := range ops {
+		key := string(op.KeyBytes())
+		if op.IsDelete() && len(op.RangeBytes()) > 0 {
+			end := string(op.RangeBytes())
+			for k := range txn.lkv.leases.entries {
+				if inRange(k, key, end) {
+					txn.lkv.leases.delete(k, txnResp.Header)
+				}
+			}
+		} else if op.IsDelete() {
+			txn.lkv.leases.delete(key, txnResp.Header)
+		}
+		if op.IsPut() {
+			txn.lkv.leases.Update(op.KeyBytes(), op.ValueBytes(), txnResp.Header)
+		}
+	}
+	txn.lkv.leases.mu.Unlock()
+}
+
+func (txn *txnLeasing) revokeFallback(fbResps []*v3pb.ResponseOp) error {
+	for _, resp := range fbResps {
+		_, err := txn.lkv.revokeLeaseKvs(txn.ctx, resp.GetResponseRange().Kvs)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (txn *txnLeasing) serverTxn() (*v3.TxnResponse, error) {
+	if err := txn.lkv.waitSession(txn.ctx); err != nil {
+		return nil, err
+	}
+
+	userOps := gatherOps(append(txn.opst, txn.opse...))
+	userTxn := v3.OpTxn(txn.cs, txn.opst, txn.opse)
+	fbOps := txn.fallback(userOps)
+
+	defer closeAll(txn.lkv.leases.LockWriteOps(userOps))
+	for {
+		cmps, err := txn.guard(userOps)
+		if err != nil {
+			return nil, err
+		}
+		resp, err := txn.lkv.kv.Txn(txn.ctx).If(cmps...).Then(userTxn).Else(fbOps...).Commit()
+		if err != nil {
+			for _, cmp := range cmps {
+				txn.lkv.leases.Evict(strings.TrimPrefix(string(cmp.Key), txn.lkv.pfx))
+			}
+			return nil, err
+		}
+		if resp.Succeeded {
+			txn.commitToCache((*v3pb.TxnResponse)(resp), userTxn)
+			userResp := resp.Responses[0].GetResponseTxn()
+			userResp.Header = resp.Header
+			return (*v3.TxnResponse)(userResp), nil
+		}
+		if err := txn.revokeFallback(resp.Responses); err != nil {
+			return nil, err
+		}
+	}
+}
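
As a usage note, a read-only transaction over keys this client already leases can be answered entirely by eval above; anything else falls through to serverTxn. A hedged sketch with an illustrative key:

	resp, err := lkv.Txn(context.TODO()).
		If(clientv3.Compare(clientv3.Value("config"), "=", "on")).
		Then(clientv3.OpGet("config")).
		Commit()
	if err != nil {
		panic(err)
	}
	fmt.Println("succeeded:", resp.Succeeded)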
diff --git a/vendor/github.com/coreos/etcd/clientv3/leasing/util.go b/vendor/github.com/coreos/etcd/clientv3/leasing/util.go
new file mode 100644
index 0000000..61f6e8c
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/leasing/util.go
@@ -0,0 +1,108 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package leasing
+
+import (
+	"bytes"
+
+	v3 "github.com/coreos/etcd/clientv3"
+	v3pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+)
+
+func compareInt64(a, b int64) int {
+	switch {
+	case a < b:
+		return -1
+	case a > b:
+		return 1
+	default:
+		return 0
+	}
+}
+
+func evalCmp(resp *v3.GetResponse, tcmp v3.Cmp) bool {
+	var result int
+	if len(resp.Kvs) != 0 {
+		kv := resp.Kvs[0]
+		switch tcmp.Target {
+		case v3pb.Compare_VALUE:
+			if tv, _ := tcmp.TargetUnion.(*v3pb.Compare_Value); tv != nil {
+				result = bytes.Compare(kv.Value, tv.Value)
+			}
+		case v3pb.Compare_CREATE:
+			if tv, _ := tcmp.TargetUnion.(*v3pb.Compare_CreateRevision); tv != nil {
+				result = compareInt64(kv.CreateRevision, tv.CreateRevision)
+			}
+		case v3pb.Compare_MOD:
+			if tv, _ := tcmp.TargetUnion.(*v3pb.Compare_ModRevision); tv != nil {
+				result = compareInt64(kv.ModRevision, tv.ModRevision)
+			}
+		case v3pb.Compare_VERSION:
+			if tv, _ := tcmp.TargetUnion.(*v3pb.Compare_Version); tv != nil {
+				result = compareInt64(kv.Version, tv.Version)
+			}
+		}
+	}
+	switch tcmp.Result {
+	case v3pb.Compare_EQUAL:
+		return result == 0
+	case v3pb.Compare_NOT_EQUAL:
+		return result != 0
+	case v3pb.Compare_GREATER:
+		return result > 0
+	case v3pb.Compare_LESS:
+		return result < 0
+	}
+	return true
+}
+
+func gatherOps(ops []v3.Op) (ret []v3.Op) {
+	for _, op := range ops {
+		if !op.IsTxn() {
+			ret = append(ret, op)
+			continue
+		}
+		_, thenOps, elseOps := op.Txn()
+		ret = append(ret, gatherOps(append(thenOps, elseOps...))...)
+	}
+	return ret
+}
+
+func gatherResponseOps(resp []*v3pb.ResponseOp, ops []v3.Op) (ret []v3.Op) {
+	for i, op := range ops {
+		if !op.IsTxn() {
+			ret = append(ret, op)
+			continue
+		}
+		_, thenOps, elseOps := op.Txn()
+		if txnResp := resp[i].GetResponseTxn(); txnResp.Succeeded {
+			ret = append(ret, gatherResponseOps(txnResp.Responses, thenOps)...)
+		} else {
+			ret = append(ret, gatherResponseOps(txnResp.Responses, elseOps)...)
+		}
+	}
+	return ret
+}
+
+func copyHeader(hdr *v3pb.ResponseHeader) *v3pb.ResponseHeader {
+	h := *hdr
+	return &h
+}
+
+func closeAll(chs []chan<- struct{}) {
+	for _, ch := range chs {
+		close(ch)
+	}
+}
diff --git a/vendor/github.com/coreos/etcd/clientv3/logger.go b/vendor/github.com/coreos/etcd/clientv3/logger.go
new file mode 100644
index 0000000..782e313
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/logger.go
@@ -0,0 +1,135 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	"io/ioutil"
+	"sync"
+
+	"google.golang.org/grpc/grpclog"
+)
+
+// Logger is the logger used by client library.
+// It implements grpclog.LoggerV2 interface.
+type Logger interface {
+	grpclog.LoggerV2
+
+	// Lvl returns the logger if the logger's verbosity level is >= "lvl".
+	// Otherwise, it returns a logger that discards all logs.
+	Lvl(lvl int) Logger
+
+	// to satisfy capnslog
+
+	Print(args ...interface{})
+	Printf(format string, args ...interface{})
+	Println(args ...interface{})
+}
+
+var (
+	loggerMu sync.RWMutex
+	logger   Logger
+)
+
+type settableLogger struct {
+	l  grpclog.LoggerV2
+	mu sync.RWMutex
+}
+
+func init() {
+	// disable client side logs by default
+	logger = &settableLogger{}
+	SetLogger(grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard))
+}
+
+// SetLogger sets client-side Logger.
+func SetLogger(l grpclog.LoggerV2) {
+	loggerMu.Lock()
+	logger = NewLogger(l)
+	// override grpclog so that any changes happen with locking
+	grpclog.SetLoggerV2(logger)
+	loggerMu.Unlock()
+}
+
+// GetLogger returns the current logger.
+func GetLogger() Logger {
+	loggerMu.RLock()
+	l := logger
+	loggerMu.RUnlock()
+	return l
+}
+
+// NewLogger returns a new Logger with grpclog.LoggerV2.
+func NewLogger(gl grpclog.LoggerV2) Logger {
+	return &settableLogger{l: gl}
+}
+
+func (s *settableLogger) get() grpclog.LoggerV2 {
+	s.mu.RLock()
+	l := s.l
+	s.mu.RUnlock()
+	return l
+}
+
+// implement the grpclog.LoggerV2 interface
+
+func (s *settableLogger) Info(args ...interface{})                 { s.get().Info(args...) }
+func (s *settableLogger) Infof(format string, args ...interface{}) { s.get().Infof(format, args...) }
+func (s *settableLogger) Infoln(args ...interface{})               { s.get().Infoln(args...) }
+func (s *settableLogger) Warning(args ...interface{})              { s.get().Warning(args...) }
+func (s *settableLogger) Warningf(format string, args ...interface{}) {
+	s.get().Warningf(format, args...)
+}
+func (s *settableLogger) Warningln(args ...interface{}) { s.get().Warningln(args...) }
+func (s *settableLogger) Error(args ...interface{})     { s.get().Error(args...) }
+func (s *settableLogger) Errorf(format string, args ...interface{}) {
+	s.get().Errorf(format, args...)
+}
+func (s *settableLogger) Errorln(args ...interface{})               { s.get().Errorln(args...) }
+func (s *settableLogger) Fatal(args ...interface{})                 { s.get().Fatal(args...) }
+func (s *settableLogger) Fatalf(format string, args ...interface{}) { s.get().Fatalf(format, args...) }
+func (s *settableLogger) Fatalln(args ...interface{})               { s.get().Fatalln(args...) }
+func (s *settableLogger) Print(args ...interface{})                 { s.get().Info(args...) }
+func (s *settableLogger) Printf(format string, args ...interface{}) { s.get().Infof(format, args...) }
+func (s *settableLogger) Println(args ...interface{})               { s.get().Infoln(args...) }
+func (s *settableLogger) V(l int) bool                              { return s.get().V(l) }
+func (s *settableLogger) Lvl(lvl int) Logger {
+	s.mu.RLock()
+	l := s.l
+	s.mu.RUnlock()
+	if l.V(lvl) {
+		return s
+	}
+	return &noLogger{}
+}
+
+type noLogger struct{}
+
+func (*noLogger) Info(args ...interface{})                    {}
+func (*noLogger) Infof(format string, args ...interface{})    {}
+func (*noLogger) Infoln(args ...interface{})                  {}
+func (*noLogger) Warning(args ...interface{})                 {}
+func (*noLogger) Warningf(format string, args ...interface{}) {}
+func (*noLogger) Warningln(args ...interface{})               {}
+func (*noLogger) Error(args ...interface{})                   {}
+func (*noLogger) Errorf(format string, args ...interface{})   {}
+func (*noLogger) Errorln(args ...interface{})                 {}
+func (*noLogger) Fatal(args ...interface{})                   {}
+func (*noLogger) Fatalf(format string, args ...interface{})   {}
+func (*noLogger) Fatalln(args ...interface{})                 {}
+func (*noLogger) Print(args ...interface{})                   {}
+func (*noLogger) Printf(format string, args ...interface{})   {}
+func (*noLogger) Println(args ...interface{})                 {}
+func (*noLogger) V(l int) bool                                { return false }
+func (ng *noLogger) Lvl(lvl int) Logger                       { return ng }
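
Client-side logs are discarded by default (see init above). A minimal sketch of re-enabling them through the same grpclog plumbing:

	import (
		"os"

		"github.com/coreos/etcd/clientv3"
		"google.golang.org/grpc/grpclog"
	)

	func init() {
		// Send info/warning/error output to stderr instead of ioutil.Discard.
		clientv3.SetLogger(grpclog.NewLoggerV2(os.Stderr, os.Stderr, os.Stderr))
	}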
diff --git a/vendor/github.com/coreos/etcd/clientv3/maintenance.go b/vendor/github.com/coreos/etcd/clientv3/maintenance.go
new file mode 100644
index 0000000..f60cfbe
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/maintenance.go
@@ -0,0 +1,226 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	"context"
+	"io"
+
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+
+	"google.golang.org/grpc"
+)
+
+type (
+	DefragmentResponse pb.DefragmentResponse
+	AlarmResponse      pb.AlarmResponse
+	AlarmMember        pb.AlarmMember
+	StatusResponse     pb.StatusResponse
+	HashKVResponse     pb.HashKVResponse
+	MoveLeaderResponse pb.MoveLeaderResponse
+)
+
+type Maintenance interface {
+	// AlarmList gets all active alarms.
+	AlarmList(ctx context.Context) (*AlarmResponse, error)
+
+	// AlarmDisarm disarms a given alarm.
+	AlarmDisarm(ctx context.Context, m *AlarmMember) (*AlarmResponse, error)
+
+	// Defragment releases wasted space from internal fragmentation on a given etcd member.
+	// Defragment is only needed after deleting a large number of keys, in order
+	// to reclaim the resources.
+	// Defragment is an expensive operation. Users should avoid defragmenting
+	// multiple members at the same time.
+	// To defragment multiple members in the cluster, users need to call Defragment
+	// multiple times with different endpoints.
+	Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error)
+
+	// Status gets the status of the endpoint.
+	Status(ctx context.Context, endpoint string) (*StatusResponse, error)
+
+	// HashKV returns a hash of the KV state at the time of the RPC.
+	// If revision is zero, the hash is computed on all keys. If the revision
+	// is non-zero, the hash is computed on all keys at or below the given revision.
+	HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error)
+
+	// Snapshot provides a reader for a point-in-time snapshot of etcd.
+	Snapshot(ctx context.Context) (io.ReadCloser, error)
+
+	// MoveLeader requests current leader to transfer its leadership to the transferee.
+	// Request must be made to the leader.
+	MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error)
+}
+
+type maintenance struct {
+	dial     func(endpoint string) (pb.MaintenanceClient, func(), error)
+	remote   pb.MaintenanceClient
+	callOpts []grpc.CallOption
+}
+
+func NewMaintenance(c *Client) Maintenance {
+	api := &maintenance{
+		dial: func(endpoint string) (pb.MaintenanceClient, func(), error) {
+			conn, err := c.dial(endpoint)
+			if err != nil {
+				return nil, nil, err
+			}
+			cancel := func() { conn.Close() }
+			return RetryMaintenanceClient(c, conn), cancel, nil
+		},
+		remote: RetryMaintenanceClient(c, c.conn),
+	}
+	if c != nil {
+		api.callOpts = c.callOpts
+	}
+	return api
+}
+
+func NewMaintenanceFromMaintenanceClient(remote pb.MaintenanceClient, c *Client) Maintenance {
+	api := &maintenance{
+		dial: func(string) (pb.MaintenanceClient, func(), error) {
+			return remote, func() {}, nil
+		},
+		remote: remote,
+	}
+	if c != nil {
+		api.callOpts = c.callOpts
+	}
+	return api
+}
+
+func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) {
+	req := &pb.AlarmRequest{
+		Action:   pb.AlarmRequest_GET,
+		MemberID: 0,                 // all
+		Alarm:    pb.AlarmType_NONE, // all
+	}
+	resp, err := m.remote.Alarm(ctx, req, m.callOpts...)
+	if err == nil {
+		return (*AlarmResponse)(resp), nil
+	}
+	return nil, toErr(ctx, err)
+}
+
+func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmResponse, error) {
+	req := &pb.AlarmRequest{
+		Action:   pb.AlarmRequest_DEACTIVATE,
+		MemberID: am.MemberID,
+		Alarm:    am.Alarm,
+	}
+
+	if req.MemberID == 0 && req.Alarm == pb.AlarmType_NONE {
+		ar, err := m.AlarmList(ctx)
+		if err != nil {
+			return nil, toErr(ctx, err)
+		}
+		ret := AlarmResponse{}
+		for _, am := range ar.Alarms {
+			dresp, derr := m.AlarmDisarm(ctx, (*AlarmMember)(am))
+			if derr != nil {
+				return nil, toErr(ctx, derr)
+			}
+			ret.Alarms = append(ret.Alarms, dresp.Alarms...)
+		}
+		return &ret, nil
+	}
+
+	resp, err := m.remote.Alarm(ctx, req, m.callOpts...)
+	if err == nil {
+		return (*AlarmResponse)(resp), nil
+	}
+	return nil, toErr(ctx, err)
+}
+
+func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) {
+	remote, cancel, err := m.dial(endpoint)
+	if err != nil {
+		return nil, toErr(ctx, err)
+	}
+	defer cancel()
+	resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}, m.callOpts...)
+	if err != nil {
+		return nil, toErr(ctx, err)
+	}
+	return (*DefragmentResponse)(resp), nil
+}
+
+func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusResponse, error) {
+	remote, cancel, err := m.dial(endpoint)
+	if err != nil {
+		return nil, toErr(ctx, err)
+	}
+	defer cancel()
+	resp, err := remote.Status(ctx, &pb.StatusRequest{}, m.callOpts...)
+	if err != nil {
+		return nil, toErr(ctx, err)
+	}
+	return (*StatusResponse)(resp), nil
+}
+
+func (m *maintenance) HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error) {
+	remote, cancel, err := m.dial(endpoint)
+	if err != nil {
+		return nil, toErr(ctx, err)
+	}
+	defer cancel()
+	resp, err := remote.HashKV(ctx, &pb.HashKVRequest{Revision: rev}, m.callOpts...)
+	if err != nil {
+		return nil, toErr(ctx, err)
+	}
+	return (*HashKVResponse)(resp), nil
+}
+
+func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) {
+	ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, m.callOpts...)
+	if err != nil {
+		return nil, toErr(ctx, err)
+	}
+
+	pr, pw := io.Pipe()
+	go func() {
+		for {
+			resp, err := ss.Recv()
+			if err != nil {
+				pw.CloseWithError(err)
+				return
+			}
+			if resp == nil {
+				// err is nil here, so a nil resp means the stream has ended
+				break
+			}
+			if _, werr := pw.Write(resp.Blob); werr != nil {
+				pw.CloseWithError(werr)
+				return
+			}
+		}
+		pw.Close()
+	}()
+	return &snapshotReadCloser{ctx: ctx, ReadCloser: pr}, nil
+}
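+
+// A hedged usage sketch for Snapshot (the file name is illustrative):
+// stream the snapshot to disk with io.Copy.
+//
+//	rc, err := cli.Snapshot(context.TODO())
+//	if err != nil {
+//		// handle error
+//	}
+//	defer rc.Close()
+//	f, err := os.Create("snapshot.db")
+//	if err != nil {
+//		// handle error
+//	}
+//	defer f.Close()
+//	if _, err := io.Copy(f, rc); err != nil {
+//		// handle error
+//	}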
+
+type snapshotReadCloser struct {
+	ctx context.Context
+	io.ReadCloser
+}
+
+func (rc *snapshotReadCloser) Read(p []byte) (n int, err error) {
+	n, err = rc.ReadCloser.Read(p)
+	return n, toErr(rc.ctx, err)
+}
+
+func (m *maintenance) MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error) {
+	resp, err := m.remote.MoveLeader(ctx, &pb.MoveLeaderRequest{TargetID: transfereeID}, m.callOpts...)
+	return (*MoveLeaderResponse)(resp), toErr(ctx, err)
+}
diff --git a/vendor/github.com/coreos/etcd/clientv3/namespace/doc.go b/vendor/github.com/coreos/etcd/clientv3/namespace/doc.go
new file mode 100644
index 0000000..3f88332
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/namespace/doc.go
@@ -0,0 +1,43 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package namespace is a clientv3 wrapper that translates all keys to begin
+// with a given prefix.
+//
+// First, create a client:
+//
+//	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
+//	if err != nil {
+//		// handle error!
+//	}
+//
+// Next, override the client interfaces:
+//
+//	unprefixedKV := cli.KV
+//	cli.KV = namespace.NewKV(cli.KV, "my-prefix/")
+//	cli.Watcher = namespace.NewWatcher(cli.Watcher, "my-prefix/")
+//	cli.Lease = namespace.NewLease(cli.Lease, "my-prefix/")
+//
+// Now calls using 'cli' will namespace / prefix all keys with "my-prefix/":
+//
+//	cli.Put(context.TODO(), "abc", "123")
+//	resp, _ := unprefixedKV.Get(context.TODO(), "my-prefix/abc")
+//	fmt.Printf("%s\n", resp.Kvs[0].Value)
+//	// Output: 123
+//	unprefixedKV.Put(context.TODO(), "my-prefix/abc", "456")
+//	resp, _ = cli.Get(context.TODO(), "abc")
+//	fmt.Printf("%s\n", resp.Kvs[0].Value)
+//	// Output: 456
+//
+package namespace
diff --git a/vendor/github.com/coreos/etcd/clientv3/namespace/kv.go b/vendor/github.com/coreos/etcd/clientv3/namespace/kv.go
new file mode 100644
index 0000000..13dd83a
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/namespace/kv.go
@@ -0,0 +1,206 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package namespace
+
+import (
+	"context"
+
+	"github.com/coreos/etcd/clientv3"
+	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+)
+
+type kvPrefix struct {
+	clientv3.KV
+	pfx string
+}
+
+// NewKV wraps a KV instance so that all requests
+// are prefixed with a given string.
+func NewKV(kv clientv3.KV, prefix string) clientv3.KV {
+	return &kvPrefix{kv, prefix}
+}
+
+func (kv *kvPrefix) Put(ctx context.Context, key, val string, opts ...clientv3.OpOption) (*clientv3.PutResponse, error) {
+	if len(key) == 0 {
+		return nil, rpctypes.ErrEmptyKey
+	}
+	op := kv.prefixOp(clientv3.OpPut(key, val, opts...))
+	r, err := kv.KV.Do(ctx, op)
+	if err != nil {
+		return nil, err
+	}
+	put := r.Put()
+	kv.unprefixPutResponse(put)
+	return put, nil
+}
+
+func (kv *kvPrefix) Get(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.GetResponse, error) {
+	if len(key) == 0 {
+		return nil, rpctypes.ErrEmptyKey
+	}
+	r, err := kv.KV.Do(ctx, kv.prefixOp(clientv3.OpGet(key, opts...)))
+	if err != nil {
+		return nil, err
+	}
+	get := r.Get()
+	kv.unprefixGetResponse(get)
+	return get, nil
+}
+
+func (kv *kvPrefix) Delete(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.DeleteResponse, error) {
+	if len(key) == 0 {
+		return nil, rpctypes.ErrEmptyKey
+	}
+	r, err := kv.KV.Do(ctx, kv.prefixOp(clientv3.OpDelete(key, opts...)))
+	if err != nil {
+		return nil, err
+	}
+	del := r.Del()
+	kv.unprefixDeleteResponse(del)
+	return del, nil
+}
+
+func (kv *kvPrefix) Do(ctx context.Context, op clientv3.Op) (clientv3.OpResponse, error) {
+	if len(op.KeyBytes()) == 0 && !op.IsTxn() {
+		return clientv3.OpResponse{}, rpctypes.ErrEmptyKey
+	}
+	r, err := kv.KV.Do(ctx, kv.prefixOp(op))
+	if err != nil {
+		return r, err
+	}
+	switch {
+	case r.Get() != nil:
+		kv.unprefixGetResponse(r.Get())
+	case r.Put() != nil:
+		kv.unprefixPutResponse(r.Put())
+	case r.Del() != nil:
+		kv.unprefixDeleteResponse(r.Del())
+	case r.Txn() != nil:
+		kv.unprefixTxnResponse(r.Txn())
+	}
+	return r, nil
+}
+
+type txnPrefix struct {
+	clientv3.Txn
+	kv *kvPrefix
+}
+
+func (kv *kvPrefix) Txn(ctx context.Context) clientv3.Txn {
+	return &txnPrefix{kv.KV.Txn(ctx), kv}
+}
+
+func (txn *txnPrefix) If(cs ...clientv3.Cmp) clientv3.Txn {
+	txn.Txn = txn.Txn.If(txn.kv.prefixCmps(cs)...)
+	return txn
+}
+
+func (txn *txnPrefix) Then(ops ...clientv3.Op) clientv3.Txn {
+	txn.Txn = txn.Txn.Then(txn.kv.prefixOps(ops)...)
+	return txn
+}
+
+func (txn *txnPrefix) Else(ops ...clientv3.Op) clientv3.Txn {
+	txn.Txn = txn.Txn.Else(txn.kv.prefixOps(ops)...)
+	return txn
+}
+
+func (txn *txnPrefix) Commit() (*clientv3.TxnResponse, error) {
+	resp, err := txn.Txn.Commit()
+	if err != nil {
+		return nil, err
+	}
+	txn.kv.unprefixTxnResponse(resp)
+	return resp, nil
+}
+
+func (kv *kvPrefix) prefixOp(op clientv3.Op) clientv3.Op {
+	if !op.IsTxn() {
+		begin, end := kv.prefixInterval(op.KeyBytes(), op.RangeBytes())
+		op.WithKeyBytes(begin)
+		op.WithRangeBytes(end)
+		return op
+	}
+	cmps, thenOps, elseOps := op.Txn()
+	return clientv3.OpTxn(kv.prefixCmps(cmps), kv.prefixOps(thenOps), kv.prefixOps(elseOps))
+}
+
+func (kv *kvPrefix) unprefixGetResponse(resp *clientv3.GetResponse) {
+	for i := range resp.Kvs {
+		resp.Kvs[i].Key = resp.Kvs[i].Key[len(kv.pfx):]
+	}
+}
+
+func (kv *kvPrefix) unprefixPutResponse(resp *clientv3.PutResponse) {
+	if resp.PrevKv != nil {
+		resp.PrevKv.Key = resp.PrevKv.Key[len(kv.pfx):]
+	}
+}
+
+func (kv *kvPrefix) unprefixDeleteResponse(resp *clientv3.DeleteResponse) {
+	for i := range resp.PrevKvs {
+		resp.PrevKvs[i].Key = resp.PrevKvs[i].Key[len(kv.pfx):]
+	}
+}
+
+func (kv *kvPrefix) unprefixTxnResponse(resp *clientv3.TxnResponse) {
+	for _, r := range resp.Responses {
+		switch tv := r.Response.(type) {
+		case *pb.ResponseOp_ResponseRange:
+			if tv.ResponseRange != nil {
+				kv.unprefixGetResponse((*clientv3.GetResponse)(tv.ResponseRange))
+			}
+		case *pb.ResponseOp_ResponsePut:
+			if tv.ResponsePut != nil {
+				kv.unprefixPutResponse((*clientv3.PutResponse)(tv.ResponsePut))
+			}
+		case *pb.ResponseOp_ResponseDeleteRange:
+			if tv.ResponseDeleteRange != nil {
+				kv.unprefixDeleteResponse((*clientv3.DeleteResponse)(tv.ResponseDeleteRange))
+			}
+		case *pb.ResponseOp_ResponseTxn:
+			if tv.ResponseTxn != nil {
+				kv.unprefixTxnResponse((*clientv3.TxnResponse)(tv.ResponseTxn))
+			}
+		default:
+		}
+	}
+}
+
+func (kv *kvPrefix) prefixInterval(key, end []byte) (pfxKey []byte, pfxEnd []byte) {
+	return prefixInterval(kv.pfx, key, end)
+}
+
+func (kv *kvPrefix) prefixCmps(cs []clientv3.Cmp) []clientv3.Cmp {
+	newCmps := make([]clientv3.Cmp, len(cs))
+	for i := range cs {
+		newCmps[i] = cs[i]
+		pfxKey, endKey := kv.prefixInterval(cs[i].KeyBytes(), cs[i].RangeEnd)
+		newCmps[i].WithKeyBytes(pfxKey)
+		if len(cs[i].RangeEnd) != 0 {
+			newCmps[i].RangeEnd = endKey
+		}
+	}
+	return newCmps
+}
+
+func (kv *kvPrefix) prefixOps(ops []clientv3.Op) []clientv3.Op {
+	newOps := make([]clientv3.Op, len(ops))
+	for i := range ops {
+		newOps[i] = kv.prefixOp(ops[i])
+	}
+	return newOps
+}
diff --git a/vendor/github.com/coreos/etcd/clientv3/namespace/lease.go b/vendor/github.com/coreos/etcd/clientv3/namespace/lease.go
new file mode 100644
index 0000000..f092106
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/namespace/lease.go
@@ -0,0 +1,57 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package namespace
+
+import (
+	"bytes"
+	"context"
+
+	"github.com/coreos/etcd/clientv3"
+)
+
+type leasePrefix struct {
+	clientv3.Lease
+	pfx []byte
+}
+
+// NewLease wraps a Lease interface to filter for only keys with a prefix
+// and remove that prefix when fetching attached keys through TimeToLive.
+func NewLease(l clientv3.Lease, prefix string) clientv3.Lease {
+	return &leasePrefix{l, []byte(prefix)}
+}
+
+func (l *leasePrefix) TimeToLive(ctx context.Context, id clientv3.LeaseID, opts ...clientv3.LeaseOption) (*clientv3.LeaseTimeToLiveResponse, error) {
+	resp, err := l.Lease.TimeToLive(ctx, id, opts...)
+	if err != nil {
+		return nil, err
+	}
+	if len(resp.Keys) > 0 {
+		var outKeys [][]byte
+		for i := range resp.Keys {
+			if len(resp.Keys[i]) < len(l.pfx) {
+				// too short
+				continue
+			}
+			if !bytes.Equal(resp.Keys[i][:len(l.pfx)], l.pfx) {
+				// doesn't match prefix
+				continue
+			}
+			// strip prefix
+			outKeys = append(outKeys, resp.Keys[i][len(l.pfx):])
+		}
+		resp.Keys = outKeys
+	}
+	return resp, nil
+}
diff --git a/vendor/github.com/coreos/etcd/clientv3/namespace/util.go b/vendor/github.com/coreos/etcd/clientv3/namespace/util.go
new file mode 100644
index 0000000..ecf0404
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/namespace/util.go
@@ -0,0 +1,42 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package namespace
+
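+// prefixInterval returns the prefixed form of the interval [key, end).
+// Illustrative examples with prefix "pfx/":
+//
+//	prefixInterval("pfx/", []byte("abc"), nil)           // "pfx/abc", nil
+//	prefixInterval("pfx/", []byte("abc"), []byte("abd")) // "pfx/abc", "pfx/abd"
+//	prefixInterval("pfx/", []byte{0}, []byte{0})         // "pfx/\x00", "pfx0" (whole keyspace)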
+func prefixInterval(pfx string, key, end []byte) (pfxKey []byte, pfxEnd []byte) {
+	pfxKey = make([]byte, len(pfx)+len(key))
+	copy(pfxKey[copy(pfxKey, pfx):], key)
+
+	if len(end) == 1 && end[0] == 0 {
+		// the edge of the keyspace
+		pfxEnd = make([]byte, len(pfx))
+		copy(pfxEnd, pfx)
+		ok := false
+		for i := len(pfxEnd) - 1; i >= 0; i-- {
+			if pfxEnd[i]++; pfxEnd[i] != 0 {
+				ok = true
+				break
+			}
+		}
+		if !ok {
+			// 0xff..ff => 0x00
+			pfxEnd = []byte{0}
+		}
+	} else if len(end) >= 1 {
+		pfxEnd = make([]byte, len(pfx)+len(end))
+		copy(pfxEnd[copy(pfxEnd, pfx):], end)
+	}
+
+	return pfxKey, pfxEnd
+}
diff --git a/vendor/github.com/coreos/etcd/clientv3/namespace/watch.go b/vendor/github.com/coreos/etcd/clientv3/namespace/watch.go
new file mode 100644
index 0000000..5a9596d
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/namespace/watch.go
@@ -0,0 +1,83 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package namespace
+
+import (
+	"context"
+	"sync"
+
+	"github.com/coreos/etcd/clientv3"
+)
+
+type watcherPrefix struct {
+	clientv3.Watcher
+	pfx string
+
+	wg       sync.WaitGroup
+	stopc    chan struct{}
+	stopOnce sync.Once
+}
+
+// NewWatcher wraps a Watcher instance so that all Watch requests
+// are prefixed with a given string and all Watch responses have
+// the prefix removed.
+func NewWatcher(w clientv3.Watcher, prefix string) clientv3.Watcher {
+	return &watcherPrefix{Watcher: w, pfx: prefix, stopc: make(chan struct{})}
+}
+
+func (w *watcherPrefix) Watch(ctx context.Context, key string, opts ...clientv3.OpOption) clientv3.WatchChan {
+	// since OpOption is opaque, determine range for prefixing through an OpGet
+	op := clientv3.OpGet(key, opts...)
+	end := op.RangeBytes()
+	pfxBegin, pfxEnd := prefixInterval(w.pfx, []byte(key), end)
+	if pfxEnd != nil {
+		opts = append(opts, clientv3.WithRange(string(pfxEnd)))
+	}
+
+	wch := w.Watcher.Watch(ctx, string(pfxBegin), opts...)
+
+	// translate watch events from prefixed to unprefixed
+	pfxWch := make(chan clientv3.WatchResponse)
+	w.wg.Add(1)
+	go func() {
+		defer func() {
+			close(pfxWch)
+			w.wg.Done()
+		}()
+		for wr := range wch {
+			for i := range wr.Events {
+				wr.Events[i].Kv.Key = wr.Events[i].Kv.Key[len(w.pfx):]
+				if wr.Events[i].PrevKv != nil {
+					wr.Events[i].PrevKv.Key = wr.Events[i].Kv.Key
+				}
+			}
+			select {
+			case pfxWch <- wr:
+			case <-ctx.Done():
+				return
+			case <-w.stopc:
+				return
+			}
+		}
+	}()
+	return pfxWch
+}
+
+func (w *watcherPrefix) Close() error {
+	err := w.Watcher.Close()
+	w.stopOnce.Do(func() { close(w.stopc) })
+	w.wg.Wait()
+	return err
+}
diff --git a/vendor/github.com/coreos/etcd/clientv3/naming/doc.go b/vendor/github.com/coreos/etcd/clientv3/naming/doc.go
new file mode 100644
index 0000000..71608cc
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/naming/doc.go
@@ -0,0 +1,56 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package naming provides an etcd-backed gRPC resolver for discovering gRPC services.
+//
+// To use, first import the packages:
+//
+//	import (
+//		"github.com/coreos/etcd/clientv3"
+//		etcdnaming "github.com/coreos/etcd/clientv3/naming"
+//
+//		"google.golang.org/grpc"
+//		"google.golang.org/grpc/naming"
+//	)
+//
+// Then, register new endpoint addresses for a service:
+//
+//	func etcdAdd(c *clientv3.Client, service, addr string) error {
+//		r := &etcdnaming.GRPCResolver{Client: c}
+//		return r.Update(c.Ctx(), service, naming.Update{Op: naming.Add, Addr: addr})
+//	}
+//
+// Dial an RPC service using the etcd gRPC resolver and a gRPC Balancer:
+//
+//	func etcdDial(c *clientv3.Client, service string) (*grpc.ClientConn, error) {
+//		r := &etcdnaming.GRPCResolver{Client: c}
+//		b := grpc.RoundRobin(r)
+//		return grpc.Dial(service, grpc.WithBalancer(b))
+//	}
+//
+// Optionally, force delete an endpoint:
+//
+//	func etcdDelete(c *clientv3.Client, service, addr string) error {
+//		r := &etcdnaming.GRPCResolver{Client: c}
+//		return r.Update(c.Ctx(), service, naming.Update{Op: naming.Delete, Addr: addr})
+//	}
+//
+// Or register an expiring endpoint with a lease:
+//
+//	func etcdLeaseAdd(c *clientv3.Client, lid clientv3.LeaseID, service, addr string) error {
+//		r := &etcdnaming.GRPCResolver{Client: c}
+//		return r.Update(c.Ctx(), service, naming.Update{Op: naming.Add, Addr: addr}, clientv3.WithLease(lid))
+//	}
+//
+package naming
diff --git a/vendor/github.com/coreos/etcd/clientv3/naming/grpc.go b/vendor/github.com/coreos/etcd/clientv3/naming/grpc.go
new file mode 100644
index 0000000..3c0e8e6
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/naming/grpc.go
@@ -0,0 +1,131 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package naming
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+
+	etcd "github.com/coreos/etcd/clientv3"
+
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/naming"
+	"google.golang.org/grpc/status"
+)
+
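+// ErrWatcherClosed is reported (wrapped in a gRPC status error) by Next
+// when the underlying etcd watch channel has been closed.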
+var ErrWatcherClosed = fmt.Errorf("naming: watch closed")
+
+// GRPCResolver creates a grpc.Watcher for a target to track its resolution changes.
+type GRPCResolver struct {
+	// Client is an initialized etcd client.
+	Client *etcd.Client
+}
+
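+// Update adds or deletes an endpoint for the given target; endpoints are
+// stored as JSON-encoded naming.Update values under "target/addr" keys.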
+func (gr *GRPCResolver) Update(ctx context.Context, target string, nm naming.Update, opts ...etcd.OpOption) (err error) {
+	switch nm.Op {
+	case naming.Add:
+		var v []byte
+		if v, err = json.Marshal(nm); err != nil {
+			return status.Error(codes.InvalidArgument, err.Error())
+		}
+		_, err = gr.Client.KV.Put(ctx, target+"/"+nm.Addr, string(v), opts...)
+	case naming.Delete:
+		_, err = gr.Client.Delete(ctx, target+"/"+nm.Addr, opts...)
+	default:
+		return status.Error(codes.InvalidArgument, "naming: bad naming op")
+	}
+	return err
+}
+
+func (gr *GRPCResolver) Resolve(target string) (naming.Watcher, error) {
+	ctx, cancel := context.WithCancel(context.Background())
+	w := &gRPCWatcher{c: gr.Client, target: target + "/", ctx: ctx, cancel: cancel}
+	return w, nil
+}
+
+type gRPCWatcher struct {
+	c      *etcd.Client
+	target string
+	ctx    context.Context
+	cancel context.CancelFunc
+	wch    etcd.WatchChan
+	err    error
+}
+
+// Next gets the next set of updates from the etcd resolver.
+// Calls to Next should be serialized; concurrent calls are not safe since
+// there is no way to reconcile the update ordering.
+func (gw *gRPCWatcher) Next() ([]*naming.Update, error) {
+	if gw.wch == nil {
+		// first Next() returns all addresses
+		return gw.firstNext()
+	}
+	if gw.err != nil {
+		return nil, gw.err
+	}
+
+	// process new events on target/*
+	wr, ok := <-gw.wch
+	if !ok {
+		gw.err = status.Error(codes.Unavailable, ErrWatcherClosed.Error())
+		return nil, gw.err
+	}
+	if gw.err = wr.Err(); gw.err != nil {
+		return nil, gw.err
+	}
+
+	updates := make([]*naming.Update, 0, len(wr.Events))
+	for _, e := range wr.Events {
+		var jupdate naming.Update
+		var err error
+		switch e.Type {
+		case etcd.EventTypePut:
+			err = json.Unmarshal(e.Kv.Value, &jupdate)
+			jupdate.Op = naming.Add
+		case etcd.EventTypeDelete:
+			err = json.Unmarshal(e.PrevKv.Value, &jupdate)
+			jupdate.Op = naming.Delete
+		}
+		if err == nil {
+			updates = append(updates, &jupdate)
+		}
+	}
+	return updates, nil
+}
+
+func (gw *gRPCWatcher) firstNext() ([]*naming.Update, error) {
+	// Use serialized request so resolution still works if the target etcd
+	// server is partitioned away from the quorum.
+	resp, err := gw.c.Get(gw.ctx, gw.target, etcd.WithPrefix(), etcd.WithSerializable())
+	if gw.err = err; err != nil {
+		return nil, err
+	}
+
+	updates := make([]*naming.Update, 0, len(resp.Kvs))
+	for _, kv := range resp.Kvs {
+		var jupdate naming.Update
+		if err := json.Unmarshal(kv.Value, &jupdate); err != nil {
+			continue
+		}
+		updates = append(updates, &jupdate)
+	}
+
+	opts := []etcd.OpOption{etcd.WithRev(resp.Header.Revision + 1), etcd.WithPrefix(), etcd.WithPrevKV()}
+	gw.wch = gw.c.Watch(gw.ctx, gw.target, opts...)
+	return updates, nil
+}
+
+func (gw *gRPCWatcher) Close() { gw.cancel() }
diff --git a/vendor/github.com/coreos/etcd/clientv3/op.go b/vendor/github.com/coreos/etcd/clientv3/op.go
new file mode 100644
index 0000000..c6ec5bf
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/op.go
@@ -0,0 +1,513 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+
+type opType int
+
+const (
+	// A default Op has opType 0, which is invalid.
+	tRange opType = iota + 1
+	tPut
+	tDeleteRange
+	tTxn
+)
+
+var (
+	noPrefixEnd = []byte{0}
+)
+
+// Op represents an Operation that kv can execute.
+type Op struct {
+	t   opType
+	key []byte
+	end []byte
+
+	// for range
+	limit        int64
+	sort         *SortOption
+	serializable bool
+	keysOnly     bool
+	countOnly    bool
+	minModRev    int64
+	maxModRev    int64
+	minCreateRev int64
+	maxCreateRev int64
+
+	// for range, watch
+	rev int64
+
+	// for watch, put, delete
+	prevKV bool
+
+	// for put
+	ignoreValue bool
+	ignoreLease bool
+
+	// progressNotify is for progress updates.
+	progressNotify bool
+	// createdNotify is for the created event.
+	createdNotify bool
+	// filters for watchers
+	filterPut    bool
+	filterDelete bool
+
+	// for put
+	val     []byte
+	leaseID LeaseID
+
+	// txn
+	cmps    []Cmp
+	thenOps []Op
+	elseOps []Op
+}
+
+// accessors / mutators
+
+func (op Op) IsTxn() bool              { return op.t == tTxn }
+func (op Op) Txn() ([]Cmp, []Op, []Op) { return op.cmps, op.thenOps, op.elseOps }
+
+// KeyBytes returns the byte slice holding the Op's key.
+func (op Op) KeyBytes() []byte { return op.key }
+
+// WithKeyBytes sets the byte slice for the Op's key.
+func (op *Op) WithKeyBytes(key []byte) { op.key = key }
+
+// RangeBytes returns the byte slice holding the Op's range end, if any.
+func (op Op) RangeBytes() []byte { return op.end }
+
+// Rev returns the requested revision, if any.
+func (op Op) Rev() int64 { return op.rev }
+
+// IsPut returns true iff the operation is a Put.
+func (op Op) IsPut() bool { return op.t == tPut }
+
+// IsGet returns true iff the operation is a Get.
+func (op Op) IsGet() bool { return op.t == tRange }
+
+// IsDelete returns true iff the operation is a Delete.
+func (op Op) IsDelete() bool { return op.t == tDeleteRange }
+
+// IsSerializable returns true if the serializable field is true.
+func (op Op) IsSerializable() bool { return op.serializable }
+
+// IsKeysOnly returns whether keysOnly is set.
+func (op Op) IsKeysOnly() bool { return op.keysOnly }
+
+// IsCountOnly returns whether countOnly is set.
+func (op Op) IsCountOnly() bool { return op.countOnly }
+
+// MinModRev returns the operation's minimum modify revision.
+func (op Op) MinModRev() int64 { return op.minModRev }
+
+// MaxModRev returns the operation's maximum modify revision.
+func (op Op) MaxModRev() int64 { return op.maxModRev }
+
+// MinCreateRev returns the operation's minimum create revision.
+func (op Op) MinCreateRev() int64 { return op.minCreateRev }
+
+// MaxCreateRev returns the operation's maximum create revision.
+func (op Op) MaxCreateRev() int64 { return op.maxCreateRev }
+
+// WithRangeBytes sets the byte slice for the Op's range end.
+func (op *Op) WithRangeBytes(end []byte) { op.end = end }
+
+// ValueBytes returns the byte slice holding the Op's value, if any.
+func (op Op) ValueBytes() []byte { return op.val }
+
+// WithValueBytes sets the byte slice for the Op's value.
+func (op *Op) WithValueBytes(v []byte) { op.val = v }
+
+func (op Op) toRangeRequest() *pb.RangeRequest {
+	if op.t != tRange {
+		panic("op.t != tRange")
+	}
+	r := &pb.RangeRequest{
+		Key:               op.key,
+		RangeEnd:          op.end,
+		Limit:             op.limit,
+		Revision:          op.rev,
+		Serializable:      op.serializable,
+		KeysOnly:          op.keysOnly,
+		CountOnly:         op.countOnly,
+		MinModRevision:    op.minModRev,
+		MaxModRevision:    op.maxModRev,
+		MinCreateRevision: op.minCreateRev,
+		MaxCreateRevision: op.maxCreateRev,
+	}
+	if op.sort != nil {
+		r.SortOrder = pb.RangeRequest_SortOrder(op.sort.Order)
+		r.SortTarget = pb.RangeRequest_SortTarget(op.sort.Target)
+	}
+	return r
+}
+
+func (op Op) toTxnRequest() *pb.TxnRequest {
+	thenOps := make([]*pb.RequestOp, len(op.thenOps))
+	for i, tOp := range op.thenOps {
+		thenOps[i] = tOp.toRequestOp()
+	}
+	elseOps := make([]*pb.RequestOp, len(op.elseOps))
+	for i, eOp := range op.elseOps {
+		elseOps[i] = eOp.toRequestOp()
+	}
+	cmps := make([]*pb.Compare, len(op.cmps))
+	for i := range op.cmps {
+		cmps[i] = (*pb.Compare)(&op.cmps[i])
+	}
+	return &pb.TxnRequest{Compare: cmps, Success: thenOps, Failure: elseOps}
+}
+
+func (op Op) toRequestOp() *pb.RequestOp {
+	switch op.t {
+	case tRange:
+		return &pb.RequestOp{Request: &pb.RequestOp_RequestRange{RequestRange: op.toRangeRequest()}}
+	case tPut:
+		r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease}
+		return &pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: r}}
+	case tDeleteRange:
+		r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV}
+		return &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{RequestDeleteRange: r}}
+	case tTxn:
+		return &pb.RequestOp{Request: &pb.RequestOp_RequestTxn{RequestTxn: op.toTxnRequest()}}
+	default:
+		panic("Unknown Op")
+	}
+}
+
+func (op Op) isWrite() bool {
+	if op.t == tTxn {
+		for _, tOp := range op.thenOps {
+			if tOp.isWrite() {
+				return true
+			}
+		}
+		for _, tOp := range op.elseOps {
+			if tOp.isWrite() {
+				return true
+			}
+		}
+		return false
+	}
+	return op.t != tRange
+}
+
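+// OpGet returns a "get" (range) operation for the given key with the
+// applied options.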
+func OpGet(key string, opts ...OpOption) Op {
+	ret := Op{t: tRange, key: []byte(key)}
+	ret.applyOpts(opts)
+	return ret
+}
+
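+// OpDelete returns a "delete" operation for the given key with the applied
+// options; it panics on options that do not apply to delete.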
+func OpDelete(key string, opts ...OpOption) Op {
+	ret := Op{t: tDeleteRange, key: []byte(key)}
+	ret.applyOpts(opts)
+	switch {
+	case ret.leaseID != 0:
+		panic("unexpected lease in delete")
+	case ret.limit != 0:
+		panic("unexpected limit in delete")
+	case ret.rev != 0:
+		panic("unexpected revision in delete")
+	case ret.sort != nil:
+		panic("unexpected sort in delete")
+	case ret.serializable:
+		panic("unexpected serializable in delete")
+	case ret.countOnly:
+		panic("unexpected countOnly in delete")
+	case ret.minModRev != 0, ret.maxModRev != 0:
+		panic("unexpected mod revision filter in delete")
+	case ret.minCreateRev != 0, ret.maxCreateRev != 0:
+		panic("unexpected create revision filter in delete")
+	case ret.filterDelete, ret.filterPut:
+		panic("unexpected filter in delete")
+	case ret.createdNotify:
+		panic("unexpected createdNotify in delete")
+	}
+	return ret
+}
+
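+// OpPut returns a "put" operation for the given key and value with the
+// applied options; it panics on options that do not apply to put.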
+func OpPut(key, val string, opts ...OpOption) Op {
+	ret := Op{t: tPut, key: []byte(key), val: []byte(val)}
+	ret.applyOpts(opts)
+	switch {
+	case ret.end != nil:
+		panic("unexpected range in put")
+	case ret.limit != 0:
+		panic("unexpected limit in put")
+	case ret.rev != 0:
+		panic("unexpected revision in put")
+	case ret.sort != nil:
+		panic("unexpected sort in put")
+	case ret.serializable:
+		panic("unexpected serializable in put")
+	case ret.countOnly:
+		panic("unexpected countOnly in put")
+	case ret.minModRev != 0, ret.maxModRev != 0:
+		panic("unexpected mod revision filter in put")
+	case ret.minCreateRev != 0, ret.maxCreateRev != 0:
+		panic("unexpected create revision filter in put")
+	case ret.filterDelete, ret.filterPut:
+		panic("unexpected filter in put")
+	case ret.createdNotify:
+		panic("unexpected createdNotify in put")
+	}
+	return ret
+}
+
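+// OpTxn returns a transaction operation built from the given comparisons
+// and then/else operation lists. For example (illustrative), a read-only
+// nested transaction op:
+//
+//	op := OpTxn(nil, []Op{OpGet("k")}, nil)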
+func OpTxn(cmps []Cmp, thenOps []Op, elseOps []Op) Op {
+	return Op{t: tTxn, cmps: cmps, thenOps: thenOps, elseOps: elseOps}
+}
+
+func opWatch(key string, opts ...OpOption) Op {
+	ret := Op{t: tRange, key: []byte(key)}
+	ret.applyOpts(opts)
+	switch {
+	case ret.leaseID != 0:
+		panic("unexpected lease in watch")
+	case ret.limit != 0:
+		panic("unexpected limit in watch")
+	case ret.sort != nil:
+		panic("unexpected sort in watch")
+	case ret.serializable:
+		panic("unexpected serializable in watch")
+	case ret.countOnly:
+		panic("unexpected countOnly in watch")
+	case ret.minModRev != 0, ret.maxModRev != 0:
+		panic("unexpected mod revision filter in watch")
+	case ret.minCreateRev != 0, ret.maxCreateRev != 0:
+		panic("unexpected create revision filter in watch")
+	}
+	return ret
+}
+
+func (op *Op) applyOpts(opts []OpOption) {
+	for _, opt := range opts {
+		opt(op)
+	}
+}
+
+// OpOption configures Operations like Get, Put, Delete.
+type OpOption func(*Op)
+
+// WithLease attaches a lease ID to a key in a 'Put' request.
+func WithLease(leaseID LeaseID) OpOption {
+	return func(op *Op) { op.leaseID = leaseID }
+}
+
+// WithLimit limits the number of results to return from 'Get' request.
+// If WithLimit is given a 0 limit, it is treated as no limit.
+func WithLimit(n int64) OpOption { return func(op *Op) { op.limit = n } }
+
+// WithRev specifies the store revision for a 'Get' request, or the
+// start revision of a 'Watch' request.
+func WithRev(rev int64) OpOption { return func(op *Op) { op.rev = rev } }
+
+// WithSort specifies the ordering in 'Get' request. It requires
+// 'WithRange' and/or 'WithPrefix' to be specified too.
+// 'target' specifies the field to sort by: key, version, create revision, mod revision, or value.
+// 'order' can be either 'SortNone', 'SortAscend', 'SortDescend'.
+func WithSort(target SortTarget, order SortOrder) OpOption {
+	return func(op *Op) {
+		if target == SortByKey && order == SortAscend {
+			// If order != SortNone, server fetches the entire key-space,
+			// and then applies the sort and limit, if provided.
+			// Since by default the server returns results sorted by keys
+			// in lexicographically ascending order, the client should ignore
+			// SortOrder if the target is SortByKey.
+			order = SortNone
+		}
+		op.sort = &SortOption{target, order}
+	}
+}
+
+// GetPrefixRangeEnd gets the range end of the prefix.
+// 'Get(foo, WithPrefix())' is equal to 'Get(foo, WithRange(GetPrefixRangeEnd(foo))'.
+func GetPrefixRangeEnd(prefix string) string {
+	return string(getPrefix([]byte(prefix)))
+}
+
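+// getPrefix returns the end of the key range covering every key with the
+// given prefix, e.g. getPrefix([]byte("foo")) yields []byte("fop").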
+func getPrefix(key []byte) []byte {
+	end := make([]byte, len(key))
+	copy(end, key)
+	for i := len(end) - 1; i >= 0; i-- {
+		if end[i] < 0xff {
+			end[i] = end[i] + 1
+			end = end[:i+1]
+			return end
+		}
+	}
+	// next prefix does not exist (e.g., 0xffff);
+	// default to WithFromKey policy
+	return noPrefixEnd
+}
+
+// WithPrefix enables 'Get', 'Delete', or 'Watch' requests to operate
+// on the keys with matching prefix. For example, 'Get(foo, WithPrefix())'
+// can return 'foo1', 'foo2', and so on.
+func WithPrefix() OpOption {
+	return func(op *Op) {
+		if len(op.key) == 0 {
+			op.key, op.end = []byte{0}, []byte{0}
+			return
+		}
+		op.end = getPrefix(op.key)
+	}
+}
+
+// WithRange specifies the range of 'Get', 'Delete', 'Watch' requests.
+// For example, 'Get' requests with 'WithRange(end)' returns
+// the keys in the range [key, end).
+// endKey must be lexicographically greater than the start key.
+func WithRange(endKey string) OpOption {
+	return func(op *Op) { op.end = []byte(endKey) }
+}
+
+// WithFromKey specifies the range of 'Get', 'Delete', 'Watch' requests
+// to be all keys equal to or greater than the given key.
+func WithFromKey() OpOption { return WithRange("\x00") }
+
+// WithSerializable makes a 'Get' request serializable. By default,
+// requests are linearizable. Serializable requests are better for low-latency
+// requirements, at the cost of possibly stale results.
+func WithSerializable() OpOption {
+	return func(op *Op) { op.serializable = true }
+}
+
+// WithKeysOnly makes the 'Get' request return only the keys; the
+// corresponding values are omitted.
+func WithKeysOnly() OpOption {
+	return func(op *Op) { op.keysOnly = true }
+}
+
+// WithCountOnly makes the 'Get' request return only the count of keys.
+func WithCountOnly() OpOption {
+	return func(op *Op) { op.countOnly = true }
+}
+
+// WithMinModRev filters out keys for Get with modification revisions less than the given revision.
+func WithMinModRev(rev int64) OpOption { return func(op *Op) { op.minModRev = rev } }
+
+// WithMaxModRev filters out keys for Get with modification revisions greater than the given revision.
+func WithMaxModRev(rev int64) OpOption { return func(op *Op) { op.maxModRev = rev } }
+
+// WithMinCreateRev filters out keys for Get with creation revisions less than the given revision.
+func WithMinCreateRev(rev int64) OpOption { return func(op *Op) { op.minCreateRev = rev } }
+
+// WithMaxCreateRev filters out keys for Get with creation revisions greater than the given revision.
+func WithMaxCreateRev(rev int64) OpOption { return func(op *Op) { op.maxCreateRev = rev } }
+
+// WithFirstCreate gets the key with the oldest creation revision in the request range.
+func WithFirstCreate() []OpOption { return withTop(SortByCreateRevision, SortAscend) }
+
+// WithLastCreate gets the key with the latest creation revision in the request range.
+func WithLastCreate() []OpOption { return withTop(SortByCreateRevision, SortDescend) }
+
+// WithFirstKey gets the lexically first key in the request range.
+func WithFirstKey() []OpOption { return withTop(SortByKey, SortAscend) }
+
+// WithLastKey gets the lexically last key in the request range.
+func WithLastKey() []OpOption { return withTop(SortByKey, SortDescend) }
+
+// WithFirstRev gets the key with the oldest modification revision in the request range.
+func WithFirstRev() []OpOption { return withTop(SortByModRevision, SortAscend) }
+
+// WithLastRev gets the key with the latest modification revision in the request range.
+func WithLastRev() []OpOption { return withTop(SortByModRevision, SortDescend) }
+
+// withTop gets the first key over the get's prefix given a sort order.
+func withTop(target SortTarget, order SortOrder) []OpOption {
+	return []OpOption{WithPrefix(), WithSort(target, order), WithLimit(1)}
+}
+
+// WithProgressNotify makes the watch server send periodic progress updates
+// every 10 minutes when there are no incoming events.
+// Progress updates have zero events in WatchResponse.
+func WithProgressNotify() OpOption {
+	return func(op *Op) {
+		op.progressNotify = true
+	}
+}
+
+// WithCreatedNotify makes the watch server send the created event.
+func WithCreatedNotify() OpOption {
+	return func(op *Op) {
+		op.createdNotify = true
+	}
+}
+
+// WithFilterPut discards PUT events from the watcher.
+func WithFilterPut() OpOption {
+	return func(op *Op) { op.filterPut = true }
+}
+
+// WithFilterDelete discards DELETE events from the watcher.
+func WithFilterDelete() OpOption {
+	return func(op *Op) { op.filterDelete = true }
+}
+
+// WithPrevKV gets the previous key-value pair before the event happens. If the previous KV is already compacted,
+// nothing will be returned.
+func WithPrevKV() OpOption {
+	return func(op *Op) {
+		op.prevKV = true
+	}
+}
+
+// WithIgnoreValue updates the key using its current value.
+// This option cannot be combined with non-empty values.
+// Returns an error if the key does not exist.
+func WithIgnoreValue() OpOption {
+	return func(op *Op) {
+		op.ignoreValue = true
+	}
+}
+
+// WithIgnoreLease updates the key using its current lease.
+// This option cannot be combined with WithLease.
+// Returns an error if the key does not exist.
+func WithIgnoreLease() OpOption {
+	return func(op *Op) {
+		op.ignoreLease = true
+	}
+}
+
+// LeaseOp represents an Operation that lease can execute.
+type LeaseOp struct {
+	id LeaseID
+
+	// for TimeToLive
+	attachedKeys bool
+}
+
+// LeaseOption configures lease operations.
+type LeaseOption func(*LeaseOp)
+
+func (op *LeaseOp) applyOpts(opts []LeaseOption) {
+	for _, opt := range opts {
+		opt(op)
+	}
+}
+
+// WithAttachedKeys makes TimeToLive list the keys attached to the given lease ID.
+func WithAttachedKeys() LeaseOption {
+	return func(op *LeaseOp) { op.attachedKeys = true }
+}
+
+func toLeaseTimeToLiveRequest(id LeaseID, opts ...LeaseOption) *pb.LeaseTimeToLiveRequest {
+	ret := &LeaseOp{id: id}
+	ret.applyOpts(opts)
+	return &pb.LeaseTimeToLiveRequest{ID: int64(id), Keys: ret.attachedKeys}
+}
diff --git a/vendor/github.com/coreos/etcd/clientv3/options.go b/vendor/github.com/coreos/etcd/clientv3/options.go
new file mode 100644
index 0000000..fa25811
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/options.go
@@ -0,0 +1,49 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	"math"
+
+	"google.golang.org/grpc"
+)
+
+var (
+	// Disable gRPC internal retry logic
+	// TODO: enable when gRPC retry is stable (FailFast=false)
+	// Reference:
+	//  - https://github.com/grpc/grpc-go/issues/1532
+	//  - https://github.com/grpc/proposal/blob/master/A6-client-retries.md
+	defaultFailFast = grpc.FailFast(true)
+
+	// client-side request send limit, gRPC default is math.MaxInt32
+	// Make sure that "client-side send limit < server-side default send/recv limit"
+	// Same value as "embed.DefaultMaxRequestBytes" plus gRPC overhead bytes
+	defaultMaxCallSendMsgSize = grpc.MaxCallSendMsgSize(2 * 1024 * 1024)
+
+	// client-side response receive limit, gRPC default is 4MB
+	// Make sure that "client-side receive limit >= server-side default send/recv limit"
+	// because range response can easily exceed request send limits
+	// Default to math.MaxInt32; writes exceeding server-side send limit fail anyway
+	defaultMaxCallRecvMsgSize = grpc.MaxCallRecvMsgSize(math.MaxInt32)
+)
+
+// defaultCallOpts defines a list of default "gRPC.CallOption".
+// Some options are exposed to "clientv3.Config".
+// Defaults will be overridden by the settings in "clientv3.Config".
+var defaultCallOpts = []grpc.CallOption{defaultFailFast, defaultMaxCallSendMsgSize, defaultMaxCallRecvMsgSize}
+
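+// A hedged example of raising these limits from application code, assuming
+// the MaxCallSendMsgSize/MaxCallRecvMsgSize fields exposed by
+// "clientv3.Config" as noted above:
+//
+//	cfg := clientv3.Config{
+//		Endpoints:          []string{"localhost:2379"},
+//		MaxCallSendMsgSize: 4 * 1024 * 1024,
+//		MaxCallRecvMsgSize: 8 * 1024 * 1024,
+//	}
+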
+// MaxLeaseTTL is the maximum lease TTL value
+const MaxLeaseTTL = 9000000000
diff --git a/vendor/github.com/coreos/etcd/clientv3/ordering/doc.go b/vendor/github.com/coreos/etcd/clientv3/ordering/doc.go
new file mode 100644
index 0000000..856f330
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/ordering/doc.go
@@ -0,0 +1,42 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package ordering is a clientv3 wrapper that caches response header revisions
+// to detect ordering violations from stale responses. Users may define a
+// policy on how to handle the ordering violation, but typically the client
+// should connect to another endpoint and reissue the request.
+//
+// The most common situation where an ordering violation happens is a client
+// reconnects to a partitioned member and issues a serializable read. Since the
+// partitioned member is likely behind the last member, it may return a Get
+// response based on a store revision older than the store revision used to
+// service a prior Get on the former endpoint.
+//
+// First, create a client:
+//
+//	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
+//	if err != nil {
+//		// handle error!
+//	}
+//
+// Next, override the client interface with the ordering wrapper:
+//
+//	vf := func(op clientv3.Op, resp clientv3.OpResponse, prevRev int64) error {
+//		return fmt.Errorf("ordering: issued %+v, got %+v, expected rev=%v", op, resp, prevRev)
+//	}
+//	cli.KV = ordering.NewKV(cli.KV, vf)
+//
+// Now calls using 'cli' will reject order violations with an error.
+//
+package ordering
diff --git a/vendor/github.com/coreos/etcd/clientv3/ordering/kv.go b/vendor/github.com/coreos/etcd/clientv3/ordering/kv.go
new file mode 100644
index 0000000..dc9926e
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/ordering/kv.go
@@ -0,0 +1,149 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ordering
+
+import (
+	"context"
+	"sync"
+
+	"github.com/coreos/etcd/clientv3"
+)
+
+// kvOrdering ensures that serialized requests do not return
+// Get responses with revisions less than the previously
+// returned revision.
+type kvOrdering struct {
+	clientv3.KV
+	orderViolationFunc OrderViolationFunc
+	prevRev            int64
+	revMu              sync.RWMutex
+}
+
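+// NewKV wraps a KV so that Get and Txn responses are checked against the
+// highest previously observed revision; orderViolationFunc decides how to
+// recover when an older revision is returned.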
+func NewKV(kv clientv3.KV, orderViolationFunc OrderViolationFunc) *kvOrdering {
+	return &kvOrdering{kv, orderViolationFunc, 0, sync.RWMutex{}}
+}
+
+func (kv *kvOrdering) getPrevRev() int64 {
+	kv.revMu.RLock()
+	defer kv.revMu.RUnlock()
+	return kv.prevRev
+}
+
+func (kv *kvOrdering) setPrevRev(currRev int64) {
+	kv.revMu.Lock()
+	defer kv.revMu.Unlock()
+	if currRev > kv.prevRev {
+		kv.prevRev = currRev
+	}
+}
+
+func (kv *kvOrdering) Get(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.GetResponse, error) {
+	// prevRev is stored in a local variable in order to record the prevRev
+	// at the beginning of the Get operation, because concurrent
+	// access to kvOrdering could change the prevRev field in the
+	// middle of the Get operation.
+	prevRev := kv.getPrevRev()
+	op := clientv3.OpGet(key, opts...)
+	for {
+		r, err := kv.KV.Do(ctx, op)
+		if err != nil {
+			return nil, err
+		}
+		resp := r.Get()
+		if resp.Header.Revision == prevRev {
+			return resp, nil
+		} else if resp.Header.Revision > prevRev {
+			kv.setPrevRev(resp.Header.Revision)
+			return resp, nil
+		}
+		err = kv.orderViolationFunc(op, r, prevRev)
+		if err != nil {
+			return nil, err
+		}
+	}
+}
+
+func (kv *kvOrdering) Txn(ctx context.Context) clientv3.Txn {
+	return &txnOrdering{
+		kv.KV.Txn(ctx),
+		kv,
+		ctx,
+		sync.Mutex{},
+		[]clientv3.Cmp{},
+		[]clientv3.Op{},
+		[]clientv3.Op{},
+	}
+}
+
+// txnOrdering ensures that serialized requests do not return
+// txn responses with revisions less than the previously
+// returned revision.
+type txnOrdering struct {
+	clientv3.Txn
+	*kvOrdering
+	ctx     context.Context
+	mu      sync.Mutex
+	cmps    []clientv3.Cmp
+	thenOps []clientv3.Op
+	elseOps []clientv3.Op
+}
+
+func (txn *txnOrdering) If(cs ...clientv3.Cmp) clientv3.Txn {
+	txn.mu.Lock()
+	defer txn.mu.Unlock()
+	txn.cmps = cs
+	txn.Txn.If(cs...)
+	return txn
+}
+
+func (txn *txnOrdering) Then(ops ...clientv3.Op) clientv3.Txn {
+	txn.mu.Lock()
+	defer txn.mu.Unlock()
+	txn.thenOps = ops
+	txn.Txn.Then(ops...)
+	return txn
+}
+
+func (txn *txnOrdering) Else(ops ...clientv3.Op) clientv3.Txn {
+	txn.mu.Lock()
+	defer txn.mu.Unlock()
+	txn.elseOps = ops
+	txn.Txn.Else(ops...)
+	return txn
+}
+
+func (txn *txnOrdering) Commit() (*clientv3.TxnResponse, error) {
+	// prevRev is stored in a local variable in order to record the prevRev
+	// at the beginning of the Commit operation, because concurrent
+	// access to txnOrdering could change the prevRev field in the
+	// middle of the Commit operation.
+	prevRev := txn.getPrevRev()
+	opTxn := clientv3.OpTxn(txn.cmps, txn.thenOps, txn.elseOps)
+	for {
+		opResp, err := txn.KV.Do(txn.ctx, opTxn)
+		if err != nil {
+			return nil, err
+		}
+		txnResp := opResp.Txn()
+		if txnResp.Header.Revision >= prevRev {
+			txn.setPrevRev(txnResp.Header.Revision)
+			return txnResp, nil
+		}
+		err = txn.orderViolationFunc(opTxn, opResp, prevRev)
+		if err != nil {
+			return nil, err
+		}
+	}
+}
diff --git a/vendor/github.com/coreos/etcd/clientv3/ordering/util.go b/vendor/github.com/coreos/etcd/clientv3/ordering/util.go
new file mode 100644
index 0000000..190a591
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/ordering/util.go
@@ -0,0 +1,49 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ordering
+
+import (
+	"errors"
+	"sync"
+	"time"
+
+	"github.com/coreos/etcd/clientv3"
+)
+
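+// OrderViolationFunc is invoked when a response carries a revision older
+// than one previously observed; returning an error aborts the request.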
+type OrderViolationFunc func(op clientv3.Op, resp clientv3.OpResponse, prevRev int64) error
+
+var ErrNoGreaterRev = errors.New("etcdclient: no cluster members have a revision higher than the previously received revision")
+
+func NewOrderViolationSwitchEndpointClosure(c clientv3.Client) OrderViolationFunc {
+	var mu sync.Mutex
+	violationCount := 0
+	return func(op clientv3.Op, resp clientv3.OpResponse, prevRev int64) error {
+		mu.Lock()
+		defer mu.Unlock()
+		// check violationCount under the mutex to avoid a data race with
+		// concurrent invocations of this closure
+		if violationCount > len(c.Endpoints()) {
+			return ErrNoGreaterRev
+		}
+		eps := c.Endpoints()
+		// force client to connect to given endpoint by limiting to a single endpoint
+		c.SetEndpoints(eps[violationCount%len(eps)])
+		// give enough time for operation
+		time.Sleep(1 * time.Second)
+		// set the available endpoints back to all endpoints to ensure
+		// the client has access to all of them.
+		c.SetEndpoints(eps...)
+		violationCount++
+		return nil
+	}
+}
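+
+// A hedged usage sketch (assuming an initialized clientv3.Client "cli"):
+//
+//	cli.KV = ordering.NewKV(cli.KV,
+//		ordering.NewOrderViolationSwitchEndpointClosure(*cli))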
diff --git a/vendor/github.com/coreos/etcd/clientv3/ready_wait.go b/vendor/github.com/coreos/etcd/clientv3/ready_wait.go
new file mode 100644
index 0000000..c6ef585
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/ready_wait.go
@@ -0,0 +1,30 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import "context"
+
+// TODO: remove this when "FailFast=false" is fixed.
+// See https://github.com/grpc/grpc-go/issues/1532.
+func readyWait(rpcCtx, clientCtx context.Context, ready <-chan struct{}) error {
+	select {
+	case <-ready:
+		return nil
+	case <-rpcCtx.Done():
+		return rpcCtx.Err()
+	case <-clientCtx.Done():
+		return clientCtx.Err()
+	}
+}
diff --git a/vendor/github.com/coreos/etcd/clientv3/retry.go b/vendor/github.com/coreos/etcd/clientv3/retry.go
new file mode 100644
index 0000000..7f89ba6
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/retry.go
@@ -0,0 +1,496 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	"context"
+
+	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+type retryPolicy uint8
+
+const (
+	repeatable retryPolicy = iota
+	nonRepeatable
+)
+
+type rpcFunc func(ctx context.Context) error
+type retryRPCFunc func(context.Context, rpcFunc, retryPolicy) error
+type retryStopErrFunc func(error) bool
+
+// immutable requests (e.g. Get) should be retried unless the failure is
+// an obvious server-side error (e.g. rpctypes.ErrRequestTooLarge).
+//
+// "isRepeatableStopError" returns "true" when an immutable request
+// is interrupted by server-side or gRPC-side error and its status
+// code is not transient (!= codes.Unavailable).
+//
+// Returning "true" means retry should stop, since client cannot
+// handle itself even with retries.
+func isRepeatableStopError(err error) bool {
+	eErr := rpctypes.Error(err)
+	// always stop retry on etcd errors
+	if serverErr, ok := eErr.(rpctypes.EtcdError); ok && serverErr.Code() != codes.Unavailable {
+		return true
+	}
+	// only retry if unavailable
+	ev, _ := status.FromError(err)
+	return ev.Code() != codes.Unavailable
+}
+
+// mutable requests (e.g. Put, Delete, Txn) should only be retried when the
+// status code is codes.Unavailable and the initial connection has not yet
+// been established (no pinned endpoint).
+//
+// "isNonRepeatableStopError" returns "true" when a mutable request
+// is interrupted by non-transient error that client cannot handle itself,
+// or transient error while the connection has already been established
+// (pinned endpoint exists).
+//
+// Returning "true" means retry should stop, otherwise it violates
+// write-at-most-once semantics.
+func isNonRepeatableStopError(err error) bool {
+	ev, _ := status.FromError(err)
+	if ev.Code() != codes.Unavailable {
+		return true
+	}
+	desc := rpctypes.ErrorDesc(err)
+	return desc != "there is no address available" && desc != "there is no connection available"
+}
+
+func (c *Client) newRetryWrapper() retryRPCFunc {
+	return func(rpcCtx context.Context, f rpcFunc, rp retryPolicy) error {
+		var isStop retryStopErrFunc
+		switch rp {
+		case repeatable:
+			isStop = isRepeatableStopError
+		case nonRepeatable:
+			isStop = isNonRepeatableStopError
+		}
+		for {
+			if err := readyWait(rpcCtx, c.ctx, c.balancer.ConnectNotify()); err != nil {
+				return err
+			}
+			pinned := c.balancer.pinned()
+			err := f(rpcCtx)
+			if err == nil {
+				return nil
+			}
+			logger.Lvl(4).Infof("clientv3/retry: error %q on pinned endpoint %q", err.Error(), pinned)
+
+			if s, ok := status.FromError(err); ok && (s.Code() == codes.Unavailable || s.Code() == codes.DeadlineExceeded || s.Code() == codes.Internal) {
+				// mark this before endpoint switch is triggered
+				c.balancer.hostPortError(pinned, err)
+				c.balancer.next()
+				logger.Lvl(4).Infof("clientv3/retry: switching from %q due to error %q", pinned, err.Error())
+			}
+
+			if isStop(err) {
+				return err
+			}
+		}
+	}
+}
+
+func (c *Client) newAuthRetryWrapper(retryf retryRPCFunc) retryRPCFunc {
+	return func(rpcCtx context.Context, f rpcFunc, rp retryPolicy) error {
+		for {
+			pinned := c.balancer.pinned()
+			err := retryf(rpcCtx, f, rp)
+			if err == nil {
+				return nil
+			}
+			logger.Lvl(4).Infof("clientv3/auth-retry: error %q on pinned endpoint %q", err.Error(), pinned)
+			// always stop retry on etcd errors other than invalid auth token
+			if rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken {
+				gterr := c.getToken(rpcCtx)
+				if gterr != nil {
+					logger.Lvl(4).Infof("clientv3/auth-retry: cannot retry due to error %q(%q) on pinned endpoint %q", err.Error(), gterr.Error(), pinned)
+					return err // return the original error for simplicity
+				}
+				continue
+			}
+			return err
+		}
+	}
+}
+
+type retryKVClient struct {
+	kc     pb.KVClient
+	retryf retryRPCFunc
+}
+
+// RetryKVClient implements a KVClient.
+func RetryKVClient(c *Client) pb.KVClient {
+	return &retryKVClient{
+		kc:     pb.NewKVClient(c.conn),
+		retryf: c.newAuthRetryWrapper(c.newRetryWrapper()),
+	}
+}
+func (rkv *retryKVClient) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (resp *pb.RangeResponse, err error) {
+	err = rkv.retryf(ctx, func(rctx context.Context) error {
+		resp, err = rkv.kc.Range(rctx, in, opts...)
+		return err
+	}, repeatable)
+	return resp, err
+}
+
+func (rkv *retryKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) {
+	err = rkv.retryf(ctx, func(rctx context.Context) error {
+		resp, err = rkv.kc.Put(rctx, in, opts...)
+		return err
+	}, nonRepeatable)
+	return resp, err
+}
+
+func (rkv *retryKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) {
+	err = rkv.retryf(ctx, func(rctx context.Context) error {
+		resp, err = rkv.kc.DeleteRange(rctx, in, opts...)
+		return err
+	}, nonRepeatable)
+	return resp, err
+}
+
+func (rkv *retryKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) {
+	// TODO: "repeatable" for read-only txn
+	err = rkv.retryf(ctx, func(rctx context.Context) error {
+		resp, err = rkv.kc.Txn(rctx, in, opts...)
+		return err
+	}, nonRepeatable)
+	return resp, err
+}
+
+func (rkv *retryKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) {
+	err = rkv.retryf(ctx, func(rctx context.Context) error {
+		resp, err = rkv.kc.Compact(rctx, in, opts...)
+		return err
+	}, nonRepeatable)
+	return resp, err
+}
+
+type retryLeaseClient struct {
+	lc     pb.LeaseClient
+	retryf retryRPCFunc
+}
+
+// RetryLeaseClient implements a LeaseClient.
+func RetryLeaseClient(c *Client) pb.LeaseClient {
+	return &retryLeaseClient{
+		lc:     pb.NewLeaseClient(c.conn),
+		retryf: c.newAuthRetryWrapper(c.newRetryWrapper()),
+	}
+}
+
+func (rlc *retryLeaseClient) LeaseTimeToLive(ctx context.Context, in *pb.LeaseTimeToLiveRequest, opts ...grpc.CallOption) (resp *pb.LeaseTimeToLiveResponse, err error) {
+	err = rlc.retryf(ctx, func(rctx context.Context) error {
+		resp, err = rlc.lc.LeaseTimeToLive(rctx, in, opts...)
+		return err
+	}, repeatable)
+	return resp, err
+}
+
+func (rlc *retryLeaseClient) LeaseLeases(ctx context.Context, in *pb.LeaseLeasesRequest, opts ...grpc.CallOption) (resp *pb.LeaseLeasesResponse, err error) {
+	err = rlc.retryf(ctx, func(rctx context.Context) error {
+		resp, err = rlc.lc.LeaseLeases(rctx, in, opts...)
+		return err
+	}, repeatable)
+	return resp, err
+}
+
+func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (resp *pb.LeaseGrantResponse, err error) {
+	err = rlc.retryf(ctx, func(rctx context.Context) error {
+		resp, err = rlc.lc.LeaseGrant(rctx, in, opts...)
+		return err
+	}, repeatable)
+	return resp, err
+
+}
+
+func (rlc *retryLeaseClient) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (resp *pb.LeaseRevokeResponse, err error) {
+	err = rlc.retryf(ctx, func(rctx context.Context) error {
+		resp, err = rlc.lc.LeaseRevoke(rctx, in, opts...)
+		return err
+	}, repeatable)
+	return resp, err
+}
+
+func (rlc *retryLeaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (stream pb.Lease_LeaseKeepAliveClient, err error) {
+	err = rlc.retryf(ctx, func(rctx context.Context) error {
+		stream, err = rlc.lc.LeaseKeepAlive(rctx, opts...)
+		return err
+	}, repeatable)
+	return stream, err
+}
+
+type retryClusterClient struct {
+	cc     pb.ClusterClient
+	retryf retryRPCFunc
+}
+
+// RetryClusterClient implements a ClusterClient.
+func RetryClusterClient(c *Client) pb.ClusterClient {
+	return &retryClusterClient{
+		cc:     pb.NewClusterClient(c.conn),
+		retryf: c.newRetryWrapper(),
+	}
+}
+
+func (rcc *retryClusterClient) MemberList(ctx context.Context, in *pb.MemberListRequest, opts ...grpc.CallOption) (resp *pb.MemberListResponse, err error) {
+	err = rcc.retryf(ctx, func(rctx context.Context) error {
+		resp, err = rcc.cc.MemberList(rctx, in, opts...)
+		return err
+	}, repeatable)
+	return resp, err
+}
+
+func (rcc *retryClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) {
+	err = rcc.retryf(ctx, func(rctx context.Context) error {
+		resp, err = rcc.cc.MemberAdd(rctx, in, opts...)
+		return err
+	}, nonRepeatable)
+	return resp, err
+}
+
+func (rcc *retryClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) {
+	err = rcc.retryf(ctx, func(rctx context.Context) error {
+		resp, err = rcc.cc.MemberRemove(rctx, in, opts...)
+		return err
+	}, nonRepeatable)
+	return resp, err
+}
+
+func (rcc *retryClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) {
+	err = rcc.retryf(ctx, func(rctx context.Context) error {
+		resp, err = rcc.cc.MemberUpdate(rctx, in, opts...)
+		return err
+	}, nonRepeatable)
+	return resp, err
+}
+
+type retryMaintenanceClient struct {
+	mc     pb.MaintenanceClient
+	retryf retryRPCFunc
+}
+
+// RetryMaintenanceClient implements a MaintenanceClient.
+func RetryMaintenanceClient(c *Client, conn *grpc.ClientConn) pb.MaintenanceClient {
+	return &retryMaintenanceClient{
+		mc:     pb.NewMaintenanceClient(conn),
+		retryf: c.newRetryWrapper(),
+	}
+}
+
+func (rmc *retryMaintenanceClient) Alarm(ctx context.Context, in *pb.AlarmRequest, opts ...grpc.CallOption) (resp *pb.AlarmResponse, err error) {
+	err = rmc.retryf(ctx, func(rctx context.Context) error {
+		resp, err = rmc.mc.Alarm(rctx, in, opts...)
+		return err
+	}, repeatable)
+	return resp, err
+}
+
+func (rmc *retryMaintenanceClient) Status(ctx context.Context, in *pb.StatusRequest, opts ...grpc.CallOption) (resp *pb.StatusResponse, err error) {
+	err = rmc.retryf(ctx, func(rctx context.Context) error {
+		resp, err = rmc.mc.Status(rctx, in, opts...)
+		return err
+	}, repeatable)
+	return resp, err
+}
+
+func (rmc *retryMaintenanceClient) Hash(ctx context.Context, in *pb.HashRequest, opts ...grpc.CallOption) (resp *pb.HashResponse, err error) {
+	err = rmc.retryf(ctx, func(rctx context.Context) error {
+		resp, err = rmc.mc.Hash(rctx, in, opts...)
+		return err
+	}, repeatable)
+	return resp, err
+}
+
+func (rmc *retryMaintenanceClient) HashKV(ctx context.Context, in *pb.HashKVRequest, opts ...grpc.CallOption) (resp *pb.HashKVResponse, err error) {
+	err = rmc.retryf(ctx, func(rctx context.Context) error {
+		resp, err = rmc.mc.HashKV(rctx, in, opts...)
+		return err
+	}, repeatable)
+	return resp, err
+}
+
+func (rmc *retryMaintenanceClient) Snapshot(ctx context.Context, in *pb.SnapshotRequest, opts ...grpc.CallOption) (stream pb.Maintenance_SnapshotClient, err error) {
+	err = rmc.retryf(ctx, func(rctx context.Context) error {
+		stream, err = rmc.mc.Snapshot(rctx, in, opts...)
+		return err
+	}, repeatable)
+	return stream, err
+}
+
+func (rmc *retryMaintenanceClient) MoveLeader(ctx context.Context, in *pb.MoveLeaderRequest, opts ...grpc.CallOption) (resp *pb.MoveLeaderResponse, err error) {
+	err = rmc.retryf(ctx, func(rctx context.Context) error {
+		resp, err = rmc.mc.MoveLeader(rctx, in, opts...)
+		return err
+	}, repeatable)
+	return resp, err
+}
+
+func (rmc *retryMaintenanceClient) Defragment(ctx context.Context, in *pb.DefragmentRequest, opts ...grpc.CallOption) (resp *pb.DefragmentResponse, err error) {
+	err = rmc.retryf(ctx, func(rctx context.Context) error {
+		resp, err = rmc.mc.Defragment(rctx, in, opts...)
+		return err
+	}, nonRepeatable)
+	return resp, err
+}
+
+type retryAuthClient struct {
+	ac     pb.AuthClient
+	retryf retryRPCFunc
+}
+
+// RetryAuthClient implements an AuthClient.
+func RetryAuthClient(c *Client) pb.AuthClient {
+	return &retryAuthClient{
+		ac:     pb.NewAuthClient(c.conn),
+		retryf: c.newRetryWrapper(),
+	}
+}
+
+func (rac *retryAuthClient) UserList(ctx context.Context, in *pb.AuthUserListRequest, opts ...grpc.CallOption) (resp *pb.AuthUserListResponse, err error) {
+	err = rac.retryf(ctx, func(rctx context.Context) error {
+		resp, err = rac.ac.UserList(rctx, in, opts...)
+		return err
+	}, repeatable)
+	return resp, err
+}
+
+func (rac *retryAuthClient) UserGet(ctx context.Context, in *pb.AuthUserGetRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGetResponse, err error) {
+	err = rac.retryf(ctx, func(rctx context.Context) error {
+		resp, err = rac.ac.UserGet(rctx, in, opts...)
+		return err
+	}, repeatable)
+	return resp, err
+}
+
+func (rac *retryAuthClient) RoleGet(ctx context.Context, in *pb.AuthRoleGetRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGetResponse, err error) {
+	err = rac.retryf(ctx, func(rctx context.Context) error {
+		resp, err = rac.ac.RoleGet(rctx, in, opts...)
+		return err
+	}, repeatable)
+	return resp, err
+}
+
+func (rac *retryAuthClient) RoleList(ctx context.Context, in *pb.AuthRoleListRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleListResponse, err error) {
+	err = rac.retryf(ctx, func(rctx context.Context) error {
+		resp, err = rac.ac.RoleList(rctx, in, opts...)
+		return err
+	}, repeatable)
+	return resp, err
+}
+
+func (rac *retryAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) {
+	err = rac.retryf(ctx, func(rctx context.Context) error {
+		resp, err = rac.ac.AuthEnable(rctx, in, opts...)
+		return err
+	}, nonRepeatable)
+	return resp, err
+}
+
+func (rac *retryAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) {
+	err = rac.retryf(ctx, func(rctx context.Context) error {
+		resp, err = rac.ac.AuthDisable(rctx, in, opts...)
+		return err
+	}, nonRepeatable)
+	return resp, err
+}
+
+func (rac *retryAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) {
+	err = rac.retryf(ctx, func(rctx context.Context) error {
+		resp, err = rac.ac.UserAdd(rctx, in, opts...)
+		return err
+	}, nonRepeatable)
+	return resp, err
+}
+
+func (rac *retryAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) {
+	err = rac.retryf(ctx, func(rctx context.Context) error {
+		resp, err = rac.ac.UserDelete(rctx, in, opts...)
+		return err
+	}, nonRepeatable)
+	return resp, err
+}
+
+func (rac *retryAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (resp *pb.AuthUserChangePasswordResponse, err error) {
+	err = rac.retryf(ctx, func(rctx context.Context) error {
+		resp, err = rac.ac.UserChangePassword(rctx, in, opts...)
+		return err
+	}, nonRepeatable)
+	return resp, err
+}
+
+func (rac *retryAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGrantRoleResponse, err error) {
+	err = rac.retryf(ctx, func(rctx context.Context) error {
+		resp, err = rac.ac.UserGrantRole(rctx, in, opts...)
+		return err
+	}, nonRepeatable)
+	return resp, err
+}
+
+func (rac *retryAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserRevokeRoleResponse, err error) {
+	err = rac.retryf(ctx, func(rctx context.Context) error {
+		resp, err = rac.ac.UserRevokeRole(rctx, in, opts...)
+		return err
+	}, nonRepeatable)
+	return resp, err
+}
+
+func (rac *retryAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleAddResponse, err error) {
+	err = rac.retryf(ctx, func(rctx context.Context) error {
+		resp, err = rac.ac.RoleAdd(rctx, in, opts...)
+		return err
+	}, nonRepeatable)
+	return resp, err
+}
+
+func (rac *retryAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleDeleteResponse, err error) {
+	err = rac.retryf(ctx, func(rctx context.Context) error {
+		resp, err = rac.ac.RoleDelete(rctx, in, opts...)
+		return err
+	}, nonRepeatable)
+	return resp, err
+}
+
+func (rac *retryAuthClient) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGrantPermissionResponse, err error) {
+	err = rac.retryf(ctx, func(rctx context.Context) error {
+		resp, err = rac.ac.RoleGrantPermission(rctx, in, opts...)
+		return err
+	}, nonRepeatable)
+	return resp, err
+}
+
+func (rac *retryAuthClient) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleRevokePermissionResponse, err error) {
+	err = rac.retryf(ctx, func(rctx context.Context) error {
+		resp, err = rac.ac.RoleRevokePermission(rctx, in, opts...)
+		return err
+	}, nonRepeatable)
+	return resp, err
+}
+
+func (rac *retryAuthClient) Authenticate(ctx context.Context, in *pb.AuthenticateRequest, opts ...grpc.CallOption) (resp *pb.AuthenticateResponse, err error) {
+	err = rac.retryf(ctx, func(rctx context.Context) error {
+		resp, err = rac.ac.Authenticate(rctx, in, opts...)
+		return err
+	}, nonRepeatable)
+	return resp, err
+}
diff --git a/vendor/github.com/coreos/etcd/clientv3/sort.go b/vendor/github.com/coreos/etcd/clientv3/sort.go
new file mode 100644
index 0000000..2bb9d9a
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/sort.go
@@ -0,0 +1,37 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+type SortTarget int
+type SortOrder int
+
+const (
+	SortNone SortOrder = iota
+	SortAscend
+	SortDescend
+)
+
+const (
+	SortByKey SortTarget = iota
+	SortByVersion
+	SortByCreateRevision
+	SortByModRevision
+	SortByValue
+)
+
+type SortOption struct {
+	Target SortTarget
+	Order  SortOrder
+}
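+
+// A sketch of typical consumption, assuming a *Client cli and the
+// WithSort/WithPrefix OpOptions defined elsewhere in this package:
+//
+//	resp, err := cli.Get(ctx, "foo", clientv3.WithPrefix(),
+//		clientv3.WithSort(clientv3.SortByKey, clientv3.SortDescend))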
diff --git a/vendor/github.com/coreos/etcd/clientv3/txn.go b/vendor/github.com/coreos/etcd/clientv3/txn.go
new file mode 100644
index 0000000..c3c2d24
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/txn.go
@@ -0,0 +1,151 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	"context"
+	"sync"
+
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+
+	"google.golang.org/grpc"
+)
+
+// Txn is the interface that wraps mini-transactions.
+//
+//	 Txn(context.TODO()).If(
+//	  Compare(Value(k1), ">", v1),
+//	  Compare(Version(k1), "=", 2)
+//	 ).Then(
+//	  OpPut(k2,v2), OpPut(k3,v3)
+//	 ).Else(
+//	  OpPut(k4,v4), OpPut(k5,v5)
+//	 ).Commit()
+//
+type Txn interface {
+	// If takes a list of comparisons. If all comparisons passed in succeed,
+	// the operations passed into Then() will be executed. Otherwise the
+	// operations passed into Else() will be executed.
+	If(cs ...Cmp) Txn
+
+	// Then takes a list of operations. The Ops list will be executed, if the
+	// comparisons passed in If() succeed.
+	Then(ops ...Op) Txn
+
+	// Else takes a list of operations. The Ops list will be executed, if the
+	// comparisons passed in If() fail.
+	Else(ops ...Op) Txn
+
+	// Commit tries to commit the transaction.
+	Commit() (*TxnResponse, error)
+}
+
+type txn struct {
+	kv  *kv
+	ctx context.Context
+
+	mu    sync.Mutex
+	cif   bool
+	cthen bool
+	celse bool
+
+	isWrite bool
+
+	cmps []*pb.Compare
+
+	sus []*pb.RequestOp
+	fas []*pb.RequestOp
+
+	callOpts []grpc.CallOption
+}
+
+func (txn *txn) If(cs ...Cmp) Txn {
+	txn.mu.Lock()
+	defer txn.mu.Unlock()
+
+	if txn.cif {
+		panic("cannot call If twice!")
+	}
+
+	if txn.cthen {
+		panic("cannot call If after Then!")
+	}
+
+	if txn.celse {
+		panic("cannot call If after Else!")
+	}
+
+	txn.cif = true
+
+	for i := range cs {
+		txn.cmps = append(txn.cmps, (*pb.Compare)(&cs[i]))
+	}
+
+	return txn
+}
+
+func (txn *txn) Then(ops ...Op) Txn {
+	txn.mu.Lock()
+	defer txn.mu.Unlock()
+
+	if txn.cthen {
+		panic("cannot call Then twice!")
+	}
+	if txn.celse {
+		panic("cannot call Then after Else!")
+	}
+
+	txn.cthen = true
+
+	for _, op := range ops {
+		txn.isWrite = txn.isWrite || op.isWrite()
+		txn.sus = append(txn.sus, op.toRequestOp())
+	}
+
+	return txn
+}
+
+func (txn *txn) Else(ops ...Op) Txn {
+	txn.mu.Lock()
+	defer txn.mu.Unlock()
+
+	if txn.celse {
+		panic("cannot call Else twice!")
+	}
+
+	txn.celse = true
+
+	for _, op := range ops {
+		txn.isWrite = txn.isWrite || op.isWrite()
+		txn.fas = append(txn.fas, op.toRequestOp())
+	}
+
+	return txn
+}
+
+func (txn *txn) Commit() (*TxnResponse, error) {
+	txn.mu.Lock()
+	defer txn.mu.Unlock()
+
+	r := &pb.TxnRequest{Compare: txn.cmps, Success: txn.sus, Failure: txn.fas}
+
+	var resp *pb.TxnResponse
+	var err error
+	resp, err = txn.kv.remote.Txn(txn.ctx, r, txn.callOpts...)
+	if err != nil {
+		return nil, toErr(txn.ctx, err)
+	}
+	return (*TxnResponse)(resp), nil
+}
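+
+// A minimal end-to-end sketch through a *Client cli (error handling elided):
+//
+//	resp, err := cli.Txn(ctx).
+//		If(clientv3.Compare(clientv3.Version("k1"), "=", 0)).
+//		Then(clientv3.OpPut("k1", "v1")).
+//		Else(clientv3.OpGet("k1")).
+//		Commit()
+//
+// resp.Succeeded then reports whether the If comparisons held.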
diff --git a/vendor/github.com/coreos/etcd/clientv3/watch.go b/vendor/github.com/coreos/etcd/clientv3/watch.go
new file mode 100644
index 0000000..d763385
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/watch.go
@@ -0,0 +1,828 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	"context"
+	"fmt"
+	"sync"
+	"time"
+
+	v3rpc "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+	mvccpb "github.com/coreos/etcd/mvcc/mvccpb"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/status"
+)
+
+const (
+	EventTypeDelete = mvccpb.DELETE
+	EventTypePut    = mvccpb.PUT
+
+	closeSendErrTimeout = 250 * time.Millisecond
+)
+
+type Event mvccpb.Event
+
+type WatchChan <-chan WatchResponse
+
+type Watcher interface {
+	// Watch watches on a key or prefix. The watched events will be returned
+	// through the returned channel. If revisions waiting to be sent over the
+	// watch are compacted, then the watch will be canceled by the server, the
+	// client will post a compacted error watch response, and the channel will close.
+	Watch(ctx context.Context, key string, opts ...OpOption) WatchChan
+
+	// Close closes the watcher and cancels all watch requests.
+	Close() error
+}
+
+type WatchResponse struct {
+	Header pb.ResponseHeader
+	Events []*Event
+
+	// CompactRevision is the minimum revision the watcher may receive.
+	CompactRevision int64
+
+	// Canceled is used to indicate watch failure.
+	// If the watch failed and the stream was about to close, the channel
+	// sends a final response with Canceled set to true and a non-nil Err()
+	// before it is closed.
+	Canceled bool
+
+	// Created is used to indicate the creation of the watcher.
+	Created bool
+
+	closeErr error
+
+	// cancelReason is the reason for canceling the watch
+	cancelReason string
+}
+
+// IsCreate returns true if the event indicates that the key is newly created.
+func (e *Event) IsCreate() bool {
+	return e.Type == EventTypePut && e.Kv.CreateRevision == e.Kv.ModRevision
+}
+
+// IsModify returns true if the event indicates that a new value is put on an existing key.
+func (e *Event) IsModify() bool {
+	return e.Type == EventTypePut && e.Kv.CreateRevision != e.Kv.ModRevision
+}
+
+// Err is the error value if this WatchResponse holds an error.
+func (wr *WatchResponse) Err() error {
+	switch {
+	case wr.closeErr != nil:
+		return v3rpc.Error(wr.closeErr)
+	case wr.CompactRevision != 0:
+		return v3rpc.ErrCompacted
+	case wr.Canceled:
+		if len(wr.cancelReason) != 0 {
+			return v3rpc.Error(status.Error(codes.FailedPrecondition, wr.cancelReason))
+		}
+		return v3rpc.ErrFutureRev
+	}
+	return nil
+}
+
+// IsProgressNotify returns true if the WatchResponse is a progress notification.
+func (wr *WatchResponse) IsProgressNotify() bool {
+	return len(wr.Events) == 0 && !wr.Canceled && !wr.Created && wr.CompactRevision == 0 && wr.Header.Revision != 0
+}
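+
+// A typical consumer loop checks Err() on every response (a sketch,
+// assuming a *Client cli created elsewhere):
+//
+//	for wresp := range cli.Watch(ctx, "foo", clientv3.WithPrefix()) {
+//		if err := wresp.Err(); err != nil {
+//			// e.g. rpctypes.ErrCompacted; restart from wresp.CompactRevision
+//			break
+//		}
+//		for _, ev := range wresp.Events {
+//			fmt.Printf("%s %q : %q\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
+//		}
+//	}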
+
+// watcher implements the Watcher interface
+type watcher struct {
+	remote   pb.WatchClient
+	callOpts []grpc.CallOption
+
+	// mu protects the grpc streams map
+	mu sync.RWMutex
+
+	// streams holds all the active grpc streams keyed by ctx value.
+	streams map[string]*watchGrpcStream
+}
+
+// watchGrpcStream tracks all watch resources attached to a single grpc stream.
+type watchGrpcStream struct {
+	owner    *watcher
+	remote   pb.WatchClient
+	callOpts []grpc.CallOption
+
+	// ctx controls internal remote.Watch requests
+	ctx context.Context
+	// ctxKey is the key used when looking up this stream's context
+	ctxKey string
+	cancel context.CancelFunc
+
+	// substreams holds all active watchers on this grpc stream
+	substreams map[int64]*watcherStream
+	// resuming holds all resuming watchers on this grpc stream
+	resuming []*watcherStream
+
+	// reqc sends a watch request from Watch() to the main goroutine
+	reqc chan *watchRequest
+	// respc receives data from the watch client
+	respc chan *pb.WatchResponse
+	// donec closes to broadcast shutdown
+	donec chan struct{}
+	// errc transmits errors from grpc Recv to the watch stream reconnect logic
+	errc chan error
+	// closingc gets the watcherStream of closing watchers
+	closingc chan *watcherStream
+	// wg is Done when all substream goroutines have exited
+	wg sync.WaitGroup
+
+	// resumec closes to signal that all substreams should begin resuming
+	resumec chan struct{}
+	// closeErr is the error that closed the watch stream
+	closeErr error
+}
+
+// watchRequest is issued by the subscriber to start a new watcher
+type watchRequest struct {
+	ctx context.Context
+	key string
+	end string
+	rev int64
+	// send created notification event if this field is true
+	createdNotify bool
+	// progressNotify is for progress updates
+	progressNotify bool
+	// filters is the list of events to filter out
+	filters []pb.WatchCreateRequest_FilterType
+	// get the previous key-value pair before the event happens
+	prevKV bool
+	// retc receives a chan WatchResponse once the watcher is established
+	retc chan chan WatchResponse
+}
+
+// watcherStream represents a registered watcher
+type watcherStream struct {
+	// initReq is the request that initiated this watcher stream
+	initReq watchRequest
+
+	// outc publishes watch responses to subscriber
+	outc chan WatchResponse
+	// recvc buffers watch responses before publishing
+	recvc chan *WatchResponse
+	// donec closes when the watcherStream goroutine stops.
+	donec chan struct{}
+	// closing is set to true when stream should be scheduled to shutdown.
+	closing bool
+	// id is the registered watch id on the grpc stream
+	id int64
+
+	// buf holds all events received from etcd but not yet consumed by the client
+	buf []*WatchResponse
+}
+
+func NewWatcher(c *Client) Watcher {
+	return NewWatchFromWatchClient(pb.NewWatchClient(c.conn), c)
+}
+
+func NewWatchFromWatchClient(wc pb.WatchClient, c *Client) Watcher {
+	w := &watcher{
+		remote:  wc,
+		streams: make(map[string]*watchGrpcStream),
+	}
+	if c != nil {
+		w.callOpts = c.callOpts
+	}
+	return w
+}
+
+// never closes
+var valCtxCh = make(chan struct{})
+var zeroTime = time.Unix(0, 0)
+
+// ctx with only the values; never Done
+type valCtx struct{ context.Context }
+
+func (vc *valCtx) Deadline() (time.Time, bool) { return zeroTime, false }
+func (vc *valCtx) Done() <-chan struct{}       { return valCtxCh }
+func (vc *valCtx) Err() error                  { return nil }
+
+func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream {
+	ctx, cancel := context.WithCancel(&valCtx{inctx})
+	wgs := &watchGrpcStream{
+		owner:      w,
+		remote:     w.remote,
+		callOpts:   w.callOpts,
+		ctx:        ctx,
+		ctxKey:     streamKeyFromCtx(inctx),
+		cancel:     cancel,
+		substreams: make(map[int64]*watcherStream),
+		respc:      make(chan *pb.WatchResponse),
+		reqc:       make(chan *watchRequest),
+		donec:      make(chan struct{}),
+		errc:       make(chan error, 1),
+		closingc:   make(chan *watcherStream),
+		resumec:    make(chan struct{}),
+	}
+	go wgs.run()
+	return wgs
+}
+
+// Watch posts a watch request to run() and waits for a new watcher channel
+func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) WatchChan {
+	ow := opWatch(key, opts...)
+
+	var filters []pb.WatchCreateRequest_FilterType
+	if ow.filterPut {
+		filters = append(filters, pb.WatchCreateRequest_NOPUT)
+	}
+	if ow.filterDelete {
+		filters = append(filters, pb.WatchCreateRequest_NODELETE)
+	}
+
+	wr := &watchRequest{
+		ctx:            ctx,
+		createdNotify:  ow.createdNotify,
+		key:            string(ow.key),
+		end:            string(ow.end),
+		rev:            ow.rev,
+		progressNotify: ow.progressNotify,
+		filters:        filters,
+		prevKV:         ow.prevKV,
+		retc:           make(chan chan WatchResponse, 1),
+	}
+
+	ok := false
+	ctxKey := streamKeyFromCtx(ctx)
+
+	// find or allocate appropriate grpc watch stream
+	w.mu.Lock()
+	if w.streams == nil {
+		// closed
+		w.mu.Unlock()
+		ch := make(chan WatchResponse)
+		close(ch)
+		return ch
+	}
+	wgs := w.streams[ctxKey]
+	if wgs == nil {
+		wgs = w.newWatcherGrpcStream(ctx)
+		w.streams[ctxKey] = wgs
+	}
+	donec := wgs.donec
+	reqc := wgs.reqc
+	w.mu.Unlock()
+
+	// couldn't create channel; return closed channel
+	closeCh := make(chan WatchResponse, 1)
+
+	// submit request
+	select {
+	case reqc <- wr:
+		ok = true
+	case <-wr.ctx.Done():
+	case <-donec:
+		if wgs.closeErr != nil {
+			closeCh <- WatchResponse{closeErr: wgs.closeErr}
+			break
+		}
+		// retry; may have dropped stream from no ctxs
+		return w.Watch(ctx, key, opts...)
+	}
+
+	// receive channel
+	if ok {
+		select {
+		case ret := <-wr.retc:
+			return ret
+		case <-ctx.Done():
+		case <-donec:
+			if wgs.closeErr != nil {
+				closeCh <- WatchResponse{closeErr: wgs.closeErr}
+				break
+			}
+			// retry; may have dropped stream from no ctxs
+			return w.Watch(ctx, key, opts...)
+		}
+	}
+
+	close(closeCh)
+	return closeCh
+}
+
+func (w *watcher) Close() (err error) {
+	w.mu.Lock()
+	streams := w.streams
+	w.streams = nil
+	w.mu.Unlock()
+	for _, wgs := range streams {
+		if werr := wgs.close(); werr != nil {
+			err = werr
+		}
+	}
+	return err
+}
+
+func (w *watchGrpcStream) close() (err error) {
+	w.cancel()
+	<-w.donec
+	select {
+	case err = <-w.errc:
+	default:
+	}
+	return toErr(w.ctx, err)
+}
+
+func (w *watcher) closeStream(wgs *watchGrpcStream) {
+	w.mu.Lock()
+	close(wgs.donec)
+	wgs.cancel()
+	if w.streams != nil {
+		delete(w.streams, wgs.ctxKey)
+	}
+	w.mu.Unlock()
+}
+
+func (w *watchGrpcStream) addSubstream(resp *pb.WatchResponse, ws *watcherStream) {
+	if resp.WatchId == -1 {
+		// failed; no channel
+		close(ws.recvc)
+		return
+	}
+	ws.id = resp.WatchId
+	w.substreams[ws.id] = ws
+}
+
+func (w *watchGrpcStream) sendCloseSubstream(ws *watcherStream, resp *WatchResponse) {
+	select {
+	case ws.outc <- *resp:
+	case <-ws.initReq.ctx.Done():
+	case <-time.After(closeSendErrTimeout):
+	}
+	close(ws.outc)
+}
+
+func (w *watchGrpcStream) closeSubstream(ws *watcherStream) {
+	// send channel response in case stream was never established
+	select {
+	case ws.initReq.retc <- ws.outc:
+	default:
+	}
+	// close subscriber's channel
+	if closeErr := w.closeErr; closeErr != nil && ws.initReq.ctx.Err() == nil {
+		go w.sendCloseSubstream(ws, &WatchResponse{closeErr: w.closeErr})
+	} else if ws.outc != nil {
+		close(ws.outc)
+	}
+	if ws.id != -1 {
+		delete(w.substreams, ws.id)
+		return
+	}
+	for i := range w.resuming {
+		if w.resuming[i] == ws {
+			w.resuming[i] = nil
+			return
+		}
+	}
+}
+
+// run is the root of the goroutines for managing a watcher client
+func (w *watchGrpcStream) run() {
+	var wc pb.Watch_WatchClient
+	var closeErr error
+
+	// substreams marked to close but goroutine still running; needed for
+	// avoiding double-closing recvc on grpc stream teardown
+	closing := make(map[*watcherStream]struct{})
+
+	defer func() {
+		w.closeErr = closeErr
+		// shutdown substreams and resuming substreams
+		for _, ws := range w.substreams {
+			if _, ok := closing[ws]; !ok {
+				close(ws.recvc)
+				closing[ws] = struct{}{}
+			}
+		}
+		for _, ws := range w.resuming {
+			if _, ok := closing[ws]; ws != nil && !ok {
+				close(ws.recvc)
+				closing[ws] = struct{}{}
+			}
+		}
+		w.joinSubstreams()
+		for range closing {
+			w.closeSubstream(<-w.closingc)
+		}
+		w.wg.Wait()
+		w.owner.closeStream(w)
+	}()
+
+	// start a stream with the etcd grpc server
+	if wc, closeErr = w.newWatchClient(); closeErr != nil {
+		return
+	}
+
+	cancelSet := make(map[int64]struct{})
+
+	for {
+		select {
+		// Watch() requested
+		case wreq := <-w.reqc:
+			outc := make(chan WatchResponse, 1)
+			ws := &watcherStream{
+				initReq: *wreq,
+				id:      -1,
+				outc:    outc,
+				// unbuffered so resumes won't cause repeat events
+				recvc: make(chan *WatchResponse),
+			}
+
+			ws.donec = make(chan struct{})
+			w.wg.Add(1)
+			go w.serveSubstream(ws, w.resumec)
+
+			// queue up for watcher creation/resume
+			w.resuming = append(w.resuming, ws)
+			if len(w.resuming) == 1 {
+				// head of resume queue, can register a new watcher
+				wc.Send(ws.initReq.toPB())
+			}
+		// New events from the watch client
+		case pbresp := <-w.respc:
+			switch {
+			case pbresp.Created:
+				// response to head of queue creation
+				if ws := w.resuming[0]; ws != nil {
+					w.addSubstream(pbresp, ws)
+					w.dispatchEvent(pbresp)
+					w.resuming[0] = nil
+				}
+				if ws := w.nextResume(); ws != nil {
+					wc.Send(ws.initReq.toPB())
+				}
+			case pbresp.Canceled && pbresp.CompactRevision == 0:
+				delete(cancelSet, pbresp.WatchId)
+				if ws, ok := w.substreams[pbresp.WatchId]; ok {
+					// signal to stream goroutine to update closingc
+					close(ws.recvc)
+					closing[ws] = struct{}{}
+				}
+			default:
+				// dispatch to appropriate watch stream
+				if ok := w.dispatchEvent(pbresp); ok {
+					break
+				}
+				// watch response on unexpected watch id; cancel id
+				if _, ok := cancelSet[pbresp.WatchId]; ok {
+					break
+				}
+				cancelSet[pbresp.WatchId] = struct{}{}
+				cr := &pb.WatchRequest_CancelRequest{
+					CancelRequest: &pb.WatchCancelRequest{
+						WatchId: pbresp.WatchId,
+					},
+				}
+				req := &pb.WatchRequest{RequestUnion: cr}
+				wc.Send(req)
+			}
+		// watch client failed on Recv; spawn another if possible
+		case err := <-w.errc:
+			if isHaltErr(w.ctx, err) || toErr(w.ctx, err) == v3rpc.ErrNoLeader {
+				closeErr = err
+				return
+			}
+			if wc, closeErr = w.newWatchClient(); closeErr != nil {
+				return
+			}
+			if ws := w.nextResume(); ws != nil {
+				wc.Send(ws.initReq.toPB())
+			}
+			cancelSet = make(map[int64]struct{})
+		case <-w.ctx.Done():
+			return
+		case ws := <-w.closingc:
+			w.closeSubstream(ws)
+			delete(closing, ws)
+			if len(w.substreams)+len(w.resuming) == 0 {
+				// no more watchers on this stream, shutdown
+				return
+			}
+		}
+	}
+}
+
+// nextResume chooses the next resuming watcher to register with the grpc stream.
+// Abandoned streams are marked as nil in the queue since the head must wait for
+// its inflight registration.
+func (w *watchGrpcStream) nextResume() *watcherStream {
+	for len(w.resuming) != 0 {
+		if w.resuming[0] != nil {
+			return w.resuming[0]
+		}
+		w.resuming = w.resuming[1:len(w.resuming)]
+	}
+	return nil
+}
+
+// dispatchEvent sends a WatchResponse to the appropriate watcher stream
+func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool {
+	events := make([]*Event, len(pbresp.Events))
+	for i, ev := range pbresp.Events {
+		events[i] = (*Event)(ev)
+	}
+	wr := &WatchResponse{
+		Header:          *pbresp.Header,
+		Events:          events,
+		CompactRevision: pbresp.CompactRevision,
+		Created:         pbresp.Created,
+		Canceled:        pbresp.Canceled,
+		cancelReason:    pbresp.CancelReason,
+	}
+	ws, ok := w.substreams[pbresp.WatchId]
+	if !ok {
+		return false
+	}
+	select {
+	case ws.recvc <- wr:
+	case <-ws.donec:
+		return false
+	}
+	return true
+}
+
+// serveWatchClient forwards messages from the grpc stream to run()
+func (w *watchGrpcStream) serveWatchClient(wc pb.Watch_WatchClient) {
+	for {
+		resp, err := wc.Recv()
+		if err != nil {
+			select {
+			case w.errc <- err:
+			case <-w.donec:
+			}
+			return
+		}
+		select {
+		case w.respc <- resp:
+		case <-w.donec:
+			return
+		}
+	}
+}
+
+// serveSubstream forwards watch responses from run() to the subscriber
+func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{}) {
+	if ws.closing {
+		panic("created substream goroutine but substream is closing")
+	}
+
+	// nextRev is the minimum expected next revision
+	nextRev := ws.initReq.rev
+	resuming := false
+	defer func() {
+		if !resuming {
+			ws.closing = true
+		}
+		close(ws.donec)
+		if !resuming {
+			w.closingc <- ws
+		}
+		w.wg.Done()
+	}()
+
+	emptyWr := &WatchResponse{}
+	for {
+		curWr := emptyWr
+		outc := ws.outc
+
+		if len(ws.buf) > 0 {
+			curWr = ws.buf[0]
+		} else {
+			outc = nil
+		}
+		select {
+		case outc <- *curWr:
+			if ws.buf[0].Err() != nil {
+				return
+			}
+			ws.buf[0] = nil
+			ws.buf = ws.buf[1:]
+		case wr, ok := <-ws.recvc:
+			if !ok {
+				// shutdown from closeSubstream
+				return
+			}
+
+			if wr.Created {
+				if ws.initReq.retc != nil {
+					ws.initReq.retc <- ws.outc
+					// to prevent next write from taking the slot in buffered channel
+					// and posting duplicate create events
+					ws.initReq.retc = nil
+
+					// send first creation event only if requested
+					if ws.initReq.createdNotify {
+						ws.outc <- *wr
+					}
+					// once the watch channel is returned, a current revision
+					// watch must resume at the store revision. This is necessary
+					// for the following case to work as expected:
+					//	wch := m1.Watch("a")
+					//	m2.Put("a", "b")
+					//	<-wch
+					// If the revision is only bound on the first observed event,
+					// if wch is disconnected before the Put is issued, then reconnects
+					// after it is committed, it'll miss the Put.
+					if ws.initReq.rev == 0 {
+						nextRev = wr.Header.Revision
+					}
+				}
+			} else {
+				// current progress of watch; <= store revision
+				nextRev = wr.Header.Revision
+			}
+
+			if len(wr.Events) > 0 {
+				nextRev = wr.Events[len(wr.Events)-1].Kv.ModRevision + 1
+			}
+			ws.initReq.rev = nextRev
+
+			// created event is already sent above,
+			// watcher should not post duplicate events
+			if wr.Created {
+				continue
+			}
+
+			// TODO pause channel if buffer gets too large
+			ws.buf = append(ws.buf, wr)
+		case <-w.ctx.Done():
+			return
+		case <-ws.initReq.ctx.Done():
+			return
+		case <-resumec:
+			resuming = true
+			return
+		}
+	}
+	// lazily send cancel message if events on missing id
+}
+
+func (w *watchGrpcStream) newWatchClient() (pb.Watch_WatchClient, error) {
+	// mark all substreams as resuming
+	close(w.resumec)
+	w.resumec = make(chan struct{})
+	w.joinSubstreams()
+	for _, ws := range w.substreams {
+		ws.id = -1
+		w.resuming = append(w.resuming, ws)
+	}
+	// strip out nils, if any
+	var resuming []*watcherStream
+	for _, ws := range w.resuming {
+		if ws != nil {
+			resuming = append(resuming, ws)
+		}
+	}
+	w.resuming = resuming
+	w.substreams = make(map[int64]*watcherStream)
+
+	// connect to grpc stream while accepting watcher cancelation
+	stopc := make(chan struct{})
+	donec := w.waitCancelSubstreams(stopc)
+	wc, err := w.openWatchClient()
+	close(stopc)
+	<-donec
+
+	// serve all non-closing streams, even if there's a client error
+	// so that the teardown path can shutdown the streams as expected.
+	for _, ws := range w.resuming {
+		if ws.closing {
+			continue
+		}
+		ws.donec = make(chan struct{})
+		w.wg.Add(1)
+		go w.serveSubstream(ws, w.resumec)
+	}
+
+	if err != nil {
+		return nil, v3rpc.Error(err)
+	}
+
+	// receive data from new grpc stream
+	go w.serveWatchClient(wc)
+	return wc, nil
+}
+
+func (w *watchGrpcStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan struct{} {
+	var wg sync.WaitGroup
+	wg.Add(len(w.resuming))
+	donec := make(chan struct{})
+	for i := range w.resuming {
+		go func(ws *watcherStream) {
+			defer wg.Done()
+			if ws.closing {
+				if ws.initReq.ctx.Err() != nil && ws.outc != nil {
+					close(ws.outc)
+					ws.outc = nil
+				}
+				return
+			}
+			select {
+			case <-ws.initReq.ctx.Done():
+				// closed ws will be removed from resuming
+				ws.closing = true
+				close(ws.outc)
+				ws.outc = nil
+				w.wg.Add(1)
+				go func() {
+					defer w.wg.Done()
+					w.closingc <- ws
+				}()
+			case <-stopc:
+			}
+		}(w.resuming[i])
+	}
+	go func() {
+		defer close(donec)
+		wg.Wait()
+	}()
+	return donec
+}
+
+// joinSubstreams waits for all substream goroutines to complete.
+func (w *watchGrpcStream) joinSubstreams() {
+	for _, ws := range w.substreams {
+		<-ws.donec
+	}
+	for _, ws := range w.resuming {
+		if ws != nil {
+			<-ws.donec
+		}
+	}
+}
+
+var maxBackoff = 100 * time.Millisecond
+
+// openWatchClient retries opening a watch client until success or halt.
+// manually retry in case "ws==nil && err==nil"
+// TODO: remove FailFast=false
+func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) {
+	backoff := time.Millisecond
+	for {
+		select {
+		case <-w.ctx.Done():
+			if err == nil {
+				return nil, w.ctx.Err()
+			}
+			return nil, err
+		default:
+		}
+		if ws, err = w.remote.Watch(w.ctx, w.callOpts...); ws != nil && err == nil {
+			break
+		}
+		if isHaltErr(w.ctx, err) {
+			return nil, v3rpc.Error(err)
+		}
+		if isUnavailableErr(w.ctx, err) {
+			// retry, but backoff
+			if backoff < maxBackoff {
+				// 25% backoff factor
+				backoff = backoff + backoff/4
+				if backoff > maxBackoff {
+					backoff = maxBackoff
+				}
+			}
+			time.Sleep(backoff)
+		}
+	}
+	return ws, nil
+}
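+
+// With the 25% growth factor above, the sleep schedule against a
+// persistently unavailable endpoint is roughly 1ms, 1.25ms, 1.56ms, ...
+// and reaches the 100ms cap (maxBackoff) after about 21 failed attempts.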
+
+// toPB converts an internal watch request structure to its protobuf WatchRequest structure.
+func (wr *watchRequest) toPB() *pb.WatchRequest {
+	req := &pb.WatchCreateRequest{
+		StartRevision:  wr.rev,
+		Key:            []byte(wr.key),
+		RangeEnd:       []byte(wr.end),
+		ProgressNotify: wr.progressNotify,
+		Filters:        wr.filters,
+		PrevKv:         wr.prevKV,
+	}
+	cr := &pb.WatchRequest_CreateRequest{CreateRequest: req}
+	return &pb.WatchRequest{RequestUnion: cr}
+}
+
+func streamKeyFromCtx(ctx context.Context) string {
+	if md, ok := metadata.FromOutgoingContext(ctx); ok {
+		return fmt.Sprintf("%+v", md)
+	}
+	return ""
+}
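+
+// Consequently, two Watch calls whose contexts carry different outgoing
+// metadata land on different gRPC streams (a sketch using the standard
+// grpc metadata helpers):
+//
+//	ctxA := metadata.NewOutgoingContext(context.Background(), metadata.Pairs("k", "a"))
+//	ctxB := metadata.NewOutgoingContext(context.Background(), metadata.Pairs("k", "b"))
+//	chA := w.Watch(ctxA, "foo") // served by one grpc stream
+//	chB := w.Watch(ctxB, "foo") // served by a separate grpc stream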
diff --git a/vendor/github.com/coreos/etcd/compactor/compactor.go b/vendor/github.com/coreos/etcd/compactor/compactor.go
new file mode 100644
index 0000000..8100b69
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/compactor/compactor.go
@@ -0,0 +1,66 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package compactor
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+
+	"github.com/coreos/pkg/capnslog"
+)
+
+var (
+	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "compactor")
+)
+
+const (
+	ModePeriodic = "periodic"
+	ModeRevision = "revision"
+)
+
+// Compactor purges the old log from the storage periodically.
+type Compactor interface {
+	// Run starts the main loop of the compactor in background.
+	// Use Stop() to halt the loop and release the resource.
+	Run()
+	// Stop halts the main loop of the compactor.
+	Stop()
+	// Pause temporarily suspends the compactor so that it does not run compaction. Use Resume() to unpause.
+	Pause()
+	// Resume restarts the compactor suspended by Pause().
+	Resume()
+}
+
+type Compactable interface {
+	Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error)
+}
+
+type RevGetter interface {
+	Rev() int64
+}
+
+func New(mode string, retention time.Duration, rg RevGetter, c Compactable) (Compactor, error) {
+	switch mode {
+	case ModePeriodic:
+		return NewPeriodic(retention, rg, c), nil
+	case ModeRevision:
+		return NewRevision(int64(retention), rg, c), nil
+	default:
+		return nil, fmt.Errorf("unsupported compaction mode %s", mode)
+	}
+}
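+
+// Usage sketch with hypothetical rg/cmp values (in etcd itself these are
+// the server's mvcc store and the server, respectively):
+//
+//	c, err := compactor.New(compactor.ModePeriodic, time.Hour, rg, cmp)
+//	if err != nil {
+//		// unsupported mode
+//	}
+//	c.Run()
+//	defer c.Stop()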
diff --git a/vendor/github.com/coreos/etcd/compactor/doc.go b/vendor/github.com/coreos/etcd/compactor/doc.go
new file mode 100644
index 0000000..cb15834
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/compactor/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package compactor implements automated policies for compacting etcd's mvcc storage.
+package compactor
diff --git a/vendor/github.com/coreos/etcd/compactor/periodic.go b/vendor/github.com/coreos/etcd/compactor/periodic.go
new file mode 100644
index 0000000..9d9164e
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/compactor/periodic.go
@@ -0,0 +1,191 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package compactor
+
+import (
+	"context"
+	"sync"
+	"time"
+
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+	"github.com/coreos/etcd/mvcc"
+
+	"github.com/jonboulle/clockwork"
+)
+
+// Periodic compacts the log by purging revisions older than
+// the configured retention time.
+type Periodic struct {
+	clock  clockwork.Clock
+	period time.Duration
+
+	rg RevGetter
+	c  Compactable
+
+	revs   []int64
+	ctx    context.Context
+	cancel context.CancelFunc
+
+	// mu protects paused
+	mu     sync.RWMutex
+	paused bool
+}
+
+// NewPeriodic creates a new instance of Periodic compactor that purges
+// the log older than h Duration.
+func NewPeriodic(h time.Duration, rg RevGetter, c Compactable) *Periodic {
+	return newPeriodic(clockwork.NewRealClock(), h, rg, c)
+}
+
+func newPeriodic(clock clockwork.Clock, h time.Duration, rg RevGetter, c Compactable) *Periodic {
+	t := &Periodic{
+		clock:  clock,
+		period: h,
+		rg:     rg,
+		c:      c,
+		revs:   make([]int64, 0),
+	}
+	t.ctx, t.cancel = context.WithCancel(context.Background())
+	return t
+}
+
+/*
+Compaction period 1-hour:
+  1. compute compaction period, which is 1-hour
+  2. record revisions for every 1/10 of 1-hour (6-minute)
+  3. keep recording revisions with no compaction for first 1-hour
+  4. do compact with revs[0]
+	- success? continue on for-loop and move sliding window; revs = revs[1:]
+	- failure? update revs, and retry after 1/10 of 1-hour (6-minute)
+
+Compaction period 24-hour:
+  1. compute compaction period, which is 1-hour
+  2. record revisions for every 1/10 of 1-hour (6-minute)
+  3. keep recording revisions with no compaction for first 24-hour
+  4. do compact with revs[0]
+	- success? continue on for-loop and move sliding window; revs = revs[1:]
+	- failure? update revs, and retry after 1/10 of 1-hour (6-minute)
+
+Compaction period 59-min:
+  1. compute compaction period, which is 59-min
+  2. record revisions for every 1/10 of 59-min (5.9-min)
+  3. keep recording revisions with no compaction for first 59-min
+  4. do compact with revs[0]
+	- success? continue on for-loop and move sliding window; revs = revs[1:]
+	- failure? update revs, and retry after 1/10 of 59-min (5.9-min)
+
+Compaction period 5-sec:
+  1. compute compaction period, which is 5-sec
+  2. record revisions for every 1/10 of 5-sec (0.5-sec)
+  3. keep recording revisions with no compaction for first 5-sec
+  4. do compact with revs[0]
+	- success? continue on for-loop and move sliding window; revs = revs[1:]
+	- failure? update revs, and retry after 1/10 of 5-sec (0.5-sec)
+*/
+
+// Run runs periodic compactor.
+func (t *Periodic) Run() {
+	compactInterval := t.getCompactInterval()
+	retryInterval := t.getRetryInterval()
+	retentions := t.getRetentions()
+
+	go func() {
+		lastSuccess := t.clock.Now()
+		baseInterval := t.period
+		for {
+			t.revs = append(t.revs, t.rg.Rev())
+			if len(t.revs) > retentions {
+				t.revs = t.revs[1:] // t.revs[0] is always the rev at t.period ago
+			}
+
+			select {
+			case <-t.ctx.Done():
+				return
+			case <-t.clock.After(retryInterval):
+				t.mu.Lock()
+				p := t.paused
+				t.mu.Unlock()
+				if p {
+					continue
+				}
+			}
+
+			if t.clock.Now().Sub(lastSuccess) < baseInterval {
+				continue
+			}
+
+			// wait up to initial given period
+			if baseInterval == t.period {
+				baseInterval = compactInterval
+			}
+			rev := t.revs[0]
+
+			plog.Noticef("Starting auto-compaction at revision %d (retention: %v)", rev, t.period)
+			_, err := t.c.Compact(t.ctx, &pb.CompactionRequest{Revision: rev})
+			if err == nil || err == mvcc.ErrCompacted {
+				lastSuccess = t.clock.Now()
+				plog.Noticef("Finished auto-compaction at revision %d", rev)
+			} else {
+				plog.Noticef("Failed auto-compaction at revision %d (%v)", rev, err)
+				plog.Noticef("Retry after %v", retryInterval)
+			}
+		}
+	}()
+}
+
+// If the given compaction period x is <1-hour, compact every x.
+// (e.g. --auto-compaction-mode 'periodic' --auto-compaction-retention='10m' compacts every 10 minutes)
+// If the given compaction period x is >1-hour, compact every hour.
+// (e.g. --auto-compaction-mode 'periodic' --auto-compaction-retention='2h' compacts every 1 hour)
+func (t *Periodic) getCompactInterval() time.Duration {
+	itv := t.period
+	if itv > time.Hour {
+		itv = time.Hour
+	}
+	return itv
+}
+
+func (t *Periodic) getRetentions() int {
+	return int(t.period/t.getRetryInterval()) + 1
+}
+
+const retryDivisor = 10
+
+func (t *Periodic) getRetryInterval() time.Duration {
+	itv := t.period
+	if itv > time.Hour {
+		itv = time.Hour
+	}
+	return itv / retryDivisor
+}
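+
+// Worked example, assuming --auto-compaction-retention=10m:
+//
+//	compact interval = 10m             (period < 1h, so not capped)
+//	retry interval   = 10m / 10 = 1m
+//	retentions       = int(10m / 1m) + 1 = 11 recorded revisions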
+
+// Stop stops periodic compactor.
+func (t *Periodic) Stop() {
+	t.cancel()
+}
+
+// Pause pauses periodic compactor.
+func (t *Periodic) Pause() {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	t.paused = true
+}
+
+// Resume resumes periodic compactor.
+func (t *Periodic) Resume() {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	t.paused = false
+}
diff --git a/vendor/github.com/coreos/etcd/compactor/revision.go b/vendor/github.com/coreos/etcd/compactor/revision.go
new file mode 100644
index 0000000..927e41c
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/compactor/revision.go
@@ -0,0 +1,115 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package compactor
+
+import (
+	"context"
+	"sync"
+	"time"
+
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+	"github.com/coreos/etcd/mvcc"
+
+	"github.com/jonboulle/clockwork"
+)
+
+// Revision compacts the log by purging revisions older than
+// the configured revision number. Compaction happens every 5 minutes.
+type Revision struct {
+	clock     clockwork.Clock
+	retention int64
+
+	rg RevGetter
+	c  Compactable
+
+	ctx    context.Context
+	cancel context.CancelFunc
+
+	mu     sync.Mutex
+	paused bool
+}
+
+// NewRevision creates a new instance of the Revision compactor that purges
+// the log older than retention revisions from the current revision.
+func NewRevision(retention int64, rg RevGetter, c Compactable) *Revision {
+	return newRevision(clockwork.NewRealClock(), retention, rg, c)
+}
+
+func newRevision(clock clockwork.Clock, retention int64, rg RevGetter, c Compactable) *Revision {
+	t := &Revision{
+		clock:     clock,
+		retention: retention,
+		rg:        rg,
+		c:         c,
+	}
+	t.ctx, t.cancel = context.WithCancel(context.Background())
+	return t
+}
+
+const revInterval = 5 * time.Minute
+
+// Run runs revision-based compactor.
+func (t *Revision) Run() {
+	prev := int64(0)
+	go func() {
+		for {
+			select {
+			case <-t.ctx.Done():
+				return
+			case <-t.clock.After(revInterval):
+				t.mu.Lock()
+				p := t.paused
+				t.mu.Unlock()
+				if p {
+					continue
+				}
+			}
+
+			rev := t.rg.Rev() - t.retention
+			if rev <= 0 || rev == prev {
+				continue
+			}
+
+			plog.Noticef("Starting auto-compaction at revision %d (retention: %d revisions)", rev, t.retention)
+			_, err := t.c.Compact(t.ctx, &pb.CompactionRequest{Revision: rev})
+			if err == nil || err == mvcc.ErrCompacted {
+				prev = rev
+				plog.Noticef("Finished auto-compaction at revision %d", rev)
+			} else {
+				plog.Noticef("Failed auto-compaction at revision %d (%v)", rev, err)
+				plog.Noticef("Retry after %v", revInterval)
+			}
+		}
+	}()
+}
+
+// Stop stops revision-based compactor.
+func (t *Revision) Stop() {
+	t.cancel()
+}
+
+// Pause pauses revision-based compactor.
+func (t *Revision) Pause() {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	t.paused = true
+}
+
+// Resume resumes revision-based compactor.
+func (t *Revision) Resume() {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	t.paused = false
+}
diff --git a/vendor/github.com/coreos/etcd/cover b/vendor/github.com/coreos/etcd/cover
new file mode 100755
index 0000000..b7ad391
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/cover
@@ -0,0 +1,31 @@
+#!/usr/bin/env bash
+#
+# Generate coverage HTML for a package
+# e.g. PKG=./unit ./cover
+#
+set -e
+
+if [ -z "$PKG" ]; then
+	echo "cover only works with a single package, sorry"
+	exit 255
+fi
+
+COVEROUT="coverage"
+
+if ! [ -d "$COVEROUT" ]; then
+	mkdir "$COVEROUT"
+fi
+
+# strip leading dot/slash and trailing slash and sanitize other slashes
+# e.g. ./etcdserver/etcdhttp/ ==> etcdserver_etcdhttp
+COVERPKG=${PKG/#./}
+COVERPKG=${COVERPKG/#\//}
+COVERPKG=${COVERPKG/%\//}
+COVERPKG=${COVERPKG//\//_}
+
+# generate arg for "go test"
+export COVER="-coverprofile ${COVEROUT}/${COVERPKG}.out"
+
+source ./test
+
+go tool cover -html=${COVEROUT}/${COVERPKG}.out
diff --git a/vendor/github.com/coreos/etcd/discovery/discovery.go b/vendor/github.com/coreos/etcd/discovery/discovery.go
new file mode 100644
index 0000000..7d1fa0d
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/discovery/discovery.go
@@ -0,0 +1,363 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package discovery implements the cluster discovery used by etcd.
+package discovery
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"math"
+	"net/http"
+	"net/url"
+	"path"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/coreos/etcd/client"
+	"github.com/coreos/etcd/pkg/transport"
+	"github.com/coreos/etcd/pkg/types"
+
+	"github.com/coreos/pkg/capnslog"
+	"github.com/jonboulle/clockwork"
+)
+
+var (
+	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "discovery")
+
+	ErrInvalidURL           = errors.New("discovery: invalid URL")
+	ErrBadSizeKey           = errors.New("discovery: size key is bad")
+	ErrSizeNotFound         = errors.New("discovery: size key not found")
+	ErrTokenNotFound        = errors.New("discovery: token not found")
+	ErrDuplicateID          = errors.New("discovery: found duplicate id")
+	ErrDuplicateName        = errors.New("discovery: found duplicate name")
+	ErrFullCluster          = errors.New("discovery: cluster is full")
+	ErrTooManyRetries       = errors.New("discovery: too many retries")
+	ErrBadDiscoveryEndpoint = errors.New("discovery: bad discovery endpoint")
+)
+
+var (
+	// Number of retries discovery will attempt before giving up and erroring out.
+	nRetries             = uint(math.MaxUint32)
+	maxExpoentialRetries = uint(8)
+)
+
+// JoinCluster connects to the discovery service at the given URL and
+// registers the server represented by the given id and config with the cluster.
+func JoinCluster(durl, dproxyurl string, id types.ID, config string) (string, error) {
+	d, err := newDiscovery(durl, dproxyurl, id)
+	if err != nil {
+		return "", err
+	}
+	return d.joinCluster(config)
+}
+
+// GetCluster connects to the discovery service at the given URL and
+// retrieves a string describing the cluster.
+func GetCluster(durl, dproxyurl string) (string, error) {
+	d, err := newDiscovery(durl, dproxyurl, 0)
+	if err != nil {
+		return "", err
+	}
+	return d.getCluster()
+}
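+
+// Usage sketch (the token URL and config string are illustrative; the
+// config follows the "name=peerURL" form of types.URLsMap):
+//
+//	peers, err := discovery.JoinCluster(
+//		"https://discovery.etcd.io/<token>", "", selfID,
+//		"infra0=http://10.0.0.1:2380")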
+
+type discovery struct {
+	cluster string
+	id      types.ID
+	c       client.KeysAPI
+	retries uint
+	url     *url.URL
+
+	clock clockwork.Clock
+}
+
+// newProxyFunc builds a proxy function from the given string, which should
+// represent a URL that can be used as a proxy. It performs basic
+// sanitization of the URL and returns any error encountered.
+func newProxyFunc(proxy string) (func(*http.Request) (*url.URL, error), error) {
+	if proxy == "" {
+		return nil, nil
+	}
+	// Do a small amount of URL sanitization to help the user
+	// Derived from net/http.ProxyFromEnvironment
+	proxyURL, err := url.Parse(proxy)
+	if err != nil || !strings.HasPrefix(proxyURL.Scheme, "http") {
+		// proxy was bogus. Try prepending "http://" to it and
+		// see if that parses correctly. If not, we ignore the
+		// error and complain about the original one
+		var err2 error
+		proxyURL, err2 = url.Parse("http://" + proxy)
+		if err2 == nil {
+			err = nil
+		}
+	}
+	if err != nil {
+		return nil, fmt.Errorf("invalid proxy address %q: %v", proxy, err)
+	}
+
+	plog.Infof("using proxy %q", proxyURL.String())
+	return http.ProxyURL(proxyURL), nil
+}
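+
+// For example (editor's illustration): a bare host:port such as
+// "proxy.example.com:3128" does not parse as an http(s) URL on the first
+// attempt, so newProxyFunc retries with "http://" prepended and returns a
+// proxy function for "http://proxy.example.com:3128".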
+
+func newDiscovery(durl, dproxyurl string, id types.ID) (*discovery, error) {
+	u, err := url.Parse(durl)
+	if err != nil {
+		return nil, err
+	}
+	token := u.Path
+	u.Path = ""
+	pf, err := newProxyFunc(dproxyurl)
+	if err != nil {
+		return nil, err
+	}
+
+	// TODO: add ResponseHeaderTimeout back when watch on discovery service writes header early
+	tr, err := transport.NewTransport(transport.TLSInfo{}, 30*time.Second)
+	if err != nil {
+		return nil, err
+	}
+	tr.Proxy = pf
+	cfg := client.Config{
+		Transport: tr,
+		Endpoints: []string{u.String()},
+	}
+	c, err := client.New(cfg)
+	if err != nil {
+		return nil, err
+	}
+	dc := client.NewKeysAPIWithPrefix(c, "")
+	return &discovery{
+		cluster: token,
+		c:       dc,
+		id:      id,
+		url:     u,
+		clock:   clockwork.NewRealClock(),
+	}, nil
+}
+
+func (d *discovery) joinCluster(config string) (string, error) {
+	// fast path: if the cluster is full, return the error;
+	// there is no need to register to the cluster in this case.
+	if _, _, _, err := d.checkCluster(); err != nil {
+		return "", err
+	}
+
+	if err := d.createSelf(config); err != nil {
+		// This fails even when the error is only a createSelf timeout.
+		// TODO(barakmich): Retrying the same node might want to succeed here
+		// (ie, createSelf should be idempotent for discovery).
+		return "", err
+	}
+
+	nodes, size, index, err := d.checkCluster()
+	if err != nil {
+		return "", err
+	}
+
+	all, err := d.waitNodes(nodes, size, index)
+	if err != nil {
+		return "", err
+	}
+
+	return nodesToCluster(all, size)
+}
+
+func (d *discovery) getCluster() (string, error) {
+	nodes, size, index, err := d.checkCluster()
+	if err != nil {
+		if err == ErrFullCluster {
+			return nodesToCluster(nodes, size)
+		}
+		return "", err
+	}
+
+	all, err := d.waitNodes(nodes, size, index)
+	if err != nil {
+		return "", err
+	}
+	return nodesToCluster(all, size)
+}
+
+func (d *discovery) createSelf(contents string) error {
+	ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
+	resp, err := d.c.Create(ctx, d.selfKey(), contents)
+	cancel()
+	if err != nil {
+		if eerr, ok := err.(client.Error); ok && eerr.Code == client.ErrorCodeNodeExist {
+			return ErrDuplicateID
+		}
+		return err
+	}
+
+	// ensure self appears on the server we connected to
+	w := d.c.Watcher(d.selfKey(), &client.WatcherOptions{AfterIndex: resp.Node.CreatedIndex - 1})
+	_, err = w.Next(context.Background())
+	return err
+}
+
+func (d *discovery) checkCluster() ([]*client.Node, int, uint64, error) {
+	configKey := path.Join("/", d.cluster, "_config")
+	ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
+	// find cluster size
+	resp, err := d.c.Get(ctx, path.Join(configKey, "size"), nil)
+	cancel()
+	if err != nil {
+		if eerr, ok := err.(*client.Error); ok && eerr.Code == client.ErrorCodeKeyNotFound {
+			return nil, 0, 0, ErrSizeNotFound
+		}
+		if err == client.ErrInvalidJSON {
+			return nil, 0, 0, ErrBadDiscoveryEndpoint
+		}
+		if ce, ok := err.(*client.ClusterError); ok {
+			plog.Error(ce.Detail())
+			return d.checkClusterRetry()
+		}
+		return nil, 0, 0, err
+	}
+	size, err := strconv.Atoi(resp.Node.Value)
+	if err != nil {
+		return nil, 0, 0, ErrBadSizeKey
+	}
+
+	ctx, cancel = context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
+	resp, err = d.c.Get(ctx, d.cluster, nil)
+	cancel()
+	if err != nil {
+		if ce, ok := err.(*client.ClusterError); ok {
+			plog.Error(ce.Detail())
+			return d.checkClusterRetry()
+		}
+		return nil, 0, 0, err
+	}
+	var nodes []*client.Node
+	// append non-config keys to nodes
+	for _, n := range resp.Node.Nodes {
+		if path.Base(n.Key) != path.Base(configKey) {
+			nodes = append(nodes, n)
+		}
+	}
+
+	snodes := sortableNodes{nodes}
+	sort.Sort(snodes)
+
+	// find self position
+	for i := range nodes {
+		if path.Base(nodes[i].Key) == path.Base(d.selfKey()) {
+			break
+		}
+		if i >= size-1 {
+			return nodes[:size], size, resp.Index, ErrFullCluster
+		}
+	}
+	return nodes, size, resp.Index, nil
+}
+
+func (d *discovery) logAndBackoffForRetry(step string) {
+	d.retries++
+	// logAndBackoffForRetry stops growing the backoff exponentially once the
+	// retry count exceeds maxExponentialRetries, falling back to a constant
+	// backoff afterward.
+	retries := d.retries
+	if retries > maxExponentialRetries {
+		retries = maxExponentialRetries
+	}
+	retryTimeInSecond := time.Duration(0x1<<retries) * time.Second
+	plog.Infof("%s: error connecting to %s, retrying in %s", step, d.url, retryTimeInSecond)
+	d.clock.Sleep(retryTimeInSecond)
+}
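+
+// Editor's note (illustrative arithmetic): the backoff doubles per retry,
+// 2s, 4s, 8s, ..., up to 1<<maxExponentialRetries = 256 seconds, and stays
+// constant at 256s for every retry after the eighth.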
+
+func (d *discovery) checkClusterRetry() ([]*client.Node, int, uint64, error) {
+	if d.retries < nRetries {
+		d.logAndBackoffForRetry("cluster status check")
+		return d.checkCluster()
+	}
+	return nil, 0, 0, ErrTooManyRetries
+}
+
+func (d *discovery) waitNodesRetry() ([]*client.Node, error) {
+	if d.retries < nRetries {
+		d.logAndBackoffForRetry("waiting for other nodes")
+		nodes, n, index, err := d.checkCluster()
+		if err != nil {
+			return nil, err
+		}
+		return d.waitNodes(nodes, n, index)
+	}
+	return nil, ErrTooManyRetries
+}
+
+func (d *discovery) waitNodes(nodes []*client.Node, size int, index uint64) ([]*client.Node, error) {
+	if len(nodes) > size {
+		nodes = nodes[:size]
+	}
+	// watch from the next index
+	w := d.c.Watcher(d.cluster, &client.WatcherOptions{AfterIndex: index, Recursive: true})
+	all := make([]*client.Node, len(nodes))
+	copy(all, nodes)
+	for _, n := range all {
+		if path.Base(n.Key) == path.Base(d.selfKey()) {
+			plog.Noticef("found self %s in the cluster", path.Base(d.selfKey()))
+		} else {
+			plog.Noticef("found peer %s in the cluster", path.Base(n.Key))
+		}
+	}
+
+	// wait for others
+	for len(all) < size {
+		plog.Noticef("found %d peer(s), waiting for %d more", len(all), size-len(all))
+		resp, err := w.Next(context.Background())
+		if err != nil {
+			if ce, ok := err.(*client.ClusterError); ok {
+				plog.Error(ce.Detail())
+				return d.waitNodesRetry()
+			}
+			return nil, err
+		}
+		plog.Noticef("found peer %s in the cluster", path.Base(resp.Node.Key))
+		all = append(all, resp.Node)
+	}
+	plog.Noticef("found %d needed peer(s)", len(all))
+	return all, nil
+}
+
+func (d *discovery) selfKey() string {
+	return path.Join("/", d.cluster, d.id.String())
+}
+
+func nodesToCluster(ns []*client.Node, size int) (string, error) {
+	s := make([]string, len(ns))
+	for i, n := range ns {
+		s[i] = n.Value
+	}
+	us := strings.Join(s, ",")
+	m, err := types.NewURLsMap(us)
+	if err != nil {
+		return us, ErrInvalidURL
+	}
+	if m.Len() != size {
+		return us, ErrDuplicateName
+	}
+	return us, nil
+}
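+
+// For illustration (hypothetical values): given three nodes whose values are
+// "infra0=http://10.0.1.10:2380", "infra1=http://10.0.1.11:2380", and
+// "infra2=http://10.0.1.12:2380" with size=3, nodesToCluster returns them
+// joined as a single comma-separated initial-cluster string.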
+
+type sortableNodes struct{ Nodes []*client.Node }
+
+func (ns sortableNodes) Len() int { return len(ns.Nodes) }
+func (ns sortableNodes) Less(i, j int) bool {
+	return ns.Nodes[i].CreatedIndex < ns.Nodes[j].CreatedIndex
+}
+func (ns sortableNodes) Swap(i, j int) { ns.Nodes[i], ns.Nodes[j] = ns.Nodes[j], ns.Nodes[i] }
diff --git a/vendor/github.com/coreos/etcd/embed/config.go b/vendor/github.com/coreos/etcd/embed/config.go
new file mode 100644
index 0000000..835e051
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/embed/config.go
@@ -0,0 +1,699 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package embed
+
+import (
+	"crypto/tls"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"net/url"
+	"os"
+	"path/filepath"
+	"strings"
+	"time"
+
+	"github.com/coreos/etcd/compactor"
+	"github.com/coreos/etcd/etcdserver"
+	"github.com/coreos/etcd/pkg/cors"
+	"github.com/coreos/etcd/pkg/netutil"
+	"github.com/coreos/etcd/pkg/srv"
+	"github.com/coreos/etcd/pkg/tlsutil"
+	"github.com/coreos/etcd/pkg/transport"
+	"github.com/coreos/etcd/pkg/types"
+
+	"github.com/coreos/pkg/capnslog"
+	"github.com/ghodss/yaml"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/grpclog"
+)
+
+const (
+	ClusterStateFlagNew      = "new"
+	ClusterStateFlagExisting = "existing"
+
+	DefaultName                  = "default"
+	DefaultMaxSnapshots          = 5
+	DefaultMaxWALs               = 5
+	DefaultMaxTxnOps             = uint(128)
+	DefaultMaxRequestBytes       = 1.5 * 1024 * 1024
+	DefaultGRPCKeepAliveMinTime  = 5 * time.Second
+	DefaultGRPCKeepAliveInterval = 2 * time.Hour
+	DefaultGRPCKeepAliveTimeout  = 20 * time.Second
+
+	DefaultListenPeerURLs   = "http://localhost:2380"
+	DefaultListenClientURLs = "http://localhost:2379"
+
+	DefaultLogOutput = "default"
+
+	// DefaultStrictReconfigCheck is the default value for "--strict-reconfig-check" flag.
+	// It's enabled by default.
+	DefaultStrictReconfigCheck = true
+	// DefaultEnableV2 is the default value for "--enable-v2" flag.
+	// v2 is enabled by default.
+	// TODO: disable v2 when deprecated.
+	DefaultEnableV2 = true
+
+	// maxElectionMs specifies the maximum value of election timeout.
+	// More details are listed in ../Documentation/tuning.md#time-parameters.
+	maxElectionMs = 50000
+)
+
+var (
+	ErrConflictBootstrapFlags = fmt.Errorf("multiple discovery or bootstrap flags are set. " +
+		"Choose one of \"initial-cluster\", \"discovery\" or \"discovery-srv\"")
+	ErrUnsetAdvertiseClientURLsFlag = fmt.Errorf("--advertise-client-urls is required when --listen-client-urls is set explicitly")
+
+	DefaultInitialAdvertisePeerURLs = "http://localhost:2380"
+	DefaultAdvertiseClientURLs      = "http://localhost:2379"
+
+	defaultHostname   string
+	defaultHostStatus error
+)
+
+func init() {
+	defaultHostname, defaultHostStatus = netutil.GetDefaultHost()
+}
+
+// Config holds the arguments for configuring an etcd server.
+type Config struct {
+	// member
+
+	CorsInfo       *cors.CORSInfo
+	LPUrls, LCUrls []url.URL
+	Dir            string `json:"data-dir"`
+	WalDir         string `json:"wal-dir"`
+	MaxSnapFiles   uint   `json:"max-snapshots"`
+	MaxWalFiles    uint   `json:"max-wals"`
+	Name           string `json:"name"`
+	SnapCount      uint64 `json:"snapshot-count"`
+
+	// AutoCompactionMode is either 'periodic' or 'revision'.
+	AutoCompactionMode string `json:"auto-compaction-mode"`
+	// AutoCompactionRetention is either a duration string with a time unit
+	// (e.g. '5m' for 5 minutes), or a revision count (e.g. '5000').
+	// If no time unit is provided and the compaction mode is 'periodic',
+	// the unit defaults to hours. For example, '5' translates into 5 hours.
+	AutoCompactionRetention string `json:"auto-compaction-retention"`
+
+	// TickMs is the number of milliseconds between heartbeat ticks.
+	// TODO: decouple tickMs and heartbeat tick (current heartbeat tick = 1).
+	// make ticks a cluster wide configuration.
+	TickMs     uint `json:"heartbeat-interval"`
+	ElectionMs uint `json:"election-timeout"`
+
+	// If InitialElectionTickAdvance is true, the local member fast-forwards
+	// its election ticks to speed up the "initial" leader election trigger.
+	// This benefits the case of larger election ticks. For instance, a cross
+	// datacenter deployment may require a longer election timeout of 10 seconds.
+	// If true, the local node does not need to wait the full 10 seconds;
+	// instead, it fast-forwards its election ticks so that only 2 seconds
+	// remain before a leader election can trigger.
+	//
+	// Major assumptions are that:
+	//  - cluster has no active leader thus advancing ticks enables faster
+	//    leader election, or
+	//  - cluster already has an established leader, and rejoining follower
+	//    is likely to receive heartbeats from the leader after tick advance
+	//    and before election timeout.
+	//
+	// However, when the network from the leader to a rejoining follower is
+	// congested and the follower does not receive a leader heartbeat within
+	// the remaining election ticks, a disruptive election has to happen,
+	// affecting cluster availability.
+	//
+	// Disabling this would slow down the initial bootstrap process for cross
+	// datacenter deployments. Make your own tradeoff by configuring
+	// --initial-election-tick-advance at the cost of slow initial bootstrap.
+	//
+	// If single-node, it advances ticks regardless.
+	//
+	// See https://github.com/coreos/etcd/issues/9333 for more detail.
+	InitialElectionTickAdvance bool `json:"initial-election-tick-advance"`
+
+	QuotaBackendBytes int64 `json:"quota-backend-bytes"`
+	MaxTxnOps         uint  `json:"max-txn-ops"`
+	MaxRequestBytes   uint  `json:"max-request-bytes"`
+
+	// gRPC server options
+
+	// GRPCKeepAliveMinTime is the minimum interval a client should wait
+	// before pinging the server. When a client pings "too fast", the server
+	// sends a GOAWAY and closes the connection (errors: too_many_pings,
+	// http2.ErrCodeEnhanceYourCalm). When too slow, nothing happens.
+	// The server expects client pings only when there are active streams
+	// (PermitWithoutStream is set to false).
+	GRPCKeepAliveMinTime time.Duration `json:"grpc-keepalive-min-time"`
+	// GRPCKeepAliveInterval is the frequency of server-to-client ping
+	// to check if a connection is alive. Close a non-responsive connection
+	// after an additional duration of Timeout. 0 to disable.
+	GRPCKeepAliveInterval time.Duration `json:"grpc-keepalive-interval"`
+	// GRPCKeepAliveTimeout is the additional duration of wait
+	// before closing a non-responsive connection. 0 to disable.
+	GRPCKeepAliveTimeout time.Duration `json:"grpc-keepalive-timeout"`
+
+	// clustering
+
+	APUrls, ACUrls      []url.URL
+	ClusterState        string `json:"initial-cluster-state"`
+	DNSCluster          string `json:"discovery-srv"`
+	Dproxy              string `json:"discovery-proxy"`
+	Durl                string `json:"discovery"`
+	InitialCluster      string `json:"initial-cluster"`
+	InitialClusterToken string `json:"initial-cluster-token"`
+	StrictReconfigCheck bool   `json:"strict-reconfig-check"`
+	EnableV2            bool   `json:"enable-v2"`
+
+	// security
+
+	ClientTLSInfo transport.TLSInfo
+	ClientAutoTLS bool
+	PeerTLSInfo   transport.TLSInfo
+	PeerAutoTLS   bool
+
+	// CipherSuites is a list of supported TLS cipher suites between
+	// client/server and peers. If empty, Go auto-populates the list.
+	// Note that cipher suites are prioritized in the given order.
+	CipherSuites []string `json:"cipher-suites"`
+
+	// debug
+
+	Debug                 bool   `json:"debug"`
+	LogPkgLevels          string `json:"log-package-levels"`
+	LogOutput             string `json:"log-output"`
+	EnablePprof           bool   `json:"enable-pprof"`
+	Metrics               string `json:"metrics"`
+	ListenMetricsUrls     []url.URL
+	ListenMetricsUrlsJSON string `json:"listen-metrics-urls"`
+
+	// ForceNewCluster starts a new cluster even if previously started; unsafe.
+	ForceNewCluster bool `json:"force-new-cluster"`
+
+	// UserHandlers is for registering users' handlers and is only used for
+	// embedding etcd into other applications.
+	// The map key is the route path for the handler, and
+	// you must ensure it does not conflict with etcd's own routes.
+	UserHandlers map[string]http.Handler `json:"-"`
+	// ServiceRegister is for registering users' gRPC services. A simple usage example:
+	//	cfg := embed.NewConfig()
+	//	cfg.ServiceRegister = func(s *grpc.Server) {
+	//		pb.RegisterFooServer(s, &fooServer{})
+	//		pb.RegisterBarServer(s, &barServer{})
+	//	}
+	//	embed.StartEtcd(cfg)
+	ServiceRegister func(*grpc.Server) `json:"-"`
+
+	// auth
+
+	AuthToken string `json:"auth-token"`
+
+	// Experimental flags
+
+	ExperimentalInitialCorruptCheck bool          `json:"experimental-initial-corrupt-check"`
+	ExperimentalCorruptCheckTime    time.Duration `json:"experimental-corrupt-check-time"`
+	ExperimentalEnableV2V3          string        `json:"experimental-enable-v2v3"`
+}
+
+// configYAML holds the config suitable for yaml parsing
+type configYAML struct {
+	Config
+	configJSON
+}
+
+// configJSON has file options that are translated into Config options
+type configJSON struct {
+	LPUrlsJSON         string         `json:"listen-peer-urls"`
+	LCUrlsJSON         string         `json:"listen-client-urls"`
+	CorsJSON           string         `json:"cors"`
+	APUrlsJSON         string         `json:"initial-advertise-peer-urls"`
+	ACUrlsJSON         string         `json:"advertise-client-urls"`
+	ClientSecurityJSON securityConfig `json:"client-transport-security"`
+	PeerSecurityJSON   securityConfig `json:"peer-transport-security"`
+}
+
+type securityConfig struct {
+	CAFile        string `json:"ca-file"`
+	CertFile      string `json:"cert-file"`
+	KeyFile       string `json:"key-file"`
+	CertAuth      bool   `json:"client-cert-auth"`
+	TrustedCAFile string `json:"trusted-ca-file"`
+	AutoTLS       bool   `json:"auto-tls"`
+}
+
+// NewConfig creates a new Config populated with default values.
+func NewConfig() *Config {
+	lpurl, _ := url.Parse(DefaultListenPeerURLs)
+	apurl, _ := url.Parse(DefaultInitialAdvertisePeerURLs)
+	lcurl, _ := url.Parse(DefaultListenClientURLs)
+	acurl, _ := url.Parse(DefaultAdvertiseClientURLs)
+	cfg := &Config{
+		CorsInfo:                   &cors.CORSInfo{},
+		MaxSnapFiles:               DefaultMaxSnapshots,
+		MaxWalFiles:                DefaultMaxWALs,
+		Name:                       DefaultName,
+		SnapCount:                  etcdserver.DefaultSnapCount,
+		MaxTxnOps:                  DefaultMaxTxnOps,
+		MaxRequestBytes:            DefaultMaxRequestBytes,
+		GRPCKeepAliveMinTime:       DefaultGRPCKeepAliveMinTime,
+		GRPCKeepAliveInterval:      DefaultGRPCKeepAliveInterval,
+		GRPCKeepAliveTimeout:       DefaultGRPCKeepAliveTimeout,
+		TickMs:                     100,
+		ElectionMs:                 1000,
+		InitialElectionTickAdvance: true,
+		LPUrls:              []url.URL{*lpurl},
+		LCUrls:              []url.URL{*lcurl},
+		APUrls:              []url.URL{*apurl},
+		ACUrls:              []url.URL{*acurl},
+		ClusterState:        ClusterStateFlagNew,
+		InitialClusterToken: "etcd-cluster",
+		StrictReconfigCheck: DefaultStrictReconfigCheck,
+		LogOutput:           DefaultLogOutput,
+		Metrics:             "basic",
+		EnableV2:            DefaultEnableV2,
+		AuthToken:           "simple",
+	}
+	cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name)
+	return cfg
+}
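+
+// A minimal usage sketch (editor's illustration; the field values are
+// hypothetical, the calls are the ones defined in this package):
+//
+//	cfg := embed.NewConfig()
+//	cfg.Name = "infra1"
+//	cfg.Dir = "/var/lib/etcd"
+//	if err := cfg.Validate(); err != nil {
+//		// handle invalid configuration
+//	}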
+
+func logTLSHandshakeFailure(conn *tls.Conn, err error) {
+	state := conn.ConnectionState()
+	remoteAddr := conn.RemoteAddr().String()
+	serverName := state.ServerName
+	if len(state.PeerCertificates) > 0 {
+		cert := state.PeerCertificates[0]
+		ips, dns := cert.IPAddresses, cert.DNSNames
+		plog.Infof("rejected connection from %q (error %q, ServerName %q, IPAddresses %q, DNSNames %q)", remoteAddr, err.Error(), serverName, ips, dns)
+	} else {
+		plog.Infof("rejected connection from %q (error %q, ServerName %q)", remoteAddr, err.Error(), serverName)
+	}
+}
+
+// SetupLogging initializes etcd logging.
+// Must be called after flag parsing.
+func (cfg *Config) SetupLogging() {
+	cfg.ClientTLSInfo.HandshakeFailure = logTLSHandshakeFailure
+	cfg.PeerTLSInfo.HandshakeFailure = logTLSHandshakeFailure
+
+	capnslog.SetGlobalLogLevel(capnslog.INFO)
+	if cfg.Debug {
+		capnslog.SetGlobalLogLevel(capnslog.DEBUG)
+		grpc.EnableTracing = true
+		// enable info, warning, error
+		grpclog.SetLoggerV2(grpclog.NewLoggerV2(os.Stderr, os.Stderr, os.Stderr))
+	} else {
+		// only discard info
+		grpclog.SetLoggerV2(grpclog.NewLoggerV2(ioutil.Discard, os.Stderr, os.Stderr))
+	}
+	if cfg.LogPkgLevels != "" {
+		repoLog := capnslog.MustRepoLogger("github.com/coreos/etcd")
+		settings, err := repoLog.ParseLogLevelConfig(cfg.LogPkgLevels)
+		if err != nil {
+			plog.Warningf("couldn't parse log level string: %s, continuing with default levels", err.Error())
+			return
+		}
+		repoLog.SetLogLevel(settings)
+	}
+
+	// capnslog initially calls SetFormatter(NewDefaultFormatter(os.Stderr)),
+	// where NewDefaultFormatter returns NewJournaldFormatter when syscall.Getppid() == 1.
+	// Specify 'stdout' or 'stderr' to skip journald logging even when running under systemd.
+	switch cfg.LogOutput {
+	case "stdout":
+		capnslog.SetFormatter(capnslog.NewPrettyFormatter(os.Stdout, cfg.Debug))
+	case "stderr":
+		capnslog.SetFormatter(capnslog.NewPrettyFormatter(os.Stderr, cfg.Debug))
+	case DefaultLogOutput:
+	default:
+		plog.Panicf(`unknown log-output %q (only supports %q, "stdout", "stderr")`, cfg.LogOutput, DefaultLogOutput)
+	}
+}
+
+func ConfigFromFile(path string) (*Config, error) {
+	cfg := &configYAML{Config: *NewConfig()}
+	if err := cfg.configFromFile(path); err != nil {
+		return nil, err
+	}
+	return &cfg.Config, nil
+}
+
+func (cfg *configYAML) configFromFile(path string) error {
+	b, err := ioutil.ReadFile(path)
+	if err != nil {
+		return err
+	}
+
+	defaultInitialCluster := cfg.InitialCluster
+
+	err = yaml.Unmarshal(b, cfg)
+	if err != nil {
+		return err
+	}
+
+	if cfg.LPUrlsJSON != "" {
+		u, err := types.NewURLs(strings.Split(cfg.LPUrlsJSON, ","))
+		if err != nil {
+			plog.Fatalf("unexpected error setting up listen-peer-urls: %v", err)
+		}
+		cfg.LPUrls = []url.URL(u)
+	}
+
+	if cfg.LCUrlsJSON != "" {
+		u, err := types.NewURLs(strings.Split(cfg.LCUrlsJSON, ","))
+		if err != nil {
+			plog.Fatalf("unexpected error setting up listen-client-urls: %v", err)
+		}
+		cfg.LCUrls = []url.URL(u)
+	}
+
+	if cfg.CorsJSON != "" {
+		if err := cfg.CorsInfo.Set(cfg.CorsJSON); err != nil {
+			plog.Panicf("unexpected error setting up cors: %v", err)
+		}
+	}
+
+	if cfg.APUrlsJSON != "" {
+		u, err := types.NewURLs(strings.Split(cfg.APUrlsJSON, ","))
+		if err != nil {
+			plog.Fatalf("unexpected error setting up initial-advertise-peer-urls: %v", err)
+		}
+		cfg.APUrls = []url.URL(u)
+	}
+
+	if cfg.ACUrlsJSON != "" {
+		u, err := types.NewURLs(strings.Split(cfg.ACUrlsJSON, ","))
+		if err != nil {
+			plog.Fatalf("unexpected error setting up advertise-peer-urls: %v", err)
+		}
+		cfg.ACUrls = []url.URL(u)
+	}
+
+	if cfg.ListenMetricsUrlsJSON != "" {
+		u, err := types.NewURLs(strings.Split(cfg.ListenMetricsUrlsJSON, ","))
+		if err != nil {
+			plog.Fatalf("unexpected error setting up listen-metrics-urls: %v", err)
+		}
+		cfg.ListenMetricsUrls = []url.URL(u)
+	}
+
+	// If a discovery flag is set, clear default initial cluster set by InitialClusterFromName
+	if (cfg.Durl != "" || cfg.DNSCluster != "") && cfg.InitialCluster == defaultInitialCluster {
+		cfg.InitialCluster = ""
+	}
+	if cfg.ClusterState == "" {
+		cfg.ClusterState = ClusterStateFlagNew
+	}
+
+	copySecurityDetails := func(tls *transport.TLSInfo, ysc *securityConfig) {
+		tls.CAFile = ysc.CAFile
+		tls.CertFile = ysc.CertFile
+		tls.KeyFile = ysc.KeyFile
+		tls.ClientCertAuth = ysc.CertAuth
+		tls.TrustedCAFile = ysc.TrustedCAFile
+	}
+	copySecurityDetails(&cfg.ClientTLSInfo, &cfg.ClientSecurityJSON)
+	copySecurityDetails(&cfg.PeerTLSInfo, &cfg.PeerSecurityJSON)
+	cfg.ClientAutoTLS = cfg.ClientSecurityJSON.AutoTLS
+	cfg.PeerAutoTLS = cfg.PeerSecurityJSON.AutoTLS
+
+	return cfg.Validate()
+}
+
+func updateCipherSuites(tls *transport.TLSInfo, ss []string) error {
+	if len(tls.CipherSuites) > 0 && len(ss) > 0 {
+		return fmt.Errorf("TLSInfo.CipherSuites is already specified (given %v)", ss)
+	}
+	if len(ss) > 0 {
+		cs := make([]uint16, len(ss))
+		for i, s := range ss {
+			var ok bool
+			cs[i], ok = tlsutil.GetCipherSuite(s)
+			if !ok {
+				return fmt.Errorf("unexpected TLS cipher suite %q", s)
+			}
+		}
+		tls.CipherSuites = cs
+	}
+	return nil
+}
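+
+// For example (editor's illustration): passing
+// []string{"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"} maps the name through
+// tlsutil.GetCipherSuite into its uint16 ID; an unknown suite name yields
+// an error instead.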
+
+// Validate ensures that '*embed.Config' fields are properly configured.
+func (cfg *Config) Validate() error {
+	if err := checkBindURLs(cfg.LPUrls); err != nil {
+		return err
+	}
+	if err := checkBindURLs(cfg.LCUrls); err != nil {
+		return err
+	}
+	if err := checkBindURLs(cfg.ListenMetricsUrls); err != nil {
+		return err
+	}
+	if err := checkHostURLs(cfg.APUrls); err != nil {
+		// TODO: return err in v3.4
+		addrs := make([]string, len(cfg.APUrls))
+		for i := range cfg.APUrls {
+			addrs[i] = cfg.APUrls[i].String()
+		}
+		plog.Warningf("advertise-peer-urls %q is deprecated (%v)", strings.Join(addrs, ","), err)
+	}
+	if err := checkHostURLs(cfg.ACUrls); err != nil {
+		// TODO: return err in v3.4
+		addrs := make([]string, len(cfg.ACUrls))
+		for i := range cfg.ACUrls {
+			addrs[i] = cfg.ACUrls[i].String()
+		}
+		plog.Warningf("advertise-client-urls %q is deprecated (%v)", strings.Join(addrs, ","), err)
+	}
+
+	// Check if conflicting flags are passed.
+	nSet := 0
+	for _, v := range []bool{cfg.Durl != "", cfg.InitialCluster != "", cfg.DNSCluster != ""} {
+		if v {
+			nSet++
+		}
+	}
+
+	if cfg.ClusterState != ClusterStateFlagNew && cfg.ClusterState != ClusterStateFlagExisting {
+		return fmt.Errorf("unexpected clusterState %q", cfg.ClusterState)
+	}
+
+	if nSet > 1 {
+		return ErrConflictBootstrapFlags
+	}
+
+	if cfg.TickMs <= 0 {
+		return fmt.Errorf("--heartbeat-interval must be >0 (set to %dms)", cfg.TickMs)
+	}
+	if cfg.ElectionMs <= 0 {
+		return fmt.Errorf("--election-timeout must be >0 (set to %dms)", cfg.ElectionMs)
+	}
+	if 5*cfg.TickMs > cfg.ElectionMs {
+		return fmt.Errorf("--election-timeout[%vms] should be at least as 5 times as --heartbeat-interval[%vms]", cfg.ElectionMs, cfg.TickMs)
+	}
+	if cfg.ElectionMs > maxElectionMs {
+		return fmt.Errorf("--election-timeout[%vms] is too long, and should be set less than %vms", cfg.ElectionMs, maxElectionMs)
+	}
+
+	// check this last since proxying in etcdmain may make this OK
+	if cfg.LCUrls != nil && cfg.ACUrls == nil {
+		return ErrUnsetAdvertiseClientURLsFlag
+	}
+
+	switch cfg.AutoCompactionMode {
+	case "":
+	case compactor.ModeRevision, compactor.ModePeriodic:
+	default:
+		return fmt.Errorf("unknown auto-compaction-mode %q", cfg.AutoCompactionMode)
+	}
+
+	return nil
+}
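+
+// Worked example (editor's illustration): with TickMs=100 and ElectionMs=400,
+// Validate fails the timing check because 5*100=500 > 400, while the defaults
+// (TickMs=100, ElectionMs=1000) pass it.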
+
+// PeerURLsMapAndToken sets up an initial peer URLsMap and cluster token for bootstrap or discovery.
+func (cfg *Config) PeerURLsMapAndToken(which string) (urlsmap types.URLsMap, token string, err error) {
+	token = cfg.InitialClusterToken
+	switch {
+	case cfg.Durl != "":
+		urlsmap = types.URLsMap{}
+		// If using discovery, generate a temporary cluster based on
+		// self's advertised peer URLs
+		urlsmap[cfg.Name] = cfg.APUrls
+		token = cfg.Durl
+	case cfg.DNSCluster != "":
+		clusterStrs, cerr := srv.GetCluster("etcd-server", cfg.Name, cfg.DNSCluster, cfg.APUrls)
+		if cerr != nil {
+			plog.Errorf("couldn't resolve during SRV discovery (%v)", cerr)
+			return nil, "", cerr
+		}
+		for _, s := range clusterStrs {
+			plog.Noticef("got bootstrap from DNS for etcd-server at %s", s)
+		}
+		clusterStr := strings.Join(clusterStrs, ",")
+		if strings.Contains(clusterStr, "https://") && cfg.PeerTLSInfo.CAFile == "" {
+			cfg.PeerTLSInfo.ServerName = cfg.DNSCluster
+		}
+		urlsmap, err = types.NewURLsMap(clusterStr)
+		// Only an etcd member must belong to the discovered cluster;
+		// a proxy does not need to.
+		if which == "etcd" {
+			if _, ok := urlsmap[cfg.Name]; !ok {
+				return nil, "", fmt.Errorf("cannot find local etcd member %q in SRV records", cfg.Name)
+			}
+		}
+	default:
+		// We're statically configured, and the cluster has been set appropriately.
+		urlsmap, err = types.NewURLsMap(cfg.InitialCluster)
+	}
+	return urlsmap, token, err
+}
+
+func (cfg Config) InitialClusterFromName(name string) (ret string) {
+	if len(cfg.APUrls) == 0 {
+		return ""
+	}
+	n := name
+	if name == "" {
+		n = DefaultName
+	}
+	for i := range cfg.APUrls {
+		ret = ret + "," + n + "=" + cfg.APUrls[i].String()
+	}
+	return ret[1:]
+}
+
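+// Editor's note (illustrative): with the NewConfig defaults TickMs=100 and
+// ElectionMs=1000, ElectionTicks() below evaluates to 1000/100 = 10 ticks.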
+func (cfg Config) IsNewCluster() bool { return cfg.ClusterState == ClusterStateFlagNew }
+func (cfg Config) ElectionTicks() int { return int(cfg.ElectionMs / cfg.TickMs) }
+
+func (cfg Config) defaultPeerHost() bool {
+	return len(cfg.APUrls) == 1 && cfg.APUrls[0].String() == DefaultInitialAdvertisePeerURLs
+}
+
+func (cfg Config) defaultClientHost() bool {
+	return len(cfg.ACUrls) == 1 && cfg.ACUrls[0].String() == DefaultAdvertiseClientURLs
+}
+
+func (cfg *Config) ClientSelfCert() (err error) {
+	if !cfg.ClientAutoTLS {
+		return nil
+	}
+	if !cfg.ClientTLSInfo.Empty() {
+		plog.Warningf("ignoring client auto TLS since certs given")
+		return nil
+	}
+	chosts := make([]string, len(cfg.LCUrls))
+	for i, u := range cfg.LCUrls {
+		chosts[i] = u.Host
+	}
+	cfg.ClientTLSInfo, err = transport.SelfCert(filepath.Join(cfg.Dir, "fixtures", "client"), chosts)
+	if err != nil {
+		return err
+	}
+	return updateCipherSuites(&cfg.ClientTLSInfo, cfg.CipherSuites)
+}
+
+func (cfg *Config) PeerSelfCert() (err error) {
+	if !cfg.PeerAutoTLS {
+		return nil
+	}
+	if !cfg.PeerTLSInfo.Empty() {
+		plog.Warningf("ignoring peer auto TLS since certs given")
+		return nil
+	}
+	phosts := make([]string, len(cfg.LPUrls))
+	for i, u := range cfg.LPUrls {
+		phosts[i] = u.Host
+	}
+	cfg.PeerTLSInfo, err = transport.SelfCert(filepath.Join(cfg.Dir, "fixtures", "peer"), phosts)
+	if err != nil {
+		return err
+	}
+	return updateCipherSuites(&cfg.PeerTLSInfo, cfg.CipherSuites)
+}
+
+// UpdateDefaultClusterFromName updates the cluster advertise URLs with the
+// machine's default host, if available, when the advertise URLs are the
+// default values (localhost:2379, localhost:2380) AND the listen URL is
+// 0.0.0.0. e.g. if the advertise peer URL is localhost:2380 or the listen
+// peer URL is 0.0.0.0:2380, the advertise peer host is updated with the
+// machine's default host while keeping the listen URL's port.
+// Users can work around this by explicitly setting the URL to 127.0.0.1.
+// It returns the default hostname, if used, and the error, if any, from
+// getting the machine's default host.
+// TODO: check whether fields are set instead of whether fields have default value
+func (cfg *Config) UpdateDefaultClusterFromName(defaultInitialCluster string) (string, error) {
+	if defaultHostname == "" || defaultHostStatus != nil {
+		// update 'initial-cluster' when only the name is specified (e.g. 'etcd --name=abc')
+		if cfg.Name != DefaultName && cfg.InitialCluster == defaultInitialCluster {
+			cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name)
+		}
+		return "", defaultHostStatus
+	}
+
+	used := false
+	pip, pport := cfg.LPUrls[0].Hostname(), cfg.LPUrls[0].Port()
+	if cfg.defaultPeerHost() && pip == "0.0.0.0" {
+		cfg.APUrls[0] = url.URL{Scheme: cfg.APUrls[0].Scheme, Host: fmt.Sprintf("%s:%s", defaultHostname, pport)}
+		used = true
+	}
+	// update 'initial-cluster' when only the name is specified (e.g. 'etcd --name=abc')
+	if cfg.Name != DefaultName && cfg.InitialCluster == defaultInitialCluster {
+		cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name)
+	}
+
+	cip, cport := cfg.LCUrls[0].Hostname(), cfg.LCUrls[0].Port()
+	if cfg.defaultClientHost() && cip == "0.0.0.0" {
+		cfg.ACUrls[0] = url.URL{Scheme: cfg.ACUrls[0].Scheme, Host: fmt.Sprintf("%s:%s", defaultHostname, cport)}
+		used = true
+	}
+	dhost := defaultHostname
+	if !used {
+		dhost = ""
+	}
+	return dhost, defaultHostStatus
+}
+
+// checkBindURLs returns an error if any URL uses a domain name.
+func checkBindURLs(urls []url.URL) error {
+	for _, url := range urls {
+		if url.Scheme == "unix" || url.Scheme == "unixs" {
+			continue
+		}
+		host, _, err := net.SplitHostPort(url.Host)
+		if err != nil {
+			return err
+		}
+		if host == "localhost" {
+			// special case for local address
+			// TODO: support /etc/hosts ?
+			continue
+		}
+		if net.ParseIP(host) == nil {
+			return fmt.Errorf("expected IP in URL for binding (%s)", url.String())
+		}
+	}
+	return nil
+}
+
+func checkHostURLs(urls []url.URL) error {
+	for _, url := range urls {
+		host, _, err := net.SplitHostPort(url.Host)
+		if err != nil {
+			return err
+		}
+		if host == "" {
+			return fmt.Errorf("unexpected empty host (%s)", url.String())
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/coreos/etcd/embed/doc.go b/vendor/github.com/coreos/etcd/embed/doc.go
new file mode 100644
index 0000000..c555aa5
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/embed/doc.go
@@ -0,0 +1,45 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package embed provides bindings for embedding an etcd server in a program.
+
+Launch an embedded etcd server using the configuration defaults:
+
+	import (
+		"log"
+		"time"
+
+		"github.com/coreos/etcd/embed"
+	)
+
+	func main() {
+		cfg := embed.NewConfig()
+		cfg.Dir = "default.etcd"
+		e, err := embed.StartEtcd(cfg)
+		if err != nil {
+			log.Fatal(err)
+		}
+		defer e.Close()
+		select {
+		case <-e.Server.ReadyNotify():
+			log.Printf("Server is ready!")
+		case <-time.After(60 * time.Second):
+			e.Server.Stop() // trigger a shutdown
+			log.Printf("Server took too long to start!")
+		}
+		log.Fatal(<-e.Err())
+	}
+*/
+package embed
diff --git a/vendor/github.com/coreos/etcd/embed/etcd.go b/vendor/github.com/coreos/etcd/embed/etcd.go
new file mode 100644
index 0000000..bd848a7
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/embed/etcd.go
@@ -0,0 +1,582 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package embed
+
+import (
+	"context"
+	"crypto/tls"
+	"fmt"
+	"io/ioutil"
+	defaultLog "log"
+	"net"
+	"net/http"
+	"net/url"
+	"strconv"
+	"sync"
+	"time"
+
+	"github.com/coreos/etcd/compactor"
+	"github.com/coreos/etcd/etcdserver"
+	"github.com/coreos/etcd/etcdserver/api/etcdhttp"
+	"github.com/coreos/etcd/etcdserver/api/v2http"
+	"github.com/coreos/etcd/etcdserver/api/v2v3"
+	"github.com/coreos/etcd/etcdserver/api/v3client"
+	"github.com/coreos/etcd/etcdserver/api/v3rpc"
+	"github.com/coreos/etcd/pkg/cors"
+	"github.com/coreos/etcd/pkg/debugutil"
+	runtimeutil "github.com/coreos/etcd/pkg/runtime"
+	"github.com/coreos/etcd/pkg/transport"
+	"github.com/coreos/etcd/pkg/types"
+	"github.com/coreos/etcd/rafthttp"
+
+	"github.com/coreos/pkg/capnslog"
+	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
+	"github.com/soheilhy/cmux"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/keepalive"
+)
+
+var plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "embed")
+
+const (
+	// Internal fd usage includes disk usage and transport usage.
+	// To read/write a snapshot, the snap pkg needs 1. In the normal case, the
+	// wal pkg needs at most 2 to read/lock/write WALs. One case where it needs
+	// 2 is reading all logs after some snapshot index, which spans the end of
+	// the second-to-last WAL and the head of the last. For purging, it needs
+	// to read the directory, so it needs 1. The fd monitor needs 1.
+	// For transport, rafthttp builds two long-polling connections and at most
+	// four temporary connections with each member. There are at most 9 members
+	// in a cluster, so it should reserve 96.
+	// For safety, we set the total reserved number to 150.
+	reservedInternalFDNum = 150
+)
+
+// Etcd contains a running etcd server and its listeners.
+type Etcd struct {
+	Peers   []*peerListener
+	Clients []net.Listener
+	// a map of contexts for the servers that serves client requests.
+	sctxs            map[string]*serveCtx
+	metricsListeners []net.Listener
+
+	Server *etcdserver.EtcdServer
+
+	cfg   Config
+	stopc chan struct{}
+	errc  chan error
+
+	closeOnce sync.Once
+}
+
+type peerListener struct {
+	net.Listener
+	serve func() error
+	close func(context.Context) error
+}
+
+// StartEtcd launches the etcd server and HTTP handlers for client/server communication.
+// The returned Etcd.Server is not guaranteed to have joined the cluster. Wait
+// on the Etcd.Server.ReadyNotify() channel to know when it completes and is ready for use.
+func StartEtcd(inCfg *Config) (e *Etcd, err error) {
+	if err = inCfg.Validate(); err != nil {
+		return nil, err
+	}
+	serving := false
+	e = &Etcd{cfg: *inCfg, stopc: make(chan struct{})}
+	cfg := &e.cfg
+	defer func() {
+		if e == nil || err == nil {
+			return
+		}
+		if !serving {
+			// errored before starting gRPC server for serveCtx.serversC
+			for _, sctx := range e.sctxs {
+				close(sctx.serversC)
+			}
+		}
+		e.Close()
+		e = nil
+	}()
+
+	if e.Peers, err = startPeerListeners(cfg); err != nil {
+		return e, err
+	}
+	if e.sctxs, err = startClientListeners(cfg); err != nil {
+		return e, err
+	}
+	for _, sctx := range e.sctxs {
+		e.Clients = append(e.Clients, sctx.l)
+	}
+
+	var (
+		urlsmap types.URLsMap
+		token   string
+	)
+
+	memberInitialized := true
+	if !isMemberInitialized(cfg) {
+		memberInitialized = false
+		urlsmap, token, err = cfg.PeerURLsMapAndToken("etcd")
+		if err != nil {
+			return e, fmt.Errorf("error setting up initial cluster: %v", err)
+		}
+	}
+
+	// AutoCompactionRetention defaults to "0" if not set.
+	if len(cfg.AutoCompactionRetention) == 0 {
+		cfg.AutoCompactionRetention = "0"
+	}
+	autoCompactionRetention, err := parseCompactionRetention(cfg.AutoCompactionMode, cfg.AutoCompactionRetention)
+	if err != nil {
+		return e, err
+	}
+
+	srvcfg := etcdserver.ServerConfig{
+		Name:                       cfg.Name,
+		ClientURLs:                 cfg.ACUrls,
+		PeerURLs:                   cfg.APUrls,
+		DataDir:                    cfg.Dir,
+		DedicatedWALDir:            cfg.WalDir,
+		SnapCount:                  cfg.SnapCount,
+		MaxSnapFiles:               cfg.MaxSnapFiles,
+		MaxWALFiles:                cfg.MaxWalFiles,
+		InitialPeerURLsMap:         urlsmap,
+		InitialClusterToken:        token,
+		DiscoveryURL:               cfg.Durl,
+		DiscoveryProxy:             cfg.Dproxy,
+		NewCluster:                 cfg.IsNewCluster(),
+		ForceNewCluster:            cfg.ForceNewCluster,
+		PeerTLSInfo:                cfg.PeerTLSInfo,
+		TickMs:                     cfg.TickMs,
+		ElectionTicks:              cfg.ElectionTicks(),
+		InitialElectionTickAdvance: cfg.InitialElectionTickAdvance,
+		AutoCompactionRetention:    autoCompactionRetention,
+		AutoCompactionMode:         cfg.AutoCompactionMode,
+		QuotaBackendBytes:          cfg.QuotaBackendBytes,
+		MaxTxnOps:                  cfg.MaxTxnOps,
+		MaxRequestBytes:            cfg.MaxRequestBytes,
+		StrictReconfigCheck:        cfg.StrictReconfigCheck,
+		ClientCertAuthEnabled:      cfg.ClientTLSInfo.ClientCertAuth,
+		AuthToken:                  cfg.AuthToken,
+		InitialCorruptCheck:        cfg.ExperimentalInitialCorruptCheck,
+		CorruptCheckTime:           cfg.ExperimentalCorruptCheckTime,
+		Debug:                      cfg.Debug,
+	}
+
+	if e.Server, err = etcdserver.NewServer(srvcfg); err != nil {
+		return e, err
+	}
+
+	// buffer channel so goroutines on closed connections won't wait forever
+	e.errc = make(chan error, len(e.Peers)+len(e.Clients)+2*len(e.sctxs))
+
+	// newly started member ("memberInitialized==false")
+	// does not need corruption check
+	if memberInitialized {
+		if err = e.Server.CheckInitialHashKV(); err != nil {
+			// set "EtcdServer" to nil, so that it does not block on "EtcdServer.Close()"
+			// (nothing to close since rafthttp transports have not been started)
+			e.Server = nil
+			return e, err
+		}
+	}
+	e.Server.Start()
+
+	if err = e.servePeers(); err != nil {
+		return e, err
+	}
+	if err = e.serveClients(); err != nil {
+		return e, err
+	}
+	if err = e.serveMetrics(); err != nil {
+		return e, err
+	}
+
+	serving = true
+	return e, nil
+}
+
+// Config returns the current configuration.
+func (e *Etcd) Config() Config {
+	return e.cfg
+}
+
+// Close gracefully shuts down all servers/listeners.
+// Client requests will be terminated with the request timeout.
+// After the timeout, any remaining requests are forcibly closed.
+func (e *Etcd) Close() {
+	e.closeOnce.Do(func() { close(e.stopc) })
+
+	// close client requests with request timeout
+	timeout := 2 * time.Second
+	if e.Server != nil {
+		timeout = e.Server.Cfg.ReqTimeout()
+	}
+	for _, sctx := range e.sctxs {
+		for ss := range sctx.serversC {
+			ctx, cancel := context.WithTimeout(context.Background(), timeout)
+			stopServers(ctx, ss)
+			cancel()
+		}
+	}
+
+	for _, sctx := range e.sctxs {
+		sctx.cancel()
+	}
+
+	for i := range e.Clients {
+		if e.Clients[i] != nil {
+			e.Clients[i].Close()
+		}
+	}
+
+	for i := range e.metricsListeners {
+		e.metricsListeners[i].Close()
+	}
+
+	// close rafthttp transports
+	if e.Server != nil {
+		e.Server.Stop()
+	}
+
+	// close all idle connections in the peer handler (wait up to 1 second)
+	for i := range e.Peers {
+		if e.Peers[i] != nil && e.Peers[i].close != nil {
+			ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+			e.Peers[i].close(ctx)
+			cancel()
+		}
+	}
+}
+
+func stopServers(ctx context.Context, ss *servers) {
+	shutdownNow := func() {
+		// first, close the http.Server
+		ss.http.Shutdown(ctx)
+		// then close grpc.Server; cancels all active RPCs
+		ss.grpc.Stop()
+	}
+
+	// do not grpc.Server.GracefulStop with TLS enabled etcd server
+	// See https://github.com/grpc/grpc-go/issues/1384#issuecomment-317124531
+	// and https://github.com/coreos/etcd/issues/8916
+	if ss.secure {
+		shutdownNow()
+		return
+	}
+
+	ch := make(chan struct{})
+	go func() {
+		defer close(ch)
+		// close listeners to stop accepting new connections,
+		// will block on any existing transports
+		ss.grpc.GracefulStop()
+	}()
+
+	// wait until all pending RPCs are finished
+	select {
+	case <-ch:
+	case <-ctx.Done():
+		// took too long, manually close open transports
+		// e.g. watch streams
+		shutdownNow()
+
+		// concurrent GracefulStop should be interrupted
+		<-ch
+	}
+}
+
+func (e *Etcd) Err() <-chan error { return e.errc }
+
+func startPeerListeners(cfg *Config) (peers []*peerListener, err error) {
+	if err = updateCipherSuites(&cfg.PeerTLSInfo, cfg.CipherSuites); err != nil {
+		return nil, err
+	}
+	if err = cfg.PeerSelfCert(); err != nil {
+		plog.Fatalf("could not get certs (%v)", err)
+	}
+	if !cfg.PeerTLSInfo.Empty() {
+		plog.Infof("peerTLS: %s", cfg.PeerTLSInfo)
+	}
+
+	peers = make([]*peerListener, len(cfg.LPUrls))
+	defer func() {
+		if err == nil {
+			return
+		}
+		for i := range peers {
+			if peers[i] != nil && peers[i].close != nil {
+				plog.Info("stopping listening for peers on ", cfg.LPUrls[i].String())
+				ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+				peers[i].close(ctx)
+				cancel()
+			}
+		}
+	}()
+
+	for i, u := range cfg.LPUrls {
+		if u.Scheme == "http" {
+			if !cfg.PeerTLSInfo.Empty() {
+				plog.Warningf("The scheme of peer url %s is HTTP while peer key/cert files are presented. Ignored peer key/cert files.", u.String())
+			}
+			if cfg.PeerTLSInfo.ClientCertAuth {
+				plog.Warningf("The scheme of peer url %s is HTTP while client cert auth (--peer-client-cert-auth) is enabled. Ignored client cert auth for this url.", u.String())
+			}
+		}
+		peers[i] = &peerListener{close: func(context.Context) error { return nil }}
+		peers[i].Listener, err = rafthttp.NewListener(u, &cfg.PeerTLSInfo)
+		if err != nil {
+			return nil, err
+		}
+		// once serving, this is overwritten with 'http.Server.Shutdown'.
+		// Capture the listener locally so the closure does not close over
+		// the loop variable i.
+		l := peers[i].Listener
+		peers[i].close = func(context.Context) error {
+			return l.Close()
+		}
+		plog.Info("listening for peers on ", u.String())
+	}
+	return peers, nil
+}
+
+// configure peer handlers after rafthttp.Transport started
+func (e *Etcd) servePeers() (err error) {
+	ph := etcdhttp.NewPeerHandler(e.Server)
+	var peerTLScfg *tls.Config
+	if !e.cfg.PeerTLSInfo.Empty() {
+		if peerTLScfg, err = e.cfg.PeerTLSInfo.ServerConfig(); err != nil {
+			return err
+		}
+	}
+
+	for _, p := range e.Peers {
+		gs := v3rpc.Server(e.Server, peerTLScfg)
+		m := cmux.New(p.Listener)
+		go gs.Serve(m.Match(cmux.HTTP2()))
+		srv := &http.Server{
+			Handler:     grpcHandlerFunc(gs, ph),
+			ReadTimeout: 5 * time.Minute,
+			ErrorLog:    defaultLog.New(ioutil.Discard, "", 0), // do not log user error
+		}
+		go srv.Serve(m.Match(cmux.Any()))
+		p.serve = func() error { return m.Serve() }
+		p.close = func(ctx context.Context) error {
+			// gracefully shutdown http.Server
+			// close open listeners, idle connections
+			// until context cancel or time-out
+			stopServers(ctx, &servers{secure: peerTLScfg != nil, grpc: gs, http: srv})
+			return nil
+		}
+	}
+
+	// start peer servers in a goroutine
+	for _, pl := range e.Peers {
+		go func(l *peerListener) {
+			e.errHandler(l.serve())
+		}(pl)
+	}
+	return nil
+}
+
+func startClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err error) {
+	if err = updateCipherSuites(&cfg.ClientTLSInfo, cfg.CipherSuites); err != nil {
+		return nil, err
+	}
+	if err = cfg.ClientSelfCert(); err != nil {
+		plog.Fatalf("could not get certs (%v)", err)
+	}
+	if cfg.EnablePprof {
+		plog.Infof("pprof is enabled under %s", debugutil.HTTPPrefixPProf)
+	}
+
+	sctxs = make(map[string]*serveCtx)
+	for _, u := range cfg.LCUrls {
+		sctx := newServeCtx()
+
+		if u.Scheme == "http" || u.Scheme == "unix" {
+			if !cfg.ClientTLSInfo.Empty() {
+				plog.Warningf("The scheme of client url %s is HTTP while peer key/cert files are presented. Ignored key/cert files.", u.String())
+			}
+			if cfg.ClientTLSInfo.ClientCertAuth {
+				plog.Warningf("The scheme of client url %s is HTTP while client cert auth (--client-cert-auth) is enabled. Ignored client cert auth for this url.", u.String())
+			}
+		}
+		if (u.Scheme == "https" || u.Scheme == "unixs") && cfg.ClientTLSInfo.Empty() {
+			return nil, fmt.Errorf("TLS key/cert (--cert-file, --key-file) must be provided for client url %s with HTTPs scheme", u.String())
+		}
+
+		proto := "tcp"
+		addr := u.Host
+		if u.Scheme == "unix" || u.Scheme == "unixs" {
+			proto = "unix"
+			addr = u.Host + u.Path
+		}
+
+		sctx.secure = u.Scheme == "https" || u.Scheme == "unixs"
+		sctx.insecure = !sctx.secure
+		if oldctx := sctxs[addr]; oldctx != nil {
+			oldctx.secure = oldctx.secure || sctx.secure
+			oldctx.insecure = oldctx.insecure || sctx.insecure
+			continue
+		}
+
+		if sctx.l, err = net.Listen(proto, addr); err != nil {
+			return nil, err
+		}
+		// net.Listener will rewrite ipv4 0.0.0.0 to ipv6 [::], breaking
+		// hosts that disable ipv6. So, use the address given by the user.
+		sctx.addr = addr
+
+		if fdLimit, fderr := runtimeutil.FDLimit(); fderr == nil {
+			if fdLimit <= reservedInternalFDNum {
+				plog.Fatalf("file descriptor limit[%d] of etcd process is too low, and should be set higher than %d to ensure internal usage", fdLimit, reservedInternalFDNum)
+			}
+			sctx.l = transport.LimitListener(sctx.l, int(fdLimit-reservedInternalFDNum))
+		}
+
+		if proto == "tcp" {
+			if sctx.l, err = transport.NewKeepAliveListener(sctx.l, "tcp", nil); err != nil {
+				return nil, err
+			}
+		}
+
+		plog.Info("listening for client requests on ", u.Host)
+		defer func() {
+			if err != nil {
+				sctx.l.Close()
+				plog.Info("stopping listening for client requests on ", u.Host)
+			}
+		}()
+		for k := range cfg.UserHandlers {
+			sctx.userHandlers[k] = cfg.UserHandlers[k]
+		}
+		sctx.serviceRegister = cfg.ServiceRegister
+		if cfg.EnablePprof || cfg.Debug {
+			sctx.registerPprof()
+		}
+		if cfg.Debug {
+			sctx.registerTrace()
+		}
+		sctxs[addr] = sctx
+	}
+	return sctxs, nil
+}
+
+func (e *Etcd) serveClients() (err error) {
+	if !e.cfg.ClientTLSInfo.Empty() {
+		plog.Infof("ClientTLS: %s", e.cfg.ClientTLSInfo)
+	}
+
+	if e.cfg.CorsInfo.String() != "" {
+		plog.Infof("cors = %s", e.cfg.CorsInfo)
+	}
+
+	// Start a client server goroutine for each listen address
+	var h http.Handler
+	if e.Config().EnableV2 {
+		if len(e.Config().ExperimentalEnableV2V3) > 0 {
+			srv := v2v3.NewServer(v3client.New(e.Server), e.cfg.ExperimentalEnableV2V3)
+			h = v2http.NewClientHandler(srv, e.Server.Cfg.ReqTimeout())
+		} else {
+			h = v2http.NewClientHandler(e.Server, e.Server.Cfg.ReqTimeout())
+		}
+	} else {
+		mux := http.NewServeMux()
+		etcdhttp.HandleBasic(mux, e.Server)
+		h = mux
+	}
+	h = http.Handler(&cors.CORSHandler{Handler: h, Info: e.cfg.CorsInfo})
+
+	gopts := []grpc.ServerOption{}
+	if e.cfg.GRPCKeepAliveMinTime > time.Duration(0) {
+		gopts = append(gopts, grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
+			MinTime:             e.cfg.GRPCKeepAliveMinTime,
+			PermitWithoutStream: false,
+		}))
+	}
+	if e.cfg.GRPCKeepAliveInterval > time.Duration(0) &&
+		e.cfg.GRPCKeepAliveTimeout > time.Duration(0) {
+		gopts = append(gopts, grpc.KeepaliveParams(keepalive.ServerParameters{
+			Time:    e.cfg.GRPCKeepAliveInterval,
+			Timeout: e.cfg.GRPCKeepAliveTimeout,
+		}))
+	}
+
+	// start client servers in a goroutine
+	for _, sctx := range e.sctxs {
+		go func(s *serveCtx) {
+			e.errHandler(s.serve(e.Server, &e.cfg.ClientTLSInfo, h, e.errHandler, gopts...))
+		}(sctx)
+	}
+	return nil
+}
+
+func (e *Etcd) serveMetrics() (err error) {
+	if e.cfg.Metrics == "extensive" {
+		grpc_prometheus.EnableHandlingTimeHistogram()
+	}
+
+	if len(e.cfg.ListenMetricsUrls) > 0 {
+		metricsMux := http.NewServeMux()
+		etcdhttp.HandleMetricsHealth(metricsMux, e.Server)
+
+		for _, murl := range e.cfg.ListenMetricsUrls {
+			tlsInfo := &e.cfg.ClientTLSInfo
+			if murl.Scheme == "http" {
+				tlsInfo = nil
+			}
+			ml, err := transport.NewListener(murl.Host, murl.Scheme, tlsInfo)
+			if err != nil {
+				return err
+			}
+			e.metricsListeners = append(e.metricsListeners, ml)
+			go func(u url.URL, ln net.Listener) {
+				plog.Info("listening for metrics on ", u.String())
+				e.errHandler(http.Serve(ln, metricsMux))
+			}(murl, ml)
+		}
+	}
+	return nil
+}
+
+func (e *Etcd) errHandler(err error) {
+	select {
+	case <-e.stopc:
+		return
+	default:
+	}
+	select {
+	case <-e.stopc:
+	case e.errc <- err:
+	}
+}
+
+func parseCompactionRetention(mode, retention string) (ret time.Duration, err error) {
+	h, err := strconv.Atoi(retention)
+	if err == nil {
+		switch mode {
+		case compactor.ModeRevision:
+			ret = time.Duration(int64(h))
+		case compactor.ModePeriodic:
+			ret = time.Duration(int64(h)) * time.Hour
+		}
+	} else {
+		// periodic compaction
+		ret, err = time.ParseDuration(retention)
+		if err != nil {
+			return 0, fmt.Errorf("error parsing CompactionRetention: %v", err)
+		}
+	}
+	return ret, nil
+}
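+
+// Illustrative examples (editor's note): parseCompactionRetention("periodic", "5")
+// returns 5 * time.Hour; parseCompactionRetention("periodic", "72h") parses the
+// duration directly; parseCompactionRetention("revision", "5000") returns
+// time.Duration(5000), which the compactor interprets as a revision count.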
diff --git a/vendor/github.com/coreos/etcd/embed/serve.go b/vendor/github.com/coreos/etcd/embed/serve.go
new file mode 100644
index 0000000..62b8b57
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/embed/serve.go
@@ -0,0 +1,285 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package embed
+
+import (
+	"context"
+	"io/ioutil"
+	defaultLog "log"
+	"net"
+	"net/http"
+	"strings"
+
+	"github.com/coreos/etcd/etcdserver"
+	"github.com/coreos/etcd/etcdserver/api/v3client"
+	"github.com/coreos/etcd/etcdserver/api/v3election"
+	"github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb"
+	v3electiongw "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw"
+	"github.com/coreos/etcd/etcdserver/api/v3lock"
+	"github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb"
+	v3lockgw "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw"
+	"github.com/coreos/etcd/etcdserver/api/v3rpc"
+	etcdservergw "github.com/coreos/etcd/etcdserver/etcdserverpb/gw"
+	"github.com/coreos/etcd/pkg/debugutil"
+	"github.com/coreos/etcd/pkg/transport"
+
+	gw "github.com/grpc-ecosystem/grpc-gateway/runtime"
+	"github.com/soheilhy/cmux"
+	"github.com/tmc/grpc-websocket-proxy/wsproxy"
+	"golang.org/x/net/trace"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials"
+)
+
+type serveCtx struct {
+	l        net.Listener
+	addr     string
+	secure   bool
+	insecure bool
+
+	ctx    context.Context
+	cancel context.CancelFunc
+
+	userHandlers    map[string]http.Handler
+	serviceRegister func(*grpc.Server)
+	serversC        chan *servers
+}
+
+type servers struct {
+	secure bool
+	grpc   *grpc.Server
+	http   *http.Server
+}
+
+func newServeCtx() *serveCtx {
+	ctx, cancel := context.WithCancel(context.Background())
+	return &serveCtx{ctx: ctx, cancel: cancel, userHandlers: make(map[string]http.Handler),
+		serversC: make(chan *servers, 2), // in case both sctx.insecure and sctx.secure are true
+	}
+}
+
+// serve accepts incoming connections on the listener l,
+// creating a new service goroutine for each. The service goroutines
+// read requests and then call handler to reply to them.
+func (sctx *serveCtx) serve(
+	s *etcdserver.EtcdServer,
+	tlsinfo *transport.TLSInfo,
+	handler http.Handler,
+	errHandler func(error),
+	gopts ...grpc.ServerOption) (err error) {
+	logger := defaultLog.New(ioutil.Discard, "etcdhttp", 0)
+	<-s.ReadyNotify()
+	plog.Info("ready to serve client requests")
+
+	m := cmux.New(sctx.l)
+	v3c := v3client.New(s)
+	servElection := v3election.NewElectionServer(v3c)
+	servLock := v3lock.NewLockServer(v3c)
+
+	var gs *grpc.Server
+	defer func() {
+		if err != nil && gs != nil {
+			gs.Stop()
+		}
+	}()
+
+	if sctx.insecure {
+		gs = v3rpc.Server(s, nil, gopts...)
+		v3electionpb.RegisterElectionServer(gs, servElection)
+		v3lockpb.RegisterLockServer(gs, servLock)
+		if sctx.serviceRegister != nil {
+			sctx.serviceRegister(gs)
+		}
+		grpcl := m.Match(cmux.HTTP2())
+		go func() { errHandler(gs.Serve(grpcl)) }()
+
+		var gwmux *gw.ServeMux
+		gwmux, err = sctx.registerGateway([]grpc.DialOption{grpc.WithInsecure()})
+		if err != nil {
+			return err
+		}
+
+		httpmux := sctx.createMux(gwmux, handler)
+
+		srvhttp := &http.Server{
+			Handler:  wrapMux(httpmux),
+			ErrorLog: logger, // do not log user error
+		}
+		httpl := m.Match(cmux.HTTP1())
+		go func() { errHandler(srvhttp.Serve(httpl)) }()
+
+		sctx.serversC <- &servers{grpc: gs, http: srvhttp}
+		plog.Noticef("serving insecure client requests on %s, this is strongly discouraged!", sctx.l.Addr().String())
+	}
+
+	if sctx.secure {
+		tlscfg, tlsErr := tlsinfo.ServerConfig()
+		if tlsErr != nil {
+			return tlsErr
+		}
+		gs = v3rpc.Server(s, tlscfg, gopts...)
+		v3electionpb.RegisterElectionServer(gs, servElection)
+		v3lockpb.RegisterLockServer(gs, servLock)
+		if sctx.serviceRegister != nil {
+			sctx.serviceRegister(gs)
+		}
+		handler = grpcHandlerFunc(gs, handler)
+
+		dtls := tlscfg.Clone()
+		// trust local server
+		dtls.InsecureSkipVerify = true
+		creds := credentials.NewTLS(dtls)
+		opts := []grpc.DialOption{grpc.WithTransportCredentials(creds)}
+		var gwmux *gw.ServeMux
+		gwmux, err = sctx.registerGateway(opts)
+		if err != nil {
+			return err
+		}
+
+		var tlsl net.Listener
+		tlsl, err = transport.NewTLSListener(m.Match(cmux.Any()), tlsinfo)
+		if err != nil {
+			return err
+		}
+		// TODO: add debug flag; enable logging when debug flag is set
+		httpmux := sctx.createMux(gwmux, handler)
+
+		srv := &http.Server{
+			Handler:   wrapMux(httpmux),
+			TLSConfig: tlscfg,
+			ErrorLog:  logger, // do not log user error
+		}
+		go func() { errHandler(srv.Serve(tlsl)) }()
+
+		sctx.serversC <- &servers{secure: true, grpc: gs, http: srv}
+		plog.Infof("serving client requests on %s", sctx.l.Addr().String())
+	}
+
+	close(sctx.serversC)
+	return m.Serve()
+}
+
+// grpcHandlerFunc returns an http.Handler that delegates to grpcServer on
+// incoming gRPC connections and to otherHandler otherwise, following the
+// pattern suggested in the gRPC documentation.
+func grpcHandlerFunc(grpcServer *grpc.Server, otherHandler http.Handler) http.Handler {
+	if otherHandler == nil {
+		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			grpcServer.ServeHTTP(w, r)
+		})
+	}
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		if r.ProtoMajor == 2 && strings.Contains(r.Header.Get("Content-Type"), "application/grpc") {
+			grpcServer.ServeHTTP(w, r)
+		} else {
+			otherHandler.ServeHTTP(w, r)
+		}
+	})
+}
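+
+// A minimal wiring sketch (hypothetical caller, not part of this file):
+// grpcHandlerFunc lets one TLS listener serve both gRPC and HTTP/1 REST
+// traffic, which is how the secure branch of serve uses it above:
+//
+//	gs := grpc.NewServer()
+//	restMux := http.NewServeMux()
+//	srv := &http.Server{
+//		Handler:   grpcHandlerFunc(gs, restMux), // routes by ProtoMajor + Content-Type
+//		TLSConfig: tlscfg,                       // HTTP/2 demuxing requires TLS here
+//	}
+//	// srv.Serve(tlsListener) then dispatches each request appropriately.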
+
+type registerHandlerFunc func(context.Context, *gw.ServeMux, *grpc.ClientConn) error
+
+func (sctx *serveCtx) registerGateway(opts []grpc.DialOption) (*gw.ServeMux, error) {
+	ctx := sctx.ctx
+	conn, err := grpc.DialContext(ctx, sctx.addr, opts...)
+	if err != nil {
+		return nil, err
+	}
+	gwmux := gw.NewServeMux()
+
+	handlers := []registerHandlerFunc{
+		etcdservergw.RegisterKVHandler,
+		etcdservergw.RegisterWatchHandler,
+		etcdservergw.RegisterLeaseHandler,
+		etcdservergw.RegisterClusterHandler,
+		etcdservergw.RegisterMaintenanceHandler,
+		etcdservergw.RegisterAuthHandler,
+		v3lockgw.RegisterLockHandler,
+		v3electiongw.RegisterElectionHandler,
+	}
+	for _, h := range handlers {
+		if err := h(ctx, gwmux, conn); err != nil {
+			return nil, err
+		}
+	}
+	go func() {
+		<-ctx.Done()
+		if cerr := conn.Close(); cerr != nil {
+			plog.Warningf("failed to close conn to %s: %v", sctx.l.Addr().String(), cerr)
+		}
+	}()
+
+	return gwmux, nil
+}
+
+func (sctx *serveCtx) createMux(gwmux *gw.ServeMux, handler http.Handler) *http.ServeMux {
+	httpmux := http.NewServeMux()
+	for path, h := range sctx.userHandlers {
+		httpmux.Handle(path, h)
+	}
+
+	httpmux.Handle(
+		"/v3beta/",
+		wsproxy.WebsocketProxy(
+			gwmux,
+			wsproxy.WithRequestMutator(
+				// Default to the POST method for streams
+				func(incoming *http.Request, outgoing *http.Request) *http.Request {
+					outgoing.Method = "POST"
+					return outgoing
+				},
+			),
+		),
+	)
+	if handler != nil {
+		httpmux.Handle("/", handler)
+	}
+	return httpmux
+}
+
+// wrapMux wraps the HTTP multiplexer to rewrite deprecated /v3alpha requests to /v3beta.
+// TODO: deprecate this in 3.4 release
+func wrapMux(mux *http.ServeMux) http.Handler { return &v3alphaMutator{mux: mux} }
+
+type v3alphaMutator struct {
+	mux *http.ServeMux
+}
+
+func (m *v3alphaMutator) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
+	if req != nil && req.URL != nil && strings.HasPrefix(req.URL.Path, "/v3alpha/") {
+		req.URL.Path = strings.Replace(req.URL.Path, "/v3alpha/", "/v3beta/", 1)
+	}
+	m.mux.ServeHTTP(rw, req)
+}
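+
+// For illustration (hypothetical request): wrapMux rewrites only the
+// deprecated prefix, so
+//
+//	req, _ := http.NewRequest("POST", "http://host/v3alpha/kv/range", nil)
+//	wrapMux(mux).ServeHTTP(w, req) // served as if it were /v3beta/kv/range
+//
+// while /v3beta/ and user-registered paths pass through unchanged.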
+
+func (sctx *serveCtx) registerUserHandler(s string, h http.Handler) {
+	if sctx.userHandlers[s] != nil {
+		plog.Warningf("path %s already registered by user handler", s)
+		return
+	}
+	sctx.userHandlers[s] = h
+}
+
+func (sctx *serveCtx) registerPprof() {
+	for p, h := range debugutil.PProfHandlers() {
+		sctx.registerUserHandler(p, h)
+	}
+}
+
+func (sctx *serveCtx) registerTrace() {
+	reqf := func(w http.ResponseWriter, r *http.Request) { trace.Render(w, r, true) }
+	sctx.registerUserHandler("/debug/requests", http.HandlerFunc(reqf))
+	evf := func(w http.ResponseWriter, r *http.Request) { trace.RenderEvents(w, r, true) }
+	sctx.registerUserHandler("/debug/events", http.HandlerFunc(evf))
+}
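+
+// Once registered, these handlers expose the golang.org/x/net/trace pages
+// on the client URL, e.g. (illustrative address):
+//
+//	curl http://127.0.0.1:2379/debug/requests   # sampled request traces
+//	curl http://127.0.0.1:2379/debug/events     # event logs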
diff --git a/vendor/github.com/coreos/etcd/embed/util.go b/vendor/github.com/coreos/etcd/embed/util.go
new file mode 100644
index 0000000..168e031
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/embed/util.go
@@ -0,0 +1,30 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package embed
+
+import (
+	"path/filepath"
+
+	"github.com/coreos/etcd/wal"
+)
+
+func isMemberInitialized(cfg *Config) bool {
+	waldir := cfg.WalDir
+	if waldir == "" {
+		waldir = filepath.Join(cfg.Dir, "member", "wal")
+	}
+
+	return wal.Exist(waldir)
+}
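+
+// For example (illustrative config): with cfg.Dir = "default.etcd" and no
+// dedicated WalDir, this checks "default.etcd/member/wal":
+//
+//	cfg := &Config{Dir: "default.etcd"}
+//	initialized := isMemberInitialized(cfg) // true only if WAL files exist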
diff --git a/vendor/github.com/coreos/etcd/error/error.go b/vendor/github.com/coreos/etcd/error/error.go
new file mode 100644
index 0000000..b541a62
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/error/error.go
@@ -0,0 +1,163 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package error describes errors in the etcd project. When any change happens,
+// Documentation/v2/errorcode.md needs to be updated correspondingly.
+package error
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+)
+
+var errors = map[int]string{
+	// command related errors
+	EcodeKeyNotFound:      "Key not found",
+	EcodeTestFailed:       "Compare failed", // test-and-set
+	EcodeNotFile:          "Not a file",
+	ecodeNoMorePeer:       "Reached the max number of peers in the cluster",
+	EcodeNotDir:           "Not a directory",
+	EcodeNodeExist:        "Key already exists", // create
+	ecodeKeyIsPreserved:   "The prefix of given key is a keyword in etcd",
+	EcodeRootROnly:        "Root is read only",
+	EcodeDirNotEmpty:      "Directory not empty",
+	ecodeExistingPeerAddr: "Peer address has existed",
+	EcodeUnauthorized:     "The request requires user authentication",
+
+	// Post form related errors
+	ecodeValueRequired:        "Value is Required in POST form",
+	EcodePrevValueRequired:    "PrevValue is Required in POST form",
+	EcodeTTLNaN:               "The given TTL in POST form is not a number",
+	EcodeIndexNaN:             "The given index in POST form is not a number",
+	ecodeValueOrTTLRequired:   "Value or TTL is required in POST form",
+	ecodeTimeoutNaN:           "The given timeout in POST form is not a number",
+	ecodeNameRequired:         "Name is required in POST form",
+	ecodeIndexOrValueRequired: "Index or value is required",
+	ecodeIndexValueMutex:      "Index and value cannot both be specified",
+	EcodeInvalidField:         "Invalid field",
+	EcodeInvalidForm:          "Invalid POST form",
+	EcodeRefreshValue:         "Value provided on refresh",
+	EcodeRefreshTTLRequired:   "A TTL must be provided on refresh",
+
+	// raft related errors
+	EcodeRaftInternal: "Raft Internal Error",
+	EcodeLeaderElect:  "During Leader Election",
+
+	// etcd related errors
+	EcodeWatcherCleared:     "watcher is cleared due to etcd recovery",
+	EcodeEventIndexCleared:  "The event in requested index is outdated and cleared",
+	ecodeStandbyInternal:    "Standby Internal Error",
+	ecodeInvalidActiveSize:  "Invalid active size",
+	ecodeInvalidRemoveDelay: "Standby remove delay",
+
+	// client related errors
+	ecodeClientInternal: "Client Internal Error",
+}
+
+var errorStatus = map[int]int{
+	EcodeKeyNotFound:  http.StatusNotFound,
+	EcodeNotFile:      http.StatusForbidden,
+	EcodeDirNotEmpty:  http.StatusForbidden,
+	EcodeUnauthorized: http.StatusUnauthorized,
+	EcodeTestFailed:   http.StatusPreconditionFailed,
+	EcodeNodeExist:    http.StatusPreconditionFailed,
+	EcodeRaftInternal: http.StatusInternalServerError,
+	EcodeLeaderElect:  http.StatusInternalServerError,
+}
+
+const (
+	EcodeKeyNotFound      = 100
+	EcodeTestFailed       = 101
+	EcodeNotFile          = 102
+	ecodeNoMorePeer       = 103
+	EcodeNotDir           = 104
+	EcodeNodeExist        = 105
+	ecodeKeyIsPreserved   = 106
+	EcodeRootROnly        = 107
+	EcodeDirNotEmpty      = 108
+	ecodeExistingPeerAddr = 109
+	EcodeUnauthorized     = 110
+
+	ecodeValueRequired        = 200
+	EcodePrevValueRequired    = 201
+	EcodeTTLNaN               = 202
+	EcodeIndexNaN             = 203
+	ecodeValueOrTTLRequired   = 204
+	ecodeTimeoutNaN           = 205
+	ecodeNameRequired         = 206
+	ecodeIndexOrValueRequired = 207
+	ecodeIndexValueMutex      = 208
+	EcodeInvalidField         = 209
+	EcodeInvalidForm          = 210
+	EcodeRefreshValue         = 211
+	EcodeRefreshTTLRequired   = 212
+
+	EcodeRaftInternal = 300
+	EcodeLeaderElect  = 301
+
+	EcodeWatcherCleared     = 400
+	EcodeEventIndexCleared  = 401
+	ecodeStandbyInternal    = 402
+	ecodeInvalidActiveSize  = 403
+	ecodeInvalidRemoveDelay = 404
+
+	ecodeClientInternal = 500
+)
+
+type Error struct {
+	ErrorCode int    `json:"errorCode"`
+	Message   string `json:"message"`
+	Cause     string `json:"cause,omitempty"`
+	Index     uint64 `json:"index"`
+}
+
+func NewRequestError(errorCode int, cause string) *Error {
+	return NewError(errorCode, cause, 0)
+}
+
+func NewError(errorCode int, cause string, index uint64) *Error {
+	return &Error{
+		ErrorCode: errorCode,
+		Message:   errors[errorCode],
+		Cause:     cause,
+		Index:     index,
+	}
+}
+
+// Error implements the error interface.
+func (e Error) Error() string {
+	return e.Message + " (" + e.Cause + ")"
+}
+
+func (e Error) toJsonString() string {
+	b, _ := json.Marshal(e)
+	return string(b)
+}
+
+func (e Error) StatusCode() int {
+	status, ok := errorStatus[e.ErrorCode]
+	if !ok {
+		status = http.StatusBadRequest
+	}
+	return status
+}
+
+func (e Error) WriteTo(w http.ResponseWriter) error {
+	w.Header().Add("X-Etcd-Index", fmt.Sprint(e.Index))
+	w.Header().Set("Content-Type", "application/json")
+	w.WriteHeader(e.StatusCode())
+	_, err := w.Write([]byte(e.toJsonString() + "\n"))
+	return err
+}
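+
+// A minimal sketch of the intended use (hypothetical handler): the error
+// code selects both the message (errors map) and the HTTP status
+// (errorStatus map, defaulting to 400 Bad Request):
+//
+//	func handleGet(w http.ResponseWriter, key string, found bool) {
+//		if !found {
+//			// EcodeKeyNotFound (100) -> "Key not found", HTTP 404
+//			NewRequestError(EcodeKeyNotFound, key).WriteTo(w)
+//		}
+//	}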
diff --git a/vendor/github.com/coreos/etcd/etcd.conf.yml.sample b/vendor/github.com/coreos/etcd/etcd.conf.yml.sample
new file mode 100644
index 0000000..2bc115f
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcd.conf.yml.sample
@@ -0,0 +1,144 @@
+# This is the configuration file for the etcd server.
+
+# Human-readable name for this member.
+name: 'default'
+
+# Path to the data directory.
+data-dir:
+
+# Path to the dedicated wal directory.
+wal-dir:
+
+# Number of committed transactions to trigger a snapshot to disk.
+snapshot-count: 10000
+
+# Time (in milliseconds) of a heartbeat interval.
+heartbeat-interval: 100
+
+# Time (in milliseconds) for an election to timeout.
+election-timeout: 1000
+
+# Raise alarms when backend size exceeds the given quota. 0 means use the
+# default quota.
+quota-backend-bytes: 0
+
+# List of comma separated URLs to listen on for peer traffic.
+listen-peer-urls: http://localhost:2380
+
+# List of comma separated URLs to listen on for client traffic.
+listen-client-urls: http://localhost:2379
+
+# Maximum number of snapshot files to retain (0 is unlimited).
+max-snapshots: 5
+
+# Maximum number of wal files to retain (0 is unlimited).
+max-wals: 5
+
+# Comma-separated white list of origins for CORS (cross-origin resource sharing).
+cors:
+
+# List of this member's peer URLs to advertise to the rest of the cluster.
+# The URLs need to be given as a comma-separated list.
+initial-advertise-peer-urls: http://localhost:2380
+
+# List of this member's client URLs to advertise to the public.
+# The URLs need to be given as a comma-separated list.
+advertise-client-urls: http://localhost:2379
+
+# Discovery URL used to bootstrap the cluster.
+discovery:
+
+# Valid values include 'exit', 'proxy'
+discovery-fallback: 'proxy'
+
+# HTTP proxy to use for traffic to discovery service.
+discovery-proxy:
+
+# DNS domain used to bootstrap initial cluster.
+discovery-srv:
+
+# Initial cluster configuration for bootstrapping.
+initial-cluster:
+
+# Initial cluster token for the etcd cluster during bootstrap.
+initial-cluster-token: 'etcd-cluster'
+
+# Initial cluster state ('new' or 'existing').
+initial-cluster-state: 'new'
+
+# Reject reconfiguration requests that would cause quorum loss.
+strict-reconfig-check: false
+
+# Accept etcd V2 client requests
+enable-v2: true
+
+# Enable runtime profiling data via HTTP server
+enable-pprof: true
+
+# Valid values include 'on', 'readonly', 'off'
+proxy: 'off'
+
+# Time (in milliseconds) an endpoint will be held in a failed state.
+proxy-failure-wait: 5000
+
+# Time (in milliseconds) of the endpoints refresh interval.
+proxy-refresh-interval: 30000
+
+# Time (in milliseconds) for a dial to timeout.
+proxy-dial-timeout: 1000
+
+# Time (in milliseconds) for a write to timeout.
+proxy-write-timeout: 5000
+
+# Time (in milliseconds) for a read to timeout.
+proxy-read-timeout: 0
+
+client-transport-security:
+  # DEPRECATED: Path to the client server TLS CA file.
+  ca-file:
+
+  # Path to the client server TLS cert file.
+  cert-file:
+
+  # Path to the client server TLS key file.
+  key-file:
+
+  # Enable client cert authentication.
+  client-cert-auth: false
+
+  # Path to the client server TLS trusted CA cert file.
+  trusted-ca-file:
+
+  # Client TLS using generated certificates
+  auto-tls: false
+
+peer-transport-security:
+  # DEPRECATED: Path to the peer server TLS CA file.
+  ca-file:
+
+  # Path to the peer server TLS cert file.
+  cert-file:
+
+  # Path to the peer server TLS key file.
+  key-file:
+
+  # Enable peer client cert authentication.
+  peer-client-cert-auth: false
+
+  # Path to the peer server TLS trusted CA cert file.
+  trusted-ca-file:
+
+  # Peer TLS using generated certificates.
+  auto-tls: false
+
+# Enable debug-level logging for etcd.
+debug: false
+
+# Specify a particular log level for each etcd package (eg: 'etcdmain=CRITICAL,etcdserver=DEBUG').
+log-package-levels:
+
+# Specify 'stdout' or 'stderr' to skip journald logging even when running under systemd.
+log-output: default
+
+# Force to create a new one-member cluster.
+force-new-cluster: false
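+
+# To use this file, point etcd at it with the --config-file flag, e.g.
+# (illustrative path):
+#   etcd --config-file /etc/etcd/etcd.conf.yml
+# Note that when a config file is given, flag and ETCD_* environment
+# settings for the same options are superseded by the file's values
+# (see etcdmain/config.go).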
diff --git a/vendor/github.com/coreos/etcd/etcdmain/config.go b/vendor/github.com/coreos/etcd/etcdmain/config.go
new file mode 100644
index 0000000..2a5faa7
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdmain/config.go
@@ -0,0 +1,346 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Every change should be reflected in help.go as well.
+
+package etcdmain
+
+import (
+	"flag"
+	"fmt"
+	"io/ioutil"
+	"net/url"
+	"os"
+	"runtime"
+	"strings"
+
+	"github.com/coreos/etcd/embed"
+	"github.com/coreos/etcd/pkg/flags"
+	"github.com/coreos/etcd/pkg/types"
+	"github.com/coreos/etcd/version"
+
+	"github.com/ghodss/yaml"
+)
+
+var (
+	proxyFlagOff      = "off"
+	proxyFlagReadonly = "readonly"
+	proxyFlagOn       = "on"
+
+	fallbackFlagExit  = "exit"
+	fallbackFlagProxy = "proxy"
+
+	ignored = []string{
+		"cluster-active-size",
+		"cluster-remove-delay",
+		"cluster-sync-interval",
+		"config",
+		"force",
+		"max-result-buffer",
+		"max-retry-attempts",
+		"peer-heartbeat-interval",
+		"peer-election-timeout",
+		"retry-interval",
+		"snapshot",
+		"v",
+		"vv",
+		// for coverage testing
+		"test.coverprofile",
+		"test.outputdir",
+	}
+)
+
+type configProxy struct {
+	ProxyFailureWaitMs     uint `json:"proxy-failure-wait"`
+	ProxyRefreshIntervalMs uint `json:"proxy-refresh-interval"`
+	ProxyDialTimeoutMs     uint `json:"proxy-dial-timeout"`
+	ProxyWriteTimeoutMs    uint `json:"proxy-write-timeout"`
+	ProxyReadTimeoutMs     uint `json:"proxy-read-timeout"`
+	Fallback               string
+	Proxy                  string
+	ProxyJSON              string `json:"proxy"`
+	FallbackJSON           string `json:"discovery-fallback"`
+}
+
+// config holds the config for a command line invocation of etcd
+type config struct {
+	ec           embed.Config
+	cp           configProxy
+	cf           configFlags
+	configFile   string
+	printVersion bool
+	ignored      []string
+}
+
+// configFlags has the set of flags used for command-line parsing of a config
+type configFlags struct {
+	flagSet      *flag.FlagSet
+	clusterState *flags.StringsFlag
+	fallback     *flags.StringsFlag
+	proxy        *flags.StringsFlag
+}
+
+func newConfig() *config {
+	cfg := &config{
+		ec: *embed.NewConfig(),
+		cp: configProxy{
+			Proxy:                  proxyFlagOff,
+			ProxyFailureWaitMs:     5000,
+			ProxyRefreshIntervalMs: 30000,
+			ProxyDialTimeoutMs:     1000,
+			ProxyWriteTimeoutMs:    5000,
+		},
+		ignored: ignored,
+	}
+	cfg.cf = configFlags{
+		flagSet: flag.NewFlagSet("etcd", flag.ContinueOnError),
+		clusterState: flags.NewStringsFlag(
+			embed.ClusterStateFlagNew,
+			embed.ClusterStateFlagExisting,
+		),
+		fallback: flags.NewStringsFlag(
+			fallbackFlagProxy,
+			fallbackFlagExit,
+		),
+		proxy: flags.NewStringsFlag(
+			proxyFlagOff,
+			proxyFlagReadonly,
+			proxyFlagOn,
+		),
+	}
+
+	fs := cfg.cf.flagSet
+	fs.Usage = func() {
+		fmt.Fprintln(os.Stderr, usageline)
+	}
+
+	fs.StringVar(&cfg.configFile, "config-file", "", "Path to the server configuration file")
+
+	// member
+	fs.Var(cfg.ec.CorsInfo, "cors", "Comma-separated white list of origins for CORS (cross-origin resource sharing).")
+	fs.StringVar(&cfg.ec.Dir, "data-dir", cfg.ec.Dir, "Path to the data directory.")
+	fs.StringVar(&cfg.ec.WalDir, "wal-dir", cfg.ec.WalDir, "Path to the dedicated wal directory.")
+	fs.Var(flags.NewURLsValue(embed.DefaultListenPeerURLs), "listen-peer-urls", "List of URLs to listen on for peer traffic.")
+	fs.Var(flags.NewURLsValue(embed.DefaultListenClientURLs), "listen-client-urls", "List of URLs to listen on for client traffic.")
+	fs.StringVar(&cfg.ec.ListenMetricsUrlsJSON, "listen-metrics-urls", "", "List of URLs to listen on for metrics.")
+	fs.UintVar(&cfg.ec.MaxSnapFiles, "max-snapshots", cfg.ec.MaxSnapFiles, "Maximum number of snapshot files to retain (0 is unlimited).")
+	fs.UintVar(&cfg.ec.MaxWalFiles, "max-wals", cfg.ec.MaxWalFiles, "Maximum number of wal files to retain (0 is unlimited).")
+	fs.StringVar(&cfg.ec.Name, "name", cfg.ec.Name, "Human-readable name for this member.")
+	fs.Uint64Var(&cfg.ec.SnapCount, "snapshot-count", cfg.ec.SnapCount, "Number of committed transactions to trigger a snapshot to disk.")
+	fs.UintVar(&cfg.ec.TickMs, "heartbeat-interval", cfg.ec.TickMs, "Time (in milliseconds) of a heartbeat interval.")
+	fs.UintVar(&cfg.ec.ElectionMs, "election-timeout", cfg.ec.ElectionMs, "Time (in milliseconds) for an election to timeout.")
+	fs.BoolVar(&cfg.ec.InitialElectionTickAdvance, "initial-election-tick-advance", cfg.ec.InitialElectionTickAdvance, "Whether to fast-forward initial election ticks on boot for faster election.")
+	fs.Int64Var(&cfg.ec.QuotaBackendBytes, "quota-backend-bytes", cfg.ec.QuotaBackendBytes, "Raise alarms when backend size exceeds the given quota. 0 means use the default quota.")
+	fs.UintVar(&cfg.ec.MaxTxnOps, "max-txn-ops", cfg.ec.MaxTxnOps, "Maximum number of operations permitted in a transaction.")
+	fs.UintVar(&cfg.ec.MaxRequestBytes, "max-request-bytes", cfg.ec.MaxRequestBytes, "Maximum client request size in bytes the server will accept.")
+	fs.DurationVar(&cfg.ec.GRPCKeepAliveMinTime, "grpc-keepalive-min-time", cfg.ec.GRPCKeepAliveMinTime, "Minimum interval duration that a client should wait before pinging server.")
+	fs.DurationVar(&cfg.ec.GRPCKeepAliveInterval, "grpc-keepalive-interval", cfg.ec.GRPCKeepAliveInterval, "Frequency duration of server-to-client ping to check if a connection is alive (0 to disable).")
+	fs.DurationVar(&cfg.ec.GRPCKeepAliveTimeout, "grpc-keepalive-timeout", cfg.ec.GRPCKeepAliveTimeout, "Additional duration of wait before closing a non-responsive connection (0 to disable).")
+
+	// clustering
+	fs.Var(flags.NewURLsValue(embed.DefaultInitialAdvertisePeerURLs), "initial-advertise-peer-urls", "List of this member's peer URLs to advertise to the rest of the cluster.")
+	fs.Var(flags.NewURLsValue(embed.DefaultAdvertiseClientURLs), "advertise-client-urls", "List of this member's client URLs to advertise to the public.")
+	fs.StringVar(&cfg.ec.Durl, "discovery", cfg.ec.Durl, "Discovery URL used to bootstrap the cluster.")
+	fs.Var(cfg.cf.fallback, "discovery-fallback", fmt.Sprintf("Valid values include %s", strings.Join(cfg.cf.fallback.Values, ", ")))
+
+	fs.StringVar(&cfg.ec.Dproxy, "discovery-proxy", cfg.ec.Dproxy, "HTTP proxy to use for traffic to discovery service.")
+	fs.StringVar(&cfg.ec.DNSCluster, "discovery-srv", cfg.ec.DNSCluster, "DNS domain used to bootstrap initial cluster.")
+	fs.StringVar(&cfg.ec.InitialCluster, "initial-cluster", cfg.ec.InitialCluster, "Initial cluster configuration for bootstrapping.")
+	fs.StringVar(&cfg.ec.InitialClusterToken, "initial-cluster-token", cfg.ec.InitialClusterToken, "Initial cluster token for the etcd cluster during bootstrap.")
+	fs.Var(cfg.cf.clusterState, "initial-cluster-state", "Initial cluster state ('new' or 'existing').")
+
+	fs.BoolVar(&cfg.ec.StrictReconfigCheck, "strict-reconfig-check", cfg.ec.StrictReconfigCheck, "Reject reconfiguration requests that would cause quorum loss.")
+	fs.BoolVar(&cfg.ec.EnableV2, "enable-v2", cfg.ec.EnableV2, "Accept etcd V2 client requests.")
+	fs.StringVar(&cfg.ec.ExperimentalEnableV2V3, "experimental-enable-v2v3", cfg.ec.ExperimentalEnableV2V3, "v3 prefix for serving emulated v2 state.")
+
+	// proxy
+	fs.Var(cfg.cf.proxy, "proxy", fmt.Sprintf("Valid values include %s", strings.Join(cfg.cf.proxy.Values, ", ")))
+
+	fs.UintVar(&cfg.cp.ProxyFailureWaitMs, "proxy-failure-wait", cfg.cp.ProxyFailureWaitMs, "Time (in milliseconds) an endpoint will be held in a failed state.")
+	fs.UintVar(&cfg.cp.ProxyRefreshIntervalMs, "proxy-refresh-interval", cfg.cp.ProxyRefreshIntervalMs, "Time (in milliseconds) of the endpoints refresh interval.")
+	fs.UintVar(&cfg.cp.ProxyDialTimeoutMs, "proxy-dial-timeout", cfg.cp.ProxyDialTimeoutMs, "Time (in milliseconds) for a dial to timeout.")
+	fs.UintVar(&cfg.cp.ProxyWriteTimeoutMs, "proxy-write-timeout", cfg.cp.ProxyWriteTimeoutMs, "Time (in milliseconds) for a write to timeout.")
+	fs.UintVar(&cfg.cp.ProxyReadTimeoutMs, "proxy-read-timeout", cfg.cp.ProxyReadTimeoutMs, "Time (in milliseconds) for a read to timeout.")
+
+	// security
+	fs.StringVar(&cfg.ec.ClientTLSInfo.CAFile, "ca-file", "", "DEPRECATED: Path to the client server TLS CA file.")
+	fs.StringVar(&cfg.ec.ClientTLSInfo.CertFile, "cert-file", "", "Path to the client server TLS cert file.")
+	fs.StringVar(&cfg.ec.ClientTLSInfo.KeyFile, "key-file", "", "Path to the client server TLS key file.")
+	fs.BoolVar(&cfg.ec.ClientTLSInfo.ClientCertAuth, "client-cert-auth", false, "Enable client cert authentication.")
+	fs.StringVar(&cfg.ec.ClientTLSInfo.CRLFile, "client-crl-file", "", "Path to the client certificate revocation list file.")
+	fs.StringVar(&cfg.ec.ClientTLSInfo.TrustedCAFile, "trusted-ca-file", "", "Path to the client server TLS trusted CA cert file.")
+	fs.BoolVar(&cfg.ec.ClientAutoTLS, "auto-tls", false, "Client TLS using generated certificates")
+	fs.StringVar(&cfg.ec.PeerTLSInfo.CAFile, "peer-ca-file", "", "DEPRECATED: Path to the peer server TLS CA file.")
+	fs.StringVar(&cfg.ec.PeerTLSInfo.CertFile, "peer-cert-file", "", "Path to the peer server TLS cert file.")
+	fs.StringVar(&cfg.ec.PeerTLSInfo.KeyFile, "peer-key-file", "", "Path to the peer server TLS key file.")
+	fs.BoolVar(&cfg.ec.PeerTLSInfo.ClientCertAuth, "peer-client-cert-auth", false, "Enable peer client cert authentication.")
+	fs.StringVar(&cfg.ec.PeerTLSInfo.TrustedCAFile, "peer-trusted-ca-file", "", "Path to the peer server TLS trusted CA file.")
+	fs.BoolVar(&cfg.ec.PeerAutoTLS, "peer-auto-tls", false, "Peer TLS using generated certificates")
+	fs.StringVar(&cfg.ec.PeerTLSInfo.CRLFile, "peer-crl-file", "", "Path to the peer certificate revocation list file.")
+	fs.StringVar(&cfg.ec.PeerTLSInfo.AllowedCN, "peer-cert-allowed-cn", "", "Allowed CN for inter peer authentication.")
+
+	fs.Var(flags.NewStringsValueV2(""), "cipher-suites", "Comma-separated list of supported TLS cipher suites between client/server and peers (empty will be auto-populated by Go).")
+
+	// logging
+	fs.BoolVar(&cfg.ec.Debug, "debug", false, "Enable debug-level logging for etcd.")
+	fs.StringVar(&cfg.ec.LogPkgLevels, "log-package-levels", "", "Specify a particular log level for each etcd package (eg: 'etcdmain=CRITICAL,etcdserver=DEBUG').")
+	fs.StringVar(&cfg.ec.LogOutput, "log-output", embed.DefaultLogOutput, "Specify 'stdout' or 'stderr' to skip journald logging even when running under systemd.")
+
+	// unsafe
+	fs.BoolVar(&cfg.ec.ForceNewCluster, "force-new-cluster", false, "Force to create a new one-member cluster.")
+
+	// version
+	fs.BoolVar(&cfg.printVersion, "version", false, "Print the version and exit.")
+
+	fs.StringVar(&cfg.ec.AutoCompactionRetention, "auto-compaction-retention", "0", "Auto compaction retention for mvcc key value store. 0 means disable auto compaction.")
+	fs.StringVar(&cfg.ec.AutoCompactionMode, "auto-compaction-mode", "periodic", "Interpret 'auto-compaction-retention' as one of: periodic|revision. 'periodic' for duration based retention, defaulting to hours if no time unit is provided (e.g. '5m'). 'revision' for revision number based retention.")
+
+	// pprof profiler via HTTP
+	fs.BoolVar(&cfg.ec.EnablePprof, "enable-pprof", false, "Enable runtime profiling data via HTTP server. Address is at client URL + \"/debug/pprof/\"")
+
+	// additional metrics
+	fs.StringVar(&cfg.ec.Metrics, "metrics", cfg.ec.Metrics, "Set level of detail for exported metrics, specify 'extensive' to include histogram metrics")
+
+	// auth
+	fs.StringVar(&cfg.ec.AuthToken, "auth-token", cfg.ec.AuthToken, "Specify auth token specific options.")
+
+	// experimental
+	fs.BoolVar(&cfg.ec.ExperimentalInitialCorruptCheck, "experimental-initial-corrupt-check", cfg.ec.ExperimentalInitialCorruptCheck, "Enable to check data corruption before serving any client/peer traffic.")
+	fs.DurationVar(&cfg.ec.ExperimentalCorruptCheckTime, "experimental-corrupt-check-time", cfg.ec.ExperimentalCorruptCheckTime, "Duration of time between cluster corruption check passes.")
+
+	// ignored
+	for _, f := range cfg.ignored {
+		fs.Var(&flags.IgnoredFlag{Name: f}, f, "")
+	}
+	return cfg
+}
+
+func (cfg *config) parse(arguments []string) error {
+	perr := cfg.cf.flagSet.Parse(arguments)
+	switch perr {
+	case nil:
+	case flag.ErrHelp:
+		fmt.Println(flagsline)
+		os.Exit(0)
+	default:
+		os.Exit(2)
+	}
+	if len(cfg.cf.flagSet.Args()) != 0 {
+		return fmt.Errorf("'%s' is not a valid flag", cfg.cf.flagSet.Arg(0))
+	}
+
+	if cfg.printVersion {
+		fmt.Printf("etcd Version: %s\n", version.Version)
+		fmt.Printf("Git SHA: %s\n", version.GitSHA)
+		fmt.Printf("Go Version: %s\n", runtime.Version())
+		fmt.Printf("Go OS/Arch: %s/%s\n", runtime.GOOS, runtime.GOARCH)
+		os.Exit(0)
+	}
+
+	var err error
+	if cfg.configFile != "" {
+		plog.Infof("Loading server configuration from %q", cfg.configFile)
+		err = cfg.configFromFile(cfg.configFile)
+	} else {
+		err = cfg.configFromCmdLine()
+	}
+	return err
+}
+
+func (cfg *config) configFromCmdLine() error {
+	err := flags.SetFlagsFromEnv("ETCD", cfg.cf.flagSet)
+	if err != nil {
+		plog.Fatalf("%v", err)
+	}
+
+	cfg.ec.LPUrls = flags.URLsFromFlag(cfg.cf.flagSet, "listen-peer-urls")
+	cfg.ec.APUrls = flags.URLsFromFlag(cfg.cf.flagSet, "initial-advertise-peer-urls")
+	cfg.ec.LCUrls = flags.URLsFromFlag(cfg.cf.flagSet, "listen-client-urls")
+	cfg.ec.ACUrls = flags.URLsFromFlag(cfg.cf.flagSet, "advertise-client-urls")
+
+	if len(cfg.ec.ListenMetricsUrlsJSON) > 0 {
+		u, err := types.NewURLs(strings.Split(cfg.ec.ListenMetricsUrlsJSON, ","))
+		if err != nil {
+			plog.Fatalf("unexpected error setting up listen-metrics-urls: %v", err)
+		}
+		cfg.ec.ListenMetricsUrls = []url.URL(u)
+	}
+
+	cfg.ec.CipherSuites = flags.StringsFromFlagV2(cfg.cf.flagSet, "cipher-suites")
+
+	cfg.ec.ClusterState = cfg.cf.clusterState.String()
+	cfg.cp.Fallback = cfg.cf.fallback.String()
+	cfg.cp.Proxy = cfg.cf.proxy.String()
+
+	// disable default advertise-client-urls if lcurls is set
+	missingAC := flags.IsSet(cfg.cf.flagSet, "listen-client-urls") && !flags.IsSet(cfg.cf.flagSet, "advertise-client-urls")
+	if !cfg.mayBeProxy() && missingAC {
+		cfg.ec.ACUrls = nil
+	}
+
+	// disable default initial-cluster if discovery is set
+	if (cfg.ec.Durl != "" || cfg.ec.DNSCluster != "") && !flags.IsSet(cfg.cf.flagSet, "initial-cluster") {
+		cfg.ec.InitialCluster = ""
+	}
+
+	return cfg.validate()
+}
+
+func (cfg *config) configFromFile(path string) error {
+	eCfg, err := embed.ConfigFromFile(path)
+	if err != nil {
+		return err
+	}
+	cfg.ec = *eCfg
+
+	// load extra config information
+	b, rerr := ioutil.ReadFile(path)
+	if rerr != nil {
+		return rerr
+	}
+	if yerr := yaml.Unmarshal(b, &cfg.cp); yerr != nil {
+		return yerr
+	}
+	if cfg.cp.FallbackJSON != "" {
+		if err := cfg.cf.fallback.Set(cfg.cp.FallbackJSON); err != nil {
+			plog.Panicf("unexpected error setting up discovery-fallback flag: %v", err)
+		}
+		cfg.cp.Fallback = cfg.cf.fallback.String()
+	}
+	if cfg.cp.ProxyJSON != "" {
+		if err := cfg.cf.proxy.Set(cfg.cp.ProxyJSON); err != nil {
+			plog.Panicf("unexpected error setting up proxyFlag: %v", err)
+		}
+		cfg.cp.Proxy = cfg.cf.proxy.String()
+	}
+	return nil
+}
+
+func (cfg *config) mayBeProxy() bool {
+	mayFallbackToProxy := cfg.ec.Durl != "" && cfg.cp.Fallback == fallbackFlagProxy
+	return cfg.cp.Proxy != proxyFlagOff || mayFallbackToProxy
+}
+
+func (cfg *config) validate() error {
+	err := cfg.ec.Validate()
+	// TODO(yichengq): check this for joining through discovery service case
+	if err == embed.ErrUnsetAdvertiseClientURLsFlag && cfg.mayBeProxy() {
+		return nil
+	}
+	return err
+}
+
+func (cfg config) isProxy() bool               { return cfg.cf.proxy.String() != proxyFlagOff }
+func (cfg config) isReadonlyProxy() bool       { return cfg.cf.proxy.String() == proxyFlagReadonly }
+func (cfg config) shouldFallbackToProxy() bool { return cfg.cf.fallback.String() == fallbackFlagProxy }
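+
+// For illustration of how these combine (hypothetical flag values):
+//
+//	cfg.cf.proxy.Set("readonly")
+//	cfg.isProxy()         // true
+//	cfg.isReadonlyProxy() // true
+//
+// and with --proxy=off, --discovery set, and --discovery-fallback=proxy,
+// mayBeProxy still returns true, allowing fallback to proxy mode when the
+// discovered cluster is already full (see etcd.go).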
diff --git a/vendor/github.com/coreos/etcd/etcdmain/doc.go b/vendor/github.com/coreos/etcd/etcdmain/doc.go
new file mode 100644
index 0000000..ff281aa
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdmain/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package etcdmain contains the main entry point for the etcd binary.
+package etcdmain
diff --git a/vendor/github.com/coreos/etcd/etcdmain/etcd.go b/vendor/github.com/coreos/etcd/etcdmain/etcd.go
new file mode 100644
index 0000000..87e9b25
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdmain/etcd.go
@@ -0,0 +1,399 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdmain
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"os"
+	"path/filepath"
+	"reflect"
+	"runtime"
+	"strings"
+	"time"
+
+	"github.com/coreos/etcd/discovery"
+	"github.com/coreos/etcd/embed"
+	"github.com/coreos/etcd/etcdserver"
+	"github.com/coreos/etcd/etcdserver/api/etcdhttp"
+	"github.com/coreos/etcd/pkg/cors"
+	"github.com/coreos/etcd/pkg/fileutil"
+	pkgioutil "github.com/coreos/etcd/pkg/ioutil"
+	"github.com/coreos/etcd/pkg/osutil"
+	"github.com/coreos/etcd/pkg/transport"
+	"github.com/coreos/etcd/pkg/types"
+	"github.com/coreos/etcd/proxy/httpproxy"
+	"github.com/coreos/etcd/version"
+
+	"github.com/coreos/pkg/capnslog"
+	"google.golang.org/grpc"
+)
+
+type dirType string
+
+var plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdmain")
+
+var (
+	dirMember = dirType("member")
+	dirProxy  = dirType("proxy")
+	dirEmpty  = dirType("empty")
+)
+
+func startEtcdOrProxyV2() {
+	grpc.EnableTracing = false
+
+	cfg := newConfig()
+	defaultInitialCluster := cfg.ec.InitialCluster
+
+	err := cfg.parse(os.Args[1:])
+	if err != nil {
+		plog.Errorf("error verifying flags, %v. See 'etcd --help'.", err)
+		switch err {
+		case embed.ErrUnsetAdvertiseClientURLsFlag:
+			plog.Errorf("When listening on specific address(es), this etcd process must advertise accessible url(s) to each connected client.")
+		}
+		os.Exit(1)
+	}
+	cfg.ec.SetupLogging()
+
+	var stopped <-chan struct{}
+	var errc <-chan error
+
+	plog.Infof("etcd Version: %s\n", version.Version)
+	plog.Infof("Git SHA: %s\n", version.GitSHA)
+	plog.Infof("Go Version: %s\n", runtime.Version())
+	plog.Infof("Go OS/Arch: %s/%s\n", runtime.GOOS, runtime.GOARCH)
+
+	maxProcs := runtime.GOMAXPROCS(0)
+	plog.Infof("setting maximum number of CPUs to %d, total number of available CPUs is %d", maxProcs, runtime.NumCPU())
+
+	defaultHost, dhErr := (&cfg.ec).UpdateDefaultClusterFromName(defaultInitialCluster)
+	if defaultHost != "" {
+		plog.Infof("advertising using detected default host %q", defaultHost)
+	}
+	if dhErr != nil {
+		plog.Noticef("failed to detect default host (%v)", dhErr)
+	}
+
+	if cfg.ec.Dir == "" {
+		cfg.ec.Dir = fmt.Sprintf("%v.etcd", cfg.ec.Name)
+		plog.Warningf("no data-dir provided, using default data-dir ./%s", cfg.ec.Dir)
+	}
+
+	which := identifyDataDirOrDie(cfg.ec.Dir)
+	if which != dirEmpty {
+		plog.Noticef("the server is already initialized as %v before, starting as etcd %v...", which, which)
+		switch which {
+		case dirMember:
+			stopped, errc, err = startEtcd(&cfg.ec)
+		case dirProxy:
+			err = startProxy(cfg)
+		default:
+			plog.Panicf("unhandled dir type %v", which)
+		}
+	} else {
+		shouldProxy := cfg.isProxy()
+		if !shouldProxy {
+			stopped, errc, err = startEtcd(&cfg.ec)
+			if derr, ok := err.(*etcdserver.DiscoveryError); ok && derr.Err == discovery.ErrFullCluster {
+				if cfg.shouldFallbackToProxy() {
+					plog.Noticef("discovery cluster full, falling back to %s", fallbackFlagProxy)
+					shouldProxy = true
+				}
+			}
+		}
+		if shouldProxy {
+			err = startProxy(cfg)
+		}
+	}
+
+	if err != nil {
+		if derr, ok := err.(*etcdserver.DiscoveryError); ok {
+			switch derr.Err {
+			case discovery.ErrDuplicateID:
+				plog.Errorf("member %q has previously registered with discovery service token (%s).", cfg.ec.Name, cfg.ec.Durl)
+				plog.Errorf("But etcd could not find valid cluster configuration in the given data dir (%s).", cfg.ec.Dir)
+				plog.Infof("Please check the given data dir path if the previous bootstrap succeeded")
+				plog.Infof("or use a new discovery token if the previous bootstrap failed.")
+			case discovery.ErrDuplicateName:
+				plog.Errorf("member with duplicated name has registered with discovery service token(%s).", cfg.ec.Durl)
+				plog.Errorf("please check (cURL) the discovery token for more information.")
+				plog.Errorf("please do not reuse the discovery token and generate a new one to bootstrap the cluster.")
+			default:
+				plog.Errorf("%v", err)
+				plog.Infof("discovery token %s was used, but failed to bootstrap the cluster.", cfg.ec.Durl)
+				plog.Infof("please generate a new discovery token and try to bootstrap again.")
+			}
+			os.Exit(1)
+		}
+
+		if strings.Contains(err.Error(), "include") && strings.Contains(err.Error(), "--initial-cluster") {
+			plog.Infof("%v", err)
+			if cfg.ec.InitialCluster == cfg.ec.InitialClusterFromName(cfg.ec.Name) {
+				plog.Infof("forgot to set --initial-cluster flag?")
+			}
+			if types.URLs(cfg.ec.APUrls).String() == embed.DefaultInitialAdvertisePeerURLs {
+				plog.Infof("forgot to set --initial-advertise-peer-urls flag?")
+			}
+			if cfg.ec.InitialCluster == cfg.ec.InitialClusterFromName(cfg.ec.Name) && len(cfg.ec.Durl) == 0 {
+				plog.Infof("if you want to use discovery service, please set --discovery flag.")
+			}
+			os.Exit(1)
+		}
+		plog.Fatalf("%v", err)
+	}
+
+	osutil.HandleInterrupts()
+
+	// At this point, the initialization of etcd is done.
+	// The listeners are listening on the TCP ports and ready
+	// for accepting connections. The etcd instance should be
+	// joined with the cluster and ready to serve incoming
+	// connections.
+	notifySystemd()
+
+	select {
+	case lerr := <-errc:
+		// fatal out on listener errors
+		plog.Fatal(lerr)
+	case <-stopped:
+	}
+
+	osutil.Exit(0)
+}
+
+// startEtcd runs StartEtcd in addition to hooks needed for standalone etcd.
+func startEtcd(cfg *embed.Config) (<-chan struct{}, <-chan error, error) {
+	e, err := embed.StartEtcd(cfg)
+	if err != nil {
+		return nil, nil, err
+	}
+	osutil.RegisterInterruptHandler(e.Close)
+	select {
+	case <-e.Server.ReadyNotify(): // wait for e.Server to join the cluster
+	case <-e.Server.StopNotify(): // publish aborted from 'ErrStopped'
+	}
+	return e.Server.StopNotify(), e.Err(), nil
+}
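+
+// A minimal embedding sketch following the same pattern (hypothetical
+// standalone caller; the data dir value is illustrative):
+//
+//	cfg := embed.NewConfig()
+//	cfg.Dir = "default.etcd"
+//	e, err := embed.StartEtcd(cfg)
+//	if err != nil {
+//		plog.Fatalf("%v", err)
+//	}
+//	defer e.Close()
+//	<-e.Server.ReadyNotify() // block until the member has joined the cluster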
+
+// startProxy launches an HTTP proxy for client communication which proxies to other etcd nodes.
+func startProxy(cfg *config) error {
+	plog.Notice("proxy: this proxy supports v2 API only!")
+
+	clientTLSInfo := cfg.ec.ClientTLSInfo
+	if clientTLSInfo.Empty() {
+		// Support old proxy behavior of defaulting to PeerTLSInfo
+		// for both client and peer connections.
+		clientTLSInfo = cfg.ec.PeerTLSInfo
+	}
+	clientTLSInfo.InsecureSkipVerify = cfg.ec.ClientAutoTLS
+	cfg.ec.PeerTLSInfo.InsecureSkipVerify = cfg.ec.PeerAutoTLS
+
+	pt, err := transport.NewTimeoutTransport(clientTLSInfo, time.Duration(cfg.cp.ProxyDialTimeoutMs)*time.Millisecond, time.Duration(cfg.cp.ProxyReadTimeoutMs)*time.Millisecond, time.Duration(cfg.cp.ProxyWriteTimeoutMs)*time.Millisecond)
+	if err != nil {
+		return err
+	}
+	pt.MaxIdleConnsPerHost = httpproxy.DefaultMaxIdleConnsPerHost
+
+	if err = cfg.ec.PeerSelfCert(); err != nil {
+		plog.Fatalf("could not get certs (%v)", err)
+	}
+	tr, err := transport.NewTimeoutTransport(cfg.ec.PeerTLSInfo, time.Duration(cfg.cp.ProxyDialTimeoutMs)*time.Millisecond, time.Duration(cfg.cp.ProxyReadTimeoutMs)*time.Millisecond, time.Duration(cfg.cp.ProxyWriteTimeoutMs)*time.Millisecond)
+	if err != nil {
+		return err
+	}
+
+	cfg.ec.Dir = filepath.Join(cfg.ec.Dir, "proxy")
+	err = os.MkdirAll(cfg.ec.Dir, fileutil.PrivateDirMode)
+	if err != nil {
+		return err
+	}
+
+	var peerURLs []string
+	clusterfile := filepath.Join(cfg.ec.Dir, "cluster")
+
+	b, err := ioutil.ReadFile(clusterfile)
+	switch {
+	case err == nil:
+		if cfg.ec.Durl != "" {
+			plog.Warningf("discovery token ignored since the proxy has already been initialized. Valid cluster file found at %q", clusterfile)
+		}
+		if cfg.ec.DNSCluster != "" {
+			plog.Warningf("DNS SRV discovery ignored since the proxy has already been initialized. Valid cluster file found at %q", clusterfile)
+		}
+		urls := struct{ PeerURLs []string }{}
+		err = json.Unmarshal(b, &urls)
+		if err != nil {
+			return err
+		}
+		peerURLs = urls.PeerURLs
+		plog.Infof("proxy: using peer urls %v from cluster file %q", peerURLs, clusterfile)
+	case os.IsNotExist(err):
+		var urlsmap types.URLsMap
+		urlsmap, _, err = cfg.ec.PeerURLsMapAndToken("proxy")
+		if err != nil {
+			return fmt.Errorf("error setting up initial cluster: %v", err)
+		}
+
+		if cfg.ec.Durl != "" {
+			var s string
+			s, err = discovery.GetCluster(cfg.ec.Durl, cfg.ec.Dproxy)
+			if err != nil {
+				return err
+			}
+			if urlsmap, err = types.NewURLsMap(s); err != nil {
+				return err
+			}
+		}
+		peerURLs = urlsmap.URLs()
+		plog.Infof("proxy: using peer urls %v ", peerURLs)
+	default:
+		return err
+	}
+
+	clientURLs := []string{}
+	uf := func() []string {
+		gcls, gerr := etcdserver.GetClusterFromRemotePeers(peerURLs, tr)
+
+		if gerr != nil {
+			plog.Warningf("proxy: %v", gerr)
+			return []string{}
+		}
+
+		clientURLs = gcls.ClientURLs()
+
+		urls := struct{ PeerURLs []string }{gcls.PeerURLs()}
+		b, jerr := json.Marshal(urls)
+		if jerr != nil {
+			plog.Warningf("proxy: error on marshal peer urls %s", jerr)
+			return clientURLs
+		}
+
+		err = pkgioutil.WriteAndSyncFile(clusterfile+".bak", b, 0600)
+		if err != nil {
+			plog.Warningf("proxy: error on writing urls %s", err)
+			return clientURLs
+		}
+		err = os.Rename(clusterfile+".bak", clusterfile)
+		if err != nil {
+			plog.Warningf("proxy: error on updating clusterfile %s", err)
+			return clientURLs
+		}
+		if !reflect.DeepEqual(gcls.PeerURLs(), peerURLs) {
+			plog.Noticef("proxy: updated peer urls in cluster file from %v to %v", peerURLs, gcls.PeerURLs())
+		}
+		peerURLs = gcls.PeerURLs()
+
+		return clientURLs
+	}
+	ph := httpproxy.NewHandler(pt, uf, time.Duration(cfg.cp.ProxyFailureWaitMs)*time.Millisecond, time.Duration(cfg.cp.ProxyRefreshIntervalMs)*time.Millisecond)
+	ph = &cors.CORSHandler{
+		Handler: ph,
+		Info:    cfg.ec.CorsInfo,
+	}
+
+	if cfg.isReadonlyProxy() {
+		ph = httpproxy.NewReadonlyHandler(ph)
+	}
+
+	// set up self-signed certs when serving https
+	cHosts, cTLS := []string{}, false
+	for _, u := range cfg.ec.LCUrls {
+		cHosts = append(cHosts, u.Host)
+		cTLS = cTLS || u.Scheme == "https"
+	}
+	for _, u := range cfg.ec.ACUrls {
+		cHosts = append(cHosts, u.Host)
+		cTLS = cTLS || u.Scheme == "https"
+	}
+	listenerTLS := cfg.ec.ClientTLSInfo
+	if cfg.ec.ClientAutoTLS && cTLS {
+		listenerTLS, err = transport.SelfCert(filepath.Join(cfg.ec.Dir, "clientCerts"), cHosts)
+		if err != nil {
+			plog.Fatalf("proxy: could not initialize self-signed client certs (%v)", err)
+		}
+	}
+
+	// Start a proxy server goroutine for each listen address
+	for _, u := range cfg.ec.LCUrls {
+		l, err := transport.NewListener(u.Host, u.Scheme, &listenerTLS)
+		if err != nil {
+			return err
+		}
+
+		host := u.String()
+		go func() {
+			plog.Info("proxy: listening for client requests on ", host)
+			mux := http.NewServeMux()
+			etcdhttp.HandlePrometheus(mux) // v2 proxy just uses the same port
+			mux.Handle("/", ph)
+			plog.Fatal(http.Serve(l, mux))
+		}()
+	}
+	return nil
+}
+
+// identifyDataDirOrDie returns the type of the data dir.
+// Dies if the datadir is invalid.
+func identifyDataDirOrDie(dir string) dirType {
+	names, err := fileutil.ReadDir(dir)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return dirEmpty
+		}
+		plog.Fatalf("error listing data dir: %s", dir)
+	}
+
+	var m, p bool
+	for _, name := range names {
+		switch dirType(name) {
+		case dirMember:
+			m = true
+		case dirProxy:
+			p = true
+		default:
+			plog.Warningf("found invalid file/dir %s under data dir %s (Ignore this if you are upgrading etcd)", name, dir)
+		}
+	}
+
+	if m && p {
+		plog.Fatal("invalid datadir. Both member and proxy directories exist.")
+	}
+	if m {
+		return dirMember
+	}
+	if p {
+		return dirProxy
+	}
+	return dirEmpty
+}
+
+func checkSupportArch() {
+	// TODO qualify arm64
+	if runtime.GOARCH == "amd64" || runtime.GOARCH == "ppc64le" {
+		return
+	}
+	// an unsupported arch can only be configured via the environment
+	// variable, so unset it here so it is not parsed as a flag
+	defer os.Unsetenv("ETCD_UNSUPPORTED_ARCH")
+	if env, ok := os.LookupEnv("ETCD_UNSUPPORTED_ARCH"); ok && env == runtime.GOARCH {
+		plog.Warningf("running etcd on unsupported architecture %q since ETCD_UNSUPPORTED_ARCH is set", env)
+		return
+	}
+	plog.Errorf("etcd on unsupported platform without ETCD_UNSUPPORTED_ARCH=%s set.", runtime.GOARCH)
+	os.Exit(1)
+}
diff --git a/vendor/github.com/coreos/etcd/etcdmain/gateway.go b/vendor/github.com/coreos/etcd/etcdmain/gateway.go
new file mode 100644
index 0000000..5487414
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdmain/gateway.go
@@ -0,0 +1,135 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdmain
+
+import (
+	"fmt"
+	"net"
+	"net/url"
+	"os"
+	"time"
+
+	"github.com/coreos/etcd/proxy/tcpproxy"
+
+	"github.com/spf13/cobra"
+)
+
+var (
+	gatewayListenAddr        string
+	gatewayEndpoints         []string
+	gatewayDNSCluster        string
+	gatewayInsecureDiscovery bool
+	gatewayRetryDelay        time.Duration
+	gatewayCA                string
+)
+
+var (
+	rootCmd = &cobra.Command{
+		Use:        "etcd",
+		Short:      "etcd server",
+		SuggestFor: []string{"etcd"},
+	}
+)
+
+func init() {
+	rootCmd.AddCommand(newGatewayCommand())
+}
+
+// newGatewayCommand returns the cobra command for "gateway".
+func newGatewayCommand() *cobra.Command {
+	lpc := &cobra.Command{
+		Use:   "gateway <subcommand>",
+		Short: "gateway related command",
+	}
+	lpc.AddCommand(newGatewayStartCommand())
+
+	return lpc
+}
+
+func newGatewayStartCommand() *cobra.Command {
+	cmd := cobra.Command{
+		Use:   "start",
+		Short: "start the gateway",
+		Run:   startGateway,
+	}
+
+	cmd.Flags().StringVar(&gatewayListenAddr, "listen-addr", "127.0.0.1:23790", "listen address")
+	cmd.Flags().StringVar(&gatewayDNSCluster, "discovery-srv", "", "DNS domain used to bootstrap initial cluster")
+	cmd.Flags().BoolVar(&gatewayInsecureDiscovery, "insecure-discovery", false, "accept insecure SRV records")
+	cmd.Flags().StringVar(&gatewayCA, "trusted-ca-file", "", "path to the client server TLS CA file.")
+
+	cmd.Flags().StringSliceVar(&gatewayEndpoints, "endpoints", []string{"127.0.0.1:2379"}, "comma separated etcd cluster endpoints")
+
+	cmd.Flags().DurationVar(&gatewayRetryDelay, "retry-delay", time.Minute, "duration of delay before retrying failed endpoints")
+
+	return &cmd
+}
+
+func stripSchema(eps []string) []string {
+	var endpoints []string
+
+	for _, ep := range eps {
+
+		if u, err := url.Parse(ep); err == nil && u.Host != "" {
+			ep = u.Host
+		}
+
+		endpoints = append(endpoints, ep)
+	}
+
+	return endpoints
+}
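+
+// For example (illustrative endpoints):
+//
+//	stripSchema([]string{"https://10.0.0.1:2379", "10.0.0.2:2379"})
+//
+// returns []string{"10.0.0.1:2379", "10.0.0.2:2379"}: URLs lose their
+// scheme while bare host:port endpoints pass through unchanged.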
+
+func startGateway(cmd *cobra.Command, args []string) {
+	srvs := discoverEndpoints(gatewayDNSCluster, gatewayCA, gatewayInsecureDiscovery)
+	if len(srvs.Endpoints) == 0 {
+		// no endpoints discovered, fall back to provided endpoints
+		srvs.Endpoints = gatewayEndpoints
+	}
+	// Strip the scheme from the endpoints because we start just a TCP proxy
+	srvs.Endpoints = stripSchema(srvs.Endpoints)
+	if len(srvs.SRVs) == 0 {
+		for _, ep := range srvs.Endpoints {
+			h, p, err := net.SplitHostPort(ep)
+			if err != nil {
+				plog.Fatalf("error parsing endpoint %q", ep)
+			}
+			var port uint16
+			fmt.Sscanf(p, "%d", &port)
+			srvs.SRVs = append(srvs.SRVs, &net.SRV{Target: h, Port: port})
+		}
+	}
+
+	if len(srvs.Endpoints) == 0 {
+		plog.Fatalf("no endpoints found")
+	}
+
+	l, err := net.Listen("tcp", gatewayListenAddr)
+	if err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		os.Exit(1)
+	}
+
+	tp := tcpproxy.TCPProxy{
+		Listener:        l,
+		Endpoints:       srvs.SRVs,
+		MonitorInterval: gatewayRetryDelay,
+	}
+
+	// At this point, etcd gateway listener is initialized
+	notifySystemd()
+
+	tp.Run()
+}
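+
+// A typical invocation (illustrative endpoints) wires the flags above
+// into the TCPProxy:
+//
+//	etcd gateway start --listen-addr=127.0.0.1:23790 \
+//		--endpoints=10.0.0.1:2379,10.0.0.2:2379 --retry-delay=1m
+//
+// The gateway then forwards raw TCP from listen-addr to the endpoints,
+// retrying failed endpoints after each retry-delay.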
diff --git a/vendor/github.com/coreos/etcd/etcdmain/grpc_proxy.go b/vendor/github.com/coreos/etcd/etcdmain/grpc_proxy.go
new file mode 100644
index 0000000..6a8e39f
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdmain/grpc_proxy.go
@@ -0,0 +1,399 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdmain
+
+import (
+	"context"
+	"fmt"
+	"io/ioutil"
+	"math"
+	"net"
+	"net/http"
+	"net/url"
+	"os"
+	"path/filepath"
+	"time"
+
+	"github.com/coreos/etcd/clientv3"
+	"github.com/coreos/etcd/clientv3/leasing"
+	"github.com/coreos/etcd/clientv3/namespace"
+	"github.com/coreos/etcd/clientv3/ordering"
+	"github.com/coreos/etcd/etcdserver/api/etcdhttp"
+	"github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb"
+	"github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb"
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+	"github.com/coreos/etcd/pkg/debugutil"
+	"github.com/coreos/etcd/pkg/transport"
+	"github.com/coreos/etcd/proxy/grpcproxy"
+
+	"github.com/coreos/pkg/capnslog"
+	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
+	"github.com/soheilhy/cmux"
+	"github.com/spf13/cobra"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/grpclog"
+)
+
+var (
+	grpcProxyListenAddr        string
+	grpcProxyMetricsListenAddr string
+	grpcProxyEndpoints         []string
+	grpcProxyDNSCluster        string
+	grpcProxyInsecureDiscovery bool
+	grpcProxyDataDir           string
+	grpcMaxCallSendMsgSize     int
+	grpcMaxCallRecvMsgSize     int
+
+	// tls for connecting to etcd
+
+	grpcProxyCA                    string
+	grpcProxyCert                  string
+	grpcProxyKey                   string
+	grpcProxyInsecureSkipTLSVerify bool
+
+	// tls for clients connecting to proxy
+
+	grpcProxyListenCA      string
+	grpcProxyListenCert    string
+	grpcProxyListenKey     string
+	grpcProxyListenAutoTLS bool
+	grpcProxyListenCRL     string
+
+	grpcProxyAdvertiseClientURL string
+	grpcProxyResolverPrefix     string
+	grpcProxyResolverTTL        int
+
+	grpcProxyNamespace string
+	grpcProxyLeasing   string
+
+	grpcProxyEnablePprof    bool
+	grpcProxyEnableOrdering bool
+
+	grpcProxyDebug bool
+)
+
+const defaultGRPCMaxCallSendMsgSize = 1.5 * 1024 * 1024
+
+func init() {
+	rootCmd.AddCommand(newGRPCProxyCommand())
+}
+
+// newGRPCProxyCommand returns the cobra command for "grpc-proxy".
+func newGRPCProxyCommand() *cobra.Command {
+	lpc := &cobra.Command{
+		Use:   "grpc-proxy <subcommand>",
+		Short: "grpc-proxy related command",
+	}
+	lpc.AddCommand(newGRPCProxyStartCommand())
+
+	return lpc
+}
+
+func newGRPCProxyStartCommand() *cobra.Command {
+	cmd := cobra.Command{
+		Use:   "start",
+		Short: "start the grpc proxy",
+		Run:   startGRPCProxy,
+	}
+
+	cmd.Flags().StringVar(&grpcProxyListenAddr, "listen-addr", "127.0.0.1:23790", "listen address")
+	cmd.Flags().StringVar(&grpcProxyDNSCluster, "discovery-srv", "", "DNS domain used to bootstrap initial cluster")
+	cmd.Flags().StringVar(&grpcProxyMetricsListenAddr, "metrics-addr", "", "listen for /metrics requests on an additional interface")
+	cmd.Flags().BoolVar(&grpcProxyInsecureDiscovery, "insecure-discovery", false, "accept insecure SRV records")
+	cmd.Flags().StringSliceVar(&grpcProxyEndpoints, "endpoints", []string{"127.0.0.1:2379"}, "comma separated etcd cluster endpoints")
+	cmd.Flags().StringVar(&grpcProxyAdvertiseClientURL, "advertise-client-url", "127.0.0.1:23790", "advertise address to register (must be reachable by client)")
+	cmd.Flags().StringVar(&grpcProxyResolverPrefix, "resolver-prefix", "", "prefix to use for registering proxy (must be shared with other grpc-proxy members)")
+	cmd.Flags().IntVar(&grpcProxyResolverTTL, "resolver-ttl", 0, "specify TTL, in seconds, when registering proxy endpoints")
+	cmd.Flags().StringVar(&grpcProxyNamespace, "namespace", "", "string to prefix to all keys for namespacing requests")
+	cmd.Flags().BoolVar(&grpcProxyEnablePprof, "enable-pprof", false, `Enable runtime profiling data via HTTP server. Address is at client URL + "/debug/pprof/"`)
+	cmd.Flags().StringVar(&grpcProxyDataDir, "data-dir", "default.proxy", "Data directory for persistent data")
+	cmd.Flags().IntVar(&grpcMaxCallSendMsgSize, "max-send-bytes", defaultGRPCMaxCallSendMsgSize, "message send limits in bytes (default value is 1.5 MiB)")
+	cmd.Flags().IntVar(&grpcMaxCallRecvMsgSize, "max-recv-bytes", math.MaxInt32, "message receive limits in bytes (default value is math.MaxInt32)")
+
+	// client TLS for connecting to server
+	cmd.Flags().StringVar(&grpcProxyCert, "cert", "", "identify secure connections with etcd servers using this TLS certificate file")
+	cmd.Flags().StringVar(&grpcProxyKey, "key", "", "identify secure connections with etcd servers using this TLS key file")
+	cmd.Flags().StringVar(&grpcProxyCA, "cacert", "", "verify certificates of TLS-enabled secure etcd servers using this CA bundle")
+	cmd.Flags().BoolVar(&grpcProxyInsecureSkipTLSVerify, "insecure-skip-tls-verify", false, "skip authentication of etcd server TLS certificates")
+
+	// client TLS for connecting to proxy
+	cmd.Flags().StringVar(&grpcProxyListenCert, "cert-file", "", "identify secure connections to the proxy using this TLS certificate file")
+	cmd.Flags().StringVar(&grpcProxyListenKey, "key-file", "", "identify secure connections to the proxy using this TLS key file")
+	cmd.Flags().StringVar(&grpcProxyListenCA, "trusted-ca-file", "", "verify certificates of TLS-enabled secure proxy using this CA bundle")
+	cmd.Flags().BoolVar(&grpcProxyListenAutoTLS, "auto-tls", false, "proxy TLS using generated certificates")
+	cmd.Flags().StringVar(&grpcProxyListenCRL, "client-crl-file", "", "proxy client certificate revocation list file.")
+
+	// experimental flags
+	cmd.Flags().BoolVar(&grpcProxyEnableOrdering, "experimental-serializable-ordering", false, "Ensure serializable reads have monotonically increasing store revisions across endpoints.")
+	cmd.Flags().StringVar(&grpcProxyLeasing, "experimental-leasing-prefix", "", "leasing metadata prefix for disconnected linearized reads.")
+
+	cmd.Flags().BoolVar(&grpcProxyDebug, "debug", false, "Enable debug-level logging for grpc-proxy.")
+
+	return &cmd
+}
+
+func startGRPCProxy(cmd *cobra.Command, args []string) {
+	checkArgs()
+
+	capnslog.SetGlobalLogLevel(capnslog.INFO)
+	if grpcProxyDebug {
+		capnslog.SetGlobalLogLevel(capnslog.DEBUG)
+		grpc.EnableTracing = true
+		// enable info, warning, error
+		grpclog.SetLoggerV2(grpclog.NewLoggerV2(os.Stderr, os.Stderr, os.Stderr))
+	} else {
+		// only discard info
+		grpclog.SetLoggerV2(grpclog.NewLoggerV2(ioutil.Discard, os.Stderr, os.Stderr))
+	}
+
+	tlsinfo := newTLS(grpcProxyListenCA, grpcProxyListenCert, grpcProxyListenKey)
+	if tlsinfo == nil && grpcProxyListenAutoTLS {
+		host := []string{"https://" + grpcProxyListenAddr}
+		dir := filepath.Join(grpcProxyDataDir, "fixtures", "proxy")
+		autoTLS, err := transport.SelfCert(dir, host)
+		if err != nil {
+			plog.Fatal(err)
+		}
+		tlsinfo = &autoTLS
+	}
+	if tlsinfo != nil {
+		plog.Infof("ServerTLS: %s", tlsinfo)
+	}
+	m := mustListenCMux(tlsinfo)
+
+	grpcl := m.Match(cmux.HTTP2())
+	defer func() {
+		grpcl.Close()
+		plog.Infof("stopping listening for grpc-proxy client requests on %s", grpcProxyListenAddr)
+	}()
+
+	client := mustNewClient()
+
+	srvhttp, httpl := mustHTTPListener(m, tlsinfo, client)
+	errc := make(chan error)
+	go func() { errc <- newGRPCProxyServer(client).Serve(grpcl) }()
+	go func() { errc <- srvhttp.Serve(httpl) }()
+	go func() { errc <- m.Serve() }()
+	if len(grpcProxyMetricsListenAddr) > 0 {
+		mhttpl := mustMetricsListener(tlsinfo)
+		go func() {
+			mux := http.NewServeMux()
+			etcdhttp.HandlePrometheus(mux)
+			grpcproxy.HandleHealth(mux, client)
+			plog.Fatal(http.Serve(mhttpl, mux))
+		}()
+	}
+
+	// grpc-proxy is initialized, ready to serve
+	notifySystemd()
+
+	fmt.Fprintln(os.Stderr, <-errc)
+	os.Exit(1)
+}
+
+func checkArgs() {
+	if grpcProxyResolverPrefix != "" && grpcProxyResolverTTL < 1 {
+		fmt.Fprintln(os.Stderr, fmt.Errorf("invalid resolver-ttl %d", grpcProxyResolverTTL))
+		os.Exit(1)
+	}
+	if grpcProxyResolverPrefix == "" && grpcProxyResolverTTL > 0 {
+		fmt.Fprintln(os.Stderr, fmt.Errorf("invalid resolver-prefix %q", grpcProxyResolverPrefix))
+		os.Exit(1)
+	}
+	if grpcProxyResolverPrefix != "" && grpcProxyResolverTTL > 0 && grpcProxyAdvertiseClientURL == "" {
+		fmt.Fprintln(os.Stderr, fmt.Errorf("invalid advertise-client-url %q", grpcProxyAdvertiseClientURL))
+		os.Exit(1)
+	}
+}
+
+func mustNewClient() *clientv3.Client {
+	srvs := discoverEndpoints(grpcProxyDNSCluster, grpcProxyCA, grpcProxyInsecureDiscovery)
+	eps := srvs.Endpoints
+	if len(eps) == 0 {
+		eps = grpcProxyEndpoints
+	}
+	cfg, err := newClientCfg(eps)
+	if err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		os.Exit(1)
+	}
+	cfg.DialOptions = append(cfg.DialOptions,
+		grpc.WithUnaryInterceptor(grpcproxy.AuthUnaryClientInterceptor))
+	cfg.DialOptions = append(cfg.DialOptions,
+		grpc.WithStreamInterceptor(grpcproxy.AuthStreamClientInterceptor))
+	client, err := clientv3.New(*cfg)
+	if err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		os.Exit(1)
+	}
+	return client
+}
+
+func newClientCfg(eps []string) (*clientv3.Config, error) {
+	// set TLS if any one of the TLS options is set
+	cfg := clientv3.Config{
+		Endpoints:   eps,
+		DialTimeout: 5 * time.Second,
+	}
+
+	if grpcMaxCallSendMsgSize > 0 {
+		cfg.MaxCallSendMsgSize = grpcMaxCallSendMsgSize
+	}
+	if grpcMaxCallRecvMsgSize > 0 {
+		cfg.MaxCallRecvMsgSize = grpcMaxCallRecvMsgSize
+	}
+
+	tls := newTLS(grpcProxyCA, grpcProxyCert, grpcProxyKey)
+	if tls == nil && grpcProxyInsecureSkipTLSVerify {
+		tls = &transport.TLSInfo{}
+	}
+	if tls != nil {
+		clientTLS, err := tls.ClientConfig()
+		if err != nil {
+			return nil, err
+		}
+		clientTLS.InsecureSkipVerify = grpcProxyInsecureSkipTLSVerify
+		cfg.TLS = clientTLS
+		plog.Infof("ClientTLS: %s", tls)
+	}
+	return &cfg, nil
+}
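+
+// clientCfgSketch is a hedged example added for illustration (not part of
+// upstream etcd): it builds a clientv3.Config directly, with a 10 MiB send
+// limit; the endpoint value is hypothetical.
+func clientCfgSketch() clientv3.Config {
+	return clientv3.Config{
+		Endpoints:          []string{"127.0.0.1:2379"},
+		DialTimeout:        5 * time.Second,
+		MaxCallSendMsgSize: 10 * 1024 * 1024, // 10 MiB
+	}
+}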
+
+func newTLS(ca, cert, key string) *transport.TLSInfo {
+	if ca == "" && cert == "" && key == "" {
+		return nil
+	}
+	return &transport.TLSInfo{CAFile: ca, CertFile: cert, KeyFile: key}
+}
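+
+// newTLSUsageSketch is a hedged example added for illustration (not part of
+// upstream etcd): newTLS returns nil when no TLS material is configured, so
+// callers can fall back to plaintext or auto-TLS. File paths are hypothetical.
+func newTLSUsageSketch() {
+	if tlsinfo := newTLS("", "", ""); tlsinfo == nil {
+		plog.Info("no client TLS configured; dialing etcd over plaintext")
+	}
+	if tlsinfo := newTLS("ca.crt", "client.crt", "client.key"); tlsinfo != nil {
+		plog.Infof("ClientTLS: %s", tlsinfo)
+	}
+}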
+
+func mustListenCMux(tlsinfo *transport.TLSInfo) cmux.CMux {
+	l, err := net.Listen("tcp", grpcProxyListenAddr)
+	if err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		os.Exit(1)
+	}
+
+	if l, err = transport.NewKeepAliveListener(l, "tcp", nil); err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		os.Exit(1)
+	}
+	if tlsinfo != nil {
+		tlsinfo.CRLFile = grpcProxyListenCRL
+		if l, err = transport.NewTLSListener(l, tlsinfo); err != nil {
+			plog.Fatal(err)
+		}
+	}
+
+	plog.Infof("listening for grpc-proxy client requests on %s", grpcProxyListenAddr)
+	return cmux.New(l)
+}
+
+func newGRPCProxyServer(client *clientv3.Client) *grpc.Server {
+	if grpcProxyEnableOrdering {
+		vf := ordering.NewOrderViolationSwitchEndpointClosure(*client)
+		client.KV = ordering.NewKV(client.KV, vf)
+		plog.Infof("waiting for linearized read from cluster to recover ordering")
+		for {
+			_, err := client.KV.Get(context.TODO(), "_", clientv3.WithKeysOnly())
+			if err == nil {
+				break
+			}
+			plog.Warningf("ordering recovery failed, retrying in 1s (%v)", err)
+			time.Sleep(time.Second)
+		}
+	}
+
+	if len(grpcProxyNamespace) > 0 {
+		client.KV = namespace.NewKV(client.KV, grpcProxyNamespace)
+		client.Watcher = namespace.NewWatcher(client.Watcher, grpcProxyNamespace)
+		client.Lease = namespace.NewLease(client.Lease, grpcProxyNamespace)
+	}
+
+	if len(grpcProxyLeasing) > 0 {
+		client.KV, _, _ = leasing.NewKV(client, grpcProxyLeasing)
+	}
+
+	kvp, _ := grpcproxy.NewKvProxy(client)
+	watchp, _ := grpcproxy.NewWatchProxy(client)
+	if grpcProxyResolverPrefix != "" {
+		grpcproxy.Register(client, grpcProxyResolverPrefix, grpcProxyAdvertiseClientURL, grpcProxyResolverTTL)
+	}
+	clusterp, _ := grpcproxy.NewClusterProxy(client, grpcProxyAdvertiseClientURL, grpcProxyResolverPrefix)
+	leasep, _ := grpcproxy.NewLeaseProxy(client)
+	mainp := grpcproxy.NewMaintenanceProxy(client)
+	authp := grpcproxy.NewAuthProxy(client)
+	electionp := grpcproxy.NewElectionProxy(client)
+	lockp := grpcproxy.NewLockProxy(client)
+
+	server := grpc.NewServer(
+		grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor),
+		grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor),
+		grpc.MaxConcurrentStreams(math.MaxUint32),
+	)
+
+	pb.RegisterKVServer(server, kvp)
+	pb.RegisterWatchServer(server, watchp)
+	pb.RegisterClusterServer(server, clusterp)
+	pb.RegisterLeaseServer(server, leasep)
+	pb.RegisterMaintenanceServer(server, mainp)
+	pb.RegisterAuthServer(server, authp)
+	v3electionpb.RegisterElectionServer(server, electionp)
+	v3lockpb.RegisterLockServer(server, lockp)
+
+	// set zero values for metrics registered for this grpc server
+	grpc_prometheus.Register(server)
+
+	return server
+}
+
+func mustHTTPListener(m cmux.CMux, tlsinfo *transport.TLSInfo, c *clientv3.Client) (*http.Server, net.Listener) {
+	httpmux := http.NewServeMux()
+	httpmux.HandleFunc("/", http.NotFound)
+	etcdhttp.HandlePrometheus(httpmux)
+	grpcproxy.HandleHealth(httpmux, c)
+	if grpcProxyEnablePprof {
+		for p, h := range debugutil.PProfHandlers() {
+			httpmux.Handle(p, h)
+		}
+		plog.Infof("pprof is enabled under %s", debugutil.HTTPPrefixPProf)
+	}
+	srvhttp := &http.Server{Handler: httpmux}
+
+	if tlsinfo == nil {
+		return srvhttp, m.Match(cmux.HTTP1())
+	}
+
+	srvTLS, err := tlsinfo.ServerConfig()
+	if err != nil {
+		plog.Fatalf("could not setup TLS (%v)", err)
+	}
+	srvhttp.TLSConfig = srvTLS
+	return srvhttp, m.Match(cmux.Any())
+}
+
+func mustMetricsListener(tlsinfo *transport.TLSInfo) net.Listener {
+	murl, err := url.Parse(grpcProxyMetricsListenAddr)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "cannot parse %q", grpcProxyMetricsListenAddr)
+		os.Exit(1)
+	}
+	ml, err := transport.NewListener(murl.Host, murl.Scheme, tlsinfo)
+	if err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		os.Exit(1)
+	}
+	plog.Info("grpc-proxy: listening for metrics on ", murl.String())
+	return ml
+}
diff --git a/vendor/github.com/coreos/etcd/etcdmain/help.go b/vendor/github.com/coreos/etcd/etcdmain/help.go
new file mode 100644
index 0000000..c64dab3
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdmain/help.go
@@ -0,0 +1,203 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdmain
+
+import (
+	"strconv"
+
+	"github.com/coreos/etcd/embed"
+)
+
+var (
+	usageline = `usage: etcd [flags]
+       start an etcd server
+
+       etcd --version
+       show the version of etcd
+
+       etcd -h | --help
+       show the help information about etcd
+
+       etcd --config-file
+       path to the server configuration file
+
+       etcd gateway
+       run the stateless pass-through etcd TCP connection forwarding proxy
+
+       etcd grpc-proxy
+       run the stateless etcd v3 gRPC L7 reverse proxy
+	`
+	flagsline = `
+member flags:
+
+	--name 'default'
+		human-readable name for this member.
+	--data-dir '${name}.etcd'
+		path to the data directory.
+	--wal-dir ''
+		path to the dedicated wal directory.
+	--snapshot-count '100000'
+		number of committed transactions to trigger a snapshot to disk.
+	--heartbeat-interval '100'
+		time (in milliseconds) of a heartbeat interval.
+	--election-timeout '1000'
+		time (in milliseconds) for an election to timeout. See tuning documentation for details.
+	--initial-election-tick-advance 'true'
+		whether to fast-forward initial election ticks on boot for faster election.
+	--listen-peer-urls 'http://localhost:2380'
+		list of URLs to listen on for peer traffic.
+	--listen-client-urls 'http://localhost:2379'
+		list of URLs to listen on for client traffic.
+	--max-snapshots '` + strconv.Itoa(embed.DefaultMaxSnapshots) + `'
+		maximum number of snapshot files to retain (0 is unlimited).
+	--max-wals '` + strconv.Itoa(embed.DefaultMaxWALs) + `'
+		maximum number of wal files to retain (0 is unlimited).
+	--cors ''
+		comma-separated whitelist of origins for CORS (cross-origin resource sharing).
+	--quota-backend-bytes '0'
+		raise alarms when backend size exceeds the given quota (0 defaults to low space quota).
+	--max-txn-ops '128'
+		maximum number of operations permitted in a transaction.
+	--max-request-bytes '1572864'
+		maximum client request size in bytes the server will accept.
+	--grpc-keepalive-min-time '5s'
+		minimum duration interval that a client should wait before pinging server.
+	--grpc-keepalive-interval '2h'
+		frequency duration of server-to-client ping to check if a connection is alive (0 to disable).
+	--grpc-keepalive-timeout '20s'
+		additional duration of wait before closing a non-responsive connection (0 to disable).
+
+clustering flags:
+
+	--initial-advertise-peer-urls 'http://localhost:2380'
+		list of this member's peer URLs to advertise to the rest of the cluster.
+	--initial-cluster 'default=http://localhost:2380'
+		initial cluster configuration for bootstrapping.
+	--initial-cluster-state 'new'
+		initial cluster state ('new' or 'existing').
+	--initial-cluster-token 'etcd-cluster'
+		initial cluster token for the etcd cluster during bootstrap.
+		Specifying this can protect you from unintended cross-cluster interaction when running multiple clusters.
+	--advertise-client-urls 'http://localhost:2379'
+		list of this member's client URLs to advertise to the public.
+		The client URLs advertised should be accessible to machines that talk to etcd cluster. etcd client libraries parse these URLs to connect to the cluster.
+	--discovery ''
+		discovery URL used to bootstrap the cluster.
+	--discovery-fallback 'proxy'
+		expected behavior ('exit' or 'proxy') when the discovery service fails.
+		"proxy" supports v2 API only.
+	--discovery-proxy ''
+		HTTP proxy to use for traffic to discovery service.
+	--discovery-srv ''
+		DNS SRV domain used to bootstrap the cluster.
+	--strict-reconfig-check '` + strconv.FormatBool(embed.DefaultStrictReconfigCheck) + `'
+		reject reconfiguration requests that would cause quorum loss.
+	--auto-compaction-retention '0'
+		auto compaction retention length. 0 means disable auto compaction.
+	--auto-compaction-mode 'periodic'
+		interpret 'auto-compaction-retention' as one of: 'periodic'|'revision'. 'periodic' for duration-based retention, defaulting to hours if no time unit is provided (e.g. '5m'); 'revision' for revision-number-based retention.
+	--enable-v2 '` + strconv.FormatBool(embed.DefaultEnableV2) + `'
+		accept etcd v2 client requests.
+
+proxy flags:
+	"proxy" supports v2 API only.
+
+	--proxy 'off'
+		proxy mode setting ('off', 'readonly' or 'on').
+	--proxy-failure-wait 5000
+		time (in milliseconds) an endpoint will be held in a failed state.
+	--proxy-refresh-interval 30000
+		time (in milliseconds) of the endpoints refresh interval.
+	--proxy-dial-timeout 1000
+		time (in milliseconds) for a dial to timeout.
+	--proxy-write-timeout 5000
+		time (in milliseconds) for a write to timeout.
+	--proxy-read-timeout 0
+		time (in milliseconds) for a read to timeout.
+
+
+security flags:
+
+	--ca-file '' [DEPRECATED]
+		path to the client server TLS CA file. '-ca-file ca.crt' can be replaced by '-trusted-ca-file ca.crt -client-cert-auth', and etcd will behave the same.
+	--cert-file ''
+		path to the client server TLS cert file.
+	--key-file ''
+		path to the client server TLS key file.
+	--client-cert-auth 'false'
+		enable client cert authentication.
+	--client-crl-file ''
+		path to the client certificate revocation list file.
+	--trusted-ca-file ''
+		path to the client server TLS trusted CA cert file.
+	--auto-tls 'false'
+		client TLS using generated certificates.
+	--peer-ca-file '' [DEPRECATED]
+		path to the peer server TLS CA file. '-peer-ca-file ca.crt' can be replaced by '-peer-trusted-ca-file ca.crt -peer-client-cert-auth', and etcd will behave the same.
+	--peer-cert-file ''
+		path to the peer server TLS cert file.
+	--peer-key-file ''
+		path to the peer server TLS key file.
+	--peer-client-cert-auth 'false'
+		enable peer client cert authentication.
+	--peer-trusted-ca-file ''
+		path to the peer server TLS trusted CA file.
+	--peer-cert-allowed-cn ''
+		Required CN for client certs connecting to the peer endpoint.
+	--peer-auto-tls 'false'
+		peer TLS using self-generated certificates if --peer-key-file and --peer-cert-file are not provided.
+	--peer-crl-file ''
+		path to the peer certificate revocation list file.
+	--cipher-suites ''
+		comma-separated list of supported TLS cipher suites between client/server and peers (empty will be auto-populated by Go).
+
+logging flags:
+
+	--debug 'false'
+		enable debug-level logging for etcd.
+	--log-package-levels ''
+		specify a particular log level for each etcd package (eg: 'etcdmain=CRITICAL,etcdserver=DEBUG').
+	--log-output 'default'
+		specify 'stdout' or 'stderr' to skip journald logging even when running under systemd.
+
+unsafe flags:
+
+Please be CAUTIOUS when using unsafe flags, because they may break the
+guarantees given by the consensus protocol.
+
+	--force-new-cluster 'false'
+		force to create a new one-member cluster.
+
+profiling flags:
+	--enable-pprof 'false'
+		Enable runtime profiling data via HTTP server. Address is at client URL + "/debug/pprof/"
+	--metrics 'basic'
+		Set level of detail for exported metrics, specify 'extensive' to include histogram metrics.
+	--listen-metrics-urls ''
+		List of URLs to listen on for metrics.
+
+auth flags:
+	--auth-token 'simple'
+		Specify a v3 authentication token type and its options ('simple' or 'jwt').
+
+experimental flags:
+	--experimental-initial-corrupt-check 'false'
+		enable to check data corruption before serving any client/peer traffic.
+	--experimental-corrupt-check-time '0s'
+		duration of time between cluster corruption check passes.
+	--experimental-enable-v2v3 ''
+		serve v2 requests through the v3 backend under a given prefix.
+`
+)
diff --git a/vendor/github.com/coreos/etcd/etcdmain/main.go b/vendor/github.com/coreos/etcd/etcdmain/main.go
new file mode 100644
index 0000000..06bbae5
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdmain/main.go
@@ -0,0 +1,60 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdmain
+
+import (
+	"fmt"
+	"os"
+	"strings"
+
+	"github.com/coreos/go-systemd/daemon"
+	systemdutil "github.com/coreos/go-systemd/util"
+)
+
+func Main() {
+	checkSupportArch()
+
+	if len(os.Args) > 1 {
+		cmd := os.Args[1]
+		if covArgs := os.Getenv("ETCDCOV_ARGS"); len(covArgs) > 0 {
+			args := strings.Split(covArgs, "\xe7\xcd")[1:]
+			rootCmd.SetArgs(args)
+			cmd = "grpc-proxy"
+		}
+		switch cmd {
+		case "gateway", "grpc-proxy":
+			if err := rootCmd.Execute(); err != nil {
+				fmt.Fprint(os.Stderr, err)
+				os.Exit(1)
+			}
+			return
+		}
+	}
+
+	startEtcdOrProxyV2()
+}
+
+func notifySystemd() {
+	if !systemdutil.IsRunningSystemd() {
+		return
+	}
+	sent, err := daemon.SdNotify(false, "READY=1")
+	if err != nil {
+		plog.Errorf("failed to notify systemd for readiness: %v", err)
+	}
+	if !sent {
+		plog.Errorf("forgot to set Type=notify in systemd service file?")
+	}
+}
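+
+// Illustrative note (not part of upstream etcd): for SdNotify's READY=1 to
+// be honored, the systemd unit that runs this process must use Type=notify.
+// A minimal, hypothetical unit fragment:
+//
+//	[Service]
+//	Type=notify
+//	ExecStart=/usr/local/bin/etcd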
diff --git a/vendor/github.com/coreos/etcd/etcdmain/util.go b/vendor/github.com/coreos/etcd/etcdmain/util.go
new file mode 100644
index 0000000..9657271
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdmain/util.go
@@ -0,0 +1,65 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdmain
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/coreos/etcd/pkg/srv"
+	"github.com/coreos/etcd/pkg/transport"
+)
+
+func discoverEndpoints(dns string, ca string, insecure bool) (s srv.SRVClients) {
+	if dns == "" {
+		return s
+	}
+	srvs, err := srv.GetClient("etcd-client", dns)
+	if err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		os.Exit(1)
+	}
+	endpoints := srvs.Endpoints
+	plog.Infof("discovered the cluster %s from %s", endpoints, dns)
+	if insecure {
+		return *srvs
+	}
+	// confirm TLS connections are good
+	tlsInfo := transport.TLSInfo{
+		TrustedCAFile: ca,
+		ServerName:    dns,
+	}
+	plog.Infof("validating discovered endpoints %v", endpoints)
+	endpoints, err = transport.ValidateSecureEndpoints(tlsInfo, endpoints)
+	if err != nil {
+		plog.Warningf("%v", err)
+	}
+	plog.Infof("using discovered endpoints %v", endpoints)
+
+	// map endpoints back to SRVClients struct with SRV data
+	eps := make(map[string]struct{})
+	for _, ep := range endpoints {
+		eps[ep] = struct{}{}
+	}
+	for i := range srvs.Endpoints {
+		if _, ok := eps[srvs.Endpoints[i]]; !ok {
+			continue
+		}
+		s.Endpoints = append(s.Endpoints, srvs.Endpoints[i])
+		s.SRVs = append(s.SRVs, srvs.SRVs[i])
+	}
+
+	return s
+}
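+
+// discoverEndpointsSketch is a hedged example added for illustration (not
+// part of upstream etcd): resolve SRV records for a hypothetical domain and
+// fall back to statically configured endpoints when none are found.
+func discoverEndpointsSketch(staticEps []string) []string {
+	srvs := discoverEndpoints("example.com", "/etc/ssl/ca.crt", false)
+	if len(srvs.Endpoints) == 0 {
+		return staticEps
+	}
+	return srvs.Endpoints
+}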
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/capability.go b/vendor/github.com/coreos/etcd/etcdserver/api/capability.go
new file mode 100644
index 0000000..eb34383
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/api/capability.go
@@ -0,0 +1,87 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package api
+
+import (
+	"sync"
+
+	"github.com/coreos/etcd/version"
+	"github.com/coreos/go-semver/semver"
+	"github.com/coreos/pkg/capnslog"
+)
+
+type Capability string
+
+const (
+	AuthCapability  Capability = "auth"
+	V3rpcCapability Capability = "v3rpc"
+)
+
+var (
+	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api")
+
+	// capabilityMaps is a static map of version to capability map.
+	capabilityMaps = map[string]map[Capability]bool{
+		"3.0.0": {AuthCapability: true, V3rpcCapability: true},
+		"3.1.0": {AuthCapability: true, V3rpcCapability: true},
+		"3.2.0": {AuthCapability: true, V3rpcCapability: true},
+		"3.3.0": {AuthCapability: true, V3rpcCapability: true},
+	}
+
+	enableMapMu sync.RWMutex
+	// enabledMap points to a map in capabilityMaps
+	enabledMap map[Capability]bool
+
+	curVersion *semver.Version
+)
+
+func init() {
+	enabledMap = map[Capability]bool{
+		AuthCapability:  true,
+		V3rpcCapability: true,
+	}
+}
+
+// UpdateCapability updates the enabledMap when the cluster version increases.
+func UpdateCapability(v *semver.Version) {
+	if v == nil {
+		// if recovered but version was never set by cluster
+		return
+	}
+	enableMapMu.Lock()
+	if curVersion != nil && !curVersion.LessThan(*v) {
+		enableMapMu.Unlock()
+		return
+	}
+	curVersion = v
+	enabledMap = capabilityMaps[curVersion.String()]
+	enableMapMu.Unlock()
+	plog.Infof("enabled capabilities for version %s", version.Cluster(v.String()))
+}
+
+func IsCapabilityEnabled(c Capability) bool {
+	enableMapMu.RLock()
+	defer enableMapMu.RUnlock()
+	if enabledMap == nil {
+		return false
+	}
+	return enabledMap[c]
+}
+
+func EnableCapability(c Capability) {
+	enableMapMu.Lock()
+	defer enableMapMu.Unlock()
+	enabledMap[c] = true
+}
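+
+// capabilitySketch is a hedged example added for illustration (not part of
+// upstream etcd): once the cluster version is raised, the capability gate
+// reflects that version's capability map.
+func capabilitySketch() bool {
+	UpdateCapability(semver.New("3.2.0"))
+	return IsCapabilityEnabled(AuthCapability) // true for 3.2.0
+}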
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/cluster.go b/vendor/github.com/coreos/etcd/etcdserver/api/cluster.go
new file mode 100644
index 0000000..654c258
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/api/cluster.go
@@ -0,0 +1,38 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package api
+
+import (
+	"github.com/coreos/etcd/etcdserver/membership"
+	"github.com/coreos/etcd/pkg/types"
+
+	"github.com/coreos/go-semver/semver"
+)
+
+// Cluster is an interface representing a collection of members in one etcd cluster.
+type Cluster interface {
+	// ID returns the cluster ID
+	ID() types.ID
+	// ClientURLs returns an aggregate set of all URLs on which this
+	// cluster is listening for client requests
+	ClientURLs() []string
+	// Members returns a slice of members sorted by their ID
+	Members() []*membership.Member
+	// Member retrieves a particular member based on ID, or nil if the
+	// member does not exist in the cluster
+	Member(id types.ID) *membership.Member
+	// Version is the cluster-wide minimum major.minor version.
+	Version() *semver.Version
+}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/doc.go b/vendor/github.com/coreos/etcd/etcdserver/api/doc.go
new file mode 100644
index 0000000..f44881b
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/api/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package api manages the capabilities and features that are exposed to clients by the etcd cluster.
+package api
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/base.go b/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/base.go
new file mode 100644
index 0000000..f0d3b0b
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/base.go
@@ -0,0 +1,158 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdhttp
+
+import (
+	"encoding/json"
+	"expvar"
+	"fmt"
+	"net/http"
+	"strings"
+
+	etcdErr "github.com/coreos/etcd/error"
+	"github.com/coreos/etcd/etcdserver"
+	"github.com/coreos/etcd/etcdserver/api"
+	"github.com/coreos/etcd/etcdserver/api/v2http/httptypes"
+	"github.com/coreos/etcd/pkg/logutil"
+	"github.com/coreos/etcd/version"
+	"github.com/coreos/pkg/capnslog"
+)
+
+var (
+	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api/etcdhttp")
+	mlog = logutil.NewMergeLogger(plog)
+)
+
+const (
+	configPath  = "/config"
+	varsPath    = "/debug/vars"
+	versionPath = "/version"
+)
+
+// HandleBasic adds handlers to a mux for serving JSON etcd client requests
+// that do not access the v2 store.
+func HandleBasic(mux *http.ServeMux, server etcdserver.ServerPeer) {
+	mux.HandleFunc(varsPath, serveVars)
+	mux.HandleFunc(configPath+"/local/log", logHandleFunc)
+	HandleMetricsHealth(mux, server)
+	mux.HandleFunc(versionPath, versionHandler(server.Cluster(), serveVersion))
+}
+
+func versionHandler(c api.Cluster, fn func(http.ResponseWriter, *http.Request, string)) http.HandlerFunc {
+	return func(w http.ResponseWriter, r *http.Request) {
+		v := c.Version()
+		if v != nil {
+			fn(w, r, v.String())
+		} else {
+			fn(w, r, "not_decided")
+		}
+	}
+}
+
+func serveVersion(w http.ResponseWriter, r *http.Request, clusterV string) {
+	if !allowMethod(w, r, "GET") {
+		return
+	}
+	vs := version.Versions{
+		Server:  version.Version,
+		Cluster: clusterV,
+	}
+
+	w.Header().Set("Content-Type", "application/json")
+	b, err := json.Marshal(&vs)
+	if err != nil {
+		plog.Panicf("cannot marshal versions to json (%v)", err)
+	}
+	w.Write(b)
+}
+
+func logHandleFunc(w http.ResponseWriter, r *http.Request) {
+	if !allowMethod(w, r, "PUT") {
+		return
+	}
+
+	in := struct{ Level string }{}
+
+	d := json.NewDecoder(r.Body)
+	if err := d.Decode(&in); err != nil {
+		WriteError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid json body"))
+		return
+	}
+
+	logl, err := capnslog.ParseLevel(strings.ToUpper(in.Level))
+	if err != nil {
+		WriteError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid log level "+in.Level))
+		return
+	}
+
+	plog.Noticef("globalLogLevel set to %q", logl.String())
+	capnslog.SetGlobalLogLevel(logl)
+	w.WriteHeader(http.StatusNoContent)
+}
+
+func serveVars(w http.ResponseWriter, r *http.Request) {
+	if !allowMethod(w, r, "GET") {
+		return
+	}
+
+	w.Header().Set("Content-Type", "application/json; charset=utf-8")
+	fmt.Fprintf(w, "{\n")
+	first := true
+	expvar.Do(func(kv expvar.KeyValue) {
+		if !first {
+			fmt.Fprintf(w, ",\n")
+		}
+		first = false
+		fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value)
+	})
+	fmt.Fprintf(w, "\n}\n")
+}
+
+func allowMethod(w http.ResponseWriter, r *http.Request, m string) bool {
+	if m == r.Method {
+		return true
+	}
+	w.Header().Set("Allow", m)
+	http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
+	return false
+}
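+
+// allowMethodSketch is a hedged example added for illustration (not part of
+// upstream etcd): handlers in this package check the verb first, since
+// allowMethod writes the 405 response and Allow header itself.
+func allowMethodSketch(w http.ResponseWriter, r *http.Request) {
+	if !allowMethod(w, r, "GET") {
+		return // 405 already written
+	}
+	fmt.Fprintln(w, "ok")
+}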
+
+// WriteError logs and writes the given Error to the ResponseWriter.
+// If Error is an etcdErr, it is rendered to the ResponseWriter.
+// Otherwise, it is assumed to be a StatusInternalServerError.
+func WriteError(w http.ResponseWriter, r *http.Request, err error) {
+	if err == nil {
+		return
+	}
+	switch e := err.(type) {
+	case *etcdErr.Error:
+		e.WriteTo(w)
+	case *httptypes.HTTPError:
+		if et := e.WriteTo(w); et != nil {
+			plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr)
+		}
+	default:
+		switch err {
+		case etcdserver.ErrTimeoutDueToLeaderFail, etcdserver.ErrTimeoutDueToConnectionLost, etcdserver.ErrNotEnoughStartedMembers, etcdserver.ErrUnhealthy:
+			mlog.MergeError(err)
+		default:
+			mlog.MergeErrorf("got unexpected response error (%v)", err)
+		}
+		herr := httptypes.NewHTTPError(http.StatusInternalServerError, "Internal Server Error")
+		if et := herr.WriteTo(w); et != nil {
+			plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr)
+		}
+	}
+}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/doc.go b/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/doc.go
new file mode 100644
index 0000000..a03b626
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package etcdhttp implements HTTP transportation layer for etcdserver.
+package etcdhttp
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/metrics.go b/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/metrics.go
new file mode 100644
index 0000000..aeaf350
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/metrics.go
@@ -0,0 +1,123 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdhttp
+
+import (
+	"context"
+	"encoding/json"
+	"net/http"
+	"time"
+
+	"github.com/coreos/etcd/etcdserver"
+	"github.com/coreos/etcd/etcdserver/etcdserverpb"
+	"github.com/coreos/etcd/raft"
+
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promhttp"
+)
+
+const (
+	pathMetrics = "/metrics"
+	PathHealth  = "/health"
+)
+
+// HandleMetricsHealth registers metrics and health handlers.
+func HandleMetricsHealth(mux *http.ServeMux, srv etcdserver.ServerV2) {
+	mux.Handle(pathMetrics, promhttp.Handler())
+	mux.Handle(PathHealth, NewHealthHandler(func() Health { return checkHealth(srv) }))
+}
+
+// HandlePrometheus registers prometheus handler on '/metrics'.
+func HandlePrometheus(mux *http.ServeMux) {
+	mux.Handle(pathMetrics, promhttp.Handler())
+}
+
+// NewHealthHandler handles '/health' requests.
+func NewHealthHandler(hfunc func() Health) http.HandlerFunc {
+	return func(w http.ResponseWriter, r *http.Request) {
+		if r.Method != http.MethodGet {
+			w.Header().Set("Allow", http.MethodGet)
+			http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
+			return
+		}
+		h := hfunc()
+		d, _ := json.Marshal(h)
+		if h.Health != "true" {
+			http.Error(w, string(d), http.StatusServiceUnavailable)
+			return
+		}
+		w.WriteHeader(http.StatusOK)
+		w.Write(d)
+	}
+}
+
+var (
+	healthSuccess = prometheus.NewCounter(prometheus.CounterOpts{
+		Namespace: "etcd",
+		Subsystem: "server",
+		Name:      "health_success",
+		Help:      "The total number of successful health checks",
+	})
+	healthFailed = prometheus.NewCounter(prometheus.CounterOpts{
+		Namespace: "etcd",
+		Subsystem: "server",
+		Name:      "health_failures",
+		Help:      "The total number of failed health checks",
+	})
+)
+
+func init() {
+	prometheus.MustRegister(healthSuccess)
+	prometheus.MustRegister(healthFailed)
+}
+
+// Health defines etcd server health status.
+// TODO: remove manual parsing in etcdctl cluster-health
+type Health struct {
+	Health string `json:"health"`
+}
+
+// TODO: server NOSPACE, etcdserver.ErrNoLeader in health API
+
+func checkHealth(srv etcdserver.ServerV2) Health {
+	h := Health{Health: "true"}
+
+	as := srv.Alarms()
+	if len(as) > 0 {
+		h.Health = "false"
+	}
+
+	if h.Health == "true" {
+		if uint64(srv.Leader()) == raft.None {
+			h.Health = "false"
+		}
+	}
+
+	if h.Health == "true" {
+		ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+		_, err := srv.Do(ctx, etcdserverpb.Request{Method: "QGET"})
+		cancel()
+		if err != nil {
+			h.Health = "false"
+		}
+	}
+
+	if h.Health == "true" {
+		healthSuccess.Inc()
+	} else {
+		healthFailed.Inc()
+	}
+	return h
+}
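+
+// healthHandlerSketch is a hedged example added for illustration (not part
+// of upstream etcd): it mounts a constant-healthy /health endpoint, useful
+// in tests that lack a full etcdserver.ServerV2.
+func healthHandlerSketch(mux *http.ServeMux) {
+	mux.Handle(PathHealth, NewHealthHandler(func() Health {
+		return Health{Health: "true"}
+	}))
+}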
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/peer.go b/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/peer.go
new file mode 100644
index 0000000..0a9213b
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/peer.go
@@ -0,0 +1,73 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdhttp
+
+import (
+	"encoding/json"
+	"net/http"
+
+	"github.com/coreos/etcd/etcdserver"
+	"github.com/coreos/etcd/etcdserver/api"
+	"github.com/coreos/etcd/lease/leasehttp"
+	"github.com/coreos/etcd/rafthttp"
+)
+
+const (
+	peerMembersPrefix = "/members"
+)
+
+// NewPeerHandler generates an http.Handler to handle etcd peer requests.
+func NewPeerHandler(s etcdserver.ServerPeer) http.Handler {
+	return newPeerHandler(s.Cluster(), s.RaftHandler(), s.LeaseHandler())
+}
+
+func newPeerHandler(cluster api.Cluster, raftHandler http.Handler, leaseHandler http.Handler) http.Handler {
+	mh := &peerMembersHandler{
+		cluster: cluster,
+	}
+
+	mux := http.NewServeMux()
+	mux.HandleFunc("/", http.NotFound)
+	mux.Handle(rafthttp.RaftPrefix, raftHandler)
+	mux.Handle(rafthttp.RaftPrefix+"/", raftHandler)
+	mux.Handle(peerMembersPrefix, mh)
+	if leaseHandler != nil {
+		mux.Handle(leasehttp.LeasePrefix, leaseHandler)
+		mux.Handle(leasehttp.LeaseInternalPrefix, leaseHandler)
+	}
+	mux.HandleFunc(versionPath, versionHandler(cluster, serveVersion))
+	return mux
+}
+
+type peerMembersHandler struct {
+	cluster api.Cluster
+}
+
+func (h *peerMembersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	if !allowMethod(w, r, "GET") {
+		return
+	}
+	w.Header().Set("X-Etcd-Cluster-ID", h.cluster.ID().String())
+
+	if r.URL.Path != peerMembersPrefix {
+		http.Error(w, "bad path", http.StatusBadRequest)
+		return
+	}
+	ms := h.cluster.Members()
+	w.Header().Set("Content-Type", "application/json")
+	if err := json.NewEncoder(w).Encode(ms); err != nil {
+		plog.Warningf("failed to encode members response (%v)", err)
+	}
+}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/capability.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/capability.go
new file mode 100644
index 0000000..fa0bcca
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/capability.go
@@ -0,0 +1,40 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2http
+
+import (
+	"fmt"
+	"net/http"
+
+	"github.com/coreos/etcd/etcdserver/api"
+	"github.com/coreos/etcd/etcdserver/api/v2http/httptypes"
+)
+
+func capabilityHandler(c api.Capability, fn func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
+	return func(w http.ResponseWriter, r *http.Request) {
+		if !api.IsCapabilityEnabled(c) {
+			notCapable(w, r, c)
+			return
+		}
+		fn(w, r)
+	}
+}
+
+func notCapable(w http.ResponseWriter, r *http.Request, c api.Capability) {
+	herr := httptypes.NewHTTPError(http.StatusInternalServerError, fmt.Sprintf("Not capable of accessing %s feature during rolling upgrades.", c))
+	if err := herr.WriteTo(w); err != nil {
+		plog.Debugf("error writing HTTPError (%v) to %s", err, r.RemoteAddr)
+	}
+}
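+
+// capabilityGateSketch is a hedged example added for illustration (not part
+// of upstream etcd): it wraps a hypothetical auth endpoint so requests fail
+// cleanly during rolling upgrades until the whole cluster supports auth.
+func capabilityGateSketch(mux *http.ServeMux) {
+	mux.HandleFunc("/v2/auth/roles", capabilityHandler(api.AuthCapability, func(w http.ResponseWriter, r *http.Request) {
+		fmt.Fprintln(w, "roles") // placeholder body
+	}))
+}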
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/client.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/client.go
new file mode 100644
index 0000000..6aaf3db
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/client.go
@@ -0,0 +1,719 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2http
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"path"
+	"strconv"
+	"strings"
+	"time"
+
+	etcdErr "github.com/coreos/etcd/error"
+	"github.com/coreos/etcd/etcdserver"
+	"github.com/coreos/etcd/etcdserver/api"
+	"github.com/coreos/etcd/etcdserver/api/etcdhttp"
+	"github.com/coreos/etcd/etcdserver/api/v2http/httptypes"
+	"github.com/coreos/etcd/etcdserver/auth"
+	"github.com/coreos/etcd/etcdserver/etcdserverpb"
+	"github.com/coreos/etcd/etcdserver/membership"
+	"github.com/coreos/etcd/etcdserver/stats"
+	"github.com/coreos/etcd/pkg/types"
+	"github.com/coreos/etcd/store"
+
+	"github.com/jonboulle/clockwork"
+)
+
+const (
+	authPrefix     = "/v2/auth"
+	keysPrefix     = "/v2/keys"
+	machinesPrefix = "/v2/machines"
+	membersPrefix  = "/v2/members"
+	statsPrefix    = "/v2/stats"
+)
+
+// NewClientHandler generates a muxed http.Handler with the given parameters to serve etcd client requests.
+func NewClientHandler(server etcdserver.ServerPeer, timeout time.Duration) http.Handler {
+	mux := http.NewServeMux()
+	etcdhttp.HandleBasic(mux, server)
+	handleV2(mux, server, timeout)
+	return requestLogger(mux)
+}
+
+func handleV2(mux *http.ServeMux, server etcdserver.ServerV2, timeout time.Duration) {
+	sec := auth.NewStore(server, timeout)
+	kh := &keysHandler{
+		sec:                   sec,
+		server:                server,
+		cluster:               server.Cluster(),
+		timeout:               timeout,
+		clientCertAuthEnabled: server.ClientCertAuthEnabled(),
+	}
+
+	sh := &statsHandler{
+		stats: server,
+	}
+
+	mh := &membersHandler{
+		sec:     sec,
+		server:  server,
+		cluster: server.Cluster(),
+		timeout: timeout,
+		clock:   clockwork.NewRealClock(),
+		clientCertAuthEnabled: server.ClientCertAuthEnabled(),
+	}
+
+	mah := &machinesHandler{cluster: server.Cluster()}
+
+	sech := &authHandler{
+		sec:                   sec,
+		cluster:               server.Cluster(),
+		clientCertAuthEnabled: server.ClientCertAuthEnabled(),
+	}
+	mux.HandleFunc("/", http.NotFound)
+	mux.Handle(keysPrefix, kh)
+	mux.Handle(keysPrefix+"/", kh)
+	mux.HandleFunc(statsPrefix+"/store", sh.serveStore)
+	mux.HandleFunc(statsPrefix+"/self", sh.serveSelf)
+	mux.HandleFunc(statsPrefix+"/leader", sh.serveLeader)
+	mux.Handle(membersPrefix, mh)
+	mux.Handle(membersPrefix+"/", mh)
+	mux.Handle(machinesPrefix, mah)
+	handleAuth(mux, sech)
+}
+
+type keysHandler struct {
+	sec                   auth.Store
+	server                etcdserver.ServerV2
+	cluster               api.Cluster
+	timeout               time.Duration
+	clientCertAuthEnabled bool
+}
+
+func (h *keysHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	if !allowMethod(w, r.Method, "HEAD", "GET", "PUT", "POST", "DELETE") {
+		return
+	}
+
+	w.Header().Set("X-Etcd-Cluster-ID", h.cluster.ID().String())
+
+	ctx, cancel := context.WithTimeout(context.Background(), h.timeout)
+	defer cancel()
+	clock := clockwork.NewRealClock()
+	startTime := clock.Now()
+	rr, noValueOnSuccess, err := parseKeyRequest(r, clock)
+	if err != nil {
+		writeKeyError(w, err)
+		return
+	}
+	// The path must be valid at this point (we've parsed the request successfully).
+	if !hasKeyPrefixAccess(h.sec, r, r.URL.Path[len(keysPrefix):], rr.Recursive, h.clientCertAuthEnabled) {
+		writeKeyNoAuth(w)
+		return
+	}
+	if !rr.Wait {
+		reportRequestReceived(rr)
+	}
+	resp, err := h.server.Do(ctx, rr)
+	if err != nil {
+		err = trimErrorPrefix(err, etcdserver.StoreKeysPrefix)
+		writeKeyError(w, err)
+		reportRequestFailed(rr, err)
+		return
+	}
+	switch {
+	case resp.Event != nil:
+		if err := writeKeyEvent(w, resp, noValueOnSuccess); err != nil {
+			// Should never be reached
+			plog.Errorf("error writing event (%v)", err)
+		}
+		reportRequestCompleted(rr, resp, startTime)
+	case resp.Watcher != nil:
+		ctx, cancel := context.WithTimeout(context.Background(), defaultWatchTimeout)
+		defer cancel()
+		handleKeyWatch(ctx, w, resp, rr.Stream)
+	default:
+		writeKeyError(w, errors.New("received response with no Event/Watcher!"))
+	}
+}
+
+type machinesHandler struct {
+	cluster api.Cluster
+}
+
+func (h *machinesHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	if !allowMethod(w, r.Method, "GET", "HEAD") {
+		return
+	}
+	endpoints := h.cluster.ClientURLs()
+	w.Write([]byte(strings.Join(endpoints, ", ")))
+}
+
+type membersHandler struct {
+	sec                   auth.Store
+	server                etcdserver.ServerV2
+	cluster               api.Cluster
+	timeout               time.Duration
+	clock                 clockwork.Clock
+	clientCertAuthEnabled bool
+}
+
+func (h *membersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	if !allowMethod(w, r.Method, "GET", "POST", "DELETE", "PUT") {
+		return
+	}
+	if !hasWriteRootAccess(h.sec, r, h.clientCertAuthEnabled) {
+		writeNoAuth(w, r)
+		return
+	}
+	w.Header().Set("X-Etcd-Cluster-ID", h.cluster.ID().String())
+
+	ctx, cancel := context.WithTimeout(context.Background(), h.timeout)
+	defer cancel()
+
+	switch r.Method {
+	case "GET":
+		switch trimPrefix(r.URL.Path, membersPrefix) {
+		case "":
+			mc := newMemberCollection(h.cluster.Members())
+			w.Header().Set("Content-Type", "application/json")
+			if err := json.NewEncoder(w).Encode(mc); err != nil {
+				plog.Warningf("failed to encode members response (%v)", err)
+			}
+		case "leader":
+			id := h.server.Leader()
+			if id == 0 {
+				writeError(w, r, httptypes.NewHTTPError(http.StatusServiceUnavailable, "During election"))
+				return
+			}
+			m := newMember(h.cluster.Member(id))
+			w.Header().Set("Content-Type", "application/json")
+			if err := json.NewEncoder(w).Encode(m); err != nil {
+				plog.Warningf("failed to encode members response (%v)", err)
+			}
+		default:
+			writeError(w, r, httptypes.NewHTTPError(http.StatusNotFound, "Not found"))
+		}
+	case "POST":
+		req := httptypes.MemberCreateRequest{}
+		if ok := unmarshalRequest(r, &req, w); !ok {
+			return
+		}
+		now := h.clock.Now()
+		m := membership.NewMember("", req.PeerURLs, "", &now)
+		_, err := h.server.AddMember(ctx, *m)
+		switch {
+		case err == membership.ErrIDExists || err == membership.ErrPeerURLexists:
+			writeError(w, r, httptypes.NewHTTPError(http.StatusConflict, err.Error()))
+			return
+		case err != nil:
+			plog.Errorf("error adding member %s (%v)", m.ID, err)
+			writeError(w, r, err)
+			return
+		}
+		res := newMember(m)
+		w.Header().Set("Content-Type", "application/json")
+		w.WriteHeader(http.StatusCreated)
+		if err := json.NewEncoder(w).Encode(res); err != nil {
+			plog.Warningf("failed to encode members response (%v)", err)
+		}
+	case "DELETE":
+		id, ok := getID(r.URL.Path, w)
+		if !ok {
+			return
+		}
+		_, err := h.server.RemoveMember(ctx, uint64(id))
+		switch {
+		case err == membership.ErrIDRemoved:
+			writeError(w, r, httptypes.NewHTTPError(http.StatusGone, fmt.Sprintf("Member permanently removed: %s", id)))
+		case err == membership.ErrIDNotFound:
+			writeError(w, r, httptypes.NewHTTPError(http.StatusNotFound, fmt.Sprintf("No such member: %s", id)))
+		case err != nil:
+			plog.Errorf("error removing member %s (%v)", id, err)
+			writeError(w, r, err)
+		default:
+			w.WriteHeader(http.StatusNoContent)
+		}
+	case "PUT":
+		id, ok := getID(r.URL.Path, w)
+		if !ok {
+			return
+		}
+		req := httptypes.MemberUpdateRequest{}
+		if ok := unmarshalRequest(r, &req, w); !ok {
+			return
+		}
+		m := membership.Member{
+			ID:             id,
+			RaftAttributes: membership.RaftAttributes{PeerURLs: req.PeerURLs.StringSlice()},
+		}
+		_, err := h.server.UpdateMember(ctx, m)
+		switch {
+		case err == membership.ErrPeerURLexists:
+			writeError(w, r, httptypes.NewHTTPError(http.StatusConflict, err.Error()))
+		case err == membership.ErrIDNotFound:
+			writeError(w, r, httptypes.NewHTTPError(http.StatusNotFound, fmt.Sprintf("No such member: %s", id)))
+		case err != nil:
+			plog.Errorf("error updating member %s (%v)", m.ID, err)
+			writeError(w, r, err)
+		default:
+			w.WriteHeader(http.StatusNoContent)
+		}
+	}
+}
+
+type statsHandler struct {
+	stats stats.Stats
+}
+
+func (h *statsHandler) serveStore(w http.ResponseWriter, r *http.Request) {
+	if !allowMethod(w, r.Method, "GET") {
+		return
+	}
+	w.Header().Set("Content-Type", "application/json")
+	w.Write(h.stats.StoreStats())
+}
+
+func (h *statsHandler) serveSelf(w http.ResponseWriter, r *http.Request) {
+	if !allowMethod(w, r.Method, "GET") {
+		return
+	}
+	w.Header().Set("Content-Type", "application/json")
+	w.Write(h.stats.SelfStats())
+}
+
+func (h *statsHandler) serveLeader(w http.ResponseWriter, r *http.Request) {
+	if !allowMethod(w, r.Method, "GET") {
+		return
+	}
+	stats := h.stats.LeaderStats()
+	if stats == nil {
+		etcdhttp.WriteError(w, r, httptypes.NewHTTPError(http.StatusForbidden, "not current leader"))
+		return
+	}
+	w.Header().Set("Content-Type", "application/json")
+	w.Write(stats)
+}
+
+// parseKeyRequest converts a received http.Request on keysPrefix to
+// a server Request, performing validation of supplied fields as appropriate.
+// If any validation fails, an empty Request and non-nil error is returned.
+func parseKeyRequest(r *http.Request, clock clockwork.Clock) (etcdserverpb.Request, bool, error) {
+	var noValueOnSuccess bool
+	emptyReq := etcdserverpb.Request{}
+
+	err := r.ParseForm()
+	if err != nil {
+		return emptyReq, false, etcdErr.NewRequestError(
+			etcdErr.EcodeInvalidForm,
+			err.Error(),
+		)
+	}
+
+	if !strings.HasPrefix(r.URL.Path, keysPrefix) {
+		return emptyReq, false, etcdErr.NewRequestError(
+			etcdErr.EcodeInvalidForm,
+			"incorrect key prefix",
+		)
+	}
+	p := path.Join(etcdserver.StoreKeysPrefix, r.URL.Path[len(keysPrefix):])
+
+	var pIdx, wIdx uint64
+	if pIdx, err = getUint64(r.Form, "prevIndex"); err != nil {
+		return emptyReq, false, etcdErr.NewRequestError(
+			etcdErr.EcodeIndexNaN,
+			`invalid value for "prevIndex"`,
+		)
+	}
+	if wIdx, err = getUint64(r.Form, "waitIndex"); err != nil {
+		return emptyReq, false, etcdErr.NewRequestError(
+			etcdErr.EcodeIndexNaN,
+			`invalid value for "waitIndex"`,
+		)
+	}
+
+	var rec, sort, wait, dir, quorum, stream bool
+	if rec, err = getBool(r.Form, "recursive"); err != nil {
+		return emptyReq, false, etcdErr.NewRequestError(
+			etcdErr.EcodeInvalidField,
+			`invalid value for "recursive"`,
+		)
+	}
+	if sort, err = getBool(r.Form, "sorted"); err != nil {
+		return emptyReq, false, etcdErr.NewRequestError(
+			etcdErr.EcodeInvalidField,
+			`invalid value for "sorted"`,
+		)
+	}
+	if wait, err = getBool(r.Form, "wait"); err != nil {
+		return emptyReq, false, etcdErr.NewRequestError(
+			etcdErr.EcodeInvalidField,
+			`invalid value for "wait"`,
+		)
+	}
+	// TODO(jonboulle): define what parameters dir is/isn't compatible with?
+	if dir, err = getBool(r.Form, "dir"); err != nil {
+		return emptyReq, false, etcdErr.NewRequestError(
+			etcdErr.EcodeInvalidField,
+			`invalid value for "dir"`,
+		)
+	}
+	if quorum, err = getBool(r.Form, "quorum"); err != nil {
+		return emptyReq, false, etcdErr.NewRequestError(
+			etcdErr.EcodeInvalidField,
+			`invalid value for "quorum"`,
+		)
+	}
+	if stream, err = getBool(r.Form, "stream"); err != nil {
+		return emptyReq, false, etcdErr.NewRequestError(
+			etcdErr.EcodeInvalidField,
+			`invalid value for "stream"`,
+		)
+	}
+
+	if wait && r.Method != "GET" {
+		return emptyReq, false, etcdErr.NewRequestError(
+			etcdErr.EcodeInvalidField,
+			`"wait" can only be used with GET requests`,
+		)
+	}
+
+	pV := r.FormValue("prevValue")
+	if _, ok := r.Form["prevValue"]; ok && pV == "" {
+		return emptyReq, false, etcdErr.NewRequestError(
+			etcdErr.EcodePrevValueRequired,
+			`"prevValue" cannot be empty`,
+		)
+	}
+
+	if noValueOnSuccess, err = getBool(r.Form, "noValueOnSuccess"); err != nil {
+		return emptyReq, false, etcdErr.NewRequestError(
+			etcdErr.EcodeInvalidField,
+			`invalid value for "noValueOnSuccess"`,
+		)
+	}
+
+	// TTL is nullable, so leave it null if not specified
+	// or an empty string
+	var ttl *uint64
+	if len(r.FormValue("ttl")) > 0 {
+		i, err := getUint64(r.Form, "ttl")
+		if err != nil {
+			return emptyReq, false, etcdErr.NewRequestError(
+				etcdErr.EcodeTTLNaN,
+				`invalid value for "ttl"`,
+			)
+		}
+		ttl = &i
+	}
+
+	// prevExist is nullable, so leave it null if not specified
+	var pe *bool
+	if _, ok := r.Form["prevExist"]; ok {
+		bv, err := getBool(r.Form, "prevExist")
+		if err != nil {
+			return emptyReq, false, etcdErr.NewRequestError(
+				etcdErr.EcodeInvalidField,
+				"invalid value for prevExist",
+			)
+		}
+		pe = &bv
+	}
+
+	// refresh is nullable, so leave it null if not specified
+	var refresh *bool
+	if _, ok := r.Form["refresh"]; ok {
+		bv, err := getBool(r.Form, "refresh")
+		if err != nil {
+			return emptyReq, false, etcdErr.NewRequestError(
+				etcdErr.EcodeInvalidField,
+				"invalid value for refresh",
+			)
+		}
+		refresh = &bv
+		if refresh != nil && *refresh {
+			val := r.FormValue("value")
+			if _, ok := r.Form["value"]; ok && val != "" {
+				return emptyReq, false, etcdErr.NewRequestError(
+					etcdErr.EcodeRefreshValue,
+					`A value was provided on a refresh`,
+				)
+			}
+			if ttl == nil {
+				return emptyReq, false, etcdErr.NewRequestError(
+					etcdErr.EcodeRefreshTTLRequired,
+					`No TTL value set`,
+				)
+			}
+		}
+	}
+
+	rr := etcdserverpb.Request{
+		Method:    r.Method,
+		Path:      p,
+		Val:       r.FormValue("value"),
+		Dir:       dir,
+		PrevValue: pV,
+		PrevIndex: pIdx,
+		PrevExist: pe,
+		Wait:      wait,
+		Since:     wIdx,
+		Recursive: rec,
+		Sorted:    sort,
+		Quorum:    quorum,
+		Stream:    stream,
+	}
+
+	if pe != nil {
+		rr.PrevExist = pe
+	}
+
+	if refresh != nil {
+		rr.Refresh = refresh
+	}
+
+	// Null TTL is equivalent to unset Expiration
+	if ttl != nil {
+		expr := time.Duration(*ttl) * time.Second
+		rr.Expiration = clock.Now().Add(expr).UnixNano()
+	}
+
+	return rr, noValueOnSuccess, nil
+}
+
+// writeKeyEvent trims the prefix of key path in a single Event under
+// StoreKeysPrefix, serializes it and writes the resulting JSON to the given
+// ResponseWriter, along with the appropriate headers.
+func writeKeyEvent(w http.ResponseWriter, resp etcdserver.Response, noValueOnSuccess bool) error {
+	ev := resp.Event
+	if ev == nil {
+		return errors.New("cannot write empty Event!")
+	}
+	w.Header().Set("Content-Type", "application/json")
+	w.Header().Set("X-Etcd-Index", fmt.Sprint(ev.EtcdIndex))
+	w.Header().Set("X-Raft-Index", fmt.Sprint(resp.Index))
+	w.Header().Set("X-Raft-Term", fmt.Sprint(resp.Term))
+
+	if ev.IsCreated() {
+		w.WriteHeader(http.StatusCreated)
+	}
+
+	ev = trimEventPrefix(ev, etcdserver.StoreKeysPrefix)
+	if noValueOnSuccess &&
+		(ev.Action == store.Set || ev.Action == store.CompareAndSwap ||
+			ev.Action == store.Create || ev.Action == store.Update) {
+		ev.Node = nil
+		ev.PrevNode = nil
+	}
+	return json.NewEncoder(w).Encode(ev)
+}
+
+func writeKeyNoAuth(w http.ResponseWriter) {
+	e := etcdErr.NewError(etcdErr.EcodeUnauthorized, "Insufficient credentials", 0)
+	e.WriteTo(w)
+}
+
+// writeKeyError logs and writes the given Error to the ResponseWriter.
+// If Error is not an etcdErr, the error will be converted to an etcd error.
+func writeKeyError(w http.ResponseWriter, err error) {
+	if err == nil {
+		return
+	}
+	switch e := err.(type) {
+	case *etcdErr.Error:
+		e.WriteTo(w)
+	default:
+		switch err {
+		case etcdserver.ErrTimeoutDueToLeaderFail, etcdserver.ErrTimeoutDueToConnectionLost:
+			mlog.MergeError(err)
+		default:
+			mlog.MergeErrorf("got unexpected response error (%v)", err)
+		}
+		ee := etcdErr.NewError(etcdErr.EcodeRaftInternal, err.Error(), 0)
+		ee.WriteTo(w)
+	}
+}
+
+func handleKeyWatch(ctx context.Context, w http.ResponseWriter, resp etcdserver.Response, stream bool) {
+	wa := resp.Watcher
+	defer wa.Remove()
+	ech := wa.EventChan()
+	var nch <-chan bool
+	if x, ok := w.(http.CloseNotifier); ok {
+		nch = x.CloseNotify()
+	}
+
+	w.Header().Set("Content-Type", "application/json")
+	w.Header().Set("X-Etcd-Index", fmt.Sprint(wa.StartIndex()))
+	w.Header().Set("X-Raft-Index", fmt.Sprint(resp.Index))
+	w.Header().Set("X-Raft-Term", fmt.Sprint(resp.Term))
+	w.WriteHeader(http.StatusOK)
+
+	// Ensure headers are flushed early, in case of long polling
+	w.(http.Flusher).Flush()
+
+	for {
+		select {
+		case <-nch:
+			// Client closed connection. Nothing to do.
+			return
+		case <-ctx.Done():
+			// Timed out. net/http will close the connection for us, so nothing to do.
+			return
+		case ev, ok := <-ech:
+			if !ok {
+				// A closed channel indicates that notifications arrived
+				// faster than we could send them to the client in time,
+				// so we simply end streaming.
+				return
+			}
+			ev = trimEventPrefix(ev, etcdserver.StoreKeysPrefix)
+			if err := json.NewEncoder(w).Encode(ev); err != nil {
+				// Should never be reached
+				plog.Warningf("error writing event (%v)", err)
+				return
+			}
+			if !stream {
+				return
+			}
+			w.(http.Flusher).Flush()
+		}
+	}
+}
+
+func trimEventPrefix(ev *store.Event, prefix string) *store.Event {
+	if ev == nil {
+		return nil
+	}
+	// Since the *Event may reference one in the store history,
+	// we must copy it before modifying.
+	e := ev.Clone()
+	trimNodeExternPrefix(e.Node, prefix)
+	trimNodeExternPrefix(e.PrevNode, prefix)
+	return e
+}
+
+func trimNodeExternPrefix(n *store.NodeExtern, prefix string) {
+	if n == nil {
+		return
+	}
+	n.Key = strings.TrimPrefix(n.Key, prefix)
+	for _, nn := range n.Nodes {
+		trimNodeExternPrefix(nn, prefix)
+	}
+}
+
+func trimErrorPrefix(err error, prefix string) error {
+	if e, ok := err.(*etcdErr.Error); ok {
+		e.Cause = strings.TrimPrefix(e.Cause, prefix)
+	}
+	return err
+}
+
+func unmarshalRequest(r *http.Request, req json.Unmarshaler, w http.ResponseWriter) bool {
+	ctype := r.Header.Get("Content-Type")
+	semicolonPosition := strings.Index(ctype, ";")
+	if semicolonPosition != -1 {
+		ctype = strings.TrimSpace(strings.ToLower(ctype[0:semicolonPosition]))
+	}
+	if ctype != "application/json" {
+		writeError(w, r, httptypes.NewHTTPError(http.StatusUnsupportedMediaType, fmt.Sprintf("Bad Content-Type %s, accept application/json", ctype)))
+		return false
+	}
+	b, err := ioutil.ReadAll(r.Body)
+	if err != nil {
+		writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, err.Error()))
+		return false
+	}
+	if err := req.UnmarshalJSON(b); err != nil {
+		writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, err.Error()))
+		return false
+	}
+	return true
+}
+
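+// getID parses a member ID from the path suffix after membersPrefix. It
+// writes a 405 when the suffix is empty and a 404 when it is not a valid
+// member ID, returning false in either case.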
+func getID(p string, w http.ResponseWriter) (types.ID, bool) {
+	idStr := trimPrefix(p, membersPrefix)
+	if idStr == "" {
+		http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
+		return 0, false
+	}
+	id, err := types.IDFromString(idStr)
+	if err != nil {
+		writeError(w, nil, httptypes.NewHTTPError(http.StatusNotFound, fmt.Sprintf("No such member: %s", idStr)))
+		return 0, false
+	}
+	return id, true
+}
+
+// getUint64 extracts a uint64 by the given key from a Form. If the key does
+// not exist in the form, 0 is returned. If the key exists but the value is
+// badly formed, an error is returned. If multiple values are present only the
+// first is considered.
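+// For example, getUint64(url.Values{"waitIndex": {"7", "8"}}, "waitIndex")
+// yields 7.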
+func getUint64(form url.Values, key string) (i uint64, err error) {
+	if vals, ok := form[key]; ok {
+		i, err = strconv.ParseUint(vals[0], 10, 64)
+	}
+	return
+}
+
+// getBool extracts a bool by the given key from a Form. If the key does not
+// exist in the form, false is returned. If the key exists but the value is
+// badly formed, an error is returned. If multiple values are present only the
+// first is considered.
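+// For example, getBool(url.Values{"wait": {"true"}}, "wait") yields true.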
+func getBool(form url.Values, key string) (b bool, err error) {
+	if vals, ok := form[key]; ok {
+		b, err = strconv.ParseBool(vals[0])
+	}
+	return
+}
+
+// trimPrefix removes a given prefix and any slash following the prefix
+// e.g.: trimPrefix("foo", "foo") == trimPrefix("foo/", "foo") == ""
+func trimPrefix(p, prefix string) (s string) {
+	s = strings.TrimPrefix(p, prefix)
+	s = strings.TrimPrefix(s, "/")
+	return
+}
+
+func newMemberCollection(ms []*membership.Member) *httptypes.MemberCollection {
+	c := httptypes.MemberCollection(make([]httptypes.Member, len(ms)))
+
+	for i, m := range ms {
+		c[i] = newMember(m)
+	}
+
+	return &c
+}
+
+func newMember(m *membership.Member) httptypes.Member {
+	tm := httptypes.Member{
+		ID:         m.ID.String(),
+		Name:       m.Name,
+		PeerURLs:   make([]string, len(m.PeerURLs)),
+		ClientURLs: make([]string, len(m.ClientURLs)),
+	}
+
+	copy(tm.PeerURLs, m.PeerURLs)
+	copy(tm.ClientURLs, m.ClientURLs)
+
+	return tm
+}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/client_auth.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/client_auth.go
new file mode 100644
index 0000000..606e2e0
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/client_auth.go
@@ -0,0 +1,543 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2http
+
+import (
+	"encoding/json"
+	"net/http"
+	"path"
+	"strings"
+
+	"github.com/coreos/etcd/etcdserver/api"
+	"github.com/coreos/etcd/etcdserver/api/v2http/httptypes"
+	"github.com/coreos/etcd/etcdserver/auth"
+)
+
+type authHandler struct {
+	sec                   auth.Store
+	cluster               api.Cluster
+	clientCertAuthEnabled bool
+}
+
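+// hasWriteRootAccess reports whether the request may modify root-level
+// resources: read-only methods (GET, HEAD) are always allowed; all other
+// methods require root access.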
+func hasWriteRootAccess(sec auth.Store, r *http.Request, clientCertAuthEnabled bool) bool {
+	if r.Method == "GET" || r.Method == "HEAD" {
+		return true
+	}
+	return hasRootAccess(sec, r, clientCertAuthEnabled)
+}
+
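+// userFromBasicAuth authenticates the request's basic-auth credentials
+// against the auth store, returning the matching user on success and nil
+// otherwise.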
+func userFromBasicAuth(sec auth.Store, r *http.Request) *auth.User {
+	username, password, ok := r.BasicAuth()
+	if !ok {
+		plog.Warningf("auth: malformed basic auth encoding")
+		return nil
+	}
+	user, err := sec.GetUser(username)
+	if err != nil {
+		return nil
+	}
+
+	ok = sec.CheckPassword(user, password)
+	if !ok {
+		plog.Warningf("auth: incorrect password for user: %s", username)
+		return nil
+	}
+	return &user
+}
+
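+// userFromClientCertificate returns the user whose name matches the common
+// name of a verified client certificate, or nil when the request carries no
+// TLS state or no certificate maps to a known user.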
+func userFromClientCertificate(sec auth.Store, r *http.Request) *auth.User {
+	if r.TLS == nil {
+		return nil
+	}
+
+	for _, chains := range r.TLS.VerifiedChains {
+		for _, chain := range chains {
+			plog.Debugf("auth: found common name %s.\n", chain.Subject.CommonName)
+			user, err := sec.GetUser(chain.Subject.CommonName)
+			if err == nil {
+				plog.Debugf("auth: authenticated user %s by cert common name.", user.User)
+				return &user
+			}
+		}
+	}
+	return nil
+}
+
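+// hasRootAccess reports whether the request is authenticated as a user
+// holding the root role. When no auth store is configured or auth is
+// disabled, every request qualifies.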
+func hasRootAccess(sec auth.Store, r *http.Request, clientCertAuthEnabled bool) bool {
+	if sec == nil {
+		// No store means no auth available, e.g., tests.
+		return true
+	}
+	if !sec.AuthEnabled() {
+		return true
+	}
+
+	var rootUser *auth.User
+	if r.Header.Get("Authorization") == "" && clientCertAuthEnabled {
+		rootUser = userFromClientCertificate(sec, r)
+		if rootUser == nil {
+			return false
+		}
+	} else {
+		rootUser = userFromBasicAuth(sec, r)
+		if rootUser == nil {
+			return false
+		}
+	}
+
+	for _, role := range rootUser.Roles {
+		if role == auth.RootRoleName {
+			return true
+		}
+	}
+	plog.Warningf("auth: user %s does not have the %s role for resource %s.", rootUser.User, auth.RootRoleName, r.URL.Path)
+	return false
+}
+
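+// hasKeyPrefixAccess reports whether the requesting user holds a role
+// granting (recursive) access to key. Users authenticate via basic auth,
+// or via client certificate when no Authorization header is present;
+// requests that authenticate neither way fall back to the guest role.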
+func hasKeyPrefixAccess(sec auth.Store, r *http.Request, key string, recursive, clientCertAuthEnabled bool) bool {
+	if sec == nil {
+		// No store means no auth available, e.g., tests.
+		return true
+	}
+	if !sec.AuthEnabled() {
+		return true
+	}
+
+	var user *auth.User
+	if r.Header.Get("Authorization") == "" {
+		if clientCertAuthEnabled {
+			user = userFromClientCertificate(sec, r)
+		}
+		if user == nil {
+			return hasGuestAccess(sec, r, key)
+		}
+	} else {
+		user = userFromBasicAuth(sec, r)
+		if user == nil {
+			return false
+		}
+	}
+
+	writeAccess := r.Method != "GET" && r.Method != "HEAD"
+	for _, roleName := range user.Roles {
+		role, err := sec.GetRole(roleName)
+		if err != nil {
+			continue
+		}
+		if recursive {
+			if role.HasRecursiveAccess(key, writeAccess) {
+				return true
+			}
+		} else if role.HasKeyAccess(key, writeAccess) {
+			return true
+		}
+	}
+	plog.Warningf("auth: invalid access for user %s on key %s.", user.User, key)
+	return false
+}
+
+func hasGuestAccess(sec auth.Store, r *http.Request, key string) bool {
+	writeAccess := r.Method != "GET" && r.Method != "HEAD"
+	role, err := sec.GetRole(auth.GuestRoleName)
+	if err != nil {
+		return false
+	}
+	if role.HasKeyAccess(key, writeAccess) {
+		return true
+	}
+	plog.Warningf("auth: invalid access for unauthenticated user on resource %s.", key)
+	return false
+}
+
+func writeNoAuth(w http.ResponseWriter, r *http.Request) {
+	herr := httptypes.NewHTTPError(http.StatusUnauthorized, "Insufficient credentials")
+	if err := herr.WriteTo(w); err != nil {
+		plog.Debugf("error writing HTTPError (%v) to %s", err, r.RemoteAddr)
+	}
+}
+
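+// handleAuth registers the v2 auth endpoints (roles, users, enable) on mux,
+// gating each handler behind the auth capability.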
+func handleAuth(mux *http.ServeMux, sh *authHandler) {
+	mux.HandleFunc(authPrefix+"/roles", capabilityHandler(api.AuthCapability, sh.baseRoles))
+	mux.HandleFunc(authPrefix+"/roles/", capabilityHandler(api.AuthCapability, sh.handleRoles))
+	mux.HandleFunc(authPrefix+"/users", capabilityHandler(api.AuthCapability, sh.baseUsers))
+	mux.HandleFunc(authPrefix+"/users/", capabilityHandler(api.AuthCapability, sh.handleUsers))
+	mux.HandleFunc(authPrefix+"/enable", capabilityHandler(api.AuthCapability, sh.enableDisable))
+}
+
+func (sh *authHandler) baseRoles(w http.ResponseWriter, r *http.Request) {
+	if !allowMethod(w, r.Method, "GET") {
+		return
+	}
+	if !hasRootAccess(sh.sec, r, sh.clientCertAuthEnabled) {
+		writeNoAuth(w, r)
+		return
+	}
+
+	w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String())
+	w.Header().Set("Content-Type", "application/json")
+
+	roles, err := sh.sec.AllRoles()
+	if err != nil {
+		writeError(w, r, err)
+		return
+	}
+	if roles == nil {
+		roles = make([]string, 0)
+	}
+
+	err = r.ParseForm()
+	if err != nil {
+		writeError(w, r, err)
+		return
+	}
+
+	var rolesCollections struct {
+		Roles []auth.Role `json:"roles"`
+	}
+	for _, roleName := range roles {
+		var role auth.Role
+		role, err = sh.sec.GetRole(roleName)
+		if err != nil {
+			writeError(w, r, err)
+			return
+		}
+		rolesCollections.Roles = append(rolesCollections.Roles, role)
+	}
+	err = json.NewEncoder(w).Encode(rolesCollections)
+
+	if err != nil {
+		plog.Warningf("baseRoles error encoding on %s", r.URL)
+		writeError(w, r, err)
+		return
+	}
+}
+
+func (sh *authHandler) handleRoles(w http.ResponseWriter, r *http.Request) {
+	subpath := path.Clean(r.URL.Path[len(authPrefix):])
+	// Split "/roles/rolename/command".
+	// First item is an empty string, second is "roles"
+	pieces := strings.Split(subpath, "/")
+	if len(pieces) == 2 {
+		sh.baseRoles(w, r)
+		return
+	}
+	if len(pieces) != 3 {
+		writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid path"))
+		return
+	}
+	sh.forRole(w, r, pieces[2])
+}
+
+func (sh *authHandler) forRole(w http.ResponseWriter, r *http.Request, role string) {
+	if !allowMethod(w, r.Method, "GET", "PUT", "DELETE") {
+		return
+	}
+	if !hasRootAccess(sh.sec, r, sh.clientCertAuthEnabled) {
+		writeNoAuth(w, r)
+		return
+	}
+	w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String())
+	w.Header().Set("Content-Type", "application/json")
+
+	switch r.Method {
+	case "GET":
+		data, err := sh.sec.GetRole(role)
+		if err != nil {
+			writeError(w, r, err)
+			return
+		}
+		err = json.NewEncoder(w).Encode(data)
+		if err != nil {
+			plog.Warningf("forRole error encoding on %s", r.URL)
+			return
+		}
+		return
+	case "PUT":
+		var in auth.Role
+		err := json.NewDecoder(r.Body).Decode(&in)
+		if err != nil {
+			writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid JSON in request body."))
+			return
+		}
+		if in.Role != role {
+			writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Role JSON name does not match the name in the URL"))
+			return
+		}
+
+		var out auth.Role
+
+		// create
+		if in.Grant.IsEmpty() && in.Revoke.IsEmpty() {
+			err = sh.sec.CreateRole(in)
+			if err != nil {
+				writeError(w, r, err)
+				return
+			}
+			w.WriteHeader(http.StatusCreated)
+			out = in
+		} else {
+			if !in.Permissions.IsEmpty() {
+				writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Role JSON contains both permissions and grant/revoke"))
+				return
+			}
+			out, err = sh.sec.UpdateRole(in)
+			if err != nil {
+				writeError(w, r, err)
+				return
+			}
+			w.WriteHeader(http.StatusOK)
+		}
+
+		err = json.NewEncoder(w).Encode(out)
+		if err != nil {
+			plog.Warningf("forRole error encoding on %s", r.URL)
+			return
+		}
+		return
+	case "DELETE":
+		err := sh.sec.DeleteRole(role)
+		if err != nil {
+			writeError(w, r, err)
+			return
+		}
+	}
+}
+
+type userWithRoles struct {
+	User  string      `json:"user"`
+	Roles []auth.Role `json:"roles,omitempty"`
+}
+
+type usersCollections struct {
+	Users []userWithRoles `json:"users"`
+}
+
+func (sh *authHandler) baseUsers(w http.ResponseWriter, r *http.Request) {
+	if !allowMethod(w, r.Method, "GET") {
+		return
+	}
+	if !hasRootAccess(sh.sec, r, sh.clientCertAuthEnabled) {
+		writeNoAuth(w, r)
+		return
+	}
+	w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String())
+	w.Header().Set("Content-Type", "application/json")
+
+	users, err := sh.sec.AllUsers()
+	if err != nil {
+		writeError(w, r, err)
+		return
+	}
+	if users == nil {
+		users = make([]string, 0)
+	}
+
+	err = r.ParseForm()
+	if err != nil {
+		writeError(w, r, err)
+		return
+	}
+
+	ucs := usersCollections{}
+	for _, userName := range users {
+		var user auth.User
+		user, err = sh.sec.GetUser(userName)
+		if err != nil {
+			writeError(w, r, err)
+			return
+		}
+
+		uwr := userWithRoles{User: user.User}
+		for _, roleName := range user.Roles {
+			var role auth.Role
+			role, err = sh.sec.GetRole(roleName)
+			if err != nil {
+				continue
+			}
+			uwr.Roles = append(uwr.Roles, role)
+		}
+
+		ucs.Users = append(ucs.Users, uwr)
+	}
+	err = json.NewEncoder(w).Encode(ucs)
+
+	if err != nil {
+		plog.Warningf("baseUsers error encoding on %s", r.URL)
+		writeError(w, r, err)
+		return
+	}
+}
+
+func (sh *authHandler) handleUsers(w http.ResponseWriter, r *http.Request) {
+	subpath := path.Clean(r.URL.Path[len(authPrefix):])
+	// Split "/users/username".
+	// First item is an empty string, second is "users"
+	pieces := strings.Split(subpath, "/")
+	if len(pieces) == 2 {
+		sh.baseUsers(w, r)
+		return
+	}
+	if len(pieces) != 3 {
+		writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid path"))
+		return
+	}
+	sh.forUser(w, r, pieces[2])
+}
+
+func (sh *authHandler) forUser(w http.ResponseWriter, r *http.Request, user string) {
+	if !allowMethod(w, r.Method, "GET", "PUT", "DELETE") {
+		return
+	}
+	if !hasRootAccess(sh.sec, r, sh.clientCertAuthEnabled) {
+		writeNoAuth(w, r)
+		return
+	}
+	w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String())
+	w.Header().Set("Content-Type", "application/json")
+
+	switch r.Method {
+	case "GET":
+		u, err := sh.sec.GetUser(user)
+		if err != nil {
+			writeError(w, r, err)
+			return
+		}
+
+		err = r.ParseForm()
+		if err != nil {
+			writeError(w, r, err)
+			return
+		}
+
+		uwr := userWithRoles{User: u.User}
+		for _, roleName := range u.Roles {
+			var role auth.Role
+			role, err = sh.sec.GetRole(roleName)
+			if err != nil {
+				writeError(w, r, err)
+				return
+			}
+			uwr.Roles = append(uwr.Roles, role)
+		}
+		err = json.NewEncoder(w).Encode(uwr)
+
+		if err != nil {
+			plog.Warningf("forUser error encoding on %s", r.URL)
+			return
+		}
+		return
+	case "PUT":
+		var u auth.User
+		err := json.NewDecoder(r.Body).Decode(&u)
+		if err != nil {
+			writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid JSON in request body."))
+			return
+		}
+		if u.User != user {
+			writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "User JSON name does not match the name in the URL"))
+			return
+		}
+
+		var (
+			out     auth.User
+			created bool
+		)
+
+		if len(u.Grant) == 0 && len(u.Revoke) == 0 {
+			// create or update
+			if len(u.Roles) != 0 {
+				out, err = sh.sec.CreateUser(u)
+			} else {
+				// With no roles given, the intent may be either a create or
+				// a password update, so let the store decide. (When both a
+				// password and roles are passed the intent is ambiguous, so
+				// only plain creation is attempted above.)
+				out, created, err = sh.sec.CreateOrUpdateUser(u)
+			}
+
+			if err != nil {
+				writeError(w, r, err)
+				return
+			}
+		} else {
+			// update case
+			if len(u.Roles) != 0 {
+				writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "User JSON contains both roles and grant/revoke"))
+				return
+			}
+			out, err = sh.sec.UpdateUser(u)
+			if err != nil {
+				writeError(w, r, err)
+				return
+			}
+		}
+
+		if created {
+			w.WriteHeader(http.StatusCreated)
+		} else {
+			w.WriteHeader(http.StatusOK)
+		}
+
+		out.Password = ""
+
+		err = json.NewEncoder(w).Encode(out)
+		if err != nil {
+			plog.Warningf("forUser error encoding on %s", r.URL)
+			return
+		}
+		return
+	case "DELETE":
+		err := sh.sec.DeleteUser(user)
+		if err != nil {
+			writeError(w, r, err)
+			return
+		}
+	}
+}
+
+type enabled struct {
+	Enabled bool `json:"enabled"`
+}
+
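+// enableDisable reports (GET), enables (PUT), or disables (DELETE) v2 auth;
+// the mutating methods require root access.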
+func (sh *authHandler) enableDisable(w http.ResponseWriter, r *http.Request) {
+	if !allowMethod(w, r.Method, "GET", "PUT", "DELETE") {
+		return
+	}
+	if !hasWriteRootAccess(sh.sec, r, sh.clientCertAuthEnabled) {
+		writeNoAuth(w, r)
+		return
+	}
+	w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String())
+	w.Header().Set("Content-Type", "application/json")
+	isEnabled := sh.sec.AuthEnabled()
+	switch r.Method {
+	case "GET":
+		jsonDict := enabled{isEnabled}
+		err := json.NewEncoder(w).Encode(jsonDict)
+		if err != nil {
+			plog.Warningf("error encoding auth state on %s", r.URL)
+		}
+	case "PUT":
+		err := sh.sec.EnableAuth()
+		if err != nil {
+			writeError(w, r, err)
+			return
+		}
+	case "DELETE":
+		err := sh.sec.DisableAuth()
+		if err != nil {
+			writeError(w, r, err)
+			return
+		}
+	}
+}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/doc.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/doc.go
new file mode 100644
index 0000000..475c4b1
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package v2http provides etcd client and server implementations.
+package v2http
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/http.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/http.go
new file mode 100644
index 0000000..589c172
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/http.go
@@ -0,0 +1,74 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2http
+
+import (
+	"math"
+	"net/http"
+	"strings"
+	"time"
+
+	"github.com/coreos/etcd/etcdserver/api/etcdhttp"
+	"github.com/coreos/etcd/etcdserver/api/v2http/httptypes"
+	"github.com/coreos/etcd/etcdserver/auth"
+	"github.com/coreos/etcd/pkg/logutil"
+
+	"github.com/coreos/pkg/capnslog"
+)
+
+const (
+	// time to wait for a Watch request
+	defaultWatchTimeout = time.Duration(math.MaxInt64)
+)
+
+var (
+	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api/v2http")
+	mlog = logutil.NewMergeLogger(plog)
+)
+
+func writeError(w http.ResponseWriter, r *http.Request, err error) {
+	if err == nil {
+		return
+	}
+	if e, ok := err.(auth.Error); ok {
+		herr := httptypes.NewHTTPError(e.HTTPStatus(), e.Error())
+		if et := herr.WriteTo(w); et != nil {
+			plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr)
+		}
+		return
+	}
+	etcdhttp.WriteError(w, r, err)
+}
+
+// allowMethod verifies that the given method is one of the allowed methods,
+// and if not, it writes an error to w.  A boolean is returned indicating
+// whether or not the method is allowed.
+func allowMethod(w http.ResponseWriter, m string, ms ...string) bool {
+	for _, meth := range ms {
+		if m == meth {
+			return true
+		}
+	}
+	w.Header().Set("Allow", strings.Join(ms, ","))
+	http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
+	return false
+}
+
+func requestLogger(handler http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		plog.Debugf("[%s] %s remote:%s", r.Method, r.RequestURI, r.RemoteAddr)
+		handler.ServeHTTP(w, r)
+	})
+}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes/errors.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes/errors.go
new file mode 100644
index 0000000..0657604
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes/errors.go
@@ -0,0 +1,56 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package httptypes
+
+import (
+	"encoding/json"
+	"net/http"
+
+	"github.com/coreos/pkg/capnslog"
+)
+
+var (
+	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api/v2http/httptypes")
+)
+
+type HTTPError struct {
+	Message string `json:"message"`
+	// Code is the HTTP status code
+	Code int `json:"-"`
+}
+
+func (e HTTPError) Error() string {
+	return e.Message
+}
+
+func (e HTTPError) WriteTo(w http.ResponseWriter) error {
+	w.Header().Set("Content-Type", "application/json")
+	w.WriteHeader(e.Code)
+	b, err := json.Marshal(e)
+	if err != nil {
+		plog.Panicf("marshal HTTPError should never fail (%v)", err)
+	}
+	if _, err := w.Write(b); err != nil {
+		return err
+	}
+	return nil
+}
+
+func NewHTTPError(code int, m string) *HTTPError {
+	return &HTTPError{
+		Message: m,
+		Code:    code,
+	}
+}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes/member.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes/member.go
new file mode 100644
index 0000000..738d744
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes/member.go
@@ -0,0 +1,69 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package httptypes defines how etcd's HTTP API entities are serialized to and
+// deserialized from JSON.
+package httptypes
+
+import (
+	"encoding/json"
+
+	"github.com/coreos/etcd/pkg/types"
+)
+
+type Member struct {
+	ID         string   `json:"id"`
+	Name       string   `json:"name"`
+	PeerURLs   []string `json:"peerURLs"`
+	ClientURLs []string `json:"clientURLs"`
+}
+
+type MemberCreateRequest struct {
+	PeerURLs types.URLs
+}
+
+type MemberUpdateRequest struct {
+	MemberCreateRequest
+}
+
+func (m *MemberCreateRequest) UnmarshalJSON(data []byte) error {
+	s := struct {
+		PeerURLs []string `json:"peerURLs"`
+	}{}
+
+	err := json.Unmarshal(data, &s)
+	if err != nil {
+		return err
+	}
+
+	urls, err := types.NewURLs(s.PeerURLs)
+	if err != nil {
+		return err
+	}
+
+	m.PeerURLs = urls
+	return nil
+}
+
+type MemberCollection []Member
+
+func (c *MemberCollection) MarshalJSON() ([]byte, error) {
+	d := struct {
+		Members []Member `json:"members"`
+	}{
+		Members: []Member(*c),
+	}
+
+	return json.Marshal(d)
+}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/metrics.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/metrics.go
new file mode 100644
index 0000000..fdfb0c6
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/metrics.go
@@ -0,0 +1,96 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2http
+
+import (
+	"strconv"
+	"time"
+
+	"net/http"
+
+	etcdErr "github.com/coreos/etcd/error"
+	"github.com/coreos/etcd/etcdserver"
+	"github.com/coreos/etcd/etcdserver/api/v2http/httptypes"
+	"github.com/coreos/etcd/etcdserver/etcdserverpb"
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+var (
+	incomingEvents = prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Namespace: "etcd",
+			Subsystem: "http",
+			Name:      "received_total",
+			Help:      "Counter of requests received into the system (successfully parsed and authd).",
+		}, []string{"method"})
+
+	failedEvents = prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Namespace: "etcd",
+			Subsystem: "http",
+			Name:      "failed_total",
+			Help:      "Counter of handle failures of requests (non-watches), by method (GET/PUT etc.) and code (400, 500 etc.).",
+		}, []string{"method", "code"})
+
+	successfulEventsHandlingTime = prometheus.NewHistogramVec(
+		prometheus.HistogramOpts{
+			Namespace: "etcd",
+			Subsystem: "http",
+			Name:      "successful_duration_seconds",
+			Help:      "Bucketed histogram of processing time (s) of successfully handled requests (non-watches), by method (GET/PUT etc.).",
+			Buckets:   prometheus.ExponentialBuckets(0.0005, 2, 13),
+		}, []string{"method"})
+)
+
+func init() {
+	prometheus.MustRegister(incomingEvents)
+	prometheus.MustRegister(failedEvents)
+	prometheus.MustRegister(successfulEventsHandlingTime)
+}
+
+func reportRequestReceived(request etcdserverpb.Request) {
+	incomingEvents.WithLabelValues(methodFromRequest(request)).Inc()
+}
+
+func reportRequestCompleted(request etcdserverpb.Request, response etcdserver.Response, startTime time.Time) {
+	method := methodFromRequest(request)
+	successfulEventsHandlingTime.WithLabelValues(method).Observe(time.Since(startTime).Seconds())
+}
+
+func reportRequestFailed(request etcdserverpb.Request, err error) {
+	method := methodFromRequest(request)
+	failedEvents.WithLabelValues(method, strconv.Itoa(codeFromError(err))).Inc()
+}
+
+func methodFromRequest(request etcdserverpb.Request) string {
+	if request.Method == "GET" && request.Quorum {
+		return "QGET"
+	}
+	return request.Method
+}
+
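+// codeFromError maps an error to the HTTP status code recorded in metrics;
+// errors of unknown type (and, defensively, nil) are counted as 500s.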
+func codeFromError(err error) int {
+	if err == nil {
+		return http.StatusInternalServerError
+	}
+	switch e := err.(type) {
+	case *etcdErr.Error:
+		return e.StatusCode()
+	case *httptypes.HTTPError:
+		return e.Code
+	default:
+		return http.StatusInternalServerError
+	}
+}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2v3/cluster.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2v3/cluster.go
new file mode 100644
index 0000000..b53e6d7
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/api/v2v3/cluster.go
@@ -0,0 +1,31 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2v3
+
+import (
+	"github.com/coreos/etcd/etcdserver/membership"
+	"github.com/coreos/etcd/pkg/types"
+
+	"github.com/coreos/go-semver/semver"
+)
+
+func (s *v2v3Server) ID() types.ID {
+	// TODO: use an actual member ID
+	return types.ID(0xe7cd2f00d)
+}
+func (s *v2v3Server) ClientURLs() []string                  { panic("STUB") }
+func (s *v2v3Server) Members() []*membership.Member         { panic("STUB") }
+func (s *v2v3Server) Member(id types.ID) *membership.Member { panic("STUB") }
+func (s *v2v3Server) Version() *semver.Version              { panic("STUB") }
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2v3/doc.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2v3/doc.go
new file mode 100644
index 0000000..2ff372f
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/api/v2v3/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package v2v3 provides a ServerV2 implementation backed by clientv3.Client.
+package v2v3
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2v3/server.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2v3/server.go
new file mode 100644
index 0000000..2ef63ce
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/api/v2v3/server.go
@@ -0,0 +1,117 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2v3
+
+import (
+	"context"
+	"net/http"
+	"time"
+
+	"github.com/coreos/etcd/clientv3"
+	"github.com/coreos/etcd/etcdserver"
+	"github.com/coreos/etcd/etcdserver/api"
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+	"github.com/coreos/etcd/etcdserver/membership"
+	"github.com/coreos/etcd/pkg/types"
+
+	"github.com/coreos/go-semver/semver"
+)
+
+type fakeStats struct{}
+
+func (s *fakeStats) SelfStats() []byte   { return nil }
+func (s *fakeStats) LeaderStats() []byte { return nil }
+func (s *fakeStats) StoreStats() []byte  { return nil }
+
+type v2v3Server struct {
+	c     *clientv3.Client
+	store *v2v3Store
+	fakeStats
+}
+
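+// NewServer wraps a v3 client in an etcdserver.ServerPeer that serves v2
+// requests against the v3 keyspace stored under the given prefix.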
+func NewServer(c *clientv3.Client, pfx string) etcdserver.ServerPeer {
+	return &v2v3Server{c: c, store: newStore(c, pfx)}
+}
+
+func (s *v2v3Server) ClientCertAuthEnabled() bool { return false }
+
+func (s *v2v3Server) LeaseHandler() http.Handler { panic("STUB: lease handler") }
+func (s *v2v3Server) RaftHandler() http.Handler  { panic("STUB: raft handler") }
+
+func (s *v2v3Server) Leader() types.ID {
+	ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Second)
+	defer cancel()
+	resp, err := s.c.Status(ctx, s.c.Endpoints()[0])
+	if err != nil {
+		return 0
+	}
+	return types.ID(resp.Leader)
+}
+
+func (s *v2v3Server) AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) {
+	resp, err := s.c.MemberAdd(ctx, memb.PeerURLs)
+	if err != nil {
+		return nil, err
+	}
+	return v3MembersToMembership(resp.Members), nil
+}
+
+func (s *v2v3Server) RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error) {
+	resp, err := s.c.MemberRemove(ctx, id)
+	if err != nil {
+		return nil, err
+	}
+	return v3MembersToMembership(resp.Members), nil
+}
+
+func (s *v2v3Server) UpdateMember(ctx context.Context, m membership.Member) ([]*membership.Member, error) {
+	resp, err := s.c.MemberUpdate(ctx, uint64(m.ID), m.PeerURLs)
+	if err != nil {
+		return nil, err
+	}
+	return v3MembersToMembership(resp.Members), nil
+}
+
+func v3MembersToMembership(v3membs []*pb.Member) []*membership.Member {
+	membs := make([]*membership.Member, len(v3membs))
+	for i, m := range v3membs {
+		membs[i] = &membership.Member{
+			ID: types.ID(m.ID),
+			RaftAttributes: membership.RaftAttributes{
+				PeerURLs: m.PeerURLs,
+			},
+			Attributes: membership.Attributes{
+				Name:       m.Name,
+				ClientURLs: m.ClientURLs,
+			},
+		}
+	}
+	return membs
+}
+
+func (s *v2v3Server) ClusterVersion() *semver.Version { return s.Version() }
+func (s *v2v3Server) Cluster() api.Cluster            { return s }
+func (s *v2v3Server) Alarms() []*pb.AlarmMember       { return nil }
+
+func (s *v2v3Server) Do(ctx context.Context, r pb.Request) (etcdserver.Response, error) {
+	applier := etcdserver.NewApplierV2(s.store, nil)
+	reqHandler := etcdserver.NewStoreRequestV2Handler(s.store, applier)
+	req := (*etcdserver.RequestV2)(&r)
+	resp, err := req.Handle(ctx, reqHandler)
+	if resp.Err != nil {
+		return resp, resp.Err
+	}
+	return resp, err
+}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2v3/store.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2v3/store.go
new file mode 100644
index 0000000..444f93f
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/api/v2v3/store.go
@@ -0,0 +1,620 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2v3
+
+import (
+	"context"
+	"fmt"
+	"path"
+	"strings"
+	"time"
+
+	"github.com/coreos/etcd/clientv3"
+	"github.com/coreos/etcd/clientv3/concurrency"
+	etcdErr "github.com/coreos/etcd/error"
+	"github.com/coreos/etcd/mvcc/mvccpb"
+	"github.com/coreos/etcd/store"
+)
+
+// v2v3Store implements the v2 store.Store interface on top of
+// a v3 client.
+type v2v3Store struct {
+	c *clientv3.Client
+	// pfx is the v3 prefix where keys should be stored.
+	pfx string
+	ctx context.Context
+}
+
+const maxPathDepth = 63
+
+var errUnsupported = fmt.Errorf("TTLs are unsupported")
+
+func NewStore(c *clientv3.Client, pfx string) store.Store { return newStore(c, pfx) }
+
+func newStore(c *clientv3.Client, pfx string) *v2v3Store { return &v2v3Store{c, pfx, c.Ctx()} }
+
+func (s *v2v3Store) Index() uint64 { panic("STUB") }
+
+func (s *v2v3Store) Get(nodePath string, recursive, sorted bool) (*store.Event, error) {
+	key := s.mkPath(nodePath)
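+	// Fetch both the directory form (trailing slash) and the plain key in
+	// a single txn so both reads observe the same revision.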
+	resp, err := s.c.Txn(s.ctx).Then(
+		clientv3.OpGet(key+"/"),
+		clientv3.OpGet(key),
+	).Commit()
+	if err != nil {
+		return nil, err
+	}
+
+	if kvs := resp.Responses[0].GetResponseRange().Kvs; len(kvs) != 0 || isRoot(nodePath) {
+		nodes, err := s.getDir(nodePath, recursive, sorted, resp.Header.Revision)
+		if err != nil {
+			return nil, err
+		}
+		cidx, midx := uint64(0), uint64(0)
+		if len(kvs) > 0 {
+			cidx, midx = mkV2Rev(kvs[0].CreateRevision), mkV2Rev(kvs[0].ModRevision)
+		}
+		return &store.Event{
+			Action: store.Get,
+			Node: &store.NodeExtern{
+				Key:           nodePath,
+				Dir:           true,
+				Nodes:         nodes,
+				CreatedIndex:  cidx,
+				ModifiedIndex: midx,
+			},
+			EtcdIndex: mkV2Rev(resp.Header.Revision),
+		}, nil
+	}
+
+	kvs := resp.Responses[1].GetResponseRange().Kvs
+	if len(kvs) == 0 {
+		return nil, etcdErr.NewError(etcdErr.EcodeKeyNotFound, nodePath, mkV2Rev(resp.Header.Revision))
+	}
+
+	return &store.Event{
+		Action:    store.Get,
+		Node:      s.mkV2Node(kvs[0]),
+		EtcdIndex: mkV2Rev(resp.Header.Revision),
+	}, nil
+}
+
+func (s *v2v3Store) getDir(nodePath string, recursive, sorted bool, rev int64) ([]*store.NodeExtern, error) {
+	rootNodes, err := s.getDirDepth(nodePath, 1, rev)
+	if err != nil || !recursive {
+		return rootNodes, err
+	}
+	nextNodes := rootNodes
+	nodes := make(map[string]*store.NodeExtern)
+	// Walk the subdirectories breadth-first.
+	for i := 2; len(nextNodes) > 0; i++ {
+		for _, n := range nextNodes {
+			nodes[n.Key] = n
+			if parent := nodes[path.Dir(n.Key)]; parent != nil {
+				parent.Nodes = append(parent.Nodes, n)
+			}
+		}
+		if nextNodes, err = s.getDirDepth(nodePath, i, rev); err != nil {
+			return nil, err
+		}
+	}
+	return rootNodes, nil
+}
+
+func (s *v2v3Store) getDirDepth(nodePath string, depth int, rev int64) ([]*store.NodeExtern, error) {
+	pd := s.mkPathDepth(nodePath, depth)
+	resp, err := s.c.Get(s.ctx, pd, clientv3.WithPrefix(), clientv3.WithRev(rev))
+	if err != nil {
+		return nil, err
+	}
+
+	nodes := make([]*store.NodeExtern, len(resp.Kvs))
+	for i, kv := range resp.Kvs {
+		nodes[i] = s.mkV2Node(kv)
+	}
+	return nodes, nil
+}
+
+func (s *v2v3Store) Set(
+	nodePath string,
+	dir bool,
+	value string,
+	expireOpts store.TTLOptionSet,
+) (*store.Event, error) {
+	if expireOpts.Refresh || !expireOpts.ExpireTime.IsZero() {
+		return nil, errUnsupported
+	}
+
+	if isRoot(nodePath) {
+		return nil, etcdErr.NewError(etcdErr.EcodeRootROnly, nodePath, 0)
+	}
+
+	ecode := 0
+	applyf := func(stm concurrency.STM) error {
+		parent := path.Dir(nodePath)
+		if !isRoot(parent) && stm.Rev(s.mkPath(parent)+"/") == 0 {
+			ecode = etcdErr.EcodeKeyNotFound
+			return nil
+		}
+
+		key := s.mkPath(nodePath)
+		if dir {
+			if stm.Rev(key) != 0 {
+				// exists as non-dir
+				ecode = etcdErr.EcodeNotDir
+				return nil
+			}
+			key = key + "/"
+		} else if stm.Rev(key+"/") != 0 {
+			ecode = etcdErr.EcodeNotFile
+			return nil
+		}
+		stm.Put(key, value, clientv3.WithPrevKV())
+		stm.Put(s.mkActionKey(), store.Set)
+		return nil
+	}
+
+	resp, err := s.newSTM(applyf)
+	if err != nil {
+		return nil, err
+	}
+	if ecode != 0 {
+		return nil, etcdErr.NewError(ecode, nodePath, mkV2Rev(resp.Header.Revision))
+	}
+
+	createRev := resp.Header.Revision
+	var pn *store.NodeExtern
+	if pkv := prevKeyFromPuts(resp); pkv != nil {
+		pn = s.mkV2Node(pkv)
+		createRev = pkv.CreateRevision
+	}
+
+	vp := &value
+	if dir {
+		vp = nil
+	}
+	return &store.Event{
+		Action: store.Set,
+		Node: &store.NodeExtern{
+			Key:           nodePath,
+			Value:         vp,
+			Dir:           dir,
+			ModifiedIndex: mkV2Rev(resp.Header.Revision),
+			CreatedIndex:  mkV2Rev(createRev),
+		},
+		PrevNode:  pn,
+		EtcdIndex: mkV2Rev(resp.Header.Revision),
+	}, nil
+}
+
+func (s *v2v3Store) Update(nodePath, newValue string, expireOpts store.TTLOptionSet) (*store.Event, error) {
+	if isRoot(nodePath) {
+		return nil, etcdErr.NewError(etcdErr.EcodeRootROnly, nodePath, 0)
+	}
+
+	if expireOpts.Refresh || !expireOpts.ExpireTime.IsZero() {
+		return nil, errUnsupported
+	}
+
+	key := s.mkPath(nodePath)
+	ecode := 0
+	applyf := func(stm concurrency.STM) error {
+		if rev := stm.Rev(key + "/"); rev != 0 {
+			ecode = etcdErr.EcodeNotFile
+			return nil
+		}
+		if rev := stm.Rev(key); rev == 0 {
+			ecode = etcdErr.EcodeKeyNotFound
+			return nil
+		}
+		stm.Put(key, newValue, clientv3.WithPrevKV())
+		stm.Put(s.mkActionKey(), store.Update)
+		return nil
+	}
+
+	resp, err := s.newSTM(applyf)
+	if err != nil {
+		return nil, err
+	}
+	if ecode != 0 {
+		return nil, etcdErr.NewError(ecode, nodePath, mkV2Rev(resp.Header.Revision))
+	}
+
+	pkv := prevKeyFromPuts(resp)
+	return &store.Event{
+		Action: store.Update,
+		Node: &store.NodeExtern{
+			Key:           nodePath,
+			Value:         &newValue,
+			ModifiedIndex: mkV2Rev(resp.Header.Revision),
+			CreatedIndex:  mkV2Rev(pkv.CreateRevision),
+		},
+		PrevNode:  s.mkV2Node(pkv),
+		EtcdIndex: mkV2Rev(resp.Header.Revision),
+	}, nil
+}
+
+func (s *v2v3Store) Create(
+	nodePath string,
+	dir bool,
+	value string,
+	unique bool,
+	expireOpts store.TTLOptionSet,
+) (*store.Event, error) {
+	if isRoot(nodePath) {
+		return nil, etcdErr.NewError(etcdErr.EcodeRootROnly, nodePath, 0)
+	}
+	if expireOpts.Refresh || !expireOpts.ExpireTime.IsZero() {
+		return nil, errUnsupported
+	}
+	ecode := 0
+	applyf := func(stm concurrency.STM) error {
+		ecode = 0
+		key := s.mkPath(nodePath)
+		if unique {
+			// append unique item under the node path
+			for {
+				key = nodePath + "/" + fmt.Sprintf("%020s", time.Now())
+				key = path.Clean(path.Join("/", key))
+				key = s.mkPath(key)
+				if stm.Rev(key) == 0 {
+					break
+				}
+			}
+		}
+		if stm.Rev(key) > 0 || stm.Rev(key+"/") > 0 {
+			ecode = etcdErr.EcodeNodeExist
+			return nil
+		}
+		// build path if any directories in path do not exist
+		dirs := []string{}
+		for p := path.Dir(nodePath); !isRoot(p); p = path.Dir(p) {
+			pp := s.mkPath(p)
+			if stm.Rev(pp) > 0 {
+				ecode = etcdErr.EcodeNotDir
+				return nil
+			}
+			if stm.Rev(pp+"/") == 0 {
+				dirs = append(dirs, pp+"/")
+			}
+		}
+		for _, d := range dirs {
+			stm.Put(d, "")
+		}
+
+		if dir {
+			// directories marked with extra slash in key name
+			key += "/"
+		}
+		stm.Put(key, value)
+		stm.Put(s.mkActionKey(), store.Create)
+		return nil
+	}
+
+	resp, err := s.newSTM(applyf)
+	if err != nil {
+		return nil, err
+	}
+	if ecode != 0 {
+		return nil, etcdErr.NewError(ecode, nodePath, mkV2Rev(resp.Header.Revision))
+	}
+
+	var v *string
+	if !dir {
+		v = &value
+	}
+
+	return &store.Event{
+		Action: store.Create,
+		Node: &store.NodeExtern{
+			Key:           nodePath,
+			Value:         v,
+			Dir:           dir,
+			ModifiedIndex: mkV2Rev(resp.Header.Revision),
+			CreatedIndex:  mkV2Rev(resp.Header.Revision),
+		},
+		EtcdIndex: mkV2Rev(resp.Header.Revision),
+	}, nil
+}
+
+func (s *v2v3Store) CompareAndSwap(
+	nodePath string,
+	prevValue string,
+	prevIndex uint64,
+	value string,
+	expireOpts store.TTLOptionSet,
+) (*store.Event, error) {
+	if isRoot(nodePath) {
+		return nil, etcdErr.NewError(etcdErr.EcodeRootROnly, nodePath, 0)
+	}
+	if expireOpts.Refresh || !expireOpts.ExpireTime.IsZero() {
+		return nil, errUnsupported
+	}
+
+	key := s.mkPath(nodePath)
+	resp, err := s.c.Txn(s.ctx).If(
+		s.mkCompare(nodePath, prevValue, prevIndex)...,
+	).Then(
+		clientv3.OpPut(key, value, clientv3.WithPrevKV()),
+		clientv3.OpPut(s.mkActionKey(), store.CompareAndSwap),
+	).Else(
+		clientv3.OpGet(key),
+		clientv3.OpGet(key+"/"),
+	).Commit()
+
+	if err != nil {
+		return nil, err
+	}
+	if !resp.Succeeded {
+		return nil, compareFail(nodePath, prevValue, prevIndex, resp)
+	}
+
+	pkv := resp.Responses[0].GetResponsePut().PrevKv
+	return &store.Event{
+		Action: store.CompareAndSwap,
+		Node: &store.NodeExtern{
+			Key:           nodePath,
+			Value:         &value,
+			CreatedIndex:  mkV2Rev(pkv.CreateRevision),
+			ModifiedIndex: mkV2Rev(resp.Header.Revision),
+		},
+		PrevNode:  s.mkV2Node(pkv),
+		EtcdIndex: mkV2Rev(resp.Header.Revision),
+	}, nil
+}
+
+func (s *v2v3Store) Delete(nodePath string, dir, recursive bool) (*store.Event, error) {
+	if isRoot(nodePath) {
+		return nil, etcdErr.NewError(etcdErr.EcodeRootROnly, nodePath, 0)
+	}
+	if !dir && !recursive {
+		return s.deleteNode(nodePath)
+	}
+	if !recursive {
+		return s.deleteEmptyDir(nodePath)
+	}
+
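+	// A recursive delete removes the directory marker plus, with one
+	// prefix delete per encoded depth level, every key beneath it, and
+	// then records the action.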
+	dels := make([]clientv3.Op, maxPathDepth+1)
+	dels[0] = clientv3.OpDelete(s.mkPath(nodePath)+"/", clientv3.WithPrevKV())
+	for i := 1; i < maxPathDepth; i++ {
+		dels[i] = clientv3.OpDelete(s.mkPathDepth(nodePath, i), clientv3.WithPrefix())
+	}
+	dels[maxPathDepth] = clientv3.OpPut(s.mkActionKey(), store.Delete)
+
+	resp, err := s.c.Txn(s.ctx).If(
+		clientv3.Compare(clientv3.Version(s.mkPath(nodePath)+"/"), ">", 0),
+		clientv3.Compare(clientv3.Version(s.mkPathDepth(nodePath, maxPathDepth)+"/"), "=", 0),
+	).Then(
+		dels...,
+	).Commit()
+	if err != nil {
+		return nil, err
+	}
+	if !resp.Succeeded {
+		return nil, etcdErr.NewError(etcdErr.EcodeNodeExist, nodePath, mkV2Rev(resp.Header.Revision))
+	}
+	dresp := resp.Responses[0].GetResponseDeleteRange()
+	return &store.Event{
+		Action:    store.Delete,
+		PrevNode:  s.mkV2Node(dresp.PrevKvs[0]),
+		EtcdIndex: mkV2Rev(resp.Header.Revision),
+	}, nil
+}
+
+func (s *v2v3Store) deleteEmptyDir(nodePath string) (*store.Event, error) {
+	resp, err := s.c.Txn(s.ctx).If(
+		clientv3.Compare(clientv3.Version(s.mkPathDepth(nodePath, 1)), "=", 0).WithPrefix(),
+	).Then(
+		clientv3.OpDelete(s.mkPath(nodePath)+"/", clientv3.WithPrevKV()),
+		clientv3.OpPut(s.mkActionKey(), store.Delete),
+	).Commit()
+	if err != nil {
+		return nil, err
+	}
+	if !resp.Succeeded {
+		return nil, etcdErr.NewError(etcdErr.EcodeDirNotEmpty, nodePath, mkV2Rev(resp.Header.Revision))
+	}
+	dresp := resp.Responses[0].GetResponseDeleteRange()
+	if len(dresp.PrevKvs) == 0 {
+		return nil, etcdErr.NewError(etcdErr.EcodeNodeExist, nodePath, mkV2Rev(resp.Header.Revision))
+	}
+	return &store.Event{
+		Action:    store.Delete,
+		PrevNode:  s.mkV2Node(dresp.PrevKvs[0]),
+		EtcdIndex: mkV2Rev(resp.Header.Revision),
+	}, nil
+}
+
+func (s *v2v3Store) deleteNode(nodePath string) (*store.Event, error) {
+	resp, err := s.c.Txn(s.ctx).If(
+		clientv3.Compare(clientv3.Version(s.mkPath(nodePath)+"/"), "=", 0),
+	).Then(
+		clientv3.OpDelete(s.mkPath(nodePath), clientv3.WithPrevKV()),
+		clientv3.OpPut(s.mkActionKey(), store.Delete),
+	).Commit()
+	if err != nil {
+		return nil, err
+	}
+	if !resp.Succeeded {
+		return nil, etcdErr.NewError(etcdErr.EcodeNotFile, nodePath, mkV2Rev(resp.Header.Revision))
+	}
+	pkvs := resp.Responses[0].GetResponseDeleteRange().PrevKvs
+	if len(pkvs) == 0 {
+		return nil, etcdErr.NewError(etcdErr.EcodeKeyNotFound, nodePath, mkV2Rev(resp.Header.Revision))
+	}
+	pkv := pkvs[0]
+	return &store.Event{
+		Action: store.Delete,
+		Node: &store.NodeExtern{
+			Key:           nodePath,
+			CreatedIndex:  mkV2Rev(pkv.CreateRevision),
+			ModifiedIndex: mkV2Rev(resp.Header.Revision),
+		},
+		PrevNode:  s.mkV2Node(pkv),
+		EtcdIndex: mkV2Rev(resp.Header.Revision),
+	}, nil
+}
+
+func (s *v2v3Store) CompareAndDelete(nodePath, prevValue string, prevIndex uint64) (*store.Event, error) {
+	if isRoot(nodePath) {
+		return nil, etcdErr.NewError(etcdErr.EcodeRootROnly, nodePath, 0)
+	}
+
+	key := s.mkPath(nodePath)
+	resp, err := s.c.Txn(s.ctx).If(
+		s.mkCompare(nodePath, prevValue, prevIndex)...,
+	).Then(
+		clientv3.OpDelete(key, clientv3.WithPrevKV()),
+		clientv3.OpPut(s.mkActionKey(), store.CompareAndDelete),
+	).Else(
+		clientv3.OpGet(key),
+		clientv3.OpGet(key+"/"),
+	).Commit()
+
+	if err != nil {
+		return nil, err
+	}
+	if !resp.Succeeded {
+		return nil, compareFail(nodePath, prevValue, prevIndex, resp)
+	}
+
+	// PrevKvs is non-empty since the txn only succeeds when the key exists.
+	pkv := resp.Responses[0].GetResponseDeleteRange().PrevKvs[0]
+	return &store.Event{
+		Action: store.CompareAndDelete,
+		Node: &store.NodeExtern{
+			Key:           nodePath,
+			CreatedIndex:  mkV2Rev(pkv.CreateRevision),
+			ModifiedIndex: mkV2Rev(resp.Header.Revision),
+		},
+		PrevNode:  s.mkV2Node(pkv),
+		EtcdIndex: mkV2Rev(resp.Header.Revision),
+	}, nil
+}
+
+func compareFail(nodePath, prevValue string, prevIndex uint64, resp *clientv3.TxnResponse) error {
+	if dkvs := resp.Responses[1].GetResponseRange().Kvs; len(dkvs) > 0 {
+		return etcdErr.NewError(etcdErr.EcodeNotFile, nodePath, mkV2Rev(resp.Header.Revision))
+	}
+	kvs := resp.Responses[0].GetResponseRange().Kvs
+	if len(kvs) == 0 {
+		return etcdErr.NewError(etcdErr.EcodeKeyNotFound, nodePath, mkV2Rev(resp.Header.Revision))
+	}
+	kv := kvs[0]
+	indexMatch := (prevIndex == 0 || kv.ModRevision == int64(prevIndex))
+	valueMatch := (prevValue == "" || string(kv.Value) == prevValue)
+	var cause string
+	switch {
+	case indexMatch && !valueMatch:
+		cause = fmt.Sprintf("[%v != %v]", prevValue, string(kv.Value))
+	case valueMatch && !indexMatch:
+		cause = fmt.Sprintf("[%v != %v]", prevIndex, kv.ModRevision)
+	default:
+		cause = fmt.Sprintf("[%v != %v] [%v != %v]", prevValue, string(kv.Value), prevIndex, kv.ModRevision)
+	}
+	return etcdErr.NewError(etcdErr.EcodeTestFailed, cause, mkV2Rev(resp.Header.Revision))
+}
+
+func (s *v2v3Store) mkCompare(nodePath, prevValue string, prevIndex uint64) []clientv3.Cmp {
+	key := s.mkPath(nodePath)
+	cmps := []clientv3.Cmp{clientv3.Compare(clientv3.Version(key), ">", 0)}
+	if prevIndex != 0 {
+		cmps = append(cmps, clientv3.Compare(clientv3.ModRevision(key), "=", mkV3Rev(prevIndex)))
+	}
+	if prevValue != "" {
+		cmps = append(cmps, clientv3.Compare(clientv3.Value(key), "=", prevValue))
+	}
+	return cmps
+}
+
+func (s *v2v3Store) JsonStats() []byte                  { panic("STUB") }
+func (s *v2v3Store) DeleteExpiredKeys(cutoff time.Time) { panic("STUB") }
+
+func (s *v2v3Store) Version() int { return 2 }
+
+// TODO: move this out of the Store interface?
+
+func (s *v2v3Store) Save() ([]byte, error)       { panic("STUB") }
+func (s *v2v3Store) Recovery(state []byte) error { panic("STUB") }
+func (s *v2v3Store) Clone() store.Store          { panic("STUB") }
+func (s *v2v3Store) SaveNoCopy() ([]byte, error) { panic("STUB") }
+func (s *v2v3Store) HasTTLKeys() bool            { panic("STUB") }
+
+func (s *v2v3Store) mkPath(nodePath string) string { return s.mkPathDepth(nodePath, 0) }
+
+func (s *v2v3Store) mkNodePath(p string) string {
+	return path.Clean(p[len(s.pfx)+len("/k/000/"):])
+}
+
+// mkPathDepth makes a path to a key that encodes its directory depth
+// for fast directory listing. If a depth is provided, it is added
+// to the computed depth.
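+// For example, with pfx "/p", mkPathDepth("/a/b", 0) returns "/p/002/k//a/b".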
+func (s *v2v3Store) mkPathDepth(nodePath string, depth int) string {
+	normalForm := path.Clean(path.Join("/", nodePath))
+	n := strings.Count(normalForm, "/") + depth
+	return fmt.Sprintf("%s/%03d/k/%s", s.pfx, n, normalForm)
+}
+
+func (s *v2v3Store) mkActionKey() string { return s.pfx + "/act" }
+
+func isRoot(s string) bool { return len(s) == 0 || s == "/" || s == "/0" || s == "/1" }
+
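+// mkV2Rev converts a v3 store revision into a v2 index: v3 revisions start
+// at 1 while v2 indexes start at 0, hence the offset. mkV3Rev below is its
+// inverse.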
+func mkV2Rev(v3Rev int64) uint64 {
+	if v3Rev == 0 {
+		return 0
+	}
+	return uint64(v3Rev - 1)
+}
+
+func mkV3Rev(v2Rev uint64) int64 {
+	if v2Rev == 0 {
+		return 0
+	}
+	return int64(v2Rev + 1)
+}
+
+// mkV2Node creates a V2 NodeExtern from a V3 KeyValue
+func (s *v2v3Store) mkV2Node(kv *mvccpb.KeyValue) *store.NodeExtern {
+	if kv == nil {
+		return nil
+	}
+	n := &store.NodeExtern{
+		Key:           s.mkNodePath(string(kv.Key)),
+		Dir:           kv.Key[len(kv.Key)-1] == '/',
+		CreatedIndex:  mkV2Rev(kv.CreateRevision),
+		ModifiedIndex: mkV2Rev(kv.ModRevision),
+	}
+	if !n.Dir {
+		v := string(kv.Value)
+		n.Value = &v
+	}
+	return n
+}
+
+// prevKeyFromPuts gets the prev key that is being put; ignores
+// the put action response.
+func prevKeyFromPuts(resp *clientv3.TxnResponse) *mvccpb.KeyValue {
+	for _, r := range resp.Responses {
+		pkv := r.GetResponsePut().PrevKv
+		if pkv != nil && pkv.CreateRevision > 0 {
+			return pkv
+		}
+	}
+	return nil
+}
+
+func (s *v2v3Store) newSTM(applyf func(concurrency.STM) error) (*clientv3.TxnResponse, error) {
+	return concurrency.NewSTM(s.c, applyf, concurrency.WithIsolation(concurrency.Serializable))
+}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2v3/watcher.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2v3/watcher.go
new file mode 100644
index 0000000..1c2680e
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/api/v2v3/watcher.go
@@ -0,0 +1,140 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2v3
+
+import (
+	"context"
+	"strings"
+
+	"github.com/coreos/etcd/clientv3"
+	etcdErr "github.com/coreos/etcd/error"
+	"github.com/coreos/etcd/store"
+)
+
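+// Watch emulates the v2 watch API with a single v3 watch over the whole
+// store prefix, translating v3 events back into v2 store events and
+// filtering them against the requested prefix.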
+func (s *v2v3Store) Watch(prefix string, recursive, stream bool, sinceIndex uint64) (store.Watcher, error) {
+	ctx, cancel := context.WithCancel(s.ctx)
+	wch := s.c.Watch(
+		ctx,
+		// TODO: very pricey; use a single store-wide watch in future
+		s.pfx,
+		clientv3.WithPrefix(),
+		clientv3.WithRev(int64(sinceIndex)),
+		clientv3.WithCreatedNotify(),
+		clientv3.WithPrevKV())
+	resp, ok := <-wch
+	if err := resp.Err(); err != nil || !ok {
+		cancel()
+		return nil, etcdErr.NewError(etcdErr.EcodeRaftInternal, prefix, 0)
+	}
+
+	evc, donec := make(chan *store.Event), make(chan struct{})
+	go func() {
+		defer func() {
+			close(evc)
+			close(donec)
+		}()
+		for resp := range wch {
+			for _, ev := range s.mkV2Events(resp) {
+				k := ev.Node.Key
+				if recursive {
+					if !strings.HasPrefix(k, prefix) {
+						continue
+					}
+					// accept events on hidden keys given in prefix
+					k = strings.Replace(k, prefix, "/", 1)
+					// ignore hidden keys deeper than prefix
+					if strings.Contains(k, "/_") {
+						continue
+					}
+				}
+				if !recursive && k != prefix {
+					continue
+				}
+				select {
+				case evc <- ev:
+				case <-ctx.Done():
+					return
+				}
+				if !stream {
+					return
+				}
+			}
+		}
+	}()
+
+	return &v2v3Watcher{
+		startRev: resp.Header.Revision,
+		evc:      evc,
+		donec:    donec,
+		cancel:   cancel,
+	}, nil
+}
+
+func (s *v2v3Store) mkV2Events(wr clientv3.WatchResponse) (evs []*store.Event) {
+	ak := s.mkActionKey()
+	for _, rev := range mkRevs(wr) {
+		var act, key *clientv3.Event
+		for _, ev := range rev {
+			if string(ev.Kv.Key) == ak {
+				act = ev
+			} else if key == nil || len(key.Kv.Key) < len(ev.Kv.Key) {
+				// Use the longest key to ignore intermediate new
+				// directories from Create.
+				key = ev
+			}
+		}
+		v2ev := &store.Event{
+			Action:    string(act.Kv.Value),
+			Node:      s.mkV2Node(key.Kv),
+			PrevNode:  s.mkV2Node(key.PrevKv),
+			EtcdIndex: mkV2Rev(wr.Header.Revision),
+		}
+		evs = append(evs, v2ev)
+	}
+	return evs
+}
+
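+// mkRevs groups the events of a watch response by ModRevision so that each
+// group corresponds to a single v2 event.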
+func mkRevs(wr clientv3.WatchResponse) (revs [][]*clientv3.Event) {
+	var curRev []*clientv3.Event
+	for _, ev := range wr.Events {
+		if curRev != nil && ev.Kv.ModRevision != curRev[0].Kv.ModRevision {
+			revs = append(revs, curRev)
+			curRev = nil
+		}
+		curRev = append(curRev, ev)
+	}
+	if curRev != nil {
+		revs = append(revs, curRev)
+	}
+	return revs
+}
+
+type v2v3Watcher struct {
+	startRev int64
+	evc      chan *store.Event
+	donec    chan struct{}
+	cancel   context.CancelFunc
+}
+
+func (w *v2v3Watcher) StartIndex() uint64 { return mkV2Rev(w.startRev) }
+
+func (w *v2v3Watcher) Remove() {
+	w.cancel()
+	<-w.donec
+}
+
+func (w *v2v3Watcher) EventChan() chan *store.Event { return w.evc }
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3client/doc.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3client/doc.go
new file mode 100644
index 0000000..310715f
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3client/doc.go
@@ -0,0 +1,45 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package v3client provides clientv3 interfaces from an etcdserver.
+//
+// Use v3client by creating an EtcdServer instance, then wrapping it with v3client.New:
+//
+//	import (
+//		"context"
+//
+//		"github.com/coreos/etcd/embed"
+//		"github.com/coreos/etcd/etcdserver/api/v3client"
+//	)
+//
+//	...
+//
+//	// create an embedded EtcdServer from the default configuration
+//	cfg := embed.NewConfig()
+//	cfg.Dir = "default.etcd"
+//	e, err := embed.StartEtcd(cfg)
+//	if err != nil {
+//		// handle error!
+//	}
+//
+//	// wrap the EtcdServer with v3client
+//	cli := v3client.New(e.Server)
+//
+//	// use like an ordinary clientv3
+//	resp, err := cli.Put(context.TODO(), "some-key", "it works!")
+//	if err != nil {
+//		// handle error!
+//	}
+//
+package v3client
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3client/v3client.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3client/v3client.go
new file mode 100644
index 0000000..ab48ea7
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3client/v3client.go
@@ -0,0 +1,66 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3client
+
+import (
+	"context"
+	"time"
+
+	"github.com/coreos/etcd/clientv3"
+	"github.com/coreos/etcd/etcdserver"
+	"github.com/coreos/etcd/etcdserver/api/v3rpc"
+	"github.com/coreos/etcd/proxy/grpcproxy/adapter"
+)
+
+// New creates a clientv3 client that wraps an in-process EtcdServer. Instead
+// of making gRPC calls through sockets, the client makes direct function calls
+// to the etcd server through its api/v3rpc function interfaces.
+func New(s *etcdserver.EtcdServer) *clientv3.Client {
+	c := clientv3.NewCtxClient(context.Background())
+
+	kvc := adapter.KvServerToKvClient(v3rpc.NewQuotaKVServer(s))
+	c.KV = clientv3.NewKVFromKVClient(kvc, c)
+
+	lc := adapter.LeaseServerToLeaseClient(v3rpc.NewQuotaLeaseServer(s))
+	c.Lease = clientv3.NewLeaseFromLeaseClient(lc, c, time.Second)
+
+	wc := adapter.WatchServerToWatchClient(v3rpc.NewWatchServer(s))
+	c.Watcher = &watchWrapper{clientv3.NewWatchFromWatchClient(wc, c)}
+
+	mc := adapter.MaintenanceServerToMaintenanceClient(v3rpc.NewMaintenanceServer(s))
+	c.Maintenance = clientv3.NewMaintenanceFromMaintenanceClient(mc, c)
+
+	clc := adapter.ClusterServerToClusterClient(v3rpc.NewClusterServer(s))
+	c.Cluster = clientv3.NewClusterFromClusterClient(clc, c)
+
+	// TODO: implement clientv3.Auth interface?
+
+	return c
+}
+
+// blankContext implements Stringer on a context so the ctx string doesn't
+// depend on the context's WithValue data, which tends to be unsynchronized
+// (e.g., x/net/trace), causing ctx.String() to trigger data races.
+type blankContext struct{ context.Context }
+
+func (*blankContext) String() string { return "(blankCtx)" }
+
+// watchWrapper wraps clientv3 watch calls to blank out the context
+// to avoid races on trace data.
+type watchWrapper struct{ clientv3.Watcher }
+
+func (ww *watchWrapper) Watch(ctx context.Context, key string, opts ...clientv3.OpOption) clientv3.WatchChan {
+	return ww.Watcher.Watch(&blankContext{ctx}, key, opts...)
+}
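
The blankContext technique above generalizes to any context whose printed form must not read unsynchronized WithValue data. A minimal sketch of the same pattern, independent of etcd (the type and key names are illustrative):

package main

import (
	"context"
	"fmt"
)

type ctxKey struct{}

// stringBlankedCtx shadows the embedded context's Stringer so that
// printing it never walks the (possibly racy) value chain.
type stringBlankedCtx struct{ context.Context }

func (*stringBlankedCtx) String() string { return "(blankCtx)" }

func main() {
	ctx := context.WithValue(context.Background(), ctxKey{}, "racy payload")
	fmt.Println(&stringBlankedCtx{ctx}) // prints (blankCtx)
}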
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/doc.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/doc.go
new file mode 100644
index 0000000..d6fefd7
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package v3election provides a v3 election service from an etcdserver.
+package v3election
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/election.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/election.go
new file mode 100644
index 0000000..c66d7a3
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/election.go
@@ -0,0 +1,134 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3election
+
+import (
+	"context"
+	"errors"
+
+	"github.com/coreos/etcd/clientv3"
+	"github.com/coreos/etcd/clientv3/concurrency"
+	epb "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb"
+)
+
+// ErrMissingLeaderKey is returned when an election API request
+// is missing the "leader" field.
+var ErrMissingLeaderKey = errors.New(`"leader" field must be provided`)
+
+type electionServer struct {
+	c *clientv3.Client
+}
+
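+// NewElectionServer creates an election server backed by the given clientv3 client.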
+func NewElectionServer(c *clientv3.Client) epb.ElectionServer {
+	return &electionServer{c}
+}
+
+func (es *electionServer) Campaign(ctx context.Context, req *epb.CampaignRequest) (*epb.CampaignResponse, error) {
+	s, err := es.session(ctx, req.Lease)
+	if err != nil {
+		return nil, err
+	}
+	e := concurrency.NewElection(s, string(req.Name))
+	if err = e.Campaign(ctx, string(req.Value)); err != nil {
+		return nil, err
+	}
+	return &epb.CampaignResponse{
+		Header: e.Header(),
+		Leader: &epb.LeaderKey{
+			Name:  req.Name,
+			Key:   []byte(e.Key()),
+			Rev:   e.Rev(),
+			Lease: int64(s.Lease()),
+		},
+	}, nil
+}
+
+func (es *electionServer) Proclaim(ctx context.Context, req *epb.ProclaimRequest) (*epb.ProclaimResponse, error) {
+	if req.Leader == nil {
+		return nil, ErrMissingLeaderKey
+	}
+	s, err := es.session(ctx, req.Leader.Lease)
+	if err != nil {
+		return nil, err
+	}
+	e := concurrency.ResumeElection(s, string(req.Leader.Name), string(req.Leader.Key), req.Leader.Rev)
+	if err := e.Proclaim(ctx, string(req.Value)); err != nil {
+		return nil, err
+	}
+	return &epb.ProclaimResponse{Header: e.Header()}, nil
+}
+
+func (es *electionServer) Observe(req *epb.LeaderRequest, stream epb.Election_ObserveServer) error {
+	s, err := es.session(stream.Context(), -1)
+	if err != nil {
+		return err
+	}
+	e := concurrency.NewElection(s, string(req.Name))
+	ch := e.Observe(stream.Context())
+	for stream.Context().Err() == nil {
+		select {
+		case <-stream.Context().Done():
+		case resp, ok := <-ch:
+			if !ok {
+				return nil
+			}
+			lresp := &epb.LeaderResponse{Header: resp.Header, Kv: resp.Kvs[0]}
+			if err := stream.Send(lresp); err != nil {
+				return err
+			}
+		}
+	}
+	return stream.Context().Err()
+}
+
+func (es *electionServer) Leader(ctx context.Context, req *epb.LeaderRequest) (*epb.LeaderResponse, error) {
+	s, err := es.session(ctx, -1)
+	if err != nil {
+		return nil, err
+	}
+	l, lerr := concurrency.NewElection(s, string(req.Name)).Leader(ctx)
+	if lerr != nil {
+		return nil, lerr
+	}
+	return &epb.LeaderResponse{Header: l.Header, Kv: l.Kvs[0]}, nil
+}
+
+func (es *electionServer) Resign(ctx context.Context, req *epb.ResignRequest) (*epb.ResignResponse, error) {
+	if req.Leader == nil {
+		return nil, ErrMissingLeaderKey
+	}
+	s, err := es.session(ctx, req.Leader.Lease)
+	if err != nil {
+		return nil, err
+	}
+	e := concurrency.ResumeElection(s, string(req.Leader.Name), string(req.Leader.Key), req.Leader.Rev)
+	if err := e.Resign(ctx); err != nil {
+		return nil, err
+	}
+	return &epb.ResignResponse{Header: e.Header()}, nil
+}
+
+func (es *electionServer) session(ctx context.Context, lease int64) (*concurrency.Session, error) {
+	s, err := concurrency.NewSession(
+		es.c,
+		concurrency.WithLease(clientv3.LeaseID(lease)),
+		concurrency.WithContext(ctx),
+	)
+	if err != nil {
+		return nil, err
+	}
+	s.Orphan()
+	return s, nil
+}
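
Since these handlers delegate to clientv3/concurrency, the same election primitives can be exercised directly with a client. A hedged sketch, assuming a reachable etcd endpoint at localhost:2379; the endpoint and election name are illustrative:

package main

import (
	"context"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/clientv3/concurrency"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	s, err := concurrency.NewSession(cli)
	if err != nil {
		log.Fatal(err)
	}
	defer s.Close()

	e := concurrency.NewElection(s, "my-election")
	// Block until leadership is acquired, then publish a new value and resign.
	if err := e.Campaign(context.TODO(), "v1"); err != nil {
		log.Fatal(err)
	}
	if err := e.Proclaim(context.TODO(), "v2"); err != nil {
		log.Fatal(err)
	}
	if err := e.Resign(context.TODO()); err != nil {
		log.Fatal(err)
	}
}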
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go
new file mode 100644
index 0000000..58368bb
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go
@@ -0,0 +1,313 @@
+// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
+// source: etcdserver/api/v3election/v3electionpb/v3election.proto
+
+/*
+Package v3electionpb is a reverse proxy.
+
+It translates gRPC into RESTful JSON APIs.
+*/
+package gw
+
+import (
+	"github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb"
+	"io"
+	"net/http"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/grpc-ecosystem/grpc-gateway/runtime"
+	"github.com/grpc-ecosystem/grpc-gateway/utilities"
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/status"
+)
+
+var _ codes.Code
+var _ io.Reader
+var _ status.Status
+var _ = runtime.String
+var _ = utilities.NewDoubleArray
+
+func request_Election_Campaign_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+	var protoReq v3electionpb.CampaignRequest
+	var metadata runtime.ServerMetadata
+
+	if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+	}
+
+	msg, err := client.Campaign(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+	return msg, metadata, err
+
+}
+
+func request_Election_Proclaim_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+	var protoReq v3electionpb.ProclaimRequest
+	var metadata runtime.ServerMetadata
+
+	if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+	}
+
+	msg, err := client.Proclaim(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+	return msg, metadata, err
+
+}
+
+func request_Election_Leader_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+	var protoReq v3electionpb.LeaderRequest
+	var metadata runtime.ServerMetadata
+
+	if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+	}
+
+	msg, err := client.Leader(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+	return msg, metadata, err
+
+}
+
+func request_Election_Observe_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (v3electionpb.Election_ObserveClient, runtime.ServerMetadata, error) {
+	var protoReq v3electionpb.LeaderRequest
+	var metadata runtime.ServerMetadata
+
+	if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+	}
+
+	stream, err := client.Observe(ctx, &protoReq)
+	if err != nil {
+		return nil, metadata, err
+	}
+	header, err := stream.Header()
+	if err != nil {
+		return nil, metadata, err
+	}
+	metadata.HeaderMD = header
+	return stream, metadata, nil
+
+}
+
+func request_Election_Resign_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+	var protoReq v3electionpb.ResignRequest
+	var metadata runtime.ServerMetadata
+
+	if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+	}
+
+	msg, err := client.Resign(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+	return msg, metadata, err
+
+}
+
+// RegisterElectionHandlerFromEndpoint is the same as RegisterElectionHandler but
+// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
+func RegisterElectionHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
+	conn, err := grpc.Dial(endpoint, opts...)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if err != nil {
+			if cerr := conn.Close(); cerr != nil {
+				grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
+			}
+			return
+		}
+		go func() {
+			<-ctx.Done()
+			if cerr := conn.Close(); cerr != nil {
+				grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
+			}
+		}()
+	}()
+
+	return RegisterElectionHandler(ctx, mux, conn)
+}
+
+// RegisterElectionHandler registers the http handlers for service Election to "mux".
+// The handlers forward requests to the grpc endpoint over "conn".
+func RegisterElectionHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
+	return RegisterElectionHandlerClient(ctx, mux, v3electionpb.NewElectionClient(conn))
+}
+
+// RegisterElectionHandlerClient registers the http handlers for service Election to "mux".
+// The handlers forward requests to the grpc endpoint over the given implementation of "ElectionClient".
+// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "ElectionClient"
+// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
+// "ElectionClient" to call the correct interceptors.
+func RegisterElectionHandlerClient(ctx context.Context, mux *runtime.ServeMux, client v3electionpb.ElectionClient) error {
+
+	mux.Handle("POST", pattern_Election_Campaign_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		if cn, ok := w.(http.CloseNotifier); ok {
+			go func(done <-chan struct{}, closed <-chan bool) {
+				select {
+				case <-done:
+				case <-closed:
+					cancel()
+				}
+			}(ctx.Done(), cn.CloseNotify())
+		}
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_Election_Campaign_0(rctx, inboundMarshaler, client, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_Election_Campaign_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
+	mux.Handle("POST", pattern_Election_Proclaim_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		if cn, ok := w.(http.CloseNotifier); ok {
+			go func(done <-chan struct{}, closed <-chan bool) {
+				select {
+				case <-done:
+				case <-closed:
+					cancel()
+				}
+			}(ctx.Done(), cn.CloseNotify())
+		}
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_Election_Proclaim_0(rctx, inboundMarshaler, client, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_Election_Proclaim_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
+	mux.Handle("POST", pattern_Election_Leader_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		if cn, ok := w.(http.CloseNotifier); ok {
+			go func(done <-chan struct{}, closed <-chan bool) {
+				select {
+				case <-done:
+				case <-closed:
+					cancel()
+				}
+			}(ctx.Done(), cn.CloseNotify())
+		}
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_Election_Leader_0(rctx, inboundMarshaler, client, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_Election_Leader_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
+	mux.Handle("POST", pattern_Election_Observe_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		if cn, ok := w.(http.CloseNotifier); ok {
+			go func(done <-chan struct{}, closed <-chan bool) {
+				select {
+				case <-done:
+				case <-closed:
+					cancel()
+				}
+			}(ctx.Done(), cn.CloseNotify())
+		}
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_Election_Observe_0(rctx, inboundMarshaler, client, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_Election_Observe_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...)
+
+	})
+
+	mux.Handle("POST", pattern_Election_Resign_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		if cn, ok := w.(http.CloseNotifier); ok {
+			go func(done <-chan struct{}, closed <-chan bool) {
+				select {
+				case <-done:
+				case <-closed:
+					cancel()
+				}
+			}(ctx.Done(), cn.CloseNotify())
+		}
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_Election_Resign_0(rctx, inboundMarshaler, client, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_Election_Resign_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
+	return nil
+}
+
+var (
+	pattern_Election_Campaign_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "election", "campaign"}, ""))
+
+	pattern_Election_Proclaim_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "election", "proclaim"}, ""))
+
+	pattern_Election_Leader_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "election", "leader"}, ""))
+
+	pattern_Election_Observe_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "election", "observe"}, ""))
+
+	pattern_Election_Resign_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "election", "resign"}, ""))
+)
+
+var (
+	forward_Election_Campaign_0 = runtime.ForwardResponseMessage
+
+	forward_Election_Proclaim_0 = runtime.ForwardResponseMessage
+
+	forward_Election_Leader_0 = runtime.ForwardResponseMessage
+
+	forward_Election_Observe_0 = runtime.ForwardResponseStream
+
+	forward_Election_Resign_0 = runtime.ForwardResponseMessage
+)
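
To serve this generated gateway, the registration helper is wired to an HTTP mux. A minimal sketch, assuming a plaintext etcd gRPC endpoint at localhost:2379 and an arbitrary listen address (both illustrative):

package main

import (
	"log"
	"net/http"

	gw "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw"
	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	mux := runtime.NewServeMux()
	opts := []grpc.DialOption{grpc.WithInsecure()}
	// Dial the etcd gRPC endpoint and translate REST+JSON to gRPC.
	if err := gw.RegisterElectionHandlerFromEndpoint(ctx, mux, "localhost:2379", opts); err != nil {
		log.Fatal(err)
	}
	log.Fatal(http.ListenAndServe(":8080", mux))
}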
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.proto b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.proto
new file mode 100644
index 0000000..cb475b8
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.proto
@@ -0,0 +1,119 @@
+syntax = "proto3";
+package v3electionpb;
+
+import "gogoproto/gogo.proto";
+import "etcd/etcdserver/etcdserverpb/rpc.proto";
+import "etcd/mvcc/mvccpb/kv.proto";
+
+// for grpc-gateway
+import "google/api/annotations.proto";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+
+// The election service exposes client-side election facilities as a gRPC interface.
+service Election {
+  // Campaign waits to acquire leadership in an election, returning a LeaderKey
+  // representing the leadership if successful. The LeaderKey can then be used
+  // to issue new values on the election, transactionally guard API requests on
+  // leadership still being held, and resign from the election.
+  rpc Campaign(CampaignRequest) returns (CampaignResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/election/campaign"
+        body: "*"
+    };
+  }
+  // Proclaim updates the leader's posted value with a new value.
+  rpc Proclaim(ProclaimRequest) returns (ProclaimResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/election/proclaim"
+        body: "*"
+    };
+  }
+  // Leader returns the current election proclamation, if any.
+  rpc Leader(LeaderRequest) returns (LeaderResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/election/leader"
+        body: "*"
+    };
+  }
+  // Observe streams election proclamations in-order as made by the election's
+  // elected leaders.
+  rpc Observe(LeaderRequest) returns (stream LeaderResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/election/observe"
+        body: "*"
+    };
+  }
+  // Resign releases election leadership so other campaigners may acquire
+  // leadership on the election.
+  rpc Resign(ResignRequest) returns (ResignResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/election/resign"
+        body: "*"
+    };
+  }
+}
+
+message CampaignRequest {
+  // name is the election's identifier for the campaign.
+  bytes name = 1;
+  // lease is the ID of the lease attached to leadership of the election. If the
+  // lease expires or is revoked before resigning leadership, then the
+  // leadership is transferred to the next campaigner, if any.
+  int64 lease = 2;
+  // value is the initial proclaimed value set when the campaigner wins the
+  // election.
+  bytes value = 3;
+}
+
+message CampaignResponse {
+  etcdserverpb.ResponseHeader header = 1;
+  // leader describes the resources used for holding leadership of the election.
+  LeaderKey leader = 2;
+}
+
+message LeaderKey {
+  // name is the election identifier that corresponds to the leadership key.
+  bytes name = 1;
+  // key is an opaque key representing the ownership of the election. If the key
+  // is deleted, then leadership is lost.
+  bytes key = 2;
+  // rev is the creation revision of the key. It can be used to test for ownership
+  // of an election during transactions by testing that the key's creation
+  // revision matches rev.
+  int64 rev = 3;
+  // lease is the lease ID of the election leader.
+  int64 lease = 4;
+}
+
+message LeaderRequest {
+  // name is the election identifier for the leadership information.
+  bytes name = 1;
+}
+
+message LeaderResponse {
+  etcdserverpb.ResponseHeader header = 1;
+  // kv is the key-value pair representing the latest leader update.
+  mvccpb.KeyValue kv = 2;
+}
+
+message ResignRequest {
+  // leader is the leadership to relinquish by resignation.
+  LeaderKey leader = 1;
+}
+
+message ResignResponse {
+  etcdserverpb.ResponseHeader header = 1;
+}
+
+message ProclaimRequest {
+  // leader is the leadership handle for the election being updated.
+  LeaderKey leader = 1;
+  // value is an update meant to overwrite the leader's current value.
+  bytes value = 2;
+}
+
+message ProclaimResponse {
+  etcdserverpb.ResponseHeader header = 1;
+}
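
The rev field of LeaderKey enables the transactional guard described above: compare the leadership key's creation revision against rev and apply writes only on a match. A sketch under those assumptions; putIfStillLeader is a hypothetical helper name:

package guard

import (
	"context"
	"fmt"

	"github.com/coreos/etcd/clientv3"
	epb "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb"
)

// putIfStillLeader applies a put only while the leadership key from a
// CampaignResponse still exists at its original creation revision.
func putIfStillLeader(ctx context.Context, cli *clientv3.Client, leader *epb.LeaderKey, key, val string) error {
	resp, err := cli.Txn(ctx).
		If(clientv3.Compare(clientv3.CreateRevision(string(leader.Key)), "=", leader.Rev)).
		Then(clientv3.OpPut(key, val)).
		Commit()
	if err != nil {
		return err
	}
	if !resp.Succeeded {
		return fmt.Errorf("leadership lost; update not applied")
	}
	return nil
}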
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/doc.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/doc.go
new file mode 100644
index 0000000..e0a1008
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package v3lock provides a v3 locking service from an etcdserver.
+package v3lock
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/lock.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/lock.go
new file mode 100644
index 0000000..a5efcba
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/lock.go
@@ -0,0 +1,56 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3lock
+
+import (
+	"context"
+
+	"github.com/coreos/etcd/clientv3"
+	"github.com/coreos/etcd/clientv3/concurrency"
+	"github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb"
+)
+
+type lockServer struct {
+	c *clientv3.Client
+}
+
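+// NewLockServer creates a lock server backed by the given clientv3 client.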
+func NewLockServer(c *clientv3.Client) v3lockpb.LockServer {
+	return &lockServer{c}
+}
+
+func (ls *lockServer) Lock(ctx context.Context, req *v3lockpb.LockRequest) (*v3lockpb.LockResponse, error) {
+	s, err := concurrency.NewSession(
+		ls.c,
+		concurrency.WithLease(clientv3.LeaseID(req.Lease)),
+		concurrency.WithContext(ctx),
+	)
+	if err != nil {
+		return nil, err
+	}
+	s.Orphan()
+	m := concurrency.NewMutex(s, string(req.Name))
+	if err = m.Lock(ctx); err != nil {
+		return nil, err
+	}
+	return &v3lockpb.LockResponse{Header: m.Header(), Key: []byte(m.Key())}, nil
+}
+
+func (ls *lockServer) Unlock(ctx context.Context, req *v3lockpb.UnlockRequest) (*v3lockpb.UnlockResponse, error) {
+	resp, err := ls.c.Delete(ctx, string(req.Key))
+	if err != nil {
+		return nil, err
+	}
+	return &v3lockpb.UnlockResponse{Header: resp.Header}, nil
+}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go
new file mode 100644
index 0000000..efecc45
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go
@@ -0,0 +1,167 @@
+// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
+// source: etcdserver/api/v3lock/v3lockpb/v3lock.proto
+
+/*
+Package v3lockpb is a reverse proxy.
+
+It translates gRPC into RESTful JSON APIs.
+*/
+package gw
+
+import (
+	"github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb"
+	"io"
+	"net/http"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/grpc-ecosystem/grpc-gateway/runtime"
+	"github.com/grpc-ecosystem/grpc-gateway/utilities"
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/status"
+)
+
+var _ codes.Code
+var _ io.Reader
+var _ status.Status
+var _ = runtime.String
+var _ = utilities.NewDoubleArray
+
+func request_Lock_Lock_0(ctx context.Context, marshaler runtime.Marshaler, client v3lockpb.LockClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+	var protoReq v3lockpb.LockRequest
+	var metadata runtime.ServerMetadata
+
+	if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+	}
+
+	msg, err := client.Lock(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+	return msg, metadata, err
+
+}
+
+func request_Lock_Unlock_0(ctx context.Context, marshaler runtime.Marshaler, client v3lockpb.LockClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+	var protoReq v3lockpb.UnlockRequest
+	var metadata runtime.ServerMetadata
+
+	if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+	}
+
+	msg, err := client.Unlock(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+	return msg, metadata, err
+
+}
+
+// RegisterLockHandlerFromEndpoint is the same as RegisterLockHandler but
+// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
+func RegisterLockHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
+	conn, err := grpc.Dial(endpoint, opts...)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if err != nil {
+			if cerr := conn.Close(); cerr != nil {
+				grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
+			}
+			return
+		}
+		go func() {
+			<-ctx.Done()
+			if cerr := conn.Close(); cerr != nil {
+				grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
+			}
+		}()
+	}()
+
+	return RegisterLockHandler(ctx, mux, conn)
+}
+
+// RegisterLockHandler registers the http handlers for service Lock to "mux".
+// The handlers forward requests to the grpc endpoint over "conn".
+func RegisterLockHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
+	return RegisterLockHandlerClient(ctx, mux, v3lockpb.NewLockClient(conn))
+}
+
+// RegisterLockHandlerClient registers the http handlers for service Lock to "mux".
+// The handlers forward requests to the grpc endpoint over the given implementation of "LockClient".
+// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "LockClient"
+// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
+// "LockClient" to call the correct interceptors.
+func RegisterLockHandlerClient(ctx context.Context, mux *runtime.ServeMux, client v3lockpb.LockClient) error {
+
+	mux.Handle("POST", pattern_Lock_Lock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		if cn, ok := w.(http.CloseNotifier); ok {
+			go func(done <-chan struct{}, closed <-chan bool) {
+				select {
+				case <-done:
+				case <-closed:
+					cancel()
+				}
+			}(ctx.Done(), cn.CloseNotify())
+		}
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_Lock_Lock_0(rctx, inboundMarshaler, client, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_Lock_Lock_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
+	mux.Handle("POST", pattern_Lock_Unlock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		if cn, ok := w.(http.CloseNotifier); ok {
+			go func(done <-chan struct{}, closed <-chan bool) {
+				select {
+				case <-done:
+				case <-closed:
+					cancel()
+				}
+			}(ctx.Done(), cn.CloseNotify())
+		}
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_Lock_Unlock_0(rctx, inboundMarshaler, client, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_Lock_Unlock_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
+	return nil
+}
+
+var (
+	pattern_Lock_Lock_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 1}, []string{"v3beta", "lock"}, ""))
+
+	pattern_Lock_Unlock_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "lock", "unlock"}, ""))
+)
+
+var (
+	forward_Lock_Lock_0 = runtime.ForwardResponseMessage
+
+	forward_Lock_Unlock_0 = runtime.ForwardResponseMessage
+)
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.proto b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.proto
new file mode 100644
index 0000000..44b698d
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.proto
@@ -0,0 +1,65 @@
+syntax = "proto3";
+package v3lockpb;
+
+import "gogoproto/gogo.proto";
+import "etcd/etcdserver/etcdserverpb/rpc.proto";
+
+// for grpc-gateway
+import "google/api/annotations.proto";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+
+// The lock service exposes client-side locking facilities as a gRPC interface.
+service Lock {
+  // Lock acquires a distributed shared lock on a given named lock.
+  // On success, it will return a unique key that exists so long as the
+  // lock is held by the caller. This key can be used in conjunction with
+  // transactions to safely ensure updates to etcd only occur while holding
+  // lock ownership. The lock is held until Unlock is called on the key or the
+  // lease associated with the owner expires.
+  rpc Lock(LockRequest) returns (LockResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/lock/lock"
+        body: "*"
+    };
+  }
+
+  // Unlock takes a key returned by Lock and releases the hold on the lock. The
+  // next Lock caller waiting for the lock will then be woken up and given
+  // ownership of the lock.
+  rpc Unlock(UnlockRequest) returns (UnlockResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/lock/unlock"
+        body: "*"
+    };
+  }
+}
+
+message LockRequest {
+  // name is the identifier for the distributed shared lock to be acquired.
+  bytes name = 1;
+  // lease is the ID of the lease that will be attached to ownership of the
+  // lock. If the lease expires or is revoked while it holds the lock, the
+  // lock is automatically released. Calls to Lock with the same lease are
+  // treated as a single acquisition; locking twice with the same lease is a
+  // no-op.
+  int64 lease = 2;
+}
+
+message LockResponse {
+  etcdserverpb.ResponseHeader header = 1;
+  // key is a key that will exist on etcd for the duration that the Lock caller
+  // owns the lock. Users should not modify this key or the lock may exhibit
+  // undefined behavior.
+  bytes key = 2;
+}
+
+message UnlockRequest {
+  // key is the lock ownership key granted by Lock.
+  bytes key = 1;
+}
+
+message UnlockResponse {
+  etcdserverpb.ResponseHeader header = 1;
+}
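
As the LockResponse documentation notes, the ownership key can guard writes transactionally: it exists (creation revision > 0) exactly while the lock is held. A sketch using clientv3/concurrency; updateUnderLock is a hypothetical helper name:

package lockguard

import (
	"context"
	"errors"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/clientv3/concurrency"
)

// updateUnderLock takes the named lock, then applies the put only if the
// ownership key still exists, mirroring the guard described for LockResponse.key.
func updateUnderLock(ctx context.Context, cli *clientv3.Client, lockName, key, val string) error {
	s, err := concurrency.NewSession(cli)
	if err != nil {
		return err
	}
	defer s.Close()

	m := concurrency.NewMutex(s, lockName)
	if err := m.Lock(ctx); err != nil {
		return err
	}
	defer m.Unlock(ctx) // best-effort release; error ignored in this sketch

	resp, err := cli.Txn(ctx).
		If(clientv3.Compare(clientv3.CreateRevision(m.Key()), ">", 0)).
		Then(clientv3.OpPut(key, val)).
		Commit()
	if err != nil {
		return err
	}
	if !resp.Succeeded {
		return errors.New("lock ownership lost before update")
	}
	return nil
}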
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/auth.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/auth.go
new file mode 100644
index 0000000..ca8e53a
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/auth.go
@@ -0,0 +1,158 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3rpc
+
+import (
+	"context"
+
+	"github.com/coreos/etcd/etcdserver"
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+)
+
+type AuthServer struct {
+	authenticator etcdserver.Authenticator
+}
+
+func NewAuthServer(s *etcdserver.EtcdServer) *AuthServer {
+	return &AuthServer{authenticator: s}
+}
+
+func (as *AuthServer) AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error) {
+	resp, err := as.authenticator.AuthEnable(ctx, r)
+	if err != nil {
+		return nil, togRPCError(err)
+	}
+	return resp, nil
+}
+
+func (as *AuthServer) AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error) {
+	resp, err := as.authenticator.AuthDisable(ctx, r)
+	if err != nil {
+		return nil, togRPCError(err)
+	}
+	return resp, nil
+}
+
+func (as *AuthServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) {
+	resp, err := as.authenticator.Authenticate(ctx, r)
+	if err != nil {
+		return nil, togRPCError(err)
+	}
+	return resp, nil
+}
+
+func (as *AuthServer) RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) {
+	resp, err := as.authenticator.RoleAdd(ctx, r)
+	if err != nil {
+		return nil, togRPCError(err)
+	}
+	return resp, nil
+}
+
+func (as *AuthServer) RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) {
+	resp, err := as.authenticator.RoleDelete(ctx, r)
+	if err != nil {
+		return nil, togRPCError(err)
+	}
+	return resp, nil
+}
+
+func (as *AuthServer) RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
+	resp, err := as.authenticator.RoleGet(ctx, r)
+	if err != nil {
+		return nil, togRPCError(err)
+	}
+	return resp, nil
+}
+
+func (as *AuthServer) RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) {
+	resp, err := as.authenticator.RoleList(ctx, r)
+	if err != nil {
+		return nil, togRPCError(err)
+	}
+	return resp, nil
+}
+
+func (as *AuthServer) RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) {
+	resp, err := as.authenticator.RoleRevokePermission(ctx, r)
+	if err != nil {
+		return nil, togRPCError(err)
+	}
+	return resp, nil
+}
+
+func (as *AuthServer) RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) {
+	resp, err := as.authenticator.RoleGrantPermission(ctx, r)
+	if err != nil {
+		return nil, togRPCError(err)
+	}
+	return resp, nil
+}
+
+func (as *AuthServer) UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) {
+	resp, err := as.authenticator.UserAdd(ctx, r)
+	if err != nil {
+		return nil, togRPCError(err)
+	}
+	return resp, nil
+}
+
+func (as *AuthServer) UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) {
+	resp, err := as.authenticator.UserDelete(ctx, r)
+	if err != nil {
+		return nil, togRPCError(err)
+	}
+	return resp, nil
+}
+
+func (as *AuthServer) UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
+	resp, err := as.authenticator.UserGet(ctx, r)
+	if err != nil {
+		return nil, togRPCError(err)
+	}
+	return resp, nil
+}
+
+func (as *AuthServer) UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) {
+	resp, err := as.authenticator.UserList(ctx, r)
+	if err != nil {
+		return nil, togRPCError(err)
+	}
+	return resp, nil
+}
+
+func (as *AuthServer) UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) {
+	resp, err := as.authenticator.UserGrantRole(ctx, r)
+	if err != nil {
+		return nil, togRPCError(err)
+	}
+	return resp, nil
+}
+
+func (as *AuthServer) UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) {
+	resp, err := as.authenticator.UserRevokeRole(ctx, r)
+	if err != nil {
+		return nil, togRPCError(err)
+	}
+	return resp, nil
+}
+
+func (as *AuthServer) UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) {
+	resp, err := as.authenticator.UserChangePassword(ctx, r)
+	if err != nil {
+		return nil, togRPCError(err)
+	}
+	return resp, nil
+}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/codec.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/codec.go
new file mode 100644
index 0000000..17a2c87
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/codec.go
@@ -0,0 +1,34 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3rpc
+
+import "github.com/gogo/protobuf/proto"
+
+type codec struct{}
+
+func (c *codec) Marshal(v interface{}) ([]byte, error) {
+	b, err := proto.Marshal(v.(proto.Message))
+	sentBytes.Add(float64(len(b)))
+	return b, err
+}
+
+func (c *codec) Unmarshal(data []byte, v interface{}) error {
+	receivedBytes.Add(float64(len(data)))
+	return proto.Unmarshal(data, v.(proto.Message))
+}
+
+func (c *codec) String() string {
+	return "proto"
+}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go
new file mode 100644
index 0000000..c97e746
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go
@@ -0,0 +1,76 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3rpc
+
+import (
+	"crypto/tls"
+	"math"
+
+	"github.com/coreos/etcd/etcdserver"
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+
+	"github.com/grpc-ecosystem/go-grpc-middleware"
+	"github.com/grpc-ecosystem/go-grpc-prometheus"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/health"
+	healthpb "google.golang.org/grpc/health/grpc_health_v1"
+)
+
+const (
+	grpcOverheadBytes = 512 * 1024
+	maxStreams        = math.MaxUint32
+	maxSendBytes      = math.MaxInt32
+)
+
+func Server(s *etcdserver.EtcdServer, tls *tls.Config, gopts ...grpc.ServerOption) *grpc.Server {
+	var opts []grpc.ServerOption
+	opts = append(opts, grpc.CustomCodec(&codec{}))
+	if tls != nil {
+		opts = append(opts, grpc.Creds(credentials.NewTLS(tls)))
+	}
+	opts = append(opts, grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(
+		newLogUnaryInterceptor(s),
+		newUnaryInterceptor(s),
+		grpc_prometheus.UnaryServerInterceptor,
+	)))
+	opts = append(opts, grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(
+		newStreamInterceptor(s),
+		grpc_prometheus.StreamServerInterceptor,
+	)))
+	opts = append(opts, grpc.MaxRecvMsgSize(int(s.Cfg.MaxRequestBytes+grpcOverheadBytes)))
+	opts = append(opts, grpc.MaxSendMsgSize(maxSendBytes))
+	opts = append(opts, grpc.MaxConcurrentStreams(maxStreams))
+	grpcServer := grpc.NewServer(append(opts, gopts...)...)
+
+	pb.RegisterKVServer(grpcServer, NewQuotaKVServer(s))
+	pb.RegisterWatchServer(grpcServer, NewWatchServer(s))
+	pb.RegisterLeaseServer(grpcServer, NewQuotaLeaseServer(s))
+	pb.RegisterClusterServer(grpcServer, NewClusterServer(s))
+	pb.RegisterAuthServer(grpcServer, NewAuthServer(s))
+	pb.RegisterMaintenanceServer(grpcServer, NewMaintenanceServer(s))
+
+	// server should register all the services manually
+	// use empty service name for all etcd services' health status,
+	// see https://github.com/grpc/grpc/blob/master/doc/health-checking.md for more
+	hsrv := health.NewServer()
+	hsrv.SetServingStatus("", healthpb.HealthCheckResponse_SERVING)
+	healthpb.RegisterHealthServer(grpcServer, hsrv)
+
+	// set zero values for metrics registered for this grpc server
+	grpc_prometheus.Register(grpcServer)
+
+	return grpcServer
+}
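
Because the server registers the standard gRPC health service under the empty service name, any grpc_health_v1 client can probe it. A sketch, assuming a plaintext etcd gRPC endpoint at localhost:2379 (illustrative):

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"google.golang.org/grpc"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	conn, err := grpc.Dial("localhost:2379", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	// The empty service name covers all etcd services, as set via SetServingStatus above.
	resp, err := healthpb.NewHealthClient(conn).Check(ctx, &healthpb.HealthCheckRequest{Service: ""})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("health status:", resp.Status)
}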
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/header.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/header.go
new file mode 100644
index 0000000..75da52f
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/header.go
@@ -0,0 +1,49 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3rpc
+
+import (
+	"github.com/coreos/etcd/etcdserver"
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+)
+
+type header struct {
+	clusterID int64
+	memberID  int64
+	raftTimer etcdserver.RaftTimer
+	rev       func() int64
+}
+
+func newHeader(s *etcdserver.EtcdServer) header {
+	return header{
+		clusterID: int64(s.Cluster().ID()),
+		memberID:  int64(s.ID()),
+		raftTimer: s,
+		rev:       func() int64 { return s.KV().Rev() },
+	}
+}
+
+// fill populates pb.ResponseHeader using etcdserver information
+func (h *header) fill(rh *pb.ResponseHeader) {
+	if rh == nil {
+		plog.Panic("unexpected nil resp.Header")
+	}
+	rh.ClusterId = uint64(h.clusterID)
+	rh.MemberId = uint64(h.memberID)
+	rh.RaftTerm = h.raftTimer.Term()
+	if rh.Revision == 0 {
+		rh.Revision = h.rev()
+	}
+}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go
new file mode 100644
index 0000000..d594ae7
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go
@@ -0,0 +1,263 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3rpc
+
+import (
+	"context"
+	"sync"
+	"time"
+
+	"github.com/coreos/etcd/etcdserver"
+	"github.com/coreos/etcd/etcdserver/api"
+	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
+	"github.com/coreos/etcd/pkg/types"
+	"github.com/coreos/etcd/raft"
+
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+	"go.uber.org/zap"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/peer"
+)
+
+const (
+	maxNoLeaderCnt = 3
+)
+
+type streamsMap struct {
+	mu      sync.Mutex
+	streams map[grpc.ServerStream]struct{}
+}
+
+func newUnaryInterceptor(s *etcdserver.EtcdServer) grpc.UnaryServerInterceptor {
+	return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
+		if !api.IsCapabilityEnabled(api.V3rpcCapability) {
+			return nil, rpctypes.ErrGRPCNotCapable
+		}
+
+		md, ok := metadata.FromIncomingContext(ctx)
+		if ok {
+			if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader {
+				if s.Leader() == types.ID(raft.None) {
+					return nil, rpctypes.ErrGRPCNoLeader
+				}
+			}
+		}
+
+		return handler(ctx, req)
+	}
+}
+
+func newLogUnaryInterceptor(s *etcdserver.EtcdServer) grpc.UnaryServerInterceptor {
+	return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
+		startTime := time.Now()
+		resp, err := handler(ctx, req)
+		defer logUnaryRequestStats(ctx, nil, info, startTime, req, resp)
+		return resp, err
+	}
+}
+
+func logUnaryRequestStats(ctx context.Context, lg *zap.Logger, info *grpc.UnaryServerInfo, startTime time.Time, req interface{}, resp interface{}) {
+	duration := time.Since(startTime)
+	remote := "No remote client info."
+	peerInfo, ok := peer.FromContext(ctx)
+	if ok {
+		remote = peerInfo.Addr.String()
+	}
+	responseType := info.FullMethod
+	var reqCount, respCount int64
+	var reqSize, respSize int
+	var reqContent string
+	switch _resp := resp.(type) {
+	case *pb.RangeResponse:
+		_req, ok := req.(*pb.RangeRequest)
+		if ok {
+			reqCount = 0
+			reqSize = _req.Size()
+			reqContent = _req.String()
+		}
+		if _resp != nil {
+			respCount = _resp.GetCount()
+			respSize = _resp.Size()
+		}
+	case *pb.PutResponse:
+		_req, ok := req.(*pb.PutRequest)
+		if ok {
+			reqCount = 1
+			reqSize = _req.Size()
+			reqContent = pb.NewLoggablePutRequest(_req).String()
+			// redact value field from request content, see PR #9821
+		}
+		if _resp != nil {
+			respCount = 0
+			respSize = _resp.Size()
+		}
+	case *pb.DeleteRangeResponse:
+		_req, ok := req.(*pb.DeleteRangeRequest)
+		if ok {
+			reqCount = 0
+			reqSize = _req.Size()
+			reqContent = _req.String()
+		}
+		if _resp != nil {
+			respCount = _resp.GetDeleted()
+			respSize = _resp.Size()
+		}
+	case *pb.TxnResponse:
+		_req, ok := req.(*pb.TxnRequest)
+		if ok && _resp != nil {
+			if _resp.GetSucceeded() { // determine the 'actual' count and size of request based on success or failure
+				reqCount = int64(len(_req.GetSuccess()))
+				reqSize = 0
+				for _, r := range _req.GetSuccess() {
+					reqSize += r.Size()
+				}
+			} else {
+				reqCount = int64(len(_req.GetFailure()))
+				reqSize = 0
+				for _, r := range _req.GetFailure() {
+					reqSize += r.Size()
+				}
+			}
+			reqContent = pb.NewLoggableTxnRequest(_req).String()
+			// redact value field from request content, see PR #9821
+		}
+		if _resp != nil {
+			respCount = 0
+			respSize = _resp.Size()
+		}
+	default:
+		reqCount = -1
+		reqSize = -1
+		respCount = -1
+		respSize = -1
+	}
+
+	logGenericRequestStats(lg, startTime, duration, remote, responseType, reqCount, reqSize, respCount, respSize, reqContent)
+}
+
+func logGenericRequestStats(lg *zap.Logger, startTime time.Time, duration time.Duration, remote string, responseType string,
+	reqCount int64, reqSize int, respCount int64, respSize int, reqContent string) {
+	if lg == nil {
+		plog.Debugf("start time = %v, "+
+			"time spent = %v, "+
+			"remote = %s, "+
+			"response type = %s, "+
+			"request count = %d, "+
+			"request size = %d, "+
+			"response count = %d, "+
+			"response size = %d, "+
+			"request content = %s",
+			startTime, duration, remote, responseType, reqCount, reqSize, respCount, respSize, reqContent,
+		)
+	} else {
+		lg.Debug("request stats",
+			zap.Time("start time", startTime),
+			zap.Duration("time spent", duration),
+			zap.String("remote", remote),
+			zap.String("response type", responseType),
+			zap.Int64("request count", reqCount),
+			zap.Int("request size", reqSize),
+			zap.Int64("response count", respCount),
+			zap.Int("response size", respSize),
+			zap.String("request content", reqContent),
+		)
+	}
+}
+
+func newStreamInterceptor(s *etcdserver.EtcdServer) grpc.StreamServerInterceptor {
+	smap := monitorLeader(s)
+
+	return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+		if !api.IsCapabilityEnabled(api.V3rpcCapability) {
+			return rpctypes.ErrGRPCNotCapable
+		}
+
+		md, ok := metadata.FromIncomingContext(ss.Context())
+		if ok {
+			if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader {
+				if s.Leader() == types.ID(raft.None) {
+					return rpctypes.ErrGRPCNoLeader
+				}
+
+				cctx, cancel := context.WithCancel(ss.Context())
+				ss = serverStreamWithCtx{ctx: cctx, cancel: &cancel, ServerStream: ss}
+
+				smap.mu.Lock()
+				smap.streams[ss] = struct{}{}
+				smap.mu.Unlock()
+
+				defer func() {
+					smap.mu.Lock()
+					delete(smap.streams, ss)
+					smap.mu.Unlock()
+					cancel()
+				}()
+
+			}
+		}
+
+		return handler(srv, ss)
+	}
+}
+
+type serverStreamWithCtx struct {
+	grpc.ServerStream
+	ctx    context.Context
+	cancel *context.CancelFunc
+}
+
+func (ssc serverStreamWithCtx) Context() context.Context { return ssc.ctx }
+
+func monitorLeader(s *etcdserver.EtcdServer) *streamsMap {
+	smap := &streamsMap{
+		streams: make(map[grpc.ServerStream]struct{}),
+	}
+
+	go func() {
+		election := time.Duration(s.Cfg.TickMs) * time.Duration(s.Cfg.ElectionTicks) * time.Millisecond
+		noLeaderCnt := 0
+
+		for {
+			select {
+			case <-s.StopNotify():
+				return
+			case <-time.After(election):
+				if s.Leader() == types.ID(raft.None) {
+					noLeaderCnt++
+				} else {
+					noLeaderCnt = 0
+				}
+
+				// We are more conservative on canceling existing streams. Reconnecting streams
+				// cost much more than just rejecting new requests. So we wait until the member
+				// cannot find a leader for maxNoLeaderCnt election timeouts to cancel existing streams.
+				if noLeaderCnt >= maxNoLeaderCnt {
+					smap.mu.Lock()
+					for ss := range smap.streams {
+						if ssWithCtx, ok := ss.(serverStreamWithCtx); ok {
+							(*ssWithCtx.cancel)()
+							<-ss.Context().Done()
+						}
+					}
+					smap.streams = make(map[grpc.ServerStream]struct{})
+					smap.mu.Unlock()
+				}
+			}
+		}
+	}()
+
+	return smap
+}
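
The metadata these interceptors check is what clientv3.WithRequireLeader attaches on the client side; with it set, requests fail fast with a no-leader error instead of stalling on a leaderless member. A sketch, assuming an endpoint at localhost:2379:

package main

import (
	"context"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// WithRequireLeader adds the hasleader metadata checked by the
	// interceptors; the Get fails immediately if the member has no leader.
	ctx := clientv3.WithRequireLeader(context.Background())
	if _, err := cli.Get(ctx, "some-key"); err != nil {
		log.Println("get failed:", err)
	}
}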
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/key.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/key.go
new file mode 100644
index 0000000..9781bdd
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/key.go
@@ -0,0 +1,277 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package v3rpc implements etcd v3 RPC system based on gRPC.
+package v3rpc
+
+import (
+	"context"
+
+	"github.com/coreos/etcd/etcdserver"
+	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+	"github.com/coreos/etcd/pkg/adt"
+
+	"github.com/coreos/pkg/capnslog"
+)
+
+var (
+	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api/v3rpc")
+)
+
+type kvServer struct {
+	hdr header
+	kv  etcdserver.RaftKV
+	// maxTxnOps is the max operations per txn.
+	// e.g., suppose maxTxnOps = 128.
+	// Txn.Success can have at most 128 operations,
+	// and Txn.Failure can have at most 128 operations.
+	maxTxnOps uint
+}
+
+func NewKVServer(s *etcdserver.EtcdServer) pb.KVServer {
+	return &kvServer{hdr: newHeader(s), kv: s, maxTxnOps: s.Cfg.MaxTxnOps}
+}
+
+func (s *kvServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) {
+	if err := checkRangeRequest(r); err != nil {
+		return nil, err
+	}
+
+	resp, err := s.kv.Range(ctx, r)
+	if err != nil {
+		return nil, togRPCError(err)
+	}
+
+	s.hdr.fill(resp.Header)
+	return resp, nil
+}
+
+func (s *kvServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) {
+	if err := checkPutRequest(r); err != nil {
+		return nil, err
+	}
+
+	resp, err := s.kv.Put(ctx, r)
+	if err != nil {
+		return nil, togRPCError(err)
+	}
+
+	s.hdr.fill(resp.Header)
+	return resp, nil
+}
+
+func (s *kvServer) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
+	if err := checkDeleteRequest(r); err != nil {
+		return nil, err
+	}
+
+	resp, err := s.kv.DeleteRange(ctx, r)
+	if err != nil {
+		return nil, togRPCError(err)
+	}
+
+	s.hdr.fill(resp.Header)
+	return resp, nil
+}
+
+func (s *kvServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) {
+	if err := checkTxnRequest(r, int(s.maxTxnOps)); err != nil {
+		return nil, err
+	}
+	// check for forbidden put/del overlaps after checking the request to avoid quadratic blowup
+	if _, _, err := checkIntervals(r.Success); err != nil {
+		return nil, err
+	}
+	if _, _, err := checkIntervals(r.Failure); err != nil {
+		return nil, err
+	}
+
+	resp, err := s.kv.Txn(ctx, r)
+	if err != nil {
+		return nil, togRPCError(err)
+	}
+
+	s.hdr.fill(resp.Header)
+	return resp, nil
+}
+
+func (s *kvServer) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) {
+	resp, err := s.kv.Compact(ctx, r)
+	if err != nil {
+		return nil, togRPCError(err)
+	}
+
+	s.hdr.fill(resp.Header)
+	return resp, nil
+}
+
+func checkRangeRequest(r *pb.RangeRequest) error {
+	if len(r.Key) == 0 {
+		return rpctypes.ErrGRPCEmptyKey
+	}
+	return nil
+}
+
+func checkPutRequest(r *pb.PutRequest) error {
+	if len(r.Key) == 0 {
+		return rpctypes.ErrGRPCEmptyKey
+	}
+	if r.IgnoreValue && len(r.Value) != 0 {
+		return rpctypes.ErrGRPCValueProvided
+	}
+	if r.IgnoreLease && r.Lease != 0 {
+		return rpctypes.ErrGRPCLeaseProvided
+	}
+	return nil
+}
+
+func checkDeleteRequest(r *pb.DeleteRangeRequest) error {
+	if len(r.Key) == 0 {
+		return rpctypes.ErrGRPCEmptyKey
+	}
+	return nil
+}
+
+func checkTxnRequest(r *pb.TxnRequest, maxTxnOps int) error {
+	opc := len(r.Compare)
+	if opc < len(r.Success) {
+		opc = len(r.Success)
+	}
+	if opc < len(r.Failure) {
+		opc = len(r.Failure)
+	}
+	if opc > maxTxnOps {
+		return rpctypes.ErrGRPCTooManyOps
+	}
+
+	for _, c := range r.Compare {
+		if len(c.Key) == 0 {
+			return rpctypes.ErrGRPCEmptyKey
+		}
+	}
+	for _, u := range r.Success {
+		if err := checkRequestOp(u, maxTxnOps-opc); err != nil {
+			return err
+		}
+	}
+	for _, u := range r.Failure {
+		if err := checkRequestOp(u, maxTxnOps-opc); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
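+
+// For illustration only (not part of upstream etcd): checkTxnRequest bounds a
+// txn by the largest of its three op lists, e.g. with maxTxnOps = 128:
+//
+//	// len(Compare)=100, len(Success)=128, len(Failure)=128 -> opc=128, accepted
+//	// len(Compare)=100, len(Success)=129, len(Failure)=0   -> opc=129, ErrGRPCTooManyOps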
+
+// checkIntervals tests whether puts and deletes overlap for a list of ops. If
+// there is an overlap, it returns an error. If there is no overlap, it returns
+// the put and delete sets for recursive evaluation.
+func checkIntervals(reqs []*pb.RequestOp) (map[string]struct{}, adt.IntervalTree, error) {
+	var dels adt.IntervalTree
+
+	// collect deletes from this level; build first to check lower level overlapped puts
+	for _, req := range reqs {
+		tv, ok := req.Request.(*pb.RequestOp_RequestDeleteRange)
+		if !ok {
+			continue
+		}
+		dreq := tv.RequestDeleteRange
+		if dreq == nil {
+			continue
+		}
+		var iv adt.Interval
+		if len(dreq.RangeEnd) != 0 {
+			iv = adt.NewStringAffineInterval(string(dreq.Key), string(dreq.RangeEnd))
+		} else {
+			iv = adt.NewStringAffinePoint(string(dreq.Key))
+		}
+		dels.Insert(iv, struct{}{})
+	}
+
+	// collect children puts/deletes
+	puts := make(map[string]struct{})
+	for _, req := range reqs {
+		tv, ok := req.Request.(*pb.RequestOp_RequestTxn)
+		if !ok {
+			continue
+		}
+		putsThen, delsThen, err := checkIntervals(tv.RequestTxn.Success)
+		if err != nil {
+			return nil, dels, err
+		}
+		putsElse, delsElse, err := checkIntervals(tv.RequestTxn.Failure)
+		if err != nil {
+			return nil, dels, err
+		}
+		for k := range putsThen {
+			if _, ok := puts[k]; ok {
+				return nil, dels, rpctypes.ErrGRPCDuplicateKey
+			}
+			if dels.Intersects(adt.NewStringAffinePoint(k)) {
+				return nil, dels, rpctypes.ErrGRPCDuplicateKey
+			}
+			puts[k] = struct{}{}
+		}
+		for k := range putsElse {
+			if _, ok := puts[k]; ok {
+				// if key is from putsThen, overlap is OK since
+				// either then/else are mutually exclusive
+				if _, isSafe := putsThen[k]; !isSafe {
+					return nil, dels, rpctypes.ErrGRPCDuplicateKey
+				}
+			}
+			if dels.Intersects(adt.NewStringAffinePoint(k)) {
+				return nil, dels, rpctypes.ErrGRPCDuplicateKey
+			}
+			puts[k] = struct{}{}
+		}
+		dels.Union(delsThen, adt.NewStringAffineInterval("\x00", ""))
+		dels.Union(delsElse, adt.NewStringAffineInterval("\x00", ""))
+	}
+
+	// collect and check this level's puts
+	for _, req := range reqs {
+		tv, ok := req.Request.(*pb.RequestOp_RequestPut)
+		if !ok || tv.RequestPut == nil {
+			continue
+		}
+		k := string(tv.RequestPut.Key)
+		if _, ok := puts[k]; ok {
+			return nil, dels, rpctypes.ErrGRPCDuplicateKey
+		}
+		if dels.Intersects(adt.NewStringAffinePoint(k)) {
+			return nil, dels, rpctypes.ErrGRPCDuplicateKey
+		}
+		puts[k] = struct{}{}
+	}
+	return puts, dels, nil
+}
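+
+// For illustration only (not part of upstream etcd): some concrete cases,
+// written as clientv3-style shorthand rather than raw RequestOps:
+//
+//	// Success: [Put("a"), Put("a")]            -> ErrGRPCDuplicateKey
+//	// Success: [Delete("a".."c"), Put("b")]    -> ErrGRPCDuplicateKey
+//	// Success: [Put("a")], Failure: [Put("a")] -> allowed; the two branches
+//	// are checked independently since only one of them executes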
+
+func checkRequestOp(u *pb.RequestOp, maxTxnOps int) error {
+	// TODO: ensure only one of the fields is set.
+	switch uv := u.Request.(type) {
+	case *pb.RequestOp_RequestRange:
+		return checkRangeRequest(uv.RequestRange)
+	case *pb.RequestOp_RequestPut:
+		return checkPutRequest(uv.RequestPut)
+	case *pb.RequestOp_RequestDeleteRange:
+		return checkDeleteRequest(uv.RequestDeleteRange)
+	case *pb.RequestOp_RequestTxn:
+		return checkTxnRequest(uv.RequestTxn, maxTxnOps)
+	default:
+		// empty op / nil entry
+		return rpctypes.ErrGRPCKeyNotFound
+	}
+}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go
new file mode 100644
index 0000000..5b4f2b1
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go
@@ -0,0 +1,148 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3rpc
+
+import (
+	"context"
+	"io"
+
+	"github.com/coreos/etcd/etcdserver"
+	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+	"github.com/coreos/etcd/lease"
+)
+
+type LeaseServer struct {
+	hdr header
+	le  etcdserver.Lessor
+}
+
+func NewLeaseServer(s *etcdserver.EtcdServer) pb.LeaseServer {
+	return &LeaseServer{le: s, hdr: newHeader(s)}
+}
+
+func (ls *LeaseServer) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
+	resp, err := ls.le.LeaseGrant(ctx, cr)
+
+	if err != nil {
+		return nil, togRPCError(err)
+	}
+	ls.hdr.fill(resp.Header)
+	return resp, nil
+}
+
+func (ls *LeaseServer) LeaseRevoke(ctx context.Context, rr *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
+	resp, err := ls.le.LeaseRevoke(ctx, rr)
+	if err != nil {
+		return nil, togRPCError(err)
+	}
+	ls.hdr.fill(resp.Header)
+	return resp, nil
+}
+
+func (ls *LeaseServer) LeaseTimeToLive(ctx context.Context, rr *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) {
+	resp, err := ls.le.LeaseTimeToLive(ctx, rr)
+	if err != nil && err != lease.ErrLeaseNotFound {
+		return nil, togRPCError(err)
+	}
+	if err == lease.ErrLeaseNotFound {
+		resp = &pb.LeaseTimeToLiveResponse{
+			Header: &pb.ResponseHeader{},
+			ID:     rr.ID,
+			TTL:    -1,
+		}
+	}
+	ls.hdr.fill(resp.Header)
+	return resp, nil
+}
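+
+// For illustration only (not part of upstream etcd): querying an unknown lease
+// is not an error at this layer; the caller observes TTL == -1 instead:
+//
+//	// resp, _ := ls.LeaseTimeToLive(ctx, &pb.LeaseTimeToLiveRequest{ID: unknownID})
+//	// resp.TTL == -1 => the lease expired or never existed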
+
+func (ls *LeaseServer) LeaseLeases(ctx context.Context, rr *pb.LeaseLeasesRequest) (*pb.LeaseLeasesResponse, error) {
+	resp, err := ls.le.LeaseLeases(ctx, rr)
+	if err != nil && err != lease.ErrLeaseNotFound {
+		return nil, togRPCError(err)
+	}
+	if err == lease.ErrLeaseNotFound {
+		resp = &pb.LeaseLeasesResponse{
+			Header: &pb.ResponseHeader{},
+			Leases: []*pb.LeaseStatus{},
+		}
+	}
+	ls.hdr.fill(resp.Header)
+	return resp, nil
+}
+
+func (ls *LeaseServer) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) (err error) {
+	errc := make(chan error, 1)
+	go func() {
+		errc <- ls.leaseKeepAlive(stream)
+	}()
+	select {
+	case err = <-errc:
+	case <-stream.Context().Done():
+		// the only server-side cancellation is noleader for now.
+		err = stream.Context().Err()
+		if err == context.Canceled {
+			err = rpctypes.ErrGRPCNoLeader
+		}
+	}
+	return err
+}
+
+func (ls *LeaseServer) leaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) error {
+	for {
+		req, err := stream.Recv()
+		if err == io.EOF {
+			return nil
+		}
+		if err != nil {
+			if isClientCtxErr(stream.Context().Err(), err) {
+				plog.Debugf("failed to receive lease keepalive request from gRPC stream (%q)", err.Error())
+			} else {
+				plog.Warningf("failed to receive lease keepalive request from gRPC stream (%q)", err.Error())
+			}
+			return err
+		}
+
+		// Create the header before we send out the renew request.
+		// This ensures that the revision is smaller than or equal to the revision
+		// at which the keepalive happened at the local server (when the local
+		// server is the leader) or at the remote leader.
+		// Without this, a lease might be revoked at rev 3 but the client could
+		// see the keepalive succeed at rev 4.
+		resp := &pb.LeaseKeepAliveResponse{ID: req.ID, Header: &pb.ResponseHeader{}}
+		ls.hdr.fill(resp.Header)
+
+		ttl, err := ls.le.LeaseRenew(stream.Context(), lease.LeaseID(req.ID))
+		if err == lease.ErrLeaseNotFound {
+			err = nil
+			ttl = 0
+		}
+
+		if err != nil {
+			return togRPCError(err)
+		}
+
+		resp.TTL = ttl
+		err = stream.Send(resp)
+		if err != nil {
+			if isClientCtxErr(stream.Context().Err(), err) {
+				plog.Debugf("failed to send lease keepalive response to gRPC stream (%q)", err.Error())
+			} else {
+				plog.Warningf("failed to send lease keepalive response to gRPC stream (%q)", err.Error())
+			}
+			return err
+		}
+	}
+}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go
new file mode 100644
index 0000000..c9df180
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go
@@ -0,0 +1,229 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3rpc
+
+import (
+	"context"
+	"crypto/sha256"
+	"io"
+
+	"github.com/coreos/etcd/auth"
+	"github.com/coreos/etcd/etcdserver"
+	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+	"github.com/coreos/etcd/mvcc"
+	"github.com/coreos/etcd/mvcc/backend"
+	"github.com/coreos/etcd/pkg/types"
+	"github.com/coreos/etcd/version"
+)
+
+type KVGetter interface {
+	KV() mvcc.ConsistentWatchableKV
+}
+
+type BackendGetter interface {
+	Backend() backend.Backend
+}
+
+type Alarmer interface {
+	Alarm(ctx context.Context, ar *pb.AlarmRequest) (*pb.AlarmResponse, error)
+}
+
+type LeaderTransferrer interface {
+	MoveLeader(ctx context.Context, lead, target uint64) error
+}
+
+type RaftStatusGetter interface {
+	etcdserver.RaftTimer
+	ID() types.ID
+	Leader() types.ID
+}
+
+type AuthGetter interface {
+	AuthInfoFromCtx(ctx context.Context) (*auth.AuthInfo, error)
+	AuthStore() auth.AuthStore
+}
+
+type maintenanceServer struct {
+	rg  RaftStatusGetter
+	kg  KVGetter
+	bg  BackendGetter
+	a   Alarmer
+	lt  LeaderTransferrer
+	hdr header
+}
+
+func NewMaintenanceServer(s *etcdserver.EtcdServer) pb.MaintenanceServer {
+	srv := &maintenanceServer{rg: s, kg: s, bg: s, a: s, lt: s, hdr: newHeader(s)}
+	return &authMaintenanceServer{srv, s}
+}
+
+func (ms *maintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) {
+	plog.Noticef("starting to defragment the storage backend...")
+	err := ms.bg.Backend().Defrag()
+	if err != nil {
+		plog.Errorf("failed to defragment the storage backend (%v)", err)
+		return nil, err
+	}
+	plog.Noticef("finished defragmenting the storage backend")
+	return &pb.DefragmentResponse{}, nil
+}
+
+func (ms *maintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance_SnapshotServer) error {
+	snap := ms.bg.Backend().Snapshot()
+	pr, pw := io.Pipe()
+
+	defer pr.Close()
+
+	go func() {
+		snap.WriteTo(pw)
+		if err := snap.Close(); err != nil {
+			plog.Errorf("error closing snapshot (%v)", err)
+		}
+		pw.Close()
+	}()
+
+	// send file data
+	h := sha256.New()
+	br := int64(0)
+	buf := make([]byte, 32*1024)
+	sz := snap.Size()
+	for br < sz {
+		n, err := io.ReadFull(pr, buf)
+		if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
+			return togRPCError(err)
+		}
+		br += int64(n)
+		resp := &pb.SnapshotResponse{
+			RemainingBytes: uint64(sz - br),
+			Blob:           buf[:n],
+		}
+		if err = srv.Send(resp); err != nil {
+			return togRPCError(err)
+		}
+		h.Write(buf[:n])
+	}
+
+	// send sha
+	sha := h.Sum(nil)
+	hresp := &pb.SnapshotResponse{RemainingBytes: 0, Blob: sha}
+	if err := srv.Send(hresp); err != nil {
+		return togRPCError(err)
+	}
+
+	return nil
+}
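+
+// For illustration only (not part of upstream etcd): the stream above carries
+// the snapshot as 32KB chunks followed by one trailing message whose Blob is
+// the sha256 of all chunk bytes, so a receiver holding the ordered responses
+// in msgs can verify integrity positionally:
+//
+//	h := sha256.New()
+//	for _, m := range msgs[:len(msgs)-1] {
+//		h.Write(m.Blob)
+//	}
+//	ok := bytes.Equal(msgs[len(msgs)-1].Blob, h.Sum(nil))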
+
+func (ms *maintenanceServer) Hash(ctx context.Context, r *pb.HashRequest) (*pb.HashResponse, error) {
+	h, rev, err := ms.kg.KV().Hash()
+	if err != nil {
+		return nil, togRPCError(err)
+	}
+	resp := &pb.HashResponse{Header: &pb.ResponseHeader{Revision: rev}, Hash: h}
+	ms.hdr.fill(resp.Header)
+	return resp, nil
+}
+
+func (ms *maintenanceServer) HashKV(ctx context.Context, r *pb.HashKVRequest) (*pb.HashKVResponse, error) {
+	h, rev, compactRev, err := ms.kg.KV().HashByRev(r.Revision)
+	if err != nil {
+		return nil, togRPCError(err)
+	}
+
+	resp := &pb.HashKVResponse{Header: &pb.ResponseHeader{Revision: rev}, Hash: h, CompactRevision: compactRev}
+	ms.hdr.fill(resp.Header)
+	return resp, nil
+}
+
+func (ms *maintenanceServer) Alarm(ctx context.Context, ar *pb.AlarmRequest) (*pb.AlarmResponse, error) {
+	return ms.a.Alarm(ctx, ar)
+}
+
+func (ms *maintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) (*pb.StatusResponse, error) {
+	resp := &pb.StatusResponse{
+		Header:    &pb.ResponseHeader{Revision: ms.hdr.rev()},
+		Version:   version.Version,
+		DbSize:    ms.bg.Backend().Size(),
+		Leader:    uint64(ms.rg.Leader()),
+		RaftIndex: ms.rg.Index(),
+		RaftTerm:  ms.rg.Term(),
+	}
+	ms.hdr.fill(resp.Header)
+	return resp, nil
+}
+
+func (ms *maintenanceServer) MoveLeader(ctx context.Context, tr *pb.MoveLeaderRequest) (*pb.MoveLeaderResponse, error) {
+	if ms.rg.ID() != ms.rg.Leader() {
+		return nil, rpctypes.ErrGRPCNotLeader
+	}
+
+	if err := ms.lt.MoveLeader(ctx, uint64(ms.rg.Leader()), tr.TargetID); err != nil {
+		return nil, togRPCError(err)
+	}
+	return &pb.MoveLeaderResponse{}, nil
+}
+
+type authMaintenanceServer struct {
+	*maintenanceServer
+	ag AuthGetter
+}
+
+func (ams *authMaintenanceServer) isAuthenticated(ctx context.Context) error {
+	authInfo, err := ams.ag.AuthInfoFromCtx(ctx)
+	if err != nil {
+		return err
+	}
+
+	return ams.ag.AuthStore().IsAdminPermitted(authInfo)
+}
+
+func (ams *authMaintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) {
+	if err := ams.isAuthenticated(ctx); err != nil {
+		return nil, err
+	}
+
+	return ams.maintenanceServer.Defragment(ctx, sr)
+}
+
+func (ams *authMaintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance_SnapshotServer) error {
+	if err := ams.isAuthenticated(srv.Context()); err != nil {
+		return err
+	}
+
+	return ams.maintenanceServer.Snapshot(sr, srv)
+}
+
+func (ams *authMaintenanceServer) Hash(ctx context.Context, r *pb.HashRequest) (*pb.HashResponse, error) {
+	if err := ams.isAuthenticated(ctx); err != nil {
+		return nil, err
+	}
+
+	return ams.maintenanceServer.Hash(ctx, r)
+}
+
+func (ams *authMaintenanceServer) HashKV(ctx context.Context, r *pb.HashKVRequest) (*pb.HashKVResponse, error) {
+	if err := ams.isAuthenticated(ctx); err != nil {
+		return nil, err
+	}
+	return ams.maintenanceServer.HashKV(ctx, r)
+}
+
+func (ams *authMaintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) (*pb.StatusResponse, error) {
+	return ams.maintenanceServer.Status(ctx, ar)
+}
+
+func (ams *authMaintenanceServer) MoveLeader(ctx context.Context, tr *pb.MoveLeaderRequest) (*pb.MoveLeaderResponse, error) {
+	return ams.maintenanceServer.MoveLeader(ctx, tr)
+}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/member.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/member.go
new file mode 100644
index 0000000..cbe7b47
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/member.go
@@ -0,0 +1,101 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3rpc
+
+import (
+	"context"
+	"time"
+
+	"github.com/coreos/etcd/etcdserver"
+	"github.com/coreos/etcd/etcdserver/api"
+	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+	"github.com/coreos/etcd/etcdserver/membership"
+	"github.com/coreos/etcd/pkg/types"
+)
+
+type ClusterServer struct {
+	cluster api.Cluster
+	server  etcdserver.ServerV3
+}
+
+func NewClusterServer(s etcdserver.ServerV3) *ClusterServer {
+	return &ClusterServer{
+		cluster: s.Cluster(),
+		server:  s,
+	}
+}
+
+func (cs *ClusterServer) MemberAdd(ctx context.Context, r *pb.MemberAddRequest) (*pb.MemberAddResponse, error) {
+	urls, err := types.NewURLs(r.PeerURLs)
+	if err != nil {
+		return nil, rpctypes.ErrGRPCMemberBadURLs
+	}
+
+	now := time.Now()
+	m := membership.NewMember("", urls, "", &now)
+	membs, merr := cs.server.AddMember(ctx, *m)
+	if merr != nil {
+		return nil, togRPCError(merr)
+	}
+
+	return &pb.MemberAddResponse{
+		Header:  cs.header(),
+		Member:  &pb.Member{ID: uint64(m.ID), PeerURLs: m.PeerURLs},
+		Members: membersToProtoMembers(membs),
+	}, nil
+}
+
+func (cs *ClusterServer) MemberRemove(ctx context.Context, r *pb.MemberRemoveRequest) (*pb.MemberRemoveResponse, error) {
+	membs, err := cs.server.RemoveMember(ctx, r.ID)
+	if err != nil {
+		return nil, togRPCError(err)
+	}
+	return &pb.MemberRemoveResponse{Header: cs.header(), Members: membersToProtoMembers(membs)}, nil
+}
+
+func (cs *ClusterServer) MemberUpdate(ctx context.Context, r *pb.MemberUpdateRequest) (*pb.MemberUpdateResponse, error) {
+	m := membership.Member{
+		ID:             types.ID(r.ID),
+		RaftAttributes: membership.RaftAttributes{PeerURLs: r.PeerURLs},
+	}
+	membs, err := cs.server.UpdateMember(ctx, m)
+	if err != nil {
+		return nil, togRPCError(err)
+	}
+	return &pb.MemberUpdateResponse{Header: cs.header(), Members: membersToProtoMembers(membs)}, nil
+}
+
+func (cs *ClusterServer) MemberList(ctx context.Context, r *pb.MemberListRequest) (*pb.MemberListResponse, error) {
+	membs := membersToProtoMembers(cs.cluster.Members())
+	return &pb.MemberListResponse{Header: cs.header(), Members: membs}, nil
+}
+
+func (cs *ClusterServer) header() *pb.ResponseHeader {
+	return &pb.ResponseHeader{ClusterId: uint64(cs.cluster.ID()), MemberId: uint64(cs.server.ID()), RaftTerm: cs.server.Term()}
+}
+
+func membersToProtoMembers(membs []*membership.Member) []*pb.Member {
+	protoMembs := make([]*pb.Member, len(membs))
+	for i := range membs {
+		protoMembs[i] = &pb.Member{
+			Name:       membs[i].Name,
+			ID:         uint64(membs[i].ID),
+			PeerURLs:   membs[i].PeerURLs,
+			ClientURLs: membs[i].ClientURLs,
+		}
+	}
+	return protoMembs
+}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/metrics.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/metrics.go
new file mode 100644
index 0000000..6cb41a6
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/metrics.go
@@ -0,0 +1,38 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3rpc
+
+import "github.com/prometheus/client_golang/prometheus"
+
+var (
+	sentBytes = prometheus.NewCounter(prometheus.CounterOpts{
+		Namespace: "etcd",
+		Subsystem: "network",
+		Name:      "client_grpc_sent_bytes_total",
+		Help:      "The total number of bytes sent to grpc clients.",
+	})
+
+	receivedBytes = prometheus.NewCounter(prometheus.CounterOpts{
+		Namespace: "etcd",
+		Subsystem: "network",
+		Name:      "client_grpc_received_bytes_total",
+		Help:      "The total number of bytes received from grpc clients.",
+	})
+)
+
+func init() {
+	prometheus.MustRegister(sentBytes)
+	prometheus.MustRegister(receivedBytes)
+}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/quota.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/quota.go
new file mode 100644
index 0000000..02d9960
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/quota.go
@@ -0,0 +1,90 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3rpc
+
+import (
+	"context"
+
+	"github.com/coreos/etcd/etcdserver"
+	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+	"github.com/coreos/etcd/pkg/types"
+)
+
+type quotaKVServer struct {
+	pb.KVServer
+	qa quotaAlarmer
+}
+
+type quotaAlarmer struct {
+	q  etcdserver.Quota
+	a  Alarmer
+	id types.ID
+}
+
+// check whether the request satisfies the quota. If there is not enough space,
+// ignore the request and raise the free space alarm.
+func (qa *quotaAlarmer) check(ctx context.Context, r interface{}) error {
+	if qa.q.Available(r) {
+		return nil
+	}
+	req := &pb.AlarmRequest{
+		MemberID: uint64(qa.id),
+		Action:   pb.AlarmRequest_ACTIVATE,
+		Alarm:    pb.AlarmType_NOSPACE,
+	}
+	qa.a.Alarm(ctx, req)
+	return rpctypes.ErrGRPCNoSpace
+}
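+
+// For illustration only (not part of upstream etcd): once the backend quota is
+// exhausted, a mutating request fails fast and the cluster-wide NOSPACE alarm
+// is activated:
+//
+//	// _, err := quotaKV.Put(ctx, &pb.PutRequest{Key: k, Value: v})
+//	// err == rpctypes.ErrGRPCNoSpace, and Alarm(ACTIVATE, NOSPACE) was raised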
+
+func NewQuotaKVServer(s *etcdserver.EtcdServer) pb.KVServer {
+	return &quotaKVServer{
+		NewKVServer(s),
+		quotaAlarmer{etcdserver.NewBackendQuota(s), s, s.ID()},
+	}
+}
+
+func (s *quotaKVServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) {
+	if err := s.qa.check(ctx, r); err != nil {
+		return nil, err
+	}
+	return s.KVServer.Put(ctx, r)
+}
+
+func (s *quotaKVServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) {
+	if err := s.qa.check(ctx, r); err != nil {
+		return nil, err
+	}
+	return s.KVServer.Txn(ctx, r)
+}
+
+type quotaLeaseServer struct {
+	pb.LeaseServer
+	qa quotaAlarmer
+}
+
+func (s *quotaLeaseServer) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
+	if err := s.qa.check(ctx, cr); err != nil {
+		return nil, err
+	}
+	return s.LeaseServer.LeaseGrant(ctx, cr)
+}
+
+func NewQuotaLeaseServer(s *etcdserver.EtcdServer) pb.LeaseServer {
+	return &quotaLeaseServer{
+		NewLeaseServer(s),
+		quotaAlarmer{etcdserver.NewBackendQuota(s), s, s.ID()},
+	}
+}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go
new file mode 100644
index 0000000..799c119
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go
@@ -0,0 +1,117 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3rpc
+
+import (
+	"context"
+	"strings"
+
+	"github.com/coreos/etcd/auth"
+	"github.com/coreos/etcd/etcdserver"
+	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
+	"github.com/coreos/etcd/etcdserver/membership"
+	"github.com/coreos/etcd/lease"
+	"github.com/coreos/etcd/mvcc"
+
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+var toGRPCErrorMap = map[error]error{
+	membership.ErrIDRemoved:               rpctypes.ErrGRPCMemberNotFound,
+	membership.ErrIDNotFound:              rpctypes.ErrGRPCMemberNotFound,
+	membership.ErrIDExists:                rpctypes.ErrGRPCMemberExist,
+	membership.ErrPeerURLexists:           rpctypes.ErrGRPCPeerURLExist,
+	etcdserver.ErrNotEnoughStartedMembers: rpctypes.ErrMemberNotEnoughStarted,
+
+	mvcc.ErrCompacted:             rpctypes.ErrGRPCCompacted,
+	mvcc.ErrFutureRev:             rpctypes.ErrGRPCFutureRev,
+	etcdserver.ErrRequestTooLarge: rpctypes.ErrGRPCRequestTooLarge,
+	etcdserver.ErrNoSpace:         rpctypes.ErrGRPCNoSpace,
+	etcdserver.ErrTooManyRequests: rpctypes.ErrTooManyRequests,
+
+	etcdserver.ErrNoLeader:                   rpctypes.ErrGRPCNoLeader,
+	etcdserver.ErrNotLeader:                  rpctypes.ErrGRPCNotLeader,
+	etcdserver.ErrStopped:                    rpctypes.ErrGRPCStopped,
+	etcdserver.ErrTimeout:                    rpctypes.ErrGRPCTimeout,
+	etcdserver.ErrTimeoutDueToLeaderFail:     rpctypes.ErrGRPCTimeoutDueToLeaderFail,
+	etcdserver.ErrTimeoutDueToConnectionLost: rpctypes.ErrGRPCTimeoutDueToConnectionLost,
+	etcdserver.ErrUnhealthy:                  rpctypes.ErrGRPCUnhealthy,
+	etcdserver.ErrKeyNotFound:                rpctypes.ErrGRPCKeyNotFound,
+	etcdserver.ErrCorrupt:                    rpctypes.ErrGRPCCorrupt,
+
+	lease.ErrLeaseNotFound:    rpctypes.ErrGRPCLeaseNotFound,
+	lease.ErrLeaseExists:      rpctypes.ErrGRPCLeaseExist,
+	lease.ErrLeaseTTLTooLarge: rpctypes.ErrGRPCLeaseTTLTooLarge,
+
+	auth.ErrRootUserNotExist:     rpctypes.ErrGRPCRootUserNotExist,
+	auth.ErrRootRoleNotExist:     rpctypes.ErrGRPCRootRoleNotExist,
+	auth.ErrUserAlreadyExist:     rpctypes.ErrGRPCUserAlreadyExist,
+	auth.ErrUserEmpty:            rpctypes.ErrGRPCUserEmpty,
+	auth.ErrUserNotFound:         rpctypes.ErrGRPCUserNotFound,
+	auth.ErrRoleAlreadyExist:     rpctypes.ErrGRPCRoleAlreadyExist,
+	auth.ErrRoleNotFound:         rpctypes.ErrGRPCRoleNotFound,
+	auth.ErrAuthFailed:           rpctypes.ErrGRPCAuthFailed,
+	auth.ErrPermissionDenied:     rpctypes.ErrGRPCPermissionDenied,
+	auth.ErrRoleNotGranted:       rpctypes.ErrGRPCRoleNotGranted,
+	auth.ErrPermissionNotGranted: rpctypes.ErrGRPCPermissionNotGranted,
+	auth.ErrAuthNotEnabled:       rpctypes.ErrGRPCAuthNotEnabled,
+	auth.ErrInvalidAuthToken:     rpctypes.ErrGRPCInvalidAuthToken,
+	auth.ErrInvalidAuthMgmt:      rpctypes.ErrGRPCInvalidAuthMgmt,
+}
+
+func togRPCError(err error) error {
+	// let gRPC server convert to codes.Canceled, codes.DeadlineExceeded
+	if err == context.Canceled || err == context.DeadlineExceeded {
+		return err
+	}
+	grpcErr, ok := toGRPCErrorMap[err]
+	if !ok {
+		return status.Error(codes.Unknown, err.Error())
+	}
+	return grpcErr
+}
+
+func isClientCtxErr(ctxErr error, err error) bool {
+	if ctxErr != nil {
+		return true
+	}
+
+	ev, ok := status.FromError(err)
+	if !ok {
+		return false
+	}
+
+	switch ev.Code() {
+	case codes.Canceled, codes.DeadlineExceeded:
+		// client-side context cancel or deadline exceeded
+		// "rpc error: code = Canceled desc = context canceled"
+		// "rpc error: code = DeadlineExceeded desc = context deadline exceeded"
+		return true
+	case codes.Unavailable:
+		msg := ev.Message()
+		// client-side context cancel or deadline exceeded with TLS ("http2.errClientDisconnected")
+		// "rpc error: code = Unavailable desc = client disconnected"
+		if msg == "client disconnected" {
+			return true
+		}
+		// "grpc/transport.ClientTransport.CloseStream" on canceled streams
+		// "rpc error: code = Unavailable desc = stream error: stream ID 21; CANCEL")
+		if strings.HasPrefix(msg, "stream error: ") && strings.HasSuffix(msg, "; CANCEL") {
+			return true
+		}
+	}
+	return false
+}
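+
+// For illustration only (not part of upstream etcd): how togRPCError behaves
+// for a few representative inputs:
+//
+//	// togRPCError(mvcc.ErrCompacted)  -> rpctypes.ErrGRPCCompacted
+//	// togRPCError(errors.New("boom")) -> status.Error(codes.Unknown, "boom")
+//	// togRPCError(context.Canceled)   -> context.Canceled (left for gRPC to convert)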
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go
new file mode 100644
index 0000000..dd4f329
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go
@@ -0,0 +1,447 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3rpc
+
+import (
+	"context"
+	"io"
+	"sync"
+	"time"
+
+	"github.com/coreos/etcd/auth"
+	"github.com/coreos/etcd/etcdserver"
+	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+	"github.com/coreos/etcd/mvcc"
+	"github.com/coreos/etcd/mvcc/mvccpb"
+)
+
+type watchServer struct {
+	clusterID int64
+	memberID  int64
+	raftTimer etcdserver.RaftTimer
+	watchable mvcc.WatchableKV
+
+	ag AuthGetter
+}
+
+func NewWatchServer(s *etcdserver.EtcdServer) pb.WatchServer {
+	return &watchServer{
+		clusterID: int64(s.Cluster().ID()),
+		memberID:  int64(s.ID()),
+		raftTimer: s,
+		watchable: s.Watchable(),
+		ag:        s,
+	}
+}
+
+var (
+	// External tests can read this with GetProgressReportInterval()
+	// and change it to a small value via SetProgressReportInterval()
+	// to finish fast.
+	progressReportInterval   = 10 * time.Minute
+	progressReportIntervalMu sync.RWMutex
+)
+
+func GetProgressReportInterval() time.Duration {
+	progressReportIntervalMu.RLock()
+	defer progressReportIntervalMu.RUnlock()
+	return progressReportInterval
+}
+
+func SetProgressReportInterval(newTimeout time.Duration) {
+	progressReportIntervalMu.Lock()
+	defer progressReportIntervalMu.Unlock()
+	progressReportInterval = newTimeout
+}
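+
+// For illustration only (not part of upstream etcd): a test can shrink the
+// interval so progress notifications arrive quickly, restoring it afterwards:
+//
+//	old := GetProgressReportInterval()
+//	SetProgressReportInterval(100 * time.Millisecond)
+//	defer SetProgressReportInterval(old)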
+
+const (
+	// We send ctrl responses inside the read loop. We do not want sends
+	// to block reads, but we still want the ctrl responses we send to be
+	// serialized. Thus we use a buffered chan to solve the problem.
+	// A small buffer should be OK for most cases, since we expect ctrl
+	// requests to be infrequent.
+	ctrlStreamBufLen = 16
+)
+
+// serverWatchStream is an etcd server side stream. It receives requests
+// from the client side gRPC stream. It receives watch events from
+// mvcc.WatchStream, and creates responses that are forwarded to the gRPC
+// stream. It also forwards control messages like watch created and canceled.
+type serverWatchStream struct {
+	clusterID int64
+	memberID  int64
+	raftTimer etcdserver.RaftTimer
+
+	watchable mvcc.WatchableKV
+
+	gRPCStream  pb.Watch_WatchServer
+	watchStream mvcc.WatchStream
+	ctrlStream  chan *pb.WatchResponse
+
+	// mu protects progress, prevKV
+	mu sync.Mutex
+	// progress tracks the watch IDs that the stream might need to send
+	// progress to.
+	// TODO: combine progress and prevKV into a single struct?
+	progress map[mvcc.WatchID]bool
+	prevKV   map[mvcc.WatchID]bool
+
+	// closec indicates the stream is closed.
+	closec chan struct{}
+
+	// wg waits for the send loop to complete
+	wg sync.WaitGroup
+
+	ag AuthGetter
+}
+
+func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) {
+	sws := serverWatchStream{
+		clusterID: ws.clusterID,
+		memberID:  ws.memberID,
+		raftTimer: ws.raftTimer,
+
+		watchable: ws.watchable,
+
+		gRPCStream:  stream,
+		watchStream: ws.watchable.NewWatchStream(),
+		// chan for sending control responses like watcher created and canceled.
+		ctrlStream: make(chan *pb.WatchResponse, ctrlStreamBufLen),
+		progress:   make(map[mvcc.WatchID]bool),
+		prevKV:     make(map[mvcc.WatchID]bool),
+		closec:     make(chan struct{}),
+
+		ag: ws.ag,
+	}
+
+	sws.wg.Add(1)
+	go func() {
+		sws.sendLoop()
+		sws.wg.Done()
+	}()
+
+	errc := make(chan error, 1)
+	// Ideally recvLoop would also use sws.wg to signal its completion,
+	// but when stream.Context().Done() is closed, the stream's recv
+	// may continue to block since it uses a different context, leading to
+	// deadlock when calling sws.close().
+	go func() {
+		if rerr := sws.recvLoop(); rerr != nil {
+			if isClientCtxErr(stream.Context().Err(), rerr) {
+				plog.Debugf("failed to receive watch request from gRPC stream (%q)", rerr.Error())
+			} else {
+				plog.Warningf("failed to receive watch request from gRPC stream (%q)", rerr.Error())
+			}
+			errc <- rerr
+		}
+	}()
+	select {
+	case err = <-errc:
+		close(sws.ctrlStream)
+	case <-stream.Context().Done():
+		err = stream.Context().Err()
+		// the only server-side cancellation is noleader for now.
+		if err == context.Canceled {
+			err = rpctypes.ErrGRPCNoLeader
+		}
+	}
+	sws.close()
+	return err
+}
+
+func (sws *serverWatchStream) isWatchPermitted(wcr *pb.WatchCreateRequest) bool {
+	authInfo, err := sws.ag.AuthInfoFromCtx(sws.gRPCStream.Context())
+	if err != nil {
+		return false
+	}
+	if authInfo == nil {
+		// if auth is enabled, IsRangePermitted() can cause an error
+		authInfo = &auth.AuthInfo{}
+	}
+
+	return sws.ag.AuthStore().IsRangePermitted(authInfo, wcr.Key, wcr.RangeEnd) == nil
+}
+
+func (sws *serverWatchStream) recvLoop() error {
+	for {
+		req, err := sws.gRPCStream.Recv()
+		if err == io.EOF {
+			return nil
+		}
+		if err != nil {
+			return err
+		}
+
+		switch uv := req.RequestUnion.(type) {
+		case *pb.WatchRequest_CreateRequest:
+			if uv.CreateRequest == nil {
+				break
+			}
+
+			creq := uv.CreateRequest
+			if len(creq.Key) == 0 {
+				// \x00 is the smallest key
+				creq.Key = []byte{0}
+			}
+			if len(creq.RangeEnd) == 0 {
+				// force nil since watchstream.Watch distinguishes
+				// between nil and []byte{} for single key / >=
+				creq.RangeEnd = nil
+			}
+			if len(creq.RangeEnd) == 1 && creq.RangeEnd[0] == 0 {
+				// support >= key queries
+				creq.RangeEnd = []byte{}
+			}
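+
+			// For illustration only (not part of upstream etcd), the effect of
+			// the normalization above:
+			//   Key:"foo", RangeEnd:""     -> watch the single key "foo"
+			//   Key:"foo", RangeEnd:"\x00" -> watch all keys >= "foo"
+			//   Key:"",    RangeEnd:"\x00" -> watch every key in the store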
+
+			if !sws.isWatchPermitted(creq) {
+				wr := &pb.WatchResponse{
+					Header:       sws.newResponseHeader(sws.watchStream.Rev()),
+					WatchId:      -1,
+					Canceled:     true,
+					Created:      true,
+					CancelReason: rpctypes.ErrGRPCPermissionDenied.Error(),
+				}
+
+				select {
+				case sws.ctrlStream <- wr:
+				case <-sws.closec:
+				}
+				return nil
+			}
+
+			filters := FiltersFromRequest(creq)
+
+			wsrev := sws.watchStream.Rev()
+			rev := creq.StartRevision
+			if rev == 0 {
+				rev = wsrev + 1
+			}
+			id := sws.watchStream.Watch(creq.Key, creq.RangeEnd, rev, filters...)
+			if id != -1 {
+				sws.mu.Lock()
+				if creq.ProgressNotify {
+					sws.progress[id] = true
+				}
+				if creq.PrevKv {
+					sws.prevKV[id] = true
+				}
+				sws.mu.Unlock()
+			}
+			wr := &pb.WatchResponse{
+				Header:   sws.newResponseHeader(wsrev),
+				WatchId:  int64(id),
+				Created:  true,
+				Canceled: id == -1,
+			}
+			select {
+			case sws.ctrlStream <- wr:
+			case <-sws.closec:
+				return nil
+			}
+		case *pb.WatchRequest_CancelRequest:
+			if uv.CancelRequest != nil {
+				id := uv.CancelRequest.WatchId
+				err := sws.watchStream.Cancel(mvcc.WatchID(id))
+				if err == nil {
+					sws.ctrlStream <- &pb.WatchResponse{
+						Header:   sws.newResponseHeader(sws.watchStream.Rev()),
+						WatchId:  id,
+						Canceled: true,
+					}
+					sws.mu.Lock()
+					delete(sws.progress, mvcc.WatchID(id))
+					delete(sws.prevKV, mvcc.WatchID(id))
+					sws.mu.Unlock()
+				}
+			}
+		default:
+			// We probably should not shut down the entire stream when
+			// we receive an invalid command,
+			// so just do nothing instead.
+			continue
+		}
+	}
+}
+
+func (sws *serverWatchStream) sendLoop() {
+	// watch ids that are currently active
+	ids := make(map[mvcc.WatchID]struct{})
+	// watch responses pending on a watch id creation message
+	pending := make(map[mvcc.WatchID][]*pb.WatchResponse)
+
+	interval := GetProgressReportInterval()
+	progressTicker := time.NewTicker(interval)
+
+	defer func() {
+		progressTicker.Stop()
+		// drain the chan to clean up pending events
+		for ws := range sws.watchStream.Chan() {
+			mvcc.ReportEventReceived(len(ws.Events))
+		}
+		for _, wrs := range pending {
+			for _, ws := range wrs {
+				mvcc.ReportEventReceived(len(ws.Events))
+			}
+		}
+	}()
+
+	for {
+		select {
+		case wresp, ok := <-sws.watchStream.Chan():
+			if !ok {
+				return
+			}
+
+			// TODO: evs is of type []mvccpb.Event;
+			// either return []*mvccpb.Event from the mvcc package
+			// or define a protocol buffer with []mvccpb.Event.
+			evs := wresp.Events
+			events := make([]*mvccpb.Event, len(evs))
+			sws.mu.Lock()
+			needPrevKV := sws.prevKV[wresp.WatchID]
+			sws.mu.Unlock()
+			for i := range evs {
+				events[i] = &evs[i]
+
+				if needPrevKV {
+					opt := mvcc.RangeOptions{Rev: evs[i].Kv.ModRevision - 1}
+					r, err := sws.watchable.Range(evs[i].Kv.Key, nil, opt)
+					if err == nil && len(r.KVs) != 0 {
+						events[i].PrevKv = &(r.KVs[0])
+					}
+				}
+			}
+
+			canceled := wresp.CompactRevision != 0
+			wr := &pb.WatchResponse{
+				Header:          sws.newResponseHeader(wresp.Revision),
+				WatchId:         int64(wresp.WatchID),
+				Events:          events,
+				CompactRevision: wresp.CompactRevision,
+				Canceled:        canceled,
+			}
+
+			if _, hasId := ids[wresp.WatchID]; !hasId {
+				// buffer if id not yet announced
+				wrs := append(pending[wresp.WatchID], wr)
+				pending[wresp.WatchID] = wrs
+				continue
+			}
+
+			mvcc.ReportEventReceived(len(evs))
+			if err := sws.gRPCStream.Send(wr); err != nil {
+				if isClientCtxErr(sws.gRPCStream.Context().Err(), err) {
+					plog.Debugf("failed to send watch response to gRPC stream (%q)", err.Error())
+				} else {
+					plog.Warningf("failed to send watch response to gRPC stream (%q)", err.Error())
+				}
+				return
+			}
+
+			sws.mu.Lock()
+			if len(evs) > 0 && sws.progress[wresp.WatchID] {
+				// elide next progress update if sent a key update
+				sws.progress[wresp.WatchID] = false
+			}
+			sws.mu.Unlock()
+
+		case c, ok := <-sws.ctrlStream:
+			if !ok {
+				return
+			}
+
+			if err := sws.gRPCStream.Send(c); err != nil {
+				if isClientCtxErr(sws.gRPCStream.Context().Err(), err) {
+					plog.Debugf("failed to send watch control response to gRPC stream (%q)", err.Error())
+				} else {
+					plog.Warningf("failed to send watch control response to gRPC stream (%q)", err.Error())
+				}
+				return
+			}
+
+			// track id creation
+			wid := mvcc.WatchID(c.WatchId)
+			if c.Canceled {
+				delete(ids, wid)
+				continue
+			}
+			if c.Created {
+				// flush buffered events
+				ids[wid] = struct{}{}
+				for _, v := range pending[wid] {
+					mvcc.ReportEventReceived(len(v.Events))
+					if err := sws.gRPCStream.Send(v); err != nil {
+						if isClientCtxErr(sws.gRPCStream.Context().Err(), err) {
+							plog.Debugf("failed to send pending watch response to gRPC stream (%q)", err.Error())
+						} else {
+							plog.Warningf("failed to send pending watch response to gRPC stream (%q)", err.Error())
+						}
+						return
+					}
+				}
+				delete(pending, wid)
+			}
+		case <-progressTicker.C:
+			sws.mu.Lock()
+			for id, ok := range sws.progress {
+				if ok {
+					sws.watchStream.RequestProgress(id)
+				}
+				sws.progress[id] = true
+			}
+			sws.mu.Unlock()
+		case <-sws.closec:
+			return
+		}
+	}
+}
+
+func (sws *serverWatchStream) close() {
+	sws.watchStream.Close()
+	close(sws.closec)
+	sws.wg.Wait()
+}
+
+func (sws *serverWatchStream) newResponseHeader(rev int64) *pb.ResponseHeader {
+	return &pb.ResponseHeader{
+		ClusterId: uint64(sws.clusterID),
+		MemberId:  uint64(sws.memberID),
+		Revision:  rev,
+		RaftTerm:  sws.raftTimer.Term(),
+	}
+}
+
+func filterNoDelete(e mvccpb.Event) bool {
+	return e.Type == mvccpb.DELETE
+}
+
+func filterNoPut(e mvccpb.Event) bool {
+	return e.Type == mvccpb.PUT
+}
+
+func FiltersFromRequest(creq *pb.WatchCreateRequest) []mvcc.FilterFunc {
+	filters := make([]mvcc.FilterFunc, 0, len(creq.Filters))
+	for _, ft := range creq.Filters {
+		switch ft {
+		case pb.WatchCreateRequest_NOPUT:
+			filters = append(filters, filterNoPut)
+		case pb.WatchCreateRequest_NODELETE:
+			filters = append(filters, filterNoDelete)
+		default:
+		}
+	}
+	return filters
+}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/apply.go b/vendor/github.com/coreos/etcd/etcdserver/apply.go
new file mode 100644
index 0000000..93e78e3
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/apply.go
@@ -0,0 +1,972 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+	"bytes"
+	"context"
+	"sort"
+	"time"
+
+	"github.com/coreos/etcd/auth"
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+	"github.com/coreos/etcd/lease"
+	"github.com/coreos/etcd/mvcc"
+	"github.com/coreos/etcd/mvcc/mvccpb"
+	"github.com/coreos/etcd/pkg/types"
+
+	"github.com/gogo/protobuf/proto"
+)
+
+const (
+	warnApplyDuration = 100 * time.Millisecond
+)
+
+type applyResult struct {
+	resp proto.Message
+	err  error
+	// physc signals that the physical effect of the request has completed in
+	// addition to being logically reflected by the node. Currently it is only
+	// used for Compaction requests.
+	physc <-chan struct{}
+}
+
+// applierV3 is the interface for processing V3 raft messages
+type applierV3 interface {
+	Apply(r *pb.InternalRaftRequest) *applyResult
+
+	Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error)
+	Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error)
+	DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error)
+	Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error)
+	Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, error)
+
+	LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error)
+	LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error)
+
+	Alarm(*pb.AlarmRequest) (*pb.AlarmResponse, error)
+
+	Authenticate(r *pb.InternalAuthenticateRequest) (*pb.AuthenticateResponse, error)
+
+	AuthEnable() (*pb.AuthEnableResponse, error)
+	AuthDisable() (*pb.AuthDisableResponse, error)
+
+	UserAdd(ua *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error)
+	UserDelete(ua *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error)
+	UserChangePassword(ua *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error)
+	UserGrantRole(ua *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error)
+	UserGet(ua *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error)
+	UserRevokeRole(ua *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error)
+	RoleAdd(ua *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error)
+	RoleGrantPermission(ua *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error)
+	RoleGet(ua *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error)
+	RoleRevokePermission(ua *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error)
+	RoleDelete(ua *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error)
+	UserList(ua *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error)
+	RoleList(ua *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error)
+}
+
+type checkReqFunc func(mvcc.ReadView, *pb.RequestOp) error
+
+type applierV3backend struct {
+	s *EtcdServer
+
+	checkPut   checkReqFunc
+	checkRange checkReqFunc
+}
+
+func (s *EtcdServer) newApplierV3Backend() applierV3 {
+	base := &applierV3backend{s: s}
+	base.checkPut = func(rv mvcc.ReadView, req *pb.RequestOp) error {
+		return base.checkRequestPut(rv, req)
+	}
+	base.checkRange = func(rv mvcc.ReadView, req *pb.RequestOp) error {
+		return base.checkRequestRange(rv, req)
+	}
+	return base
+}
+
+func (s *EtcdServer) newApplierV3() applierV3 {
+	return newAuthApplierV3(
+		s.AuthStore(),
+		newQuotaApplierV3(s, s.newApplierV3Backend()),
+		s.lessor,
+	)
+}
+
+func (a *applierV3backend) Apply(r *pb.InternalRaftRequest) *applyResult {
+	ar := &applyResult{}
+	defer func(start time.Time) {
+		warnOfExpensiveRequest(start, &pb.InternalRaftStringer{Request: r}, ar.resp, ar.err)
+	}(time.Now())
+
+	// call into a.s.applyV3.F instead of a.F so upper appliers can check individual calls
+	switch {
+	case r.Range != nil:
+		ar.resp, ar.err = a.s.applyV3.Range(nil, r.Range)
+	case r.Put != nil:
+		ar.resp, ar.err = a.s.applyV3.Put(nil, r.Put)
+	case r.DeleteRange != nil:
+		ar.resp, ar.err = a.s.applyV3.DeleteRange(nil, r.DeleteRange)
+	case r.Txn != nil:
+		ar.resp, ar.err = a.s.applyV3.Txn(r.Txn)
+	case r.Compaction != nil:
+		ar.resp, ar.physc, ar.err = a.s.applyV3.Compaction(r.Compaction)
+	case r.LeaseGrant != nil:
+		ar.resp, ar.err = a.s.applyV3.LeaseGrant(r.LeaseGrant)
+	case r.LeaseRevoke != nil:
+		ar.resp, ar.err = a.s.applyV3.LeaseRevoke(r.LeaseRevoke)
+	case r.Alarm != nil:
+		ar.resp, ar.err = a.s.applyV3.Alarm(r.Alarm)
+	case r.Authenticate != nil:
+		ar.resp, ar.err = a.s.applyV3.Authenticate(r.Authenticate)
+	case r.AuthEnable != nil:
+		ar.resp, ar.err = a.s.applyV3.AuthEnable()
+	case r.AuthDisable != nil:
+		ar.resp, ar.err = a.s.applyV3.AuthDisable()
+	case r.AuthUserAdd != nil:
+		ar.resp, ar.err = a.s.applyV3.UserAdd(r.AuthUserAdd)
+	case r.AuthUserDelete != nil:
+		ar.resp, ar.err = a.s.applyV3.UserDelete(r.AuthUserDelete)
+	case r.AuthUserChangePassword != nil:
+		ar.resp, ar.err = a.s.applyV3.UserChangePassword(r.AuthUserChangePassword)
+	case r.AuthUserGrantRole != nil:
+		ar.resp, ar.err = a.s.applyV3.UserGrantRole(r.AuthUserGrantRole)
+	case r.AuthUserGet != nil:
+		ar.resp, ar.err = a.s.applyV3.UserGet(r.AuthUserGet)
+	case r.AuthUserRevokeRole != nil:
+		ar.resp, ar.err = a.s.applyV3.UserRevokeRole(r.AuthUserRevokeRole)
+	case r.AuthRoleAdd != nil:
+		ar.resp, ar.err = a.s.applyV3.RoleAdd(r.AuthRoleAdd)
+	case r.AuthRoleGrantPermission != nil:
+		ar.resp, ar.err = a.s.applyV3.RoleGrantPermission(r.AuthRoleGrantPermission)
+	case r.AuthRoleGet != nil:
+		ar.resp, ar.err = a.s.applyV3.RoleGet(r.AuthRoleGet)
+	case r.AuthRoleRevokePermission != nil:
+		ar.resp, ar.err = a.s.applyV3.RoleRevokePermission(r.AuthRoleRevokePermission)
+	case r.AuthRoleDelete != nil:
+		ar.resp, ar.err = a.s.applyV3.RoleDelete(r.AuthRoleDelete)
+	case r.AuthUserList != nil:
+		ar.resp, ar.err = a.s.applyV3.UserList(r.AuthUserList)
+	case r.AuthRoleList != nil:
+		ar.resp, ar.err = a.s.applyV3.RoleList(r.AuthRoleList)
+	default:
+		panic("not implemented")
+	}
+	return ar
+}
+
+func (a *applierV3backend) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (resp *pb.PutResponse, err error) {
+	resp = &pb.PutResponse{}
+	resp.Header = &pb.ResponseHeader{}
+
+	val, leaseID := p.Value, lease.LeaseID(p.Lease)
+	if txn == nil {
+		if leaseID != lease.NoLease {
+			if l := a.s.lessor.Lookup(leaseID); l == nil {
+				return nil, lease.ErrLeaseNotFound
+			}
+		}
+		txn = a.s.KV().Write()
+		defer txn.End()
+	}
+
+	var rr *mvcc.RangeResult
+	if p.IgnoreValue || p.IgnoreLease || p.PrevKv {
+		rr, err = txn.Range(p.Key, nil, mvcc.RangeOptions{})
+		if err != nil {
+			return nil, err
+		}
+	}
+	if p.IgnoreValue || p.IgnoreLease {
+		if rr == nil || len(rr.KVs) == 0 {
+			// ignore_{lease,value} flag expects previous key-value pair
+			return nil, ErrKeyNotFound
+		}
+	}
+	if p.IgnoreValue {
+		val = rr.KVs[0].Value
+	}
+	if p.IgnoreLease {
+		leaseID = lease.LeaseID(rr.KVs[0].Lease)
+	}
+	if p.PrevKv {
+		if rr != nil && len(rr.KVs) != 0 {
+			resp.PrevKv = &rr.KVs[0]
+		}
+	}
+
+	resp.Header.Revision = txn.Put(p.Key, val, leaseID)
+	return resp, nil
+}
+
+func (a *applierV3backend) DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
+	resp := &pb.DeleteRangeResponse{}
+	resp.Header = &pb.ResponseHeader{}
+	end := mkGteRange(dr.RangeEnd)
+
+	if txn == nil {
+		txn = a.s.kv.Write()
+		defer txn.End()
+	}
+
+	if dr.PrevKv {
+		rr, err := txn.Range(dr.Key, end, mvcc.RangeOptions{})
+		if err != nil {
+			return nil, err
+		}
+		if rr != nil {
+			resp.PrevKvs = make([]*mvccpb.KeyValue, len(rr.KVs))
+			for i := range rr.KVs {
+				resp.PrevKvs[i] = &rr.KVs[i]
+			}
+		}
+	}
+
+	resp.Deleted, resp.Header.Revision = txn.DeleteRange(dr.Key, end)
+	return resp, nil
+}
+
+func (a *applierV3backend) Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) {
+	resp := &pb.RangeResponse{}
+	resp.Header = &pb.ResponseHeader{}
+
+	if txn == nil {
+		txn = a.s.kv.Read()
+		defer txn.End()
+	}
+
+	limit := r.Limit
+	if r.SortOrder != pb.RangeRequest_NONE ||
+		r.MinModRevision != 0 || r.MaxModRevision != 0 ||
+		r.MinCreateRevision != 0 || r.MaxCreateRevision != 0 {
+		// fetch everything; sort and truncate afterwards
+		limit = 0
+	}
+	if limit > 0 {
+		// fetch one extra for 'more' flag
+		limit = limit + 1
+	}
+
+	ro := mvcc.RangeOptions{
+		Limit: limit,
+		Rev:   r.Revision,
+		Count: r.CountOnly,
+	}
+
+	rr, err := txn.Range(r.Key, mkGteRange(r.RangeEnd), ro)
+	if err != nil {
+		return nil, err
+	}
+
+	if r.MaxModRevision != 0 {
+		f := func(kv *mvccpb.KeyValue) bool { return kv.ModRevision > r.MaxModRevision }
+		pruneKVs(rr, f)
+	}
+	if r.MinModRevision != 0 {
+		f := func(kv *mvccpb.KeyValue) bool { return kv.ModRevision < r.MinModRevision }
+		pruneKVs(rr, f)
+	}
+	if r.MaxCreateRevision != 0 {
+		f := func(kv *mvccpb.KeyValue) bool { return kv.CreateRevision > r.MaxCreateRevision }
+		pruneKVs(rr, f)
+	}
+	if r.MinCreateRevision != 0 {
+		f := func(kv *mvccpb.KeyValue) bool { return kv.CreateRevision < r.MinCreateRevision }
+		pruneKVs(rr, f)
+	}
+
+	sortOrder := r.SortOrder
+	if r.SortTarget != pb.RangeRequest_KEY && sortOrder == pb.RangeRequest_NONE {
+		// Since the current mvcc.Range implementation returns results
+		// sorted by keys in lexicographically ascending order,
+		// sort ASCEND by default only when the target is not 'KEY'.
+		sortOrder = pb.RangeRequest_ASCEND
+	}
+	if sortOrder != pb.RangeRequest_NONE {
+		var sorter sort.Interface
+		switch {
+		case r.SortTarget == pb.RangeRequest_KEY:
+			sorter = &kvSortByKey{&kvSort{rr.KVs}}
+		case r.SortTarget == pb.RangeRequest_VERSION:
+			sorter = &kvSortByVersion{&kvSort{rr.KVs}}
+		case r.SortTarget == pb.RangeRequest_CREATE:
+			sorter = &kvSortByCreate{&kvSort{rr.KVs}}
+		case r.SortTarget == pb.RangeRequest_MOD:
+			sorter = &kvSortByMod{&kvSort{rr.KVs}}
+		case r.SortTarget == pb.RangeRequest_VALUE:
+			sorter = &kvSortByValue{&kvSort{rr.KVs}}
+		}
+		switch {
+		case sortOrder == pb.RangeRequest_ASCEND:
+			sort.Sort(sorter)
+		case sortOrder == pb.RangeRequest_DESCEND:
+			sort.Sort(sort.Reverse(sorter))
+		}
+	}
+
+	if r.Limit > 0 && len(rr.KVs) > int(r.Limit) {
+		rr.KVs = rr.KVs[:r.Limit]
+		resp.More = true
+	}
+
+	resp.Header.Revision = rr.Rev
+	resp.Count = int64(rr.Count)
+	resp.Kvs = make([]*mvccpb.KeyValue, len(rr.KVs))
+	for i := range rr.KVs {
+		if r.KeysOnly {
+			rr.KVs[i].Value = nil
+		}
+		resp.Kvs[i] = &rr.KVs[i]
+	}
+	return resp, nil
+}
+
+func (a *applierV3backend) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) {
+	isWrite := !isTxnReadonly(rt)
+	txn := mvcc.NewReadOnlyTxnWrite(a.s.KV().Read())
+
+	txnPath := compareToPath(txn, rt)
+	if isWrite {
+		if _, err := checkRequests(txn, rt, txnPath, a.checkPut); err != nil {
+			txn.End()
+			return nil, err
+		}
+	}
+	if _, err := checkRequests(txn, rt, txnPath, a.checkRange); err != nil {
+		txn.End()
+		return nil, err
+	}
+
+	txnResp, _ := newTxnResp(rt, txnPath)
+
+	// When executing mutable txn ops, etcd must hold the txn lock so
+	// readers do not see any intermediate results. Since writes are
+	// serialized on the raft loop, the revision in the read view will
+	// be the revision of the write txn.
+	if isWrite {
+		txn.End()
+		txn = a.s.KV().Write()
+	}
+	a.applyTxn(txn, rt, txnPath, txnResp)
+	rev := txn.Rev()
+	if len(txn.Changes()) != 0 {
+		rev++
+	}
+	txn.End()
+
+	txnResp.Header.Revision = rev
+	return txnResp, nil
+}
+
+// newTxnResp allocates a txn response for a txn request given a path.
+func newTxnResp(rt *pb.TxnRequest, txnPath []bool) (txnResp *pb.TxnResponse, txnCount int) {
+	reqs := rt.Success
+	if !txnPath[0] {
+		reqs = rt.Failure
+	}
+	resps := make([]*pb.ResponseOp, len(reqs))
+	txnResp = &pb.TxnResponse{
+		Responses: resps,
+		Succeeded: txnPath[0],
+		Header:    &pb.ResponseHeader{},
+	}
+	for i, req := range reqs {
+		switch tv := req.Request.(type) {
+		case *pb.RequestOp_RequestRange:
+			resps[i] = &pb.ResponseOp{Response: &pb.ResponseOp_ResponseRange{}}
+		case *pb.RequestOp_RequestPut:
+			resps[i] = &pb.ResponseOp{Response: &pb.ResponseOp_ResponsePut{}}
+		case *pb.RequestOp_RequestDeleteRange:
+			resps[i] = &pb.ResponseOp{Response: &pb.ResponseOp_ResponseDeleteRange{}}
+		case *pb.RequestOp_RequestTxn:
+			resp, txns := newTxnResp(tv.RequestTxn, txnPath[1:])
+			resps[i] = &pb.ResponseOp{Response: &pb.ResponseOp_ResponseTxn{ResponseTxn: resp}}
+			txnPath = txnPath[1+txns:]
+			txnCount += txns + 1
+		default:
+		}
+	}
+	return txnResp, txnCount
+}
+
+func compareToPath(rv mvcc.ReadView, rt *pb.TxnRequest) []bool {
+	txnPath := make([]bool, 1)
+	ops := rt.Success
+	if txnPath[0] = applyCompares(rv, rt.Compare); !txnPath[0] {
+		ops = rt.Failure
+	}
+	for _, op := range ops {
+		tv, ok := op.Request.(*pb.RequestOp_RequestTxn)
+		if !ok || tv.RequestTxn == nil {
+			continue
+		}
+		txnPath = append(txnPath, compareToPath(rv, tv.RequestTxn)...)
+	}
+	return txnPath
+}
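+
+// For illustration only (not part of upstream etcd): txnPath is a pre-order
+// encoding of the branch taken by the txn and every nested txn. For an outer
+// txn whose compares hold and whose success branch contains one nested txn
+// whose compares fail:
+//
+//	// compareToPath(rv, rt) == []bool{true, false}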
+
+func applyCompares(rv mvcc.ReadView, cmps []*pb.Compare) bool {
+	for _, c := range cmps {
+		if !applyCompare(rv, c) {
+			return false
+		}
+	}
+	return true
+}
+
+// applyCompare applies the compare request.
+// If the comparison succeeds, it returns true. Otherwise, returns false.
+func applyCompare(rv mvcc.ReadView, c *pb.Compare) bool {
+	// TODO: possible optimizations
+	// * chunk reads for large ranges to conserve memory
+	// * rewrite rules for common patterns:
+	//	ex. "[a, b) createrev > 0" => "limit 1 /\ kvs > 0"
+	// * caching
+	rr, err := rv.Range(c.Key, mkGteRange(c.RangeEnd), mvcc.RangeOptions{})
+	if err != nil {
+		return false
+	}
+	if len(rr.KVs) == 0 {
+		if c.Target == pb.Compare_VALUE {
+			// Always fail if comparing a value on a key or keys that don't exist;
+			// nil == empty string in gRPC, so there is no way to represent a missing value.
+			return false
+		}
+		return compareKV(c, mvccpb.KeyValue{})
+	}
+	for _, kv := range rr.KVs {
+		if !compareKV(c, kv) {
+			return false
+		}
+	}
+	return true
+}
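+
+// Illustrative sketch (not part of the upstream source): a compare with a
+// RangeEnd must hold for every key in the range, e.g.
+//
+//	c := &pb.Compare{
+//		Key: []byte("a"), RangeEnd: []byte("b"),
+//		Target: pb.Compare_VERSION, Result: pb.Compare_GREATER,
+//		TargetUnion: &pb.Compare_Version{Version: 1},
+//	}
+//	ok := applyCompare(rv, c) // true only if every key in [a, b) has Version > 1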
+
+func compareKV(c *pb.Compare, ckv mvccpb.KeyValue) bool {
+	var result int
+	rev := int64(0)
+	switch c.Target {
+	case pb.Compare_VALUE:
+		v := []byte{}
+		if tv, _ := c.TargetUnion.(*pb.Compare_Value); tv != nil {
+			v = tv.Value
+		}
+		result = bytes.Compare(ckv.Value, v)
+	case pb.Compare_CREATE:
+		if tv, _ := c.TargetUnion.(*pb.Compare_CreateRevision); tv != nil {
+			rev = tv.CreateRevision
+		}
+		result = compareInt64(ckv.CreateRevision, rev)
+	case pb.Compare_MOD:
+		if tv, _ := c.TargetUnion.(*pb.Compare_ModRevision); tv != nil {
+			rev = tv.ModRevision
+		}
+		result = compareInt64(ckv.ModRevision, rev)
+	case pb.Compare_VERSION:
+		if tv, _ := c.TargetUnion.(*pb.Compare_Version); tv != nil {
+			rev = tv.Version
+		}
+		result = compareInt64(ckv.Version, rev)
+	case pb.Compare_LEASE:
+		if tv, _ := c.TargetUnion.(*pb.Compare_Lease); tv != nil {
+			rev = tv.Lease
+		}
+		result = compareInt64(ckv.Lease, rev)
+	}
+	switch c.Result {
+	case pb.Compare_EQUAL:
+		return result == 0
+	case pb.Compare_NOT_EQUAL:
+		return result != 0
+	case pb.Compare_GREATER:
+		return result > 0
+	case pb.Compare_LESS:
+		return result < 0
+	}
+	return true
+}
+
+func (a *applierV3backend) applyTxn(txn mvcc.TxnWrite, rt *pb.TxnRequest, txnPath []bool, tresp *pb.TxnResponse) (txns int) {
+	reqs := rt.Success
+	if !txnPath[0] {
+		reqs = rt.Failure
+	}
+	for i, req := range reqs {
+		respi := tresp.Responses[i].Response
+		switch tv := req.Request.(type) {
+		case *pb.RequestOp_RequestRange:
+			resp, err := a.Range(txn, tv.RequestRange)
+			if err != nil {
+				plog.Panicf("unexpected error during txn: %v", err)
+			}
+			respi.(*pb.ResponseOp_ResponseRange).ResponseRange = resp
+		case *pb.RequestOp_RequestPut:
+			resp, err := a.Put(txn, tv.RequestPut)
+			if err != nil {
+				plog.Panicf("unexpected error during txn: %v", err)
+			}
+			respi.(*pb.ResponseOp_ResponsePut).ResponsePut = resp
+		case *pb.RequestOp_RequestDeleteRange:
+			resp, err := a.DeleteRange(txn, tv.RequestDeleteRange)
+			if err != nil {
+				plog.Panicf("unexpected error during txn: %v", err)
+			}
+			respi.(*pb.ResponseOp_ResponseDeleteRange).ResponseDeleteRange = resp
+		case *pb.RequestOp_RequestTxn:
+			resp := respi.(*pb.ResponseOp_ResponseTxn).ResponseTxn
+			applyTxns := a.applyTxn(txn, tv.RequestTxn, txnPath[1:], resp)
+			txns += applyTxns + 1
+			txnPath = txnPath[applyTxns+1:]
+		default:
+			// empty union
+		}
+	}
+	return txns
+}
+
+func (a *applierV3backend) Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, error) {
+	resp := &pb.CompactionResponse{}
+	resp.Header = &pb.ResponseHeader{}
+	ch, err := a.s.KV().Compact(compaction.Revision)
+	if err != nil {
+		return nil, ch, err
+	}
+	// Get the current revision; which key to get is not important.
+	rr, _ := a.s.KV().Range([]byte("compaction"), nil, mvcc.RangeOptions{})
+	resp.Header.Revision = rr.Rev
+	return resp, ch, err
+}
+
+func (a *applierV3backend) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
+	l, err := a.s.lessor.Grant(lease.LeaseID(lc.ID), lc.TTL)
+	resp := &pb.LeaseGrantResponse{}
+	if err == nil {
+		resp.ID = int64(l.ID)
+		resp.TTL = l.TTL()
+		resp.Header = newHeader(a.s)
+	}
+	return resp, err
+}
+
+func (a *applierV3backend) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
+	err := a.s.lessor.Revoke(lease.LeaseID(lc.ID))
+	return &pb.LeaseRevokeResponse{Header: newHeader(a.s)}, err
+}
+
+func (a *applierV3backend) Alarm(ar *pb.AlarmRequest) (*pb.AlarmResponse, error) {
+	resp := &pb.AlarmResponse{}
+	oldCount := len(a.s.alarmStore.Get(ar.Alarm))
+
+	switch ar.Action {
+	case pb.AlarmRequest_GET:
+		resp.Alarms = a.s.alarmStore.Get(ar.Alarm)
+	case pb.AlarmRequest_ACTIVATE:
+		m := a.s.alarmStore.Activate(types.ID(ar.MemberID), ar.Alarm)
+		if m == nil {
+			break
+		}
+		resp.Alarms = append(resp.Alarms, m)
+		activated := oldCount == 0 && len(a.s.alarmStore.Get(m.Alarm)) == 1
+		if !activated {
+			break
+		}
+
+		plog.Warningf("alarm %v raised by peer %s", m.Alarm, types.ID(m.MemberID))
+		switch m.Alarm {
+		case pb.AlarmType_CORRUPT:
+			a.s.applyV3 = newApplierV3Corrupt(a)
+		case pb.AlarmType_NOSPACE:
+			a.s.applyV3 = newApplierV3Capped(a)
+		default:
+			plog.Errorf("unimplemented alarm activation (%+v)", m)
+		}
+	case pb.AlarmRequest_DEACTIVATE:
+		m := a.s.alarmStore.Deactivate(types.ID(ar.MemberID), ar.Alarm)
+		if m == nil {
+			break
+		}
+		resp.Alarms = append(resp.Alarms, m)
+		deactivated := oldCount > 0 && len(a.s.alarmStore.Get(ar.Alarm)) == 0
+		if !deactivated {
+			break
+		}
+
+		switch m.Alarm {
+		case pb.AlarmType_NOSPACE, pb.AlarmType_CORRUPT:
+			// TODO: check kv hash before deactivating CORRUPT?
+			plog.Infof("alarm disarmed %+v", ar)
+			a.s.applyV3 = a.s.newApplierV3()
+		default:
+			plog.Errorf("unimplemented alarm deactivation (%+v)", m)
+		}
+	default:
+		return nil, nil
+	}
+	return resp, nil
+}
+
+type applierV3Capped struct {
+	applierV3
+	q backendQuota
+}
+
+// newApplierV3Capped creates an applyV3 that will reject Puts and transactions
+// with Puts so that the number of keys in the store is capped.
+func newApplierV3Capped(base applierV3) applierV3 { return &applierV3Capped{applierV3: base} }
+
+func (a *applierV3Capped) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error) {
+	return nil, ErrNoSpace
+}
+
+func (a *applierV3Capped) Txn(r *pb.TxnRequest) (*pb.TxnResponse, error) {
+	if a.q.Cost(r) > 0 {
+		return nil, ErrNoSpace
+	}
+	return a.applierV3.Txn(r)
+}
+
+func (a *applierV3Capped) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
+	return nil, ErrNoSpace
+}
+
+func (a *applierV3backend) AuthEnable() (*pb.AuthEnableResponse, error) {
+	err := a.s.AuthStore().AuthEnable()
+	if err != nil {
+		return nil, err
+	}
+	return &pb.AuthEnableResponse{Header: newHeader(a.s)}, nil
+}
+
+func (a *applierV3backend) AuthDisable() (*pb.AuthDisableResponse, error) {
+	a.s.AuthStore().AuthDisable()
+	return &pb.AuthDisableResponse{Header: newHeader(a.s)}, nil
+}
+
+func (a *applierV3backend) Authenticate(r *pb.InternalAuthenticateRequest) (*pb.AuthenticateResponse, error) {
+	ctx := context.WithValue(context.WithValue(a.s.ctx, auth.AuthenticateParamIndex{}, a.s.consistIndex.ConsistentIndex()), auth.AuthenticateParamSimpleTokenPrefix{}, r.SimpleToken)
+	resp, err := a.s.AuthStore().Authenticate(ctx, r.Name, r.Password)
+	if resp != nil {
+		resp.Header = newHeader(a.s)
+	}
+	return resp, err
+}
+
+func (a *applierV3backend) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) {
+	resp, err := a.s.AuthStore().UserAdd(r)
+	if resp != nil {
+		resp.Header = newHeader(a.s)
+	}
+	return resp, err
+}
+
+func (a *applierV3backend) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) {
+	resp, err := a.s.AuthStore().UserDelete(r)
+	if resp != nil {
+		resp.Header = newHeader(a.s)
+	}
+	return resp, err
+}
+
+func (a *applierV3backend) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) {
+	resp, err := a.s.AuthStore().UserChangePassword(r)
+	if resp != nil {
+		resp.Header = newHeader(a.s)
+	}
+	return resp, err
+}
+
+func (a *applierV3backend) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) {
+	resp, err := a.s.AuthStore().UserGrantRole(r)
+	if resp != nil {
+		resp.Header = newHeader(a.s)
+	}
+	return resp, err
+}
+
+func (a *applierV3backend) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
+	resp, err := a.s.AuthStore().UserGet(r)
+	if resp != nil {
+		resp.Header = newHeader(a.s)
+	}
+	return resp, err
+}
+
+func (a *applierV3backend) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) {
+	resp, err := a.s.AuthStore().UserRevokeRole(r)
+	if resp != nil {
+		resp.Header = newHeader(a.s)
+	}
+	return resp, err
+}
+
+func (a *applierV3backend) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) {
+	resp, err := a.s.AuthStore().RoleAdd(r)
+	if resp != nil {
+		resp.Header = newHeader(a.s)
+	}
+	return resp, err
+}
+
+func (a *applierV3backend) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) {
+	resp, err := a.s.AuthStore().RoleGrantPermission(r)
+	if resp != nil {
+		resp.Header = newHeader(a.s)
+	}
+	return resp, err
+}
+
+func (a *applierV3backend) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
+	resp, err := a.s.AuthStore().RoleGet(r)
+	if resp != nil {
+		resp.Header = newHeader(a.s)
+	}
+	return resp, err
+}
+
+func (a *applierV3backend) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) {
+	resp, err := a.s.AuthStore().RoleRevokePermission(r)
+	if resp != nil {
+		resp.Header = newHeader(a.s)
+	}
+	return resp, err
+}
+
+func (a *applierV3backend) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) {
+	resp, err := a.s.AuthStore().RoleDelete(r)
+	if resp != nil {
+		resp.Header = newHeader(a.s)
+	}
+	return resp, err
+}
+
+func (a *applierV3backend) UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) {
+	resp, err := a.s.AuthStore().UserList(r)
+	if resp != nil {
+		resp.Header = newHeader(a.s)
+	}
+	return resp, err
+}
+
+func (a *applierV3backend) RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) {
+	resp, err := a.s.AuthStore().RoleList(r)
+	if resp != nil {
+		resp.Header = newHeader(a.s)
+	}
+	return resp, err
+}
+
+type quotaApplierV3 struct {
+	applierV3
+	q Quota
+}
+
+func newQuotaApplierV3(s *EtcdServer, app applierV3) applierV3 {
+	return &quotaApplierV3{app, NewBackendQuota(s)}
+}
+
+func (a *quotaApplierV3) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error) {
+	ok := a.q.Available(p)
+	resp, err := a.applierV3.Put(txn, p)
+	if err == nil && !ok {
+		err = ErrNoSpace
+	}
+	return resp, err
+}
+
+func (a *quotaApplierV3) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) {
+	ok := a.q.Available(rt)
+	resp, err := a.applierV3.Txn(rt)
+	if err == nil && !ok {
+		err = ErrNoSpace
+	}
+	return resp, err
+}
+
+func (a *quotaApplierV3) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
+	ok := a.q.Available(lc)
+	resp, err := a.applierV3.LeaseGrant(lc)
+	if err == nil && !ok {
+		err = ErrNoSpace
+	}
+	return resp, err
+}
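+
+// Illustrative note (not part of the upstream source): the quota wrappers
+// check Available before applying but still apply the request, so the write
+// that crosses the quota lands; returning ErrNoSpace afterwards is what
+// triggers the NOSPACE alarm that blocks subsequent writes.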
+
+type kvSort struct{ kvs []mvccpb.KeyValue }
+
+func (s *kvSort) Swap(i, j int) {
+	s.kvs[i], s.kvs[j] = s.kvs[j], s.kvs[i]
+}
+func (s *kvSort) Len() int { return len(s.kvs) }
+
+type kvSortByKey struct{ *kvSort }
+
+func (s *kvSortByKey) Less(i, j int) bool {
+	return bytes.Compare(s.kvs[i].Key, s.kvs[j].Key) < 0
+}
+
+type kvSortByVersion struct{ *kvSort }
+
+func (s *kvSortByVersion) Less(i, j int) bool {
+	return s.kvs[i].Version < s.kvs[j].Version
+}
+
+type kvSortByCreate struct{ *kvSort }
+
+func (s *kvSortByCreate) Less(i, j int) bool {
+	return s.kvs[i].CreateRevision < s.kvs[j].CreateRevision
+}
+
+type kvSortByMod struct{ *kvSort }
+
+func (s *kvSortByMod) Less(i, j int) bool {
+	return s.kvs[i].ModRevision < s.kvs[j].ModRevision
+}
+
+type kvSortByValue struct{ *kvSort }
+
+func (s *kvSortByValue) Less(i, j int) bool {
+	return bytes.Compare(s.kvs[i].Value, s.kvs[j].Value) < 0
+}
+
+func checkRequests(rv mvcc.ReadView, rt *pb.TxnRequest, txnPath []bool, f checkReqFunc) (int, error) {
+	txnCount := 0
+	reqs := rt.Success
+	if !txnPath[0] {
+		reqs = rt.Failure
+	}
+	for _, req := range reqs {
+		if tv, ok := req.Request.(*pb.RequestOp_RequestTxn); ok && tv.RequestTxn != nil {
+			txns, err := checkRequests(rv, tv.RequestTxn, txnPath[1:], f)
+			if err != nil {
+				return 0, err
+			}
+			txnCount += txns + 1
+			txnPath = txnPath[txns+1:]
+			continue
+		}
+		if err := f(rv, req); err != nil {
+			return 0, err
+		}
+	}
+	return txnCount, nil
+}
+
+func (a *applierV3backend) checkRequestPut(rv mvcc.ReadView, reqOp *pb.RequestOp) error {
+	tv, ok := reqOp.Request.(*pb.RequestOp_RequestPut)
+	if !ok || tv.RequestPut == nil {
+		return nil
+	}
+	req := tv.RequestPut
+	if req.IgnoreValue || req.IgnoreLease {
+		// expects previous key-value, error if not exist
+		rr, err := rv.Range(req.Key, nil, mvcc.RangeOptions{})
+		if err != nil {
+			return err
+		}
+		if rr == nil || len(rr.KVs) == 0 {
+			return ErrKeyNotFound
+		}
+	}
+	if lease.LeaseID(req.Lease) != lease.NoLease {
+		if l := a.s.lessor.Lookup(lease.LeaseID(req.Lease)); l == nil {
+			return lease.ErrLeaseNotFound
+		}
+	}
+	return nil
+}
+
+func (a *applierV3backend) checkRequestRange(rv mvcc.ReadView, reqOp *pb.RequestOp) error {
+	tv, ok := reqOp.Request.(*pb.RequestOp_RequestRange)
+	if !ok || tv.RequestRange == nil {
+		return nil
+	}
+	req := tv.RequestRange
+	switch {
+	case req.Revision == 0:
+		return nil
+	case req.Revision > rv.Rev():
+		return mvcc.ErrFutureRev
+	case req.Revision < rv.FirstRev():
+		return mvcc.ErrCompacted
+	}
+	return nil
+}
+
+func compareInt64(a, b int64) int {
+	switch {
+	case a < b:
+		return -1
+	case a > b:
+		return 1
+	default:
+		return 0
+	}
+}
+
+// mkGteRange determines if the range end is a >= range. This works around grpc
+// sending empty byte strings as nil; >= is encoded in the range end as '\0'.
+// If it is a GTE range, then []byte{} is returned to indicate the empty byte
+// string (vs nil being no byte string).
+func mkGteRange(rangeEnd []byte) []byte {
+	if len(rangeEnd) == 1 && rangeEnd[0] == 0 {
+		return []byte{}
+	}
+	return rangeEnd
+}
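+
+// Illustrative sketch (not part of the upstream source): a client asking for
+// all keys >= "foo" sends RangeEnd = "\x00", which mkGteRange maps to the
+// empty (non-nil) byte string:
+//
+//	end := mkGteRange([]byte{0}) // []byte{}: range with no upper bound
+//	end = mkGteRange(nil)        // nil: single-key lookup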
+
+func noSideEffect(r *pb.InternalRaftRequest) bool {
+	return r.Range != nil || r.AuthUserGet != nil || r.AuthRoleGet != nil
+}
+
+func removeNeedlessRangeReqs(txn *pb.TxnRequest) {
+	f := func(ops []*pb.RequestOp) []*pb.RequestOp {
+		j := 0
+		for i := 0; i < len(ops); i++ {
+			if _, ok := ops[i].Request.(*pb.RequestOp_RequestRange); ok {
+				continue
+			}
+			ops[j] = ops[i]
+			j++
+		}
+
+		return ops[:j]
+	}
+
+	txn.Success = f(txn.Success)
+	txn.Failure = f(txn.Failure)
+}
+
+func pruneKVs(rr *mvcc.RangeResult, isPrunable func(*mvccpb.KeyValue) bool) {
+	j := 0
+	for i := range rr.KVs {
+		rr.KVs[j] = rr.KVs[i]
+		if !isPrunable(&rr.KVs[i]) {
+			j++
+		}
+	}
+	rr.KVs = rr.KVs[:j]
+}
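+
+// Illustrative sketch (not part of the upstream source): pruneKVs filters a
+// range result in place; e.g. dropping entries at or below a hypothetical
+// revision bound:
+//
+//	pruneKVs(rr, func(kv *mvccpb.KeyValue) bool { return kv.ModRevision <= bound })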
+
+func newHeader(s *EtcdServer) *pb.ResponseHeader {
+	return &pb.ResponseHeader{
+		ClusterId: uint64(s.Cluster().ID()),
+		MemberId:  uint64(s.ID()),
+		Revision:  s.KV().Rev(),
+		RaftTerm:  s.Term(),
+	}
+}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/apply_auth.go b/vendor/github.com/coreos/etcd/etcdserver/apply_auth.go
new file mode 100644
index 0000000..ec93914
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/apply_auth.go
@@ -0,0 +1,245 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+	"sync"
+
+	"github.com/coreos/etcd/auth"
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+	"github.com/coreos/etcd/lease"
+	"github.com/coreos/etcd/mvcc"
+)
+
+type authApplierV3 struct {
+	applierV3
+	as     auth.AuthStore
+	lessor lease.Lessor
+
+	// mu serializes Apply so that authInfo isn't corrupted between requests
+	// and so that serialized requests don't leak data from TOCTOU errors
+	mu sync.Mutex
+
+	authInfo auth.AuthInfo
+}
+
+func newAuthApplierV3(as auth.AuthStore, base applierV3, lessor lease.Lessor) *authApplierV3 {
+	return &authApplierV3{applierV3: base, as: as, lessor: lessor}
+}
+
+func (aa *authApplierV3) Apply(r *pb.InternalRaftRequest) *applyResult {
+	aa.mu.Lock()
+	defer aa.mu.Unlock()
+	if r.Header != nil {
+		// backward compatible with pre-3.0 releases, in which internalRaftRequest
+		// did not have the header field
+		aa.authInfo.Username = r.Header.Username
+		aa.authInfo.Revision = r.Header.AuthRevision
+	}
+	if needAdminPermission(r) {
+		if err := aa.as.IsAdminPermitted(&aa.authInfo); err != nil {
+			aa.authInfo.Username = ""
+			aa.authInfo.Revision = 0
+			return &applyResult{err: err}
+		}
+	}
+	ret := aa.applierV3.Apply(r)
+	aa.authInfo.Username = ""
+	aa.authInfo.Revision = 0
+	return ret
+}
+
+func (aa *authApplierV3) Put(txn mvcc.TxnWrite, r *pb.PutRequest) (*pb.PutResponse, error) {
+	if err := aa.as.IsPutPermitted(&aa.authInfo, r.Key); err != nil {
+		return nil, err
+	}
+
+	if err := aa.checkLeasePuts(lease.LeaseID(r.Lease)); err != nil {
+		// The specified lease is already attached with a key that cannot
+		// be written by this user. It means the user cannot revoke the
+		// lease so attaching the lease to the newly written key should
+		// be forbidden.
+		return nil, err
+	}
+
+	if r.PrevKv {
+		err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, nil)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return aa.applierV3.Put(txn, r)
+}
+
+func (aa *authApplierV3) Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) {
+	if err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, r.RangeEnd); err != nil {
+		return nil, err
+	}
+	return aa.applierV3.Range(txn, r)
+}
+
+func (aa *authApplierV3) DeleteRange(txn mvcc.TxnWrite, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
+	if err := aa.as.IsDeleteRangePermitted(&aa.authInfo, r.Key, r.RangeEnd); err != nil {
+		return nil, err
+	}
+	if r.PrevKv {
+		err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, r.RangeEnd)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return aa.applierV3.DeleteRange(txn, r)
+}
+
+func checkTxnReqsPermission(as auth.AuthStore, ai *auth.AuthInfo, reqs []*pb.RequestOp) error {
+	for _, requ := range reqs {
+		switch tv := requ.Request.(type) {
+		case *pb.RequestOp_RequestRange:
+			if tv.RequestRange == nil {
+				continue
+			}
+
+			if err := as.IsRangePermitted(ai, tv.RequestRange.Key, tv.RequestRange.RangeEnd); err != nil {
+				return err
+			}
+
+		case *pb.RequestOp_RequestPut:
+			if tv.RequestPut == nil {
+				continue
+			}
+
+			if err := as.IsPutPermitted(ai, tv.RequestPut.Key); err != nil {
+				return err
+			}
+
+		case *pb.RequestOp_RequestDeleteRange:
+			if tv.RequestDeleteRange == nil {
+				continue
+			}
+
+			if tv.RequestDeleteRange.PrevKv {
+				err := as.IsRangePermitted(ai, tv.RequestDeleteRange.Key, tv.RequestDeleteRange.RangeEnd)
+				if err != nil {
+					return err
+				}
+			}
+
+			err := as.IsDeleteRangePermitted(ai, tv.RequestDeleteRange.Key, tv.RequestDeleteRange.RangeEnd)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func checkTxnAuth(as auth.AuthStore, ai *auth.AuthInfo, rt *pb.TxnRequest) error {
+	for _, c := range rt.Compare {
+		if err := as.IsRangePermitted(ai, c.Key, c.RangeEnd); err != nil {
+			return err
+		}
+	}
+	if err := checkTxnReqsPermission(as, ai, rt.Success); err != nil {
+		return err
+	}
+	if err := checkTxnReqsPermission(as, ai, rt.Failure); err != nil {
+		return err
+	}
+	return nil
+}
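+
+// Illustrative note (not part of the upstream source): compares are gated by
+// IsRangePermitted because evaluating a compare reads the key, and both the
+// Success and Failure branches are checked up front since either may execute.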
+
+func (aa *authApplierV3) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) {
+	if err := checkTxnAuth(aa.as, &aa.authInfo, rt); err != nil {
+		return nil, err
+	}
+	return aa.applierV3.Txn(rt)
+}
+
+func (aa *authApplierV3) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
+	if err := aa.checkLeasePuts(lease.LeaseID(lc.ID)); err != nil {
+		return nil, err
+	}
+	return aa.applierV3.LeaseRevoke(lc)
+}
+
+func (aa *authApplierV3) checkLeasePuts(leaseID lease.LeaseID) error {
+	lease := aa.lessor.Lookup(leaseID)
+	if lease != nil {
+		for _, key := range lease.Keys() {
+			if err := aa.as.IsPutPermitted(&aa.authInfo, []byte(key)); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func (aa *authApplierV3) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
+	err := aa.as.IsAdminPermitted(&aa.authInfo)
+	if err != nil && r.Name != aa.authInfo.Username {
+		aa.authInfo.Username = ""
+		aa.authInfo.Revision = 0
+		return &pb.AuthUserGetResponse{}, err
+	}
+
+	return aa.applierV3.UserGet(r)
+}
+
+func (aa *authApplierV3) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
+	err := aa.as.IsAdminPermitted(&aa.authInfo)
+	if err != nil && !aa.as.HasRole(aa.authInfo.Username, r.Role) {
+		aa.authInfo.Username = ""
+		aa.authInfo.Revision = 0
+		return &pb.AuthRoleGetResponse{}, err
+	}
+
+	return aa.applierV3.RoleGet(r)
+}
+
+func needAdminPermission(r *pb.InternalRaftRequest) bool {
+	switch {
+	case r.AuthEnable != nil:
+		return true
+	case r.AuthDisable != nil:
+		return true
+	case r.AuthUserAdd != nil:
+		return true
+	case r.AuthUserDelete != nil:
+		return true
+	case r.AuthUserChangePassword != nil:
+		return true
+	case r.AuthUserGrantRole != nil:
+		return true
+	case r.AuthUserRevokeRole != nil:
+		return true
+	case r.AuthRoleAdd != nil:
+		return true
+	case r.AuthRoleGrantPermission != nil:
+		return true
+	case r.AuthRoleRevokePermission != nil:
+		return true
+	case r.AuthRoleDelete != nil:
+		return true
+	case r.AuthUserList != nil:
+		return true
+	case r.AuthRoleList != nil:
+		return true
+	default:
+		return false
+	}
+}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/apply_v2.go b/vendor/github.com/coreos/etcd/etcdserver/apply_v2.go
new file mode 100644
index 0000000..a49b682
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/apply_v2.go
@@ -0,0 +1,140 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+	"encoding/json"
+	"path"
+	"time"
+
+	"github.com/coreos/etcd/etcdserver/api"
+	"github.com/coreos/etcd/etcdserver/membership"
+	"github.com/coreos/etcd/pkg/pbutil"
+	"github.com/coreos/etcd/store"
+	"github.com/coreos/go-semver/semver"
+)
+
+// ApplierV2 is the interface for processing V2 raft messages
+type ApplierV2 interface {
+	Delete(r *RequestV2) Response
+	Post(r *RequestV2) Response
+	Put(r *RequestV2) Response
+	QGet(r *RequestV2) Response
+	Sync(r *RequestV2) Response
+}
+
+func NewApplierV2(s store.Store, c *membership.RaftCluster) ApplierV2 {
+	return &applierV2store{store: s, cluster: c}
+}
+
+type applierV2store struct {
+	store   store.Store
+	cluster *membership.RaftCluster
+}
+
+func (a *applierV2store) Delete(r *RequestV2) Response {
+	switch {
+	case r.PrevIndex > 0 || r.PrevValue != "":
+		return toResponse(a.store.CompareAndDelete(r.Path, r.PrevValue, r.PrevIndex))
+	default:
+		return toResponse(a.store.Delete(r.Path, r.Dir, r.Recursive))
+	}
+}
+
+func (a *applierV2store) Post(r *RequestV2) Response {
+	return toResponse(a.store.Create(r.Path, r.Dir, r.Val, true, r.TTLOptions()))
+}
+
+func (a *applierV2store) Put(r *RequestV2) Response {
+	ttlOptions := r.TTLOptions()
+	exists, existsSet := pbutil.GetBool(r.PrevExist)
+	switch {
+	case existsSet:
+		if exists {
+			if r.PrevIndex == 0 && r.PrevValue == "" {
+				return toResponse(a.store.Update(r.Path, r.Val, ttlOptions))
+			}
+			return toResponse(a.store.CompareAndSwap(r.Path, r.PrevValue, r.PrevIndex, r.Val, ttlOptions))
+		}
+		return toResponse(a.store.Create(r.Path, r.Dir, r.Val, false, ttlOptions))
+	case r.PrevIndex > 0 || r.PrevValue != "":
+		return toResponse(a.store.CompareAndSwap(r.Path, r.PrevValue, r.PrevIndex, r.Val, ttlOptions))
+	default:
+		if storeMemberAttributeRegexp.MatchString(r.Path) {
+			id := membership.MustParseMemberIDFromKey(path.Dir(r.Path))
+			var attr membership.Attributes
+			if err := json.Unmarshal([]byte(r.Val), &attr); err != nil {
+				plog.Panicf("unmarshal %s should never fail: %v", r.Val, err)
+			}
+			if a.cluster != nil {
+				a.cluster.UpdateAttributes(id, attr)
+			}
+			// return an empty response since there is no consumer.
+			return Response{}
+		}
+		if r.Path == membership.StoreClusterVersionKey() {
+			if a.cluster != nil {
+				a.cluster.SetVersion(semver.Must(semver.NewVersion(r.Val)), api.UpdateCapability)
+			}
+			// return an empty response since there is no consumer.
+			return Response{}
+		}
+		return toResponse(a.store.Set(r.Path, r.Dir, r.Val, ttlOptions))
+	}
+}
+
+func (a *applierV2store) QGet(r *RequestV2) Response {
+	return toResponse(a.store.Get(r.Path, r.Recursive, r.Sorted))
+}
+
+func (a *applierV2store) Sync(r *RequestV2) Response {
+	a.store.DeleteExpiredKeys(time.Unix(0, r.Time))
+	return Response{}
+}
+
+// applyV2Request interprets r as a call to store.X and returns a Response interpreted
+// from store.Event
+func (s *EtcdServer) applyV2Request(r *RequestV2) Response {
+	defer warnOfExpensiveRequest(time.Now(), r, nil, nil)
+
+	switch r.Method {
+	case "POST":
+		return s.applyV2.Post(r)
+	case "PUT":
+		return s.applyV2.Put(r)
+	case "DELETE":
+		return s.applyV2.Delete(r)
+	case "QGET":
+		return s.applyV2.QGet(r)
+	case "SYNC":
+		return s.applyV2.Sync(r)
+	default:
+		// This should never be reached, but just in case:
+		return Response{Err: ErrUnknownMethod}
+	}
+}
+
+func (r *RequestV2) TTLOptions() store.TTLOptionSet {
+	refresh, _ := pbutil.GetBool(r.Refresh)
+	ttlOptions := store.TTLOptionSet{Refresh: refresh}
+	if r.Expiration != 0 {
+		ttlOptions.ExpireTime = time.Unix(0, r.Expiration)
+	}
+	return ttlOptions
+}
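+
+// Illustrative sketch (not part of the upstream source): a refresh request
+// with an absolute expiration converts as
+//
+//	yes := true
+//	r := &RequestV2{Refresh: &yes, Expiration: ts} // ts: assumed ns timestamp
+//	opts := r.TTLOptions()                         // Refresh: true, ExpireTime: time.Unix(0, ts)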
+
+func toResponse(ev *store.Event, err error) Response {
+	return Response{Event: ev, Err: err}
+}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/auth/auth.go b/vendor/github.com/coreos/etcd/etcdserver/auth/auth.go
new file mode 100644
index 0000000..8991675
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/auth/auth.go
@@ -0,0 +1,648 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package auth implements etcd authentication.
+package auth
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"path"
+	"reflect"
+	"sort"
+	"strings"
+	"time"
+
+	etcderr "github.com/coreos/etcd/error"
+	"github.com/coreos/etcd/etcdserver"
+	"github.com/coreos/etcd/etcdserver/etcdserverpb"
+	"github.com/coreos/etcd/pkg/types"
+	"github.com/coreos/pkg/capnslog"
+
+	"golang.org/x/crypto/bcrypt"
+)
+
+const (
+	// StorePermsPrefix is the internal prefix of the storage layer dedicated to storing user data.
+	StorePermsPrefix = "/2"
+
+	// RootRoleName is the name of the ROOT role, with privileges to manage the cluster.
+	RootRoleName = "root"
+
+	// GuestRoleName is the name of the role that defines the privileges of an unauthenticated user.
+	GuestRoleName = "guest"
+)
+
+var (
+	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/auth")
+)
+
+var rootRole = Role{
+	Role: RootRoleName,
+	Permissions: Permissions{
+		KV: RWPermission{
+			Read:  []string{"/*"},
+			Write: []string{"/*"},
+		},
+	},
+}
+
+var guestRole = Role{
+	Role: GuestRoleName,
+	Permissions: Permissions{
+		KV: RWPermission{
+			Read:  []string{"/*"},
+			Write: []string{"/*"},
+		},
+	},
+}
+
+type doer interface {
+	Do(context.Context, etcdserverpb.Request) (etcdserver.Response, error)
+}
+
+type Store interface {
+	AllUsers() ([]string, error)
+	GetUser(name string) (User, error)
+	CreateOrUpdateUser(user User) (out User, created bool, err error)
+	CreateUser(user User) (User, error)
+	DeleteUser(name string) error
+	UpdateUser(user User) (User, error)
+	AllRoles() ([]string, error)
+	GetRole(name string) (Role, error)
+	CreateRole(role Role) error
+	DeleteRole(name string) error
+	UpdateRole(role Role) (Role, error)
+	AuthEnabled() bool
+	EnableAuth() error
+	DisableAuth() error
+	PasswordStore
+}
+
+type PasswordStore interface {
+	CheckPassword(user User, password string) bool
+	HashPassword(password string) (string, error)
+}
+
+type store struct {
+	server      doer
+	timeout     time.Duration
+	ensuredOnce bool
+
+	PasswordStore
+}
+
+type User struct {
+	User     string   `json:"user"`
+	Password string   `json:"password,omitempty"`
+	Roles    []string `json:"roles"`
+	Grant    []string `json:"grant,omitempty"`
+	Revoke   []string `json:"revoke,omitempty"`
+}
+
+type Role struct {
+	Role        string       `json:"role"`
+	Permissions Permissions  `json:"permissions"`
+	Grant       *Permissions `json:"grant,omitempty"`
+	Revoke      *Permissions `json:"revoke,omitempty"`
+}
+
+type Permissions struct {
+	KV RWPermission `json:"kv"`
+}
+
+func (p *Permissions) IsEmpty() bool {
+	return p == nil || (len(p.KV.Read) == 0 && len(p.KV.Write) == 0)
+}
+
+type RWPermission struct {
+	Read  []string `json:"read"`
+	Write []string `json:"write"`
+}
+
+type Error struct {
+	Status int
+	Errmsg string
+}
+
+func (ae Error) Error() string   { return ae.Errmsg }
+func (ae Error) HTTPStatus() int { return ae.Status }
+
+func authErr(hs int, s string, v ...interface{}) Error {
+	return Error{Status: hs, Errmsg: fmt.Sprintf("auth: "+s, v...)}
+}
+
+func NewStore(server doer, timeout time.Duration) Store {
+	s := &store{
+		server:        server,
+		timeout:       timeout,
+		PasswordStore: passwordStore{},
+	}
+	return s
+}
+
+// passwordStore implements PasswordStore using bcrypt to hash user passwords
+type passwordStore struct{}
+
+func (passwordStore) CheckPassword(user User, password string) bool {
+	err := bcrypt.CompareHashAndPassword([]byte(user.Password), []byte(password))
+	return err == nil
+}
+
+func (passwordStore) HashPassword(password string) (string, error) {
+	hash, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)
+	return string(hash), err
+}
+
+func (s *store) AllUsers() ([]string, error) {
+	resp, err := s.requestResource("/users/", false, false)
+	if err != nil {
+		if e, ok := err.(*etcderr.Error); ok {
+			if e.ErrorCode == etcderr.EcodeKeyNotFound {
+				return []string{}, nil
+			}
+		}
+		return nil, err
+	}
+	var nodes []string
+	for _, n := range resp.Event.Node.Nodes {
+		_, user := path.Split(n.Key)
+		nodes = append(nodes, user)
+	}
+	sort.Strings(nodes)
+	return nodes, nil
+}
+
+func (s *store) GetUser(name string) (User, error) { return s.getUser(name, false) }
+
+// CreateOrUpdateUser should only be used for creating a new user or when you are
+// not sure whether it is a create or an update. (When only a password is passed
+// in, we cannot tell whether it is an update or a create.)
+func (s *store) CreateOrUpdateUser(user User) (out User, created bool, err error) {
+	_, err = s.getUser(user.User, true)
+	if err == nil {
+		out, err = s.UpdateUser(user)
+		return out, false, err
+	}
+	u, err := s.CreateUser(user)
+	return u, true, err
+}
+
+func (s *store) CreateUser(user User) (User, error) {
+	// Attach root role to root user.
+	if user.User == "root" {
+		user = attachRootRole(user)
+	}
+	u, err := s.createUserInternal(user)
+	if err == nil {
+		plog.Noticef("created user %s", user.User)
+	}
+	return u, err
+}
+
+func (s *store) createUserInternal(user User) (User, error) {
+	if user.Password == "" {
+		return user, authErr(http.StatusBadRequest, "Cannot create user %s with an empty password", user.User)
+	}
+	hash, err := s.HashPassword(user.Password)
+	if err != nil {
+		return user, err
+	}
+	user.Password = hash
+
+	_, err = s.createResource("/users/"+user.User, user)
+	if err != nil {
+		if e, ok := err.(*etcderr.Error); ok {
+			if e.ErrorCode == etcderr.EcodeNodeExist {
+				return user, authErr(http.StatusConflict, "User %s already exists.", user.User)
+			}
+		}
+	}
+	return user, err
+}
+
+func (s *store) DeleteUser(name string) error {
+	if s.AuthEnabled() && name == "root" {
+		return authErr(http.StatusForbidden, "Cannot delete root user while auth is enabled.")
+	}
+	_, err := s.deleteResource("/users/" + name)
+	if err != nil {
+		if e, ok := err.(*etcderr.Error); ok {
+			if e.ErrorCode == etcderr.EcodeKeyNotFound {
+				return authErr(http.StatusNotFound, "User %s does not exist", name)
+			}
+		}
+		return err
+	}
+	plog.Noticef("deleted user %s", name)
+	return nil
+}
+
+func (s *store) UpdateUser(user User) (User, error) {
+	old, err := s.getUser(user.User, true)
+	if err != nil {
+		if e, ok := err.(*etcderr.Error); ok {
+			if e.ErrorCode == etcderr.EcodeKeyNotFound {
+				return user, authErr(http.StatusNotFound, "User %s doesn't exist.", user.User)
+			}
+		}
+		return old, err
+	}
+
+	newUser, err := old.merge(user, s.PasswordStore)
+	if err != nil {
+		return old, err
+	}
+	if reflect.DeepEqual(old, newUser) {
+		return old, authErr(http.StatusBadRequest, "User not updated. Use grant/revoke/password to update the user.")
+	}
+	_, err = s.updateResource("/users/"+user.User, newUser)
+	if err == nil {
+		plog.Noticef("updated user %s", user.User)
+	}
+	return newUser, err
+}
+
+func (s *store) AllRoles() ([]string, error) {
+	nodes := []string{RootRoleName}
+	resp, err := s.requestResource("/roles/", false, false)
+	if err != nil {
+		if e, ok := err.(*etcderr.Error); ok {
+			if e.ErrorCode == etcderr.EcodeKeyNotFound {
+				return nodes, nil
+			}
+		}
+		return nil, err
+	}
+	for _, n := range resp.Event.Node.Nodes {
+		_, role := path.Split(n.Key)
+		nodes = append(nodes, role)
+	}
+	sort.Strings(nodes)
+	return nodes, nil
+}
+
+func (s *store) GetRole(name string) (Role, error) { return s.getRole(name, false) }
+
+func (s *store) CreateRole(role Role) error {
+	if role.Role == RootRoleName {
+		return authErr(http.StatusForbidden, "Cannot modify role %s: is root role.", role.Role)
+	}
+	_, err := s.createResource("/roles/"+role.Role, role)
+	if err != nil {
+		if e, ok := err.(*etcderr.Error); ok {
+			if e.ErrorCode == etcderr.EcodeNodeExist {
+				return authErr(http.StatusConflict, "Role %s already exists.", role.Role)
+			}
+		}
+	}
+	if err == nil {
+		plog.Noticef("created new role %s", role.Role)
+	}
+	return err
+}
+
+func (s *store) DeleteRole(name string) error {
+	if name == RootRoleName {
+		return authErr(http.StatusForbidden, "Cannot modify role %s: is root role.", name)
+	}
+	_, err := s.deleteResource("/roles/" + name)
+	if err != nil {
+		if e, ok := err.(*etcderr.Error); ok {
+			if e.ErrorCode == etcderr.EcodeKeyNotFound {
+				return authErr(http.StatusNotFound, "Role %s doesn't exist.", name)
+			}
+		}
+	}
+	if err == nil {
+		plog.Noticef("deleted role %s", name)
+	}
+	return err
+}
+
+func (s *store) UpdateRole(role Role) (Role, error) {
+	if role.Role == RootRoleName {
+		return Role{}, authErr(http.StatusForbidden, "Cannot modify role %s: is root role.", role.Role)
+	}
+	old, err := s.getRole(role.Role, true)
+	if err != nil {
+		if e, ok := err.(*etcderr.Error); ok {
+			if e.ErrorCode == etcderr.EcodeKeyNotFound {
+				return role, authErr(http.StatusNotFound, "Role %s doesn't exist.", role.Role)
+			}
+		}
+		return old, err
+	}
+	newRole, err := old.merge(role)
+	if err != nil {
+		return old, err
+	}
+	if reflect.DeepEqual(old, newRole) {
+		return old, authErr(http.StatusBadRequest, "Role not updated. Use grant/revoke to update the role.")
+	}
+	_, err = s.updateResource("/roles/"+role.Role, newRole)
+	if err == nil {
+		plog.Noticef("updated role %s", role.Role)
+	}
+	return newRole, err
+}
+
+func (s *store) AuthEnabled() bool {
+	return s.detectAuth()
+}
+
+func (s *store) EnableAuth() error {
+	if s.AuthEnabled() {
+		return authErr(http.StatusConflict, "already enabled")
+	}
+
+	if _, err := s.getUser("root", true); err != nil {
+		return authErr(http.StatusConflict, "No root user available, please create one")
+	}
+	if _, err := s.getRole(GuestRoleName, true); err != nil {
+		plog.Printf("no guest role access found, creating default")
+		if err := s.CreateRole(guestRole); err != nil {
+			plog.Errorf("error creating guest role. aborting auth enable.")
+			return err
+		}
+	}
+
+	if err := s.enableAuth(); err != nil {
+		plog.Errorf("error enabling auth (%v)", err)
+		return err
+	}
+
+	plog.Noticef("auth: enabled auth")
+	return nil
+}
+
+func (s *store) DisableAuth() error {
+	if !s.AuthEnabled() {
+		return authErr(http.StatusConflict, "already disabled")
+	}
+
+	err := s.disableAuth()
+	if err == nil {
+		plog.Noticef("auth: disabled auth")
+	} else {
+		plog.Errorf("error disabling auth (%v)", err)
+	}
+	return err
+}
+
+// merge applies the properties of the passed-in User to the User on which it
+// is called and returns a new User with these modifications applied. Think of
+// all Users as immutable sets of data. Merge allows you to perform the set
+// operations (desired grants and revokes) atomically.
+func (ou User) merge(nu User, s PasswordStore) (User, error) {
+	var out User
+	if ou.User != nu.User {
+		return out, authErr(http.StatusConflict, "Merging user data with conflicting usernames: %s %s", ou.User, nu.User)
+	}
+	out.User = ou.User
+	if nu.Password != "" {
+		hash, err := s.HashPassword(nu.Password)
+		if err != nil {
+			return ou, err
+		}
+		out.Password = hash
+	} else {
+		out.Password = ou.Password
+	}
+	currentRoles := types.NewUnsafeSet(ou.Roles...)
+	for _, g := range nu.Grant {
+		if currentRoles.Contains(g) {
+			plog.Noticef("granting duplicate role %s for user %s", g, nu.User)
+			return User{}, authErr(http.StatusConflict, fmt.Sprintf("Granting duplicate role %s for user %s", g, nu.User))
+		}
+		currentRoles.Add(g)
+	}
+	for _, r := range nu.Revoke {
+		if !currentRoles.Contains(r) {
+			plog.Noticef("revoking ungranted role %s for user %s", r, nu.User)
+			return User{}, authErr(http.StatusConflict, fmt.Sprintf("Revoking ungranted role %s for user %s", r, nu.User))
+		}
+		currentRoles.Remove(r)
+	}
+	out.Roles = currentRoles.Values()
+	sort.Strings(out.Roles)
+	return out, nil
+}
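+
+// Illustrative sketch (not part of the upstream source): merging a grant into
+// a stored user, assuming stored = User{User: "u", Roles: []string{"r1"}} and
+// a PasswordStore s:
+//
+//	updated, err := stored.merge(User{User: "u", Grant: []string{"r2"}}, s)
+//	// updated.Roles == []string{"r1", "r2"}; granting "r1" again would error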
+
+// merge for a role works the same as User above -- atomic Role application to
+// each of the substructures.
+func (r Role) merge(n Role) (Role, error) {
+	var out Role
+	var err error
+	if r.Role != n.Role {
+		return out, authErr(http.StatusConflict, "Merging role with conflicting names: %s %s", r.Role, n.Role)
+	}
+	out.Role = r.Role
+	out.Permissions, err = r.Permissions.Grant(n.Grant)
+	if err != nil {
+		return out, err
+	}
+	out.Permissions, err = out.Permissions.Revoke(n.Revoke)
+	return out, err
+}
+
+func (r Role) HasKeyAccess(key string, write bool) bool {
+	if r.Role == RootRoleName {
+		return true
+	}
+	return r.Permissions.KV.HasAccess(key, write)
+}
+
+func (r Role) HasRecursiveAccess(key string, write bool) bool {
+	if r.Role == RootRoleName {
+		return true
+	}
+	return r.Permissions.KV.HasRecursiveAccess(key, write)
+}
+
+// Grant adds a set of permissions to the permission object on which it is called,
+// returning a new permission object.
+func (p Permissions) Grant(n *Permissions) (Permissions, error) {
+	var out Permissions
+	var err error
+	if n == nil {
+		return p, nil
+	}
+	out.KV, err = p.KV.Grant(n.KV)
+	return out, err
+}
+
+// Revoke removes a set of permissions from the permission object on which it is called,
+// returning a new permission object.
+func (p Permissions) Revoke(n *Permissions) (Permissions, error) {
+	var out Permissions
+	var err error
+	if n == nil {
+		return p, nil
+	}
+	out.KV, err = p.KV.Revoke(n.KV)
+	return out, err
+}
+
+// Grant adds a set of permissions to the permission object on which it is called,
+// returning a new permission object.
+func (rw RWPermission) Grant(n RWPermission) (RWPermission, error) {
+	var out RWPermission
+	currentRead := types.NewUnsafeSet(rw.Read...)
+	for _, r := range n.Read {
+		if currentRead.Contains(r) {
+			return out, authErr(http.StatusConflict, "Granting duplicate read permission %s", r)
+		}
+		currentRead.Add(r)
+	}
+	currentWrite := types.NewUnsafeSet(rw.Write...)
+	for _, w := range n.Write {
+		if currentWrite.Contains(w) {
+			return out, authErr(http.StatusConflict, "Granting duplicate write permission %s", w)
+		}
+		currentWrite.Add(w)
+	}
+	out.Read = currentRead.Values()
+	out.Write = currentWrite.Values()
+	sort.Strings(out.Read)
+	sort.Strings(out.Write)
+	return out, nil
+}
+
+// Revoke removes a set of permissions from the permission object on which it is called,
+// returning a new permission object.
+func (rw RWPermission) Revoke(n RWPermission) (RWPermission, error) {
+	var out RWPermission
+	currentRead := types.NewUnsafeSet(rw.Read...)
+	for _, r := range n.Read {
+		if !currentRead.Contains(r) {
+			plog.Noticef("revoking ungranted read permission %s", r)
+			continue
+		}
+		currentRead.Remove(r)
+	}
+	currentWrite := types.NewUnsafeSet(rw.Write...)
+	for _, w := range n.Write {
+		if !currentWrite.Contains(w) {
+			plog.Noticef("revoking ungranted write permission %s", w)
+			continue
+		}
+		currentWrite.Remove(w)
+	}
+	out.Read = currentRead.Values()
+	out.Write = currentWrite.Values()
+	sort.Strings(out.Read)
+	sort.Strings(out.Write)
+	return out, nil
+}
+
+func (rw RWPermission) HasAccess(key string, write bool) bool {
+	var list []string
+	if write {
+		list = rw.Write
+	} else {
+		list = rw.Read
+	}
+	for _, pat := range list {
+		match, err := simpleMatch(pat, key)
+		if err == nil && match {
+			return true
+		}
+	}
+	return false
+}
+
+func (rw RWPermission) HasRecursiveAccess(key string, write bool) bool {
+	list := rw.Read
+	if write {
+		list = rw.Write
+	}
+	for _, pat := range list {
+		match, err := prefixMatch(pat, key)
+		if err == nil && match {
+			return true
+		}
+	}
+	return false
+}
+
+func simpleMatch(pattern string, key string) (match bool, err error) {
+	if pattern[len(pattern)-1] == '*' {
+		return strings.HasPrefix(key, pattern[:len(pattern)-1]), nil
+	}
+	return key == pattern, nil
+}
+
+func prefixMatch(pattern string, key string) (match bool, err error) {
+	if pattern[len(pattern)-1] != '*' {
+		return false, nil
+	}
+	return strings.HasPrefix(key, pattern[:len(pattern)-1]), nil
+}
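+
+// Illustrative sketch (not part of the upstream source): the pattern forms
+// accepted above:
+//
+//	simpleMatch("/foo/*", "/foo/bar") // true: trailing '*' is a prefix wildcard
+//	simpleMatch("/foo", "/foo")       // true: exact match otherwise
+//	prefixMatch("/foo/*", "/foo/bar") // true: recursive access requires a '*' pattern
+//	prefixMatch("/foo", "/foo")       // false: no wildcard, no recursive access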
+
+func attachRootRole(u User) User {
+	inRoles := false
+	for _, r := range u.Roles {
+		if r == RootRoleName {
+			inRoles = true
+			break
+		}
+	}
+	if !inRoles {
+		u.Roles = append(u.Roles, RootRoleName)
+	}
+	return u
+}
+
+func (s *store) getUser(name string, quorum bool) (User, error) {
+	resp, err := s.requestResource("/users/"+name, false, quorum)
+	if err != nil {
+		if e, ok := err.(*etcderr.Error); ok {
+			if e.ErrorCode == etcderr.EcodeKeyNotFound {
+				return User{}, authErr(http.StatusNotFound, "User %s does not exist.", name)
+			}
+		}
+		return User{}, err
+	}
+	var u User
+	err = json.Unmarshal([]byte(*resp.Event.Node.Value), &u)
+	if err != nil {
+		return u, err
+	}
+	// Attach root role to root user.
+	if u.User == "root" {
+		u = attachRootRole(u)
+	}
+	return u, nil
+}
+
+func (s *store) getRole(name string, quorum bool) (Role, error) {
+	if name == RootRoleName {
+		return rootRole, nil
+	}
+	resp, err := s.requestResource("/roles/"+name, false, quorum)
+	if err != nil {
+		if e, ok := err.(*etcderr.Error); ok {
+			if e.ErrorCode == etcderr.EcodeKeyNotFound {
+				return Role{}, authErr(http.StatusNotFound, "Role %s does not exist.", name)
+			}
+		}
+		return Role{}, err
+	}
+	var r Role
+	err = json.Unmarshal([]byte(*resp.Event.Node.Value), &r)
+	return r, err
+}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/auth/auth_requests.go b/vendor/github.com/coreos/etcd/etcdserver/auth/auth_requests.go
new file mode 100644
index 0000000..2464828
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/auth/auth_requests.go
@@ -0,0 +1,166 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package auth
+
+import (
+	"context"
+	"encoding/json"
+	"path"
+
+	etcderr "github.com/coreos/etcd/error"
+	"github.com/coreos/etcd/etcdserver"
+	"github.com/coreos/etcd/etcdserver/etcdserverpb"
+)
+
+func (s *store) ensureAuthDirectories() error {
+	if s.ensuredOnce {
+		return nil
+	}
+	for _, res := range []string{StorePermsPrefix, StorePermsPrefix + "/users/", StorePermsPrefix + "/roles/"} {
+		ctx, cancel := context.WithTimeout(context.Background(), s.timeout)
+		defer cancel()
+		pe := false
+		rr := etcdserverpb.Request{
+			Method:    "PUT",
+			Path:      res,
+			Dir:       true,
+			PrevExist: &pe,
+		}
+		_, err := s.server.Do(ctx, rr)
+		if err != nil {
+			if e, ok := err.(*etcderr.Error); ok {
+				if e.ErrorCode == etcderr.EcodeNodeExist {
+					continue
+				}
+			}
+			plog.Errorf("failed to create auth directories in the store (%v)", err)
+			return err
+		}
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), s.timeout)
+	defer cancel()
+	pe := false
+	rr := etcdserverpb.Request{
+		Method:    "PUT",
+		Path:      StorePermsPrefix + "/enabled",
+		Val:       "false",
+		PrevExist: &pe,
+	}
+	_, err := s.server.Do(ctx, rr)
+	if err != nil {
+		if e, ok := err.(*etcderr.Error); ok {
+			if e.ErrorCode == etcderr.EcodeNodeExist {
+				s.ensuredOnce = true
+				return nil
+			}
+		}
+		return err
+	}
+	s.ensuredOnce = true
+	return nil
+}
+
+func (s *store) enableAuth() error {
+	_, err := s.updateResource("/enabled", true)
+	return err
+}
+func (s *store) disableAuth() error {
+	_, err := s.updateResource("/enabled", false)
+	return err
+}
+
+func (s *store) detectAuth() bool {
+	if s.server == nil {
+		return false
+	}
+	value, err := s.requestResource("/enabled", false, false)
+	if err != nil {
+		if e, ok := err.(*etcderr.Error); ok {
+			if e.ErrorCode == etcderr.EcodeKeyNotFound {
+				return false
+			}
+		}
+		plog.Errorf("failed to detect auth settings (%s)", err)
+		return false
+	}
+
+	var u bool
+	err = json.Unmarshal([]byte(*value.Event.Node.Value), &u)
+	if err != nil {
+		plog.Errorf("internal bookkeeping value for enabled isn't valid JSON (%v)", err)
+		return false
+	}
+	return u
+}
+
+func (s *store) requestResource(res string, dir, quorum bool) (etcdserver.Response, error) {
+	ctx, cancel := context.WithTimeout(context.Background(), s.timeout)
+	defer cancel()
+	p := path.Join(StorePermsPrefix, res)
+	method := "GET"
+	if quorum {
+		method = "QGET"
+	}
+	rr := etcdserverpb.Request{
+		Method: method,
+		Path:   p,
+		Dir:    dir,
+	}
+	return s.server.Do(ctx, rr)
+}
+
+func (s *store) updateResource(res string, value interface{}) (etcdserver.Response, error) {
+	return s.setResource(res, value, true)
+}
+func (s *store) createResource(res string, value interface{}) (etcdserver.Response, error) {
+	return s.setResource(res, value, false)
+}
+func (s *store) setResource(res string, value interface{}, prevexist bool) (etcdserver.Response, error) {
+	err := s.ensureAuthDirectories()
+	if err != nil {
+		return etcdserver.Response{}, err
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), s.timeout)
+	defer cancel()
+	data, err := json.Marshal(value)
+	if err != nil {
+		return etcdserver.Response{}, err
+	}
+	p := path.Join(StorePermsPrefix, res)
+	rr := etcdserverpb.Request{
+		Method:    "PUT",
+		Path:      p,
+		Val:       string(data),
+		PrevExist: &prevexist,
+	}
+	return s.server.Do(ctx, rr)
+}
+
+func (s *store) deleteResource(res string) (etcdserver.Response, error) {
+	err := s.ensureAuthDirectories()
+	if err != nil {
+		return etcdserver.Response{}, err
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), s.timeout)
+	defer cancel()
+	pex := true
+	p := path.Join(StorePermsPrefix, res)
+	rr := etcdserverpb.Request{
+		Method:    "DELETE",
+		Path:      p,
+		PrevExist: &pex,
+	}
+	return s.server.Do(ctx, rr)
+}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/backend.go b/vendor/github.com/coreos/etcd/etcdserver/backend.go
new file mode 100644
index 0000000..647773d
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/backend.go
@@ -0,0 +1,81 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+	"fmt"
+	"os"
+	"time"
+
+	"github.com/coreos/etcd/lease"
+	"github.com/coreos/etcd/mvcc"
+	"github.com/coreos/etcd/mvcc/backend"
+	"github.com/coreos/etcd/raft/raftpb"
+	"github.com/coreos/etcd/snap"
+)
+
+func newBackend(cfg ServerConfig) backend.Backend {
+	bcfg := backend.DefaultBackendConfig()
+	bcfg.Path = cfg.backendPath()
+	if cfg.QuotaBackendBytes > 0 && cfg.QuotaBackendBytes != DefaultQuotaBytes {
+		// permit 10% excess over quota for disarm
+		bcfg.MmapSize = uint64(cfg.QuotaBackendBytes + cfg.QuotaBackendBytes/10)
+	}
+	return backend.New(bcfg)
+}
+
+// openSnapshotBackend renames a snapshot db to the current etcd db and opens it.
+func openSnapshotBackend(cfg ServerConfig, ss *snap.Snapshotter, snapshot raftpb.Snapshot) (backend.Backend, error) {
+	snapPath, err := ss.DBFilePath(snapshot.Metadata.Index)
+	if err != nil {
+		return nil, fmt.Errorf("database snapshot file path error: %v", err)
+	}
+	if err := os.Rename(snapPath, cfg.backendPath()); err != nil {
+		return nil, fmt.Errorf("rename snapshot file error: %v", err)
+	}
+	return openBackend(cfg), nil
+}
+
+// openBackend returns a backend using the current etcd db.
+func openBackend(cfg ServerConfig) backend.Backend {
+	fn := cfg.backendPath()
+	beOpened := make(chan backend.Backend)
+	go func() {
+		beOpened <- newBackend(cfg)
+	}()
+	select {
+	case be := <-beOpened:
+		return be
+	case <-time.After(10 * time.Second):
+		plog.Warningf("another etcd process is using %q and holds the file lock, or loading backend file is taking >10 seconds", fn)
+		plog.Warningf("waiting for it to exit before starting...")
+	}
+	return <-beOpened
+}
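+
+// Illustrative note (not part of the upstream source): openBackend never gives
+// up; after the warning it blocks on <-beOpened again, so a held file lock
+// stalls startup rather than failing it.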
+
+// recoverSnapshotBackend recovers the DB from a snapshot in case etcd crashes
+// before updating the backend db after persisting raft snapshot to disk,
+// violating the invariant snapshot.Metadata.Index < db.consistentIndex. In this
+// case, replace the db with the snapshot db sent by the leader.
+func recoverSnapshotBackend(cfg ServerConfig, oldbe backend.Backend, snapshot raftpb.Snapshot) (backend.Backend, error) {
+	var cIndex consistentIndex
+	kv := mvcc.New(oldbe, &lease.FakeLessor{}, &cIndex)
+	defer kv.Close()
+	if snapshot.Metadata.Index <= kv.ConsistentIndex() {
+		return oldbe, nil
+	}
+	oldbe.Close()
+	return openSnapshotBackend(cfg, snap.New(cfg.SnapDir()), snapshot)
+}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/cluster_util.go b/vendor/github.com/coreos/etcd/etcdserver/cluster_util.go
new file mode 100644
index 0000000..f44862a
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/cluster_util.go
@@ -0,0 +1,258 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"sort"
+	"time"
+
+	"github.com/coreos/etcd/etcdserver/membership"
+	"github.com/coreos/etcd/pkg/types"
+	"github.com/coreos/etcd/version"
+	"github.com/coreos/go-semver/semver"
+)
+
+// isMemberBootstrapped tries to check if the given member has been bootstrapped
+// in the given cluster.
+func isMemberBootstrapped(cl *membership.RaftCluster, member string, rt http.RoundTripper, timeout time.Duration) bool {
+	rcl, err := getClusterFromRemotePeers(getRemotePeerURLs(cl, member), timeout, false, rt)
+	if err != nil {
+		return false
+	}
+	id := cl.MemberByName(member).ID
+	m := rcl.Member(id)
+	if m == nil {
+		return false
+	}
+	return len(m.ClientURLs) > 0
+}
+
+// GetClusterFromRemotePeers takes a set of URLs representing etcd peers, and
+// attempts to construct a Cluster by accessing the members endpoint on one of
+// these URLs. The first URL to provide a response is used. If no URLs provide
+// a response, or a Cluster cannot be successfully created from a received
+// response, an error is returned.
+// Each request has a 10-second timeout. Because the upper limit of TTL is 5s,
+// 10 seconds is enough for building a connection and finishing the request.
+func GetClusterFromRemotePeers(urls []string, rt http.RoundTripper) (*membership.RaftCluster, error) {
+	return getClusterFromRemotePeers(urls, 10*time.Second, true, rt)
+}
+
+// If logerr is true, it prints out more error messages.
+func getClusterFromRemotePeers(urls []string, timeout time.Duration, logerr bool, rt http.RoundTripper) (*membership.RaftCluster, error) {
+	cc := &http.Client{
+		Transport: rt,
+		Timeout:   timeout,
+	}
+	for _, u := range urls {
+		resp, err := cc.Get(u + "/members")
+		if err != nil {
+			if logerr {
+				plog.Warningf("could not get cluster response from %s: %v", u, err)
+			}
+			continue
+		}
+		b, err := ioutil.ReadAll(resp.Body)
+		resp.Body.Close()
+		if err != nil {
+			if logerr {
+				plog.Warningf("could not read the body of cluster response: %v", err)
+			}
+			continue
+		}
+		var membs []*membership.Member
+		if err = json.Unmarshal(b, &membs); err != nil {
+			if logerr {
+				plog.Warningf("could not unmarshal cluster response: %v", err)
+			}
+			continue
+		}
+		id, err := types.IDFromString(resp.Header.Get("X-Etcd-Cluster-ID"))
+		if err != nil {
+			if logerr {
+				plog.Warningf("could not parse the cluster ID from cluster res: %v", err)
+			}
+			continue
+		}
+
+		// check the length of membership members
+		// if the membership members are present then prepare and return raft cluster
+		// if membership members are not present then the raft cluster formed will be
+		// an invalid empty cluster hence return failed to get raft cluster member(s) from the given urls error
+		if len(membs) > 0 {
+			return membership.NewClusterFromMembers("", id, membs), nil
+		}
+
+		return nil, fmt.Errorf("failed to get raft cluster member(s) from the given urls.")
+	}
+	return nil, fmt.Errorf("could not retrieve cluster information from the given urls")
+}
+
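+// A hypothetical usage sketch (not part of upstream etcd) of the flow above:
+// a caller obtains a cluster view from a set of known peer URLs via
+// GetClusterFromRemotePeers. The endpoints are placeholders.
+func exampleFetchCluster() (*membership.RaftCluster, error) {
+	// assumed peer endpoints; in practice these come from the configured peer URLs
+	peers := []string{"http://10.0.0.1:2380", "http://10.0.0.2:2380"}
+	// http.DefaultTransport is used for simplicity; a production caller would
+	// pass a transport configured with peer TLS settings instead
+	return GetClusterFromRemotePeers(peers, http.DefaultTransport)
+}
+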
+// getRemotePeerURLs returns peer urls of remote members in the cluster. The
+// returned list is sorted in ascending lexicographical order.
+func getRemotePeerURLs(cl *membership.RaftCluster, local string) []string {
+	us := make([]string, 0)
+	for _, m := range cl.Members() {
+		if m.Name == local {
+			continue
+		}
+		us = append(us, m.PeerURLs...)
+	}
+	sort.Strings(us)
+	return us
+}
+
+// getVersions returns the versions of the members in the given cluster.
+// The key of the returned map is the member's ID. The value of the returned map
+// is the member's version info, including the server and cluster versions.
+// If it fails to get the version of a member, the corresponding value will be nil.
+func getVersions(cl *membership.RaftCluster, local types.ID, rt http.RoundTripper) map[string]*version.Versions {
+	members := cl.Members()
+	vers := make(map[string]*version.Versions)
+	for _, m := range members {
+		if m.ID == local {
+			cv := "not_decided"
+			if cl.Version() != nil {
+				cv = cl.Version().String()
+			}
+			vers[m.ID.String()] = &version.Versions{Server: version.Version, Cluster: cv}
+			continue
+		}
+		ver, err := getVersion(m, rt)
+		if err != nil {
+			plog.Warningf("cannot get the version of member %s (%v)", m.ID, err)
+			vers[m.ID.String()] = nil
+		} else {
+			vers[m.ID.String()] = ver
+		}
+	}
+	return vers
+}
+
+// decideClusterVersion decides the cluster version based on the versions map.
+// The returned version is the min server version in the map, or nil if the min
+// version is unknown.
+func decideClusterVersion(vers map[string]*version.Versions) *semver.Version {
+	var cv *semver.Version
+	lv := semver.Must(semver.NewVersion(version.Version))
+
+	for mid, ver := range vers {
+		if ver == nil {
+			return nil
+		}
+		v, err := semver.NewVersion(ver.Server)
+		if err != nil {
+			plog.Errorf("cannot understand the version of member %s (%v)", mid, err)
+			return nil
+		}
+		if lv.LessThan(*v) {
+			plog.Warningf("the local etcd version %s is not up-to-date", lv.String())
+			plog.Warningf("member %s has a higher version %s", mid, ver.Server)
+		}
+		if cv == nil {
+			cv = v
+		} else if v.LessThan(*cv) {
+			cv = v
+		}
+	}
+	return cv
+}
+
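+// A hypothetical worked example (not upstream code) of the min-version rule
+// above: with members reporting server versions 3.3.0 and 3.2.5, the decided
+// cluster version is 3.2.5; if any member's version were unknown (nil), the
+// result would be nil. The member names and versions are placeholders.
+func exampleDecideClusterVersion() *semver.Version {
+	vers := map[string]*version.Versions{
+		"member-a": {Server: "3.3.0"},
+		"member-b": {Server: "3.2.5"},
+	}
+	return decideClusterVersion(vers) // -> 3.2.5
+}
+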
+// isCompatibleWithCluster returns true if the local member has a version compatible with
+// the currently running cluster.
+// The version is considered compatible when at least one of the other members in the cluster has a
+// cluster version in the range [MinClusterVersion, Version] and no known member has a cluster version
+// out of that range.
+// We set this rule because another member might be offline when the local member joins.
+func isCompatibleWithCluster(cl *membership.RaftCluster, local types.ID, rt http.RoundTripper) bool {
+	vers := getVersions(cl, local, rt)
+	minV := semver.Must(semver.NewVersion(version.MinClusterVersion))
+	maxV := semver.Must(semver.NewVersion(version.Version))
+	maxV = &semver.Version{
+		Major: maxV.Major,
+		Minor: maxV.Minor,
+	}
+
+	return isCompatibleWithVers(vers, local, minV, maxV)
+}
+
+func isCompatibleWithVers(vers map[string]*version.Versions, local types.ID, minV, maxV *semver.Version) bool {
+	var ok bool
+	for id, v := range vers {
+		// ignore comparison with local version
+		if id == local.String() {
+			continue
+		}
+		if v == nil {
+			continue
+		}
+		clusterv, err := semver.NewVersion(v.Cluster)
+		if err != nil {
+			plog.Errorf("cannot understand the cluster version of member %s (%v)", id, err)
+			continue
+		}
+		if clusterv.LessThan(*minV) {
+			plog.Warningf("the running cluster version(%v) is lower than the minimal cluster version(%v) supported", clusterv.String(), minV.String())
+			return false
+		}
+		if maxV.LessThan(*clusterv) {
+			plog.Warningf("the running cluster version(%v) is higher than the maximum cluster version(%v) supported", clusterv.String(), maxV.String())
+			return false
+		}
+		ok = true
+	}
+	return ok
+}
+
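+// A hypothetical sketch (not upstream code) of the compatibility window
+// above: a local member with window [3.0.0, 3.3.0] is compatible with a peer
+// reporting cluster version 3.2.0, while a peer reporting 2.3.0 would fall
+// below the window and fail the check. The window bounds and peer versions
+// are illustrative values only.
+func exampleCompatibilityWindow(local types.ID) bool {
+	vers := map[string]*version.Versions{
+		"peer-1": {Server: "3.2.0", Cluster: "3.2.0"},
+	}
+	minV := semver.Must(semver.NewVersion("3.0.0")) // illustrative MinClusterVersion
+	maxV := semver.Must(semver.NewVersion("3.3.0")) // illustrative local Version
+	return isCompatibleWithVers(vers, local, minV, maxV) // -> true
+}
+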
+// getVersion returns the Versions of the given member via its
+// peerURLs. Returns the last error if it fails to get the version.
+func getVersion(m *membership.Member, rt http.RoundTripper) (*version.Versions, error) {
+	cc := &http.Client{
+		Transport: rt,
+	}
+	var (
+		err  error
+		resp *http.Response
+	)
+
+	for _, u := range m.PeerURLs {
+		resp, err = cc.Get(u + "/version")
+		if err != nil {
+			plog.Warningf("failed to reach the peerURL(%s) of member %s (%v)", u, m.ID, err)
+			continue
+		}
+		var b []byte
+		b, err = ioutil.ReadAll(resp.Body)
+		resp.Body.Close()
+		if err != nil {
+			plog.Warningf("failed to read out the response body from the peerURL(%s) of member %s (%v)", u, m.ID, err)
+			continue
+		}
+		var vers version.Versions
+		if err = json.Unmarshal(b, &vers); err != nil {
+			plog.Warningf("failed to unmarshal the response body got from the peerURL(%s) of member %s (%v)", u, m.ID, err)
+			continue
+		}
+		return &vers, nil
+	}
+	return nil, err
+}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/config.go b/vendor/github.com/coreos/etcd/etcdserver/config.go
new file mode 100644
index 0000000..295d952
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/config.go
@@ -0,0 +1,282 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+	"context"
+	"fmt"
+	"path/filepath"
+	"sort"
+	"strings"
+	"time"
+
+	"github.com/coreos/etcd/pkg/netutil"
+	"github.com/coreos/etcd/pkg/transport"
+	"github.com/coreos/etcd/pkg/types"
+)
+
+// ServerConfig holds the configuration of etcd as taken from the command line or discovery.
+type ServerConfig struct {
+	Name           string
+	DiscoveryURL   string
+	DiscoveryProxy string
+	ClientURLs     types.URLs
+	PeerURLs       types.URLs
+	DataDir        string
+	// DedicatedWALDir makes etcd write the WAL to this directory
+	// rather than to dataDir/member/wal.
+	DedicatedWALDir     string
+	SnapCount           uint64
+	MaxSnapFiles        uint
+	MaxWALFiles         uint
+	InitialPeerURLsMap  types.URLsMap
+	InitialClusterToken string
+	NewCluster          bool
+	ForceNewCluster     bool
+	PeerTLSInfo         transport.TLSInfo
+
+	TickMs        uint
+	ElectionTicks int
+
+	// If InitialElectionTickAdvance is true, the local member fast-forwards
+	// election ticks to speed up the "initial" leader election trigger. This
+	// benefits the case of larger election ticks. For instance, a cross-
+	// datacenter deployment may require a longer election timeout of 10 seconds.
+	// If true, the local node does not need to wait up to 10 seconds. Instead,
+	// it forwards its election ticks to, say, 8 seconds, leaving only 2 seconds
+	// before leader election.
+	//
+	// Major assumptions are that:
+	//  - cluster has no active leader thus advancing ticks enables faster
+	//    leader election, or
+	//  - cluster already has an established leader, and rejoining follower
+	//    is likely to receive heartbeats from the leader after tick advance
+	//    and before election timeout.
+	//
+	// However, when the network from the leader to a rejoining follower is
+	// congested and the follower does not receive a leader heartbeat within the
+	// remaining election ticks, a disruptive election has to happen, affecting
+	// cluster availability.
+	//
+	// Disabling this slows down the initial bootstrap process for cross-
+	// datacenter deployments. Make your own tradeoff by configuring
+	// --initial-election-tick-advance at the cost of a slow initial bootstrap.
+	//
+	// If single-node, it advances ticks regardless.
+	//
+	// See https://github.com/coreos/etcd/issues/9333 for more detail.
+	InitialElectionTickAdvance bool
+
+	BootstrapTimeout time.Duration
+
+	AutoCompactionRetention time.Duration
+	AutoCompactionMode      string
+	QuotaBackendBytes       int64
+	MaxTxnOps               uint
+
+	// MaxRequestBytes is the maximum request size to send over raft.
+	MaxRequestBytes uint
+
+	StrictReconfigCheck bool
+
+	// ClientCertAuthEnabled is true when the client cert has been signed by the client CA.
+	ClientCertAuthEnabled bool
+
+	AuthToken string
+
+	// InitialCorruptCheck is true to check data corruption on boot
+	// before serving any peer/client traffic.
+	InitialCorruptCheck bool
+	CorruptCheckTime    time.Duration
+
+	Debug bool
+}
+
+// VerifyBootstrap sanity-checks the initial config for the bootstrap case
+// and returns an error for things that should never happen.
+func (c *ServerConfig) VerifyBootstrap() error {
+	if err := c.hasLocalMember(); err != nil {
+		return err
+	}
+	if err := c.advertiseMatchesCluster(); err != nil {
+		return err
+	}
+	if checkDuplicateURL(c.InitialPeerURLsMap) {
+		return fmt.Errorf("initial cluster %s has duplicate url", c.InitialPeerURLsMap)
+	}
+	if c.InitialPeerURLsMap.String() == "" && c.DiscoveryURL == "" {
+		return fmt.Errorf("initial cluster unset and no discovery URL found")
+	}
+	return nil
+}
+
+// VerifyJoinExisting sanity-checks the initial config for the case of joining
+// an existing cluster, and returns an error for things that should never happen.
+func (c *ServerConfig) VerifyJoinExisting() error {
+	// The member has announced its peer urls to the cluster before starting; no need to
+	// set the configuration again.
+	if err := c.hasLocalMember(); err != nil {
+		return err
+	}
+	if checkDuplicateURL(c.InitialPeerURLsMap) {
+		return fmt.Errorf("initial cluster %s has duplicate url", c.InitialPeerURLsMap)
+	}
+	if c.DiscoveryURL != "" {
+		return fmt.Errorf("discovery URL should not be set when joining existing initial cluster")
+	}
+	return nil
+}
+
+// hasLocalMember checks that the cluster at least contains the local server.
+func (c *ServerConfig) hasLocalMember() error {
+	if urls := c.InitialPeerURLsMap[c.Name]; urls == nil {
+		return fmt.Errorf("couldn't find local name %q in the initial cluster configuration", c.Name)
+	}
+	return nil
+}
+
+// advertiseMatchesCluster confirms peer URLs match those in the cluster peer list.
+func (c *ServerConfig) advertiseMatchesCluster() error {
+	urls, apurls := c.InitialPeerURLsMap[c.Name], c.PeerURLs.StringSlice()
+	urls.Sort()
+	sort.Strings(apurls)
+	ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second)
+	defer cancel()
+	ok, err := netutil.URLStringsEqual(ctx, apurls, urls.StringSlice())
+	if ok {
+		return nil
+	}
+
+	initMap, apMap := make(map[string]struct{}), make(map[string]struct{})
+	for _, url := range c.PeerURLs {
+		apMap[url.String()] = struct{}{}
+	}
+	for _, url := range c.InitialPeerURLsMap[c.Name] {
+		initMap[url.String()] = struct{}{}
+	}
+
+	missing := []string{}
+	for url := range initMap {
+		if _, ok := apMap[url]; !ok {
+			missing = append(missing, url)
+		}
+	}
+	if len(missing) > 0 {
+		for i := range missing {
+			missing[i] = c.Name + "=" + missing[i]
+		}
+		mstr := strings.Join(missing, ",")
+		apStr := strings.Join(apurls, ",")
+		return fmt.Errorf("--initial-cluster has %s but missing from --initial-advertise-peer-urls=%s (%v)", mstr, apStr, err)
+	}
+
+	for url := range apMap {
+		if _, ok := initMap[url]; !ok {
+			missing = append(missing, url)
+		}
+	}
+	if len(missing) > 0 {
+		mstr := strings.Join(missing, ",")
+		umap := types.URLsMap(map[string]types.URLs{c.Name: c.PeerURLs})
+		return fmt.Errorf("--initial-advertise-peer-urls has %s but missing from --initial-cluster=%s", mstr, umap.String())
+	}
+
+	// resolved URLs from "--initial-advertise-peer-urls" and "--initial-cluster" did not match or failed
+	apStr := strings.Join(apurls, ",")
+	umap := types.URLsMap(map[string]types.URLs{c.Name: c.PeerURLs})
+	return fmt.Errorf("failed to resolve %s to match --initial-cluster=%s (%v)", apStr, umap.String(), err)
+}
+
+func (c *ServerConfig) MemberDir() string { return filepath.Join(c.DataDir, "member") }
+
+func (c *ServerConfig) WALDir() string {
+	if c.DedicatedWALDir != "" {
+		return c.DedicatedWALDir
+	}
+	return filepath.Join(c.MemberDir(), "wal")
+}
+
+func (c *ServerConfig) SnapDir() string { return filepath.Join(c.MemberDir(), "snap") }
+
+func (c *ServerConfig) ShouldDiscover() bool { return c.DiscoveryURL != "" }
+
+// ReqTimeout returns the timeout for a request to finish.
+func (c *ServerConfig) ReqTimeout() time.Duration {
+	// 5s for queue waiting, computation and disk IO delay
+	// + 2 * election timeout for possible leader election
+	return 5*time.Second + 2*time.Duration(c.ElectionTicks*int(c.TickMs))*time.Millisecond
+}
+
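+// A hypothetical worked example (not upstream code) of the arithmetic above:
+// with TickMs=100 and ElectionTicks=10 the election timeout is 1s, so
+// ReqTimeout() = 5s + 2*1s = 7s. The tick values are placeholders.
+func exampleReqTimeout() time.Duration {
+	c := &ServerConfig{TickMs: 100, ElectionTicks: 10}
+	return c.ReqTimeout() // -> 7 * time.Second
+}
+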
+func (c *ServerConfig) electionTimeout() time.Duration {
+	return time.Duration(c.ElectionTicks*int(c.TickMs)) * time.Millisecond
+}
+
+func (c *ServerConfig) peerDialTimeout() time.Duration {
+	// 1s for queue wait and election timeout
+	return time.Second + time.Duration(c.ElectionTicks*int(c.TickMs))*time.Millisecond
+}
+
+func (c *ServerConfig) PrintWithInitial() { c.print(true) }
+
+func (c *ServerConfig) Print() { c.print(false) }
+
+func (c *ServerConfig) print(initial bool) {
+	plog.Infof("name = %s", c.Name)
+	if c.ForceNewCluster {
+		plog.Infof("force new cluster")
+	}
+	plog.Infof("data dir = %s", c.DataDir)
+	plog.Infof("member dir = %s", c.MemberDir())
+	if c.DedicatedWALDir != "" {
+		plog.Infof("dedicated WAL dir = %s", c.DedicatedWALDir)
+	}
+	plog.Infof("heartbeat = %dms", c.TickMs)
+	plog.Infof("election = %dms", c.ElectionTicks*int(c.TickMs))
+	plog.Infof("snapshot count = %d", c.SnapCount)
+	if len(c.DiscoveryURL) != 0 {
+		plog.Infof("discovery URL= %s", c.DiscoveryURL)
+		if len(c.DiscoveryProxy) != 0 {
+			plog.Infof("discovery proxy = %s", c.DiscoveryProxy)
+		}
+	}
+	plog.Infof("advertise client URLs = %s", c.ClientURLs)
+	if initial {
+		plog.Infof("initial advertise peer URLs = %s", c.PeerURLs)
+		plog.Infof("initial cluster = %s", c.InitialPeerURLsMap)
+	}
+}
+
+func checkDuplicateURL(urlsmap types.URLsMap) bool {
+	um := make(map[string]bool)
+	for _, urls := range urlsmap {
+		for _, url := range urls {
+			u := url.String()
+			if um[u] {
+				return true
+			}
+			um[u] = true
+		}
+	}
+	return false
+}
+
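+// A hypothetical sketch (not upstream code) showing that checkDuplicateURL
+// detects duplicates across members, not just within a single member's URL
+// list. The member names and URL are placeholders.
+func exampleDuplicateURL() bool {
+	// two members advertising the same peer URL fail validation
+	um, _ := types.NewURLsMap("infra0=http://10.0.0.1:2380,infra1=http://10.0.0.1:2380")
+	return checkDuplicateURL(um) // -> true
+}
+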
+func (c *ServerConfig) bootstrapTimeout() time.Duration {
+	if c.BootstrapTimeout != 0 {
+		return c.BootstrapTimeout
+	}
+	return time.Second
+}
+
+func (c *ServerConfig) backendPath() string { return filepath.Join(c.SnapDir(), "db") }
diff --git a/vendor/github.com/coreos/etcd/etcdserver/consistent_index.go b/vendor/github.com/coreos/etcd/etcdserver/consistent_index.go
new file mode 100644
index 0000000..d513f67
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/consistent_index.go
@@ -0,0 +1,33 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+	"sync/atomic"
+)
+
+// consistentIndex represents the offset of an entry in a consistent replica log.
+// It implements the mvcc.ConsistentIndexGetter interface.
+// It is always set to the offset of current entry before executing the entry,
+// so ConsistentWatchableKV could get the consistent index from it.
+type consistentIndex uint64
+
+func (i *consistentIndex) setConsistentIndex(v uint64) {
+	atomic.StoreUint64((*uint64)(i), v)
+}
+
+func (i *consistentIndex) ConsistentIndex() uint64 {
+	return atomic.LoadUint64((*uint64)(i))
+}
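+
+// A hypothetical sketch (not upstream code) of the intended usage described
+// above: the apply loop stores an entry's index before executing the entry,
+// and the storage layer later reads it back atomically.
+func exampleConsistentIndex() uint64 {
+	var ci consistentIndex
+	ci.setConsistentIndex(42) // set before applying the entry at index 42
+	return ci.ConsistentIndex()
+}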
diff --git a/vendor/github.com/coreos/etcd/etcdserver/corrupt.go b/vendor/github.com/coreos/etcd/etcdserver/corrupt.go
new file mode 100644
index 0000000..d998ec5
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/corrupt.go
@@ -0,0 +1,262 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/coreos/etcd/clientv3"
+	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+	"github.com/coreos/etcd/mvcc"
+	"github.com/coreos/etcd/pkg/types"
+)
+
+// CheckInitialHashKV compares the local initial hash value with those of its peers
+// before serving any peer/client traffic. Hashes only count as mismatched when
+// they differ at the requested revision with the same compact revision.
+func (s *EtcdServer) CheckInitialHashKV() error {
+	if !s.Cfg.InitialCorruptCheck {
+		return nil
+	}
+
+	plog.Infof("%s starting initial corruption check with timeout %v...", s.ID(), s.Cfg.ReqTimeout())
+	h, rev, crev, err := s.kv.HashByRev(0)
+	if err != nil {
+		return fmt.Errorf("%s failed to fetch hash (%v)", s.ID(), err)
+	}
+	peers := s.getPeerHashKVs(rev)
+	mismatch := 0
+	for _, p := range peers {
+		if p.resp != nil {
+			peerID := types.ID(p.resp.Header.MemberId)
+			if h != p.resp.Hash {
+				if crev == p.resp.CompactRevision {
+					plog.Errorf("%s's hash %d != %s's hash %d (revision %d, peer revision %d, compact revision %d)", s.ID(), h, peerID, p.resp.Hash, rev, p.resp.Header.Revision, crev)
+					mismatch++
+				} else {
+					plog.Warningf("%s cannot check hash of peer(%s): peer has a different compact revision %d (revision:%d)", s.ID(), peerID, p.resp.CompactRevision, rev)
+				}
+			}
+			continue
+		}
+		if p.err != nil {
+			switch p.err {
+			case rpctypes.ErrFutureRev:
+				plog.Warningf("%s cannot check the hash of peer(%q) at revision %d: peer is lagging behind(%q)", s.ID(), p.eps, rev, p.err.Error())
+			case rpctypes.ErrCompacted:
+				plog.Warningf("%s cannot check the hash of peer(%q) at revision %d: local node is lagging behind(%q)", s.ID(), p.eps, rev, p.err.Error())
+			}
+		}
+	}
+	if mismatch > 0 {
+		return fmt.Errorf("%s found data inconsistency with peers", s.ID())
+	}
+
+	plog.Infof("%s succeeded on initial corruption checking: no corruption", s.ID())
+	return nil
+}
+
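+// A hypothetical sketch (not upstream code) restating the decision rule
+// above: a differing hash only counts as corruption when the peer compacted
+// at the same revision; otherwise the two hashes cover different key ranges
+// and the check is skipped with a warning.
+func exampleHashMismatch(localHash, peerHash uint32, localCRev, peerCRev int64) bool {
+	return localHash != peerHash && localCRev == peerCRev
+}
+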
+func (s *EtcdServer) monitorKVHash() {
+	t := s.Cfg.CorruptCheckTime
+	if t == 0 {
+		return
+	}
+	plog.Infof("enabled corruption checking with %s interval", t)
+	for {
+		select {
+		case <-s.stopping:
+			return
+		case <-time.After(t):
+		}
+		if !s.isLeader() {
+			continue
+		}
+		if err := s.checkHashKV(); err != nil {
+			plog.Debugf("check hash kv failed %v", err)
+		}
+	}
+}
+
+func (s *EtcdServer) checkHashKV() error {
+	h, rev, crev, err := s.kv.HashByRev(0)
+	if err != nil {
+		plog.Fatalf("failed to hash kv store (%v)", err)
+	}
+	peers := s.getPeerHashKVs(rev)
+
+	ctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout())
+	err = s.linearizableReadNotify(ctx)
+	cancel()
+	if err != nil {
+		return err
+	}
+
+	h2, rev2, crev2, err := s.kv.HashByRev(0)
+	if err != nil {
+		plog.Warningf("failed to hash kv store (%v)", err)
+		return err
+	}
+
+	alarmed := false
+	mismatch := func(id uint64) {
+		if alarmed {
+			return
+		}
+		alarmed = true
+		a := &pb.AlarmRequest{
+			MemberID: uint64(id),
+			Action:   pb.AlarmRequest_ACTIVATE,
+			Alarm:    pb.AlarmType_CORRUPT,
+		}
+		s.goAttach(func() {
+			s.raftRequest(s.ctx, pb.InternalRaftRequest{Alarm: a})
+		})
+	}
+
+	if h2 != h && rev2 == rev && crev == crev2 {
+		plog.Warningf("mismatched hashes %d and %d for revision %d", h, h2, rev)
+		mismatch(uint64(s.ID()))
+	}
+
+	for _, p := range peers {
+		if p.resp == nil {
+			continue
+		}
+		id := p.resp.Header.MemberId
+
+		// the leader expects the follower's latest revision to be less than or equal to its own
+		if p.resp.Header.Revision > rev2 {
+			plog.Warningf(
+				"revision %d from member %v, expected at most %d",
+				p.resp.Header.Revision,
+				types.ID(id),
+				rev2)
+			mismatch(id)
+		}
+
+		// the leader expects the follower's latest compact revision to be less than or equal to its own
+		if p.resp.CompactRevision > crev2 {
+			plog.Warningf(
+				"compact revision %d from member %v, expected at most %d",
+				p.resp.CompactRevision,
+				types.ID(id),
+				crev2,
+			)
+			mismatch(id)
+		}
+
+		// if the follower's compact revision equals the leader's old one, then the hashes must match
+		if p.resp.CompactRevision == crev && p.resp.Hash != h {
+			plog.Warningf(
+				"hash %d at revision %d from member %v, expected hash %d",
+				p.resp.Hash,
+				rev,
+				types.ID(id),
+				h,
+			)
+			mismatch(id)
+		}
+	}
+	return nil
+}
+
+type peerHashKVResp struct {
+	resp *clientv3.HashKVResponse
+	err  error
+	eps  []string
+}
+
+func (s *EtcdServer) getPeerHashKVs(rev int64) (resps []*peerHashKVResp) {
+	// TODO: handle the case when "s.cluster.Members" have not
+	// been populated (e.g. no snapshot to load from disk)
+	mbs := s.cluster.Members()
+	// allocate zero length with capacity; appending below would otherwise
+	// leave len(mbs) empty leading entries in the slice
+	pURLs := make([][]string, 0, len(mbs))
+	for _, m := range mbs {
+		if m.ID == s.ID() {
+			continue
+		}
+		pURLs = append(pURLs, m.PeerURLs)
+	}
+
+	for _, purls := range pURLs {
+		if len(purls) == 0 {
+			continue
+		}
+		cli, cerr := clientv3.New(clientv3.Config{
+			DialTimeout: s.Cfg.ReqTimeout(),
+			Endpoints:   purls,
+		})
+		if cerr != nil {
+			plog.Warningf("%s failed to create client to peer %q for hash checking (%q)", s.ID(), purls, cerr.Error())
+			continue
+		}
+
+		respsLen := len(resps)
+		for _, c := range cli.Endpoints() {
+			ctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout())
+			var resp *clientv3.HashKVResponse
+			resp, cerr = cli.HashKV(ctx, c, rev)
+			cancel()
+			if cerr == nil {
+				resps = append(resps, &peerHashKVResp{resp: resp})
+				break
+			}
+			plog.Warningf("%s hash-kv error %q on peer %q with revision %d", s.ID(), cerr.Error(), c, rev)
+		}
+		cli.Close()
+
+		if respsLen == len(resps) {
+			resps = append(resps, &peerHashKVResp{err: cerr, eps: purls})
+		}
+	}
+	return resps
+}
+
+type applierV3Corrupt struct {
+	applierV3
+}
+
+func newApplierV3Corrupt(a applierV3) *applierV3Corrupt { return &applierV3Corrupt{a} }
+
+func (a *applierV3Corrupt) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error) {
+	return nil, ErrCorrupt
+}
+
+func (a *applierV3Corrupt) Range(txn mvcc.TxnRead, p *pb.RangeRequest) (*pb.RangeResponse, error) {
+	return nil, ErrCorrupt
+}
+
+func (a *applierV3Corrupt) DeleteRange(txn mvcc.TxnWrite, p *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
+	return nil, ErrCorrupt
+}
+
+func (a *applierV3Corrupt) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) {
+	return nil, ErrCorrupt
+}
+
+func (a *applierV3Corrupt) Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, error) {
+	return nil, nil, ErrCorrupt
+}
+
+func (a *applierV3Corrupt) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
+	return nil, ErrCorrupt
+}
+
+func (a *applierV3Corrupt) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
+	return nil, ErrCorrupt
+}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/doc.go b/vendor/github.com/coreos/etcd/etcdserver/doc.go
new file mode 100644
index 0000000..b195d2d
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package etcdserver defines how etcd servers interact and store their states.
+package etcdserver
diff --git a/vendor/github.com/coreos/etcd/etcdserver/errors.go b/vendor/github.com/coreos/etcd/etcdserver/errors.go
new file mode 100644
index 0000000..fb93c4b
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/errors.go
@@ -0,0 +1,48 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+	"errors"
+	"fmt"
+)
+
+var (
+	ErrUnknownMethod              = errors.New("etcdserver: unknown method")
+	ErrStopped                    = errors.New("etcdserver: server stopped")
+	ErrCanceled                   = errors.New("etcdserver: request cancelled")
+	ErrTimeout                    = errors.New("etcdserver: request timed out")
+	ErrTimeoutDueToLeaderFail     = errors.New("etcdserver: request timed out, possibly due to previous leader failure")
+	ErrTimeoutDueToConnectionLost = errors.New("etcdserver: request timed out, possibly due to connection lost")
+	ErrTimeoutLeaderTransfer      = errors.New("etcdserver: request timed out, leader transfer took too long")
+	ErrNotEnoughStartedMembers    = errors.New("etcdserver: re-configuration failed due to not enough started members")
+	ErrNoLeader                   = errors.New("etcdserver: no leader")
+	ErrNotLeader                  = errors.New("etcdserver: not leader")
+	ErrRequestTooLarge            = errors.New("etcdserver: request is too large")
+	ErrNoSpace                    = errors.New("etcdserver: no space")
+	ErrTooManyRequests            = errors.New("etcdserver: too many requests")
+	ErrUnhealthy                  = errors.New("etcdserver: unhealthy cluster")
+	ErrKeyNotFound                = errors.New("etcdserver: key not found")
+	ErrCorrupt                    = errors.New("etcdserver: corrupt cluster")
+)
+
+type DiscoveryError struct {
+	Op  string
+	Err error
+}
+
+func (e DiscoveryError) Error() string {
+	return fmt.Sprintf("failed to %s discovery cluster (%v)", e.Op, e.Err)
+}
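+
+// A hypothetical sketch (not upstream code): these are sentinel errors, so
+// callers typically distinguish failure modes by direct comparison rather
+// than by string matching.
+func exampleIsTimeout(err error) bool {
+	switch err {
+	case ErrTimeout, ErrTimeoutDueToLeaderFail, ErrTimeoutDueToConnectionLost:
+		return true
+	}
+	return false
+}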
diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/gw/rpc.pb.gw.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/gw/rpc.pb.gw.go
new file mode 100644
index 0000000..c50525b
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/gw/rpc.pb.gw.go
@@ -0,0 +1,2134 @@
+// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
+// source: etcdserver/etcdserverpb/rpc.proto
+
+/*
+Package etcdserverpb is a reverse proxy.
+
+It translates gRPC into RESTful JSON APIs.
+*/
+package gw
+
+import (
+	"github.com/coreos/etcd/etcdserver/etcdserverpb"
+	"io"
+	"net/http"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/grpc-ecosystem/grpc-gateway/runtime"
+	"github.com/grpc-ecosystem/grpc-gateway/utilities"
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/status"
+)
+
+var _ codes.Code
+var _ io.Reader
+var _ status.Status
+var _ = runtime.String
+var _ = utilities.NewDoubleArray
+
+func request_KV_Range_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+	var protoReq etcdserverpb.RangeRequest
+	var metadata runtime.ServerMetadata
+
+	if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+	}
+
+	msg, err := client.Range(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+	return msg, metadata, err
+
+}
+
+func request_KV_Put_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+	var protoReq etcdserverpb.PutRequest
+	var metadata runtime.ServerMetadata
+
+	if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+	}
+
+	msg, err := client.Put(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+	return msg, metadata, err
+
+}
+
+func request_KV_DeleteRange_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+	var protoReq etcdserverpb.DeleteRangeRequest
+	var metadata runtime.ServerMetadata
+
+	if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+	}
+
+	msg, err := client.DeleteRange(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+	return msg, metadata, err
+
+}
+
+func request_KV_Txn_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+	var protoReq etcdserverpb.TxnRequest
+	var metadata runtime.ServerMetadata
+
+	if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+	}
+
+	msg, err := client.Txn(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+	return msg, metadata, err
+
+}
+
+func request_KV_Compact_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+	var protoReq etcdserverpb.CompactionRequest
+	var metadata runtime.ServerMetadata
+
+	if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+	}
+
+	msg, err := client.Compact(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+	return msg, metadata, err
+
+}
+
+func request_Watch_Watch_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.WatchClient, req *http.Request, pathParams map[string]string) (etcdserverpb.Watch_WatchClient, runtime.ServerMetadata, error) {
+	var metadata runtime.ServerMetadata
+	stream, err := client.Watch(ctx)
+	if err != nil {
+		grpclog.Printf("Failed to start streaming: %v", err)
+		return nil, metadata, err
+	}
+	dec := marshaler.NewDecoder(req.Body)
+	handleSend := func() error {
+		var protoReq etcdserverpb.WatchRequest
+		err = dec.Decode(&protoReq)
+		if err == io.EOF {
+			return err
+		}
+		if err != nil {
+			grpclog.Printf("Failed to decode request: %v", err)
+			return err
+		}
+		if err = stream.Send(&protoReq); err != nil {
+			grpclog.Printf("Failed to send request: %v", err)
+			return err
+		}
+		return nil
+	}
+	if err := handleSend(); err != nil {
+		if cerr := stream.CloseSend(); cerr != nil {
+			grpclog.Printf("Failed to terminate client stream: %v", cerr)
+		}
+		if err == io.EOF {
+			return stream, metadata, nil
+		}
+		return nil, metadata, err
+	}
+	go func() {
+		for {
+			if err := handleSend(); err != nil {
+				break
+			}
+		}
+		if err := stream.CloseSend(); err != nil {
+			grpclog.Printf("Failed to terminate client stream: %v", err)
+		}
+	}()
+	header, err := stream.Header()
+	if err != nil {
+		grpclog.Printf("Failed to get header from client: %v", err)
+		return nil, metadata, err
+	}
+	metadata.HeaderMD = header
+	return stream, metadata, nil
+}
+
+func request_Lease_LeaseGrant_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+	var protoReq etcdserverpb.LeaseGrantRequest
+	var metadata runtime.ServerMetadata
+
+	if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+	}
+
+	msg, err := client.LeaseGrant(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+	return msg, metadata, err
+
+}
+
+func request_Lease_LeaseRevoke_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+	var protoReq etcdserverpb.LeaseRevokeRequest
+	var metadata runtime.ServerMetadata
+
+	if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+	}
+
+	msg, err := client.LeaseRevoke(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+	return msg, metadata, err
+
+}
+
+func request_Lease_LeaseKeepAlive_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (etcdserverpb.Lease_LeaseKeepAliveClient, runtime.ServerMetadata, error) {
+	var metadata runtime.ServerMetadata
+	stream, err := client.LeaseKeepAlive(ctx)
+	if err != nil {
+		grpclog.Printf("Failed to start streaming: %v", err)
+		return nil, metadata, err
+	}
+	dec := marshaler.NewDecoder(req.Body)
+	handleSend := func() error {
+		var protoReq etcdserverpb.LeaseKeepAliveRequest
+		err = dec.Decode(&protoReq)
+		if err == io.EOF {
+			return err
+		}
+		if err != nil {
+			grpclog.Printf("Failed to decode request: %v", err)
+			return err
+		}
+		if err = stream.Send(&protoReq); err != nil {
+			grpclog.Printf("Failed to send request: %v", err)
+			return err
+		}
+		return nil
+	}
+	if err := handleSend(); err != nil {
+		if cerr := stream.CloseSend(); cerr != nil {
+			grpclog.Printf("Failed to terminate client stream: %v", cerr)
+		}
+		if err == io.EOF {
+			return stream, metadata, nil
+		}
+		return nil, metadata, err
+	}
+	go func() {
+		for {
+			if err := handleSend(); err != nil {
+				break
+			}
+		}
+		if err := stream.CloseSend(); err != nil {
+			grpclog.Printf("Failed to terminate client stream: %v", err)
+		}
+	}()
+	header, err := stream.Header()
+	if err != nil {
+		grpclog.Printf("Failed to get header from client: %v", err)
+		return nil, metadata, err
+	}
+	metadata.HeaderMD = header
+	return stream, metadata, nil
+}
+
+func request_Lease_LeaseTimeToLive_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+	var protoReq etcdserverpb.LeaseTimeToLiveRequest
+	var metadata runtime.ServerMetadata
+
+	if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+	}
+
+	msg, err := client.LeaseTimeToLive(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+	return msg, metadata, err
+
+}
+
+func request_Lease_LeaseLeases_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+	var protoReq etcdserverpb.LeaseLeasesRequest
+	var metadata runtime.ServerMetadata
+
+	if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+	}
+
+	msg, err := client.LeaseLeases(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+	return msg, metadata, err
+
+}
+
+func request_Cluster_MemberAdd_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+	var protoReq etcdserverpb.MemberAddRequest
+	var metadata runtime.ServerMetadata
+
+	if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+	}
+
+	msg, err := client.MemberAdd(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+	return msg, metadata, err
+
+}
+
+func request_Cluster_MemberRemove_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+	var protoReq etcdserverpb.MemberRemoveRequest
+	var metadata runtime.ServerMetadata
+
+	if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+	}
+
+	msg, err := client.MemberRemove(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+	return msg, metadata, err
+
+}
+
+func request_Cluster_MemberUpdate_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+	var protoReq etcdserverpb.MemberUpdateRequest
+	var metadata runtime.ServerMetadata
+
+	if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+	}
+
+	msg, err := client.MemberUpdate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+	return msg, metadata, err
+
+}
+
+func request_Cluster_MemberList_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+	var protoReq etcdserverpb.MemberListRequest
+	var metadata runtime.ServerMetadata
+
+	if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+	}
+
+	msg, err := client.MemberList(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+	return msg, metadata, err
+
+}
+
+func request_Maintenance_Alarm_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+	var protoReq etcdserverpb.AlarmRequest
+	var metadata runtime.ServerMetadata
+
+	if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+	}
+
+	msg, err := client.Alarm(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+	return msg, metadata, err
+
+}
+
+func request_Maintenance_Status_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+	var protoReq etcdserverpb.StatusRequest
+	var metadata runtime.ServerMetadata
+
+	if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+	}
+
+	msg, err := client.Status(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+	return msg, metadata, err
+
+}
+
+func request_Maintenance_Defragment_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+	var protoReq etcdserverpb.DefragmentRequest
+	var metadata runtime.ServerMetadata
+
+	if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+	}
+
+	msg, err := client.Defragment(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+	return msg, metadata, err
+
+}
+
+func request_Maintenance_Hash_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+	var protoReq etcdserverpb.HashRequest
+	var metadata runtime.ServerMetadata
+
+	if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+	}
+
+	msg, err := client.Hash(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+	return msg, metadata, err
+
+}
+
+func request_Maintenance_HashKV_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+	var protoReq etcdserverpb.HashKVRequest
+	var metadata runtime.ServerMetadata
+
+	if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+	}
+
+	msg, err := client.HashKV(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+	return msg, metadata, err
+
+}
+
+func request_Maintenance_Snapshot_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (etcdserverpb.Maintenance_SnapshotClient, runtime.ServerMetadata, error) {
+	var protoReq etcdserverpb.SnapshotRequest
+	var metadata runtime.ServerMetadata
+
+	if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+	}
+
+	stream, err := client.Snapshot(ctx, &protoReq)
+	if err != nil {
+		return nil, metadata, err
+	}
+	header, err := stream.Header()
+	if err != nil {
+		return nil, metadata, err
+	}
+	metadata.HeaderMD = header
+	return stream, metadata, nil
+
+}
+
+func request_Maintenance_MoveLeader_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+	var protoReq etcdserverpb.MoveLeaderRequest
+	var metadata runtime.ServerMetadata
+
+	if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+	}
+
+	msg, err := client.MoveLeader(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+	return msg, metadata, err
+
+}
+
+func request_Auth_AuthEnable_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+	var protoReq etcdserverpb.AuthEnableRequest
+	var metadata runtime.ServerMetadata
+
+	if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+	}
+
+	msg, err := client.AuthEnable(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+	return msg, metadata, err
+
+}
+
+func request_Auth_AuthDisable_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+	var protoReq etcdserverpb.AuthDisableRequest
+	var metadata runtime.ServerMetadata
+
+	if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+	}
+
+	msg, err := client.AuthDisable(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+	return msg, metadata, err
+
+}
+
+func request_Auth_Authenticate_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+	var protoReq etcdserverpb.AuthenticateRequest
+	var metadata runtime.ServerMetadata
+
+	if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+	}
+
+	msg, err := client.Authenticate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+	return msg, metadata, err
+
+}
+
+func request_Auth_UserAdd_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+	var protoReq etcdserverpb.AuthUserAddRequest
+	var metadata runtime.ServerMetadata
+
+	if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+	}
+
+	msg, err := client.UserAdd(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+	return msg, metadata, err
+
+}
+
+func request_Auth_UserGet_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+	var protoReq etcdserverpb.AuthUserGetRequest
+	var metadata runtime.ServerMetadata
+
+	if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+	}
+
+	msg, err := client.UserGet(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+	return msg, metadata, err
+
+}
+
+func request_Auth_UserList_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+	var protoReq etcdserverpb.AuthUserListRequest
+	var metadata runtime.ServerMetadata
+
+	if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+	}
+
+	msg, err := client.UserList(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+	return msg, metadata, err
+
+}
+
+func request_Auth_UserDelete_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+	var protoReq etcdserverpb.AuthUserDeleteRequest
+	var metadata runtime.ServerMetadata
+
+	if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+	}
+
+	msg, err := client.UserDelete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+	return msg, metadata, err
+
+}
+
+func request_Auth_UserChangePassword_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+	var protoReq etcdserverpb.AuthUserChangePasswordRequest
+	var metadata runtime.ServerMetadata
+
+	if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+	}
+
+	msg, err := client.UserChangePassword(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+	return msg, metadata, err
+
+}
+
+func request_Auth_UserGrantRole_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+	var protoReq etcdserverpb.AuthUserGrantRoleRequest
+	var metadata runtime.ServerMetadata
+
+	if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+	}
+
+	msg, err := client.UserGrantRole(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+	return msg, metadata, err
+
+}
+
+func request_Auth_UserRevokeRole_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+	var protoReq etcdserverpb.AuthUserRevokeRoleRequest
+	var metadata runtime.ServerMetadata
+
+	if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+	}
+
+	msg, err := client.UserRevokeRole(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+	return msg, metadata, err
+
+}
+
+func request_Auth_RoleAdd_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+	var protoReq etcdserverpb.AuthRoleAddRequest
+	var metadata runtime.ServerMetadata
+
+	if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+	}
+
+	msg, err := client.RoleAdd(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+	return msg, metadata, err
+
+}
+
+func request_Auth_RoleGet_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+	var protoReq etcdserverpb.AuthRoleGetRequest
+	var metadata runtime.ServerMetadata
+
+	if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+	}
+
+	msg, err := client.RoleGet(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+	return msg, metadata, err
+
+}
+
+func request_Auth_RoleList_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+	var protoReq etcdserverpb.AuthRoleListRequest
+	var metadata runtime.ServerMetadata
+
+	if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+	}
+
+	msg, err := client.RoleList(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+	return msg, metadata, err
+
+}
+
+func request_Auth_RoleDelete_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+	var protoReq etcdserverpb.AuthRoleDeleteRequest
+	var metadata runtime.ServerMetadata
+
+	if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+	}
+
+	msg, err := client.RoleDelete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+	return msg, metadata, err
+
+}
+
+func request_Auth_RoleGrantPermission_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+	var protoReq etcdserverpb.AuthRoleGrantPermissionRequest
+	var metadata runtime.ServerMetadata
+
+	if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+	}
+
+	msg, err := client.RoleGrantPermission(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+	return msg, metadata, err
+
+}
+
+func request_Auth_RoleRevokePermission_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+	var protoReq etcdserverpb.AuthRoleRevokePermissionRequest
+	var metadata runtime.ServerMetadata
+
+	if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+	}
+
+	msg, err := client.RoleRevokePermission(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+	return msg, metadata, err
+
+}
+
+// RegisterKVHandlerFromEndpoint is same as RegisterKVHandler but
+// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
+func RegisterKVHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
+	conn, err := grpc.Dial(endpoint, opts...)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if err != nil {
+			if cerr := conn.Close(); cerr != nil {
+				grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
+			}
+			return
+		}
+		go func() {
+			<-ctx.Done()
+			if cerr := conn.Close(); cerr != nil {
+				grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
+			}
+		}()
+	}()
+
+	return RegisterKVHandler(ctx, mux, conn)
+}
+
+// RegisterKVHandler registers the http handlers for service KV to "mux".
+// The handlers forward requests to the grpc endpoint over "conn".
+func RegisterKVHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
+	return RegisterKVHandlerClient(ctx, mux, etcdserverpb.NewKVClient(conn))
+}
+
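+// A hypothetical usage sketch, not generated code: it shows how a caller
+// might wire the KV gateway by building a ServeMux, registering the handler
+// against an assumed etcd endpoint, and serving it over HTTP. The endpoint
+// and listen address are placeholders.
+func exampleServeKVGateway(ctx context.Context) error {
+	mux := runtime.NewServeMux()
+	opts := []grpc.DialOption{grpc.WithInsecure()} // no TLS, for illustration only
+	if err := RegisterKVHandlerFromEndpoint(ctx, mux, "localhost:2379", opts); err != nil {
+		return err
+	}
+	return http.ListenAndServe(":8080", mux)
+}
+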
+// RegisterKVHandlerClient registers the http handlers for service KV to "mux".
+// The handlers forward requests to the grpc endpoint over the given implementation of "KVClient".
+// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "KVClient"
+// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
+// "KVClient" to call the correct interceptors.
+func RegisterKVHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.KVClient) error {
+
+	mux.Handle("POST", pattern_KV_Range_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		if cn, ok := w.(http.CloseNotifier); ok {
+			go func(done <-chan struct{}, closed <-chan bool) {
+				select {
+				case <-done:
+				case <-closed:
+					cancel()
+				}
+			}(ctx.Done(), cn.CloseNotify())
+		}
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_KV_Range_0(rctx, inboundMarshaler, client, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_KV_Range_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
+	mux.Handle("POST", pattern_KV_Put_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		if cn, ok := w.(http.CloseNotifier); ok {
+			go func(done <-chan struct{}, closed <-chan bool) {
+				select {
+				case <-done:
+				case <-closed:
+					cancel()
+				}
+			}(ctx.Done(), cn.CloseNotify())
+		}
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_KV_Put_0(rctx, inboundMarshaler, client, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_KV_Put_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
+	mux.Handle("POST", pattern_KV_DeleteRange_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		if cn, ok := w.(http.CloseNotifier); ok {
+			go func(done <-chan struct{}, closed <-chan bool) {
+				select {
+				case <-done:
+				case <-closed:
+					cancel()
+				}
+			}(ctx.Done(), cn.CloseNotify())
+		}
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_KV_DeleteRange_0(rctx, inboundMarshaler, client, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_KV_DeleteRange_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
+	mux.Handle("POST", pattern_KV_Txn_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		if cn, ok := w.(http.CloseNotifier); ok {
+			go func(done <-chan struct{}, closed <-chan bool) {
+				select {
+				case <-done:
+				case <-closed:
+					cancel()
+				}
+			}(ctx.Done(), cn.CloseNotify())
+		}
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_KV_Txn_0(rctx, inboundMarshaler, client, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_KV_Txn_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
+	mux.Handle("POST", pattern_KV_Compact_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		if cn, ok := w.(http.CloseNotifier); ok {
+			go func(done <-chan struct{}, closed <-chan bool) {
+				select {
+				case <-done:
+				case <-closed:
+					cancel()
+				}
+			}(ctx.Done(), cn.CloseNotify())
+		}
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_KV_Compact_0(rctx, inboundMarshaler, client, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_KV_Compact_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
+	return nil
+}
+
+var (
+	pattern_KV_Range_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "kv", "range"}, ""))
+
+	pattern_KV_Put_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "kv", "put"}, ""))
+
+	pattern_KV_DeleteRange_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "kv", "deleterange"}, ""))
+
+	pattern_KV_Txn_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "kv", "txn"}, ""))
+
+	pattern_KV_Compact_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "kv", "compaction"}, ""))
+)
+
+var (
+	forward_KV_Range_0 = runtime.ForwardResponseMessage
+
+	forward_KV_Put_0 = runtime.ForwardResponseMessage
+
+	forward_KV_DeleteRange_0 = runtime.ForwardResponseMessage
+
+	forward_KV_Txn_0 = runtime.ForwardResponseMessage
+
+	forward_KV_Compact_0 = runtime.ForwardResponseMessage
+)
+
+// RegisterWatchHandlerFromEndpoint is the same as RegisterWatchHandler but
+// automatically dials "endpoint" and closes the connection when "ctx" is done.
+func RegisterWatchHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
+	conn, err := grpc.Dial(endpoint, opts...)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if err != nil {
+			if cerr := conn.Close(); cerr != nil {
+				grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
+			}
+			return
+		}
+		go func() {
+			<-ctx.Done()
+			if cerr := conn.Close(); cerr != nil {
+				grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
+			}
+		}()
+	}()
+
+	return RegisterWatchHandler(ctx, mux, conn)
+}
+
+// RegisterWatchHandler registers the http handlers for service Watch to "mux".
+// The handlers forward requests to the grpc endpoint over "conn".
+func RegisterWatchHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
+	return RegisterWatchHandlerClient(ctx, mux, etcdserverpb.NewWatchClient(conn))
+}
+
+// RegisterWatchHandlerClient registers the http handlers for service Watch to "mux".
+// The handlers forward requests to the grpc endpoint over the given implementation of "WatchClient".
+// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "WatchClient"
+// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
+// "WatchClient" to call the correct interceptors.
+func RegisterWatchHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.WatchClient) error {
+
+	mux.Handle("POST", pattern_Watch_Watch_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		if cn, ok := w.(http.CloseNotifier); ok {
+			go func(done <-chan struct{}, closed <-chan bool) {
+				select {
+				case <-done:
+				case <-closed:
+					cancel()
+				}
+			}(ctx.Done(), cn.CloseNotify())
+		}
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_Watch_Watch_0(rctx, inboundMarshaler, client, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_Watch_Watch_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...)
+
+	})
+
+	return nil
+}
+
+var (
+	pattern_Watch_Watch_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v3beta", "watch"}, ""))
+)
+
+var (
+	forward_Watch_Watch_0 = runtime.ForwardResponseStream
+)
+
+// RegisterLeaseHandlerFromEndpoint is the same as RegisterLeaseHandler but
+// automatically dials "endpoint" and closes the connection when "ctx" is done.
+func RegisterLeaseHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
+	conn, err := grpc.Dial(endpoint, opts...)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if err != nil {
+			if cerr := conn.Close(); cerr != nil {
+				grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
+			}
+			return
+		}
+		go func() {
+			<-ctx.Done()
+			if cerr := conn.Close(); cerr != nil {
+				grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
+			}
+		}()
+	}()
+
+	return RegisterLeaseHandler(ctx, mux, conn)
+}
+
+// RegisterLeaseHandler registers the http handlers for service Lease to "mux".
+// The handlers forward requests to the grpc endpoint over "conn".
+func RegisterLeaseHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
+	return RegisterLeaseHandlerClient(ctx, mux, etcdserverpb.NewLeaseClient(conn))
+}
+
+// RegisterLeaseHandlerClient registers the http handlers for service Lease to "mux".
+// The handlers forward requests to the grpc endpoint over the given implementation of "LeaseClient".
+// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "LeaseClient"
+// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
+// "LeaseClient" to call the correct interceptors.
+func RegisterLeaseHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.LeaseClient) error {
+
+	mux.Handle("POST", pattern_Lease_LeaseGrant_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		if cn, ok := w.(http.CloseNotifier); ok {
+			go func(done <-chan struct{}, closed <-chan bool) {
+				select {
+				case <-done:
+				case <-closed:
+					cancel()
+				}
+			}(ctx.Done(), cn.CloseNotify())
+		}
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_Lease_LeaseGrant_0(rctx, inboundMarshaler, client, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_Lease_LeaseGrant_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
+	mux.Handle("POST", pattern_Lease_LeaseRevoke_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		if cn, ok := w.(http.CloseNotifier); ok {
+			go func(done <-chan struct{}, closed <-chan bool) {
+				select {
+				case <-done:
+				case <-closed:
+					cancel()
+				}
+			}(ctx.Done(), cn.CloseNotify())
+		}
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_Lease_LeaseRevoke_0(rctx, inboundMarshaler, client, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_Lease_LeaseRevoke_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
+	mux.Handle("POST", pattern_Lease_LeaseKeepAlive_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		if cn, ok := w.(http.CloseNotifier); ok {
+			go func(done <-chan struct{}, closed <-chan bool) {
+				select {
+				case <-done:
+				case <-closed:
+					cancel()
+				}
+			}(ctx.Done(), cn.CloseNotify())
+		}
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_Lease_LeaseKeepAlive_0(rctx, inboundMarshaler, client, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_Lease_LeaseKeepAlive_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...)
+
+	})
+
+	mux.Handle("POST", pattern_Lease_LeaseTimeToLive_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		if cn, ok := w.(http.CloseNotifier); ok {
+			go func(done <-chan struct{}, closed <-chan bool) {
+				select {
+				case <-done:
+				case <-closed:
+					cancel()
+				}
+			}(ctx.Done(), cn.CloseNotify())
+		}
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_Lease_LeaseTimeToLive_0(rctx, inboundMarshaler, client, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_Lease_LeaseTimeToLive_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
+	mux.Handle("POST", pattern_Lease_LeaseLeases_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		if cn, ok := w.(http.CloseNotifier); ok {
+			go func(done <-chan struct{}, closed <-chan bool) {
+				select {
+				case <-done:
+				case <-closed:
+					cancel()
+				}
+			}(ctx.Done(), cn.CloseNotify())
+		}
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_Lease_LeaseLeases_0(rctx, inboundMarshaler, client, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_Lease_LeaseLeases_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
+	return nil
+}
+
+var (
+	pattern_Lease_LeaseGrant_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "lease", "grant"}, ""))
+
+	pattern_Lease_LeaseRevoke_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "kv", "lease", "revoke"}, ""))
+
+	pattern_Lease_LeaseKeepAlive_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "lease", "keepalive"}, ""))
+
+	pattern_Lease_LeaseTimeToLive_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "kv", "lease", "timetolive"}, ""))
+
+	pattern_Lease_LeaseLeases_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "kv", "lease", "leases"}, ""))
+)
+
+var (
+	forward_Lease_LeaseGrant_0 = runtime.ForwardResponseMessage
+
+	forward_Lease_LeaseRevoke_0 = runtime.ForwardResponseMessage
+
+	forward_Lease_LeaseKeepAlive_0 = runtime.ForwardResponseStream
+
+	forward_Lease_LeaseTimeToLive_0 = runtime.ForwardResponseMessage
+
+	forward_Lease_LeaseLeases_0 = runtime.ForwardResponseMessage
+)
+
+// RegisterClusterHandlerFromEndpoint is the same as RegisterClusterHandler but
+// automatically dials "endpoint" and closes the connection when "ctx" is done.
+func RegisterClusterHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
+	conn, err := grpc.Dial(endpoint, opts...)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if err != nil {
+			if cerr := conn.Close(); cerr != nil {
+				grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
+			}
+			return
+		}
+		go func() {
+			<-ctx.Done()
+			if cerr := conn.Close(); cerr != nil {
+				grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
+			}
+		}()
+	}()
+
+	return RegisterClusterHandler(ctx, mux, conn)
+}
+
+// RegisterClusterHandler registers the http handlers for service Cluster to "mux".
+// The handlers forward requests to the grpc endpoint over "conn".
+func RegisterClusterHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
+	return RegisterClusterHandlerClient(ctx, mux, etcdserverpb.NewClusterClient(conn))
+}
+
+// RegisterClusterHandlerClient registers the http handlers for service Cluster to "mux".
+// The handlers forward requests to the grpc endpoint over the given implementation of "ClusterClient".
+// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "ClusterClient"
+// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
+// "ClusterClient" to call the correct interceptors.
+func RegisterClusterHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.ClusterClient) error {
+
+	mux.Handle("POST", pattern_Cluster_MemberAdd_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		if cn, ok := w.(http.CloseNotifier); ok {
+			go func(done <-chan struct{}, closed <-chan bool) {
+				select {
+				case <-done:
+				case <-closed:
+					cancel()
+				}
+			}(ctx.Done(), cn.CloseNotify())
+		}
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_Cluster_MemberAdd_0(rctx, inboundMarshaler, client, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_Cluster_MemberAdd_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
+	mux.Handle("POST", pattern_Cluster_MemberRemove_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		if cn, ok := w.(http.CloseNotifier); ok {
+			go func(done <-chan struct{}, closed <-chan bool) {
+				select {
+				case <-done:
+				case <-closed:
+					cancel()
+				}
+			}(ctx.Done(), cn.CloseNotify())
+		}
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_Cluster_MemberRemove_0(rctx, inboundMarshaler, client, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_Cluster_MemberRemove_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
+	mux.Handle("POST", pattern_Cluster_MemberUpdate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		if cn, ok := w.(http.CloseNotifier); ok {
+			go func(done <-chan struct{}, closed <-chan bool) {
+				select {
+				case <-done:
+				case <-closed:
+					cancel()
+				}
+			}(ctx.Done(), cn.CloseNotify())
+		}
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_Cluster_MemberUpdate_0(rctx, inboundMarshaler, client, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_Cluster_MemberUpdate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
+	mux.Handle("POST", pattern_Cluster_MemberList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		if cn, ok := w.(http.CloseNotifier); ok {
+			go func(done <-chan struct{}, closed <-chan bool) {
+				select {
+				case <-done:
+				case <-closed:
+					cancel()
+				}
+			}(ctx.Done(), cn.CloseNotify())
+		}
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_Cluster_MemberList_0(rctx, inboundMarshaler, client, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_Cluster_MemberList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
+	return nil
+}
+
+var (
+	pattern_Cluster_MemberAdd_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "cluster", "member", "add"}, ""))
+
+	pattern_Cluster_MemberRemove_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "cluster", "member", "remove"}, ""))
+
+	pattern_Cluster_MemberUpdate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "cluster", "member", "update"}, ""))
+
+	pattern_Cluster_MemberList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "cluster", "member", "list"}, ""))
+)
+
+var (
+	forward_Cluster_MemberAdd_0 = runtime.ForwardResponseMessage
+
+	forward_Cluster_MemberRemove_0 = runtime.ForwardResponseMessage
+
+	forward_Cluster_MemberUpdate_0 = runtime.ForwardResponseMessage
+
+	forward_Cluster_MemberList_0 = runtime.ForwardResponseMessage
+)
+
+// RegisterMaintenanceHandlerFromEndpoint is the same as RegisterMaintenanceHandler but
+// automatically dials "endpoint" and closes the connection when "ctx" is done.
+func RegisterMaintenanceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
+	conn, err := grpc.Dial(endpoint, opts...)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if err != nil {
+			if cerr := conn.Close(); cerr != nil {
+				grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
+			}
+			return
+		}
+		go func() {
+			<-ctx.Done()
+			if cerr := conn.Close(); cerr != nil {
+				grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
+			}
+		}()
+	}()
+
+	return RegisterMaintenanceHandler(ctx, mux, conn)
+}
+
+// RegisterMaintenanceHandler registers the http handlers for service Maintenance to "mux".
+// The handlers forward requests to the grpc endpoint over "conn".
+func RegisterMaintenanceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
+	return RegisterMaintenanceHandlerClient(ctx, mux, etcdserverpb.NewMaintenanceClient(conn))
+}
+
+// RegisterMaintenanceHandlerClient registers the http handlers for service Maintenance to "mux".
+// The handlers forward requests to the grpc endpoint over the given implementation of "MaintenanceClient".
+// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "MaintenanceClient"
+// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
+// "MaintenanceClient" to call the correct interceptors.
+func RegisterMaintenanceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.MaintenanceClient) error {
+
+	mux.Handle("POST", pattern_Maintenance_Alarm_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		if cn, ok := w.(http.CloseNotifier); ok {
+			go func(done <-chan struct{}, closed <-chan bool) {
+				select {
+				case <-done:
+				case <-closed:
+					cancel()
+				}
+			}(ctx.Done(), cn.CloseNotify())
+		}
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_Maintenance_Alarm_0(rctx, inboundMarshaler, client, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_Maintenance_Alarm_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
+	mux.Handle("POST", pattern_Maintenance_Status_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		if cn, ok := w.(http.CloseNotifier); ok {
+			go func(done <-chan struct{}, closed <-chan bool) {
+				select {
+				case <-done:
+				case <-closed:
+					cancel()
+				}
+			}(ctx.Done(), cn.CloseNotify())
+		}
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_Maintenance_Status_0(rctx, inboundMarshaler, client, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_Maintenance_Status_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
+	mux.Handle("POST", pattern_Maintenance_Defragment_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		if cn, ok := w.(http.CloseNotifier); ok {
+			go func(done <-chan struct{}, closed <-chan bool) {
+				select {
+				case <-done:
+				case <-closed:
+					cancel()
+				}
+			}(ctx.Done(), cn.CloseNotify())
+		}
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_Maintenance_Defragment_0(rctx, inboundMarshaler, client, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_Maintenance_Defragment_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
+	mux.Handle("POST", pattern_Maintenance_Hash_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		if cn, ok := w.(http.CloseNotifier); ok {
+			go func(done <-chan struct{}, closed <-chan bool) {
+				select {
+				case <-done:
+				case <-closed:
+					cancel()
+				}
+			}(ctx.Done(), cn.CloseNotify())
+		}
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_Maintenance_Hash_0(rctx, inboundMarshaler, client, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_Maintenance_Hash_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
+	mux.Handle("POST", pattern_Maintenance_HashKV_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		if cn, ok := w.(http.CloseNotifier); ok {
+			go func(done <-chan struct{}, closed <-chan bool) {
+				select {
+				case <-done:
+				case <-closed:
+					cancel()
+				}
+			}(ctx.Done(), cn.CloseNotify())
+		}
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_Maintenance_HashKV_0(rctx, inboundMarshaler, client, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_Maintenance_HashKV_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
+	mux.Handle("POST", pattern_Maintenance_Snapshot_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		if cn, ok := w.(http.CloseNotifier); ok {
+			go func(done <-chan struct{}, closed <-chan bool) {
+				select {
+				case <-done:
+				case <-closed:
+					cancel()
+				}
+			}(ctx.Done(), cn.CloseNotify())
+		}
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_Maintenance_Snapshot_0(rctx, inboundMarshaler, client, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_Maintenance_Snapshot_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...)
+
+	})
+
+	mux.Handle("POST", pattern_Maintenance_MoveLeader_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		if cn, ok := w.(http.CloseNotifier); ok {
+			go func(done <-chan struct{}, closed <-chan bool) {
+				select {
+				case <-done:
+				case <-closed:
+					cancel()
+				}
+			}(ctx.Done(), cn.CloseNotify())
+		}
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_Maintenance_MoveLeader_0(rctx, inboundMarshaler, client, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_Maintenance_MoveLeader_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
+	return nil
+}
+
+var (
+	pattern_Maintenance_Alarm_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "maintenance", "alarm"}, ""))
+
+	pattern_Maintenance_Status_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "maintenance", "status"}, ""))
+
+	pattern_Maintenance_Defragment_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "maintenance", "defragment"}, ""))
+
+	pattern_Maintenance_Hash_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "maintenance", "hash"}, ""))
+
+	pattern_Maintenance_HashKV_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "maintenance", "hash"}, ""))
+
+	pattern_Maintenance_Snapshot_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "maintenance", "snapshot"}, ""))
+
+	pattern_Maintenance_MoveLeader_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "maintenance", "transfer-leadership"}, ""))
+)
+
+var (
+	forward_Maintenance_Alarm_0 = runtime.ForwardResponseMessage
+
+	forward_Maintenance_Status_0 = runtime.ForwardResponseMessage
+
+	forward_Maintenance_Defragment_0 = runtime.ForwardResponseMessage
+
+	forward_Maintenance_Hash_0 = runtime.ForwardResponseMessage
+
+	forward_Maintenance_HashKV_0 = runtime.ForwardResponseMessage
+
+	forward_Maintenance_Snapshot_0 = runtime.ForwardResponseStream
+
+	forward_Maintenance_MoveLeader_0 = runtime.ForwardResponseMessage
+)
+
+// RegisterAuthHandlerFromEndpoint is the same as RegisterAuthHandler but
+// automatically dials "endpoint" and closes the connection when "ctx" is done.
+func RegisterAuthHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
+	conn, err := grpc.Dial(endpoint, opts...)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if err != nil {
+			if cerr := conn.Close(); cerr != nil {
+				grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
+			}
+			return
+		}
+		go func() {
+			<-ctx.Done()
+			if cerr := conn.Close(); cerr != nil {
+				grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
+			}
+		}()
+	}()
+
+	return RegisterAuthHandler(ctx, mux, conn)
+}
+
+// RegisterAuthHandler registers the http handlers for service Auth to "mux".
+// The handlers forward requests to the grpc endpoint over "conn".
+func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
+	return RegisterAuthHandlerClient(ctx, mux, etcdserverpb.NewAuthClient(conn))
+}
+
+// RegisterAuthHandlerClient registers the http handlers for service Auth to "mux".
+// The handlers forward requests to the grpc endpoint over the given implementation of "AuthClient".
+// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "AuthClient"
+// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
+// "AuthClient" to call the correct interceptors.
+func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.AuthClient) error {
+
+	mux.Handle("POST", pattern_Auth_AuthEnable_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		if cn, ok := w.(http.CloseNotifier); ok {
+			go func(done <-chan struct{}, closed <-chan bool) {
+				select {
+				case <-done:
+				case <-closed:
+					cancel()
+				}
+			}(ctx.Done(), cn.CloseNotify())
+		}
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_Auth_AuthEnable_0(rctx, inboundMarshaler, client, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_Auth_AuthEnable_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
+	mux.Handle("POST", pattern_Auth_AuthDisable_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		if cn, ok := w.(http.CloseNotifier); ok {
+			go func(done <-chan struct{}, closed <-chan bool) {
+				select {
+				case <-done:
+				case <-closed:
+					cancel()
+				}
+			}(ctx.Done(), cn.CloseNotify())
+		}
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_Auth_AuthDisable_0(rctx, inboundMarshaler, client, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_Auth_AuthDisable_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
+	mux.Handle("POST", pattern_Auth_Authenticate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		if cn, ok := w.(http.CloseNotifier); ok {
+			go func(done <-chan struct{}, closed <-chan bool) {
+				select {
+				case <-done:
+				case <-closed:
+					cancel()
+				}
+			}(ctx.Done(), cn.CloseNotify())
+		}
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_Auth_Authenticate_0(rctx, inboundMarshaler, client, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_Auth_Authenticate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
+	mux.Handle("POST", pattern_Auth_UserAdd_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		if cn, ok := w.(http.CloseNotifier); ok {
+			go func(done <-chan struct{}, closed <-chan bool) {
+				select {
+				case <-done:
+				case <-closed:
+					cancel()
+				}
+			}(ctx.Done(), cn.CloseNotify())
+		}
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_Auth_UserAdd_0(rctx, inboundMarshaler, client, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_Auth_UserAdd_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
+	mux.Handle("POST", pattern_Auth_UserGet_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		if cn, ok := w.(http.CloseNotifier); ok {
+			go func(done <-chan struct{}, closed <-chan bool) {
+				select {
+				case <-done:
+				case <-closed:
+					cancel()
+				}
+			}(ctx.Done(), cn.CloseNotify())
+		}
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_Auth_UserGet_0(rctx, inboundMarshaler, client, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_Auth_UserGet_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
+	mux.Handle("POST", pattern_Auth_UserList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		if cn, ok := w.(http.CloseNotifier); ok {
+			go func(done <-chan struct{}, closed <-chan bool) {
+				select {
+				case <-done:
+				case <-closed:
+					cancel()
+				}
+			}(ctx.Done(), cn.CloseNotify())
+		}
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_Auth_UserList_0(rctx, inboundMarshaler, client, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_Auth_UserList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
+	mux.Handle("POST", pattern_Auth_UserDelete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		if cn, ok := w.(http.CloseNotifier); ok {
+			go func(done <-chan struct{}, closed <-chan bool) {
+				select {
+				case <-done:
+				case <-closed:
+					cancel()
+				}
+			}(ctx.Done(), cn.CloseNotify())
+		}
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_Auth_UserDelete_0(rctx, inboundMarshaler, client, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_Auth_UserDelete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
+	mux.Handle("POST", pattern_Auth_UserChangePassword_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		if cn, ok := w.(http.CloseNotifier); ok {
+			go func(done <-chan struct{}, closed <-chan bool) {
+				select {
+				case <-done:
+				case <-closed:
+					cancel()
+				}
+			}(ctx.Done(), cn.CloseNotify())
+		}
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_Auth_UserChangePassword_0(rctx, inboundMarshaler, client, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_Auth_UserChangePassword_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
+	mux.Handle("POST", pattern_Auth_UserGrantRole_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		if cn, ok := w.(http.CloseNotifier); ok {
+			go func(done <-chan struct{}, closed <-chan bool) {
+				select {
+				case <-done:
+				case <-closed:
+					cancel()
+				}
+			}(ctx.Done(), cn.CloseNotify())
+		}
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_Auth_UserGrantRole_0(rctx, inboundMarshaler, client, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_Auth_UserGrantRole_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
+	mux.Handle("POST", pattern_Auth_UserRevokeRole_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		if cn, ok := w.(http.CloseNotifier); ok {
+			go func(done <-chan struct{}, closed <-chan bool) {
+				select {
+				case <-done:
+				case <-closed:
+					cancel()
+				}
+			}(ctx.Done(), cn.CloseNotify())
+		}
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_Auth_UserRevokeRole_0(rctx, inboundMarshaler, client, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_Auth_UserRevokeRole_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
+	mux.Handle("POST", pattern_Auth_RoleAdd_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		if cn, ok := w.(http.CloseNotifier); ok {
+			go func(done <-chan struct{}, closed <-chan bool) {
+				select {
+				case <-done:
+				case <-closed:
+					cancel()
+				}
+			}(ctx.Done(), cn.CloseNotify())
+		}
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_Auth_RoleAdd_0(rctx, inboundMarshaler, client, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_Auth_RoleAdd_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
+	mux.Handle("POST", pattern_Auth_RoleGet_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		if cn, ok := w.(http.CloseNotifier); ok {
+			go func(done <-chan struct{}, closed <-chan bool) {
+				select {
+				case <-done:
+				case <-closed:
+					cancel()
+				}
+			}(ctx.Done(), cn.CloseNotify())
+		}
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_Auth_RoleGet_0(rctx, inboundMarshaler, client, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_Auth_RoleGet_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
+	mux.Handle("POST", pattern_Auth_RoleList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		if cn, ok := w.(http.CloseNotifier); ok {
+			go func(done <-chan struct{}, closed <-chan bool) {
+				select {
+				case <-done:
+				case <-closed:
+					cancel()
+				}
+			}(ctx.Done(), cn.CloseNotify())
+		}
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_Auth_RoleList_0(rctx, inboundMarshaler, client, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_Auth_RoleList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
+	mux.Handle("POST", pattern_Auth_RoleDelete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		if cn, ok := w.(http.CloseNotifier); ok {
+			go func(done <-chan struct{}, closed <-chan bool) {
+				select {
+				case <-done:
+				case <-closed:
+					cancel()
+				}
+			}(ctx.Done(), cn.CloseNotify())
+		}
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_Auth_RoleDelete_0(rctx, inboundMarshaler, client, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_Auth_RoleDelete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
+	mux.Handle("POST", pattern_Auth_RoleGrantPermission_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		if cn, ok := w.(http.CloseNotifier); ok {
+			go func(done <-chan struct{}, closed <-chan bool) {
+				select {
+				case <-done:
+				case <-closed:
+					cancel()
+				}
+			}(ctx.Done(), cn.CloseNotify())
+		}
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_Auth_RoleGrantPermission_0(rctx, inboundMarshaler, client, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_Auth_RoleGrantPermission_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
+	mux.Handle("POST", pattern_Auth_RoleRevokePermission_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		if cn, ok := w.(http.CloseNotifier); ok {
+			go func(done <-chan struct{}, closed <-chan bool) {
+				select {
+				case <-done:
+				case <-closed:
+					cancel()
+				}
+			}(ctx.Done(), cn.CloseNotify())
+		}
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_Auth_RoleRevokePermission_0(rctx, inboundMarshaler, client, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_Auth_RoleRevokePermission_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
+	return nil
+}
+
+var (
+	pattern_Auth_AuthEnable_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "auth", "enable"}, ""))
+
+	pattern_Auth_AuthDisable_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "auth", "disable"}, ""))
+
+	pattern_Auth_Authenticate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "auth", "authenticate"}, ""))
+
+	pattern_Auth_UserAdd_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "auth", "user", "add"}, ""))
+
+	pattern_Auth_UserGet_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "auth", "user", "get"}, ""))
+
+	pattern_Auth_UserList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "auth", "user", "list"}, ""))
+
+	pattern_Auth_UserDelete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "auth", "user", "delete"}, ""))
+
+	pattern_Auth_UserChangePassword_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "auth", "user", "changepw"}, ""))
+
+	pattern_Auth_UserGrantRole_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "auth", "user", "grant"}, ""))
+
+	pattern_Auth_UserRevokeRole_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "auth", "user", "revoke"}, ""))
+
+	pattern_Auth_RoleAdd_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "auth", "role", "add"}, ""))
+
+	pattern_Auth_RoleGet_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "auth", "role", "get"}, ""))
+
+	pattern_Auth_RoleList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "auth", "role", "list"}, ""))
+
+	pattern_Auth_RoleDelete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "auth", "role", "delete"}, ""))
+
+	pattern_Auth_RoleGrantPermission_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "auth", "role", "grant"}, ""))
+
+	pattern_Auth_RoleRevokePermission_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "auth", "role", "revoke"}, ""))
+)
+
+var (
+	forward_Auth_AuthEnable_0 = runtime.ForwardResponseMessage
+
+	forward_Auth_AuthDisable_0 = runtime.ForwardResponseMessage
+
+	forward_Auth_Authenticate_0 = runtime.ForwardResponseMessage
+
+	forward_Auth_UserAdd_0 = runtime.ForwardResponseMessage
+
+	forward_Auth_UserGet_0 = runtime.ForwardResponseMessage
+
+	forward_Auth_UserList_0 = runtime.ForwardResponseMessage
+
+	forward_Auth_UserDelete_0 = runtime.ForwardResponseMessage
+
+	forward_Auth_UserChangePassword_0 = runtime.ForwardResponseMessage
+
+	forward_Auth_UserGrantRole_0 = runtime.ForwardResponseMessage
+
+	forward_Auth_UserRevokeRole_0 = runtime.ForwardResponseMessage
+
+	forward_Auth_RoleAdd_0 = runtime.ForwardResponseMessage
+
+	forward_Auth_RoleGet_0 = runtime.ForwardResponseMessage
+
+	forward_Auth_RoleList_0 = runtime.ForwardResponseMessage
+
+	forward_Auth_RoleDelete_0 = runtime.ForwardResponseMessage
+
+	forward_Auth_RoleGrantPermission_0 = runtime.ForwardResponseMessage
+
+	forward_Auth_RoleRevokePermission_0 = runtime.ForwardResponseMessage
+)
diff --git a/vendor/github.com/coreos/etcd/etcdserver/membership/cluster.go b/vendor/github.com/coreos/etcd/etcdserver/membership/cluster.go
new file mode 100644
index 0000000..4f0b157
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/membership/cluster.go
@@ -0,0 +1,512 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package membership
+
+import (
+	"bytes"
+	"context"
+	"crypto/sha1"
+	"encoding/binary"
+	"encoding/json"
+	"fmt"
+	"path"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/coreos/etcd/mvcc/backend"
+	"github.com/coreos/etcd/pkg/netutil"
+	"github.com/coreos/etcd/pkg/types"
+	"github.com/coreos/etcd/raft"
+	"github.com/coreos/etcd/raft/raftpb"
+	"github.com/coreos/etcd/store"
+	"github.com/coreos/etcd/version"
+
+	"github.com/coreos/go-semver/semver"
+)
+
+// RaftCluster is a list of Members that belong to the same raft cluster.
+type RaftCluster struct {
+	id    types.ID
+	token string
+
+	store store.Store
+	be    backend.Backend
+
+	sync.Mutex // guards the fields below
+	version    *semver.Version
+	members    map[types.ID]*Member
+	// removed contains the IDs of removed members in the cluster.
+	// Removed IDs cannot be reused.
+	removed map[types.ID]bool
+}
+
+func NewClusterFromURLsMap(token string, urlsmap types.URLsMap) (*RaftCluster, error) {
+	c := NewCluster(token)
+	for name, urls := range urlsmap {
+		m := NewMember(name, urls, token, nil)
+		if _, ok := c.members[m.ID]; ok {
+			return nil, fmt.Errorf("member exists with identical ID %v", m)
+		}
+		if uint64(m.ID) == raft.None {
+			return nil, fmt.Errorf("cannot use %x as member id", raft.None)
+		}
+		c.members[m.ID] = m
+	}
+	c.genID()
+	return c, nil
+}
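
NewClusterFromURLsMap is the bootstrap path: each name/URL pair becomes a Member, duplicate IDs are rejected, and genID derives a deterministic cluster ID. A sketch of feeding it an --initial-cluster style string, assuming types.NewURLsMap parses "name=url,..." as it does elsewhere in this vendor tree (names and URLs are placeholders):

```go
// bootstrapCluster builds a RaftCluster for a two-member bootstrap.
func bootstrapCluster() (*RaftCluster, error) {
	urlsmap, err := types.NewURLsMap(
		"infra0=http://10.0.0.1:2380,infra1=http://10.0.0.2:2380")
	if err != nil {
		return nil, err
	}
	return NewClusterFromURLsMap("etcd-cluster-token", urlsmap)
}
```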
+
+func NewClusterFromMembers(token string, id types.ID, membs []*Member) *RaftCluster {
+	c := NewCluster(token)
+	c.id = id
+	for _, m := range membs {
+		c.members[m.ID] = m
+	}
+	return c
+}
+
+func NewCluster(token string) *RaftCluster {
+	return &RaftCluster{
+		token:   token,
+		members: make(map[types.ID]*Member),
+		removed: make(map[types.ID]bool),
+	}
+}
+
+func (c *RaftCluster) ID() types.ID { return c.id }
+
+func (c *RaftCluster) Members() []*Member {
+	c.Lock()
+	defer c.Unlock()
+	var ms MembersByID
+	for _, m := range c.members {
+		ms = append(ms, m.Clone())
+	}
+	sort.Sort(ms)
+	return []*Member(ms)
+}
+
+func (c *RaftCluster) Member(id types.ID) *Member {
+	c.Lock()
+	defer c.Unlock()
+	return c.members[id].Clone()
+}
+
+// MemberByName returns a Member with the given name if it exists.
+// If more than one member has the given name, it will panic.
+func (c *RaftCluster) MemberByName(name string) *Member {
+	c.Lock()
+	defer c.Unlock()
+	var memb *Member
+	for _, m := range c.members {
+		if m.Name == name {
+			if memb != nil {
+				plog.Panicf("two members with the given name %q exist", name)
+			}
+			memb = m
+		}
+	}
+	return memb.Clone()
+}
+
+func (c *RaftCluster) MemberIDs() []types.ID {
+	c.Lock()
+	defer c.Unlock()
+	var ids []types.ID
+	for _, m := range c.members {
+		ids = append(ids, m.ID)
+	}
+	sort.Sort(types.IDSlice(ids))
+	return ids
+}
+
+func (c *RaftCluster) IsIDRemoved(id types.ID) bool {
+	c.Lock()
+	defer c.Unlock()
+	return c.removed[id]
+}
+
+// PeerURLs returns a list of all peer addresses.
+// The returned list is sorted in ascending lexicographical order.
+func (c *RaftCluster) PeerURLs() []string {
+	c.Lock()
+	defer c.Unlock()
+	urls := make([]string, 0)
+	for _, p := range c.members {
+		urls = append(urls, p.PeerURLs...)
+	}
+	sort.Strings(urls)
+	return urls
+}
+
+// ClientURLs returns a list of all client addresses.
+// The returned list is sorted in ascending lexicographical order.
+func (c *RaftCluster) ClientURLs() []string {
+	c.Lock()
+	defer c.Unlock()
+	urls := make([]string, 0)
+	for _, p := range c.members {
+		urls = append(urls, p.ClientURLs...)
+	}
+	sort.Strings(urls)
+	return urls
+}
+
+func (c *RaftCluster) String() string {
+	c.Lock()
+	defer c.Unlock()
+	b := &bytes.Buffer{}
+	fmt.Fprintf(b, "{ClusterID:%s ", c.id)
+	var ms []string
+	for _, m := range c.members {
+		ms = append(ms, fmt.Sprintf("%+v", m))
+	}
+	fmt.Fprintf(b, "Members:[%s] ", strings.Join(ms, " "))
+	var ids []string
+	for id := range c.removed {
+		ids = append(ids, id.String())
+	}
+	fmt.Fprintf(b, "RemovedMemberIDs:[%s]}", strings.Join(ids, " "))
+	return b.String()
+}
+
+func (c *RaftCluster) genID() {
+	mIDs := c.MemberIDs()
+	b := make([]byte, 8*len(mIDs))
+	for i, id := range mIDs {
+		binary.BigEndian.PutUint64(b[8*i:], uint64(id))
+	}
+	hash := sha1.Sum(b)
+	c.id = types.ID(binary.BigEndian.Uint64(hash[:8]))
+}
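+
+// clusterIDForMembers is an editor's illustrative sketch (hypothetical name,
+// not part of upstream etcd) of the derivation genID performs: the sorted
+// member IDs are encoded as 8-byte big-endian words, hashed with SHA-1, and
+// the first 8 bytes of the digest become the cluster ID. Every node that
+// bootstraps from the same member set therefore derives the same cluster ID.
+func clusterIDForMembers(mIDs []types.ID) types.ID {
+	sort.Sort(types.IDSlice(mIDs)) // genID consumes MemberIDs(), which are sorted
+	b := make([]byte, 8*len(mIDs))
+	for i, id := range mIDs {
+		binary.BigEndian.PutUint64(b[8*i:], uint64(id))
+	}
+	hash := sha1.Sum(b)
+	return types.ID(binary.BigEndian.Uint64(hash[:8]))
+}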
+
+func (c *RaftCluster) SetID(id types.ID) { c.id = id }
+
+func (c *RaftCluster) SetStore(st store.Store) { c.store = st }
+
+func (c *RaftCluster) SetBackend(be backend.Backend) {
+	c.be = be
+	mustCreateBackendBuckets(c.be)
+}
+
+func (c *RaftCluster) Recover(onSet func(*semver.Version)) {
+	c.Lock()
+	defer c.Unlock()
+
+	c.members, c.removed = membersFromStore(c.store)
+	c.version = clusterVersionFromStore(c.store)
+	mustDetectDowngrade(c.version)
+	onSet(c.version)
+
+	for _, m := range c.members {
+		plog.Infof("added member %s %v to cluster %s from store", m.ID, m.PeerURLs, c.id)
+	}
+	if c.version != nil {
+		plog.Infof("set the cluster version to %v from store", version.Cluster(c.version.String()))
+	}
+}
+
+// ValidateConfigurationChange takes a proposed ConfChange and
+// ensures that it is still valid.
+func (c *RaftCluster) ValidateConfigurationChange(cc raftpb.ConfChange) error {
+	members, removed := membersFromStore(c.store)
+	id := types.ID(cc.NodeID)
+	if removed[id] {
+		return ErrIDRemoved
+	}
+	switch cc.Type {
+	case raftpb.ConfChangeAddNode:
+		if members[id] != nil {
+			return ErrIDExists
+		}
+		urls := make(map[string]bool)
+		for _, m := range members {
+			for _, u := range m.PeerURLs {
+				urls[u] = true
+			}
+		}
+		m := new(Member)
+		if err := json.Unmarshal(cc.Context, m); err != nil {
+			plog.Panicf("unmarshal member should never fail: %v", err)
+		}
+		for _, u := range m.PeerURLs {
+			if urls[u] {
+				return ErrPeerURLexists
+			}
+		}
+	case raftpb.ConfChangeRemoveNode:
+		if members[id] == nil {
+			return ErrIDNotFound
+		}
+	case raftpb.ConfChangeUpdateNode:
+		if members[id] == nil {
+			return ErrIDNotFound
+		}
+		urls := make(map[string]bool)
+		for _, m := range members {
+			if m.ID == id {
+				continue
+			}
+			for _, u := range m.PeerURLs {
+				urls[u] = true
+			}
+		}
+		m := new(Member)
+		if err := json.Unmarshal(cc.Context, m); err != nil {
+			plog.Panicf("unmarshal member should never fail: %v", err)
+		}
+		for _, u := range m.PeerURLs {
+			if urls[u] {
+				return ErrPeerURLexists
+			}
+		}
+	default:
+		plog.Panicf("ConfChange type should be either AddNode, RemoveNode or UpdateNode")
+	}
+	return nil
+}
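+
+// Worked example (editor's note): an AddNode conf change whose Context
+// unmarshals to a member reusing a peer URL already held by another member
+// fails with ErrPeerURLexists, while an UpdateNode may keep its own URLs,
+// since the member being updated is skipped when collecting URLs in use.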
+
+// AddMember adds a new Member into the cluster, and saves the given member's
+// raftAttributes into the store. The given member should have empty attributes.
+// A Member with a matching id must not exist.
+func (c *RaftCluster) AddMember(m *Member) {
+	c.Lock()
+	defer c.Unlock()
+	if c.store != nil {
+		mustSaveMemberToStore(c.store, m)
+	}
+	if c.be != nil {
+		mustSaveMemberToBackend(c.be, m)
+	}
+
+	c.members[m.ID] = m
+
+	plog.Infof("added member %s %v to cluster %s", m.ID, m.PeerURLs, c.id)
+}
+
+// RemoveMember removes a member from the store.
+// The given id MUST exist, or the function panics.
+func (c *RaftCluster) RemoveMember(id types.ID) {
+	c.Lock()
+	defer c.Unlock()
+	if c.store != nil {
+		mustDeleteMemberFromStore(c.store, id)
+	}
+	if c.be != nil {
+		mustDeleteMemberFromBackend(c.be, id)
+	}
+
+	delete(c.members, id)
+	c.removed[id] = true
+
+	plog.Infof("removed member %s from cluster %s", id, c.id)
+}
+
+func (c *RaftCluster) UpdateAttributes(id types.ID, attr Attributes) {
+	c.Lock()
+	defer c.Unlock()
+	if m, ok := c.members[id]; ok {
+		m.Attributes = attr
+		if c.store != nil {
+			mustUpdateMemberAttrInStore(c.store, m)
+		}
+		if c.be != nil {
+			mustSaveMemberToBackend(c.be, m)
+		}
+		return
+	}
+	_, ok := c.removed[id]
+	if !ok {
+		plog.Panicf("error updating attributes of unknown member %s", id)
+	}
+	plog.Warningf("skipped updating attributes of removed member %s", id)
+}
+
+func (c *RaftCluster) UpdateRaftAttributes(id types.ID, raftAttr RaftAttributes) {
+	c.Lock()
+	defer c.Unlock()
+
+	c.members[id].RaftAttributes = raftAttr
+	if c.store != nil {
+		mustUpdateMemberInStore(c.store, c.members[id])
+	}
+	if c.be != nil {
+		mustSaveMemberToBackend(c.be, c.members[id])
+	}
+
+	plog.Noticef("updated member %s %v in cluster %s", id, raftAttr.PeerURLs, c.id)
+}
+
+func (c *RaftCluster) Version() *semver.Version {
+	c.Lock()
+	defer c.Unlock()
+	if c.version == nil {
+		return nil
+	}
+	return semver.Must(semver.NewVersion(c.version.String()))
+}
+
+func (c *RaftCluster) SetVersion(ver *semver.Version, onSet func(*semver.Version)) {
+	c.Lock()
+	defer c.Unlock()
+	if c.version != nil {
+		plog.Noticef("updated the cluster version from %v to %v", version.Cluster(c.version.String()), version.Cluster(ver.String()))
+	} else {
+		plog.Noticef("set the initial cluster version to %v", version.Cluster(ver.String()))
+	}
+	c.version = ver
+	mustDetectDowngrade(c.version)
+	if c.store != nil {
+		mustSaveClusterVersionToStore(c.store, ver)
+	}
+	if c.be != nil {
+		mustSaveClusterVersionToBackend(c.be, ver)
+	}
+	onSet(ver)
+}
+
+func (c *RaftCluster) IsReadyToAddNewMember() bool {
+	nmembers := 1
+	nstarted := 0
+
+	for _, member := range c.members {
+		if member.IsStarted() {
+			nstarted++
+		}
+		nmembers++
+	}
+
+	if nstarted == 1 && nmembers == 2 {
+		// a case of adding a new node to a 1-member cluster to restore cluster data
+		// https://github.com/coreos/etcd/blob/master/Documentation/v2/admin_guide.md#restoring-the-cluster
+
+		plog.Debugf("The number of started member is 1. This cluster can accept add member request.")
+		return true
+	}
+
+	nquorum := nmembers/2 + 1
+	if nstarted < nquorum {
+		plog.Warningf("Reject add member request: the number of started member (%d) will be less than the quorum number of the cluster (%d)", nstarted, nquorum)
+		return false
+	}
+
+	return true
+}
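+
+// Worked example (editor's note): adding a fourth member to a healthy
+// 3-member cluster gives nmembers = 3+1 = 4 and nquorum = 4/2+1 = 3, so the
+// add request is accepted only while at least 3 existing members are started.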
+
+func (c *RaftCluster) IsReadyToRemoveMember(id uint64) bool {
+	nmembers := 0
+	nstarted := 0
+
+	for _, member := range c.members {
+		if uint64(member.ID) == id {
+			continue
+		}
+
+		if member.IsStarted() {
+			nstarted++
+		}
+		nmembers++
+	}
+
+	nquorum := nmembers/2 + 1
+	if nstarted < nquorum {
+		plog.Warningf("Reject remove member request: the number of started member (%d) will be less than the quorum number of the cluster (%d)", nstarted, nquorum)
+		return false
+	}
+
+	return true
+}
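+
+// Worked example (editor's note): removing one member of a 3-member cluster
+// leaves nmembers = 2 and nquorum = 2/2+1 = 2, so the removal is rejected
+// unless both remaining members are started.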
+
+func membersFromStore(st store.Store) (map[types.ID]*Member, map[types.ID]bool) {
+	members := make(map[types.ID]*Member)
+	removed := make(map[types.ID]bool)
+	e, err := st.Get(StoreMembersPrefix, true, true)
+	if err != nil {
+		if isKeyNotFound(err) {
+			return members, removed
+		}
+		plog.Panicf("get storeMembers should never fail: %v", err)
+	}
+	for _, n := range e.Node.Nodes {
+		var m *Member
+		m, err = nodeToMember(n)
+		if err != nil {
+			plog.Panicf("nodeToMember should never fail: %v", err)
+		}
+		members[m.ID] = m
+	}
+
+	e, err = st.Get(storeRemovedMembersPrefix, true, true)
+	if err != nil {
+		if isKeyNotFound(err) {
+			return members, removed
+		}
+		plog.Panicf("get storeRemovedMembers should never fail: %v", err)
+	}
+	for _, n := range e.Node.Nodes {
+		removed[MustParseMemberIDFromKey(n.Key)] = true
+	}
+	return members, removed
+}
+
+func clusterVersionFromStore(st store.Store) *semver.Version {
+	e, err := st.Get(path.Join(storePrefix, "version"), false, false)
+	if err != nil {
+		if isKeyNotFound(err) {
+			return nil
+		}
+		plog.Panicf("unexpected error (%v) when getting cluster version from store", err)
+	}
+	return semver.Must(semver.NewVersion(*e.Node.Value))
+}
+
+// ValidateClusterAndAssignIDs validates the local cluster by matching the PeerURLs
+// with the existing cluster. If the validation succeeds, it assigns the IDs
+// from the existing cluster to the local cluster.
+// If the validation fails, an error will be returned.
+func ValidateClusterAndAssignIDs(local *RaftCluster, existing *RaftCluster) error {
+	ems := existing.Members()
+	lms := local.Members()
+	if len(ems) != len(lms) {
+		return fmt.Errorf("member count is unequal")
+	}
+	sort.Sort(MembersByPeerURLs(ems))
+	sort.Sort(MembersByPeerURLs(lms))
+
+	ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second)
+	defer cancel()
+	for i := range ems {
+		if ok, err := netutil.URLStringsEqual(ctx, ems[i].PeerURLs, lms[i].PeerURLs); !ok {
+			return fmt.Errorf("unmatched member while checking PeerURLs (%v)", err)
+		}
+		lms[i].ID = ems[i].ID
+	}
+	local.members = make(map[types.ID]*Member)
+	for _, m := range lms {
+		local.members[m.ID] = m
+	}
+	return nil
+}
+
+func mustDetectDowngrade(cv *semver.Version) {
+	lv := semver.Must(semver.NewVersion(version.Version))
+	// only keep major.minor version for comparison against cluster version
+	lv = &semver.Version{Major: lv.Major, Minor: lv.Minor}
+	if cv != nil && lv.LessThan(*cv) {
+		plog.Fatalf("cluster cannot be downgraded (current version: %s is lower than determined cluster version: %s).", version.Version, version.Cluster(cv.String()))
+	}
+}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/membership/doc.go b/vendor/github.com/coreos/etcd/etcdserver/membership/doc.go
new file mode 100644
index 0000000..b07fb2d
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/membership/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package membership describes individual etcd members and clusters of members.
+package membership
diff --git a/vendor/github.com/coreos/etcd/etcdserver/membership/errors.go b/vendor/github.com/coreos/etcd/etcdserver/membership/errors.go
new file mode 100644
index 0000000..e4d36af
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/membership/errors.go
@@ -0,0 +1,33 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package membership
+
+import (
+	"errors"
+
+	etcdErr "github.com/coreos/etcd/error"
+)
+
+var (
+	ErrIDRemoved     = errors.New("membership: ID removed")
+	ErrIDExists      = errors.New("membership: ID exists")
+	ErrIDNotFound    = errors.New("membership: ID not found")
+	ErrPeerURLexists = errors.New("membership: peerURL exists")
+)
+
+func isKeyNotFound(err error) bool {
+	e, ok := err.(*etcdErr.Error)
+	return ok && e.ErrorCode == etcdErr.EcodeKeyNotFound
+}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/membership/member.go b/vendor/github.com/coreos/etcd/etcdserver/membership/member.go
new file mode 100644
index 0000000..6de74d2
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/membership/member.go
@@ -0,0 +1,124 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package membership
+
+import (
+	"crypto/sha1"
+	"encoding/binary"
+	"fmt"
+	"math/rand"
+	"sort"
+	"time"
+
+	"github.com/coreos/etcd/pkg/types"
+	"github.com/coreos/pkg/capnslog"
+)
+
+var (
+	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/membership")
+)
+
+// RaftAttributes represents the raft related attributes of an etcd member.
+type RaftAttributes struct {
+	// PeerURLs is the list of peers in the raft cluster.
+	// TODO(philips): ensure these are URLs
+	PeerURLs []string `json:"peerURLs"`
+}
+
+// Attributes represents all the non-raft related attributes of an etcd member.
+type Attributes struct {
+	Name       string   `json:"name,omitempty"`
+	ClientURLs []string `json:"clientURLs,omitempty"`
+}
+
+type Member struct {
+	ID types.ID `json:"id"`
+	RaftAttributes
+	Attributes
+}
+
+// NewMember creates a Member without an ID and generates one based on the
+// cluster name, peer URLs, and time. This is used when bootstrapping or adding a new member.
+func NewMember(name string, peerURLs types.URLs, clusterName string, now *time.Time) *Member {
+	m := &Member{
+		RaftAttributes: RaftAttributes{PeerURLs: peerURLs.StringSlice()},
+		Attributes:     Attributes{Name: name},
+	}
+
+	var b []byte
+	sort.Strings(m.PeerURLs)
+	for _, p := range m.PeerURLs {
+		b = append(b, []byte(p)...)
+	}
+
+	b = append(b, []byte(clusterName)...)
+	if now != nil {
+		b = append(b, []byte(fmt.Sprintf("%d", now.Unix()))...)
+	}
+
+	hash := sha1.Sum(b)
+	m.ID = types.ID(binary.BigEndian.Uint64(hash[:8]))
+	return m
+}
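+
+// exampleDeterministicMemberID is an editor's sketch (hypothetical helper,
+// not part of upstream etcd): with a nil timestamp, NewMember is
+// deterministic, which is what lets every node of a statically bootstrapped
+// cluster compute identical member IDs from the name, peer URLs, and
+// cluster name alone.
+func exampleDeterministicMemberID() types.ID {
+	urls, err := types.NewURLs([]string{"http://10.0.0.1:2380"})
+	if err != nil {
+		plog.Panicf("parse peer urls should never fail: %v", err)
+	}
+	return NewMember("infra1", urls, "etcd-cluster-1", nil).ID
+}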
+
+// PickPeerURL chooses a random address from a given Member's PeerURLs.
+// It panics if the Member has no PeerURLs available.
+func (m *Member) PickPeerURL() string {
+	if len(m.PeerURLs) == 0 {
+		plog.Panicf("member should always have some peer url")
+	}
+	return m.PeerURLs[rand.Intn(len(m.PeerURLs))]
+}
+
+func (m *Member) Clone() *Member {
+	if m == nil {
+		return nil
+	}
+	mm := &Member{
+		ID: m.ID,
+		Attributes: Attributes{
+			Name: m.Name,
+		},
+	}
+	if m.PeerURLs != nil {
+		mm.PeerURLs = make([]string, len(m.PeerURLs))
+		copy(mm.PeerURLs, m.PeerURLs)
+	}
+	if m.ClientURLs != nil {
+		mm.ClientURLs = make([]string, len(m.ClientURLs))
+		copy(mm.ClientURLs, m.ClientURLs)
+	}
+	return mm
+}
+
+func (m *Member) IsStarted() bool {
+	return len(m.Name) != 0
+}
+
+// MembersByID implements sort by ID interface
+type MembersByID []*Member
+
+func (ms MembersByID) Len() int           { return len(ms) }
+func (ms MembersByID) Less(i, j int) bool { return ms[i].ID < ms[j].ID }
+func (ms MembersByID) Swap(i, j int)      { ms[i], ms[j] = ms[j], ms[i] }
+
+// MembersByPeerURLs implements sort by peer urls interface
+type MembersByPeerURLs []*Member
+
+func (ms MembersByPeerURLs) Len() int { return len(ms) }
+func (ms MembersByPeerURLs) Less(i, j int) bool {
+	return ms[i].PeerURLs[0] < ms[j].PeerURLs[0]
+}
+func (ms MembersByPeerURLs) Swap(i, j int) { ms[i], ms[j] = ms[j], ms[i] }
diff --git a/vendor/github.com/coreos/etcd/etcdserver/membership/store.go b/vendor/github.com/coreos/etcd/etcdserver/membership/store.go
new file mode 100644
index 0000000..d3f8f24
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/membership/store.go
@@ -0,0 +1,193 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package membership
+
+import (
+	"encoding/json"
+	"fmt"
+	"path"
+
+	"github.com/coreos/etcd/mvcc/backend"
+	"github.com/coreos/etcd/pkg/types"
+	"github.com/coreos/etcd/store"
+
+	"github.com/coreos/go-semver/semver"
+)
+
+const (
+	attributesSuffix     = "attributes"
+	raftAttributesSuffix = "raftAttributes"
+
+	// storePrefix is the prefix for storing membership-related information in the store provided by the store pkg.
+	storePrefix = "/0"
+)
+
+var (
+	membersBucketName        = []byte("members")
+	membersRemovedBucketName = []byte("members_removed")
+	clusterBucketName        = []byte("cluster")
+
+	StoreMembersPrefix        = path.Join(storePrefix, "members")
+	storeRemovedMembersPrefix = path.Join(storePrefix, "removed_members")
+)
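+
+// For illustration (editor's note, with a made-up member ID): under
+// storePrefix "/0", a member 8e9e05c52164694d keeps its raft attributes at
+// "/0/members/8e9e05c52164694d/raftAttributes", its attributes at
+// "/0/members/8e9e05c52164694d/attributes", and is recorded under
+// "/0/removed_members/8e9e05c52164694d" once removed.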
+
+func mustSaveMemberToBackend(be backend.Backend, m *Member) {
+	mkey := backendMemberKey(m.ID)
+	mvalue, err := json.Marshal(m)
+	if err != nil {
+		plog.Panicf("marshal raftAttributes should never fail: %v", err)
+	}
+
+	tx := be.BatchTx()
+	tx.Lock()
+	tx.UnsafePut(membersBucketName, mkey, mvalue)
+	tx.Unlock()
+}
+
+func mustDeleteMemberFromBackend(be backend.Backend, id types.ID) {
+	mkey := backendMemberKey(id)
+
+	tx := be.BatchTx()
+	tx.Lock()
+	tx.UnsafeDelete(membersBucketName, mkey)
+	tx.UnsafePut(membersRemovedBucketName, mkey, []byte("removed"))
+	tx.Unlock()
+}
+
+func mustSaveClusterVersionToBackend(be backend.Backend, ver *semver.Version) {
+	ckey := backendClusterVersionKey()
+
+	tx := be.BatchTx()
+	tx.Lock()
+	defer tx.Unlock()
+	tx.UnsafePut(clusterBucketName, ckey, []byte(ver.String()))
+}
+
+func mustSaveMemberToStore(s store.Store, m *Member) {
+	b, err := json.Marshal(m.RaftAttributes)
+	if err != nil {
+		plog.Panicf("marshal raftAttributes should never fail: %v", err)
+	}
+	p := path.Join(MemberStoreKey(m.ID), raftAttributesSuffix)
+	if _, err := s.Create(p, false, string(b), false, store.TTLOptionSet{ExpireTime: store.Permanent}); err != nil {
+		plog.Panicf("create raftAttributes should never fail: %v", err)
+	}
+}
+
+func mustDeleteMemberFromStore(s store.Store, id types.ID) {
+	if _, err := s.Delete(MemberStoreKey(id), true, true); err != nil {
+		plog.Panicf("delete member should never fail: %v", err)
+	}
+	if _, err := s.Create(RemovedMemberStoreKey(id), false, "", false, store.TTLOptionSet{ExpireTime: store.Permanent}); err != nil {
+		plog.Panicf("create removedMember should never fail: %v", err)
+	}
+}
+
+func mustUpdateMemberInStore(s store.Store, m *Member) {
+	b, err := json.Marshal(m.RaftAttributes)
+	if err != nil {
+		plog.Panicf("marshal raftAttributes should never fail: %v", err)
+	}
+	p := path.Join(MemberStoreKey(m.ID), raftAttributesSuffix)
+	if _, err := s.Update(p, string(b), store.TTLOptionSet{ExpireTime: store.Permanent}); err != nil {
+		plog.Panicf("update raftAttributes should never fail: %v", err)
+	}
+}
+
+func mustUpdateMemberAttrInStore(s store.Store, m *Member) {
+	b, err := json.Marshal(m.Attributes)
+	if err != nil {
+		plog.Panicf("marshal raftAttributes should never fail: %v", err)
+	}
+	p := path.Join(MemberStoreKey(m.ID), attributesSuffix)
+	if _, err := s.Set(p, false, string(b), store.TTLOptionSet{ExpireTime: store.Permanent}); err != nil {
+		plog.Panicf("update raftAttributes should never fail: %v", err)
+	}
+}
+
+func mustSaveClusterVersionToStore(s store.Store, ver *semver.Version) {
+	if _, err := s.Set(StoreClusterVersionKey(), false, ver.String(), store.TTLOptionSet{ExpireTime: store.Permanent}); err != nil {
+		plog.Panicf("save cluster version should never fail: %v", err)
+	}
+}
+
+// nodeToMember builds a member from a key-value node.
+// The child nodes of the given node MUST be sorted by key.
+func nodeToMember(n *store.NodeExtern) (*Member, error) {
+	m := &Member{ID: MustParseMemberIDFromKey(n.Key)}
+	attrs := make(map[string][]byte)
+	raftAttrKey := path.Join(n.Key, raftAttributesSuffix)
+	attrKey := path.Join(n.Key, attributesSuffix)
+	for _, nn := range n.Nodes {
+		if nn.Key != raftAttrKey && nn.Key != attrKey {
+			return nil, fmt.Errorf("unknown key %q", nn.Key)
+		}
+		attrs[nn.Key] = []byte(*nn.Value)
+	}
+	if data := attrs[raftAttrKey]; data != nil {
+		if err := json.Unmarshal(data, &m.RaftAttributes); err != nil {
+			return nil, fmt.Errorf("unmarshal raftAttributes error: %v", err)
+		}
+	} else {
+		return nil, fmt.Errorf("raftAttributes key doesn't exist")
+	}
+	if data := attrs[attrKey]; data != nil {
+		if err := json.Unmarshal(data, &m.Attributes); err != nil {
+			return m, fmt.Errorf("unmarshal attributes error: %v", err)
+		}
+	}
+	return m, nil
+}
+
+func backendMemberKey(id types.ID) []byte {
+	return []byte(id.String())
+}
+
+func backendClusterVersionKey() []byte {
+	return []byte("clusterVersion")
+}
+
+func mustCreateBackendBuckets(be backend.Backend) {
+	tx := be.BatchTx()
+	tx.Lock()
+	defer tx.Unlock()
+	tx.UnsafeCreateBucket(membersBucketName)
+	tx.UnsafeCreateBucket(membersRemovedBucketName)
+	tx.UnsafeCreateBucket(clusterBucketName)
+}
+
+func MemberStoreKey(id types.ID) string {
+	return path.Join(StoreMembersPrefix, id.String())
+}
+
+func StoreClusterVersionKey() string {
+	return path.Join(storePrefix, "version")
+}
+
+func MemberAttributesStorePath(id types.ID) string {
+	return path.Join(MemberStoreKey(id), attributesSuffix)
+}
+
+func MustParseMemberIDFromKey(key string) types.ID {
+	id, err := types.IDFromString(path.Base(key))
+	if err != nil {
+		plog.Panicf("unexpected parse member id error: %v", err)
+	}
+	return id
+}
+
+func RemovedMemberStoreKey(id types.ID) string {
+	return path.Join(storeRemovedMembersPrefix, id.String())
+}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/metrics.go b/vendor/github.com/coreos/etcd/etcdserver/metrics.go
new file mode 100644
index 0000000..10f8a47
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/metrics.go
@@ -0,0 +1,177 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+	goruntime "runtime"
+	"time"
+
+	"github.com/coreos/etcd/pkg/runtime"
+	"github.com/coreos/etcd/version"
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+var (
+	hasLeader = prometheus.NewGauge(prometheus.GaugeOpts{
+		Namespace: "etcd",
+		Subsystem: "server",
+		Name:      "has_leader",
+		Help:      "Whether or not a leader exists. 1 is existence, 0 is not.",
+	})
+	isLeader = prometheus.NewGauge(prometheus.GaugeOpts{
+		Namespace: "etcd",
+		Subsystem: "server",
+		Name:      "is_leader",
+		Help:      "Whether or not this member is a leader. 1 if is, 0 otherwise.",
+	})
+	leaderChanges = prometheus.NewCounter(prometheus.CounterOpts{
+		Namespace: "etcd",
+		Subsystem: "server",
+		Name:      "leader_changes_seen_total",
+		Help:      "The number of leader changes seen.",
+	})
+	heartbeatSendFailures = prometheus.NewCounter(prometheus.CounterOpts{
+		Namespace: "etcd",
+		Subsystem: "server",
+		Name:      "heartbeat_send_failures_total",
+		Help:      "The total number of leader heartbeat send failures (likely overloaded from slow disk).",
+	})
+	slowApplies = prometheus.NewCounter(prometheus.CounterOpts{
+		Namespace: "etcd",
+		Subsystem: "server",
+		Name:      "slow_apply_total",
+		Help:      "The total number of slow apply requests (likely overloaded from slow disk).",
+	})
+	proposalsCommitted = prometheus.NewGauge(prometheus.GaugeOpts{
+		Namespace: "etcd",
+		Subsystem: "server",
+		Name:      "proposals_committed_total",
+		Help:      "The total number of consensus proposals committed.",
+	})
+	proposalsApplied = prometheus.NewGauge(prometheus.GaugeOpts{
+		Namespace: "etcd",
+		Subsystem: "server",
+		Name:      "proposals_applied_total",
+		Help:      "The total number of consensus proposals applied.",
+	})
+	proposalsPending = prometheus.NewGauge(prometheus.GaugeOpts{
+		Namespace: "etcd",
+		Subsystem: "server",
+		Name:      "proposals_pending",
+		Help:      "The current number of pending proposals to commit.",
+	})
+	proposalsFailed = prometheus.NewCounter(prometheus.CounterOpts{
+		Namespace: "etcd",
+		Subsystem: "server",
+		Name:      "proposals_failed_total",
+		Help:      "The total number of failed proposals seen.",
+	})
+	leaseExpired = prometheus.NewCounter(prometheus.CounterOpts{
+		Namespace: "etcd_debugging",
+		Subsystem: "server",
+		Name:      "lease_expired_total",
+		Help:      "The total number of expired leases.",
+	})
+	slowReadIndex = prometheus.NewCounter(prometheus.CounterOpts{
+		Namespace: "etcd",
+		Subsystem: "server",
+		Name:      "slow_read_indexes_total",
+		Help:      "The total number of pending read indexes not in sync with leader's or timed out read index requests.",
+	})
+	readIndexFailed = prometheus.NewCounter(prometheus.CounterOpts{
+		Namespace: "etcd",
+		Subsystem: "server",
+		Name:      "read_indexes_failed_total",
+		Help:      "The total number of failed read indexes seen.",
+	})
+	quotaBackendBytes = prometheus.NewGauge(prometheus.GaugeOpts{
+		Namespace: "etcd",
+		Subsystem: "server",
+		Name:      "quota_backend_bytes",
+		Help:      "Current backend storage quota size in bytes.",
+	})
+	currentVersion = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+		Namespace: "etcd",
+		Subsystem: "server",
+		Name:      "version",
+		Help:      "Which version is running. 1 for 'server_version' label with current version.",
+	},
+		[]string{"server_version"})
+	currentGoVersion = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+		Namespace: "etcd",
+		Subsystem: "server",
+		Name:      "go_version",
+		Help:      "Which Go version server is running with. 1 for 'server_go_version' label with current version.",
+	},
+		[]string{"server_go_version"})
+	serverID = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+		Namespace: "etcd",
+		Subsystem: "server",
+		Name:      "id",
+		Help:      "Server or member ID in hexadecimal format. 1 for 'server_id' label with current ID.",
+	},
+		[]string{"server_id"})
+)
+
+func init() {
+	prometheus.MustRegister(hasLeader)
+	prometheus.MustRegister(isLeader)
+	prometheus.MustRegister(leaderChanges)
+	prometheus.MustRegister(heartbeatSendFailures)
+	prometheus.MustRegister(slowApplies)
+	prometheus.MustRegister(proposalsCommitted)
+	prometheus.MustRegister(proposalsApplied)
+	prometheus.MustRegister(proposalsPending)
+	prometheus.MustRegister(proposalsFailed)
+	prometheus.MustRegister(leaseExpired)
+	prometheus.MustRegister(slowReadIndex)
+	prometheus.MustRegister(readIndexFailed)
+	prometheus.MustRegister(quotaBackendBytes)
+	prometheus.MustRegister(currentVersion)
+	prometheus.MustRegister(currentGoVersion)
+	prometheus.MustRegister(serverID)
+
+	currentVersion.With(prometheus.Labels{
+		"server_version": version.Version,
+	}).Set(1)
+	currentGoVersion.With(prometheus.Labels{
+		"server_go_version": goruntime.Version(),
+	}).Set(1)
+}
+
+func monitorFileDescriptor(done <-chan struct{}) {
+	ticker := time.NewTicker(5 * time.Second)
+	defer ticker.Stop()
+	for {
+		used, err := runtime.FDUsage()
+		if err != nil {
+			plog.Errorf("cannot monitor file descriptor usage (%v)", err)
+			return
+		}
+		limit, err := runtime.FDLimit()
+		if err != nil {
+			plog.Errorf("cannot monitor file descriptor usage (%v)", err)
+			return
+		}
+		if used >= limit/5*4 {
+			plog.Warningf("80%% of the file descriptor limit is used [used = %d, limit = %d]", used, limit)
+		}
+		select {
+		case <-ticker.C:
+		case <-done:
+			return
+		}
+	}
+}
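+
+// Worked example (editor's note): with a file descriptor limit of 1024, the
+// warning above fires once used >= 1024/5*4 = 816, i.e. at roughly 80%
+// usage, re-checked every 5 seconds.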
diff --git a/vendor/github.com/coreos/etcd/etcdserver/quota.go b/vendor/github.com/coreos/etcd/etcdserver/quota.go
new file mode 100644
index 0000000..882eb76
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/quota.go
@@ -0,0 +1,124 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+
+const (
+	// DefaultQuotaBytes is the number of bytes the backend Size may
+	// consume before exceeding the space quota.
+	DefaultQuotaBytes = int64(2 * 1024 * 1024 * 1024) // 2GB
+	// MaxQuotaBytes is the maximum number of bytes suggested for a backend
+	// quota. A larger quota may lead to degraded performance.
+	MaxQuotaBytes = int64(8 * 1024 * 1024 * 1024) // 8GB
+)
+
+// Quota represents an arbitrary quota against arbitrary requests. Each request
+// costs some charge; if there is not enough remaining charge, then there are
+// too few resources available within the quota to apply the request.
+type Quota interface {
+	// Available judges whether the given request fits within the quota.
+	Available(req interface{}) bool
+	// Cost computes the charge against the quota for a given request.
+	Cost(req interface{}) int
+	// Remaining is the amount of charge left for the quota.
+	Remaining() int64
+}
+
+type passthroughQuota struct{}
+
+func (*passthroughQuota) Available(interface{}) bool { return true }
+func (*passthroughQuota) Cost(interface{}) int       { return 0 }
+func (*passthroughQuota) Remaining() int64           { return 1 }
+
+type backendQuota struct {
+	s               *EtcdServer
+	maxBackendBytes int64
+}
+
+const (
+	// leaseOverhead is an estimate for the cost of storing a lease
+	leaseOverhead = 64
+	// kvOverhead is an estimate for the cost of storing a key's metadata
+	kvOverhead = 256
+)
+
+func NewBackendQuota(s *EtcdServer) Quota {
+	quotaBackendBytes.Set(float64(s.Cfg.QuotaBackendBytes))
+
+	if s.Cfg.QuotaBackendBytes < 0 {
+		// disable quotas if negative
+		plog.Warningf("disabling backend quota")
+		return &passthroughQuota{}
+	}
+
+	if s.Cfg.QuotaBackendBytes == 0 {
+		// use default size if no quota size given
+		quotaBackendBytes.Set(float64(DefaultQuotaBytes))
+		return &backendQuota{s, DefaultQuotaBytes}
+	}
+
+	if s.Cfg.QuotaBackendBytes > MaxQuotaBytes {
+		plog.Warningf("backend quota %v exceeds maximum recommended quota %v", s.Cfg.QuotaBackendBytes, MaxQuotaBytes)
+	}
+	return &backendQuota{s, s.Cfg.QuotaBackendBytes}
+}
+
+func (b *backendQuota) Available(v interface{}) bool {
+	// TODO: maybe optimize backend.Size()
+	return b.s.Backend().Size()+int64(b.Cost(v)) < b.maxBackendBytes
+}
+
+func (b *backendQuota) Cost(v interface{}) int {
+	switch r := v.(type) {
+	case *pb.PutRequest:
+		return costPut(r)
+	case *pb.TxnRequest:
+		return costTxn(r)
+	case *pb.LeaseGrantRequest:
+		return leaseOverhead
+	default:
+		panic("unexpected cost")
+	}
+}
+
+func costPut(r *pb.PutRequest) int { return kvOverhead + len(r.Key) + len(r.Value) }
+
+func costTxnReq(u *pb.RequestOp) int {
+	r := u.GetRequestPut()
+	if r == nil {
+		return 0
+	}
+	return costPut(r)
+}
+
+func costTxn(r *pb.TxnRequest) int {
+	sizeSuccess := 0
+	for _, u := range r.Success {
+		sizeSuccess += costTxnReq(u)
+	}
+	sizeFailure := 0
+	for _, u := range r.Failure {
+		sizeFailure += costTxnReq(u)
+	}
+	if sizeFailure > sizeSuccess {
+		return sizeFailure
+	}
+	return sizeSuccess
+}
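+
+// Worked example (editor's note): a txn whose success branch puts a 3-byte
+// key with a 10-byte value and whose failure branch puts nothing costs
+// max(kvOverhead+3+10, 0) = 256+13 = 269 bytes; charging the more expensive
+// branch keeps the quota safe regardless of which branch executes.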
+
+func (b *backendQuota) Remaining() int64 {
+	return b.maxBackendBytes - b.s.Backend().Size()
+}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/raft.go b/vendor/github.com/coreos/etcd/etcdserver/raft.go
new file mode 100644
index 0000000..1080633
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/raft.go
@@ -0,0 +1,608 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+	"encoding/json"
+	"expvar"
+	"sort"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+	"github.com/coreos/etcd/etcdserver/membership"
+	"github.com/coreos/etcd/pkg/contention"
+	"github.com/coreos/etcd/pkg/pbutil"
+	"github.com/coreos/etcd/pkg/types"
+	"github.com/coreos/etcd/raft"
+	"github.com/coreos/etcd/raft/raftpb"
+	"github.com/coreos/etcd/rafthttp"
+	"github.com/coreos/etcd/wal"
+	"github.com/coreos/etcd/wal/walpb"
+	"github.com/coreos/pkg/capnslog"
+)
+
+const (
+	// numberOfCatchUpEntries is the number of entries a slow follower can
+	// use to catch up after the raft storage entries have been compacted.
+	// We expect the follower to have millisecond-level latency to the leader.
+	// The max throughput is around 10K entries/s, so keeping 5K entries is
+	// enough to help a follower catch up.
+	numberOfCatchUpEntries = 5000
+
+	// The max throughput of etcd will not exceed 100MB/s (100K * 1KB value).
+	// Assuming the RTT is around 10ms, 1MB max size is large enough.
+	maxSizePerMsg = 1 * 1024 * 1024
+	// Never overflow the rafthttp buffer, which is 4096.
+	// TODO: a better const?
+	maxInflightMsgs = 4096 / 8
+)
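+
+// Editor's note: 4096/8 caps maxInflightMsgs at 512 messages, comfortably
+// below the rafthttp buffer size of 4096 referenced above.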
+
+var (
+	// protects raftStatus
+	raftStatusMu sync.Mutex
+	// indirection for the expvar func interface.
+	// expvar panics when publishing a duplicate name and does not support
+	// removing a registered name, so we register a single func that calls
+	// raftStatus and swap raftStatus out as needed.
+	raftStatus func() raft.Status
+)
+
+func init() {
+	raft.SetLogger(capnslog.NewPackageLogger("github.com/coreos/etcd", "raft"))
+	expvar.Publish("raft.status", expvar.Func(func() interface{} {
+		raftStatusMu.Lock()
+		defer raftStatusMu.Unlock()
+		return raftStatus()
+	}))
+}
+
+type RaftTimer interface {
+	Index() uint64
+	Term() uint64
+}
+
+// apply contains entries and a snapshot to be applied. Once an apply is
+// consumed, the entries will be persisted to raft storage concurrently; the
+// application must read notifyc before assuming the raft messages are stable.
+type apply struct {
+	entries  []raftpb.Entry
+	snapshot raftpb.Snapshot
+	// notifyc synchronizes etcd server applies with the raft node
+	notifyc chan struct{}
+}
+
+type raftNode struct {
+	// Cache of the latest raft index and raft term the server has seen.
+	// These three uint64 fields must be the first elements to keep 64-bit
+	// alignment for atomic access to the fields.
+	index uint64
+	term  uint64
+	lead  uint64
+
+	tickMu *sync.Mutex
+	raftNodeConfig
+
+	// a chan to send/receive snapshot
+	msgSnapC chan raftpb.Message
+
+	// a chan to send out apply
+	applyc chan apply
+
+	// a chan to send out readState
+	readStateC chan raft.ReadState
+
+	// utility
+	ticker *time.Ticker
+	// contention detectors for raft heartbeat message
+	td *contention.TimeoutDetector
+
+	stopped chan struct{}
+	done    chan struct{}
+}
+
+type raftNodeConfig struct {
+	// to check if msg receiver is removed from cluster
+	isIDRemoved func(id uint64) bool
+	raft.Node
+	raftStorage *raft.MemoryStorage
+	storage     Storage
+	heartbeat   time.Duration // for logging
+	// transport specifies the transport to send and receive msgs to members.
+	// Sending messages MUST NOT block. It is okay to drop messages, since
+	// clients should time out and reissue their messages.
+	// If transport is nil, server will panic.
+	transport rafthttp.Transporter
+}
+
+func newRaftNode(cfg raftNodeConfig) *raftNode {
+	r := &raftNode{
+		tickMu:         new(sync.Mutex),
+		raftNodeConfig: cfg,
+		// set up contention detectors for raft heartbeat message.
+		// expect to send a heartbeat within 2 heartbeat intervals.
+		td:         contention.NewTimeoutDetector(2 * cfg.heartbeat),
+		readStateC: make(chan raft.ReadState, 1),
+		msgSnapC:   make(chan raftpb.Message, maxInFlightMsgSnap),
+		applyc:     make(chan apply),
+		stopped:    make(chan struct{}),
+		done:       make(chan struct{}),
+	}
+	if r.heartbeat == 0 {
+		r.ticker = &time.Ticker{}
+	} else {
+		r.ticker = time.NewTicker(r.heartbeat)
+	}
+	return r
+}
+
+// raft.Node does not lock internally in the raft package, so tick guards Tick with tickMu.
+func (r *raftNode) tick() {
+	r.tickMu.Lock()
+	r.Tick()
+	r.tickMu.Unlock()
+}
+
+// start prepares and starts raftNode in a new goroutine. It is no longer safe
+// to modify the fields after it has been started.
+func (r *raftNode) start(rh *raftReadyHandler) {
+	internalTimeout := time.Second
+
+	go func() {
+		defer r.onStop()
+		islead := false
+
+		for {
+			select {
+			case <-r.ticker.C:
+				r.tick()
+			case rd := <-r.Ready():
+				if rd.SoftState != nil {
+					newLeader := rd.SoftState.Lead != raft.None && atomic.LoadUint64(&r.lead) != rd.SoftState.Lead
+					if newLeader {
+						leaderChanges.Inc()
+					}
+
+					if rd.SoftState.Lead == raft.None {
+						hasLeader.Set(0)
+					} else {
+						hasLeader.Set(1)
+					}
+
+					atomic.StoreUint64(&r.lead, rd.SoftState.Lead)
+					islead = rd.RaftState == raft.StateLeader
+					if islead {
+						isLeader.Set(1)
+					} else {
+						isLeader.Set(0)
+					}
+					rh.updateLeadership(newLeader)
+					r.td.Reset()
+				}
+
+				if len(rd.ReadStates) != 0 {
+					select {
+					case r.readStateC <- rd.ReadStates[len(rd.ReadStates)-1]:
+					case <-time.After(internalTimeout):
+						plog.Warningf("timed out sending read state")
+					case <-r.stopped:
+						return
+					}
+				}
+
+				notifyc := make(chan struct{}, 1)
+				ap := apply{
+					entries:  rd.CommittedEntries,
+					snapshot: rd.Snapshot,
+					notifyc:  notifyc,
+				}
+
+				updateCommittedIndex(&ap, rh)
+
+				select {
+				case r.applyc <- ap:
+				case <-r.stopped:
+					return
+				}
+
+				// The leader can write to its disk in parallel with replicating to the
+				// followers and with them writing to their disks.
+				// For more details, see raft thesis section 10.2.1.
+				if islead {
+					// gofail: var raftBeforeLeaderSend struct{}
+					r.transport.Send(r.processMessages(rd.Messages))
+				}
+
+				// gofail: var raftBeforeSave struct{}
+				if err := r.storage.Save(rd.HardState, rd.Entries); err != nil {
+					plog.Fatalf("raft save state and entries error: %v", err)
+				}
+				if !raft.IsEmptyHardState(rd.HardState) {
+					proposalsCommitted.Set(float64(rd.HardState.Commit))
+				}
+				// gofail: var raftAfterSave struct{}
+
+				if !raft.IsEmptySnap(rd.Snapshot) {
+					// gofail: var raftBeforeSaveSnap struct{}
+					if err := r.storage.SaveSnap(rd.Snapshot); err != nil {
+						plog.Fatalf("raft save snapshot error: %v", err)
+					}
+					// etcdserver now claims the snapshot has been persisted to disk
+					notifyc <- struct{}{}
+
+					// gofail: var raftAfterSaveSnap struct{}
+					r.raftStorage.ApplySnapshot(rd.Snapshot)
+					plog.Infof("raft applied incoming snapshot at index %d", rd.Snapshot.Metadata.Index)
+					// gofail: var raftAfterApplySnap struct{}
+				}
+
+				r.raftStorage.Append(rd.Entries)
+
+				if !islead {
+					// finish processing incoming messages before we signal the notify chan
+					msgs := r.processMessages(rd.Messages)
+
+					// now unblocks 'applyAll' that waits on Raft log disk writes before triggering snapshots
+					notifyc <- struct{}{}
+
+					// Candidate or follower needs to wait for all pending configuration
+					// changes to be applied before sending messages.
+					// Otherwise we might incorrectly count votes (e.g. votes from removed members).
+					// Also slow machine's follower raft-layer could proceed to become the leader
+					// on its own single-node cluster, before apply-layer applies the config change.
+					// We simply wait for ALL pending entries to be applied for now.
+					// We might improve this later on if it causes unnecessary long blocking issues.
+					waitApply := false
+					for _, ent := range rd.CommittedEntries {
+						if ent.Type == raftpb.EntryConfChange {
+							waitApply = true
+							break
+						}
+					}
+					if waitApply {
+						// blocks until 'applyAll' calls 'applyWait.Trigger'
+						// to be in sync with scheduled config-change job
+						// (assume notifyc has cap of 1)
+						select {
+						case notifyc <- struct{}{}:
+						case <-r.stopped:
+							return
+						}
+					}
+
+					// gofail: var raftBeforeFollowerSend struct{}
+					r.transport.Send(msgs)
+				} else {
+					// leader already processed 'MsgSnap' and signaled
+					notifyc <- struct{}{}
+				}
+
+				r.Advance()
+			case <-r.stopped:
+				return
+			}
+		}
+	}()
+}
+
+func updateCommittedIndex(ap *apply, rh *raftReadyHandler) {
+	var ci uint64
+	if len(ap.entries) != 0 {
+		ci = ap.entries[len(ap.entries)-1].Index
+	}
+	if ap.snapshot.Metadata.Index > ci {
+		ci = ap.snapshot.Metadata.Index
+	}
+	if ci != 0 {
+		rh.updateCommittedIndex(ci)
+	}
+}
+
+func (r *raftNode) processMessages(ms []raftpb.Message) []raftpb.Message {
+	sentAppResp := false
+	for i := len(ms) - 1; i >= 0; i-- {
+		if r.isIDRemoved(ms[i].To) {
+			ms[i].To = 0
+		}
+
+		if ms[i].Type == raftpb.MsgAppResp {
+			if sentAppResp {
+				ms[i].To = 0
+			} else {
+				sentAppResp = true
+			}
+		}
+
+		if ms[i].Type == raftpb.MsgSnap {
+			// There are two separate data stores: the store for v2 and the KV for v3.
+			// The msgSnap only contains the most recent snapshot of the store, without the KV.
+			// So we need to redirect the msgSnap to the etcd server main loop so it can be
+			// merged with the current store snapshot and KV snapshot.
+			select {
+			case r.msgSnapC <- ms[i]:
+			default:
+				// drop msgSnap if the inflight chan is full.
+			}
+			ms[i].To = 0
+		}
+		if ms[i].Type == raftpb.MsgHeartbeat {
+			ok, exceed := r.td.Observe(ms[i].To)
+			if !ok {
+				// TODO: limit request rate.
+				plog.Warningf("failed to send out heartbeat on time (exceeded the %v timeout for %v)", r.heartbeat, exceed)
+				plog.Warningf("server is likely overloaded")
+				heartbeatSendFailures.Inc()
+			}
+		}
+	}
+	return ms
+}
+
+func (r *raftNode) apply() chan apply {
+	return r.applyc
+}
+
+func (r *raftNode) stop() {
+	r.stopped <- struct{}{}
+	<-r.done
+}
+
+func (r *raftNode) onStop() {
+	r.Stop()
+	r.ticker.Stop()
+	r.transport.Stop()
+	if err := r.storage.Close(); err != nil {
+		plog.Panicf("raft close storage error: %v", err)
+	}
+	close(r.done)
+}
+
+// for testing
+func (r *raftNode) pauseSending() {
+	p := r.transport.(rafthttp.Pausable)
+	p.Pause()
+}
+
+func (r *raftNode) resumeSending() {
+	p := r.transport.(rafthttp.Pausable)
+	p.Resume()
+}
+
+// advanceTicks advances ticks of Raft node.
+// This can be used for fast-forwarding election
+// ticks in multi data-center deployments, thus
+// speeding up the election process.
+func (r *raftNode) advanceTicks(ticks int) {
+	for i := 0; i < ticks; i++ {
+		r.tick()
+	}
+}
+
+func startNode(cfg ServerConfig, cl *membership.RaftCluster, ids []types.ID) (id types.ID, n raft.Node, s *raft.MemoryStorage, w *wal.WAL) {
+	var err error
+	member := cl.MemberByName(cfg.Name)
+	metadata := pbutil.MustMarshal(
+		&pb.Metadata{
+			NodeID:    uint64(member.ID),
+			ClusterID: uint64(cl.ID()),
+		},
+	)
+	if w, err = wal.Create(cfg.WALDir(), metadata); err != nil {
+		plog.Fatalf("create wal error: %v", err)
+	}
+	peers := make([]raft.Peer, len(ids))
+	for i, id := range ids {
+		ctx, err := json.Marshal((*cl).Member(id))
+		if err != nil {
+			plog.Panicf("marshal member should never fail: %v", err)
+		}
+		peers[i] = raft.Peer{ID: uint64(id), Context: ctx}
+	}
+	id = member.ID
+	plog.Infof("starting member %s in cluster %s", id, cl.ID())
+	s = raft.NewMemoryStorage()
+	c := &raft.Config{
+		ID:              uint64(id),
+		ElectionTick:    cfg.ElectionTicks,
+		HeartbeatTick:   1,
+		Storage:         s,
+		MaxSizePerMsg:   maxSizePerMsg,
+		MaxInflightMsgs: maxInflightMsgs,
+		CheckQuorum:     true,
+	}
+
+	n = raft.StartNode(c, peers)
+	raftStatusMu.Lock()
+	raftStatus = n.Status
+	raftStatusMu.Unlock()
+	return id, n, s, w
+}
+
+func restartNode(cfg ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) {
+	var walsnap walpb.Snapshot
+	if snapshot != nil {
+		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
+	}
+	w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap)
+
+	plog.Infof("restarting member %s in cluster %s at commit index %d", id, cid, st.Commit)
+	cl := membership.NewCluster("")
+	cl.SetID(cid)
+	s := raft.NewMemoryStorage()
+	if snapshot != nil {
+		s.ApplySnapshot(*snapshot)
+	}
+	s.SetHardState(st)
+	s.Append(ents)
+	c := &raft.Config{
+		ID:              uint64(id),
+		ElectionTick:    cfg.ElectionTicks,
+		HeartbeatTick:   1,
+		Storage:         s,
+		MaxSizePerMsg:   maxSizePerMsg,
+		MaxInflightMsgs: maxInflightMsgs,
+		CheckQuorum:     true,
+	}
+
+	n := raft.RestartNode(c)
+	raftStatusMu.Lock()
+	raftStatus = n.Status
+	raftStatusMu.Unlock()
+	return id, cl, n, s, w
+}
+
+func restartAsStandaloneNode(cfg ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) {
+	var walsnap walpb.Snapshot
+	if snapshot != nil {
+		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
+	}
+	w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap)
+
+	// discard the previously uncommitted entries
+	for i, ent := range ents {
+		if ent.Index > st.Commit {
+			plog.Infof("discarding %d uncommitted WAL entries ", len(ents)-i)
+			ents = ents[:i]
+			break
+		}
+	}
+
+	// force append the configuration change entries
+	toAppEnts := createConfigChangeEnts(getIDs(snapshot, ents), uint64(id), st.Term, st.Commit)
+	ents = append(ents, toAppEnts...)
+
+	// force commit newly appended entries
+	err := w.Save(raftpb.HardState{}, toAppEnts)
+	if err != nil {
+		plog.Fatalf("%v", err)
+	}
+	if len(ents) != 0 {
+		st.Commit = ents[len(ents)-1].Index
+	}
+
+	plog.Printf("forcing restart of member %s in cluster %s at commit index %d", id, cid, st.Commit)
+	cl := membership.NewCluster("")
+	cl.SetID(cid)
+	s := raft.NewMemoryStorage()
+	if snapshot != nil {
+		s.ApplySnapshot(*snapshot)
+	}
+	s.SetHardState(st)
+	s.Append(ents)
+	c := &raft.Config{
+		ID:              uint64(id),
+		ElectionTick:    cfg.ElectionTicks,
+		HeartbeatTick:   1,
+		Storage:         s,
+		MaxSizePerMsg:   maxSizePerMsg,
+		MaxInflightMsgs: maxInflightMsgs,
+		CheckQuorum:     true,
+	}
+	n := raft.RestartNode(c)
+	raftStatus = n.Status
+	return id, cl, n, s, w
+}
+
+// getIDs returns an ordered set of IDs included in the given snapshot and
+// the entries. The given snapshot/entries can contain two kinds of
+// ID-related entry:
+// - ConfChangeAddNode, in which case the contained ID will be added into the set.
+// - ConfChangeRemoveNode, in which case the contained ID will be removed from the set.
+func getIDs(snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 {
+	ids := make(map[uint64]bool)
+	if snap != nil {
+		for _, id := range snap.Metadata.ConfState.Nodes {
+			ids[id] = true
+		}
+	}
+	for _, e := range ents {
+		if e.Type != raftpb.EntryConfChange {
+			continue
+		}
+		var cc raftpb.ConfChange
+		pbutil.MustUnmarshal(&cc, e.Data)
+		switch cc.Type {
+		case raftpb.ConfChangeAddNode:
+			ids[cc.NodeID] = true
+		case raftpb.ConfChangeRemoveNode:
+			delete(ids, cc.NodeID)
+		case raftpb.ConfChangeUpdateNode:
+			// do nothing
+		default:
+			plog.Panicf("ConfChange Type should be either ConfChangeAddNode or ConfChangeRemoveNode!")
+		}
+	}
+	sids := make(types.Uint64Slice, 0, len(ids))
+	for id := range ids {
+		sids = append(sids, id)
+	}
+	sort.Sort(sids)
+	return []uint64(sids)
+}
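+
+// Worked example (editor's note): a snapshot whose ConfState lists nodes
+// {1, 2}, followed by entries that add node 3 and then remove node 2,
+// yields the sorted ID set [1, 3].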
+
+// createConfigChangeEnts creates a series of Raft entries (i.e.
+// EntryConfChange) to remove the set of given IDs from the cluster. The ID
+// `self` is _not_ removed, even if present in the set.
+// If `self` is not inside the given ids, it creates a Raft entry to add a
+// default member with the given `self`.
+func createConfigChangeEnts(ids []uint64, self uint64, term, index uint64) []raftpb.Entry {
+	ents := make([]raftpb.Entry, 0)
+	next := index + 1
+	found := false
+	for _, id := range ids {
+		if id == self {
+			found = true
+			continue
+		}
+		cc := &raftpb.ConfChange{
+			Type:   raftpb.ConfChangeRemoveNode,
+			NodeID: id,
+		}
+		e := raftpb.Entry{
+			Type:  raftpb.EntryConfChange,
+			Data:  pbutil.MustMarshal(cc),
+			Term:  term,
+			Index: next,
+		}
+		ents = append(ents, e)
+		next++
+	}
+	if !found {
+		m := membership.Member{
+			ID:             types.ID(self),
+			RaftAttributes: membership.RaftAttributes{PeerURLs: []string{"http://localhost:2380"}},
+		}
+		ctx, err := json.Marshal(m)
+		if err != nil {
+			plog.Panicf("marshal member should never fail: %v", err)
+		}
+		cc := &raftpb.ConfChange{
+			Type:    raftpb.ConfChangeAddNode,
+			NodeID:  self,
+			Context: ctx,
+		}
+		e := raftpb.Entry{
+			Type:  raftpb.EntryConfChange,
+			Data:  pbutil.MustMarshal(cc),
+			Term:  term,
+			Index: next,
+		}
+		ents = append(ents, e)
+	}
+	return ents
+}
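+
+// Worked example (editor's note): restarting member 1 as a standalone node
+// from the ID set [1, 2, 3] at term t and commit index i produces two
+// ConfChangeRemoveNode entries (for members 2 and 3) at indexes i+1 and
+// i+2; member 1 itself is kept, and no add entry is needed since it is
+// already present.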
diff --git a/vendor/github.com/coreos/etcd/etcdserver/server.go b/vendor/github.com/coreos/etcd/etcdserver/server.go
new file mode 100644
index 0000000..71e2bcf
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/server.go
@@ -0,0 +1,1745 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+	"context"
+	"encoding/json"
+	"expvar"
+	"fmt"
+	"math"
+	"math/rand"
+	"net/http"
+	"os"
+	"path"
+	"regexp"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/coreos/etcd/alarm"
+	"github.com/coreos/etcd/auth"
+	"github.com/coreos/etcd/compactor"
+	"github.com/coreos/etcd/discovery"
+	"github.com/coreos/etcd/etcdserver/api"
+	"github.com/coreos/etcd/etcdserver/api/v2http/httptypes"
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+	"github.com/coreos/etcd/etcdserver/membership"
+	"github.com/coreos/etcd/etcdserver/stats"
+	"github.com/coreos/etcd/lease"
+	"github.com/coreos/etcd/lease/leasehttp"
+	"github.com/coreos/etcd/mvcc"
+	"github.com/coreos/etcd/mvcc/backend"
+	"github.com/coreos/etcd/pkg/fileutil"
+	"github.com/coreos/etcd/pkg/idutil"
+	"github.com/coreos/etcd/pkg/pbutil"
+	"github.com/coreos/etcd/pkg/runtime"
+	"github.com/coreos/etcd/pkg/schedule"
+	"github.com/coreos/etcd/pkg/types"
+	"github.com/coreos/etcd/pkg/wait"
+	"github.com/coreos/etcd/raft"
+	"github.com/coreos/etcd/raft/raftpb"
+	"github.com/coreos/etcd/rafthttp"
+	"github.com/coreos/etcd/snap"
+	"github.com/coreos/etcd/store"
+	"github.com/coreos/etcd/version"
+	"github.com/coreos/etcd/wal"
+
+	"github.com/coreos/go-semver/semver"
+	"github.com/coreos/pkg/capnslog"
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+const (
+	DefaultSnapCount = 100000
+
+	StoreClusterPrefix = "/0"
+	StoreKeysPrefix    = "/1"
+
+	// HealthInterval is the minimum time the cluster should be healthy
+	// before accepting add member requests.
+	HealthInterval = 5 * time.Second
+
+	purgeFileInterval = 30 * time.Second
+	// monitorVersionInterval should be smaller than the timeout on the
+	// connection; otherwise we will not be able to reuse the connection
+	// (since it will time out).
+	monitorVersionInterval = rafthttp.ConnWriteTimeout - time.Second
+
+	// maxInFlightMsgSnap is the max number of in-flight snapshot messages etcdserver allows.
+	// This number is more than enough for most clusters with 5 machines.
+	maxInFlightMsgSnap = 16
+
+	releaseDelayAfterSnapshot = 30 * time.Second
+
+	// maxPendingRevokes is the maximum number of outstanding expired lease revocations.
+	maxPendingRevokes = 16
+
+	recommendedMaxRequestBytes = 10 * 1024 * 1024
+)
+
+var (
+	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver")
+
+	storeMemberAttributeRegexp = regexp.MustCompile(path.Join(membership.StoreMembersPrefix, "[[:xdigit:]]{1,16}", "attributes"))
+)
+
+func init() {
+	rand.Seed(time.Now().UnixNano())
+
+	expvar.Publish(
+		"file_descriptor_limit",
+		expvar.Func(
+			func() interface{} {
+				n, _ := runtime.FDLimit()
+				return n
+			},
+		),
+	)
+}
+
+type Response struct {
+	Term    uint64
+	Index   uint64
+	Event   *store.Event
+	Watcher store.Watcher
+	Err     error
+}
+
+type ServerV2 interface {
+	Server
+	// Do takes a V2 request and attempts to fulfill it, returning a Response.
+	Do(ctx context.Context, r pb.Request) (Response, error)
+	stats.Stats
+	ClientCertAuthEnabled() bool
+}
+
+type ServerV3 interface {
+	Server
+	ID() types.ID
+	RaftTimer
+}
+
+func (s *EtcdServer) ClientCertAuthEnabled() bool { return s.Cfg.ClientCertAuthEnabled }
+
+type Server interface {
+	// Leader returns the ID of the leader Server.
+	Leader() types.ID
+
+	// AddMember attempts to add a member into the cluster. It will return
+	// ErrIDRemoved if member ID is removed from the cluster, or return
+	// ErrIDExists if member ID exists in the cluster.
+	AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error)
+	// RemoveMember attempts to remove a member from the cluster. It will
+	// return ErrIDRemoved if member ID is removed from the cluster, or return
+	// ErrIDNotFound if member ID is not in the cluster.
+	RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error)
+	// UpdateMember attempts to update an existing member in the cluster. It will
+	// return ErrIDNotFound if the member ID does not exist.
+	UpdateMember(ctx context.Context, updateMemb membership.Member) ([]*membership.Member, error)
+
+	// ClusterVersion is the cluster-wide minimum major.minor version.
+	// Cluster version is set to the min version that an etcd member is
+	// compatible with when it first bootstraps.
+	//
+	// ClusterVersion is nil until the cluster is bootstrapped (has a quorum).
+	//
+	// During a rolling upgrade, the ClusterVersion will be updated
+	// automatically after a sync (5 seconds by default).
+	//
+	// The API/raft component can utilize ClusterVersion to determine if
+	// it can accept a client request or a raft RPC.
+	// NOTE: ClusterVersion might be nil when etcd 2.1 works with etcd 2.0 and
+	// the leader is etcd 2.0. etcd 2.0 leader will not update clusterVersion since
+	// this feature is introduced post 2.0.
+	ClusterVersion() *semver.Version
+	Cluster() api.Cluster
+	Alarms() []*pb.AlarmMember
+}
+
+// EtcdServer is the production implementation of the Server interface
+type EtcdServer struct {
+	// inflightSnapshots holds the number of snapshots currently in flight.
+	inflightSnapshots int64  // must use atomic operations to access; keep 64-bit aligned.
+	appliedIndex      uint64 // must use atomic operations to access; keep 64-bit aligned.
+	committedIndex    uint64 // must use atomic operations to access; keep 64-bit aligned.
+	// consistIndex holds the offset of the currently executing entry.
+	// It is initialized to 0 before executing any entry.
+	consistIndex consistentIndex // must use atomic operations to access; keep 64-bit aligned.
+	r            raftNode        // uses 64-bit atomics; keep 64-bit aligned.
+
+	readych chan struct{}
+	Cfg     ServerConfig
+
+	w wait.Wait
+
+	readMu sync.RWMutex
+	// the read routine notifies the etcd server that it is waiting for reads
+	// by sending an empty struct to readwaitc
+	readwaitc chan struct{}
+	// readNotifier is used to notify the read routine that it can process the request
+	// when there is no error
+	readNotifier *notifier
+
+	// stop signals the run goroutine should shut down.
+	stop chan struct{}
+	// stopping is closed by run goroutine on shutdown.
+	stopping chan struct{}
+	// done is closed when all goroutines from start() complete.
+	done chan struct{}
+
+	errorc     chan error
+	id         types.ID
+	attributes membership.Attributes
+
+	cluster *membership.RaftCluster
+
+	store       store.Store
+	snapshotter *snap.Snapshotter
+
+	applyV2 ApplierV2
+
+	// applyV3 is the applier with auth and quotas
+	applyV3 applierV3
+	// applyV3Base is the core applier without auth or quotas
+	applyV3Base applierV3
+	applyWait   wait.WaitTime
+
+	kv         mvcc.ConsistentWatchableKV
+	lessor     lease.Lessor
+	bemu       sync.Mutex
+	be         backend.Backend
+	authStore  auth.AuthStore
+	alarmStore *alarm.AlarmStore
+
+	stats  *stats.ServerStats
+	lstats *stats.LeaderStats
+
+	SyncTicker *time.Ticker
+	// compactor is used to auto-compact the KV.
+	compactor compactor.Compactor
+
+	// peerRt is used to send requests (version, lease) to peers.
+	peerRt   http.RoundTripper
+	reqIDGen *idutil.Generator
+
+	// forceVersionC is used to force the version monitor loop
+	// to detect the cluster version immediately.
+	forceVersionC chan struct{}
+
+	// wgMu blocks concurrent waitgroup mutation while the server is stopping
+	wgMu sync.RWMutex
+	// wg is used to wait for the goroutines that depend on the server state
+	// to exit when stopping the server.
+	wg sync.WaitGroup
+
+	// ctx is used for etcd-initiated requests that may need to be canceled
+	// on etcd server shutdown.
+	ctx    context.Context
+	cancel context.CancelFunc
+
+	leadTimeMu      sync.RWMutex
+	leadElectedTime time.Time
+}
+
+// NewServer creates a new EtcdServer from the supplied configuration. The
+// configuration is considered static for the lifetime of the EtcdServer.
+func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) {
+	st := store.New(StoreClusterPrefix, StoreKeysPrefix)
+
+	var (
+		w  *wal.WAL
+		n  raft.Node
+		s  *raft.MemoryStorage
+		id types.ID
+		cl *membership.RaftCluster
+	)
+
+	if cfg.MaxRequestBytes > recommendedMaxRequestBytes {
+		plog.Warningf("MaxRequestBytes %v exceeds maximum recommended size %v", cfg.MaxRequestBytes, recommendedMaxRequestBytes)
+	}
+
+	if terr := fileutil.TouchDirAll(cfg.DataDir); terr != nil {
+		return nil, fmt.Errorf("cannot access data directory: %v", terr)
+	}
+
+	haveWAL := wal.Exist(cfg.WALDir())
+
+	if err = fileutil.TouchDirAll(cfg.SnapDir()); err != nil {
+		plog.Fatalf("create snapshot directory error: %v", err)
+	}
+	ss := snap.New(cfg.SnapDir())
+
+	bepath := cfg.backendPath()
+	beExist := fileutil.Exist(bepath)
+	be := openBackend(cfg)
+
+	defer func() {
+		if err != nil {
+			be.Close()
+		}
+	}()
+
+	prt, err := rafthttp.NewRoundTripper(cfg.PeerTLSInfo, cfg.peerDialTimeout())
+	if err != nil {
+		return nil, err
+	}
+	var (
+		remotes  []*membership.Member
+		snapshot *raftpb.Snapshot
+	)
+
+	switch {
+	case !haveWAL && !cfg.NewCluster:
+		if err = cfg.VerifyJoinExisting(); err != nil {
+			return nil, err
+		}
+		cl, err = membership.NewClusterFromURLsMap(cfg.InitialClusterToken, cfg.InitialPeerURLsMap)
+		if err != nil {
+			return nil, err
+		}
+		existingCluster, gerr := GetClusterFromRemotePeers(getRemotePeerURLs(cl, cfg.Name), prt)
+		if gerr != nil {
+			return nil, fmt.Errorf("cannot fetch cluster info from peer urls: %v", gerr)
+		}
+		if err = membership.ValidateClusterAndAssignIDs(cl, existingCluster); err != nil {
+			return nil, fmt.Errorf("error validating peerURLs %s: %v", existingCluster, err)
+		}
+		if !isCompatibleWithCluster(cl, cl.MemberByName(cfg.Name).ID, prt) {
+			return nil, fmt.Errorf("incompatible with current running cluster")
+		}
+
+		remotes = existingCluster.Members()
+		cl.SetID(existingCluster.ID())
+		cl.SetStore(st)
+		cl.SetBackend(be)
+		cfg.Print()
+		id, n, s, w = startNode(cfg, cl, nil)
+	case !haveWAL && cfg.NewCluster:
+		if err = cfg.VerifyBootstrap(); err != nil {
+			return nil, err
+		}
+		cl, err = membership.NewClusterFromURLsMap(cfg.InitialClusterToken, cfg.InitialPeerURLsMap)
+		if err != nil {
+			return nil, err
+		}
+		m := cl.MemberByName(cfg.Name)
+		if isMemberBootstrapped(cl, cfg.Name, prt, cfg.bootstrapTimeout()) {
+			return nil, fmt.Errorf("member %s has already been bootstrapped", m.ID)
+		}
+		if cfg.ShouldDiscover() {
+			var str string
+			str, err = discovery.JoinCluster(cfg.DiscoveryURL, cfg.DiscoveryProxy, m.ID, cfg.InitialPeerURLsMap.String())
+			if err != nil {
+				return nil, &DiscoveryError{Op: "join", Err: err}
+			}
+			var urlsmap types.URLsMap
+			urlsmap, err = types.NewURLsMap(str)
+			if err != nil {
+				return nil, err
+			}
+			if checkDuplicateURL(urlsmap) {
+				return nil, fmt.Errorf("discovery cluster %s has duplicate url", urlsmap)
+			}
+			if cl, err = membership.NewClusterFromURLsMap(cfg.InitialClusterToken, urlsmap); err != nil {
+				return nil, err
+			}
+		}
+		cl.SetStore(st)
+		cl.SetBackend(be)
+		cfg.PrintWithInitial()
+		id, n, s, w = startNode(cfg, cl, cl.MemberIDs())
+	case haveWAL:
+		if err = fileutil.IsDirWriteable(cfg.MemberDir()); err != nil {
+			return nil, fmt.Errorf("cannot write to member directory: %v", err)
+		}
+
+		if err = fileutil.IsDirWriteable(cfg.WALDir()); err != nil {
+			return nil, fmt.Errorf("cannot write to WAL directory: %v", err)
+		}
+
+		if cfg.ShouldDiscover() {
+			plog.Warningf("discovery token ignored since a cluster has already been initialized. Valid log found at %q", cfg.WALDir())
+		}
+		snapshot, err = ss.Load()
+		if err != nil && err != snap.ErrNoSnapshot {
+			return nil, err
+		}
+		if snapshot != nil {
+			if err = st.Recovery(snapshot.Data); err != nil {
+				plog.Panicf("recovered store from snapshot error: %v", err)
+			}
+			plog.Infof("recovered store from snapshot at index %d", snapshot.Metadata.Index)
+			if be, err = recoverSnapshotBackend(cfg, be, *snapshot); err != nil {
+				plog.Panicf("recovering backend from snapshot error: %v", err)
+			}
+		}
+		cfg.Print()
+		if !cfg.ForceNewCluster {
+			id, cl, n, s, w = restartNode(cfg, snapshot)
+		} else {
+			id, cl, n, s, w = restartAsStandaloneNode(cfg, snapshot)
+		}
+		cl.SetStore(st)
+		cl.SetBackend(be)
+		cl.Recover(api.UpdateCapability)
+		if cl.Version() != nil && !cl.Version().LessThan(semver.Version{Major: 3}) && !beExist {
+			os.RemoveAll(bepath)
+			return nil, fmt.Errorf("database file (%v) of the backend is missing", bepath)
+		}
+	default:
+		return nil, fmt.Errorf("unsupported bootstrap config")
+	}
+
+	if terr := fileutil.TouchDirAll(cfg.MemberDir()); terr != nil {
+		return nil, fmt.Errorf("cannot access member directory: %v", terr)
+	}
+
+	sstats := stats.NewServerStats(cfg.Name, id.String())
+	lstats := stats.NewLeaderStats(id.String())
+
+	heartbeat := time.Duration(cfg.TickMs) * time.Millisecond
+	srv = &EtcdServer{
+		readych:     make(chan struct{}),
+		Cfg:         cfg,
+		errorc:      make(chan error, 1),
+		store:       st,
+		snapshotter: ss,
+		r: *newRaftNode(
+			raftNodeConfig{
+				isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) },
+				Node:        n,
+				heartbeat:   heartbeat,
+				raftStorage: s,
+				storage:     NewStorage(w, ss),
+			},
+		),
+		id:            id,
+		attributes:    membership.Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()},
+		cluster:       cl,
+		stats:         sstats,
+		lstats:        lstats,
+		SyncTicker:    time.NewTicker(500 * time.Millisecond),
+		peerRt:        prt,
+		reqIDGen:      idutil.NewGenerator(uint16(id), time.Now()),
+		forceVersionC: make(chan struct{}),
+	}
+	serverID.With(prometheus.Labels{"server_id": id.String()}).Set(1)
+
+	srv.applyV2 = &applierV2store{store: srv.store, cluster: srv.cluster}
+
+	srv.be = be
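+	// minTTL below works out to ~1.5x the election timeout, so a lease granted
+	// just before an election should not expire before a new leader can renew
+	// it (a reading of the formula, not a documented guarantee).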
+	minTTL := time.Duration((3*cfg.ElectionTicks)/2) * heartbeat
+
+	// always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases.
+	// If we recover mvcc.KV first, it will attach the keys to the wrong lessor before it recovers.
+	srv.lessor = lease.NewLessor(srv.be, int64(math.Ceil(minTTL.Seconds())))
+	srv.kv = mvcc.New(srv.be, srv.lessor, &srv.consistIndex)
+	if beExist {
+		kvindex := srv.kv.ConsistentIndex()
+		// TODO: remove kvindex != 0 checking when we do not expect users to upgrade
+		// etcd from pre-3.0 release.
+		if snapshot != nil && kvindex < snapshot.Metadata.Index {
+			if kvindex != 0 {
+				return nil, fmt.Errorf("database file (%v index %d) does not match with snapshot (index %d).", bepath, kvindex, snapshot.Metadata.Index)
+			}
+			plog.Warningf("consistent index never saved (snapshot index=%d)", snapshot.Metadata.Index)
+		}
+	}
+	newSrv := srv // capture srv: the named return value srv is nil inside the defer when an error path returns nil
+	defer func() {
+		// closing backend without first closing kv can cause
+		// resumed compactions to fail with closed tx errors
+		if err != nil {
+			newSrv.kv.Close()
+		}
+	}()
+
+	srv.consistIndex.setConsistentIndex(srv.kv.ConsistentIndex())
+	tp, err := auth.NewTokenProvider(cfg.AuthToken,
+		func(index uint64) <-chan struct{} {
+			return srv.applyWait.Wait(index)
+		},
+	)
+	if err != nil {
+		plog.Errorf("failed to create token provider: %s", err)
+		return nil, err
+	}
+	srv.authStore = auth.NewAuthStore(srv.be, tp)
+	if num := cfg.AutoCompactionRetention; num != 0 {
+		srv.compactor, err = compactor.New(cfg.AutoCompactionMode, num, srv.kv, srv)
+		if err != nil {
+			return nil, err
+		}
+		srv.compactor.Run()
+	}
+
+	srv.applyV3Base = srv.newApplierV3Backend()
+	if err = srv.restoreAlarms(); err != nil {
+		return nil, err
+	}
+
+	// TODO: move transport initialization near the definition of remote
+	tr := &rafthttp.Transport{
+		TLSInfo:     cfg.PeerTLSInfo,
+		DialTimeout: cfg.peerDialTimeout(),
+		ID:          id,
+		URLs:        cfg.PeerURLs,
+		ClusterID:   cl.ID(),
+		Raft:        srv,
+		Snapshotter: ss,
+		ServerStats: sstats,
+		LeaderStats: lstats,
+		ErrorC:      srv.errorc,
+	}
+	if err = tr.Start(); err != nil {
+		return nil, err
+	}
+	// add all remotes into transport
+	for _, m := range remotes {
+		if m.ID != id {
+			tr.AddRemote(m.ID, m.PeerURLs)
+		}
+	}
+	for _, m := range cl.Members() {
+		if m.ID != id {
+			tr.AddPeer(m.ID, m.PeerURLs)
+		}
+	}
+	srv.r.transport = tr
+
+	return srv, nil
+}
+
+func (s *EtcdServer) adjustTicks() {
+	clusterN := len(s.cluster.Members())
+
+	// single-node fresh start, or single-node recovering from a snapshot
+	if clusterN == 1 {
+		ticks := s.Cfg.ElectionTicks - 1
+		plog.Infof("%s as single-node; fast-forwarding %d ticks (election ticks %d)", s.ID(), ticks, s.Cfg.ElectionTicks)
+		s.r.advanceTicks(ticks)
+		return
+	}
+
+	if !s.Cfg.InitialElectionTickAdvance {
+		plog.Infof("skipping initial election tick advance (election tick %d)", s.Cfg.ElectionTicks)
+		return
+	}
+
+	// retry up to "rafthttp.ConnReadTimeout", which is 5-sec
+	// until peer connection reports; otherwise:
+	// 1. all connections failed, or
+	// 2. no active peers, or
+	// 3. restarted single-node with no snapshot
+	// then, do nothing, because advancing ticks would have no effect
+	waitTime := rafthttp.ConnReadTimeout
+	itv := 50 * time.Millisecond
+	for i := int64(0); i < int64(waitTime/itv); i++ {
+		select {
+		case <-time.After(itv):
+		case <-s.stopping:
+			return
+		}
+
+		peerN := s.r.transport.ActivePeers()
+		if peerN > 1 {
+			// multi-node cluster received peer connection reports;
+			// adjust ticks, leaving headroom in case leader messages arrive slowly
+			ticks := s.Cfg.ElectionTicks - 2
+			plog.Infof("%s initialized peer connection; fast-forwarding %d ticks (election ticks %d) with %d active peer(s)", s.ID(), ticks, s.Cfg.ElectionTicks, peerN)
+			s.r.advanceTicks(ticks)
+			return
+		}
+	}
+}
+
+// Start performs any initialization of the Server necessary for it to
+// begin serving requests. It must be called before Do or Process.
+// Start must be non-blocking; any long-running server functionality
+// should be implemented in goroutines.
+func (s *EtcdServer) Start() {
+	s.start()
+	s.goAttach(func() { s.adjustTicks() })
+	s.goAttach(func() { s.publish(s.Cfg.ReqTimeout()) })
+	s.goAttach(s.purgeFile)
+	s.goAttach(func() { monitorFileDescriptor(s.stopping) })
+	s.goAttach(s.monitorVersions)
+	s.goAttach(s.linearizableReadLoop)
+	s.goAttach(s.monitorKVHash)
+}
+
+// start prepares and starts server in a new goroutine. It is no longer safe to
+// modify a server's fields after it has been sent to Start.
+// This function is just used for testing.
+func (s *EtcdServer) start() {
+	if s.Cfg.SnapCount == 0 {
+		plog.Infof("set snapshot count to default %d", DefaultSnapCount)
+		s.Cfg.SnapCount = DefaultSnapCount
+	}
+	s.w = wait.New()
+	s.applyWait = wait.NewTimeList()
+	s.done = make(chan struct{})
+	s.stop = make(chan struct{})
+	s.stopping = make(chan struct{})
+	s.ctx, s.cancel = context.WithCancel(context.Background())
+	s.readwaitc = make(chan struct{}, 1)
+	s.readNotifier = newNotifier()
+	if s.ClusterVersion() != nil {
+		plog.Infof("starting server... [version: %v, cluster version: %v]", version.Version, version.Cluster(s.ClusterVersion().String()))
+	} else {
+		plog.Infof("starting server... [version: %v, cluster version: to_be_decided]", version.Version)
+	}
+	// TODO: if this is an empty log, write all peer infos
+	// into the first entry
+	go s.run()
+}
+
+func (s *EtcdServer) purgeFile() {
+	var dberrc, serrc, werrc <-chan error
+	if s.Cfg.MaxSnapFiles > 0 {
+		dberrc = fileutil.PurgeFile(s.Cfg.SnapDir(), "snap.db", s.Cfg.MaxSnapFiles, purgeFileInterval, s.done)
+		serrc = fileutil.PurgeFile(s.Cfg.SnapDir(), "snap", s.Cfg.MaxSnapFiles, purgeFileInterval, s.done)
+	}
+	if s.Cfg.MaxWALFiles > 0 {
+		werrc = fileutil.PurgeFile(s.Cfg.WALDir(), "wal", s.Cfg.MaxWALFiles, purgeFileInterval, s.done)
+	}
+	select {
+	case e := <-dberrc:
+		plog.Fatalf("failed to purge snap db file %v", e)
+	case e := <-serrc:
+		plog.Fatalf("failed to purge snap file %v", e)
+	case e := <-werrc:
+		plog.Fatalf("failed to purge wal file %v", e)
+	case <-s.stopping:
+		return
+	}
+}
+
+func (s *EtcdServer) ID() types.ID { return s.id }
+
+func (s *EtcdServer) Cluster() api.Cluster { return s.cluster }
+
+func (s *EtcdServer) ApplyWait() <-chan struct{} { return s.applyWait.Wait(s.getCommittedIndex()) }
+
+type ServerPeer interface {
+	ServerV2
+	RaftHandler() http.Handler
+	LeaseHandler() http.Handler
+}
+
+func (s *EtcdServer) LeaseHandler() http.Handler {
+	if s.lessor == nil {
+		return nil
+	}
+	return leasehttp.NewHandler(s.lessor, s.ApplyWait)
+}
+
+func (s *EtcdServer) RaftHandler() http.Handler { return s.r.transport.Handler() }
+
+// Process takes a raft message and applies it to the server's raft state
+// machine, respecting any timeout of the given context.
+func (s *EtcdServer) Process(ctx context.Context, m raftpb.Message) error {
+	if s.cluster.IsIDRemoved(types.ID(m.From)) {
+		plog.Warningf("reject message from removed member %s", types.ID(m.From).String())
+		return httptypes.NewHTTPError(http.StatusForbidden, "cannot process message from removed member")
+	}
+	if m.Type == raftpb.MsgApp {
+		s.stats.RecvAppendReq(types.ID(m.From).String(), m.Size())
+	}
+	return s.r.Step(ctx, m)
+}
+
+func (s *EtcdServer) IsIDRemoved(id uint64) bool { return s.cluster.IsIDRemoved(types.ID(id)) }
+
+func (s *EtcdServer) ReportUnreachable(id uint64) { s.r.ReportUnreachable(id) }
+
+// ReportSnapshot reports snapshot sent status to the raft state machine,
+// and clears the used snapshot from the snapshot store.
+func (s *EtcdServer) ReportSnapshot(id uint64, status raft.SnapshotStatus) {
+	s.r.ReportSnapshot(id, status)
+}
+
+type etcdProgress struct {
+	confState raftpb.ConfState
+	snapi     uint64
+	appliedt  uint64
+	appliedi  uint64
+}
+
+// raftReadyHandler contains a set of EtcdServer operations to be called by raftNode,
+// and helps decouple state machine logic from Raft algorithms.
+// TODO: add a state machine interface to apply the commit entries and do snapshot/recover
+type raftReadyHandler struct {
+	updateLeadership     func(newLeader bool)
+	updateCommittedIndex func(uint64)
+}
+
+func (s *EtcdServer) run() {
+	sn, err := s.r.raftStorage.Snapshot()
+	if err != nil {
+		plog.Panicf("get snapshot from raft storage error: %v", err)
+	}
+
+	// asynchronously accept apply packets, dispatch progress in-order
+	sched := schedule.NewFIFOScheduler()
+
+	var (
+		smu   sync.RWMutex
+		syncC <-chan time.Time
+	)
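+	// setSyncC/getSyncC guard syncC with smu: the raftReadyHandler below swaps
+	// the ticker channel on leadership changes while the main loop selects on it.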
+	setSyncC := func(ch <-chan time.Time) {
+		smu.Lock()
+		syncC = ch
+		smu.Unlock()
+	}
+	getSyncC := func() (ch <-chan time.Time) {
+		smu.RLock()
+		ch = syncC
+		smu.RUnlock()
+		return
+	}
+	rh := &raftReadyHandler{
+		updateLeadership: func(newLeader bool) {
+			if !s.isLeader() {
+				if s.lessor != nil {
+					s.lessor.Demote()
+				}
+				if s.compactor != nil {
+					s.compactor.Pause()
+				}
+				setSyncC(nil)
+			} else {
+				if newLeader {
+					t := time.Now()
+					s.leadTimeMu.Lock()
+					s.leadElectedTime = t
+					s.leadTimeMu.Unlock()
+				}
+				setSyncC(s.SyncTicker.C)
+				if s.compactor != nil {
+					s.compactor.Resume()
+				}
+			}
+
+			// TODO: remove the nil checking
+			// current test utility does not provide the stats
+			if s.stats != nil {
+				s.stats.BecomeLeader()
+			}
+		},
+		updateCommittedIndex: func(ci uint64) {
+			cci := s.getCommittedIndex()
+			if ci > cci {
+				s.setCommittedIndex(ci)
+			}
+		},
+	}
+	s.r.start(rh)
+
+	ep := etcdProgress{
+		confState: sn.Metadata.ConfState,
+		snapi:     sn.Metadata.Index,
+		appliedt:  sn.Metadata.Term,
+		appliedi:  sn.Metadata.Index,
+	}
+
+	defer func() {
+		s.wgMu.Lock() // block concurrent waitgroup adds in goAttach while stopping
+		close(s.stopping)
+		s.wgMu.Unlock()
+		s.cancel()
+
+		sched.Stop()
+
+		// wait for goroutines before closing raft so the WAL stays open
+		s.wg.Wait()
+
+		s.SyncTicker.Stop()
+
+		// must stop raft after the scheduler; etcdserver can leak rafthttp pipelines
+		// by adding a peer after raft stops the transport
+		s.r.stop()
+
+		// kv, lessor and backend can be nil if running without v3 enabled
+		// or running unit tests.
+		if s.lessor != nil {
+			s.lessor.Stop()
+		}
+		if s.kv != nil {
+			s.kv.Close()
+		}
+		if s.authStore != nil {
+			s.authStore.Close()
+		}
+		if s.be != nil {
+			s.be.Close()
+		}
+		if s.compactor != nil {
+			s.compactor.Stop()
+		}
+		close(s.done)
+	}()
+
+	var expiredLeaseC <-chan []*lease.Lease
+	if s.lessor != nil {
+		expiredLeaseC = s.lessor.ExpiredLeasesC()
+	}
+
+	for {
+		select {
+		case ap := <-s.r.apply():
+			f := func(context.Context) { s.applyAll(&ep, &ap) }
+			sched.Schedule(f)
+		case leases := <-expiredLeaseC:
+			s.goAttach(func() {
+				// increase throughput of the expired-lease deletion process through parallelization
+				c := make(chan struct{}, maxPendingRevokes)
+				for _, lease := range leases {
+					select {
+					case c <- struct{}{}:
+					case <-s.stopping:
+						return
+					}
+					lid := lease.ID
+					s.goAttach(func() {
+						ctx := s.authStore.WithRoot(s.ctx)
+						_, lerr := s.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: int64(lid)})
+						if lerr == nil {
+							leaseExpired.Inc()
+						} else {
+							plog.Warningf("failed to revoke %016x (%q)", lid, lerr.Error())
+						}
+
+						<-c
+					})
+				}
+			})
+		case err := <-s.errorc:
+			plog.Errorf("%s", err)
+			plog.Infof("the data-dir used by this member must be removed.")
+			return
+		case <-getSyncC():
+			if s.store.HasTTLKeys() {
+				s.sync(s.Cfg.ReqTimeout())
+			}
+		case <-s.stop:
+			return
+		}
+	}
+}
+
+func (s *EtcdServer) applyAll(ep *etcdProgress, apply *apply) {
+	s.applySnapshot(ep, apply)
+	s.applyEntries(ep, apply)
+
+	proposalsApplied.Set(float64(ep.appliedi))
+	s.applyWait.Trigger(ep.appliedi)
+	// wait for the raft routine to finish the disk writes before triggering a
+	// snapshot; otherwise the applied index might be greater than the last index
+	// in raft storage, since the raft routine might be slower than the apply routine.
+	<-apply.notifyc
+
+	s.triggerSnapshot(ep)
+	select {
+	// snapshot requested via send()
+	case m := <-s.r.msgSnapC:
+		merged := s.createMergedSnapshotMessage(m, ep.appliedt, ep.appliedi, ep.confState)
+		s.sendMergedSnap(merged)
+	default:
+	}
+}
+
+func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) {
+	if raft.IsEmptySnap(apply.snapshot) {
+		return
+	}
+
+	plog.Infof("applying snapshot at index %d...", ep.snapi)
+	defer plog.Infof("finished applying incoming snapshot at index %d", ep.snapi)
+
+	if apply.snapshot.Metadata.Index <= ep.appliedi {
+		plog.Panicf("snapshot index [%d] should > appliedi[%d] + 1",
+			apply.snapshot.Metadata.Index, ep.appliedi)
+	}
+
+	// wait for raftNode to persist snapshot onto the disk
+	<-apply.notifyc
+
+	newbe, err := openSnapshotBackend(s.Cfg, s.snapshotter, apply.snapshot)
+	if err != nil {
+		plog.Panic(err)
+	}
+
+	// always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases.
+	// If we recover mvcc.KV first, it will attach the keys to the wrong lessor before it recovers.
+	if s.lessor != nil {
+		plog.Info("recovering lessor...")
+		s.lessor.Recover(newbe, func() lease.TxnDelete { return s.kv.Write() })
+		plog.Info("finished recovering lessor")
+	}
+
+	plog.Info("restoring mvcc store...")
+
+	if err := s.kv.Restore(newbe); err != nil {
+		plog.Panicf("restore KV error: %v", err)
+	}
+	s.consistIndex.setConsistentIndex(s.kv.ConsistentIndex())
+
+	plog.Info("finished restoring mvcc store")
+
+	// Closing old backend might block until all the txns
+	// on the backend are finished.
+	// We do not want to wait on closing the old backend.
+	s.bemu.Lock()
+	oldbe := s.be
+	go func() {
+		plog.Info("closing old backend...")
+		defer plog.Info("finished closing old backend")
+
+		if err := oldbe.Close(); err != nil {
+			plog.Panicf("close backend error: %v", err)
+		}
+	}()
+
+	s.be = newbe
+	s.bemu.Unlock()
+
+	plog.Info("recovering alarms...")
+	if err := s.restoreAlarms(); err != nil {
+		plog.Panicf("restore alarms error: %v", err)
+	}
+	plog.Info("finished recovering alarms")
+
+	if s.authStore != nil {
+		plog.Info("recovering auth store...")
+		s.authStore.Recover(newbe)
+		plog.Info("finished recovering auth store")
+	}
+
+	plog.Info("recovering store v2...")
+	if err := s.store.Recovery(apply.snapshot.Data); err != nil {
+		plog.Panicf("recovery store error: %v", err)
+	}
+	plog.Info("finished recovering store v2")
+
+	s.cluster.SetBackend(s.be)
+	plog.Info("recovering cluster configuration...")
+	s.cluster.Recover(api.UpdateCapability)
+	plog.Info("finished recovering cluster configuration")
+
+	plog.Info("removing old peers from network...")
+	// recover raft transport
+	s.r.transport.RemoveAllPeers()
+	plog.Info("finished removing old peers from network")
+
+	plog.Info("adding peers from new cluster configuration into network...")
+	for _, m := range s.cluster.Members() {
+		if m.ID == s.ID() {
+			continue
+		}
+		s.r.transport.AddPeer(m.ID, m.PeerURLs)
+	}
+	plog.Info("finished adding peers from new cluster configuration into network...")
+
+	ep.appliedt = apply.snapshot.Metadata.Term
+	ep.appliedi = apply.snapshot.Metadata.Index
+	ep.snapi = ep.appliedi
+	ep.confState = apply.snapshot.Metadata.ConfState
+}
+
+func (s *EtcdServer) applyEntries(ep *etcdProgress, apply *apply) {
+	if len(apply.entries) == 0 {
+		return
+	}
+	firsti := apply.entries[0].Index
+	if firsti > ep.appliedi+1 {
+		plog.Panicf("first index of committed entry[%d] should <= appliedi[%d] + 1", firsti, ep.appliedi)
+	}
+	var ents []raftpb.Entry
+	if ep.appliedi+1-firsti < uint64(len(apply.entries)) {
+		ents = apply.entries[ep.appliedi+1-firsti:]
+	}
+	if len(ents) == 0 {
+		return
+	}
+	var shouldstop bool
+	if ep.appliedt, ep.appliedi, shouldstop = s.apply(ents, &ep.confState); shouldstop {
+		go s.stopWithDelay(10*100*time.Millisecond, fmt.Errorf("the member has been permanently removed from the cluster"))
+	}
+}
+
+func (s *EtcdServer) triggerSnapshot(ep *etcdProgress) {
+	if ep.appliedi-ep.snapi <= s.Cfg.SnapCount {
+		return
+	}
+
+	plog.Infof("start to snapshot (applied: %d, lastsnap: %d)", ep.appliedi, ep.snapi)
+	s.snapshot(ep.appliedi, ep.confState)
+	ep.snapi = ep.appliedi
+}
+
+func (s *EtcdServer) isMultiNode() bool {
+	return s.cluster != nil && len(s.cluster.MemberIDs()) > 1
+}
+
+func (s *EtcdServer) isLeader() bool {
+	return uint64(s.ID()) == s.Lead()
+}
+
+// MoveLeader transfers the leader to the given transferee.
+func (s *EtcdServer) MoveLeader(ctx context.Context, lead, transferee uint64) error {
+	now := time.Now()
+	interval := time.Duration(s.Cfg.TickMs) * time.Millisecond
+
+	plog.Infof("%s starts leadership transfer from %s to %s", s.ID(), types.ID(lead), types.ID(transferee))
+	s.r.TransferLeadership(ctx, lead, transferee)
+	for s.Lead() != transferee {
+		select {
+		case <-ctx.Done(): // time out
+			return ErrTimeoutLeaderTransfer
+		case <-time.After(interval):
+		}
+	}
+
+	// TODO: drain all requests, or drop all messages to the old leader
+
+	plog.Infof("%s finished leadership transfer from %s to %s (took %v)", s.ID(), types.ID(lead), types.ID(transferee), time.Since(now))
+	return nil
+}
+
+// TransferLeadership transfers the leader to the chosen transferee.
+func (s *EtcdServer) TransferLeadership() error {
+	if !s.isLeader() {
+		plog.Printf("skipped leadership transfer for stopping non-leader member")
+		return nil
+	}
+
+	if !s.isMultiNode() {
+		plog.Printf("skipped leadership transfer for single member cluster")
+		return nil
+	}
+
+	transferee, ok := longestConnected(s.r.transport, s.cluster.MemberIDs())
+	if !ok {
+		return ErrUnhealthy
+	}
+
+	tm := s.Cfg.ReqTimeout()
+	ctx, cancel := context.WithTimeout(s.ctx, tm)
+	err := s.MoveLeader(ctx, s.Lead(), uint64(transferee))
+	cancel()
+	return err
+}
+
+// HardStop stops the server without coordination with other members in the cluster.
+func (s *EtcdServer) HardStop() {
+	select {
+	case s.stop <- struct{}{}:
+	case <-s.done:
+		return
+	}
+	<-s.done
+}
+
+// Stop stops the server gracefully, and shuts down the running goroutine.
+// Stop should be called after Start(s), otherwise it will block forever.
+// When stopping a leader, Stop transfers its leadership to one of its peers
+// before stopping the server.
+// Stop terminates the Server and performs any necessary finalization.
+// Do and Process cannot be called after Stop has been invoked.
+func (s *EtcdServer) Stop() {
+	if err := s.TransferLeadership(); err != nil {
+		plog.Warningf("%s failed to transfer leadership (%v)", s.ID(), err)
+	}
+	s.HardStop()
+}
+
+// ReadyNotify returns a channel that will be closed when the server
+// is ready to serve client requests
+func (s *EtcdServer) ReadyNotify() <-chan struct{} { return s.readych }
+
+func (s *EtcdServer) stopWithDelay(d time.Duration, err error) {
+	select {
+	case <-time.After(d):
+	case <-s.done:
+	}
+	select {
+	case s.errorc <- err:
+	default:
+	}
+}
+
+// StopNotify returns a channel that receives an empty struct
+// when the server is stopped.
+func (s *EtcdServer) StopNotify() <-chan struct{} { return s.done }
+
+func (s *EtcdServer) SelfStats() []byte { return s.stats.JSON() }
+
+func (s *EtcdServer) LeaderStats() []byte {
+	lead := atomic.LoadUint64(&s.r.lead)
+	if lead != uint64(s.id) {
+		return nil
+	}
+	return s.lstats.JSON()
+}
+
+func (s *EtcdServer) StoreStats() []byte { return s.store.JsonStats() }
+
+func (s *EtcdServer) checkMembershipOperationPermission(ctx context.Context) error {
+	if s.authStore == nil {
+		// In the context of an ordinary etcd process, s.authStore will never be nil.
+		// This branch is for handling cases in server_test.go.
+		return nil
+	}
+
+	// Note that this permission check is done in the API layer,
+	// so a TOCTOU problem can potentially arise in a schedule like this:
+	// update membership with user A -> revoke root role of A -> apply membership change
+	// in the state machine layer.
+	// However, both membership changes and role management require the root privilege,
+	// so careful operation by admins can prevent the problem.
+	authInfo, err := s.AuthInfoFromCtx(ctx)
+	if err != nil {
+		return err
+	}
+
+	return s.AuthStore().IsAdminPermitted(authInfo)
+}
+
+func (s *EtcdServer) AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) {
+	if err := s.checkMembershipOperationPermission(ctx); err != nil {
+		return nil, err
+	}
+
+	if s.Cfg.StrictReconfigCheck {
+		// by default StrictReconfigCheck is enabled; reject new members if unhealthy
+		if !s.cluster.IsReadyToAddNewMember() {
+			plog.Warningf("not enough started members, rejecting member add %+v", memb)
+			return nil, ErrNotEnoughStartedMembers
+		}
+		if !isConnectedFullySince(s.r.transport, time.Now().Add(-HealthInterval), s.ID(), s.cluster.Members()) {
+			plog.Warningf("not healthy for reconfigure, rejecting member add %+v", memb)
+			return nil, ErrUnhealthy
+		}
+	}
+
+	// TODO: move Member to protobuf type
+	b, err := json.Marshal(memb)
+	if err != nil {
+		return nil, err
+	}
+	cc := raftpb.ConfChange{
+		Type:    raftpb.ConfChangeAddNode,
+		NodeID:  uint64(memb.ID),
+		Context: b,
+	}
+	return s.configure(ctx, cc)
+}
+
+func (s *EtcdServer) RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error) {
+	if err := s.checkMembershipOperationPermission(ctx); err != nil {
+		return nil, err
+	}
+
+	// by default StrictReconfigCheck is enabled; reject removal if it leads to quorum loss
+	if err := s.mayRemoveMember(types.ID(id)); err != nil {
+		return nil, err
+	}
+
+	cc := raftpb.ConfChange{
+		Type:   raftpb.ConfChangeRemoveNode,
+		NodeID: id,
+	}
+	return s.configure(ctx, cc)
+}
+
+func (s *EtcdServer) mayRemoveMember(id types.ID) error {
+	if !s.Cfg.StrictReconfigCheck {
+		return nil
+	}
+
+	if !s.cluster.IsReadyToRemoveMember(uint64(id)) {
+		plog.Warningf("not enough started members, rejecting remove member %s", id)
+		return ErrNotEnoughStartedMembers
+	}
+
+	// downed member is safe to remove since it's not part of the active quorum
+	if t := s.r.transport.ActiveSince(id); id != s.ID() && t.IsZero() {
+		return nil
+	}
+
+	// protect quorum if some members are down
+	m := s.cluster.Members()
+	active := numConnectedSince(s.r.transport, time.Now().Add(-HealthInterval), s.ID(), m)
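+	// e.g. with 5 members and 4 active, removing one leaves active-1 = 3,
+	// which still meets the post-removal quorum 1+((5-1)/2) = 3, so the
+	// removal is allowed.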
+	if (active - 1) < 1+((len(m)-1)/2) {
+		plog.Warningf("reconfigure breaks active quorum, rejecting remove member %s", id)
+		return ErrUnhealthy
+	}
+
+	return nil
+}
+
+func (s *EtcdServer) UpdateMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) {
+	b, merr := json.Marshal(memb)
+	if merr != nil {
+		return nil, merr
+	}
+
+	if err := s.checkMembershipOperationPermission(ctx); err != nil {
+		return nil, err
+	}
+	cc := raftpb.ConfChange{
+		Type:    raftpb.ConfChangeUpdateNode,
+		NodeID:  uint64(memb.ID),
+		Context: b,
+	}
+	return s.configure(ctx, cc)
+}
+
+// Implement the RaftTimer interface
+
+func (s *EtcdServer) Index() uint64 { return atomic.LoadUint64(&s.r.index) }
+
+func (s *EtcdServer) Term() uint64 { return atomic.LoadUint64(&s.r.term) }
+
+// Lead is only for testing purposes.
+// TODO: add Raft server interface to expose raft related info:
+// Index, Term, Lead, Committed, Applied, LastIndex, etc.
+func (s *EtcdServer) Lead() uint64 { return atomic.LoadUint64(&s.r.lead) }
+
+func (s *EtcdServer) Leader() types.ID { return types.ID(s.Lead()) }
+
+type confChangeResponse struct {
+	membs []*membership.Member
+	err   error
+}
+
+// configure sends a configuration change through consensus and
+// then waits for it to be applied to the server. It
+// will block until the change is performed or there is an error.
+func (s *EtcdServer) configure(ctx context.Context, cc raftpb.ConfChange) ([]*membership.Member, error) {
+	cc.ID = s.reqIDGen.Next()
+	ch := s.w.Register(cc.ID)
+	start := time.Now()
+	if err := s.r.ProposeConfChange(ctx, cc); err != nil {
+		s.w.Trigger(cc.ID, nil)
+		return nil, err
+	}
+	select {
+	case x := <-ch:
+		if x == nil {
+			plog.Panicf("configure trigger value should never be nil")
+		}
+		resp := x.(*confChangeResponse)
+		return resp.membs, resp.err
+	case <-ctx.Done():
+		s.w.Trigger(cc.ID, nil) // GC wait
+		return nil, s.parseProposeCtxErr(ctx.Err(), start)
+	case <-s.stopping:
+		return nil, ErrStopped
+	}
+}
+
+// sync proposes a SYNC request and is non-blocking.
+// This makes no guarantee that the request will be proposed or performed.
+// The request will be canceled after the given timeout.
+func (s *EtcdServer) sync(timeout time.Duration) {
+	req := pb.Request{
+		Method: "SYNC",
+		ID:     s.reqIDGen.Next(),
+		Time:   time.Now().UnixNano(),
+	}
+	data := pbutil.MustMarshal(&req)
+	// There is no guarantee that the node has a leader when it makes a SYNC
+	// request, so a goroutine is used to propose.
+	ctx, cancel := context.WithTimeout(s.ctx, timeout)
+	s.goAttach(func() {
+		s.r.Propose(ctx, data)
+		cancel()
+	})
+}
+
+// publish registers server information into the cluster. The information
+// is the JSON representation of this server's member struct, updated with the
+// static clientURLs of the server.
+// The function keeps attempting to register until it succeeds,
+// or its server is stopped.
+func (s *EtcdServer) publish(timeout time.Duration) {
+	b, err := json.Marshal(s.attributes)
+	if err != nil {
+		plog.Panicf("json marshal error: %v", err)
+		return
+	}
+	req := pb.Request{
+		Method: "PUT",
+		Path:   membership.MemberAttributesStorePath(s.id),
+		Val:    string(b),
+	}
+
+	for {
+		ctx, cancel := context.WithTimeout(s.ctx, timeout)
+		_, err := s.Do(ctx, req)
+		cancel()
+		switch err {
+		case nil:
+			close(s.readych)
+			plog.Infof("published %+v to cluster %s", s.attributes, s.cluster.ID())
+			return
+		case ErrStopped:
+			plog.Infof("aborting publish because server is stopped")
+			return
+		default:
+			plog.Errorf("publish error: %v", err)
+		}
+	}
+}
+
+func (s *EtcdServer) sendMergedSnap(merged snap.Message) {
+	atomic.AddInt64(&s.inflightSnapshots, 1)
+
+	s.r.transport.SendSnapshot(merged)
+	s.goAttach(func() {
+		select {
+		case ok := <-merged.CloseNotify():
+			// delay releasing inflight snapshot for another 30 seconds to
+			// block log compaction.
+			// If the follower still fails to catch up, it is probably just too slow
+			// to catch up. We cannot avoid the snapshot cycle anyway.
+			if ok {
+				select {
+				case <-time.After(releaseDelayAfterSnapshot):
+				case <-s.stopping:
+				}
+			}
+			atomic.AddInt64(&s.inflightSnapshots, -1)
+		case <-s.stopping:
+			return
+		}
+	})
+}
+
+// apply takes entries received from Raft (after they have been committed) and
+// applies them to the current state of the EtcdServer.
+// The given entries should not be empty.
+func (s *EtcdServer) apply(es []raftpb.Entry, confState *raftpb.ConfState) (appliedt uint64, appliedi uint64, shouldStop bool) {
+	for i := range es {
+		e := es[i]
+		switch e.Type {
+		case raftpb.EntryNormal:
+			s.applyEntryNormal(&e)
+		case raftpb.EntryConfChange:
+			// set the consistent index of the currently executing entry
+			if e.Index > s.consistIndex.ConsistentIndex() {
+				s.consistIndex.setConsistentIndex(e.Index)
+			}
+			var cc raftpb.ConfChange
+			pbutil.MustUnmarshal(&cc, e.Data)
+			removedSelf, err := s.applyConfChange(cc, confState)
+			s.setAppliedIndex(e.Index)
+			shouldStop = shouldStop || removedSelf
+			s.w.Trigger(cc.ID, &confChangeResponse{s.cluster.Members(), err})
+		default:
+			plog.Panicf("entry type should be either EntryNormal or EntryConfChange")
+		}
+		atomic.StoreUint64(&s.r.index, e.Index)
+		atomic.StoreUint64(&s.r.term, e.Term)
+		appliedt = e.Term
+		appliedi = e.Index
+	}
+	return appliedt, appliedi, shouldStop
+}
+
+// applyEntryNormal applies an EntryNormal type raftpb request to the EtcdServer
+func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) {
+	shouldApplyV3 := false
+	if e.Index > s.consistIndex.ConsistentIndex() {
+		// set the consistent index of the currently executing entry
+		s.consistIndex.setConsistentIndex(e.Index)
+		shouldApplyV3 = true
+	}
+	defer s.setAppliedIndex(e.Index)
+
+	// the raft state machine may generate a noop entry upon leader confirmation.
+	// skip it in advance to avoid potential bugs in the future
+	if len(e.Data) == 0 {
+		select {
+		case s.forceVersionC <- struct{}{}:
+		default:
+		}
+		// promote lessor when the local member is leader and finished
+		// applying all entries from the last term.
+		if s.isLeader() {
+			s.lessor.Promote(s.Cfg.electionTimeout())
+		}
+		return
+	}
+
+	var raftReq pb.InternalRaftRequest
+	if !pbutil.MaybeUnmarshal(&raftReq, e.Data) { // backward compatible
+		var r pb.Request
+		rp := &r
+		pbutil.MustUnmarshal(rp, e.Data)
+		s.w.Trigger(r.ID, s.applyV2Request((*RequestV2)(rp)))
+		return
+	}
+	if raftReq.V2 != nil {
+		req := (*RequestV2)(raftReq.V2)
+		s.w.Trigger(req.ID, s.applyV2Request(req))
+		return
+	}
+
+	// do not re-apply applied entries.
+	if !shouldApplyV3 {
+		return
+	}
+
+	id := raftReq.ID
+	if id == 0 {
+		id = raftReq.Header.ID
+	}
+
+	var ar *applyResult
+	needResult := s.w.IsRegistered(id)
+	if needResult || !noSideEffect(&raftReq) {
+		if !needResult && raftReq.Txn != nil {
+			removeNeedlessRangeReqs(raftReq.Txn)
+		}
+		ar = s.applyV3.Apply(&raftReq)
+	}
+
+	if ar == nil {
+		return
+	}
+
+	if ar.err != ErrNoSpace || len(s.alarmStore.Get(pb.AlarmType_NOSPACE)) > 0 {
+		s.w.Trigger(id, ar)
+		return
+	}
+
+	plog.Errorf("applying raft message exceeded backend quota")
+	s.goAttach(func() {
+		a := &pb.AlarmRequest{
+			MemberID: uint64(s.ID()),
+			Action:   pb.AlarmRequest_ACTIVATE,
+			Alarm:    pb.AlarmType_NOSPACE,
+		}
+		s.raftRequest(s.ctx, pb.InternalRaftRequest{Alarm: a})
+		s.w.Trigger(id, ar)
+	})
+}
+
+// applyConfChange applies a ConfChange to the server. It is only
+// invoked with a ConfChange that has already passed through Raft
+func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange, confState *raftpb.ConfState) (bool, error) {
+	if err := s.cluster.ValidateConfigurationChange(cc); err != nil {
+		cc.NodeID = raft.None
+		s.r.ApplyConfChange(cc)
+		return false, err
+	}
+	*confState = *s.r.ApplyConfChange(cc)
+	switch cc.Type {
+	case raftpb.ConfChangeAddNode:
+		m := new(membership.Member)
+		if err := json.Unmarshal(cc.Context, m); err != nil {
+			plog.Panicf("unmarshal member should never fail: %v", err)
+		}
+		if cc.NodeID != uint64(m.ID) {
+			plog.Panicf("nodeID should always be equal to member ID")
+		}
+		s.cluster.AddMember(m)
+		if m.ID != s.id {
+			s.r.transport.AddPeer(m.ID, m.PeerURLs)
+		}
+	case raftpb.ConfChangeRemoveNode:
+		id := types.ID(cc.NodeID)
+		s.cluster.RemoveMember(id)
+		if id == s.id {
+			return true, nil
+		}
+		s.r.transport.RemovePeer(id)
+	case raftpb.ConfChangeUpdateNode:
+		m := new(membership.Member)
+		if err := json.Unmarshal(cc.Context, m); err != nil {
+			plog.Panicf("unmarshal member should never fail: %v", err)
+		}
+		if cc.NodeID != uint64(m.ID) {
+			plog.Panicf("nodeID should always be equal to member ID")
+		}
+		s.cluster.UpdateRaftAttributes(m.ID, m.RaftAttributes)
+		if m.ID != s.id {
+			s.r.transport.UpdatePeer(m.ID, m.PeerURLs)
+		}
+	}
+	return false, nil
+}
+
+// TODO: non-blocking snapshot
+func (s *EtcdServer) snapshot(snapi uint64, confState raftpb.ConfState) {
+	clone := s.store.Clone()
+	// commit kv to write metadata (for example: consistent index) to disk.
+	// KV().Commit() updates the consistent index in the backend.
+	// All operations that update consistent index must be called sequentially
+	// from applyAll function.
+	// So KV().Commit() cannot run in parallel with apply. It has to be called outside
+	// the goroutine created below.
+	s.KV().Commit()
+
+	s.goAttach(func() {
+		d, err := clone.SaveNoCopy()
+		// TODO: current store will never fail to do a snapshot
+		// what should we do if the store might fail?
+		if err != nil {
+			plog.Panicf("store save should never fail: %v", err)
+		}
+		snap, err := s.r.raftStorage.CreateSnapshot(snapi, &confState, d)
+		if err != nil {
+			// the snapshot was done asynchronously with the progress of raft.
+			// raft might have already got a newer snapshot.
+			if err == raft.ErrSnapOutOfDate {
+				return
+			}
+			plog.Panicf("unexpected create snapshot error %v", err)
+		}
+		// SaveSnap saves the snapshot and releases the locked wal files
+		// to the snapshot index.
+		if err = s.r.storage.SaveSnap(snap); err != nil {
+			plog.Fatalf("save snapshot error: %v", err)
+		}
+		plog.Infof("saved snapshot at index %d", snap.Metadata.Index)
+
+		// When sending a snapshot, etcd will pause compaction.
+		// After receiving a snapshot, the slow follower needs to get all the entries right after
+		// the snapshot was sent in order to catch up. If we do not pause compaction, the log entries
+		// right after the snapshot might already be compacted. This happens when the snapshot takes
+		// a long time to send and save. Pausing compaction avoids triggering a snapshot-sending cycle.
+		if atomic.LoadInt64(&s.inflightSnapshots) != 0 {
+			plog.Infof("skip compaction since there is an inflight snapshot")
+			return
+		}
+
+		// keep some in memory log entries for slow followers.
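+		// (illustration: if numberOfCatchUpEntries were 5000 and snapi were
+		// 100000, the log would be compacted up to index 95000.)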
+		compacti := uint64(1)
+		if snapi > numberOfCatchUpEntries {
+			compacti = snapi - numberOfCatchUpEntries
+		}
+		err = s.r.raftStorage.Compact(compacti)
+		if err != nil {
+			// the compaction was done asynchronously with the progress of raft.
+			// raft log might have already been compacted.
+			if err == raft.ErrCompacted {
+				return
+			}
+			plog.Panicf("unexpected compaction error %v", err)
+		}
+		plog.Infof("compacted raft log at %d", compacti)
+	})
+}
+
+// CutPeer drops messages to the specified peer.
+func (s *EtcdServer) CutPeer(id types.ID) {
+	tr, ok := s.r.transport.(*rafthttp.Transport)
+	if ok {
+		tr.CutPeer(id)
+	}
+}
+
+// MendPeer recovers the message dropping behavior of the given peer.
+func (s *EtcdServer) MendPeer(id types.ID) {
+	tr, ok := s.r.transport.(*rafthttp.Transport)
+	if ok {
+		tr.MendPeer(id)
+	}
+}
+
+func (s *EtcdServer) PauseSending() { s.r.pauseSending() }
+
+func (s *EtcdServer) ResumeSending() { s.r.resumeSending() }
+
+func (s *EtcdServer) ClusterVersion() *semver.Version {
+	if s.cluster == nil {
+		return nil
+	}
+	return s.cluster.Version()
+}
+
+// monitorVersions checks the member's version every monitorVersionInterval.
+// It updates the cluster version if all members agree on a higher one.
+// It logs a message if there is a member with a higher version than the
+// local version.
+func (s *EtcdServer) monitorVersions() {
+	for {
+		select {
+		case <-s.forceVersionC:
+		case <-time.After(monitorVersionInterval):
+		case <-s.stopping:
+			return
+		}
+
+		if s.Leader() != s.ID() {
+			continue
+		}
+
+		v := decideClusterVersion(getVersions(s.cluster, s.id, s.peerRt))
+		if v != nil {
+			// only keep major.minor version for comparison
+			v = &semver.Version{
+				Major: v.Major,
+				Minor: v.Minor,
+			}
+		}
+
+		// if the current version is nil:
+		// 1. use the decided version if possible
+		// 2. or use the min cluster version
+		if s.cluster.Version() == nil {
+			verStr := version.MinClusterVersion
+			if v != nil {
+				verStr = v.String()
+			}
+			s.goAttach(func() { s.updateClusterVersion(verStr) })
+			continue
+		}
+
+		// update cluster version only if the decided version is greater than
+		// the current cluster version
+		if v != nil && s.cluster.Version().LessThan(*v) {
+			s.goAttach(func() { s.updateClusterVersion(v.String()) })
+		}
+	}
+}
+
+func (s *EtcdServer) updateClusterVersion(ver string) {
+	if s.cluster.Version() == nil {
+		plog.Infof("setting up the initial cluster version to %s", version.Cluster(ver))
+	} else {
+		plog.Infof("updating the cluster version from %s to %s", version.Cluster(s.cluster.Version().String()), version.Cluster(ver))
+	}
+	req := pb.Request{
+		Method: "PUT",
+		Path:   membership.StoreClusterVersionKey(),
+		Val:    ver,
+	}
+	ctx, cancel := context.WithTimeout(s.ctx, s.Cfg.ReqTimeout())
+	_, err := s.Do(ctx, req)
+	cancel()
+	switch err {
+	case nil:
+		return
+	case ErrStopped:
+		plog.Infof("aborting update cluster version because server is stopped")
+		return
+	default:
+		plog.Errorf("error updating cluster version (%v)", err)
+	}
+}
+
+func (s *EtcdServer) parseProposeCtxErr(err error, start time.Time) error {
+	switch err {
+	case context.Canceled:
+		return ErrCanceled
+	case context.DeadlineExceeded:
+		s.leadTimeMu.RLock()
+		curLeadElected := s.leadElectedTime
+		s.leadTimeMu.RUnlock()
+		prevLeadLost := curLeadElected.Add(-2 * time.Duration(s.Cfg.ElectionTicks) * time.Duration(s.Cfg.TickMs) * time.Millisecond)
+		if start.After(prevLeadLost) && start.Before(curLeadElected) {
+			return ErrTimeoutDueToLeaderFail
+		}
+
+		lead := types.ID(atomic.LoadUint64(&s.r.lead))
+		switch lead {
+		case types.ID(raft.None):
+			// TODO: return error to specify it happens because the cluster does not have leader now
+		case s.ID():
+			if !isConnectedToQuorumSince(s.r.transport, start, s.ID(), s.cluster.Members()) {
+				return ErrTimeoutDueToConnectionLost
+			}
+		default:
+			if !isConnectedSince(s.r.transport, start, lead) {
+				return ErrTimeoutDueToConnectionLost
+			}
+		}
+
+		return ErrTimeout
+	default:
+		return err
+	}
+}
+
+func (s *EtcdServer) KV() mvcc.ConsistentWatchableKV { return s.kv }
+func (s *EtcdServer) Backend() backend.Backend {
+	s.bemu.Lock()
+	defer s.bemu.Unlock()
+	return s.be
+}
+
+func (s *EtcdServer) AuthStore() auth.AuthStore { return s.authStore }
+
+func (s *EtcdServer) restoreAlarms() error {
+	s.applyV3 = s.newApplierV3()
+	as, err := alarm.NewAlarmStore(s)
+	if err != nil {
+		return err
+	}
+	s.alarmStore = as
+	if len(as.Get(pb.AlarmType_NOSPACE)) > 0 {
+		s.applyV3 = newApplierV3Capped(s.applyV3)
+	}
+	if len(as.Get(pb.AlarmType_CORRUPT)) > 0 {
+		s.applyV3 = newApplierV3Corrupt(s.applyV3)
+	}
+	return nil
+}
+
+func (s *EtcdServer) getAppliedIndex() uint64 {
+	return atomic.LoadUint64(&s.appliedIndex)
+}
+
+func (s *EtcdServer) setAppliedIndex(v uint64) {
+	atomic.StoreUint64(&s.appliedIndex, v)
+}
+
+func (s *EtcdServer) getCommittedIndex() uint64 {
+	return atomic.LoadUint64(&s.committedIndex)
+}
+
+func (s *EtcdServer) setCommittedIndex(v uint64) {
+	atomic.StoreUint64(&s.committedIndex, v)
+}
+
+// goAttach creates a goroutine on a given function and tracks it using
+// the etcdserver waitgroup.
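+// A hypothetical usage sketch:
+//
+//	s.goAttach(func() {
+//		select {
+//		case <-s.stopping:
+//		case <-someWorkC: // someWorkC is illustrative only
+//		}
+//	})
+//
+// The goroutine is counted in s.wg, so run()'s shutdown path (which closes
+// s.stopping and then calls s.wg.Wait()) cannot return before it exits.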
+func (s *EtcdServer) goAttach(f func()) {
+	s.wgMu.RLock() // this blocks with ongoing close(s.stopping)
+	defer s.wgMu.RUnlock()
+	select {
+	case <-s.stopping:
+		plog.Warning("server has stopped (skipping goAttach)")
+		return
+	default:
+	}
+
+	// now safe to add since waitgroup wait has not started yet
+	s.wg.Add(1)
+	go func() {
+		defer s.wg.Done()
+		f()
+	}()
+}
+
+func (s *EtcdServer) Alarms() []*pb.AlarmMember {
+	return s.alarmStore.Get(pb.AlarmType_NONE)
+}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/snapshot_merge.go b/vendor/github.com/coreos/etcd/etcdserver/snapshot_merge.go
new file mode 100644
index 0000000..928aa95
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/snapshot_merge.go
@@ -0,0 +1,73 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+	"io"
+
+	"github.com/coreos/etcd/mvcc/backend"
+	"github.com/coreos/etcd/raft/raftpb"
+	"github.com/coreos/etcd/snap"
+)
+
+// createMergedSnapshotMessage creates a snapshot message that contains: raft status (term, conf),
+// a snapshot of v2 store inside raft.Snapshot as []byte, a snapshot of v3 KV in the top level message
+// as ReadCloser.
+func (s *EtcdServer) createMergedSnapshotMessage(m raftpb.Message, snapt, snapi uint64, confState raftpb.ConfState) snap.Message {
+	// get a snapshot of v2 store as []byte
+	clone := s.store.Clone()
+	d, err := clone.SaveNoCopy()
+	if err != nil {
+		plog.Panicf("store save should never fail: %v", err)
+	}
+
+	// commit kv to write metadata (for example: the consistent index).
+	s.KV().Commit()
+	dbsnap := s.be.Snapshot()
+	// get a snapshot of v3 KV as readCloser
+	rc := newSnapshotReaderCloser(dbsnap)
+
+	// put the []byte snapshot of store into raft snapshot and return the merged snapshot with
+	// KV readCloser snapshot.
+	snapshot := raftpb.Snapshot{
+		Metadata: raftpb.SnapshotMetadata{
+			Index:     snapi,
+			Term:      snapt,
+			ConfState: confState,
+		},
+		Data: d,
+	}
+	m.Snapshot = snapshot
+
+	return *snap.NewMessage(m, rc, dbsnap.Size())
+}
+
+func newSnapshotReaderCloser(snapshot backend.Snapshot) io.ReadCloser {
+	pr, pw := io.Pipe()
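+	// stream the snapshot through a pipe: the goroutine below writes while the
+	// returned reader is consumed, so the whole snapshot is never buffered in memory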
+	go func() {
+		n, err := snapshot.WriteTo(pw)
+		if err == nil {
+			plog.Infof("wrote database snapshot out [total bytes: %d]", n)
+		} else {
+			plog.Warningf("failed to write database snapshot out [written bytes: %d]: %v", n, err)
+		}
+		pw.CloseWithError(err)
+		err = snapshot.Close()
+		if err != nil {
+			plog.Panicf("failed to close database snapshot: %v", err)
+		}
+	}()
+	return pr
+}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/stats/leader.go b/vendor/github.com/coreos/etcd/etcdserver/stats/leader.go
new file mode 100644
index 0000000..8f6a54f
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/stats/leader.go
@@ -0,0 +1,128 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package stats
+
+import (
+	"encoding/json"
+	"math"
+	"sync"
+	"time"
+)
+
+// LeaderStats is used by the leader in an etcd cluster, and encapsulates
+// statistics about communication with its followers
+type LeaderStats struct {
+	leaderStats
+	sync.Mutex
+}
+
+type leaderStats struct {
+	// Leader is the ID of the leader in the etcd cluster.
+	// TODO(jonboulle): clarify that these are IDs, not names
+	Leader    string                    `json:"leader"`
+	Followers map[string]*FollowerStats `json:"followers"`
+}
+
+// NewLeaderStats generates a new LeaderStats with the given id as leader
+func NewLeaderStats(id string) *LeaderStats {
+	return &LeaderStats{
+		leaderStats: leaderStats{
+			Leader:    id,
+			Followers: make(map[string]*FollowerStats),
+		},
+	}
+}
+
+func (ls *LeaderStats) JSON() []byte {
+	ls.Lock()
+	stats := ls.leaderStats
+	ls.Unlock()
+	b, err := json.Marshal(stats)
+	// TODO(jonboulle): appropriate error handling?
+	if err != nil {
+		plog.Errorf("error marshalling leader stats (%v)", err)
+	}
+	return b
+}
+
+func (ls *LeaderStats) Follower(name string) *FollowerStats {
+	ls.Lock()
+	defer ls.Unlock()
+	fs, ok := ls.Followers[name]
+	if !ok {
+		fs = &FollowerStats{}
+		fs.Latency.Minimum = 1 << 63
+		ls.Followers[name] = fs
+	}
+	return fs
+}
+
+// FollowerStats encapsulates various statistics about a follower in an etcd cluster
+type FollowerStats struct {
+	Latency LatencyStats `json:"latency"`
+	Counts  CountsStats  `json:"counts"`
+
+	sync.Mutex
+}
+
+// LatencyStats encapsulates latency statistics.
+type LatencyStats struct {
+	Current           float64 `json:"current"`
+	Average           float64 `json:"average"`
+	averageSquare     float64
+	StandardDeviation float64 `json:"standardDeviation"`
+	Minimum           float64 `json:"minimum"`
+	Maximum           float64 `json:"maximum"`
+}
+
+// CountsStats encapsulates raft statistics.
+type CountsStats struct {
+	Fail    uint64 `json:"fail"`
+	Success uint64 `json:"success"`
+}
+
+// Succ updates the FollowerStats with a successful send
+func (fs *FollowerStats) Succ(d time.Duration) {
+	fs.Lock()
+	defer fs.Unlock()
+
+	total := float64(fs.Counts.Success) * fs.Latency.Average
+	totalSquare := float64(fs.Counts.Success) * fs.Latency.averageSquare
+
+	fs.Counts.Success++
+
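+	// d is a time.Duration in nanoseconds; dividing by 1e6 converts to milliseconds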
+	fs.Latency.Current = float64(d) / (1000000.0)
+
+	if fs.Latency.Current > fs.Latency.Maximum {
+		fs.Latency.Maximum = fs.Latency.Current
+	}
+
+	if fs.Latency.Current < fs.Latency.Minimum {
+		fs.Latency.Minimum = fs.Latency.Current
+	}
+
+	fs.Latency.Average = (total + fs.Latency.Current) / float64(fs.Counts.Success)
+	fs.Latency.averageSquare = (totalSquare + fs.Latency.Current*fs.Latency.Current) / float64(fs.Counts.Success)
+
+	// sdv = sqrt(avg(x^2) - avg(x)^2)
+	fs.Latency.StandardDeviation = math.Sqrt(fs.Latency.averageSquare - fs.Latency.Average*fs.Latency.Average)
+}
+
+// Fail updates the FollowerStats with an unsuccessful send
+func (fs *FollowerStats) Fail() {
+	fs.Lock()
+	defer fs.Unlock()
+	fs.Counts.Fail++
+}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/stats/queue.go b/vendor/github.com/coreos/etcd/etcdserver/stats/queue.go
new file mode 100644
index 0000000..635074c
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/stats/queue.go
@@ -0,0 +1,110 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package stats
+
+import (
+	"sync"
+	"time"
+)
+
+const (
+	queueCapacity = 200
+)
+
+// RequestStats represents the stats for a request.
+// It encapsulates the sending time and the size of the request.
+type RequestStats struct {
+	SendingTime time.Time
+	Size        int
+}
+
+type statsQueue struct {
+	items        [queueCapacity]*RequestStats
+	size         int
+	front        int
+	back         int
+	totalReqSize int
+	rwl          sync.RWMutex
+}
+
+func (q *statsQueue) Len() int {
+	return q.size
+}
+
+func (q *statsQueue) ReqSize() int {
+	return q.totalReqSize
+}
+
+// frontAndBack gets the front and back elements in the queue.
+// We must grab front and back together under the protection of the lock.
+func (q *statsQueue) frontAndBack() (*RequestStats, *RequestStats) {
+	q.rwl.RLock()
+	defer q.rwl.RUnlock()
+	if q.size != 0 {
+		return q.items[q.front], q.items[q.back]
+	}
+	return nil, nil
+}
+
+// Insert adds a RequestStats to the queue and updates the totals.
+func (q *statsQueue) Insert(p *RequestStats) {
+	q.rwl.Lock()
+	defer q.rwl.Unlock()
+
+	q.back = (q.back + 1) % queueCapacity
+
+	if q.size == queueCapacity { // full: evict the oldest entry
+		q.totalReqSize -= q.items[q.front].Size
+		q.front = (q.back + 1) % queueCapacity
+	} else {
+		q.size++
+	}
+
+	q.items[q.back] = p
+	q.totalReqSize += q.items[q.back].Size
+}
+
+// Rate returns the package (request) rate and the byte rate per second.
+func (q *statsQueue) Rate() (float64, float64) {
+	front, back := q.frontAndBack()
+
+	if front == nil || back == nil {
+		return 0, 0
+	}
+
+	if time.Since(back.SendingTime) > time.Second {
+		q.Clear()
+		return 0, 0
+	}
+
+	sampleDuration := back.SendingTime.Sub(front.SendingTime)
+
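+	// normalize the windowed totals to per-second rates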
+	pr := float64(q.Len()) / float64(sampleDuration) * float64(time.Second)
+
+	br := float64(q.ReqSize()) / float64(sampleDuration) * float64(time.Second)
+
+	return pr, br
+}
+
+// Clear resets the statsQueue to its initial empty state.
+func (q *statsQueue) Clear() {
+	q.rwl.Lock()
+	defer q.rwl.Unlock()
+	q.back = -1
+	q.front = 0
+	q.size = 0
+	q.totalReqSize = 0
+}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/stats/server.go b/vendor/github.com/coreos/etcd/etcdserver/stats/server.go
new file mode 100644
index 0000000..b026e44
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/stats/server.go
@@ -0,0 +1,142 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package stats
+
+import (
+	"encoding/json"
+	"log"
+	"sync"
+	"time"
+
+	"github.com/coreos/etcd/raft"
+)
+
+// ServerStats encapsulates various statistics about an EtcdServer and its
+// communication with other members of the cluster
+type ServerStats struct {
+	serverStats
+	sync.Mutex
+}
+
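+// NewServerStats returns ServerStats for a member with the given name and raft ID.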
+func NewServerStats(name, id string) *ServerStats {
+	ss := &ServerStats{
+		serverStats: serverStats{
+			Name: name,
+			ID:   id,
+		},
+	}
+	now := time.Now()
+	ss.StartTime = now
+	ss.LeaderInfo.StartTime = now
+	ss.sendRateQueue = &statsQueue{back: -1}
+	ss.recvRateQueue = &statsQueue{back: -1}
+	return ss
+}
+
+type serverStats struct {
+	Name string `json:"name"`
+	// ID is the raft ID of the node.
+	// TODO(jonboulle): use ID instead of name?
+	ID        string         `json:"id"`
+	State     raft.StateType `json:"state"`
+	StartTime time.Time      `json:"startTime"`
+
+	LeaderInfo struct {
+		Name      string    `json:"leader"`
+		Uptime    string    `json:"uptime"`
+		StartTime time.Time `json:"startTime"`
+	} `json:"leaderInfo"`
+
+	RecvAppendRequestCnt uint64  `json:"recvAppendRequestCnt"`
+	RecvingPkgRate       float64 `json:"recvPkgRate,omitempty"`
+	RecvingBandwidthRate float64 `json:"recvBandwidthRate,omitempty"`
+
+	SendAppendRequestCnt uint64  `json:"sendAppendRequestCnt"`
+	SendingPkgRate       float64 `json:"sendPkgRate,omitempty"`
+	SendingBandwidthRate float64 `json:"sendBandwidthRate,omitempty"`
+
+	sendRateQueue *statsQueue
+	recvRateQueue *statsQueue
+}
+
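+// JSON returns the statistics, with current rates and leader uptime filled in,
+// as a JSON-encoded byte slice; on marshal failure it logs the error and returns nil.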
+func (ss *ServerStats) JSON() []byte {
+	ss.Lock()
+	stats := ss.serverStats
+	stats.SendingPkgRate, stats.SendingBandwidthRate = stats.sendRateQueue.Rate()
+	stats.RecvingPkgRate, stats.RecvingBandwidthRate = stats.recvRateQueue.Rate()
+	stats.LeaderInfo.Uptime = time.Since(stats.LeaderInfo.StartTime).String()
+	ss.Unlock()
+	b, err := json.Marshal(stats)
+	// TODO(jonboulle): appropriate error handling?
+	if err != nil {
+		log.Printf("stats: error marshalling server stats: %v", err)
+	}
+	return b
+}
+
+// RecvAppendReq updates the ServerStats in response to an AppendRequest
+// from the given leader being received
+func (ss *ServerStats) RecvAppendReq(leader string, reqSize int) {
+	ss.Lock()
+	defer ss.Unlock()
+
+	now := time.Now()
+
+	ss.State = raft.StateFollower
+	if leader != ss.LeaderInfo.Name {
+		ss.LeaderInfo.Name = leader
+		ss.LeaderInfo.StartTime = now
+	}
+
+	ss.recvRateQueue.Insert(
+		&RequestStats{
+			SendingTime: now,
+			Size:        reqSize,
+		},
+	)
+	ss.RecvAppendRequestCnt++
+}
+
+// SendAppendReq updates the ServerStats in response to an AppendRequest
+// being sent by this server
+func (ss *ServerStats) SendAppendReq(reqSize int) {
+	ss.Lock()
+	defer ss.Unlock()
+
+	ss.becomeLeader()
+
+	ss.sendRateQueue.Insert(
+		&RequestStats{
+			SendingTime: time.Now(),
+			Size:        reqSize,
+		},
+	)
+
+	ss.SendAppendRequestCnt++
+}
+
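+// BecomeLeader records that this server has become the raft leader.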
+func (ss *ServerStats) BecomeLeader() {
+	ss.Lock()
+	defer ss.Unlock()
+	ss.becomeLeader()
+}
+
+func (ss *ServerStats) becomeLeader() {
+	if ss.State != raft.StateLeader {
+		ss.State = raft.StateLeader
+		ss.LeaderInfo.Name = ss.ID
+		ss.LeaderInfo.StartTime = time.Now()
+	}
+}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/stats/stats.go b/vendor/github.com/coreos/etcd/etcdserver/stats/stats.go
new file mode 100644
index 0000000..2b5f707
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/stats/stats.go
@@ -0,0 +1,32 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package stats defines a standard interface for etcd cluster statistics.
+package stats
+
+import "github.com/coreos/pkg/capnslog"
+
+var (
+	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/stats")
+)
+
+type Stats interface {
+	// SelfStats returns the struct representing statistics of this server
+	SelfStats() []byte
+	// LeaderStats returns the statistics of all followers in the cluster
+	// if this server is leader. Otherwise, nil is returned.
+	LeaderStats() []byte
+	// StoreStats returns statistics of the store backing this EtcdServer
+	StoreStats() []byte
+}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/storage.go b/vendor/github.com/coreos/etcd/etcdserver/storage.go
new file mode 100644
index 0000000..55c2dd4
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/storage.go
@@ -0,0 +1,98 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+	"io"
+
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+	"github.com/coreos/etcd/pkg/pbutil"
+	"github.com/coreos/etcd/pkg/types"
+	"github.com/coreos/etcd/raft/raftpb"
+	"github.com/coreos/etcd/snap"
+	"github.com/coreos/etcd/wal"
+	"github.com/coreos/etcd/wal/walpb"
+)
+
+type Storage interface {
+	// Save saves ents and state to the underlying stable storage.
+	// Save MUST block until st and ents are on stable storage.
+	Save(st raftpb.HardState, ents []raftpb.Entry) error
+	// SaveSnap saves the snapshot to the underlying stable storage.
+	SaveSnap(snap raftpb.Snapshot) error
+	// Close closes the Storage and performs finalization.
+	Close() error
+}
+
+type storage struct {
+	*wal.WAL
+	*snap.Snapshotter
+}
+
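+// NewStorage returns a Storage backed by the given WAL and Snapshotter.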
+func NewStorage(w *wal.WAL, s *snap.Snapshotter) Storage {
+	return &storage{w, s}
+}
+
+// SaveSnap saves the snapshot to disk and releases the locked
+// WAL files that are no longer needed.
+func (st *storage) SaveSnap(snap raftpb.Snapshot) error {
+	walsnap := walpb.Snapshot{
+		Index: snap.Metadata.Index,
+		Term:  snap.Metadata.Term,
+	}
+	err := st.WAL.SaveSnapshot(walsnap)
+	if err != nil {
+		return err
+	}
+	err = st.Snapshotter.SaveSnap(snap)
+	if err != nil {
+		return err
+	}
+	return st.WAL.ReleaseLockTo(snap.Metadata.Index)
+}
+
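+// readWAL reads the WAL at waldir starting from the given snapshot position,
+// repairing a torn tail (io.ErrUnexpectedEOF) at most once before giving up.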
+func readWAL(waldir string, snap walpb.Snapshot) (w *wal.WAL, id, cid types.ID, st raftpb.HardState, ents []raftpb.Entry) {
+	var (
+		err       error
+		wmetadata []byte
+	)
+
+	repaired := false
+	for {
+		if w, err = wal.Open(waldir, snap); err != nil {
+			plog.Fatalf("open wal error: %v", err)
+		}
+		if wmetadata, st, ents, err = w.ReadAll(); err != nil {
+			w.Close()
+			// we can only repair ErrUnexpectedEOF and we never repair twice.
+			if repaired || err != io.ErrUnexpectedEOF {
+				plog.Fatalf("read wal error (%v) and cannot be repaired", err)
+			}
+			if !wal.Repair(waldir) {
+				plog.Fatalf("WAL error (%v) cannot be repaired", err)
+			} else {
+				plog.Infof("repaired WAL error (%v)", err)
+				repaired = true
+			}
+			continue
+		}
+		break
+	}
+	var metadata pb.Metadata
+	pbutil.MustUnmarshal(&metadata, wmetadata)
+	id = types.ID(metadata.NodeID)
+	cid = types.ID(metadata.ClusterID)
+	return w, id, cid, st, ents
+}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/util.go b/vendor/github.com/coreos/etcd/etcdserver/util.go
new file mode 100644
index 0000000..79bb6b8
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/util.go
@@ -0,0 +1,155 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"time"
+
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+	"github.com/coreos/etcd/etcdserver/membership"
+	"github.com/coreos/etcd/pkg/types"
+	"github.com/coreos/etcd/rafthttp"
+	"github.com/golang/protobuf/proto"
+)
+
+// isConnectedToQuorumSince checks whether the local member has been connected
+// to a quorum of the cluster since the given time.
+func isConnectedToQuorumSince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*membership.Member) bool {
+	return numConnectedSince(transport, since, self, members) >= (len(members)/2)+1
+}
+
+// isConnectedSince checks whether the local member has been connected to the
+// remote member since the given time.
+func isConnectedSince(transport rafthttp.Transporter, since time.Time, remote types.ID) bool {
+	t := transport.ActiveSince(remote)
+	return !t.IsZero() && t.Before(since)
+}
+
+// isConnectedFullySince checks whether the local member has been connected to
+// all members in the cluster since the given time.
+func isConnectedFullySince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*membership.Member) bool {
+	return numConnectedSince(transport, since, self, members) == len(members)
+}
+
+// numConnectedSince counts how many members have been connected to the local
+// member since the given time.
+func numConnectedSince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*membership.Member) int {
+	connectedNum := 0
+	for _, m := range members {
+		if m.ID == self || isConnectedSince(transport, since, m.ID) {
+			connectedNum++
+		}
+	}
+	return connectedNum
+}
+
+// longestConnected chooses the member with the longest active-since time.
+// It returns false if no member is active.
+func longestConnected(tp rafthttp.Transporter, membs []types.ID) (types.ID, bool) {
+	var longest types.ID
+	var oldest time.Time
+	for _, id := range membs {
+		tm := tp.ActiveSince(id)
+		if tm.IsZero() { // inactive
+			continue
+		}
+
+		if oldest.IsZero() { // first longest candidate
+			oldest = tm
+			longest = id
+		}
+
+		if tm.Before(oldest) {
+			oldest = tm
+			longest = id
+		}
+	}
+	if uint64(longest) == 0 {
+		return longest, false
+	}
+	return longest, true
+}
+
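+// notifier delivers a single error to all waiters by closing its channel.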
+type notifier struct {
+	c   chan struct{}
+	err error
+}
+
+func newNotifier() *notifier {
+	return &notifier{
+		c: make(chan struct{}),
+	}
+}
+
+func (nc *notifier) notify(err error) {
+	nc.err = err
+	close(nc.c)
+}
+
+func warnOfExpensiveRequest(now time.Time, reqStringer fmt.Stringer, respMsg proto.Message, err error) {
+	var resp string
+	if !isNil(respMsg) {
+		resp = fmt.Sprintf("size:%d", proto.Size(respMsg))
+	}
+	warnOfExpensiveGenericRequest(now, reqStringer, "", resp, err)
+}
+
+func warnOfExpensiveReadOnlyTxnRequest(now time.Time, r *pb.TxnRequest, txnResponse *pb.TxnResponse, err error) {
+	reqStringer := pb.NewLoggableTxnRequest(r)
+	var resp string
+	if !isNil(txnResponse) {
+		var resps []string
+		for _, r := range txnResponse.Responses {
+			switch op := r.Response.(type) {
+			case *pb.ResponseOp_ResponseRange:
+				resps = append(resps, fmt.Sprintf("range_response_count:%d", len(op.ResponseRange.Kvs)))
+			default:
+				// only range responses should be in a read-only txn request
+			}
+		}
+		resp = fmt.Sprintf("responses:<%s> size:%d", strings.Join(resps, " "), proto.Size(txnResponse))
+	}
+	warnOfExpensiveGenericRequest(now, reqStringer, "read-only range ", resp, err)
+}
+
+func warnOfExpensiveReadOnlyRangeRequest(now time.Time, reqStringer fmt.Stringer, rangeResponse *pb.RangeResponse, err error) {
+	var resp string
+	if !isNil(rangeResponse) {
+		resp = fmt.Sprintf("range_response_count:%d size:%d", len(rangeResponse.Kvs), proto.Size(rangeResponse))
+	}
+	warnOfExpensiveGenericRequest(now, reqStringer, "read-only range ", resp, err)
+}
+
+func warnOfExpensiveGenericRequest(now time.Time, reqStringer fmt.Stringer, prefix string, resp string, err error) {
+	// TODO: add metrics
+	d := time.Since(now)
+	if d > warnApplyDuration {
+		var result string
+		if err != nil {
+			result = fmt.Sprintf("error:%v", err)
+		} else {
+			result = resp
+		}
+		plog.Warningf("%srequest %q with result %q took too long (%v) to execute", prefix, reqStringer.String(), result, d)
+		slowApplies.Inc()
+	}
+}
+
+func isNil(msg proto.Message) bool {
+	return msg == nil || reflect.ValueOf(msg).IsNil()
+}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/v2_server.go b/vendor/github.com/coreos/etcd/etcdserver/v2_server.go
new file mode 100644
index 0000000..b458350
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/v2_server.go
@@ -0,0 +1,165 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+	"context"
+	"time"
+
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+	"github.com/coreos/etcd/store"
+)
+
+type RequestV2 pb.Request
+
+type RequestV2Handler interface {
+	Post(ctx context.Context, r *RequestV2) (Response, error)
+	Put(ctx context.Context, r *RequestV2) (Response, error)
+	Delete(ctx context.Context, r *RequestV2) (Response, error)
+	QGet(ctx context.Context, r *RequestV2) (Response, error)
+	Get(ctx context.Context, r *RequestV2) (Response, error)
+	Head(ctx context.Context, r *RequestV2) (Response, error)
+}
+
+type reqV2HandlerEtcdServer struct {
+	reqV2HandlerStore
+	s *EtcdServer
+}
+
+type reqV2HandlerStore struct {
+	store   store.Store
+	applier ApplierV2
+}
+
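+// NewStoreRequestV2Handler returns a RequestV2Handler that serves requests
+// directly from the given store via the provided applier.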
+func NewStoreRequestV2Handler(s store.Store, applier ApplierV2) RequestV2Handler {
+	return &reqV2HandlerStore{s, applier}
+}
+
+func (a *reqV2HandlerStore) Post(ctx context.Context, r *RequestV2) (Response, error) {
+	return a.applier.Post(r), nil
+}
+
+func (a *reqV2HandlerStore) Put(ctx context.Context, r *RequestV2) (Response, error) {
+	return a.applier.Put(r), nil
+}
+
+func (a *reqV2HandlerStore) Delete(ctx context.Context, r *RequestV2) (Response, error) {
+	return a.applier.Delete(r), nil
+}
+
+func (a *reqV2HandlerStore) QGet(ctx context.Context, r *RequestV2) (Response, error) {
+	return a.applier.QGet(r), nil
+}
+
+func (a *reqV2HandlerStore) Get(ctx context.Context, r *RequestV2) (Response, error) {
+	if r.Wait {
+		wc, err := a.store.Watch(r.Path, r.Recursive, r.Stream, r.Since)
+		return Response{Watcher: wc}, err
+	}
+	ev, err := a.store.Get(r.Path, r.Recursive, r.Sorted)
+	return Response{Event: ev}, err
+}
+
+func (a *reqV2HandlerStore) Head(ctx context.Context, r *RequestV2) (Response, error) {
+	ev, err := a.store.Get(r.Path, r.Recursive, r.Sorted)
+	return Response{Event: ev}, err
+}
+
+func (a *reqV2HandlerEtcdServer) Post(ctx context.Context, r *RequestV2) (Response, error) {
+	return a.processRaftRequest(ctx, r)
+}
+
+func (a *reqV2HandlerEtcdServer) Put(ctx context.Context, r *RequestV2) (Response, error) {
+	return a.processRaftRequest(ctx, r)
+}
+
+func (a *reqV2HandlerEtcdServer) Delete(ctx context.Context, r *RequestV2) (Response, error) {
+	return a.processRaftRequest(ctx, r)
+}
+
+func (a *reqV2HandlerEtcdServer) QGet(ctx context.Context, r *RequestV2) (Response, error) {
+	return a.processRaftRequest(ctx, r)
+}
+
+func (a *reqV2HandlerEtcdServer) processRaftRequest(ctx context.Context, r *RequestV2) (Response, error) {
+	data, err := ((*pb.Request)(r)).Marshal()
+	if err != nil {
+		return Response{}, err
+	}
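+	// register interest in the request ID; the applied Response is delivered on ch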
+	ch := a.s.w.Register(r.ID)
+
+	start := time.Now()
+	a.s.r.Propose(ctx, data)
+	proposalsPending.Inc()
+	defer proposalsPending.Dec()
+
+	select {
+	case x := <-ch:
+		resp := x.(Response)
+		return resp, resp.Err
+	case <-ctx.Done():
+		proposalsFailed.Inc()
+		a.s.w.Trigger(r.ID, nil) // GC wait
+		return Response{}, a.s.parseProposeCtxErr(ctx.Err(), start)
+	case <-a.s.stopping:
+	}
+	return Response{}, ErrStopped
+}
+
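+// Do assigns the request a unique ID and dispatches it to the v2 handler; see
+// (*RequestV2).Handle for how each method is routed.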
+func (s *EtcdServer) Do(ctx context.Context, r pb.Request) (Response, error) {
+	r.ID = s.reqIDGen.Next()
+	h := &reqV2HandlerEtcdServer{
+		reqV2HandlerStore: reqV2HandlerStore{
+			store:   s.store,
+			applier: s.applyV2,
+		},
+		s: s,
+	}
+	rp := &r
+	resp, err := ((*RequestV2)(rp)).Handle(ctx, h)
+	resp.Term, resp.Index = s.Term(), s.Index()
+	return resp, err
+}
+
+// Handle interprets r and performs an operation on s.store according to r.Method
+// and other fields. If r.Method is "POST", "PUT", "DELETE", or a "GET" with
+// Quorum == true, r will be sent through consensus before performing its
+// respective operation. Handle will block until an action is performed or
+// there is an error.
+func (r *RequestV2) Handle(ctx context.Context, v2api RequestV2Handler) (Response, error) {
+	if r.Method == "GET" && r.Quorum {
+		r.Method = "QGET"
+	}
+	switch r.Method {
+	case "POST":
+		return v2api.Post(ctx, r)
+	case "PUT":
+		return v2api.Put(ctx, r)
+	case "DELETE":
+		return v2api.Delete(ctx, r)
+	case "QGET":
+		return v2api.QGet(ctx, r)
+	case "GET":
+		return v2api.Get(ctx, r)
+	case "HEAD":
+		return v2api.Head(ctx, r)
+	}
+	return Response{}, ErrUnknownMethod
+}
+
+func (r *RequestV2) String() string {
+	rpb := pb.Request(*r)
+	return rpb.String()
+}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/v3_server.go b/vendor/github.com/coreos/etcd/etcdserver/v3_server.go
new file mode 100644
index 0000000..f214a19
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/v3_server.go
@@ -0,0 +1,720 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+	"bytes"
+	"context"
+	"encoding/binary"
+	"time"
+
+	"github.com/coreos/etcd/auth"
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+	"github.com/coreos/etcd/etcdserver/membership"
+	"github.com/coreos/etcd/lease"
+	"github.com/coreos/etcd/lease/leasehttp"
+	"github.com/coreos/etcd/mvcc"
+	"github.com/coreos/etcd/raft"
+
+	"github.com/gogo/protobuf/proto"
+)
+
+const (
+	// In the healthy case, there might be a small gap (tens of entries) between
+	// the applied index and the committed index.
+	// However, if the committed entries are very expensive to apply, the gap can grow.
+	// We should stop accepting new proposals if the gap grows past a certain point.
+	maxGapBetweenApplyAndCommitIndex = 5000
+)
+
+type RaftKV interface {
+	Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error)
+	Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error)
+	DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error)
+	Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error)
+	Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error)
+}
+
+type Lessor interface {
+	// LeaseGrant sends a LeaseGrant request through raft and applies it once committed.
+	LeaseGrant(ctx context.Context, r *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error)
+	// LeaseRevoke sends a LeaseRevoke request through raft and applies it once committed.
+	LeaseRevoke(ctx context.Context, r *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error)
+
+	// LeaseRenew renews the lease with the given ID and returns the renewed TTL
+	// or an error.
+	LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, error)
+
+	// LeaseTimeToLive retrieves lease information.
+	LeaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error)
+
+	// LeaseLeases lists all leases.
+	LeaseLeases(ctx context.Context, r *pb.LeaseLeasesRequest) (*pb.LeaseLeasesResponse, error)
+}
+
+type Authenticator interface {
+	AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error)
+	AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error)
+	Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error)
+	UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error)
+	UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error)
+	UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error)
+	UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error)
+	UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error)
+	UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error)
+	RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error)
+	RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error)
+	RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error)
+	RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error)
+	RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error)
+	UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error)
+	RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error)
+}
+
+func (s *EtcdServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) {
+	var resp *pb.RangeResponse
+	var err error
+	defer func(start time.Time) {
+		warnOfExpensiveReadOnlyRangeRequest(start, r, resp, err)
+	}(time.Now())
+
+	if !r.Serializable {
+		err = s.linearizableReadNotify(ctx)
+		if err != nil {
+			return nil, err
+		}
+	}
+	chk := func(ai *auth.AuthInfo) error {
+		return s.authStore.IsRangePermitted(ai, r.Key, r.RangeEnd)
+	}
+
+	get := func() { resp, err = s.applyV3Base.Range(nil, r) }
+	if serr := s.doSerialize(ctx, chk, get); serr != nil {
+		err = serr
+		return nil, err
+	}
+	return resp, err
+}
+
+func (s *EtcdServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) {
+	resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{Put: r})
+	if err != nil {
+		return nil, err
+	}
+	return resp.(*pb.PutResponse), nil
+}
+
+func (s *EtcdServer) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
+	resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{DeleteRange: r})
+	if err != nil {
+		return nil, err
+	}
+	return resp.(*pb.DeleteRangeResponse), nil
+}
+
+func (s *EtcdServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) {
+	if isTxnReadonly(r) {
+		if !isTxnSerializable(r) {
+			err := s.linearizableReadNotify(ctx)
+			if err != nil {
+				return nil, err
+			}
+		}
+		var resp *pb.TxnResponse
+		var err error
+		chk := func(ai *auth.AuthInfo) error {
+			return checkTxnAuth(s.authStore, ai, r)
+		}
+
+		defer func(start time.Time) {
+			warnOfExpensiveReadOnlyTxnRequest(start, r, resp, err)
+		}(time.Now())
+
+		get := func() { resp, err = s.applyV3Base.Txn(r) }
+		if serr := s.doSerialize(ctx, chk, get); serr != nil {
+			return nil, serr
+		}
+		return resp, err
+	}
+
+	resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{Txn: r})
+	if err != nil {
+		return nil, err
+	}
+	return resp.(*pb.TxnResponse), nil
+}
+
+func isTxnSerializable(r *pb.TxnRequest) bool {
+	for _, u := range r.Success {
+		if r := u.GetRequestRange(); r == nil || !r.Serializable {
+			return false
+		}
+	}
+	for _, u := range r.Failure {
+		if r := u.GetRequestRange(); r == nil || !r.Serializable {
+			return false
+		}
+	}
+	return true
+}
+
+func isTxnReadonly(r *pb.TxnRequest) bool {
+	for _, u := range r.Success {
+		if r := u.GetRequestRange(); r == nil {
+			return false
+		}
+	}
+	for _, u := range r.Failure {
+		if r := u.GetRequestRange(); r == nil {
+			return false
+		}
+	}
+	return true
+}
+
+func (s *EtcdServer) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) {
+	result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{Compaction: r})
+	if r.Physical && result != nil && result.physc != nil {
+		<-result.physc
+		// The compaction is done deleting keys; the hash is now settled
+		// but the data is not necessarily committed. If there's a crash,
+		// the hash may revert to a hash prior to compaction completing
+		// if the compaction resumes. Force the finished compaction to
+		// commit so it won't resume following a crash.
+		s.be.ForceCommit()
+	}
+	if err != nil {
+		return nil, err
+	}
+	if result.err != nil {
+		return nil, result.err
+	}
+	resp := result.resp.(*pb.CompactionResponse)
+	if resp == nil {
+		resp = &pb.CompactionResponse{}
+	}
+	if resp.Header == nil {
+		resp.Header = &pb.ResponseHeader{}
+	}
+	resp.Header.Revision = s.kv.Rev()
+	return resp, nil
+}
+
+func (s *EtcdServer) LeaseGrant(ctx context.Context, r *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
+	// no ID given? choose one
+	for r.ID == int64(lease.NoLease) {
+		// only use positive int64 IDs (the mask below clears the sign bit)
+		r.ID = int64(s.reqIDGen.Next() & ((1 << 63) - 1))
+	}
+	resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{LeaseGrant: r})
+	if err != nil {
+		return nil, err
+	}
+	return resp.(*pb.LeaseGrantResponse), nil
+}
+
+func (s *EtcdServer) LeaseRevoke(ctx context.Context, r *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
+	resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{LeaseRevoke: r})
+	if err != nil {
+		return nil, err
+	}
+	return resp.(*pb.LeaseRevokeResponse), nil
+}
+
+func (s *EtcdServer) LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, error) {
+	ttl, err := s.lessor.Renew(id)
+	if err == nil { // already served by the primary lessor (leader)
+		return ttl, nil
+	}
+	if err != lease.ErrNotPrimary {
+		return -1, err
+	}
+
+	cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout())
+	defer cancel()
+
+	// renewals don't go through raft; forward to leader manually
+	for cctx.Err() == nil && err != nil {
+		leader, lerr := s.waitLeader(cctx)
+		if lerr != nil {
+			return -1, lerr
+		}
+		for _, url := range leader.PeerURLs {
+			lurl := url + leasehttp.LeasePrefix
+			ttl, err = leasehttp.RenewHTTP(cctx, id, lurl, s.peerRt)
+			if err == nil || err == lease.ErrLeaseNotFound {
+				return ttl, err
+			}
+		}
+	}
+	return -1, ErrTimeout
+}
+
+func (s *EtcdServer) LeaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) {
+	if s.Leader() == s.ID() {
+		// primary; serve TimeToLive directly from the leader
+		le := s.lessor.Lookup(lease.LeaseID(r.ID))
+		if le == nil {
+			return nil, lease.ErrLeaseNotFound
+		}
+		// TODO: fill out ResponseHeader
+		resp := &pb.LeaseTimeToLiveResponse{Header: &pb.ResponseHeader{}, ID: r.ID, TTL: int64(le.Remaining().Seconds()), GrantedTTL: le.TTL()}
+		if r.Keys {
+			ks := le.Keys()
+			kbs := make([][]byte, len(ks))
+			for i := range ks {
+				kbs[i] = []byte(ks[i])
+			}
+			resp.Keys = kbs
+		}
+		return resp, nil
+	}
+
+	cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout())
+	defer cancel()
+
+	// forward to leader
+	for cctx.Err() == nil {
+		leader, err := s.waitLeader(cctx)
+		if err != nil {
+			return nil, err
+		}
+		for _, url := range leader.PeerURLs {
+			lurl := url + leasehttp.LeaseInternalPrefix
+			resp, err := leasehttp.TimeToLiveHTTP(cctx, lease.LeaseID(r.ID), r.Keys, lurl, s.peerRt)
+			if err == nil {
+				return resp.LeaseTimeToLiveResponse, nil
+			}
+			if err == lease.ErrLeaseNotFound {
+				return nil, err
+			}
+		}
+	}
+	return nil, ErrTimeout
+}
+
+func (s *EtcdServer) LeaseLeases(ctx context.Context, r *pb.LeaseLeasesRequest) (*pb.LeaseLeasesResponse, error) {
+	ls := s.lessor.Leases()
+	lss := make([]*pb.LeaseStatus, len(ls))
+	for i := range ls {
+		lss[i] = &pb.LeaseStatus{ID: int64(ls[i].ID)}
+	}
+	return &pb.LeaseLeasesResponse{Header: newHeader(s), Leases: lss}, nil
+}
+
+func (s *EtcdServer) waitLeader(ctx context.Context) (*membership.Member, error) {
+	leader := s.cluster.Member(s.Leader())
+	for leader == nil {
+		// wait for an election
+		dur := time.Duration(s.Cfg.ElectionTicks) * time.Duration(s.Cfg.TickMs) * time.Millisecond
+		select {
+		case <-time.After(dur):
+			leader = s.cluster.Member(s.Leader())
+		case <-s.stopping:
+			return nil, ErrStopped
+		case <-ctx.Done():
+			return nil, ErrNoLeader
+		}
+	}
+	if leader == nil || len(leader.PeerURLs) == 0 {
+		return nil, ErrNoLeader
+	}
+	return leader, nil
+}
+
+func (s *EtcdServer) Alarm(ctx context.Context, r *pb.AlarmRequest) (*pb.AlarmResponse, error) {
+	resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{Alarm: r})
+	if err != nil {
+		return nil, err
+	}
+	return resp.(*pb.AlarmResponse), nil
+}
+
+func (s *EtcdServer) AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error) {
+	resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{AuthEnable: r})
+	if err != nil {
+		return nil, err
+	}
+	return resp.(*pb.AuthEnableResponse), nil
+}
+
+func (s *EtcdServer) AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error) {
+	resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthDisable: r})
+	if err != nil {
+		return nil, err
+	}
+	return resp.(*pb.AuthDisableResponse), nil
+}
+
+func (s *EtcdServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) {
+	if err := s.linearizableReadNotify(ctx); err != nil {
+		return nil, err
+	}
+
+	var resp proto.Message
+	for {
+		checkedRevision, err := s.AuthStore().CheckPassword(r.Name, r.Password)
+		if err != nil {
+			if err != auth.ErrAuthNotEnabled {
+				plog.Errorf("invalid authentication request to user %s was issued", r.Name)
+			}
+			return nil, err
+		}
+
+		st, err := s.AuthStore().GenTokenPrefix()
+		if err != nil {
+			return nil, err
+		}
+
+		internalReq := &pb.InternalAuthenticateRequest{
+			Name:        r.Name,
+			Password:    r.Password,
+			SimpleToken: st,
+		}
+
+		resp, err = s.raftRequestOnce(ctx, pb.InternalRaftRequest{Authenticate: internalReq})
+		if err != nil {
+			return nil, err
+		}
+		if checkedRevision == s.AuthStore().Revision() {
+			break
+		}
+		plog.Infof("revision when password checked is obsolete, retrying")
+	}
+
+	return resp.(*pb.AuthenticateResponse), nil
+}
+
+func (s *EtcdServer) UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) {
+	resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserAdd: r})
+	if err != nil {
+		return nil, err
+	}
+	return resp.(*pb.AuthUserAddResponse), nil
+}
+
+func (s *EtcdServer) UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) {
+	resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserDelete: r})
+	if err != nil {
+		return nil, err
+	}
+	return resp.(*pb.AuthUserDeleteResponse), nil
+}
+
+func (s *EtcdServer) UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) {
+	resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserChangePassword: r})
+	if err != nil {
+		return nil, err
+	}
+	return resp.(*pb.AuthUserChangePasswordResponse), nil
+}
+
+func (s *EtcdServer) UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) {
+	resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserGrantRole: r})
+	if err != nil {
+		return nil, err
+	}
+	return resp.(*pb.AuthUserGrantRoleResponse), nil
+}
+
+func (s *EtcdServer) UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
+	resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserGet: r})
+	if err != nil {
+		return nil, err
+	}
+	return resp.(*pb.AuthUserGetResponse), nil
+}
+
+func (s *EtcdServer) UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) {
+	resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserList: r})
+	if err != nil {
+		return nil, err
+	}
+	return resp.(*pb.AuthUserListResponse), nil
+}
+
+func (s *EtcdServer) UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) {
+	resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserRevokeRole: r})
+	if err != nil {
+		return nil, err
+	}
+	return resp.(*pb.AuthUserRevokeRoleResponse), nil
+}
+
+func (s *EtcdServer) RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) {
+	resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleAdd: r})
+	if err != nil {
+		return nil, err
+	}
+	return resp.(*pb.AuthRoleAddResponse), nil
+}
+
+func (s *EtcdServer) RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) {
+	resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleGrantPermission: r})
+	if err != nil {
+		return nil, err
+	}
+	return resp.(*pb.AuthRoleGrantPermissionResponse), nil
+}
+
+func (s *EtcdServer) RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
+	resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleGet: r})
+	if err != nil {
+		return nil, err
+	}
+	return resp.(*pb.AuthRoleGetResponse), nil
+}
+
+func (s *EtcdServer) RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) {
+	resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleList: r})
+	if err != nil {
+		return nil, err
+	}
+	return resp.(*pb.AuthRoleListResponse), nil
+}
+
+func (s *EtcdServer) RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) {
+	resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleRevokePermission: r})
+	if err != nil {
+		return nil, err
+	}
+	return resp.(*pb.AuthRoleRevokePermissionResponse), nil
+}
+
+func (s *EtcdServer) RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) {
+	resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleDelete: r})
+	if err != nil {
+		return nil, err
+	}
+	return resp.(*pb.AuthRoleDeleteResponse), nil
+}
+
+func (s *EtcdServer) raftRequestOnce(ctx context.Context, r pb.InternalRaftRequest) (proto.Message, error) {
+	result, err := s.processInternalRaftRequestOnce(ctx, r)
+	if err != nil {
+		return nil, err
+	}
+	if result.err != nil {
+		return nil, result.err
+	}
+	return result.resp, nil
+}
+
+func (s *EtcdServer) raftRequest(ctx context.Context, r pb.InternalRaftRequest) (proto.Message, error) {
+	for {
+		resp, err := s.raftRequestOnce(ctx, r)
+		if err != auth.ErrAuthOldRevision {
+			return resp, err
+		}
+	}
+}
+
+// doSerialize handles the auth logic, with permissions checked by "chk", for a serialized request "get". Returns a non-nil error on authentication failure.
+func (s *EtcdServer) doSerialize(ctx context.Context, chk func(*auth.AuthInfo) error, get func()) error {
+	for {
+		ai, err := s.AuthInfoFromCtx(ctx)
+		if err != nil {
+			return err
+		}
+		if ai == nil {
+			// chk expects non-nil AuthInfo; use empty credentials
+			ai = &auth.AuthInfo{}
+		}
+		if err = chk(ai); err != nil {
+			if err == auth.ErrAuthOldRevision {
+				continue
+			}
+			return err
+		}
+		// fetch response for serialized request
+		get()
+		// empty credentials or an up-to-date auth revision means no retry is needed
+		if ai.Revision == 0 || ai.Revision == s.authStore.Revision() {
+			return nil
+		}
+		// the auth revision changed mid-request; retry to avoid a TOCTOU error.
+	}
+}
+
+func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.InternalRaftRequest) (*applyResult, error) {
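+	// refuse new proposals while the apply backlog behind the commit index is too large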
+	ai := s.getAppliedIndex()
+	ci := s.getCommittedIndex()
+	if ci > ai+maxGapBetweenApplyAndCommitIndex {
+		return nil, ErrTooManyRequests
+	}
+
+	r.Header = &pb.RequestHeader{
+		ID: s.reqIDGen.Next(),
+	}
+
+	authInfo, err := s.AuthInfoFromCtx(ctx)
+	if err != nil {
+		return nil, err
+	}
+	if authInfo != nil {
+		r.Header.Username = authInfo.Username
+		r.Header.AuthRevision = authInfo.Revision
+	}
+
+	data, err := r.Marshal()
+	if err != nil {
+		return nil, err
+	}
+
+	if len(data) > int(s.Cfg.MaxRequestBytes) {
+		return nil, ErrRequestTooLarge
+	}
+
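+	// prefer the request's own ID when set; otherwise use the header ID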
+	id := r.ID
+	if id == 0 {
+		id = r.Header.ID
+	}
+	ch := s.w.Register(id)
+
+	cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout())
+	defer cancel()
+
+	start := time.Now()
+	s.r.Propose(cctx, data)
+	proposalsPending.Inc()
+	defer proposalsPending.Dec()
+
+	select {
+	case x := <-ch:
+		return x.(*applyResult), nil
+	case <-cctx.Done():
+		proposalsFailed.Inc()
+		s.w.Trigger(id, nil) // GC wait
+		return nil, s.parseProposeCtxErr(cctx.Err(), start)
+	case <-s.done:
+		return nil, ErrStopped
+	}
+}
+
+// Watchable returns a watchable interface attached to the etcdserver.
+func (s *EtcdServer) Watchable() mvcc.WatchableKV { return s.KV() }
+
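+// linearizableReadLoop issues raft ReadIndex requests and, once the applied
+// index catches up to the returned read index, notifies pending linearizable reads.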
+func (s *EtcdServer) linearizableReadLoop() {
+	var rs raft.ReadState
+
+	for {
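+		// encode a unique request ID so the matching ReadState can be identified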
+		ctxToSend := make([]byte, 8)
+		id1 := s.reqIDGen.Next()
+		binary.BigEndian.PutUint64(ctxToSend, id1)
+
+		select {
+		case <-s.readwaitc:
+		case <-s.stopping:
+			return
+		}
+
+		nextnr := newNotifier()
+
+		s.readMu.Lock()
+		nr := s.readNotifier
+		s.readNotifier = nextnr
+		s.readMu.Unlock()
+
+		cctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout())
+		if err := s.r.ReadIndex(cctx, ctxToSend); err != nil {
+			cancel()
+			if err == raft.ErrStopped {
+				return
+			}
+			plog.Errorf("failed to get read index from raft: %v", err)
+			readIndexFailed.Inc()
+			nr.notify(err)
+			continue
+		}
+		cancel()
+
+		var (
+			timeout bool
+			done    bool
+		)
+		for !timeout && !done {
+			select {
+			case rs = <-s.r.readStateC:
+				done = bytes.Equal(rs.RequestCtx, ctxToSend)
+				if !done {
+					// a previous request may have timed out; ignore its response and
+					// keep waiting for the response to the current request.
+					id2 := uint64(0)
+					if len(rs.RequestCtx) == 8 {
+						id2 = binary.BigEndian.Uint64(rs.RequestCtx)
+					}
+					plog.Warningf("ignored out-of-date read index response; local node read indexes queueing up and waiting to be in sync with leader (request ID want %d, got %d)", id1, id2)
+					slowReadIndex.Inc()
+				}
+
+			case <-time.After(s.Cfg.ReqTimeout()):
+				plog.Warningf("timed out waiting for read index response (local node might have slow network)")
+				nr.notify(ErrTimeout)
+				timeout = true
+				slowReadIndex.Inc()
+
+			case <-s.stopping:
+				return
+			}
+		}
+		if !done {
+			continue
+		}
+
+		if ai := s.getAppliedIndex(); ai < rs.Index {
+			select {
+			case <-s.applyWait.Wait(rs.Index):
+			case <-s.stopping:
+				return
+			}
+		}
+		// unblock all linearizable reads requested at indices before rs.Index
+		nr.notify(nil)
+	}
+}
+
+func (s *EtcdServer) linearizableReadNotify(ctx context.Context) error {
+	s.readMu.RLock()
+	nc := s.readNotifier
+	s.readMu.RUnlock()
+
+	// signal the linearizable read loop for the current notifier if it hasn't been signaled already
+	select {
+	case s.readwaitc <- struct{}{}:
+	default:
+	}
+
+	// wait for read state notification
+	select {
+	case <-nc.c:
+		return nc.err
+	case <-ctx.Done():
+		return ctx.Err()
+	case <-s.done:
+		return ErrStopped
+	}
+}
+
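+// AuthInfoFromCtx extracts auth information from the request context, falling
+// back to TLS client-certificate information when client cert auth is enabled.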
+func (s *EtcdServer) AuthInfoFromCtx(ctx context.Context) (*auth.AuthInfo, error) {
+	authInfo, err := s.AuthStore().AuthInfoFromCtx(ctx)
+	if authInfo != nil || err != nil {
+		return authInfo, err
+	}
+	if !s.Cfg.ClientCertAuthEnabled {
+		return nil, nil
+	}
+	authInfo = s.AuthStore().AuthInfoFromTLS(ctx)
+	return authInfo, nil
+}
diff --git a/vendor/github.com/coreos/etcd/functional.yaml b/vendor/github.com/coreos/etcd/functional.yaml
new file mode 100644
index 0000000..2029a01
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/functional.yaml
@@ -0,0 +1,206 @@
+agent-configs:
+- etcd-exec-path: ./bin/etcd
+  agent-addr: 127.0.0.1:19027
+  failpoint-http-addr: http://127.0.0.1:7381
+  base-dir: /tmp/etcd-functional-1
+  etcd-log-path: /tmp/etcd-functional-1/etcd.log
+  etcd-client-proxy: false
+  etcd-peer-proxy: true
+  etcd-client-endpoint: 127.0.0.1:1379
+  etcd:
+    name: s1
+    data-dir: /tmp/etcd-functional-1/etcd.data
+    wal-dir: /tmp/etcd-functional-1/etcd.data/member/wal
+    heartbeat-interval: 100
+    election-timeout: 1000
+    listen-client-urls: ["https://127.0.0.1:1379"]
+    advertise-client-urls: ["https://127.0.0.1:1379"]
+    auto-tls: true
+    client-cert-auth: false
+    cert-file: ""
+    key-file: ""
+    trusted-ca-file: ""
+    listen-peer-urls: ["https://127.0.0.1:1380"]
+    initial-advertise-peer-urls: ["https://127.0.0.1:1381"]
+    peer-auto-tls: true
+    peer-client-cert-auth: false
+    peer-cert-file: ""
+    peer-key-file: ""
+    peer-trusted-ca-file: ""
+    initial-cluster: s1=https://127.0.0.1:1381,s2=https://127.0.0.1:2381,s3=https://127.0.0.1:3381
+    initial-cluster-state: new
+    initial-cluster-token: tkn
+    snapshot-count: 10000
+    quota-backend-bytes: 10740000000 # 10 GiB
+    pre-vote: true
+    initial-corrupt-check: true
+  client-cert-data: ""
+  client-cert-path: ""
+  client-key-data: ""
+  client-key-path: ""
+  client-trusted-ca-data: ""
+  client-trusted-ca-path: ""
+  peer-cert-data: ""
+  peer-cert-path: ""
+  peer-key-data: ""
+  peer-key-path: ""
+  peer-trusted-ca-data: ""
+  peer-trusted-ca-path: ""
+  snapshot-path: /tmp/etcd-functional-1.snapshot.db
+
+- etcd-exec-path: ./bin/etcd
+  agent-addr: 127.0.0.1:29027
+  failpoint-http-addr: http://127.0.0.1:7382
+  base-dir: /tmp/etcd-functional-2
+  etcd-log-path: /tmp/etcd-functional-2/etcd.log
+  etcd-client-proxy: false
+  etcd-peer-proxy: true
+  etcd-client-endpoint: 127.0.0.1:2379
+  etcd:
+    name: s2
+    data-dir: /tmp/etcd-functional-2/etcd.data
+    wal-dir: /tmp/etcd-functional-2/etcd.data/member/wal
+    heartbeat-interval: 100
+    election-timeout: 1000
+    listen-client-urls: ["https://127.0.0.1:2379"]
+    advertise-client-urls: ["https://127.0.0.1:2379"]
+    auto-tls: true
+    client-cert-auth: false
+    cert-file: ""
+    key-file: ""
+    trusted-ca-file: ""
+    listen-peer-urls: ["https://127.0.0.1:2380"]
+    initial-advertise-peer-urls: ["https://127.0.0.1:2381"]
+    peer-auto-tls: true
+    peer-client-cert-auth: false
+    peer-cert-file: ""
+    peer-key-file: ""
+    peer-trusted-ca-file: ""
+    initial-cluster: s1=https://127.0.0.1:1381,s2=https://127.0.0.1:2381,s3=https://127.0.0.1:3381
+    initial-cluster-state: new
+    initial-cluster-token: tkn
+    snapshot-count: 10000
+    quota-backend-bytes: 10740000000 # 10 GiB
+    pre-vote: true
+    initial-corrupt-check: true
+  client-cert-data: ""
+  client-cert-path: ""
+  client-key-data: ""
+  client-key-path: ""
+  client-trusted-ca-data: ""
+  client-trusted-ca-path: ""
+  peer-cert-data: ""
+  peer-cert-path: ""
+  peer-key-data: ""
+  peer-key-path: ""
+  peer-trusted-ca-data: ""
+  peer-trusted-ca-path: ""
+  snapshot-path: /tmp/etcd-functional-2.snapshot.db
+
+- etcd-exec-path: ./bin/etcd
+  agent-addr: 127.0.0.1:39027
+  failpoint-http-addr: http://127.0.0.1:7383
+  base-dir: /tmp/etcd-functional-3
+  etcd-log-path: /tmp/etcd-functional-3/etcd.log
+  etcd-client-proxy: false
+  etcd-peer-proxy: true
+  etcd-client-endpoint: 127.0.0.1:3379
+  etcd:
+    name: s3
+    data-dir: /tmp/etcd-functional-3/etcd.data
+    wal-dir: /tmp/etcd-functional-3/etcd.data/member/wal
+    heartbeat-interval: 100
+    election-timeout: 1000
+    listen-client-urls: ["https://127.0.0.1:3379"]
+    advertise-client-urls: ["https://127.0.0.1:3379"]
+    auto-tls: true
+    client-cert-auth: false
+    cert-file: ""
+    key-file: ""
+    trusted-ca-file: ""
+    listen-peer-urls: ["https://127.0.0.1:3380"]
+    initial-advertise-peer-urls: ["https://127.0.0.1:3381"]
+    peer-auto-tls: true
+    peer-client-cert-auth: false
+    peer-cert-file: ""
+    peer-key-file: ""
+    peer-trusted-ca-file: ""
+    initial-cluster: s1=https://127.0.0.1:1381,s2=https://127.0.0.1:2381,s3=https://127.0.0.1:3381
+    initial-cluster-state: new
+    initial-cluster-token: tkn
+    snapshot-count: 10000
+    quota-backend-bytes: 10740000000 # 10 GiB
+    pre-vote: true
+    initial-corrupt-check: true
+  client-cert-data: ""
+  client-cert-path: ""
+  client-key-data: ""
+  client-key-path: ""
+  client-trusted-ca-data: ""
+  client-trusted-ca-path: ""
+  peer-cert-data: ""
+  peer-cert-path: ""
+  peer-key-data: ""
+  peer-key-path: ""
+  peer-trusted-ca-data: ""
+  peer-trusted-ca-path: ""
+  snapshot-path: /tmp/etcd-functional-3.snapshot.db
+
+tester-config:
+  data-dir: /tmp/etcd-tester-data
+  network: tcp
+  addr: 127.0.0.1:9028
+
+  # slow enough to trigger election
+  delay-latency-ms: 5000
+  delay-latency-ms-rv: 500
+
+  round-limit: 1
+  exit-on-failure: true
+  enable-pprof: true
+
+  case-delay-ms: 7000
+  case-shuffle: true
+
+  # For full descriptions,
+  # https://godoc.org/github.com/coreos/etcd/functional/rpcpb#Case
+  cases:
+  - SIGTERM_ONE_FOLLOWER
+  - SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT
+  - SIGTERM_LEADER
+  - SIGTERM_LEADER_UNTIL_TRIGGER_SNAPSHOT
+  - SIGTERM_QUORUM
+  - SIGTERM_ALL
+  - SIGQUIT_AND_REMOVE_ONE_FOLLOWER
+  - SIGQUIT_AND_REMOVE_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT
+  - BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER
+  - BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT
+  - BLACKHOLE_PEER_PORT_TX_RX_LEADER
+  - BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT
+  - BLACKHOLE_PEER_PORT_TX_RX_QUORUM
+  - DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER
+  - DELAY_PEER_PORT_TX_RX_LEADER
+  - DELAY_PEER_PORT_TX_RX_QUORUM
+
+  failpoint-commands:
+  - panic("etcd-tester")
+
+  runner-exec-path: ./bin/etcd-runner
+  external-exec-path: ""
+
+  stressers:
+  - KV
+  - LEASE
+
+  checkers:
+  - KV_HASH
+  - LEASE_EXPIRE
+
+  stress-key-size: 100
+  stress-key-size-large: 32769
+  stress-key-suffix-range: 250000
+  stress-key-suffix-range-txn: 100
+  stress-key-txn-ops: 10
+
+  stress-clients: 100
+  stress-qps: 2000
diff --git a/vendor/github.com/coreos/etcd/glide.lock b/vendor/github.com/coreos/etcd/glide.lock
new file mode 100644
index 0000000..4554c84
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/glide.lock
@@ -0,0 +1,199 @@
+hash: f0697416d74e4c0fb9d6471c39c3e005ecdeccc8a864c1b0b65e0087b3242027
+updated: 2018-04-10T23:45:04.40596807-07:00
+imports:
+- name: github.com/beorn7/perks
+  version: 3a771d992973f24aa725d07868b467d1ddfceafb
+  subpackages:
+  - quantile
+- name: github.com/bgentry/speakeasy
+  version: 4aabc24848ce5fd31929f7d1e4ea74d3709c14cd
+- name: github.com/coreos/bbolt
+  version: 48ea1b39c25fc1bab3506fbc712ecbaa842c4d2d
+- name: github.com/coreos/go-semver
+  version: 8ab6407b697782a06568d4b7f1db25550ec2e4c6
+  subpackages:
+  - semver
+- name: github.com/coreos/go-systemd
+  version: d2196463941895ee908e13531a23a39feb9e1243
+  subpackages:
+  - daemon
+  - journal
+  - util
+- name: github.com/coreos/pkg
+  version: 3ac0863d7acf3bc44daf49afef8919af12f704ef
+  subpackages:
+  - capnslog
+  - dlopen
+- name: github.com/cpuguy83/go-md2man
+  version: 23709d0847197db6021a51fdb193e66e9222d4e7
+  subpackages:
+  - md2man
+- name: github.com/dgrijalva/jwt-go
+  version: d2709f9f1f31ebcda9651b03077758c1f3a0018c
+- name: github.com/dustin/go-humanize
+  version: bb3d318650d48840a39aa21a027c6630e198e626
+- name: github.com/ghodss/yaml
+  version: 0ca9ea5df5451ffdf184b4428c902747c2c11cd7
+- name: github.com/gogo/protobuf
+  version: 342cbe0a04158f6dcb03ca0079991a51a4248c02
+  subpackages:
+  - gogoproto
+  - proto
+  - protoc-gen-gogo/descriptor
+- name: github.com/golang/groupcache
+  version: 02826c3e79038b59d737d3b1c0a1d937f71a4433
+  subpackages:
+  - lru
+- name: github.com/golang/protobuf
+  version: 1e59b77b52bf8e4b449a57e6f79f21226d571845
+  subpackages:
+  - jsonpb
+  - proto
+  - ptypes
+  - ptypes/any
+  - ptypes/duration
+  - ptypes/struct
+  - ptypes/timestamp
+- name: github.com/google/btree
+  version: 925471ac9e2131377a91e1595defec898166fe49
+- name: github.com/gorilla/websocket
+  version: 4201258b820c74ac8e6922fc9e6b52f71fe46f8d
+- name: github.com/grpc-ecosystem/go-grpc-prometheus
+  version: 0dafe0d496ea71181bf2dd039e7e3f44b6bd11a7
+- name: github.com/grpc-ecosystem/grpc-gateway
+  version: 8cc3a55af3bcf171a1c23a90c4df9cf591706104
+  subpackages:
+  - runtime
+  - runtime/internal
+  - utilities
+- name: github.com/inconshreveable/mousetrap
+  version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
+- name: github.com/jonboulle/clockwork
+  version: 2eee05ed794112d45db504eb05aa693efd2b8b09
+- name: github.com/kr/pty
+  version: 2c10821df3c3cf905230d078702dfbe9404c9b23
+- name: github.com/mattn/go-runewidth
+  version: 9e777a8366cce605130a531d2cd6363d07ad7317
+  subpackages:
+  - runewidth.go
+- name: github.com/matttproud/golang_protobuf_extensions
+  version: c12348ce28de40eed0136aa2b644d0ee0650e56c
+  subpackages:
+  - pbutil
+- name: github.com/olekukonko/tablewriter
+  version: a0225b3f23b5ce0cbec6d7a66a968f8a59eca9c4
+- name: github.com/prometheus/client_golang
+  version: 5cec1d0429b02e4323e042eb04dafdb079ddf568
+  subpackages:
+  - prometheus
+  - prometheus/promhttp
+- name: github.com/prometheus/client_model
+  version: 6f3806018612930941127f2a7c6c453ba2c527d2
+  subpackages:
+  - go
+- name: github.com/prometheus/common
+  version: e3fb1a1acd7605367a2b378bc2e2f893c05174b7
+  subpackages:
+  - expfmt
+  - internal/bitbucket.org/ww/goautoneg
+  - model
+- name: github.com/prometheus/procfs
+  version: a6e9df898b1336106c743392c48ee0b71f5c4efa
+  subpackages:
+  - xfs
+- name: github.com/russross/blackfriday
+  version: 4048872b16cc0fc2c5fd9eacf0ed2c2fedaa0c8c
+- name: github.com/sirupsen/logrus
+  version: f006c2ac4710855cf0f916dd6b77acf6b048dc6e
+- name: github.com/soheilhy/cmux
+  version: bb79a83465015a27a175925ebd155e660f55e9f1
+- name: github.com/spf13/cobra
+  version: 1c44ec8d3f1552cac48999f9306da23c4d8a288b
+- name: github.com/spf13/pflag
+  version: e57e3eeb33f795204c1ca35f56c44f83227c6e66
+- name: github.com/tmc/grpc-websocket-proxy
+  version: 89b8d40f7ca833297db804fcb3be53a76d01c238
+  subpackages:
+  - wsproxy
+- name: github.com/ugorji/go
+  version: bdcc60b419d136a85cdf2e7cbcac34b3f1cd6e57
+  subpackages:
+  - codec
+- name: github.com/urfave/cli
+  version: 1efa31f08b9333f1bd4882d61f9d668a70cd902e
+- name: github.com/xiang90/probing
+  version: 07dd2e8dfe18522e9c447ba95f2fe95262f63bb2
+- name: go.uber.org/atomic
+  version: 8474b86a5a6f79c443ce4b2992817ff32cf208b8
+- name: go.uber.org/multierr
+  version: 3c4937480c32f4c13a875a1829af76c98ca3d40a
+- name: go.uber.org/zap
+  version: 35aad584952c3e7020db7b839f6b102de6271f89
+  subpackages:
+  - buffer
+  - internal/bufferpool
+  - internal/color
+  - internal/exit
+  - zapcore
+- name: golang.org/x/crypto
+  version: 9419663f5a44be8b34ca85f08abc5fe1be11f8a3
+  subpackages:
+  - bcrypt
+  - blowfish
+  - ssh/terminal
+- name: golang.org/x/net
+  version: 66aacef3dd8a676686c7ae3716979581e8b03c47
+  subpackages:
+  - context
+  - http2
+  - http2/hpack
+  - idna
+  - internal/timeseries
+  - lex/httplex
+  - trace
+- name: golang.org/x/sys
+  version: ebfc5b4631820b793c9010c87fd8fef0f39eb082
+  subpackages:
+  - unix
+  - windows
+- name: golang.org/x/text
+  version: b19bf474d317b857955b12035d2c5acb57ce8b01
+  subpackages:
+  - secure/bidirule
+  - transform
+  - unicode/bidi
+  - unicode/norm
+- name: golang.org/x/time
+  version: c06e80d9300e4443158a03817b8a8cb37d230320
+  subpackages:
+  - rate
+- name: google.golang.org/genproto
+  version: 09f6ed296fc66555a25fe4ce95173148778dfa85
+  subpackages:
+  - googleapis/rpc/status
+- name: google.golang.org/grpc
+  version: 5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e
+  subpackages:
+  - balancer
+  - codes
+  - connectivity
+  - credentials
+  - grpclb/grpc_lb_v1/messages
+  - grpclog
+  - health
+  - health/grpc_health_v1
+  - internal
+  - keepalive
+  - metadata
+  - naming
+  - peer
+  - resolver
+  - stats
+  - status
+  - tap
+  - transport
+- name: gopkg.in/cheggaaa/pb.v1
+  version: 226d21d43a305fac52b3a104ef83e721b15275e0
+- name: gopkg.in/yaml.v2
+  version: cd8b52f8269e0feb286dfeef29f8fe4d5b397e0b
+testImports: []
diff --git a/vendor/github.com/coreos/etcd/glide.yaml b/vendor/github.com/coreos/etcd/glide.yaml
new file mode 100644
index 0000000..cc83a85
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/glide.yaml
@@ -0,0 +1,152 @@
+package: github.com/coreos/etcd
+ignore:
+- google.golang.org/appengine
+import:
+- package: github.com/bgentry/speakeasy
+  version: v0.1.0
+- package: github.com/coreos/bbolt
+  version: v1.3.1-coreos.6
+- package: github.com/coreos/go-semver
+  version: v0.2.0
+  subpackages:
+  - semver
+- package: github.com/coreos/go-systemd
+  version: v15
+  subpackages:
+  - daemon
+  - journal
+  - util
+- package: go.uber.org/zap
+  version: v1.7.1
+- package: github.com/coreos/pkg
+  version: v3
+  subpackages:
+  - capnslog
+- package: github.com/cpuguy83/go-md2man
+  version: 23709d0847197db6021a51fdb193e66e9222d4e7
+- package: github.com/dustin/go-humanize
+  version: bb3d318650d48840a39aa21a027c6630e198e626
+- package: github.com/ghodss/yaml
+  version: v1.0.0
+- package: github.com/gogo/protobuf
+  version: v0.5
+  subpackages:
+  - proto
+  - gogoproto
+- package: github.com/gorilla/websocket
+  version: 4201258b820c74ac8e6922fc9e6b52f71fe46f8d
+- package: github.com/golang/groupcache
+  version: 02826c3e79038b59d737d3b1c0a1d937f71a4433
+  subpackages:
+  - lru
+- package: github.com/golang/protobuf
+  version: 1e59b77b52bf8e4b449a57e6f79f21226d571845
+  subpackages:
+  - jsonpb
+  - proto
+- package: github.com/google/btree
+  version: 925471ac9e2131377a91e1595defec898166fe49
+- package: github.com/grpc-ecosystem/grpc-gateway
+  version: v1.3.0
+  subpackages:
+  - runtime
+  - runtime/internal
+  - utilities
+- package: github.com/jonboulle/clockwork
+  version: v0.1.0
+- package: github.com/kr/pty
+  version: v1.0.0
+- package: github.com/olekukonko/tablewriter
+  version: a0225b3f23b5ce0cbec6d7a66a968f8a59eca9c4
+- package: github.com/mattn/go-runewidth
+  version: v0.0.2
+  subpackages:
+  - runewidth.go
+- package: github.com/prometheus/client_golang
+  version: 5cec1d0429b02e4323e042eb04dafdb079ddf568
+  subpackages:
+  - prometheus
+  - prometheus/promhttp
+- package: github.com/prometheus/client_model
+  version: 6f3806018612930941127f2a7c6c453ba2c527d2
+  subpackages:
+  - go
+- package: github.com/prometheus/common
+  version: e3fb1a1acd7605367a2b378bc2e2f893c05174b7
+- package: github.com/prometheus/procfs
+  version: a6e9df898b1336106c743392c48ee0b71f5c4efa
+  subpackages:
+  - xfs
+- package: github.com/grpc-ecosystem/go-grpc-prometheus
+  version: 0dafe0d496ea71181bf2dd039e7e3f44b6bd11a7
+- package: github.com/spf13/cobra
+  version: 1c44ec8d3f1552cac48999f9306da23c4d8a288b
+- package: github.com/spf13/pflag
+  version: v1.0.0
+- package: github.com/ugorji/go
+  version: bdcc60b419d136a85cdf2e7cbcac34b3f1cd6e57
+  subpackages:
+  - codec
+- package: github.com/urfave/cli
+  version: v1.18.0
+- package: github.com/xiang90/probing
+  version: 0.0.1
+- package: golang.org/x/crypto
+  version: 9419663f5a44be8b34ca85f08abc5fe1be11f8a3
+  subpackages:
+  - bcrypt
+  - blowfish
+- package: golang.org/x/net
+  version: 66aacef3dd8a676686c7ae3716979581e8b03c47
+  subpackages:
+  - context
+  - http2
+  - http2/hpack
+  - internal/timeseries
+  - trace
+- package: golang.org/x/sys
+  version: ebfc5b4631820b793c9010c87fd8fef0f39eb082
+- package: golang.org/x/time
+  version: c06e80d9300e4443158a03817b8a8cb37d230320
+  subpackages:
+  - rate
+- package: google.golang.org/grpc
+  version: v1.7.5
+  subpackages:
+  - codes
+  - credentials
+  - grpclog
+  - internal
+  - metadata
+  - naming
+  - peer
+  - transport
+  - health
+  - health/grpc_health_v1
+- package: gopkg.in/cheggaaa/pb.v1
+  version: v1.0.2
+- package: gopkg.in/yaml.v2
+  version: cd8b52f8269e0feb286dfeef29f8fe4d5b397e0b
+- package: github.com/dgrijalva/jwt-go
+  version: v3.0.0
+- package: google.golang.org/genproto
+  version: 09f6ed296fc66555a25fe4ce95173148778dfa85
+  subpackages:
+  - googleapis/rpc/status
+- package: golang.org/x/text
+  version: b19bf474d317b857955b12035d2c5acb57ce8b01
+  subpackages:
+  - secure/bidirule
+  - transform
+  - unicode/bidi
+  - unicode/norm
+- package: github.com/russross/blackfriday
+  version: 4048872b16cc0fc2c5fd9eacf0ed2c2fedaa0c8c
+- package: github.com/sirupsen/logrus
+  version: v1.0.3
+- package: github.com/soheilhy/cmux
+  version: v0.1.3
+- package: github.com/tmc/grpc-websocket-proxy
+  version: 89b8d40f7ca833297db804fcb3be53a76d01c238
+  subpackages:
+  - wsproxy
diff --git a/vendor/github.com/coreos/etcd/lease/doc.go b/vendor/github.com/coreos/etcd/lease/doc.go
new file mode 100644
index 0000000..a74eaf7
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/lease/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package lease provides an interface and implementation for time-limited leases over arbitrary resources.
+package lease
diff --git a/vendor/github.com/coreos/etcd/lease/leasehttp/doc.go b/vendor/github.com/coreos/etcd/lease/leasehttp/doc.go
new file mode 100644
index 0000000..8177a37
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/lease/leasehttp/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package leasehttp serves lease renewals made through HTTP requests.
+package leasehttp
diff --git a/vendor/github.com/coreos/etcd/lease/leasehttp/http.go b/vendor/github.com/coreos/etcd/lease/leasehttp/http.go
new file mode 100644
index 0000000..ac2e788
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/lease/leasehttp/http.go
@@ -0,0 +1,247 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package leasehttp
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"time"
+
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+	"github.com/coreos/etcd/lease"
+	"github.com/coreos/etcd/lease/leasepb"
+	"github.com/coreos/etcd/pkg/httputil"
+)
+
+var (
+	LeasePrefix         = "/leases"
+	LeaseInternalPrefix = "/leases/internal"
+	applyTimeout        = time.Second
+	ErrLeaseHTTPTimeout = errors.New("waiting for node to catch up its applied index has timed out")
+)
+
+// NewHandler returns an http Handler for lease renewals.
+func NewHandler(l lease.Lessor, waitch func() <-chan struct{}) http.Handler {
+	return &leaseHandler{l, waitch}
+}
+
+type leaseHandler struct {
+	l      lease.Lessor
+	waitch func() <-chan struct{}
+}
+
+func (h *leaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	if r.Method != "POST" {
+		http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
+		return
+	}
+
+	b, err := ioutil.ReadAll(r.Body)
+	if err != nil {
+		http.Error(w, "error reading body", http.StatusBadRequest)
+		return
+	}
+
+	var v []byte
+	switch r.URL.Path {
+	case LeasePrefix:
+		lreq := pb.LeaseKeepAliveRequest{}
+		if err := lreq.Unmarshal(b); err != nil {
+			http.Error(w, "error unmarshalling request", http.StatusBadRequest)
+			return
+		}
+		select {
+		case <-h.waitch():
+		case <-time.After(applyTimeout):
+			http.Error(w, ErrLeaseHTTPTimeout.Error(), http.StatusRequestTimeout)
+			return
+		}
+		ttl, err := h.l.Renew(lease.LeaseID(lreq.ID))
+		if err != nil {
+			if err == lease.ErrLeaseNotFound {
+				http.Error(w, err.Error(), http.StatusNotFound)
+				return
+			}
+
+			http.Error(w, err.Error(), http.StatusBadRequest)
+			return
+		}
+		// TODO: fill out ResponseHeader
+		resp := &pb.LeaseKeepAliveResponse{ID: lreq.ID, TTL: ttl}
+		v, err = resp.Marshal()
+		if err != nil {
+			http.Error(w, err.Error(), http.StatusInternalServerError)
+			return
+		}
+
+	case LeaseInternalPrefix:
+		lreq := leasepb.LeaseInternalRequest{}
+		if err := lreq.Unmarshal(b); err != nil {
+			http.Error(w, "error unmarshalling request", http.StatusBadRequest)
+			return
+		}
+		select {
+		case <-h.waitch():
+		case <-time.After(applyTimeout):
+			http.Error(w, ErrLeaseHTTPTimeout.Error(), http.StatusRequestTimeout)
+			return
+		}
+		l := h.l.Lookup(lease.LeaseID(lreq.LeaseTimeToLiveRequest.ID))
+		if l == nil {
+			http.Error(w, lease.ErrLeaseNotFound.Error(), http.StatusNotFound)
+			return
+		}
+		// TODO: fill out ResponseHeader
+		resp := &leasepb.LeaseInternalResponse{
+			LeaseTimeToLiveResponse: &pb.LeaseTimeToLiveResponse{
+				Header:     &pb.ResponseHeader{},
+				ID:         lreq.LeaseTimeToLiveRequest.ID,
+				TTL:        int64(l.Remaining().Seconds()),
+				GrantedTTL: l.TTL(),
+			},
+		}
+		if lreq.LeaseTimeToLiveRequest.Keys {
+			ks := l.Keys()
+			kbs := make([][]byte, len(ks))
+			for i := range ks {
+				kbs[i] = []byte(ks[i])
+			}
+			resp.LeaseTimeToLiveResponse.Keys = kbs
+		}
+
+		v, err = resp.Marshal()
+		if err != nil {
+			http.Error(w, err.Error(), http.StatusInternalServerError)
+			return
+		}
+
+	default:
+		http.Error(w, fmt.Sprintf("unknown request path %q", r.URL.Path), http.StatusBadRequest)
+		return
+	}
+
+	w.Header().Set("Content-Type", "application/protobuf")
+	w.Write(v)
+}
+
+// RenewHTTP renews a lease at a given primary server.
+// TODO: Batch request in future?
+func RenewHTTP(ctx context.Context, id lease.LeaseID, url string, rt http.RoundTripper) (int64, error) {
+	// will post lreq protobuf to leader
+	lreq, err := (&pb.LeaseKeepAliveRequest{ID: int64(id)}).Marshal()
+	if err != nil {
+		return -1, err
+	}
+
+	cc := &http.Client{Transport: rt}
+	req, err := http.NewRequest("POST", url, bytes.NewReader(lreq))
+	if err != nil {
+		return -1, err
+	}
+	req.Header.Set("Content-Type", "application/protobuf")
+	req.Cancel = ctx.Done()
+
+	resp, err := cc.Do(req)
+	if err != nil {
+		return -1, err
+	}
+	b, err := readResponse(resp)
+	if err != nil {
+		return -1, err
+	}
+
+	if resp.StatusCode == http.StatusRequestTimeout {
+		return -1, ErrLeaseHTTPTimeout
+	}
+
+	if resp.StatusCode == http.StatusNotFound {
+		return -1, lease.ErrLeaseNotFound
+	}
+
+	if resp.StatusCode != http.StatusOK {
+		return -1, fmt.Errorf("lease: unknown error(%s)", string(b))
+	}
+
+	lresp := &pb.LeaseKeepAliveResponse{}
+	if err := lresp.Unmarshal(b); err != nil {
+		return -1, fmt.Errorf(`lease: %v. data = "%s"`, err, string(b))
+	}
+	if lresp.ID != int64(id) {
+		return -1, fmt.Errorf("lease: renew id mismatch")
+	}
+	return lresp.TTL, nil
+}
+
+// TimeToLiveHTTP retrieves lease information of the given lease ID.
+func TimeToLiveHTTP(ctx context.Context, id lease.LeaseID, keys bool, url string, rt http.RoundTripper) (*leasepb.LeaseInternalResponse, error) {
+	// will post lreq protobuf to leader
+	lreq, err := (&leasepb.LeaseInternalRequest{
+		LeaseTimeToLiveRequest: &pb.LeaseTimeToLiveRequest{
+			ID:   int64(id),
+			Keys: keys,
+		},
+	}).Marshal()
+	if err != nil {
+		return nil, err
+	}
+
+	req, err := http.NewRequest("POST", url, bytes.NewReader(lreq))
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Set("Content-Type", "application/protobuf")
+
+	req = req.WithContext(ctx)
+
+	cc := &http.Client{Transport: rt}
+	var b []byte
+	// issue the request and read the full response body before checking the status code
+	resp, err := cc.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	b, err = readResponse(resp)
+	if err != nil {
+		return nil, err
+	}
+	if resp.StatusCode == http.StatusRequestTimeout {
+		return nil, ErrLeaseHTTPTimeout
+	}
+	if resp.StatusCode == http.StatusNotFound {
+		return nil, lease.ErrLeaseNotFound
+	}
+	if resp.StatusCode != http.StatusOK {
+		return nil, fmt.Errorf("lease: unknown error(%s)", string(b))
+	}
+
+	lresp := &leasepb.LeaseInternalResponse{}
+	if err := lresp.Unmarshal(b); err != nil {
+		return nil, fmt.Errorf(`lease: %v. data = "%s"`, err, string(b))
+	}
+	if lresp.LeaseTimeToLiveResponse.ID != int64(id) {
+		return nil, fmt.Errorf("lease: renew id mismatch")
+	}
+	return lresp, nil
+}
+
+func readResponse(resp *http.Response) (b []byte, err error) {
+	b, err = ioutil.ReadAll(resp.Body)
+	httputil.GracefulClose(resp)
+	return
+}
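
For orientation, here is a minimal sketch of how a non-primary caller might use the RenewHTTP helper above to forward a keep-alive to the primary. The peer URL is a hypothetical placeholder, and http.DefaultTransport stands in for whatever round tripper the member is actually configured with; the request path must match LeasePrefix on the serving side.

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"

	"github.com/coreos/etcd/lease"
	"github.com/coreos/etcd/lease/leasehttp"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	// Hypothetical primary peer URL; the path must match leasehttp.LeasePrefix.
	url := "http://10.0.0.1:2380/leases"
	ttl, err := leasehttp.RenewHTTP(ctx, lease.LeaseID(1), url, http.DefaultTransport)
	if err != nil {
		fmt.Println("renew failed:", err)
		return
	}
	fmt.Println("renewed TTL (seconds):", ttl)
}
```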
diff --git a/vendor/github.com/coreos/etcd/lease/leasepb/lease.proto b/vendor/github.com/coreos/etcd/lease/leasepb/lease.proto
new file mode 100644
index 0000000..be414b9
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/lease/leasepb/lease.proto
@@ -0,0 +1,24 @@
+syntax = "proto3";
+package leasepb;
+
+import "gogoproto/gogo.proto";
+import "etcd/etcdserver/etcdserverpb/rpc.proto";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.sizer_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+option (gogoproto.goproto_getters_all) = false;
+option (gogoproto.goproto_enum_prefix_all) = false;
+
+message Lease {
+  int64 ID = 1;
+  int64 TTL = 2;
+}
+
+message LeaseInternalRequest {
+  etcdserverpb.LeaseTimeToLiveRequest LeaseTimeToLiveRequest = 1;
+}
+
+message LeaseInternalResponse {
+  etcdserverpb.LeaseTimeToLiveResponse LeaseTimeToLiveResponse = 1;
+}
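
As a rough illustration of the wire format declared above, this sketch builds the same internal TTL query that TimeToLiveHTTP sends, using the Go types generated from this proto; the Marshal method exists because of the gogoproto marshaler options set above.

```go
package main

import (
	"fmt"

	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"github.com/coreos/etcd/lease/leasepb"
)

func main() {
	// Build the message that TimeToLiveHTTP POSTs to the primary.
	req := &leasepb.LeaseInternalRequest{
		LeaseTimeToLiveRequest: &pb.LeaseTimeToLiveRequest{ID: 1, Keys: true},
	}
	b, err := req.Marshal() // generated by gogoproto (marshaler_all = true)
	if err != nil {
		panic(err)
	}
	fmt.Printf("encoded LeaseInternalRequest: %d bytes\n", len(b))
}
```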
diff --git a/vendor/github.com/coreos/etcd/lease/lessor.go b/vendor/github.com/coreos/etcd/lease/lessor.go
new file mode 100644
index 0000000..43f0503
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/lease/lessor.go
@@ -0,0 +1,680 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package lease
+
+import (
+	"encoding/binary"
+	"errors"
+	"math"
+	"sort"
+	"sync"
+	"time"
+
+	"github.com/coreos/etcd/lease/leasepb"
+	"github.com/coreos/etcd/mvcc/backend"
+)
+
+// NoLease is a special LeaseID representing the absence of a lease.
+const NoLease = LeaseID(0)
+
+// MaxLeaseTTL is the maximum lease TTL value, in seconds.
+const MaxLeaseTTL = 9000000000
+
+var (
+	forever = time.Time{}
+
+	leaseBucketName = []byte("lease")
+
+	// maximum number of leases to revoke per second; configurable for tests
+	leaseRevokeRate = 1000
+
+	ErrNotPrimary       = errors.New("not a primary lessor")
+	ErrLeaseNotFound    = errors.New("lease not found")
+	ErrLeaseExists      = errors.New("lease already exists")
+	ErrLeaseTTLTooLarge = errors.New("too large lease TTL")
+)
+
+// TxnDelete is a TxnWrite that only permits deletes. Defined here
+// to avoid circular dependency with mvcc.
+type TxnDelete interface {
+	DeleteRange(key, end []byte) (n, rev int64)
+	End()
+}
+
+// RangeDeleter is a TxnDelete constructor.
+type RangeDeleter func() TxnDelete
+
+type LeaseID int64
+
+// Lessor owns leases. It can grant, revoke, renew and modify leases for lessee.
+type Lessor interface {
+	// SetRangeDeleter lets the lessor create TxnDeletes to the store.
+	// Lessor deletes the items in the revoked or expired lease by creating
+	// new TxnDeletes.
+	SetRangeDeleter(rd RangeDeleter)
+
+	// Grant grants a lease that expires no sooner than TTL seconds from now.
+	Grant(id LeaseID, ttl int64) (*Lease, error)
+	// Revoke revokes a lease with given ID. The item attached to the
+	// given lease will be removed. If the ID does not exist, an error
+	// will be returned.
+	Revoke(id LeaseID) error
+
+	// Attach attaches given leaseItem to the lease with given LeaseID.
+	// If the lease does not exist, an error will be returned.
+	Attach(id LeaseID, items []LeaseItem) error
+
+	// GetLease returns the LeaseID for the given item.
+	// If no lease is found, NoLease is returned.
+	GetLease(item LeaseItem) LeaseID
+
+	// Detach detaches given leaseItem from the lease with given LeaseID.
+	// If the lease does not exist, an error will be returned.
+	Detach(id LeaseID, items []LeaseItem) error
+
+	// Promote promotes the lessor to be the primary lessor. Primary lessor manages
+	// the expiration and renew of leases.
+	// A newly promoted lessor renews the TTL of all leases to extend + previous TTL.
+	Promote(extend time.Duration)
+
+	// Demote demotes the lessor from being the primary lessor.
+	Demote()
+
+	// Renew renews a lease with given ID. It returns the renewed TTL. If the ID does not exist,
+	// an error will be returned.
+	Renew(id LeaseID) (int64, error)
+
+	// Lookup returns the lease with the given ID, if any.
+	Lookup(id LeaseID) *Lease
+
+	// Leases lists all leases.
+	Leases() []*Lease
+
+	// ExpiredLeasesC returns a chan that is used to receive expired leases.
+	ExpiredLeasesC() <-chan []*Lease
+
+	// Recover recovers the lessor state from the given backend and RangeDeleter.
+	Recover(b backend.Backend, rd RangeDeleter)
+
+	// Stop stops the lessor for managing leases. The behavior of calling Stop multiple
+	// times is undefined.
+	Stop()
+}
+
+// lessor implements Lessor interface.
+// TODO: use clockwork for testability.
+type lessor struct {
+	mu sync.Mutex
+
+	// demotec is set when the lessor is the primary.
+	// demotec will be closed if the lessor is demoted.
+	demotec chan struct{}
+
+	// TODO: probably this should be a heap with a secondary
+	// id index.
+	// Now it is O(N) to loop over the leases to find expired ones.
+	// We want to make Grant, Revoke, and findExpiredLeases all O(logN) and
+	// Renew O(1).
+	// findExpiredLeases and Renew should be the most frequent operations.
+	leaseMap map[LeaseID]*Lease
+
+	itemMap map[LeaseItem]LeaseID
+
+	// When a lease expires, the lessor will delete the
+	// leased range (or key) by the RangeDeleter.
+	rd RangeDeleter
+
+	// backend to persist leases. We only persist the lease ID and TTL for now.
+	// The leased items can be recovered by iterating over all the keys in kv.
+	b backend.Backend
+
+	// minLeaseTTL is the minimum lease TTL that can be granted for a lease. Any
+	// requests for shorter TTLs are extended to the minimum TTL.
+	minLeaseTTL int64
+
+	expiredC chan []*Lease
+	// stopC is a channel whose closure indicates that the lessor should be stopped.
+	stopC chan struct{}
+	// doneC is a channel whose closure indicates that the lessor is stopped.
+	doneC chan struct{}
+}
+
+func NewLessor(b backend.Backend, minLeaseTTL int64) Lessor {
+	return newLessor(b, minLeaseTTL)
+}
+
+func newLessor(b backend.Backend, minLeaseTTL int64) *lessor {
+	l := &lessor{
+		leaseMap:    make(map[LeaseID]*Lease),
+		itemMap:     make(map[LeaseItem]LeaseID),
+		b:           b,
+		minLeaseTTL: minLeaseTTL,
+		// expiredC is a small buffered chan to avoid unnecessary blocking.
+		expiredC: make(chan []*Lease, 16),
+		stopC:    make(chan struct{}),
+		doneC:    make(chan struct{}),
+	}
+	l.initAndRecover()
+
+	go l.runLoop()
+
+	return l
+}
+
+// isPrimary indicates if this lessor is the primary lessor. The primary
+// lessor manages lease expiration and renew.
+//
+// In etcd, the raft leader is the primary. Thus there might be two primaries
+// at the same time (raft allows concurrent leaders, but with different terms)
+// for at most a leader election timeout.
+// The old primary leader cannot affect correctness, since its proposals have
+// a smaller term and will not be committed.
+//
+// TODO: raft followers do not forward lease management proposals. There might be
+// a very small window (normally within a second, depending on Go scheduling) in
+// which a raft follower is the primary between the raft leader demotion and the
+// lessor demotion. Usually this should not be a problem; leases should not be that sensitive to timing.
+func (le *lessor) isPrimary() bool {
+	return le.demotec != nil
+}
+
+func (le *lessor) SetRangeDeleter(rd RangeDeleter) {
+	le.mu.Lock()
+	defer le.mu.Unlock()
+
+	le.rd = rd
+}
+
+func (le *lessor) Grant(id LeaseID, ttl int64) (*Lease, error) {
+	if id == NoLease {
+		return nil, ErrLeaseNotFound
+	}
+
+	if ttl > MaxLeaseTTL {
+		return nil, ErrLeaseTTLTooLarge
+	}
+
+	// TODO: when lessor is under high load, it should give out lease
+	// with longer TTL to reduce renew load.
+	l := &Lease{
+		ID:      id,
+		ttl:     ttl,
+		itemSet: make(map[LeaseItem]struct{}),
+		revokec: make(chan struct{}),
+	}
+
+	le.mu.Lock()
+	defer le.mu.Unlock()
+
+	if _, ok := le.leaseMap[id]; ok {
+		return nil, ErrLeaseExists
+	}
+
+	if l.ttl < le.minLeaseTTL {
+		l.ttl = le.minLeaseTTL
+	}
+
+	if le.isPrimary() {
+		l.refresh(0)
+	} else {
+		l.forever()
+	}
+
+	le.leaseMap[id] = l
+	l.persistTo(le.b)
+
+	return l, nil
+}
+
+func (le *lessor) Revoke(id LeaseID) error {
+	le.mu.Lock()
+
+	l := le.leaseMap[id]
+	if l == nil {
+		le.mu.Unlock()
+		return ErrLeaseNotFound
+	}
+	defer close(l.revokec)
+	// unlock before doing external work
+	le.mu.Unlock()
+
+	if le.rd == nil {
+		return nil
+	}
+
+	txn := le.rd()
+
+	// sort keys so deletes are in the same order among all members;
+	// otherwise the backend hashes will be different
+	keys := l.Keys()
+	sort.StringSlice(keys).Sort()
+	for _, key := range keys {
+		txn.DeleteRange([]byte(key), nil)
+	}
+
+	le.mu.Lock()
+	defer le.mu.Unlock()
+	delete(le.leaseMap, l.ID)
+	// lease deletion needs to be in the same backend transaction as the
+	// kv deletion; otherwise we might end up not executing the revoke or
+	// not deleting the keys if etcdserver fails in between.
+	le.b.BatchTx().UnsafeDelete(leaseBucketName, int64ToBytes(int64(l.ID)))
+
+	txn.End()
+	return nil
+}
+
+// Renew renews an existing lease. If the given lease does not exist or
+// has expired, an error will be returned.
+func (le *lessor) Renew(id LeaseID) (int64, error) {
+	le.mu.Lock()
+
+	unlock := func() { le.mu.Unlock() }
+	defer func() { unlock() }()
+
+	if !le.isPrimary() {
+		// forward renew request to primary instead of returning error.
+		return -1, ErrNotPrimary
+	}
+
+	demotec := le.demotec
+
+	l := le.leaseMap[id]
+	if l == nil {
+		return -1, ErrLeaseNotFound
+	}
+
+	if l.expired() {
+		le.mu.Unlock()
+		unlock = func() {}
+		select {
+		// An expired lease might be pending revocation or going through
+		// quorum to be revoked. To be accurate, the renew request must
+		// wait for the deletion to complete.
+		case <-l.revokec:
+			return -1, ErrLeaseNotFound
+		// The expired lease might fail to be revoked if the primary changes.
+		// The caller will retry on ErrNotPrimary.
+		case <-demotec:
+			return -1, ErrNotPrimary
+		case <-le.stopC:
+			return -1, ErrNotPrimary
+		}
+	}
+
+	l.refresh(0)
+	return l.ttl, nil
+}
+
+func (le *lessor) Lookup(id LeaseID) *Lease {
+	le.mu.Lock()
+	defer le.mu.Unlock()
+	return le.leaseMap[id]
+}
+
+func (le *lessor) unsafeLeases() []*Lease {
+	leases := make([]*Lease, 0, len(le.leaseMap))
+	for _, l := range le.leaseMap {
+		leases = append(leases, l)
+	}
+	sort.Sort(leasesByExpiry(leases))
+	return leases
+}
+
+func (le *lessor) Leases() []*Lease {
+	le.mu.Lock()
+	ls := le.unsafeLeases()
+	le.mu.Unlock()
+	return ls
+}
+
+func (le *lessor) Promote(extend time.Duration) {
+	le.mu.Lock()
+	defer le.mu.Unlock()
+
+	le.demotec = make(chan struct{})
+
+	// refresh the expiries of all leases.
+	for _, l := range le.leaseMap {
+		l.refresh(extend)
+	}
+
+	if len(le.leaseMap) < leaseRevokeRate {
+		// no possibility of lease pile-up
+		return
+	}
+
+	// adjust expiries in case of overlap
+	leases := le.unsafeLeases()
+
+	baseWindow := leases[0].Remaining()
+	nextWindow := baseWindow + time.Second
+	expires := 0
+	// allow fewer expirations than the total revoke rate so that piled-up
+	// leases don't consume the entire revoke limit
+	targetExpiresPerSecond := (3 * leaseRevokeRate) / 4
+	for _, l := range leases {
+		remaining := l.Remaining()
+		if remaining > nextWindow {
+			baseWindow = remaining
+			nextWindow = baseWindow + time.Second
+			expires = 1
+			continue
+		}
+		expires++
+		if expires <= targetExpiresPerSecond {
+			continue
+		}
+		rateDelay := float64(time.Second) * (float64(expires) / float64(targetExpiresPerSecond))
+		// If leases are extended by n seconds, leases n seconds ahead of the
+		// base window should be extended by only one second.
+		rateDelay -= float64(remaining - baseWindow)
+		delay := time.Duration(rateDelay)
+		nextWindow = baseWindow + delay
+		l.refresh(delay + extend)
+	}
+}
+
+type leasesByExpiry []*Lease
+
+func (le leasesByExpiry) Len() int           { return len(le) }
+func (le leasesByExpiry) Less(i, j int) bool { return le[i].Remaining() < le[j].Remaining() }
+func (le leasesByExpiry) Swap(i, j int)      { le[i], le[j] = le[j], le[i] }
+
+func (le *lessor) Demote() {
+	le.mu.Lock()
+	defer le.mu.Unlock()
+
+	// set the expiries of all leases to forever
+	for _, l := range le.leaseMap {
+		l.forever()
+	}
+
+	if le.demotec != nil {
+		close(le.demotec)
+		le.demotec = nil
+	}
+}
+
+// Attach attaches items to the lease with given ID. When the lease
+// expires, the attached items will be automatically removed.
+// If the given lease does not exist, an error will be returned.
+func (le *lessor) Attach(id LeaseID, items []LeaseItem) error {
+	le.mu.Lock()
+	defer le.mu.Unlock()
+
+	l := le.leaseMap[id]
+	if l == nil {
+		return ErrLeaseNotFound
+	}
+
+	l.mu.Lock()
+	for _, it := range items {
+		l.itemSet[it] = struct{}{}
+		le.itemMap[it] = id
+	}
+	l.mu.Unlock()
+	return nil
+}
+
+func (le *lessor) GetLease(item LeaseItem) LeaseID {
+	le.mu.Lock()
+	id := le.itemMap[item]
+	le.mu.Unlock()
+	return id
+}
+
+// Detach detaches items from the lease with given ID.
+// If the given lease does not exist, an error will be returned.
+func (le *lessor) Detach(id LeaseID, items []LeaseItem) error {
+	le.mu.Lock()
+	defer le.mu.Unlock()
+
+	l := le.leaseMap[id]
+	if l == nil {
+		return ErrLeaseNotFound
+	}
+
+	l.mu.Lock()
+	for _, it := range items {
+		delete(l.itemSet, it)
+		delete(le.itemMap, it)
+	}
+	l.mu.Unlock()
+	return nil
+}
+
+func (le *lessor) Recover(b backend.Backend, rd RangeDeleter) {
+	le.mu.Lock()
+	defer le.mu.Unlock()
+
+	le.b = b
+	le.rd = rd
+	le.leaseMap = make(map[LeaseID]*Lease)
+	le.itemMap = make(map[LeaseItem]LeaseID)
+	le.initAndRecover()
+}
+
+func (le *lessor) ExpiredLeasesC() <-chan []*Lease {
+	return le.expiredC
+}
+
+func (le *lessor) Stop() {
+	close(le.stopC)
+	<-le.doneC
+}
+
+func (le *lessor) runLoop() {
+	defer close(le.doneC)
+
+	for {
+		var ls []*Lease
+
+		// rate limit
+		revokeLimit := leaseRevokeRate / 2
+
+		le.mu.Lock()
+		if le.isPrimary() {
+			ls = le.findExpiredLeases(revokeLimit)
+		}
+		le.mu.Unlock()
+
+		if len(ls) != 0 {
+			select {
+			case <-le.stopC:
+				return
+			case le.expiredC <- ls:
+			default:
+				// the receiver of expiredC is probably busy
+				// handling other things; try again on the
+				// next 500ms tick
+			}
+		}
+
+		select {
+		case <-time.After(500 * time.Millisecond):
+		case <-le.stopC:
+			return
+		}
+	}
+}
+
+// findExpiredLeases loops over the leases in the leaseMap until the expired
+// limit is reached and returns the expired leases that need to be revoked.
+func (le *lessor) findExpiredLeases(limit int) []*Lease {
+	leases := make([]*Lease, 0, 16)
+
+	for _, l := range le.leaseMap {
+		// TODO: probably should change to <= 100-500 milliseconds to
+		// make up for commit latency.
+		if l.expired() {
+			leases = append(leases, l)
+
+			// reach expired limit
+			if len(leases) == limit {
+				break
+			}
+		}
+	}
+
+	return leases
+}
+
+func (le *lessor) initAndRecover() {
+	tx := le.b.BatchTx()
+	tx.Lock()
+
+	tx.UnsafeCreateBucket(leaseBucketName)
+	_, vs := tx.UnsafeRange(leaseBucketName, int64ToBytes(0), int64ToBytes(math.MaxInt64), 0)
+	// TODO: copy vs and do decoding outside tx lock if lock contention becomes an issue.
+	for i := range vs {
+		var lpb leasepb.Lease
+		err := lpb.Unmarshal(vs[i])
+		if err != nil {
+			tx.Unlock()
+			panic("failed to unmarshal lease proto item")
+		}
+		ID := LeaseID(lpb.ID)
+		if lpb.TTL < le.minLeaseTTL {
+			lpb.TTL = le.minLeaseTTL
+		}
+		le.leaseMap[ID] = &Lease{
+			ID:  ID,
+			ttl: lpb.TTL,
+			// itemSet will be filled in when recovering key-value pairs
+			// set expiry to forever, refresh when promoted
+			itemSet: make(map[LeaseItem]struct{}),
+			expiry:  forever,
+			revokec: make(chan struct{}),
+		}
+	}
+	tx.Unlock()
+
+	le.b.ForceCommit()
+}
+
+type Lease struct {
+	ID  LeaseID
+	ttl int64 // time to live in seconds
+	// expiryMu protects concurrent accesses to expiry
+	expiryMu sync.RWMutex
+	// expiry is the time when the lease should expire; no expiration when expiry.IsZero() is true
+	expiry time.Time
+
+	// mu protects concurrent accesses to itemSet
+	mu      sync.RWMutex
+	itemSet map[LeaseItem]struct{}
+	revokec chan struct{}
+}
+
+func (l *Lease) expired() bool {
+	return l.Remaining() <= 0
+}
+
+func (l *Lease) persistTo(b backend.Backend) {
+	key := int64ToBytes(int64(l.ID))
+
+	lpb := leasepb.Lease{ID: int64(l.ID), TTL: int64(l.ttl)}
+	val, err := lpb.Marshal()
+	if err != nil {
+		panic("failed to marshal lease proto item")
+	}
+
+	b.BatchTx().Lock()
+	b.BatchTx().UnsafePut(leaseBucketName, key, val)
+	b.BatchTx().Unlock()
+}
+
+// TTL returns the TTL of the Lease.
+func (l *Lease) TTL() int64 {
+	return l.ttl
+}
+
+// refresh refreshes the expiry of the lease.
+func (l *Lease) refresh(extend time.Duration) {
+	newExpiry := time.Now().Add(extend + time.Duration(l.ttl)*time.Second)
+	l.expiryMu.Lock()
+	defer l.expiryMu.Unlock()
+	l.expiry = newExpiry
+}
+
+// forever sets the expiry of lease to be forever.
+func (l *Lease) forever() {
+	l.expiryMu.Lock()
+	defer l.expiryMu.Unlock()
+	l.expiry = forever
+}
+
+// Keys returns all the keys attached to the lease.
+func (l *Lease) Keys() []string {
+	l.mu.RLock()
+	keys := make([]string, 0, len(l.itemSet))
+	for k := range l.itemSet {
+		keys = append(keys, k.Key)
+	}
+	l.mu.RUnlock()
+	return keys
+}
+
+// Remaining returns the remaining time of the lease.
+func (l *Lease) Remaining() time.Duration {
+	l.expiryMu.RLock()
+	defer l.expiryMu.RUnlock()
+	if l.expiry.IsZero() {
+		return time.Duration(math.MaxInt64)
+	}
+	return time.Until(l.expiry)
+}
+
+type LeaseItem struct {
+	Key string
+}
+
+func int64ToBytes(n int64) []byte {
+	bytes := make([]byte, 8)
+	binary.BigEndian.PutUint64(bytes, uint64(n))
+	return bytes
+}
+
+// FakeLessor is a fake implementation of Lessor interface.
+// Used for testing only.
+type FakeLessor struct{}
+
+func (fl *FakeLessor) SetRangeDeleter(dr RangeDeleter) {}
+
+func (fl *FakeLessor) Grant(id LeaseID, ttl int64) (*Lease, error) { return nil, nil }
+
+func (fl *FakeLessor) Revoke(id LeaseID) error { return nil }
+
+func (fl *FakeLessor) Attach(id LeaseID, items []LeaseItem) error { return nil }
+
+func (fl *FakeLessor) GetLease(item LeaseItem) LeaseID            { return 0 }
+func (fl *FakeLessor) Detach(id LeaseID, items []LeaseItem) error { return nil }
+
+func (fl *FakeLessor) Promote(extend time.Duration) {}
+
+func (fl *FakeLessor) Demote() {}
+
+func (fl *FakeLessor) Renew(id LeaseID) (int64, error) { return 10, nil }
+
+func (fl *FakeLessor) Lookup(id LeaseID) *Lease { return nil }
+
+func (fl *FakeLessor) Leases() []*Lease { return nil }
+
+func (fl *FakeLessor) ExpiredLeasesC() <-chan []*Lease { return nil }
+
+func (fl *FakeLessor) Recover(b backend.Backend, rd RangeDeleter) {}
+
+func (fl *FakeLessor) Stop() {}
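
A minimal usage sketch of the Lessor API above, assuming the NewDefaultTmpBackend test helper from mvcc/backend (added later in this change). This shows only the grant/attach/renew flow, not how etcdserver actually wires the lessor up.

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/lease"
	"github.com/coreos/etcd/mvcc/backend"
)

func main() {
	// NewDefaultTmpBackend is the test helper defined in mvcc/backend.
	b, _ := backend.NewDefaultTmpBackend()
	defer b.Close()

	le := lease.NewLessor(b, 5) // 5s minimum TTL; shorter grants are raised to it
	defer le.Stop()

	le.Promote(0) // only a primary lessor expires and renews leases

	l, err := le.Grant(lease.LeaseID(1), 10)
	if err != nil {
		panic(err)
	}
	if err := le.Attach(l.ID, []lease.LeaseItem{{Key: "foo"}}); err != nil {
		panic(err)
	}

	ttl, err := le.Renew(l.ID)
	fmt.Println("renewed TTL:", ttl, "err:", err, "remaining:", l.Remaining())
}
```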
diff --git a/vendor/github.com/coreos/etcd/main.go b/vendor/github.com/coreos/etcd/main.go
new file mode 100644
index 0000000..0b73573
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/main.go
@@ -0,0 +1,29 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package main is a simple wrapper of the real etcd entrypoint package
+// (located at github.com/coreos/etcd/etcdmain) to ensure that etcd is still
+// "go getable"; e.g. `go get github.com/coreos/etcd` works as expected and
+// builds a binary in $GOBIN/etcd
+//
+// This package should NOT be extended or modified in any way; to modify the
+// etcd binary, work in the `github.com/coreos/etcd/etcdmain` package.
+//
+package main
+
+import "github.com/coreos/etcd/etcdmain"
+
+func main() {
+	etcdmain.Main()
+}
diff --git a/vendor/github.com/coreos/etcd/meeting.ics b/vendor/github.com/coreos/etcd/meeting.ics
new file mode 100644
index 0000000..0157f9a
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/meeting.ics
@@ -0,0 +1,49 @@
+BEGIN:VCALENDAR
+PRODID:-//Google Inc//Google Calendar 70.9054//EN
+VERSION:2.0
+CALSCALE:GREGORIAN
+METHOD:REPLY
+BEGIN:VTIMEZONE
+TZID:America/Los_Angeles
+X-LIC-LOCATION:America/Los_Angeles
+BEGIN:DAYLIGHT
+TZOFFSETFROM:-0800
+TZOFFSETTO:-0700
+TZNAME:PDT
+DTSTART:19700308T020000
+RRULE:FREQ=YEARLY;BYMONTH=3;BYDAY=2SU
+END:DAYLIGHT
+BEGIN:STANDARD
+TZOFFSETFROM:-0700
+TZOFFSETTO:-0800
+TZNAME:PST
+DTSTART:19701101T020000
+RRULE:FREQ=YEARLY;BYMONTH=11;BYDAY=1SU
+END:STANDARD
+END:VTIMEZONE
+BEGIN:VEVENT
+DTSTART;TZID=America/Los_Angeles:20180116T110000
+DTEND;TZID=America/Los_Angeles:20180116T115000
+RRULE:FREQ=WEEKLY;INTERVAL=2;BYDAY=TU
+DTSTAMP:20171213T131221Z
+ORGANIZER;CN=Gyuho Lee:mailto:gyu_ho.lee@coreos.com
+UID:11ivec3kg2egsng3vrl8t5alar@google.com
+CREATED:20171212T194217Z
+DESCRIPTION:<br>Please add your discussion items to the meeting notes.<br><
+ br>Meeting notes<br><a href="https://docs.google.com/document/d/1DbVXOHvd9s
+ cFsSmL2oNg4YGOHJdXqtx583DmeVWrB_M/edit?usp=sharing">https://docs.google.com
+ /document/d/1DbVXOHvd9scFsSmL2oNg4YGOHJdXqtx583DmeVWrB_M/edit?usp=sharing</
+ a><br><br>Zoom meeting<br><a href="https://www.google.com/url?q=https%3A%2F
+ %2Fcoreos.zoom.us%2Fj%2F854793406&amp\;sa=D&amp\;ust=1509474820520000&amp\;
+ usg=AFQjCNFIOIfx1O_dgC-1N5YLyLOMa7D3Dg" target="_blank">https://coreos.zoom
+ .us/j/854793406</a><br><br>Slack<br><a href="https://www.google.com/url?q=h
+ ttps%3A%2F%2Fkubernetes.slack.com&amp\;sa=D&amp\;ust=1513114941738000&amp\;
+ usg=AFQjCNHbdDPJcyZ2tVATRqTQDuZDFzGoRQ" target="_blank">https://kubernetes.
+ slack.com</a> <i>#etcd</i><br><br><i><br></i>
+LAST-MODIFIED:20171213T131220Z
+SEQUENCE:0
+STATUS:CONFIRMED
+SUMMARY:etcd meeting
+TRANSP:OPAQUE
+END:VEVENT
+END:VCALENDAR
diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/backend.go b/vendor/github.com/coreos/etcd/mvcc/backend/backend.go
new file mode 100644
index 0000000..f7d9e60
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/mvcc/backend/backend.go
@@ -0,0 +1,464 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package backend
+
+import (
+	"fmt"
+	"hash/crc32"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	bolt "github.com/coreos/bbolt"
+	"github.com/coreos/pkg/capnslog"
+)
+
+var (
+	defaultBatchLimit    = 10000
+	defaultBatchInterval = 100 * time.Millisecond
+
+	defragLimit = 10000
+
+	// initialMmapSize is the initial size of the mmapped region. Setting this larger than
+	// the potential max db size can prevent the writer from blocking readers.
+	// This only works on linux.
+	initialMmapSize = uint64(10 * 1024 * 1024 * 1024)
+
+	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "mvcc/backend")
+
+	// minSnapshotWarningTimeout is the minimum threshold to trigger a long running snapshot warning.
+	minSnapshotWarningTimeout = time.Duration(30 * time.Second)
+)
+
+type Backend interface {
+	ReadTx() ReadTx
+	BatchTx() BatchTx
+
+	Snapshot() Snapshot
+	Hash(ignores map[IgnoreKey]struct{}) (uint32, error)
+	// Size returns the current size of the backend.
+	Size() int64
+	// SizeInUse returns the current size of the backend logically in use.
+	// Since the backend can manage free space in a non-byte unit such as
+	// the number of pages, the returned value might not be accurate to the byte.
+	SizeInUse() int64
+	Defrag() error
+	ForceCommit()
+	Close() error
+}
+
+type Snapshot interface {
+	// Size gets the size of the snapshot.
+	Size() int64
+	// WriteTo writes the snapshot into the given writer.
+	WriteTo(w io.Writer) (n int64, err error)
+	// Close closes the snapshot.
+	Close() error
+}
+
+type backend struct {
+	// size and commits are used with atomic operations so they must be
+	// 64-bit aligned, otherwise 32-bit tests will crash
+
+	// size is the number of bytes in the backend
+	size int64
+
+	// sizeInUse is the number of bytes actually used in the backend
+	sizeInUse int64
+
+	// commits counts number of commits since start
+	commits int64
+
+	mu sync.RWMutex
+	db *bolt.DB
+
+	batchInterval time.Duration
+	batchLimit    int
+	batchTx       *batchTxBuffered
+
+	readTx *readTx
+
+	stopc chan struct{}
+	donec chan struct{}
+}
+
+type BackendConfig struct {
+	// Path is the file path to the backend file.
+	Path string
+	// BatchInterval is the maximum time before flushing the BatchTx.
+	BatchInterval time.Duration
+	// BatchLimit is the maximum puts before flushing the BatchTx.
+	BatchLimit int
+	// MmapSize is the number of bytes to mmap for the backend.
+	MmapSize uint64
+}
+
+func DefaultBackendConfig() BackendConfig {
+	return BackendConfig{
+		BatchInterval: defaultBatchInterval,
+		BatchLimit:    defaultBatchLimit,
+		MmapSize:      initialMmapSize,
+	}
+}
+
+func New(bcfg BackendConfig) Backend {
+	return newBackend(bcfg)
+}
+
+func NewDefaultBackend(path string) Backend {
+	bcfg := DefaultBackendConfig()
+	bcfg.Path = path
+	return newBackend(bcfg)
+}
+
+func newBackend(bcfg BackendConfig) *backend {
+	bopts := &bolt.Options{}
+	if boltOpenOptions != nil {
+		*bopts = *boltOpenOptions
+	}
+	bopts.InitialMmapSize = bcfg.mmapSize()
+
+	db, err := bolt.Open(bcfg.Path, 0600, bopts)
+	if err != nil {
+		plog.Panicf("cannot open database at %s (%v)", bcfg.Path, err)
+	}
+
+	// In the future, we may want to make buffering optional for low-concurrency systems
+	// or dynamically swap between buffered/non-buffered depending on the workload.
+	b := &backend{
+		db: db,
+
+		batchInterval: bcfg.BatchInterval,
+		batchLimit:    bcfg.BatchLimit,
+
+		readTx: &readTx{
+			buf: txReadBuffer{
+				txBuffer: txBuffer{make(map[string]*bucketBuffer)},
+			},
+			buckets: make(map[string]*bolt.Bucket),
+		},
+
+		stopc: make(chan struct{}),
+		donec: make(chan struct{}),
+	}
+	b.batchTx = newBatchTxBuffered(b)
+	go b.run()
+	return b
+}
+
+// BatchTx returns the current batch tx in the coalescer. The tx can be used for read and
+// write operations. The write result can be retrieved within the same tx immediately.
+// The write result is isolated from other txs until the current one gets committed.
+func (b *backend) BatchTx() BatchTx {
+	return b.batchTx
+}
+
+func (b *backend) ReadTx() ReadTx { return b.readTx }
+
+// ForceCommit forces the current batching tx to commit.
+func (b *backend) ForceCommit() {
+	b.batchTx.Commit()
+}
+
+func (b *backend) Snapshot() Snapshot {
+	b.batchTx.Commit()
+
+	b.mu.RLock()
+	defer b.mu.RUnlock()
+	tx, err := b.db.Begin(false)
+	if err != nil {
+		plog.Fatalf("cannot begin tx (%s)", err)
+	}
+
+	stopc, donec := make(chan struct{}), make(chan struct{})
+	dbBytes := tx.Size()
+	go func() {
+		defer close(donec)
+		// sendRateBytes is based on transferring snapshot data over a 1 gigabit/s connection,
+		// assuming a minimum tcp throughput of 100MB/s.
+		var sendRateBytes int64 = 100 * 1024 * 1024
+		warningTimeout := time.Duration(int64((float64(dbBytes) / float64(sendRateBytes)) * float64(time.Second)))
+		if warningTimeout < minSnapshotWarningTimeout {
+			warningTimeout = minSnapshotWarningTimeout
+		}
+		start := time.Now()
+		ticker := time.NewTicker(warningTimeout)
+		defer ticker.Stop()
+		for {
+			select {
+			case <-ticker.C:
+				plog.Warningf("snapshotting is taking more than %v seconds to finish transferring %v MB [started at %v]", time.Since(start).Seconds(), float64(dbBytes)/float64(1024*1014), start)
+			case <-stopc:
+				snapshotDurations.Observe(time.Since(start).Seconds())
+				return
+			}
+		}
+	}()
+
+	return &snapshot{tx, stopc, donec}
+}
+
+type IgnoreKey struct {
+	Bucket string
+	Key    string
+}
+
+func (b *backend) Hash(ignores map[IgnoreKey]struct{}) (uint32, error) {
+	h := crc32.New(crc32.MakeTable(crc32.Castagnoli))
+
+	b.mu.RLock()
+	defer b.mu.RUnlock()
+	err := b.db.View(func(tx *bolt.Tx) error {
+		c := tx.Cursor()
+		for next, _ := c.First(); next != nil; next, _ = c.Next() {
+			b := tx.Bucket(next)
+			if b == nil {
+				return fmt.Errorf("cannot get hash of bucket %s", string(next))
+			}
+			h.Write(next)
+			b.ForEach(func(k, v []byte) error {
+				bk := IgnoreKey{Bucket: string(next), Key: string(k)}
+				if _, ok := ignores[bk]; !ok {
+					h.Write(k)
+					h.Write(v)
+				}
+				return nil
+			})
+		}
+		return nil
+	})
+
+	if err != nil {
+		return 0, err
+	}
+
+	return h.Sum32(), nil
+}
+
+func (b *backend) Size() int64 {
+	return atomic.LoadInt64(&b.size)
+}
+
+func (b *backend) SizeInUse() int64 {
+	return atomic.LoadInt64(&b.sizeInUse)
+}
+
+func (b *backend) run() {
+	defer close(b.donec)
+	t := time.NewTimer(b.batchInterval)
+	defer t.Stop()
+	for {
+		select {
+		case <-t.C:
+		case <-b.stopc:
+			b.batchTx.CommitAndStop()
+			return
+		}
+		b.batchTx.Commit()
+		t.Reset(b.batchInterval)
+	}
+}
+
+func (b *backend) Close() error {
+	close(b.stopc)
+	<-b.donec
+	return b.db.Close()
+}
+
+// Commits returns total number of commits since start
+func (b *backend) Commits() int64 {
+	return atomic.LoadInt64(&b.commits)
+}
+
+func (b *backend) Defrag() error {
+	return b.defrag()
+}
+
+func (b *backend) defrag() error {
+	now := time.Now()
+
+	// TODO: make this non-blocking?
+	// lock batchTx to ensure nobody is using the previous tx, and then
+	// close the previous ongoing tx.
+	b.batchTx.Lock()
+	defer b.batchTx.Unlock()
+
+	// lock the database after locking the tx to avoid deadlock.
+	b.mu.Lock()
+	defer b.mu.Unlock()
+
+	// block concurrent read requests while resetting tx
+	b.readTx.mu.Lock()
+	defer b.readTx.mu.Unlock()
+
+	b.batchTx.unsafeCommit(true)
+	b.batchTx.tx = nil
+
+	tmpdb, err := bolt.Open(b.db.Path()+".tmp", 0600, boltOpenOptions)
+	if err != nil {
+		return err
+	}
+
+	err = defragdb(b.db, tmpdb, defragLimit)
+
+	if err != nil {
+		tmpdb.Close()
+		os.RemoveAll(tmpdb.Path())
+		return err
+	}
+
+	dbp := b.db.Path()
+	tdbp := tmpdb.Path()
+
+	err = b.db.Close()
+	if err != nil {
+		plog.Fatalf("cannot close database (%s)", err)
+	}
+	err = tmpdb.Close()
+	if err != nil {
+		plog.Fatalf("cannot close database (%s)", err)
+	}
+	err = os.Rename(tdbp, dbp)
+	if err != nil {
+		plog.Fatalf("cannot rename database (%s)", err)
+	}
+
+	b.db, err = bolt.Open(dbp, 0600, boltOpenOptions)
+	if err != nil {
+		plog.Panicf("cannot open database at %s (%v)", dbp, err)
+	}
+	b.batchTx.tx, err = b.db.Begin(true)
+	if err != nil {
+		plog.Fatalf("cannot begin tx (%s)", err)
+	}
+
+	b.readTx.reset()
+	b.readTx.tx = b.unsafeBegin(false)
+
+	size := b.readTx.tx.Size()
+	db := b.db
+	atomic.StoreInt64(&b.size, size)
+	atomic.StoreInt64(&b.sizeInUse, size-(int64(db.Stats().FreePageN)*int64(db.Info().PageSize)))
+
+	took := time.Since(now)
+	defragDurations.Observe(took.Seconds())
+
+	return nil
+}
+
+func defragdb(odb, tmpdb *bolt.DB, limit int) error {
+	// open a tx on tmpdb for writes
+	tmptx, err := tmpdb.Begin(true)
+	if err != nil {
+		return err
+	}
+
+	// open a tx on old db for read
+	tx, err := odb.Begin(false)
+	if err != nil {
+		return err
+	}
+	defer tx.Rollback()
+
+	c := tx.Cursor()
+
+	count := 0
+	for next, _ := c.First(); next != nil; next, _ = c.Next() {
+		b := tx.Bucket(next)
+		if b == nil {
+			return fmt.Errorf("backend: cannot defrag bucket %s", string(next))
+		}
+
+		tmpb, berr := tmptx.CreateBucketIfNotExists(next)
+		if berr != nil {
+			return berr
+		}
+		tmpb.FillPercent = 0.9 // for sequential writes in the ForEach loop below
+
+		b.ForEach(func(k, v []byte) error {
+			count++
+			if count > limit {
+				err = tmptx.Commit()
+				if err != nil {
+					return err
+				}
+				tmptx, err = tmpdb.Begin(true)
+				if err != nil {
+					return err
+				}
+				tmpb = tmptx.Bucket(next)
+				tmpb.FillPercent = 0.9 // for sequential writes in the ForEach loop
+
+				count = 0
+			}
+			return tmpb.Put(k, v)
+		})
+	}
+
+	return tmptx.Commit()
+}
+
+func (b *backend) begin(write bool) *bolt.Tx {
+	b.mu.RLock()
+	tx := b.unsafeBegin(write)
+	b.mu.RUnlock()
+
+	size := tx.Size()
+	db := tx.DB()
+	atomic.StoreInt64(&b.size, size)
+	atomic.StoreInt64(&b.sizeInUse, size-(int64(db.Stats().FreePageN)*int64(db.Info().PageSize)))
+
+	return tx
+}
+
+func (b *backend) unsafeBegin(write bool) *bolt.Tx {
+	tx, err := b.db.Begin(write)
+	if err != nil {
+		plog.Fatalf("cannot begin tx (%s)", err)
+	}
+	return tx
+}
+
+// NewTmpBackend creates a backend implementation for testing.
+func NewTmpBackend(batchInterval time.Duration, batchLimit int) (*backend, string) {
+	dir, err := ioutil.TempDir(os.TempDir(), "etcd_backend_test")
+	if err != nil {
+		plog.Fatal(err)
+	}
+	tmpPath := filepath.Join(dir, "database")
+	bcfg := DefaultBackendConfig()
+	bcfg.Path, bcfg.BatchInterval, bcfg.BatchLimit = tmpPath, batchInterval, batchLimit
+	return newBackend(bcfg), tmpPath
+}
+
+func NewDefaultTmpBackend() (*backend, string) {
+	return NewTmpBackend(defaultBatchInterval, defaultBatchLimit)
+}
+
+type snapshot struct {
+	*bolt.Tx
+	stopc chan struct{}
+	donec chan struct{}
+}
+
+func (s *snapshot) Close() error {
+	close(s.stopc)
+	<-s.donec
+	return s.Tx.Rollback()
+}
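
A small sketch of the backend's write path, assuming the NewDefaultTmpBackend test helper defined above; the bucket name and keys are hypothetical. It also illustrates the BatchTx contract noted earlier: Unsafe* methods require holding the tx lock, and a write is readable within the same tx before it commits.

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/mvcc/backend"
)

func main() {
	b, _ := backend.NewDefaultTmpBackend()
	defer b.Close()

	bucket := []byte("demo") // hypothetical bucket name

	tx := b.BatchTx()
	tx.Lock() // Unsafe* methods must be called with the tx lock held
	tx.UnsafeCreateBucket(bucket)
	tx.UnsafePut(bucket, []byte("k"), []byte("v"))
	// a nil endKey makes UnsafeRange a point lookup; the uncommitted
	// write is already visible within the same tx
	_, vals := tx.UnsafeRange(bucket, []byte("k"), nil, 0)
	tx.Unlock()

	b.ForceCommit() // flush now rather than waiting for the batch interval
	fmt.Println("read back:", string(vals[0]))
}
```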
diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/batch_tx.go b/vendor/github.com/coreos/etcd/mvcc/backend/batch_tx.go
new file mode 100644
index 0000000..aed6893
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/mvcc/backend/batch_tx.go
@@ -0,0 +1,254 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package backend
+
+import (
+	"bytes"
+	"math"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	bolt "github.com/coreos/bbolt"
+)
+
+type BatchTx interface {
+	ReadTx
+	UnsafeCreateBucket(name []byte)
+	UnsafePut(bucketName []byte, key []byte, value []byte)
+	UnsafeSeqPut(bucketName []byte, key []byte, value []byte)
+	UnsafeDelete(bucketName []byte, key []byte)
+	// Commit commits a previous tx and begins a new writable one.
+	Commit()
+	// CommitAndStop commits the previous tx and does not create a new one.
+	CommitAndStop()
+}
+
+type batchTx struct {
+	sync.Mutex
+	tx      *bolt.Tx
+	backend *backend
+
+	pending int
+}
+
+func (t *batchTx) UnsafeCreateBucket(name []byte) {
+	_, err := t.tx.CreateBucket(name)
+	if err != nil && err != bolt.ErrBucketExists {
+		plog.Fatalf("cannot create bucket %s (%v)", name, err)
+	}
+	t.pending++
+}
+
+// UnsafePut must be called holding the lock on the tx.
+func (t *batchTx) UnsafePut(bucketName []byte, key []byte, value []byte) {
+	t.unsafePut(bucketName, key, value, false)
+}
+
+// UnsafeSeqPut must be called holding the lock on the tx.
+func (t *batchTx) UnsafeSeqPut(bucketName []byte, key []byte, value []byte) {
+	t.unsafePut(bucketName, key, value, true)
+}
+
+func (t *batchTx) unsafePut(bucketName []byte, key []byte, value []byte, seq bool) {
+	bucket := t.tx.Bucket(bucketName)
+	if bucket == nil {
+		plog.Fatalf("bucket %s does not exist", bucketName)
+	}
+	if seq {
+		// it is useful to increase the fill percent when the workload is mostly append-only.
+		// this can delay page splits and reduce space usage.
+		bucket.FillPercent = 0.9
+	}
+	if err := bucket.Put(key, value); err != nil {
+		plog.Fatalf("cannot put key into bucket (%v)", err)
+	}
+	t.pending++
+}
+
+// UnsafeRange must be called holding the lock on the tx.
+func (t *batchTx) UnsafeRange(bucketName, key, endKey []byte, limit int64) ([][]byte, [][]byte) {
+	bucket := t.tx.Bucket(bucketName)
+	if bucket == nil {
+		plog.Fatalf("bucket %s does not exist", bucketName)
+	}
+	return unsafeRange(bucket.Cursor(), key, endKey, limit)
+}
+
+func unsafeRange(c *bolt.Cursor, key, endKey []byte, limit int64) (keys [][]byte, vs [][]byte) {
+	if limit <= 0 {
+		limit = math.MaxInt64
+	}
+	var isMatch func(b []byte) bool
+	if len(endKey) > 0 {
+		isMatch = func(b []byte) bool { return bytes.Compare(b, endKey) < 0 }
+	} else {
+		isMatch = func(b []byte) bool { return bytes.Equal(b, key) }
+		limit = 1
+	}
+	for ck, cv := c.Seek(key); ck != nil && isMatch(ck); ck, cv = c.Next() {
+		vs = append(vs, cv)
+		keys = append(keys, ck)
+		if limit == int64(len(keys)) {
+			break
+		}
+	}
+	return keys, vs
+}
+
+// UnsafeDelete must be called holding the lock on the tx.
+func (t *batchTx) UnsafeDelete(bucketName []byte, key []byte) {
+	bucket := t.tx.Bucket(bucketName)
+	if bucket == nil {
+		plog.Fatalf("bucket %s does not exist", bucketName)
+	}
+	err := bucket.Delete(key)
+	if err != nil {
+		plog.Fatalf("cannot delete key from bucket (%v)", err)
+	}
+	t.pending++
+}
+
+// UnsafeForEach must be called holding the lock on the tx.
+func (t *batchTx) UnsafeForEach(bucketName []byte, visitor func(k, v []byte) error) error {
+	return unsafeForEach(t.tx, bucketName, visitor)
+}
+
+func unsafeForEach(tx *bolt.Tx, bucket []byte, visitor func(k, v []byte) error) error {
+	if b := tx.Bucket(bucket); b != nil {
+		return b.ForEach(visitor)
+	}
+	return nil
+}
+
+// Commit commits a previous tx and begins a new writable one.
+func (t *batchTx) Commit() {
+	t.Lock()
+	t.commit(false)
+	t.Unlock()
+}
+
+// CommitAndStop commits the previous tx and does not create a new one.
+func (t *batchTx) CommitAndStop() {
+	t.Lock()
+	t.commit(true)
+	t.Unlock()
+}
+
+func (t *batchTx) Unlock() {
+	if t.pending >= t.backend.batchLimit {
+		t.commit(false)
+	}
+	t.Mutex.Unlock()
+}
+
+func (t *batchTx) commit(stop bool) {
+	// commit the last tx
+	if t.tx != nil {
+		if t.pending == 0 && !stop {
+			return
+		}
+
+		start := time.Now()
+
+		// gofail: var beforeCommit struct{}
+		err := t.tx.Commit()
+		// gofail: var afterCommit struct{}
+
+		commitDurations.Observe(time.Since(start).Seconds())
+		atomic.AddInt64(&t.backend.commits, 1)
+
+		t.pending = 0
+		if err != nil {
+			plog.Fatalf("cannot commit tx (%s)", err)
+		}
+	}
+	if !stop {
+		t.tx = t.backend.begin(true)
+	}
+}
+
+type batchTxBuffered struct {
+	batchTx
+	buf txWriteBuffer
+}
+
+func newBatchTxBuffered(backend *backend) *batchTxBuffered {
+	tx := &batchTxBuffered{
+		batchTx: batchTx{backend: backend},
+		buf: txWriteBuffer{
+			txBuffer: txBuffer{make(map[string]*bucketBuffer)},
+			seq:      true,
+		},
+	}
+	tx.Commit()
+	return tx
+}
+
+func (t *batchTxBuffered) Unlock() {
+	if t.pending != 0 {
+		t.backend.readTx.mu.Lock()
+		t.buf.writeback(&t.backend.readTx.buf)
+		t.backend.readTx.mu.Unlock()
+		if t.pending >= t.backend.batchLimit {
+			t.commit(false)
+		}
+	}
+	t.batchTx.Unlock()
+}
+
+func (t *batchTxBuffered) Commit() {
+	t.Lock()
+	t.commit(false)
+	t.Unlock()
+}
+
+func (t *batchTxBuffered) CommitAndStop() {
+	t.Lock()
+	t.commit(true)
+	t.Unlock()
+}
+
+func (t *batchTxBuffered) commit(stop bool) {
+	// all read txs must be closed to acquire boltdb commit rwlock
+	t.backend.readTx.mu.Lock()
+	t.unsafeCommit(stop)
+	t.backend.readTx.mu.Unlock()
+}
+
+func (t *batchTxBuffered) unsafeCommit(stop bool) {
+	if t.backend.readTx.tx != nil {
+		if err := t.backend.readTx.tx.Rollback(); err != nil {
+			plog.Fatalf("cannot rollback tx (%s)", err)
+		}
+		t.backend.readTx.reset()
+	}
+
+	t.batchTx.commit(stop)
+
+	if !stop {
+		t.backend.readTx.tx = t.backend.begin(false)
+	}
+}
+
+func (t *batchTxBuffered) UnsafePut(bucketName []byte, key []byte, value []byte) {
+	t.batchTx.UnsafePut(bucketName, key, value)
+	t.buf.put(bucketName, key, value)
+}
+
+func (t *batchTxBuffered) UnsafeSeqPut(bucketName []byte, key []byte, value []byte) {
+	t.batchTx.UnsafeSeqPut(bucketName, key, value)
+	t.buf.putSeq(bucketName, key, value)
+}
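
To make the coalescing behavior above concrete, here is a minimal sketch using the NewTmpBackend helper with a deliberately tiny batch limit, so the auto-commit path inside Unlock is easy to observe; the hour-long batch interval keeps the background flusher out of the way.

```go
package main

import (
	"fmt"
	"time"

	"github.com/coreos/etcd/mvcc/backend"
)

func main() {
	// batch limit of 2: the second pending write triggers a commit in Unlock.
	b, _ := backend.NewTmpBackend(time.Hour, 2)
	defer b.Close()

	tx := b.BatchTx()
	tx.Lock()
	tx.UnsafeCreateBucket([]byte("demo"))
	tx.Unlock() // pending=1 < limit: no commit yet

	tx.Lock()
	tx.UnsafePut([]byte("demo"), []byte("k"), []byte("v"))
	tx.Unlock() // pending reaches the limit: commit happens here

	fmt.Println("commits so far:", b.Commits())
}
```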
diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/config_default.go b/vendor/github.com/coreos/etcd/mvcc/backend/config_default.go
new file mode 100644
index 0000000..edfed00
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/mvcc/backend/config_default.go
@@ -0,0 +1,23 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !linux,!windows
+
+package backend
+
+import bolt "github.com/coreos/bbolt"
+
+var boltOpenOptions *bolt.Options = nil
+
+func (bcfg *BackendConfig) mmapSize() int { return int(bcfg.MmapSize) }
diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/config_linux.go b/vendor/github.com/coreos/etcd/mvcc/backend/config_linux.go
new file mode 100644
index 0000000..b01785f
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/mvcc/backend/config_linux.go
@@ -0,0 +1,34 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package backend
+
+import (
+	"syscall"
+
+	bolt "github.com/coreos/bbolt"
+)
+
+// syscall.MAP_POPULATE on linux 2.6.23+ does sequential read-ahead
+// which can speed up entire-database read with boltdb. We want to
+// enable MAP_POPULATE for faster key-value store recovery in storage
+// package. If your kernel version is lower than 2.6.23
+// (https://github.com/torvalds/linux/releases/tag/v2.6.23), mmap might
+// silently ignore this flag. Please update your kernel to prevent this.
+var boltOpenOptions = &bolt.Options{
+	MmapFlags:      syscall.MAP_POPULATE,
+	NoFreelistSync: true,
+}
+
+func (bcfg *BackendConfig) mmapSize() int { return int(bcfg.MmapSize) }
diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/config_windows.go b/vendor/github.com/coreos/etcd/mvcc/backend/config_windows.go
new file mode 100644
index 0000000..71d0270
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/mvcc/backend/config_windows.go
@@ -0,0 +1,26 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build windows
+
+package backend
+
+import bolt "github.com/coreos/bbolt"
+
+var boltOpenOptions *bolt.Options = nil
+
+// Setting mmap size != 0 on Windows allocates the entire mmap size
+// for the file instead of growing it, so force it to 0.
+func (bcfg *BackendConfig) mmapSize() int { return 0 }
diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/doc.go b/vendor/github.com/coreos/etcd/mvcc/backend/doc.go
new file mode 100644
index 0000000..9cc42fa
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/mvcc/backend/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package backend defines a standard interface for etcd's backend MVCC storage.
+package backend
diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/metrics.go b/vendor/github.com/coreos/etcd/mvcc/backend/metrics.go
new file mode 100644
index 0000000..3415708
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/mvcc/backend/metrics.go
@@ -0,0 +1,59 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package backend
+
+import "github.com/prometheus/client_golang/prometheus"
+
+var (
+	commitDurations = prometheus.NewHistogram(prometheus.HistogramOpts{
+		Namespace: "etcd",
+		Subsystem: "disk",
+		Name:      "backend_commit_duration_seconds",
+		Help:      "The latency distributions of commit called by backend.",
+
+		// lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2
+		// highest bucket start of 0.001 sec * 2^13 == 8.192 sec
+		Buckets: prometheus.ExponentialBuckets(0.001, 2, 14),
+	})
+
+	defragDurations = prometheus.NewHistogram(prometheus.HistogramOpts{
+		Namespace: "etcd",
+		Subsystem: "disk",
+		Name:      "backend_defrag_duration_seconds",
+		Help:      "The latency distribution of backend defragmentation.",
+
+		// 100 MB usually takes 1 sec, so start with 10 MB of 100 ms
+		// lowest bucket start of upper bound 0.1 sec (100 ms) with factor 2
+		// highest bucket start of 0.1 sec * 2^12 == 409.6 sec
+		Buckets: prometheus.ExponentialBuckets(.1, 2, 13),
+	})
+
+	snapshotDurations = prometheus.NewHistogram(prometheus.HistogramOpts{
+		Namespace: "etcd",
+		Subsystem: "disk",
+		Name:      "backend_snapshot_duration_seconds",
+		Help:      "The latency distribution of backend snapshots.",
+
+		// lowest bucket start of upper bound 0.01 sec (10 ms) with factor 2
+		// highest bucket start of 0.01 sec * 2^16 == 655.36 sec
+		Buckets: prometheus.ExponentialBuckets(.01, 2, 17),
+	})
+)
+
+func init() {
+	prometheus.MustRegister(commitDurations)
+	prometheus.MustRegister(defragDurations)
+	prometheus.MustRegister(snapshotDurations)
+}
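+
+// Editorial sketch (not in upstream etcd): callers time an operation and feed
+// the elapsed seconds into these histograms, for example:
+//
+//	start := time.Now()
+//	// ... commit the batch transaction ...
+//	commitDurations.Observe(time.Since(start).Seconds())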
diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/read_tx.go b/vendor/github.com/coreos/etcd/mvcc/backend/read_tx.go
new file mode 100644
index 0000000..0536de7
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/mvcc/backend/read_tx.go
@@ -0,0 +1,120 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package backend
+
+import (
+	"bytes"
+	"math"
+	"sync"
+
+	bolt "github.com/coreos/bbolt"
+)
+
+// safeRangeBucket is a hack to avoid inadvertently reading duplicate keys;
+// buckets that can be overwritten should only fetch with limit=1, but
+// safeRangeBucket is known to never overwrite any key, so ranging over it is safe.
+var safeRangeBucket = []byte("key")
+
+type ReadTx interface {
+	Lock()
+	Unlock()
+
+	UnsafeRange(bucketName []byte, key, endKey []byte, limit int64) (keys [][]byte, vals [][]byte)
+	UnsafeForEach(bucketName []byte, visitor func(k, v []byte) error) error
+}
+
+type readTx struct {
+	// mu protects accesses to the txReadBuffer
+	mu  sync.RWMutex
+	buf txReadBuffer
+
+	// txmu protects accesses to buckets and tx on Range requests.
+	txmu    sync.RWMutex
+	tx      *bolt.Tx
+	buckets map[string]*bolt.Bucket
+}
+
+func (rt *readTx) Lock()   { rt.mu.RLock() }
+func (rt *readTx) Unlock() { rt.mu.RUnlock() }
+
+func (rt *readTx) UnsafeRange(bucketName, key, endKey []byte, limit int64) ([][]byte, [][]byte) {
+	if endKey == nil {
+		// forbid duplicates for single keys
+		limit = 1
+	}
+	if limit <= 0 {
+		limit = math.MaxInt64
+	}
+	if limit > 1 && !bytes.Equal(bucketName, safeRangeBucket) {
+		panic("do not use unsafeRange on non-keys bucket")
+	}
+	keys, vals := rt.buf.Range(bucketName, key, endKey, limit)
+	if int64(len(keys)) == limit {
+		return keys, vals
+	}
+
+	// find/cache bucket
+	bn := string(bucketName)
+	rt.txmu.RLock()
+	bucket, ok := rt.buckets[bn]
+	rt.txmu.RUnlock()
+	if !ok {
+		rt.txmu.Lock()
+		bucket = rt.tx.Bucket(bucketName)
+		rt.buckets[bn] = bucket
+		rt.txmu.Unlock()
+	}
+
+	// ignore a missing bucket since it may have been created in this batch
+	if bucket == nil {
+		return keys, vals
+	}
+	rt.txmu.Lock()
+	c := bucket.Cursor()
+	rt.txmu.Unlock()
+
+	k2, v2 := unsafeRange(c, key, endKey, limit-int64(len(keys)))
+	return append(k2, keys...), append(v2, vals...)
+}
+
+func (rt *readTx) UnsafeForEach(bucketName []byte, visitor func(k, v []byte) error) error {
+	dups := make(map[string]struct{})
+	getDups := func(k, v []byte) error {
+		dups[string(k)] = struct{}{}
+		return nil
+	}
+	visitNoDup := func(k, v []byte) error {
+		if _, ok := dups[string(k)]; ok {
+			return nil
+		}
+		return visitor(k, v)
+	}
+	if err := rt.buf.ForEach(bucketName, getDups); err != nil {
+		return err
+	}
+	rt.txmu.Lock()
+	err := unsafeForEach(rt.tx, bucketName, visitNoDup)
+	rt.txmu.Unlock()
+	if err != nil {
+		return err
+	}
+	return rt.buf.ForEach(bucketName, visitor)
+}
+
+func (rt *readTx) reset() {
+	rt.buf.reset()
+	rt.buckets = make(map[string]*bolt.Bucket)
+	rt.tx = nil
+}
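+
+// Editorial sketch (not in upstream etcd): a typical read against the safe
+// "key" bucket takes the read lock, ranges with limit=0 (unlimited), then
+// unlocks; startRev and endRev are hypothetical revision-encoded byte slices.
+//
+//	rt.Lock()
+//	keys, vals := rt.UnsafeRange([]byte("key"), startRev, endRev, 0)
+//	rt.Unlock()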
diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/tx_buffer.go b/vendor/github.com/coreos/etcd/mvcc/backend/tx_buffer.go
new file mode 100644
index 0000000..56e885d
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/mvcc/backend/tx_buffer.go
@@ -0,0 +1,181 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package backend
+
+import (
+	"bytes"
+	"sort"
+)
+
+// txBuffer handles functionality shared between txWriteBuffer and txReadBuffer.
+type txBuffer struct {
+	buckets map[string]*bucketBuffer
+}
+
+func (txb *txBuffer) reset() {
+	for k, v := range txb.buckets {
+		if v.used == 0 {
+			// demote
+			delete(txb.buckets, k)
+		}
+		v.used = 0
+	}
+}
+
+// txWriteBuffer buffers writes of pending updates that have not yet committed.
+type txWriteBuffer struct {
+	txBuffer
+	seq bool
+}
+
+func (txw *txWriteBuffer) put(bucket, k, v []byte) {
+	txw.seq = false
+	txw.putSeq(bucket, k, v)
+}
+
+func (txw *txWriteBuffer) putSeq(bucket, k, v []byte) {
+	b, ok := txw.buckets[string(bucket)]
+	if !ok {
+		b = newBucketBuffer()
+		txw.buckets[string(bucket)] = b
+	}
+	b.add(k, v)
+}
+
+func (txw *txWriteBuffer) writeback(txr *txReadBuffer) {
+	for k, wb := range txw.buckets {
+		rb, ok := txr.buckets[k]
+		if !ok {
+			delete(txw.buckets, k)
+			txr.buckets[k] = wb
+			continue
+		}
+		if !txw.seq && wb.used > 1 {
+			// assume no duplicate keys
+			sort.Sort(wb)
+		}
+		rb.merge(wb)
+	}
+	txw.reset()
+}
+
+// txReadBuffer accesses buffered updates.
+type txReadBuffer struct{ txBuffer }
+
+func (txr *txReadBuffer) Range(bucketName, key, endKey []byte, limit int64) ([][]byte, [][]byte) {
+	if b := txr.buckets[string(bucketName)]; b != nil {
+		return b.Range(key, endKey, limit)
+	}
+	return nil, nil
+}
+
+func (txr *txReadBuffer) ForEach(bucketName []byte, visitor func(k, v []byte) error) error {
+	if b := txr.buckets[string(bucketName)]; b != nil {
+		return b.ForEach(visitor)
+	}
+	return nil
+}
+
+type kv struct {
+	key []byte
+	val []byte
+}
+
+// bucketBuffer buffers key-value pairs that are pending commit.
+type bucketBuffer struct {
+	buf []kv
+	// used tracks the number of elements in use so buf can be reused without reallocation.
+	used int
+}
+
+func newBucketBuffer() *bucketBuffer {
+	return &bucketBuffer{buf: make([]kv, 512), used: 0}
+}
+
+func (bb *bucketBuffer) Range(key, endKey []byte, limit int64) (keys [][]byte, vals [][]byte) {
+	f := func(i int) bool { return bytes.Compare(bb.buf[i].key, key) >= 0 }
+	idx := sort.Search(bb.used, f)
+	if idx < 0 {
+		return nil, nil
+	}
+	if len(endKey) == 0 {
+		if bytes.Equal(key, bb.buf[idx].key) {
+			keys = append(keys, bb.buf[idx].key)
+			vals = append(vals, bb.buf[idx].val)
+		}
+		return keys, vals
+	}
+	if bytes.Compare(endKey, bb.buf[idx].key) <= 0 {
+		return nil, nil
+	}
+	for i := idx; i < bb.used && int64(len(keys)) < limit; i++ {
+		if bytes.Compare(endKey, bb.buf[i].key) <= 0 {
+			break
+		}
+		keys = append(keys, bb.buf[i].key)
+		vals = append(vals, bb.buf[i].val)
+	}
+	return keys, vals
+}
+
+func (bb *bucketBuffer) ForEach(visitor func(k, v []byte) error) error {
+	for i := 0; i < bb.used; i++ {
+		if err := visitor(bb.buf[i].key, bb.buf[i].val); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (bb *bucketBuffer) add(k, v []byte) {
+	bb.buf[bb.used].key, bb.buf[bb.used].val = k, v
+	bb.used++
+	if bb.used == len(bb.buf) {
+		buf := make([]kv, (3*len(bb.buf))/2)
+		copy(buf, bb.buf)
+		bb.buf = buf
+	}
+}
+
+// merge merges data from bbsrc into bb.
+func (bb *bucketBuffer) merge(bbsrc *bucketBuffer) {
+	for i := 0; i < bbsrc.used; i++ {
+		bb.add(bbsrc.buf[i].key, bbsrc.buf[i].val)
+	}
+	if bb.used == bbsrc.used {
+		return
+	}
+	if bytes.Compare(bb.buf[(bb.used-bbsrc.used)-1].key, bbsrc.buf[0].key) < 0 {
+		return
+	}
+
+	sort.Stable(bb)
+
+	// remove duplicates, using only newest update
+	widx := 0
+	for ridx := 1; ridx < bb.used; ridx++ {
+		if !bytes.Equal(bb.buf[ridx].key, bb.buf[widx].key) {
+			widx++
+		}
+		bb.buf[widx] = bb.buf[ridx]
+	}
+	bb.used = widx + 1
+}
+
+func (bb *bucketBuffer) Len() int { return bb.used }
+func (bb *bucketBuffer) Less(i, j int) bool {
+	return bytes.Compare(bb.buf[i].key, bb.buf[j].key) < 0
+}
+func (bb *bucketBuffer) Swap(i, j int) { bb.buf[i], bb.buf[j] = bb.buf[j], bb.buf[i] }
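+
+// Editorial sketch (not in upstream etcd): merging a write buffer into a read
+// buffer deduplicates keys, keeping only the newest update, e.g.:
+//
+//	bb := newBucketBuffer()
+//	bb.add([]byte("a"), []byte("old"))
+//	src := newBucketBuffer()
+//	src.add([]byte("a"), []byte("new"))
+//	bb.merge(src) // bb now holds a single entry: "a" -> "new"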
diff --git a/vendor/github.com/coreos/etcd/mvcc/doc.go b/vendor/github.com/coreos/etcd/mvcc/doc.go
new file mode 100644
index 0000000..ad5be03
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/mvcc/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package mvcc defines etcd's stable MVCC storage.
+package mvcc
diff --git a/vendor/github.com/coreos/etcd/mvcc/index.go b/vendor/github.com/coreos/etcd/mvcc/index.go
new file mode 100644
index 0000000..b27a9e5
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/mvcc/index.go
@@ -0,0 +1,251 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+	"sort"
+	"sync"
+
+	"github.com/google/btree"
+)
+
+type index interface {
+	Get(key []byte, atRev int64) (rev, created revision, ver int64, err error)
+	Range(key, end []byte, atRev int64) ([][]byte, []revision)
+	Revisions(key, end []byte, atRev int64) []revision
+	Put(key []byte, rev revision)
+	Tombstone(key []byte, rev revision) error
+	RangeSince(key, end []byte, rev int64) []revision
+	Compact(rev int64) map[revision]struct{}
+	Keep(rev int64) map[revision]struct{}
+	Equal(b index) bool
+
+	Insert(ki *keyIndex)
+	KeyIndex(ki *keyIndex) *keyIndex
+}
+
+type treeIndex struct {
+	sync.RWMutex
+	tree *btree.BTree
+}
+
+func newTreeIndex() index {
+	return &treeIndex{
+		tree: btree.New(32),
+	}
+}
+
+func (ti *treeIndex) Put(key []byte, rev revision) {
+	keyi := &keyIndex{key: key}
+
+	ti.Lock()
+	defer ti.Unlock()
+	item := ti.tree.Get(keyi)
+	if item == nil {
+		keyi.put(rev.main, rev.sub)
+		ti.tree.ReplaceOrInsert(keyi)
+		return
+	}
+	okeyi := item.(*keyIndex)
+	okeyi.put(rev.main, rev.sub)
+}
+
+func (ti *treeIndex) Get(key []byte, atRev int64) (modified, created revision, ver int64, err error) {
+	keyi := &keyIndex{key: key}
+	ti.RLock()
+	defer ti.RUnlock()
+	if keyi = ti.keyIndex(keyi); keyi == nil {
+		return revision{}, revision{}, 0, ErrRevisionNotFound
+	}
+	return keyi.get(atRev)
+}
+
+func (ti *treeIndex) KeyIndex(keyi *keyIndex) *keyIndex {
+	ti.RLock()
+	defer ti.RUnlock()
+	return ti.keyIndex(keyi)
+}
+
+func (ti *treeIndex) keyIndex(keyi *keyIndex) *keyIndex {
+	if item := ti.tree.Get(keyi); item != nil {
+		return item.(*keyIndex)
+	}
+	return nil
+}
+
+func (ti *treeIndex) visit(key, end []byte, f func(ki *keyIndex)) {
+	keyi, endi := &keyIndex{key: key}, &keyIndex{key: end}
+
+	ti.RLock()
+	defer ti.RUnlock()
+
+	ti.tree.AscendGreaterOrEqual(keyi, func(item btree.Item) bool {
+		if len(endi.key) > 0 && !item.Less(endi) {
+			return false
+		}
+		f(item.(*keyIndex))
+		return true
+	})
+}
+
+func (ti *treeIndex) Revisions(key, end []byte, atRev int64) (revs []revision) {
+	if end == nil {
+		rev, _, _, err := ti.Get(key, atRev)
+		if err != nil {
+			return nil
+		}
+		return []revision{rev}
+	}
+	ti.visit(key, end, func(ki *keyIndex) {
+		if rev, _, _, err := ki.get(atRev); err == nil {
+			revs = append(revs, rev)
+		}
+	})
+	return revs
+}
+
+func (ti *treeIndex) Range(key, end []byte, atRev int64) (keys [][]byte, revs []revision) {
+	if end == nil {
+		rev, _, _, err := ti.Get(key, atRev)
+		if err != nil {
+			return nil, nil
+		}
+		return [][]byte{key}, []revision{rev}
+	}
+	ti.visit(key, end, func(ki *keyIndex) {
+		if rev, _, _, err := ki.get(atRev); err == nil {
+			revs = append(revs, rev)
+			keys = append(keys, ki.key)
+		}
+	})
+	return keys, revs
+}
+
+func (ti *treeIndex) Tombstone(key []byte, rev revision) error {
+	keyi := &keyIndex{key: key}
+
+	ti.Lock()
+	defer ti.Unlock()
+	item := ti.tree.Get(keyi)
+	if item == nil {
+		return ErrRevisionNotFound
+	}
+
+	ki := item.(*keyIndex)
+	return ki.tombstone(rev.main, rev.sub)
+}
+
+// RangeSince returns all revisions from key (inclusive) to end (exclusive)
+// at or after the given rev. The returned slice is sorted in revision order.
+func (ti *treeIndex) RangeSince(key, end []byte, rev int64) []revision {
+	keyi := &keyIndex{key: key}
+
+	ti.RLock()
+	defer ti.RUnlock()
+
+	if end == nil {
+		item := ti.tree.Get(keyi)
+		if item == nil {
+			return nil
+		}
+		keyi = item.(*keyIndex)
+		return keyi.since(rev)
+	}
+
+	endi := &keyIndex{key: end}
+	var revs []revision
+	ti.tree.AscendGreaterOrEqual(keyi, func(item btree.Item) bool {
+		if len(endi.key) > 0 && !item.Less(endi) {
+			return false
+		}
+		curKeyi := item.(*keyIndex)
+		revs = append(revs, curKeyi.since(rev)...)
+		return true
+	})
+	sort.Sort(revisions(revs))
+
+	return revs
+}
+
+func (ti *treeIndex) Compact(rev int64) map[revision]struct{} {
+	available := make(map[revision]struct{})
+	var emptyki []*keyIndex
+	plog.Printf("store.index: compact %d", rev)
+	// TODO: do not hold the lock for a long time?
+	// This is probably OK. Compacting 10M keys takes O(10ms).
+	ti.Lock()
+	defer ti.Unlock()
+	ti.tree.Ascend(compactIndex(rev, available, &emptyki))
+	for _, ki := range emptyki {
+		item := ti.tree.Delete(ki)
+		if item == nil {
+			plog.Panic("store.index: unexpected delete failure during compaction")
+		}
+	}
+	return available
+}
+
+// Keep finds all revisions to be kept for a Compaction at the given rev.
+func (ti *treeIndex) Keep(rev int64) map[revision]struct{} {
+	available := make(map[revision]struct{})
+	ti.RLock()
+	defer ti.RUnlock()
+	ti.tree.Ascend(func(i btree.Item) bool {
+		keyi := i.(*keyIndex)
+		keyi.keep(rev, available)
+		return true
+	})
+	return available
+}
+
+func compactIndex(rev int64, available map[revision]struct{}, emptyki *[]*keyIndex) func(i btree.Item) bool {
+	return func(i btree.Item) bool {
+		keyi := i.(*keyIndex)
+		keyi.compact(rev, available)
+		if keyi.isEmpty() {
+			*emptyki = append(*emptyki, keyi)
+		}
+		return true
+	}
+}
+
+func (ti *treeIndex) Equal(bi index) bool {
+	b := bi.(*treeIndex)
+
+	if ti.tree.Len() != b.tree.Len() {
+		return false
+	}
+
+	equal := true
+
+	ti.tree.Ascend(func(item btree.Item) bool {
+		aki := item.(*keyIndex)
+		bki := b.tree.Get(item).(*keyIndex)
+		if !aki.equal(bki) {
+			equal = false
+			return false
+		}
+		return true
+	})
+
+	return equal
+}
+
+func (ti *treeIndex) Insert(ki *keyIndex) {
+	ti.Lock()
+	defer ti.Unlock()
+	ti.tree.ReplaceOrInsert(ki)
+}
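+
+// Editorial sketch (not in upstream etcd): the tree index maps a key to its
+// revision history; a put followed by a get at the same revision returns it:
+//
+//	ti := newTreeIndex()
+//	ti.Put([]byte("foo"), revision{main: 2})
+//	rev, created, ver, err := ti.Get([]byte("foo"), 2)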
diff --git a/vendor/github.com/coreos/etcd/mvcc/key_index.go b/vendor/github.com/coreos/etcd/mvcc/key_index.go
new file mode 100644
index 0000000..805922b
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/mvcc/key_index.go
@@ -0,0 +1,356 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+
+	"github.com/google/btree"
+)
+
+var (
+	ErrRevisionNotFound = errors.New("mvcc: revision not found")
+)
+
+// keyIndex stores the revisions of a key in the backend.
+// Each keyIndex has at least one key generation.
+// Each generation might have several key versions.
+// Tombstoning a key appends a tombstone version at the end
+// of the current generation and creates a new empty generation.
+// Each version of a key has an index pointing to the backend.
+//
+// For example: put(1.0);put(2.0);tombstone(3.0);put(4.0);tombstone(5.0) on key "foo"
+// generates a keyIndex:
+// key:     "foo"
+// rev: 5
+// generations:
+//    {empty}
+//    {4.0, 5.0(t)}
+//    {1.0, 2.0, 3.0(t)}
+//
+// Compacting a keyIndex removes the versions with main revision smaller
+// than or equal to rev, except the largest one. If a generation becomes
+// empty during compaction, it will be removed. If all the generations get
+// removed, the keyIndex should be removed.
+//
+// For example:
+// compact(2) on the previous example
+// generations:
+//    {empty}
+//    {4.0, 5.0(t)}
+//    {2.0, 3.0(t)}
+//
+// compact(4)
+// generations:
+//    {empty}
+//    {4.0, 5.0(t)}
+//
+// compact(5):
+// generations:
+//    {empty} -> key SHOULD be removed.
+//
+// compact(6):
+// generations:
+//    {empty} -> key SHOULD be removed.
+type keyIndex struct {
+	key         []byte
+	modified    revision // the main rev of the last modification
+	generations []generation
+}
+
+// put puts a revision to the keyIndex.
+func (ki *keyIndex) put(main int64, sub int64) {
+	rev := revision{main: main, sub: sub}
+
+	if !rev.GreaterThan(ki.modified) {
+		plog.Panicf("store.keyindex: put with unexpected smaller revision [%v / %v]", rev, ki.modified)
+	}
+	if len(ki.generations) == 0 {
+		ki.generations = append(ki.generations, generation{})
+	}
+	g := &ki.generations[len(ki.generations)-1]
+	if len(g.revs) == 0 { // create a new key
+		keysGauge.Inc()
+		g.created = rev
+	}
+	g.revs = append(g.revs, rev)
+	g.ver++
+	ki.modified = rev
+}
+
+func (ki *keyIndex) restore(created, modified revision, ver int64) {
+	if len(ki.generations) != 0 {
+		plog.Panicf("store.keyindex: cannot restore non-empty keyIndex")
+	}
+
+	ki.modified = modified
+	g := generation{created: created, ver: ver, revs: []revision{modified}}
+	ki.generations = append(ki.generations, g)
+	keysGauge.Inc()
+}
+
+// tombstone puts a revision, pointing to a tombstone, to the keyIndex.
+// It also creates a new empty generation in the keyIndex.
+// It returns ErrRevisionNotFound when tombstoning an empty generation.
+func (ki *keyIndex) tombstone(main int64, sub int64) error {
+	if ki.isEmpty() {
+		plog.Panicf("store.keyindex: unexpected tombstone on empty keyIndex %s", string(ki.key))
+	}
+	if ki.generations[len(ki.generations)-1].isEmpty() {
+		return ErrRevisionNotFound
+	}
+	ki.put(main, sub)
+	ki.generations = append(ki.generations, generation{})
+	keysGauge.Dec()
+	return nil
+}
+
+// get gets the modified revision, created revision, and version of the key that satisfies the given atRev.
+// Rev must be higher than or equal to the given atRev.
+func (ki *keyIndex) get(atRev int64) (modified, created revision, ver int64, err error) {
+	if ki.isEmpty() {
+		plog.Panicf("store.keyindex: unexpected get on empty keyIndex %s", string(ki.key))
+	}
+	g := ki.findGeneration(atRev)
+	if g.isEmpty() {
+		return revision{}, revision{}, 0, ErrRevisionNotFound
+	}
+
+	n := g.walk(func(rev revision) bool { return rev.main > atRev })
+	if n != -1 {
+		return g.revs[n], g.created, g.ver - int64(len(g.revs)-n-1), nil
+	}
+
+	return revision{}, revision{}, 0, ErrRevisionNotFound
+}
+
+// since returns revisions since the given rev. Only the revision with the
+// largest sub revision will be returned if multiple revisions have the same
+// main revision.
+func (ki *keyIndex) since(rev int64) []revision {
+	if ki.isEmpty() {
+		plog.Panicf("store.keyindex: unexpected get on empty keyIndex %s", string(ki.key))
+	}
+	since := revision{rev, 0}
+	var gi int
+	// find the generations to start checking
+	for gi = len(ki.generations) - 1; gi > 0; gi-- {
+		g := ki.generations[gi]
+		if g.isEmpty() {
+			continue
+		}
+		if since.GreaterThan(g.created) {
+			break
+		}
+	}
+
+	var revs []revision
+	var last int64
+	for ; gi < len(ki.generations); gi++ {
+		for _, r := range ki.generations[gi].revs {
+			if since.GreaterThan(r) {
+				continue
+			}
+			if r.main == last {
+				// replace the revision with a new one that has higher sub value,
+				// because the original one should not be seen by external
+				revs[len(revs)-1] = r
+				continue
+			}
+			revs = append(revs, r)
+			last = r.main
+		}
+	}
+	return revs
+}
+
+// compact compacts a keyIndex by removing the versions with main revision
+// smaller than or equal to the given atRev, except the largest one (if the
+// largest one is a tombstone, it will not be kept).
+// If a generation becomes empty during compaction, it will be removed.
+func (ki *keyIndex) compact(atRev int64, available map[revision]struct{}) {
+	if ki.isEmpty() {
+		plog.Panicf("store.keyindex: unexpected compact on empty keyIndex %s", string(ki.key))
+	}
+
+	genIdx, revIndex := ki.doCompact(atRev, available)
+
+	g := &ki.generations[genIdx]
+	if !g.isEmpty() {
+		// remove the previous contents.
+		if revIndex != -1 {
+			g.revs = g.revs[revIndex:]
+		}
+		// remove any tombstone
+		if len(g.revs) == 1 && genIdx != len(ki.generations)-1 {
+			delete(available, g.revs[0])
+			genIdx++
+		}
+	}
+
+	// remove the previous generations.
+	ki.generations = ki.generations[genIdx:]
+}
+
+// keep finds the revisions to be kept if compact is called at the given atRev.
+func (ki *keyIndex) keep(atRev int64, available map[revision]struct{}) {
+	if ki.isEmpty() {
+		return
+	}
+
+	genIdx, revIndex := ki.doCompact(atRev, available)
+	g := &ki.generations[genIdx]
+	if !g.isEmpty() {
+		// remove any tombstone
+		if revIndex == len(g.revs)-1 && genIdx != len(ki.generations)-1 {
+			delete(available, g.revs[revIndex])
+		}
+	}
+}
+
+func (ki *keyIndex) doCompact(atRev int64, available map[revision]struct{}) (genIdx int, revIndex int) {
+	// walk until reaching the first revision smaller than or equal to "atRev",
+	// and add that revision to the available map
+	f := func(rev revision) bool {
+		if rev.main <= atRev {
+			available[rev] = struct{}{}
+			return false
+		}
+		return true
+	}
+
+	genIdx, g := 0, &ki.generations[0]
+	// find the first generation that includes atRev or was created after atRev
+	for genIdx < len(ki.generations)-1 {
+		if tomb := g.revs[len(g.revs)-1].main; tomb > atRev {
+			break
+		}
+		genIdx++
+		g = &ki.generations[genIdx]
+	}
+
+	revIndex = g.walk(f)
+
+	return genIdx, revIndex
+}
+
+func (ki *keyIndex) isEmpty() bool {
+	return len(ki.generations) == 1 && ki.generations[0].isEmpty()
+}
+
+// findGeneration finds the generation of the keyIndex that the given rev
+// belongs to. If the given rev is in the gap between two generations, which
+// means that the key does not exist at the given rev, it returns nil.
+func (ki *keyIndex) findGeneration(rev int64) *generation {
+	lastg := len(ki.generations) - 1
+	cg := lastg
+
+	for cg >= 0 {
+		if len(ki.generations[cg].revs) == 0 {
+			cg--
+			continue
+		}
+		g := ki.generations[cg]
+		if cg != lastg {
+			if tomb := g.revs[len(g.revs)-1].main; tomb <= rev {
+				return nil
+			}
+		}
+		if g.revs[0].main <= rev {
+			return &ki.generations[cg]
+		}
+		cg--
+	}
+	return nil
+}
+
+func (a *keyIndex) Less(b btree.Item) bool {
+	return bytes.Compare(a.key, b.(*keyIndex).key) == -1
+}
+
+func (a *keyIndex) equal(b *keyIndex) bool {
+	if !bytes.Equal(a.key, b.key) {
+		return false
+	}
+	if a.modified != b.modified {
+		return false
+	}
+	if len(a.generations) != len(b.generations) {
+		return false
+	}
+	for i := range a.generations {
+		ag, bg := a.generations[i], b.generations[i]
+		if !ag.equal(bg) {
+			return false
+		}
+	}
+	return true
+}
+
+func (ki *keyIndex) String() string {
+	var s string
+	for _, g := range ki.generations {
+		s += g.String()
+	}
+	return s
+}
+
+// generation contains multiple revisions of a key.
+type generation struct {
+	ver     int64
+	created revision // when the generation is created (put in first revision).
+	revs    []revision
+}
+
+func (g *generation) isEmpty() bool { return g == nil || len(g.revs) == 0 }
+
+// walk walks through the revisions in the generation in descending order.
+// It passes each revision to the given function.
+// walk stops when either: 1. it finishes walking all revisions, or 2. the
+// function returns false. walk returns the position where it stopped; if it
+// stopped after finishing the walk, -1 is returned.
+func (g *generation) walk(f func(rev revision) bool) int {
+	l := len(g.revs)
+	for i := range g.revs {
+		ok := f(g.revs[l-i-1])
+		if !ok {
+			return l - i - 1
+		}
+	}
+	return -1
+}
+
+func (g *generation) String() string {
+	return fmt.Sprintf("g: created[%d] ver[%d], revs %#v\n", g.created, g.ver, g.revs)
+}
+
+func (a generation) equal(b generation) bool {
+	if a.ver != b.ver {
+		return false
+	}
+	if len(a.revs) != len(b.revs) {
+		return false
+	}
+
+	for i := range a.revs {
+		ar, br := a.revs[i], b.revs[i]
+		if ar != br {
+			return false
+		}
+	}
+	return true
+}
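+
+// Editorial sketch (not in upstream etcd): the generation lifecycle described
+// in the keyIndex comment above, driven directly:
+//
+//	ki := &keyIndex{key: []byte("foo")}
+//	ki.put(1, 0)
+//	ki.put(2, 0)
+//	ki.tombstone(3, 0) // closes the first generation
+//	ki.put(4, 0)       // starts a new generation
+//	available := make(map[revision]struct{})
+//	ki.compact(2, available) // drops 1.0; keeps 2.0 and everything later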
diff --git a/vendor/github.com/coreos/etcd/mvcc/kv.go b/vendor/github.com/coreos/etcd/mvcc/kv.go
new file mode 100644
index 0000000..2dad3ad
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/mvcc/kv.go
@@ -0,0 +1,149 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+	"github.com/coreos/etcd/lease"
+	"github.com/coreos/etcd/mvcc/backend"
+	"github.com/coreos/etcd/mvcc/mvccpb"
+)
+
+type RangeOptions struct {
+	Limit int64
+	Rev   int64
+	Count bool
+}
+
+type RangeResult struct {
+	KVs   []mvccpb.KeyValue
+	Rev   int64
+	Count int
+}
+
+type ReadView interface {
+	// FirstRev returns the first KV revision at the time of opening the txn.
+	// After a compaction, the first revision increases to the compaction
+	// revision.
+	FirstRev() int64
+
+	// Rev returns the revision of the KV at the time of opening the txn.
+	Rev() int64
+
+	// Range gets the keys in the range at rangeRev.
+	// The returned rev is the current revision of the KV when the operation is executed.
+	// If rangeRev <= 0, range gets the keys at currentRev.
+	// If `end` is nil, the request returns the key.
+	// If `end` is not nil and not empty, it gets the keys in range [key, range_end).
+	// If `end` is not nil and empty, it gets the keys greater than or equal to key.
+	// Limit limits the number of keys returned.
+	// If the required rev is compacted, ErrCompacted will be returned.
+	Range(key, end []byte, ro RangeOptions) (r *RangeResult, err error)
+}
+
+// TxnRead represents a read-only transaction with operations that will not
+// block other read transactions.
+type TxnRead interface {
+	ReadView
+	// End marks the transaction as complete and ready to commit.
+	End()
+}
+
+type WriteView interface {
+	// DeleteRange deletes the given range from the store.
+	// A deleteRange increases the rev of the store if any key in the range exists.
+	// The number of keys deleted will be returned.
+	// The returned rev is the current revision of the KV when the operation is executed.
+	// It also generates one event for each key deleted in the event history.
+	// If `end` is nil, deleteRange deletes the key.
+	// If `end` is not nil, deleteRange deletes the keys in the range [key, range_end).
+	DeleteRange(key, end []byte) (n, rev int64)
+
+	// Put puts the given key-value pair into the store. Put also takes an additional
+	// argument, lease, to attach a lease to the key-value pair as metadata. The KV
+	// implementation does not validate the lease ID.
+	// A put also increases the rev of the store, and generates one event in the event history.
+	// The returned rev is the current revision of the KV when the operation is executed.
+	Put(key, value []byte, lease lease.LeaseID) (rev int64)
+}
+
+// TxnWrite represents a transaction that can modify the store.
+type TxnWrite interface {
+	TxnRead
+	WriteView
+	// Changes gets the changes made since opening the write txn.
+	Changes() []mvccpb.KeyValue
+}
+
+// txnReadWrite coerces a read txn to a write, panicking on any write operation.
+type txnReadWrite struct{ TxnRead }
+
+func (trw *txnReadWrite) DeleteRange(key, end []byte) (n, rev int64) { panic("unexpected DeleteRange") }
+func (trw *txnReadWrite) Put(key, value []byte, lease lease.LeaseID) (rev int64) {
+	panic("unexpected Put")
+}
+func (trw *txnReadWrite) Changes() []mvccpb.KeyValue { return nil }
+
+func NewReadOnlyTxnWrite(txn TxnRead) TxnWrite { return &txnReadWrite{txn} }
+
+type KV interface {
+	ReadView
+	WriteView
+
+	// Read creates a read transaction.
+	Read() TxnRead
+
+	// Write creates a write transaction.
+	Write() TxnWrite
+
+	// Hash computes the hash of the KV's backend.
+	Hash() (hash uint32, revision int64, err error)
+
+	// HashByRev computes the hash of all MVCC revisions up to a given revision.
+	HashByRev(rev int64) (hash uint32, revision int64, compactRev int64, err error)
+
+	// Compact frees all superseded keys with revisions less than rev.
+	Compact(rev int64) (<-chan struct{}, error)
+
+	// Commit commits outstanding txns into the underlying backend.
+	Commit()
+
+	// Restore restores the KV store from a backend.
+	Restore(b backend.Backend) error
+	Close() error
+}
+
+// WatchableKV is a KV that can be watched.
+type WatchableKV interface {
+	KV
+	Watchable
+}
+
+// Watchable is the interface that wraps the NewWatchStream function.
+type Watchable interface {
+	// NewWatchStream returns a WatchStream that can be used to
+	// watch events that happened or are happening on the KV.
+	NewWatchStream() WatchStream
+}
+
+// ConsistentWatchableKV is a WatchableKV that understands the consistency
+// algorithm and consistent index.
+// If the consistent index of the executing entry is not larger than the
+// consistent index of the ConsistentWatchableKV, all operations in
+// this entry are skipped and return an empty response.
+type ConsistentWatchableKV interface {
+	WatchableKV
+	// ConsistentIndex returns the current consistent index of the KV.
+	ConsistentIndex() uint64
+}
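+
+// Editorial sketch (not in upstream etcd): typical read-transaction usage of
+// the KV interface defined above; kv is any concrete KV implementation.
+//
+//	txn := kv.Read()
+//	defer txn.End()
+//	r, err := txn.Range([]byte("foo"), nil, RangeOptions{})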
diff --git a/vendor/github.com/coreos/etcd/mvcc/kv_view.go b/vendor/github.com/coreos/etcd/mvcc/kv_view.go
new file mode 100644
index 0000000..f40ba8e
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/mvcc/kv_view.go
@@ -0,0 +1,53 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+	"github.com/coreos/etcd/lease"
+)
+
+type readView struct{ kv KV }
+
+func (rv *readView) FirstRev() int64 {
+	tr := rv.kv.Read()
+	defer tr.End()
+	return tr.FirstRev()
+}
+
+func (rv *readView) Rev() int64 {
+	tr := rv.kv.Read()
+	defer tr.End()
+	return tr.Rev()
+}
+
+func (rv *readView) Range(key, end []byte, ro RangeOptions) (r *RangeResult, err error) {
+	tr := rv.kv.Read()
+	defer tr.End()
+	return tr.Range(key, end, ro)
+}
+
+type writeView struct{ kv KV }
+
+func (wv *writeView) DeleteRange(key, end []byte) (n, rev int64) {
+	tw := wv.kv.Write()
+	defer tw.End()
+	return tw.DeleteRange(key, end)
+}
+
+func (wv *writeView) Put(key, value []byte, lease lease.LeaseID) (rev int64) {
+	tw := wv.kv.Write()
+	defer tw.End()
+	return tw.Put(key, value, lease)
+}
diff --git a/vendor/github.com/coreos/etcd/mvcc/kvstore.go b/vendor/github.com/coreos/etcd/mvcc/kvstore.go
new file mode 100644
index 0000000..dd9f04a
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/mvcc/kvstore.go
@@ -0,0 +1,510 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+	"context"
+	"encoding/binary"
+	"errors"
+	"hash/crc32"
+	"math"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/coreos/etcd/lease"
+	"github.com/coreos/etcd/mvcc/backend"
+	"github.com/coreos/etcd/mvcc/mvccpb"
+	"github.com/coreos/etcd/pkg/schedule"
+	"github.com/coreos/pkg/capnslog"
+)
+
+var (
+	keyBucketName  = []byte("key")
+	metaBucketName = []byte("meta")
+
+	consistentIndexKeyName  = []byte("consistent_index")
+	scheduledCompactKeyName = []byte("scheduledCompactRev")
+	finishedCompactKeyName  = []byte("finishedCompactRev")
+
+	ErrCompacted = errors.New("mvcc: required revision has been compacted")
+	ErrFutureRev = errors.New("mvcc: required revision is a future revision")
+	ErrCanceled  = errors.New("mvcc: watcher is canceled")
+	ErrClosed    = errors.New("mvcc: closed")
+
+	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "mvcc")
+)
+
+const (
+	// markedRevBytesLen is the byte length of a marked revision.
+	// The first `revBytesLen` bytes represent a normal revision. The last
+	// byte is the mark.
+	markedRevBytesLen      = revBytesLen + 1
+	markBytePosition       = markedRevBytesLen - 1
+	markTombstone     byte = 't'
+)
+
+var restoreChunkKeys = 10000 // non-const for testing
+
+// ConsistentIndexGetter is an interface that wraps the ConsistentIndex method.
+// A consistent index is the offset of an entry in a consistent replicated log.
+type ConsistentIndexGetter interface {
+	// ConsistentIndex returns the consistent index of the currently executing entry.
+	ConsistentIndex() uint64
+}
+
+type store struct {
+	ReadView
+	WriteView
+
+	// consistentIndex caches the "consistent_index" key's value. Accessed
+	// through atomics so must be 64-bit aligned.
+	consistentIndex uint64
+
+	// mu is read-locked for txns and write-locked for non-txn store changes.
+	mu sync.RWMutex
+
+	ig ConsistentIndexGetter
+
+	b       backend.Backend
+	kvindex index
+
+	le lease.Lessor
+
+	// revMu protects currentRev and compactMainRev.
+	// It is locked at the end of a write txn and released after the write txn unlocks its lock.
+	// It is locked before locking a read txn and released after locking.
+	revMu sync.RWMutex
+	// currentRev is the revision of the last completed transaction.
+	currentRev int64
+	// compactMainRev is the main revision of the last compaction.
+	compactMainRev int64
+
+	// bytesBuf8 is a byte slice of length 8
+	// to avoid a repetitive allocation in saveIndex.
+	bytesBuf8 []byte
+
+	fifoSched schedule.Scheduler
+
+	stopc chan struct{}
+}
+
+// NewStore returns a new store. It is useful for creating a store inside
+// the mvcc package. Externally, it should only be used for testing.
+func NewStore(b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) *store {
+	s := &store{
+		b:       b,
+		ig:      ig,
+		kvindex: newTreeIndex(),
+
+		le: le,
+
+		currentRev:     1,
+		compactMainRev: -1,
+
+		bytesBuf8: make([]byte, 8),
+		fifoSched: schedule.NewFIFOScheduler(),
+
+		stopc: make(chan struct{}),
+	}
+	s.ReadView = &readView{s}
+	s.WriteView = &writeView{s}
+	if s.le != nil {
+		s.le.SetRangeDeleter(func() lease.TxnDelete { return s.Write() })
+	}
+
+	tx := s.b.BatchTx()
+	tx.Lock()
+	tx.UnsafeCreateBucket(keyBucketName)
+	tx.UnsafeCreateBucket(metaBucketName)
+	tx.Unlock()
+	s.b.ForceCommit()
+
+	if err := s.restore(); err != nil {
+		// TODO: return the error instead of panic here?
+		panic("failed to recover store from backend")
+	}
+
+	return s
+}
+
+func (s *store) compactBarrier(ctx context.Context, ch chan struct{}) {
+	if ctx == nil || ctx.Err() != nil {
+		s.mu.Lock()
+		select {
+		case <-s.stopc:
+		default:
+			f := func(ctx context.Context) { s.compactBarrier(ctx, ch) }
+			s.fifoSched.Schedule(f)
+		}
+		s.mu.Unlock()
+		return
+	}
+	close(ch)
+}
+
+func (s *store) Hash() (hash uint32, revision int64, err error) {
+	start := time.Now()
+
+	s.b.ForceCommit()
+	h, err := s.b.Hash(DefaultIgnores)
+
+	hashDurations.Observe(time.Since(start).Seconds())
+	return h, s.currentRev, err
+}
+
+func (s *store) HashByRev(rev int64) (hash uint32, currentRev int64, compactRev int64, err error) {
+	start := time.Now()
+
+	s.mu.RLock()
+	s.revMu.RLock()
+	compactRev, currentRev = s.compactMainRev, s.currentRev
+	s.revMu.RUnlock()
+
+	if rev > 0 && rev <= compactRev {
+		s.mu.RUnlock()
+		return 0, 0, compactRev, ErrCompacted
+	} else if rev > 0 && rev > currentRev {
+		s.mu.RUnlock()
+		return 0, currentRev, 0, ErrFutureRev
+	}
+
+	if rev == 0 {
+		rev = currentRev
+	}
+	keep := s.kvindex.Keep(rev)
+
+	tx := s.b.ReadTx()
+	tx.Lock()
+	defer tx.Unlock()
+	s.mu.RUnlock()
+
+	upper := revision{main: rev + 1}
+	lower := revision{main: compactRev + 1}
+	h := crc32.New(crc32.MakeTable(crc32.Castagnoli))
+
+	h.Write(keyBucketName)
+	err = tx.UnsafeForEach(keyBucketName, func(k, v []byte) error {
+		kr := bytesToRev(k)
+		if !upper.GreaterThan(kr) {
+			return nil
+		}
+		// skip revisions that are scheduled for deletion
+		// due to compacting; don't skip if there isn't one.
+		if lower.GreaterThan(kr) && len(keep) > 0 {
+			if _, ok := keep[kr]; !ok {
+				return nil
+			}
+		}
+		h.Write(k)
+		h.Write(v)
+		return nil
+	})
+	hash = h.Sum32()
+
+	hashRevDurations.Observe(time.Since(start).Seconds())
+	return hash, currentRev, compactRev, err
+}
+
+func (s *store) Compact(rev int64) (<-chan struct{}, error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	s.revMu.Lock()
+	defer s.revMu.Unlock()
+
+	if rev <= s.compactMainRev {
+		ch := make(chan struct{})
+		f := func(ctx context.Context) { s.compactBarrier(ctx, ch) }
+		s.fifoSched.Schedule(f)
+		return ch, ErrCompacted
+	}
+	if rev > s.currentRev {
+		return nil, ErrFutureRev
+	}
+
+	start := time.Now()
+
+	s.compactMainRev = rev
+
+	rbytes := newRevBytes()
+	revToBytes(revision{main: rev}, rbytes)
+
+	tx := s.b.BatchTx()
+	tx.Lock()
+	tx.UnsafePut(metaBucketName, scheduledCompactKeyName, rbytes)
+	tx.Unlock()
+	// ensure that desired compaction is persisted
+	s.b.ForceCommit()
+
+	keep := s.kvindex.Compact(rev)
+	ch := make(chan struct{})
+	var j = func(ctx context.Context) {
+		if ctx.Err() != nil {
+			s.compactBarrier(ctx, ch)
+			return
+		}
+		if !s.scheduleCompaction(rev, keep) {
+			s.compactBarrier(nil, ch)
+			return
+		}
+		close(ch)
+	}
+
+	s.fifoSched.Schedule(j)
+
+	indexCompactionPauseDurations.Observe(float64(time.Since(start) / time.Millisecond))
+	return ch, nil
+}
+
+// DefaultIgnores is a map of keys to ignore in hash checking.
+var DefaultIgnores map[backend.IgnoreKey]struct{}
+
+func init() {
+	DefaultIgnores = map[backend.IgnoreKey]struct{}{
+		// consistent index might be changed due to v2 internal sync, which
+		// is not controllable by the user.
+		{Bucket: string(metaBucketName), Key: string(consistentIndexKeyName)}: {},
+	}
+}
+
+func (s *store) Commit() {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	tx := s.b.BatchTx()
+	tx.Lock()
+	s.saveIndex(tx)
+	tx.Unlock()
+	s.b.ForceCommit()
+}
+
+func (s *store) Restore(b backend.Backend) error {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	close(s.stopc)
+	s.fifoSched.Stop()
+
+	atomic.StoreUint64(&s.consistentIndex, 0)
+	s.b = b
+	s.kvindex = newTreeIndex()
+	s.currentRev = 1
+	s.compactMainRev = -1
+	s.fifoSched = schedule.NewFIFOScheduler()
+	s.stopc = make(chan struct{})
+
+	return s.restore()
+}
+
+func (s *store) restore() error {
+	b := s.b
+
+	reportDbTotalSizeInBytesMu.Lock()
+	reportDbTotalSizeInBytes = func() float64 { return float64(b.Size()) }
+	reportDbTotalSizeInBytesMu.Unlock()
+	reportDbTotalSizeInUseInBytesMu.Lock()
+	reportDbTotalSizeInUseInBytes = func() float64 { return float64(b.SizeInUse()) }
+	reportDbTotalSizeInUseInBytesMu.Unlock()
+
+	min, max := newRevBytes(), newRevBytes()
+	revToBytes(revision{main: 1}, min)
+	revToBytes(revision{main: math.MaxInt64, sub: math.MaxInt64}, max)
+
+	keyToLease := make(map[string]lease.LeaseID)
+
+	// restore index
+	tx := s.b.BatchTx()
+	tx.Lock()
+
+	_, finishedCompactBytes := tx.UnsafeRange(metaBucketName, finishedCompactKeyName, nil, 0)
+	if len(finishedCompactBytes) != 0 {
+		s.compactMainRev = bytesToRev(finishedCompactBytes[0]).main
+		plog.Printf("restore compact to %d", s.compactMainRev)
+	}
+	_, scheduledCompactBytes := tx.UnsafeRange(metaBucketName, scheduledCompactKeyName, nil, 0)
+	scheduledCompact := int64(0)
+	if len(scheduledCompactBytes) != 0 {
+		scheduledCompact = bytesToRev(scheduledCompactBytes[0]).main
+	}
+
+	// index keys concurrently as they're loaded in from tx
+	keysGauge.Set(0)
+	rkvc, revc := restoreIntoIndex(s.kvindex)
+	for {
+		keys, vals := tx.UnsafeRange(keyBucketName, min, max, int64(restoreChunkKeys))
+		if len(keys) == 0 {
+			break
+		}
+		// rkvc blocks if the total pending keys exceeds the restore
+		// chunk size to keep keys from consuming too much memory.
+		restoreChunk(rkvc, keys, vals, keyToLease)
+		if len(keys) < restoreChunkKeys {
+			// partial set implies final set
+			break
+		}
+		// next set begins after where this one ended
+		newMin := bytesToRev(keys[len(keys)-1][:revBytesLen])
+		newMin.sub++
+		revToBytes(newMin, min)
+	}
+	close(rkvc)
+	s.currentRev = <-revc
+
+	// Keys in the range [compacted revision - N, compaction] might all be deleted due to compaction.
+	// In that case, the correct revision should be set to the compaction revision, not the largest
+	// revision we have seen.
+	if s.currentRev < s.compactMainRev {
+		s.currentRev = s.compactMainRev
+	}
+	if scheduledCompact <= s.compactMainRev {
+		scheduledCompact = 0
+	}
+
+	for key, lid := range keyToLease {
+		if s.le == nil {
+			panic("no lessor to attach lease")
+		}
+		err := s.le.Attach(lid, []lease.LeaseItem{{Key: key}})
+		if err != nil {
+			plog.Errorf("unexpected Attach error: %v", err)
+		}
+	}
+
+	tx.Unlock()
+
+	if scheduledCompact != 0 {
+		s.Compact(scheduledCompact)
+		plog.Printf("resume scheduled compaction at %d", scheduledCompact)
+	}
+
+	return nil
+}
+
+type revKeyValue struct {
+	key  []byte
+	kv   mvccpb.KeyValue
+	kstr string
+}
+
+func restoreIntoIndex(idx index) (chan<- revKeyValue, <-chan int64) {
+	rkvc, revc := make(chan revKeyValue, restoreChunkKeys), make(chan int64, 1)
+	go func() {
+		currentRev := int64(1)
+		defer func() { revc <- currentRev }()
+		// restore the tree index by streaming in the unordered index entries.
+		kiCache := make(map[string]*keyIndex, restoreChunkKeys)
+		for rkv := range rkvc {
+			ki, ok := kiCache[rkv.kstr]
+			// purge kiCache if it holds many keys but the key is still missing from the cache
+			if !ok && len(kiCache) >= restoreChunkKeys {
+				i := 10
+				for k := range kiCache {
+					delete(kiCache, k)
+					if i--; i == 0 {
+						break
+					}
+				}
+			}
+			// cache miss; fetch from the tree index if present
+			if !ok {
+				ki = &keyIndex{key: rkv.kv.Key}
+				if idxKey := idx.KeyIndex(ki); idxKey != nil {
+					kiCache[rkv.kstr], ki = idxKey, idxKey
+					ok = true
+				}
+			}
+			rev := bytesToRev(rkv.key)
+			currentRev = rev.main
+			if ok {
+				if isTombstone(rkv.key) {
+					ki.tombstone(rev.main, rev.sub)
+					continue
+				}
+				ki.put(rev.main, rev.sub)
+			} else if !isTombstone(rkv.key) {
+				ki.restore(revision{rkv.kv.CreateRevision, 0}, rev, rkv.kv.Version)
+				idx.Insert(ki)
+				kiCache[rkv.kstr] = ki
+			}
+		}
+	}()
+	return rkvc, revc
+}
+
+func restoreChunk(kvc chan<- revKeyValue, keys, vals [][]byte, keyToLease map[string]lease.LeaseID) {
+	for i, key := range keys {
+		rkv := revKeyValue{key: key}
+		if err := rkv.kv.Unmarshal(vals[i]); err != nil {
+			plog.Fatalf("cannot unmarshal event: %v", err)
+		}
+		rkv.kstr = string(rkv.kv.Key)
+		if isTombstone(key) {
+			delete(keyToLease, rkv.kstr)
+		} else if lid := lease.LeaseID(rkv.kv.Lease); lid != lease.NoLease {
+			keyToLease[rkv.kstr] = lid
+		} else {
+			delete(keyToLease, rkv.kstr)
+		}
+		kvc <- rkv
+	}
+}
+
+func (s *store) Close() error {
+	close(s.stopc)
+	s.fifoSched.Stop()
+	return nil
+}
+
+func (s *store) saveIndex(tx backend.BatchTx) {
+	if s.ig == nil {
+		return
+	}
+	bs := s.bytesBuf8
+	ci := s.ig.ConsistentIndex()
+	binary.BigEndian.PutUint64(bs, ci)
+	// put the index into the underlying backend
+	// tx has been locked in TxnBegin, so there is no need to lock it again
+	tx.UnsafePut(metaBucketName, consistentIndexKeyName, bs)
+	atomic.StoreUint64(&s.consistentIndex, ci)
+}
+
+func (s *store) ConsistentIndex() uint64 {
+	if ci := atomic.LoadUint64(&s.consistentIndex); ci > 0 {
+		return ci
+	}
+	tx := s.b.BatchTx()
+	tx.Lock()
+	defer tx.Unlock()
+	_, vs := tx.UnsafeRange(metaBucketName, consistentIndexKeyName, nil, 0)
+	if len(vs) == 0 {
+		return 0
+	}
+	v := binary.BigEndian.Uint64(vs[0])
+	atomic.StoreUint64(&s.consistentIndex, v)
+	return v
+}
+
+// appendMarkTombstone appends the tombstone mark to normal revision bytes.
+func appendMarkTombstone(b []byte) []byte {
+	if len(b) != revBytesLen {
+		plog.Panicf("cannot append mark to non normal revision bytes")
+	}
+	return append(b, markTombstone)
+}
+
+// isTombstone checks whether the given revision bytes represent a tombstone.
+func isTombstone(b []byte) bool {
+	return len(b) == markedRevBytesLen && b[markBytePosition] == markTombstone
+}
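+
+// Editorial sketch (not in upstream etcd): revision bytes round-trip through
+// the tombstone mark; newRevBytes and revToBytes live in revision.go of this
+// package.
+//
+//	b := newRevBytes()
+//	revToBytes(revision{main: 5}, b)
+//	b = appendMarkTombstone(b)
+//	_ = isTombstone(b) // true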
diff --git a/vendor/github.com/coreos/etcd/mvcc/kvstore_compaction.go b/vendor/github.com/coreos/etcd/mvcc/kvstore_compaction.go
new file mode 100644
index 0000000..1726490
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/mvcc/kvstore_compaction.go
@@ -0,0 +1,69 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+	"encoding/binary"
+	"time"
+)
+
+func (s *store) scheduleCompaction(compactMainRev int64, keep map[revision]struct{}) bool {
+	totalStart := time.Now()
+	defer dbCompactionTotalDurations.Observe(float64(time.Since(totalStart) / time.Millisecond))
+	keyCompactions := 0
+	defer func() { dbCompactionKeysCounter.Add(float64(keyCompactions)) }()
+
+	end := make([]byte, 8)
+	binary.BigEndian.PutUint64(end, uint64(compactMainRev+1))
+
+	batchsize := int64(10000)
+	last := make([]byte, 8+1+8)
+	for {
+		var rev revision
+
+		start := time.Now()
+		tx := s.b.BatchTx()
+		tx.Lock()
+
+		keys, _ := tx.UnsafeRange(keyBucketName, last, end, batchsize)
+		for _, key := range keys {
+			rev = bytesToRev(key)
+			if _, ok := keep[rev]; !ok {
+				tx.UnsafeDelete(keyBucketName, key)
+				keyCompactions++
+			}
+		}
+
+		if len(keys) < int(batchsize) {
+			rbytes := make([]byte, 8+1+8)
+			revToBytes(revision{main: compactMainRev}, rbytes)
+			tx.UnsafePut(metaBucketName, finishedCompactKeyName, rbytes)
+			tx.Unlock()
+			plog.Printf("finished scheduled compaction at %d (took %v)", compactMainRev, time.Since(totalStart))
+			return true
+		}
+
+		// update last
+		revToBytes(revision{main: rev.main, sub: rev.sub + 1}, last)
+		tx.Unlock()
+		dbCompactionPauseDurations.Observe(float64(time.Since(start) / time.Millisecond))
+
+		select {
+		case <-time.After(100 * time.Millisecond):
+		case <-s.stopc:
+			return false
+		}
+	}
+}
diff --git a/vendor/github.com/coreos/etcd/mvcc/kvstore_txn.go b/vendor/github.com/coreos/etcd/mvcc/kvstore_txn.go
new file mode 100644
index 0000000..8896fb8
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/mvcc/kvstore_txn.go
@@ -0,0 +1,253 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+	"github.com/coreos/etcd/lease"
+	"github.com/coreos/etcd/mvcc/backend"
+	"github.com/coreos/etcd/mvcc/mvccpb"
+)
+
+type storeTxnRead struct {
+	s  *store
+	tx backend.ReadTx
+
+	firstRev int64
+	rev      int64
+}
+
+func (s *store) Read() TxnRead {
+	s.mu.RLock()
+	tx := s.b.ReadTx()
+	s.revMu.RLock()
+	tx.Lock()
+	firstRev, rev := s.compactMainRev, s.currentRev
+	s.revMu.RUnlock()
+	return newMetricsTxnRead(&storeTxnRead{s, tx, firstRev, rev})
+}
+
+func (tr *storeTxnRead) FirstRev() int64 { return tr.firstRev }
+func (tr *storeTxnRead) Rev() int64      { return tr.rev }
+
+func (tr *storeTxnRead) Range(key, end []byte, ro RangeOptions) (r *RangeResult, err error) {
+	return tr.rangeKeys(key, end, tr.Rev(), ro)
+}
+
+func (tr *storeTxnRead) End() {
+	tr.tx.Unlock()
+	tr.s.mu.RUnlock()
+}
+
+type storeTxnWrite struct {
+	storeTxnRead
+	tx backend.BatchTx
+	// beginRev is the revision where the txn begins; it will write to the next revision.
+	beginRev int64
+	changes  []mvccpb.KeyValue
+}
+
+func (s *store) Write() TxnWrite {
+	s.mu.RLock()
+	tx := s.b.BatchTx()
+	tx.Lock()
+	tw := &storeTxnWrite{
+		storeTxnRead: storeTxnRead{s, tx, 0, 0},
+		tx:           tx,
+		beginRev:     s.currentRev,
+		changes:      make([]mvccpb.KeyValue, 0, 4),
+	}
+	return newMetricsTxnWrite(tw)
+}
+
+func (tw *storeTxnWrite) Rev() int64 { return tw.beginRev }
+
+func (tw *storeTxnWrite) Range(key, end []byte, ro RangeOptions) (r *RangeResult, err error) {
+	rev := tw.beginRev
+	if len(tw.changes) > 0 {
+		rev++
+	}
+	return tw.rangeKeys(key, end, rev, ro)
+}
+
+func (tw *storeTxnWrite) DeleteRange(key, end []byte) (int64, int64) {
+	if n := tw.deleteRange(key, end); n != 0 || len(tw.changes) > 0 {
+		return n, int64(tw.beginRev + 1)
+	}
+	return 0, int64(tw.beginRev)
+}
+
+func (tw *storeTxnWrite) Put(key, value []byte, lease lease.LeaseID) int64 {
+	tw.put(key, value, lease)
+	return int64(tw.beginRev + 1)
+}
+
+func (tw *storeTxnWrite) End() {
+	// only update index if the txn modifies the mvcc state.
+	if len(tw.changes) != 0 {
+		tw.s.saveIndex(tw.tx)
+		// hold revMu lock to prevent new read txns from opening until writeback.
+		tw.s.revMu.Lock()
+		tw.s.currentRev++
+	}
+	tw.tx.Unlock()
+	if len(tw.changes) != 0 {
+		tw.s.revMu.Unlock()
+	}
+	tw.s.mu.RUnlock()
+}
+
+func (tr *storeTxnRead) rangeKeys(key, end []byte, curRev int64, ro RangeOptions) (*RangeResult, error) {
+	rev := ro.Rev
+	if rev > curRev {
+		return &RangeResult{KVs: nil, Count: -1, Rev: curRev}, ErrFutureRev
+	}
+	if rev <= 0 {
+		rev = curRev
+	}
+	if rev < tr.s.compactMainRev {
+		return &RangeResult{KVs: nil, Count: -1, Rev: 0}, ErrCompacted
+	}
+
+	revpairs := tr.s.kvindex.Revisions(key, end, int64(rev))
+	if len(revpairs) == 0 {
+		return &RangeResult{KVs: nil, Count: 0, Rev: curRev}, nil
+	}
+	if ro.Count {
+		return &RangeResult{KVs: nil, Count: len(revpairs), Rev: curRev}, nil
+	}
+
+	limit := int(ro.Limit)
+	if limit <= 0 || limit > len(revpairs) {
+		limit = len(revpairs)
+	}
+
+	kvs := make([]mvccpb.KeyValue, limit)
+	revBytes := newRevBytes()
+	for i, revpair := range revpairs[:len(kvs)] {
+		revToBytes(revpair, revBytes)
+		_, vs := tr.tx.UnsafeRange(keyBucketName, revBytes, nil, 0)
+		if len(vs) != 1 {
+			plog.Fatalf("range cannot find rev (%d,%d)", revpair.main, revpair.sub)
+		}
+		if err := kvs[i].Unmarshal(vs[0]); err != nil {
+			plog.Fatalf("cannot unmarshal event: %v", err)
+		}
+	}
+	return &RangeResult{KVs: kvs, Count: len(revpairs), Rev: curRev}, nil
+}
+
+func (tw *storeTxnWrite) put(key, value []byte, leaseID lease.LeaseID) {
+	rev := tw.beginRev + 1
+	c := rev
+	oldLease := lease.NoLease
+
+	// if the key existed before, use its previous created revision
+	// and its previous leaseID
+	_, created, ver, err := tw.s.kvindex.Get(key, rev)
+	if err == nil {
+		c = created.main
+		oldLease = tw.s.le.GetLease(lease.LeaseItem{Key: string(key)})
+	}
+
+	ibytes := newRevBytes()
+	idxRev := revision{main: rev, sub: int64(len(tw.changes))}
+	revToBytes(idxRev, ibytes)
+
+	ver = ver + 1
+	kv := mvccpb.KeyValue{
+		Key:            key,
+		Value:          value,
+		CreateRevision: c,
+		ModRevision:    rev,
+		Version:        ver,
+		Lease:          int64(leaseID),
+	}
+
+	d, err := kv.Marshal()
+	if err != nil {
+		plog.Fatalf("cannot marshal event: %v", err)
+	}
+
+	tw.tx.UnsafeSeqPut(keyBucketName, ibytes, d)
+	tw.s.kvindex.Put(key, idxRev)
+	tw.changes = append(tw.changes, kv)
+
+	if oldLease != lease.NoLease {
+		if tw.s.le == nil {
+			panic("no lessor to detach lease")
+		}
+		err = tw.s.le.Detach(oldLease, []lease.LeaseItem{{Key: string(key)}})
+		if err != nil {
+			plog.Errorf("unexpected error from lease detach: %v", err)
+		}
+	}
+	if leaseID != lease.NoLease {
+		if tw.s.le == nil {
+			panic("no lessor to attach lease")
+		}
+		err = tw.s.le.Attach(leaseID, []lease.LeaseItem{{Key: string(key)}})
+		if err != nil {
+			panic("unexpected error from lease Attach")
+		}
+	}
+}
+
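+// deleteRange ranges the index at the txn's read revision; if the txn
+// already buffered changes, it reads at beginRev+1 so that its own
+// pending writes are observed.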
+func (tw *storeTxnWrite) deleteRange(key, end []byte) int64 {
+	rrev := tw.beginRev
+	if len(tw.changes) > 0 {
+		rrev += 1
+	}
+	keys, revs := tw.s.kvindex.Range(key, end, rrev)
+	if len(keys) == 0 {
+		return 0
+	}
+	for i, key := range keys {
+		tw.delete(key, revs[i])
+	}
+	return int64(len(keys))
+}
+
+func (tw *storeTxnWrite) delete(key []byte, rev revision) {
+	ibytes := newRevBytes()
+	idxRev := revision{main: tw.beginRev + 1, sub: int64(len(tw.changes))}
+	revToBytes(idxRev, ibytes)
+	ibytes = appendMarkTombstone(ibytes)
+
+	kv := mvccpb.KeyValue{Key: key}
+
+	d, err := kv.Marshal()
+	if err != nil {
+		plog.Fatalf("cannot marshal event: %v", err)
+	}
+
+	tw.tx.UnsafeSeqPut(keyBucketName, ibytes, d)
+	err = tw.s.kvindex.Tombstone(key, idxRev)
+	if err != nil {
+		plog.Fatalf("cannot tombstone an existing key (%s): %v", string(key), err)
+	}
+	tw.changes = append(tw.changes, kv)
+
+	item := lease.LeaseItem{Key: string(key)}
+	leaseID := tw.s.le.GetLease(item)
+
+	if leaseID != lease.NoLease {
+		err = tw.s.le.Detach(leaseID, []lease.LeaseItem{item})
+		if err != nil {
+			plog.Errorf("cannot detach %v", err)
+		}
+	}
+}
+
+func (tw *storeTxnWrite) Changes() []mvccpb.KeyValue { return tw.changes }
diff --git a/vendor/github.com/coreos/etcd/mvcc/metrics.go b/vendor/github.com/coreos/etcd/mvcc/metrics.go
new file mode 100644
index 0000000..b753310
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/mvcc/metrics.go
@@ -0,0 +1,239 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+	"sync"
+
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+var (
+	rangeCounter = prometheus.NewCounter(
+		prometheus.CounterOpts{
+			Namespace: "etcd_debugging",
+			Subsystem: "mvcc",
+			Name:      "range_total",
+			Help:      "Total number of ranges seen by this member.",
+		})
+
+	putCounter = prometheus.NewCounter(
+		prometheus.CounterOpts{
+			Namespace: "etcd_debugging",
+			Subsystem: "mvcc",
+			Name:      "put_total",
+			Help:      "Total number of puts seen by this member.",
+		})
+
+	deleteCounter = prometheus.NewCounter(
+		prometheus.CounterOpts{
+			Namespace: "etcd_debugging",
+			Subsystem: "mvcc",
+			Name:      "delete_total",
+			Help:      "Total number of deletes seen by this member.",
+		})
+
+	txnCounter = prometheus.NewCounter(
+		prometheus.CounterOpts{
+			Namespace: "etcd_debugging",
+			Subsystem: "mvcc",
+			Name:      "txn_total",
+			Help:      "Total number of txns seen by this member.",
+		})
+
+	keysGauge = prometheus.NewGauge(
+		prometheus.GaugeOpts{
+			Namespace: "etcd_debugging",
+			Subsystem: "mvcc",
+			Name:      "keys_total",
+			Help:      "Total number of keys.",
+		})
+
+	watchStreamGauge = prometheus.NewGauge(
+		prometheus.GaugeOpts{
+			Namespace: "etcd_debugging",
+			Subsystem: "mvcc",
+			Name:      "watch_stream_total",
+			Help:      "Total number of watch streams.",
+		})
+
+	watcherGauge = prometheus.NewGauge(
+		prometheus.GaugeOpts{
+			Namespace: "etcd_debugging",
+			Subsystem: "mvcc",
+			Name:      "watcher_total",
+			Help:      "Total number of watchers.",
+		})
+
+	slowWatcherGauge = prometheus.NewGauge(
+		prometheus.GaugeOpts{
+			Namespace: "etcd_debugging",
+			Subsystem: "mvcc",
+			Name:      "slow_watcher_total",
+			Help:      "Total number of unsynced slow watchers.",
+		})
+
+	totalEventsCounter = prometheus.NewCounter(
+		prometheus.CounterOpts{
+			Namespace: "etcd_debugging",
+			Subsystem: "mvcc",
+			Name:      "events_total",
+			Help:      "Total number of events sent by this member.",
+		})
+
+	pendingEventsGauge = prometheus.NewGauge(
+		prometheus.GaugeOpts{
+			Namespace: "etcd_debugging",
+			Subsystem: "mvcc",
+			Name:      "pending_events_total",
+			Help:      "Total number of pending events to be sent.",
+		})
+
+	indexCompactionPauseDurations = prometheus.NewHistogram(
+		prometheus.HistogramOpts{
+			Namespace: "etcd_debugging",
+			Subsystem: "mvcc",
+			Name:      "index_compaction_pause_duration_milliseconds",
+			Help:      "Bucketed histogram of index compaction pause duration.",
+			// 0.5 ms -> ~1 s
+			Buckets: prometheus.ExponentialBuckets(0.5, 2, 12),
+		})
+
+	dbCompactionPauseDurations = prometheus.NewHistogram(
+		prometheus.HistogramOpts{
+			Namespace: "etcd_debugging",
+			Subsystem: "mvcc",
+			Name:      "db_compaction_pause_duration_milliseconds",
+			Help:      "Bucketed histogram of db compaction pause duration.",
+			// 1 ms -> ~4 s
+			Buckets: prometheus.ExponentialBuckets(1, 2, 13),
+		})
+
+	dbCompactionTotalDurations = prometheus.NewHistogram(
+		prometheus.HistogramOpts{
+			Namespace: "etcd_debugging",
+			Subsystem: "mvcc",
+			Name:      "db_compaction_total_duration_milliseconds",
+			Help:      "Bucketed histogram of db compaction total duration.",
+			// 100 ms -> ~800 s
+			Buckets: prometheus.ExponentialBuckets(100, 2, 14),
+		})
+
+	dbCompactionKeysCounter = prometheus.NewCounter(
+		prometheus.CounterOpts{
+			Namespace: "etcd_debugging",
+			Subsystem: "mvcc",
+			Name:      "db_compaction_keys_total",
+			Help:      "Total number of db keys compacted.",
+		})
+
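+	// dbTotalSizeDebugging duplicates dbTotalSize under the legacy
+	// etcd_debugging namespace; its help text points users to the
+	// etcd_mvcc variant instead.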
+	dbTotalSizeDebugging = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
+		Namespace: "etcd_debugging",
+		Subsystem: "mvcc",
+		Name:      "db_total_size_in_bytes",
+		Help:      "Total size of the underlying database physically allocated in bytes. Use etcd_mvcc_db_total_size_in_bytes",
+	},
+		func() float64 {
+			reportDbTotalSizeInBytesMu.RLock()
+			defer reportDbTotalSizeInBytesMu.RUnlock()
+			return reportDbTotalSizeInBytes()
+		},
+	)
+	dbTotalSize = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
+		Namespace: "etcd",
+		Subsystem: "mvcc",
+		Name:      "db_total_size_in_bytes",
+		Help:      "Total size of the underlying database physically allocated in bytes.",
+	},
+		func() float64 {
+			reportDbTotalSizeInBytesMu.RLock()
+			defer reportDbTotalSizeInBytesMu.RUnlock()
+			return reportDbTotalSizeInBytes()
+		},
+	)
+	// overridden by mvcc initialization
+	reportDbTotalSizeInBytesMu sync.RWMutex
+	reportDbTotalSizeInBytes   = func() float64 { return 0 }
+
+	dbTotalSizeInUse = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
+		Namespace: "etcd",
+		Subsystem: "mvcc",
+		Name:      "db_total_size_in_use_in_bytes",
+		Help:      "Total size of the underlying database logically in use in bytes.",
+	},
+		func() float64 {
+			reportDbTotalSizeInUseInBytesMu.RLock()
+			defer reportDbTotalSizeInUseInBytesMu.RUnlock()
+			return reportDbTotalSizeInUseInBytes()
+		},
+	)
+	// overridden by mvcc initialization
+	reportDbTotalSizeInUseInBytesMu sync.RWMutex
+	reportDbTotalSizeInUseInBytes = func() float64 { return 0 }
+
+	hashDurations = prometheus.NewHistogram(prometheus.HistogramOpts{
+		Namespace: "etcd",
+		Subsystem: "mvcc",
+		Name:      "hash_duration_seconds",
+		Help:      "The latency distribution of storage hash operation.",
+
+		// hashing 100 MB usually takes ~100 ms, so start at 10 ms (~10 MB)
+		// lowest bucket start of upper bound 0.01 sec (10 ms) with factor 2
+		// highest bucket start of 0.01 sec * 2^14 == 163.84 sec
+		Buckets: prometheus.ExponentialBuckets(.01, 2, 15),
+	})
+
+	hashRevDurations = prometheus.NewHistogram(prometheus.HistogramOpts{
+		Namespace: "etcd",
+		Subsystem: "mvcc",
+		Name:      "hash_rev_duration_seconds",
+		Help:      "The latency distribution of storage hash by revision operation.",
+
+		// hashing 100 MB usually takes ~100 ms, so start at 10 ms (~10 MB)
+		// lowest bucket start of upper bound 0.01 sec (10 ms) with factor 2
+		// highest bucket start of 0.01 sec * 2^14 == 163.84 sec
+		Buckets: prometheus.ExponentialBuckets(.01, 2, 15),
+	})
+)
+
+func init() {
+	prometheus.MustRegister(rangeCounter)
+	prometheus.MustRegister(putCounter)
+	prometheus.MustRegister(deleteCounter)
+	prometheus.MustRegister(txnCounter)
+	prometheus.MustRegister(keysGauge)
+	prometheus.MustRegister(watchStreamGauge)
+	prometheus.MustRegister(watcherGauge)
+	prometheus.MustRegister(slowWatcherGauge)
+	prometheus.MustRegister(totalEventsCounter)
+	prometheus.MustRegister(pendingEventsGauge)
+	prometheus.MustRegister(indexCompactionPauseDurations)
+	prometheus.MustRegister(dbCompactionPauseDurations)
+	prometheus.MustRegister(dbCompactionTotalDurations)
+	prometheus.MustRegister(dbCompactionKeysCounter)
+	prometheus.MustRegister(dbTotalSizeDebugging)
+	prometheus.MustRegister(dbTotalSize)
+	prometheus.MustRegister(dbTotalSizeInUse)
+	prometheus.MustRegister(hashDurations)
+	prometheus.MustRegister(hashRevDurations)
+}
+
+// ReportEventReceived reports that an event is received.
+// This function should be called when an external system receives an
+// event from mvcc.Watcher.
+func ReportEventReceived(n int) {
+	pendingEventsGauge.Sub(float64(n))
+	totalEventsCounter.Add(float64(n))
+}
diff --git a/vendor/github.com/coreos/etcd/mvcc/metrics_txn.go b/vendor/github.com/coreos/etcd/mvcc/metrics_txn.go
new file mode 100644
index 0000000..911d648
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/mvcc/metrics_txn.go
@@ -0,0 +1,59 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+	"github.com/coreos/etcd/lease"
+)
+
+type metricsTxnWrite struct {
+	TxnWrite
+	ranges  uint
+	puts    uint
+	deletes uint
+}
+
+func newMetricsTxnRead(tr TxnRead) TxnRead {
+	return &metricsTxnWrite{&txnReadWrite{tr}, 0, 0, 0}
+}
+
+func newMetricsTxnWrite(tw TxnWrite) TxnWrite {
+	return &metricsTxnWrite{tw, 0, 0, 0}
+}
+
+func (tw *metricsTxnWrite) Range(key, end []byte, ro RangeOptions) (*RangeResult, error) {
+	tw.ranges++
+	return tw.TxnWrite.Range(key, end, ro)
+}
+
+func (tw *metricsTxnWrite) DeleteRange(key, end []byte) (n, rev int64) {
+	tw.deletes++
+	return tw.TxnWrite.DeleteRange(key, end)
+}
+
+func (tw *metricsTxnWrite) Put(key, value []byte, lease lease.LeaseID) (rev int64) {
+	tw.puts++
+	return tw.TxnWrite.Put(key, value, lease)
+}
+
+func (tw *metricsTxnWrite) End() {
+	defer tw.TxnWrite.End()
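+	// count this End as a txn only when it groups more than one
+	// operation; single ops are reported via the per-op counters alone.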
+	if sum := tw.ranges + tw.puts + tw.deletes; sum > 1 {
+		txnCounter.Inc()
+	}
+	rangeCounter.Add(float64(tw.ranges))
+	putCounter.Add(float64(tw.puts))
+	deleteCounter.Add(float64(tw.deletes))
+}
diff --git a/vendor/github.com/coreos/etcd/mvcc/revision.go b/vendor/github.com/coreos/etcd/mvcc/revision.go
new file mode 100644
index 0000000..5fa35a1
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/mvcc/revision.go
@@ -0,0 +1,67 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import "encoding/binary"
+
+// revBytesLen is the byte length of a normal revision.
+// The first 8 bytes are revision.main in big-endian format, the 9th byte
+// is a '_', and the last 8 bytes are revision.sub in big-endian format.
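+//
+// For example, revision{main: 1, sub: 2} encodes to the 17 bytes
+// 00 00 00 00 00 00 00 01 5f 00 00 00 00 00 00 00 02, where 0x5f is '_'.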
+const revBytesLen = 8 + 1 + 8
+
+// A revision indicates modification of the key-value space.
+// The set of changes that share the same main revision changes the key-value space atomically.
+type revision struct {
+	// main is the main revision of a set of changes that happen atomically.
+	main int64
+
+	// sub is the sub revision of a change in a set of changes that happen
+	// atomically. Each change has a distinct, increasing sub revision in
+	// that set.
+	sub int64
+}
+
+func (a revision) GreaterThan(b revision) bool {
+	if a.main > b.main {
+		return true
+	}
+	if a.main < b.main {
+		return false
+	}
+	return a.sub > b.sub
+}
+
+func newRevBytes() []byte {
+	return make([]byte, revBytesLen, markedRevBytesLen)
+}
+
+func revToBytes(rev revision, bytes []byte) {
+	binary.BigEndian.PutUint64(bytes, uint64(rev.main))
+	bytes[8] = '_'
+	binary.BigEndian.PutUint64(bytes[9:], uint64(rev.sub))
+}
+
+func bytesToRev(bytes []byte) revision {
+	return revision{
+		main: int64(binary.BigEndian.Uint64(bytes[0:8])),
+		sub:  int64(binary.BigEndian.Uint64(bytes[9:])),
+	}
+}
+
+type revisions []revision
+
+func (a revisions) Len() int           { return len(a) }
+func (a revisions) Less(i, j int) bool { return a[j].GreaterThan(a[i]) }
+func (a revisions) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
diff --git a/vendor/github.com/coreos/etcd/mvcc/util.go b/vendor/github.com/coreos/etcd/mvcc/util.go
new file mode 100644
index 0000000..8a0df0b
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/mvcc/util.go
@@ -0,0 +1,56 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+	"encoding/binary"
+
+	"github.com/coreos/etcd/mvcc/backend"
+	"github.com/coreos/etcd/mvcc/mvccpb"
+)
+
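+// UpdateConsistentIndex stores the given index in the meta bucket, but
+// only when it is greater than the previously stored index, so the
+// consistent index never moves backward.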
+func UpdateConsistentIndex(be backend.Backend, index uint64) {
+	tx := be.BatchTx()
+	tx.Lock()
+	defer tx.Unlock()
+
+	var oldi uint64
+	_, vs := tx.UnsafeRange(metaBucketName, consistentIndexKeyName, nil, 0)
+	if len(vs) != 0 {
+		oldi = binary.BigEndian.Uint64(vs[0])
+	}
+
+	if index <= oldi {
+		return
+	}
+
+	bs := make([]byte, 8)
+	binary.BigEndian.PutUint64(bs, index)
+	tx.UnsafePut(metaBucketName, consistentIndexKeyName, bs)
+}
+
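+// WriteKV writes a single key-value pair into the key bucket at the
+// revision given by kv.ModRevision, bypassing the regular txn path.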
+func WriteKV(be backend.Backend, kv mvccpb.KeyValue) {
+	ibytes := newRevBytes()
+	revToBytes(revision{main: kv.ModRevision}, ibytes)
+
+	d, err := kv.Marshal()
+	if err != nil {
+		plog.Fatalf("cannot marshal event: %v", err)
+	}
+
+	tx := be.BatchTx()
+	tx.Lock()
+	tx.UnsafePut(keyBucketName, ibytes, d)
+	tx.Unlock()
+}
diff --git a/vendor/github.com/coreos/etcd/mvcc/watchable_store.go b/vendor/github.com/coreos/etcd/mvcc/watchable_store.go
new file mode 100644
index 0000000..78df193
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/mvcc/watchable_store.go
@@ -0,0 +1,534 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+	"sync"
+	"time"
+
+	"github.com/coreos/etcd/lease"
+	"github.com/coreos/etcd/mvcc/backend"
+	"github.com/coreos/etcd/mvcc/mvccpb"
+)
+
+// non-const so modifiable by tests
+var (
+	// chanBufLen is the length of the buffered chan
+	// for sending out watched events.
+	// TODO: find a good buf value. 1024 is just a random one that
+	// seems to be reasonable.
+	chanBufLen = 1024
+
+	// maxWatchersPerSync is the number of watchers to sync in a single batch
+	maxWatchersPerSync = 512
+)
+
+type watchable interface {
+	watch(key, end []byte, startRev int64, id WatchID, ch chan<- WatchResponse, fcs ...FilterFunc) (*watcher, cancelFunc)
+	progress(w *watcher)
+	rev() int64
+}
+
+type watchableStore struct {
+	*store
+
+	// mu protects watcher groups and batches. It should never be locked
+	// before locking store.mu to avoid deadlock.
+	mu sync.RWMutex
+
+	// victims are watcher batches that were blocked on the watch channel
+	victims []watcherBatch
+	victimc chan struct{}
+
+	// contains all unsynced watchers that need to sync with events that have happened
+	unsynced watcherGroup
+
+	// contains all synced watchers that are in sync with the progress of the store.
+	// The key of the map is the key that the watcher watches on.
+	synced watcherGroup
+
+	stopc chan struct{}
+	wg    sync.WaitGroup
+}
+
+// cancelFunc updates unsynced and synced maps when running
+// cancel operations.
+type cancelFunc func()
+
+func New(b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) ConsistentWatchableKV {
+	return newWatchableStore(b, le, ig)
+}
+
+func newWatchableStore(b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) *watchableStore {
+	s := &watchableStore{
+		store:    NewStore(b, le, ig),
+		victimc:  make(chan struct{}, 1),
+		unsynced: newWatcherGroup(),
+		synced:   newWatcherGroup(),
+		stopc:    make(chan struct{}),
+	}
+	s.store.ReadView = &readView{s}
+	s.store.WriteView = &writeView{s}
+	if s.le != nil {
+		// use this store as the deleter so revokes trigger watch events
+		s.le.SetRangeDeleter(func() lease.TxnDelete { return s.Write() })
+	}
+	s.wg.Add(2)
+	go s.syncWatchersLoop()
+	go s.syncVictimsLoop()
+	return s
+}
+
+func (s *watchableStore) Close() error {
+	close(s.stopc)
+	s.wg.Wait()
+	return s.store.Close()
+}
+
+func (s *watchableStore) NewWatchStream() WatchStream {
+	watchStreamGauge.Inc()
+	return &watchStream{
+		watchable: s,
+		ch:        make(chan WatchResponse, chanBufLen),
+		cancels:   make(map[WatchID]cancelFunc),
+		watchers:  make(map[WatchID]*watcher),
+	}
+}
+
+func (s *watchableStore) watch(key, end []byte, startRev int64, id WatchID, ch chan<- WatchResponse, fcs ...FilterFunc) (*watcher, cancelFunc) {
+	wa := &watcher{
+		key:    key,
+		end:    end,
+		minRev: startRev,
+		id:     id,
+		ch:     ch,
+		fcs:    fcs,
+	}
+
+	s.mu.Lock()
+	s.revMu.RLock()
+	synced := startRev > s.store.currentRev || startRev == 0
+	if synced {
+		wa.minRev = s.store.currentRev + 1
+		if startRev > wa.minRev {
+			wa.minRev = startRev
+		}
+		s.synced.add(wa)
+	} else {
+		slowWatcherGauge.Inc()
+		s.unsynced.add(wa)
+	}
+	s.revMu.RUnlock()
+	s.mu.Unlock()
+
+	watcherGauge.Inc()
+
+	return wa, func() { s.cancelWatcher(wa) }
+}
+
+// cancelWatcher removes references to the watcher from the watchableStore
+func (s *watchableStore) cancelWatcher(wa *watcher) {
+	for {
+		s.mu.Lock()
+		if s.unsynced.delete(wa) {
+			slowWatcherGauge.Dec()
+			break
+		} else if s.synced.delete(wa) {
+			break
+		} else if wa.compacted {
+			break
+		} else if wa.ch == nil {
+			// already canceled (e.g., cancel/close race)
+			break
+		}
+
+		if !wa.victim {
+			panic("watcher not victim but not in watch groups")
+		}
+
+		var victimBatch watcherBatch
+		for _, wb := range s.victims {
+			if wb[wa] != nil {
+				victimBatch = wb
+				break
+			}
+		}
+		if victimBatch != nil {
+			slowWatcherGauge.Dec()
+			delete(victimBatch, wa)
+			break
+		}
+
+		// victim being processed so not accessible; retry
+		s.mu.Unlock()
+		time.Sleep(time.Millisecond)
+	}
+
+	watcherGauge.Dec()
+	wa.ch = nil
+	s.mu.Unlock()
+}
+
+func (s *watchableStore) Restore(b backend.Backend) error {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	err := s.store.Restore(b)
+	if err != nil {
+		return err
+	}
+
+	for wa := range s.synced.watchers {
+		wa.restore = true
+		s.unsynced.add(wa)
+	}
+	s.synced = newWatcherGroup()
+	return nil
+}
+
+// syncWatchersLoop syncs the watchers in the unsynced map every 100ms.
+func (s *watchableStore) syncWatchersLoop() {
+	defer s.wg.Done()
+
+	for {
+		s.mu.RLock()
+		st := time.Now()
+		lastUnsyncedWatchers := s.unsynced.size()
+		s.mu.RUnlock()
+
+		unsyncedWatchers := 0
+		if lastUnsyncedWatchers > 0 {
+			unsyncedWatchers = s.syncWatchers()
+		}
+		syncDuration := time.Since(st)
+
+		waitDuration := 100 * time.Millisecond
+		// more work pending?
+		if unsyncedWatchers != 0 && lastUnsyncedWatchers > unsyncedWatchers {
+			// be fair to other store operations by yielding time taken
+			waitDuration = syncDuration
+		}
+
+		select {
+		case <-time.After(waitDuration):
+		case <-s.stopc:
+			return
+		}
+	}
+}
+
+// syncVictimsLoop tries to write precomputed watcher responses to
+// watchers that had a blocked watcher channel
+func (s *watchableStore) syncVictimsLoop() {
+	defer s.wg.Done()
+
+	for {
+		for s.moveVictims() != 0 {
+			// try to update all victim watchers
+		}
+		s.mu.RLock()
+		isEmpty := len(s.victims) == 0
+		s.mu.RUnlock()
+
+		var tickc <-chan time.Time
+		if !isEmpty {
+			tickc = time.After(10 * time.Millisecond)
+		}
+
+		select {
+		case <-tickc:
+		case <-s.victimc:
+		case <-s.stopc:
+			return
+		}
+	}
+}
+
+// moveVictims tries to update watches with already pending event data
+func (s *watchableStore) moveVictims() (moved int) {
+	s.mu.Lock()
+	victims := s.victims
+	s.victims = nil
+	s.mu.Unlock()
+
+	var newVictim watcherBatch
+	for _, wb := range victims {
+		// try to send responses again
+		for w, eb := range wb {
+			// watcher has observed the store up to, but not including, w.minRev
+			rev := w.minRev - 1
+			if w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: rev}) {
+				pendingEventsGauge.Add(float64(len(eb.evs)))
+			} else {
+				if newVictim == nil {
+					newVictim = make(watcherBatch)
+				}
+				newVictim[w] = eb
+				continue
+			}
+			moved++
+		}
+
+		// assign completed victim watchers to the unsynced/synced groups
+		s.mu.Lock()
+		s.store.revMu.RLock()
+		curRev := s.store.currentRev
+		for w, eb := range wb {
+			if newVictim != nil && newVictim[w] != nil {
+				// couldn't send watch response; stays victim
+				continue
+			}
+			w.victim = false
+			if eb.moreRev != 0 {
+				w.minRev = eb.moreRev
+			}
+			if w.minRev <= curRev {
+				s.unsynced.add(w)
+			} else {
+				slowWatcherGauge.Dec()
+				s.synced.add(w)
+			}
+		}
+		s.store.revMu.RUnlock()
+		s.mu.Unlock()
+	}
+
+	if len(newVictim) > 0 {
+		s.mu.Lock()
+		s.victims = append(s.victims, newVictim)
+		s.mu.Unlock()
+	}
+
+	return moved
+}
+
+// syncWatchers syncs unsynced watchers by:
+//	1. choose a set of watchers from the unsynced watcher group
+//	2. iterate over the set to get the minimum revision and remove compacted watchers
+//	3. use minimum revision to get all key-value pairs and send those events to watchers
+//	4. remove synced watchers in set from unsynced group and move to synced group
+func (s *watchableStore) syncWatchers() int {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	if s.unsynced.size() == 0 {
+		return 0
+	}
+
+	s.store.revMu.RLock()
+	defer s.store.revMu.RUnlock()
+
+	// in order to find key-value pairs from unsynced watchers, we need to
+	// find the minimum revision index; these revisions can then be used to
+	// query the backend store for key-value pairs
+	curRev := s.store.currentRev
+	compactionRev := s.store.compactMainRev
+
+	wg, minRev := s.unsynced.choose(maxWatchersPerSync, curRev, compactionRev)
+	minBytes, maxBytes := newRevBytes(), newRevBytes()
+	revToBytes(revision{main: minRev}, minBytes)
+	revToBytes(revision{main: curRev + 1}, maxBytes)
+
+	// UnsafeRange returns keys and values. In boltdb, the keys are revisions
+	// and the values are the actual key-value pairs stored in the backend.
+	tx := s.store.b.ReadTx()
+	tx.Lock()
+	revs, vs := tx.UnsafeRange(keyBucketName, minBytes, maxBytes, 0)
+	evs := kvsToEvents(wg, revs, vs)
+	tx.Unlock()
+
+	var victims watcherBatch
+	wb := newWatcherBatch(wg, evs)
+	for w := range wg.watchers {
+		w.minRev = curRev + 1
+
+		eb, ok := wb[w]
+		if !ok {
+			// bring un-notified watcher to synced
+			s.synced.add(w)
+			s.unsynced.delete(w)
+			continue
+		}
+
+		if eb.moreRev != 0 {
+			w.minRev = eb.moreRev
+		}
+
+		if w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: curRev}) {
+			pendingEventsGauge.Add(float64(len(eb.evs)))
+		} else {
+			if victims == nil {
+				victims = make(watcherBatch)
+			}
+			w.victim = true
+		}
+
+		if w.victim {
+			victims[w] = eb
+		} else {
+			if eb.moreRev != 0 {
+				// stay unsynced; more to read
+				continue
+			}
+			s.synced.add(w)
+		}
+		s.unsynced.delete(w)
+	}
+	s.addVictim(victims)
+
+	vsz := 0
+	for _, v := range s.victims {
+		vsz += len(v)
+	}
+	slowWatcherGauge.Set(float64(s.unsynced.size() + vsz))
+
+	return s.unsynced.size()
+}
+
+// kvsToEvents gets all events for the watchers from all key-value pairs
+func kvsToEvents(wg *watcherGroup, revs, vals [][]byte) (evs []mvccpb.Event) {
+	for i, v := range vals {
+		var kv mvccpb.KeyValue
+		if err := kv.Unmarshal(v); err != nil {
+			plog.Panicf("cannot unmarshal event: %v", err)
+		}
+
+		if !wg.contains(string(kv.Key)) {
+			continue
+		}
+
+		ty := mvccpb.PUT
+		if isTombstone(revs[i]) {
+			ty = mvccpb.DELETE
+			// patch in mod revision so watchers won't skip
+			kv.ModRevision = bytesToRev(revs[i]).main
+		}
+		evs = append(evs, mvccpb.Event{Kv: &kv, Type: ty})
+	}
+	return evs
+}
+
+// notify sends out the given events, which happened at the given rev,
+// to the watchers that watch on the keys of the events.
+func (s *watchableStore) notify(rev int64, evs []mvccpb.Event) {
+	var victim watcherBatch
+	for w, eb := range newWatcherBatch(&s.synced, evs) {
+		if eb.revs != 1 {
+			plog.Panicf("unexpected multiple revisions in notification")
+		}
+		if w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: rev}) {
+			pendingEventsGauge.Add(float64(len(eb.evs)))
+		} else {
+			// move slow watcher to victims
+			w.minRev = rev + 1
+			if victim == nil {
+				victim = make(watcherBatch)
+			}
+			w.victim = true
+			victim[w] = eb
+			s.synced.delete(w)
+			slowWatcherGauge.Inc()
+		}
+	}
+	s.addVictim(victim)
+}
+
+func (s *watchableStore) addVictim(victim watcherBatch) {
+	if victim == nil {
+		return
+	}
+	s.victims = append(s.victims, victim)
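+	// wake up syncVictimsLoop without blocking; if the buffered signal
+	// is already pending, a wakeup is guaranteed anyway.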
+	select {
+	case s.victimc <- struct{}{}:
+	default:
+	}
+}
+
+func (s *watchableStore) rev() int64 { return s.store.Rev() }
+
+func (s *watchableStore) progress(w *watcher) {
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+
+	if _, ok := s.synced.watchers[w]; ok {
+		w.send(WatchResponse{WatchID: w.id, Revision: s.rev()})
+		// If the ch is full, this watcher is receiving events.
+		// We do not need to send progress at all.
+	}
+}
+
+type watcher struct {
+	// the watcher key
+	key []byte
+	// end indicates the end of the range to watch.
+	// If end is set, the watcher is on a range.
+	end []byte
+
+	// victim is set when ch is blocked and undergoing victim processing
+	victim bool
+
+	// compacted is set when the watcher is removed because of compaction
+	compacted bool
+
+	// restore is true when the watcher is being restored from a leader snapshot,
+	// which means the watcher has just been moved from the "synced" to the
+	// "unsynced" watcher group, possibly with a future revision from when it
+	// was first added to the synced group.
+	// An "unsynced" watcher's revision must always be <= the current revision,
+	// except when the watcher was moved from the "synced" watcher group.
+	restore bool
+
+	// minRev is the minimum revision update the watcher will accept
+	minRev int64
+	id     WatchID
+
+	fcs []FilterFunc
+	// a chan to send out the watch response.
+	// The chan might be shared with other watchers.
+	ch chan<- WatchResponse
+}
+
+func (w *watcher) send(wr WatchResponse) bool {
+	progressEvent := len(wr.Events) == 0
+
+	if len(w.fcs) != 0 {
+		ne := make([]mvccpb.Event, 0, len(wr.Events))
+		for i := range wr.Events {
+			filtered := false
+			for _, filter := range w.fcs {
+				if filter(wr.Events[i]) {
+					filtered = true
+					break
+				}
+			}
+			if !filtered {
+				ne = append(ne, wr.Events[i])
+			}
+		}
+		wr.Events = ne
+	}
+
+	// if all events are filtered out, we should send nothing.
+	if !progressEvent && len(wr.Events) == 0 {
+		return true
+	}
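+	// non-blocking send: a false return means the channel is full and
+	// the caller treats this watcher as a victim (see notify).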
+	select {
+	case w.ch <- wr:
+		return true
+	default:
+		return false
+	}
+}
diff --git a/vendor/github.com/coreos/etcd/mvcc/watchable_store_txn.go b/vendor/github.com/coreos/etcd/mvcc/watchable_store_txn.go
new file mode 100644
index 0000000..5c5bfda
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/mvcc/watchable_store_txn.go
@@ -0,0 +1,53 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+	"github.com/coreos/etcd/mvcc/mvccpb"
+)
+
+func (tw *watchableStoreTxnWrite) End() {
+	changes := tw.Changes()
+	if len(changes) == 0 {
+		tw.TxnWrite.End()
+		return
+	}
+
+	rev := tw.Rev() + 1
+	evs := make([]mvccpb.Event, len(changes))
+	for i, change := range changes {
+		evs[i].Kv = &changes[i]
+		if change.CreateRevision == 0 {
+			evs[i].Type = mvccpb.DELETE
+			evs[i].Kv.ModRevision = rev
+		} else {
+			evs[i].Type = mvccpb.PUT
+		}
+	}
+
+	// end write txn under watchable store lock so the updates are visible
+	// when asynchronous event posting checks the current store revision
+	tw.s.mu.Lock()
+	tw.s.notify(rev, evs)
+	tw.TxnWrite.End()
+	tw.s.mu.Unlock()
+}
+
+type watchableStoreTxnWrite struct {
+	TxnWrite
+	s *watchableStore
+}
+
+func (s *watchableStore) Write() TxnWrite { return &watchableStoreTxnWrite{s.store.Write(), s} }
diff --git a/vendor/github.com/coreos/etcd/mvcc/watcher.go b/vendor/github.com/coreos/etcd/mvcc/watcher.go
new file mode 100644
index 0000000..bc0c632
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/mvcc/watcher.go
@@ -0,0 +1,180 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+	"bytes"
+	"errors"
+	"sync"
+
+	"github.com/coreos/etcd/mvcc/mvccpb"
+)
+
+var (
+	ErrWatcherNotExist = errors.New("mvcc: watcher does not exist")
+)
+
+type WatchID int64
+
+// FilterFunc returns true if the given event should be filtered out.
+type FilterFunc func(e mvccpb.Event) bool
+
+type WatchStream interface {
+	// Watch creates a watcher. The watcher watches events that happen, or
+	// have happened, on the given key or range [key, end) from the given startRev.
+	//
+	// The whole event history can be watched unless compacted.
+	// If `startRev` <= 0, the watch observes events after currentRev.
+	//
+	// The returned `id` is the ID of this watcher. It appears as WatchID
+	// in events sent to the created watcher through the stream channel.
+	//
+	Watch(key, end []byte, startRev int64, fcs ...FilterFunc) WatchID
+
+	// Chan returns a chan. All watch responses will be sent to the returned chan.
+	Chan() <-chan WatchResponse
+
+	// RequestProgress requests the progress of the watcher with the given ID.
+	// The response will only be sent if the watcher is currently synced.
+	// The response is sent through the WatchResponse Chan attached to
+	// this stream to ensure correct ordering.
+	// The response contains no events. The revision in the response is the
+	// progress of the watcher, since the watcher is currently synced.
+	RequestProgress(id WatchID)
+
+	// Cancel cancels the watcher with the given ID. If the watcher does not
+	// exist, an error is returned.
+	Cancel(id WatchID) error
+
+	// Close closes Chan and releases all related resources.
+	Close()
+
+	// Rev returns the current revision of the KV the stream watches on.
+	Rev() int64
+}
+
+type WatchResponse struct {
+	// WatchID is the WatchID of the watcher this response sent to.
+	WatchID WatchID
+
+	// Events contains all the events that need to be sent.
+	Events []mvccpb.Event
+
+	// Revision is the revision of the KV when the watchResponse is created.
+	// For a normal response, the revision should be the same as the last
+	// modified revision inside Events. For a delayed response to an unsynced
+	// watcher, the revision is greater than the last modified revision
+	// inside Events.
+	Revision int64
+
+	// CompactRevision is set when the watcher is cancelled due to compaction.
+	CompactRevision int64
+}
+
+// watchStream contains a collection of watchers that share
+// one streaming chan to send out watched events and other control events.
+type watchStream struct {
+	watchable watchable
+	ch        chan WatchResponse
+
+	mu sync.Mutex // guards fields below it
+	// nextID is the ID pre-allocated for the next new watcher in this stream
+	nextID   WatchID
+	closed   bool
+	cancels  map[WatchID]cancelFunc
+	watchers map[WatchID]*watcher
+}
+
+// Watch creates a new watcher in the stream and returns its WatchID.
+// TODO: return error if ws is closed?
+func (ws *watchStream) Watch(key, end []byte, startRev int64, fcs ...FilterFunc) WatchID {
+	// prevent wrong ranges where key >= end lexicographically;
+	// a watch request with 'WithFromKey' has an empty-byte range end
+	if len(end) != 0 && bytes.Compare(key, end) != -1 {
+		return -1
+	}
+
+	ws.mu.Lock()
+	defer ws.mu.Unlock()
+	if ws.closed {
+		return -1
+	}
+
+	id := ws.nextID
+	ws.nextID++
+
+	w, c := ws.watchable.watch(key, end, startRev, id, ws.ch, fcs...)
+
+	ws.cancels[id] = c
+	ws.watchers[id] = w
+	return id
+}
+
+func (ws *watchStream) Chan() <-chan WatchResponse {
+	return ws.ch
+}
+
+func (ws *watchStream) Cancel(id WatchID) error {
+	ws.mu.Lock()
+	cancel, ok := ws.cancels[id]
+	w := ws.watchers[id]
+	ok = ok && !ws.closed
+	ws.mu.Unlock()
+
+	if !ok {
+		return ErrWatcherNotExist
+	}
+	cancel()
+
+	ws.mu.Lock()
+	// The watch isn't removed until cancel so that if Close() is called,
+	// it will wait for the cancel. Otherwise, Close() could close the
+	// watch channel while the store is still posting events.
+	if ww := ws.watchers[id]; ww == w {
+		delete(ws.cancels, id)
+		delete(ws.watchers, id)
+	}
+	ws.mu.Unlock()
+
+	return nil
+}
+
+func (ws *watchStream) Close() {
+	ws.mu.Lock()
+	defer ws.mu.Unlock()
+
+	for _, cancel := range ws.cancels {
+		cancel()
+	}
+	ws.closed = true
+	close(ws.ch)
+	watchStreamGauge.Dec()
+}
+
+func (ws *watchStream) Rev() int64 {
+	ws.mu.Lock()
+	defer ws.mu.Unlock()
+	return ws.watchable.rev()
+}
+
+func (ws *watchStream) RequestProgress(id WatchID) {
+	ws.mu.Lock()
+	w, ok := ws.watchers[id]
+	ws.mu.Unlock()
+	if !ok {
+		return
+	}
+	ws.watchable.progress(w)
+}
diff --git a/vendor/github.com/coreos/etcd/mvcc/watcher_group.go b/vendor/github.com/coreos/etcd/mvcc/watcher_group.go
new file mode 100644
index 0000000..b65c7bc
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/mvcc/watcher_group.go
@@ -0,0 +1,292 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+	"fmt"
+	"math"
+
+	"github.com/coreos/etcd/mvcc/mvccpb"
+	"github.com/coreos/etcd/pkg/adt"
+)
+
+var (
+	// watchBatchMaxRevs is the maximum number of distinct revisions that
+	// may be sent to an unsynced watcher at a time. Declared as
+	// var instead of const for testing purposes.
+	watchBatchMaxRevs = 1000
+)
+
+type eventBatch struct {
+	// evs is a batch of revision-ordered events
+	evs []mvccpb.Event
+	// revs counts the unique revisions observed in this batch
+	revs int
+	// moreRev is the first revision with more events following this batch
+	moreRev int64
+}
+
+func (eb *eventBatch) add(ev mvccpb.Event) {
+	if eb.revs > watchBatchMaxRevs {
+		// maxed out batch size
+		return
+	}
+
+	if len(eb.evs) == 0 {
+		// base case
+		eb.revs = 1
+		eb.evs = append(eb.evs, ev)
+		return
+	}
+
+	// revision accounting
+	ebRev := eb.evs[len(eb.evs)-1].Kv.ModRevision
+	evRev := ev.Kv.ModRevision
+	if evRev > ebRev {
+		eb.revs++
+		if eb.revs > watchBatchMaxRevs {
+			eb.moreRev = evRev
+			return
+		}
+	}
+
+	eb.evs = append(eb.evs, ev)
+}
+
+type watcherBatch map[*watcher]*eventBatch
+
+func (wb watcherBatch) add(w *watcher, ev mvccpb.Event) {
+	eb := wb[w]
+	if eb == nil {
+		eb = &eventBatch{}
+		wb[w] = eb
+	}
+	eb.add(ev)
+}
+
+// newWatcherBatch maps watchers to their matched events. It enables quick
+// event lookup by watcher.
+func newWatcherBatch(wg *watcherGroup, evs []mvccpb.Event) watcherBatch {
+	if len(wg.watchers) == 0 {
+		return nil
+	}
+
+	wb := make(watcherBatch)
+	for _, ev := range evs {
+		for w := range wg.watcherSetByKey(string(ev.Kv.Key)) {
+			if ev.Kv.ModRevision >= w.minRev {
+				// don't double notify
+				wb.add(w, ev)
+			}
+		}
+	}
+	return wb
+}
+
+type watcherSet map[*watcher]struct{}
+
+func (w watcherSet) add(wa *watcher) {
+	if _, ok := w[wa]; ok {
+		panic("add watcher twice!")
+	}
+	w[wa] = struct{}{}
+}
+
+func (w watcherSet) union(ws watcherSet) {
+	for wa := range ws {
+		w.add(wa)
+	}
+}
+
+func (w watcherSet) delete(wa *watcher) {
+	if _, ok := w[wa]; !ok {
+		panic("removing missing watcher!")
+	}
+	delete(w, wa)
+}
+
+type watcherSetByKey map[string]watcherSet
+
+func (w watcherSetByKey) add(wa *watcher) {
+	set := w[string(wa.key)]
+	if set == nil {
+		set = make(watcherSet)
+		w[string(wa.key)] = set
+	}
+	set.add(wa)
+}
+
+func (w watcherSetByKey) delete(wa *watcher) bool {
+	k := string(wa.key)
+	if v, ok := w[k]; ok {
+		if _, ok := v[wa]; ok {
+			delete(v, wa)
+			if len(v) == 0 {
+				// remove the set; nothing left
+				delete(w, k)
+			}
+			return true
+		}
+	}
+	return false
+}
+
+// watcherGroup is a collection of watchers organized by their ranges
+type watcherGroup struct {
+	// keyWatchers has the watchers that watch on a single key
+	keyWatchers watcherSetByKey
+	// ranges has the watchers that watch a range; it is sorted by interval
+	ranges adt.IntervalTree
+	// watchers is the set of all watchers
+	watchers watcherSet
+}
+
+func newWatcherGroup() watcherGroup {
+	return watcherGroup{
+		keyWatchers: make(watcherSetByKey),
+		watchers:    make(watcherSet),
+	}
+}
+
+// add puts a watcher in the group.
+func (wg *watcherGroup) add(wa *watcher) {
+	wg.watchers.add(wa)
+	if wa.end == nil {
+		wg.keyWatchers.add(wa)
+		return
+	}
+
+	// interval already registered?
+	ivl := adt.NewStringAffineInterval(string(wa.key), string(wa.end))
+	if iv := wg.ranges.Find(ivl); iv != nil {
+		iv.Val.(watcherSet).add(wa)
+		return
+	}
+
+	// not registered, put in interval tree
+	ws := make(watcherSet)
+	ws.add(wa)
+	wg.ranges.Insert(ivl, ws)
+}
+
+// contains reports whether the given key has a watcher in the group.
+func (wg *watcherGroup) contains(key string) bool {
+	_, ok := wg.keyWatchers[key]
+	return ok || wg.ranges.Intersects(adt.NewStringAffinePoint(key))
+}
+
+// size gives the number of unique watchers in the group.
+func (wg *watcherGroup) size() int { return len(wg.watchers) }
+
+// delete removes a watcher from the group.
+func (wg *watcherGroup) delete(wa *watcher) bool {
+	if _, ok := wg.watchers[wa]; !ok {
+		return false
+	}
+	wg.watchers.delete(wa)
+	if wa.end == nil {
+		wg.keyWatchers.delete(wa)
+		return true
+	}
+
+	ivl := adt.NewStringAffineInterval(string(wa.key), string(wa.end))
+	iv := wg.ranges.Find(ivl)
+	if iv == nil {
+		return false
+	}
+
+	ws := iv.Val.(watcherSet)
+	delete(ws, wa)
+	if len(ws) == 0 {
+		// remove the interval since it no longer has any watchers
+		if ok := wg.ranges.Delete(ivl); !ok {
+			panic("could not remove watcher from interval tree")
+		}
+	}
+
+	return true
+}
+
+// choose selects watchers from the watcher group to update
+func (wg *watcherGroup) choose(maxWatchers int, curRev, compactRev int64) (*watcherGroup, int64) {
+	if len(wg.watchers) < maxWatchers {
+		return wg, wg.chooseAll(curRev, compactRev)
+	}
+	ret := newWatcherGroup()
+	for w := range wg.watchers {
+		if maxWatchers <= 0 {
+			break
+		}
+		maxWatchers--
+		ret.add(w)
+	}
+	return &ret, ret.chooseAll(curRev, compactRev)
+}
+
+func (wg *watcherGroup) chooseAll(curRev, compactRev int64) int64 {
+	minRev := int64(math.MaxInt64)
+	for w := range wg.watchers {
+		if w.minRev > curRev {
+			// after a network partition, a future-revision watcher may be chosen from a restore operation
+			// (e.g., watch key "proxy-namespace__lostleader" at revision "math.MaxInt64 - 2");
+			// do not panic when such a watcher has been moved from "synced" during restore
+			if !w.restore {
+				panic(fmt.Errorf("watcher minimum revision %d should not exceed current revision %d", w.minRev, curRev))
+			}
+
+			// mark 'restore' done, since it's chosen
+			w.restore = false
+		}
+		if w.minRev < compactRev {
+			select {
+			case w.ch <- WatchResponse{WatchID: w.id, CompactRevision: compactRev}:
+				w.compacted = true
+				wg.delete(w)
+			default:
+				// retry next time
+			}
+			continue
+		}
+		if minRev > w.minRev {
+			minRev = w.minRev
+		}
+	}
+	return minRev
+}
+
+// watcherSetByKey gets the set of watchers that receive events on the given key.
+func (wg *watcherGroup) watcherSetByKey(key string) watcherSet {
+	wkeys := wg.keyWatchers[key]
+	wranges := wg.ranges.Stab(adt.NewStringAffinePoint(key))
+
+	// zero-copy cases
+	switch {
+	case len(wranges) == 0 && len(wkeys) == 0:
+		return nil
+	case len(wranges) == 0:
+		// no need to merge ranges or copy; reuse single-key set
+		return wkeys
+	case len(wranges) == 1 && len(wkeys) == 0:
+		return wranges[0].Val.(watcherSet)
+	}
+
+	// copy case
+	ret := make(watcherSet)
+	ret.union(wg.keyWatchers[key])
+	for _, item := range wranges {
+		ret.union(item.Val.(watcherSet))
+	}
+	return ret
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/adt/doc.go b/vendor/github.com/coreos/etcd/pkg/adt/doc.go
new file mode 100644
index 0000000..1a95591
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/adt/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package adt implements useful abstract data types.
+package adt
diff --git a/vendor/github.com/coreos/etcd/pkg/adt/interval_tree.go b/vendor/github.com/coreos/etcd/pkg/adt/interval_tree.go
new file mode 100644
index 0000000..ec302e4
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/adt/interval_tree.go
@@ -0,0 +1,599 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adt
+
+import (
+	"bytes"
+	"math"
+)
+
+// Comparable is an interface for trichotomic comparisons.
+type Comparable interface {
+	// Compare gives the result of a 3-way comparison
+	// a.Compare(b) = 1 => a > b
+	// a.Compare(b) = 0 => a == b
+	// a.Compare(b) = -1 => a < b
+	Compare(c Comparable) int
+}
+
+type rbcolor int
+
+const (
+	black rbcolor = iota
+	red
+)
+
+// Interval implements a Comparable interval [begin, end)
+// TODO: support different sorts of intervals: (a,b), [a,b], (a, b]
+type Interval struct {
+	Begin Comparable
+	End   Comparable
+}
+
+// Compare on an interval gives 0 if the intervals overlap.
+func (ivl *Interval) Compare(c Comparable) int {
+	ivl2 := c.(*Interval)
+	ivbCmpBegin := ivl.Begin.Compare(ivl2.Begin)
+	ivbCmpEnd := ivl.Begin.Compare(ivl2.End)
+	iveCmpBegin := ivl.End.Compare(ivl2.Begin)
+
+	// ivl is left of ivl2
+	if ivbCmpBegin < 0 && iveCmpBegin <= 0 {
+		return -1
+	}
+
+	// ivl is right of ivl2
+	if ivbCmpEnd >= 0 {
+		return 1
+	}
+
+	return 0
+}
+
+type intervalNode struct {
+	// iv is the interval-value pair entry.
+	iv IntervalValue
+	// max endpoint of all descendant nodes.
+	max Comparable
+	// left and right are sorted by low endpoint of key interval
+	left, right *intervalNode
+	// parent is the direct ancestor of the node
+	parent *intervalNode
+	c      rbcolor
+}
+
+func (x *intervalNode) color() rbcolor {
+	if x == nil {
+		return black
+	}
+	return x.c
+}
+
+func (n *intervalNode) height() int {
+	if n == nil {
+		return 0
+	}
+	ld := n.left.height()
+	rd := n.right.height()
+	if ld < rd {
+		return rd + 1
+	}
+	return ld + 1
+}
+
+func (x *intervalNode) min() *intervalNode {
+	for x.left != nil {
+		x = x.left
+	}
+	return x
+}
+
+// successor is the next in-order node in the tree
+func (x *intervalNode) successor() *intervalNode {
+	if x.right != nil {
+		return x.right.min()
+	}
+	y := x.parent
+	for y != nil && x == y.right {
+		x = y
+		y = y.parent
+	}
+	return y
+}
+
+// updateMax updates the maximum values for a node and its ancestors
+func (x *intervalNode) updateMax() {
+	for x != nil {
+		oldmax := x.max
+		max := x.iv.Ivl.End
+		if x.left != nil && x.left.max.Compare(max) > 0 {
+			max = x.left.max
+		}
+		if x.right != nil && x.right.max.Compare(max) > 0 {
+			max = x.right.max
+		}
+		if oldmax.Compare(max) == 0 {
+			break
+		}
+		x.max = max
+		x = x.parent
+	}
+}
+
+type nodeVisitor func(n *intervalNode) bool
+
+// visit will call a node visitor on each node that overlaps the given interval
+func (x *intervalNode) visit(iv *Interval, nv nodeVisitor) bool {
+	if x == nil {
+		return true
+	}
+	v := iv.Compare(&x.iv.Ivl)
+	switch {
+	case v < 0:
+		if !x.left.visit(iv, nv) {
+			return false
+		}
+	case v > 0:
+		maxiv := Interval{x.iv.Ivl.Begin, x.max}
+		if maxiv.Compare(iv) == 0 {
+			if !x.left.visit(iv, nv) || !x.right.visit(iv, nv) {
+				return false
+			}
+		}
+	default:
+		if !x.left.visit(iv, nv) || !nv(x) || !x.right.visit(iv, nv) {
+			return false
+		}
+	}
+	return true
+}
+
+type IntervalValue struct {
+	Ivl Interval
+	Val interface{}
+}
+
+// IntervalTree represents a (mostly) textbook implementation of the
+// "Introduction to Algorithms" (Cormen et al, 2nd ed.) chapter 13 red-black tree
+// and chapter 14.3 interval tree with search supporting "stabbing queries".
+type IntervalTree struct {
+	root  *intervalNode
+	count int
+}
+
+// Delete removes the node with the given interval from the tree, returning
+// true if a node is in fact removed.
+func (ivt *IntervalTree) Delete(ivl Interval) bool {
+	z := ivt.find(ivl)
+	if z == nil {
+		return false
+	}
+
+	y := z
+	if z.left != nil && z.right != nil {
+		y = z.successor()
+	}
+
+	x := y.left
+	if x == nil {
+		x = y.right
+	}
+	if x != nil {
+		x.parent = y.parent
+	}
+
+	if y.parent == nil {
+		ivt.root = x
+	} else {
+		if y == y.parent.left {
+			y.parent.left = x
+		} else {
+			y.parent.right = x
+		}
+		y.parent.updateMax()
+	}
+	if y != z {
+		z.iv = y.iv
+		z.updateMax()
+	}
+
+	if y.color() == black && x != nil {
+		ivt.deleteFixup(x)
+	}
+
+	ivt.count--
+	return true
+}
+
+func (ivt *IntervalTree) deleteFixup(x *intervalNode) {
+	for x != ivt.root && x.color() == black && x.parent != nil {
+		if x == x.parent.left {
+			w := x.parent.right
+			if w.color() == red {
+				w.c = black
+				x.parent.c = red
+				ivt.rotateLeft(x.parent)
+				w = x.parent.right
+			}
+			if w == nil {
+				break
+			}
+			if w.left.color() == black && w.right.color() == black {
+				w.c = red
+				x = x.parent
+			} else {
+				if w.right.color() == black {
+					w.left.c = black
+					w.c = red
+					ivt.rotateRight(w)
+					w = x.parent.right
+				}
+				w.c = x.parent.color()
+				x.parent.c = black
+				w.right.c = black
+				ivt.rotateLeft(x.parent)
+				x = ivt.root
+			}
+		} else {
+			// same as above but with left and right exchanged
+			w := x.parent.left
+			if w.color() == red {
+				w.c = black
+				x.parent.c = red
+				ivt.rotateRight(x.parent)
+				w = x.parent.left
+			}
+			if w == nil {
+				break
+			}
+			if w.left.color() == black && w.right.color() == black {
+				w.c = red
+				x = x.parent
+			} else {
+				if w.left.color() == black {
+					w.right.c = black
+					w.c = red
+					ivt.rotateLeft(w)
+					w = x.parent.left
+				}
+				w.c = x.parent.color()
+				x.parent.c = black
+				w.left.c = black
+				ivt.rotateRight(x.parent)
+				x = ivt.root
+			}
+		}
+	}
+	if x != nil {
+		x.c = black
+	}
+}
+
+// Insert adds a node with the given interval into the tree.
+func (ivt *IntervalTree) Insert(ivl Interval, val interface{}) {
+	var y *intervalNode
+	z := &intervalNode{iv: IntervalValue{ivl, val}, max: ivl.End, c: red}
+	x := ivt.root
+	for x != nil {
+		y = x
+		if z.iv.Ivl.Begin.Compare(x.iv.Ivl.Begin) < 0 {
+			x = x.left
+		} else {
+			x = x.right
+		}
+	}
+
+	z.parent = y
+	if y == nil {
+		ivt.root = z
+	} else {
+		if z.iv.Ivl.Begin.Compare(y.iv.Ivl.Begin) < 0 {
+			y.left = z
+		} else {
+			y.right = z
+		}
+		y.updateMax()
+	}
+	z.c = red
+	ivt.insertFixup(z)
+	ivt.count++
+}
+
+func (ivt *IntervalTree) insertFixup(z *intervalNode) {
+	for z.parent != nil && z.parent.parent != nil && z.parent.color() == red {
+		if z.parent == z.parent.parent.left {
+			y := z.parent.parent.right
+			if y.color() == red {
+				y.c = black
+				z.parent.c = black
+				z.parent.parent.c = red
+				z = z.parent.parent
+			} else {
+				if z == z.parent.right {
+					z = z.parent
+					ivt.rotateLeft(z)
+				}
+				z.parent.c = black
+				z.parent.parent.c = red
+				ivt.rotateRight(z.parent.parent)
+			}
+		} else {
+			// same as above but with left and right exchanged
+			y := z.parent.parent.left
+			if y.color() == red {
+				y.c = black
+				z.parent.c = black
+				z.parent.parent.c = red
+				z = z.parent.parent
+			} else {
+				if z == z.parent.left {
+					z = z.parent
+					ivt.rotateRight(z)
+				}
+				z.parent.c = black
+				z.parent.parent.c = red
+				ivt.rotateLeft(z.parent.parent)
+			}
+		}
+	}
+	ivt.root.c = black
+}
+
+// rotateLeft moves x so it is left of its right child
+func (ivt *IntervalTree) rotateLeft(x *intervalNode) {
+	y := x.right
+	x.right = y.left
+	if y.left != nil {
+		y.left.parent = x
+	}
+	x.updateMax()
+	ivt.replaceParent(x, y)
+	y.left = x
+	y.updateMax()
+}
+
+// rotateRight moves x so it is right of its left child
+func (ivt *IntervalTree) rotateRight(x *intervalNode) {
+	if x == nil {
+		return
+	}
+	y := x.left
+	x.left = y.right
+	if y.right != nil {
+		y.right.parent = x
+	}
+	x.updateMax()
+	ivt.replaceParent(x, y)
+	y.right = x
+	y.updateMax()
+}
+
+// replaceParent replaces x's parent with y
+func (ivt *IntervalTree) replaceParent(x *intervalNode, y *intervalNode) {
+	y.parent = x.parent
+	if x.parent == nil {
+		ivt.root = y
+	} else {
+		if x == x.parent.left {
+			x.parent.left = y
+		} else {
+			x.parent.right = y
+		}
+		x.parent.updateMax()
+	}
+	x.parent = y
+}
+
+// Len gives the number of elements in the tree
+func (ivt *IntervalTree) Len() int { return ivt.count }
+
+// Height is the number of levels in the tree; one node has height 1.
+func (ivt *IntervalTree) Height() int { return ivt.root.height() }
+
+// MaxHeight is the expected maximum tree height given the number of nodes
+func (ivt *IntervalTree) MaxHeight() int {
+	return int((2 * math.Log2(float64(ivt.Len()+1))) + 0.5)
+}
+
+// IntervalVisitor is used on tree searches; return false to stop searching.
+type IntervalVisitor func(n *IntervalValue) bool
+
+// Visit calls a visitor function on every tree node intersecting the given interval.
+// It will visit each interval [x, y) in ascending order sorted on x.
+func (ivt *IntervalTree) Visit(ivl Interval, ivv IntervalVisitor) {
+	ivt.root.visit(&ivl, func(n *intervalNode) bool { return ivv(&n.iv) })
+}
+
+// find the exact node for a given interval
+func (ivt *IntervalTree) find(ivl Interval) (ret *intervalNode) {
+	f := func(n *intervalNode) bool {
+		if n.iv.Ivl != ivl {
+			return true
+		}
+		ret = n
+		return false
+	}
+	ivt.root.visit(&ivl, f)
+	return ret
+}
+
+// Find gets the IntervalValue for the node matching the given interval
+func (ivt *IntervalTree) Find(ivl Interval) (ret *IntervalValue) {
+	n := ivt.find(ivl)
+	if n == nil {
+		return nil
+	}
+	return &n.iv
+}
+
+// Intersects returns true if there is some tree node intersecting the given interval.
+func (ivt *IntervalTree) Intersects(iv Interval) bool {
+	x := ivt.root
+	for x != nil && iv.Compare(&x.iv.Ivl) != 0 {
+		if x.left != nil && x.left.max.Compare(iv.Begin) > 0 {
+			x = x.left
+		} else {
+			x = x.right
+		}
+	}
+	return x != nil
+}
+
+// Contains returns true if the interval tree's keys cover the entire given interval.
+func (ivt *IntervalTree) Contains(ivl Interval) bool {
+	var maxEnd, minBegin Comparable
+
+	isContiguous := true
+	ivt.Visit(ivl, func(n *IntervalValue) bool {
+		if minBegin == nil {
+			minBegin = n.Ivl.Begin
+			maxEnd = n.Ivl.End
+			return true
+		}
+		if maxEnd.Compare(n.Ivl.Begin) < 0 {
+			isContiguous = false
+			return false
+		}
+		if n.Ivl.End.Compare(maxEnd) > 0 {
+			maxEnd = n.Ivl.End
+		}
+		return true
+	})
+
+	return isContiguous && minBegin != nil && maxEnd.Compare(ivl.End) >= 0 && minBegin.Compare(ivl.Begin) <= 0
+}
+
+// Stab returns a slice with all elements in the tree intersecting the interval.
+func (ivt *IntervalTree) Stab(iv Interval) (ivs []*IntervalValue) {
+	if ivt.count == 0 {
+		return nil
+	}
+	f := func(n *IntervalValue) bool { ivs = append(ivs, n); return true }
+	ivt.Visit(iv, f)
+	return ivs
+}
+
+// Union merges a given interval tree into the receiver.
+func (ivt *IntervalTree) Union(inIvt IntervalTree, ivl Interval) {
+	f := func(n *IntervalValue) bool {
+		ivt.Insert(n.Ivl, n.Val)
+		return true
+	}
+	inIvt.Visit(ivl, f)
+}
+
+type StringComparable string
+
+func (s StringComparable) Compare(c Comparable) int {
+	sc := c.(StringComparable)
+	if s < sc {
+		return -1
+	}
+	if s > sc {
+		return 1
+	}
+	return 0
+}
+
+func NewStringInterval(begin, end string) Interval {
+	return Interval{StringComparable(begin), StringComparable(end)}
+}
+
+func NewStringPoint(s string) Interval {
+	return Interval{StringComparable(s), StringComparable(s + "\x00")}
+}
+
+// StringAffineComparable treats "" as > all other strings
+type StringAffineComparable string
+
+func (s StringAffineComparable) Compare(c Comparable) int {
+	sc := c.(StringAffineComparable)
+
+	if len(s) == 0 {
+		if len(sc) == 0 {
+			return 0
+		}
+		return 1
+	}
+	if len(sc) == 0 {
+		return -1
+	}
+
+	if s < sc {
+		return -1
+	}
+	if s > sc {
+		return 1
+	}
+	return 0
+}
+
+func NewStringAffineInterval(begin, end string) Interval {
+	return Interval{StringAffineComparable(begin), StringAffineComparable(end)}
+}
+func NewStringAffinePoint(s string) Interval {
+	return NewStringAffineInterval(s, s+"\x00")
+}
+
+func NewInt64Interval(a int64, b int64) Interval {
+	return Interval{Int64Comparable(a), Int64Comparable(b)}
+}
+
+func NewInt64Point(a int64) Interval {
+	return Interval{Int64Comparable(a), Int64Comparable(a + 1)}
+}
+
+type Int64Comparable int64
+
+func (v Int64Comparable) Compare(c Comparable) int {
+	vc := c.(Int64Comparable)
+	// compare directly rather than subtracting; v - vc can overflow int64
+	if v < vc {
+		return -1
+	}
+	if v > vc {
+		return 1
+	}
+	return 0
+}
+
+// BytesAffineComparable treats empty byte arrays as > all other byte arrays
+type BytesAffineComparable []byte
+
+func (b BytesAffineComparable) Compare(c Comparable) int {
+	bc := c.(BytesAffineComparable)
+
+	if len(b) == 0 {
+		if len(bc) == 0 {
+			return 0
+		}
+		return 1
+	}
+	if len(bc) == 0 {
+		return -1
+	}
+
+	return bytes.Compare(b, bc)
+}
+
+func NewBytesAffineInterval(begin, end []byte) Interval {
+	return Interval{BytesAffineComparable(begin), BytesAffineComparable(end)}
+}
+func NewBytesAffinePoint(b []byte) Interval {
+	be := make([]byte, len(b)+1)
+	copy(be, b)
+	be[len(b)] = 0
+	return NewBytesAffineInterval(b, be)
+}
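+
+// Illustrative usage (editor's sketch, not part of the vendored source):
+// build a tree over int64 intervals and stab it at a point; assumes the
+// Insert method defined earlier in this file.
+//
+//	var ivt IntervalTree
+//	ivt.Insert(NewInt64Interval(1, 5), "a")
+//	ivt.Insert(NewInt64Interval(3, 10), "b")
+//	for _, v := range ivt.Stab(NewInt64Point(4)) {
+//		fmt.Println(v.Val) // "a" then "b": both intervals cover 4
+//	}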
diff --git a/vendor/github.com/coreos/etcd/pkg/contention/contention.go b/vendor/github.com/coreos/etcd/pkg/contention/contention.go
new file mode 100644
index 0000000..26ce9a2
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/contention/contention.go
@@ -0,0 +1,69 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package contention
+
+import (
+	"sync"
+	"time"
+)
+
+// TimeoutDetector detects routine starvations by
+// observing the actual time duration to finish an action
+// or between two events that should happen in a fixed
+// interval. If the observed duration is longer than
+// the expectation, the detector will report the result.
+type TimeoutDetector struct {
+	mu          sync.Mutex // protects all
+	maxDuration time.Duration
+	// map from event to time
+	// time is the last seen time of the event.
+	records map[uint64]time.Time
+}
+
+// NewTimeoutDetector creates the TimeoutDetector.
+func NewTimeoutDetector(maxDuration time.Duration) *TimeoutDetector {
+	return &TimeoutDetector{
+		maxDuration: maxDuration,
+		records:     make(map[uint64]time.Time),
+	}
+}
+
+// Reset resets the TimeoutDetector.
+func (td *TimeoutDetector) Reset() {
+	td.mu.Lock()
+	defer td.mu.Unlock()
+
+	td.records = make(map[uint64]time.Time)
+}
+
+// Observe observes an event for the given id. It returns false, along with
+// the exceeded duration, if the interval is longer than the expectation.
+func (td *TimeoutDetector) Observe(which uint64) (bool, time.Duration) {
+	td.mu.Lock()
+	defer td.mu.Unlock()
+
+	ok := true
+	now := time.Now()
+	exceed := time.Duration(0)
+
+	if pt, found := td.records[which]; found {
+		exceed = now.Sub(pt) - td.maxDuration
+		if exceed > 0 {
+			ok = false
+		}
+	}
+	td.records[which] = now
+	return ok, exceed
+}
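+
+// Illustrative usage (editor's sketch, not part of the vendored source):
+// watch the gap between heartbeats to a peer; the 100ms expectation and
+// peerID are hypothetical.
+//
+//	td := NewTimeoutDetector(100 * time.Millisecond)
+//	ok, exceeded := td.Observe(peerID)
+//	if !ok {
+//		log.Printf("heartbeat to %x exceeded expectation by %v", peerID, exceeded)
+//	}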
diff --git a/vendor/github.com/coreos/etcd/pkg/contention/doc.go b/vendor/github.com/coreos/etcd/pkg/contention/doc.go
new file mode 100644
index 0000000..daf4522
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/contention/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package contention provides facilities for detecting system contention.
+package contention
diff --git a/vendor/github.com/coreos/etcd/pkg/cors/cors.go b/vendor/github.com/coreos/etcd/pkg/cors/cors.go
new file mode 100644
index 0000000..0c64f16
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/cors/cors.go
@@ -0,0 +1,90 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package cors handles cross-origin HTTP requests (CORS).
+package cors
+
+import (
+	"fmt"
+	"net/http"
+	"net/url"
+	"sort"
+	"strings"
+)
+
+type CORSInfo map[string]bool
+
+// Set implements the flag.Value interface to allow users to define a list of CORS origins.
+func (ci *CORSInfo) Set(s string) error {
+	m := make(map[string]bool)
+	for _, v := range strings.Split(s, ",") {
+		v = strings.TrimSpace(v)
+		if v == "" {
+			continue
+		}
+		if v != "*" {
+			if _, err := url.Parse(v); err != nil {
+				return fmt.Errorf("Invalid CORS origin: %s", err)
+			}
+		}
+		m[v] = true
+
+	}
+	*ci = CORSInfo(m)
+	return nil
+}
+
+func (ci *CORSInfo) String() string {
+	o := make([]string, 0)
+	for k := range *ci {
+		o = append(o, k)
+	}
+	sort.StringSlice(o).Sort()
+	return strings.Join(o, ",")
+}
+
+// OriginAllowed determines whether the server will allow a given CORS origin.
+func (c CORSInfo) OriginAllowed(origin string) bool {
+	return c["*"] || c[origin]
+}
+
+type CORSHandler struct {
+	Handler http.Handler
+	Info    *CORSInfo
+}
+
+// addHeader adds the correct CORS headers for the given origin.
+func (h *CORSHandler) addHeader(w http.ResponseWriter, origin string) {
+	w.Header().Add("Access-Control-Allow-Methods", "POST, GET, OPTIONS, PUT, DELETE")
+	w.Header().Add("Access-Control-Allow-Origin", origin)
+	w.Header().Add("Access-Control-Allow-Headers", "accept, content-type, authorization")
+}
+
+// ServeHTTP adds the correct CORS headers based on the origin and returns immediately
+// with a 200 OK if the method is OPTIONS.
+func (h *CORSHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+	// Write CORS header.
+	if h.Info.OriginAllowed("*") {
+		h.addHeader(w, "*")
+	} else if origin := req.Header.Get("Origin"); h.Info.OriginAllowed(origin) {
+		h.addHeader(w, origin)
+	}
+
+	if req.Method == "OPTIONS" {
+		w.WriteHeader(http.StatusOK)
+		return
+	}
+
+	h.Handler.ServeHTTP(w, req)
+}
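+
+// Illustrative usage (editor's sketch, not part of the vendored source):
+// allow a single origin and wrap an existing handler; mux is hypothetical.
+//
+//	info := CORSInfo{}
+//	if err := info.Set("https://example.com"); err != nil {
+//		panic(err)
+//	}
+//	http.ListenAndServe(":2379", &CORSHandler{Handler: mux, Info: &info})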
diff --git a/vendor/github.com/coreos/etcd/pkg/cpuutil/doc.go b/vendor/github.com/coreos/etcd/pkg/cpuutil/doc.go
new file mode 100644
index 0000000..0323b2d
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/cpuutil/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package cpuutil provides facilities for detecting cpu-specific features.
+package cpuutil
diff --git a/vendor/github.com/coreos/etcd/pkg/cpuutil/endian.go b/vendor/github.com/coreos/etcd/pkg/cpuutil/endian.go
new file mode 100644
index 0000000..6ab898d
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/cpuutil/endian.go
@@ -0,0 +1,36 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cpuutil
+
+import (
+	"encoding/binary"
+	"unsafe"
+)
+
+const intWidth int = int(unsafe.Sizeof(0))
+
+var byteOrder binary.ByteOrder
+
+// ByteOrder returns the byte order for the CPU's native endianness.
+func ByteOrder() binary.ByteOrder { return byteOrder }
+
+func init() {
+	var i int = 0x1
+	if v := (*[intWidth]byte)(unsafe.Pointer(&i)); v[0] == 0 {
+		byteOrder = binary.BigEndian
+	} else {
+		byteOrder = binary.LittleEndian
+	}
+}
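+
+// Illustrative usage (editor's sketch, not part of the vendored source):
+// encode a value in the CPU's native byte order.
+//
+//	buf := make([]byte, 8)
+//	ByteOrder().PutUint64(buf, 0x1122334455667788)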
diff --git a/vendor/github.com/coreos/etcd/pkg/crc/crc.go b/vendor/github.com/coreos/etcd/pkg/crc/crc.go
new file mode 100644
index 0000000..4b998a4
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/crc/crc.go
@@ -0,0 +1,43 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package crc provides utility functions for cyclic redundancy check
+// algorithms.
+package crc
+
+import (
+	"hash"
+	"hash/crc32"
+)
+
+// The size of a CRC-32 checksum in bytes.
+const Size = 4
+
+type digest struct {
+	crc uint32
+	tab *crc32.Table
+}
+
+// New creates a new hash.Hash32 computing the CRC-32 checksum
+// using the polynomial represented by the Table.
+// Modified by xiangli to take a prevcrc.
+func New(prev uint32, tab *crc32.Table) hash.Hash32 { return &digest{prev, tab} }
+
+func (d *digest) Size() int { return Size }
+
+func (d *digest) BlockSize() int { return 1 }
+
+func (d *digest) Reset() { d.crc = 0 }
+
+func (d *digest) Write(p []byte) (n int, err error) {
+	d.crc = crc32.Update(d.crc, d.tab, p)
+	return len(p), nil
+}
+
+func (d *digest) Sum32() uint32 { return d.crc }
+
+func (d *digest) Sum(in []byte) []byte {
+	s := d.Sum32()
+	return append(in, byte(s>>24), byte(s>>16), byte(s>>8), byte(s))
+}
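+
+// Illustrative usage (editor's sketch, not part of the vendored source):
+// chain checksums across records by seeding each digest with the previous
+// Sum32; the record contents are hypothetical.
+//
+//	tab := crc32.MakeTable(crc32.Castagnoli)
+//	h := New(0, tab)
+//	h.Write([]byte("record-1"))
+//	h = New(h.Sum32(), tab) // continue from the previous checksum
+//	h.Write([]byte("record-2"))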
diff --git a/vendor/github.com/coreos/etcd/pkg/debugutil/doc.go b/vendor/github.com/coreos/etcd/pkg/debugutil/doc.go
new file mode 100644
index 0000000..74499eb
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/debugutil/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package debugutil includes utility functions for debugging.
+package debugutil
diff --git a/vendor/github.com/coreos/etcd/pkg/debugutil/pprof.go b/vendor/github.com/coreos/etcd/pkg/debugutil/pprof.go
new file mode 100644
index 0000000..8d5544a
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/debugutil/pprof.go
@@ -0,0 +1,47 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package debugutil
+
+import (
+	"net/http"
+	"net/http/pprof"
+	"runtime"
+)
+
+const HTTPPrefixPProf = "/debug/pprof"
+
+// PProfHandlers returns a map of pprof handlers keyed by the HTTP path.
+func PProfHandlers() map[string]http.Handler {
+	// set only when there's no existing setting
+	if runtime.SetMutexProfileFraction(-1) == 0 {
+		// 1 out of 5 mutex events are reported, on average
+		runtime.SetMutexProfileFraction(5)
+	}
+
+	m := make(map[string]http.Handler)
+
+	m[HTTPPrefixPProf+"/"] = http.HandlerFunc(pprof.Index)
+	m[HTTPPrefixPProf+"/profile"] = http.HandlerFunc(pprof.Profile)
+	m[HTTPPrefixPProf+"/symbol"] = http.HandlerFunc(pprof.Symbol)
+	m[HTTPPrefixPProf+"/cmdline"] = http.HandlerFunc(pprof.Cmdline)
+	m[HTTPPrefixPProf+"/trace "] = http.HandlerFunc(pprof.Trace)
+	m[HTTPPrefixPProf+"/heap"] = pprof.Handler("heap")
+	m[HTTPPrefixPProf+"/goroutine"] = pprof.Handler("goroutine")
+	m[HTTPPrefixPProf+"/threadcreate"] = pprof.Handler("threadcreate")
+	m[HTTPPrefixPProf+"/block"] = pprof.Handler("block")
+	m[HTTPPrefixPProf+"/mutex"] = pprof.Handler("mutex")
+
+	return m
+}
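+
+// Illustrative usage (editor's sketch, not part of the vendored source):
+// mount the pprof handlers on a mux served from a debug port.
+//
+//	mux := http.NewServeMux()
+//	for path, h := range PProfHandlers() {
+//		mux.Handle(path, h)
+//	}
+//	go http.ListenAndServe("localhost:6060", mux)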
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/dir_unix.go b/vendor/github.com/coreos/etcd/pkg/fileutil/dir_unix.go
new file mode 100644
index 0000000..58a77df
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/fileutil/dir_unix.go
@@ -0,0 +1,22 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !windows
+
+package fileutil
+
+import "os"
+
+// OpenDir opens a directory for syncing.
+func OpenDir(path string) (*os.File, error) { return os.Open(path) }
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/dir_windows.go b/vendor/github.com/coreos/etcd/pkg/fileutil/dir_windows.go
new file mode 100644
index 0000000..c123395
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/fileutil/dir_windows.go
@@ -0,0 +1,46 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build windows
+
+package fileutil
+
+import (
+	"os"
+	"syscall"
+)
+
+// OpenDir opens a directory in windows with write access for syncing.
+func OpenDir(path string) (*os.File, error) {
+	fd, err := openDir(path)
+	if err != nil {
+		return nil, err
+	}
+	return os.NewFile(uintptr(fd), path), nil
+}
+
+func openDir(path string) (fd syscall.Handle, err error) {
+	if len(path) == 0 {
+		return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND
+	}
+	pathp, err := syscall.UTF16PtrFromString(path)
+	if err != nil {
+		return syscall.InvalidHandle, err
+	}
+	access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE)
+	sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE)
+	createmode := uint32(syscall.OPEN_EXISTING)
+	fl := uint32(syscall.FILE_FLAG_BACKUP_SEMANTICS)
+	return syscall.CreateFile(pathp, access, sharemode, nil, createmode, fl, 0)
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go b/vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go
new file mode 100644
index 0000000..fce5126
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go
@@ -0,0 +1,122 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package fileutil implements utility functions related to files and paths.
+package fileutil
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sort"
+
+	"github.com/coreos/pkg/capnslog"
+)
+
+const (
+	// PrivateFileMode grants the owner permission to read/write a file.
+	PrivateFileMode = 0600
+	// PrivateDirMode grants the owner permission to make/remove files inside the directory.
+	PrivateDirMode = 0700
+)
+
+var (
+	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "pkg/fileutil")
+)
+
+// IsDirWriteable checks if dir is writable by writing to and removing a file
+// in dir. It returns nil if dir is writable.
+func IsDirWriteable(dir string) error {
+	f := filepath.Join(dir, ".touch")
+	if err := ioutil.WriteFile(f, []byte(""), PrivateFileMode); err != nil {
+		return err
+	}
+	return os.Remove(f)
+}
+
+// ReadDir returns the filenames in the given directory in sorted order.
+func ReadDir(dirpath string) ([]string, error) {
+	dir, err := os.Open(dirpath)
+	if err != nil {
+		return nil, err
+	}
+	defer dir.Close()
+	names, err := dir.Readdirnames(-1)
+	if err != nil {
+		return nil, err
+	}
+	sort.Strings(names)
+	return names, nil
+}
+
+// TouchDirAll is similar to os.MkdirAll. It creates directories with 0700 permission if any directory
+// does not exist. TouchDirAll also ensures the given directory is writable.
+func TouchDirAll(dir string) error {
+	// If path is already a directory, MkdirAll does nothing
+	// and returns nil.
+	err := os.MkdirAll(dir, PrivateDirMode)
+	if err != nil {
+		// if mkdirAll("a/text") and "text" is not
+		// a directory, this will return syscall.ENOTDIR
+		return err
+	}
+	return IsDirWriteable(dir)
+}
+
+// CreateDirAll is similar to TouchDirAll but returns an error
+// if the deepest directory was not empty.
+func CreateDirAll(dir string) error {
+	err := TouchDirAll(dir)
+	if err == nil {
+		var ns []string
+		ns, err = ReadDir(dir)
+		if err != nil {
+			return err
+		}
+		if len(ns) != 0 {
+			err = fmt.Errorf("expected %q to be empty, got %q", dir, ns)
+		}
+	}
+	return err
+}
+
+func Exist(name string) bool {
+	_, err := os.Stat(name)
+	return err == nil
+}
+
+// ZeroToEnd zeros a file starting from SEEK_CUR to its SEEK_END. May temporarily
+// shorten the length of the file.
+func ZeroToEnd(f *os.File) error {
+	// TODO: support FALLOC_FL_ZERO_RANGE
+	off, err := f.Seek(0, io.SeekCurrent)
+	if err != nil {
+		return err
+	}
+	lenf, lerr := f.Seek(0, io.SeekEnd)
+	if lerr != nil {
+		return lerr
+	}
+	if err = f.Truncate(off); err != nil {
+		return err
+	}
+	// make sure blocks remain allocated
+	if err = Preallocate(f, lenf, true); err != nil {
+		return err
+	}
+	_, err = f.Seek(off, io.SeekStart)
+	return err
+}
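+
+// Illustrative usage (editor's sketch, not part of the vendored source):
+// ensure a data directory exists and is writable before using it; the
+// path is hypothetical.
+//
+//	if err := TouchDirAll("/var/lib/arouterd"); err != nil {
+//		plog.Fatalf("cannot use data dir (%v)", err)
+//	}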
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock.go
new file mode 100644
index 0000000..338627f
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/fileutil/lock.go
@@ -0,0 +1,26 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fileutil
+
+import (
+	"errors"
+	"os"
+)
+
+var (
+	ErrLocked = errors.New("fileutil: file already locked")
+)
+
+type LockedFile struct{ *os.File }
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_flock.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_flock.go
new file mode 100644
index 0000000..542550b
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_flock.go
@@ -0,0 +1,49 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !windows,!plan9,!solaris
+
+package fileutil
+
+import (
+	"os"
+	"syscall"
+)
+
+func flockTryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+	f, err := os.OpenFile(path, flag, perm)
+	if err != nil {
+		return nil, err
+	}
+	if err = syscall.Flock(int(f.Fd()), syscall.LOCK_EX|syscall.LOCK_NB); err != nil {
+		f.Close()
+		if err == syscall.EWOULDBLOCK {
+			err = ErrLocked
+		}
+		return nil, err
+	}
+	return &LockedFile{f}, nil
+}
+
+func flockLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+	f, err := os.OpenFile(path, flag, perm)
+	if err != nil {
+		return nil, err
+	}
+	if err = syscall.Flock(int(f.Fd()), syscall.LOCK_EX); err != nil {
+		f.Close()
+		return nil, err
+	}
+	return &LockedFile{f}, err
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_linux.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_linux.go
new file mode 100644
index 0000000..939fea6
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_linux.go
@@ -0,0 +1,97 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build linux
+
+package fileutil
+
+import (
+	"io"
+	"os"
+	"syscall"
+)
+
+// This used to call syscall.Flock() but that call fails with EBADF on NFS.
+// An alternative is lockf() which works on NFS but that call lets a process lock
+// the same file twice. Instead, use Linux's non-standard open file descriptor
+// locks which will block if the process already holds the file lock.
+//
+// constants from /usr/include/bits/fcntl-linux.h
+const (
+	F_OFD_GETLK  = 37
+	F_OFD_SETLK  = 37
+	F_OFD_SETLKW = 38
+)
+
+var (
+	wrlck = syscall.Flock_t{
+		Type:   syscall.F_WRLCK,
+		Whence: int16(io.SeekStart),
+		Start:  0,
+		Len:    0,
+	}
+
+	linuxTryLockFile = flockTryLockFile
+	linuxLockFile    = flockLockFile
+)
+
+func init() {
+	// use open file descriptor locks if the system supports it
+	getlk := syscall.Flock_t{Type: syscall.F_RDLCK}
+	if err := syscall.FcntlFlock(0, F_OFD_GETLK, &getlk); err == nil {
+		linuxTryLockFile = ofdTryLockFile
+		linuxLockFile = ofdLockFile
+	}
+}
+
+func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+	return linuxTryLockFile(path, flag, perm)
+}
+
+func ofdTryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+	f, err := os.OpenFile(path, flag, perm)
+	if err != nil {
+		return nil, err
+	}
+
+	flock := wrlck
+	if err = syscall.FcntlFlock(f.Fd(), F_OFD_SETLK, &flock); err != nil {
+		f.Close()
+		if err == syscall.EWOULDBLOCK {
+			err = ErrLocked
+		}
+		return nil, err
+	}
+	return &LockedFile{f}, nil
+}
+
+func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+	return linuxLockFile(path, flag, perm)
+}
+
+func ofdLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+	f, err := os.OpenFile(path, flag, perm)
+	if err != nil {
+		return nil, err
+	}
+
+	flock := wrlck
+	err = syscall.FcntlFlock(f.Fd(), F_OFD_SETLKW, &flock)
+
+	if err != nil {
+		f.Close()
+		return nil, err
+	}
+	return &LockedFile{f}, err
+}
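+
+// Illustrative usage (editor's sketch, not part of the vendored source):
+// take an exclusive lock on a file, failing fast if another process holds
+// it; the path is hypothetical.
+//
+//	l, err := TryLockFile("/var/lib/etcd/member/snap/db", os.O_WRONLY, PrivateFileMode)
+//	if err == ErrLocked {
+//		// another process owns the file; back off or exit
+//	} else if err == nil {
+//		defer l.Close()
+//	}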
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_plan9.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_plan9.go
new file mode 100644
index 0000000..fee6a7c
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_plan9.go
@@ -0,0 +1,45 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fileutil
+
+import (
+	"os"
+	"syscall"
+	"time"
+)
+
+func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+	if err := os.Chmod(path, syscall.DMEXCL|PrivateFileMode); err != nil {
+		return nil, err
+	}
+	f, err := os.OpenFile(path, flag, perm)
+	if err != nil {
+		return nil, ErrLocked
+	}
+	return &LockedFile{f}, nil
+}
+
+func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+	if err := os.Chmod(path, syscall.DMEXCL|PrivateFileMode); err != nil {
+		return nil, err
+	}
+	for {
+		f, err := os.OpenFile(path, flag, perm)
+		if err == nil {
+			return &LockedFile{f}, nil
+		}
+		time.Sleep(10 * time.Millisecond)
+	}
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_solaris.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_solaris.go
new file mode 100644
index 0000000..352ca55
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_solaris.go
@@ -0,0 +1,62 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build solaris
+
+package fileutil
+
+import (
+	"os"
+	"syscall"
+)
+
+func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+	var lock syscall.Flock_t
+	lock.Start = 0
+	lock.Len = 0
+	lock.Pid = 0
+	lock.Type = syscall.F_WRLCK
+	lock.Whence = 0
+	f, err := os.OpenFile(path, flag, perm)
+	if err != nil {
+		return nil, err
+	}
+	if err := syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &lock); err != nil {
+		f.Close()
+		if err == syscall.EAGAIN {
+			err = ErrLocked
+		}
+		return nil, err
+	}
+	return &LockedFile{f}, nil
+}
+
+func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+	var lock syscall.Flock_t
+	lock.Start = 0
+	lock.Len = 0
+	lock.Pid = 0
+	lock.Type = syscall.F_WRLCK
+	lock.Whence = 0
+	f, err := os.OpenFile(path, flag, perm)
+	if err != nil {
+		return nil, err
+	}
+	if err = syscall.FcntlFlock(f.Fd(), syscall.F_SETLKW, &lock); err != nil {
+		f.Close()
+		return nil, err
+	}
+	return &LockedFile{f}, nil
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_unix.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_unix.go
new file mode 100644
index 0000000..ed01164
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_unix.go
@@ -0,0 +1,29 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !windows,!plan9,!solaris,!linux
+
+package fileutil
+
+import (
+	"os"
+)
+
+func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+	return flockTryLockFile(path, flag, perm)
+}
+
+func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+	return flockLockFile(path, flag, perm)
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_windows.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_windows.go
new file mode 100644
index 0000000..b181723
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_windows.go
@@ -0,0 +1,125 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build windows
+
+package fileutil
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"syscall"
+	"unsafe"
+)
+
+var (
+	modkernel32    = syscall.NewLazyDLL("kernel32.dll")
+	procLockFileEx = modkernel32.NewProc("LockFileEx")
+
+	errLocked = errors.New("The process cannot access the file because another process has locked a portion of the file.")
+)
+
+const (
+	// https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx
+	LOCKFILE_EXCLUSIVE_LOCK   = 2
+	LOCKFILE_FAIL_IMMEDIATELY = 1
+
+	// see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx
+	errLockViolation syscall.Errno = 0x21
+)
+
+func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+	f, err := open(path, flag, perm)
+	if err != nil {
+		return nil, err
+	}
+	if err := lockFile(syscall.Handle(f.Fd()), LOCKFILE_FAIL_IMMEDIATELY); err != nil {
+		f.Close()
+		return nil, err
+	}
+	return &LockedFile{f}, nil
+}
+
+func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+	f, err := open(path, flag, perm)
+	if err != nil {
+		return nil, err
+	}
+	if err := lockFile(syscall.Handle(f.Fd()), 0); err != nil {
+		f.Close()
+		return nil, err
+	}
+	return &LockedFile{f}, nil
+}
+
+func open(path string, flag int, perm os.FileMode) (*os.File, error) {
+	if path == "" {
+		return nil, fmt.Errorf("cannot open empty filename")
+	}
+	var access uint32
+	switch flag {
+	case syscall.O_RDONLY:
+		access = syscall.GENERIC_READ
+	case syscall.O_WRONLY:
+		access = syscall.GENERIC_WRITE
+	case syscall.O_RDWR:
+		access = syscall.GENERIC_READ | syscall.GENERIC_WRITE
+	case syscall.O_WRONLY | syscall.O_CREAT:
+		access = syscall.GENERIC_ALL
+	default:
+		panic(fmt.Errorf("flag %v is not supported", flag))
+	}
+	fd, err := syscall.CreateFile(&(syscall.StringToUTF16(path)[0]),
+		access,
+		syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
+		nil,
+		syscall.OPEN_ALWAYS,
+		syscall.FILE_ATTRIBUTE_NORMAL,
+		0)
+	if err != nil {
+		return nil, err
+	}
+	return os.NewFile(uintptr(fd), path), nil
+}
+
+func lockFile(fd syscall.Handle, flags uint32) error {
+	var flag uint32 = LOCKFILE_EXCLUSIVE_LOCK
+	flag |= flags
+	if fd == syscall.InvalidHandle {
+		return nil
+	}
+	err := lockFileEx(fd, flag, 1, 0, &syscall.Overlapped{})
+	if err == nil {
+		return nil
+	} else if err.Error() == errLocked.Error() {
+		return ErrLocked
+	} else if err != errLockViolation {
+		return err
+	}
+	return nil
+}
+
+func lockFileEx(h syscall.Handle, flags, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
+	var reserved uint32 = 0
+	r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)))
+	if r1 == 0 {
+		if e1 != 0 {
+			err = error(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return err
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate.go b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate.go
new file mode 100644
index 0000000..c747b7c
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate.go
@@ -0,0 +1,54 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fileutil
+
+import (
+	"io"
+	"os"
+)
+
+// Preallocate tries to allocate the space for the given
+// file. This operation is only supported on linux by a
+// few filesystems (btrfs, ext4, etc.).
+// If the operation is unsupported, no error will be returned.
+// Otherwise, the error encountered will be returned.
+func Preallocate(f *os.File, sizeInBytes int64, extendFile bool) error {
+	if sizeInBytes == 0 {
+		// fallocate will return EINVAL if length is 0; skip
+		return nil
+	}
+	if extendFile {
+		return preallocExtend(f, sizeInBytes)
+	}
+	return preallocFixed(f, sizeInBytes)
+}
+
+func preallocExtendTrunc(f *os.File, sizeInBytes int64) error {
+	curOff, err := f.Seek(0, io.SeekCurrent)
+	if err != nil {
+		return err
+	}
+	size, err := f.Seek(sizeInBytes, io.SeekEnd)
+	if err != nil {
+		return err
+	}
+	if _, err = f.Seek(curOff, io.SeekStart); err != nil {
+		return err
+	}
+	if sizeInBytes > size {
+		return nil
+	}
+	return f.Truncate(sizeInBytes)
+}
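+
+// Illustrative usage (editor's sketch, not part of the vendored source):
+// reserve 64 MiB for a log file so appends do not stall on block
+// allocation; the size is hypothetical.
+//
+//	if err := Preallocate(f, 64*1024*1024, true); err != nil {
+//		return err
+//	}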
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_darwin.go b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_darwin.go
new file mode 100644
index 0000000..5a6dccf
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_darwin.go
@@ -0,0 +1,65 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build darwin
+
+package fileutil
+
+import (
+	"os"
+	"syscall"
+	"unsafe"
+)
+
+func preallocExtend(f *os.File, sizeInBytes int64) error {
+	if err := preallocFixed(f, sizeInBytes); err != nil {
+		return err
+	}
+	return preallocExtendTrunc(f, sizeInBytes)
+}
+
+func preallocFixed(f *os.File, sizeInBytes int64) error {
+	// allocate all requested space or no space at all
+	// TODO: allocate contiguous space on disk with F_ALLOCATECONTIG flag
+	fstore := &syscall.Fstore_t{
+		Flags:   syscall.F_ALLOCATEALL,
+		Posmode: syscall.F_PEOFPOSMODE,
+		Length:  sizeInBytes}
+	p := unsafe.Pointer(fstore)
+	_, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(syscall.F_PREALLOCATE), uintptr(p))
+	if errno == 0 || errno == syscall.ENOTSUP {
+		return nil
+	}
+
+	// wrong argument to fallocate syscall
+	if errno == syscall.EINVAL {
+		// filesystem "st_blocks" are allocated in the units of
+		// "Allocation Block Size" (run "diskutil info /" command)
+		var stat syscall.Stat_t
+		syscall.Fstat(int(f.Fd()), &stat)
+
+		// syscall.Statfs_t.Bsize is "optimal transfer block size"
+		// and contains matching 4096 value when latest OS X kernel
+		// supports 4,096 KB filesystem block size
+		var statfs syscall.Statfs_t
+		syscall.Fstatfs(int(f.Fd()), &statfs)
+		blockSize := int64(statfs.Bsize)
+
+		if stat.Blocks*blockSize >= sizeInBytes {
+			// enough blocks are already allocated
+			return nil
+		}
+	}
+	return errno
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unix.go b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unix.go
new file mode 100644
index 0000000..50bd84f
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unix.go
@@ -0,0 +1,49 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build linux
+
+package fileutil
+
+import (
+	"os"
+	"syscall"
+)
+
+func preallocExtend(f *os.File, sizeInBytes int64) error {
+	// use mode = 0 to change size
+	err := syscall.Fallocate(int(f.Fd()), 0, 0, sizeInBytes)
+	if err != nil {
+		errno, ok := err.(syscall.Errno)
+		// not supported; fallback
+		// fallocate EINTRs frequently in some environments; fallback
+		if ok && (errno == syscall.ENOTSUP || errno == syscall.EINTR) {
+			return preallocExtendTrunc(f, sizeInBytes)
+		}
+	}
+	return err
+}
+
+func preallocFixed(f *os.File, sizeInBytes int64) error {
+	// use mode = 1 to keep size; see FALLOC_FL_KEEP_SIZE
+	err := syscall.Fallocate(int(f.Fd()), 1, 0, sizeInBytes)
+	if err != nil {
+		errno, ok := err.(syscall.Errno)
+		// treat not supported as nil error
+		if ok && errno == syscall.ENOTSUP {
+			return nil
+		}
+	}
+	return err
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unsupported.go b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unsupported.go
new file mode 100644
index 0000000..162fbc5
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unsupported.go
@@ -0,0 +1,25 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !linux,!darwin
+
+package fileutil
+
+import "os"
+
+func preallocExtend(f *os.File, sizeInBytes int64) error {
+	return preallocExtendTrunc(f, sizeInBytes)
+}
+
+func preallocFixed(f *os.File, sizeInBytes int64) error { return nil }
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/purge.go b/vendor/github.com/coreos/etcd/pkg/fileutil/purge.go
new file mode 100644
index 0000000..92fceab
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/fileutil/purge.go
@@ -0,0 +1,78 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fileutil
+
+import (
+	"os"
+	"path/filepath"
+	"sort"
+	"strings"
+	"time"
+)
+
+func PurgeFile(dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) <-chan error {
+	return purgeFile(dirname, suffix, max, interval, stop, nil)
+}
+
+// purgeFile is the internal implementation of PurgeFile; it posts purged file names to purgec when purgec is non-nil.
+func purgeFile(dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}, purgec chan<- string) <-chan error {
+	errC := make(chan error, 1)
+	go func() {
+		for {
+			fnames, err := ReadDir(dirname)
+			if err != nil {
+				errC <- err
+				return
+			}
+			newfnames := make([]string, 0)
+			for _, fname := range fnames {
+				if strings.HasSuffix(fname, suffix) {
+					newfnames = append(newfnames, fname)
+				}
+			}
+			sort.Strings(newfnames)
+			fnames = newfnames
+			for len(newfnames) > int(max) {
+				f := filepath.Join(dirname, newfnames[0])
+				l, err := TryLockFile(f, os.O_WRONLY, PrivateFileMode)
+				if err != nil {
+					break
+				}
+				if err = os.Remove(f); err != nil {
+					errC <- err
+					return
+				}
+				if err = l.Close(); err != nil {
+					plog.Errorf("error unlocking %s when purging file (%v)", l.Name(), err)
+					errC <- err
+					return
+				}
+				plog.Infof("purged file %s successfully", f)
+				newfnames = newfnames[1:]
+			}
+			if purgec != nil {
+				for i := 0; i < len(fnames)-len(newfnames); i++ {
+					purgec <- fnames[i]
+				}
+			}
+			select {
+			case <-time.After(interval):
+			case <-stop:
+				return
+			}
+		}
+	}()
+	return errC
+}
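+
+// Illustrative usage (editor's sketch, not part of the vendored source):
+// keep at most 5 files with the "snap" suffix, checking every 30s until
+// stop is closed; the parameters are hypothetical.
+//
+//	stop := make(chan struct{})
+//	errc := PurgeFile("/var/lib/etcd/member/snap", "snap", 5, 30*time.Second, stop)
+//	go func() {
+//		if err := <-errc; err != nil {
+//			plog.Fatalf("failed to purge files (%v)", err)
+//		}
+//	}()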
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/sync.go b/vendor/github.com/coreos/etcd/pkg/fileutil/sync.go
new file mode 100644
index 0000000..54dd41f
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/fileutil/sync.go
@@ -0,0 +1,29 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !linux,!darwin
+
+package fileutil
+
+import "os"
+
+// Fsync is a wrapper around file.Sync(). Special handling is needed on darwin platform.
+func Fsync(f *os.File) error {
+	return f.Sync()
+}
+
+// Fdatasync is a wrapper around file.Sync(). Special handling is needed on linux platform.
+func Fdatasync(f *os.File) error {
+	return f.Sync()
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/sync_darwin.go b/vendor/github.com/coreos/etcd/pkg/fileutil/sync_darwin.go
new file mode 100644
index 0000000..c2f39bf
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/fileutil/sync_darwin.go
@@ -0,0 +1,40 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build darwin
+
+package fileutil
+
+import (
+	"os"
+	"syscall"
+)
+
+// Fsync on HFS/OSX flushes the data onto the physical drive, but the drive
+// may not write it to the persistent media for quite some time, and it may be
+// written in an out-of-order sequence. Using F_FULLFSYNC ensures that the
+// physical drive's buffer will also get flushed to the media.
+func Fsync(f *os.File) error {
+	_, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(syscall.F_FULLFSYNC), uintptr(0))
+	if errno == 0 {
+		return nil
+	}
+	return errno
+}
+
+// Fdatasync on darwin platform invokes fcntl(F_FULLFSYNC) for actual persistence
+// on physical drive media.
+func Fdatasync(f *os.File) error {
+	return Fsync(f)
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/sync_linux.go b/vendor/github.com/coreos/etcd/pkg/fileutil/sync_linux.go
new file mode 100644
index 0000000..1bbced9
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/fileutil/sync_linux.go
@@ -0,0 +1,34 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build linux
+
+package fileutil
+
+import (
+	"os"
+	"syscall"
+)
+
+// Fsync is a wrapper around file.Sync(). Special handling is needed on darwin platform.
+func Fsync(f *os.File) error {
+	return f.Sync()
+}
+
+// Fdatasync is similar to fsync(), but does not flush modified metadata
+// unless that metadata is needed in order to allow a subsequent data retrieval
+// to be correctly handled.
+func Fdatasync(f *os.File) error {
+	return syscall.Fdatasync(int(f.Fd()))
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/flags/flag.go b/vendor/github.com/coreos/etcd/pkg/flags/flag.go
new file mode 100644
index 0000000..69c4641
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/flags/flag.go
@@ -0,0 +1,166 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package flags implements command-line flag parsing.
+package flags
+
+import (
+	"flag"
+	"fmt"
+	"net/url"
+	"os"
+	"strings"
+
+	"github.com/coreos/pkg/capnslog"
+	"github.com/spf13/pflag"
+)
+
+var (
+	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "pkg/flags")
+)
+
+// DeprecatedFlag encapsulates a flag that may have been previously valid but
+// is now deprecated. If a DeprecatedFlag is set, an error occurs.
+type DeprecatedFlag struct {
+	Name string
+}
+
+func (f *DeprecatedFlag) Set(_ string) error {
+	return fmt.Errorf(`flag "-%s" is no longer supported.`, f.Name)
+}
+
+func (f *DeprecatedFlag) String() string {
+	return ""
+}
+
+// IgnoredFlag encapsulates a flag that may have been previously valid but is
+// now ignored. If an IgnoredFlag is set, a warning is printed and
+// operation continues.
+type IgnoredFlag struct {
+	Name string
+}
+
+// IsBoolFlag is defined to allow the flag to be defined without an argument
+func (f *IgnoredFlag) IsBoolFlag() bool {
+	return true
+}
+
+func (f *IgnoredFlag) Set(s string) error {
+	plog.Warningf(`flag "-%s" is no longer supported - ignoring.`, f.Name)
+	return nil
+}
+
+func (f *IgnoredFlag) String() string {
+	return ""
+}
+
+// SetFlagsFromEnv parses all registered flags in the given flagset,
+// and if they are not already set it attempts to set their values from
+// environment variables. Environment variables take the name of the flag but
+// are UPPERCASE, have the given prefix, and have any dashes replaced by
+// underscores - for example: some-flag => ETCD_SOME_FLAG
+func SetFlagsFromEnv(prefix string, fs *flag.FlagSet) error {
+	var err error
+	alreadySet := make(map[string]bool)
+	fs.Visit(func(f *flag.Flag) {
+		alreadySet[FlagToEnv(prefix, f.Name)] = true
+	})
+	usedEnvKey := make(map[string]bool)
+	fs.VisitAll(func(f *flag.Flag) {
+		if serr := setFlagFromEnv(fs, prefix, f.Name, usedEnvKey, alreadySet, true); serr != nil {
+			err = serr
+		}
+	})
+	verifyEnv(prefix, usedEnvKey, alreadySet)
+	return err
+}
+
+// SetPflagsFromEnv is similar to SetFlagsFromEnv. However, the accepted flagset type is pflag.FlagSet
+// and it does not do any logging.
+func SetPflagsFromEnv(prefix string, fs *pflag.FlagSet) error {
+	var err error
+	alreadySet := make(map[string]bool)
+	usedEnvKey := make(map[string]bool)
+	fs.VisitAll(func(f *pflag.Flag) {
+		if f.Changed {
+			alreadySet[FlagToEnv(prefix, f.Name)] = true
+		}
+		if serr := setFlagFromEnv(fs, prefix, f.Name, usedEnvKey, alreadySet, false); serr != nil {
+			err = serr
+		}
+	})
+	verifyEnv(prefix, usedEnvKey, alreadySet)
+	return err
+}
+
+// FlagToEnv converts flag string to upper-case environment variable key string.
+func FlagToEnv(prefix, name string) string {
+	return prefix + "_" + strings.ToUpper(strings.Replace(name, "-", "_", -1))
+}
+
+func verifyEnv(prefix string, usedEnvKey, alreadySet map[string]bool) {
+	for _, env := range os.Environ() {
+		kv := strings.SplitN(env, "=", 2)
+		if len(kv) != 2 {
+			plog.Warningf("found invalid env %s", env)
+		}
+		if usedEnvKey[kv[0]] {
+			continue
+		}
+		if alreadySet[kv[0]] {
+			// TODO: exit with error in v3.4
+			plog.Warningf("recognized environment variable %s, but unused: shadowed by corresponding flag", kv[0])
+			continue
+		}
+		if strings.HasPrefix(env, prefix+"_") {
+			plog.Warningf("unrecognized environment variable %s", env)
+		}
+	}
+}
+
+type flagSetter interface {
+	Set(fk string, fv string) error
+}
+
+func setFlagFromEnv(fs flagSetter, prefix, fname string, usedEnvKey, alreadySet map[string]bool, log bool) error {
+	key := FlagToEnv(prefix, fname)
+	if !alreadySet[key] {
+		val := os.Getenv(key)
+		if val != "" {
+			usedEnvKey[key] = true
+			if serr := fs.Set(fname, val); serr != nil {
+				return fmt.Errorf("invalid value %q for %s: %v", val, key, serr)
+			}
+			if log {
+				plog.Infof("recognized and used environment variable %s=%s", key, val)
+			}
+		}
+	}
+	return nil
+}
+
+// URLsFromFlag returns a slice of URLs from the given flag.
+func URLsFromFlag(fs *flag.FlagSet, urlsFlagName string) []url.URL {
+	return []url.URL(*fs.Lookup(urlsFlagName).Value.(*URLsValue))
+}
+
+func IsSet(fs *flag.FlagSet, name string) bool {
+	set := false
+	fs.Visit(func(f *flag.Flag) {
+		if f.Name == name {
+			set = true
+		}
+	})
+	return set
+}
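+
+// Illustrative usage (editor's sketch, not part of the vendored source):
+// let ETCD_SOME_FLAG fill in --some-flag when it is absent from the
+// command line; the flag name is hypothetical.
+//
+//	fs := flag.NewFlagSet("example", flag.ExitOnError)
+//	someFlag := fs.String("some-flag", "", "an example flag")
+//	fs.Parse(os.Args[1:])
+//	if err := SetFlagsFromEnv("ETCD", fs); err != nil {
+//		plog.Fatalf("%v", err)
+//	}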
diff --git a/vendor/github.com/coreos/etcd/pkg/flags/strings.go b/vendor/github.com/coreos/etcd/pkg/flags/strings.go
new file mode 100644
index 0000000..89bdf95
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/flags/strings.go
@@ -0,0 +1,85 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package flags
+
+import (
+	"errors"
+	"flag"
+	"sort"
+	"strings"
+)
+
+// NewStringsFlag creates a new string flag for which any one of the given
+// strings is a valid value, and any other value is an error.
+//
+// valids[0] will be the default value. The caller must ensure len(valids) != 0
+// or NewStringsFlag will panic.
+func NewStringsFlag(valids ...string) *StringsFlag {
+	return &StringsFlag{Values: valids, val: valids[0]}
+}
+
+// StringsFlag implements the flag.Value interface.
+type StringsFlag struct {
+	Values []string
+	val    string
+}
+
+// Set verifies the argument to be a valid member of the allowed values
+// before setting the underlying flag value.
+func (ss *StringsFlag) Set(s string) error {
+	for _, v := range ss.Values {
+		if s == v {
+			ss.val = s
+			return nil
+		}
+	}
+	return errors.New("invalid value")
+}
+
+// String returns the set value (if any) of the StringsFlag
+func (ss *StringsFlag) String() string {
+	return ss.val
+}
+
+// StringsValueV2 wraps "sort.StringSlice".
+type StringsValueV2 sort.StringSlice
+
+// Set parses a comma-separated list of strings from the command line.
+// Implements "flag.Value" interface.
+func (ss *StringsValueV2) Set(s string) error {
+	*ss = strings.Split(s, ",")
+	return nil
+}
+
+// String implements "flag.Value" interface.
+func (ss *StringsValueV2) String() string { return strings.Join(*ss, ",") }
+
+// NewStringsValueV2 returns a StringsValueV2, a string slice implementing
+// the "flag.Value" interface. The given value must be comma-separated.
+func NewStringsValueV2(s string) (ss *StringsValueV2) {
+	if s == "" {
+		return &StringsValueV2{}
+	}
+	ss = new(StringsValueV2)
+	if err := ss.Set(s); err != nil {
+		plog.Panicf("new StringsValueV2 should never fail: %v", err)
+	}
+	return ss
+}
+
+// StringsFromFlagV2 returns a string slice from the flag.
+func StringsFromFlagV2(fs *flag.FlagSet, flagName string) []string {
+	return []string(*fs.Lookup(flagName).Value.(*StringsValueV2))
+}
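+
+// Illustrative sketch (not part of the upstream etcd source): typical use of
+// the flag types above. The flag names and values are examples.
+//
+//	proxy := NewStringsFlag("off", "readonly", "on")
+//	_ = proxy.Set("on")   // ok
+//	err := proxy.Set("x") // "invalid value"
+//
+//	fs := flag.NewFlagSet("app", flag.ContinueOnError)
+//	fs.Var(NewStringsValueV2("a,b"), "endpoints", "comma-separated endpoints")
+//	// After parsing "--endpoints=x,y,z", StringsFromFlagV2(fs, "endpoints")
+//	// returns []string{"x", "y", "z"}.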
diff --git a/vendor/github.com/coreos/etcd/pkg/flags/urls.go b/vendor/github.com/coreos/etcd/pkg/flags/urls.go
new file mode 100644
index 0000000..6383d7e
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/flags/urls.go
@@ -0,0 +1,52 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package flags
+
+import (
+	"strings"
+
+	"github.com/coreos/etcd/pkg/types"
+)
+
+type URLsValue types.URLs
+
+// Set parses a command line set of URLs formatted like:
+// http://127.0.0.1:2380,http://10.1.1.2:80
+func (us *URLsValue) Set(s string) error {
+	strs := strings.Split(s, ",")
+	nus, err := types.NewURLs(strs)
+	if err != nil {
+		return err
+	}
+
+	*us = URLsValue(nus)
+	return nil
+}
+
+func (us *URLsValue) String() string {
+	all := make([]string, len(*us))
+	for i, u := range *us {
+		all[i] = u.String()
+	}
+	return strings.Join(all, ",")
+}
+
+func NewURLsValue(init string) *URLsValue {
+	v := &URLsValue{}
+	if err := v.Set(init); err != nil {
+		plog.Panicf("new URLsValue should never fail: %v", err)
+	}
+	return v
+}
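+
+// Illustrative sketch (not part of the upstream etcd source): URLsValue as a
+// flag value. The flag name and URL are example values.
+//
+//	fs := flag.NewFlagSet("app", flag.ContinueOnError)
+//	fs.Var(NewURLsValue("http://127.0.0.1:2380"), "listen-peer-urls",
+//		"comma-separated peer URLs")
+//	// After parsing, URLsFromFlag(fs, "listen-peer-urls") returns the
+//	// []url.URL parsed from the flag.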
diff --git a/vendor/github.com/coreos/etcd/pkg/httputil/httputil.go b/vendor/github.com/coreos/etcd/pkg/httputil/httputil.go
new file mode 100644
index 0000000..09f44e7
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/httputil/httputil.go
@@ -0,0 +1,22 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// borrowed from golang/net/context/ctxhttp/cancelreq.go
+
+// Package httputil provides HTTP utility functions.
+package httputil
+
+import (
+	"io"
+	"io/ioutil"
+	"net/http"
+)
+
+// GracefulClose drains http.Response.Body until it hits EOF
+// and closes it. This prevents the underlying TCP/TLS connection
+// from closing, keeping it available for reuse.
+func GracefulClose(resp *http.Response) {
+	io.Copy(ioutil.Discard, resp.Body)
+	resp.Body.Close()
+}
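+
+// Illustrative sketch (not part of the upstream etcd source): draining a
+// response before closing so the keep-alive connection can be reused.
+//
+//	resp, err := http.Get("http://example.com/")
+//	if err == nil {
+//		// ... read what is needed from resp.Body ...
+//		GracefulClose(resp)
+//	}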
diff --git a/vendor/github.com/coreos/etcd/pkg/idutil/id.go b/vendor/github.com/coreos/etcd/pkg/idutil/id.go
new file mode 100644
index 0000000..2da2106
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/idutil/id.go
@@ -0,0 +1,78 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package idutil implements utility functions for generating unique,
+// randomized ids.
+package idutil
+
+import (
+	"math"
+	"sync"
+	"time"
+)
+
+const (
+	tsLen     = 5 * 8
+	cntLen    = 8
+	suffixLen = tsLen + cntLen
+)
+
+// Generator generates unique identifiers based on counters, timestamps, and
+// a node member ID.
+//
+// The initial id is in this format:
+// High order 2 bytes are from memberID, next 5 bytes are from timestamp,
+// and low order one byte is a counter.
+// | prefix   | suffix              |
+// | 2 bytes  | 5 bytes   | 1 byte  |
+// | memberID | timestamp | cnt     |
+//
+// The 5-byte timestamp differs across restarts as long as the machine
+// restarts more than 1 ms and less than 35 years after the previous start.
+//
+// The generator increments the suffix to produce the next id.
+// The count field may overflow into the timestamp field; this is
+// intentional, as it extends the event window to 2^56. Ids generated
+// after a restart remain unique because etcd throughput is far below
+// 256 req/ms (250k reqs/second).
+type Generator struct {
+	mu sync.Mutex
+	// high order 2 bytes
+	prefix uint64
+	// low order 6 bytes
+	suffix uint64
+}
+
+func NewGenerator(memberID uint16, now time.Time) *Generator {
+	prefix := uint64(memberID) << suffixLen
+	unixMilli := uint64(now.UnixNano()) / uint64(time.Millisecond/time.Nanosecond)
+	suffix := lowbit(unixMilli, tsLen) << cntLen
+	return &Generator{
+		prefix: prefix,
+		suffix: suffix,
+	}
+}
+
+// Next generates a unique id.
+func (g *Generator) Next() uint64 {
+	g.mu.Lock()
+	defer g.mu.Unlock()
+	g.suffix++
+	id := g.prefix | lowbit(g.suffix, suffixLen)
+	return id
+}
+
+func lowbit(x uint64, n uint) uint64 {
+	return x & (math.MaxUint64 >> (64 - n))
+}
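+
+// Illustrative sketch (not part of the upstream etcd source): generating
+// ids. The member ID is an example value.
+//
+//	gen := NewGenerator(0x1234, time.Now())
+//	id1 := gen.Next() // member ID 0x1234 in the high 2 bytes
+//	id2 := gen.Next() // id2 == id1+1 until the low 6 bytes overflow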
diff --git a/vendor/github.com/coreos/etcd/pkg/ioutil/pagewriter.go b/vendor/github.com/coreos/etcd/pkg/ioutil/pagewriter.go
new file mode 100644
index 0000000..72de159
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/ioutil/pagewriter.go
@@ -0,0 +1,106 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ioutil
+
+import (
+	"io"
+)
+
+var defaultBufferBytes = 128 * 1024
+
+// PageWriter implements the io.Writer interface so that writes to the
+// underlying writer are either in page-sized chunks or the result of a flush.
+type PageWriter struct {
+	w io.Writer
+	// pageOffset tracks the page offset of the base of the buffer
+	pageOffset int
+	// pageBytes is the number of bytes per page
+	pageBytes int
+	// bufferedBytes counts the number of bytes pending for write in the buffer
+	bufferedBytes int
+	// buf holds the write buffer
+	buf []byte
+	// bufWatermarkBytes is the number of bytes the buffer can hold before it needs
+	// to be flushed. It is less than len(buf) so there is space for slack writes
+	// to bring the writer to page alignment.
+	bufWatermarkBytes int
+}
+
+// NewPageWriter creates a new PageWriter. pageBytes is the number of bytes
+// to write per page. pageOffset is the starting offset within the io.Writer.
+func NewPageWriter(w io.Writer, pageBytes, pageOffset int) *PageWriter {
+	return &PageWriter{
+		w:                 w,
+		pageOffset:        pageOffset,
+		pageBytes:         pageBytes,
+		buf:               make([]byte, defaultBufferBytes+pageBytes),
+		bufWatermarkBytes: defaultBufferBytes,
+	}
+}
+
+func (pw *PageWriter) Write(p []byte) (n int, err error) {
+	if len(p)+pw.bufferedBytes <= pw.bufWatermarkBytes {
+		// no overflow
+		copy(pw.buf[pw.bufferedBytes:], p)
+		pw.bufferedBytes += len(p)
+		return len(p), nil
+	}
+	// complete the slack page in the buffer if unaligned
+	slack := pw.pageBytes - ((pw.pageOffset + pw.bufferedBytes) % pw.pageBytes)
+	if slack != pw.pageBytes {
+		partial := slack > len(p)
+		if partial {
+			// not enough data to complete the slack page
+			slack = len(p)
+		}
+		// special case: writing to slack page in buffer
+		copy(pw.buf[pw.bufferedBytes:], p[:slack])
+		pw.bufferedBytes += slack
+		n = slack
+		p = p[slack:]
+		if partial {
+			// avoid forcing an unaligned flush
+			return n, nil
+		}
+	}
+	// buffer contents are now page-aligned; clear out
+	if err = pw.Flush(); err != nil {
+		return n, err
+	}
+	// directly write all complete pages without copying
+	if len(p) > pw.pageBytes {
+		pages := len(p) / pw.pageBytes
+		c, werr := pw.w.Write(p[:pages*pw.pageBytes])
+		n += c
+		if werr != nil {
+			return n, werr
+		}
+		p = p[pages*pw.pageBytes:]
+	}
+	// write remaining tail to buffer
+	c, werr := pw.Write(p)
+	n += c
+	return n, werr
+}
+
+func (pw *PageWriter) Flush() error {
+	if pw.bufferedBytes == 0 {
+		return nil
+	}
+	_, err := pw.w.Write(pw.buf[:pw.bufferedBytes])
+	pw.pageOffset = (pw.pageOffset + pw.bufferedBytes) % pw.pageBytes
+	pw.bufferedBytes = 0
+	return err
+}
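+
+// Illustrative sketch (not part of the upstream etcd source): page-aligned
+// writes to a file. The page size and file name are example values.
+//
+//	f, _ := os.Create("wal.tmp")
+//	pw := NewPageWriter(f, 4096, 0)
+//	data := make([]byte, 10000)
+//	pw.Write(data) // buffered; overflow past the watermark is flushed page-aligned
+//	pw.Flush()     // force out any buffered tail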
diff --git a/vendor/github.com/coreos/etcd/pkg/ioutil/readcloser.go b/vendor/github.com/coreos/etcd/pkg/ioutil/readcloser.go
new file mode 100644
index 0000000..d3efcfe
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/ioutil/readcloser.go
@@ -0,0 +1,66 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ioutil
+
+import (
+	"fmt"
+	"io"
+)
+
+// ReaderAndCloser implements io.ReadCloser interface by combining
+// reader and closer together.
+type ReaderAndCloser struct {
+	io.Reader
+	io.Closer
+}
+
+var (
+	ErrShortRead = fmt.Errorf("ioutil: short read")
+	ErrExpectEOF = fmt.Errorf("ioutil: expect EOF")
+)
+
+// NewExactReadCloser returns a ReadCloser that returns errors if the underlying
+// reader does not read back exactly the requested number of bytes.
+func NewExactReadCloser(rc io.ReadCloser, totalBytes int64) io.ReadCloser {
+	return &exactReadCloser{rc: rc, totalBytes: totalBytes}
+}
+
+type exactReadCloser struct {
+	rc         io.ReadCloser
+	br         int64
+	totalBytes int64
+}
+
+func (e *exactReadCloser) Read(p []byte) (int, error) {
+	n, err := e.rc.Read(p)
+	e.br += int64(n)
+	if e.br > e.totalBytes {
+		return 0, ErrExpectEOF
+	}
+	if e.br < e.totalBytes && n == 0 {
+		return 0, ErrShortRead
+	}
+	return n, err
+}
+
+func (e *exactReadCloser) Close() error {
+	if err := e.rc.Close(); err != nil {
+		return err
+	}
+	if e.br < e.totalBytes {
+		return ErrShortRead
+	}
+	return nil
+}
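+
+// Illustrative sketch (not part of the upstream etcd source): enforcing an
+// exact body length when the expected size is known in advance, e.g. from
+// the Content-Length header of a hypothetical *http.Response resp.
+//
+//	rc := NewExactReadCloser(resp.Body, resp.ContentLength)
+//	// Reading past ContentLength bytes yields ErrExpectEOF; closing before
+//	// ContentLength bytes were read yields ErrShortRead.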
diff --git a/vendor/github.com/coreos/etcd/pkg/ioutil/reader.go b/vendor/github.com/coreos/etcd/pkg/ioutil/reader.go
new file mode 100644
index 0000000..0703ed4
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/ioutil/reader.go
@@ -0,0 +1,40 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package ioutil implements I/O utility functions.
+package ioutil
+
+import "io"
+
+// NewLimitedBufferReader returns a reader that reads from the given reader
+// but limits the amount of data returned by each Read call to at most n bytes.
+func NewLimitedBufferReader(r io.Reader, n int) io.Reader {
+	return &limitedBufferReader{
+		r: r,
+		n: n,
+	}
+}
+
+type limitedBufferReader struct {
+	r io.Reader
+	n int
+}
+
+func (r *limitedBufferReader) Read(p []byte) (n int, err error) {
+	np := p
+	if len(np) > r.n {
+		np = np[:r.n]
+	}
+	return r.r.Read(np)
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/ioutil/util.go b/vendor/github.com/coreos/etcd/pkg/ioutil/util.go
new file mode 100644
index 0000000..192ad88
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/ioutil/util.go
@@ -0,0 +1,43 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ioutil
+
+import (
+	"io"
+	"os"
+
+	"github.com/coreos/etcd/pkg/fileutil"
+)
+
+// WriteAndSyncFile behaves just like ioutil.WriteFile in the standard library,
+// but calls Sync before closing the file. WriteAndSyncFile guarantees the data
+// is synced if there is no error returned.
+func WriteAndSyncFile(filename string, data []byte, perm os.FileMode) error {
+	f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
+	if err != nil {
+		return err
+	}
+	n, err := f.Write(data)
+	if err == nil && n < len(data) {
+		err = io.ErrShortWrite
+	}
+	if err == nil {
+		err = fileutil.Fsync(f)
+	}
+	if err1 := f.Close(); err == nil {
+		err = err1
+	}
+	return err
+}
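+
+// Illustrative sketch (not part of the upstream etcd source): writing a file
+// durably. The path and mode are example values.
+//
+//	if err := WriteAndSyncFile("/tmp/state", data, 0600); err != nil {
+//		// the data may not have reached stable storage
+//	}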
diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/merge_logger.go b/vendor/github.com/coreos/etcd/pkg/logutil/merge_logger.go
new file mode 100644
index 0000000..cc750f4
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/logutil/merge_logger.go
@@ -0,0 +1,195 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package logutil includes utilities to facilitate logging.
+package logutil
+
+import (
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/coreos/pkg/capnslog"
+)
+
+var (
+	defaultMergePeriod     = time.Second
+	defaultTimeOutputScale = 10 * time.Millisecond
+
+	outputInterval = time.Second
+)
+
+// line represents a log line that can be printed out
+// through capnslog.PackageLogger.
+type line struct {
+	level capnslog.LogLevel
+	str   string
+}
+
+func (l line) append(s string) line {
+	return line{
+		level: l.level,
+		str:   l.str + " " + s,
+	}
+}
+
+// status represents the merge status of a line.
+type status struct {
+	period time.Duration
+
+	start time.Time // start time of latest merge period
+	count int       // number of merged lines from starting
+}
+
+func (s *status) isInMergePeriod(now time.Time) bool {
+	return s.period == 0 || s.start.Add(s.period).After(now)
+}
+
+func (s *status) isEmpty() bool { return s.count == 0 }
+
+func (s *status) summary(now time.Time) string {
+	ts := s.start.Round(defaultTimeOutputScale)
+	took := now.Round(defaultTimeOutputScale).Sub(ts)
+	return fmt.Sprintf("[merged %d repeated lines in %s]", s.count, took)
+}
+
+func (s *status) reset(now time.Time) {
+	s.start = now
+	s.count = 0
+}
+
+// MergeLogger supports merge logging, which merges repeated log lines
+// and prints summary log lines instead.
+//
+// MergeLogger prints a line the first time it appears. Repeats of the same
+// line that arrive within defaultMergePeriod are held and reported in a
+// summary line at the end of the period. Merging stops once the line no
+// longer appears within defaultMergePeriod.
+type MergeLogger struct {
+	*capnslog.PackageLogger
+
+	mu      sync.Mutex // protects statusm
+	statusm map[line]*status
+}
+
+func NewMergeLogger(logger *capnslog.PackageLogger) *MergeLogger {
+	l := &MergeLogger{
+		PackageLogger: logger,
+		statusm:       make(map[line]*status),
+	}
+	go l.outputLoop()
+	return l
+}
+
+func (l *MergeLogger) MergeInfo(entries ...interface{}) {
+	l.merge(line{
+		level: capnslog.INFO,
+		str:   fmt.Sprint(entries...),
+	})
+}
+
+func (l *MergeLogger) MergeInfof(format string, args ...interface{}) {
+	l.merge(line{
+		level: capnslog.INFO,
+		str:   fmt.Sprintf(format, args...),
+	})
+}
+
+func (l *MergeLogger) MergeNotice(entries ...interface{}) {
+	l.merge(line{
+		level: capnslog.NOTICE,
+		str:   fmt.Sprint(entries...),
+	})
+}
+
+func (l *MergeLogger) MergeNoticef(format string, args ...interface{}) {
+	l.merge(line{
+		level: capnslog.NOTICE,
+		str:   fmt.Sprintf(format, args...),
+	})
+}
+
+func (l *MergeLogger) MergeWarning(entries ...interface{}) {
+	l.merge(line{
+		level: capnslog.WARNING,
+		str:   fmt.Sprint(entries...),
+	})
+}
+
+func (l *MergeLogger) MergeWarningf(format string, args ...interface{}) {
+	l.merge(line{
+		level: capnslog.WARNING,
+		str:   fmt.Sprintf(format, args...),
+	})
+}
+
+func (l *MergeLogger) MergeError(entries ...interface{}) {
+	l.merge(line{
+		level: capnslog.ERROR,
+		str:   fmt.Sprint(entries...),
+	})
+}
+
+func (l *MergeLogger) MergeErrorf(format string, args ...interface{}) {
+	l.merge(line{
+		level: capnslog.ERROR,
+		str:   fmt.Sprintf(format, args...),
+	})
+}
+
+func (l *MergeLogger) merge(ln line) {
+	l.mu.Lock()
+
+	// increase count if the logger is merging the line
+	if status, ok := l.statusm[ln]; ok {
+		status.count++
+		l.mu.Unlock()
+		return
+	}
+
+	// initialize status of the line
+	l.statusm[ln] = &status{
+		period: defaultMergePeriod,
+		start:  time.Now(),
+	}
+	// release the lock before IO operation
+	l.mu.Unlock()
+	// print out the line at its first time
+	l.PackageLogger.Logf(ln.level, ln.str)
+}
+
+func (l *MergeLogger) outputLoop() {
+	for now := range time.Tick(outputInterval) {
+		var outputs []line
+
+		l.mu.Lock()
+		for ln, status := range l.statusm {
+			if status.isInMergePeriod(now) {
+				continue
+			}
+			if status.isEmpty() {
+				delete(l.statusm, ln)
+				continue
+			}
+			outputs = append(outputs, ln.append(status.summary(now)))
+			status.reset(now)
+		}
+		l.mu.Unlock()
+
+		for _, o := range outputs {
+			l.PackageLogger.Logf(o.level, o.str)
+		}
+	}
+}
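+
+// Illustrative sketch (not part of the upstream etcd source): collapsing a
+// noisy, repeated warning into periodic summaries.
+//
+//	mlog := NewMergeLogger(capnslog.NewPackageLogger("repo", "pkg"))
+//	for i := 0; i < 1000; i++ {
+//		mlog.MergeWarning("peer unreachable")
+//	}
+//	// Prints "peer unreachable" once, then later a summary such as
+//	// "peer unreachable [merged 999 repeated lines in 1s]".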
diff --git a/vendor/github.com/coreos/etcd/pkg/netutil/isolate_linux.go b/vendor/github.com/coreos/etcd/pkg/netutil/isolate_linux.go
new file mode 100644
index 0000000..418580a
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/netutil/isolate_linux.go
@@ -0,0 +1,82 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package netutil
+
+import (
+	"fmt"
+	"os/exec"
+)
+
+// DropPort drops all TCP packets that are received on or sent to the given port.
+func DropPort(port int) error {
+	cmdStr := fmt.Sprintf("sudo iptables -A OUTPUT -p tcp --destination-port %d -j DROP", port)
+	if _, err := exec.Command("/bin/sh", "-c", cmdStr).Output(); err != nil {
+		return err
+	}
+	cmdStr = fmt.Sprintf("sudo iptables -A INPUT -p tcp --destination-port %d -j DROP", port)
+	_, err := exec.Command("/bin/sh", "-c", cmdStr).Output()
+	return err
+}
+
+// RecoverPort stops dropping TCP packets at the given port.
+func RecoverPort(port int) error {
+	cmdStr := fmt.Sprintf("sudo iptables -D OUTPUT -p tcp --destination-port %d -j DROP", port)
+	if _, err := exec.Command("/bin/sh", "-c", cmdStr).Output(); err != nil {
+		return err
+	}
+	cmdStr = fmt.Sprintf("sudo iptables -D INPUT -p tcp --destination-port %d -j DROP", port)
+	_, err := exec.Command("/bin/sh", "-c", cmdStr).Output()
+	return err
+}
+
+// SetLatency adds ms milliseconds of latency with a random variation of rv milliseconds.
+func SetLatency(ms, rv int) error {
+	ifces, err := GetDefaultInterfaces()
+	if err != nil {
+		return err
+	}
+
+	if rv > ms {
+		rv = 1
+	}
+	for ifce := range ifces {
+		cmdStr := fmt.Sprintf("sudo tc qdisc add dev %s root netem delay %dms %dms distribution normal", ifce, ms, rv)
+		_, err = exec.Command("/bin/sh", "-c", cmdStr).Output()
+		if err != nil {
+			// the rule has already been added. Overwrite it.
+			cmdStr = fmt.Sprintf("sudo tc qdisc change dev %s root netem delay %dms %dms distribution normal", ifce, ms, rv)
+			_, err = exec.Command("/bin/sh", "-c", cmdStr).Output()
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// RemoveLatency resets latency configurations.
+func RemoveLatency() error {
+	ifces, err := GetDefaultInterfaces()
+	if err != nil {
+		return err
+	}
+	for ifce := range ifces {
+		_, err = exec.Command("/bin/sh", "-c", fmt.Sprintf("sudo tc qdisc del dev %s root netem", ifce)).Output()
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/netutil/isolate_stub.go b/vendor/github.com/coreos/etcd/pkg/netutil/isolate_stub.go
new file mode 100644
index 0000000..7f4c3e6
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/netutil/isolate_stub.go
@@ -0,0 +1,25 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !linux
+
+package netutil
+
+func DropPort(port int) error { return nil }
+
+func RecoverPort(port int) error { return nil }
+
+func SetLatency(ms, rv int) error { return nil }
+
+func RemoveLatency() error { return nil }
diff --git a/vendor/github.com/coreos/etcd/pkg/netutil/netutil.go b/vendor/github.com/coreos/etcd/pkg/netutil/netutil.go
new file mode 100644
index 0000000..e3db8c5
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/netutil/netutil.go
@@ -0,0 +1,187 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package netutil implements network-related utility functions.
+package netutil
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"net/url"
+	"reflect"
+	"sort"
+	"time"
+
+	"github.com/coreos/etcd/pkg/types"
+	"github.com/coreos/pkg/capnslog"
+)
+
+var (
+	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "pkg/netutil")
+
+	// indirection for testing
+	resolveTCPAddr = resolveTCPAddrDefault
+)
+
+const retryInterval = time.Second
+
+// taken from Go's ResolveTCPAddr code, but uses a configurable context
+func resolveTCPAddrDefault(ctx context.Context, addr string) (*net.TCPAddr, error) {
+	host, port, serr := net.SplitHostPort(addr)
+	if serr != nil {
+		return nil, serr
+	}
+	portnum, perr := net.DefaultResolver.LookupPort(ctx, "tcp", port)
+	if perr != nil {
+		return nil, perr
+	}
+
+	var ips []net.IPAddr
+	if ip := net.ParseIP(host); ip != nil {
+		ips = []net.IPAddr{{IP: ip}}
+	} else {
+		// Try as a DNS name.
+		ipss, err := net.DefaultResolver.LookupIPAddr(ctx, host)
+		if err != nil {
+			return nil, err
+		}
+		ips = ipss
+	}
+	// randomize?
+	ip := ips[0]
+	return &net.TCPAddr{IP: ip.IP, Port: portnum, Zone: ip.Zone}, nil
+}
+
+// resolveTCPAddrs is a convenience wrapper for net.ResolveTCPAddr.
+// resolveTCPAddrs returns a new set of url.URLs, in which all DNS hostnames
+// are resolved.
+func resolveTCPAddrs(ctx context.Context, urls [][]url.URL) ([][]url.URL, error) {
+	newurls := make([][]url.URL, 0)
+	for _, us := range urls {
+		nus := make([]url.URL, len(us))
+		for i, u := range us {
+			nu, err := url.Parse(u.String())
+			if err != nil {
+				return nil, fmt.Errorf("failed to parse %q (%v)", u.String(), err)
+			}
+			nus[i] = *nu
+		}
+		for i, u := range nus {
+			h, err := resolveURL(ctx, u)
+			if err != nil {
+				return nil, fmt.Errorf("failed to resolve %q (%v)", u.String(), err)
+			}
+			if h != "" {
+				nus[i].Host = h
+			}
+		}
+		newurls = append(newurls, nus)
+	}
+	return newurls, nil
+}
+
+func resolveURL(ctx context.Context, u url.URL) (string, error) {
+	if u.Scheme == "unix" || u.Scheme == "unixs" {
+		// unix sockets don't resolve over TCP
+		return "", nil
+	}
+	host, _, err := net.SplitHostPort(u.Host)
+	if err != nil {
+		plog.Errorf("could not parse url %s during tcp resolving", u.Host)
+		return "", err
+	}
+	if host == "localhost" || net.ParseIP(host) != nil {
+		return "", nil
+	}
+	for ctx.Err() == nil {
+		tcpAddr, err := resolveTCPAddr(ctx, u.Host)
+		if err == nil {
+			plog.Infof("resolving %s to %s", u.Host, tcpAddr.String())
+			return tcpAddr.String(), nil
+		}
+		plog.Warningf("failed resolving host %s (%v); retrying in %v", u.Host, err, retryInterval)
+		select {
+		case <-ctx.Done():
+			plog.Errorf("could not resolve host %s", u.Host)
+			return "", err
+		case <-time.After(retryInterval):
+		}
+	}
+	return "", ctx.Err()
+}
+
+// urlsEqual checks equality of url.URLs between two slices.
+// The check passes even if a URL is given as a hostname on one side and as
+// its resolved IP address on the other.
+func urlsEqual(ctx context.Context, a []url.URL, b []url.URL) (bool, error) {
+	if len(a) != len(b) {
+		return false, fmt.Errorf("len(%q) != len(%q)", urlsToStrings(a), urlsToStrings(b))
+	}
+	urls, err := resolveTCPAddrs(ctx, [][]url.URL{a, b})
+	if err != nil {
+		return false, err
+	}
+	preva, prevb := a, b
+	a, b = urls[0], urls[1]
+	sort.Sort(types.URLs(a))
+	sort.Sort(types.URLs(b))
+	for i := range a {
+		if !reflect.DeepEqual(a[i], b[i]) {
+			return false, fmt.Errorf("%q(resolved from %q) != %q(resolved from %q)",
+				a[i].String(), preva[i].String(),
+				b[i].String(), prevb[i].String(),
+			)
+		}
+	}
+	return true, nil
+}
+
+// URLStringsEqual returns true if the given URLs are valid
+// and resolve to the same IP addresses. Otherwise, it returns
+// false and an error, if any.
+func URLStringsEqual(ctx context.Context, a []string, b []string) (bool, error) {
+	if len(a) != len(b) {
+		return false, fmt.Errorf("len(%q) != len(%q)", a, b)
+	}
+	urlsA := make([]url.URL, 0)
+	for _, str := range a {
+		u, err := url.Parse(str)
+		if err != nil {
+			return false, fmt.Errorf("failed to parse %q", str)
+		}
+		urlsA = append(urlsA, *u)
+	}
+	urlsB := make([]url.URL, 0)
+	for _, str := range b {
+		u, err := url.Parse(str)
+		if err != nil {
+			return false, fmt.Errorf("failed to parse %q", str)
+		}
+		urlsB = append(urlsB, *u)
+	}
+	return urlsEqual(ctx, urlsA, urlsB)
+}
+
+func urlsToStrings(us []url.URL) []string {
+	rs := make([]string, len(us))
+	for i := range us {
+		rs[i] = us[i].String()
+	}
+	return rs
+}
+
+func IsNetworkTimeoutError(err error) bool {
+	nerr, ok := err.(net.Error)
+	return ok && nerr.Timeout()
+}
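+
+// Illustrative sketch (not part of the upstream etcd source): comparing
+// configured and advertised peer URLs with hostname resolution. The URLs
+// are example values.
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//	defer cancel()
+//	ok, err := URLStringsEqual(ctx,
+//		[]string{"http://peer.example.com:2380"},
+//		[]string{"http://10.0.0.5:2380"})
+//	// ok is true when peer.example.com resolves to 10.0.0.5.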
diff --git a/vendor/github.com/coreos/etcd/pkg/netutil/routes.go b/vendor/github.com/coreos/etcd/pkg/netutil/routes.go
new file mode 100644
index 0000000..3eb6a19
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/netutil/routes.go
@@ -0,0 +1,33 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !linux
+
+package netutil
+
+import (
+	"fmt"
+	"runtime"
+)
+
+// GetDefaultHost fetches a resolvable name that corresponds
+// to the machine's default routable interface.
+func GetDefaultHost() (string, error) {
+	return "", fmt.Errorf("default host not supported on %s_%s", runtime.GOOS, runtime.GOARCH)
+}
+
+// GetDefaultInterfaces fetches the device name of default routable interface.
+func GetDefaultInterfaces() (map[string]uint8, error) {
+	return nil, fmt.Errorf("default host not supported on %s_%s", runtime.GOOS, runtime.GOARCH)
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/netutil/routes_linux.go b/vendor/github.com/coreos/etcd/pkg/netutil/routes_linux.go
new file mode 100644
index 0000000..797baeb
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/netutil/routes_linux.go
@@ -0,0 +1,250 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build linux
+
+package netutil
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"net"
+	"sort"
+	"syscall"
+
+	"github.com/coreos/etcd/pkg/cpuutil"
+)
+
+var errNoDefaultRoute = fmt.Errorf("could not find default route")
+var errNoDefaultHost = fmt.Errorf("could not find default host")
+var errNoDefaultInterface = fmt.Errorf("could not find default interface")
+
+// GetDefaultHost obtains the first IP address of the machine from the routing table and returns it as a string.
+// An IPv4 address is preferred to an IPv6 address for backward compatibility.
+func GetDefaultHost() (string, error) {
+	rmsgs, rerr := getDefaultRoutes()
+	if rerr != nil {
+		return "", rerr
+	}
+
+	// prioritize IPv4
+	if rmsg, ok := rmsgs[syscall.AF_INET]; ok {
+		if host, err := chooseHost(syscall.AF_INET, rmsg); host != "" || err != nil {
+			return host, err
+		}
+		delete(rmsgs, syscall.AF_INET)
+	}
+
+	// sort so choice is deterministic
+	var families []int
+	for family := range rmsgs {
+		families = append(families, int(family))
+	}
+	sort.Ints(families)
+
+	for _, f := range families {
+		family := uint8(f)
+		if host, err := chooseHost(family, rmsgs[family]); host != "" || err != nil {
+			return host, err
+		}
+	}
+
+	return "", errNoDefaultHost
+}
+
+func chooseHost(family uint8, rmsg *syscall.NetlinkMessage) (string, error) {
+	host, oif, err := parsePREFSRC(rmsg)
+	if host != "" || err != nil {
+		return host, err
+	}
+
+	// prefsrc not detected, fall back to getting address from iface
+	ifmsg, ierr := getIfaceAddr(oif, family)
+	if ierr != nil {
+		return "", ierr
+	}
+
+	attrs, aerr := syscall.ParseNetlinkRouteAttr(ifmsg)
+	if aerr != nil {
+		return "", aerr
+	}
+
+	for _, attr := range attrs {
+		// search for RTA_DST because ipv6 doesn't have RTA_SRC
+		if attr.Attr.Type == syscall.RTA_DST {
+			return net.IP(attr.Value).String(), nil
+		}
+	}
+
+	return "", nil
+}
+
+func getDefaultRoutes() (map[uint8]*syscall.NetlinkMessage, error) {
+	dat, err := syscall.NetlinkRIB(syscall.RTM_GETROUTE, syscall.AF_UNSPEC)
+	if err != nil {
+		return nil, err
+	}
+
+	msgs, msgErr := syscall.ParseNetlinkMessage(dat)
+	if msgErr != nil {
+		return nil, msgErr
+	}
+
+	routes := make(map[uint8]*syscall.NetlinkMessage)
+	rtmsg := syscall.RtMsg{}
+	for _, m := range msgs {
+		if m.Header.Type != syscall.RTM_NEWROUTE {
+			continue
+		}
+		buf := bytes.NewBuffer(m.Data[:syscall.SizeofRtMsg])
+		if rerr := binary.Read(buf, cpuutil.ByteOrder(), &rtmsg); rerr != nil {
+			continue
+		}
+		if rtmsg.Dst_len == 0 && rtmsg.Table == syscall.RT_TABLE_MAIN {
+			// zero-length Dst_len implies default route
+			msg := m
+			routes[rtmsg.Family] = &msg
+		}
+	}
+
+	if len(routes) > 0 {
+		return routes, nil
+	}
+
+	return nil, errNoDefaultRoute
+}
+
+// getIfaceAddr returns an address message for the interface with the given index.
+func getIfaceAddr(idx uint32, family uint8) (*syscall.NetlinkMessage, error) {
+	dat, err := syscall.NetlinkRIB(syscall.RTM_GETADDR, int(family))
+	if err != nil {
+		return nil, err
+	}
+
+	msgs, msgErr := syscall.ParseNetlinkMessage(dat)
+	if msgErr != nil {
+		return nil, msgErr
+	}
+
+	ifaddrmsg := syscall.IfAddrmsg{}
+	for _, m := range msgs {
+		if m.Header.Type != syscall.RTM_NEWADDR {
+			continue
+		}
+		buf := bytes.NewBuffer(m.Data[:syscall.SizeofIfAddrmsg])
+		if rerr := binary.Read(buf, cpuutil.ByteOrder(), &ifaddrmsg); rerr != nil {
+			continue
+		}
+		if ifaddrmsg.Index == idx {
+			return &m, nil
+		}
+	}
+
+	return nil, fmt.Errorf("could not find address for interface index %v", idx)
+}
+
+// getIfaceLink returns link information for the interface with the given index.
+func getIfaceLink(idx uint32) (*syscall.NetlinkMessage, error) {
+	dat, err := syscall.NetlinkRIB(syscall.RTM_GETLINK, syscall.AF_UNSPEC)
+	if err != nil {
+		return nil, err
+	}
+
+	msgs, msgErr := syscall.ParseNetlinkMessage(dat)
+	if msgErr != nil {
+		return nil, msgErr
+	}
+
+	ifinfomsg := syscall.IfInfomsg{}
+	for _, m := range msgs {
+		if m.Header.Type != syscall.RTM_NEWLINK {
+			continue
+		}
+		buf := bytes.NewBuffer(m.Data[:syscall.SizeofIfInfomsg])
+		if rerr := binary.Read(buf, cpuutil.ByteOrder(), &ifinfomsg); rerr != nil {
+			continue
+		}
+		if ifinfomsg.Index == int32(idx) {
+			return &m, nil
+		}
+	}
+
+	return nil, fmt.Errorf("could not find link for interface index %v", idx)
+}
+
+// GetDefaultInterfaces returns a map from the name of each default routable interface to its address families.
+func GetDefaultInterfaces() (map[string]uint8, error) {
+	interfaces := make(map[string]uint8)
+	rmsgs, rerr := getDefaultRoutes()
+	if rerr != nil {
+		return interfaces, rerr
+	}
+
+	for family, rmsg := range rmsgs {
+		_, oif, err := parsePREFSRC(rmsg)
+		if err != nil {
+			return interfaces, err
+		}
+
+		ifmsg, ierr := getIfaceLink(oif)
+		if ierr != nil {
+			return interfaces, ierr
+		}
+
+		attrs, aerr := syscall.ParseNetlinkRouteAttr(ifmsg)
+		if aerr != nil {
+			return interfaces, aerr
+		}
+
+		for _, attr := range attrs {
+			if attr.Attr.Type == syscall.IFLA_IFNAME {
+				// key is an interface name
+				// possible values: 2 - AF_INET, 10 - AF_INET6, 12 - dualstack
+				interfaces[string(attr.Value[:len(attr.Value)-1])] += family
+			}
+		}
+	}
+	if len(interfaces) > 0 {
+		return interfaces, nil
+	}
+	return interfaces, errNoDefaultInterface
+}
+
+// parsePREFSRC returns preferred source address and output interface index (RTA_OIF).
+func parsePREFSRC(m *syscall.NetlinkMessage) (host string, oif uint32, err error) {
+	var attrs []syscall.NetlinkRouteAttr
+	attrs, err = syscall.ParseNetlinkRouteAttr(m)
+	if err != nil {
+		return "", 0, err
+	}
+
+	for _, attr := range attrs {
+		if attr.Attr.Type == syscall.RTA_PREFSRC {
+			host = net.IP(attr.Value).String()
+		}
+		if attr.Attr.Type == syscall.RTA_OIF {
+			oif = cpuutil.ByteOrder().Uint32(attr.Value)
+		}
+		if host != "" && oif != uint32(0) {
+			break
+		}
+	}
+
+	if oif == 0 {
+		err = errNoDefaultRoute
+	}
+	return host, oif, err
+}
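+
+// Illustrative sketch (not part of the upstream etcd source): discovering
+// the address behind the default route, e.g. to build a default listen URL.
+//
+//	host, err := GetDefaultHost()
+//	if err == nil {
+//		// host is the preferred source IP of the default route,
+//		// e.g. "10.0.0.5"
+//	}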
diff --git a/vendor/github.com/coreos/etcd/pkg/osutil/interrupt_unix.go b/vendor/github.com/coreos/etcd/pkg/osutil/interrupt_unix.go
new file mode 100644
index 0000000..b9feaff
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/osutil/interrupt_unix.go
@@ -0,0 +1,80 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !windows,!plan9
+
+package osutil
+
+import (
+	"os"
+	"os/signal"
+	"sync"
+	"syscall"
+)
+
+// InterruptHandler is a function that is called on receiving a
+// SIGTERM or SIGINT signal.
+type InterruptHandler func()
+
+var (
+	interruptRegisterMu, interruptExitMu sync.Mutex
+	// interruptHandlers holds all registered InterruptHandlers in the order
+	// they will be executed.
+	interruptHandlers = []InterruptHandler{}
+)
+
+// RegisterInterruptHandler registers a new InterruptHandler. Handlers registered
+// after interrupt handling was initiated will not be executed.
+func RegisterInterruptHandler(h InterruptHandler) {
+	interruptRegisterMu.Lock()
+	defer interruptRegisterMu.Unlock()
+	interruptHandlers = append(interruptHandlers, h)
+}
+
+// HandleInterrupts calls the handler functions on receiving a SIGINT or SIGTERM.
+func HandleInterrupts() {
+	notifier := make(chan os.Signal, 1)
+	signal.Notify(notifier, syscall.SIGINT, syscall.SIGTERM)
+
+	go func() {
+		sig := <-notifier
+
+		interruptRegisterMu.Lock()
+		ihs := make([]InterruptHandler, len(interruptHandlers))
+		copy(ihs, interruptHandlers)
+		interruptRegisterMu.Unlock()
+
+		interruptExitMu.Lock()
+
+		plog.Noticef("received %v signal, shutting down...", sig)
+
+		for _, h := range ihs {
+			h()
+		}
+		signal.Stop(notifier)
+		pid := syscall.Getpid()
+		// exit directly if it is the "init" process, since the kernel will not help to kill pid 1.
+		if pid == 1 {
+			os.Exit(0)
+		}
+		setDflSignal(sig.(syscall.Signal))
+		syscall.Kill(pid, sig.(syscall.Signal))
+	}()
+}
+
+// Exit relays to os.Exit if no interrupt handlers are running, blocks otherwise.
+func Exit(code int) {
+	interruptExitMu.Lock()
+	os.Exit(code)
+}
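+
+// Illustrative sketch (not part of the upstream etcd source): how a caller
+// might use this package; srv here is a hypothetical server value.
+//
+//	osutil.RegisterInterruptHandler(func() { srv.Close() })
+//	osutil.HandleInterrupts()
+//	// On SIGINT/SIGTERM the handlers run first, then the process is
+//	// re-signaled with the default disposition so it exits with the
+//	// conventional signal status.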
diff --git a/vendor/github.com/coreos/etcd/pkg/osutil/interrupt_windows.go b/vendor/github.com/coreos/etcd/pkg/osutil/interrupt_windows.go
new file mode 100644
index 0000000..013ae88
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/osutil/interrupt_windows.go
@@ -0,0 +1,32 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build windows
+
+package osutil
+
+import "os"
+
+type InterruptHandler func()
+
+// RegisterInterruptHandler is a no-op on windows
+func RegisterInterruptHandler(h InterruptHandler) {}
+
+// HandleInterrupts is a no-op on windows
+func HandleInterrupts() {}
+
+// Exit calls os.Exit
+func Exit(code int) {
+	os.Exit(code)
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/osutil/osutil.go b/vendor/github.com/coreos/etcd/pkg/osutil/osutil.go
new file mode 100644
index 0000000..ef38280
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/osutil/osutil.go
@@ -0,0 +1,45 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package osutil implements operating system-related utility functions.
+package osutil
+
+import (
+	"os"
+	"strings"
+
+	"github.com/coreos/pkg/capnslog"
+)
+
+var (
+	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "pkg/osutil")
+
+	// support to override setting SIG_DFL so tests don't terminate early
+	setDflSignal = dflSignal
+)
+
+func Unsetenv(key string) error {
+	envs := os.Environ()
+	os.Clearenv()
+	for _, e := range envs {
+		strs := strings.SplitN(e, "=", 2)
+		if strs[0] == key {
+			continue
+		}
+		if err := os.Setenv(strs[0], strs[1]); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/osutil/signal.go b/vendor/github.com/coreos/etcd/pkg/osutil/signal.go
new file mode 100644
index 0000000..687397f
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/osutil/signal.go
@@ -0,0 +1,21 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !linux cov
+
+package osutil
+
+import "syscall"
+
+func dflSignal(sig syscall.Signal) { /* nop */ }
diff --git a/vendor/github.com/coreos/etcd/pkg/osutil/signal_linux.go b/vendor/github.com/coreos/etcd/pkg/osutil/signal_linux.go
new file mode 100644
index 0000000..b94d80c
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/osutil/signal_linux.go
@@ -0,0 +1,30 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build linux,!cov
+
+package osutil
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+// dflSignal sets the given signal to SIG_DFL
+func dflSignal(sig syscall.Signal) {
+	// clearing out the sigact sets the signal to SIG_DFL
+	var sigactBuf [32]uint64
+	ptr := unsafe.Pointer(&sigactBuf)
+	syscall.Syscall6(uintptr(syscall.SYS_RT_SIGACTION), uintptr(sig), uintptr(ptr), 0, 8, 0, 0)
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/pathutil/path.go b/vendor/github.com/coreos/etcd/pkg/pathutil/path.go
new file mode 100644
index 0000000..f26254b
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/pathutil/path.go
@@ -0,0 +1,31 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package pathutil implements utility functions for handling slash-separated
+// paths.
+package pathutil
+
+import "path"
+
+// CanonicalURLPath returns the canonical url path for p, which follows the rules:
+// 1. the path always starts with "/"
+// 2. replace multiple slashes with a single slash
+// 3. replace each '.' or '..' path name element with its equivalent
+// 4. keep the trailing slash
+// The function is borrowed from stdlib http.cleanPath in server.go.
+func CanonicalURLPath(p string) string {
+	if p == "" {
+		return "/"
+	}
+	if p[0] != '/' {
+		p = "/" + p
+	}
+	np := path.Clean(p)
+	// path.Clean removes trailing slash except for root,
+	// put the trailing slash back if necessary.
+	if p[len(p)-1] == '/' && np != "/" {
+		np += "/"
+	}
+	return np
+}
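+
+// Illustrative examples (not part of the upstream etcd source):
+//
+//	CanonicalURLPath("")        // "/"
+//	CanonicalURLPath("a/b")     // "/a/b"
+//	CanonicalURLPath("//a//b/") // "/a/b/"
+//	CanonicalURLPath("/a/../b") // "/b"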
diff --git a/vendor/github.com/coreos/etcd/pkg/pbutil/pbutil.go b/vendor/github.com/coreos/etcd/pkg/pbutil/pbutil.go
new file mode 100644
index 0000000..d70f98d
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/pbutil/pbutil.go
@@ -0,0 +1,60 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package pbutil defines interfaces for handling Protocol Buffer objects.
+package pbutil
+
+import "github.com/coreos/pkg/capnslog"
+
+var (
+	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "pkg/pbutil")
+)
+
+type Marshaler interface {
+	Marshal() (data []byte, err error)
+}
+
+type Unmarshaler interface {
+	Unmarshal(data []byte) error
+}
+
+func MustMarshal(m Marshaler) []byte {
+	d, err := m.Marshal()
+	if err != nil {
+		plog.Panicf("marshal should never fail (%v)", err)
+	}
+	return d
+}
+
+func MustUnmarshal(um Unmarshaler, data []byte) {
+	if err := um.Unmarshal(data); err != nil {
+		plog.Panicf("unmarshal should never fail (%v)", err)
+	}
+}
+
+func MaybeUnmarshal(um Unmarshaler, data []byte) bool {
+	if err := um.Unmarshal(data); err != nil {
+		return false
+	}
+	return true
+}
+
+func GetBool(v *bool) (vv bool, set bool) {
+	if v == nil {
+		return false, false
+	}
+	return *v, true
+}
+
+func Boolp(b bool) *bool { return &b }
diff --git a/vendor/github.com/coreos/etcd/pkg/runtime/fds_linux.go b/vendor/github.com/coreos/etcd/pkg/runtime/fds_linux.go
new file mode 100644
index 0000000..8e9359d
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/runtime/fds_linux.go
@@ -0,0 +1,37 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package runtime implements utility functions for runtime systems.
+package runtime
+
+import (
+	"io/ioutil"
+	"syscall"
+)
+
+func FDLimit() (uint64, error) {
+	var rlimit syscall.Rlimit
+	if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlimit); err != nil {
+		return 0, err
+	}
+	return rlimit.Cur, nil
+}
+
+func FDUsage() (uint64, error) {
+	fds, err := ioutil.ReadDir("/proc/self/fd")
+	if err != nil {
+		return 0, err
+	}
+	return uint64(len(fds)), nil
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/runtime/fds_other.go b/vendor/github.com/coreos/etcd/pkg/runtime/fds_other.go
new file mode 100644
index 0000000..0cbdb88
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/runtime/fds_other.go
@@ -0,0 +1,30 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !linux
+
+package runtime
+
+import (
+	"fmt"
+	"runtime"
+)
+
+func FDLimit() (uint64, error) {
+	return 0, fmt.Errorf("cannot get FDLimit on %s", runtime.GOOS)
+}
+
+func FDUsage() (uint64, error) {
+	return 0, fmt.Errorf("cannot get FDUsage on %s", runtime.GOOS)
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/schedule/doc.go b/vendor/github.com/coreos/etcd/pkg/schedule/doc.go
new file mode 100644
index 0000000..cca2c75
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/schedule/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package schedule provides mechanisms and policies for scheduling units of work.
+package schedule
diff --git a/vendor/github.com/coreos/etcd/pkg/schedule/schedule.go b/vendor/github.com/coreos/etcd/pkg/schedule/schedule.go
new file mode 100644
index 0000000..234d019
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/schedule/schedule.go
@@ -0,0 +1,165 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package schedule
+
+import (
+	"context"
+	"sync"
+)
+
+type Job func(context.Context)
+
+// Scheduler can schedule jobs.
+type Scheduler interface {
+	// Schedule asks the scheduler to schedule a job defined by the given func.
+	// Scheduling a job on a stopped scheduler might panic.
+	Schedule(j Job)
+
+	// Pending returns the number of pending jobs.
+	Pending() int
+
+	// Scheduled returns the number of scheduled jobs (excluding pending jobs).
+	Scheduled() int
+
+	// Finished returns the number of finished jobs.
+	Finished() int
+
+	// WaitFinish waits until at least n jobs are finished and all pending
+	// jobs are finished.
+	WaitFinish(n int)
+
+	// Stop stops the scheduler.
+	Stop()
+}
+
+type fifo struct {
+	mu sync.Mutex
+
+	resume    chan struct{}
+	scheduled int
+	finished  int
+	pendings  []Job
+
+	ctx    context.Context
+	cancel context.CancelFunc
+
+	finishCond *sync.Cond
+	donec      chan struct{}
+}
+
+// NewFIFOScheduler returns a Scheduler that schedules jobs in FIFO
+// order sequentially.
+func NewFIFOScheduler() Scheduler {
+	f := &fifo{
+		resume: make(chan struct{}, 1),
+		donec:  make(chan struct{}, 1),
+	}
+	f.finishCond = sync.NewCond(&f.mu)
+	f.ctx, f.cancel = context.WithCancel(context.Background())
+	go f.run()
+	return f
+}
+
+// Schedule schedules a job that will be run in FIFO order sequentially.
+func (f *fifo) Schedule(j Job) {
+	f.mu.Lock()
+	defer f.mu.Unlock()
+
+	if f.cancel == nil {
+		panic("schedule: schedule to stopped scheduler")
+	}
+
+	if len(f.pendings) == 0 {
+		select {
+		case f.resume <- struct{}{}:
+		default:
+		}
+	}
+	f.pendings = append(f.pendings, j)
+}
+
+func (f *fifo) Pending() int {
+	f.mu.Lock()
+	defer f.mu.Unlock()
+	return len(f.pendings)
+}
+
+func (f *fifo) Scheduled() int {
+	f.mu.Lock()
+	defer f.mu.Unlock()
+	return f.scheduled
+}
+
+func (f *fifo) Finished() int {
+	f.finishCond.L.Lock()
+	defer f.finishCond.L.Unlock()
+	return f.finished
+}
+
+func (f *fifo) WaitFinish(n int) {
+	f.finishCond.L.Lock()
+	for f.finished < n || len(f.pendings) != 0 {
+		f.finishCond.Wait()
+	}
+	f.finishCond.L.Unlock()
+}
+
+// Stop stops the scheduler and cancels all pending jobs.
+func (f *fifo) Stop() {
+	f.mu.Lock()
+	f.cancel()
+	f.cancel = nil
+	f.mu.Unlock()
+	<-f.donec
+}
+
+func (f *fifo) run() {
+	// TODO: recover from job panic?
+	defer func() {
+		close(f.donec)
+		close(f.resume)
+	}()
+
+	for {
+		var todo Job
+		f.mu.Lock()
+		if len(f.pendings) != 0 {
+			f.scheduled++
+			todo = f.pendings[0]
+		}
+		f.mu.Unlock()
+		if todo == nil {
+			select {
+			case <-f.resume:
+			case <-f.ctx.Done():
+				f.mu.Lock()
+				pendings := f.pendings
+				f.pendings = nil
+				f.mu.Unlock()
+				// clean up pending jobs
+				for _, todo := range pendings {
+					todo(f.ctx)
+				}
+				return
+			}
+		} else {
+			todo(f.ctx)
+			f.finishCond.L.Lock()
+			f.finished++
+			f.pendings = f.pendings[1:]
+			f.finishCond.Broadcast()
+			f.finishCond.L.Unlock()
+		}
+	}
+}
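+
+// Illustrative sketch (not part of the upstream etcd source): running jobs
+// sequentially and waiting for completion.
+//
+//	s := NewFIFOScheduler()
+//	defer s.Stop()
+//	s.Schedule(func(ctx context.Context) { /* job 1 */ })
+//	s.Schedule(func(ctx context.Context) { /* job 2 */ })
+//	s.WaitFinish(2) // blocks until both jobs have run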
diff --git a/vendor/github.com/coreos/etcd/pkg/srv/srv.go b/vendor/github.com/coreos/etcd/pkg/srv/srv.go
new file mode 100644
index 0000000..600061c
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/srv/srv.go
@@ -0,0 +1,141 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package srv looks up DNS SRV records.
+package srv
+
+import (
+	"fmt"
+	"net"
+	"net/url"
+	"strings"
+
+	"github.com/coreos/etcd/pkg/types"
+)
+
+var (
+	// indirection for testing
+	lookupSRV      = net.LookupSRV // net.DefaultResolver.LookupSRV when ctxs don't conflict
+	resolveTCPAddr = net.ResolveTCPAddr
+)
+
+// GetCluster gets the cluster information via DNS discovery.
+// It treats each DNS SRV entry as a separate instance.
+func GetCluster(service, name, dns string, apurls types.URLs) ([]string, error) {
+	tempName := int(0)
+	tcp2ap := make(map[string]url.URL)
+
+	// First, resolve the apurls
+	for _, url := range apurls {
+		tcpAddr, err := resolveTCPAddr("tcp", url.Host)
+		if err != nil {
+			return nil, err
+		}
+		tcp2ap[tcpAddr.String()] = url
+	}
+
+	stringParts := []string{}
+	updateNodeMap := func(service, scheme string) error {
+		_, addrs, err := lookupSRV(service, "tcp", dns)
+		if err != nil {
+			return err
+		}
+		for _, srv := range addrs {
+			port := fmt.Sprintf("%d", srv.Port)
+			host := net.JoinHostPort(srv.Target, port)
+			tcpAddr, terr := resolveTCPAddr("tcp", host)
+			if terr != nil {
+				err = terr
+				continue
+			}
+			n := ""
+			url, ok := tcp2ap[tcpAddr.String()]
+			if ok {
+				n = name
+			}
+			if n == "" {
+				n = fmt.Sprintf("%d", tempName)
+				tempName++
+			}
+			// SRV targets have a trailing dot, but URLs shouldn't.
+			shortHost := strings.TrimSuffix(srv.Target, ".")
+			urlHost := net.JoinHostPort(shortHost, port)
+			if ok && url.Scheme != scheme {
+				err = fmt.Errorf("bootstrap at %s from DNS for %s has scheme mismatch with expected peer %s", scheme+"://"+urlHost, service, url.String())
+			} else {
+				stringParts = append(stringParts, fmt.Sprintf("%s=%s://%s", n, scheme, urlHost))
+			}
+		}
+		if len(stringParts) == 0 {
+			return err
+		}
+		return nil
+	}
+
+	failCount := 0
+	err := updateNodeMap(service+"-ssl", "https")
+	srvErr := make([]string, 2)
+	if err != nil {
+		srvErr[0] = fmt.Sprintf("error querying DNS SRV records for _%s-ssl %s", service, err)
+		failCount++
+	}
+	err = updateNodeMap(service, "http")
+	if err != nil {
+		srvErr[1] = fmt.Sprintf("error querying DNS SRV records for _%s %s", service, err)
+		failCount++
+	}
+	if failCount == 2 {
+		return nil, fmt.Errorf("srv: too many errors querying DNS SRV records (%q, %q)", srvErr[0], srvErr[1])
+	}
+	return stringParts, nil
+}
+
+// SRVClients holds the client endpoints and raw SRV records returned
+// by a DNS SRV lookup.
+type SRVClients struct {
+	Endpoints []string
+	SRVs      []*net.SRV
+}
+
+// GetClient looks up the client endpoints for a service and domain.
+func GetClient(service, domain string) (*SRVClients, error) {
+	var urls []*url.URL
+	var srvs []*net.SRV
+
+	updateURLs := func(service, scheme string) error {
+		_, addrs, err := lookupSRV(service, "tcp", domain)
+		if err != nil {
+			return err
+		}
+		for _, srv := range addrs {
+			urls = append(urls, &url.URL{
+				Scheme: scheme,
+				Host:   net.JoinHostPort(srv.Target, fmt.Sprintf("%d", srv.Port)),
+			})
+		}
+		srvs = append(srvs, addrs...)
+		return nil
+	}
+
+	errHTTPS := updateURLs(service+"-ssl", "https")
+	errHTTP := updateURLs(service, "http")
+
+	if errHTTPS != nil && errHTTP != nil {
+		return nil, fmt.Errorf("dns lookup errors: %s and %s", errHTTPS, errHTTP)
+	}
+
+	endpoints := make([]string, len(urls))
+	for i := range urls {
+		endpoints[i] = urls[i].String()
+	}
+	return &SRVClients{Endpoints: endpoints, SRVs: srvs}, nil
+}
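
A short usage sketch for GetClient; the service name follows etcd's
convention and the domain is a placeholder:

package main

import (
	"fmt"

	"github.com/coreos/etcd/pkg/srv"
)

func main() {
	// Queries _etcd-client-ssl._tcp.example.com (https) and then
	// _etcd-client._tcp.example.com (http); it fails only if both lookups fail.
	c, err := srv.GetClient("etcd-client", "example.com")
	if err != nil {
		fmt.Println("discovery failed:", err)
		return
	}
	for _, ep := range c.Endpoints {
		fmt.Println("endpoint:", ep)
	}
}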
diff --git a/vendor/github.com/coreos/etcd/pkg/tlsutil/cipher_suites.go b/vendor/github.com/coreos/etcd/pkg/tlsutil/cipher_suites.go
new file mode 100644
index 0000000..b5916bb
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/tlsutil/cipher_suites.go
@@ -0,0 +1,51 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tlsutil
+
+import "crypto/tls"
+
+// cipher suites implemented by Go
+// https://github.com/golang/go/blob/dev.boringcrypto.go1.10/src/crypto/tls/cipher_suites.go
+var cipherSuites = map[string]uint16{
+	"TLS_RSA_WITH_RC4_128_SHA":                tls.TLS_RSA_WITH_RC4_128_SHA,
+	"TLS_RSA_WITH_3DES_EDE_CBC_SHA":           tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
+	"TLS_RSA_WITH_AES_128_CBC_SHA":            tls.TLS_RSA_WITH_AES_128_CBC_SHA,
+	"TLS_RSA_WITH_AES_256_CBC_SHA":            tls.TLS_RSA_WITH_AES_256_CBC_SHA,
+	"TLS_RSA_WITH_AES_128_CBC_SHA256":         tls.TLS_RSA_WITH_AES_128_CBC_SHA256,
+	"TLS_RSA_WITH_AES_128_GCM_SHA256":         tls.TLS_RSA_WITH_AES_128_GCM_SHA256,
+	"TLS_RSA_WITH_AES_256_GCM_SHA384":         tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
+	"TLS_ECDHE_ECDSA_WITH_RC4_128_SHA":        tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
+	"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA":    tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
+	"TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA":    tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
+	"TLS_ECDHE_RSA_WITH_RC4_128_SHA":          tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
+	"TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA":     tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
+	"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA":      tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
+	"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA":      tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
+	"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
+	"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256":   tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
+	"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256":   tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+	"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+	"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384":   tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
+	"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
+	"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305":    tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
+	"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305":  tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
+}
+
+// GetCipherSuite returns the cipher suite ID corresponding to the given
+// name, and a boolean indicating whether the suite is supported.
+func GetCipherSuite(s string) (uint16, bool) {
+	v, ok := cipherSuites[s]
+	return v, ok
+}
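
As a sketch of the intended use, converting configured cipher suite names
into the IDs crypto/tls expects (the helper and sample value are
illustrative):

package main

import (
	"fmt"

	"github.com/coreos/etcd/pkg/tlsutil"
)

// cipherSuiteIDs maps cipher suite names, e.g. from a CLI flag, to the
// uint16 IDs used by crypto/tls, rejecting any unsupported name.
func cipherSuiteIDs(names []string) ([]uint16, error) {
	ids := make([]uint16, 0, len(names))
	for _, name := range names {
		id, ok := tlsutil.GetCipherSuite(name)
		if !ok {
			return nil, fmt.Errorf("unsupported cipher suite %q", name)
		}
		ids = append(ids, id)
	}
	return ids, nil
}

func main() {
	ids, err := cipherSuiteIDs([]string{"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"})
	fmt.Println(ids, err)
}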
diff --git a/vendor/github.com/coreos/etcd/pkg/tlsutil/doc.go b/vendor/github.com/coreos/etcd/pkg/tlsutil/doc.go
new file mode 100644
index 0000000..3b6aa67
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/tlsutil/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package tlsutil provides utility functions for handling TLS.
+package tlsutil
diff --git a/vendor/github.com/coreos/etcd/pkg/tlsutil/tlsutil.go b/vendor/github.com/coreos/etcd/pkg/tlsutil/tlsutil.go
new file mode 100644
index 0000000..79b1f63
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/tlsutil/tlsutil.go
@@ -0,0 +1,72 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tlsutil
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"encoding/pem"
+	"io/ioutil"
+)
+
+// NewCertPool creates an x509 CertPool from the provided CA files.
+func NewCertPool(CAFiles []string) (*x509.CertPool, error) {
+	certPool := x509.NewCertPool()
+
+	for _, CAFile := range CAFiles {
+		pemByte, err := ioutil.ReadFile(CAFile)
+		if err != nil {
+			return nil, err
+		}
+
+		for {
+			var block *pem.Block
+			block, pemByte = pem.Decode(pemByte)
+			if block == nil {
+				break
+			}
+			cert, err := x509.ParseCertificate(block.Bytes)
+			if err != nil {
+				return nil, err
+			}
+			certPool.AddCert(cert)
+		}
+	}
+
+	return certPool, nil
+}
+
+// NewCert generates a TLS certificate from the given cert file, key file, and parse function.
+func NewCert(certfile, keyfile string, parseFunc func([]byte, []byte) (tls.Certificate, error)) (*tls.Certificate, error) {
+	cert, err := ioutil.ReadFile(certfile)
+	if err != nil {
+		return nil, err
+	}
+
+	key, err := ioutil.ReadFile(keyfile)
+	if err != nil {
+		return nil, err
+	}
+
+	if parseFunc == nil {
+		parseFunc = tls.X509KeyPair
+	}
+
+	tlsCert, err := parseFunc(cert, key)
+	if err != nil {
+		return nil, err
+	}
+	return &tlsCert, nil
+}
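
A minimal sketch combining the two helpers into a client-side tls.Config;
the file paths are placeholders:

package main

import (
	"crypto/tls"
	"log"

	"github.com/coreos/etcd/pkg/tlsutil"
)

func main() {
	// a nil parseFunc falls back to tls.X509KeyPair
	cert, err := tlsutil.NewCert("client.crt", "client.key", nil)
	if err != nil {
		log.Fatal(err)
	}
	pool, err := tlsutil.NewCertPool([]string{"ca.crt"})
	if err != nil {
		log.Fatal(err)
	}
	cfg := &tls.Config{
		Certificates: []tls.Certificate{*cert},
		RootCAs:      pool,
	}
	_ = cfg // hand cfg to an http.Transport or tls.Dial
}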
diff --git a/vendor/github.com/coreos/etcd/pkg/transport/doc.go b/vendor/github.com/coreos/etcd/pkg/transport/doc.go
new file mode 100644
index 0000000..37658ce
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/transport/doc.go
@@ -0,0 +1,17 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package transport implements various HTTP transport utilities based on Go
+// net package.
+package transport
diff --git a/vendor/github.com/coreos/etcd/pkg/transport/keepalive_listener.go b/vendor/github.com/coreos/etcd/pkg/transport/keepalive_listener.go
new file mode 100644
index 0000000..4ff8e7f
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/transport/keepalive_listener.go
@@ -0,0 +1,94 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+	"crypto/tls"
+	"fmt"
+	"net"
+	"time"
+)
+
+type keepAliveConn interface {
+	SetKeepAlive(bool) error
+	SetKeepAlivePeriod(d time.Duration) error
+}
+
+// NewKeepAliveListener returns a listener that sets TCP keepalive on accepted connections.
+// Be careful when wrapping a KeepAliveListener with another Listener if TLSInfo is not nil.
+// Some packages (like net/http) might expect the Listener to return a *tls.Conn to start the TLS handshake.
+// http://tldp.org/HOWTO/TCP-Keepalive-HOWTO/overview.html
+func NewKeepAliveListener(l net.Listener, scheme string, tlscfg *tls.Config) (net.Listener, error) {
+	if scheme == "https" {
+		if tlscfg == nil {
+			return nil, fmt.Errorf("cannot listen on TLS for given listener: KeyFile and CertFile are not present")
+		}
+		return newTLSKeepaliveListener(l, tlscfg), nil
+	}
+
+	return &keepaliveListener{
+		Listener: l,
+	}, nil
+}
+
+type keepaliveListener struct{ net.Listener }
+
+func (kln *keepaliveListener) Accept() (net.Conn, error) {
+	c, err := kln.Listener.Accept()
+	if err != nil {
+		return nil, err
+	}
+	kac := c.(keepAliveConn)
+	// detection time: tcp_keepalive_time + tcp_keepalive_probes * tcp_keepalive_intvl
+	// default on linux:  30 + 8 * 30
+	// default on osx:    30 + 8 * 75
+	kac.SetKeepAlive(true)
+	kac.SetKeepAlivePeriod(30 * time.Second)
+	return c, nil
+}
+
+// A tlsKeepaliveListener implements a network listener (net.Listener) for TLS connections.
+type tlsKeepaliveListener struct {
+	net.Listener
+	config *tls.Config
+}
+
+// Accept waits for and returns the next incoming TLS connection.
+// The returned connection c is a *tls.Conn.
+func (l *tlsKeepaliveListener) Accept() (c net.Conn, err error) {
+	c, err = l.Listener.Accept()
+	if err != nil {
+		return
+	}
+	kac := c.(keepAliveConn)
+	// detection time: tcp_keepalive_time + tcp_keepalive_probes * tcp_keepalive_intvl
+	// default on linux:  30 + 8 * 30
+	// default on osx:    30 + 8 * 75
+	kac.SetKeepAlive(true)
+	kac.SetKeepAlivePeriod(30 * time.Second)
+	c = tls.Server(c, l.config)
+	return c, nil
+}
+
+// newTLSKeepaliveListener creates a Listener which accepts connections from
+// an inner Listener and wraps each connection with tls.Server.
+// The configuration config must be non-nil and must have
+// at least one certificate.
+func newTLSKeepaliveListener(inner net.Listener, config *tls.Config) net.Listener {
+	l := &tlsKeepaliveListener{}
+	l.Listener = inner
+	l.config = config
+	return l
+}
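
For reference, a sketch of wrapping a plain TCP listener; with scheme
"http" and a nil TLS config, only TCP keepalive is enabled:

package main

import (
	"log"
	"net"
	"net/http"

	"github.com/coreos/etcd/pkg/transport"
)

func main() {
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	kl, err := transport.NewKeepAliveListener(l, "http", nil)
	if err != nil {
		log.Fatal(err)
	}
	// accepted connections now have keepalive probes enabled
	log.Fatal(http.Serve(kl, http.NotFoundHandler()))
}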
diff --git a/vendor/github.com/coreos/etcd/pkg/transport/limit_listen.go b/vendor/github.com/coreos/etcd/pkg/transport/limit_listen.go
new file mode 100644
index 0000000..930c542
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/transport/limit_listen.go
@@ -0,0 +1,80 @@
+// Copyright 2013 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package transport provides network utility functions, complementing the more
+// common ones in the net package.
+package transport
+
+import (
+	"errors"
+	"net"
+	"sync"
+	"time"
+)
+
+var (
+	ErrNotTCP = errors.New("only tcp connections have keepalive")
+)
+
+// LimitListener returns a Listener that accepts at most n simultaneous
+// connections from the provided Listener.
+func LimitListener(l net.Listener, n int) net.Listener {
+	return &limitListener{l, make(chan struct{}, n)}
+}
+
+type limitListener struct {
+	net.Listener
+	sem chan struct{}
+}
+
+func (l *limitListener) acquire() { l.sem <- struct{}{} }
+func (l *limitListener) release() { <-l.sem }
+
+func (l *limitListener) Accept() (net.Conn, error) {
+	l.acquire()
+	c, err := l.Listener.Accept()
+	if err != nil {
+		l.release()
+		return nil, err
+	}
+	return &limitListenerConn{Conn: c, release: l.release}, nil
+}
+
+type limitListenerConn struct {
+	net.Conn
+	releaseOnce sync.Once
+	release     func()
+}
+
+func (l *limitListenerConn) Close() error {
+	err := l.Conn.Close()
+	l.releaseOnce.Do(l.release)
+	return err
+}
+
+func (l *limitListenerConn) SetKeepAlive(doKeepAlive bool) error {
+	tcpc, ok := l.Conn.(*net.TCPConn)
+	if !ok {
+		return ErrNotTCP
+	}
+	return tcpc.SetKeepAlive(doKeepAlive)
+}
+
+func (l *limitListenerConn) SetKeepAlivePeriod(d time.Duration) error {
+	tcpc, ok := l.Conn.(*net.TCPConn)
+	if !ok {
+		return ErrNotTCP
+	}
+	return tcpc.SetKeepAlivePeriod(d)
+}
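
A usage sketch: capping an HTTP server at a fixed number of simultaneous
connections (the limit of 100 is illustrative):

package main

import (
	"log"
	"net"
	"net/http"

	"github.com/coreos/etcd/pkg/transport"
)

func main() {
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	// once 100 connections are open, further Accepts block until one closes
	ll := transport.LimitListener(l, 100)
	log.Fatal(http.Serve(ll, http.NotFoundHandler()))
}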
diff --git a/vendor/github.com/coreos/etcd/pkg/transport/listener.go b/vendor/github.com/coreos/etcd/pkg/transport/listener.go
new file mode 100644
index 0000000..4865506
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/transport/listener.go
@@ -0,0 +1,289 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"crypto/rand"
+	"crypto/tls"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"math/big"
+	"net"
+	"os"
+	"path/filepath"
+	"strings"
+	"time"
+
+	"github.com/coreos/etcd/pkg/tlsutil"
+)
+
+func NewListener(addr, scheme string, tlsinfo *TLSInfo) (l net.Listener, err error) {
+	if l, err = newListener(addr, scheme); err != nil {
+		return nil, err
+	}
+	return wrapTLS(addr, scheme, tlsinfo, l)
+}
+
+func newListener(addr string, scheme string) (net.Listener, error) {
+	if scheme == "unix" || scheme == "unixs" {
+		// unix sockets via unix://laddr
+		return NewUnixListener(addr)
+	}
+	return net.Listen("tcp", addr)
+}
+
+func wrapTLS(addr, scheme string, tlsinfo *TLSInfo, l net.Listener) (net.Listener, error) {
+	if scheme != "https" && scheme != "unixs" {
+		return l, nil
+	}
+	return newTLSListener(l, tlsinfo, checkSAN)
+}
+
+type TLSInfo struct {
+	CertFile           string
+	KeyFile            string
+	CAFile             string // TODO: deprecate this in v4
+	TrustedCAFile      string
+	ClientCertAuth     bool
+	CRLFile            string
+	InsecureSkipVerify bool
+
+	// ServerName ensures the cert matches the given host in case of discovery / virtual hosting
+	ServerName string
+
+	// HandshakeFailure is optionally called when a connection fails to handshake. The
+	// connection will be closed immediately afterwards.
+	HandshakeFailure func(*tls.Conn, error)
+
+	// CipherSuites is a list of supported cipher suites.
+	// If empty, Go auto-populates it by default.
+	// Note that cipher suites are prioritized in the given order.
+	CipherSuites []uint16
+
+	selfCert bool
+
+	// parseFunc exists to simplify testing. Typically, parseFunc
+	// should be left nil. In that case, tls.X509KeyPair will be used.
+	parseFunc func([]byte, []byte) (tls.Certificate, error)
+
+	// AllowedCN is a CN which must be provided by a client.
+	AllowedCN string
+}
+
+func (info TLSInfo) String() string {
+	return fmt.Sprintf("cert = %s, key = %s, ca = %s, trusted-ca = %s, client-cert-auth = %v, crl-file = %s", info.CertFile, info.KeyFile, info.CAFile, info.TrustedCAFile, info.ClientCertAuth, info.CRLFile)
+}
+
+func (info TLSInfo) Empty() bool {
+	return info.CertFile == "" && info.KeyFile == ""
+}
+
+func SelfCert(dirpath string, hosts []string) (info TLSInfo, err error) {
+	if err = os.MkdirAll(dirpath, 0700); err != nil {
+		return
+	}
+
+	certPath := filepath.Join(dirpath, "cert.pem")
+	keyPath := filepath.Join(dirpath, "key.pem")
+	_, errcert := os.Stat(certPath)
+	_, errkey := os.Stat(keyPath)
+	if errcert == nil && errkey == nil {
+		info.CertFile = certPath
+		info.KeyFile = keyPath
+		info.selfCert = true
+		return
+	}
+
+	serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
+	serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
+	if err != nil {
+		return
+	}
+
+	tmpl := x509.Certificate{
+		SerialNumber: serialNumber,
+		Subject:      pkix.Name{Organization: []string{"etcd"}},
+		NotBefore:    time.Now(),
+		NotAfter:     time.Now().Add(365 * (24 * time.Hour)),
+
+		KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
+		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
+		BasicConstraintsValid: true,
+	}
+
+	for _, host := range hosts {
+		h, _, _ := net.SplitHostPort(host)
+		if ip := net.ParseIP(h); ip != nil {
+			tmpl.IPAddresses = append(tmpl.IPAddresses, ip)
+		} else {
+			tmpl.DNSNames = append(tmpl.DNSNames, h)
+		}
+	}
+
+	priv, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
+	if err != nil {
+		return
+	}
+
+	derBytes, err := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, &priv.PublicKey, priv)
+	if err != nil {
+		return
+	}
+
+	certOut, err := os.Create(certPath)
+	if err != nil {
+		return
+	}
+	pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
+	certOut.Close()
+
+	b, err := x509.MarshalECPrivateKey(priv)
+	if err != nil {
+		return
+	}
+	keyOut, err := os.OpenFile(keyPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
+	if err != nil {
+		return
+	}
+	pem.Encode(keyOut, &pem.Block{Type: "EC PRIVATE KEY", Bytes: b})
+	keyOut.Close()
+
+	return SelfCert(dirpath, hosts)
+}
+
+func (info TLSInfo) baseConfig() (*tls.Config, error) {
+	if info.KeyFile == "" || info.CertFile == "" {
+		return nil, fmt.Errorf("KeyFile and CertFile must both be present[key: %v, cert: %v]", info.KeyFile, info.CertFile)
+	}
+
+	_, err := tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc)
+	if err != nil {
+		return nil, err
+	}
+
+	cfg := &tls.Config{
+		MinVersion: tls.VersionTLS12,
+		ServerName: info.ServerName,
+	}
+
+	if len(info.CipherSuites) > 0 {
+		cfg.CipherSuites = info.CipherSuites
+	}
+
+	if info.AllowedCN != "" {
+		cfg.VerifyPeerCertificate = func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error {
+			for _, chains := range verifiedChains {
+				if len(chains) != 0 {
+					if info.AllowedCN == chains[0].Subject.CommonName {
+						return nil
+					}
+				}
+			}
+			return errors.New("CommonName authentication failed")
+		}
+	}
+
+	// this only reloads certs when there's a client request
+	// TODO: support server-side refresh (e.g. inotify, SIGHUP), caching
+	cfg.GetCertificate = func(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) {
+		return tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc)
+	}
+	cfg.GetClientCertificate = func(unused *tls.CertificateRequestInfo) (*tls.Certificate, error) {
+		return tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc)
+	}
+	return cfg, nil
+}
+
+// cafiles returns a list of CA file paths.
+func (info TLSInfo) cafiles() []string {
+	cs := make([]string, 0)
+	if info.CAFile != "" {
+		cs = append(cs, info.CAFile)
+	}
+	if info.TrustedCAFile != "" {
+		cs = append(cs, info.TrustedCAFile)
+	}
+	return cs
+}
+
+// ServerConfig generates a tls.Config object for use by an HTTP server.
+func (info TLSInfo) ServerConfig() (*tls.Config, error) {
+	cfg, err := info.baseConfig()
+	if err != nil {
+		return nil, err
+	}
+
+	cfg.ClientAuth = tls.NoClientCert
+	if info.CAFile != "" || info.ClientCertAuth {
+		cfg.ClientAuth = tls.RequireAndVerifyClientCert
+	}
+
+	CAFiles := info.cafiles()
+	if len(CAFiles) > 0 {
+		cp, err := tlsutil.NewCertPool(CAFiles)
+		if err != nil {
+			return nil, err
+		}
+		cfg.ClientCAs = cp
+	}
+
+	// "h2" NextProtos is necessary for enabling HTTP2 for go's HTTP server
+	cfg.NextProtos = []string{"h2"}
+
+	return cfg, nil
+}
+
+// ClientConfig generates a tls.Config object for use by an HTTP client.
+func (info TLSInfo) ClientConfig() (*tls.Config, error) {
+	var cfg *tls.Config
+	var err error
+
+	if !info.Empty() {
+		cfg, err = info.baseConfig()
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		cfg = &tls.Config{ServerName: info.ServerName}
+	}
+	cfg.InsecureSkipVerify = info.InsecureSkipVerify
+
+	CAFiles := info.cafiles()
+	if len(CAFiles) > 0 {
+		cfg.RootCAs, err = tlsutil.NewCertPool(CAFiles)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	if info.selfCert {
+		cfg.InsecureSkipVerify = true
+	}
+	return cfg, nil
+}
+
+// IsClosedConnError returns true if the error originates from a closed
+// listener or cmux; copied from golang.org/x/net/http2/http2.go.
+func IsClosedConnError(err error) bool {
+	// 'use of closed network connection' (Go <=1.8)
+	// 'use of closed file or network connection' (Go >1.8, internal/poll.ErrClosing)
+	// 'mux: listener closed' (cmux.ErrListenerClosed)
+	return err != nil && strings.Contains(err.Error(), "closed")
+}
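
Putting the pieces together, a sketch of serving HTTPS with
client-certificate auth via TLSInfo; the file paths and address are
placeholders:

package main

import (
	"log"
	"net/http"

	"github.com/coreos/etcd/pkg/transport"
)

func main() {
	tlsinfo := &transport.TLSInfo{
		CertFile:       "server.crt",
		KeyFile:        "server.key",
		TrustedCAFile:  "ca.crt",
		ClientCertAuth: true, // selects tls.RequireAndVerifyClientCert
	}
	l, err := transport.NewListener("127.0.0.1:2379", "https", tlsinfo)
	if err != nil {
		log.Fatal(err)
	}
	log.Fatal(http.Serve(l, http.NotFoundHandler()))
}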
diff --git a/vendor/github.com/coreos/etcd/pkg/transport/listener_tls.go b/vendor/github.com/coreos/etcd/pkg/transport/listener_tls.go
new file mode 100644
index 0000000..6f16009
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/transport/listener_tls.go
@@ -0,0 +1,272 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+	"context"
+	"crypto/tls"
+	"crypto/x509"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"strings"
+	"sync"
+)
+
+// tlsListener overrides a TLS listener so it will reject client
+// certificates with insufficient SAN credentials or CRL revoked
+// certificates.
+type tlsListener struct {
+	net.Listener
+	connc            chan net.Conn
+	donec            chan struct{}
+	err              error
+	handshakeFailure func(*tls.Conn, error)
+	check            tlsCheckFunc
+}
+
+type tlsCheckFunc func(context.Context, *tls.Conn) error
+
+// NewTLSListener handshakes TLS connections and performs optional CRL checking.
+func NewTLSListener(l net.Listener, tlsinfo *TLSInfo) (net.Listener, error) {
+	check := func(context.Context, *tls.Conn) error { return nil }
+	return newTLSListener(l, tlsinfo, check)
+}
+
+func newTLSListener(l net.Listener, tlsinfo *TLSInfo, check tlsCheckFunc) (net.Listener, error) {
+	if tlsinfo == nil || tlsinfo.Empty() {
+		l.Close()
+		return nil, fmt.Errorf("cannot listen on TLS for %s: KeyFile and CertFile are not present", l.Addr().String())
+	}
+	tlscfg, err := tlsinfo.ServerConfig()
+	if err != nil {
+		return nil, err
+	}
+
+	hf := tlsinfo.HandshakeFailure
+	if hf == nil {
+		hf = func(*tls.Conn, error) {}
+	}
+
+	if len(tlsinfo.CRLFile) > 0 {
+		prevCheck := check
+		check = func(ctx context.Context, tlsConn *tls.Conn) error {
+			if err := prevCheck(ctx, tlsConn); err != nil {
+				return err
+			}
+			st := tlsConn.ConnectionState()
+			if certs := st.PeerCertificates; len(certs) > 0 {
+				return checkCRL(tlsinfo.CRLFile, certs)
+			}
+			return nil
+		}
+	}
+
+	tlsl := &tlsListener{
+		Listener:         tls.NewListener(l, tlscfg),
+		connc:            make(chan net.Conn),
+		donec:            make(chan struct{}),
+		handshakeFailure: hf,
+		check:            check,
+	}
+	go tlsl.acceptLoop()
+	return tlsl, nil
+}
+
+func (l *tlsListener) Accept() (net.Conn, error) {
+	select {
+	case conn := <-l.connc:
+		return conn, nil
+	case <-l.donec:
+		return nil, l.err
+	}
+}
+
+func checkSAN(ctx context.Context, tlsConn *tls.Conn) error {
+	st := tlsConn.ConnectionState()
+	if certs := st.PeerCertificates; len(certs) > 0 {
+		addr := tlsConn.RemoteAddr().String()
+		return checkCertSAN(ctx, certs[0], addr)
+	}
+	return nil
+}
+
+// acceptLoop launches each TLS handshake in a separate goroutine
+// to prevent a hanging TLS connection from blocking other connections.
+func (l *tlsListener) acceptLoop() {
+	var wg sync.WaitGroup
+	var pendingMu sync.Mutex
+
+	pending := make(map[net.Conn]struct{})
+	ctx, cancel := context.WithCancel(context.Background())
+	defer func() {
+		cancel()
+		pendingMu.Lock()
+		for c := range pending {
+			c.Close()
+		}
+		pendingMu.Unlock()
+		wg.Wait()
+		close(l.donec)
+	}()
+
+	for {
+		conn, err := l.Listener.Accept()
+		if err != nil {
+			l.err = err
+			return
+		}
+
+		pendingMu.Lock()
+		pending[conn] = struct{}{}
+		pendingMu.Unlock()
+
+		wg.Add(1)
+		go func() {
+			defer func() {
+				if conn != nil {
+					conn.Close()
+				}
+				wg.Done()
+			}()
+
+			tlsConn := conn.(*tls.Conn)
+			herr := tlsConn.Handshake()
+			pendingMu.Lock()
+			delete(pending, conn)
+			pendingMu.Unlock()
+
+			if herr != nil {
+				l.handshakeFailure(tlsConn, herr)
+				return
+			}
+			if err := l.check(ctx, tlsConn); err != nil {
+				l.handshakeFailure(tlsConn, err)
+				return
+			}
+
+			select {
+			case l.connc <- tlsConn:
+				conn = nil
+			case <-ctx.Done():
+			}
+		}()
+	}
+}
+
+func checkCRL(crlPath string, cert []*x509.Certificate) error {
+	// TODO: cache
+	crlBytes, err := ioutil.ReadFile(crlPath)
+	if err != nil {
+		return err
+	}
+	certList, err := x509.ParseCRL(crlBytes)
+	if err != nil {
+		return err
+	}
+	revokedSerials := make(map[string]struct{})
+	for _, rc := range certList.TBSCertList.RevokedCertificates {
+		revokedSerials[string(rc.SerialNumber.Bytes())] = struct{}{}
+	}
+	for _, c := range cert {
+		serial := string(c.SerialNumber.Bytes())
+		if _, ok := revokedSerials[serial]; ok {
+			return fmt.Errorf("transport: certificate serial %x revoked", serial)
+		}
+	}
+	return nil
+}
+
+func checkCertSAN(ctx context.Context, cert *x509.Certificate, remoteAddr string) error {
+	if len(cert.IPAddresses) == 0 && len(cert.DNSNames) == 0 {
+		return nil
+	}
+	h, _, herr := net.SplitHostPort(remoteAddr)
+	if herr != nil {
+		return herr
+	}
+	if len(cert.IPAddresses) > 0 {
+		cerr := cert.VerifyHostname(h)
+		if cerr == nil {
+			return nil
+		}
+		if len(cert.DNSNames) == 0 {
+			return cerr
+		}
+	}
+	if len(cert.DNSNames) > 0 {
+		ok, err := isHostInDNS(ctx, h, cert.DNSNames)
+		if ok {
+			return nil
+		}
+		errStr := ""
+		if err != nil {
+			errStr = " (" + err.Error() + ")"
+		}
+		return fmt.Errorf("tls: %q does not match any of DNSNames %q"+errStr, h, cert.DNSNames)
+	}
+	return nil
+}
+
+func isHostInDNS(ctx context.Context, host string, dnsNames []string) (ok bool, err error) {
+	// reverse lookup
+	wildcards, names := []string{}, []string{}
+	for _, dns := range dnsNames {
+		if strings.HasPrefix(dns, "*.") {
+			wildcards = append(wildcards, dns[1:])
+		} else {
+			names = append(names, dns)
+		}
+	}
+	lnames, lerr := net.DefaultResolver.LookupAddr(ctx, host)
+	for _, name := range lnames {
+		// strip trailing '.' from PTR record
+		if name[len(name)-1] == '.' {
+			name = name[:len(name)-1]
+		}
+		for _, wc := range wildcards {
+			if strings.HasSuffix(name, wc) {
+				return true, nil
+			}
+		}
+		for _, n := range names {
+			if n == name {
+				return true, nil
+			}
+		}
+	}
+	err = lerr
+
+	// forward lookup
+	for _, dns := range names {
+		addrs, lerr := net.DefaultResolver.LookupHost(ctx, dns)
+		if lerr != nil {
+			err = lerr
+			continue
+		}
+		for _, addr := range addrs {
+			if addr == host {
+				return true, nil
+			}
+		}
+	}
+	return false, err
+}
+
+func (l *tlsListener) Close() error {
+	err := l.Listener.Close()
+	<-l.donec
+	return err
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/transport/timeout_conn.go b/vendor/github.com/coreos/etcd/pkg/transport/timeout_conn.go
new file mode 100644
index 0000000..7e8c020
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/transport/timeout_conn.go
@@ -0,0 +1,44 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+	"net"
+	"time"
+)
+
+type timeoutConn struct {
+	net.Conn
+	wtimeoutd  time.Duration
+	rdtimeoutd time.Duration
+}
+
+func (c timeoutConn) Write(b []byte) (n int, err error) {
+	if c.wtimeoutd > 0 {
+		if err := c.SetWriteDeadline(time.Now().Add(c.wtimeoutd)); err != nil {
+			return 0, err
+		}
+	}
+	return c.Conn.Write(b)
+}
+
+func (c timeoutConn) Read(b []byte) (n int, err error) {
+	if c.rdtimeoutd > 0 {
+		if err := c.SetReadDeadline(time.Now().Add(c.rdtimeoutd)); err != nil {
+			return 0, err
+		}
+	}
+	return c.Conn.Read(b)
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/transport/timeout_dialer.go b/vendor/github.com/coreos/etcd/pkg/transport/timeout_dialer.go
new file mode 100644
index 0000000..6ae39ec
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/transport/timeout_dialer.go
@@ -0,0 +1,36 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+	"net"
+	"time"
+)
+
+type rwTimeoutDialer struct {
+	wtimeoutd  time.Duration
+	rdtimeoutd time.Duration
+	net.Dialer
+}
+
+func (d *rwTimeoutDialer) Dial(network, address string) (net.Conn, error) {
+	conn, err := d.Dialer.Dial(network, address)
+	tconn := &timeoutConn{
+		rdtimeoutd: d.rdtimeoutd,
+		wtimeoutd:  d.wtimeoutd,
+		Conn:       conn,
+	}
+	return tconn, err
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/transport/timeout_listener.go b/vendor/github.com/coreos/etcd/pkg/transport/timeout_listener.go
new file mode 100644
index 0000000..b35e049
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/transport/timeout_listener.go
@@ -0,0 +1,57 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+	"net"
+	"time"
+)
+
+// NewTimeoutListener returns a listener that listens on the given address.
+// If a read/write on an accepted connection blocks longer than its time limit,
+// it will return a timeout error.
+func NewTimeoutListener(addr string, scheme string, tlsinfo *TLSInfo, rdtimeoutd, wtimeoutd time.Duration) (net.Listener, error) {
+	ln, err := newListener(addr, scheme)
+	if err != nil {
+		return nil, err
+	}
+	ln = &rwTimeoutListener{
+		Listener:   ln,
+		rdtimeoutd: rdtimeoutd,
+		wtimeoutd:  wtimeoutd,
+	}
+	if ln, err = wrapTLS(addr, scheme, tlsinfo, ln); err != nil {
+		return nil, err
+	}
+	return ln, nil
+}
+
+type rwTimeoutListener struct {
+	net.Listener
+	wtimeoutd  time.Duration
+	rdtimeoutd time.Duration
+}
+
+func (rwln *rwTimeoutListener) Accept() (net.Conn, error) {
+	c, err := rwln.Listener.Accept()
+	if err != nil {
+		return nil, err
+	}
+	return timeoutConn{
+		Conn:       c,
+		wtimeoutd:  rwln.wtimeoutd,
+		rdtimeoutd: rwln.rdtimeoutd,
+	}, nil
+}
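
A short sketch of the timeout listener over plain TCP (scheme "http",
nil TLS info); the five-second limits are illustrative:

package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/pkg/transport"
)

func main() {
	ln, err := transport.NewTimeoutListener(
		"127.0.0.1:0", "http", nil,
		5*time.Second, // read deadline applied per Read
		5*time.Second, // write deadline applied per Write
	)
	if err != nil {
		log.Fatal(err)
	}
	defer ln.Close()
	// reads/writes on accepted connections now fail with a timeout
	// error if they block longer than five seconds
}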
diff --git a/vendor/github.com/coreos/etcd/pkg/transport/timeout_transport.go b/vendor/github.com/coreos/etcd/pkg/transport/timeout_transport.go
new file mode 100644
index 0000000..ea16b4c
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/transport/timeout_transport.go
@@ -0,0 +1,51 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+	"net"
+	"net/http"
+	"time"
+)
+
+// NewTimeoutTransport returns a transport created using the given TLS info.
+// If a read/write on a created connection blocks longer than its time limit,
+// it will return a timeout error.
+// If a read/write timeout is set, the transport will not reuse connections.
+func NewTimeoutTransport(info TLSInfo, dialtimeoutd, rdtimeoutd, wtimeoutd time.Duration) (*http.Transport, error) {
+	tr, err := NewTransport(info, dialtimeoutd)
+	if err != nil {
+		return nil, err
+	}
+
+	if rdtimeoutd != 0 || wtimeoutd != 0 {
+		// a connection with a read/write timeout will time out soon after going idle.
+		// it should not be put back into the http transport as an idle connection for future use.
+		tr.MaxIdleConnsPerHost = -1
+	} else {
+		// allow more idle connections between peers to avoid unnecessary port allocation.
+		tr.MaxIdleConnsPerHost = 1024
+	}
+
+	tr.Dial = (&rwTimeoutDialer{
+		Dialer: net.Dialer{
+			Timeout:   dialtimeoutd,
+			KeepAlive: 30 * time.Second,
+		},
+		rdtimeoutd: rdtimeoutd,
+		wtimeoutd:  wtimeoutd,
+	}).Dial
+	return tr, nil
+}
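
And the client side, pairing the timeout transport with http.Client; the
durations are illustrative, and the empty TLSInfo yields a transport that
can also speak plain HTTP:

package main

import (
	"log"
	"net/http"
	"time"

	"github.com/coreos/etcd/pkg/transport"
)

func main() {
	tr, err := transport.NewTimeoutTransport(
		transport.TLSInfo{},
		time.Second,   // dial timeout
		5*time.Second, // read timeout
		5*time.Second, // write timeout
	)
	if err != nil {
		log.Fatal(err)
	}
	cli := &http.Client{Transport: tr}
	_ = cli // requests now fail if a read or write stalls past its limit
}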
diff --git a/vendor/github.com/coreos/etcd/pkg/transport/tls.go b/vendor/github.com/coreos/etcd/pkg/transport/tls.go
new file mode 100644
index 0000000..62fe0d3
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/transport/tls.go
@@ -0,0 +1,49 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+	"fmt"
+	"strings"
+	"time"
+)
+
+// ValidateSecureEndpoints scans the given endpoints against tls info, returning only those
+// endpoints that could be validated as secure.
+func ValidateSecureEndpoints(tlsInfo TLSInfo, eps []string) ([]string, error) {
+	t, err := NewTransport(tlsInfo, 5*time.Second)
+	if err != nil {
+		return nil, err
+	}
+	var errs []string
+	var endpoints []string
+	for _, ep := range eps {
+		if !strings.HasPrefix(ep, "https://") {
+			errs = append(errs, fmt.Sprintf("%q is insecure", ep))
+			continue
+		}
+		conn, cerr := t.Dial("tcp", ep[len("https://"):])
+		if cerr != nil {
+			errs = append(errs, fmt.Sprintf("%q failed to dial (%v)", ep, cerr))
+			continue
+		}
+		conn.Close()
+		endpoints = append(endpoints, ep)
+	}
+	if len(errs) != 0 {
+		err = fmt.Errorf("%s", strings.Join(errs, ","))
+	}
+	return endpoints, err
+}
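
A sketch of filtering a mixed endpoint list; the addresses are
placeholders, and the http:// entry is expected to surface in the
returned error rather than in the result:

package main

import (
	"fmt"

	"github.com/coreos/etcd/pkg/transport"
)

func main() {
	eps := []string{"https://10.0.0.1:2379", "http://10.0.0.2:2379"}
	secure, err := transport.ValidateSecureEndpoints(transport.TLSInfo{}, eps)
	// secure holds only the endpoints that were dialed successfully over
	// TLS; insecure or unreachable ones are folded into err
	fmt.Println(secure, err)
}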
diff --git a/vendor/github.com/coreos/etcd/pkg/transport/transport.go b/vendor/github.com/coreos/etcd/pkg/transport/transport.go
new file mode 100644
index 0000000..4a7fe69
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/transport/transport.go
@@ -0,0 +1,71 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+	"net"
+	"net/http"
+	"strings"
+	"time"
+)
+
+type unixTransport struct{ *http.Transport }
+
+func NewTransport(info TLSInfo, dialtimeoutd time.Duration) (*http.Transport, error) {
+	cfg, err := info.ClientConfig()
+	if err != nil {
+		return nil, err
+	}
+
+	t := &http.Transport{
+		Proxy: http.ProxyFromEnvironment,
+		Dial: (&net.Dialer{
+			Timeout: dialtimeoutd,
+			// value taken from http.DefaultTransport
+			KeepAlive: 30 * time.Second,
+		}).Dial,
+		// value taken from http.DefaultTransport
+		TLSHandshakeTimeout: 10 * time.Second,
+		TLSClientConfig:     cfg,
+	}
+
+	dialer := (&net.Dialer{
+		Timeout:   dialtimeoutd,
+		KeepAlive: 30 * time.Second,
+	})
+	dial := func(network, addr string) (net.Conn, error) {
+		return dialer.Dial("unix", addr)
+	}
+
+	tu := &http.Transport{
+		Proxy:               http.ProxyFromEnvironment,
+		Dial:                dial,
+		TLSHandshakeTimeout: 10 * time.Second,
+		TLSClientConfig:     cfg,
+	}
+	ut := &unixTransport{tu}
+
+	t.RegisterProtocol("unix", ut)
+	t.RegisterProtocol("unixs", ut)
+
+	return t, nil
+}
+
+func (urt *unixTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+	url := *req.URL
+	req.URL = &url
+	req.URL.Scheme = strings.Replace(req.URL.Scheme, "unix", "http", 1)
+	return urt.Transport.RoundTrip(req)
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/transport/unix_listener.go b/vendor/github.com/coreos/etcd/pkg/transport/unix_listener.go
new file mode 100644
index 0000000..123e203
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/transport/unix_listener.go
@@ -0,0 +1,40 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+	"net"
+	"os"
+)
+
+type unixListener struct{ net.Listener }
+
+func NewUnixListener(addr string) (net.Listener, error) {
+	if err := os.Remove(addr); err != nil && !os.IsNotExist(err) {
+		return nil, err
+	}
+	l, err := net.Listen("unix", addr)
+	if err != nil {
+		return nil, err
+	}
+	return &unixListener{l}, nil
+}
+
+func (ul *unixListener) Close() error {
+	if err := os.Remove(ul.Addr().String()); err != nil && !os.IsNotExist(err) {
+		return err
+	}
+	return ul.Listener.Close()
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/wait/wait.go b/vendor/github.com/coreos/etcd/pkg/wait/wait.go
new file mode 100644
index 0000000..9b1df41
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/wait/wait.go
@@ -0,0 +1,91 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package wait provides utility functions for polling and listening using Go
+// channels.
+package wait
+
+import (
+	"log"
+	"sync"
+)
+
+// Wait is an interface that provides the ability to wait and trigger events that
+// are associated with IDs.
+type Wait interface {
+	// Register returns a chan that waits on the given ID.
+	// The chan will be triggered when Trigger is called with
+	// the same ID.
+	Register(id uint64) <-chan interface{}
+	// Trigger triggers the waiting chans with the given ID.
+	Trigger(id uint64, x interface{})
+	IsRegistered(id uint64) bool
+}
+
+type list struct {
+	l sync.RWMutex
+	m map[uint64]chan interface{}
+}
+
+// New creates a Wait.
+func New() Wait {
+	return &list{m: make(map[uint64]chan interface{})}
+}
+
+func (w *list) Register(id uint64) <-chan interface{} {
+	w.l.Lock()
+	defer w.l.Unlock()
+	ch := w.m[id]
+	if ch == nil {
+		ch = make(chan interface{}, 1)
+		w.m[id] = ch
+	} else {
+		log.Panicf("dup id %x", id)
+	}
+	return ch
+}
+
+func (w *list) Trigger(id uint64, x interface{}) {
+	w.l.Lock()
+	ch := w.m[id]
+	delete(w.m, id)
+	w.l.Unlock()
+	if ch != nil {
+		ch <- x
+		close(ch)
+	}
+}
+
+func (w *list) IsRegistered(id uint64) bool {
+	w.l.RLock()
+	defer w.l.RUnlock()
+	_, ok := w.m[id]
+	return ok
+}
+
+type waitWithResponse struct {
+	ch <-chan interface{}
+}
+
+// NewWithResponse creates a Wait whose Register always returns the given chan.
+func NewWithResponse(ch <-chan interface{}) Wait {
+	return &waitWithResponse{ch: ch}
+}
+
+func (w *waitWithResponse) Register(id uint64) <-chan interface{} {
+	return w.ch
+}
+func (w *waitWithResponse) Trigger(id uint64, x interface{}) {}
+func (w *waitWithResponse) IsRegistered(id uint64) bool {
+	panic("waitWithResponse.IsRegistered() shouldn't be called")
+}
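
A minimal sketch of the Register/Trigger handshake, the typical pattern
for matching a response to an in-flight request ID:

package main

import (
	"fmt"

	"github.com/coreos/etcd/pkg/wait"
)

func main() {
	w := wait.New()
	ch := w.Register(42) // registering the same ID twice panics

	go w.Trigger(42, "response") // e.g. fired when a request is applied

	fmt.Println(<-ch)               // "response"
	fmt.Println(w.IsRegistered(42)) // false: Trigger removed the ID
}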
diff --git a/vendor/github.com/coreos/etcd/pkg/wait/wait_time.go b/vendor/github.com/coreos/etcd/pkg/wait/wait_time.go
new file mode 100644
index 0000000..297e48a
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/wait/wait_time.go
@@ -0,0 +1,66 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wait
+
+import "sync"
+
+// WaitTime is like Wait but waits on logical deadlines instead of IDs.
+type WaitTime interface {
+	// Wait returns a chan that waits on the given logical deadline.
+	// The chan will be triggered when Trigger is called with a
+	// deadline that is later than or equal to the one it is waiting for.
+	Wait(deadline uint64) <-chan struct{}
+	// Trigger triggers all the waiting chans with an earlier logical deadline.
+	Trigger(deadline uint64)
+}
+
+var closec chan struct{}
+
+func init() { closec = make(chan struct{}); close(closec) }
+
+type timeList struct {
+	l                   sync.Mutex
+	lastTriggerDeadline uint64
+	m                   map[uint64]chan struct{}
+}
+
+// NewTimeList creates a WaitTime backed by a map of deadline chans.
+func NewTimeList() *timeList {
+	return &timeList{m: make(map[uint64]chan struct{})}
+}
+
+func (tl *timeList) Wait(deadline uint64) <-chan struct{} {
+	tl.l.Lock()
+	defer tl.l.Unlock()
+	if tl.lastTriggerDeadline >= deadline {
+		return closec
+	}
+	ch := tl.m[deadline]
+	if ch == nil {
+		ch = make(chan struct{})
+		tl.m[deadline] = ch
+	}
+	return ch
+}
+
+func (tl *timeList) Trigger(deadline uint64) {
+	tl.l.Lock()
+	defer tl.l.Unlock()
+	tl.lastTriggerDeadline = deadline
+	for t, ch := range tl.m {
+		if t <= deadline {
+			delete(tl.m, t)
+			close(ch)
+		}
+	}
+}
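
A sketch of the logical-deadline variant; every Wait chan at or below the
triggered deadline is closed:

package main

import (
	"fmt"

	"github.com/coreos/etcd/pkg/wait"
)

func main() {
	wt := wait.NewTimeList()
	ch := wt.Wait(5) // wait for logical deadline 5

	wt.Trigger(7) // 7 >= 5, so ch is closed
	<-ch
	fmt.Println("deadline reached")

	// waiting at or below an already-triggered deadline is immediately ready
	<-wt.Wait(6)
	fmt.Println("already past")
}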
diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/auth_client_adapter.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/auth_client_adapter.go
new file mode 100644
index 0000000..33dc91f
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/auth_client_adapter.go
@@ -0,0 +1,93 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adapter
+
+import (
+	"context"
+
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+
+	grpc "google.golang.org/grpc"
+)
+
+type as2ac struct{ as pb.AuthServer }
+
+func AuthServerToAuthClient(as pb.AuthServer) pb.AuthClient {
+	return &as2ac{as}
+}
+
+func (s *as2ac) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (*pb.AuthEnableResponse, error) {
+	return s.as.AuthEnable(ctx, in)
+}
+
+func (s *as2ac) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (*pb.AuthDisableResponse, error) {
+	return s.as.AuthDisable(ctx, in)
+}
+
+func (s *as2ac) Authenticate(ctx context.Context, in *pb.AuthenticateRequest, opts ...grpc.CallOption) (*pb.AuthenticateResponse, error) {
+	return s.as.Authenticate(ctx, in)
+}
+
+func (s *as2ac) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (*pb.AuthRoleAddResponse, error) {
+	return s.as.RoleAdd(ctx, in)
+}
+
+func (s *as2ac) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (*pb.AuthRoleDeleteResponse, error) {
+	return s.as.RoleDelete(ctx, in)
+}
+
+func (s *as2ac) RoleGet(ctx context.Context, in *pb.AuthRoleGetRequest, opts ...grpc.CallOption) (*pb.AuthRoleGetResponse, error) {
+	return s.as.RoleGet(ctx, in)
+}
+
+func (s *as2ac) RoleList(ctx context.Context, in *pb.AuthRoleListRequest, opts ...grpc.CallOption) (*pb.AuthRoleListResponse, error) {
+	return s.as.RoleList(ctx, in)
+}
+
+func (s *as2ac) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (*pb.AuthRoleRevokePermissionResponse, error) {
+	return s.as.RoleRevokePermission(ctx, in)
+}
+
+func (s *as2ac) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (*pb.AuthRoleGrantPermissionResponse, error) {
+	return s.as.RoleGrantPermission(ctx, in)
+}
+
+func (s *as2ac) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (*pb.AuthUserDeleteResponse, error) {
+	return s.as.UserDelete(ctx, in)
+}
+
+func (s *as2ac) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (*pb.AuthUserAddResponse, error) {
+	return s.as.UserAdd(ctx, in)
+}
+
+func (s *as2ac) UserGet(ctx context.Context, in *pb.AuthUserGetRequest, opts ...grpc.CallOption) (*pb.AuthUserGetResponse, error) {
+	return s.as.UserGet(ctx, in)
+}
+
+func (s *as2ac) UserList(ctx context.Context, in *pb.AuthUserListRequest, opts ...grpc.CallOption) (*pb.AuthUserListResponse, error) {
+	return s.as.UserList(ctx, in)
+}
+
+func (s *as2ac) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (*pb.AuthUserGrantRoleResponse, error) {
+	return s.as.UserGrantRole(ctx, in)
+}
+
+func (s *as2ac) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (*pb.AuthUserRevokeRoleResponse, error) {
+	return s.as.UserRevokeRole(ctx, in)
+}
+
+func (s *as2ac) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (*pb.AuthUserChangePasswordResponse, error) {
+	return s.as.UserChangePassword(ctx, in)
+}
diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/chan_stream.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/chan_stream.go
new file mode 100644
index 0000000..82e3411
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/chan_stream.go
@@ -0,0 +1,165 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adapter
+
+import (
+	"context"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/metadata"
+)
+
+// chanServerStream implements grpc.ServerStream with a chanStream
+type chanServerStream struct {
+	headerc  chan<- metadata.MD
+	trailerc chan<- metadata.MD
+	grpc.Stream
+
+	headers []metadata.MD
+}
+
+func (ss *chanServerStream) SendHeader(md metadata.MD) error {
+	if ss.headerc == nil {
+		return errAlreadySentHeader
+	}
+	outmd := make(map[string][]string)
+	for _, h := range append(ss.headers, md) {
+		for k, v := range h {
+			outmd[k] = v
+		}
+	}
+	select {
+	case ss.headerc <- outmd:
+		ss.headerc = nil
+		ss.headers = nil
+		return nil
+	case <-ss.Context().Done():
+	}
+	return ss.Context().Err()
+}
+
+func (ss *chanServerStream) SetHeader(md metadata.MD) error {
+	if ss.headerc == nil {
+		return errAlreadySentHeader
+	}
+	ss.headers = append(ss.headers, md)
+	return nil
+}
+
+func (ss *chanServerStream) SetTrailer(md metadata.MD) {
+	ss.trailerc <- md
+}
+
+// chanClientStream implements grpc.ClientStream with a chanStream
+type chanClientStream struct {
+	headerc  <-chan metadata.MD
+	trailerc <-chan metadata.MD
+	*chanStream
+}
+
+func (cs *chanClientStream) Header() (metadata.MD, error) {
+	select {
+	case md := <-cs.headerc:
+		return md, nil
+	case <-cs.Context().Done():
+	}
+	return nil, cs.Context().Err()
+}
+
+func (cs *chanClientStream) Trailer() metadata.MD {
+	select {
+	case md := <-cs.trailerc:
+		return md
+	case <-cs.Context().Done():
+		return nil
+	}
+}
+
+func (cs *chanClientStream) CloseSend() error {
+	close(cs.chanStream.sendc)
+	return nil
+}
+
+// chanStream implements grpc.Stream using channels
+type chanStream struct {
+	recvc  <-chan interface{}
+	sendc  chan<- interface{}
+	ctx    context.Context
+	cancel context.CancelFunc
+}
+
+func (s *chanStream) Context() context.Context { return s.ctx }
+
+func (s *chanStream) SendMsg(m interface{}) error {
+	select {
+	case s.sendc <- m:
+		if err, ok := m.(error); ok {
+			return err
+		}
+		return nil
+	case <-s.ctx.Done():
+	}
+	return s.ctx.Err()
+}
+
+func (s *chanStream) RecvMsg(m interface{}) error {
+	v := m.(*interface{})
+	for {
+		select {
+		case msg, ok := <-s.recvc:
+			if !ok {
+				return grpc.ErrClientConnClosing
+			}
+			if err, ok := msg.(error); ok {
+				return err
+			}
+			*v = msg
+			return nil
+		case <-s.ctx.Done():
+		}
+		if len(s.recvc) == 0 {
+			// prioritize any pending recv messages over canceled context
+			break
+		}
+	}
+	return s.ctx.Err()
+}
+
+func newPipeStream(ctx context.Context, ssHandler func(chanServerStream) error) chanClientStream {
+	// ch1 is buffered so the server can send an error on close
+	ch1, ch2 := make(chan interface{}, 1), make(chan interface{})
+	headerc, trailerc := make(chan metadata.MD, 1), make(chan metadata.MD, 1)
+
+	cctx, ccancel := context.WithCancel(ctx)
+	cli := &chanStream{recvc: ch1, sendc: ch2, ctx: cctx, cancel: ccancel}
+	cs := chanClientStream{headerc, trailerc, cli}
+
+	sctx, scancel := context.WithCancel(ctx)
+	srv := &chanStream{recvc: ch2, sendc: ch1, ctx: sctx, cancel: scancel}
+	ss := chanServerStream{headerc, trailerc, srv, nil}
+
+	go func() {
+		if err := ssHandler(ss); err != nil {
+			select {
+			case srv.sendc <- err:
+			case <-sctx.Done():
+			case <-cctx.Done():
+			}
+		}
+		scancel()
+		ccancel()
+	}()
+	return cs
+}
diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/cluster_client_adapter.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/cluster_client_adapter.go
new file mode 100644
index 0000000..6c03409
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/cluster_client_adapter.go
@@ -0,0 +1,45 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adapter
+
+import (
+	"context"
+
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+
+	"google.golang.org/grpc"
+)
+
+type cls2clc struct{ cls pb.ClusterServer }
+
+func ClusterServerToClusterClient(cls pb.ClusterServer) pb.ClusterClient {
+	return &cls2clc{cls}
+}
+
+func (s *cls2clc) MemberList(ctx context.Context, r *pb.MemberListRequest, opts ...grpc.CallOption) (*pb.MemberListResponse, error) {
+	return s.cls.MemberList(ctx, r)
+}
+
+func (s *cls2clc) MemberAdd(ctx context.Context, r *pb.MemberAddRequest, opts ...grpc.CallOption) (*pb.MemberAddResponse, error) {
+	return s.cls.MemberAdd(ctx, r)
+}
+
+func (s *cls2clc) MemberUpdate(ctx context.Context, r *pb.MemberUpdateRequest, opts ...grpc.CallOption) (*pb.MemberUpdateResponse, error) {
+	return s.cls.MemberUpdate(ctx, r)
+}
+
+func (s *cls2clc) MemberRemove(ctx context.Context, r *pb.MemberRemoveRequest, opts ...grpc.CallOption) (*pb.MemberRemoveResponse, error) {
+	return s.cls.MemberRemove(ctx, r)
+}
diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/doc.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/doc.go
new file mode 100644
index 0000000..7170be2
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/doc.go
@@ -0,0 +1,17 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package adapter provides gRPC adapters between client and server
+// gRPC interfaces without needing to go through a gRPC connection.
+package adapter
diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/election_client_adapter.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/election_client_adapter.go
new file mode 100644
index 0000000..a2ebf13
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/election_client_adapter.go
@@ -0,0 +1,80 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adapter
+
+import (
+	"context"
+
+	"github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb"
+
+	"google.golang.org/grpc"
+)
+
+type es2ec struct{ es v3electionpb.ElectionServer }
+
+func ElectionServerToElectionClient(es v3electionpb.ElectionServer) v3electionpb.ElectionClient {
+	return &es2ec{es}
+}
+
+func (s *es2ec) Campaign(ctx context.Context, r *v3electionpb.CampaignRequest, opts ...grpc.CallOption) (*v3electionpb.CampaignResponse, error) {
+	return s.es.Campaign(ctx, r)
+}
+
+func (s *es2ec) Proclaim(ctx context.Context, r *v3electionpb.ProclaimRequest, opts ...grpc.CallOption) (*v3electionpb.ProclaimResponse, error) {
+	return s.es.Proclaim(ctx, r)
+}
+
+func (s *es2ec) Leader(ctx context.Context, r *v3electionpb.LeaderRequest, opts ...grpc.CallOption) (*v3electionpb.LeaderResponse, error) {
+	return s.es.Leader(ctx, r)
+}
+
+func (s *es2ec) Resign(ctx context.Context, r *v3electionpb.ResignRequest, opts ...grpc.CallOption) (*v3electionpb.ResignResponse, error) {
+	return s.es.Resign(ctx, r)
+}
+
+func (s *es2ec) Observe(ctx context.Context, in *v3electionpb.LeaderRequest, opts ...grpc.CallOption) (v3electionpb.Election_ObserveClient, error) {
+	cs := newPipeStream(ctx, func(ss chanServerStream) error {
+		return s.es.Observe(in, &es2ecServerStream{ss})
+	})
+	return &es2ecClientStream{cs}, nil
+}
+
+// es2ecClientStream implements Election_ObserveClient
+type es2ecClientStream struct{ chanClientStream }
+
+// es2ecServerStream implements Election_ObserveServer
+type es2ecServerStream struct{ chanServerStream }
+
+func (s *es2ecClientStream) Send(rr *v3electionpb.LeaderRequest) error {
+	return s.SendMsg(rr)
+}
+func (s *es2ecClientStream) Recv() (*v3electionpb.LeaderResponse, error) {
+	var v interface{}
+	if err := s.RecvMsg(&v); err != nil {
+		return nil, err
+	}
+	return v.(*v3electionpb.LeaderResponse), nil
+}
+
+func (s *es2ecServerStream) Send(rr *v3electionpb.LeaderResponse) error {
+	return s.SendMsg(rr)
+}
+func (s *es2ecServerStream) Recv() (*v3electionpb.LeaderRequest, error) {
+	var v interface{}
+	if err := s.RecvMsg(&v); err != nil {
+		return nil, err
+	}
+	return v.(*v3electionpb.LeaderRequest), nil
+}
diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/kv_client_adapter.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/kv_client_adapter.go
new file mode 100644
index 0000000..acd5673
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/kv_client_adapter.go
@@ -0,0 +1,49 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adapter
+
+import (
+	"context"
+
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+
+	grpc "google.golang.org/grpc"
+)
+
+type kvs2kvc struct{ kvs pb.KVServer }
+
+func KvServerToKvClient(kvs pb.KVServer) pb.KVClient {
+	return &kvs2kvc{kvs}
+}
+
+func (s *kvs2kvc) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (*pb.RangeResponse, error) {
+	return s.kvs.Range(ctx, in)
+}
+
+func (s *kvs2kvc) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (*pb.PutResponse, error) {
+	return s.kvs.Put(ctx, in)
+}
+
+func (s *kvs2kvc) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (*pb.DeleteRangeResponse, error) {
+	return s.kvs.DeleteRange(ctx, in)
+}
+
+func (s *kvs2kvc) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (*pb.TxnResponse, error) {
+	return s.kvs.Txn(ctx, in)
+}
+
+func (s *kvs2kvc) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (*pb.CompactionResponse, error) {
+	return s.kvs.Compact(ctx, in)
+}
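
As a hedged usage sketch (assuming the usual context, pb, and adapter imports): any pb.KVServer implementation can be driven through the pb.KVClient interface in-process, which is how the proxy and its tests avoid a real gRPC channel. The kvs parameter and the key are illustrative:

// rangeThroughAdapter issues a Range against an in-process KV server; kvs is
// assumed to be an existing pb.KVServer implementation.
func rangeThroughAdapter(ctx context.Context, kvs pb.KVServer) (*pb.RangeResponse, error) {
	kvc := adapter.KvServerToKvClient(kvs) // direct call, no connection
	return kvc.Range(ctx, &pb.RangeRequest{Key: []byte("foo")})
}
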
diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/lease_client_adapter.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/lease_client_adapter.go
new file mode 100644
index 0000000..84c48b5
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/lease_client_adapter.go
@@ -0,0 +1,82 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adapter
+
+import (
+	"context"
+
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+
+	"google.golang.org/grpc"
+)
+
+type ls2lc struct {
+	leaseServer pb.LeaseServer
+}
+
+func LeaseServerToLeaseClient(ls pb.LeaseServer) pb.LeaseClient {
+	return &ls2lc{ls}
+}
+
+func (c *ls2lc) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (*pb.LeaseGrantResponse, error) {
+	return c.leaseServer.LeaseGrant(ctx, in)
+}
+
+func (c *ls2lc) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (*pb.LeaseRevokeResponse, error) {
+	return c.leaseServer.LeaseRevoke(ctx, in)
+}
+
+func (c *ls2lc) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (pb.Lease_LeaseKeepAliveClient, error) {
+	cs := newPipeStream(ctx, func(ss chanServerStream) error {
+		return c.leaseServer.LeaseKeepAlive(&ls2lcServerStream{ss})
+	})
+	return &ls2lcClientStream{cs}, nil
+}
+
+func (c *ls2lc) LeaseTimeToLive(ctx context.Context, in *pb.LeaseTimeToLiveRequest, opts ...grpc.CallOption) (*pb.LeaseTimeToLiveResponse, error) {
+	return c.leaseServer.LeaseTimeToLive(ctx, in)
+}
+
+func (c *ls2lc) LeaseLeases(ctx context.Context, in *pb.LeaseLeasesRequest, opts ...grpc.CallOption) (*pb.LeaseLeasesResponse, error) {
+	return c.leaseServer.LeaseLeases(ctx, in)
+}
+
+// ls2lcClientStream implements Lease_LeaseKeepAliveClient
+type ls2lcClientStream struct{ chanClientStream }
+
+// ls2lcServerStream implements Lease_LeaseKeepAliveServer
+type ls2lcServerStream struct{ chanServerStream }
+
+func (s *ls2lcClientStream) Send(rr *pb.LeaseKeepAliveRequest) error {
+	return s.SendMsg(rr)
+}
+func (s *ls2lcClientStream) Recv() (*pb.LeaseKeepAliveResponse, error) {
+	var v interface{}
+	if err := s.RecvMsg(&v); err != nil {
+		return nil, err
+	}
+	return v.(*pb.LeaseKeepAliveResponse), nil
+}
+
+func (s *ls2lcServerStream) Send(rr *pb.LeaseKeepAliveResponse) error {
+	return s.SendMsg(rr)
+}
+func (s *ls2lcServerStream) Recv() (*pb.LeaseKeepAliveRequest, error) {
+	var v interface{}
+	if err := s.RecvMsg(&v); err != nil {
+		return nil, err
+	}
+	return v.(*pb.LeaseKeepAliveRequest), nil
+}
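
The same pipe-stream pattern serves bidirectional RPCs. A sketch of one keepalive round trip, assuming ls is an existing pb.LeaseServer and leaseID an already-granted lease:

func keepAliveOnce(ctx context.Context, ls pb.LeaseServer, leaseID int64) (*pb.LeaseKeepAliveResponse, error) {
	ka, err := adapter.LeaseServerToLeaseClient(ls).LeaseKeepAlive(ctx)
	if err != nil {
		return nil, err
	}
	if err := ka.Send(&pb.LeaseKeepAliveRequest{ID: leaseID}); err != nil {
		return nil, err
	}
	defer ka.CloseSend()
	return ka.Recv() // response produced by ls, relayed over the pipe stream
}
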
diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/lock_client_adapter.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/lock_client_adapter.go
new file mode 100644
index 0000000..9ce7913
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/lock_client_adapter.go
@@ -0,0 +1,37 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adapter
+
+import (
+	"context"
+
+	"github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb"
+
+	"google.golang.org/grpc"
+)
+
+type ls2lsc struct{ ls v3lockpb.LockServer }
+
+func LockServerToLockClient(ls v3lockpb.LockServer) v3lockpb.LockClient {
+	return &ls2lsc{ls}
+}
+
+func (s *ls2lsc) Lock(ctx context.Context, r *v3lockpb.LockRequest, opts ...grpc.CallOption) (*v3lockpb.LockResponse, error) {
+	return s.ls.Lock(ctx, r)
+}
+
+func (s *ls2lsc) Unlock(ctx context.Context, r *v3lockpb.UnlockRequest, opts ...grpc.CallOption) (*v3lockpb.UnlockResponse, error) {
+	return s.ls.Unlock(ctx, r)
+}
diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/maintenance_client_adapter.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/maintenance_client_adapter.go
new file mode 100644
index 0000000..92d9dfd
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/maintenance_client_adapter.go
@@ -0,0 +1,88 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adapter
+
+import (
+	"context"
+
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+
+	"google.golang.org/grpc"
+)
+
+type mts2mtc struct{ mts pb.MaintenanceServer }
+
+func MaintenanceServerToMaintenanceClient(mts pb.MaintenanceServer) pb.MaintenanceClient {
+	return &mts2mtc{mts}
+}
+
+func (s *mts2mtc) Alarm(ctx context.Context, r *pb.AlarmRequest, opts ...grpc.CallOption) (*pb.AlarmResponse, error) {
+	return s.mts.Alarm(ctx, r)
+}
+
+func (s *mts2mtc) Status(ctx context.Context, r *pb.StatusRequest, opts ...grpc.CallOption) (*pb.StatusResponse, error) {
+	return s.mts.Status(ctx, r)
+}
+
+func (s *mts2mtc) Defragment(ctx context.Context, dr *pb.DefragmentRequest, opts ...grpc.CallOption) (*pb.DefragmentResponse, error) {
+	return s.mts.Defragment(ctx, dr)
+}
+
+func (s *mts2mtc) Hash(ctx context.Context, r *pb.HashRequest, opts ...grpc.CallOption) (*pb.HashResponse, error) {
+	return s.mts.Hash(ctx, r)
+}
+
+func (s *mts2mtc) HashKV(ctx context.Context, r *pb.HashKVRequest, opts ...grpc.CallOption) (*pb.HashKVResponse, error) {
+	return s.mts.HashKV(ctx, r)
+}
+
+func (s *mts2mtc) MoveLeader(ctx context.Context, r *pb.MoveLeaderRequest, opts ...grpc.CallOption) (*pb.MoveLeaderResponse, error) {
+	return s.mts.MoveLeader(ctx, r)
+}
+
+func (s *mts2mtc) Snapshot(ctx context.Context, in *pb.SnapshotRequest, opts ...grpc.CallOption) (pb.Maintenance_SnapshotClient, error) {
+	cs := newPipeStream(ctx, func(ss chanServerStream) error {
+		return s.mts.Snapshot(in, &ss2scServerStream{ss})
+	})
+	return &ss2scClientStream{cs}, nil
+}
+
+// ss2scClientStream implements Maintenance_SnapshotClient
+type ss2scClientStream struct{ chanClientStream }
+
+// ss2scServerStream implements Maintenance_SnapshotServer
+type ss2scServerStream struct{ chanServerStream }
+
+func (s *ss2scClientStream) Send(rr *pb.SnapshotRequest) error {
+	return s.SendMsg(rr)
+}
+func (s *ss2scClientStream) Recv() (*pb.SnapshotResponse, error) {
+	var v interface{}
+	if err := s.RecvMsg(&v); err != nil {
+		return nil, err
+	}
+	return v.(*pb.SnapshotResponse), nil
+}
+
+func (s *ss2scServerStream) Send(rr *pb.SnapshotResponse) error {
+	return s.SendMsg(rr)
+}
+func (s *ss2scServerStream) Recv() (*pb.SnapshotRequest, error) {
+	var v interface{}
+	if err := s.RecvMsg(&v); err != nil {
+		return nil, err
+	}
+	return v.(*pb.SnapshotRequest), nil
+}
diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/watch_client_adapter.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/watch_client_adapter.go
new file mode 100644
index 0000000..afe61e8
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/watch_client_adapter.go
@@ -0,0 +1,66 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adapter
+
+import (
+	"context"
+	"errors"
+
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+	"google.golang.org/grpc"
+)
+
+var errAlreadySentHeader = errors.New("adapter: already sent header")
+
+type ws2wc struct{ wserv pb.WatchServer }
+
+func WatchServerToWatchClient(wserv pb.WatchServer) pb.WatchClient {
+	return &ws2wc{wserv}
+}
+
+func (s *ws2wc) Watch(ctx context.Context, opts ...grpc.CallOption) (pb.Watch_WatchClient, error) {
+	cs := newPipeStream(ctx, func(ss chanServerStream) error {
+		return s.wserv.Watch(&ws2wcServerStream{ss})
+	})
+	return &ws2wcClientStream{cs}, nil
+}
+
+// ws2wcClientStream implements Watch_WatchClient
+type ws2wcClientStream struct{ chanClientStream }
+
+// ws2wcServerStream implements Watch_WatchServer
+type ws2wcServerStream struct{ chanServerStream }
+
+func (s *ws2wcClientStream) Send(wr *pb.WatchRequest) error {
+	return s.SendMsg(wr)
+}
+func (s *ws2wcClientStream) Recv() (*pb.WatchResponse, error) {
+	var v interface{}
+	if err := s.RecvMsg(&v); err != nil {
+		return nil, err
+	}
+	return v.(*pb.WatchResponse), nil
+}
+
+func (s *ws2wcServerStream) Send(wr *pb.WatchResponse) error {
+	return s.SendMsg(wr)
+}
+func (s *ws2wcServerStream) Recv() (*pb.WatchRequest, error) {
+	var v interface{}
+	if err := s.RecvMsg(&v); err != nil {
+		return nil, err
+	}
+	return v.(*pb.WatchRequest), nil
+}
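
Usage mirrors the lease adapter. A sketch that opens a watch on an illustrative key through the in-process pipe, assuming ws is an existing pb.WatchServer:

func watchCreated(ctx context.Context, ws pb.WatchServer) (*pb.WatchResponse, error) {
	wc, err := adapter.WatchServerToWatchClient(ws).Watch(ctx)
	if err != nil {
		return nil, err
	}
	req := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
		CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo")},
	}}
	if err := wc.Send(req); err != nil {
		return nil, err
	}
	return wc.Recv() // first response carries the watch-created notification
}
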
diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/auth.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/auth.go
new file mode 100644
index 0000000..0ed8d24
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/auth.go
@@ -0,0 +1,110 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package grpcproxy
+
+import (
+	"context"
+
+	"github.com/coreos/etcd/clientv3"
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+)
+
+type AuthProxy struct {
+	client *clientv3.Client
+}
+
+func NewAuthProxy(c *clientv3.Client) pb.AuthServer {
+	return &AuthProxy{client: c}
+}
+
+func (ap *AuthProxy) AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error) {
+	conn := ap.client.ActiveConnection()
+	return pb.NewAuthClient(conn).AuthEnable(ctx, r)
+}
+
+func (ap *AuthProxy) AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error) {
+	conn := ap.client.ActiveConnection()
+	return pb.NewAuthClient(conn).AuthDisable(ctx, r)
+}
+
+func (ap *AuthProxy) Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) {
+	conn := ap.client.ActiveConnection()
+	return pb.NewAuthClient(conn).Authenticate(ctx, r)
+}
+
+func (ap *AuthProxy) RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) {
+	conn := ap.client.ActiveConnection()
+	return pb.NewAuthClient(conn).RoleAdd(ctx, r)
+}
+
+func (ap *AuthProxy) RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) {
+	conn := ap.client.ActiveConnection()
+	return pb.NewAuthClient(conn).RoleDelete(ctx, r)
+}
+
+func (ap *AuthProxy) RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
+	conn := ap.client.ActiveConnection()
+	return pb.NewAuthClient(conn).RoleGet(ctx, r)
+}
+
+func (ap *AuthProxy) RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) {
+	conn := ap.client.ActiveConnection()
+	return pb.NewAuthClient(conn).RoleList(ctx, r)
+}
+
+func (ap *AuthProxy) RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) {
+	conn := ap.client.ActiveConnection()
+	return pb.NewAuthClient(conn).RoleRevokePermission(ctx, r)
+}
+
+func (ap *AuthProxy) RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) {
+	conn := ap.client.ActiveConnection()
+	return pb.NewAuthClient(conn).RoleGrantPermission(ctx, r)
+}
+
+func (ap *AuthProxy) UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) {
+	conn := ap.client.ActiveConnection()
+	return pb.NewAuthClient(conn).UserAdd(ctx, r)
+}
+
+func (ap *AuthProxy) UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) {
+	conn := ap.client.ActiveConnection()
+	return pb.NewAuthClient(conn).UserDelete(ctx, r)
+}
+
+func (ap *AuthProxy) UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
+	conn := ap.client.ActiveConnection()
+	return pb.NewAuthClient(conn).UserGet(ctx, r)
+}
+
+func (ap *AuthProxy) UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) {
+	conn := ap.client.ActiveConnection()
+	return pb.NewAuthClient(conn).UserList(ctx, r)
+}
+
+func (ap *AuthProxy) UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) {
+	conn := ap.client.ActiveConnection()
+	return pb.NewAuthClient(conn).UserGrantRole(ctx, r)
+}
+
+func (ap *AuthProxy) UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) {
+	conn := ap.client.ActiveConnection()
+	return pb.NewAuthClient(conn).UserRevokeRole(ctx, r)
+}
+
+func (ap *AuthProxy) UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) {
+	conn := ap.client.ActiveConnection()
+	return pb.NewAuthClient(conn).UserChangePassword(ctx, r)
+}
diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/cache/store.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/cache/store.go
new file mode 100644
index 0000000..5765228
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/cache/store.go
@@ -0,0 +1,171 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package cache exports functionality for efficiently caching and mapping
+// `RangeRequest`s to corresponding `RangeResponse`s.
+package cache
+
+import (
+	"errors"
+	"sync"
+
+	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+	"github.com/coreos/etcd/pkg/adt"
+	"github.com/golang/groupcache/lru"
+)
+
+var (
+	DefaultMaxEntries = 2048
+	ErrCompacted      = rpctypes.ErrGRPCCompacted
+)
+
+type Cache interface {
+	Add(req *pb.RangeRequest, resp *pb.RangeResponse)
+	Get(req *pb.RangeRequest) (*pb.RangeResponse, error)
+	Compact(revision int64)
+	Invalidate(key []byte, endkey []byte)
+	Size() int
+	Close()
+}
+
+// keyFunc returns the key of a request, which is used to look up its cached response in the cache.
+func keyFunc(req *pb.RangeRequest) string {
+	// TODO: use marshalTo to reduce allocation
+	b, err := req.Marshal()
+	if err != nil {
+		panic(err)
+	}
+	return string(b)
+}
+
+func NewCache(maxCacheEntries int) Cache {
+	return &cache{
+		lru:          lru.New(maxCacheEntries),
+		compactedRev: -1,
+	}
+}
+
+func (c *cache) Close() {}
+
+// cache implements Cache
+type cache struct {
+	mu  sync.RWMutex
+	lru *lru.Cache
+
+	// a reverse index for cache invalidation
+	cachedRanges adt.IntervalTree
+
+	compactedRev int64
+}
+
+// Add adds the response of a request to the cache if its revision is larger than the compacted revision of the cache.
+func (c *cache) Add(req *pb.RangeRequest, resp *pb.RangeResponse) {
+	key := keyFunc(req)
+
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	if req.Revision > c.compactedRev {
+		c.lru.Add(key, resp)
+	}
+	// we do not need to invalidate a request with a revision specified,
+	// so we do not need to add it to the reverse index.
+	if req.Revision != 0 {
+		return
+	}
+
+	var (
+		iv  *adt.IntervalValue
+		ivl adt.Interval
+	)
+	if len(req.RangeEnd) != 0 {
+		ivl = adt.NewStringAffineInterval(string(req.Key), string(req.RangeEnd))
+	} else {
+		ivl = adt.NewStringAffinePoint(string(req.Key))
+	}
+
+	iv = c.cachedRanges.Find(ivl)
+
+	if iv == nil {
+		val := map[string]struct{}{key: {}}
+		c.cachedRanges.Insert(ivl, val)
+	} else {
+		val := iv.Val.(map[string]struct{})
+		val[key] = struct{}{}
+		iv.Val = val
+	}
+}
+
+// Get looks up the cached response for a given request.
+// Get is also responsible for lazy eviction when accessing compacted entries.
+func (c *cache) Get(req *pb.RangeRequest) (*pb.RangeResponse, error) {
+	key := keyFunc(req)
+
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	if req.Revision > 0 && req.Revision < c.compactedRev {
+		c.lru.Remove(key)
+		return nil, ErrCompacted
+	}
+
+	if resp, ok := c.lru.Get(key); ok {
+		return resp.(*pb.RangeResponse), nil
+	}
+	return nil, errors.New("not exist")
+}
+
+// Invalidate invalidates the cache entries intersecting with the given range from key to endkey.
+func (c *cache) Invalidate(key, endkey []byte) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	var (
+		ivs []*adt.IntervalValue
+		ivl adt.Interval
+	)
+	if len(endkey) == 0 {
+		ivl = adt.NewStringAffinePoint(string(key))
+	} else {
+		ivl = adt.NewStringAffineInterval(string(key), string(endkey))
+	}
+
+	ivs = c.cachedRanges.Stab(ivl)
+	for _, iv := range ivs {
+		keys := iv.Val.(map[string]struct{})
+		for key := range keys {
+			c.lru.Remove(key)
+		}
+	}
+	// delete after removing all keys, since Delete is destructive to 'ivs'
+	c.cachedRanges.Delete(ivl)
+}
+
+// Compact invalidates all cached responses before the given rev.
+// The invalidation is lazy; the actual removal happens when the entries are accessed.
+func (c *cache) Compact(revision int64) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	if revision > c.compactedRev {
+		c.compactedRev = revision
+	}
+}
+
+func (c *cache) Size() int {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	return c.lru.Len()
+}
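
A sketch of the resulting contract, with illustrative keys: serializable range responses are stored under the marshaled request, revision-0 entries are indexed for invalidation, and compaction only marks entries for lazy eviction.

func cacheRoundTrip() {
	c := cache.NewCache(cache.DefaultMaxEntries)
	defer c.Close()

	req := &pb.RangeRequest{Key: []byte("foo"), Serializable: true}
	c.Add(req, &pb.RangeResponse{}) // Revision is 0, so the entry is indexed

	if _, err := c.Get(req); err == nil {
		// cache hit
	}
	c.Invalidate([]byte("foo"), nil) // what a put to "foo" triggers
	if _, err := c.Get(req); err != nil {
		// the entry was evicted; Get now reports a miss
	}
}
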
diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/cluster.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/cluster.go
new file mode 100644
index 0000000..6e8d3c8
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/cluster.go
@@ -0,0 +1,177 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package grpcproxy
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"sync"
+
+	"github.com/coreos/etcd/clientv3"
+	"github.com/coreos/etcd/clientv3/naming"
+	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+
+	"golang.org/x/time/rate"
+	gnaming "google.golang.org/grpc/naming"
+)
+
+// allow at most 1 retry per second
+const resolveRetryRate = 1
+
+type clusterProxy struct {
+	clus clientv3.Cluster
+	ctx  context.Context
+	gr   *naming.GRPCResolver
+
+	// advertise client URL
+	advaddr string
+	prefix  string
+
+	umu  sync.RWMutex
+	umap map[string]gnaming.Update
+}
+
+// NewClusterProxy takes an optional prefix to fetch grpc-proxy member endpoints.
+// The returned channel is closed when a grpc-proxy endpoint is registered and
+// the client's context is canceled, so the 'register' loop returns.
+func NewClusterProxy(c *clientv3.Client, advaddr string, prefix string) (pb.ClusterServer, <-chan struct{}) {
+	cp := &clusterProxy{
+		clus: c.Cluster,
+		ctx:  c.Ctx(),
+		gr:   &naming.GRPCResolver{Client: c},
+
+		advaddr: advaddr,
+		prefix:  prefix,
+		umap:    make(map[string]gnaming.Update),
+	}
+
+	donec := make(chan struct{})
+	if advaddr != "" && prefix != "" {
+		go func() {
+			defer close(donec)
+			cp.resolve(prefix)
+		}()
+		return cp, donec
+	}
+
+	close(donec)
+	return cp, donec
+}
+
+func (cp *clusterProxy) resolve(prefix string) {
+	rm := rate.NewLimiter(rate.Limit(resolveRetryRate), resolveRetryRate)
+	for rm.Wait(cp.ctx) == nil {
+		wa, err := cp.gr.Resolve(prefix)
+		if err != nil {
+			plog.Warningf("failed to resolve %q (%v)", prefix, err)
+			continue
+		}
+		cp.monitor(wa)
+	}
+}
+
+func (cp *clusterProxy) monitor(wa gnaming.Watcher) {
+	for cp.ctx.Err() == nil {
+		ups, err := wa.Next()
+		if err != nil {
+			plog.Warningf("clusterProxy watcher error (%v)", err)
+			if rpctypes.ErrorDesc(err) == naming.ErrWatcherClosed.Error() {
+				return
+			}
+		}
+
+		cp.umu.Lock()
+		for i := range ups {
+			switch ups[i].Op {
+			case gnaming.Add:
+				cp.umap[ups[i].Addr] = *ups[i]
+			case gnaming.Delete:
+				delete(cp.umap, ups[i].Addr)
+			}
+		}
+		cp.umu.Unlock()
+	}
+}
+
+func (cp *clusterProxy) MemberAdd(ctx context.Context, r *pb.MemberAddRequest) (*pb.MemberAddResponse, error) {
+	mresp, err := cp.clus.MemberAdd(ctx, r.PeerURLs)
+	if err != nil {
+		return nil, err
+	}
+	resp := (pb.MemberAddResponse)(*mresp)
+	return &resp, err
+}
+
+func (cp *clusterProxy) MemberRemove(ctx context.Context, r *pb.MemberRemoveRequest) (*pb.MemberRemoveResponse, error) {
+	mresp, err := cp.clus.MemberRemove(ctx, r.ID)
+	if err != nil {
+		return nil, err
+	}
+	resp := (pb.MemberRemoveResponse)(*mresp)
+	return &resp, err
+}
+
+func (cp *clusterProxy) MemberUpdate(ctx context.Context, r *pb.MemberUpdateRequest) (*pb.MemberUpdateResponse, error) {
+	mresp, err := cp.clus.MemberUpdate(ctx, r.ID, r.PeerURLs)
+	if err != nil {
+		return nil, err
+	}
+	resp := (pb.MemberUpdateResponse)(*mresp)
+	return &resp, err
+}
+
+func (cp *clusterProxy) membersFromUpdates() ([]*pb.Member, error) {
+	cp.umu.RLock()
+	defer cp.umu.RUnlock()
+	mbs := make([]*pb.Member, 0, len(cp.umap))
+	for addr, upt := range cp.umap {
+		m, err := decodeMeta(fmt.Sprint(upt.Metadata))
+		if err != nil {
+			return nil, err
+		}
+		mbs = append(mbs, &pb.Member{Name: m.Name, ClientURLs: []string{addr}})
+	}
+	return mbs, nil
+}
+
+// MemberList wraps the member list API with the following rules:
+// - If 'advaddr' is not empty and 'prefix' is not empty, return the registered member list via the resolver
+// - If 'advaddr' is not empty and 'prefix' is not empty but registered grpc-proxy members haven't been fetched yet, return the 'advaddr'
+// - If 'advaddr' is not empty and 'prefix' is empty, return 'advaddr' without forcing it to 'register'
+// - If 'advaddr' is empty, forward to the member list API
+func (cp *clusterProxy) MemberList(ctx context.Context, r *pb.MemberListRequest) (*pb.MemberListResponse, error) {
+	if cp.advaddr != "" {
+		if cp.prefix != "" {
+			mbs, err := cp.membersFromUpdates()
+			if err != nil {
+				return nil, err
+			}
+			if len(mbs) > 0 {
+				return &pb.MemberListResponse{Members: mbs}, nil
+			}
+		}
+		// prefix is empty or no grpc-proxy members have been registered
+		hostname, _ := os.Hostname()
+		return &pb.MemberListResponse{Members: []*pb.Member{{Name: hostname, ClientURLs: []string{cp.advaddr}}}}, nil
+	}
+	mresp, err := cp.clus.MemberList(ctx)
+	if err != nil {
+		return nil, err
+	}
+	resp := (pb.MemberListResponse)(*mresp)
+	return &resp, err
+}
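
A construction sketch; the advertised address and registration prefix are illustrative, and cli is assumed to be an existing *clientv3.Client:

func listThroughProxy(cli *clientv3.Client) (*pb.MemberListResponse, error) {
	clusterSrv, donec := grpcproxy.NewClusterProxy(cli, "127.0.0.1:23790", "test-grpcproxy")
	_ = donec // closed once the client's context is canceled and the resolve loop exits
	// until registered members have been fetched, the response lists only the advertised address
	return clusterSrv.MemberList(context.TODO(), &pb.MemberListRequest{})
}
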
diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/doc.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/doc.go
new file mode 100644
index 0000000..fc022e3
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package grpcproxy is an OSI level 7 proxy for etcd v3 API requests.
+package grpcproxy
diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/election.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/election.go
new file mode 100644
index 0000000..4b4a4cc
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/election.go
@@ -0,0 +1,65 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package grpcproxy
+
+import (
+	"context"
+
+	"github.com/coreos/etcd/clientv3"
+	"github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb"
+)
+
+type electionProxy struct {
+	client *clientv3.Client
+}
+
+func NewElectionProxy(client *clientv3.Client) v3electionpb.ElectionServer {
+	return &electionProxy{client: client}
+}
+
+func (ep *electionProxy) Campaign(ctx context.Context, req *v3electionpb.CampaignRequest) (*v3electionpb.CampaignResponse, error) {
+	return v3electionpb.NewElectionClient(ep.client.ActiveConnection()).Campaign(ctx, req)
+}
+
+func (ep *electionProxy) Proclaim(ctx context.Context, req *v3electionpb.ProclaimRequest) (*v3electionpb.ProclaimResponse, error) {
+	return v3electionpb.NewElectionClient(ep.client.ActiveConnection()).Proclaim(ctx, req)
+}
+
+func (ep *electionProxy) Leader(ctx context.Context, req *v3electionpb.LeaderRequest) (*v3electionpb.LeaderResponse, error) {
+	return v3electionpb.NewElectionClient(ep.client.ActiveConnection()).Leader(ctx, req)
+}
+
+func (ep *electionProxy) Observe(req *v3electionpb.LeaderRequest, s v3electionpb.Election_ObserveServer) error {
+	conn := ep.client.ActiveConnection()
+	ctx, cancel := context.WithCancel(s.Context())
+	defer cancel()
+	sc, err := v3electionpb.NewElectionClient(conn).Observe(ctx, req)
+	if err != nil {
+		return err
+	}
+	for {
+		rr, err := sc.Recv()
+		if err != nil {
+			return err
+		}
+		if err = s.Send(rr); err != nil {
+			return err
+		}
+	}
+}
+
+func (ep *electionProxy) Resign(ctx context.Context, req *v3electionpb.ResignRequest) (*v3electionpb.ResignResponse, error) {
+	return v3electionpb.NewElectionClient(ep.client.ActiveConnection()).Resign(ctx, req)
+}
diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/health.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/health.go
new file mode 100644
index 0000000..e5e91f2
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/health.go
@@ -0,0 +1,41 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package grpcproxy
+
+import (
+	"context"
+	"net/http"
+	"time"
+
+	"github.com/coreos/etcd/clientv3"
+	"github.com/coreos/etcd/etcdserver/api/etcdhttp"
+	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
+)
+
+// HandleHealth registers a health handler on '/health'.
+func HandleHealth(mux *http.ServeMux, c *clientv3.Client) {
+	mux.Handle(etcdhttp.PathHealth, etcdhttp.NewHealthHandler(func() etcdhttp.Health { return checkHealth(c) }))
+}
+
+func checkHealth(c *clientv3.Client) etcdhttp.Health {
+	h := etcdhttp.Health{Health: "false"}
+	ctx, cancel := context.WithTimeout(c.Ctx(), time.Second)
+	_, err := c.Get(ctx, "a")
+	cancel()
+	if err == nil || err == rpctypes.ErrPermissionDenied {
+		h.Health = "true"
+	}
+	return h
+}
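
Wiring sketch; the listen address is hypothetical and cli an existing *clientv3.Client. The probe is just a one-second Get against the backend, so a permission-denied error still counts as healthy:

func serveHealth(cli *clientv3.Client) {
	mux := http.NewServeMux()
	grpcproxy.HandleHealth(mux, cli)
	// GET /health returns {"health": "true"} while the backend answers reads
	go http.ListenAndServe("127.0.0.1:23791", mux) // sketch; real code should check the error
}
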
diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/kv.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/kv.go
new file mode 100644
index 0000000..1c9860f
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/kv.go
@@ -0,0 +1,232 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package grpcproxy
+
+import (
+	"context"
+
+	"github.com/coreos/etcd/clientv3"
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+	"github.com/coreos/etcd/proxy/grpcproxy/cache"
+)
+
+type kvProxy struct {
+	kv    clientv3.KV
+	cache cache.Cache
+}
+
+func NewKvProxy(c *clientv3.Client) (pb.KVServer, <-chan struct{}) {
+	kv := &kvProxy{
+		kv:    c.KV,
+		cache: cache.NewCache(cache.DefaultMaxEntries),
+	}
+	donec := make(chan struct{})
+	close(donec)
+	return kv, donec
+}
+
+func (p *kvProxy) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) {
+	if r.Serializable {
+		resp, err := p.cache.Get(r)
+		switch err {
+		case nil:
+			cacheHits.Inc()
+			return resp, nil
+		case cache.ErrCompacted:
+			cacheHits.Inc()
+			return nil, err
+		}
+
+		cachedMisses.Inc()
+	}
+
+	resp, err := p.kv.Do(ctx, RangeRequestToOp(r))
+	if err != nil {
+		return nil, err
+	}
+
+	// cache linearizable as serializable
+	req := *r
+	req.Serializable = true
+	gresp := (*pb.RangeResponse)(resp.Get())
+	p.cache.Add(&req, gresp)
+	cacheKeys.Set(float64(p.cache.Size()))
+
+	return gresp, nil
+}
+
+func (p *kvProxy) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) {
+	p.cache.Invalidate(r.Key, nil)
+	cacheKeys.Set(float64(p.cache.Size()))
+
+	resp, err := p.kv.Do(ctx, PutRequestToOp(r))
+	return (*pb.PutResponse)(resp.Put()), err
+}
+
+func (p *kvProxy) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
+	p.cache.Invalidate(r.Key, r.RangeEnd)
+	cacheKeys.Set(float64(p.cache.Size()))
+
+	resp, err := p.kv.Do(ctx, DelRequestToOp(r))
+	return (*pb.DeleteRangeResponse)(resp.Del()), err
+}
+
+func (p *kvProxy) txnToCache(reqs []*pb.RequestOp, resps []*pb.ResponseOp) {
+	for i := range resps {
+		switch tv := resps[i].Response.(type) {
+		case *pb.ResponseOp_ResponsePut:
+			p.cache.Invalidate(reqs[i].GetRequestPut().Key, nil)
+		case *pb.ResponseOp_ResponseDeleteRange:
+			rdr := reqs[i].GetRequestDeleteRange()
+			p.cache.Invalidate(rdr.Key, rdr.RangeEnd)
+		case *pb.ResponseOp_ResponseRange:
+			req := *(reqs[i].GetRequestRange())
+			req.Serializable = true
+			p.cache.Add(&req, tv.ResponseRange)
+		}
+	}
+}
+
+func (p *kvProxy) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) {
+	op := TxnRequestToOp(r)
+	opResp, err := p.kv.Do(ctx, op)
+	if err != nil {
+		return nil, err
+	}
+	resp := opResp.Txn()
+
+	// txn may claim an outdated key is updated; be safe and invalidate
+	for _, cmp := range r.Compare {
+		p.cache.Invalidate(cmp.Key, cmp.RangeEnd)
+	}
+	// update any fetched keys
+	if resp.Succeeded {
+		p.txnToCache(r.Success, resp.Responses)
+	} else {
+		p.txnToCache(r.Failure, resp.Responses)
+	}
+
+	cacheKeys.Set(float64(p.cache.Size()))
+
+	return (*pb.TxnResponse)(resp), nil
+}
+
+func (p *kvProxy) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) {
+	var opts []clientv3.CompactOption
+	if r.Physical {
+		opts = append(opts, clientv3.WithCompactPhysical())
+	}
+
+	resp, err := p.kv.Compact(ctx, r.Revision, opts...)
+	if err == nil {
+		p.cache.Compact(r.Revision)
+	}
+
+	cacheKeys.Set(float64(p.cache.Size()))
+
+	return (*pb.CompactionResponse)(resp), err
+}
+
+func requestOpToOp(union *pb.RequestOp) clientv3.Op {
+	switch tv := union.Request.(type) {
+	case *pb.RequestOp_RequestRange:
+		if tv.RequestRange != nil {
+			return RangeRequestToOp(tv.RequestRange)
+		}
+	case *pb.RequestOp_RequestPut:
+		if tv.RequestPut != nil {
+			return PutRequestToOp(tv.RequestPut)
+		}
+	case *pb.RequestOp_RequestDeleteRange:
+		if tv.RequestDeleteRange != nil {
+			return DelRequestToOp(tv.RequestDeleteRange)
+		}
+	case *pb.RequestOp_RequestTxn:
+		if tv.RequestTxn != nil {
+			return TxnRequestToOp(tv.RequestTxn)
+		}
+	}
+	panic("unknown request")
+}
+
+func RangeRequestToOp(r *pb.RangeRequest) clientv3.Op {
+	opts := []clientv3.OpOption{}
+	if len(r.RangeEnd) != 0 {
+		opts = append(opts, clientv3.WithRange(string(r.RangeEnd)))
+	}
+	opts = append(opts, clientv3.WithRev(r.Revision))
+	opts = append(opts, clientv3.WithLimit(r.Limit))
+	opts = append(opts, clientv3.WithSort(
+		clientv3.SortTarget(r.SortTarget),
+		clientv3.SortOrder(r.SortOrder)),
+	)
+	opts = append(opts, clientv3.WithMaxCreateRev(r.MaxCreateRevision))
+	opts = append(opts, clientv3.WithMinCreateRev(r.MinCreateRevision))
+	opts = append(opts, clientv3.WithMaxModRev(r.MaxModRevision))
+	opts = append(opts, clientv3.WithMinModRev(r.MinModRevision))
+	if r.CountOnly {
+		opts = append(opts, clientv3.WithCountOnly())
+	}
+	if r.KeysOnly {
+		opts = append(opts, clientv3.WithKeysOnly())
+	}
+	if r.Serializable {
+		opts = append(opts, clientv3.WithSerializable())
+	}
+
+	return clientv3.OpGet(string(r.Key), opts...)
+}
+
+func PutRequestToOp(r *pb.PutRequest) clientv3.Op {
+	opts := []clientv3.OpOption{}
+	opts = append(opts, clientv3.WithLease(clientv3.LeaseID(r.Lease)))
+	if r.IgnoreValue {
+		opts = append(opts, clientv3.WithIgnoreValue())
+	}
+	if r.IgnoreLease {
+		opts = append(opts, clientv3.WithIgnoreLease())
+	}
+	if r.PrevKv {
+		opts = append(opts, clientv3.WithPrevKV())
+	}
+	return clientv3.OpPut(string(r.Key), string(r.Value), opts...)
+}
+
+func DelRequestToOp(r *pb.DeleteRangeRequest) clientv3.Op {
+	opts := []clientv3.OpOption{}
+	if len(r.RangeEnd) != 0 {
+		opts = append(opts, clientv3.WithRange(string(r.RangeEnd)))
+	}
+	if r.PrevKv {
+		opts = append(opts, clientv3.WithPrevKV())
+	}
+	return clientv3.OpDelete(string(r.Key), opts...)
+}
+
+func TxnRequestToOp(r *pb.TxnRequest) clientv3.Op {
+	cmps := make([]clientv3.Cmp, len(r.Compare))
+	thenops := make([]clientv3.Op, len(r.Success))
+	elseops := make([]clientv3.Op, len(r.Failure))
+	for i := range r.Compare {
+		cmps[i] = (clientv3.Cmp)(*r.Compare[i])
+	}
+	for i := range r.Success {
+		thenops[i] = requestOpToOp(r.Success[i])
+	}
+	for i := range r.Failure {
+		elseops[i] = requestOpToOp(r.Failure[i])
+	}
+	return clientv3.OpTxn(cmps, thenops, elseops)
+}
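
Since the request-to-Op converters are exported, a raw pb.RangeRequest can also be replayed against a plain clientv3 client outside the proxy. A sketch with illustrative keys, assuming cli is an existing *clientv3.Client:

func replayRange(ctx context.Context, cli *clientv3.Client) (*pb.RangeResponse, error) {
	op := grpcproxy.RangeRequestToOp(&pb.RangeRequest{
		Key:          []byte("foo"),
		RangeEnd:     []byte("fop"), // half-open range [foo, fop)
		Serializable: true,
	})
	opResp, err := cli.Do(ctx, op)
	if err != nil {
		return nil, err
	}
	return (*pb.RangeResponse)(opResp.Get()), nil // same conversion kvProxy.Range uses
}
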
diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/leader.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/leader.go
new file mode 100644
index 0000000..042c949
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/leader.go
@@ -0,0 +1,115 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package grpcproxy
+
+import (
+	"context"
+	"math"
+	"sync"
+
+	"github.com/coreos/etcd/clientv3"
+	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
+
+	"golang.org/x/time/rate"
+	"google.golang.org/grpc"
+)
+
+const (
+	lostLeaderKey  = "__lostleader" // watched to detect leader loss
+	retryPerSecond = 10
+)
+
+type leader struct {
+	ctx context.Context
+	w   clientv3.Watcher
+	// mu protects leaderc updates.
+	mu       sync.RWMutex
+	leaderc  chan struct{}
+	disconnc chan struct{}
+	donec    chan struct{}
+}
+
+func newLeader(ctx context.Context, w clientv3.Watcher) *leader {
+	l := &leader{
+		ctx:      clientv3.WithRequireLeader(ctx),
+		w:        w,
+		leaderc:  make(chan struct{}),
+		disconnc: make(chan struct{}),
+		donec:    make(chan struct{}),
+	}
+	// begin assuming leader is lost
+	close(l.leaderc)
+	go l.recvLoop()
+	return l
+}
+
+func (l *leader) recvLoop() {
+	defer close(l.donec)
+
+	limiter := rate.NewLimiter(rate.Limit(retryPerSecond), retryPerSecond)
+	rev := int64(math.MaxInt64 - 2)
+	for limiter.Wait(l.ctx) == nil {
+		wch := l.w.Watch(l.ctx, lostLeaderKey, clientv3.WithRev(rev), clientv3.WithCreatedNotify())
+		cresp, ok := <-wch
+		if !ok {
+			l.loseLeader()
+			continue
+		}
+		if cresp.Err() != nil {
+			l.loseLeader()
+			if rpctypes.ErrorDesc(cresp.Err()) == grpc.ErrClientConnClosing.Error() {
+				close(l.disconnc)
+				return
+			}
+			continue
+		}
+		l.gotLeader()
+		<-wch
+		l.loseLeader()
+	}
+}
+
+func (l *leader) loseLeader() {
+	l.mu.RLock()
+	defer l.mu.RUnlock()
+	select {
+	case <-l.leaderc:
+	default:
+		close(l.leaderc)
+	}
+}
+
+// gotLeader forcibly updates the leadership status to having a leader.
+func (l *leader) gotLeader() {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	select {
+	case <-l.leaderc:
+		l.leaderc = make(chan struct{})
+	default:
+	}
+}
+
+func (l *leader) disconnectNotify() <-chan struct{} { return l.disconnc }
+
+func (l *leader) stopNotify() <-chan struct{} { return l.donec }
+
+// lostNotify returns a channel that is closed if there has been
+// a leader loss not yet followed by a leader reacquisition.
+func (l *leader) lostNotify() <-chan struct{} {
+	l.mu.RLock()
+	defer l.mu.RUnlock()
+	return l.leaderc
+}
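
leader is unexported, so its consumers live in this package; a hedged in-package sketch of how the notification channels are meant to be polled, mirroring what the lease proxy below does:

// gateRequest is a hypothetical helper showing the intended use of the
// leader notification channels.
func gateRequest(l *leader) error {
	select {
	case <-l.lostNotify():
		return rpctypes.ErrNoLeader // leader currently lost
	case <-l.disconnectNotify():
		return grpc.ErrClientConnClosing // watch connection is closing
	default:
		return nil // leader assumed present
	}
}
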
diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/lease.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/lease.go
new file mode 100644
index 0000000..65f68b0
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/lease.go
@@ -0,0 +1,382 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package grpcproxy
+
+import (
+	"context"
+	"io"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/coreos/etcd/clientv3"
+	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/metadata"
+)
+
+type leaseProxy struct {
+	// leaseClient handles requests from LeaseGrant() that require a lease ID.
+	leaseClient pb.LeaseClient
+
+	lessor clientv3.Lease
+
+	ctx context.Context
+
+	leader *leader
+
+	// mu protects adding outstanding leaseProxyStream through wg.
+	mu sync.RWMutex
+
+	// wg waits until all outstanding leaseProxyStream quit.
+	wg sync.WaitGroup
+}
+
+func NewLeaseProxy(c *clientv3.Client) (pb.LeaseServer, <-chan struct{}) {
+	cctx, cancel := context.WithCancel(c.Ctx())
+	lp := &leaseProxy{
+		leaseClient: pb.NewLeaseClient(c.ActiveConnection()),
+		lessor:      c.Lease,
+		ctx:         cctx,
+		leader:      newLeader(c.Ctx(), c.Watcher),
+	}
+	ch := make(chan struct{})
+	go func() {
+		defer close(ch)
+		<-lp.leader.stopNotify()
+		lp.mu.Lock()
+		select {
+		case <-lp.ctx.Done():
+		case <-lp.leader.disconnectNotify():
+			cancel()
+		}
+		<-lp.ctx.Done()
+		lp.mu.Unlock()
+		lp.wg.Wait()
+	}()
+	return lp, ch
+}
+
+func (lp *leaseProxy) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
+	rp, err := lp.leaseClient.LeaseGrant(ctx, cr, grpc.FailFast(false))
+	if err != nil {
+		return nil, err
+	}
+	lp.leader.gotLeader()
+	return rp, nil
+}
+
+func (lp *leaseProxy) LeaseRevoke(ctx context.Context, rr *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
+	r, err := lp.lessor.Revoke(ctx, clientv3.LeaseID(rr.ID))
+	if err != nil {
+		return nil, err
+	}
+	lp.leader.gotLeader()
+	return (*pb.LeaseRevokeResponse)(r), nil
+}
+
+func (lp *leaseProxy) LeaseTimeToLive(ctx context.Context, rr *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) {
+	var (
+		r   *clientv3.LeaseTimeToLiveResponse
+		err error
+	)
+	if rr.Keys {
+		r, err = lp.lessor.TimeToLive(ctx, clientv3.LeaseID(rr.ID), clientv3.WithAttachedKeys())
+	} else {
+		r, err = lp.lessor.TimeToLive(ctx, clientv3.LeaseID(rr.ID))
+	}
+	if err != nil {
+		return nil, err
+	}
+	rp := &pb.LeaseTimeToLiveResponse{
+		Header:     r.ResponseHeader,
+		ID:         int64(r.ID),
+		TTL:        r.TTL,
+		GrantedTTL: r.GrantedTTL,
+		Keys:       r.Keys,
+	}
+	return rp, err
+}
+
+func (lp *leaseProxy) LeaseLeases(ctx context.Context, rr *pb.LeaseLeasesRequest) (*pb.LeaseLeasesResponse, error) {
+	r, err := lp.lessor.Leases(ctx)
+	if err != nil {
+		return nil, err
+	}
+	leases := make([]*pb.LeaseStatus, len(r.Leases))
+	for i := range r.Leases {
+		leases[i] = &pb.LeaseStatus{ID: int64(r.Leases[i].ID)}
+	}
+	rp := &pb.LeaseLeasesResponse{
+		Header: r.ResponseHeader,
+		Leases: leases,
+	}
+	return rp, err
+}
+
+func (lp *leaseProxy) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) error {
+	lp.mu.Lock()
+	select {
+	case <-lp.ctx.Done():
+		lp.mu.Unlock()
+		return lp.ctx.Err()
+	default:
+		lp.wg.Add(1)
+	}
+	lp.mu.Unlock()
+
+	ctx, cancel := context.WithCancel(stream.Context())
+	lps := leaseProxyStream{
+		stream:          stream,
+		lessor:          lp.lessor,
+		keepAliveLeases: make(map[int64]*atomicCounter),
+		respc:           make(chan *pb.LeaseKeepAliveResponse),
+		ctx:             ctx,
+		cancel:          cancel,
+	}
+
+	errc := make(chan error, 2)
+
+	var lostLeaderC <-chan struct{}
+	if md, ok := metadata.FromOutgoingContext(stream.Context()); ok {
+		v := md[rpctypes.MetadataRequireLeaderKey]
+		if len(v) > 0 && v[0] == rpctypes.MetadataHasLeader {
+			lostLeaderC = lp.leader.lostNotify()
+			// if leader is known to be lost at creation time, avoid
+			// letting events through at all
+			select {
+			case <-lostLeaderC:
+				lp.wg.Done()
+				return rpctypes.ErrNoLeader
+			default:
+			}
+		}
+	}
+	stopc := make(chan struct{}, 3)
+	go func() {
+		defer func() { stopc <- struct{}{} }()
+		if err := lps.recvLoop(); err != nil {
+			errc <- err
+		}
+	}()
+
+	go func() {
+		defer func() { stopc <- struct{}{} }()
+		if err := lps.sendLoop(); err != nil {
+			errc <- err
+		}
+	}()
+
+	// tears down the LeaseKeepAlive stream if the leader goes down or the entire leaseProxy is terminated.
+	go func() {
+		defer func() { stopc <- struct{}{} }()
+		select {
+		case <-lostLeaderC:
+		case <-ctx.Done():
+		case <-lp.ctx.Done():
+		}
+	}()
+
+	var err error
+	select {
+	case <-stopc:
+		stopc <- struct{}{}
+	case err = <-errc:
+	}
+	cancel()
+
+	// recv/send may only shut down after the function exits;
+	// this goroutine notifies the lease proxy that the stream is done
+	go func() {
+		<-stopc
+		<-stopc
+		<-stopc
+		lps.close()
+		close(errc)
+		lp.wg.Done()
+	}()
+
+	select {
+	case <-lostLeaderC:
+		return rpctypes.ErrNoLeader
+	case <-lp.leader.disconnectNotify():
+		return grpc.ErrClientConnClosing
+	default:
+		if err != nil {
+			return err
+		}
+		return ctx.Err()
+	}
+}
+
+type leaseProxyStream struct {
+	stream pb.Lease_LeaseKeepAliveServer
+
+	lessor clientv3.Lease
+	// wg tracks keepAliveLoop goroutines
+	wg sync.WaitGroup
+	// mu protects keepAliveLeases
+	mu sync.RWMutex
+	// keepAliveLeases tracks, per lease, how many outstanding keepalive requests still need responses.
+	keepAliveLeases map[int64]*atomicCounter
+	// respc receives lease keepalive responses from etcd backend
+	respc chan *pb.LeaseKeepAliveResponse
+
+	ctx    context.Context
+	cancel context.CancelFunc
+}
+
+func (lps *leaseProxyStream) recvLoop() error {
+	for {
+		rr, err := lps.stream.Recv()
+		if err == io.EOF {
+			return nil
+		}
+		if err != nil {
+			return err
+		}
+		lps.mu.Lock()
+		neededResps, ok := lps.keepAliveLeases[rr.ID]
+		if !ok {
+			neededResps = &atomicCounter{}
+			lps.keepAliveLeases[rr.ID] = neededResps
+			lps.wg.Add(1)
+			go func() {
+				defer lps.wg.Done()
+				if err := lps.keepAliveLoop(rr.ID, neededResps); err != nil {
+					lps.cancel()
+				}
+			}()
+		}
+		neededResps.add(1)
+		lps.mu.Unlock()
+	}
+}
+
+func (lps *leaseProxyStream) keepAliveLoop(leaseID int64, neededResps *atomicCounter) error {
+	cctx, ccancel := context.WithCancel(lps.ctx)
+	defer ccancel()
+	respc, err := lps.lessor.KeepAlive(cctx, clientv3.LeaseID(leaseID))
+	if err != nil {
+		return err
+	}
+	// ticker expires when the loop hasn't received a keepalive within the TTL
+	var ticker <-chan time.Time
+	for {
+		select {
+		case <-ticker:
+			lps.mu.Lock()
+			// if there are outstanding keepAlive requests when the ticker fires,
+			// don't close keepAliveLoop(); let it continue processing the requests.
+			if neededResps.get() > 0 {
+				lps.mu.Unlock()
+				ticker = nil
+				continue
+			}
+			delete(lps.keepAliveLeases, leaseID)
+			lps.mu.Unlock()
+			return nil
+		case rp, ok := <-respc:
+			if !ok {
+				lps.mu.Lock()
+				delete(lps.keepAliveLeases, leaseID)
+				lps.mu.Unlock()
+				if neededResps.get() == 0 {
+					return nil
+				}
+				ttlResp, err := lps.lessor.TimeToLive(cctx, clientv3.LeaseID(leaseID))
+				if err != nil {
+					return err
+				}
+				r := &pb.LeaseKeepAliveResponse{
+					Header: ttlResp.ResponseHeader,
+					ID:     int64(ttlResp.ID),
+					TTL:    ttlResp.TTL,
+				}
+				for neededResps.get() > 0 {
+					select {
+					case lps.respc <- r:
+						neededResps.add(-1)
+					case <-lps.ctx.Done():
+						return nil
+					}
+				}
+				return nil
+			}
+			if neededResps.get() == 0 {
+				continue
+			}
+			ticker = time.After(time.Duration(rp.TTL) * time.Second)
+			r := &pb.LeaseKeepAliveResponse{
+				Header: rp.ResponseHeader,
+				ID:     int64(rp.ID),
+				TTL:    rp.TTL,
+			}
+			lps.replyToClient(r, neededResps)
+		}
+	}
+}
+
+func (lps *leaseProxyStream) replyToClient(r *pb.LeaseKeepAliveResponse, neededResps *atomicCounter) {
+	timer := time.After(500 * time.Millisecond)
+	for neededResps.get() > 0 {
+		select {
+		case lps.respc <- r:
+			neededResps.add(-1)
+		case <-timer:
+			return
+		case <-lps.ctx.Done():
+			return
+		}
+	}
+}
+
+func (lps *leaseProxyStream) sendLoop() error {
+	for {
+		select {
+		case lrp, ok := <-lps.respc:
+			if !ok {
+				return nil
+			}
+			if err := lps.stream.Send(lrp); err != nil {
+				return err
+			}
+		case <-lps.ctx.Done():
+			return lps.ctx.Err()
+		}
+	}
+}
+
+func (lps *leaseProxyStream) close() {
+	lps.cancel()
+	lps.wg.Wait()
+	// only close the respc channel once all keepAliveLoop() goroutines have finished;
+	// this ensures those goroutines don't send a response to a closed channel
+	close(lps.respc)
+}
+
+type atomicCounter struct {
+	counter int64
+}
+
+func (ac *atomicCounter) add(delta int64) {
+	atomic.AddInt64(&ac.counter, delta)
+}
+
+func (ac *atomicCounter) get() int64 {
+	return atomic.LoadInt64(&ac.counter)
+}
diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/lock.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/lock.go
new file mode 100644
index 0000000..ceef26f
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/lock.go
@@ -0,0 +1,38 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package grpcproxy
+
+import (
+	"context"
+
+	"github.com/coreos/etcd/clientv3"
+	"github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb"
+)
+
+type lockProxy struct {
+	client *clientv3.Client
+}
+
+func NewLockProxy(client *clientv3.Client) v3lockpb.LockServer {
+	return &lockProxy{client: client}
+}
+
+func (lp *lockProxy) Lock(ctx context.Context, req *v3lockpb.LockRequest) (*v3lockpb.LockResponse, error) {
+	return v3lockpb.NewLockClient(lp.client.ActiveConnection()).Lock(ctx, req)
+}
+
+func (lp *lockProxy) Unlock(ctx context.Context, req *v3lockpb.UnlockRequest) (*v3lockpb.UnlockResponse, error) {
+	return v3lockpb.NewLockClient(lp.client.ActiveConnection()).Unlock(ctx, req)
+}
diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/logger.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/logger.go
new file mode 100644
index 0000000..c2d8180
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/logger.go
@@ -0,0 +1,19 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package grpcproxy
+
+import "github.com/coreos/pkg/capnslog"
+
+var plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "grpcproxy")
diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/maintenance.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/maintenance.go
new file mode 100644
index 0000000..291e8e3
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/maintenance.go
@@ -0,0 +1,90 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package grpcproxy
+
+import (
+	"context"
+	"io"
+
+	"github.com/coreos/etcd/clientv3"
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+)
+
+type maintenanceProxy struct {
+	client *clientv3.Client
+}
+
+func NewMaintenanceProxy(c *clientv3.Client) pb.MaintenanceServer {
+	return &maintenanceProxy{
+		client: c,
+	}
+}
+
+func (mp *maintenanceProxy) Defragment(ctx context.Context, dr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) {
+	conn := mp.client.ActiveConnection()
+	return pb.NewMaintenanceClient(conn).Defragment(ctx, dr)
+}
+
+func (mp *maintenanceProxy) Snapshot(sr *pb.SnapshotRequest, stream pb.Maintenance_SnapshotServer) error {
+	conn := mp.client.ActiveConnection()
+	ctx, cancel := context.WithCancel(stream.Context())
+	defer cancel()
+
+	ctx = withClientAuthToken(ctx, stream.Context())
+
+	sc, err := pb.NewMaintenanceClient(conn).Snapshot(ctx, sr)
+	if err != nil {
+		return err
+	}
+
+	for {
+		rr, err := sc.Recv()
+		if err != nil {
+			if err == io.EOF {
+				return nil
+			}
+			return err
+		}
+		err = stream.Send(rr)
+		if err != nil {
+			return err
+		}
+	}
+}
+
+func (mp *maintenanceProxy) Hash(ctx context.Context, r *pb.HashRequest) (*pb.HashResponse, error) {
+	conn := mp.client.ActiveConnection()
+	return pb.NewMaintenanceClient(conn).Hash(ctx, r)
+}
+
+func (mp *maintenanceProxy) HashKV(ctx context.Context, r *pb.HashKVRequest) (*pb.HashKVResponse, error) {
+	conn := mp.client.ActiveConnection()
+	return pb.NewMaintenanceClient(conn).HashKV(ctx, r)
+}
+
+func (mp *maintenanceProxy) Alarm(ctx context.Context, r *pb.AlarmRequest) (*pb.AlarmResponse, error) {
+	conn := mp.client.ActiveConnection()
+	return pb.NewMaintenanceClient(conn).Alarm(ctx, r)
+}
+
+func (mp *maintenanceProxy) Status(ctx context.Context, r *pb.StatusRequest) (*pb.StatusResponse, error) {
+	conn := mp.client.ActiveConnection()
+	return pb.NewMaintenanceClient(conn).Status(ctx, r)
+}
+
+func (mp *maintenanceProxy) MoveLeader(ctx context.Context, r *pb.MoveLeaderRequest) (*pb.MoveLeaderResponse, error) {
+	conn := mp.client.ActiveConnection()
+	return pb.NewMaintenanceClient(conn).MoveLeader(ctx, r)
+}
diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/metrics.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/metrics.go
new file mode 100644
index 0000000..864fa16
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/metrics.go
@@ -0,0 +1,58 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package grpcproxy
+
+import "github.com/prometheus/client_golang/prometheus"
+
+var (
+	watchersCoalescing = prometheus.NewGauge(prometheus.GaugeOpts{
+		Namespace: "etcd",
+		Subsystem: "grpc_proxy",
+		Name:      "watchers_coalescing_total",
+		Help:      "Total number of current watchers coalescing",
+	})
+	eventsCoalescing = prometheus.NewCounter(prometheus.CounterOpts{
+		Namespace: "etcd",
+		Subsystem: "grpc_proxy",
+		Name:      "events_coalescing_total",
+		Help:      "Total number of events coalescing",
+	})
+	cacheKeys = prometheus.NewGauge(prometheus.GaugeOpts{
+		Namespace: "etcd",
+		Subsystem: "grpc_proxy",
+		Name:      "cache_keys_total",
+		Help:      "Total number of keys/ranges cached",
+	})
+	cacheHits = prometheus.NewGauge(prometheus.GaugeOpts{
+		Namespace: "etcd",
+		Subsystem: "grpc_proxy",
+		Name:      "cache_hits_total",
+		Help:      "Total number of cache hits",
+	})
+	cachedMisses = prometheus.NewGauge(prometheus.GaugeOpts{
+		Namespace: "etcd",
+		Subsystem: "grpc_proxy",
+		Name:      "cache_misses_total",
+		Help:      "Total number of cache misses",
+	})
+)
+
+func init() {
+	prometheus.MustRegister(watchersCoalescing)
+	prometheus.MustRegister(eventsCoalescing)
+	prometheus.MustRegister(cacheKeys)
+	prometheus.MustRegister(cacheHits)
+	prometheus.MustRegister(cachedMisses)
+}
diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/register.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/register.go
new file mode 100644
index 0000000..598c71f
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/register.go
@@ -0,0 +1,94 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package grpcproxy
+
+import (
+	"encoding/json"
+	"os"
+
+	"github.com/coreos/etcd/clientv3"
+	"github.com/coreos/etcd/clientv3/concurrency"
+	"github.com/coreos/etcd/clientv3/naming"
+
+	"golang.org/x/time/rate"
+	gnaming "google.golang.org/grpc/naming"
+)
+
+// allow maximum 1 retry per second
+const registerRetryRate = 1
+
+// Register registers the grpc-proxy server by writing a prefixed key bound to
+// a session with the specified TTL (in seconds). The returned channel is
+// closed when the client's context is canceled.
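+//
+// Usage sketch (illustrative only; cli is assumed to be an established
+// *clientv3.Client):
+//
+//	donec := Register(cli, "proxy-prefix", "127.0.0.1:23790", 60)
+//	<-donec // unblocks once cli's context is canceled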
+func Register(c *clientv3.Client, prefix string, addr string, ttl int) <-chan struct{} {
+	rm := rate.NewLimiter(rate.Limit(registerRetryRate), registerRetryRate)
+
+	donec := make(chan struct{})
+	go func() {
+		defer close(donec)
+
+		for rm.Wait(c.Ctx()) == nil {
+			ss, err := registerSession(c, prefix, addr, ttl)
+			if err != nil {
+				plog.Warningf("failed to create a session %v", err)
+				continue
+			}
+			select {
+			case <-c.Ctx().Done():
+				ss.Close()
+				return
+
+			case <-ss.Done():
+				plog.Warning("session expired; possible network partition or server restart")
+				plog.Warning("creating a new session to rejoin")
+				continue
+			}
+		}
+	}()
+
+	return donec
+}
+
+func registerSession(c *clientv3.Client, prefix string, addr string, ttl int) (*concurrency.Session, error) {
+	ss, err := concurrency.NewSession(c, concurrency.WithTTL(ttl))
+	if err != nil {
+		return nil, err
+	}
+
+	gr := &naming.GRPCResolver{Client: c}
+	if err = gr.Update(c.Ctx(), prefix, gnaming.Update{Op: gnaming.Add, Addr: addr, Metadata: getMeta()}, clientv3.WithLease(ss.Lease())); err != nil {
+		return nil, err
+	}
+
+	plog.Infof("registered %q with %d-second lease", addr, ttl)
+	return ss, nil
+}
+
+// meta represents metadata of proxy register.
+type meta struct {
+	Name string `json:"name"`
+}
+
+func getMeta() string {
+	hostname, _ := os.Hostname()
+	bts, _ := json.Marshal(meta{Name: hostname})
+	return string(bts)
+}
+
+func decodeMeta(s string) (meta, error) {
+	m := meta{}
+	err := json.Unmarshal([]byte(s), &m)
+	return m, err
+}
diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/util.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/util.go
new file mode 100644
index 0000000..2b226fa
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/util.go
@@ -0,0 +1,73 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package grpcproxy
+
+import (
+	"context"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/metadata"
+)
+
+func getAuthTokenFromClient(ctx context.Context) string {
+	md, ok := metadata.FromIncomingContext(ctx)
+	if ok {
+		ts, ok := md["token"]
+		if ok {
+			return ts[0]
+		}
+	}
+	return ""
+}
+
+func withClientAuthToken(ctx context.Context, ctxWithToken context.Context) context.Context {
+	token := getAuthTokenFromClient(ctxWithToken)
+	if token != "" {
+		ctx = context.WithValue(ctx, "token", token)
+	}
+	return ctx
+}
+
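+// proxyTokenCredential implements the grpc credentials.PerRPCCredentials
+// interface so a client's auth token can be attached to proxied RPCs.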
+type proxyTokenCredential struct {
+	token string
+}
+
+func (cred *proxyTokenCredential) RequireTransportSecurity() bool {
+	return false
+}
+
+func (cred *proxyTokenCredential) GetRequestMetadata(ctx context.Context, s ...string) (map[string]string, error) {
+	return map[string]string{
+		"token": cred.token,
+	}, nil
+}
+
+func AuthUnaryClientInterceptor(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+	token := getAuthTokenFromClient(ctx)
+	if token != "" {
+		tokenCred := &proxyTokenCredential{token}
+		opts = append(opts, grpc.PerRPCCredentials(tokenCred))
+	}
+	return invoker(ctx, method, req, reply, cc, opts...)
+}
+
+func AuthStreamClientInterceptor(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
+	tokenif := ctx.Value("token")
+	if tokenif != nil {
+		tokenCred := &proxyTokenCredential{tokenif.(string)}
+		opts = append(opts, grpc.PerRPCCredentials(tokenCred))
+	}
+	return streamer(ctx, desc, cc, method, opts...)
+}
diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch.go
new file mode 100644
index 0000000..603095f
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch.go
@@ -0,0 +1,298 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package grpcproxy
+
+import (
+	"context"
+	"sync"
+
+	"github.com/coreos/etcd/clientv3"
+	"github.com/coreos/etcd/etcdserver/api/v3rpc"
+	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/metadata"
+)
+
+type watchProxy struct {
+	cw  clientv3.Watcher
+	ctx context.Context
+
+	leader *leader
+
+	ranges *watchRanges
+
+	// mu protects adding outstanding watch servers through wg.
+	mu sync.Mutex
+
+	// wg waits until all outstanding watch servers quit.
+	wg sync.WaitGroup
+
+	// kv is used for permission checking
+	kv clientv3.KV
+}
+
+func NewWatchProxy(c *clientv3.Client) (pb.WatchServer, <-chan struct{}) {
+	cctx, cancel := context.WithCancel(c.Ctx())
+	wp := &watchProxy{
+		cw:     c.Watcher,
+		ctx:    cctx,
+		leader: newLeader(c.Ctx(), c.Watcher),
+
+		kv: c.KV, // for permission checking
+	}
+	wp.ranges = newWatchRanges(wp)
+	ch := make(chan struct{})
+	go func() {
+		defer close(ch)
+		<-wp.leader.stopNotify()
+		wp.mu.Lock()
+		select {
+		case <-wp.ctx.Done():
+		case <-wp.leader.disconnectNotify():
+			cancel()
+		}
+		<-wp.ctx.Done()
+		wp.mu.Unlock()
+		wp.wg.Wait()
+		wp.ranges.stop()
+	}()
+	return wp, ch
+}
+
+func (wp *watchProxy) Watch(stream pb.Watch_WatchServer) (err error) {
+	wp.mu.Lock()
+	select {
+	case <-wp.ctx.Done():
+		wp.mu.Unlock()
+		select {
+		case <-wp.leader.disconnectNotify():
+			return grpc.ErrClientConnClosing
+		default:
+			return wp.ctx.Err()
+		}
+	default:
+		wp.wg.Add(1)
+	}
+	wp.mu.Unlock()
+
+	ctx, cancel := context.WithCancel(stream.Context())
+	wps := &watchProxyStream{
+		ranges:   wp.ranges,
+		watchers: make(map[int64]*watcher),
+		stream:   stream,
+		watchCh:  make(chan *pb.WatchResponse, 1024),
+		ctx:      ctx,
+		cancel:   cancel,
+		kv:       wp.kv,
+	}
+
+	var lostLeaderC <-chan struct{}
+	if md, ok := metadata.FromOutgoingContext(stream.Context()); ok {
+		v := md[rpctypes.MetadataRequireLeaderKey]
+		if len(v) > 0 && v[0] == rpctypes.MetadataHasLeader {
+			lostLeaderC = wp.leader.lostNotify()
+			// if leader is known to be lost at creation time, avoid
+			// letting events through at all
+			select {
+			case <-lostLeaderC:
+				wp.wg.Done()
+				return rpctypes.ErrNoLeader
+			default:
+			}
+		}
+	}
+
+	// post to stopc => terminate server stream; can't use a waitgroup
+	// since all goroutines will only terminate after Watch() exits.
+	stopc := make(chan struct{}, 3)
+	go func() {
+		defer func() { stopc <- struct{}{} }()
+		wps.recvLoop()
+	}()
+	go func() {
+		defer func() { stopc <- struct{}{} }()
+		wps.sendLoop()
+	}()
+	// tear down watch if leader goes down or entire watch proxy is terminated
+	go func() {
+		defer func() { stopc <- struct{}{} }()
+		select {
+		case <-lostLeaderC:
+		case <-ctx.Done():
+		case <-wp.ctx.Done():
+		}
+	}()
+
+	<-stopc
+	cancel()
+
+	// recv/send may only shutdown after function exits;
+	// goroutine notifies proxy that stream is through
+	go func() {
+		<-stopc
+		<-stopc
+		wps.close()
+		wp.wg.Done()
+	}()
+
+	select {
+	case <-lostLeaderC:
+		return rpctypes.ErrNoLeader
+	case <-wp.leader.disconnectNotify():
+		return grpc.ErrClientConnClosing
+	default:
+		return wps.ctx.Err()
+	}
+}
+
+// watchProxyStream forwards etcd watch events to a proxied client stream.
+type watchProxyStream struct {
+	ranges *watchRanges
+
+	// mu protects watchers and nextWatcherID
+	mu sync.Mutex
+	// watchers receive events from watch broadcast.
+	watchers map[int64]*watcher
+	// nextWatcherID is the id to assign the next watcher on this stream.
+	nextWatcherID int64
+
+	stream pb.Watch_WatchServer
+
+	// watchCh receives watch responses from the watchers.
+	watchCh chan *pb.WatchResponse
+
+	ctx    context.Context
+	cancel context.CancelFunc
+
+	// kv is used for permission checking
+	kv clientv3.KV
+}
+
+func (wps *watchProxyStream) close() {
+	var wg sync.WaitGroup
+	wps.cancel()
+	wps.mu.Lock()
+	wg.Add(len(wps.watchers))
+	for _, wpsw := range wps.watchers {
+		go func(w *watcher) {
+			wps.ranges.delete(w)
+			wg.Done()
+		}(wpsw)
+	}
+	wps.watchers = nil
+	wps.mu.Unlock()
+
+	wg.Wait()
+
+	close(wps.watchCh)
+}
+
+func (wps *watchProxyStream) checkPermissionForWatch(key, rangeEnd []byte) error {
+	if len(key) == 0 {
+		// If the key is empty, check permission on the full keyspace;
+		// see clientv3.WithPrefix() for this encoding.
+		key = []byte{0}
+		rangeEnd = []byte{0}
+	}
+	req := &pb.RangeRequest{
+		Serializable: true,
+		Key:          key,
+		RangeEnd:     rangeEnd,
+		CountOnly:    true,
+		Limit:        1,
+	}
+	_, err := wps.kv.Do(wps.ctx, RangeRequestToOp(req))
+	return err
+}
+
+func (wps *watchProxyStream) recvLoop() error {
+	for {
+		req, err := wps.stream.Recv()
+		if err != nil {
+			return err
+		}
+		switch uv := req.RequestUnion.(type) {
+		case *pb.WatchRequest_CreateRequest:
+			cr := uv.CreateRequest
+
+			if err = wps.checkPermissionForWatch(cr.Key, cr.RangeEnd); err != nil && err == rpctypes.ErrPermissionDenied {
+				// Return a WatchResponse produced by the permission check if and only if
+				// the error is permission denied. For other errors (e.g. timeout or connection closed),
+				// the permission check should do nothing, so the original error code is preserved.
+				wps.watchCh <- &pb.WatchResponse{Header: &pb.ResponseHeader{}, WatchId: -1, Created: true, Canceled: true}
+				continue
+			}
+
+			w := &watcher{
+				wr:  watchRange{string(cr.Key), string(cr.RangeEnd)},
+				id:  wps.nextWatcherID,
+				wps: wps,
+
+				nextrev:  cr.StartRevision,
+				progress: cr.ProgressNotify,
+				prevKV:   cr.PrevKv,
+				filters:  v3rpc.FiltersFromRequest(cr),
+			}
+			if !w.wr.valid() {
+				w.post(&pb.WatchResponse{WatchId: -1, Created: true, Canceled: true})
+				continue
+			}
+			wps.nextWatcherID++
+			w.nextrev = cr.StartRevision
+			wps.watchers[w.id] = w
+			wps.ranges.add(w)
+		case *pb.WatchRequest_CancelRequest:
+			wps.delete(uv.CancelRequest.WatchId)
+		default:
+			panic("not implemented")
+		}
+	}
+}
+
+func (wps *watchProxyStream) sendLoop() {
+	for {
+		select {
+		case wresp, ok := <-wps.watchCh:
+			if !ok {
+				return
+			}
+			if err := wps.stream.Send(wresp); err != nil {
+				return
+			}
+		case <-wps.ctx.Done():
+			return
+		}
+	}
+}
+
+func (wps *watchProxyStream) delete(id int64) {
+	wps.mu.Lock()
+	defer wps.mu.Unlock()
+
+	w, ok := wps.watchers[id]
+	if !ok {
+		return
+	}
+	wps.ranges.delete(w)
+	delete(wps.watchers, id)
+	resp := &pb.WatchResponse{
+		Header:   &w.lastHeader,
+		WatchId:  id,
+		Canceled: true,
+	}
+	wps.watchCh <- resp
+}
diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch_broadcast.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch_broadcast.go
new file mode 100644
index 0000000..46e56c7
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch_broadcast.go
@@ -0,0 +1,152 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package grpcproxy
+
+import (
+	"context"
+	"sync"
+
+	"github.com/coreos/etcd/clientv3"
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+)
+
+// watchBroadcast broadcasts a server watcher to many client watchers.
+type watchBroadcast struct {
+	// cancel stops the underlying etcd server watcher and closes ch.
+	cancel context.CancelFunc
+	donec  chan struct{}
+
+	// mu protects rev and receivers.
+	mu sync.RWMutex
+	// nextrev is the minimum expected next revision of the watcher on ch.
+	nextrev int64
+	// receivers contains all the client-side watchers to serve.
+	receivers map[*watcher]struct{}
+	// responses counts the number of responses
+	responses int
+}
+
+func newWatchBroadcast(wp *watchProxy, w *watcher, update func(*watchBroadcast)) *watchBroadcast {
+	cctx, cancel := context.WithCancel(wp.ctx)
+	wb := &watchBroadcast{
+		cancel:    cancel,
+		nextrev:   w.nextrev,
+		receivers: make(map[*watcher]struct{}),
+		donec:     make(chan struct{}),
+	}
+	wb.add(w)
+	go func() {
+		defer close(wb.donec)
+
+		opts := []clientv3.OpOption{
+			clientv3.WithRange(w.wr.end),
+			clientv3.WithProgressNotify(),
+			clientv3.WithRev(wb.nextrev),
+			clientv3.WithPrevKV(),
+			clientv3.WithCreatedNotify(),
+		}
+
+		cctx = withClientAuthToken(cctx, w.wps.stream.Context())
+
+		wch := wp.cw.Watch(cctx, w.wr.key, opts...)
+
+		for wr := range wch {
+			wb.bcast(wr)
+			update(wb)
+		}
+	}()
+	return wb
+}
+
+func (wb *watchBroadcast) bcast(wr clientv3.WatchResponse) {
+	wb.mu.Lock()
+	defer wb.mu.Unlock()
+	// watchers start on the given revision, if any; ignore header rev on create
+	if wb.responses > 0 || wb.nextrev == 0 {
+		wb.nextrev = wr.Header.Revision + 1
+	}
+	wb.responses++
+	for r := range wb.receivers {
+		r.send(wr)
+	}
+	if len(wb.receivers) > 0 {
+		eventsCoalescing.Add(float64(len(wb.receivers) - 1))
+	}
+}
+
+// add attaches a watcher to the broadcast if the watcher's start revision is
+// at least the broadcast's next revision. It returns true if the watcher was added.
+func (wb *watchBroadcast) add(w *watcher) bool {
+	wb.mu.Lock()
+	defer wb.mu.Unlock()
+	if wb.nextrev > w.nextrev || (wb.nextrev == 0 && w.nextrev != 0) {
+		// wb is too far ahead, w will miss events
+		// or wb is being established with a current watcher
+		return false
+	}
+	if wb.responses == 0 {
+		// Newly created; create event will be sent by etcd.
+		wb.receivers[w] = struct{}{}
+		return true
+	}
+	// already sent by etcd; emulate create event
+	ok := w.post(&pb.WatchResponse{
+		Header: &pb.ResponseHeader{
+			// todo: fill in ClusterId
+			// todo: fill in MemberId:
+			Revision: w.nextrev,
+			// todo: fill in RaftTerm:
+		},
+		WatchId: w.id,
+		Created: true,
+	})
+	if !ok {
+		return false
+	}
+	wb.receivers[w] = struct{}{}
+	watchersCoalescing.Inc()
+
+	return true
+}
+func (wb *watchBroadcast) delete(w *watcher) {
+	wb.mu.Lock()
+	defer wb.mu.Unlock()
+	if _, ok := wb.receivers[w]; !ok {
+		panic("deleting missing watcher from broadcast")
+	}
+	delete(wb.receivers, w)
+	if len(wb.receivers) > 0 {
+		// only decrement when other watchers remain; a sole watcher is not counted as coalescing.
+		watchersCoalescing.Dec()
+	}
+}
+
+func (wb *watchBroadcast) size() int {
+	wb.mu.RLock()
+	defer wb.mu.RUnlock()
+	return len(wb.receivers)
+}
+
+func (wb *watchBroadcast) empty() bool { return wb.size() == 0 }
+
+func (wb *watchBroadcast) stop() {
+	if !wb.empty() {
+		// remove all coalescing watchers from the gauge; the last remaining watcher was never counted.
+		watchersCoalescing.Sub(float64(wb.size() - 1))
+	}
+
+	wb.cancel()
+	<-wb.donec
+}
diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch_broadcasts.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch_broadcasts.go
new file mode 100644
index 0000000..8fe9e5f
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch_broadcasts.go
@@ -0,0 +1,135 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package grpcproxy
+
+import (
+	"sync"
+)
+
+type watchBroadcasts struct {
+	wp *watchProxy
+
+	// mu protects bcasts and watchers from the coalesce loop.
+	mu       sync.Mutex
+	bcasts   map[*watchBroadcast]struct{}
+	watchers map[*watcher]*watchBroadcast
+
+	updatec chan *watchBroadcast
+	donec   chan struct{}
+}
+
+// maxCoalesceReceivers prevents a popular watchBroadcast from being coalesced.
+const maxCoalesceReceivers = 5
+
+func newWatchBroadcasts(wp *watchProxy) *watchBroadcasts {
+	wbs := &watchBroadcasts{
+		wp:       wp,
+		bcasts:   make(map[*watchBroadcast]struct{}),
+		watchers: make(map[*watcher]*watchBroadcast),
+		updatec:  make(chan *watchBroadcast, 1),
+		donec:    make(chan struct{}),
+	}
+	go func() {
+		defer close(wbs.donec)
+		for wb := range wbs.updatec {
+			wbs.coalesce(wb)
+		}
+	}()
+	return wbs
+}
+
+func (wbs *watchBroadcasts) coalesce(wb *watchBroadcast) {
+	if wb.size() >= maxCoalesceReceivers {
+		return
+	}
+	wbs.mu.Lock()
+	for wbswb := range wbs.bcasts {
+		if wbswb == wb {
+			continue
+		}
+		wb.mu.Lock()
+		wbswb.mu.Lock()
+		// 1. check if wbswb is behind wb so it won't skip any events in wb
+		// 2. ensure wbswb started; nextrev == 0 may mean wbswb is waiting
+		// for a current watcher and expects a create event from the server.
+		if wb.nextrev >= wbswb.nextrev && wbswb.responses > 0 {
+			for w := range wb.receivers {
+				wbswb.receivers[w] = struct{}{}
+				wbs.watchers[w] = wbswb
+			}
+			wb.receivers = nil
+		}
+		wbswb.mu.Unlock()
+		wb.mu.Unlock()
+		if wb.empty() {
+			delete(wbs.bcasts, wb)
+			wb.stop()
+			break
+		}
+	}
+	wbs.mu.Unlock()
+}
+
+func (wbs *watchBroadcasts) add(w *watcher) {
+	wbs.mu.Lock()
+	defer wbs.mu.Unlock()
+	// find fitting bcast
+	for wb := range wbs.bcasts {
+		if wb.add(w) {
+			wbs.watchers[w] = wb
+			return
+		}
+	}
+	// no fit; create a bcast
+	wb := newWatchBroadcast(wbs.wp, w, wbs.update)
+	wbs.watchers[w] = wb
+	wbs.bcasts[wb] = struct{}{}
+}
+
+// delete removes a watcher and returns the number of remaining watchers.
+func (wbs *watchBroadcasts) delete(w *watcher) int {
+	wbs.mu.Lock()
+	defer wbs.mu.Unlock()
+
+	wb, ok := wbs.watchers[w]
+	if !ok {
+		panic("deleting missing watcher from broadcasts")
+	}
+	delete(wbs.watchers, w)
+	wb.delete(w)
+	if wb.empty() {
+		delete(wbs.bcasts, wb)
+		wb.stop()
+	}
+	return len(wbs.bcasts)
+}
+
+func (wbs *watchBroadcasts) stop() {
+	wbs.mu.Lock()
+	for wb := range wbs.bcasts {
+		wb.stop()
+	}
+	wbs.bcasts = nil
+	close(wbs.updatec)
+	wbs.mu.Unlock()
+	<-wbs.donec
+}
+
+func (wbs *watchBroadcasts) update(wb *watchBroadcast) {
+	select {
+	case wbs.updatec <- wb:
+	default:
+	}
+}
diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch_ranges.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch_ranges.go
new file mode 100644
index 0000000..31c6b59
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch_ranges.go
@@ -0,0 +1,69 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package grpcproxy
+
+import (
+	"sync"
+)
+
+// watchRanges tracks all open watches for the proxy.
+type watchRanges struct {
+	wp *watchProxy
+
+	mu     sync.Mutex
+	bcasts map[watchRange]*watchBroadcasts
+}
+
+func newWatchRanges(wp *watchProxy) *watchRanges {
+	return &watchRanges{
+		wp:     wp,
+		bcasts: make(map[watchRange]*watchBroadcasts),
+	}
+}
+
+func (wrs *watchRanges) add(w *watcher) {
+	wrs.mu.Lock()
+	defer wrs.mu.Unlock()
+
+	if wbs := wrs.bcasts[w.wr]; wbs != nil {
+		wbs.add(w)
+		return
+	}
+	wbs := newWatchBroadcasts(wrs.wp)
+	wrs.bcasts[w.wr] = wbs
+	wbs.add(w)
+}
+
+func (wrs *watchRanges) delete(w *watcher) {
+	wrs.mu.Lock()
+	defer wrs.mu.Unlock()
+	wbs, ok := wrs.bcasts[w.wr]
+	if !ok {
+		panic("deleting missing range")
+	}
+	if wbs.delete(w) == 0 {
+		wbs.stop()
+		delete(wrs.bcasts, w.wr)
+	}
+}
+
+func (wrs *watchRanges) stop() {
+	wrs.mu.Lock()
+	defer wrs.mu.Unlock()
+	for _, wb := range wrs.bcasts {
+		wb.stop()
+	}
+	wrs.bcasts = nil
+}
diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/watcher.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/watcher.go
new file mode 100644
index 0000000..1a49746
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/watcher.go
@@ -0,0 +1,129 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package grpcproxy
+
+import (
+	"time"
+
+	"github.com/coreos/etcd/clientv3"
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+	"github.com/coreos/etcd/mvcc"
+	"github.com/coreos/etcd/mvcc/mvccpb"
+)
+
+type watchRange struct {
+	key, end string
+}
+
+func (wr *watchRange) valid() bool {
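+	// a range is valid if it is a single key (empty end), a proper interval
+	// (end > key), or "from key to the end of the keyspace" (end == "\x00").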
+	return len(wr.end) == 0 || wr.end > wr.key || (wr.end[0] == 0 && len(wr.end) == 1)
+}
+
+type watcher struct {
+	// user configuration
+
+	wr       watchRange
+	filters  []mvcc.FilterFunc
+	progress bool
+	prevKV   bool
+
+	// id is the id returned to the client on its watch stream.
+	id int64
+	// nextrev is the minimum expected next event revision.
+	nextrev int64
+	// lastHeader has the last header sent over the stream.
+	lastHeader pb.ResponseHeader
+
+	// wps is the parent.
+	wps *watchProxyStream
+}
+
+// send filters out repeated events by discarding revisions older
+// than the last one sent over the watch channel.
+func (w *watcher) send(wr clientv3.WatchResponse) {
+	if wr.IsProgressNotify() && !w.progress {
+		return
+	}
+	if w.nextrev > wr.Header.Revision && len(wr.Events) > 0 {
+		return
+	}
+	if w.nextrev == 0 {
+		// current watch; expect updates following this revision
+		w.nextrev = wr.Header.Revision + 1
+	}
+
+	events := make([]*mvccpb.Event, 0, len(wr.Events))
+
+	var lastRev int64
+	for i := range wr.Events {
+		ev := (*mvccpb.Event)(wr.Events[i])
+		if ev.Kv.ModRevision < w.nextrev {
+			continue
+		} else {
+			// We cannot update w.nextrev here: a txn can have multiple events
+			// with the same revision, and updating w.nextrev mid-loop would
+			// skip the remaining events in that txn.
+			lastRev = ev.Kv.ModRevision
+		}
+
+		filtered := false
+		for _, filter := range w.filters {
+			if filter(*ev) {
+				filtered = true
+				break
+			}
+		}
+		if filtered {
+			continue
+		}
+
+		if !w.prevKV {
+			evCopy := *ev
+			evCopy.PrevKv = nil
+			ev = &evCopy
+		}
+		events = append(events, ev)
+	}
+
+	if lastRev >= w.nextrev {
+		w.nextrev = lastRev + 1
+	}
+
+	// all events are filtered out?
+	if !wr.IsProgressNotify() && !wr.Created && len(events) == 0 && wr.CompactRevision == 0 {
+		return
+	}
+
+	w.lastHeader = wr.Header
+	w.post(&pb.WatchResponse{
+		Header:          &wr.Header,
+		Created:         wr.Created,
+		CompactRevision: wr.CompactRevision,
+		Canceled:        wr.Canceled,
+		WatchId:         w.id,
+		Events:          events,
+	})
+}
+
+// post puts a watch response on the watcher's proxy stream channel
+func (w *watcher) post(wr *pb.WatchResponse) bool {
+	select {
+	case w.wps.watchCh <- wr:
+	case <-time.After(50 * time.Millisecond):
+		w.wps.cancel()
+		return false
+	}
+	return true
+}
diff --git a/vendor/github.com/coreos/etcd/proxy/httpproxy/director.go b/vendor/github.com/coreos/etcd/proxy/httpproxy/director.go
new file mode 100644
index 0000000..d414501
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/proxy/httpproxy/director.go
@@ -0,0 +1,158 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package httpproxy
+
+import (
+	"math/rand"
+	"net/url"
+	"sync"
+	"time"
+)
+
+// defaultRefreshInterval is the default proxyRefreshIntervalMs value
+// as in etcdmain/config.go.
+const defaultRefreshInterval = 30000 * time.Millisecond
+
+var once sync.Once
+
+func init() {
+	rand.Seed(time.Now().UnixNano())
+}
+
+func newDirector(urlsFunc GetProxyURLs, failureWait time.Duration, refreshInterval time.Duration) *director {
+	d := &director{
+		uf:          urlsFunc,
+		failureWait: failureWait,
+	}
+	d.refresh()
+	go func() {
+		// To avoid missing proxy endpoints on the first try, refresh every
+		// second instead whenever no proxy endpoints are available and the
+		// configured refresh interval is defaultRefreshInterval or greater.
+		for {
+			es := d.endpoints()
+			ri := refreshInterval
+			if ri >= defaultRefreshInterval {
+				if len(es) == 0 {
+					ri = time.Second
+				}
+			}
+			if len(es) > 0 {
+				once.Do(func() {
+					var sl []string
+					for _, e := range es {
+						sl = append(sl, e.URL.String())
+					}
+					plog.Infof("endpoints found %q", sl)
+				})
+			}
+			time.Sleep(ri)
+			d.refresh()
+		}
+	}()
+	return d
+}
+
+type director struct {
+	sync.Mutex
+	ep          []*endpoint
+	uf          GetProxyURLs
+	failureWait time.Duration
+}
+
+func (d *director) refresh() {
+	urls := d.uf()
+	d.Lock()
+	defer d.Unlock()
+	var endpoints []*endpoint
+	for _, u := range urls {
+		uu, err := url.Parse(u)
+		if err != nil {
+			plog.Printf("upstream URL invalid: %v", err)
+			continue
+		}
+		endpoints = append(endpoints, newEndpoint(*uu, d.failureWait))
+	}
+
+	// shuffle array to avoid connections being "stuck" to a single endpoint
+	for i := range endpoints {
+		j := rand.Intn(i + 1)
+		endpoints[i], endpoints[j] = endpoints[j], endpoints[i]
+	}
+
+	d.ep = endpoints
+}
+
+func (d *director) endpoints() []*endpoint {
+	d.Lock()
+	defer d.Unlock()
+	filtered := make([]*endpoint, 0)
+	for _, ep := range d.ep {
+		if ep.Available {
+			filtered = append(filtered, ep)
+		}
+	}
+
+	return filtered
+}
+
+func newEndpoint(u url.URL, failureWait time.Duration) *endpoint {
+	ep := endpoint{
+		URL:       u,
+		Available: true,
+		failFunc:  timedUnavailabilityFunc(failureWait),
+	}
+
+	return &ep
+}
+
+type endpoint struct {
+	sync.Mutex
+
+	URL       url.URL
+	Available bool
+
+	failFunc func(ep *endpoint)
+}
+
+func (ep *endpoint) Failed() {
+	ep.Lock()
+	if !ep.Available {
+		ep.Unlock()
+		return
+	}
+
+	ep.Available = false
+	ep.Unlock()
+
+	plog.Printf("marked endpoint %s unavailable", ep.URL.String())
+
+	if ep.failFunc == nil {
+		plog.Printf("no failFunc defined, endpoint %s will be unavailable forever.", ep.URL.String())
+		return
+	}
+
+	ep.failFunc(ep)
+}
+
+func timedUnavailabilityFunc(wait time.Duration) func(*endpoint) {
+	return func(ep *endpoint) {
+		time.AfterFunc(wait, func() {
+			ep.Available = true
+			plog.Printf("marked endpoint %s available, to retest connectivity", ep.URL.String())
+		})
+	}
+}
diff --git a/vendor/github.com/coreos/etcd/proxy/httpproxy/doc.go b/vendor/github.com/coreos/etcd/proxy/httpproxy/doc.go
new file mode 100644
index 0000000..7a45099
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/proxy/httpproxy/doc.go
@@ -0,0 +1,18 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package httpproxy implements the etcd HTTP proxy. The etcd proxy acts as a
+// reverse HTTP proxy forwarding client requests to active etcd cluster members,
+// and does not participate in consensus.
+package httpproxy
diff --git a/vendor/github.com/coreos/etcd/proxy/httpproxy/metrics.go b/vendor/github.com/coreos/etcd/proxy/httpproxy/metrics.go
new file mode 100644
index 0000000..f71258c
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/proxy/httpproxy/metrics.go
@@ -0,0 +1,88 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package httpproxy
+
+import (
+	"net/http"
+	"strconv"
+	"time"
+
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+var (
+	requestsIncoming = prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Namespace: "etcd",
+			Subsystem: "proxy",
+			Name:      "requests_total",
+			Help:      "Counter requests incoming by method.",
+		}, []string{"method"})
+
+	requestsHandled = prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Namespace: "etcd",
+			Subsystem: "proxy",
+			Name:      "handled_total",
+			Help:      "Counter of requests fully handled (by authoratitave servers)",
+		}, []string{"method", "code"})
+
+	requestsDropped = prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Namespace: "etcd",
+			Subsystem: "proxy",
+			Name:      "dropped_total",
+			Help:      "Counter of requests dropped on the proxy.",
+		}, []string{"method", "proxying_error"})
+
+	requestsHandlingTime = prometheus.NewHistogramVec(
+		prometheus.HistogramOpts{
+			Namespace: "etcd",
+			Subsystem: "proxy",
+			Name:      "handling_duration_seconds",
+			Help: "Bucketed histogram of handling time of successful events (non-watches), by method " +
+				"(GET/PUT etc.).",
+			Buckets: prometheus.ExponentialBuckets(0.0005, 2, 13),
+		}, []string{"method"})
+)
+
+type forwardingError string
+
+const (
+	zeroEndpoints         forwardingError = "zero_endpoints"
+	failedSendingRequest  forwardingError = "failed_sending_request"
+	failedGettingResponse forwardingError = "failed_getting_response"
+)
+
+func init() {
+	prometheus.MustRegister(requestsIncoming)
+	prometheus.MustRegister(requestsHandled)
+	prometheus.MustRegister(requestsDropped)
+	prometheus.MustRegister(requestsHandlingTime)
+}
+
+func reportIncomingRequest(request *http.Request) {
+	requestsIncoming.WithLabelValues(request.Method).Inc()
+}
+
+func reportRequestHandled(request *http.Request, response *http.Response, startTime time.Time) {
+	method := request.Method
+	requestsHandled.WithLabelValues(method, strconv.Itoa(response.StatusCode)).Inc()
+	requestsHandlingTime.WithLabelValues(method).Observe(time.Since(startTime).Seconds())
+}
+
+func reportRequestDropped(request *http.Request, err forwardingError) {
+	requestsDropped.WithLabelValues(request.Method, string(err)).Inc()
+}
diff --git a/vendor/github.com/coreos/etcd/proxy/httpproxy/proxy.go b/vendor/github.com/coreos/etcd/proxy/httpproxy/proxy.go
new file mode 100644
index 0000000..3cd3161
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/proxy/httpproxy/proxy.go
@@ -0,0 +1,116 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package httpproxy
+
+import (
+	"encoding/json"
+	"net/http"
+	"strings"
+	"time"
+
+	"golang.org/x/net/http2"
+)
+
+const (
+	// DefaultMaxIdleConnsPerHost is the default maximum number of idle
+	// connections maintained between the proxy and each member. It is set to
+	// 128 so the proxy can handle 128 concurrent requests smoothly over the
+	// long term. If the number of concurrent requests exceeds this value, the
+	// proxy must open a new connection for each request in the excess, which
+	// is costly and may exhaust ephemeral ports.
+	DefaultMaxIdleConnsPerHost = 128
+)
+
+// GetProxyURLs is a function which should return the current set of URLs to
+// which client requests should be proxied. This function will be queried
+// periodically by the proxy Handler to refresh the set of available
+// backends.
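+//
+// An implementation can be as simple as a static list (illustrative sketch;
+// the addresses are placeholders):
+//
+//	var urls GetProxyURLs = func() []string {
+//		return []string{"http://10.0.1.10:2379", "http://10.0.1.11:2379"}
+//	}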
+type GetProxyURLs func() []string
+
+// NewHandler creates a new HTTP handler, listening on the given transport,
+// which will proxy requests to an etcd cluster.
+// The handler will periodically update its view of the cluster.
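+//
+// Usage sketch (illustrative only; addresses and durations are placeholders):
+//
+//	h := NewHandler(&http.Transport{}, func() []string {
+//		return []string{"http://10.0.1.10:2379"}
+//	}, 5*time.Second, 30*time.Second)
+//	http.ListenAndServe(":2379", h)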
+func NewHandler(t *http.Transport, urlsFunc GetProxyURLs, failureWait time.Duration, refreshInterval time.Duration) http.Handler {
+	if t.TLSClientConfig != nil {
+		// Enable http2, see Issue 5033.
+		err := http2.ConfigureTransport(t)
+		if err != nil {
+			plog.Infof("Error enabling Transport HTTP/2 support: %v", err)
+		}
+	}
+
+	p := &reverseProxy{
+		director:  newDirector(urlsFunc, failureWait, refreshInterval),
+		transport: t,
+	}
+
+	mux := http.NewServeMux()
+	mux.Handle("/", p)
+	mux.HandleFunc("/v2/config/local/proxy", p.configHandler)
+
+	return mux
+}
+
+// NewReadonlyHandler wraps the given HTTP handler to allow only GET requests
+func NewReadonlyHandler(hdlr http.Handler) http.Handler {
+	readonly := readonlyHandlerFunc(hdlr)
+	return http.HandlerFunc(readonly)
+}
+
+func readonlyHandlerFunc(next http.Handler) func(http.ResponseWriter, *http.Request) {
+	return func(w http.ResponseWriter, req *http.Request) {
+		if req.Method != "GET" {
+			w.WriteHeader(http.StatusNotImplemented)
+			return
+		}
+
+		next.ServeHTTP(w, req)
+	}
+}
+
+func (p *reverseProxy) configHandler(w http.ResponseWriter, r *http.Request) {
+	if !allowMethod(w, r.Method, "GET") {
+		return
+	}
+
+	eps := p.director.endpoints()
+	epstr := make([]string, len(eps))
+	for i, e := range eps {
+		epstr[i] = e.URL.String()
+	}
+
+	proxyConfig := struct {
+		Endpoints []string `json:"endpoints"`
+	}{
+		Endpoints: epstr,
+	}
+
+	json.NewEncoder(w).Encode(proxyConfig)
+}
+
+// allowMethod verifies that the given method is one of the allowed methods,
+// and if not, it writes an error to w.  A boolean is returned indicating
+// whether or not the method is allowed.
+func allowMethod(w http.ResponseWriter, m string, ms ...string) bool {
+	for _, meth := range ms {
+		if m == meth {
+			return true
+		}
+	}
+	w.Header().Set("Allow", strings.Join(ms, ","))
+	http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
+	return false
+}
diff --git a/vendor/github.com/coreos/etcd/proxy/httpproxy/reverse.go b/vendor/github.com/coreos/etcd/proxy/httpproxy/reverse.go
new file mode 100644
index 0000000..2ecff3a
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/proxy/httpproxy/reverse.go
@@ -0,0 +1,208 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package httpproxy
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"net/url"
+	"strings"
+	"sync/atomic"
+	"time"
+
+	"github.com/coreos/etcd/etcdserver/api/v2http/httptypes"
+	"github.com/coreos/pkg/capnslog"
+)
+
+var (
+	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "proxy/httpproxy")
+
+	// Hop-by-hop headers. These are removed when sent to the backend.
+	// http://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html
+	// This list of headers borrowed from stdlib httputil.ReverseProxy
+	singleHopHeaders = []string{
+		"Connection",
+		"Keep-Alive",
+		"Proxy-Authenticate",
+		"Proxy-Authorization",
+		"Te", // canonicalized version of "TE"
+		"Trailers",
+		"Transfer-Encoding",
+		"Upgrade",
+	}
+)
+
+func removeSingleHopHeaders(hdrs *http.Header) {
+	for _, h := range singleHopHeaders {
+		hdrs.Del(h)
+	}
+}
+
+type reverseProxy struct {
+	director  *director
+	transport http.RoundTripper
+}
+
+func (p *reverseProxy) ServeHTTP(rw http.ResponseWriter, clientreq *http.Request) {
+	reportIncomingRequest(clientreq)
+	proxyreq := new(http.Request)
+	*proxyreq = *clientreq
+	startTime := time.Now()
+
+	var (
+		proxybody []byte
+		err       error
+	)
+
+	if clientreq.Body != nil {
+		proxybody, err = ioutil.ReadAll(clientreq.Body)
+		if err != nil {
+			msg := fmt.Sprintf("failed to read request body: %v", err)
+			plog.Println(msg)
+			e := httptypes.NewHTTPError(http.StatusInternalServerError, "httpproxy: "+msg)
+			if we := e.WriteTo(rw); we != nil {
+				plog.Debugf("error writing HTTPError (%v) to %s", we, clientreq.RemoteAddr)
+			}
+			return
+		}
+	}
+
+	// deep-copy the headers, as these will be modified below
+	proxyreq.Header = make(http.Header)
+	copyHeader(proxyreq.Header, clientreq.Header)
+
+	normalizeRequest(proxyreq)
+	removeSingleHopHeaders(&proxyreq.Header)
+	maybeSetForwardedFor(proxyreq)
+
+	endpoints := p.director.endpoints()
+	if len(endpoints) == 0 {
+		msg := "zero endpoints currently available"
+		reportRequestDropped(clientreq, zeroEndpoints)
+
+		// TODO: limit the rate of the error logging.
+		plog.Println(msg)
+		e := httptypes.NewHTTPError(http.StatusServiceUnavailable, "httpproxy: "+msg)
+		if we := e.WriteTo(rw); we != nil {
+			plog.Debugf("error writing HTTPError (%v) to %s", we, clientreq.RemoteAddr)
+		}
+		return
+	}
+
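+	// requestClosed is set when the client disconnects early so the endpoint
+	// retry loop below can abandon the request instead of retrying.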
+	var requestClosed int32
+	completeCh := make(chan bool, 1)
+	closeNotifier, ok := rw.(http.CloseNotifier)
+	ctx, cancel := context.WithCancel(context.Background())
+	proxyreq = proxyreq.WithContext(ctx)
+	defer cancel()
+	if ok {
+		closeCh := closeNotifier.CloseNotify()
+		go func() {
+			select {
+			case <-closeCh:
+				atomic.StoreInt32(&requestClosed, 1)
+				plog.Printf("client %v closed request prematurely", clientreq.RemoteAddr)
+				cancel()
+			case <-completeCh:
+			}
+		}()
+
+		defer func() {
+			completeCh <- true
+		}()
+	}
+
+	var res *http.Response
+
+	for _, ep := range endpoints {
+		if proxybody != nil {
+			proxyreq.Body = ioutil.NopCloser(bytes.NewBuffer(proxybody))
+		}
+		redirectRequest(proxyreq, ep.URL)
+
+		res, err = p.transport.RoundTrip(proxyreq)
+		if atomic.LoadInt32(&requestClosed) == 1 {
+			return
+		}
+		if err != nil {
+			reportRequestDropped(clientreq, failedSendingRequest)
+			plog.Printf("failed to direct request to %s: %v", ep.URL.String(), err)
+			ep.Failed()
+			continue
+		}
+
+		break
+	}
+
+	if res == nil {
+		// TODO: limit the rate of the error logging.
+		msg := fmt.Sprintf("unable to get response from %d endpoint(s)", len(endpoints))
+		reportRequestDropped(clientreq, failedGettingResponse)
+		plog.Println(msg)
+		e := httptypes.NewHTTPError(http.StatusBadGateway, "httpproxy: "+msg)
+		if we := e.WriteTo(rw); we != nil {
+			plog.Debugf("error writing HTTPError (%v) to %s", we, clientreq.RemoteAddr)
+		}
+		return
+	}
+
+	defer res.Body.Close()
+	reportRequestHandled(clientreq, res, startTime)
+	removeSingleHopHeaders(&res.Header)
+	copyHeader(rw.Header(), res.Header)
+
+	rw.WriteHeader(res.StatusCode)
+	io.Copy(rw, res.Body)
+}
+
+func copyHeader(dst, src http.Header) {
+	for k, vv := range src {
+		for _, v := range vv {
+			dst.Add(k, v)
+		}
+	}
+}
+
+func redirectRequest(req *http.Request, loc url.URL) {
+	req.URL.Scheme = loc.Scheme
+	req.URL.Host = loc.Host
+}
+
+func normalizeRequest(req *http.Request) {
+	req.Proto = "HTTP/1.1"
+	req.ProtoMajor = 1
+	req.ProtoMinor = 1
+	req.Close = false
+}
+
+func maybeSetForwardedFor(req *http.Request) {
+	clientIP, _, err := net.SplitHostPort(req.RemoteAddr)
+	if err != nil {
+		return
+	}
+
+	// If we aren't the first proxy retain prior
+	// X-Forwarded-For information as a comma+space
+	// separated list and fold multiple headers into one.
+	if prior, ok := req.Header["X-Forwarded-For"]; ok {
+		clientIP = strings.Join(prior, ", ") + ", " + clientIP
+	}
+	req.Header.Set("X-Forwarded-For", clientIP)
+}
diff --git a/vendor/github.com/coreos/etcd/proxy/tcpproxy/doc.go b/vendor/github.com/coreos/etcd/proxy/tcpproxy/doc.go
new file mode 100644
index 0000000..6889cac
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/proxy/tcpproxy/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package tcpproxy is an OSI level 4 proxy for routing etcd clients to etcd servers.
+package tcpproxy
diff --git a/vendor/github.com/coreos/etcd/proxy/tcpproxy/userspace.go b/vendor/github.com/coreos/etcd/proxy/tcpproxy/userspace.go
new file mode 100644
index 0000000..6dc1d1d
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/proxy/tcpproxy/userspace.go
@@ -0,0 +1,226 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tcpproxy
+
+import (
+	"fmt"
+	"io"
+	"math/rand"
+	"net"
+	"sync"
+	"time"
+
+	"github.com/coreos/pkg/capnslog"
+)
+
+var (
+	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "proxy/tcpproxy")
+)
+
+type remote struct {
+	mu       sync.Mutex
+	srv      *net.SRV
+	addr     string
+	inactive bool
+}
+
+func (r *remote) inactivate() {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	r.inactive = true
+}
+
+func (r *remote) tryReactivate() error {
+	conn, err := net.Dial("tcp", r.addr)
+	if err != nil {
+		return err
+	}
+	conn.Close()
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	r.inactive = false
+	return nil
+}
+
+func (r *remote) isActive() bool {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	return !r.inactive
+}
+
+type TCPProxy struct {
+	Listener        net.Listener
+	Endpoints       []*net.SRV
+	MonitorInterval time.Duration
+
+	donec chan struct{}
+
+	mu        sync.Mutex // guards the following fields
+	remotes   []*remote
+	pickCount int // for round robin
+}
+
+func (tp *TCPProxy) Run() error {
+	tp.donec = make(chan struct{})
+	if tp.MonitorInterval == 0 {
+		tp.MonitorInterval = 5 * time.Minute
+	}
+	for _, srv := range tp.Endpoints {
+		addr := fmt.Sprintf("%s:%d", srv.Target, srv.Port)
+		tp.remotes = append(tp.remotes, &remote{srv: srv, addr: addr})
+	}
+
+	eps := []string{}
+	for _, ep := range tp.Endpoints {
+		eps = append(eps, fmt.Sprintf("%s:%d", ep.Target, ep.Port))
+	}
+	plog.Printf("ready to proxy client requests to %+v", eps)
+
+	go tp.runMonitor()
+	for {
+		in, err := tp.Listener.Accept()
+		if err != nil {
+			return err
+		}
+
+		go tp.serve(in)
+	}
+}
+
+func (tp *TCPProxy) pick() *remote {
+	var weighted []*remote
+	var unweighted []*remote
+
+	bestPr := uint16(65535)
+	w := 0
+	// find best priority class
+	for _, r := range tp.remotes {
+		switch {
+		case !r.isActive():
+		case r.srv.Priority < bestPr:
+			bestPr = r.srv.Priority
+			w = 0
+			weighted = nil
+			unweighted = []*remote{r}
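+			// fall through so this new best-priority record is also sorted
+			// into the weighted/unweighted buckets below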
+			fallthrough
+		case r.srv.Priority == bestPr:
+			if r.srv.Weight > 0 {
+				weighted = append(weighted, r)
+				w += int(r.srv.Weight)
+			} else {
+				unweighted = append(unweighted, r)
+			}
+		}
+	}
+	if weighted != nil {
+		if len(unweighted) > 0 && rand.Intn(100) == 1 {
+			// In the presence of records containing weights greater
+			// than 0, records with weight 0 should have a very small
+			// chance of being selected.
+			r := unweighted[tp.pickCount%len(unweighted)]
+			tp.pickCount++
+			return r
+		}
+		// choose a uniform random number in [0, sum) and select the first
+		// record whose running weight sum reaches it (per RFC 2782)
+		choose := rand.Intn(w)
+		for i := 0; i < len(weighted); i++ {
+			choose -= int(weighted[i].srv.Weight)
+			if choose <= 0 {
+				return weighted[i]
+			}
+		}
+	}
+	if unweighted != nil {
+		for i := 0; i < len(tp.remotes); i++ {
+			picked := tp.remotes[tp.pickCount%len(tp.remotes)]
+			tp.pickCount++
+			if picked.isActive() {
+				return picked
+			}
+		}
+	}
+	return nil
+}
+
+func (tp *TCPProxy) serve(in net.Conn) {
+	var (
+		err error
+		out net.Conn
+	)
+
+	for {
+		tp.mu.Lock()
+		remote := tp.pick()
+		tp.mu.Unlock()
+		if remote == nil {
+			break
+		}
+		// TODO: add timeout
+		out, err = net.Dial("tcp", remote.addr)
+		if err == nil {
+			break
+		}
+		remote.inactivate()
+		plog.Warningf("deactivated endpoint [%s] due to %v for %v", remote.addr, err, tp.MonitorInterval)
+	}
+
+	if out == nil {
+		in.Close()
+		return
+	}
+
+	go func() {
+		io.Copy(in, out)
+		in.Close()
+		out.Close()
+	}()
+
+	io.Copy(out, in)
+	out.Close()
+	in.Close()
+}
+
+func (tp *TCPProxy) runMonitor() {
+	for {
+		select {
+		case <-time.After(tp.MonitorInterval):
+			tp.mu.Lock()
+			for _, rem := range tp.remotes {
+				if rem.isActive() {
+					continue
+				}
+				go func(r *remote) {
+					if err := r.tryReactivate(); err != nil {
+						plog.Warningf("failed to activate endpoint [%s] due to %v (stay inactive for another %v)", r.addr, err, tp.MonitorInterval)
+					} else {
+						plog.Printf("activated %s", r.addr)
+					}
+				}(rem)
+			}
+			tp.mu.Unlock()
+		case <-tp.donec:
+			return
+		}
+	}
+}
+
+func (tp *TCPProxy) Stop() {
+	// graceful shutdown?
+	// shutdown current connections?
+	tp.Listener.Close()
+	close(tp.donec)
+}
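+
+// Usage sketch (illustrative only; not part of the original package):
+// proxy a local listener to two statically configured SRV endpoints.
+// The host names and ports below are hypothetical.
+//
+//	ln, err := net.Listen("tcp", "127.0.0.1:23790")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	tp := &TCPProxy{
+//		Listener: ln,
+//		Endpoints: []*net.SRV{
+//			{Target: "etcd-a.example.com", Port: 2379, Weight: 1},
+//			{Target: "etcd-b.example.com", Port: 2379, Weight: 1},
+//		},
+//		MonitorInterval: time.Minute,
+//	}
+//	go func() { log.Fatal(tp.Run()) }()
+//	// ...
+//	tp.Stop()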
diff --git a/vendor/github.com/coreos/etcd/raft/README.md b/vendor/github.com/coreos/etcd/raft/README.md
new file mode 100644
index 0000000..fde22b1
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/raft/README.md
@@ -0,0 +1,196 @@
+# Raft library
+
+Raft is a protocol with which a cluster of nodes can maintain a replicated state machine.
+The state machine is kept in sync through the use of a replicated log.
+For more details on Raft, see "In Search of an Understandable Consensus Algorithm"
+(https://ramcloud.stanford.edu/raft.pdf) by Diego Ongaro and John Ousterhout.
+
+This Raft library is stable and feature complete. As of 2016, it is **the most widely used** Raft library in production, serving tens of thousands of clusters each day. It powers distributed systems such as etcd, Kubernetes, Docker Swarm, Cloud Foundry Diego, CockroachDB, TiDB, Project Calico, Flannel, and more.
+
+Most Raft implementations have a monolithic design, including storage handling, message serialization, and network transport. This library instead follows a minimalistic design philosophy by implementing only the core raft algorithm. This minimalism buys flexibility, determinism, and performance.
+
+To keep the codebase small as well as provide flexibility, the library only implements the Raft algorithm; both network and disk IO are left to the user. Library users must implement their own transport layer for message passing between Raft peers over the wire. Similarly, users must implement their own storage layer to persist the Raft log and state.
+
+In order to easily test the Raft library, its behavior should be deterministic. To achieve this determinism, the library models Raft as a state machine.  The state machine takes a `Message` as input. A message can either be a local timer update or a network message sent from a remote peer. The state machine's output is a 3-tuple `{[]Messages, []LogEntries, NextState}` consisting of an array of `Messages`, `log entries`, and `Raft state changes`. For state machines with the same state, the same state machine input should always generate the same state machine output.
+
+A simple example application, _raftexample_, is also available to help illustrate how to use this package in practice: https://github.com/coreos/etcd/tree/master/contrib/raftexample
+
+# Features
+
+This raft implementation is a full-featured implementation of the Raft protocol. Features include:
+
+- Leader election
+- Log replication
+- Log compaction 
+- Membership changes
+- Leadership transfer extension
+- Efficient linearizable read-only queries served by both the leader and followers
+  - the leader checks with quorum and bypasses the Raft log before processing read-only queries
+  - followers ask the leader to get a safe read index before processing read-only queries
+- More efficient lease-based linearizable read-only queries served by both the leader and followers
+  - the leader bypasses the Raft log and processes read-only queries locally
+  - followers ask the leader to get a safe read index before processing read-only queries
+  - this approach relies on the clocks of all the machines in the raft group
+
+This raft implementation also includes a few optional enhancements:
+
+- Optimistic pipelining to reduce log replication latency
+- Flow control for log replication
+- Batching Raft messages to reduce synchronized network I/O calls
+- Batching log entries to reduce synchronized disk I/O
+- Writing to the leader's disk in parallel
+- Internal proposal redirection from followers to leader
+- Automatic stepping down when the leader loses quorum 
+
+## Notable Users
+
+- [cockroachdb](https://github.com/cockroachdb/cockroach) A Scalable, Survivable, Strongly-Consistent SQL Database
+- [dgraph](https://github.com/dgraph-io/dgraph) A Scalable, Distributed, Low Latency, High Throughput Graph Database
+- [etcd](https://github.com/coreos/etcd) A distributed reliable key-value store
+- [tikv](https://github.com/pingcap/tikv) A Distributed transactional key value database powered by Rust and Raft
+- [swarmkit](https://github.com/docker/swarmkit) A toolkit for orchestrating distributed systems at any scale.
+- [chain core](https://github.com/chain/chain) Software for operating permissioned, multi-asset blockchain networks
+
+## Usage
+
+The primary object in raft is a Node. Either start a Node from scratch using raft.StartNode or start a Node from some initial state using raft.RestartNode.
+
+To start a three-node cluster:
+```go
+  storage := raft.NewMemoryStorage()
+  c := &Config{
+    ID:              0x01,
+    ElectionTick:    10,
+    HeartbeatTick:   1,
+    Storage:         storage,
+    MaxSizePerMsg:   4096,
+    MaxInflightMsgs: 256,
+  }
+  // Set peer list to the other nodes in the cluster.
+  // Note that they need to be started separately as well.
+  n := raft.StartNode(c, []raft.Peer{{ID: 0x02}, {ID: 0x03}})
+```
+
+Start a single-node cluster, like so:
+```go
+  // Create storage and config as shown above.
+  // Set peer list to itself, so this node can become the leader of this single-node cluster.
+  peers := []raft.Peer{{ID: 0x01}}
+  n := raft.StartNode(c, peers)
+```
+
+To allow a new node to join this cluster, do not pass in any peers. First, add the node to the existing cluster by calling `ProposeConfChange` on any existing node inside the cluster. Then, start the node with an empty peer list, like so:
+```go
+  // Create storage and config as shown above.
+  n := raft.StartNode(c, nil)
+```
+
+To restart a node from previous state:
+```go
+  storage := raft.NewMemoryStorage()
+
+  // Recover the in-memory storage from persistent snapshot, state and entries.
+  storage.ApplySnapshot(snapshot)
+  storage.SetHardState(state)
+  storage.Append(entries)
+
+  c := &Config{
+    ID:              0x01,
+    ElectionTick:    10,
+    HeartbeatTick:   1,
+    Storage:         storage,
+    MaxSizePerMsg:   4096,
+    MaxInflightMsgs: 256,
+  }
+
+  // Restart raft without peer information.
+  // Peer information is already included in the storage.
+  n := raft.RestartNode(c)
+```
+
+After creating a Node, the user has a few responsibilities:
+
+First, read from the Node.Ready() channel and process the updates it contains. These steps may be performed in parallel, except as noted in step 2.
+
+1. Write Entries, HardState and Snapshot to persistent storage in order, i.e. Entries first, then HardState and Snapshot if they are not empty. If persistent storage supports atomic writes then all of them can be written together. Note that when writing an Entry with Index i, any previously-persisted entries with Index >= i must be discarded.
+
+2. Send all Messages to the nodes named in the To field. It is important that no messages be sent until the latest HardState has been persisted to disk, and all Entries written by any previous Ready batch (Messages may be sent while entries from the same batch are being persisted). To reduce the I/O latency, an optimization can be applied to make leader write to disk in parallel with its followers (as explained at section 10.2.1 in Raft thesis). If any Message has type MsgSnap, call Node.ReportSnapshot() after it has been sent (these messages may be large). Note: Marshalling messages is not thread-safe; it is important to make sure that no new entries are persisted while marshalling. The easiest way to achieve this is to serialise the messages directly inside the main raft loop.
+
+3. Apply Snapshot (if any) and CommittedEntries to the state machine. If any committed Entry has Type EntryConfChange, call Node.ApplyConfChange() to apply it to the node. The configuration change may be cancelled at this point by setting the NodeID field to zero before calling ApplyConfChange (but ApplyConfChange must be called one way or the other, and the decision to cancel must be based solely on the state machine and not external information such as the observed health of the node).
+
+4. Call Node.Advance() to signal readiness for the next batch of updates. This may be done at any time after step 1, although all updates must be processed in the order they were returned by Ready.
+
+Second, all persisted log entries must be made available via an implementation of the Storage interface. The provided MemoryStorage type can be used for this (if repopulating its state upon a restart), or a custom disk-backed implementation can be supplied.
+
+Third, after receiving a message from another node, pass it to Node.Step:
+
+```go
+	func recvRaftRPC(ctx context.Context, m raftpb.Message) {
+		n.Step(ctx, m)
+	}
+```
+
+Finally, call `Node.Tick()` at regular intervals (probably via a `time.Ticker`). Raft has two important timeouts: heartbeat and the election timeout. However, internally to the raft package time is represented by an abstract "tick".
+
+The total state machine handling loop will look something like this:
+
+```go
+  for {
+    select {
+    case <-s.Ticker:
+      n.Tick()
+    case rd := <-s.Node.Ready():
+      saveToStorage(rd.State, rd.Entries, rd.Snapshot)
+      send(rd.Messages)
+      if !raft.IsEmptySnap(rd.Snapshot) {
+        processSnapshot(rd.Snapshot)
+      }
+      for _, entry := range rd.CommittedEntries {
+        process(entry)
+        if entry.Type == raftpb.EntryConfChange {
+          var cc raftpb.ConfChange
+          cc.Unmarshal(entry.Data)
+          s.Node.ApplyConfChange(cc)
+        }
+      }
+      s.Node.Advance()
+    case <-s.done:
+      return
+    }
+  }
+```
+
+To propose changes to the state machine, take the application data, serialize it into a byte slice, and call:
+
+```go
+	n.Propose(ctx, data)
+```
+
+If the proposal is committed, data will appear in committed entries with type raftpb.EntryNormal. There is no guarantee that a proposed command will be committed; the command may have to be reproposed after a timeout. 
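+
+As a rough sketch, reproposal can be wrapped in a retry loop. Here `isCommitted` is a hypothetical, application-defined check over the entries it has applied, and imports (`context`, `time`, `raft`) are elided as in the other snippets:
+
+```go
+// proposeWithRetry reproposes data until the application observes it in
+// the committed entries. isCommitted is an application-defined check.
+func proposeWithRetry(n raft.Node, data []byte, isCommitted func([]byte) bool) error {
+	for !isCommitted(data) {
+		ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
+		err := n.Propose(ctx, data)
+		cancel()
+		if err != nil && err != context.DeadlineExceeded {
+			return err // e.g. raft.ErrStopped
+		}
+		// Give the proposal time to commit before reproposing.
+		time.Sleep(time.Second)
+	}
+	return nil
+}
+```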
+
+To add or remove a node in a cluster, build a ConfChange struct 'cc' and call:
+
+```go
+	n.ProposeConfChange(ctx, cc)
+```
+
+After the config change is committed, some committed entry with type raftpb.EntryConfChange will be returned. This must be applied to the node through:
+
+```go
+	var cc raftpb.ConfChange
+	cc.Unmarshal(data)
+	n.ApplyConfChange(cc)
+```
+
+Note: An ID represents a unique node in a cluster for all time. A
+given ID MUST be used only once even if the old node has been removed.
+This means that for example IP addresses make poor node IDs since they
+may be reused. Node IDs must be non-zero.
+
+## Implementation notes
+
+This implementation is up to date with the final Raft thesis (https://ramcloud.stanford.edu/~ongaro/thesis.pdf), although this implementation of the membership change protocol differs somewhat from that described in chapter 4. The key invariant that membership changes happen one node at a time is preserved, but in our implementation the membership change takes effect when its entry is applied, not when it is added to the log (so the entry is committed under the old membership instead of the new). This is equivalent in terms of safety, since the old and new configurations are guaranteed to overlap.
+
+To ensure there is no attempt to commit two membership changes at once by matching log positions (which would be unsafe since they should have different quorum requirements), any proposed membership change is simply disallowed while any uncommitted change appears in the leader's log.
+
+This approach introduces a problem when removing a member from a two-member cluster: If one of the members dies before the other one receives the commit of the confchange entry, then the member cannot be removed any more since the cluster cannot make progress. For this reason it is highly recommended to use three or more nodes in every cluster.
diff --git a/vendor/github.com/coreos/etcd/raft/design.md b/vendor/github.com/coreos/etcd/raft/design.md
new file mode 100644
index 0000000..7bc0531
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/raft/design.md
@@ -0,0 +1,57 @@
+## Progress
+
+Progress represents a follower’s progress in the view of the leader. The leader maintains the progress of all followers, and sends `replication message`s to each follower based on its progress.
+
+`replication message` is a `msgApp` with log entries.
+
+A progress has two attributes: `match` and `next`. `match` is the index of the highest known matched entry. If the leader knows nothing about the follower’s replication status, `match` is set to zero. `next` is the index of the first entry that will be replicated to the follower. The leader puts entries from `next` to its latest one in the next `replication message`.
+
+A progress is in one of three states: `probe`, `replicate`, `snapshot`.
+
+```
+                            +--------------------------------------------------------+          
+                            |                  send snapshot                         |          
+                            |                                                        |          
+                  +---------+----------+                                  +----------v---------+
+              +--->       probe        |                                  |      snapshot      |
+              |   |  max inflight = 1  <----------------------------------+  max inflight = 0  |
+              |   +---------+----------+                                  +--------------------+
+              |             |            1. snapshot success                                    
+              |             |               (next=snapshot.index + 1)                           
+              |             |            2. snapshot failure                                    
+              |             |               (no change)                                         
+              |             |            3. receives msgAppResp(rej=false&&index>lastsnap.index)
+              |             |               (match=m.index,next=match+1)                        
+receives msgAppResp(rej=true)                                                                   
+(next=match+1)|             |                                                                   
+              |             |                                                                   
+              |             |                                                                   
+              |             |   receives msgAppResp(rej=false&&index>match)                     
+              |             |   (match=m.index,next=match+1)                                    
+              |             |                                                                   
+              |             |                                                                   
+              |             |                                                                   
+              |   +---------v----------+                                                        
+              |   |     replicate      |                                                        
+              +---+  max inflight = n  |                                                        
+                  +--------------------+                                                        
+```
+
+When the progress of a follower is in `probe` state, the leader sends at most one `replication message` per heartbeat interval. The leader sends `replication message`s slowly, probing the actual progress of the follower. A `msgHeartbeatResp` or a `msgAppResp` with a rejection might trigger the sending of the next `replication message`.
+
+When the progress of a follower is in `replicate` state, the leader sends `replication message`s, then optimistically increases `next` to the latest entry sent. This is an optimized state for fast replication of log entries to the follower.
+
+When the progress of a follower is in `snapshot` state, the leader stops sending any `replication message`.
+
+A newly elected leader sets the progress of all followers to `probe` state with `match` = 0 and `next` = last index. The leader slowly (at most once per heartbeat) sends `replication message`s to each follower and probes its progress.
+
+A progress changes to `replicate` when the follower replies with a non-rejection `msgAppResp`, which implies that it has matched the index sent. At this point, the leader starts to stream log entries to the follower quickly. The progress will fall back to `probe` when the follower replies with a rejection `msgAppResp` or the link layer reports that the follower is unreachable. We aggressively reset `next` to `match`+1 since, if we receive any `msgAppResp` soon, both `match` and `next` will increase directly to the `index` in the `msgAppResp`. (We might end up sending some duplicate entries when we aggressively reset `next` too low; see open question.)
+
+A progress changes from `probe` to `snapshot` when the follower falls very far behind and requires a snapshot. After sending `msgSnap`, the leader waits until the success, failure, or abortion of the previous snapshot sent. The progress will go back to `probe` after the sending result is applied.
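+
+The transitions above can be summarized in code. The following is a minimal sketch with illustrative names; the actual types in this library differ:
+
+```go
+type stateType int
+
+const (
+	stateProbe stateType = iota
+	stateReplicate
+	stateSnapshot
+)
+
+type progress struct {
+	match, next uint64
+	state       stateType
+}
+
+// On a rejection msgAppResp, or when the follower is reported unreachable
+// while replicating: fall back to probe and aggressively reset next.
+func (pr *progress) becomeProbe() {
+	pr.state = stateProbe
+	pr.next = pr.match + 1
+}
+
+// On a non-rejection msgAppResp with index > match: record the match and
+// start streaming entries optimistically.
+func (pr *progress) maybeUpdate(index uint64) {
+	if index > pr.match {
+		pr.match = index
+		pr.next = index + 1
+		pr.state = stateReplicate
+	}
+}
+```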
+
+### Flow Control
+
+1. Limit the max size of each message sent. The max should be configurable.
+This lowers the cost of the probing state, since we limit the size per message; it also lowers the penalty when `next` is aggressively decreased too low.
+
+2. Limit the number of in-flight messages to fewer than N when in the `replicate` state. N should be configurable. Most implementations will have a sending buffer on top of the actual network transport layer (so as not to block the raft node). We want to make sure raft does not overflow that buffer, which could cause messages to be dropped and trigger a bunch of unnecessary repeated resending.
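+
+A minimal sketch of the second limit, assuming a simple compacting buffer rather than the ring buffer a real implementation might use:
+
+```go
+// inflights caps the number of in-flight replication messages per follower.
+type inflights struct {
+	buf   []uint64 // last entry index carried by each in-flight message
+	count int
+}
+
+func newInflights(n int) *inflights { return &inflights{buf: make([]uint64, n)} }
+
+func (in *inflights) full() bool { return in.count == len(in.buf) }
+
+// add records one more in-flight message; callers must check full() first.
+func (in *inflights) add(lastIndex uint64) {
+	in.buf[in.count] = lastIndex
+	in.count++
+}
+
+// freeTo releases all messages acknowledged up to and including index i,
+// compacting the remaining entries to the front of the buffer.
+func (in *inflights) freeTo(i uint64) {
+	n := 0
+	for n < in.count && in.buf[n] <= i {
+		n++
+	}
+	copy(in.buf, in.buf[n:in.count])
+	in.count -= n
+}
+```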
diff --git a/vendor/github.com/coreos/etcd/raft/doc.go b/vendor/github.com/coreos/etcd/raft/doc.go
new file mode 100644
index 0000000..b55c591
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/raft/doc.go
@@ -0,0 +1,300 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package raft sends and receives messages in the Protocol Buffer format
+defined in the raftpb package.
+
+Raft is a protocol with which a cluster of nodes can maintain a replicated state machine.
+The state machine is kept in sync through the use of a replicated log.
+For more details on Raft, see "In Search of an Understandable Consensus Algorithm"
+(https://ramcloud.stanford.edu/raft.pdf) by Diego Ongaro and John Ousterhout.
+
+A simple example application, _raftexample_, is also available to help illustrate
+how to use this package in practice:
+https://github.com/coreos/etcd/tree/master/contrib/raftexample
+
+Usage
+
+The primary object in raft is a Node. You either start a Node from scratch
+using raft.StartNode or start a Node from some initial state using raft.RestartNode.
+
+To start a node from scratch:
+
+  storage := raft.NewMemoryStorage()
+  c := &Config{
+    ID:              0x01,
+    ElectionTick:    10,
+    HeartbeatTick:   1,
+    Storage:         storage,
+    MaxSizePerMsg:   4096,
+    MaxInflightMsgs: 256,
+  }
+  n := raft.StartNode(c, []raft.Peer{{ID: 0x02}, {ID: 0x03}})
+
+To restart a node from previous state:
+
+  storage := raft.NewMemoryStorage()
+
+  // recover the in-memory storage from persistent
+  // snapshot, state and entries.
+  storage.ApplySnapshot(snapshot)
+  storage.SetHardState(state)
+  storage.Append(entries)
+
+  c := &Config{
+    ID:              0x01,
+    ElectionTick:    10,
+    HeartbeatTick:   1,
+    Storage:         storage,
+    MaxSizePerMsg:   4096,
+    MaxInflightMsgs: 256,
+  }
+
+  // restart raft without peer information.
+  // peer information is already included in the storage.
+  n := raft.RestartNode(c)
+
+Now that you are holding onto a Node you have a few responsibilities:
+
+First, you must read from the Node.Ready() channel and process the updates
+it contains. These steps may be performed in parallel, except as noted in step
+2.
+
+1. Write HardState, Entries, and Snapshot to persistent storage if they are
+not empty. Note that when writing an Entry with Index i, any
+previously-persisted entries with Index >= i must be discarded.
+
+2. Send all Messages to the nodes named in the To field. It is important that
+no messages be sent until the latest HardState has been persisted to disk,
+and all Entries written by any previous Ready batch (Messages may be sent while
+entries from the same batch are being persisted). To reduce the I/O latency, an
+optimization can be applied to make leader write to disk in parallel with its
+followers (as explained at section 10.2.1 in Raft thesis). If any Message has type
+MsgSnap, call Node.ReportSnapshot() after it has been sent (these messages may be
+large).
+
+Note: Marshalling messages is not thread-safe; it is important that you
+make sure that no new entries are persisted while marshalling.
+The easiest way to achieve this is to serialise the messages directly inside
+your main raft loop.
+
+3. Apply Snapshot (if any) and CommittedEntries to the state machine.
+If any committed Entry has Type EntryConfChange, call Node.ApplyConfChange()
+to apply it to the node. The configuration change may be cancelled at this point
+by setting the NodeID field to zero before calling ApplyConfChange
+(but ApplyConfChange must be called one way or the other, and the decision to cancel
+must be based solely on the state machine and not external information such as
+the observed health of the node).
+
+4. Call Node.Advance() to signal readiness for the next batch of updates.
+This may be done at any time after step 1, although all updates must be processed
+in the order they were returned by Ready.
+
+Second, all persisted log entries must be made available via an
+implementation of the Storage interface. The provided MemoryStorage
+type can be used for this (if you repopulate its state upon a
+restart), or you can supply your own disk-backed implementation.
+
+Third, when you receive a message from another node, pass it to Node.Step:
+
+	func recvRaftRPC(ctx context.Context, m raftpb.Message) {
+		n.Step(ctx, m)
+	}
+
+Finally, you need to call Node.Tick() at regular intervals (probably
+via a time.Ticker). Raft has two important timeouts: heartbeat and the
+election timeout. However, internally to the raft package time is
+represented by an abstract "tick".
+
+The total state machine handling loop will look something like this:
+
+  for {
+    select {
+    case <-s.Ticker:
+      n.Tick()
+    case rd := <-s.Node.Ready():
+      saveToStorage(rd.State, rd.Entries, rd.Snapshot)
+      send(rd.Messages)
+      if !raft.IsEmptySnap(rd.Snapshot) {
+        processSnapshot(rd.Snapshot)
+      }
+      for _, entry := range rd.CommittedEntries {
+        process(entry)
+        if entry.Type == raftpb.EntryConfChange {
+          var cc raftpb.ConfChange
+          cc.Unmarshal(entry.Data)
+          s.Node.ApplyConfChange(cc)
+        }
+      }
+      s.Node.Advance()
+    case <-s.done:
+      return
+    }
+  }
+
+To propose changes to the state machine from your node, take your application
+data, serialize it into a byte slice and call:
+
+	n.Propose(ctx, data)
+
+If the proposal is committed, data will appear in committed entries with type
+raftpb.EntryNormal. There is no guarantee that a proposed command will be
+committed; you may have to re-propose after a timeout.
+
+To add or remove a node in a cluster, build a ConfChange struct 'cc' and call:
+
+	n.ProposeConfChange(ctx, cc)
+
+After the config change is committed, some committed entry with type
+raftpb.EntryConfChange will be returned. You must apply it to the node through:
+
+	var cc raftpb.ConfChange
+	cc.Unmarshal(data)
+	n.ApplyConfChange(cc)
+
+Note: An ID represents a unique node in a cluster for all time. A
+given ID MUST be used only once even if the old node has been removed.
+This means that for example IP addresses make poor node IDs since they
+may be reused. Node IDs must be non-zero.
+
+Implementation notes
+
+This implementation is up to date with the final Raft thesis
+(https://ramcloud.stanford.edu/~ongaro/thesis.pdf), although our
+implementation of the membership change protocol differs somewhat from
+that described in chapter 4. The key invariant that membership changes
+happen one node at a time is preserved, but in our implementation the
+membership change takes effect when its entry is applied, not when it
+is added to the log (so the entry is committed under the old
+membership instead of the new). This is equivalent in terms of safety,
+since the old and new configurations are guaranteed to overlap.
+
+To ensure that we do not attempt to commit two membership changes at
+once by matching log positions (which would be unsafe since they
+should have different quorum requirements), we simply disallow any
+proposed membership change while any uncommitted change appears in
+the leader's log.
+
+This approach introduces a problem when you try to remove a member
+from a two-member cluster: If one of the members dies before the
+other one receives the commit of the confchange entry, then the member
+cannot be removed any more since the cluster cannot make progress.
+For this reason it is highly recommended to use three or more nodes in
+every cluster.
+
+MessageType
+
+Package raft sends and receives message in Protocol Buffer format (defined
+in raftpb package). Each state (follower, candidate, leader) implements its
+own 'step' method ('stepFollower', 'stepCandidate', 'stepLeader') when
+advancing with the given raftpb.Message. Each step is determined by its
+raftpb.MessageType. Note that every step is checked by one common method
+'Step' that safety-checks the terms of node and incoming message to prevent
+stale log entries:
+
+	'MsgHup' is used for election. If a node is a follower or candidate, the
+	'tick' function in 'raft' struct is set as 'tickElection'. If a follower or
+	candidate has not received any heartbeat before the election timeout, it
+	passes 'MsgHup' to its Step method and becomes (or remains) a candidate to
+	start a new election.
+
+	'MsgBeat' is an internal type that signals the leader to send a heartbeat of
+	the 'MsgHeartbeat' type. If a node is a leader, the 'tick' function in
+	the 'raft' struct is set as 'tickHeartbeat', and triggers the leader to
+	send periodic 'MsgHeartbeat' messages to its followers.
+
+	'MsgProp' proposes to append data to its log entries. This is a special
+	type to redirect proposals to leader. Therefore, send method overwrites
+	raftpb.Message's term with its HardState's term to avoid attaching its
+	local term to 'MsgProp'. When 'MsgProp' is passed to the leader's 'Step'
+	method, the leader first calls the 'appendEntry' method to append entries
+	to its log, and then calls 'bcastAppend' method to send those entries to
+	its peers. When passed to candidate, 'MsgProp' is dropped. When passed to
+	follower, 'MsgProp' is stored in the follower's mailbox (msgs) by the send
+	method. It is stored with the sender's ID and later forwarded to the leader
+	by the rafthttp package.
+
+	'MsgApp' contains log entries to replicate. A leader calls bcastAppend,
+	which calls sendAppend, which sends soon-to-be-replicated logs in 'MsgApp'
+	type. When 'MsgApp' is passed to candidate's Step method, candidate reverts
+	back to follower, because it indicates that there is a valid leader sending
+	'MsgApp' messages. Candidate and follower respond to this message in
+	'MsgAppResp' type.
+
+	'MsgAppResp' is the response to a log replication request ('MsgApp'). When
+	'MsgApp' is passed to a candidate or follower's Step method, it responds by
+	calling the 'handleAppendEntries' method, which sends 'MsgAppResp' to the
+	raft mailbox.
+
+	'MsgVote' requests votes for election. When a node is a follower or
+	candidate and 'MsgHup' is passed to its Step method, then the node calls
+	'campaign' method to campaign itself to become a leader. Once 'campaign'
+	method is called, the node becomes candidate and sends 'MsgVote' to peers
+	in cluster to request votes. When passed to leader or candidate's Step
+	method and the message's Term is lower than leader's or candidate's,
+	'MsgVote' will be rejected ('MsgVoteResp' is returned with Reject true).
+	If leader or candidate receives 'MsgVote' with higher term, it will revert
+	back to follower. When 'MsgVote' is passed to follower, it votes for the
+	sender only when sender's last term is greater than MsgVote's term or
+	sender's last term is equal to MsgVote's term but sender's last committed
+	index is greater than or equal to follower's.
+
+	'MsgVoteResp' contains responses to a vote request. When 'MsgVoteResp' is
+	passed to a candidate, the candidate calculates how many votes it has won. If
+	it's more than a majority (quorum), it becomes leader and calls 'bcastAppend'.
+	If the candidate receives a majority of denial votes, it reverts back to
+	follower.
+
+	'MsgPreVote' and 'MsgPreVoteResp' are used in an optional two-phase election
+	protocol. When Config.PreVote is true, a pre-election is carried out first
+	(using the same rules as a regular election), and no node increases its term
+	number unless the pre-election indicates that the campaigning node would win.
+	This minimizes disruption when a partitioned node rejoins the cluster.
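+
+	For example, a sketch of a Config enabling PreVote (identical to the
+	earlier Config examples except for the PreVote field named above):
+
+	  c := &Config{
+	    ID:              0x01,
+	    ElectionTick:    10,
+	    HeartbeatTick:   1,
+	    Storage:         storage,
+	    MaxSizePerMsg:   4096,
+	    MaxInflightMsgs: 256,
+	    PreVote:         true,
+	  }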
+
+	'MsgSnap' requests to install a snapshot message. When a node has just
+	become a leader or the leader receives 'MsgProp' message, it calls
+	'bcastAppend' method, which then calls 'sendAppend' method to each
+	follower. In 'sendAppend', if a leader fails to get term or entries,
+	the leader requests snapshot by sending 'MsgSnap' type message.
+
+	'MsgSnapStatus' tells the result of a snapshot install message. When a
+	follower rejects 'MsgSnap', it indicates that the snapshot request with
+	'MsgSnap' failed due to network issues, which caused the network layer to
+	fail to send the snapshot to its followers. Then the leader considers the
+	follower's progress as probe. When 'MsgSnap' was not rejected, it
+	indicates that the snapshot succeeded, and the leader sets the follower's
+	progress to probe and resumes its log replication.
+
+	'MsgHeartbeat' sends heartbeat from leader. When 'MsgHeartbeat' is passed
+	to candidate and message's term is higher than candidate's, the candidate
+	reverts back to follower and updates its committed index from the one in
+	this heartbeat. And it sends the message to its mailbox. When
+	'MsgHeartbeat' is passed to follower's Step method and message's term is
+	higher than follower's, the follower updates its leaderID with the ID
+	from the message.
+
+	'MsgHeartbeatResp' is a response to 'MsgHeartbeat'. When 'MsgHeartbeatResp'
+	is passed to leader's Step method, the leader knows which follower
+	responded. And only when the leader's last committed index is greater than
+	follower's Match index, the leader runs 'sendAppend` method.
+
+	'MsgUnreachable' tells that a request (message) wasn't delivered. When
+	'MsgUnreachable' is passed to leader's Step method, the leader discovers
+	that the follower that sent this 'MsgUnreachable' is not reachable, often
+	indicating 'MsgApp' is lost. When follower's progress state is replicate,
+	the leader sets it back to probe.
+
+*/
+package raft
diff --git a/vendor/github.com/coreos/etcd/raft/log.go b/vendor/github.com/coreos/etcd/raft/log.go
new file mode 100644
index 0000000..c3036d3
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/raft/log.go
@@ -0,0 +1,358 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+	"fmt"
+	"log"
+
+	pb "github.com/coreos/etcd/raft/raftpb"
+)
+
+type raftLog struct {
+	// storage contains all stable entries since the last snapshot.
+	storage Storage
+
+	// unstable contains all unstable entries and snapshot.
+	// they will be saved into storage.
+	unstable unstable
+
+	// committed is the highest log position that is known to be in
+	// stable storage on a quorum of nodes.
+	committed uint64
+	// applied is the highest log position that the application has
+	// been instructed to apply to its state machine.
+	// Invariant: applied <= committed
+	applied uint64
+
+	logger Logger
+}
+
+// newLog returns a log using the given storage. It recovers the log
+// to the state where it has just committed and applied the latest snapshot.
+func newLog(storage Storage, logger Logger) *raftLog {
+	if storage == nil {
+		log.Panic("storage must not be nil")
+	}
+	log := &raftLog{
+		storage: storage,
+		logger:  logger,
+	}
+	firstIndex, err := storage.FirstIndex()
+	if err != nil {
+		panic(err) // TODO(bdarnell)
+	}
+	lastIndex, err := storage.LastIndex()
+	if err != nil {
+		panic(err) // TODO(bdarnell)
+	}
+	log.unstable.offset = lastIndex + 1
+	log.unstable.logger = logger
+	// Initialize our committed and applied pointers to the time of the last compaction.
+	log.committed = firstIndex - 1
+	log.applied = firstIndex - 1
+
+	return log
+}
+
+func (l *raftLog) String() string {
+	return fmt.Sprintf("committed=%d, applied=%d, unstable.offset=%d, len(unstable.Entries)=%d", l.committed, l.applied, l.unstable.offset, len(l.unstable.entries))
+}
+
+// maybeAppend returns (0, false) if the entries cannot be appended. Otherwise,
+// it returns (last index of new entries, true).
+func (l *raftLog) maybeAppend(index, logTerm, committed uint64, ents ...pb.Entry) (lastnewi uint64, ok bool) {
+	if l.matchTerm(index, logTerm) {
+		lastnewi = index + uint64(len(ents))
+		ci := l.findConflict(ents)
+		switch {
+		case ci == 0:
+		case ci <= l.committed:
+			l.logger.Panicf("entry %d conflict with committed entry [committed(%d)]", ci, l.committed)
+		default:
+			offset := index + 1
+			l.append(ents[ci-offset:]...)
+		}
+		l.commitTo(min(committed, lastnewi))
+		return lastnewi, true
+	}
+	return 0, false
+}
+
+func (l *raftLog) append(ents ...pb.Entry) uint64 {
+	if len(ents) == 0 {
+		return l.lastIndex()
+	}
+	if after := ents[0].Index - 1; after < l.committed {
+		l.logger.Panicf("after(%d) is out of range [committed(%d)]", after, l.committed)
+	}
+	l.unstable.truncateAndAppend(ents)
+	return l.lastIndex()
+}
+
+// findConflict finds the index of the conflict.
+// It returns the index of the first conflicting entry between the existing
+// entries and the given entries, if there is one.
+// If there are no conflicting entries, and the existing entries contain
+// all the given entries, zero will be returned.
+// If there are no conflicting entries, but the given entries contain new
+// entries, the index of the first new entry will be returned.
+// An entry is considered to be conflicting if it has the same index but
+// a different term.
+// The first entry MUST have an index equal to the argument 'from'.
+// The index of the given entries MUST be continuously increasing.
+func (l *raftLog) findConflict(ents []pb.Entry) uint64 {
+	for _, ne := range ents {
+		if !l.matchTerm(ne.Index, ne.Term) {
+			if ne.Index <= l.lastIndex() {
+				l.logger.Infof("found conflict at index %d [existing term: %d, conflicting term: %d]",
+					ne.Index, l.zeroTermOnErrCompacted(l.term(ne.Index)), ne.Term)
+			}
+			return ne.Index
+		}
+	}
+	return 0
+}
+
+func (l *raftLog) unstableEntries() []pb.Entry {
+	if len(l.unstable.entries) == 0 {
+		return nil
+	}
+	return l.unstable.entries
+}
+
+// nextEnts returns all the available entries for execution.
+// If applied is smaller than the index of snapshot, it returns all committed
+// entries after the index of snapshot.
+func (l *raftLog) nextEnts() (ents []pb.Entry) {
+	off := max(l.applied+1, l.firstIndex())
+	if l.committed+1 > off {
+		ents, err := l.slice(off, l.committed+1, noLimit)
+		if err != nil {
+			l.logger.Panicf("unexpected error when getting unapplied entries (%v)", err)
+		}
+		return ents
+	}
+	return nil
+}
+
+// hasNextEnts returns whether there are any available entries for execution. This
+// is a fast check without heavy raftLog.slice() in raftLog.nextEnts().
+func (l *raftLog) hasNextEnts() bool {
+	off := max(l.applied+1, l.firstIndex())
+	return l.committed+1 > off
+}
+
+func (l *raftLog) snapshot() (pb.Snapshot, error) {
+	if l.unstable.snapshot != nil {
+		return *l.unstable.snapshot, nil
+	}
+	return l.storage.Snapshot()
+}
+
+func (l *raftLog) firstIndex() uint64 {
+	if i, ok := l.unstable.maybeFirstIndex(); ok {
+		return i
+	}
+	index, err := l.storage.FirstIndex()
+	if err != nil {
+		panic(err) // TODO(bdarnell)
+	}
+	return index
+}
+
+func (l *raftLog) lastIndex() uint64 {
+	if i, ok := l.unstable.maybeLastIndex(); ok {
+		return i
+	}
+	i, err := l.storage.LastIndex()
+	if err != nil {
+		panic(err) // TODO(bdarnell)
+	}
+	return i
+}
+
+func (l *raftLog) commitTo(tocommit uint64) {
+	// never decrease commit
+	if l.committed < tocommit {
+		if l.lastIndex() < tocommit {
+			l.logger.Panicf("tocommit(%d) is out of range [lastIndex(%d)]. Was the raft log corrupted, truncated, or lost?", tocommit, l.lastIndex())
+		}
+		l.committed = tocommit
+	}
+}
+
+func (l *raftLog) appliedTo(i uint64) {
+	if i == 0 {
+		return
+	}
+	if l.committed < i || i < l.applied {
+		l.logger.Panicf("applied(%d) is out of range [prevApplied(%d), committed(%d)]", i, l.applied, l.committed)
+	}
+	l.applied = i
+}
+
+func (l *raftLog) stableTo(i, t uint64) { l.unstable.stableTo(i, t) }
+
+func (l *raftLog) stableSnapTo(i uint64) { l.unstable.stableSnapTo(i) }
+
+func (l *raftLog) lastTerm() uint64 {
+	t, err := l.term(l.lastIndex())
+	if err != nil {
+		l.logger.Panicf("unexpected error when getting the last term (%v)", err)
+	}
+	return t
+}
+
+func (l *raftLog) term(i uint64) (uint64, error) {
+	// the valid term range is [index of dummy entry, last index]
+	dummyIndex := l.firstIndex() - 1
+	if i < dummyIndex || i > l.lastIndex() {
+		// TODO: return an error instead?
+		return 0, nil
+	}
+
+	if t, ok := l.unstable.maybeTerm(i); ok {
+		return t, nil
+	}
+
+	t, err := l.storage.Term(i)
+	if err == nil {
+		return t, nil
+	}
+	if err == ErrCompacted || err == ErrUnavailable {
+		return 0, err
+	}
+	panic(err) // TODO(bdarnell)
+}
+
+func (l *raftLog) entries(i, maxsize uint64) ([]pb.Entry, error) {
+	if i > l.lastIndex() {
+		return nil, nil
+	}
+	return l.slice(i, l.lastIndex()+1, maxsize)
+}
+
+// allEntries returns all entries in the log.
+func (l *raftLog) allEntries() []pb.Entry {
+	ents, err := l.entries(l.firstIndex(), noLimit)
+	if err == nil {
+		return ents
+	}
+	if err == ErrCompacted { // try again if there was a racing compaction
+		return l.allEntries()
+	}
+	// TODO (xiangli): handle error?
+	panic(err)
+}
+
+// isUpToDate determines if the given (lastIndex,term) log is more up-to-date
+// by comparing the index and term of the last entries in the existing logs.
+// If the logs have last entries with different terms, then the log with the
+// later term is more up-to-date. If the logs end with the same term, then
+// whichever log has the larger lastIndex is more up-to-date. If the logs are
+// the same, the given log is up-to-date.
+func (l *raftLog) isUpToDate(lasti, term uint64) bool {
+	return term > l.lastTerm() || (term == l.lastTerm() && lasti >= l.lastIndex())
+}
+
+func (l *raftLog) matchTerm(i, term uint64) bool {
+	t, err := l.term(i)
+	if err != nil {
+		return false
+	}
+	return t == term
+}
+
+func (l *raftLog) maybeCommit(maxIndex, term uint64) bool {
+	if maxIndex > l.committed && l.zeroTermOnErrCompacted(l.term(maxIndex)) == term {
+		l.commitTo(maxIndex)
+		return true
+	}
+	return false
+}
+
+func (l *raftLog) restore(s pb.Snapshot) {
+	l.logger.Infof("log [%s] starts to restore snapshot [index: %d, term: %d]", l, s.Metadata.Index, s.Metadata.Term)
+	l.committed = s.Metadata.Index
+	l.unstable.restore(s)
+}
+
+// slice returns a slice of log entries from lo through hi-1, inclusive.
+func (l *raftLog) slice(lo, hi, maxSize uint64) ([]pb.Entry, error) {
+	err := l.mustCheckOutOfBounds(lo, hi)
+	if err != nil {
+		return nil, err
+	}
+	if lo == hi {
+		return nil, nil
+	}
+	var ents []pb.Entry
+	if lo < l.unstable.offset {
+		storedEnts, err := l.storage.Entries(lo, min(hi, l.unstable.offset), maxSize)
+		if err == ErrCompacted {
+			return nil, err
+		} else if err == ErrUnavailable {
+			l.logger.Panicf("entries[%d:%d) is unavailable from storage", lo, min(hi, l.unstable.offset))
+		} else if err != nil {
+			panic(err) // TODO(bdarnell)
+		}
+
+		// check if ents has reached the size limitation
+		if uint64(len(storedEnts)) < min(hi, l.unstable.offset)-lo {
+			return storedEnts, nil
+		}
+
+		ents = storedEnts
+	}
+	if hi > l.unstable.offset {
+		unstable := l.unstable.slice(max(lo, l.unstable.offset), hi)
+		if len(ents) > 0 {
+			ents = append([]pb.Entry{}, ents...)
+			ents = append(ents, unstable...)
+		} else {
+			ents = unstable
+		}
+	}
+	return limitSize(ents, maxSize), nil
+}
+
+// l.firstIndex <= lo <= hi <= l.firstIndex + len(l.entries)
+func (l *raftLog) mustCheckOutOfBounds(lo, hi uint64) error {
+	if lo > hi {
+		l.logger.Panicf("invalid slice %d > %d", lo, hi)
+	}
+	fi := l.firstIndex()
+	if lo < fi {
+		return ErrCompacted
+	}
+
+	length := l.lastIndex() + 1 - fi
+	if lo < fi || hi > fi+length {
+		l.logger.Panicf("slice[%d,%d) out of bound [%d,%d]", lo, hi, fi, l.lastIndex())
+	}
+	return nil
+}
+
+func (l *raftLog) zeroTermOnErrCompacted(t uint64, err error) uint64 {
+	if err == nil {
+		return t
+	}
+	if err == ErrCompacted {
+		return 0
+	}
+	l.logger.Panicf("unexpected error (%v)", err)
+	return 0
+}
diff --git a/vendor/github.com/coreos/etcd/raft/log_unstable.go b/vendor/github.com/coreos/etcd/raft/log_unstable.go
new file mode 100644
index 0000000..263af9c
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/raft/log_unstable.go
@@ -0,0 +1,159 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import pb "github.com/coreos/etcd/raft/raftpb"
+
+// unstable.entries[i] has raft log position i+unstable.offset.
+// Note that unstable.offset may be less than the highest log
+// position in storage; this means that the next write to storage
+// might need to truncate the log before persisting unstable.entries.
+type unstable struct {
+	// the incoming unstable snapshot, if any.
+	snapshot *pb.Snapshot
+	// all entries that have not yet been written to storage.
+	entries []pb.Entry
+	offset  uint64
+
+	logger Logger
+}
+
+// maybeFirstIndex returns the index of the first possible entry in entries
+// if it has a snapshot.
+func (u *unstable) maybeFirstIndex() (uint64, bool) {
+	if u.snapshot != nil {
+		return u.snapshot.Metadata.Index + 1, true
+	}
+	return 0, false
+}
+
+// maybeLastIndex returns the last index if it has at least one
+// unstable entry or snapshot.
+func (u *unstable) maybeLastIndex() (uint64, bool) {
+	if l := len(u.entries); l != 0 {
+		return u.offset + uint64(l) - 1, true
+	}
+	if u.snapshot != nil {
+		return u.snapshot.Metadata.Index, true
+	}
+	return 0, false
+}
+
+// maybeTerm returns the term of the entry at index i, if there
+// is any.
+func (u *unstable) maybeTerm(i uint64) (uint64, bool) {
+	if i < u.offset {
+		if u.snapshot == nil {
+			return 0, false
+		}
+		if u.snapshot.Metadata.Index == i {
+			return u.snapshot.Metadata.Term, true
+		}
+		return 0, false
+	}
+
+	last, ok := u.maybeLastIndex()
+	if !ok {
+		return 0, false
+	}
+	if i > last {
+		return 0, false
+	}
+	return u.entries[i-u.offset].Term, true
+}
+
+func (u *unstable) stableTo(i, t uint64) {
+	gt, ok := u.maybeTerm(i)
+	if !ok {
+		return
+	}
+	// if i < offset, term is matched with the snapshot
+	// only update the unstable entries if term is matched with
+	// an unstable entry.
+	if gt == t && i >= u.offset {
+		u.entries = u.entries[i+1-u.offset:]
+		u.offset = i + 1
+		u.shrinkEntriesArray()
+	}
+}
+
+// shrinkEntriesArray discards the underlying array used by the entries slice
+// if most of it isn't being used. This avoids holding references to a bunch of
+// potentially large entries that aren't needed anymore. Simply clearing the
+// entries wouldn't be safe because clients might still be using them.
+func (u *unstable) shrinkEntriesArray() {
+	// We replace the array if we're using less than half of the space in
+	// it. This number is fairly arbitrary, chosen as an attempt to balance
+	// memory usage vs number of allocations. It could probably be improved
+	// with some focused tuning.
+	const lenMultiple = 2
+	if len(u.entries) == 0 {
+		u.entries = nil
+	} else if len(u.entries)*lenMultiple < cap(u.entries) {
+		newEntries := make([]pb.Entry, len(u.entries))
+		copy(newEntries, u.entries)
+		u.entries = newEntries
+	}
+}
+
+func (u *unstable) stableSnapTo(i uint64) {
+	if u.snapshot != nil && u.snapshot.Metadata.Index == i {
+		u.snapshot = nil
+	}
+}
+
+func (u *unstable) restore(s pb.Snapshot) {
+	u.offset = s.Metadata.Index + 1
+	u.entries = nil
+	u.snapshot = &s
+}
+
+func (u *unstable) truncateAndAppend(ents []pb.Entry) {
+	after := ents[0].Index
+	switch {
+	case after == u.offset+uint64(len(u.entries)):
+		// after is the next index in the u.entries
+		// directly append
+		u.entries = append(u.entries, ents...)
+	case after <= u.offset:
+		u.logger.Infof("replace the unstable entries from index %d", after)
+		// The log is being truncated to before our current offset
+		// portion, so set the offset and replace the entries
+		u.offset = after
+		u.entries = ents
+	default:
+		// truncate to after and copy to u.entries
+		// then append
+		u.logger.Infof("truncate the unstable entries before index %d", after)
+		u.entries = append([]pb.Entry{}, u.slice(u.offset, after)...)
+		u.entries = append(u.entries, ents...)
+	}
+}
+
+func (u *unstable) slice(lo uint64, hi uint64) []pb.Entry {
+	u.mustCheckOutOfBounds(lo, hi)
+	return u.entries[lo-u.offset : hi-u.offset]
+}
+
+// u.offset <= lo <= hi <= u.offset+len(u.entries)
+func (u *unstable) mustCheckOutOfBounds(lo, hi uint64) {
+	if lo > hi {
+		u.logger.Panicf("invalid unstable.slice %d > %d", lo, hi)
+	}
+	upper := u.offset + uint64(len(u.entries))
+	if lo < u.offset || hi > upper {
+		u.logger.Panicf("unstable.slice[%d,%d) out of bound [%d,%d]", lo, hi, u.offset, upper)
+	}
+}
diff --git a/vendor/github.com/coreos/etcd/raft/logger.go b/vendor/github.com/coreos/etcd/raft/logger.go
new file mode 100644
index 0000000..92e55b3
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/raft/logger.go
@@ -0,0 +1,126 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+	"fmt"
+	"io/ioutil"
+	"log"
+	"os"
+)
+
+type Logger interface {
+	Debug(v ...interface{})
+	Debugf(format string, v ...interface{})
+
+	Error(v ...interface{})
+	Errorf(format string, v ...interface{})
+
+	Info(v ...interface{})
+	Infof(format string, v ...interface{})
+
+	Warning(v ...interface{})
+	Warningf(format string, v ...interface{})
+
+	Fatal(v ...interface{})
+	Fatalf(format string, v ...interface{})
+
+	Panic(v ...interface{})
+	Panicf(format string, v ...interface{})
+}
+
+func SetLogger(l Logger) { raftLogger = l }
+
+var (
+	defaultLogger = &DefaultLogger{Logger: log.New(os.Stderr, "raft", log.LstdFlags)}
+	discardLogger = &DefaultLogger{Logger: log.New(ioutil.Discard, "", 0)}
+	raftLogger    = Logger(defaultLogger)
+)
+
+const (
+	calldepth = 2
+)
+
+// DefaultLogger is a default implementation of the Logger interface.
+type DefaultLogger struct {
+	*log.Logger
+	debug bool
+}
+
+func (l *DefaultLogger) EnableTimestamps() {
+	l.SetFlags(l.Flags() | log.Ldate | log.Ltime)
+}
+
+func (l *DefaultLogger) EnableDebug() {
+	l.debug = true
+}
+
+func (l *DefaultLogger) Debug(v ...interface{}) {
+	if l.debug {
+		l.Output(calldepth, header("DEBUG", fmt.Sprint(v...)))
+	}
+}
+
+func (l *DefaultLogger) Debugf(format string, v ...interface{}) {
+	if l.debug {
+		l.Output(calldepth, header("DEBUG", fmt.Sprintf(format, v...)))
+	}
+}
+
+func (l *DefaultLogger) Info(v ...interface{}) {
+	l.Output(calldepth, header("INFO", fmt.Sprint(v...)))
+}
+
+func (l *DefaultLogger) Infof(format string, v ...interface{}) {
+	l.Output(calldepth, header("INFO", fmt.Sprintf(format, v...)))
+}
+
+func (l *DefaultLogger) Error(v ...interface{}) {
+	l.Output(calldepth, header("ERROR", fmt.Sprint(v...)))
+}
+
+func (l *DefaultLogger) Errorf(format string, v ...interface{}) {
+	l.Output(calldepth, header("ERROR", fmt.Sprintf(format, v...)))
+}
+
+func (l *DefaultLogger) Warning(v ...interface{}) {
+	l.Output(calldepth, header("WARN", fmt.Sprint(v...)))
+}
+
+func (l *DefaultLogger) Warningf(format string, v ...interface{}) {
+	l.Output(calldepth, header("WARN", fmt.Sprintf(format, v...)))
+}
+
+func (l *DefaultLogger) Fatal(v ...interface{}) {
+	l.Output(calldepth, header("FATAL", fmt.Sprint(v...)))
+	os.Exit(1)
+}
+
+func (l *DefaultLogger) Fatalf(format string, v ...interface{}) {
+	l.Output(calldepth, header("FATAL", fmt.Sprintf(format, v...)))
+	os.Exit(1)
+}
+
+func (l *DefaultLogger) Panic(v ...interface{}) {
+	l.Logger.Panic(v...)
+}
+
+func (l *DefaultLogger) Panicf(format string, v ...interface{}) {
+	l.Logger.Panicf(format, v...)
+}
+
+func header(lvl, msg string) string {
+	return fmt.Sprintf("%s: %s", lvl, msg)
+}
diff --git a/vendor/github.com/coreos/etcd/raft/node.go b/vendor/github.com/coreos/etcd/raft/node.go
new file mode 100644
index 0000000..33a9db8
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/raft/node.go
@@ -0,0 +1,539 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+	"context"
+	"errors"
+
+	pb "github.com/coreos/etcd/raft/raftpb"
+)
+
+type SnapshotStatus int
+
+const (
+	SnapshotFinish  SnapshotStatus = 1
+	SnapshotFailure SnapshotStatus = 2
+)
+
+var (
+	emptyState = pb.HardState{}
+
+	// ErrStopped is returned by methods on Nodes that have been stopped.
+	ErrStopped = errors.New("raft: stopped")
+)
+
+// SoftState provides state that is useful for logging and debugging.
+// The state is volatile and does not need to be persisted to the WAL.
+type SoftState struct {
+	Lead      uint64 // must use atomic operations to access; keep 64-bit aligned.
+	RaftState StateType
+}
+
+func (a *SoftState) equal(b *SoftState) bool {
+	return a.Lead == b.Lead && a.RaftState == b.RaftState
+}
+
+// Ready encapsulates the entries and messages that are ready to read,
+// be saved to stable storage, committed or sent to other peers.
+// All fields in Ready are read-only.
+type Ready struct {
+	// The current volatile state of a Node.
+	// SoftState will be nil if there is no update.
+	// It is not required to consume or store SoftState.
+	*SoftState
+
+	// The current state of a Node to be saved to stable storage BEFORE
+	// Messages are sent.
+	// HardState will be equal to empty state if there is no update.
+	pb.HardState
+
+	// ReadStates can be used for node to serve linearizable read requests locally
+	// when its applied index is greater than the index in ReadState.
+	// Note that the readState will be returned when raft receives msgReadIndex.
+	// The returned ReadState is only valid for the request that requested to read.
+	ReadStates []ReadState
+
+	// Entries specifies entries to be saved to stable storage BEFORE
+	// Messages are sent.
+	Entries []pb.Entry
+
+	// Snapshot specifies the snapshot to be saved to stable storage.
+	Snapshot pb.Snapshot
+
+	// CommittedEntries specifies entries to be committed to a
+	// store/state-machine. These have previously been committed to stable
+	// store.
+	CommittedEntries []pb.Entry
+
+	// Messages specifies outbound messages to be sent AFTER Entries are
+	// committed to stable storage.
+	// If it contains a MsgSnap message, the application MUST report back to raft
+	// when the snapshot has been received or has failed by calling ReportSnapshot.
+	Messages []pb.Message
+
+	// MustSync indicates whether the HardState and Entries must be synchronously
+	// written to disk or if an asynchronous write is permissible.
+	MustSync bool
+}
+
+func isHardStateEqual(a, b pb.HardState) bool {
+	return a.Term == b.Term && a.Vote == b.Vote && a.Commit == b.Commit
+}
+
+// IsEmptyHardState returns true if the given HardState is empty.
+func IsEmptyHardState(st pb.HardState) bool {
+	return isHardStateEqual(st, emptyState)
+}
+
+// IsEmptySnap returns true if the given Snapshot is empty.
+func IsEmptySnap(sp pb.Snapshot) bool {
+	return sp.Metadata.Index == 0
+}
+
+func (rd Ready) containsUpdates() bool {
+	return rd.SoftState != nil || !IsEmptyHardState(rd.HardState) ||
+		!IsEmptySnap(rd.Snapshot) || len(rd.Entries) > 0 ||
+		len(rd.CommittedEntries) > 0 || len(rd.Messages) > 0 || len(rd.ReadStates) != 0
+}
+
+// Node represents a node in a raft cluster.
+type Node interface {
+	// Tick increments the internal logical clock for the Node by a single tick. Election
+	// timeouts and heartbeat timeouts are in units of ticks.
+	Tick()
+	// Campaign causes the Node to transition to candidate state and start campaigning to become leader.
+	Campaign(ctx context.Context) error
+	// Propose proposes that data be appended to the log.
+	Propose(ctx context.Context, data []byte) error
+	// ProposeConfChange proposes a config change.
+	// At most one ConfChange can be in the process of going through consensus.
+	// The application needs to call ApplyConfChange when applying an EntryConfChange type entry.
+	ProposeConfChange(ctx context.Context, cc pb.ConfChange) error
+	// Step advances the state machine using the given message. ctx.Err() will be returned, if any.
+	Step(ctx context.Context, msg pb.Message) error
+
+	// Ready returns a channel that returns the current point-in-time state.
+	// Users of the Node must call Advance after retrieving the state returned by Ready.
+	//
+	// NOTE: No committed entries from the next Ready may be applied until all committed entries
+	// and snapshots from the previous one have finished.
+	Ready() <-chan Ready
+
+	// Advance notifies the Node that the application has saved progress up to the last Ready.
+	// It prepares the node to return the next available Ready.
+	//
+	// The application should generally call Advance after it applies the entries in last Ready.
+	//
+	// However, as an optimization, the application may call Advance while it is applying the
+	// commands. For example, when the last Ready contains a snapshot, the application might take
+	// a long time to apply the snapshot data. To continue receiving Ready without blocking raft
+	// progress, it can call Advance before it finishes applying the last Ready.
+	Advance()
+	// ApplyConfChange applies config change to the local node.
+	// Returns an opaque ConfState protobuf which must be recorded
+	// in snapshots. Will never return nil; it returns a pointer only
+	// to match MemoryStorage.Compact.
+	ApplyConfChange(cc pb.ConfChange) *pb.ConfState
+
+	// TransferLeadership attempts to transfer leadership to the given transferee.
+	TransferLeadership(ctx context.Context, lead, transferee uint64)
+
+	// ReadIndex requests a read state. The read state will be set in the ready.
+	// Read state has a read index. Once the application advances further than the read
+	// index, any linearizable read requests issued before the read request can be
+	// processed safely. The read state will have the same rctx attached.
+	ReadIndex(ctx context.Context, rctx []byte) error
+
+	// Status returns the current status of the raft state machine.
+	Status() Status
+	// ReportUnreachable reports that the given node is not reachable for the last send.
+	ReportUnreachable(id uint64)
+	// ReportSnapshot reports the status of the sent snapshot.
+	ReportSnapshot(id uint64, status SnapshotStatus)
+	// Stop performs any necessary termination of the Node.
+	Stop()
+}
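+
+// exampleNodeLoop is an illustrative sketch, not part of the upstream etcd
+// raft API: it shows the canonical Ready-handling loop an application runs
+// around a Node. The tick channel and the saveToStorage, sendMessages and
+// applyEntry callbacks are hypothetical hooks the application would supply.
+func exampleNodeLoop(n Node, tick <-chan struct{},
+	saveToStorage func(pb.HardState, []pb.Entry, pb.Snapshot),
+	sendMessages func([]pb.Message),
+	applyEntry func(pb.Entry)) {
+	for {
+		select {
+		case <-tick:
+			n.Tick()
+		case rd := <-n.Ready():
+			// Persist HardState, Entries and Snapshot BEFORE sending messages.
+			saveToStorage(rd.HardState, rd.Entries, rd.Snapshot)
+			sendMessages(rd.Messages)
+			for _, entry := range rd.CommittedEntries {
+				if entry.Type == pb.EntryConfChange {
+					var cc pb.ConfChange
+					if err := cc.Unmarshal(entry.Data); err == nil {
+						n.ApplyConfChange(cc)
+					}
+				}
+				applyEntry(entry)
+			}
+			// Tell raft the Ready has been fully processed.
+			n.Advance()
+		}
+	}
+}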
+
+type Peer struct {
+	ID      uint64
+	Context []byte
+}
+
+// StartNode returns a new Node given configuration and a list of raft peers.
+// It appends a ConfChangeAddNode entry for each given peer to the initial log.
+func StartNode(c *Config, peers []Peer) Node {
+	r := newRaft(c)
+	// become the follower at term 1 and apply initial configuration
+	// entries of term 1
+	r.becomeFollower(1, None)
+	for _, peer := range peers {
+		cc := pb.ConfChange{Type: pb.ConfChangeAddNode, NodeID: peer.ID, Context: peer.Context}
+		d, err := cc.Marshal()
+		if err != nil {
+			panic("unexpected marshal error")
+		}
+		e := pb.Entry{Type: pb.EntryConfChange, Term: 1, Index: r.raftLog.lastIndex() + 1, Data: d}
+		r.raftLog.append(e)
+	}
+	// Mark these initial entries as committed.
+	// TODO(bdarnell): These entries are still unstable; do we need to preserve
+	// the invariant that committed < unstable?
+	r.raftLog.committed = r.raftLog.lastIndex()
+	// Now apply them, mainly so that the application can call Campaign
+	// immediately after StartNode in tests. Note that these nodes will
+	// be added to raft twice: here and when the application's Ready
+	// loop calls ApplyConfChange. The calls to addNode must come after
+	// all calls to raftLog.append so progress.next is set after these
+	// bootstrapping entries (it is an error if we try to append these
+	// entries since they have already been committed).
+	// We do not set raftLog.applied so the application will be able
+	// to observe all conf changes via Ready.CommittedEntries.
+	for _, peer := range peers {
+		r.addNode(peer.ID)
+	}
+
+	n := newNode()
+	n.logger = c.Logger
+	go n.run(r)
+	return &n
+}
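+
+// exampleStartNode is an illustrative sketch, not part of the upstream etcd
+// raft API: it bootstraps a new three-node cluster. The IDs and tuning values
+// are placeholders chosen for the example.
+func exampleStartNode() Node {
+	storage := NewMemoryStorage()
+	c := &Config{
+		ID:              0x01, // the local node
+		ElectionTick:    10,   // suggested: ElectionTick = 10 * HeartbeatTick
+		HeartbeatTick:   1,
+		Storage:         storage,
+		MaxSizePerMsg:   4096,
+		MaxInflightMsgs: 256,
+	}
+	// List every initial member of the cluster, the local node included.
+	return StartNode(c, []Peer{{ID: 0x01}, {ID: 0x02}, {ID: 0x03}})
+}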
+
+// RestartNode is similar to StartNode but does not take a list of peers.
+// The current membership of the cluster will be restored from the Storage.
+// If the caller has an existing state machine, pass in the last log index that
+// has been applied to it via Config.Applied; otherwise use zero.
+func RestartNode(c *Config) Node {
+	r := newRaft(c)
+
+	n := newNode()
+	n.logger = c.Logger
+	go n.run(r)
+	return &n
+}
+
+// node is the canonical implementation of the Node interface
+type node struct {
+	propc      chan pb.Message
+	recvc      chan pb.Message
+	confc      chan pb.ConfChange
+	confstatec chan pb.ConfState
+	readyc     chan Ready
+	advancec   chan struct{}
+	tickc      chan struct{}
+	done       chan struct{}
+	stop       chan struct{}
+	status     chan chan Status
+
+	logger Logger
+}
+
+func newNode() node {
+	return node{
+		propc:      make(chan pb.Message),
+		recvc:      make(chan pb.Message),
+		confc:      make(chan pb.ConfChange),
+		confstatec: make(chan pb.ConfState),
+		readyc:     make(chan Ready),
+		advancec:   make(chan struct{}),
+		// make tickc a buffered chan, so the raft node can buffer some ticks when
+		// it is busy processing raft messages. The raft node will resume
+		// processing buffered ticks when it becomes idle.
+		tickc:  make(chan struct{}, 128),
+		done:   make(chan struct{}),
+		stop:   make(chan struct{}),
+		status: make(chan chan Status),
+	}
+}
+
+func (n *node) Stop() {
+	select {
+	case n.stop <- struct{}{}:
+		// Not already stopped, so trigger it
+	case <-n.done:
+		// Node has already been stopped - no need to do anything
+		return
+	}
+	// Block until the stop has been acknowledged by run()
+	<-n.done
+}
+
+func (n *node) run(r *raft) {
+	var propc chan pb.Message
+	var readyc chan Ready
+	var advancec chan struct{}
+	var prevLastUnstablei, prevLastUnstablet uint64
+	var havePrevLastUnstablei bool
+	var prevSnapi uint64
+	var rd Ready
+
+	lead := None
+	prevSoftSt := r.softState()
+	prevHardSt := emptyState
+
+	for {
+		if advancec != nil {
+			readyc = nil
+		} else {
+			rd = newReady(r, prevSoftSt, prevHardSt)
+			if rd.containsUpdates() {
+				readyc = n.readyc
+			} else {
+				readyc = nil
+			}
+		}
+
+		if lead != r.lead {
+			if r.hasLeader() {
+				if lead == None {
+					r.logger.Infof("raft.node: %x elected leader %x at term %d", r.id, r.lead, r.Term)
+				} else {
+					r.logger.Infof("raft.node: %x changed leader from %x to %x at term %d", r.id, lead, r.lead, r.Term)
+				}
+				propc = n.propc
+			} else {
+				r.logger.Infof("raft.node: %x lost leader %x at term %d", r.id, lead, r.Term)
+				propc = nil
+			}
+			lead = r.lead
+		}
+
+		select {
+		// TODO: maybe buffer the config propose if there exists one (the way
+		// described in raft dissertation)
+		// Currently it is dropped in Step silently.
+		case m := <-propc:
+			m.From = r.id
+			r.Step(m)
+		case m := <-n.recvc:
+			// filter out response message from unknown From.
+			if pr := r.getProgress(m.From); pr != nil || !IsResponseMsg(m.Type) {
+				r.Step(m) // raft never returns an error
+			}
+		case cc := <-n.confc:
+			if cc.NodeID == None {
+				r.resetPendingConf()
+				select {
+				case n.confstatec <- pb.ConfState{Nodes: r.nodes()}:
+				case <-n.done:
+				}
+				break
+			}
+			switch cc.Type {
+			case pb.ConfChangeAddNode:
+				r.addNode(cc.NodeID)
+			case pb.ConfChangeAddLearnerNode:
+				r.addLearner(cc.NodeID)
+			case pb.ConfChangeRemoveNode:
+				// block incoming proposal when local node is
+				// removed
+				if cc.NodeID == r.id {
+					propc = nil
+				}
+				r.removeNode(cc.NodeID)
+			case pb.ConfChangeUpdateNode:
+				r.resetPendingConf()
+			default:
+				panic("unexpected conf type")
+			}
+			select {
+			case n.confstatec <- pb.ConfState{Nodes: r.nodes()}:
+			case <-n.done:
+			}
+		case <-n.tickc:
+			r.tick()
+		case readyc <- rd:
+			if rd.SoftState != nil {
+				prevSoftSt = rd.SoftState
+			}
+			if len(rd.Entries) > 0 {
+				prevLastUnstablei = rd.Entries[len(rd.Entries)-1].Index
+				prevLastUnstablet = rd.Entries[len(rd.Entries)-1].Term
+				havePrevLastUnstablei = true
+			}
+			if !IsEmptyHardState(rd.HardState) {
+				prevHardSt = rd.HardState
+			}
+			if !IsEmptySnap(rd.Snapshot) {
+				prevSnapi = rd.Snapshot.Metadata.Index
+			}
+
+			r.msgs = nil
+			r.readStates = nil
+			advancec = n.advancec
+		case <-advancec:
+			if prevHardSt.Commit != 0 {
+				r.raftLog.appliedTo(prevHardSt.Commit)
+			}
+			if havePrevLastUnstablei {
+				r.raftLog.stableTo(prevLastUnstablei, prevLastUnstablet)
+				havePrevLastUnstablei = false
+			}
+			r.raftLog.stableSnapTo(prevSnapi)
+			advancec = nil
+		case c := <-n.status:
+			c <- getStatus(r)
+		case <-n.stop:
+			close(n.done)
+			return
+		}
+	}
+}
+
+// Tick increments the internal logical clock for this Node. Election timeouts
+// and heartbeat timeouts are in units of ticks.
+func (n *node) Tick() {
+	select {
+	case n.tickc <- struct{}{}:
+	case <-n.done:
+	default:
+		n.logger.Warningf("A tick missed to fire. Node blocks too long!")
+	}
+}
+
+func (n *node) Campaign(ctx context.Context) error { return n.step(ctx, pb.Message{Type: pb.MsgHup}) }
+
+func (n *node) Propose(ctx context.Context, data []byte) error {
+	return n.step(ctx, pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Data: data}}})
+}
+
+func (n *node) Step(ctx context.Context, m pb.Message) error {
+	// ignore unexpected local messages received over the network
+	if IsLocalMsg(m.Type) {
+		// TODO: return an error?
+		return nil
+	}
+	return n.step(ctx, m)
+}
+
+func (n *node) ProposeConfChange(ctx context.Context, cc pb.ConfChange) error {
+	data, err := cc.Marshal()
+	if err != nil {
+		return err
+	}
+	return n.Step(ctx, pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Type: pb.EntryConfChange, Data: data}}})
+}
+
+// step advances the state machine using the given message m. ctx.Err() will be
+// returned, if any.
+func (n *node) step(ctx context.Context, m pb.Message) error {
+	ch := n.recvc
+	if m.Type == pb.MsgProp {
+		ch = n.propc
+	}
+
+	select {
+	case ch <- m:
+		return nil
+	case <-ctx.Done():
+		return ctx.Err()
+	case <-n.done:
+		return ErrStopped
+	}
+}
+
+func (n *node) Ready() <-chan Ready { return n.readyc }
+
+func (n *node) Advance() {
+	select {
+	case n.advancec <- struct{}{}:
+	case <-n.done:
+	}
+}
+
+func (n *node) ApplyConfChange(cc pb.ConfChange) *pb.ConfState {
+	var cs pb.ConfState
+	select {
+	case n.confc <- cc:
+	case <-n.done:
+	}
+	select {
+	case cs = <-n.confstatec:
+	case <-n.done:
+	}
+	return &cs
+}
+
+func (n *node) Status() Status {
+	c := make(chan Status)
+	select {
+	case n.status <- c:
+		return <-c
+	case <-n.done:
+		return Status{}
+	}
+}
+
+func (n *node) ReportUnreachable(id uint64) {
+	select {
+	case n.recvc <- pb.Message{Type: pb.MsgUnreachable, From: id}:
+	case <-n.done:
+	}
+}
+
+func (n *node) ReportSnapshot(id uint64, status SnapshotStatus) {
+	rej := status == SnapshotFailure
+
+	select {
+	case n.recvc <- pb.Message{Type: pb.MsgSnapStatus, From: id, Reject: rej}:
+	case <-n.done:
+	}
+}
+
+func (n *node) TransferLeadership(ctx context.Context, lead, transferee uint64) {
+	select {
+	// manually set 'from' and 'to', so that the leader can voluntarily transfer its leadership
+	case n.recvc <- pb.Message{Type: pb.MsgTransferLeader, From: transferee, To: lead}:
+	case <-n.done:
+	case <-ctx.Done():
+	}
+}
+
+func (n *node) ReadIndex(ctx context.Context, rctx []byte) error {
+	return n.step(ctx, pb.Message{Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: rctx}}})
+}
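+
+// exampleLinearizableRead is an illustrative sketch, not part of the upstream
+// etcd raft API: it serves a linearizable read via ReadIndex. rctx is an
+// application-chosen unique identifier that reappears in a ReadState delivered
+// through a later Ready; readyStates and waitApplied stand in for the
+// application's own plumbing.
+func exampleLinearizableRead(ctx context.Context, n Node, rctx []byte,
+	readyStates <-chan ReadState, waitApplied func(index uint64)) error {
+	if err := n.ReadIndex(ctx, rctx); err != nil {
+		return err
+	}
+	for rs := range readyStates {
+		if string(rs.RequestCtx) == string(rctx) {
+			// The read is safe once the applied index reaches rs.Index.
+			waitApplied(rs.Index)
+			return nil
+		}
+	}
+	return ErrStopped
+}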
+
+func newReady(r *raft, prevSoftSt *SoftState, prevHardSt pb.HardState) Ready {
+	rd := Ready{
+		Entries:          r.raftLog.unstableEntries(),
+		CommittedEntries: r.raftLog.nextEnts(),
+		Messages:         r.msgs,
+	}
+	if softSt := r.softState(); !softSt.equal(prevSoftSt) {
+		rd.SoftState = softSt
+	}
+	if hardSt := r.hardState(); !isHardStateEqual(hardSt, prevHardSt) {
+		rd.HardState = hardSt
+	}
+	if r.raftLog.unstable.snapshot != nil {
+		rd.Snapshot = *r.raftLog.unstable.snapshot
+	}
+	if len(r.readStates) != 0 {
+		rd.ReadStates = r.readStates
+	}
+	rd.MustSync = MustSync(rd.HardState, prevHardSt, len(rd.Entries))
+	return rd
+}
+
+// MustSync returns true if the hard state and count of Raft entries indicate
+// that a synchronous write to persistent storage is required.
+func MustSync(st, prevst pb.HardState, entsnum int) bool {
+	// Persistent state on all servers:
+	// (Updated on stable storage before responding to RPCs)
+	// currentTerm
+	// votedFor
+	// log entries[]
+	return entsnum != 0 || st.Vote != prevst.Vote || st.Term != prevst.Term
+}
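+
+// exampleMustSync is an illustrative sketch, not part of the upstream etcd
+// raft API: MustSync is true exactly when durable state changed, e.g. a vote
+// was cast in a new term or new entries were appended.
+func exampleMustSync() {
+	prev := pb.HardState{Term: 2, Vote: 1, Commit: 5}
+	cur := pb.HardState{Term: 3, Vote: 3, Commit: 5}
+	_ = MustSync(cur, prev, 0)  // true: Term and Vote changed
+	_ = MustSync(prev, prev, 0) // false: nothing durable changed
+}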
diff --git a/vendor/github.com/coreos/etcd/raft/progress.go b/vendor/github.com/coreos/etcd/raft/progress.go
new file mode 100644
index 0000000..ef3787d
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/raft/progress.go
@@ -0,0 +1,284 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import "fmt"
+
+const (
+	ProgressStateProbe ProgressStateType = iota
+	ProgressStateReplicate
+	ProgressStateSnapshot
+)
+
+type ProgressStateType uint64
+
+var prstmap = [...]string{
+	"ProgressStateProbe",
+	"ProgressStateReplicate",
+	"ProgressStateSnapshot",
+}
+
+func (st ProgressStateType) String() string { return prstmap[uint64(st)] }
+
+// Progress represents a follower’s progress in the view of the leader. The leader
+// maintains the progress of all followers, and sends entries to each follower based on its progress.
+type Progress struct {
+	Match, Next uint64
+	// State defines how the leader should interact with the follower.
+	//
+	// When in ProgressStateProbe, the leader sends at most one replication message
+	// per heartbeat interval. It also probes the actual progress of the follower.
+	//
+	// When in ProgressStateReplicate, the leader optimistically increases next
+	// to the latest entry sent after sending a replication message. This is
+	// an optimized state for fast replication of log entries to the follower.
+	//
+	// When in ProgressStateSnapshot, the leader has already sent out a snapshot
+	// and stops sending any replication messages.
+	State ProgressStateType
+
+	// Paused is used in ProgressStateProbe.
+	// When Paused is true, raft should pause sending replication message to this peer.
+	Paused bool
+	// PendingSnapshot is used in ProgressStateSnapshot.
+	// If there is a pending snapshot, PendingSnapshot will be set to the
+	// index of the snapshot. If PendingSnapshot is set, the replication process of
+	// this Progress will be paused. raft will not resend a snapshot until the pending one
+	// is reported to have failed.
+	PendingSnapshot uint64
+
+	// RecentActive is true if the progress is recently active. Receiving any messages
+	// from the corresponding follower indicates the progress is active.
+	// RecentActive can be reset to false after an election timeout.
+	RecentActive bool
+
+	// inflights is a sliding window for the inflight messages.
+	// Each inflight message contains one or more log entries.
+	// The max number of entries per message is defined in raft config as MaxSizePerMsg.
+	// Thus inflight effectively limits both the number of inflight messages
+	// and the bandwidth each Progress can use.
+	// When inflights is full, no more message should be sent.
+	// When a leader sends out a message, the index of the last
+	// entry should be added to inflights. The index MUST be added
+	// into inflights in order.
+	// When a leader receives a reply, the previous inflights should
+	// be freed by calling inflights.freeTo with the index of the last
+	// received entry.
+	ins *inflights
+
+	// IsLearner is true if this progress is tracked for a learner.
+	IsLearner bool
+}
+
+func (pr *Progress) resetState(state ProgressStateType) {
+	pr.Paused = false
+	pr.PendingSnapshot = 0
+	pr.State = state
+	pr.ins.reset()
+}
+
+func (pr *Progress) becomeProbe() {
+	// If the original state is ProgressStateSnapshot, progress knows that
+	// the pending snapshot has been sent to this peer successfully, so it
+	// probes from pendingSnapshot + 1.
+	if pr.State == ProgressStateSnapshot {
+		pendingSnapshot := pr.PendingSnapshot
+		pr.resetState(ProgressStateProbe)
+		pr.Next = max(pr.Match+1, pendingSnapshot+1)
+	} else {
+		pr.resetState(ProgressStateProbe)
+		pr.Next = pr.Match + 1
+	}
+}
+
+func (pr *Progress) becomeReplicate() {
+	pr.resetState(ProgressStateReplicate)
+	pr.Next = pr.Match + 1
+}
+
+func (pr *Progress) becomeSnapshot(snapshoti uint64) {
+	pr.resetState(ProgressStateSnapshot)
+	pr.PendingSnapshot = snapshoti
+}
+
+// maybeUpdate returns false if the given index n comes from an outdated message.
+// Otherwise it updates the progress and returns true.
+func (pr *Progress) maybeUpdate(n uint64) bool {
+	var updated bool
+	if pr.Match < n {
+		pr.Match = n
+		updated = true
+		pr.resume()
+	}
+	if pr.Next < n+1 {
+		pr.Next = n + 1
+	}
+	return updated
+}
+
+func (pr *Progress) optimisticUpdate(n uint64) { pr.Next = n + 1 }
+
+// maybeDecrTo returns false if the given rejected index comes from an out-of-order message.
+// Otherwise it decreases the progress's next index to min(rejected, last+1) and returns true.
+func (pr *Progress) maybeDecrTo(rejected, last uint64) bool {
+	if pr.State == ProgressStateReplicate {
+		// the rejection must be stale if the progress has matched and "rejected"
+		// is smaller than "match".
+		if rejected <= pr.Match {
+			return false
+		}
+		// directly decrease next to match + 1
+		pr.Next = pr.Match + 1
+		return true
+	}
+
+	// the rejection must be stale if "rejected" does not match next - 1
+	if pr.Next-1 != rejected {
+		return false
+	}
+
+	if pr.Next = min(rejected, last+1); pr.Next < 1 {
+		pr.Next = 1
+	}
+	pr.resume()
+	return true
+}
+
+func (pr *Progress) pause()  { pr.Paused = true }
+func (pr *Progress) resume() { pr.Paused = false }
+
+// IsPaused returns whether sending log entries to this node has been
+// paused. A node may be paused because it has rejected recent
+// MsgApps, is currently waiting for a snapshot, or has reached the
+// MaxInflightMsgs limit.
+func (pr *Progress) IsPaused() bool {
+	switch pr.State {
+	case ProgressStateProbe:
+		return pr.Paused
+	case ProgressStateReplicate:
+		return pr.ins.full()
+	case ProgressStateSnapshot:
+		return true
+	default:
+		panic("unexpected state")
+	}
+}
+
+func (pr *Progress) snapshotFailure() { pr.PendingSnapshot = 0 }
+
+// needSnapshotAbort returns true if the snapshot progress's Match
+// is equal to or higher than the PendingSnapshot.
+func (pr *Progress) needSnapshotAbort() bool {
+	return pr.State == ProgressStateSnapshot && pr.Match >= pr.PendingSnapshot
+}
+
+func (pr *Progress) String() string {
+	return fmt.Sprintf("next = %d, match = %d, state = %s, waiting = %v, pendingSnapshot = %d", pr.Next, pr.Match, pr.State, pr.IsPaused(), pr.PendingSnapshot)
+}
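+
+// exampleProgressTransitions is an illustrative sketch, not part of the
+// upstream etcd raft API: it walks through the follower-progress states the
+// leader drives. The indexes are placeholders chosen for the example.
+func exampleProgressTransitions() {
+	pr := &Progress{Next: 1, ins: newInflights(256)}
+	pr.becomeProbe()     // send one message per heartbeat and wait for an ack
+	pr.maybeUpdate(10)   // the follower acknowledged log index 10
+	pr.becomeReplicate() // stream entries optimistically from Next = 11
+	pr.optimisticUpdate(20)
+	pr.becomeSnapshot(25) // entries compacted away; send a snapshot at index 25
+	_ = pr.IsPaused()     // true while the snapshot is pending
+}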
+
+type inflights struct {
+	// the starting index in the buffer
+	start int
+	// number of inflights in the buffer
+	count int
+
+	// the size of the buffer
+	size int
+
+	// buffer contains the index of the last entry
+	// inside one message.
+	buffer []uint64
+}
+
+func newInflights(size int) *inflights {
+	return &inflights{
+		size: size,
+	}
+}
+
+// add adds an inflight into inflights
+func (in *inflights) add(inflight uint64) {
+	if in.full() {
+		panic("cannot add into a full inflights")
+	}
+	next := in.start + in.count
+	size := in.size
+	if next >= size {
+		next -= size
+	}
+	if next >= len(in.buffer) {
+		in.growBuf()
+	}
+	in.buffer[next] = inflight
+	in.count++
+}
+
+// grow the inflight buffer by doubling up to inflights.size. We grow on demand
+// instead of preallocating to inflights.size to handle systems which have
+// thousands of Raft groups per process.
+func (in *inflights) growBuf() {
+	newSize := len(in.buffer) * 2
+	if newSize == 0 {
+		newSize = 1
+	} else if newSize > in.size {
+		newSize = in.size
+	}
+	newBuffer := make([]uint64, newSize)
+	copy(newBuffer, in.buffer)
+	in.buffer = newBuffer
+}
+
+// freeTo frees the inflights smaller than or equal to the given `to` flight.
+func (in *inflights) freeTo(to uint64) {
+	if in.count == 0 || to < in.buffer[in.start] {
+		// out of the left side of the window
+		return
+	}
+
+	idx := in.start
+	var i int
+	for i = 0; i < in.count; i++ {
+		if to < in.buffer[idx] { // found the first inflight larger than to
+			break
+		}
+
+		// increase index and maybe rotate
+		size := in.size
+		if idx++; idx >= size {
+			idx -= size
+		}
+	}
+	// free i inflights and set new start index
+	in.count -= i
+	in.start = idx
+	if in.count == 0 {
+		// inflights is empty, reset the start index so that we don't grow the
+		// buffer unnecessarily.
+		in.start = 0
+	}
+}
+
+func (in *inflights) freeFirstOne() { in.freeTo(in.buffer[in.start]) }
+
+// full returns true if the inflights is full.
+func (in *inflights) full() bool {
+	return in.count == in.size
+}
+
+// reset frees all inflights.
+func (in *inflights) reset() {
+	in.count = 0
+	in.start = 0
+}
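+
+// exampleInflights is an illustrative sketch, not part of the upstream etcd
+// raft API: it shows how leader-side code drives an inflights window. Indexes
+// must be added in ascending order; freeTo releases every in-flight message
+// whose last entry index is at most the acknowledged index.
+func exampleInflights() {
+	in := newInflights(3)
+	in.add(5) // last entry index of the first message
+	in.add(7)
+	in.add(9)
+	_ = in.full() // true: the window is saturated, stop sending
+	in.freeTo(7)  // the peer acknowledged everything up to index 7
+	_ = in.full() // false: two slots were freed
+}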
diff --git a/vendor/github.com/coreos/etcd/raft/raft.go b/vendor/github.com/coreos/etcd/raft/raft.go
new file mode 100644
index 0000000..b4c0f02
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/raft/raft.go
@@ -0,0 +1,1406 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"math"
+	"math/rand"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+
+	pb "github.com/coreos/etcd/raft/raftpb"
+)
+
+// None is a placeholder node ID used when there is no leader.
+const None uint64 = 0
+const noLimit = math.MaxUint64
+
+// Possible values for StateType.
+const (
+	StateFollower StateType = iota
+	StateCandidate
+	StateLeader
+	StatePreCandidate
+	numStates
+)
+
+type ReadOnlyOption int
+
+const (
+	// ReadOnlySafe guarantees the linearizability of the read only request by
+	// communicating with the quorum. It is the default and suggested option.
+	ReadOnlySafe ReadOnlyOption = iota
+	// ReadOnlyLeaseBased ensures linearizability of the read only request by
+	// relying on the leader lease. It can be affected by clock drift.
+	// If the clock drift is unbounded, leader might keep the lease longer than it
+	// should (clock can move backward/pause without any bound). ReadIndex is not safe
+	// in that case.
+	ReadOnlyLeaseBased
+)
+
+// Possible values for CampaignType
+const (
+	// campaignPreElection represents the first phase of a normal election when
+	// Config.PreVote is true.
+	campaignPreElection CampaignType = "CampaignPreElection"
+	// campaignElection represents a normal (time-based) election (the second phase
+	// of the election when Config.PreVote is true).
+	campaignElection CampaignType = "CampaignElection"
+	// campaignTransfer represents the type of leader transfer
+	campaignTransfer CampaignType = "CampaignTransfer"
+)
+
+// lockedRand is a small wrapper around rand.Rand to provide
+// synchronization. Only the methods needed by the code are exposed
+// (e.g. Intn).
+type lockedRand struct {
+	mu   sync.Mutex
+	rand *rand.Rand
+}
+
+func (r *lockedRand) Intn(n int) int {
+	r.mu.Lock()
+	v := r.rand.Intn(n)
+	r.mu.Unlock()
+	return v
+}
+
+var globalRand = &lockedRand{
+	rand: rand.New(rand.NewSource(time.Now().UnixNano())),
+}
+
+// CampaignType represents the type of campaigning.
+// The reason we use a string type instead of uint64
+// is that it is simpler to compare and to fill in raft entries.
+type CampaignType string
+
+// StateType represents the role of a node in a cluster.
+type StateType uint64
+
+var stmap = [...]string{
+	"StateFollower",
+	"StateCandidate",
+	"StateLeader",
+	"StatePreCandidate",
+}
+
+func (st StateType) String() string {
+	return stmap[uint64(st)]
+}
+
+// Config contains the parameters to start a raft.
+type Config struct {
+	// ID is the identity of the local raft. ID cannot be 0.
+	ID uint64
+
+	// peers contains the IDs of all nodes (including self) in the raft cluster. It
+	// should only be set when starting a new raft cluster. Restarting raft from a
+	// previous configuration will panic if peers is set. peers is private and only
+	// used for testing right now.
+	peers []uint64
+
+	// learners contains the IDs of all learner nodes (including self if the local node is a learner) in the raft cluster.
+	// Learners only receive entries from the leader node. They do not vote or promote themselves.
+	learners []uint64
+
+	// ElectionTick is the number of Node.Tick invocations that must pass between
+	// elections. That is, if a follower does not receive any message from the
+	// leader of the current term before ElectionTick has elapsed, it will become
+	// a candidate and start an election. ElectionTick must be greater than
+	// HeartbeatTick. We suggest ElectionTick = 10 * HeartbeatTick to avoid
+	// unnecessary leader switching.
+	ElectionTick int
+	// HeartbeatTick is the number of Node.Tick invocations that must pass between
+	// heartbeats. That is, a leader sends heartbeat messages to maintain its
+	// leadership every HeartbeatTick ticks.
+	HeartbeatTick int
+
+	// Storage is the storage for raft. raft generates entries and states to be
+	// stored in storage. raft reads the persisted entries and states out of
+	// Storage when it needs them, and reads the previous state and configuration
+	// out of Storage when restarting.
+	Storage Storage
+	// Applied is the last applied index. It should only be set when restarting
+	// raft. raft will not return entries to the application smaller than or equal
+	// to Applied. If Applied is unset when restarting, raft might return
+	// previously applied entries. This is a very application-dependent
+	// configuration.
+	Applied uint64
+
+	// MaxSizePerMsg limits the max size of each append message. A smaller value
+	// lowers the raft recovery cost (initial probing and message loss during normal
+	// operation). On the other hand, it might affect throughput during normal
+	// replication. Note: math.MaxUint64 for unlimited, 0 for at most one entry per
+	// message.
+	MaxSizePerMsg uint64
+	// MaxInflightMsgs limits the max number of in-flight append messages during
+	// the optimistic replication phase. The application transport layer usually
+	// has its own sending buffer over TCP/UDP. Set MaxInflightMsgs to avoid
+	// overflowing that sending buffer. TODO (xiangli): feedback to application to
+	// limit the proposal rate?
+	MaxInflightMsgs int
+
+	// CheckQuorum specifies if the leader should check quorum activity. The leader
+	// steps down when the quorum is not active for an electionTimeout.
+	CheckQuorum bool
+
+	// PreVote enables the Pre-Vote algorithm described in raft thesis section
+	// 9.6. This prevents disruption when a node that has been partitioned away
+	// rejoins the cluster.
+	PreVote bool
+
+	// ReadOnlyOption specifies how the read only request is processed.
+	//
+	// ReadOnlySafe guarantees the linearizability of the read only request by
+	// communicating with the quorum. It is the default and suggested option.
+	//
+	// ReadOnlyLeaseBased ensures linearizability of the read only request by
+	// relying on the leader lease. It can be affected by clock drift.
+	// If the clock drift is unbounded, leader might keep the lease longer than it
+	// should (clock can move backward/pause without any bound). ReadIndex is not safe
+	// in that case.
+	// CheckQuorum MUST be enabled if ReadOnlyOption is ReadOnlyLeaseBased.
+	ReadOnlyOption ReadOnlyOption
+
+	// Logger is the logger used for raft logging. For a multinode setup that
+	// hosts multiple raft groups, each raft group can have its own logger.
+	Logger Logger
+
+	// DisableProposalForwarding, when set to true, means that followers will drop
+	// proposals rather than forwarding them to the leader. One use case for
+	// this feature would be in a situation where the Raft leader is used to
+	// compute the data of a proposal, for example, adding a timestamp from a
+	// hybrid logical clock to data in a monotonically increasing way. Forwarding
+	// should be disabled to prevent a follower with an inaccurate hybrid
+	// logical clock from assigning the timestamp and then forwarding the data
+	// to the leader.
+	DisableProposalForwarding bool
+}
+
+func (c *Config) validate() error {
+	if c.ID == None {
+		return errors.New("cannot use none as id")
+	}
+
+	if c.HeartbeatTick <= 0 {
+		return errors.New("heartbeat tick must be greater than 0")
+	}
+
+	if c.ElectionTick <= c.HeartbeatTick {
+		return errors.New("election tick must be greater than heartbeat tick")
+	}
+
+	if c.Storage == nil {
+		return errors.New("storage cannot be nil")
+	}
+
+	if c.MaxInflightMsgs <= 0 {
+		return errors.New("max inflight messages must be greater than 0")
+	}
+
+	if c.Logger == nil {
+		c.Logger = raftLogger
+	}
+
+	if c.ReadOnlyOption == ReadOnlyLeaseBased && !c.CheckQuorum {
+		return errors.New("CheckQuorum must be enabled when ReadOnlyOption is ReadOnlyLeaseBased")
+	}
+
+	return nil
+}
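+
+// exampleRestartConfig is an illustrative sketch, not part of the upstream
+// etcd raft API: a restart-time configuration. Applied tells raft not to
+// re-deliver entries at or below that index, and CheckQuorum must be enabled
+// because ReadOnlyLeaseBased is selected. The values are placeholders.
+func exampleRestartConfig(storage Storage, lastApplied uint64) *Config {
+	return &Config{
+		ID:              0x01,
+		ElectionTick:    10,
+		HeartbeatTick:   1,
+		Storage:         storage,
+		Applied:         lastApplied,
+		MaxSizePerMsg:   1024 * 1024,
+		MaxInflightMsgs: 256,
+		CheckQuorum:     true,
+		ReadOnlyOption:  ReadOnlyLeaseBased,
+	}
+}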
+
+type raft struct {
+	id uint64
+
+	Term uint64
+	Vote uint64
+
+	readStates []ReadState
+
+	// the log
+	raftLog *raftLog
+
+	maxInflight int
+	maxMsgSize  uint64
+	prs         map[uint64]*Progress
+	learnerPrs  map[uint64]*Progress
+
+	state StateType
+
+	// isLearner is true if the local raft node is a learner.
+	isLearner bool
+
+	votes map[uint64]bool
+
+	msgs []pb.Message
+
+	// the leader id
+	lead uint64
+	// leadTransferee is the id of the leader transfer target when its value is not zero.
+	// Follow the procedure defined in raft thesis 3.10.
+	leadTransferee uint64
+	// A new configuration is ignored if there is an unapplied configuration.
+	pendingConf bool
+
+	readOnly *readOnly
+
+	// number of ticks since it reached last electionTimeout when it is leader
+	// or candidate.
+	// number of ticks since it reached last electionTimeout or received a
+	// valid message from current leader when it is a follower.
+	electionElapsed int
+
+	// number of ticks since it reached last heartbeatTimeout.
+	// only leader keeps heartbeatElapsed.
+	heartbeatElapsed int
+
+	checkQuorum bool
+	preVote     bool
+
+	heartbeatTimeout int
+	electionTimeout  int
+	// randomizedElectionTimeout is a random number between
+	// [electiontimeout, 2 * electiontimeout - 1]. It gets reset
+	// when raft changes its state to follower or candidate.
+	randomizedElectionTimeout int
+	disableProposalForwarding bool
+
+	tick func()
+	step stepFunc
+
+	logger Logger
+}
+
+func newRaft(c *Config) *raft {
+	if err := c.validate(); err != nil {
+		panic(err.Error())
+	}
+	raftlog := newLog(c.Storage, c.Logger)
+	hs, cs, err := c.Storage.InitialState()
+	if err != nil {
+		panic(err) // TODO(bdarnell)
+	}
+	peers := c.peers
+	learners := c.learners
+	if len(cs.Nodes) > 0 || len(cs.Learners) > 0 {
+		if len(peers) > 0 || len(learners) > 0 {
+			// TODO(bdarnell): the peers argument is always nil except in
+			// tests; the argument should be removed and these tests should be
+			// updated to specify their nodes through a snapshot.
+			panic("cannot specify both newRaft(peers, learners) and ConfState.(Nodes, Learners)")
+		}
+		peers = cs.Nodes
+		learners = cs.Learners
+	}
+	r := &raft{
+		id:                        c.ID,
+		lead:                      None,
+		isLearner:                 false,
+		raftLog:                   raftlog,
+		maxMsgSize:                c.MaxSizePerMsg,
+		maxInflight:               c.MaxInflightMsgs,
+		prs:                       make(map[uint64]*Progress),
+		learnerPrs:                make(map[uint64]*Progress),
+		electionTimeout:           c.ElectionTick,
+		heartbeatTimeout:          c.HeartbeatTick,
+		logger:                    c.Logger,
+		checkQuorum:               c.CheckQuorum,
+		preVote:                   c.PreVote,
+		readOnly:                  newReadOnly(c.ReadOnlyOption),
+		disableProposalForwarding: c.DisableProposalForwarding,
+	}
+	for _, p := range peers {
+		r.prs[p] = &Progress{Next: 1, ins: newInflights(r.maxInflight)}
+	}
+	for _, p := range learners {
+		if _, ok := r.prs[p]; ok {
+			panic(fmt.Sprintf("node %x is in both learner and peer list", p))
+		}
+		r.learnerPrs[p] = &Progress{Next: 1, ins: newInflights(r.maxInflight), IsLearner: true}
+		if r.id == p {
+			r.isLearner = true
+		}
+	}
+
+	if !isHardStateEqual(hs, emptyState) {
+		r.loadState(hs)
+	}
+	if c.Applied > 0 {
+		raftlog.appliedTo(c.Applied)
+	}
+	r.becomeFollower(r.Term, None)
+
+	var nodesStrs []string
+	for _, n := range r.nodes() {
+		nodesStrs = append(nodesStrs, fmt.Sprintf("%x", n))
+	}
+
+	r.logger.Infof("newRaft %x [peers: [%s], term: %d, commit: %d, applied: %d, lastindex: %d, lastterm: %d]",
+		r.id, strings.Join(nodesStrs, ","), r.Term, r.raftLog.committed, r.raftLog.applied, r.raftLog.lastIndex(), r.raftLog.lastTerm())
+	return r
+}
+
+func (r *raft) hasLeader() bool { return r.lead != None }
+
+func (r *raft) softState() *SoftState { return &SoftState{Lead: r.lead, RaftState: r.state} }
+
+func (r *raft) hardState() pb.HardState {
+	return pb.HardState{
+		Term:   r.Term,
+		Vote:   r.Vote,
+		Commit: r.raftLog.committed,
+	}
+}
+
+func (r *raft) quorum() int { return len(r.prs)/2 + 1 }
+
+func (r *raft) nodes() []uint64 {
+	nodes := make([]uint64, 0, len(r.prs)+len(r.learnerPrs))
+	for id := range r.prs {
+		nodes = append(nodes, id)
+	}
+	for id := range r.learnerPrs {
+		nodes = append(nodes, id)
+	}
+	sort.Sort(uint64Slice(nodes))
+	return nodes
+}
+
+// send fills in the message term as needed, then queues the message in r.msgs;
+// state is persisted to stable storage before the message is actually sent.
+func (r *raft) send(m pb.Message) {
+	m.From = r.id
+	if m.Type == pb.MsgVote || m.Type == pb.MsgVoteResp || m.Type == pb.MsgPreVote || m.Type == pb.MsgPreVoteResp {
+		if m.Term == 0 {
+			// All {pre-,}campaign messages need to have the term set when
+			// sending.
+			// - MsgVote: m.Term is the term the node is campaigning for,
+			//   non-zero as we increment the term when campaigning.
+			// - MsgVoteResp: m.Term is the new r.Term if the MsgVote was
+			//   granted, non-zero for the same reason MsgVote is
+			// - MsgPreVote: m.Term is the term the node will campaign,
+			//   non-zero as we use m.Term to indicate the next term we'll be
+			//   campaigning for
+			// - MsgPreVoteResp: m.Term is the term received in the original
+			//   MsgPreVote if the pre-vote was granted, non-zero for the
+			//   same reasons MsgPreVote is
+			panic(fmt.Sprintf("term should be set when sending %s", m.Type))
+		}
+	} else {
+		if m.Term != 0 {
+			panic(fmt.Sprintf("term should not be set when sending %s (was %d)", m.Type, m.Term))
+		}
+		// do not attach term to MsgProp, MsgReadIndex
+		// proposals are a way to forward to the leader and
+		// should be treated as local message.
+		// MsgReadIndex is also forwarded to leader.
+		if m.Type != pb.MsgProp && m.Type != pb.MsgReadIndex {
+			m.Term = r.Term
+		}
+	}
+	r.msgs = append(r.msgs, m)
+}
+
+func (r *raft) getProgress(id uint64) *Progress {
+	if pr, ok := r.prs[id]; ok {
+		return pr
+	}
+
+	return r.learnerPrs[id]
+}
+
+// sendAppend sends an append RPC with new entries (if any) and the current commit index to the given peer.
+func (r *raft) sendAppend(to uint64) {
+	pr := r.getProgress(to)
+	if pr.IsPaused() {
+		return
+	}
+	m := pb.Message{}
+	m.To = to
+
+	term, errt := r.raftLog.term(pr.Next - 1)
+	ents, erre := r.raftLog.entries(pr.Next, r.maxMsgSize)
+
+	if errt != nil || erre != nil { // send snapshot if we failed to get term or entries
+		if !pr.RecentActive {
+			r.logger.Debugf("ignore sending snapshot to %x since it is not recently active", to)
+			return
+		}
+
+		m.Type = pb.MsgSnap
+		snapshot, err := r.raftLog.snapshot()
+		if err != nil {
+			if err == ErrSnapshotTemporarilyUnavailable {
+				r.logger.Debugf("%x failed to send snapshot to %x because snapshot is temporarily unavailable", r.id, to)
+				return
+			}
+			panic(err) // TODO(bdarnell)
+		}
+		if IsEmptySnap(snapshot) {
+			panic("need non-empty snapshot")
+		}
+		m.Snapshot = snapshot
+		sindex, sterm := snapshot.Metadata.Index, snapshot.Metadata.Term
+		r.logger.Debugf("%x [firstindex: %d, commit: %d] sent snapshot[index: %d, term: %d] to %x [%s]",
+			r.id, r.raftLog.firstIndex(), r.raftLog.committed, sindex, sterm, to, pr)
+		pr.becomeSnapshot(sindex)
+		r.logger.Debugf("%x paused sending replication messages to %x [%s]", r.id, to, pr)
+	} else {
+		m.Type = pb.MsgApp
+		m.Index = pr.Next - 1
+		m.LogTerm = term
+		m.Entries = ents
+		m.Commit = r.raftLog.committed
+		if n := len(m.Entries); n != 0 {
+			switch pr.State {
+			// optimistically increase the next when in ProgressStateReplicate
+			case ProgressStateReplicate:
+				last := m.Entries[n-1].Index
+				pr.optimisticUpdate(last)
+				pr.ins.add(last)
+			case ProgressStateProbe:
+				pr.pause()
+			default:
+				r.logger.Panicf("%x is sending append in unhandled state %s", r.id, pr.State)
+			}
+		}
+	}
+	r.send(m)
+}
+
+// sendHeartbeat sends a MsgHeartbeat (an empty heartbeat RPC) to the given peer.
+func (r *raft) sendHeartbeat(to uint64, ctx []byte) {
+	// Attach the commit as min(to.matched, r.committed).
+	// When the leader sends out a heartbeat message,
+	// the receiver (follower) might not be matched with the leader
+	// or it might not have all the committed entries.
+	// The leader MUST NOT forward the follower's commit to
+	// an unmatched index.
+	commit := min(r.getProgress(to).Match, r.raftLog.committed)
+	m := pb.Message{
+		To:      to,
+		Type:    pb.MsgHeartbeat,
+		Commit:  commit,
+		Context: ctx,
+	}
+
+	r.send(m)
+}
+
+func (r *raft) forEachProgress(f func(id uint64, pr *Progress)) {
+	for id, pr := range r.prs {
+		f(id, pr)
+	}
+
+	for id, pr := range r.learnerPrs {
+		f(id, pr)
+	}
+}
+
+// bcastAppend sends append RPCs with entries to all peers that are not
+// up-to-date according to the progress recorded in r.prs.
+func (r *raft) bcastAppend() {
+	r.forEachProgress(func(id uint64, _ *Progress) {
+		if id == r.id {
+			return
+		}
+
+		r.sendAppend(id)
+	})
+}
+
+// bcastHeartbeat sends heartbeat RPCs, without entries, to all the peers.
+func (r *raft) bcastHeartbeat() {
+	lastCtx := r.readOnly.lastPendingRequestCtx()
+	if len(lastCtx) == 0 {
+		r.bcastHeartbeatWithCtx(nil)
+	} else {
+		r.bcastHeartbeatWithCtx([]byte(lastCtx))
+	}
+}
+
+func (r *raft) bcastHeartbeatWithCtx(ctx []byte) {
+	r.forEachProgress(func(id uint64, _ *Progress) {
+		if id == r.id {
+			return
+		}
+		r.sendHeartbeat(id, ctx)
+	})
+}
+
+// maybeCommit attempts to advance the commit index. Returns true if
+// the commit index changed (in which case the caller should call
+// r.bcastAppend).
+func (r *raft) maybeCommit() bool {
+	// TODO(bmizerany): optimize. The current implementation is naive.
+	mis := make(uint64Slice, 0, len(r.prs))
+	for _, p := range r.prs {
+		mis = append(mis, p.Match)
+	}
+	sort.Sort(sort.Reverse(mis))
+	mci := mis[r.quorum()-1]
+	return r.raftLog.maybeCommit(mci, r.Term)
+}
+
+func (r *raft) reset(term uint64) {
+	if r.Term != term {
+		r.Term = term
+		r.Vote = None
+	}
+	r.lead = None
+
+	r.electionElapsed = 0
+	r.heartbeatElapsed = 0
+	r.resetRandomizedElectionTimeout()
+
+	r.abortLeaderTransfer()
+
+	r.votes = make(map[uint64]bool)
+	r.forEachProgress(func(id uint64, pr *Progress) {
+		*pr = Progress{Next: r.raftLog.lastIndex() + 1, ins: newInflights(r.maxInflight), IsLearner: pr.IsLearner}
+		if id == r.id {
+			pr.Match = r.raftLog.lastIndex()
+		}
+	})
+
+	r.pendingConf = false
+	r.readOnly = newReadOnly(r.readOnly.option)
+}
+
+func (r *raft) appendEntry(es ...pb.Entry) {
+	li := r.raftLog.lastIndex()
+	for i := range es {
+		es[i].Term = r.Term
+		es[i].Index = li + 1 + uint64(i)
+	}
+	r.raftLog.append(es...)
+	r.getProgress(r.id).maybeUpdate(r.raftLog.lastIndex())
+	// Regardless of maybeCommit's return, our caller will call bcastAppend.
+	r.maybeCommit()
+}
+
+// tickElection is run by followers and candidates; once the (randomized)
+// election timeout has elapsed it starts an election.
+func (r *raft) tickElection() {
+	r.electionElapsed++
+
+	if r.promotable() && r.pastElectionTimeout() {
+		r.electionElapsed = 0
+		r.Step(pb.Message{From: r.id, Type: pb.MsgHup})
+	}
+}
+
+// tickHeartbeat is run by leaders to send a MsgBeat after r.heartbeatTimeout.
+func (r *raft) tickHeartbeat() {
+	r.heartbeatElapsed++
+	r.electionElapsed++
+
+	if r.electionElapsed >= r.electionTimeout {
+		r.electionElapsed = 0
+		if r.checkQuorum {
+			r.Step(pb.Message{From: r.id, Type: pb.MsgCheckQuorum})
+		}
+		// If current leader cannot transfer leadership in electionTimeout, it becomes leader again.
+		if r.state == StateLeader && r.leadTransferee != None {
+			r.abortLeaderTransfer()
+		}
+	}
+
+	if r.state != StateLeader {
+		return
+	}
+
+	if r.heartbeatElapsed >= r.heartbeatTimeout {
+		r.heartbeatElapsed = 0
+		r.Step(pb.Message{From: r.id, Type: pb.MsgBeat})
+	}
+}
+
+func (r *raft) becomeFollower(term uint64, lead uint64) {
+	r.step = stepFollower
+	r.reset(term)
+	r.tick = r.tickElection
+	r.lead = lead
+	r.state = StateFollower
+	r.logger.Infof("%x became follower at term %d", r.id, r.Term)
+}
+
+func (r *raft) becomeCandidate() {
+	// TODO(xiangli) remove the panic when the raft implementation is stable
+	if r.state == StateLeader {
+		panic("invalid transition [leader -> candidate]")
+	}
+	r.step = stepCandidate
+	r.reset(r.Term + 1)
+	r.tick = r.tickElection
+	r.Vote = r.id
+	r.state = StateCandidate
+	r.logger.Infof("%x became candidate at term %d", r.id, r.Term)
+}
+
+func (r *raft) becomePreCandidate() {
+	// TODO(xiangli) remove the panic when the raft implementation is stable
+	if r.state == StateLeader {
+		panic("invalid transition [leader -> pre-candidate]")
+	}
+	// Becoming a pre-candidate changes our step functions and state,
+	// but doesn't change anything else. In particular it does not increase
+	// r.Term or change r.Vote.
+	r.step = stepCandidate
+	r.votes = make(map[uint64]bool)
+	r.tick = r.tickElection
+	r.state = StatePreCandidate
+	r.logger.Infof("%x became pre-candidate at term %d", r.id, r.Term)
+}
+
+func (r *raft) becomeLeader() {
+	// TODO(xiangli) remove the panic when the raft implementation is stable
+	if r.state == StateFollower {
+		panic("invalid transition [follower -> leader]")
+	}
+	r.step = stepLeader
+	r.reset(r.Term)
+	r.tick = r.tickHeartbeat
+	r.lead = r.id
+	r.state = StateLeader
+	ents, err := r.raftLog.entries(r.raftLog.committed+1, noLimit)
+	if err != nil {
+		r.logger.Panicf("unexpected error getting uncommitted entries (%v)", err)
+	}
+
+	nconf := numOfPendingConf(ents)
+	if nconf > 1 {
+		panic("unexpected multiple uncommitted config entry")
+	}
+	if nconf == 1 {
+		r.pendingConf = true
+	}
+
+	r.appendEntry(pb.Entry{Data: nil})
+	r.logger.Infof("%x became leader at term %d", r.id, r.Term)
+}
+
+func (r *raft) campaign(t CampaignType) {
+	var term uint64
+	var voteMsg pb.MessageType
+	if t == campaignPreElection {
+		r.becomePreCandidate()
+		voteMsg = pb.MsgPreVote
+		// PreVote RPCs are sent for the next term before we've incremented r.Term.
+		term = r.Term + 1
+	} else {
+		r.becomeCandidate()
+		voteMsg = pb.MsgVote
+		term = r.Term
+	}
+	if r.quorum() == r.poll(r.id, voteRespMsgType(voteMsg), true) {
+		// We won the election after voting for ourselves (which must mean that
+		// this is a single-node cluster). Advance to the next state.
+		if t == campaignPreElection {
+			r.campaign(campaignElection)
+		} else {
+			r.becomeLeader()
+		}
+		return
+	}
+	for id := range r.prs {
+		if id == r.id {
+			continue
+		}
+		r.logger.Infof("%x [logterm: %d, index: %d] sent %s request to %x at term %d",
+			r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), voteMsg, id, r.Term)
+
+		var ctx []byte
+		if t == campaignTransfer {
+			ctx = []byte(t)
+		}
+		r.send(pb.Message{Term: term, To: id, Type: voteMsg, Index: r.raftLog.lastIndex(), LogTerm: r.raftLog.lastTerm(), Context: ctx})
+	}
+}
+
+func (r *raft) poll(id uint64, t pb.MessageType, v bool) (granted int) {
+	if v {
+		r.logger.Infof("%x received %s from %x at term %d", r.id, t, id, r.Term)
+	} else {
+		r.logger.Infof("%x received %s rejection from %x at term %d", r.id, t, id, r.Term)
+	}
+	if _, ok := r.votes[id]; !ok {
+		r.votes[id] = v
+	}
+	for _, vv := range r.votes {
+		if vv {
+			granted++
+		}
+	}
+	return granted
+}
+
+func (r *raft) Step(m pb.Message) error {
+	// Handle the message term, which may result in our stepping down to a follower.
+	switch {
+	case m.Term == 0:
+		// local message
+	case m.Term > r.Term:
+		if m.Type == pb.MsgVote || m.Type == pb.MsgPreVote {
+			force := bytes.Equal(m.Context, []byte(campaignTransfer))
+			inLease := r.checkQuorum && r.lead != None && r.electionElapsed < r.electionTimeout
+			if !force && inLease {
+				// If a server receives a RequestVote request within the minimum election timeout
+				// of hearing from a current leader, it does not update its term or grant its vote
+				r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] ignored %s from %x [logterm: %d, index: %d] at term %d: lease is not expired (remaining ticks: %d)",
+					r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term, r.electionTimeout-r.electionElapsed)
+				return nil
+			}
+		}
+		switch {
+		case m.Type == pb.MsgPreVote:
+			// Never change our term in response to a PreVote
+		case m.Type == pb.MsgPreVoteResp && !m.Reject:
+			// We send pre-vote requests with a term in our future. If the
+			// pre-vote is granted, we will increment our term when we get a
+			// quorum. If it is not, the term comes from the node that
+			// rejected our vote so we should become a follower at the new
+			// term.
+		default:
+			r.logger.Infof("%x [term: %d] received a %s message with higher term from %x [term: %d]",
+				r.id, r.Term, m.Type, m.From, m.Term)
+			if m.Type == pb.MsgApp || m.Type == pb.MsgHeartbeat || m.Type == pb.MsgSnap {
+				r.becomeFollower(m.Term, m.From)
+			} else {
+				r.becomeFollower(m.Term, None)
+			}
+		}
+
+	case m.Term < r.Term:
+		if r.checkQuorum && (m.Type == pb.MsgHeartbeat || m.Type == pb.MsgApp) {
+			// We have received messages from a leader at a lower term. It is possible
+			// that these messages were simply delayed in the network, but this could
+			// also mean that this node has advanced its term number during a network
+			// partition, and it is now unable to either win an election or to rejoin
+			// the majority on the old term. If checkQuorum is false, this will be
+			// handled by incrementing term numbers in response to MsgVote with a
+			// higher term, but if checkQuorum is true we may not advance the term on
+			// MsgVote and must generate other messages to advance the term. The net
+			// result of these two features is to minimize the disruption caused by
+			// nodes that have been removed from the cluster's configuration: a
+			// removed node will send MsgVotes (or MsgPreVotes) which will be ignored,
+			// but it will not receive MsgApp or MsgHeartbeat, so it will not create
+			// disruptive term increases
+			r.send(pb.Message{To: m.From, Type: pb.MsgAppResp})
+		} else {
+			// ignore other cases
+			r.logger.Infof("%x [term: %d] ignored a %s message with lower term from %x [term: %d]",
+				r.id, r.Term, m.Type, m.From, m.Term)
+		}
+		return nil
+	}
+
+	switch m.Type {
+	case pb.MsgHup:
+		if r.state != StateLeader {
+			ents, err := r.raftLog.slice(r.raftLog.applied+1, r.raftLog.committed+1, noLimit)
+			if err != nil {
+				r.logger.Panicf("unexpected error getting unapplied entries (%v)", err)
+			}
+			if n := numOfPendingConf(ents); n != 0 && r.raftLog.committed > r.raftLog.applied {
+				r.logger.Warningf("%x cannot campaign at term %d since there are still %d pending configuration changes to apply", r.id, r.Term, n)
+				return nil
+			}
+
+			r.logger.Infof("%x is starting a new election at term %d", r.id, r.Term)
+			if r.preVote {
+				r.campaign(campaignPreElection)
+			} else {
+				r.campaign(campaignElection)
+			}
+		} else {
+			r.logger.Debugf("%x ignoring MsgHup because already leader", r.id)
+		}
+
+	case pb.MsgVote, pb.MsgPreVote:
+		if r.isLearner {
+			// TODO: a learner may need to vote, in case a node goes down during a confchange.
+			r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] ignored %s from %x [logterm: %d, index: %d] at term %d: learner can not vote",
+				r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term)
+			return nil
+		}
+		// The m.Term > r.Term clause is for MsgPreVote. For MsgVote m.Term should
+		// always equal r.Term.
+		if (r.Vote == None || m.Term > r.Term || r.Vote == m.From) && r.raftLog.isUpToDate(m.Index, m.LogTerm) {
+			r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] cast %s for %x [logterm: %d, index: %d] at term %d",
+				r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term)
+			// When responding to Msg{Pre,}Vote messages we include the term
+			// from the message, not the local term. To see why consider the
+			// case where a single node was previously partitioned away and
+			// its local term is now out of date. If we include the local term
+			// (recall that for pre-votes we don't update the local term), the
+			// (pre-)campaigning node on the other end will proceed to ignore
+			// the message (it ignores all out of date messages).
+			// The term in the original message and current local term are the
+			// same in the case of regular votes, but different for pre-votes.
+			r.send(pb.Message{To: m.From, Term: m.Term, Type: voteRespMsgType(m.Type)})
+			if m.Type == pb.MsgVote {
+				// Only record real votes.
+				r.electionElapsed = 0
+				r.Vote = m.From
+			}
+		} else {
+			r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] rejected %s from %x [logterm: %d, index: %d] at term %d",
+				r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term)
+			r.send(pb.Message{To: m.From, Term: r.Term, Type: voteRespMsgType(m.Type), Reject: true})
+		}
+
+	default:
+		r.step(r, m)
+	}
+	return nil
+}
+
+type stepFunc func(r *raft, m pb.Message)
+
+func stepLeader(r *raft, m pb.Message) {
+	// These message types do not require any progress for m.From.
+	switch m.Type {
+	case pb.MsgBeat:
+		r.bcastHeartbeat()
+		return
+	case pb.MsgCheckQuorum:
+		if !r.checkQuorumActive() {
+			r.logger.Warningf("%x stepped down to follower since quorum is not active", r.id)
+			r.becomeFollower(r.Term, None)
+		}
+		return
+	case pb.MsgProp:
+		if len(m.Entries) == 0 {
+			r.logger.Panicf("%x stepped empty MsgProp", r.id)
+		}
+		if _, ok := r.prs[r.id]; !ok {
+			// If we are not currently a member of the range (i.e. this node
+			// was removed from the configuration while serving as leader),
+			// drop any new proposals.
+			return
+		}
+		if r.leadTransferee != None {
+			r.logger.Debugf("%x [term %d] transfer leadership to %x is in progress; dropping proposal", r.id, r.Term, r.leadTransferee)
+			return
+		}
+
+		for i, e := range m.Entries {
+			if e.Type == pb.EntryConfChange {
+				if r.pendingConf {
+					r.logger.Infof("propose conf %s ignored since pending unapplied configuration", e.String())
+					m.Entries[i] = pb.Entry{Type: pb.EntryNormal}
+				}
+				r.pendingConf = true
+			}
+		}
+		r.appendEntry(m.Entries...)
+		r.bcastAppend()
+		return
+	case pb.MsgReadIndex:
+		if r.quorum() > 1 {
+			if r.raftLog.zeroTermOnErrCompacted(r.raftLog.term(r.raftLog.committed)) != r.Term {
+				// Reject read only request when this leader has not committed any log entry at its term.
+				return
+			}
+
+			// thinking: use an internally defined context instead of the user-given context.
+			// We can express this in terms of the term and index instead of a user-supplied value.
+			// This would allow multiple reads to piggyback on the same message.
+			switch r.readOnly.option {
+			case ReadOnlySafe:
+				r.readOnly.addRequest(r.raftLog.committed, m)
+				r.bcastHeartbeatWithCtx(m.Entries[0].Data)
+			case ReadOnlyLeaseBased:
+				ri := r.raftLog.committed
+				if m.From == None || m.From == r.id { // from local member
+					r.readStates = append(r.readStates, ReadState{Index: r.raftLog.committed, RequestCtx: m.Entries[0].Data})
+				} else {
+					r.send(pb.Message{To: m.From, Type: pb.MsgReadIndexResp, Index: ri, Entries: m.Entries})
+				}
+			}
+		} else {
+			r.readStates = append(r.readStates, ReadState{Index: r.raftLog.committed, RequestCtx: m.Entries[0].Data})
+		}
+
+		return
+	}
+
+	// All other message types require a progress for m.From (pr).
+	pr := r.getProgress(m.From)
+	if pr == nil {
+		r.logger.Debugf("%x no progress available for %x", r.id, m.From)
+		return
+	}
+	switch m.Type {
+	case pb.MsgAppResp:
+		pr.RecentActive = true
+
+		if m.Reject {
+			r.logger.Debugf("%x received msgApp rejection(lastindex: %d) from %x for index %d",
+				r.id, m.RejectHint, m.From, m.Index)
+			if pr.maybeDecrTo(m.Index, m.RejectHint) {
+				r.logger.Debugf("%x decreased progress of %x to [%s]", r.id, m.From, pr)
+				if pr.State == ProgressStateReplicate {
+					pr.becomeProbe()
+				}
+				r.sendAppend(m.From)
+			}
+		} else {
+			oldPaused := pr.IsPaused()
+			if pr.maybeUpdate(m.Index) {
+				switch {
+				case pr.State == ProgressStateProbe:
+					pr.becomeReplicate()
+				case pr.State == ProgressStateSnapshot && pr.needSnapshotAbort():
+					r.logger.Debugf("%x snapshot aborted, resumed sending replication messages to %x [%s]", r.id, m.From, pr)
+					pr.becomeProbe()
+				case pr.State == ProgressStateReplicate:
+					pr.ins.freeTo(m.Index)
+				}
+
+				if r.maybeCommit() {
+					r.bcastAppend()
+				} else if oldPaused {
+					// maybeUpdate() reset the wait state on this node. If we had delayed sending
+					// an update before, send it now.
+					r.sendAppend(m.From)
+				}
+				// Transfer leadership is in progress.
+				if m.From == r.leadTransferee && pr.Match == r.raftLog.lastIndex() {
+					r.logger.Infof("%x sent MsgTimeoutNow to %x after received MsgAppResp", r.id, m.From)
+					r.sendTimeoutNow(m.From)
+				}
+			}
+		}
+	case pb.MsgHeartbeatResp:
+		pr.RecentActive = true
+		pr.resume()
+
+		// free one slot for the full inflights window to allow progress.
+		if pr.State == ProgressStateReplicate && pr.ins.full() {
+			pr.ins.freeFirstOne()
+		}
+		if pr.Match < r.raftLog.lastIndex() {
+			r.sendAppend(m.From)
+		}
+
+		if r.readOnly.option != ReadOnlySafe || len(m.Context) == 0 {
+			return
+		}
+
+		ackCount := r.readOnly.recvAck(m)
+		if ackCount < r.quorum() {
+			return
+		}
+
+		rss := r.readOnly.advance(m)
+		for _, rs := range rss {
+			req := rs.req
+			if req.From == None || req.From == r.id { // from local member
+				r.readStates = append(r.readStates, ReadState{Index: rs.index, RequestCtx: req.Entries[0].Data})
+			} else {
+				r.send(pb.Message{To: req.From, Type: pb.MsgReadIndexResp, Index: rs.index, Entries: req.Entries})
+			}
+		}
+	case pb.MsgSnapStatus:
+		if pr.State != ProgressStateSnapshot {
+			return
+		}
+		if !m.Reject {
+			pr.becomeProbe()
+			r.logger.Debugf("%x snapshot succeeded, resumed sending replication messages to %x [%s]", r.id, m.From, pr)
+		} else {
+			pr.snapshotFailure()
+			pr.becomeProbe()
+			r.logger.Debugf("%x snapshot failed, resumed sending replication messages to %x [%s]", r.id, m.From, pr)
+		}
+		// If the snapshot finished, wait for the MsgAppResp from the remote node
+		// before sending out the next MsgApp.
+		// If the snapshot failed, wait for a heartbeat interval before the next try.
+		pr.pause()
+	case pb.MsgUnreachable:
+		// During optimistic replication, if the remote becomes unreachable,
+		// there is a high probability that a MsgApp was lost.
+		if pr.State == ProgressStateReplicate {
+			pr.becomeProbe()
+		}
+		r.logger.Debugf("%x failed to send message to %x because it is unreachable [%s]", r.id, m.From, pr)
+	case pb.MsgTransferLeader:
+		if pr.IsLearner {
+			r.logger.Debugf("%x is learner. Ignored transferring leadership", r.id)
+			return
+		}
+		leadTransferee := m.From
+		lastLeadTransferee := r.leadTransferee
+		if lastLeadTransferee != None {
+			if lastLeadTransferee == leadTransferee {
+				r.logger.Infof("%x [term %d] transfer leadership to %x is in progress, ignoring request to same node %x",
+					r.id, r.Term, leadTransferee, leadTransferee)
+				return
+			}
+			r.abortLeaderTransfer()
+			r.logger.Infof("%x [term %d] abort previous transferring leadership to %x", r.id, r.Term, lastLeadTransferee)
+		}
+		if leadTransferee == r.id {
+			r.logger.Debugf("%x is already leader. Ignored transferring leadership to self", r.id)
+			return
+		}
+		// Transfer leadership to a third party.
+		r.logger.Infof("%x [term %d] starts to transfer leadership to %x", r.id, r.Term, leadTransferee)
+		// A leadership transfer should finish within one electionTimeout, so reset r.electionElapsed.
+		r.electionElapsed = 0
+		r.leadTransferee = leadTransferee
+		if pr.Match == r.raftLog.lastIndex() {
+			r.sendTimeoutNow(leadTransferee)
+			r.logger.Infof("%x sends MsgTimeoutNow to %x immediately as %x already has up-to-date log", r.id, leadTransferee, leadTransferee)
+		} else {
+			r.sendAppend(leadTransferee)
+		}
+	}
+}
+
+// stepCandidate is shared by StateCandidate and StatePreCandidate; the difference is
+// whether they respond to MsgVoteResp or MsgPreVoteResp.
+func stepCandidate(r *raft, m pb.Message) {
+	// Only handle vote responses corresponding to our candidacy (while in
+	// StateCandidate, we may get stale MsgPreVoteResp messages in this term from
+	// our pre-candidate state).
+	var myVoteRespType pb.MessageType
+	if r.state == StatePreCandidate {
+		myVoteRespType = pb.MsgPreVoteResp
+	} else {
+		myVoteRespType = pb.MsgVoteResp
+	}
+	switch m.Type {
+	case pb.MsgProp:
+		r.logger.Infof("%x no leader at term %d; dropping proposal", r.id, r.Term)
+		return
+	case pb.MsgApp:
+		r.becomeFollower(r.Term, m.From)
+		r.handleAppendEntries(m)
+	case pb.MsgHeartbeat:
+		r.becomeFollower(r.Term, m.From)
+		r.handleHeartbeat(m)
+	case pb.MsgSnap:
+		r.becomeFollower(m.Term, m.From)
+		r.handleSnapshot(m)
+	case myVoteRespType:
+		gr := r.poll(m.From, m.Type, !m.Reject)
+		r.logger.Infof("%x [quorum:%d] has received %d %s votes and %d vote rejections", r.id, r.quorum(), gr, m.Type, len(r.votes)-gr)
+		switch r.quorum() {
+		case gr:
+			if r.state == StatePreCandidate {
+				r.campaign(campaignElection)
+			} else {
+				r.becomeLeader()
+				r.bcastAppend()
+			}
+		case len(r.votes) - gr:
+			r.becomeFollower(r.Term, None)
+		}
+	case pb.MsgTimeoutNow:
+		r.logger.Debugf("%x [term %d state %v] ignored MsgTimeoutNow from %x", r.id, r.Term, r.state, m.From)
+	}
+}
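+
+// The tally above resolves the election as soon as either outcome is certain:
+// r.quorum() is a strict majority, so quorum grants win and quorum rejections
+// make winning impossible. An illustrative standalone sketch (not part of
+// this package):
+//
+//	func majority(voters int) int { return voters/2 + 1 }
+//
+// For 5 voters, majority(5) == 3: three grants elect the candidate, and three
+// rejections (len(r.votes)-gr == 3) revert it to follower.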
+
+func stepFollower(r *raft, m pb.Message) {
+	switch m.Type {
+	case pb.MsgProp:
+		if r.lead == None {
+			r.logger.Infof("%x no leader at term %d; dropping proposal", r.id, r.Term)
+			return
+		} else if r.disableProposalForwarding {
+			r.logger.Infof("%x not forwarding to leader %x at term %d; dropping proposal", r.id, r.lead, r.Term)
+			return
+		}
+		m.To = r.lead
+		r.send(m)
+	case pb.MsgApp:
+		r.electionElapsed = 0
+		r.lead = m.From
+		r.handleAppendEntries(m)
+	case pb.MsgHeartbeat:
+		r.electionElapsed = 0
+		r.lead = m.From
+		r.handleHeartbeat(m)
+	case pb.MsgSnap:
+		r.electionElapsed = 0
+		r.lead = m.From
+		r.handleSnapshot(m)
+	case pb.MsgTransferLeader:
+		if r.lead == None {
+			r.logger.Infof("%x no leader at term %d; dropping leader transfer msg", r.id, r.Term)
+			return
+		}
+		m.To = r.lead
+		r.send(m)
+	case pb.MsgTimeoutNow:
+		if r.promotable() {
+			r.logger.Infof("%x [term %d] received MsgTimeoutNow from %x and starts an election to get leadership.", r.id, r.Term, m.From)
+			// Leadership transfers never use pre-vote even if r.preVote is true; we
+			// know we are not recovering from a partition so there is no need for the
+			// extra round trip.
+			r.campaign(campaignTransfer)
+		} else {
+			r.logger.Infof("%x received MsgTimeoutNow from %x but is not promotable", r.id, m.From)
+		}
+	case pb.MsgReadIndex:
+		if r.lead == None {
+			r.logger.Infof("%x no leader at term %d; dropping index reading msg", r.id, r.Term)
+			return
+		}
+		m.To = r.lead
+		r.send(m)
+	case pb.MsgReadIndexResp:
+		if len(m.Entries) != 1 {
+			r.logger.Errorf("%x invalid format of MsgReadIndexResp from %x, entries count: %d", r.id, m.From, len(m.Entries))
+			return
+		}
+		r.readStates = append(r.readStates, ReadState{Index: m.Index, RequestCtx: m.Entries[0].Data})
+	}
+}
+
+func (r *raft) handleAppendEntries(m pb.Message) {
+	if m.Index < r.raftLog.committed {
+		r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.committed})
+		return
+	}
+
+	if mlastIndex, ok := r.raftLog.maybeAppend(m.Index, m.LogTerm, m.Commit, m.Entries...); ok {
+		r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: mlastIndex})
+	} else {
+		r.logger.Debugf("%x [logterm: %d, index: %d] rejected msgApp [logterm: %d, index: %d] from %x",
+			r.id, r.raftLog.zeroTermOnErrCompacted(r.raftLog.term(m.Index)), m.Index, m.LogTerm, m.Index, m.From)
+		r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: m.Index, Reject: true, RejectHint: r.raftLog.lastIndex()})
+	}
+}
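+
+// The rejection above pairs with the leader-side back-off in pr.maybeDecrTo
+// (defined in progress.go, not in this hunk). Conceptually the leader moves
+// its probe point toward the follower's RejectHint; a hypothetical sketch of
+// that back-off for a leader in probe state:
+//
+//	func nextAfterReject(rejected, lastHint uint64) uint64 {
+//		next := rejected
+//		if lastHint+1 < next {
+//			next = lastHint + 1
+//		}
+//		if next < 1 {
+//			next = 1
+//		}
+//		return next
+//	}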
+
+func (r *raft) handleHeartbeat(m pb.Message) {
+	r.raftLog.commitTo(m.Commit)
+	r.send(pb.Message{To: m.From, Type: pb.MsgHeartbeatResp, Context: m.Context})
+}
+
+func (r *raft) handleSnapshot(m pb.Message) {
+	sindex, sterm := m.Snapshot.Metadata.Index, m.Snapshot.Metadata.Term
+	if r.restore(m.Snapshot) {
+		r.logger.Infof("%x [commit: %d] restored snapshot [index: %d, term: %d]",
+			r.id, r.raftLog.committed, sindex, sterm)
+		r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.lastIndex()})
+	} else {
+		r.logger.Infof("%x [commit: %d] ignored snapshot [index: %d, term: %d]",
+			r.id, r.raftLog.committed, sindex, sterm)
+		r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.committed})
+	}
+}
+
+// restore recovers the state machine from a snapshot. It restores the log and the
+// configuration of the state machine.
+func (r *raft) restore(s pb.Snapshot) bool {
+	if s.Metadata.Index <= r.raftLog.committed {
+		return false
+	}
+	if r.raftLog.matchTerm(s.Metadata.Index, s.Metadata.Term) {
+		r.logger.Infof("%x [commit: %d, lastindex: %d, lastterm: %d] fast-forwarded commit to snapshot [index: %d, term: %d]",
+			r.id, r.raftLog.committed, r.raftLog.lastIndex(), r.raftLog.lastTerm(), s.Metadata.Index, s.Metadata.Term)
+		r.raftLog.commitTo(s.Metadata.Index)
+		return false
+	}
+
+	// A normal (voting) peer can't become a learner.
+	if !r.isLearner {
+		for _, id := range s.Metadata.ConfState.Learners {
+			if id == r.id {
+				r.logger.Errorf("%x can't become learner when restores snapshot [index: %d, term: %d]", r.id, s.Metadata.Index, s.Metadata.Term)
+				return false
+			}
+		}
+	}
+
+	r.logger.Infof("%x [commit: %d, lastindex: %d, lastterm: %d] starts to restore snapshot [index: %d, term: %d]",
+		r.id, r.raftLog.committed, r.raftLog.lastIndex(), r.raftLog.lastTerm(), s.Metadata.Index, s.Metadata.Term)
+
+	r.raftLog.restore(s)
+	r.prs = make(map[uint64]*Progress)
+	r.learnerPrs = make(map[uint64]*Progress)
+	r.restoreNode(s.Metadata.ConfState.Nodes, false)
+	r.restoreNode(s.Metadata.ConfState.Learners, true)
+	return true
+}
+
+func (r *raft) restoreNode(nodes []uint64, isLearner bool) {
+	for _, n := range nodes {
+		match, next := uint64(0), r.raftLog.lastIndex()+1
+		if n == r.id {
+			match = next - 1
+			r.isLearner = isLearner
+		}
+		r.setProgress(n, match, next, isLearner)
+		r.logger.Infof("%x restored progress of %x [%s]", r.id, n, r.getProgress(n))
+	}
+}
+
+// promotable indicates whether the state machine can be promoted to leader,
+// which is true when its own id is in the progress list.
+func (r *raft) promotable() bool {
+	_, ok := r.prs[r.id]
+	return ok
+}
+
+func (r *raft) addNode(id uint64) {
+	r.addNodeOrLearnerNode(id, false)
+}
+
+func (r *raft) addLearner(id uint64) {
+	r.addNodeOrLearnerNode(id, true)
+}
+
+func (r *raft) addNodeOrLearnerNode(id uint64, isLearner bool) {
+	r.pendingConf = false
+	pr := r.getProgress(id)
+	if pr == nil {
+		r.setProgress(id, 0, r.raftLog.lastIndex()+1, isLearner)
+	} else {
+		if isLearner && !pr.IsLearner {
+			// can only change a Learner to a Voter, not the reverse
+			r.logger.Infof("%x ignored addLearner: changing %x from raft peer to learner is not supported.", r.id, id)
+			return
+		}
+
+		if isLearner == pr.IsLearner {
+			// Ignore any redundant addNode calls (which can happen because the
+			// initial bootstrapping entries are applied twice).
+			return
+		}
+
+		// change Learner to Voter, reusing the original Learner progress
+		delete(r.learnerPrs, id)
+		pr.IsLearner = false
+		r.prs[id] = pr
+	}
+
+	if r.id == id {
+		r.isLearner = isLearner
+	}
+
+	// When a node is first added, we should mark it as recently active.
+	// Otherwise, CheckQuorum may cause us to step down if it is invoked
+	// before the added node has a chance to communicate with us.
+	pr = r.getProgress(id)
+	pr.RecentActive = true
+}
+
+func (r *raft) removeNode(id uint64) {
+	r.delProgress(id)
+	r.pendingConf = false
+
+	// do not try to commit entries or abort a leadership transfer if there are no nodes in the cluster.
+	if len(r.prs) == 0 && len(r.learnerPrs) == 0 {
+		return
+	}
+
+	// The quorum size is now smaller, so see if any pending entries can
+	// be committed.
+	if r.maybeCommit() {
+		r.bcastAppend()
+	}
+	// If the removed node is the leadTransferee, then abort the leadership transfer.
+	if r.state == StateLeader && r.leadTransferee == id {
+		r.abortLeaderTransfer()
+	}
+}
+
+func (r *raft) resetPendingConf() { r.pendingConf = false }
+
+func (r *raft) setProgress(id, match, next uint64, isLearner bool) {
+	if !isLearner {
+		delete(r.learnerPrs, id)
+		r.prs[id] = &Progress{Next: next, Match: match, ins: newInflights(r.maxInflight)}
+		return
+	}
+
+	if _, ok := r.prs[id]; ok {
+		panic(fmt.Sprintf("%x unexpected changing from voter to learner for %x", r.id, id))
+	}
+	r.learnerPrs[id] = &Progress{Next: next, Match: match, ins: newInflights(r.maxInflight), IsLearner: true}
+}
+
+func (r *raft) delProgress(id uint64) {
+	delete(r.prs, id)
+	delete(r.learnerPrs, id)
+}
+
+func (r *raft) loadState(state pb.HardState) {
+	if state.Commit < r.raftLog.committed || state.Commit > r.raftLog.lastIndex() {
+		r.logger.Panicf("%x state.commit %d is out of range [%d, %d]", r.id, state.Commit, r.raftLog.committed, r.raftLog.lastIndex())
+	}
+	r.raftLog.committed = state.Commit
+	r.Term = state.Term
+	r.Vote = state.Vote
+}
+
+// pastElectionTimeout returns true iff r.electionElapsed is greater
+// than or equal to the randomized election timeout in
+// [electiontimeout, 2 * electiontimeout - 1].
+func (r *raft) pastElectionTimeout() bool {
+	return r.electionElapsed >= r.randomizedElectionTimeout
+}
+
+func (r *raft) resetRandomizedElectionTimeout() {
+	r.randomizedElectionTimeout = r.electionTimeout + globalRand.Intn(r.electionTimeout)
+}
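+
+// For illustration (a sketch, not package code): with electionTimeout = 10
+// ticks the randomized timeout is drawn uniformly from [10, 19], and
+// pastElectionTimeout fires once electionElapsed reaches it.
+//
+//	import "math/rand"
+//
+//	func randomizedTimeout(et int) int {
+//		return et + rand.Intn(et) // uniform over [et, 2*et-1]
+//	}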
+
+// checkQuorumActive returns true if the quorum is active from
+// the view of the local raft state machine. Otherwise, it returns
+// false.
+// checkQuorumActive also resets all RecentActive to false.
+func (r *raft) checkQuorumActive() bool {
+	var act int
+
+	r.forEachProgress(func(id uint64, pr *Progress) {
+		if id == r.id { // self is always active
+			act++
+			return
+		}
+
+		if pr.RecentActive && !pr.IsLearner {
+			act++
+		}
+
+		pr.RecentActive = false
+	})
+
+	return act >= r.quorum()
+}
+
+func (r *raft) sendTimeoutNow(to uint64) {
+	r.send(pb.Message{To: to, Type: pb.MsgTimeoutNow})
+}
+
+func (r *raft) abortLeaderTransfer() {
+	r.leadTransferee = None
+}
+
+func numOfPendingConf(ents []pb.Entry) int {
+	n := 0
+	for i := range ents {
+		if ents[i].Type == pb.EntryConfChange {
+			n++
+		}
+	}
+	return n
+}
diff --git a/vendor/github.com/coreos/etcd/raft/raftpb/raft.proto b/vendor/github.com/coreos/etcd/raft/raftpb/raft.proto
new file mode 100644
index 0000000..644ce7b
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/raft/raftpb/raft.proto
@@ -0,0 +1,95 @@
+syntax = "proto2";
+package raftpb;
+
+import "gogoproto/gogo.proto";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.sizer_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+option (gogoproto.goproto_getters_all) = false;
+option (gogoproto.goproto_enum_prefix_all) = false;
+
+enum EntryType {
+	EntryNormal     = 0;
+	EntryConfChange = 1;
+}
+
+message Entry {
+	optional uint64     Term  = 2 [(gogoproto.nullable) = false]; // must be 64-bit aligned for atomic operations
+	optional uint64     Index = 3 [(gogoproto.nullable) = false]; // must be 64-bit aligned for atomic operations
+	optional EntryType  Type  = 1 [(gogoproto.nullable) = false];
+	optional bytes      Data  = 4;
+}
+
+message SnapshotMetadata {
+	optional ConfState conf_state = 1 [(gogoproto.nullable) = false];
+	optional uint64    index      = 2 [(gogoproto.nullable) = false];
+	optional uint64    term       = 3 [(gogoproto.nullable) = false];
+}
+
+message Snapshot {
+	optional bytes            data     = 1;
+	optional SnapshotMetadata metadata = 2 [(gogoproto.nullable) = false];
+}
+
+enum MessageType {
+	MsgHup             = 0;
+	MsgBeat            = 1;
+	MsgProp            = 2;
+	MsgApp             = 3;
+	MsgAppResp         = 4;
+	MsgVote            = 5;
+	MsgVoteResp        = 6;
+	MsgSnap            = 7;
+	MsgHeartbeat       = 8;
+	MsgHeartbeatResp   = 9;
+	MsgUnreachable     = 10;
+	MsgSnapStatus      = 11;
+	MsgCheckQuorum     = 12;
+	MsgTransferLeader  = 13;
+	MsgTimeoutNow      = 14;
+	MsgReadIndex       = 15;
+	MsgReadIndexResp   = 16;
+	MsgPreVote         = 17;
+	MsgPreVoteResp     = 18;
+}
+
+message Message {
+	optional MessageType type        = 1  [(gogoproto.nullable) = false];
+	optional uint64      to          = 2  [(gogoproto.nullable) = false];
+	optional uint64      from        = 3  [(gogoproto.nullable) = false];
+	optional uint64      term        = 4  [(gogoproto.nullable) = false];
+	optional uint64      logTerm     = 5  [(gogoproto.nullable) = false];
+	optional uint64      index       = 6  [(gogoproto.nullable) = false];
+	repeated Entry       entries     = 7  [(gogoproto.nullable) = false];
+	optional uint64      commit      = 8  [(gogoproto.nullable) = false];
+	optional Snapshot    snapshot    = 9  [(gogoproto.nullable) = false];
+	optional bool        reject      = 10 [(gogoproto.nullable) = false];
+	optional uint64      rejectHint  = 11 [(gogoproto.nullable) = false];
+	optional bytes       context     = 12;
+}
+
+message HardState {
+	optional uint64 term   = 1 [(gogoproto.nullable) = false];
+	optional uint64 vote   = 2 [(gogoproto.nullable) = false];
+	optional uint64 commit = 3 [(gogoproto.nullable) = false];
+}
+
+message ConfState {
+	repeated uint64 nodes    = 1;
+	repeated uint64 learners = 2;
+}
+
+enum ConfChangeType {
+	ConfChangeAddNode        = 0;
+	ConfChangeRemoveNode     = 1;
+	ConfChangeUpdateNode     = 2;
+	ConfChangeAddLearnerNode = 3;
+}
+
+message ConfChange {
+	optional uint64          ID      = 1 [(gogoproto.nullable) = false];
+	optional ConfChangeType  Type    = 2 [(gogoproto.nullable) = false];
+	optional uint64          NodeID  = 3 [(gogoproto.nullable) = false];
+	optional bytes           Context = 4;
+}
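+
+// Because marshaler_all/unmarshaler_all are enabled, each message above gets
+// generated Go Marshal/Unmarshal methods. A small sketch of round-tripping a
+// ConfChange the way RawNode bootstrapping does (illustrative values):
+//
+//	cc := raftpb.ConfChange{ID: 1, Type: raftpb.ConfChangeAddNode, NodeID: 2}
+//	data, err := cc.Marshal()
+//	if err != nil {
+//		panic(err)
+//	}
+//	// conf changes travel through the log as EntryConfChange entries
+//	ent := raftpb.Entry{Type: raftpb.EntryConfChange, Term: 1, Index: 1, Data: data}
+//
+//	var got raftpb.ConfChange
+//	if err := got.Unmarshal(ent.Data); err != nil {
+//		panic(err)
+//	}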
diff --git a/vendor/github.com/coreos/etcd/raft/rawnode.go b/vendor/github.com/coreos/etcd/raft/rawnode.go
new file mode 100644
index 0000000..925cb85
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/raft/rawnode.go
@@ -0,0 +1,266 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+	"errors"
+
+	pb "github.com/coreos/etcd/raft/raftpb"
+)
+
+// ErrStepLocalMsg is returned when trying to step a local raft message.
+var ErrStepLocalMsg = errors.New("raft: cannot step raft local message")
+
+// ErrStepPeerNotFound is returned when trying to step a response message
+// but there is no peer found in raft.prs for that node.
+var ErrStepPeerNotFound = errors.New("raft: cannot step as peer not found")
+
+// RawNode is a thread-unsafe Node.
+// The methods of this struct correspond to the methods of Node and are described
+// more fully there.
+type RawNode struct {
+	raft       *raft
+	prevSoftSt *SoftState
+	prevHardSt pb.HardState
+}
+
+func (rn *RawNode) newReady() Ready {
+	return newReady(rn.raft, rn.prevSoftSt, rn.prevHardSt)
+}
+
+func (rn *RawNode) commitReady(rd Ready) {
+	if rd.SoftState != nil {
+		rn.prevSoftSt = rd.SoftState
+	}
+	if !IsEmptyHardState(rd.HardState) {
+		rn.prevHardSt = rd.HardState
+	}
+	if rn.prevHardSt.Commit != 0 {
+		// In most cases, prevHardSt and rd.HardState will be the same
+		// because when there are new entries to apply we just sent a
+		// HardState with an updated Commit value. However, on initial
+		// startup the two are different because we don't send a HardState
+		// until something changes, but we do send any un-applied but
+		// committed entries (and previously-committed entries may be
+		// incorporated into the snapshot, even if rd.CommittedEntries is
+		// empty). Therefore we mark all committed entries as applied
+		// whether they were included in rd.HardState or not.
+		rn.raft.raftLog.appliedTo(rn.prevHardSt.Commit)
+	}
+	if len(rd.Entries) > 0 {
+		e := rd.Entries[len(rd.Entries)-1]
+		rn.raft.raftLog.stableTo(e.Index, e.Term)
+	}
+	if !IsEmptySnap(rd.Snapshot) {
+		rn.raft.raftLog.stableSnapTo(rd.Snapshot.Metadata.Index)
+	}
+	if len(rd.ReadStates) != 0 {
+		rn.raft.readStates = nil
+	}
+}
+
+// NewRawNode returns a new RawNode given a configuration and a list of raft peers.
+func NewRawNode(config *Config, peers []Peer) (*RawNode, error) {
+	if config.ID == 0 {
+		panic("config.ID must not be zero")
+	}
+	r := newRaft(config)
+	rn := &RawNode{
+		raft: r,
+	}
+	lastIndex, err := config.Storage.LastIndex()
+	if err != nil {
+		panic(err) // TODO(bdarnell)
+	}
+	// If the log is empty, this is a new RawNode (like StartNode); otherwise it's
+	// restoring an existing RawNode (like RestartNode).
+	// TODO(bdarnell): rethink RawNode initialization and whether the application needs
+	// to be able to tell us when it expects the RawNode to exist.
+	if lastIndex == 0 {
+		r.becomeFollower(1, None)
+		ents := make([]pb.Entry, len(peers))
+		for i, peer := range peers {
+			cc := pb.ConfChange{Type: pb.ConfChangeAddNode, NodeID: peer.ID, Context: peer.Context}
+			data, err := cc.Marshal()
+			if err != nil {
+				panic("unexpected marshal error")
+			}
+
+			ents[i] = pb.Entry{Type: pb.EntryConfChange, Term: 1, Index: uint64(i + 1), Data: data}
+		}
+		r.raftLog.append(ents...)
+		r.raftLog.committed = uint64(len(ents))
+		for _, peer := range peers {
+			r.addNode(peer.ID)
+		}
+	}
+
+	// Set the initial hard and soft states after performing all initialization.
+	rn.prevSoftSt = r.softState()
+	if lastIndex == 0 {
+		rn.prevHardSt = emptyState
+	} else {
+		rn.prevHardSt = r.hardState()
+	}
+
+	return rn, nil
+}
+
+// Tick advances the internal logical clock by a single tick.
+func (rn *RawNode) Tick() {
+	rn.raft.tick()
+}
+
+// TickQuiesced advances the internal logical clock by a single tick without
+// performing any other state machine processing. It allows the caller to avoid
+// periodic heartbeats and elections when all of the peers in a Raft group are
+// known to be at the same state. Expected usage is to periodically invoke Tick
+// or TickQuiesced depending on whether the group is "active" or "quiesced".
+//
+// WARNING: Be very careful about using this method as it subverts the Raft
+// state machine. You should probably be using Tick instead.
+func (rn *RawNode) TickQuiesced() {
+	rn.raft.electionElapsed++
+}
+
+// Campaign causes this RawNode to transition to candidate state.
+func (rn *RawNode) Campaign() error {
+	return rn.raft.Step(pb.Message{
+		Type: pb.MsgHup,
+	})
+}
+
+// Propose proposes data be appended to the raft log.
+func (rn *RawNode) Propose(data []byte) error {
+	return rn.raft.Step(pb.Message{
+		Type: pb.MsgProp,
+		From: rn.raft.id,
+		Entries: []pb.Entry{
+			{Data: data},
+		}})
+}
+
+// ProposeConfChange proposes a config change.
+func (rn *RawNode) ProposeConfChange(cc pb.ConfChange) error {
+	data, err := cc.Marshal()
+	if err != nil {
+		return err
+	}
+	return rn.raft.Step(pb.Message{
+		Type: pb.MsgProp,
+		Entries: []pb.Entry{
+			{Type: pb.EntryConfChange, Data: data},
+		},
+	})
+}
+
+// ApplyConfChange applies a config change to the local node.
+func (rn *RawNode) ApplyConfChange(cc pb.ConfChange) *pb.ConfState {
+	if cc.NodeID == None {
+		rn.raft.resetPendingConf()
+		return &pb.ConfState{Nodes: rn.raft.nodes()}
+	}
+	switch cc.Type {
+	case pb.ConfChangeAddNode:
+		rn.raft.addNode(cc.NodeID)
+	case pb.ConfChangeAddLearnerNode:
+		rn.raft.addLearner(cc.NodeID)
+	case pb.ConfChangeRemoveNode:
+		rn.raft.removeNode(cc.NodeID)
+	case pb.ConfChangeUpdateNode:
+		rn.raft.resetPendingConf()
+	default:
+		panic("unexpected conf type")
+	}
+	return &pb.ConfState{Nodes: rn.raft.nodes()}
+}
+
+// Step advances the state machine using the given message.
+func (rn *RawNode) Step(m pb.Message) error {
+	// ignore unexpected local messages received over the network
+	if IsLocalMsg(m.Type) {
+		return ErrStepLocalMsg
+	}
+	if pr := rn.raft.getProgress(m.From); pr != nil || !IsResponseMsg(m.Type) {
+		return rn.raft.Step(m)
+	}
+	return ErrStepPeerNotFound
+}
+
+// Ready returns the current point-in-time state of this RawNode.
+func (rn *RawNode) Ready() Ready {
+	rd := rn.newReady()
+	rn.raft.msgs = nil
+	return rd
+}
+
+// HasReady is called when the RawNode user needs to check whether any Ready is pending.
+// Checking logic in this method should be consistent with Ready.containsUpdates().
+func (rn *RawNode) HasReady() bool {
+	r := rn.raft
+	if !r.softState().equal(rn.prevSoftSt) {
+		return true
+	}
+	if hardSt := r.hardState(); !IsEmptyHardState(hardSt) && !isHardStateEqual(hardSt, rn.prevHardSt) {
+		return true
+	}
+	if r.raftLog.unstable.snapshot != nil && !IsEmptySnap(*r.raftLog.unstable.snapshot) {
+		return true
+	}
+	if len(r.msgs) > 0 || len(r.raftLog.unstableEntries()) > 0 || r.raftLog.hasNextEnts() {
+		return true
+	}
+	if len(r.readStates) != 0 {
+		return true
+	}
+	return false
+}
+
+// Advance notifies the RawNode that the application has applied and saved progress in the
+// last Ready results.
+func (rn *RawNode) Advance(rd Ready) {
+	rn.commitReady(rd)
+}
+
+// Status returns the current status of the given group.
+func (rn *RawNode) Status() *Status {
+	status := getStatus(rn.raft)
+	return &status
+}
+
+// ReportUnreachable reports the given node is not reachable for the last send.
+func (rn *RawNode) ReportUnreachable(id uint64) {
+	_ = rn.raft.Step(pb.Message{Type: pb.MsgUnreachable, From: id})
+}
+
+// ReportSnapshot reports the status of the sent snapshot.
+func (rn *RawNode) ReportSnapshot(id uint64, status SnapshotStatus) {
+	rej := status == SnapshotFailure
+
+	_ = rn.raft.Step(pb.Message{Type: pb.MsgSnapStatus, From: id, Reject: rej})
+}
+
+// TransferLeader tries to transfer leadership to the given transferee.
+func (rn *RawNode) TransferLeader(transferee uint64) {
+	_ = rn.raft.Step(pb.Message{Type: pb.MsgTransferLeader, From: transferee})
+}
+
+// ReadIndex requests a read state. The read state will be set in Ready.
+// The read state carries a read index; once the application has advanced past
+// that index, any linearizable read request issued before this one can be
+// served safely. The read state will have the same rctx attached.
+func (rn *RawNode) ReadIndex(rctx []byte) {
+	_ = rn.raft.Step(pb.Message{Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: rctx}}})
+}
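+
+// Taken together, a minimal driver loop for this API looks like the sketch
+// below. It is illustrative only: a real application drives Tick from a
+// timer, persists rd.HardState and rd.Entries to stable storage before
+// sending rd.Messages, and applies rd.CommittedEntries to its state machine.
+//
+//	storage := NewMemoryStorage()
+//	cfg := &Config{
+//		ID: 1, ElectionTick: 10, HeartbeatTick: 1,
+//		Storage: storage, MaxSizePerMsg: 4096, MaxInflightMsgs: 256,
+//	}
+//	rn, err := NewRawNode(cfg, []Peer{{ID: 1}})
+//	if err != nil {
+//		panic(err)
+//	}
+//	for {
+//		rn.Tick() // once per tick interval in real code
+//		if !rn.HasReady() {
+//			continue
+//		}
+//		rd := rn.Ready()
+//		_ = storage.Append(rd.Entries) // persist before sending messages
+//		// send rd.Messages to peers; apply rd.CommittedEntries locally
+//		rn.Advance(rd)
+//	}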
diff --git a/vendor/github.com/coreos/etcd/raft/read_only.go b/vendor/github.com/coreos/etcd/raft/read_only.go
new file mode 100644
index 0000000..ae746fa
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/raft/read_only.go
@@ -0,0 +1,118 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import pb "github.com/coreos/etcd/raft/raftpb"
+
+// ReadState provides state for a read-only query.
+// It is the caller's responsibility to call ReadIndex first before getting
+// this state from Ready; it is also the caller's duty to match the state to
+// its own request via RequestCtx, e.g. by attaching a unique id as
+// RequestCtx.
+type ReadState struct {
+	Index      uint64
+	RequestCtx []byte
+}
+
+type readIndexStatus struct {
+	req   pb.Message
+	index uint64
+	acks  map[uint64]struct{}
+}
+
+type readOnly struct {
+	option           ReadOnlyOption
+	pendingReadIndex map[string]*readIndexStatus
+	readIndexQueue   []string
+}
+
+func newReadOnly(option ReadOnlyOption) *readOnly {
+	return &readOnly{
+		option:           option,
+		pendingReadIndex: make(map[string]*readIndexStatus),
+	}
+}
+
+// addRequest adds a read only request into the readonly struct.
+// `index` is the commit index of the raft state machine when it received
+// the read only request.
+// `m` is the original read only request message from the local or remote node.
+func (ro *readOnly) addRequest(index uint64, m pb.Message) {
+	ctx := string(m.Entries[0].Data)
+	if _, ok := ro.pendingReadIndex[ctx]; ok {
+		return
+	}
+	ro.pendingReadIndex[ctx] = &readIndexStatus{index: index, req: m, acks: make(map[uint64]struct{})}
+	ro.readIndexQueue = append(ro.readIndexQueue, ctx)
+}
+
+// recvAck notifies the readonly struct that the raft state machine received
+// an acknowledgment of the heartbeat that carried the read-only request
+// context.
+func (ro *readOnly) recvAck(m pb.Message) int {
+	rs, ok := ro.pendingReadIndex[string(m.Context)]
+	if !ok {
+		return 0
+	}
+
+	rs.acks[m.From] = struct{}{}
+	// add one to include an ack from local node
+	return len(rs.acks) + 1
+}
+
+// advance advances the read only request queue kept by the readonly struct.
+// It dequeues the requests until it finds the read only request that has
+// the same context as the given `m`.
+func (ro *readOnly) advance(m pb.Message) []*readIndexStatus {
+	var (
+		i     int
+		found bool
+	)
+
+	ctx := string(m.Context)
+	rss := []*readIndexStatus{}
+
+	for _, okctx := range ro.readIndexQueue {
+		i++
+		rs, ok := ro.pendingReadIndex[okctx]
+		if !ok {
+			panic("cannot find corresponding read state from pending map")
+		}
+		rss = append(rss, rs)
+		if okctx == ctx {
+			found = true
+			break
+		}
+	}
+
+	if found {
+		ro.readIndexQueue = ro.readIndexQueue[i:]
+		for _, rs := range rss {
+			delete(ro.pendingReadIndex, string(rs.req.Entries[0].Data))
+		}
+		return rss
+	}
+
+	return nil
+}
+
+// lastPendingRequestCtx returns the context of the last pending read only
+// request in readonly struct.
+func (ro *readOnly) lastPendingRequestCtx() string {
+	if len(ro.readIndexQueue) == 0 {
+		return ""
+	}
+	return ro.readIndexQueue[len(ro.readIndexQueue)-1]
+}
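+
+// Tying the three steps together inside this package (these identifiers are
+// unexported), for a hypothetical 3-node cluster where quorum is 2:
+//
+//	ro := newReadOnly(ReadOnlySafe)
+//
+//	ctx := []byte("read-1")
+//	ro.addRequest(5, pb.Message{Entries: []pb.Entry{{Data: ctx}}}) // commit index was 5
+//
+//	ack := pb.Message{From: 2, Context: ctx}
+//	if ro.recvAck(ack) >= 2 { // peer 2 plus the implicit local ack
+//		for _, rs := range ro.advance(ack) {
+//			_ = rs.index // respond with ReadState{Index: rs.index, ...}
+//		}
+//	}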
diff --git a/vendor/github.com/coreos/etcd/raft/status.go b/vendor/github.com/coreos/etcd/raft/status.go
new file mode 100644
index 0000000..f4d3d86
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/raft/status.go
@@ -0,0 +1,88 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+	"fmt"
+
+	pb "github.com/coreos/etcd/raft/raftpb"
+)
+
+type Status struct {
+	ID uint64
+
+	pb.HardState
+	SoftState
+
+	Applied  uint64
+	Progress map[uint64]Progress
+
+	LeadTransferee uint64
+}
+
+// getStatus gets a copy of the current raft status.
+func getStatus(r *raft) Status {
+	s := Status{
+		ID:             r.id,
+		LeadTransferee: r.leadTransferee,
+	}
+
+	s.HardState = r.hardState()
+	s.SoftState = *r.softState()
+
+	s.Applied = r.raftLog.applied
+
+	if s.RaftState == StateLeader {
+		s.Progress = make(map[uint64]Progress)
+		for id, p := range r.prs {
+			s.Progress[id] = *p
+		}
+
+		for id, p := range r.learnerPrs {
+			s.Progress[id] = *p
+		}
+	}
+
+	return s
+}
+
+// MarshalJSON translates the raft status into JSON.
+// TODO: try to simplify this by introducing ID type into raft
+func (s Status) MarshalJSON() ([]byte, error) {
+	j := fmt.Sprintf(`{"id":"%x","term":%d,"vote":"%x","commit":%d,"lead":"%x","raftState":%q,"applied":%d,"progress":{`,
+		s.ID, s.Term, s.Vote, s.Commit, s.Lead, s.RaftState, s.Applied)
+
+	if len(s.Progress) == 0 {
+		j += "},"
+	} else {
+		for k, v := range s.Progress {
+			subj := fmt.Sprintf(`"%x":{"match":%d,"next":%d,"state":%q},`, k, v.Match, v.Next, v.State)
+			j += subj
+		}
+		// remove the trailing ","
+		j = j[:len(j)-1] + "},"
+	}
+
+	j += fmt.Sprintf(`"leadtransferee":"%x"}`, s.LeadTransferee)
+	return []byte(j), nil
+}
+
+func (s Status) String() string {
+	b, err := s.MarshalJSON()
+	if err != nil {
+		raftLogger.Panicf("unexpected error: %v", err)
+	}
+	return string(b)
+}
diff --git a/vendor/github.com/coreos/etcd/raft/storage.go b/vendor/github.com/coreos/etcd/raft/storage.go
new file mode 100644
index 0000000..69c3a7d
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/raft/storage.go
@@ -0,0 +1,271 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+	"errors"
+	"sync"
+
+	pb "github.com/coreos/etcd/raft/raftpb"
+)
+
+// ErrCompacted is returned by Storage.Entries/Compact when a requested
+// index is unavailable because it predates the last snapshot.
+var ErrCompacted = errors.New("requested index is unavailable due to compaction")
+
+// ErrSnapOutOfDate is returned by Storage.CreateSnapshot when a requested
+// index is older than the existing snapshot.
+var ErrSnapOutOfDate = errors.New("requested index is older than the existing snapshot")
+
+// ErrUnavailable is returned by Storage interface when the requested log entries
+// are unavailable.
+var ErrUnavailable = errors.New("requested entry at index is unavailable")
+
+// ErrSnapshotTemporarilyUnavailable is returned by the Storage interface when the required
+// snapshot is temporarily unavailable.
+var ErrSnapshotTemporarilyUnavailable = errors.New("snapshot is temporarily unavailable")
+
+// Storage is an interface that may be implemented by the application
+// to retrieve log entries from storage.
+//
+// If any Storage method returns an error, the raft instance will
+// become inoperable and refuse to participate in elections; the
+// application is responsible for cleanup and recovery in this case.
+type Storage interface {
+	// InitialState returns the saved HardState and ConfState information.
+	InitialState() (pb.HardState, pb.ConfState, error)
+	// Entries returns a slice of log entries in the range [lo,hi).
+	// maxSize limits the total size of the log entries returned, but
+	// Entries returns at least one entry if any are available.
+	Entries(lo, hi, maxSize uint64) ([]pb.Entry, error)
+	// Term returns the term of entry i, which must be in the range
+	// [FirstIndex()-1, LastIndex()]. The term of the entry before
+	// FirstIndex is retained for matching purposes even though the
+	// rest of that entry may not be available.
+	Term(i uint64) (uint64, error)
+	// LastIndex returns the index of the last entry in the log.
+	LastIndex() (uint64, error)
+	// FirstIndex returns the index of the first log entry that is
+	// possibly available via Entries (older entries have been incorporated
+	// into the latest Snapshot; if storage only contains the dummy entry the
+	// first log entry is not available).
+	FirstIndex() (uint64, error)
+	// Snapshot returns the most recent snapshot.
+	// If the snapshot is temporarily unavailable, it should return ErrSnapshotTemporarilyUnavailable,
+	// so the raft state machine knows that Storage needs some time to prepare
+	// the snapshot and will call Snapshot later.
+	Snapshot() (pb.Snapshot, error)
+}
+
+// MemoryStorage implements the Storage interface backed by an
+// in-memory array.
+type MemoryStorage struct {
+	// Protects access to all fields. Most methods of MemoryStorage are
+	// run on the raft goroutine, but Append() is run on an application
+	// goroutine.
+	sync.Mutex
+
+	hardState pb.HardState
+	snapshot  pb.Snapshot
+	// ents[i] has raft log position i+snapshot.Metadata.Index
+	ents []pb.Entry
+}
+
+// NewMemoryStorage creates an empty MemoryStorage.
+func NewMemoryStorage() *MemoryStorage {
+	return &MemoryStorage{
+		// When starting from scratch, populate the list with a dummy entry at term zero.
+		ents: make([]pb.Entry, 1),
+	}
+}
+
+// InitialState implements the Storage interface.
+func (ms *MemoryStorage) InitialState() (pb.HardState, pb.ConfState, error) {
+	return ms.hardState, ms.snapshot.Metadata.ConfState, nil
+}
+
+// SetHardState saves the current HardState.
+func (ms *MemoryStorage) SetHardState(st pb.HardState) error {
+	ms.Lock()
+	defer ms.Unlock()
+	ms.hardState = st
+	return nil
+}
+
+// Entries implements the Storage interface.
+func (ms *MemoryStorage) Entries(lo, hi, maxSize uint64) ([]pb.Entry, error) {
+	ms.Lock()
+	defer ms.Unlock()
+	offset := ms.ents[0].Index
+	if lo <= offset {
+		return nil, ErrCompacted
+	}
+	if hi > ms.lastIndex()+1 {
+		raftLogger.Panicf("entries' hi(%d) is out of bound lastindex(%d)", hi, ms.lastIndex())
+	}
+	// the log contains only the dummy entry.
+	if len(ms.ents) == 1 {
+		return nil, ErrUnavailable
+	}
+
+	ents := ms.ents[lo-offset : hi-offset]
+	return limitSize(ents, maxSize), nil
+}
+
+// Term implements the Storage interface.
+func (ms *MemoryStorage) Term(i uint64) (uint64, error) {
+	ms.Lock()
+	defer ms.Unlock()
+	offset := ms.ents[0].Index
+	if i < offset {
+		return 0, ErrCompacted
+	}
+	if int(i-offset) >= len(ms.ents) {
+		return 0, ErrUnavailable
+	}
+	return ms.ents[i-offset].Term, nil
+}
+
+// LastIndex implements the Storage interface.
+func (ms *MemoryStorage) LastIndex() (uint64, error) {
+	ms.Lock()
+	defer ms.Unlock()
+	return ms.lastIndex(), nil
+}
+
+func (ms *MemoryStorage) lastIndex() uint64 {
+	return ms.ents[0].Index + uint64(len(ms.ents)) - 1
+}
+
+// FirstIndex implements the Storage interface.
+func (ms *MemoryStorage) FirstIndex() (uint64, error) {
+	ms.Lock()
+	defer ms.Unlock()
+	return ms.firstIndex(), nil
+}
+
+func (ms *MemoryStorage) firstIndex() uint64 {
+	return ms.ents[0].Index + 1
+}
+
+// Snapshot implements the Storage interface.
+func (ms *MemoryStorage) Snapshot() (pb.Snapshot, error) {
+	ms.Lock()
+	defer ms.Unlock()
+	return ms.snapshot, nil
+}
+
+// ApplySnapshot overwrites the contents of this Storage object with
+// those of the given snapshot.
+func (ms *MemoryStorage) ApplySnapshot(snap pb.Snapshot) error {
+	ms.Lock()
+	defer ms.Unlock()
+
+	// refuse to apply a snapshot older than the current one
+	msIndex := ms.snapshot.Metadata.Index
+	snapIndex := snap.Metadata.Index
+	if msIndex >= snapIndex {
+		return ErrSnapOutOfDate
+	}
+
+	ms.snapshot = snap
+	ms.ents = []pb.Entry{{Term: snap.Metadata.Term, Index: snap.Metadata.Index}}
+	return nil
+}
+
+// CreateSnapshot makes a snapshot which can be retrieved with Snapshot() and
+// can be used to reconstruct the state at that point.
+// If any configuration changes have been made since the last compaction,
+// the result of the last ApplyConfChange must be passed in.
+func (ms *MemoryStorage) CreateSnapshot(i uint64, cs *pb.ConfState, data []byte) (pb.Snapshot, error) {
+	ms.Lock()
+	defer ms.Unlock()
+	if i <= ms.snapshot.Metadata.Index {
+		return pb.Snapshot{}, ErrSnapOutOfDate
+	}
+
+	offset := ms.ents[0].Index
+	if i > ms.lastIndex() {
+		raftLogger.Panicf("snapshot %d is out of bound lastindex(%d)", i, ms.lastIndex())
+	}
+
+	ms.snapshot.Metadata.Index = i
+	ms.snapshot.Metadata.Term = ms.ents[i-offset].Term
+	if cs != nil {
+		ms.snapshot.Metadata.ConfState = *cs
+	}
+	ms.snapshot.Data = data
+	return ms.snapshot, nil
+}
+
+// Compact discards all log entries prior to compactIndex.
+// It is the application's responsibility to not attempt to compact an index
+// greater than raftLog.applied.
+func (ms *MemoryStorage) Compact(compactIndex uint64) error {
+	ms.Lock()
+	defer ms.Unlock()
+	offset := ms.ents[0].Index
+	if compactIndex <= offset {
+		return ErrCompacted
+	}
+	if compactIndex > ms.lastIndex() {
+		raftLogger.Panicf("compact %d is out of bound lastindex(%d)", compactIndex, ms.lastIndex())
+	}
+
+	i := compactIndex - offset
+	ents := make([]pb.Entry, 1, 1+uint64(len(ms.ents))-i)
+	ents[0].Index = ms.ents[i].Index
+	ents[0].Term = ms.ents[i].Term
+	ents = append(ents, ms.ents[i+1:]...)
+	ms.ents = ents
+	return nil
+}
+
+// Append the new entries to storage.
+// TODO (xiangli): ensure the entries are continuous and
+// entries[0].Index > ms.entries[0].Index
+func (ms *MemoryStorage) Append(entries []pb.Entry) error {
+	if len(entries) == 0 {
+		return nil
+	}
+
+	ms.Lock()
+	defer ms.Unlock()
+
+	first := ms.firstIndex()
+	last := entries[0].Index + uint64(len(entries)) - 1
+
+	// shortcut if there is no new entry.
+	if last < first {
+		return nil
+	}
+	// truncate compacted entries
+	if first > entries[0].Index {
+		entries = entries[first-entries[0].Index:]
+	}
+
+	offset := entries[0].Index - ms.ents[0].Index
+	switch {
+	case uint64(len(ms.ents)) > offset:
+		ms.ents = append([]pb.Entry{}, ms.ents[:offset]...)
+		ms.ents = append(ms.ents, entries...)
+	case uint64(len(ms.ents)) == offset:
+		ms.ents = append(ms.ents, entries...)
+	default:
+		raftLogger.Panicf("missing log entry [last: %d, append at: %d]",
+			ms.lastIndex(), entries[0].Index)
+	}
+	return nil
+}
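+
+// A short sketch of the MemoryStorage lifecycle (illustrative values): after
+// Compact(2), entries at index <= 2 survive only inside the snapshot, so
+// FirstIndex moves from 1 to 3.
+//
+//	ms := NewMemoryStorage()
+//	_ = ms.Append([]pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 1}, {Index: 3, Term: 2}})
+//
+//	first, _ := ms.FirstIndex() // 1
+//	last, _ := ms.LastIndex()   // 3
+//
+//	_, _ = ms.CreateSnapshot(2, nil, []byte("app state")) // snapshot at index 2
+//	_ = ms.Compact(2)
+//
+//	first, _ = ms.FirstIndex() // now 3; last is still 3
+//	_, _ = first, last         // silence unused-variable errors in this sketch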
diff --git a/vendor/github.com/coreos/etcd/raft/util.go b/vendor/github.com/coreos/etcd/raft/util.go
new file mode 100644
index 0000000..f4141fe
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/raft/util.go
@@ -0,0 +1,129 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+	"bytes"
+	"fmt"
+
+	pb "github.com/coreos/etcd/raft/raftpb"
+)
+
+func (st StateType) MarshalJSON() ([]byte, error) {
+	return []byte(fmt.Sprintf("%q", st.String())), nil
+}
+
+// uint64Slice implements sort interface
+type uint64Slice []uint64
+
+func (p uint64Slice) Len() int           { return len(p) }
+func (p uint64Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p uint64Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+
+func min(a, b uint64) uint64 {
+	if a > b {
+		return b
+	}
+	return a
+}
+
+func max(a, b uint64) uint64 {
+	if a > b {
+		return a
+	}
+	return b
+}
+
+func IsLocalMsg(msgt pb.MessageType) bool {
+	return msgt == pb.MsgHup || msgt == pb.MsgBeat || msgt == pb.MsgUnreachable ||
+		msgt == pb.MsgSnapStatus || msgt == pb.MsgCheckQuorum
+}
+
+func IsResponseMsg(msgt pb.MessageType) bool {
+	return msgt == pb.MsgAppResp || msgt == pb.MsgVoteResp || msgt == pb.MsgHeartbeatResp || msgt == pb.MsgUnreachable || msgt == pb.MsgPreVoteResp
+}
+
+// voteRespMsgType maps vote and prevote message types to their corresponding responses.
+func voteRespMsgType(msgt pb.MessageType) pb.MessageType {
+	switch msgt {
+	case pb.MsgVote:
+		return pb.MsgVoteResp
+	case pb.MsgPreVote:
+		return pb.MsgPreVoteResp
+	default:
+		panic(fmt.Sprintf("not a vote message: %s", msgt))
+	}
+}
+
+// EntryFormatter can be implemented by the application to provide human-readable formatting
+// of entry data. Nil is a valid EntryFormatter and will use a default format.
+type EntryFormatter func([]byte) string
+
+// DescribeMessage returns a concise human-readable description of a
+// Message for debugging.
+func DescribeMessage(m pb.Message, f EntryFormatter) string {
+	var buf bytes.Buffer
+	fmt.Fprintf(&buf, "%x->%x %v Term:%d Log:%d/%d", m.From, m.To, m.Type, m.Term, m.LogTerm, m.Index)
+	if m.Reject {
+		fmt.Fprintf(&buf, " Rejected")
+		if m.RejectHint != 0 {
+			fmt.Fprintf(&buf, "(Hint:%d)", m.RejectHint)
+		}
+	}
+	if m.Commit != 0 {
+		fmt.Fprintf(&buf, " Commit:%d", m.Commit)
+	}
+	if len(m.Entries) > 0 {
+		fmt.Fprintf(&buf, " Entries:[")
+		for i, e := range m.Entries {
+			if i != 0 {
+				buf.WriteString(", ")
+			}
+			buf.WriteString(DescribeEntry(e, f))
+		}
+		fmt.Fprintf(&buf, "]")
+	}
+	if !IsEmptySnap(m.Snapshot) {
+		fmt.Fprintf(&buf, " Snapshot:%v", m.Snapshot)
+	}
+	return buf.String()
+}
+
+// DescribeEntry returns a concise human-readable description of an
+// Entry for debugging.
+func DescribeEntry(e pb.Entry, f EntryFormatter) string {
+	var formatted string
+	if e.Type == pb.EntryNormal && f != nil {
+		formatted = f(e.Data)
+	} else {
+		formatted = fmt.Sprintf("%q", e.Data)
+	}
+	return fmt.Sprintf("%d/%d %s %s", e.Term, e.Index, e.Type, formatted)
+}
+
+func limitSize(ents []pb.Entry, maxSize uint64) []pb.Entry {
+	if len(ents) == 0 {
+		return ents
+	}
+	size := ents[0].Size()
+	var limit int
+	for limit = 1; limit < len(ents); limit++ {
+		size += ents[limit].Size()
+		if uint64(size) > maxSize {
+			break
+		}
+	}
+	return ents[:limit]
+}
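+
+// One subtlety worth noting: limitSize always returns the first entry, even
+// when that entry alone exceeds maxSize; this is what backs the "at least
+// one entry" contract documented on Storage.Entries. A package-internal
+// sketch:
+//
+//	ents := []pb.Entry{
+//		{Index: 1, Data: make([]byte, 100)},
+//		{Index: 2, Data: make([]byte, 100)},
+//	}
+//	got := limitSize(ents, 1) // maxSize smaller than even the first entry
+//	// len(got) == 1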
diff --git a/vendor/github.com/coreos/etcd/rafthttp/coder.go b/vendor/github.com/coreos/etcd/rafthttp/coder.go
new file mode 100644
index 0000000..86ede97
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/rafthttp/coder.go
@@ -0,0 +1,27 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafthttp
+
+import "github.com/coreos/etcd/raft/raftpb"
+
+type encoder interface {
+	// encode encodes the given message to an output stream.
+	encode(m *raftpb.Message) error
+}
+
+type decoder interface {
+	// decode decodes the message from an input stream.
+	decode() (raftpb.Message, error)
+}
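+
+// A minimal, hypothetical pair satisfying these interfaces: an 8-byte
+// big-endian length prefix followed by the marshaled message (the package's
+// real stream encoders differ in detail). Assumes "encoding/binary" and "io"
+// are imported.
+//
+//	type frameEncoder struct{ w io.Writer }
+//
+//	func (e *frameEncoder) encode(m *raftpb.Message) error {
+//		b, err := m.Marshal()
+//		if err != nil {
+//			return err
+//		}
+//		if err := binary.Write(e.w, binary.BigEndian, uint64(len(b))); err != nil {
+//			return err
+//		}
+//		_, err = e.w.Write(b)
+//		return err
+//	}
+//
+//	type frameDecoder struct{ r io.Reader }
+//
+//	func (d *frameDecoder) decode() (raftpb.Message, error) {
+//		var m raftpb.Message
+//		var n uint64
+//		if err := binary.Read(d.r, binary.BigEndian, &n); err != nil {
+//			return m, err
+//		}
+//		b := make([]byte, n)
+//		if _, err := io.ReadFull(d.r, b); err != nil {
+//			return m, err
+//		}
+//		return m, m.Unmarshal(b)
+//	}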
diff --git a/vendor/github.com/coreos/etcd/rafthttp/doc.go b/vendor/github.com/coreos/etcd/rafthttp/doc.go
new file mode 100644
index 0000000..a9486a8
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/rafthttp/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package rafthttp implements the HTTP transport layer for the etcd/raft package.
+package rafthttp
diff --git a/vendor/github.com/coreos/etcd/rafthttp/http.go b/vendor/github.com/coreos/etcd/rafthttp/http.go
new file mode 100644
index 0000000..223a5de
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/rafthttp/http.go
@@ -0,0 +1,366 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafthttp
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"path"
+	"strings"
+	"time"
+
+	pioutil "github.com/coreos/etcd/pkg/ioutil"
+	"github.com/coreos/etcd/pkg/types"
+	"github.com/coreos/etcd/raft/raftpb"
+	"github.com/coreos/etcd/snap"
+	"github.com/coreos/etcd/version"
+)
+
+const (
+	// connReadLimitByte limits the number of bytes
+	// a single read can read out.
+	//
+	// 64KB should be large enough to avoid a throughput
+	// bottleneck while remaining small enough to avoid
+	// a read timeout.
+	connReadLimitByte = 64 * 1024
+)
+
+var (
+	RaftPrefix         = "/raft"
+	ProbingPrefix      = path.Join(RaftPrefix, "probing")
+	RaftStreamPrefix   = path.Join(RaftPrefix, "stream")
+	RaftSnapshotPrefix = path.Join(RaftPrefix, "snapshot")
+
+	errIncompatibleVersion = errors.New("incompatible version")
+	errClusterIDMismatch   = errors.New("cluster ID mismatch")
+)
+
+type peerGetter interface {
+	Get(id types.ID) Peer
+}
+
+type writerToResponse interface {
+	WriteTo(w http.ResponseWriter)
+}
+
+type pipelineHandler struct {
+	tr  Transporter
+	r   Raft
+	cid types.ID
+}
+
+// newPipelineHandler returns a handler for handling raft messages
+// from pipeline for RaftPrefix.
+//
+// The handler reads out the raft message from request body,
+// and forwards it to the given raft state machine for processing.
+func newPipelineHandler(tr Transporter, r Raft, cid types.ID) http.Handler {
+	return &pipelineHandler{
+		tr:  tr,
+		r:   r,
+		cid: cid,
+	}
+}
+
+func (h *pipelineHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	if r.Method != "POST" {
+		w.Header().Set("Allow", "POST")
+		http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
+		return
+	}
+
+	w.Header().Set("X-Etcd-Cluster-ID", h.cid.String())
+
+	if err := checkClusterCompatibilityFromHeader(r.Header, h.cid); err != nil {
+		http.Error(w, err.Error(), http.StatusPreconditionFailed)
+		return
+	}
+
+	addRemoteFromRequest(h.tr, r)
+
+	// Limit the data size that can be read from the request body, which ensures that reads
+	// from the connection do not time out accidentally due to blocking in the underlying implementation.
+	limitedr := pioutil.NewLimitedBufferReader(r.Body, connReadLimitByte)
+	b, err := ioutil.ReadAll(limitedr)
+	if err != nil {
+		plog.Errorf("failed to read raft message (%v)", err)
+		http.Error(w, "error reading raft message", http.StatusBadRequest)
+		recvFailures.WithLabelValues(r.RemoteAddr).Inc()
+		return
+	}
+
+	var m raftpb.Message
+	if err := m.Unmarshal(b); err != nil {
+		plog.Errorf("failed to unmarshal raft message (%v)", err)
+		http.Error(w, "error unmarshaling raft message", http.StatusBadRequest)
+		recvFailures.WithLabelValues(r.RemoteAddr).Inc()
+		return
+	}
+
+	receivedBytes.WithLabelValues(types.ID(m.From).String()).Add(float64(len(b)))
+
+	if err := h.r.Process(context.TODO(), m); err != nil {
+		switch v := err.(type) {
+		case writerToResponse:
+			v.WriteTo(w)
+		default:
+			plog.Warningf("failed to process raft message (%v)", err)
+			http.Error(w, "error processing raft message", http.StatusInternalServerError)
+			w.(http.Flusher).Flush()
+			// disconnect the http stream
+			panic(err)
+		}
+		return
+	}
+
+	// Write the StatusNoContent header after the message has been processed by
+	// raft, which lets the client report MsgSnap status.
+	w.WriteHeader(http.StatusNoContent)
+}
+
+type snapshotHandler struct {
+	tr          Transporter
+	r           Raft
+	snapshotter *snap.Snapshotter
+	cid         types.ID
+}
+
+func newSnapshotHandler(tr Transporter, r Raft, snapshotter *snap.Snapshotter, cid types.ID) http.Handler {
+	return &snapshotHandler{
+		tr:          tr,
+		r:           r,
+		snapshotter: snapshotter,
+		cid:         cid,
+	}
+}
+
+const unknownSnapshotSender = "UNKNOWN_SNAPSHOT_SENDER"
+
+// ServeHTTP serves HTTP request to receive and process snapshot message.
+//
+// If the request sender dies without closing the underlying TCP connection,
+// the handler keeps waiting for the request body until TCP keepalive
+// detects the broken connection after several minutes.
+// This is acceptable because
+// 1. snapshot messages sent through other TCP connections could still be
+// received and processed.
+// 2. this case should happen rarely, so no further optimization is done.
+func (h *snapshotHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	start := time.Now()
+
+	if r.Method != "POST" {
+		w.Header().Set("Allow", "POST")
+		http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
+		snapshotReceiveFailures.WithLabelValues(unknownSnapshotSender).Inc()
+		return
+	}
+
+	w.Header().Set("X-Etcd-Cluster-ID", h.cid.String())
+
+	if err := checkClusterCompatibilityFromHeader(r.Header, h.cid); err != nil {
+		http.Error(w, err.Error(), http.StatusPreconditionFailed)
+		snapshotReceiveFailures.WithLabelValues(unknownSnapshotSender).Inc()
+		return
+	}
+
+	addRemoteFromRequest(h.tr, r)
+
+	dec := &messageDecoder{r: r.Body}
+	// let snapshots be very large since they can exceed 512MB for large installations
+	m, err := dec.decodeLimit(uint64(1 << 63))
+	from := types.ID(m.From).String()
+	if err != nil {
+		msg := fmt.Sprintf("failed to decode raft message (%v)", err)
+		plog.Errorf(msg)
+		http.Error(w, msg, http.StatusBadRequest)
+		recvFailures.WithLabelValues(r.RemoteAddr).Inc()
+		snapshotReceiveFailures.WithLabelValues(from).Inc()
+		return
+	}
+
+	receivedBytes.WithLabelValues(from).Add(float64(m.Size()))
+
+	if m.Type != raftpb.MsgSnap {
+		plog.Errorf("unexpected raft message type %s on snapshot path", m.Type)
+		http.Error(w, "wrong raft message type", http.StatusBadRequest)
+		snapshotReceiveFailures.WithLabelValues(from).Inc()
+		return
+	}
+
+	plog.Infof("receiving database snapshot [index:%d, from %s] ...", m.Snapshot.Metadata.Index, types.ID(m.From))
+	// save incoming database snapshot.
+	n, err := h.snapshotter.SaveDBFrom(r.Body, m.Snapshot.Metadata.Index)
+	if err != nil {
+		msg := fmt.Sprintf("failed to save KV snapshot (%v)", err)
+		plog.Error(msg)
+		http.Error(w, msg, http.StatusInternalServerError)
+		snapshotReceiveFailures.WithLabelValues(from).Inc()
+		return
+	}
+	receivedBytes.WithLabelValues(from).Add(float64(n))
+	plog.Infof("received and saved database snapshot [index: %d, from: %s] successfully", m.Snapshot.Metadata.Index, types.ID(m.From))
+
+	if err := h.r.Process(context.TODO(), m); err != nil {
+		switch v := err.(type) {
+		// Process may return writerToResponse error when doing some
+		// additional checks before calling raft.Node.Step.
+		case writerToResponse:
+			v.WriteTo(w)
+		default:
+			msg := fmt.Sprintf("failed to process raft message (%v)", err)
+			plog.Warningf(msg)
+			http.Error(w, msg, http.StatusInternalServerError)
+			snapshotReceiveFailures.WithLabelValues(from).Inc()
+		}
+		return
+	}
+	// Write the StatusNoContent header after the message has been processed by
+	// raft, which lets the client report MsgSnap status.
+	w.WriteHeader(http.StatusNoContent)
+
+	snapshotReceive.WithLabelValues(from).Inc()
+	snapshotReceiveSeconds.WithLabelValues(from).Observe(time.Since(start).Seconds())
+}
+
+type streamHandler struct {
+	tr         *Transport
+	peerGetter peerGetter
+	r          Raft
+	id         types.ID
+	cid        types.ID
+}
+
+func newStreamHandler(tr *Transport, pg peerGetter, r Raft, id, cid types.ID) http.Handler {
+	return &streamHandler{
+		tr:         tr,
+		peerGetter: pg,
+		r:          r,
+		id:         id,
+		cid:        cid,
+	}
+}
+
+func (h *streamHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	if r.Method != "GET" {
+		w.Header().Set("Allow", "GET")
+		http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
+		return
+	}
+
+	w.Header().Set("X-Server-Version", version.Version)
+	w.Header().Set("X-Etcd-Cluster-ID", h.cid.String())
+
+	if err := checkClusterCompatibilityFromHeader(r.Header, h.cid); err != nil {
+		http.Error(w, err.Error(), http.StatusPreconditionFailed)
+		return
+	}
+
+	var t streamType
+	switch path.Dir(r.URL.Path) {
+	case streamTypeMsgAppV2.endpoint():
+		t = streamTypeMsgAppV2
+	case streamTypeMessage.endpoint():
+		t = streamTypeMessage
+	default:
+		plog.Debugf("ignored unexpected streaming request path %s", r.URL.Path)
+		http.Error(w, "invalid path", http.StatusNotFound)
+		return
+	}
+
+	fromStr := path.Base(r.URL.Path)
+	from, err := types.IDFromString(fromStr)
+	if err != nil {
+		plog.Errorf("failed to parse from %s into ID (%v)", fromStr, err)
+		http.Error(w, "invalid from", http.StatusNotFound)
+		return
+	}
+	if h.r.IsIDRemoved(uint64(from)) {
+		plog.Warningf("rejected the stream from peer %s since it was removed", from)
+		http.Error(w, "removed member", http.StatusGone)
+		return
+	}
+	p := h.peerGetter.Get(from)
+	if p == nil {
+		// This may happen in the following cases:
+		// 1. user starts a remote peer that belongs to a different cluster
+		// with the same cluster ID.
+		// 2. local etcd falls behind the cluster and cannot recognize
+		// the members that joined after its current progress.
+		if urls := r.Header.Get("X-PeerURLs"); urls != "" {
+			h.tr.AddRemote(from, strings.Split(urls, ","))
+		}
+		plog.Errorf("failed to find member %s in cluster %s", from, h.cid)
+		http.Error(w, "error sender not found", http.StatusNotFound)
+		return
+	}
+
+	wto := h.id.String()
+	if gto := r.Header.Get("X-Raft-To"); gto != wto {
+		plog.Errorf("streaming request ignored (ID mismatch got %s want %s)", gto, wto)
+		http.Error(w, "to field mismatch", http.StatusPreconditionFailed)
+		return
+	}
+
+	w.WriteHeader(http.StatusOK)
+	w.(http.Flusher).Flush()
+
+	c := newCloseNotifier()
+	conn := &outgoingConn{
+		t:       t,
+		Writer:  w,
+		Flusher: w.(http.Flusher),
+		Closer:  c,
+	}
+	p.attachOutgoingConn(conn)
+	<-c.closeNotify()
+}
+
+// checkClusterCompatibilityFromHeader checks the cluster compatibility of
+// the local member from the given header.
+// It checks whether the version of the local member is compatible with
+// the versions in the header, and whether the cluster ID of the local
+// member matches the one in the header.
+func checkClusterCompatibilityFromHeader(header http.Header, cid types.ID) error {
+	if err := checkVersionCompability(header.Get("X-Server-From"), serverVersion(header), minClusterVersion(header)); err != nil {
+		plog.Errorf("request version incompatibility (%v)", err)
+		return errIncompatibleVersion
+	}
+	if gcid := header.Get("X-Etcd-Cluster-ID"); gcid != cid.String() {
+		plog.Errorf("request cluster ID mismatch (got %s want %s)", gcid, cid)
+		return errClusterIDMismatch
+	}
+	return nil
+}
+
+type closeNotifier struct {
+	done chan struct{}
+}
+
+func newCloseNotifier() *closeNotifier {
+	return &closeNotifier{
+		done: make(chan struct{}),
+	}
+}
+
+func (n *closeNotifier) Close() error {
+	close(n.done)
+	return nil
+}
+
+func (n *closeNotifier) closeNotify() <-chan struct{} { return n.done }
diff --git a/vendor/github.com/coreos/etcd/rafthttp/metrics.go b/vendor/github.com/coreos/etcd/rafthttp/metrics.go
new file mode 100644
index 0000000..2066663
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/rafthttp/metrics.go
@@ -0,0 +1,143 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafthttp
+
+import "github.com/prometheus/client_golang/prometheus"
+
+var (
+	sentBytes = prometheus.NewCounterVec(prometheus.CounterOpts{
+		Namespace: "etcd",
+		Subsystem: "network",
+		Name:      "peer_sent_bytes_total",
+		Help:      "The total number of bytes sent to peers.",
+	},
+		[]string{"To"},
+	)
+
+	receivedBytes = prometheus.NewCounterVec(prometheus.CounterOpts{
+		Namespace: "etcd",
+		Subsystem: "network",
+		Name:      "peer_received_bytes_total",
+		Help:      "The total number of bytes received from peers.",
+	},
+		[]string{"From"},
+	)
+
+	sentFailures = prometheus.NewCounterVec(prometheus.CounterOpts{
+		Namespace: "etcd",
+		Subsystem: "network",
+		Name:      "peer_sent_failures_total",
+		Help:      "The total number of send failures from peers.",
+	},
+		[]string{"To"},
+	)
+
+	recvFailures = prometheus.NewCounterVec(prometheus.CounterOpts{
+		Namespace: "etcd",
+		Subsystem: "network",
+		Name:      "peer_received_failures_total",
+		Help:      "The total number of receive failures from peers.",
+	},
+		[]string{"From"},
+	)
+
+	snapshotSend = prometheus.NewCounterVec(prometheus.CounterOpts{
+		Namespace: "etcd",
+		Subsystem: "network",
+		Name:      "snapshot_send_success",
+		Help:      "Total number of successful snapshot sends",
+	},
+		[]string{"To"},
+	)
+
+	snapshotSendFailures = prometheus.NewCounterVec(prometheus.CounterOpts{
+		Namespace: "etcd",
+		Subsystem: "network",
+		Name:      "snapshot_send_failures",
+		Help:      "Total number of snapshot send failures",
+	},
+		[]string{"To"},
+	)
+
+	snapshotSendSeconds = prometheus.NewHistogramVec(prometheus.HistogramOpts{
+		Namespace: "etcd",
+		Subsystem: "network",
+		Name:      "snapshot_send_total_duration_seconds",
+		Help:      "Total latency distributions of v3 snapshot sends",
+
+		// lowest bucket start of upper bound 0.1 sec (100 ms) with factor 2
+		// highest bucket start of 0.1 sec * 2^9 == 51.2 sec
+		Buckets: prometheus.ExponentialBuckets(0.1, 2, 10),
+	},
+		[]string{"To"},
+	)
+
+	snapshotReceive = prometheus.NewCounterVec(prometheus.CounterOpts{
+		Namespace: "etcd",
+		Subsystem: "network",
+		Name:      "snapshot_receive_success",
+		Help:      "Total number of successful snapshot receives",
+	},
+		[]string{"From"},
+	)
+
+	snapshotReceiveFailures = prometheus.NewCounterVec(prometheus.CounterOpts{
+		Namespace: "etcd",
+		Subsystem: "network",
+		Name:      "snapshot_receive_failures",
+		Help:      "Total number of snapshot receive failures",
+	},
+		[]string{"From"},
+	)
+
+	snapshotReceiveSeconds = prometheus.NewHistogramVec(prometheus.HistogramOpts{
+		Namespace: "etcd",
+		Subsystem: "network",
+		Name:      "snapshot_receive_total_duration_seconds",
+		Help:      "Total latency distributions of v3 snapshot receives",
+
+		// lowest bucket start of upper bound 0.1 sec (100 ms) with factor 2
+		// highest bucket start of 0.1 sec * 2^9 == 51.2 sec
+		Buckets: prometheus.ExponentialBuckets(0.1, 2, 10),
+	},
+		[]string{"From"},
+	)
+
+	rtts = prometheus.NewHistogramVec(prometheus.HistogramOpts{
+		Namespace: "etcd",
+		Subsystem: "network",
+		Name:      "peer_round_trip_time_seconds",
+		Help:      "Round-Trip-Time histogram between peers.",
+		Buckets:   prometheus.ExponentialBuckets(0.0001, 2, 14),
+	},
+		[]string{"To"},
+	)
+)
+
+func init() {
+	prometheus.MustRegister(sentBytes)
+	prometheus.MustRegister(receivedBytes)
+	prometheus.MustRegister(sentFailures)
+	prometheus.MustRegister(recvFailures)
+
+	prometheus.MustRegister(snapshotSend)
+	prometheus.MustRegister(snapshotSendFailures)
+	prometheus.MustRegister(snapshotSendSeconds)
+	prometheus.MustRegister(snapshotReceive)
+	prometheus.MustRegister(snapshotReceiveFailures)
+	prometheus.MustRegister(snapshotReceiveSeconds)
+
+	prometheus.MustRegister(rtts)
+}
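+
+// Illustrative sketch, not part of the upstream file: the counter and
+// histogram vectors above are labeled by peer ID, so the send paths in this
+// package record traffic roughly as follows (peerID, m and start are
+// placeholders):
+//
+//	sentBytes.WithLabelValues(peerID.String()).Add(float64(m.Size()))
+//	sentFailures.WithLabelValues(peerID.String()).Inc()
+//	snapshotSendSeconds.WithLabelValues(peerID.String()).Observe(time.Since(start).Seconds())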
diff --git a/vendor/github.com/coreos/etcd/rafthttp/msg_codec.go b/vendor/github.com/coreos/etcd/rafthttp/msg_codec.go
new file mode 100644
index 0000000..ef59bc8
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/rafthttp/msg_codec.go
@@ -0,0 +1,68 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafthttp
+
+import (
+	"encoding/binary"
+	"errors"
+	"io"
+
+	"github.com/coreos/etcd/pkg/pbutil"
+	"github.com/coreos/etcd/raft/raftpb"
+)
+
+// messageEncoder is an encoder that can encode all kinds of messages.
+// It MUST be used with a paired messageDecoder.
+type messageEncoder struct {
+	w io.Writer
+}
+
+func (enc *messageEncoder) encode(m *raftpb.Message) error {
+	if err := binary.Write(enc.w, binary.BigEndian, uint64(m.Size())); err != nil {
+		return err
+	}
+	_, err := enc.w.Write(pbutil.MustMarshal(m))
+	return err
+}
+
+// messageDecoder is a decoder that can decode all kinds of messages.
+type messageDecoder struct {
+	r io.Reader
+}
+
+var (
+	readBytesLimit     uint64 = 512 * 1024 * 1024 // 512 MB
+	ErrExceedSizeLimit        = errors.New("rafthttp: message size limit exceeded")
+)
+
+func (dec *messageDecoder) decode() (raftpb.Message, error) {
+	return dec.decodeLimit(readBytesLimit)
+}
+
+func (dec *messageDecoder) decodeLimit(numBytes uint64) (raftpb.Message, error) {
+	var m raftpb.Message
+	var l uint64
+	if err := binary.Read(dec.r, binary.BigEndian, &l); err != nil {
+		return m, err
+	}
+	if l > numBytes {
+		return m, ErrExceedSizeLimit
+	}
+	buf := make([]byte, int(l))
+	if _, err := io.ReadFull(dec.r, buf); err != nil {
+		return m, err
+	}
+	return m, m.Unmarshal(buf)
+}
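+
+// Illustrative round trip, not part of the upstream file. The encoder writes
+// a big-endian uint64 length prefix followed by the marshaled message, so a
+// paired decoder can read it back from any io.Reader:
+//
+//	var buf bytes.Buffer
+//	enc := &messageEncoder{w: &buf}
+//	_ = enc.encode(&raftpb.Message{Type: raftpb.MsgHeartbeat})
+//	dec := &messageDecoder{r: &buf}
+//	m, err := dec.decode() // m.Type == raftpb.MsgHeartbeat, err == nil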
diff --git a/vendor/github.com/coreos/etcd/rafthttp/msgappv2_codec.go b/vendor/github.com/coreos/etcd/rafthttp/msgappv2_codec.go
new file mode 100644
index 0000000..013ffe7
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/rafthttp/msgappv2_codec.go
@@ -0,0 +1,248 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafthttp
+
+import (
+	"encoding/binary"
+	"fmt"
+	"io"
+	"time"
+
+	"github.com/coreos/etcd/etcdserver/stats"
+	"github.com/coreos/etcd/pkg/pbutil"
+	"github.com/coreos/etcd/pkg/types"
+	"github.com/coreos/etcd/raft/raftpb"
+)
+
+const (
+	msgTypeLinkHeartbeat uint8 = 0
+	msgTypeAppEntries    uint8 = 1
+	msgTypeApp           uint8 = 2
+
+	msgAppV2BufSize = 1024 * 1024
+)
+
+// The msgappv2 stream sends three types of messages: linkHeartbeatMessage,
+// AppEntries and MsgApp. AppEntries is the MsgApp that is sent in the
+// replicate state in raft, whose index and term are fully predictable.
+//
+// Data format of linkHeartbeatMessage:
+// | offset | bytes | description |
+// +--------+-------+-------------+
+// | 0      | 1     | \x00        |
+//
+// Data format of AppEntries:
+// | offset | bytes | description |
+// +--------+-------+-------------+
+// | 0      | 1     | \x01        |
+// | 1      | 8     | length of entries |
+// | 9      | 8     | length of first entry |
+// | 17     | n1    | first entry |
+// ...
+// | x      | 8     | length of k-th entry data |
+// | x+8    | nk    | k-th entry data |
+// | x+8+nk | 8     | commit index |
+//
+// Data format of MsgApp:
+// | offset | bytes | description |
+// +--------+-------+-------------+
+// | 0      | 1     | \x02        |
+// | 1      | 8     | length of encoded message |
+// | 9      | n     | encoded message |
+type msgAppV2Encoder struct {
+	w  io.Writer
+	fs *stats.FollowerStats
+
+	term      uint64
+	index     uint64
+	buf       []byte
+	uint64buf []byte
+	uint8buf  []byte
+}
+
+func newMsgAppV2Encoder(w io.Writer, fs *stats.FollowerStats) *msgAppV2Encoder {
+	return &msgAppV2Encoder{
+		w:         w,
+		fs:        fs,
+		buf:       make([]byte, msgAppV2BufSize),
+		uint64buf: make([]byte, 8),
+		uint8buf:  make([]byte, 1),
+	}
+}
+
+func (enc *msgAppV2Encoder) encode(m *raftpb.Message) error {
+	start := time.Now()
+	switch {
+	case isLinkHeartbeatMessage(m):
+		enc.uint8buf[0] = byte(msgTypeLinkHeartbeat)
+		if _, err := enc.w.Write(enc.uint8buf); err != nil {
+			return err
+		}
+	case enc.index == m.Index && enc.term == m.LogTerm && m.LogTerm == m.Term:
+		enc.uint8buf[0] = byte(msgTypeAppEntries)
+		if _, err := enc.w.Write(enc.uint8buf); err != nil {
+			return err
+		}
+		// write length of entries
+		binary.BigEndian.PutUint64(enc.uint64buf, uint64(len(m.Entries)))
+		if _, err := enc.w.Write(enc.uint64buf); err != nil {
+			return err
+		}
+		for i := 0; i < len(m.Entries); i++ {
+			// write length of entry
+			binary.BigEndian.PutUint64(enc.uint64buf, uint64(m.Entries[i].Size()))
+			if _, err := enc.w.Write(enc.uint64buf); err != nil {
+				return err
+			}
+			if n := m.Entries[i].Size(); n < msgAppV2BufSize {
+				if _, err := m.Entries[i].MarshalTo(enc.buf); err != nil {
+					return err
+				}
+				if _, err := enc.w.Write(enc.buf[:n]); err != nil {
+					return err
+				}
+			} else {
+				if _, err := enc.w.Write(pbutil.MustMarshal(&m.Entries[i])); err != nil {
+					return err
+				}
+			}
+			enc.index++
+		}
+		// write commit index
+		binary.BigEndian.PutUint64(enc.uint64buf, m.Commit)
+		if _, err := enc.w.Write(enc.uint64buf); err != nil {
+			return err
+		}
+		enc.fs.Succ(time.Since(start))
+	default:
+		if err := binary.Write(enc.w, binary.BigEndian, msgTypeApp); err != nil {
+			return err
+		}
+		// write size of message
+		if err := binary.Write(enc.w, binary.BigEndian, uint64(m.Size())); err != nil {
+			return err
+		}
+		// write message
+		if _, err := enc.w.Write(pbutil.MustMarshal(m)); err != nil {
+			return err
+		}
+
+		enc.term = m.Term
+		enc.index = m.Index
+		if l := len(m.Entries); l > 0 {
+			enc.index = m.Entries[l-1].Index
+		}
+		enc.fs.Succ(time.Since(start))
+	}
+	return nil
+}
+
+type msgAppV2Decoder struct {
+	r             io.Reader
+	local, remote types.ID
+
+	term      uint64
+	index     uint64
+	buf       []byte
+	uint64buf []byte
+	uint8buf  []byte
+}
+
+func newMsgAppV2Decoder(r io.Reader, local, remote types.ID) *msgAppV2Decoder {
+	return &msgAppV2Decoder{
+		r:         r,
+		local:     local,
+		remote:    remote,
+		buf:       make([]byte, msgAppV2BufSize),
+		uint64buf: make([]byte, 8),
+		uint8buf:  make([]byte, 1),
+	}
+}
+
+func (dec *msgAppV2Decoder) decode() (raftpb.Message, error) {
+	var (
+		m   raftpb.Message
+		typ uint8
+	)
+	if _, err := io.ReadFull(dec.r, dec.uint8buf); err != nil {
+		return m, err
+	}
+	typ = uint8(dec.uint8buf[0])
+	switch typ {
+	case msgTypeLinkHeartbeat:
+		return linkHeartbeatMessage, nil
+	case msgTypeAppEntries:
+		m = raftpb.Message{
+			Type:    raftpb.MsgApp,
+			From:    uint64(dec.remote),
+			To:      uint64(dec.local),
+			Term:    dec.term,
+			LogTerm: dec.term,
+			Index:   dec.index,
+		}
+
+		// decode entries
+		if _, err := io.ReadFull(dec.r, dec.uint64buf); err != nil {
+			return m, err
+		}
+		l := binary.BigEndian.Uint64(dec.uint64buf)
+		m.Entries = make([]raftpb.Entry, int(l))
+		for i := 0; i < int(l); i++ {
+			if _, err := io.ReadFull(dec.r, dec.uint64buf); err != nil {
+				return m, err
+			}
+			size := binary.BigEndian.Uint64(dec.uint64buf)
+			var buf []byte
+			if size < msgAppV2BufSize {
+				buf = dec.buf[:size]
+				if _, err := io.ReadFull(dec.r, buf); err != nil {
+					return m, err
+				}
+			} else {
+				buf = make([]byte, int(size))
+				if _, err := io.ReadFull(dec.r, buf); err != nil {
+					return m, err
+				}
+			}
+			dec.index++
+			// 1 alloc
+			pbutil.MustUnmarshal(&m.Entries[i], buf)
+		}
+		// decode commit index
+		if _, err := io.ReadFull(dec.r, dec.uint64buf); err != nil {
+			return m, err
+		}
+		m.Commit = binary.BigEndian.Uint64(dec.uint64buf)
+	case msgTypeApp:
+		var size uint64
+		if err := binary.Read(dec.r, binary.BigEndian, &size); err != nil {
+			return m, err
+		}
+		buf := make([]byte, int(size))
+		if _, err := io.ReadFull(dec.r, buf); err != nil {
+			return m, err
+		}
+		pbutil.MustUnmarshal(&m, buf)
+
+		dec.term = m.Term
+		dec.index = m.Index
+		if l := len(m.Entries); l > 0 {
+			dec.index = m.Entries[l-1].Index
+		}
+	default:
+		return m, fmt.Errorf("failed to parse type %d in msgappv2 stream", typ)
+	}
+	return m, nil
+}
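+
+// Illustrative sketch, not part of the upstream file. Per the wire format
+// documented above, a link heartbeat is the single byte 0x00, so the smallest
+// valid msgappv2 frame decodes as follows (localID and remoteID are
+// placeholder types.ID values):
+//
+//	dec := newMsgAppV2Decoder(bytes.NewReader([]byte{0x00}), localID, remoteID)
+//	m, _ := dec.decode() // m == linkHeartbeatMessage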
diff --git a/vendor/github.com/coreos/etcd/rafthttp/peer.go b/vendor/github.com/coreos/etcd/rafthttp/peer.go
new file mode 100644
index 0000000..e9a25bb
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/rafthttp/peer.go
@@ -0,0 +1,313 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafthttp
+
+import (
+	"context"
+	"sync"
+	"time"
+
+	"github.com/coreos/etcd/etcdserver/stats"
+	"github.com/coreos/etcd/pkg/types"
+	"github.com/coreos/etcd/raft"
+	"github.com/coreos/etcd/raft/raftpb"
+	"github.com/coreos/etcd/snap"
+
+	"golang.org/x/time/rate"
+)
+
+const (
+	// ConnReadTimeout and ConnWriteTimeout are the i/o timeouts set on each connection the rafthttp pkg creates.
+	// A 5-second timeout is good enough for recycling bad connections. Otherwise we would have to
+	// wait for tcp keepalive to fail to detect a bad connection, which takes minutes.
+	// For long-term streaming connections, the rafthttp pkg sends application-level linkHeartbeatMessages
+	// to keep the connection alive.
+	// For short-term pipeline connections, the connection MUST be killed to avoid it being
+	// put back into the http pkg connection pool.
+	ConnReadTimeout  = 5 * time.Second
+	ConnWriteTimeout = 5 * time.Second
+
+	recvBufSize = 4096
+	// maxPendingProposals holds the proposals during one leader election process.
+	// Generally one leader election takes at most 1 sec. It should have
+	// 0-2 election conflicts, and each one takes 0.5 sec.
+	// We assume the number of concurrent proposers is smaller than 4096.
+	// One client blocks on its proposal for at least 1 sec, so 4096 is enough
+	// to hold all proposals.
+	maxPendingProposals = 4096
+
+	streamAppV2 = "streamMsgAppV2"
+	streamMsg   = "streamMsg"
+	pipelineMsg = "pipeline"
+	sendSnap    = "sendMsgSnap"
+)
+
+type Peer interface {
+	// send sends the message to the remote peer. The function is non-blocking
+	// and makes no promise that the message will be received by the remote.
+	// When it fails to send the message out, it reports the status to the
+	// underlying raft.
+	send(m raftpb.Message)
+
+	// sendSnap sends the merged snapshot message to the remote peer. Its behavior
+	// is similar to send.
+	sendSnap(m snap.Message)
+
+	// update updates the urls of the remote peer.
+	update(urls types.URLs)
+
+	// attachOutgoingConn attaches the outgoing connection to the peer for
+	// stream usage. After the call, ownership of the outgoing
+	// connection is handed over to the peer. The peer will close the connection
+	// when it is no longer used.
+	attachOutgoingConn(conn *outgoingConn)
+	// activeSince returns the time that the connection with the
+	// peer becomes active.
+	activeSince() time.Time
+	// stop performs any necessary finalization and terminates the peer
+	// gracefully.
+	stop()
+}
+
+// peer is the representative of a remote raft node. The local raft node sends
+// messages to the remote through the peer.
+// Each peer has two underlying mechanisms to send out a message: stream and
+// pipeline.
+// A stream is a receiver-initialized long-polling connection, which
+// is always open to transfer messages. Besides the general stream, a peer also has
+// an optimized stream for sending msgApp, since msgApp accounts for a large part
+// of all messages. Only the raft leader uses the optimized stream to send msgApp
+// to the remote follower node.
+// A pipeline is a series of http clients that send http requests to the remote.
+// It is only used when the stream has not been established.
+type peer struct {
+	// id of the remote raft peer node
+	id types.ID
+	r  Raft
+
+	status *peerStatus
+
+	picker *urlPicker
+
+	msgAppV2Writer *streamWriter
+	writer         *streamWriter
+	pipeline       *pipeline
+	snapSender     *snapshotSender // snapshot sender to send v3 snapshot messages
+	msgAppV2Reader *streamReader
+	msgAppReader   *streamReader
+
+	recvc chan raftpb.Message
+	propc chan raftpb.Message
+
+	mu     sync.Mutex
+	paused bool
+
+	cancel context.CancelFunc // cancels pending work in goroutines created by the peer.
+	stopc  chan struct{}
+}
+
+func startPeer(transport *Transport, urls types.URLs, peerID types.ID, fs *stats.FollowerStats) *peer {
+	plog.Infof("starting peer %s...", peerID)
+	defer plog.Infof("started peer %s", peerID)
+
+	status := newPeerStatus(peerID)
+	picker := newURLPicker(urls)
+	errorc := transport.ErrorC
+	r := transport.Raft
+	pipeline := &pipeline{
+		peerID:        peerID,
+		tr:            transport,
+		picker:        picker,
+		status:        status,
+		followerStats: fs,
+		raft:          r,
+		errorc:        errorc,
+	}
+	pipeline.start()
+
+	p := &peer{
+		id:             peerID,
+		r:              r,
+		status:         status,
+		picker:         picker,
+		msgAppV2Writer: startStreamWriter(peerID, status, fs, r),
+		writer:         startStreamWriter(peerID, status, fs, r),
+		pipeline:       pipeline,
+		snapSender:     newSnapshotSender(transport, picker, peerID, status),
+		recvc:          make(chan raftpb.Message, recvBufSize),
+		propc:          make(chan raftpb.Message, maxPendingProposals),
+		stopc:          make(chan struct{}),
+	}
+
+	ctx, cancel := context.WithCancel(context.Background())
+	p.cancel = cancel
+	go func() {
+		for {
+			select {
+			case mm := <-p.recvc:
+				if err := r.Process(ctx, mm); err != nil {
+					plog.Warningf("failed to process raft message (%v)", err)
+				}
+			case <-p.stopc:
+				return
+			}
+		}
+	}()
+
+	// r.Process might block while processing a proposal when there is no leader.
+	// Thus propc must be handled in a routine separate from recvc to avoid blocking
+	// the processing of other raft messages.
+	go func() {
+		for {
+			select {
+			case mm := <-p.propc:
+				if err := r.Process(ctx, mm); err != nil {
+					plog.Warningf("failed to process raft message (%v)", err)
+				}
+			case <-p.stopc:
+				return
+			}
+		}
+	}()
+
+	p.msgAppV2Reader = &streamReader{
+		peerID: peerID,
+		typ:    streamTypeMsgAppV2,
+		tr:     transport,
+		picker: picker,
+		status: status,
+		recvc:  p.recvc,
+		propc:  p.propc,
+		rl:     rate.NewLimiter(transport.DialRetryFrequency, 1),
+	}
+	p.msgAppReader = &streamReader{
+		peerID: peerID,
+		typ:    streamTypeMessage,
+		tr:     transport,
+		picker: picker,
+		status: status,
+		recvc:  p.recvc,
+		propc:  p.propc,
+		rl:     rate.NewLimiter(transport.DialRetryFrequency, 1),
+	}
+
+	p.msgAppV2Reader.start()
+	p.msgAppReader.start()
+
+	return p
+}
+
+func (p *peer) send(m raftpb.Message) {
+	p.mu.Lock()
+	paused := p.paused
+	p.mu.Unlock()
+
+	if paused {
+		return
+	}
+
+	writec, name := p.pick(m)
+	select {
+	case writec <- m:
+	default:
+		p.r.ReportUnreachable(m.To)
+		if isMsgSnap(m) {
+			p.r.ReportSnapshot(m.To, raft.SnapshotFailure)
+		}
+		if p.status.isActive() {
+			plog.MergeWarningf("dropped internal raft message to %s since %s's sending buffer is full (bad/overloaded network)", p.id, name)
+		}
+		plog.Debugf("dropped %s to %s since %s's sending buffer is full", m.Type, p.id, name)
+		sentFailures.WithLabelValues(types.ID(m.To).String()).Inc()
+	}
+}
+
+func (p *peer) sendSnap(m snap.Message) {
+	go p.snapSender.send(m)
+}
+
+func (p *peer) update(urls types.URLs) {
+	p.picker.update(urls)
+}
+
+func (p *peer) attachOutgoingConn(conn *outgoingConn) {
+	var ok bool
+	switch conn.t {
+	case streamTypeMsgAppV2:
+		ok = p.msgAppV2Writer.attach(conn)
+	case streamTypeMessage:
+		ok = p.writer.attach(conn)
+	default:
+		plog.Panicf("unhandled stream type %s", conn.t)
+	}
+	if !ok {
+		conn.Close()
+	}
+}
+
+func (p *peer) activeSince() time.Time { return p.status.activeSince() }
+
+// Pause pauses the peer. The peer will simply drop all incoming
+// messages without returning an error.
+func (p *peer) Pause() {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	p.paused = true
+	p.msgAppReader.pause()
+	p.msgAppV2Reader.pause()
+}
+
+// Resume resumes a paused peer.
+func (p *peer) Resume() {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	p.paused = false
+	p.msgAppReader.resume()
+	p.msgAppV2Reader.resume()
+}
+
+func (p *peer) stop() {
+	plog.Infof("stopping peer %s...", p.id)
+	defer plog.Infof("stopped peer %s", p.id)
+
+	close(p.stopc)
+	p.cancel()
+	p.msgAppV2Writer.stop()
+	p.writer.stop()
+	p.pipeline.stop()
+	p.snapSender.stop()
+	p.msgAppV2Reader.stop()
+	p.msgAppReader.stop()
+}
+
+// pick picks a chan for sending the given message. The picked chan and its
+// string name are returned.
+func (p *peer) pick(m raftpb.Message) (writec chan<- raftpb.Message, picked string) {
+	var ok bool
+	// Considering that MsgSnap may be large (e.g., 1GB) and would block the
+	// stream for a long time, only use one of the N pipelines to send MsgSnap.
+	if isMsgSnap(m) {
+		return p.pipeline.msgc, pipelineMsg
+	} else if writec, ok = p.msgAppV2Writer.writec(); ok && isMsgApp(m) {
+		return writec, streamAppV2
+	} else if writec, ok = p.writer.writec(); ok {
+		return writec, streamMsg
+	}
+	return p.pipeline.msgc, pipelineMsg
+}
+
+func isMsgApp(m raftpb.Message) bool { return m.Type == raftpb.MsgApp }
+
+func isMsgSnap(m raftpb.Message) bool { return m.Type == raftpb.MsgSnap }
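+
+// Illustrative summary, not part of the upstream file: pick routes a message
+// by type and stream availability, falling back to the pipeline:
+//
+//	MsgSnap                           -> pipeline
+//	MsgApp, msgAppV2 stream attached  -> msgAppV2Writer
+//	any type, message stream attached -> writer
+//	otherwise                         -> pipeline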
diff --git a/vendor/github.com/coreos/etcd/rafthttp/peer_status.go b/vendor/github.com/coreos/etcd/rafthttp/peer_status.go
new file mode 100644
index 0000000..69cbd38
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/rafthttp/peer_status.go
@@ -0,0 +1,77 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafthttp
+
+import (
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/coreos/etcd/pkg/types"
+)
+
+type failureType struct {
+	source string
+	action string
+}
+
+type peerStatus struct {
+	id     types.ID
+	mu     sync.Mutex // protect variables below
+	active bool
+	since  time.Time
+}
+
+func newPeerStatus(id types.ID) *peerStatus {
+	return &peerStatus{
+		id: id,
+	}
+}
+
+func (s *peerStatus) activate() {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if !s.active {
+		plog.Infof("peer %s became active", s.id)
+		s.active = true
+		s.since = time.Now()
+	}
+}
+
+func (s *peerStatus) deactivate(failure failureType, reason string) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	msg := fmt.Sprintf("failed to %s %s on %s (%s)", failure.action, s.id, failure.source, reason)
+	if s.active {
+		plog.Errorf(msg)
+		plog.Infof("peer %s became inactive (message send to peer failed)", s.id)
+		s.active = false
+		s.since = time.Time{}
+		return
+	}
+	plog.Debugf(msg)
+}
+
+func (s *peerStatus) isActive() bool {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.active
+}
+
+func (s *peerStatus) activeSince() time.Time {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.since
+}
diff --git a/vendor/github.com/coreos/etcd/rafthttp/pipeline.go b/vendor/github.com/coreos/etcd/rafthttp/pipeline.go
new file mode 100644
index 0000000..d9f07c3
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/rafthttp/pipeline.go
@@ -0,0 +1,160 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafthttp
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"io/ioutil"
+	"sync"
+	"time"
+
+	"github.com/coreos/etcd/etcdserver/stats"
+	"github.com/coreos/etcd/pkg/pbutil"
+	"github.com/coreos/etcd/pkg/types"
+	"github.com/coreos/etcd/raft"
+	"github.com/coreos/etcd/raft/raftpb"
+)
+
+const (
+	connPerPipeline = 4
+	// pipelineBufSize is the size of the pipeline buffer, which helps absorb
+	// temporary network latency.
+	// The size ensures that the pipeline does not drop messages when the network
+	// is unavailable for less than 1 second on the good path.
+	pipelineBufSize = 64
+)
+
+var errStopped = errors.New("stopped")
+
+type pipeline struct {
+	peerID types.ID
+
+	tr     *Transport
+	picker *urlPicker
+	status *peerStatus
+	raft   Raft
+	errorc chan error
+	// deprecate when we deprecate the v2 API
+	followerStats *stats.FollowerStats
+
+	msgc chan raftpb.Message
+	// wait for the handling routines
+	wg    sync.WaitGroup
+	stopc chan struct{}
+}
+
+func (p *pipeline) start() {
+	p.stopc = make(chan struct{})
+	p.msgc = make(chan raftpb.Message, pipelineBufSize)
+	p.wg.Add(connPerPipeline)
+	for i := 0; i < connPerPipeline; i++ {
+		go p.handle()
+	}
+	plog.Infof("started HTTP pipelining with peer %s", p.peerID)
+}
+
+func (p *pipeline) stop() {
+	close(p.stopc)
+	p.wg.Wait()
+	plog.Infof("stopped HTTP pipelining with peer %s", p.peerID)
+}
+
+func (p *pipeline) handle() {
+	defer p.wg.Done()
+
+	for {
+		select {
+		case m := <-p.msgc:
+			start := time.Now()
+			err := p.post(pbutil.MustMarshal(&m))
+			end := time.Now()
+
+			if err != nil {
+				p.status.deactivate(failureType{source: pipelineMsg, action: "write"}, err.Error())
+
+				if m.Type == raftpb.MsgApp && p.followerStats != nil {
+					p.followerStats.Fail()
+				}
+				p.raft.ReportUnreachable(m.To)
+				if isMsgSnap(m) {
+					p.raft.ReportSnapshot(m.To, raft.SnapshotFailure)
+				}
+				sentFailures.WithLabelValues(types.ID(m.To).String()).Inc()
+				continue
+			}
+
+			p.status.activate()
+			if m.Type == raftpb.MsgApp && p.followerStats != nil {
+				p.followerStats.Succ(end.Sub(start))
+			}
+			if isMsgSnap(m) {
+				p.raft.ReportSnapshot(m.To, raft.SnapshotFinish)
+			}
+			sentBytes.WithLabelValues(types.ID(m.To).String()).Add(float64(m.Size()))
+		case <-p.stopc:
+			return
+		}
+	}
+}
+
+// post POSTs a data payload to a url. It returns nil if the POST succeeds,
+// or an error on any failure.
+func (p *pipeline) post(data []byte) (err error) {
+	u := p.picker.pick()
+	req := createPostRequest(u, RaftPrefix, bytes.NewBuffer(data), "application/protobuf", p.tr.URLs, p.tr.ID, p.tr.ClusterID)
+
+	done := make(chan struct{}, 1)
+	ctx, cancel := context.WithCancel(context.Background())
+	req = req.WithContext(ctx)
+	go func() {
+		select {
+		case <-done:
+		case <-p.stopc:
+			waitSchedule()
+			cancel()
+		}
+	}()
+
+	resp, err := p.tr.pipelineRt.RoundTrip(req)
+	done <- struct{}{}
+	if err != nil {
+		p.picker.unreachable(u)
+		return err
+	}
+	b, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		p.picker.unreachable(u)
+		return err
+	}
+	resp.Body.Close()
+
+	err = checkPostResponse(resp, b, req, p.peerID)
+	if err != nil {
+		p.picker.unreachable(u)
+		// errMemberRemoved is a critical error since a removed member should
+		// always be stopped. So we use reportCriticalError to report it to errorc.
+		if err == errMemberRemoved {
+			reportCriticalError(err, p.errorc)
+		}
+		return err
+	}
+
+	return nil
+}
+
+// waitSchedule waits for a while so that other goroutines can be scheduled
+func waitSchedule() { time.Sleep(time.Millisecond) }
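+
+// Illustrative sketch, not part of the upstream file: post pairs a watchdog
+// goroutine with a cancelable context so that closing stopc aborts an
+// in-flight request; rt, req and stopc are placeholders:
+//
+//	done := make(chan struct{}, 1)
+//	ctx, cancel := context.WithCancel(context.Background())
+//	go func() {
+//		select {
+//		case <-done: // request finished normally
+//		case <-stopc:
+//			cancel() // abort the in-flight round trip
+//		}
+//	}()
+//	resp, err := rt.RoundTrip(req.WithContext(ctx))
+//	done <- struct{}{}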
diff --git a/vendor/github.com/coreos/etcd/rafthttp/probing_status.go b/vendor/github.com/coreos/etcd/rafthttp/probing_status.go
new file mode 100644
index 0000000..109a0ae
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/rafthttp/probing_status.go
@@ -0,0 +1,76 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafthttp
+
+import (
+	"time"
+
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/xiang90/probing"
+)
+
+var (
+	// proberInterval must be shorter than the read timeout;
+	// otherwise the connection will time out.
+	proberInterval           = ConnReadTimeout - time.Second
+	statusMonitoringInterval = 30 * time.Second
+	statusErrorInterval      = 5 * time.Second
+)
+
+const (
+	// RoundTripperNameRaftMessage is the name of round-tripper that sends
+	// all other Raft messages, other than "snap.Message".
+	RoundTripperNameRaftMessage = "ROUND_TRIPPER_RAFT_MESSAGE"
+	// RoundTripperNameSnapshot is the name of round-tripper that sends merged snapshot message.
+	RoundTripperNameSnapshot = "ROUND_TRIPPER_SNAPSHOT"
+)
+
+func addPeerToProber(p probing.Prober, id string, us []string, roundTripperName string, rttSecProm *prometheus.HistogramVec) {
+	hus := make([]string, len(us))
+	for i := range us {
+		hus[i] = us[i] + ProbingPrefix
+	}
+
+	p.AddHTTP(id, proberInterval, hus)
+
+	s, err := p.Status(id)
+	if err != nil {
+		plog.Errorf("failed to add peer %s into prober", id)
+	} else {
+		go monitorProbingStatus(s, id, roundTripperName, rttSecProm)
+	}
+}
+
+func monitorProbingStatus(s probing.Status, id string, roundTripperName string, rttSecProm *prometheus.HistogramVec) {
+	// set the first interval short to log errors early.
+	interval := statusErrorInterval
+	for {
+		select {
+		case <-time.After(interval):
+			if !s.Health() {
+				plog.Warningf("health check for peer %s could not connect: %v (prober %q)", id, s.Err(), roundTripperName)
+				interval = statusErrorInterval
+			} else {
+				interval = statusMonitoringInterval
+			}
+			if s.ClockDiff() > time.Second {
+				plog.Warningf("the clock difference against peer %s is too high [%v > %v] (prober %q)", id, s.ClockDiff(), time.Second, roundTripperName)
+			}
+			rttSecProm.WithLabelValues(id).Observe(s.SRTT().Seconds())
+		case <-s.StopNotify():
+			return
+		}
+	}
+}
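+
+// Illustrative sketch, not part of the upstream file: the transport registers
+// each peer for health checking roughly as follows (prober, id and urls are
+// placeholders; rtts is the histogram defined in metrics.go):
+//
+//	addPeerToProber(prober, id.String(), urls, RoundTripperNameRaftMessage, rtts)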
diff --git a/vendor/github.com/coreos/etcd/rafthttp/remote.go b/vendor/github.com/coreos/etcd/rafthttp/remote.go
new file mode 100644
index 0000000..f7f9d2c
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/rafthttp/remote.go
@@ -0,0 +1,70 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafthttp
+
+import (
+	"github.com/coreos/etcd/pkg/types"
+	"github.com/coreos/etcd/raft/raftpb"
+)
+
+type remote struct {
+	id       types.ID
+	status   *peerStatus
+	pipeline *pipeline
+}
+
+func startRemote(tr *Transport, urls types.URLs, id types.ID) *remote {
+	picker := newURLPicker(urls)
+	status := newPeerStatus(id)
+	pipeline := &pipeline{
+		peerID: id,
+		tr:     tr,
+		picker: picker,
+		status: status,
+		raft:   tr.Raft,
+		errorc: tr.ErrorC,
+	}
+	pipeline.start()
+
+	return &remote{
+		id:       id,
+		status:   status,
+		pipeline: pipeline,
+	}
+}
+
+func (g *remote) send(m raftpb.Message) {
+	select {
+	case g.pipeline.msgc <- m:
+	default:
+		if g.status.isActive() {
+			plog.MergeWarningf("dropped internal raft message to %s since sending buffer is full (bad/overloaded network)", g.id)
+		}
+		plog.Debugf("dropped %s to %s since sending buffer is full", m.Type, g.id)
+		sentFailures.WithLabelValues(types.ID(m.To).String()).Inc()
+	}
+}
+
+func (g *remote) stop() {
+	g.pipeline.stop()
+}
+
+func (g *remote) Pause() {
+	g.stop()
+}
+
+func (g *remote) Resume() {
+	g.pipeline.start()
+}
diff --git a/vendor/github.com/coreos/etcd/rafthttp/snapshot_sender.go b/vendor/github.com/coreos/etcd/rafthttp/snapshot_sender.go
new file mode 100644
index 0000000..24eb535
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/rafthttp/snapshot_sender.go
@@ -0,0 +1,164 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafthttp
+
+import (
+	"bytes"
+	"context"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"time"
+
+	"github.com/coreos/etcd/pkg/httputil"
+	pioutil "github.com/coreos/etcd/pkg/ioutil"
+	"github.com/coreos/etcd/pkg/types"
+	"github.com/coreos/etcd/raft"
+	"github.com/coreos/etcd/snap"
+)
+
+var (
+	// timeout for reading snapshot response body
+	snapResponseReadTimeout = 5 * time.Second
+)
+
+type snapshotSender struct {
+	from, to types.ID
+	cid      types.ID
+
+	tr     *Transport
+	picker *urlPicker
+	status *peerStatus
+	r      Raft
+	errorc chan error
+
+	stopc chan struct{}
+}
+
+func newSnapshotSender(tr *Transport, picker *urlPicker, to types.ID, status *peerStatus) *snapshotSender {
+	return &snapshotSender{
+		from:   tr.ID,
+		to:     to,
+		cid:    tr.ClusterID,
+		tr:     tr,
+		picker: picker,
+		status: status,
+		r:      tr.Raft,
+		errorc: tr.ErrorC,
+		stopc:  make(chan struct{}),
+	}
+}
+
+func (s *snapshotSender) stop() { close(s.stopc) }
+
+func (s *snapshotSender) send(merged snap.Message) {
+	start := time.Now()
+
+	m := merged.Message
+	to := types.ID(m.To).String()
+
+	body := createSnapBody(merged)
+	defer body.Close()
+
+	u := s.picker.pick()
+	req := createPostRequest(u, RaftSnapshotPrefix, body, "application/octet-stream", s.tr.URLs, s.from, s.cid)
+
+	plog.Infof("start to send database snapshot [index: %d, to %s]...", m.Snapshot.Metadata.Index, types.ID(m.To))
+
+	err := s.post(req)
+	defer merged.CloseWithError(err)
+	if err != nil {
+		plog.Warningf("database snapshot [index: %d, to: %s] failed to be sent out (%v)", m.Snapshot.Metadata.Index, types.ID(m.To), err)
+
+		// errMemberRemoved is a critical error since a removed member should
+		// always be stopped. So we use reportCriticalError to report it to errorc.
+		if err == errMemberRemoved {
+			reportCriticalError(err, s.errorc)
+		}
+
+		s.picker.unreachable(u)
+		s.status.deactivate(failureType{source: sendSnap, action: "post"}, err.Error())
+		s.r.ReportUnreachable(m.To)
+		// report SnapshotFailure to the raft state machine. After the raft state
+		// machine knows about it, it will pause for a while and retry sending
+		// a new snapshot message.
+		s.r.ReportSnapshot(m.To, raft.SnapshotFailure)
+		sentFailures.WithLabelValues(to).Inc()
+		snapshotSendFailures.WithLabelValues(to).Inc()
+		return
+	}
+	s.status.activate()
+	s.r.ReportSnapshot(m.To, raft.SnapshotFinish)
+	plog.Infof("database snapshot [index: %d, to: %s] sent out successfully", m.Snapshot.Metadata.Index, types.ID(m.To))
+
+	sentBytes.WithLabelValues(to).Add(float64(merged.TotalSize))
+
+	snapshotSend.WithLabelValues(to).Inc()
+	snapshotSendSeconds.WithLabelValues(to).Observe(time.Since(start).Seconds())
+}
+
+// post posts the given request.
+// It returns nil when the request is sent out and processed successfully.
+func (s *snapshotSender) post(req *http.Request) (err error) {
+	ctx, cancel := context.WithCancel(context.Background())
+	req = req.WithContext(ctx)
+	defer cancel()
+
+	type responseAndError struct {
+		resp *http.Response
+		body []byte
+		err  error
+	}
+	result := make(chan responseAndError, 1)
+
+	go func() {
+		resp, err := s.tr.pipelineRt.RoundTrip(req)
+		if err != nil {
+			result <- responseAndError{resp, nil, err}
+			return
+		}
+
+		// close the response body when the read times out.
+		// This prevents reading the body forever when the other side dies right after
+		// it successfully receives the request body.
+		time.AfterFunc(snapResponseReadTimeout, func() { httputil.GracefulClose(resp) })
+		body, err := ioutil.ReadAll(resp.Body)
+		result <- responseAndError{resp, body, err}
+	}()
+
+	select {
+	case <-s.stopc:
+		return errStopped
+	case r := <-result:
+		if r.err != nil {
+			return r.err
+		}
+		return checkPostResponse(r.resp, r.body, req, s.to)
+	}
+}
+
+func createSnapBody(merged snap.Message) io.ReadCloser {
+	buf := new(bytes.Buffer)
+	enc := &messageEncoder{w: buf}
+	// encode raft message
+	if err := enc.encode(&merged.Message); err != nil {
+		plog.Panicf("encode message error (%v)", err)
+	}
+
+	return &pioutil.ReaderAndCloser{
+		Reader: io.MultiReader(buf, merged.ReadCloser),
+		Closer: merged.ReadCloser,
+	}
+}
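+
+// Illustrative summary, not part of the upstream file: the request body built
+// by createSnapBody is the length-prefixed raft message followed by the raw
+// snapshot data, streamed back to back:
+//
+//	body = [uint64 size | marshaled raftpb.Message | merged.ReadCloser bytes]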
diff --git a/vendor/github.com/coreos/etcd/rafthttp/stream.go b/vendor/github.com/coreos/etcd/rafthttp/stream.go
new file mode 100644
index 0000000..af49c18
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/rafthttp/stream.go
@@ -0,0 +1,533 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafthttp
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"path"
+	"strings"
+	"sync"
+	"time"
+
+	"golang.org/x/time/rate"
+
+	"github.com/coreos/etcd/etcdserver/stats"
+	"github.com/coreos/etcd/pkg/httputil"
+	"github.com/coreos/etcd/pkg/transport"
+	"github.com/coreos/etcd/pkg/types"
+	"github.com/coreos/etcd/raft/raftpb"
+	"github.com/coreos/etcd/version"
+	"github.com/coreos/go-semver/semver"
+)
+
+const (
+	streamTypeMessage  streamType = "message"
+	streamTypeMsgAppV2 streamType = "msgappv2"
+
+	streamBufSize = 4096
+)
+
+var (
+	errUnsupportedStreamType = fmt.Errorf("unsupported stream type")
+
+	// the key is in string format "major.minor.patch"
+	supportedStream = map[string][]streamType{
+		"2.0.0": {},
+		"2.1.0": {streamTypeMsgAppV2, streamTypeMessage},
+		"2.2.0": {streamTypeMsgAppV2, streamTypeMessage},
+		"2.3.0": {streamTypeMsgAppV2, streamTypeMessage},
+		"3.0.0": {streamTypeMsgAppV2, streamTypeMessage},
+		"3.1.0": {streamTypeMsgAppV2, streamTypeMessage},
+		"3.2.0": {streamTypeMsgAppV2, streamTypeMessage},
+		"3.3.0": {streamTypeMsgAppV2, streamTypeMessage},
+	}
+)
+
+type streamType string
+
+func (t streamType) endpoint() string {
+	switch t {
+	case streamTypeMsgAppV2:
+		return path.Join(RaftStreamPrefix, "msgapp")
+	case streamTypeMessage:
+		return path.Join(RaftStreamPrefix, "message")
+	default:
+		plog.Panicf("unhandled stream type %v", t)
+		return ""
+	}
+}
+
+func (t streamType) String() string {
+	switch t {
+	case streamTypeMsgAppV2:
+		return "stream MsgApp v2"
+	case streamTypeMessage:
+		return "stream Message"
+	default:
+		return "unknown stream"
+	}
+}
+
+var (
+	// linkHeartbeatMessage is a special message used as a heartbeat message at
+	// the link layer. It never conflicts with messages from raft because raft
+	// doesn't send out messages without From and To fields.
+	linkHeartbeatMessage = raftpb.Message{Type: raftpb.MsgHeartbeat}
+)
+
+func isLinkHeartbeatMessage(m *raftpb.Message) bool {
+	return m.Type == raftpb.MsgHeartbeat && m.From == 0 && m.To == 0
+}
+
+type outgoingConn struct {
+	t streamType
+	io.Writer
+	http.Flusher
+	io.Closer
+}
+
+// streamWriter writes messages to the attached outgoingConn.
+type streamWriter struct {
+	peerID types.ID
+	status *peerStatus
+	fs     *stats.FollowerStats
+	r      Raft
+
+	mu      sync.Mutex // guards the fields working and closer
+	closer  io.Closer
+	working bool
+
+	msgc  chan raftpb.Message
+	connc chan *outgoingConn
+	stopc chan struct{}
+	done  chan struct{}
+}
+
+// startStreamWriter creates a streamWriter and starts a long-running goroutine that accepts
+// messages and writes to the attached outgoing connection.
+func startStreamWriter(id types.ID, status *peerStatus, fs *stats.FollowerStats, r Raft) *streamWriter {
+	w := &streamWriter{
+		peerID: id,
+		status: status,
+		fs:     fs,
+		r:      r,
+		msgc:   make(chan raftpb.Message, streamBufSize),
+		connc:  make(chan *outgoingConn),
+		stopc:  make(chan struct{}),
+		done:   make(chan struct{}),
+	}
+	go w.run()
+	return w
+}
+
+func (cw *streamWriter) run() {
+	var (
+		msgc       chan raftpb.Message
+		heartbeatc <-chan time.Time
+		t          streamType
+		enc        encoder
+		flusher    http.Flusher
+		batched    int
+	)
+	tickc := time.NewTicker(ConnReadTimeout / 3)
+	defer tickc.Stop()
+	unflushed := 0
+
+	plog.Infof("started streaming with peer %s (writer)", cw.peerID)
+
+	for {
+		select {
+		case <-heartbeatc:
+			err := enc.encode(&linkHeartbeatMessage)
+			unflushed += linkHeartbeatMessage.Size()
+			if err == nil {
+				flusher.Flush()
+				batched = 0
+				sentBytes.WithLabelValues(cw.peerID.String()).Add(float64(unflushed))
+				unflushed = 0
+				continue
+			}
+
+			cw.status.deactivate(failureType{source: t.String(), action: "heartbeat"}, err.Error())
+
+			sentFailures.WithLabelValues(cw.peerID.String()).Inc()
+			cw.close()
+			plog.Warningf("lost the TCP streaming connection with peer %s (%s writer)", cw.peerID, t)
+			heartbeatc, msgc = nil, nil
+
+		case m := <-msgc:
+			err := enc.encode(&m)
+			if err == nil {
+				unflushed += m.Size()
+
+				if len(msgc) == 0 || batched > streamBufSize/2 {
+					flusher.Flush()
+					sentBytes.WithLabelValues(cw.peerID.String()).Add(float64(unflushed))
+					unflushed = 0
+					batched = 0
+				} else {
+					batched++
+				}
+
+				continue
+			}
+
+			cw.status.deactivate(failureType{source: t.String(), action: "write"}, err.Error())
+			cw.close()
+			plog.Warningf("lost the TCP streaming connection with peer %s (%s writer)", cw.peerID, t)
+			heartbeatc, msgc = nil, nil
+			cw.r.ReportUnreachable(m.To)
+			sentFailures.WithLabelValues(cw.peerID.String()).Inc()
+
+		case conn := <-cw.connc:
+			cw.mu.Lock()
+			closed := cw.closeUnlocked()
+			t = conn.t
+			switch conn.t {
+			case streamTypeMsgAppV2:
+				enc = newMsgAppV2Encoder(conn.Writer, cw.fs)
+			case streamTypeMessage:
+				enc = &messageEncoder{w: conn.Writer}
+			default:
+				plog.Panicf("unhandled stream type %s", conn.t)
+			}
+			flusher = conn.Flusher
+			unflushed = 0
+			cw.status.activate()
+			cw.closer = conn.Closer
+			cw.working = true
+			cw.mu.Unlock()
+
+			if closed {
+				plog.Warningf("closed an existing TCP streaming connection with peer %s (%s writer)", cw.peerID, t)
+			}
+			plog.Infof("established a TCP streaming connection with peer %s (%s writer)", cw.peerID, t)
+			heartbeatc, msgc = tickc.C, cw.msgc
+		case <-cw.stopc:
+			if cw.close() {
+				plog.Infof("closed the TCP streaming connection with peer %s (%s writer)", cw.peerID, t)
+			}
+			plog.Infof("stopped streaming with peer %s (writer)", cw.peerID)
+			close(cw.done)
+			return
+		}
+	}
+}
+
+func (cw *streamWriter) writec() (chan<- raftpb.Message, bool) {
+	cw.mu.Lock()
+	defer cw.mu.Unlock()
+	return cw.msgc, cw.working
+}
+
+func (cw *streamWriter) close() bool {
+	cw.mu.Lock()
+	defer cw.mu.Unlock()
+	return cw.closeUnlocked()
+}
+
+func (cw *streamWriter) closeUnlocked() bool {
+	if !cw.working {
+		return false
+	}
+	if err := cw.closer.Close(); err != nil {
+		plog.Errorf("peer %s (writer) connection close error: %v", cw.peerID, err)
+	}
+	if len(cw.msgc) > 0 {
+		cw.r.ReportUnreachable(uint64(cw.peerID))
+	}
+	cw.msgc = make(chan raftpb.Message, streamBufSize)
+	cw.working = false
+	return true
+}
+
+func (cw *streamWriter) attach(conn *outgoingConn) bool {
+	select {
+	case cw.connc <- conn:
+		return true
+	case <-cw.done:
+		return false
+	}
+}
+
+func (cw *streamWriter) stop() {
+	close(cw.stopc)
+	<-cw.done
+}
+
+// streamReader is a long-running goroutine that dials the remote stream
+// endpoint and reads messages from the returned response body.
+type streamReader struct {
+	peerID types.ID
+	typ    streamType
+
+	tr     *Transport
+	picker *urlPicker
+	status *peerStatus
+	recvc  chan<- raftpb.Message
+	propc  chan<- raftpb.Message
+
+	rl *rate.Limiter // limits the frequency of dial retry attempts
+
+	errorc chan<- error
+
+	mu     sync.Mutex
+	paused bool
+	closer io.Closer
+
+	ctx    context.Context
+	cancel context.CancelFunc
+	done   chan struct{}
+}
+
+func (cr *streamReader) start() {
+	cr.done = make(chan struct{})
+	if cr.errorc == nil {
+		cr.errorc = cr.tr.ErrorC
+	}
+	if cr.ctx == nil {
+		cr.ctx, cr.cancel = context.WithCancel(context.Background())
+	}
+	go cr.run()
+}
+
+func (cr *streamReader) run() {
+	t := cr.typ
+	plog.Infof("started streaming with peer %s (%s reader)", cr.peerID, t)
+	for {
+		rc, err := cr.dial(t)
+		if err != nil {
+			if err != errUnsupportedStreamType {
+				cr.status.deactivate(failureType{source: t.String(), action: "dial"}, err.Error())
+			}
+		} else {
+			cr.status.activate()
+			plog.Infof("established a TCP streaming connection with peer %s (%s reader)", cr.peerID, cr.typ)
+			err = cr.decodeLoop(rc, t)
+			plog.Warningf("lost the TCP streaming connection with peer %s (%s reader)", cr.peerID, cr.typ)
+			switch {
+			// all data is read out
+			case err == io.EOF:
+			// connection is closed by the remote
+			case transport.IsClosedConnError(err):
+			default:
+				cr.status.deactivate(failureType{source: t.String(), action: "read"}, err.Error())
+			}
+		}
+		// Wait for a while before the next dial attempt
+		err = cr.rl.Wait(cr.ctx)
+		if cr.ctx.Err() != nil {
+			plog.Infof("stopped streaming with peer %s (%s reader)", cr.peerID, t)
+			close(cr.done)
+			return
+		}
+		if err != nil {
+			plog.Errorf("streaming with peer %s (%s reader) rate limiter error: %v", cr.peerID, t, err)
+		}
+	}
+}
+
+func (cr *streamReader) decodeLoop(rc io.ReadCloser, t streamType) error {
+	var dec decoder
+	cr.mu.Lock()
+	switch t {
+	case streamTypeMsgAppV2:
+		dec = newMsgAppV2Decoder(rc, cr.tr.ID, cr.peerID)
+	case streamTypeMessage:
+		dec = &messageDecoder{r: rc}
+	default:
+		plog.Panicf("unhandled stream type %s", t)
+	}
+	select {
+	case <-cr.ctx.Done():
+		cr.mu.Unlock()
+		if err := rc.Close(); err != nil {
+			return err
+		}
+		return io.EOF
+	default:
+		cr.closer = rc
+	}
+	cr.mu.Unlock()
+
+	for {
+		m, err := dec.decode()
+		if err != nil {
+			cr.mu.Lock()
+			cr.close()
+			cr.mu.Unlock()
+			return err
+		}
+
+		receivedBytes.WithLabelValues(types.ID(m.From).String()).Add(float64(m.Size()))
+
+		cr.mu.Lock()
+		paused := cr.paused
+		cr.mu.Unlock()
+
+		if paused {
+			continue
+		}
+
+		if isLinkHeartbeatMessage(&m) {
+			// raft is not interested in link-layer
+			// heartbeat messages, so we should ignore
+			// them.
+			continue
+		}
+
+		recvc := cr.recvc
+		if m.Type == raftpb.MsgProp {
+			recvc = cr.propc
+		}
+
+		select {
+		case recvc <- m:
+		default:
+			if cr.status.isActive() {
+				plog.MergeWarningf("dropped internal raft message from %s since receiving buffer is full (overloaded network)", types.ID(m.From))
+			}
+			plog.Debugf("dropped %s from %s since receiving buffer is full", m.Type, types.ID(m.From))
+			recvFailures.WithLabelValues(types.ID(m.From).String()).Inc()
+		}
+	}
+}
+
+func (cr *streamReader) stop() {
+	cr.mu.Lock()
+	cr.cancel()
+	cr.close()
+	cr.mu.Unlock()
+	<-cr.done
+}
+
+func (cr *streamReader) dial(t streamType) (io.ReadCloser, error) {
+	u := cr.picker.pick()
+	uu := u
+	uu.Path = path.Join(t.endpoint(), cr.tr.ID.String())
+
+	req, err := http.NewRequest("GET", uu.String(), nil)
+	if err != nil {
+		cr.picker.unreachable(u)
+		return nil, fmt.Errorf("failed to make http request to %v (%v)", u, err)
+	}
+	req.Header.Set("X-Server-From", cr.tr.ID.String())
+	req.Header.Set("X-Server-Version", version.Version)
+	req.Header.Set("X-Min-Cluster-Version", version.MinClusterVersion)
+	req.Header.Set("X-Etcd-Cluster-ID", cr.tr.ClusterID.String())
+	req.Header.Set("X-Raft-To", cr.peerID.String())
+
+	setPeerURLsHeader(req, cr.tr.URLs)
+
+	req = req.WithContext(cr.ctx)
+
+	cr.mu.Lock()
+	select {
+	case <-cr.ctx.Done():
+		cr.mu.Unlock()
+		return nil, fmt.Errorf("stream reader is stopped")
+	default:
+	}
+	cr.mu.Unlock()
+
+	resp, err := cr.tr.streamRt.RoundTrip(req)
+	if err != nil {
+		cr.picker.unreachable(u)
+		return nil, err
+	}
+
+	rv := serverVersion(resp.Header)
+	lv := semver.Must(semver.NewVersion(version.Version))
+	if compareMajorMinorVersion(rv, lv) == -1 && !checkStreamSupport(rv, t) {
+		httputil.GracefulClose(resp)
+		cr.picker.unreachable(u)
+		return nil, errUnsupportedStreamType
+	}
+
+	switch resp.StatusCode {
+	case http.StatusGone:
+		httputil.GracefulClose(resp)
+		cr.picker.unreachable(u)
+		reportCriticalError(errMemberRemoved, cr.errorc)
+		return nil, errMemberRemoved
+	case http.StatusOK:
+		return resp.Body, nil
+	case http.StatusNotFound:
+		httputil.GracefulClose(resp)
+		cr.picker.unreachable(u)
+		return nil, fmt.Errorf("peer %s failed to find local node %s", cr.peerID, cr.tr.ID)
+	case http.StatusPreconditionFailed:
+		b, err := ioutil.ReadAll(resp.Body)
+		if err != nil {
+			cr.picker.unreachable(u)
+			return nil, err
+		}
+		httputil.GracefulClose(resp)
+		cr.picker.unreachable(u)
+
+		switch strings.TrimSuffix(string(b), "\n") {
+		case errIncompatibleVersion.Error():
+			plog.Errorf("request sent was ignored by peer %s (server version incompatible)", cr.peerID)
+			return nil, errIncompatibleVersion
+		case errClusterIDMismatch.Error():
+			plog.Errorf("request sent was ignored (cluster ID mismatch: peer[%s]=%s, local=%s)",
+				cr.peerID, resp.Header.Get("X-Etcd-Cluster-ID"), cr.tr.ClusterID)
+			return nil, errClusterIDMismatch
+		default:
+			return nil, fmt.Errorf("unhandled error %q when precondition failed", string(b))
+		}
+	default:
+		httputil.GracefulClose(resp)
+		cr.picker.unreachable(u)
+		return nil, fmt.Errorf("unhandled http status %d", resp.StatusCode)
+	}
+}
+
+func (cr *streamReader) close() {
+	if cr.closer != nil {
+		if err := cr.closer.Close(); err != nil {
+			plog.Errorf("peer %s (reader) connection close error: %v", cr.peerID, err)
+		}
+	}
+	cr.closer = nil
+}
+
+func (cr *streamReader) pause() {
+	cr.mu.Lock()
+	defer cr.mu.Unlock()
+	cr.paused = true
+}
+
+func (cr *streamReader) resume() {
+	cr.mu.Lock()
+	defer cr.mu.Unlock()
+	cr.paused = false
+}
+
+// checkStreamSupport checks whether the stream type is supported in the
+// given version.
+func checkStreamSupport(v *semver.Version, t streamType) bool {
+	nv := &semver.Version{Major: v.Major, Minor: v.Minor}
+	for _, s := range supportedStream[nv.String()] {
+		if s == t {
+			return true
+		}
+	}
+	return false
+}
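+
+// Illustrative sketch, not part of the upstream file. checkStreamSupport
+// ignores the patch level, so any 3.3.x peer supports both stream types per
+// the supportedStream map above:
+//
+//	v := semver.Must(semver.NewVersion("3.3.5"))
+//	checkStreamSupport(v, streamTypeMsgAppV2) // true
+//	checkStreamSupport(v, streamTypeMessage)  // true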
diff --git a/vendor/github.com/coreos/etcd/rafthttp/transport.go b/vendor/github.com/coreos/etcd/rafthttp/transport.go
new file mode 100644
index 0000000..16e854c
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/rafthttp/transport.go
@@ -0,0 +1,438 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafthttp
+
+import (
+	"context"
+	"net/http"
+	"sync"
+	"time"
+
+	"github.com/coreos/etcd/etcdserver/stats"
+	"github.com/coreos/etcd/pkg/logutil"
+	"github.com/coreos/etcd/pkg/transport"
+	"github.com/coreos/etcd/pkg/types"
+	"github.com/coreos/etcd/raft"
+	"github.com/coreos/etcd/raft/raftpb"
+	"github.com/coreos/etcd/snap"
+
+	"github.com/coreos/pkg/capnslog"
+	"github.com/xiang90/probing"
+	"golang.org/x/time/rate"
+)
+
+var plog = logutil.NewMergeLogger(capnslog.NewPackageLogger("github.com/coreos/etcd", "rafthttp"))
+
+type Raft interface {
+	Process(ctx context.Context, m raftpb.Message) error
+	IsIDRemoved(id uint64) bool
+	ReportUnreachable(id uint64)
+	ReportSnapshot(id uint64, status raft.SnapshotStatus)
+}
+
+type Transporter interface {
+	// Start starts the given Transporter.
+	// Start MUST be called before calling other functions in the interface.
+	Start() error
+	// Handler returns the HTTP handler of the transporter.
+	// A transporter HTTP handler handles the HTTP requests
+	// from remote peers.
+	// The handler MUST be used to handle RaftPrefix(/raft)
+	// endpoint.
+	Handler() http.Handler
+	// Send sends out the given messages to the remote peers.
+	// Each message has a To field, which is an id that maps
+	// to an existing peer in the transport.
+	// If the id cannot be found in the transport, the message
+	// will be ignored.
+	Send(m []raftpb.Message)
+	// SendSnapshot sends out the given snapshot message to a remote peer.
+	// The behavior of SendSnapshot is similar to Send.
+	SendSnapshot(m snap.Message)
+	// AddRemote adds a remote with given peer urls into the transport.
+	// A remote helps a newly joined member catch up with the progress of the cluster,
+	// and will not be used after that.
+	// It is the caller's responsibility to ensure the urls are all valid,
+	// or it panics.
+	AddRemote(id types.ID, urls []string)
+	// AddPeer adds a peer with given peer urls into the transport.
+	// It is the caller's responsibility to ensure the urls are all valid,
+	// or it panics.
+	// Peer urls are used to connect to the remote peer.
+	AddPeer(id types.ID, urls []string)
+	// RemovePeer removes the peer with given id.
+	RemovePeer(id types.ID)
+	// RemoveAllPeers removes all the existing peers in the transport.
+	RemoveAllPeers()
+	// UpdatePeer updates the peer urls of the peer with the given id.
+	// It is the caller's responsibility to ensure the urls are all valid,
+	// or it panics.
+	UpdatePeer(id types.ID, urls []string)
+	// ActiveSince returns the time that the connection with the peer
+	// of the given id becomes active.
+	// If the connection has been active since the peer was added, it returns the time the peer was added.
+	// If the connection is currently inactive, it returns zero time.
+	ActiveSince(id types.ID) time.Time
+	// ActivePeers returns the number of active peers.
+	ActivePeers() int
+	// Stop closes the connections and stops the transporter.
+	Stop()
+}
+
+// Transport implements the Transporter interface. It provides the
+// functionality to send raft messages to peers and to receive raft
+// messages from peers.
+// The user should call the Handler method to get a handler to serve
+// requests received from peerURLs.
+// The user must call Start before calling other functions, and must
+// call Stop when the Transport is no longer used.
+type Transport struct {
+	DialTimeout time.Duration // maximum duration before timing out dial of the request
+	// DialRetryFrequency defines the frequency of streamReader dial retry attempts;
+	// a distinct rate limiter is created per peer (default value: 10 events/sec)
+	DialRetryFrequency rate.Limit
+
+	TLSInfo transport.TLSInfo // TLS information used when creating connection
+
+	ID          types.ID   // local member ID
+	URLs        types.URLs // local peer URLs
+	ClusterID   types.ID   // raft cluster ID for request validation
+	Raft        Raft       // raft state machine, to which the Transport forwards received messages and reports status
+	Snapshotter *snap.Snapshotter
+	ServerStats *stats.ServerStats // used to record general transportation statistics
+	// used to record transportation statistics with followers when
+	// performing as leader in raft protocol
+	LeaderStats *stats.LeaderStats
+	// ErrorC is used to report detected critical errors, e.g.,
+	// the member has been permanently removed from the cluster
+	// When an error is received from ErrorC, user should stop raft state
+	// machine and thus stop the Transport.
+	ErrorC chan error
+
+	streamRt   http.RoundTripper // roundTripper used by streams
+	pipelineRt http.RoundTripper // roundTripper used by pipelines
+
+	mu      sync.RWMutex         // protects the remotes and peers maps
+	remotes map[types.ID]*remote // remotes that help newly joined members catch up
+	peers   map[types.ID]Peer    // peers map
+
+	pipelineProber probing.Prober
+	streamProber   probing.Prober
+}
+
+func (t *Transport) Start() error {
+	var err error
+	t.streamRt, err = newStreamRoundTripper(t.TLSInfo, t.DialTimeout)
+	if err != nil {
+		return err
+	}
+	t.pipelineRt, err = NewRoundTripper(t.TLSInfo, t.DialTimeout)
+	if err != nil {
+		return err
+	}
+	t.remotes = make(map[types.ID]*remote)
+	t.peers = make(map[types.ID]Peer)
+	t.pipelineProber = probing.NewProber(t.pipelineRt)
+	t.streamProber = probing.NewProber(t.streamRt)
+
+	// If the client did not provide a dial retry frequency, use the default
+	// (100ms backoff between attempts to create a new stream),
+	// so that retries do not add too much overhead.
+	if t.DialRetryFrequency == 0 {
+		t.DialRetryFrequency = rate.Every(100 * time.Millisecond)
+	}
+	return nil
+}
+
+func (t *Transport) Handler() http.Handler {
+	pipelineHandler := newPipelineHandler(t, t.Raft, t.ClusterID)
+	streamHandler := newStreamHandler(t, t, t.Raft, t.ID, t.ClusterID)
+	snapHandler := newSnapshotHandler(t, t.Raft, t.Snapshotter, t.ClusterID)
+	mux := http.NewServeMux()
+	mux.Handle(RaftPrefix, pipelineHandler)
+	mux.Handle(RaftStreamPrefix+"/", streamHandler)
+	mux.Handle(RaftSnapshotPrefix, snapHandler)
+	mux.Handle(ProbingPrefix, probing.NewHandler())
+	return mux
+}
+
+func (t *Transport) Get(id types.ID) Peer {
+	t.mu.RLock()
+	defer t.mu.RUnlock()
+	return t.peers[id]
+}
+
+func (t *Transport) Send(msgs []raftpb.Message) {
+	for _, m := range msgs {
+		if m.To == 0 {
+			// ignore intentionally dropped message
+			continue
+		}
+		to := types.ID(m.To)
+
+		t.mu.RLock()
+		p, pok := t.peers[to]
+		g, rok := t.remotes[to]
+		t.mu.RUnlock()
+
+		if pok {
+			if m.Type == raftpb.MsgApp {
+				t.ServerStats.SendAppendReq(m.Size())
+			}
+			p.send(m)
+			continue
+		}
+
+		if rok {
+			g.send(m)
+			continue
+		}
+
+		plog.Debugf("ignored message %s (sent to unknown peer %s)", m.Type, to)
+	}
+}
+
+func (t *Transport) Stop() {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	for _, r := range t.remotes {
+		r.stop()
+	}
+	for _, p := range t.peers {
+		p.stop()
+	}
+	t.pipelineProber.RemoveAll()
+	t.streamProber.RemoveAll()
+	if tr, ok := t.streamRt.(*http.Transport); ok {
+		tr.CloseIdleConnections()
+	}
+	if tr, ok := t.pipelineRt.(*http.Transport); ok {
+		tr.CloseIdleConnections()
+	}
+	t.peers = nil
+	t.remotes = nil
+}
+
+// CutPeer drops messages to the specified peer.
+func (t *Transport) CutPeer(id types.ID) {
+	t.mu.RLock()
+	p, pok := t.peers[id]
+	g, gok := t.remotes[id]
+	t.mu.RUnlock()
+
+	if pok {
+		p.(Pausable).Pause()
+	}
+	if gok {
+		g.Pause()
+	}
+}
+
+// MendPeer recovers the message dropping behavior of the given peer.
+func (t *Transport) MendPeer(id types.ID) {
+	t.mu.RLock()
+	p, pok := t.peers[id]
+	g, gok := t.remotes[id]
+	t.mu.RUnlock()
+
+	if pok {
+		p.(Pausable).Resume()
+	}
+	if gok {
+		g.Resume()
+	}
+}
+
+func (t *Transport) AddRemote(id types.ID, us []string) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	if t.remotes == nil {
+		// there's no clean way to shut down the golang http server
+		// (see: https://github.com/golang/go/issues/4674) before
+		// stopping the transport; ignore any new connections.
+		return
+	}
+	if _, ok := t.peers[id]; ok {
+		return
+	}
+	if _, ok := t.remotes[id]; ok {
+		return
+	}
+	urls, err := types.NewURLs(us)
+	if err != nil {
+		plog.Panicf("newURLs %+v should never fail: %+v", us, err)
+	}
+	t.remotes[id] = startRemote(t, urls, id)
+}
+
+func (t *Transport) AddPeer(id types.ID, us []string) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	if t.peers == nil {
+		panic("transport stopped")
+	}
+	if _, ok := t.peers[id]; ok {
+		return
+	}
+	urls, err := types.NewURLs(us)
+	if err != nil {
+		plog.Panicf("newURLs %+v should never fail: %+v", us, err)
+	}
+	fs := t.LeaderStats.Follower(id.String())
+	t.peers[id] = startPeer(t, urls, id, fs)
+	addPeerToProber(t.pipelineProber, id.String(), us, RoundTripperNameSnapshot, rtts)
+	addPeerToProber(t.streamProber, id.String(), us, RoundTripperNameRaftMessage, rtts)
+	plog.Infof("added peer %s", id)
+}
+
+func (t *Transport) RemovePeer(id types.ID) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	t.removePeer(id)
+}
+
+func (t *Transport) RemoveAllPeers() {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	for id := range t.peers {
+		t.removePeer(id)
+	}
+}
+
+// the caller of this function must hold the peers mutex.
+func (t *Transport) removePeer(id types.ID) {
+	if peer, ok := t.peers[id]; ok {
+		peer.stop()
+	} else {
+		plog.Panicf("unexpected removal of unknown peer '%d'", id)
+	}
+	delete(t.peers, id)
+	delete(t.LeaderStats.Followers, id.String())
+	t.pipelineProber.Remove(id.String())
+	t.streamProber.Remove(id.String())
+	plog.Infof("removed peer %s", id)
+}
+
+func (t *Transport) UpdatePeer(id types.ID, us []string) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	// TODO: return error or just panic?
+	if _, ok := t.peers[id]; !ok {
+		return
+	}
+	urls, err := types.NewURLs(us)
+	if err != nil {
+		plog.Panicf("newURLs %+v should never fail: %+v", us, err)
+	}
+	t.peers[id].update(urls)
+
+	t.pipelineProber.Remove(id.String())
+	addPeerToProber(t.pipelineProber, id.String(), us, RoundTripperNameSnapshot, rtts)
+	t.streamProber.Remove(id.String())
+	addPeerToProber(t.streamProber, id.String(), us, RoundTripperNameRaftMessage, rtts)
+	plog.Infof("updated peer %s", id)
+}
+
+func (t *Transport) ActiveSince(id types.ID) time.Time {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	if p, ok := t.peers[id]; ok {
+		return p.activeSince()
+	}
+	return time.Time{}
+}
+
+func (t *Transport) SendSnapshot(m snap.Message) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	p := t.peers[types.ID(m.To)]
+	if p == nil {
+		m.CloseWithError(errMemberNotFound)
+		return
+	}
+	p.sendSnap(m)
+}
+
+// Pausable is a testing interface for pausing transport traffic.
+type Pausable interface {
+	Pause()
+	Resume()
+}
+
+func (t *Transport) Pause() {
+	for _, p := range t.peers {
+		p.(Pausable).Pause()
+	}
+}
+
+func (t *Transport) Resume() {
+	for _, p := range t.peers {
+		p.(Pausable).Resume()
+	}
+}
+
+// ActivePeers returns the number of peers with an active
+// connection. A peer counts as active once its connection
+// has been established.
+func (t *Transport) ActivePeers() (cnt int) {
+	t.mu.RLock()
+	defer t.mu.RUnlock()
+	for _, p := range t.peers {
+		if !p.activeSince().IsZero() {
+			cnt++
+		}
+	}
+	return cnt
+}
+
+type nopTransporter struct{}
+
+func NewNopTransporter() Transporter {
+	return &nopTransporter{}
+}
+
+func (s *nopTransporter) Start() error                        { return nil }
+func (s *nopTransporter) Handler() http.Handler               { return nil }
+func (s *nopTransporter) Send(m []raftpb.Message)             {}
+func (s *nopTransporter) SendSnapshot(m snap.Message)         {}
+func (s *nopTransporter) AddRemote(id types.ID, us []string)  {}
+func (s *nopTransporter) AddPeer(id types.ID, us []string)    {}
+func (s *nopTransporter) RemovePeer(id types.ID)              {}
+func (s *nopTransporter) RemoveAllPeers()                     {}
+func (s *nopTransporter) UpdatePeer(id types.ID, us []string) {}
+func (s *nopTransporter) ActiveSince(id types.ID) time.Time   { return time.Time{} }
+func (s *nopTransporter) ActivePeers() int                    { return 0 }
+func (s *nopTransporter) Stop()                               {}
+func (s *nopTransporter) Pause()                              {}
+func (s *nopTransporter) Resume()                             {}
+
+type snapTransporter struct {
+	nopTransporter
+	snapDoneC chan snap.Message
+	snapDir   string
+}
+
+func NewSnapTransporter(snapDir string) (Transporter, <-chan snap.Message) {
+	ch := make(chan snap.Message, 1)
+	tr := &snapTransporter{snapDoneC: ch, snapDir: snapDir}
+	return tr, ch
+}
+
+func (s *snapTransporter) SendSnapshot(m snap.Message) {
+	ss := snap.New(s.snapDir)
+	ss.SaveDBFrom(m.ReadCloser, m.Snapshot.Metadata.Index+1)
+	m.CloseWithError(nil)
+	s.snapDoneC <- m
+}
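
The Transporter interface above is the whole contract a consumer needs. A minimal wiring sketch under stated assumptions: serveTransport is a hypothetical helper, and NewNopTransporter stands in for a fully configured Transport (its Handler is nil, so this shows lifecycle and mounting only, not real traffic):

    package main

    import (
    	"log"
    	"net/http"

    	"github.com/coreos/etcd/rafthttp"
    )

    // serveTransport starts a Transporter and serves its handler, which
    // covers the /raft, /raft/stream/ and /raft/snapshot endpoints; the
    // concrete Transport construction (Raft, stats, IDs) is elided here.
    func serveTransport(tr rafthttp.Transporter, addr string) error {
    	if err := tr.Start(); err != nil {
    		return err
    	}
    	defer tr.Stop()
    	return http.ListenAndServe(addr, tr.Handler())
    }

    func main() {
    	// A nop transporter is only a placeholder for the real Transport.
    	if err := serveTransport(rafthttp.NewNopTransporter(), ":2380"); err != nil {
    		log.Fatal(err)
    	}
    }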
diff --git a/vendor/github.com/coreos/etcd/rafthttp/urlpick.go b/vendor/github.com/coreos/etcd/rafthttp/urlpick.go
new file mode 100644
index 0000000..61839de
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/rafthttp/urlpick.go
@@ -0,0 +1,57 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafthttp
+
+import (
+	"net/url"
+	"sync"
+
+	"github.com/coreos/etcd/pkg/types"
+)
+
+type urlPicker struct {
+	mu     sync.Mutex // guards urls and picked
+	urls   types.URLs
+	picked int
+}
+
+func newURLPicker(urls types.URLs) *urlPicker {
+	return &urlPicker{
+		urls: urls,
+	}
+}
+
+func (p *urlPicker) update(urls types.URLs) {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	p.urls = urls
+	p.picked = 0
+}
+
+func (p *urlPicker) pick() url.URL {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	return p.urls[p.picked]
+}
+
+// unreachable notifies the picker that the given url is unreachable,
+// so that it can switch to another available url.
+func (p *urlPicker) unreachable(u url.URL) {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	if u == p.urls[p.picked] {
+		p.picked = (p.picked + 1) % len(p.urls)
+	}
+}
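
urlPicker advances to the next url only when the currently picked url is reported unreachable, which keeps concurrent failure reports from skipping healthy urls. A self-contained sketch of the same rotation logic, using plain strings instead of types.URLs:

    package main

    import (
    	"fmt"
    	"sync"
    )

    // picker rotates through candidate urls, advancing only when the
    // currently picked url is reported unreachable (mirrors urlPicker).
    type picker struct {
    	mu     sync.Mutex
    	urls   []string
    	picked int
    }

    func (p *picker) pick() string {
    	p.mu.Lock()
    	defer p.mu.Unlock()
    	return p.urls[p.picked]
    }

    func (p *picker) unreachable(u string) {
    	p.mu.Lock()
    	defer p.mu.Unlock()
    	// Only advance if the failed url is still the picked one, so
    	// concurrent failure reports do not skip past healthy urls.
    	if u == p.urls[p.picked] {
    		p.picked = (p.picked + 1) % len(p.urls)
    	}
    }

    func main() {
    	p := &picker{urls: []string{"https://a:2380", "https://b:2380"}}
    	u := p.pick()
    	p.unreachable(u)
    	fmt.Println(p.pick()) // https://b:2380
    }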
diff --git a/vendor/github.com/coreos/etcd/rafthttp/util.go b/vendor/github.com/coreos/etcd/rafthttp/util.go
new file mode 100644
index 0000000..6ec3641
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/rafthttp/util.go
@@ -0,0 +1,186 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafthttp
+
+import (
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"net/url"
+	"strings"
+	"time"
+
+	"github.com/coreos/etcd/pkg/transport"
+	"github.com/coreos/etcd/pkg/types"
+	"github.com/coreos/etcd/version"
+	"github.com/coreos/go-semver/semver"
+)
+
+var (
+	errMemberRemoved  = fmt.Errorf("the member has been permanently removed from the cluster")
+	errMemberNotFound = fmt.Errorf("member not found")
+)
+
+// NewListener returns a listener for raft message transfer between peers.
+// It uses a timeout listener to identify broken streams promptly.
+func NewListener(u url.URL, tlsinfo *transport.TLSInfo) (net.Listener, error) {
+	return transport.NewTimeoutListener(u.Host, u.Scheme, tlsinfo, ConnReadTimeout, ConnWriteTimeout)
+}
+
+// NewRoundTripper returns a roundTripper used to send requests
+// to the rafthttp listeners of remote peers.
+func NewRoundTripper(tlsInfo transport.TLSInfo, dialTimeout time.Duration) (http.RoundTripper, error) {
+	// It uses a timeout transport to pair with remote timeout listeners.
+	// It sets no read/write timeout, because a message in a request may
+	// take a long time to write out before the response is read.
+	return transport.NewTimeoutTransport(tlsInfo, dialTimeout, 0, 0)
+}
+
+// newStreamRoundTripper returns a roundTripper used to send stream requests
+// to the rafthttp listeners of remote peers.
+// A read/write timeout is set on the stream roundTripper to promptly
+// detect a broken connection, which minimizes the number of messages
+// sent on it.
+func newStreamRoundTripper(tlsInfo transport.TLSInfo, dialTimeout time.Duration) (http.RoundTripper, error) {
+	return transport.NewTimeoutTransport(tlsInfo, dialTimeout, ConnReadTimeout, ConnWriteTimeout)
+}
+
+// createPostRequest creates an HTTP POST request that sends a raft message.
+func createPostRequest(u url.URL, path string, body io.Reader, ct string, urls types.URLs, from, cid types.ID) *http.Request {
+	uu := u
+	uu.Path = path
+	req, err := http.NewRequest("POST", uu.String(), body)
+	if err != nil {
+		plog.Panicf("unexpected new request error (%v)", err)
+	}
+	req.Header.Set("Content-Type", ct)
+	req.Header.Set("X-Server-From", from.String())
+	req.Header.Set("X-Server-Version", version.Version)
+	req.Header.Set("X-Min-Cluster-Version", version.MinClusterVersion)
+	req.Header.Set("X-Etcd-Cluster-ID", cid.String())
+	setPeerURLsHeader(req, urls)
+
+	return req
+}
+
+// checkPostResponse checks the response of the HTTP POST request that sends
+// raft message.
+func checkPostResponse(resp *http.Response, body []byte, req *http.Request, to types.ID) error {
+	switch resp.StatusCode {
+	case http.StatusPreconditionFailed:
+		switch strings.TrimSuffix(string(body), "\n") {
+		case errIncompatibleVersion.Error():
+			plog.Errorf("request sent was ignored by peer %s (server version incompatible)", to)
+			return errIncompatibleVersion
+		case errClusterIDMismatch.Error():
+			plog.Errorf("request sent was ignored (cluster ID mismatch: remote[%s]=%s, local=%s)",
+				to, resp.Header.Get("X-Etcd-Cluster-ID"), req.Header.Get("X-Etcd-Cluster-ID"))
+			return errClusterIDMismatch
+		default:
+			return fmt.Errorf("unhandled error %q when precondition failed", string(body))
+		}
+	case http.StatusForbidden:
+		return errMemberRemoved
+	case http.StatusNoContent:
+		return nil
+	default:
+		return fmt.Errorf("unexpected http status %s while posting to %q", http.StatusText(resp.StatusCode), req.URL.String())
+	}
+}
+
+// reportCriticalError reports the given error by sending it into
+// the given error channel.
+// If the error channel is full when the error is sent, the error is
+// dropped, since the fact that an error happened has already been
+// reported, which is good enough.
+func reportCriticalError(err error, errc chan<- error) {
+	select {
+	case errc <- err:
+	default:
+	}
+}
+
+// compareMajorMinorVersion returns an integer comparing two versions based on
+// their major and minor version. The result will be 0 if a==b, -1 if a < b,
+// and 1 if a > b.
+func compareMajorMinorVersion(a, b *semver.Version) int {
+	na := &semver.Version{Major: a.Major, Minor: a.Minor}
+	nb := &semver.Version{Major: b.Major, Minor: b.Minor}
+	switch {
+	case na.LessThan(*nb):
+		return -1
+	case nb.LessThan(*na):
+		return 1
+	default:
+		return 0
+	}
+}
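
A short usage sketch of the major.minor masking that compareMajorMinorVersion performs; the version strings are illustrative:

    package main

    import (
    	"fmt"

    	"github.com/coreos/go-semver/semver"
    )

    func main() {
    	a := semver.Must(semver.NewVersion("3.2.9"))
    	b := semver.Must(semver.NewVersion("3.2.0"))
    	// Patch versions are masked out, so 3.2.9 and 3.2.0 compare equal.
    	na := &semver.Version{Major: a.Major, Minor: a.Minor}
    	nb := &semver.Version{Major: b.Major, Minor: b.Minor}
    	fmt.Println(na.LessThan(*nb), nb.LessThan(*na)) // false false
    }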
+
+// serverVersion returns the server version from the given header.
+func serverVersion(h http.Header) *semver.Version {
+	verStr := h.Get("X-Server-Version")
+	// backward compatibility with etcd 2.0
+	if verStr == "" {
+		verStr = "2.0.0"
+	}
+	return semver.Must(semver.NewVersion(verStr))
+}
+
+// minClusterVersion returns the min cluster version from the given header.
+func minClusterVersion(h http.Header) *semver.Version {
+	verStr := h.Get("X-Min-Cluster-Version")
+	// backward compatibility with etcd 2.0
+	if verStr == "" {
+		verStr = "2.0.0"
+	}
+	return semver.Must(semver.NewVersion(verStr))
+}
+
+// checkVersionCompability checks whether the given version is compatible
+// with the local version.
+func checkVersionCompability(name string, server, minCluster *semver.Version) error {
+	localServer := semver.Must(semver.NewVersion(version.Version))
+	localMinCluster := semver.Must(semver.NewVersion(version.MinClusterVersion))
+	if compareMajorMinorVersion(server, localMinCluster) == -1 {
+		return fmt.Errorf("remote version is too low: remote[%s]=%s, local=%s", name, server, localServer)
+	}
+	if compareMajorMinorVersion(minCluster, localServer) == 1 {
+		return fmt.Errorf("local version is too low: remote[%s]=%s, local=%s", name, server, localServer)
+	}
+	return nil
+}
+
+// setPeerURLsHeader reports local urls for peer discovery
+func setPeerURLsHeader(req *http.Request, urls types.URLs) {
+	if urls == nil {
+		// often not set in unit tests
+		return
+	}
+	peerURLs := make([]string, urls.Len())
+	for i := range urls {
+		peerURLs[i] = urls[i].String()
+	}
+	req.Header.Set("X-PeerURLs", strings.Join(peerURLs, ","))
+}
+
+// addRemoteFromRequest adds a remote peer according to an http request header
+func addRemoteFromRequest(tr Transporter, r *http.Request) {
+	if from, err := types.IDFromString(r.Header.Get("X-Server-From")); err == nil {
+		if urls := r.Header.Get("X-PeerURLs"); urls != "" {
+			tr.AddRemote(from, strings.Split(urls, ","))
+		}
+	}
+}
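
setPeerURLsHeader and addRemoteFromRequest agree on a simple convention: peer urls travel as one comma-separated X-PeerURLs header. A standalone sketch of that round trip using only net/http (the urls are made up):

    package main

    import (
    	"fmt"
    	"net/http"
    	"strings"
    )

    func main() {
    	req, err := http.NewRequest("POST", "http://peer:2380/raft", nil)
    	if err != nil {
    		panic(err)
    	}
    	// The sender advertises its peer urls as a comma-separated
    	// header, as setPeerURLsHeader does.
    	req.Header.Set("X-PeerURLs", strings.Join([]string{
    		"https://10.0.0.1:2380", "https://10.0.0.2:2380",
    	}, ","))

    	// The receiver splits the header back out, as
    	// addRemoteFromRequest does before registering the remote.
    	if urls := req.Header.Get("X-PeerURLs"); urls != "" {
    		fmt.Println(strings.Split(urls, ","))
    	}
    }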
diff --git a/vendor/github.com/coreos/etcd/snap/db.go b/vendor/github.com/coreos/etcd/snap/db.go
new file mode 100644
index 0000000..dcbd3bd
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/snap/db.go
@@ -0,0 +1,83 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package snap
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"time"
+
+	"github.com/coreos/etcd/pkg/fileutil"
+)
+
+var ErrNoDBSnapshot = errors.New("snap: snapshot file doesn't exist")
+
+// SaveDBFrom saves a snapshot of the database from the given reader.
+// It guarantees that the save operation is atomic.
+func (s *Snapshotter) SaveDBFrom(r io.Reader, id uint64) (int64, error) {
+	start := time.Now()
+
+	f, err := ioutil.TempFile(s.dir, "tmp")
+	if err != nil {
+		return 0, err
+	}
+	var n int64
+	n, err = io.Copy(f, r)
+	if err == nil {
+		fsyncStart := time.Now()
+		err = fileutil.Fsync(f)
+		snapDBFsyncSec.Observe(time.Since(fsyncStart).Seconds())
+	}
+	f.Close()
+	if err != nil {
+		os.Remove(f.Name())
+		return n, err
+	}
+	fn := s.dbFilePath(id)
+	if fileutil.Exist(fn) {
+		os.Remove(f.Name())
+		return n, nil
+	}
+	err = os.Rename(f.Name(), fn)
+	if err != nil {
+		os.Remove(f.Name())
+		return n, err
+	}
+
+	plog.Infof("saved database snapshot to disk [total bytes: %d]", n)
+
+	snapDBSaveSec.Observe(time.Since(start).Seconds())
+	return n, nil
+}
+
+// DBFilePath returns the file path for the snapshot of the database with
+// the given id. If the snapshot does not exist, it returns an error.
+func (s *Snapshotter) DBFilePath(id uint64) (string, error) {
+	if _, err := fileutil.ReadDir(s.dir); err != nil {
+		return "", err
+	}
+	if fn := s.dbFilePath(id); fileutil.Exist(fn) {
+		return fn, nil
+	}
+	return "", ErrNoDBSnapshot
+}
+
+func (s *Snapshotter) dbFilePath(id uint64) string {
+	return filepath.Join(s.dir, fmt.Sprintf("%016x.snap.db", id))
+}
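
SaveDBFrom gets its atomicity from the classic temp-file, fsync, rename sequence. A self-contained sketch of that pattern with a hypothetical atomicSave helper (standard library only; rename is atomic on POSIX filesystems):

    package main

    import (
    	"io"
    	"io/ioutil"
    	"os"
    	"path/filepath"
    	"strings"
    )

    // atomicSave writes r to dir/name so that readers never observe a
    // partially written file: data lands in a temp file, is fsynced,
    // and is then renamed into place.
    func atomicSave(dir, name string, r io.Reader) error {
    	f, err := ioutil.TempFile(dir, "tmp")
    	if err != nil {
    		return err
    	}
    	_, err = io.Copy(f, r)
    	if err == nil {
    		err = f.Sync()
    	}
    	if cerr := f.Close(); err == nil {
    		err = cerr
    	}
    	if err != nil {
    		os.Remove(f.Name()) // clean up the broken temp file
    		return err
    	}
    	return os.Rename(f.Name(), filepath.Join(dir, name))
    }

    func main() {
    	if err := atomicSave(os.TempDir(), "demo.snap.db", strings.NewReader("payload")); err != nil {
    		panic(err)
    	}
    }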
diff --git a/vendor/github.com/coreos/etcd/snap/message.go b/vendor/github.com/coreos/etcd/snap/message.go
new file mode 100644
index 0000000..d73713f
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/snap/message.go
@@ -0,0 +1,64 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package snap
+
+import (
+	"io"
+
+	"github.com/coreos/etcd/pkg/ioutil"
+	"github.com/coreos/etcd/raft/raftpb"
+)
+
+// Message is a struct that contains a raft Message and a ReadCloser. The type
+// of raft message MUST be MsgSnap, which contains the raft meta-data and an
+// additional data []byte field that contains the snapshot of the actual state
+// machine.
+// Message contains the ReadCloser field for handling large snapshots. This
+// avoids copying the entire snapshot into a byte array, which would consume
+// a lot of memory.
+//
+// The user of Message should close the Message after sending it.
+type Message struct {
+	raftpb.Message
+	ReadCloser io.ReadCloser
+	TotalSize  int64
+	closeC     chan bool
+}
+
+func NewMessage(rs raftpb.Message, rc io.ReadCloser, rcSize int64) *Message {
+	return &Message{
+		Message:    rs,
+		ReadCloser: ioutil.NewExactReadCloser(rc, rcSize),
+		TotalSize:  int64(rs.Size()) + rcSize,
+		closeC:     make(chan bool, 1),
+	}
+}
+
+// CloseNotify returns a channel that receives a single value
+// when the message sending is finished. true indicates the send
+// was successful.
+func (m Message) CloseNotify() <-chan bool {
+	return m.closeC
+}
+
+func (m Message) CloseWithError(err error) {
+	if cerr := m.ReadCloser.Close(); cerr != nil {
+		err = cerr
+	}
+	if err == nil {
+		m.closeC <- true
+	} else {
+		m.closeC <- false
+	}
+}
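
The buffered closeC channel is what lets CloseWithError report completion without blocking even if nobody is listening yet. A minimal sketch of the same one-shot completion pattern, with hypothetical names:

    package main

    import "fmt"

    // result carries a one-shot completion signal; the buffer of 1 lets
    // the sender report success or failure without blocking, as
    // Message's closeC does.
    type result struct{ doneC chan bool }

    func newResult() *result { return &result{doneC: make(chan bool, 1)} }

    func (r *result) finish(err error) { r.doneC <- (err == nil) }

    func main() {
    	r := newResult()
    	go r.finish(nil) // pretend the send succeeded
    	if ok := <-r.doneC; ok {
    		fmt.Println("snapshot sent")
    	}
    }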
diff --git a/vendor/github.com/coreos/etcd/snap/metrics.go b/vendor/github.com/coreos/etcd/snap/metrics.go
new file mode 100644
index 0000000..0d3b7e6
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/snap/metrics.go
@@ -0,0 +1,65 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package snap
+
+import "github.com/prometheus/client_golang/prometheus"
+
+var (
+	// TODO: save_fsync latency?
+	saveDurations = prometheus.NewHistogram(prometheus.HistogramOpts{
+		Namespace: "etcd_debugging",
+		Subsystem: "snap",
+		Name:      "save_total_duration_seconds",
+		Help:      "The total latency distributions of save called by snapshot.",
+		Buckets:   prometheus.ExponentialBuckets(0.001, 2, 14),
+	})
+
+	marshallingDurations = prometheus.NewHistogram(prometheus.HistogramOpts{
+		Namespace: "etcd_debugging",
+		Subsystem: "snap",
+		Name:      "save_marshalling_duration_seconds",
+		Help:      "The marshalling cost distributions of save called by snapshot.",
+		Buckets:   prometheus.ExponentialBuckets(0.001, 2, 14),
+	})
+
+	snapDBSaveSec = prometheus.NewHistogram(prometheus.HistogramOpts{
+		Namespace: "etcd",
+		Subsystem: "snap_db",
+		Name:      "save_total_duration_seconds",
+		Help:      "The total latency distributions of v3 snapshot save",
+
+		// lowest bucket start of upper bound 0.1 sec (100 ms) with factor 2
+		// highest bucket start of 0.1 sec * 2^9 == 51.2 sec
+		Buckets: prometheus.ExponentialBuckets(0.1, 2, 10),
+	})
+
+	snapDBFsyncSec = prometheus.NewHistogram(prometheus.HistogramOpts{
+		Namespace: "etcd",
+		Subsystem: "snap_db",
+		Name:      "fsync_duration_seconds",
+		Help:      "The latency distributions of fsyncing .snap.db file",
+
+		// lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2
+		// highest bucket start of 0.001 sec * 2^13 == 8.192 sec
+		Buckets: prometheus.ExponentialBuckets(0.001, 2, 14),
+	})
+)
+
+func init() {
+	prometheus.MustRegister(saveDurations)
+	prometheus.MustRegister(marshallingDurations)
+	prometheus.MustRegister(snapDBSaveSec)
+	prometheus.MustRegister(snapDBFsyncSec)
+}
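
Callers feed these histograms by timing an operation and observing the elapsed seconds. A minimal sketch with a hypothetical demo metric (namespace and buckets are illustrative, mirroring the fsync histogram above):

    package main

    import (
    	"fmt"
    	"time"

    	"github.com/prometheus/client_golang/prometheus"
    )

    var demoSaveSec = prometheus.NewHistogram(prometheus.HistogramOpts{
    	Namespace: "demo",
    	Subsystem: "snap",
    	Name:      "save_duration_seconds",
    	Help:      "Latency distribution of a demo save (illustrative only).",
    	// 1ms up to ~8s, matching the fsync histogram above.
    	Buckets: prometheus.ExponentialBuckets(0.001, 2, 14),
    })

    func init() { prometheus.MustRegister(demoSaveSec) }

    func main() {
    	start := time.Now()
    	time.Sleep(5 * time.Millisecond) // stand-in for the real save
    	demoSaveSec.Observe(time.Since(start).Seconds())
    	fmt.Println("observed one save")
    }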
diff --git a/vendor/github.com/coreos/etcd/snap/snappb/snap.proto b/vendor/github.com/coreos/etcd/snap/snappb/snap.proto
new file mode 100644
index 0000000..cd3d21d
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/snap/snappb/snap.proto
@@ -0,0 +1,14 @@
+syntax = "proto2";
+package snappb;
+
+import "gogoproto/gogo.proto";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.sizer_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+option (gogoproto.goproto_getters_all) = false;
+
+message snapshot {
+	optional uint32 crc  = 1 [(gogoproto.nullable) = false];
+	optional bytes data  = 2;
+}
diff --git a/vendor/github.com/coreos/etcd/snap/snapshotter.go b/vendor/github.com/coreos/etcd/snap/snapshotter.go
new file mode 100644
index 0000000..0075559
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/snap/snapshotter.go
@@ -0,0 +1,204 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package snap stores raft nodes' states with snapshots.
+package snap
+
+import (
+	"errors"
+	"fmt"
+	"hash/crc32"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sort"
+	"strings"
+	"time"
+
+	pioutil "github.com/coreos/etcd/pkg/ioutil"
+	"github.com/coreos/etcd/pkg/pbutil"
+	"github.com/coreos/etcd/raft"
+	"github.com/coreos/etcd/raft/raftpb"
+	"github.com/coreos/etcd/snap/snappb"
+
+	"github.com/coreos/pkg/capnslog"
+)
+
+const (
+	snapSuffix = ".snap"
+)
+
+var (
+	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "snap")
+
+	ErrNoSnapshot    = errors.New("snap: no available snapshot")
+	ErrEmptySnapshot = errors.New("snap: empty snapshot")
+	ErrCRCMismatch   = errors.New("snap: crc mismatch")
+	crcTable         = crc32.MakeTable(crc32.Castagnoli)
+
+	// A map of valid files that can be present in the snap folder.
+	validFiles = map[string]bool{
+		"db": true,
+	}
+)
+
+type Snapshotter struct {
+	dir string
+}
+
+func New(dir string) *Snapshotter {
+	return &Snapshotter{
+		dir: dir,
+	}
+}
+
+func (s *Snapshotter) SaveSnap(snapshot raftpb.Snapshot) error {
+	if raft.IsEmptySnap(snapshot) {
+		return nil
+	}
+	return s.save(&snapshot)
+}
+
+func (s *Snapshotter) save(snapshot *raftpb.Snapshot) error {
+	start := time.Now()
+
+	fname := fmt.Sprintf("%016x-%016x%s", snapshot.Metadata.Term, snapshot.Metadata.Index, snapSuffix)
+	b := pbutil.MustMarshal(snapshot)
+	crc := crc32.Update(0, crcTable, b)
+	snap := snappb.Snapshot{Crc: crc, Data: b}
+	d, err := snap.Marshal()
+	if err != nil {
+		return err
+	}
+	marshallingDurations.Observe(float64(time.Since(start)) / float64(time.Second))
+
+	err = pioutil.WriteAndSyncFile(filepath.Join(s.dir, fname), d, 0666)
+	if err == nil {
+		saveDurations.Observe(float64(time.Since(start)) / float64(time.Second))
+	} else {
+		err1 := os.Remove(filepath.Join(s.dir, fname))
+		if err1 != nil {
+			plog.Errorf("failed to remove broken snapshot file %s", filepath.Join(s.dir, fname))
+		}
+	}
+	return err
+}
+
+func (s *Snapshotter) Load() (*raftpb.Snapshot, error) {
+	names, err := s.snapNames()
+	if err != nil {
+		return nil, err
+	}
+	var snap *raftpb.Snapshot
+	for _, name := range names {
+		if snap, err = loadSnap(s.dir, name); err == nil {
+			break
+		}
+	}
+	if err != nil {
+		return nil, ErrNoSnapshot
+	}
+	return snap, nil
+}
+
+func loadSnap(dir, name string) (*raftpb.Snapshot, error) {
+	fpath := filepath.Join(dir, name)
+	snap, err := Read(fpath)
+	if err != nil {
+		renameBroken(fpath)
+	}
+	return snap, err
+}
+
+// Read reads the snapshot named by snapname and returns the snapshot.
+func Read(snapname string) (*raftpb.Snapshot, error) {
+	b, err := ioutil.ReadFile(snapname)
+	if err != nil {
+		plog.Errorf("cannot read file %v: %v", snapname, err)
+		return nil, err
+	}
+
+	if len(b) == 0 {
+		plog.Errorf("unexpected empty snapshot")
+		return nil, ErrEmptySnapshot
+	}
+
+	var serializedSnap snappb.Snapshot
+	if err = serializedSnap.Unmarshal(b); err != nil {
+		plog.Errorf("corrupted snapshot file %v: %v", snapname, err)
+		return nil, err
+	}
+
+	if len(serializedSnap.Data) == 0 || serializedSnap.Crc == 0 {
+		plog.Errorf("unexpected empty snapshot")
+		return nil, ErrEmptySnapshot
+	}
+
+	crc := crc32.Update(0, crcTable, serializedSnap.Data)
+	if crc != serializedSnap.Crc {
+		plog.Errorf("corrupted snapshot file %v: crc mismatch", snapname)
+		return nil, ErrCRCMismatch
+	}
+
+	var snap raftpb.Snapshot
+	if err = snap.Unmarshal(serializedSnap.Data); err != nil {
+		plog.Errorf("corrupted snapshot file %v: %v", snapname, err)
+		return nil, err
+	}
+	return &snap, nil
+}
+
+// snapNames returns the filenames of the snapshots in logical time order (from newest to oldest).
+// If there are no available snapshots, an ErrNoSnapshot will be returned.
+func (s *Snapshotter) snapNames() ([]string, error) {
+	dir, err := os.Open(s.dir)
+	if err != nil {
+		return nil, err
+	}
+	defer dir.Close()
+	names, err := dir.Readdirnames(-1)
+	if err != nil {
+		return nil, err
+	}
+	snaps := checkSuffix(names)
+	if len(snaps) == 0 {
+		return nil, ErrNoSnapshot
+	}
+	sort.Sort(sort.Reverse(sort.StringSlice(snaps)))
+	return snaps, nil
+}
+
+func checkSuffix(names []string) []string {
+	snaps := []string{}
+	for i := range names {
+		if strings.HasSuffix(names[i], snapSuffix) {
+			snaps = append(snaps, names[i])
+		} else {
+			// If we find a file which is not a snapshot then check if it's
+			// a valid file. If not, emit a warning.
+			if _, ok := validFiles[names[i]]; !ok {
+				plog.Warningf("skipped unexpected non snapshot file %v", names[i])
+			}
+		}
+	}
+	return snaps
+}
+
+func renameBroken(path string) {
+	brokenPath := path + ".broken"
+	if err := os.Rename(path, brokenPath); err != nil {
+		plog.Warningf("cannot rename broken snapshot file %v to %v: %v", path, brokenPath, err)
+	}
+}
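
Read trusts a snapshot only after recomputing the Castagnoli CRC over its data. A self-contained sketch of that crc framing, with plain byte slices standing in for snappb.Snapshot:

    package main

    import (
    	"errors"
    	"fmt"
    	"hash/crc32"
    )

    var crcTable = crc32.MakeTable(crc32.Castagnoli)

    type framed struct {
    	crc  uint32
    	data []byte
    }

    func seal(data []byte) framed {
    	return framed{crc: crc32.Update(0, crcTable, data), data: data}
    }

    // open re-checks the checksum before handing the payload back,
    // mirroring the crc mismatch path in Read above.
    func open(f framed) ([]byte, error) {
    	if crc32.Update(0, crcTable, f.data) != f.crc {
    		return nil, errors.New("snap: crc mismatch")
    	}
    	return f.data, nil
    }

    func main() {
    	f := seal([]byte("snapshot payload"))
    	f.data[0] ^= 0xff // simulate on-disk corruption
    	if _, err := open(f); err != nil {
    		fmt.Println(err) // snap: crc mismatch
    	}
    }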
diff --git a/vendor/github.com/coreos/etcd/store/doc.go b/vendor/github.com/coreos/etcd/store/doc.go
new file mode 100644
index 0000000..612df92
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/store/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package store defines etcd's in-memory key/value store.
+package store
diff --git a/vendor/github.com/coreos/etcd/store/event.go b/vendor/github.com/coreos/etcd/store/event.go
new file mode 100644
index 0000000..efcddb0
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/store/event.go
@@ -0,0 +1,71 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package store
+
+const (
+	Get              = "get"
+	Create           = "create"
+	Set              = "set"
+	Update           = "update"
+	Delete           = "delete"
+	CompareAndSwap   = "compareAndSwap"
+	CompareAndDelete = "compareAndDelete"
+	Expire           = "expire"
+)
+
+type Event struct {
+	Action    string      `json:"action"`
+	Node      *NodeExtern `json:"node,omitempty"`
+	PrevNode  *NodeExtern `json:"prevNode,omitempty"`
+	EtcdIndex uint64      `json:"-"`
+	Refresh   bool        `json:"refresh,omitempty"`
+}
+
+func newEvent(action string, key string, modifiedIndex, createdIndex uint64) *Event {
+	n := &NodeExtern{
+		Key:           key,
+		ModifiedIndex: modifiedIndex,
+		CreatedIndex:  createdIndex,
+	}
+
+	return &Event{
+		Action: action,
+		Node:   n,
+	}
+}
+
+func (e *Event) IsCreated() bool {
+	if e.Action == Create {
+		return true
+	}
+	return e.Action == Set && e.PrevNode == nil
+}
+
+func (e *Event) Index() uint64 {
+	return e.Node.ModifiedIndex
+}
+
+func (e *Event) Clone() *Event {
+	return &Event{
+		Action:    e.Action,
+		EtcdIndex: e.EtcdIndex,
+		Node:      e.Node.Clone(),
+		PrevNode:  e.PrevNode.Clone(),
+	}
+}
+
+func (e *Event) SetRefresh() {
+	e.Refresh = true
+}
diff --git a/vendor/github.com/coreos/etcd/store/event_history.go b/vendor/github.com/coreos/etcd/store/event_history.go
new file mode 100644
index 0000000..235d87a
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/store/event_history.go
@@ -0,0 +1,129 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package store
+
+import (
+	"fmt"
+	"path"
+	"strings"
+	"sync"
+
+	etcdErr "github.com/coreos/etcd/error"
+)
+
+type EventHistory struct {
+	Queue      eventQueue
+	StartIndex uint64
+	LastIndex  uint64
+	rwl        sync.RWMutex
+}
+
+func newEventHistory(capacity int) *EventHistory {
+	return &EventHistory{
+		Queue: eventQueue{
+			Capacity: capacity,
+			Events:   make([]*Event, capacity),
+		},
+	}
+}
+
+// addEvent adds an event into the eventHistory.
+func (eh *EventHistory) addEvent(e *Event) *Event {
+	eh.rwl.Lock()
+	defer eh.rwl.Unlock()
+
+	eh.Queue.insert(e)
+
+	eh.LastIndex = e.Index()
+
+	eh.StartIndex = eh.Queue.Events[eh.Queue.Front].Index()
+
+	return e
+}
+
+// scan enumerates events from the event history and stops at the first point
+// where the key matches.
+func (eh *EventHistory) scan(key string, recursive bool, index uint64) (*Event, *etcdErr.Error) {
+	eh.rwl.RLock()
+	defer eh.rwl.RUnlock()
+
+	// index should be after the event history's StartIndex
+	if index < eh.StartIndex {
+		return nil,
+			etcdErr.NewError(etcdErr.EcodeEventIndexCleared,
+				fmt.Sprintf("the requested history has been cleared [%v/%v]",
+					eh.StartIndex, index), 0)
+	}
+
+	// an index past LastIndex refers to an event that has not happened yet
+	if index > eh.LastIndex { // future index
+		return nil, nil
+	}
+
+	offset := index - eh.StartIndex
+	i := (eh.Queue.Front + int(offset)) % eh.Queue.Capacity
+
+	for {
+		e := eh.Queue.Events[i]
+
+		if !e.Refresh {
+			ok := (e.Node.Key == key)
+
+			if recursive {
+				// add trailing slash
+				nkey := path.Clean(key)
+				if nkey[len(nkey)-1] != '/' {
+					nkey = nkey + "/"
+				}
+
+				ok = ok || strings.HasPrefix(e.Node.Key, nkey)
+			}
+
+			if (e.Action == Delete || e.Action == Expire) && e.PrevNode != nil && e.PrevNode.Dir {
+				ok = ok || strings.HasPrefix(key, e.PrevNode.Key)
+			}
+
+			if ok {
+				return e, nil
+			}
+		}
+
+		i = (i + 1) % eh.Queue.Capacity
+
+		if i == eh.Queue.Back {
+			return nil, nil
+		}
+	}
+}
+
+// clone is protected by a stop-the-world lock,
+// so it does not need to obtain the internal lock
+func (eh *EventHistory) clone() *EventHistory {
+	clonedQueue := eventQueue{
+		Capacity: eh.Queue.Capacity,
+		Events:   make([]*Event, eh.Queue.Capacity),
+		Size:     eh.Queue.Size,
+		Front:    eh.Queue.Front,
+		Back:     eh.Queue.Back,
+	}
+
+	copy(clonedQueue.Events, eh.Queue.Events)
+	return &EventHistory{
+		StartIndex: eh.StartIndex,
+		Queue:      clonedQueue,
+		LastIndex:  eh.LastIndex,
+	}
+
+}
diff --git a/vendor/github.com/coreos/etcd/store/event_queue.go b/vendor/github.com/coreos/etcd/store/event_queue.go
new file mode 100644
index 0000000..767b835
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/store/event_queue.go
@@ -0,0 +1,34 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package store
+
+type eventQueue struct {
+	Events   []*Event
+	Size     int
+	Front    int
+	Back     int
+	Capacity int
+}
+
+func (eq *eventQueue) insert(e *Event) {
+	eq.Events[eq.Back] = e
+	eq.Back = (eq.Back + 1) % eq.Capacity
+
+	if eq.Size == eq.Capacity { //dequeue
+		eq.Front = (eq.Front + 1) % eq.Capacity
+	} else {
+		eq.Size++
+	}
+}
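
eventQueue is a fixed-capacity ring buffer: when full, inserting at the back overwrites the slot the front pointer then abandons. A standalone sketch with ints instead of *Event, plus a usage example:

    package main

    import "fmt"

    // ring is a fixed-capacity queue that overwrites its oldest entry
    // when full, like eventQueue above.
    type ring struct {
    	items             []int
    	size, front, back int
    }

    func newRing(capacity int) *ring { return &ring{items: make([]int, capacity)} }

    func (q *ring) insert(v int) {
    	q.items[q.back] = v
    	q.back = (q.back + 1) % len(q.items)
    	if q.size == len(q.items) { // full: drop the oldest entry
    		q.front = (q.front + 1) % len(q.items)
    	} else {
    		q.size++
    	}
    }

    func (q *ring) oldest() int { return q.items[q.front] }

    func main() {
    	q := newRing(3)
    	for v := 1; v <= 4; v++ {
    		q.insert(v)
    	}
    	fmt.Println(q.oldest()) // 2: the first entry was overwritten
    }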
diff --git a/vendor/github.com/coreos/etcd/store/metrics.go b/vendor/github.com/coreos/etcd/store/metrics.go
new file mode 100644
index 0000000..077c0fa
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/store/metrics.go
@@ -0,0 +1,132 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package store
+
+import (
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+// Set of raw Prometheus metrics.
+// Labels
+// * action = declared in event.go
+// * outcome = Outcome
+// Do not increment directly, use Report* methods.
+var (
+	readCounter = prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Namespace: "etcd_debugging",
+			Subsystem: "store",
+			Name:      "reads_total",
+			Help:      "Total number of reads action by (get/getRecursive), local to this member.",
+		}, []string{"action"})
+
+	writeCounter = prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Namespace: "etcd_debugging",
+			Subsystem: "store",
+			Name:      "writes_total",
+			Help:      "Total number of writes (e.g. set/compareAndDelete) seen by this member.",
+		}, []string{"action"})
+
+	readFailedCounter = prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Namespace: "etcd_debugging",
+			Subsystem: "store",
+			Name:      "reads_failed_total",
+			Help:      "Failed read actions by (get/getRecursive), local to this member.",
+		}, []string{"action"})
+
+	writeFailedCounter = prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Namespace: "etcd_debugging",
+			Subsystem: "store",
+			Name:      "writes_failed_total",
+			Help:      "Failed write actions (e.g. set/compareAndDelete), seen by this member.",
+		}, []string{"action"})
+
+	expireCounter = prometheus.NewCounter(
+		prometheus.CounterOpts{
+			Namespace: "etcd_debugging",
+			Subsystem: "store",
+			Name:      "expires_total",
+			Help:      "Total number of expired keys.",
+		})
+
+	watchRequests = prometheus.NewCounter(
+		prometheus.CounterOpts{
+			Namespace: "etcd_debugging",
+			Subsystem: "store",
+			Name:      "watch_requests_total",
+			Help:      "Total number of incoming watch requests (new or reestablished).",
+		})
+
+	watcherCount = prometheus.NewGauge(
+		prometheus.GaugeOpts{
+			Namespace: "etcd_debugging",
+			Subsystem: "store",
+			Name:      "watchers",
+			Help:      "Count of currently active watchers.",
+		})
+)
+
+const (
+	GetRecursive = "getRecursive"
+)
+
+func init() {
+	if prometheus.Register(readCounter) != nil {
+		// Tests will try to double register since the tests use both
+		// store and store_test packages; ignore second attempts.
+		return
+	}
+	prometheus.MustRegister(writeCounter)
+	prometheus.MustRegister(expireCounter)
+	prometheus.MustRegister(watchRequests)
+	prometheus.MustRegister(watcherCount)
+}
+
+func reportReadSuccess(read_action string) {
+	readCounter.WithLabelValues(read_action).Inc()
+}
+
+func reportReadFailure(read_action string) {
+	readCounter.WithLabelValues(read_action).Inc()
+	readFailedCounter.WithLabelValues(read_action).Inc()
+}
+
+func reportWriteSuccess(write_action string) {
+	writeCounter.WithLabelValues(write_action).Inc()
+}
+
+func reportWriteFailure(write_action string) {
+	writeCounter.WithLabelValues(write_action).Inc()
+	writeFailedCounter.WithLabelValues(write_action).Inc()
+}
+
+func reportExpiredKey() {
+	expireCounter.Inc()
+}
+
+func reportWatchRequest() {
+	watchRequests.Inc()
+}
+
+func reportWatcherAdded() {
+	watcherCount.Inc()
+}
+
+func reportWatcherRemoved() {
+	watcherCount.Dec()
+}
diff --git a/vendor/github.com/coreos/etcd/store/node.go b/vendor/github.com/coreos/etcd/store/node.go
new file mode 100644
index 0000000..c3c8743
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/store/node.go
@@ -0,0 +1,395 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package store
+
+import (
+	"path"
+	"sort"
+	"time"
+
+	etcdErr "github.com/coreos/etcd/error"
+	"github.com/jonboulle/clockwork"
+)
+
+// explanations of Compare function result
+const (
+	CompareMatch = iota
+	CompareIndexNotMatch
+	CompareValueNotMatch
+	CompareNotMatch
+)
+
+var Permanent time.Time
+
+// node is the basic element in the store system.
+// A key-value pair will have a string value.
+// A directory will have a children map.
+type node struct {
+	Path string
+
+	CreatedIndex  uint64
+	ModifiedIndex uint64
+
+	Parent *node `json:"-"` // should not encode this field! avoid circular dependency.
+
+	ExpireTime time.Time
+	Value      string           // for key-value pair
+	Children   map[string]*node // for directory
+
+	// A reference to the store this node is attached to.
+	store *store
+}
+
+// newKV creates a Key-Value pair
+func newKV(store *store, nodePath string, value string, createdIndex uint64, parent *node, expireTime time.Time) *node {
+	return &node{
+		Path:          nodePath,
+		CreatedIndex:  createdIndex,
+		ModifiedIndex: createdIndex,
+		Parent:        parent,
+		store:         store,
+		ExpireTime:    expireTime,
+		Value:         value,
+	}
+}
+
+// newDir creates a directory
+func newDir(store *store, nodePath string, createdIndex uint64, parent *node, expireTime time.Time) *node {
+	return &node{
+		Path:          nodePath,
+		CreatedIndex:  createdIndex,
+		ModifiedIndex: createdIndex,
+		Parent:        parent,
+		ExpireTime:    expireTime,
+		Children:      make(map[string]*node),
+		store:         store,
+	}
+}
+
+// IsHidden checks if the node is a hidden node. A hidden node
+// has a name that begins with '_'.
+// A hidden node will not be shown via the get command under a directory.
+// For example, if we have /foo/_hidden and /foo/notHidden, get "/foo"
+// will only return /foo/notHidden.
+func (n *node) IsHidden() bool {
+	_, name := path.Split(n.Path)
+
+	return name[0] == '_'
+}
+
+// IsPermanent checks if the node is a permanent one.
+func (n *node) IsPermanent() bool {
+	// we use an uninitialized time.Time to indicate the node is a
+	// permanent one.
+	// an uninitialized time.Time should equal zero.
+	return n.ExpireTime.IsZero()
+}
+
+// IsDir checks whether the node is a directory.
+// It returns true for a directory and false otherwise.
+func (n *node) IsDir() bool {
+	return n.Children != nil
+}
+
+// Read gets the value of the node.
+// If the receiver node is not a key-value pair, a "Not A File" error will be returned.
+func (n *node) Read() (string, *etcdErr.Error) {
+	if n.IsDir() {
+		return "", etcdErr.NewError(etcdErr.EcodeNotFile, "", n.store.CurrentIndex)
+	}
+
+	return n.Value, nil
+}
+
+// Write sets the value of the node to the given value.
+// If the receiver node is a directory, a "Not A File" error will be returned.
+func (n *node) Write(value string, index uint64) *etcdErr.Error {
+	if n.IsDir() {
+		return etcdErr.NewError(etcdErr.EcodeNotFile, "", n.store.CurrentIndex)
+	}
+
+	n.Value = value
+	n.ModifiedIndex = index
+
+	return nil
+}
+
+func (n *node) expirationAndTTL(clock clockwork.Clock) (*time.Time, int64) {
+	if !n.IsPermanent() {
+		/* compute ttl as:
+		   ceiling( (expireTime - timeNow) / nanosecondsPerSecond )
+		   which ranges from 1..n
+		   rather than as:
+		   ( (expireTime - timeNow) / nanosecondsPerSecond ) + 1
+		   which ranges 1..n+1
+		*/
+		ttlN := n.ExpireTime.Sub(clock.Now())
+		ttl := ttlN / time.Second
+		if (ttlN % time.Second) > 0 {
+			ttl++
+		}
+		t := n.ExpireTime.UTC()
+		return &t, int64(ttl)
+	}
+	return nil, 0
+}
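
expirationAndTTL rounds the remaining lifetime up to whole seconds, so a key that is about to expire still reports a TTL of 1 rather than 0. The ceiling computation in isolation, as a hypothetical ttlSeconds helper:

    package main

    import (
    	"fmt"
    	"time"
    )

    // ttlSeconds computes ceiling((expire - now) / 1s), which ranges
    // from 1..n for any still-live key.
    func ttlSeconds(expire, now time.Time) int64 {
    	d := expire.Sub(now)
    	ttl := d / time.Second
    	if d%time.Second > 0 {
    		ttl++
    	}
    	return int64(ttl)
    }

    func main() {
    	now := time.Now()
    	fmt.Println(ttlSeconds(now.Add(1500*time.Millisecond), now)) // 2
    	fmt.Println(ttlSeconds(now.Add(time.Nanosecond), now))       // 1
    }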
+
+// List returns a slice of nodes under the receiver node.
+// If the receiver node is not a directory, a "Not A Directory" error will be returned.
+func (n *node) List() ([]*node, *etcdErr.Error) {
+	if !n.IsDir() {
+		return nil, etcdErr.NewError(etcdErr.EcodeNotDir, "", n.store.CurrentIndex)
+	}
+
+	nodes := make([]*node, len(n.Children))
+
+	i := 0
+	for _, node := range n.Children {
+		nodes[i] = node
+		i++
+	}
+
+	return nodes, nil
+}
+
+// GetChild returns the child node with the given name under the directory node.
+// On success, it returns the child node; if no such child exists, it returns nil.
+func (n *node) GetChild(name string) (*node, *etcdErr.Error) {
+	if !n.IsDir() {
+		return nil, etcdErr.NewError(etcdErr.EcodeNotDir, n.Path, n.store.CurrentIndex)
+	}
+
+	child, ok := n.Children[name]
+
+	if ok {
+		return child, nil
+	}
+
+	return nil, nil
+}
+
+// Add adds a node to the receiver node.
+// If the receiver is not a directory, a "Not A Directory" error will be returned.
+// If there is an existing node with the same name under the directory, an
+// "Already Exist" error will be returned.
+func (n *node) Add(child *node) *etcdErr.Error {
+	if !n.IsDir() {
+		return etcdErr.NewError(etcdErr.EcodeNotDir, "", n.store.CurrentIndex)
+	}
+
+	_, name := path.Split(child.Path)
+
+	if _, ok := n.Children[name]; ok {
+		return etcdErr.NewError(etcdErr.EcodeNodeExist, "", n.store.CurrentIndex)
+	}
+
+	n.Children[name] = child
+
+	return nil
+}
+
+// Remove removes the node.
+func (n *node) Remove(dir, recursive bool, callback func(path string)) *etcdErr.Error {
+	if !n.IsDir() { // key-value pair
+		_, name := path.Split(n.Path)
+
+		// find its parent and remove the node from the map
+		if n.Parent != nil && n.Parent.Children[name] == n {
+			delete(n.Parent.Children, name)
+		}
+
+		if callback != nil {
+			callback(n.Path)
+		}
+
+		if !n.IsPermanent() {
+			n.store.ttlKeyHeap.remove(n)
+		}
+
+		return nil
+	}
+
+	if !dir {
+		// cannot delete a directory without dir set to true
+		return etcdErr.NewError(etcdErr.EcodeNotFile, n.Path, n.store.CurrentIndex)
+	}
+
+	if len(n.Children) != 0 && !recursive {
+		// cannot delete a directory if it is not empty and the operation
+		// is not recursive
+		return etcdErr.NewError(etcdErr.EcodeDirNotEmpty, n.Path, n.store.CurrentIndex)
+	}
+
+	for _, child := range n.Children { // delete all children
+		child.Remove(true, true, callback)
+	}
+
+	// delete self
+	_, name := path.Split(n.Path)
+	if n.Parent != nil && n.Parent.Children[name] == n {
+		delete(n.Parent.Children, name)
+
+		if callback != nil {
+			callback(n.Path)
+		}
+
+		if !n.IsPermanent() {
+			n.store.ttlKeyHeap.remove(n)
+		}
+	}
+
+	return nil
+}
+
+func (n *node) Repr(recursive, sorted bool, clock clockwork.Clock) *NodeExtern {
+	if n.IsDir() {
+		node := &NodeExtern{
+			Key:           n.Path,
+			Dir:           true,
+			ModifiedIndex: n.ModifiedIndex,
+			CreatedIndex:  n.CreatedIndex,
+		}
+		node.Expiration, node.TTL = n.expirationAndTTL(clock)
+
+		if !recursive {
+			return node
+		}
+
+		children, _ := n.List()
+		node.Nodes = make(NodeExterns, len(children))
+
+		// we do not use the index in the children slice directly;
+		// we need to skip the hidden nodes
+		i := 0
+
+		for _, child := range children {
+
+			if child.IsHidden() { // get will not list hidden node
+				continue
+			}
+
+			node.Nodes[i] = child.Repr(recursive, sorted, clock)
+
+			i++
+		}
+
+		// eliminate hidden nodes
+		node.Nodes = node.Nodes[:i]
+		if sorted {
+			sort.Sort(node.Nodes)
+		}
+
+		return node
+	}
+
+	// n.Value could be changed later, so we need to copy the value out
+	value := n.Value
+	node := &NodeExtern{
+		Key:           n.Path,
+		Value:         &value,
+		ModifiedIndex: n.ModifiedIndex,
+		CreatedIndex:  n.CreatedIndex,
+	}
+	node.Expiration, node.TTL = n.expirationAndTTL(clock)
+	return node
+}
+
+func (n *node) UpdateTTL(expireTime time.Time) {
+	if !n.IsPermanent() {
+		if expireTime.IsZero() {
+			// from ttl to permanent
+			n.ExpireTime = expireTime
+			// remove from ttl heap
+			n.store.ttlKeyHeap.remove(n)
+			return
+		}
+
+		// update ttl
+		n.ExpireTime = expireTime
+		// update ttl heap
+		n.store.ttlKeyHeap.update(n)
+		return
+	}
+
+	if expireTime.IsZero() {
+		return
+	}
+
+	// from permanent to ttl
+	n.ExpireTime = expireTime
+	// push into ttl heap
+	n.store.ttlKeyHeap.push(n)
+}
+
+// Compare compares the node's index and value with the provided ones.
+// The second result value explains the outcome and equals one of the Compare.. constants.
+func (n *node) Compare(prevValue string, prevIndex uint64) (ok bool, which int) {
+	indexMatch := (prevIndex == 0 || n.ModifiedIndex == prevIndex)
+	valueMatch := (prevValue == "" || n.Value == prevValue)
+	ok = valueMatch && indexMatch
+	switch {
+	case valueMatch && indexMatch:
+		which = CompareMatch
+	case indexMatch && !valueMatch:
+		which = CompareValueNotMatch
+	case valueMatch && !indexMatch:
+		which = CompareIndexNotMatch
+	default:
+		which = CompareNotMatch
+	}
+	return ok, which
+}
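
Compare treats an empty prevValue and a zero prevIndex as wildcards, which is what makes compare-and-swap with only one condition possible. A reduced sketch of just the match logic:

    package main

    import "fmt"

    // compare reports whether a stored (value, index) pair matches the
    // caller's expectation; an empty prevValue or a zero prevIndex acts
    // as a wildcard, as in node.Compare above.
    func compare(value string, index uint64, prevValue string, prevIndex uint64) bool {
    	indexMatch := prevIndex == 0 || index == prevIndex
    	valueMatch := prevValue == "" || value == prevValue
    	return indexMatch && valueMatch
    }

    func main() {
    	fmt.Println(compare("bar", 7, "bar", 0)) // true: index is a wildcard
    	fmt.Println(compare("bar", 7, "", 8))    // false: index mismatch
    }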
+
+// Clone clones the node recursively and returns the new node.
+// If the node is a directory, it will clone all the content under this directory.
+// If the node is a key-value pair, it will clone the pair.
+func (n *node) Clone() *node {
+	if !n.IsDir() {
+		newkv := newKV(n.store, n.Path, n.Value, n.CreatedIndex, n.Parent, n.ExpireTime)
+		newkv.ModifiedIndex = n.ModifiedIndex
+		return newkv
+	}
+
+	clone := newDir(n.store, n.Path, n.CreatedIndex, n.Parent, n.ExpireTime)
+	clone.ModifiedIndex = n.ModifiedIndex
+
+	for key, child := range n.Children {
+		clone.Children[key] = child.Clone()
+	}
+
+	return clone
+}
+
+// recoverAndclean helps to do recovery.
+// Two things need to be done: 1. recover the structure; 2. delete expired nodes.
+//
+// If the node is a directory, it recovers the children's parent pointers and recursively
+// calls this function on its children.
+// We check the expiration last since we need to recover the whole structure first and add all the
+// notifications into the event history.
+func (n *node) recoverAndclean() {
+	if n.IsDir() {
+		for _, child := range n.Children {
+			child.Parent = n
+			child.store = n.store
+			child.recoverAndclean()
+		}
+	}
+
+	if !n.ExpireTime.IsZero() {
+		n.store.ttlKeyHeap.push(n)
+	}
+}
diff --git a/vendor/github.com/coreos/etcd/store/node_extern.go b/vendor/github.com/coreos/etcd/store/node_extern.go
new file mode 100644
index 0000000..7ba870c
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/store/node_extern.go
@@ -0,0 +1,116 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package store
+
+import (
+	"sort"
+	"time"
+
+	"github.com/jonboulle/clockwork"
+)
+
+// NodeExtern is the external representation of the
+// internal node with additional fields.
+// PrevValue is the previous value of the node.
+// TTL is the time to live, in seconds.
+type NodeExtern struct {
+	Key           string      `json:"key,omitempty"`
+	Value         *string     `json:"value,omitempty"`
+	Dir           bool        `json:"dir,omitempty"`
+	Expiration    *time.Time  `json:"expiration,omitempty"`
+	TTL           int64       `json:"ttl,omitempty"`
+	Nodes         NodeExterns `json:"nodes,omitempty"`
+	ModifiedIndex uint64      `json:"modifiedIndex,omitempty"`
+	CreatedIndex  uint64      `json:"createdIndex,omitempty"`
+}
+
+func (eNode *NodeExtern) loadInternalNode(n *node, recursive, sorted bool, clock clockwork.Clock) {
+	if n.IsDir() { // node is a directory
+		eNode.Dir = true
+
+		children, _ := n.List()
+		eNode.Nodes = make(NodeExterns, len(children))
+
+		// we do not use the index in the children slice directly;
+		// we need to skip the hidden ones
+		i := 0
+
+		for _, child := range children {
+			if child.IsHidden() { // get will not return hidden nodes
+				continue
+			}
+
+			eNode.Nodes[i] = child.Repr(recursive, sorted, clock)
+			i++
+		}
+
+		// eliminate hidden nodes
+		eNode.Nodes = eNode.Nodes[:i]
+
+		if sorted {
+			sort.Sort(eNode.Nodes)
+		}
+
+	} else { // node is a file
+		value, _ := n.Read()
+		eNode.Value = &value
+	}
+
+	eNode.Expiration, eNode.TTL = n.expirationAndTTL(clock)
+}
+
+func (eNode *NodeExtern) Clone() *NodeExtern {
+	if eNode == nil {
+		return nil
+	}
+	nn := &NodeExtern{
+		Key:           eNode.Key,
+		Dir:           eNode.Dir,
+		TTL:           eNode.TTL,
+		ModifiedIndex: eNode.ModifiedIndex,
+		CreatedIndex:  eNode.CreatedIndex,
+	}
+	if eNode.Value != nil {
+		s := *eNode.Value
+		nn.Value = &s
+	}
+	if eNode.Expiration != nil {
+		t := *eNode.Expiration
+		nn.Expiration = &t
+	}
+	if eNode.Nodes != nil {
+		nn.Nodes = make(NodeExterns, len(eNode.Nodes))
+		for i, n := range eNode.Nodes {
+			nn.Nodes[i] = n.Clone()
+		}
+	}
+	return nn
+}
+
+type NodeExterns []*NodeExtern
+
+// interfaces for sorting
+
+func (ns NodeExterns) Len() int {
+	return len(ns)
+}
+
+func (ns NodeExterns) Less(i, j int) bool {
+	return ns[i].Key < ns[j].Key
+}
+
+func (ns NodeExterns) Swap(i, j int) {
+	ns[i], ns[j] = ns[j], ns[i]
+}
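
Illustration (not part of the vendored diff): NodeExterns implements sort.Interface ordered by Key, which is what Get with sorted=true relies on. A small usage sketch, assuming this vendor tree's import path:

package main

import (
	"fmt"
	"sort"

	"github.com/coreos/etcd/store"
)

func main() {
	nodes := store.NodeExterns{
		{Key: "/cfg/b"},
		{Key: "/cfg/a"},
		{Key: "/cfg/c"},
	}
	// sort.Sort uses the Len/Less/Swap methods defined above, ordering by Key.
	sort.Sort(nodes)
	for _, n := range nodes {
		fmt.Println(n.Key) // /cfg/a, /cfg/b, /cfg/c
	}
}
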
diff --git a/vendor/github.com/coreos/etcd/store/stats.go b/vendor/github.com/coreos/etcd/store/stats.go
new file mode 100644
index 0000000..ce464dd
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/store/stats.go
@@ -0,0 +1,145 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package store
+
+import (
+	"encoding/json"
+	"sync/atomic"
+)
+
+const (
+	SetSuccess = iota
+	SetFail
+	DeleteSuccess
+	DeleteFail
+	CreateSuccess
+	CreateFail
+	UpdateSuccess
+	UpdateFail
+	CompareAndSwapSuccess
+	CompareAndSwapFail
+	GetSuccess
+	GetFail
+	ExpireCount
+	CompareAndDeleteSuccess
+	CompareAndDeleteFail
+)
+
+type Stats struct {
+	// Number of get requests
+
+	GetSuccess uint64 `json:"getsSuccess"`
+	GetFail    uint64 `json:"getsFail"`
+
+	// Number of set requests
+
+	SetSuccess uint64 `json:"setsSuccess"`
+	SetFail    uint64 `json:"setsFail"`
+
+	// Number of delete requests
+
+	DeleteSuccess uint64 `json:"deleteSuccess"`
+	DeleteFail    uint64 `json:"deleteFail"`
+
+	// Number of update requests
+
+	UpdateSuccess uint64 `json:"updateSuccess"`
+	UpdateFail    uint64 `json:"updateFail"`
+
+	// Number of create requests
+
+	CreateSuccess uint64 `json:"createSuccess"`
+	CreateFail    uint64 `json:"createFail"`
+
+	// Number of testAndSet requests
+
+	CompareAndSwapSuccess uint64 `json:"compareAndSwapSuccess"`
+	CompareAndSwapFail    uint64 `json:"compareAndSwapFail"`
+
+	// Number of compareAndDelete requests
+
+	CompareAndDeleteSuccess uint64 `json:"compareAndDeleteSuccess"`
+	CompareAndDeleteFail    uint64 `json:"compareAndDeleteFail"`
+
+	ExpireCount uint64 `json:"expireCount"`
+
+	Watchers uint64 `json:"watchers"`
+}
+
+func newStats() *Stats {
+	s := new(Stats)
+	return s
+}
+
+func (s *Stats) clone() *Stats {
+	return &Stats{
+		GetSuccess:              s.GetSuccess,
+		GetFail:                 s.GetFail,
+		SetSuccess:              s.SetSuccess,
+		SetFail:                 s.SetFail,
+		DeleteSuccess:           s.DeleteSuccess,
+		DeleteFail:              s.DeleteFail,
+		UpdateSuccess:           s.UpdateSuccess,
+		UpdateFail:              s.UpdateFail,
+		CreateSuccess:           s.CreateSuccess,
+		CreateFail:              s.CreateFail,
+		CompareAndSwapSuccess:   s.CompareAndSwapSuccess,
+		CompareAndSwapFail:      s.CompareAndSwapFail,
+		CompareAndDeleteSuccess: s.CompareAndDeleteSuccess,
+		CompareAndDeleteFail:    s.CompareAndDeleteFail,
+		ExpireCount:             s.ExpireCount,
+		Watchers:                s.Watchers,
+	}
+}
+
+func (s *Stats) toJson() []byte {
+	b, _ := json.Marshal(s)
+	return b
+}
+
+func (s *Stats) Inc(field int) {
+	switch field {
+	case SetSuccess:
+		atomic.AddUint64(&s.SetSuccess, 1)
+	case SetFail:
+		atomic.AddUint64(&s.SetFail, 1)
+	case CreateSuccess:
+		atomic.AddUint64(&s.CreateSuccess, 1)
+	case CreateFail:
+		atomic.AddUint64(&s.CreateFail, 1)
+	case DeleteSuccess:
+		atomic.AddUint64(&s.DeleteSuccess, 1)
+	case DeleteFail:
+		atomic.AddUint64(&s.DeleteFail, 1)
+	case GetSuccess:
+		atomic.AddUint64(&s.GetSuccess, 1)
+	case GetFail:
+		atomic.AddUint64(&s.GetFail, 1)
+	case UpdateSuccess:
+		atomic.AddUint64(&s.UpdateSuccess, 1)
+	case UpdateFail:
+		atomic.AddUint64(&s.UpdateFail, 1)
+	case CompareAndSwapSuccess:
+		atomic.AddUint64(&s.CompareAndSwapSuccess, 1)
+	case CompareAndSwapFail:
+		atomic.AddUint64(&s.CompareAndSwapFail, 1)
+	case CompareAndDeleteSuccess:
+		atomic.AddUint64(&s.CompareAndDeleteSuccess, 1)
+	case CompareAndDeleteFail:
+		atomic.AddUint64(&s.CompareAndDeleteFail, 1)
+	case ExpireCount:
+		atomic.AddUint64(&s.ExpireCount, 1)
+	}
+}
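
Illustration (not part of the vendored diff): Inc dispatches to atomic.AddUint64, so the counters can be bumped from many goroutines without a mutex. A sketch, assuming this vendor tree's import path:

package main

import (
	"fmt"
	"sync"

	"github.com/coreos/etcd/store"
)

func main() {
	s := &store.Stats{}

	// Inc is safe for concurrent use; each call is an atomic increment.
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			s.Inc(store.SetSuccess)
		}()
	}
	wg.Wait()
	fmt.Println(s.SetSuccess) // 100
}
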
diff --git a/vendor/github.com/coreos/etcd/store/store.go b/vendor/github.com/coreos/etcd/store/store.go
new file mode 100644
index 0000000..edf7f21
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/store/store.go
@@ -0,0 +1,791 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package store
+
+import (
+	"encoding/json"
+	"fmt"
+	"path"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	etcdErr "github.com/coreos/etcd/error"
+	"github.com/coreos/etcd/pkg/types"
+	"github.com/jonboulle/clockwork"
+)
+
+// The default version to set when the store is first initialized.
+const defaultVersion = 2
+
+var minExpireTime time.Time
+
+func init() {
+	minExpireTime, _ = time.Parse(time.RFC3339, "2000-01-01T00:00:00Z")
+}
+
+type Store interface {
+	Version() int
+	Index() uint64
+
+	Get(nodePath string, recursive, sorted bool) (*Event, error)
+	Set(nodePath string, dir bool, value string, expireOpts TTLOptionSet) (*Event, error)
+	Update(nodePath string, newValue string, expireOpts TTLOptionSet) (*Event, error)
+	Create(nodePath string, dir bool, value string, unique bool,
+		expireOpts TTLOptionSet) (*Event, error)
+	CompareAndSwap(nodePath string, prevValue string, prevIndex uint64,
+		value string, expireOpts TTLOptionSet) (*Event, error)
+	Delete(nodePath string, dir, recursive bool) (*Event, error)
+	CompareAndDelete(nodePath string, prevValue string, prevIndex uint64) (*Event, error)
+
+	Watch(prefix string, recursive, stream bool, sinceIndex uint64) (Watcher, error)
+
+	Save() ([]byte, error)
+	Recovery(state []byte) error
+
+	Clone() Store
+	SaveNoCopy() ([]byte, error)
+
+	JsonStats() []byte
+	DeleteExpiredKeys(cutoff time.Time)
+
+	HasTTLKeys() bool
+}
+
+type TTLOptionSet struct {
+	ExpireTime time.Time
+	Refresh    bool
+}
+
+type store struct {
+	Root           *node
+	WatcherHub     *watcherHub
+	CurrentIndex   uint64
+	Stats          *Stats
+	CurrentVersion int
+	ttlKeyHeap     *ttlKeyHeap  // needs to be recovered manually
+	worldLock      sync.RWMutex // stop the world lock
+	clock          clockwork.Clock
+	readonlySet    types.Set
+}
+
+// New creates a store where the given namespaces will be created as initial directories.
+func New(namespaces ...string) Store {
+	s := newStore(namespaces...)
+	s.clock = clockwork.NewRealClock()
+	return s
+}
+
+func newStore(namespaces ...string) *store {
+	s := new(store)
+	s.CurrentVersion = defaultVersion
+	s.Root = newDir(s, "/", s.CurrentIndex, nil, Permanent)
+	for _, namespace := range namespaces {
+		s.Root.Add(newDir(s, namespace, s.CurrentIndex, s.Root, Permanent))
+	}
+	s.Stats = newStats()
+	s.WatcherHub = newWatchHub(1000)
+	s.ttlKeyHeap = newTtlKeyHeap()
+	s.readonlySet = types.NewUnsafeSet(append(namespaces, "/")...)
+	return s
+}
+
+// Version retrieves the current version of the store.
+func (s *store) Version() int {
+	return s.CurrentVersion
+}
+
+// Index retrieves the current index of the store.
+func (s *store) Index() uint64 {
+	s.worldLock.RLock()
+	defer s.worldLock.RUnlock()
+	return s.CurrentIndex
+}
+
+// Get returns a get event.
+// If recursive is true, it will return all the content under the node path.
+// If sorted is true, it will sort the content by keys.
+func (s *store) Get(nodePath string, recursive, sorted bool) (*Event, error) {
+	var err *etcdErr.Error
+
+	s.worldLock.RLock()
+	defer s.worldLock.RUnlock()
+
+	defer func() {
+		if err == nil {
+			s.Stats.Inc(GetSuccess)
+			if recursive {
+				reportReadSuccess(GetRecursive)
+			} else {
+				reportReadSuccess(Get)
+			}
+			return
+		}
+
+		s.Stats.Inc(GetFail)
+		if recursive {
+			reportReadFailure(GetRecursive)
+		} else {
+			reportReadFailure(Get)
+		}
+	}()
+
+	n, err := s.internalGet(nodePath)
+	if err != nil {
+		return nil, err
+	}
+
+	e := newEvent(Get, nodePath, n.ModifiedIndex, n.CreatedIndex)
+	e.EtcdIndex = s.CurrentIndex
+	e.Node.loadInternalNode(n, recursive, sorted, s.clock)
+
+	return e, nil
+}
+
+// Create creates the node at nodePath. Create will create intermediate directories with no TTL.
+// If the node already exists, Create will fail.
+// If any node on the path is a file, Create will fail.
+func (s *store) Create(nodePath string, dir bool, value string, unique bool, expireOpts TTLOptionSet) (*Event, error) {
+	var err *etcdErr.Error
+
+	s.worldLock.Lock()
+	defer s.worldLock.Unlock()
+
+	defer func() {
+		if err == nil {
+			s.Stats.Inc(CreateSuccess)
+			reportWriteSuccess(Create)
+			return
+		}
+
+		s.Stats.Inc(CreateFail)
+		reportWriteFailure(Create)
+	}()
+
+	e, err := s.internalCreate(nodePath, dir, value, unique, false, expireOpts.ExpireTime, Create)
+	if err != nil {
+		return nil, err
+	}
+
+	e.EtcdIndex = s.CurrentIndex
+	s.WatcherHub.notify(e)
+
+	return e, nil
+}
+
+// Set creates or replaces the node at nodePath.
+func (s *store) Set(nodePath string, dir bool, value string, expireOpts TTLOptionSet) (*Event, error) {
+	var err *etcdErr.Error
+
+	s.worldLock.Lock()
+	defer s.worldLock.Unlock()
+
+	defer func() {
+		if err == nil {
+			s.Stats.Inc(SetSuccess)
+			reportWriteSuccess(Set)
+			return
+		}
+
+		s.Stats.Inc(SetFail)
+		reportWriteFailure(Set)
+	}()
+
+	// Get prevNode value
+	n, getErr := s.internalGet(nodePath)
+	if getErr != nil && getErr.ErrorCode != etcdErr.EcodeKeyNotFound {
+		err = getErr
+		return nil, err
+	}
+
+	if expireOpts.Refresh {
+		if getErr != nil {
+			err = getErr
+			return nil, err
+		} else {
+			value = n.Value
+		}
+	}
+
+	// Set new value
+	e, err := s.internalCreate(nodePath, dir, value, false, true, expireOpts.ExpireTime, Set)
+	if err != nil {
+		return nil, err
+	}
+	e.EtcdIndex = s.CurrentIndex
+
+	// Put prevNode into event
+	if getErr == nil {
+		prev := newEvent(Get, nodePath, n.ModifiedIndex, n.CreatedIndex)
+		prev.Node.loadInternalNode(n, false, false, s.clock)
+		e.PrevNode = prev.Node
+	}
+
+	if !expireOpts.Refresh {
+		s.WatcherHub.notify(e)
+	} else {
+		e.SetRefresh()
+		s.WatcherHub.add(e)
+	}
+
+	return e, nil
+}
+
+// getCompareFailCause returns a user-readable cause of a failed comparison.
+func getCompareFailCause(n *node, which int, prevValue string, prevIndex uint64) string {
+	switch which {
+	case CompareIndexNotMatch:
+		return fmt.Sprintf("[%v != %v]", prevIndex, n.ModifiedIndex)
+	case CompareValueNotMatch:
+		return fmt.Sprintf("[%v != %v]", prevValue, n.Value)
+	default:
+		return fmt.Sprintf("[%v != %v] [%v != %v]", prevValue, n.Value, prevIndex, n.ModifiedIndex)
+	}
+}
+
+func (s *store) CompareAndSwap(nodePath string, prevValue string, prevIndex uint64,
+	value string, expireOpts TTLOptionSet) (*Event, error) {
+
+	var err *etcdErr.Error
+
+	s.worldLock.Lock()
+	defer s.worldLock.Unlock()
+
+	defer func() {
+		if err == nil {
+			s.Stats.Inc(CompareAndSwapSuccess)
+			reportWriteSuccess(CompareAndSwap)
+			return
+		}
+
+		s.Stats.Inc(CompareAndSwapFail)
+		reportWriteFailure(CompareAndSwap)
+	}()
+
+	nodePath = path.Clean(path.Join("/", nodePath))
+	// we do not allow the user to change "/"
+	if s.readonlySet.Contains(nodePath) {
+		return nil, etcdErr.NewError(etcdErr.EcodeRootROnly, "/", s.CurrentIndex)
+	}
+
+	n, err := s.internalGet(nodePath)
+	if err != nil {
+		return nil, err
+	}
+	if n.IsDir() { // can only compare and swap file
+		err = etcdErr.NewError(etcdErr.EcodeNotFile, nodePath, s.CurrentIndex)
+		return nil, err
+	}
+
+	// If both prevValue and prevIndex are given, we will test both of them.
+	// The command will be executed only if both tests are successful.
+	if ok, which := n.Compare(prevValue, prevIndex); !ok {
+		cause := getCompareFailCause(n, which, prevValue, prevIndex)
+		err = etcdErr.NewError(etcdErr.EcodeTestFailed, cause, s.CurrentIndex)
+		return nil, err
+	}
+
+	if expireOpts.Refresh {
+		value = n.Value
+	}
+
+	// update etcd index
+	s.CurrentIndex++
+
+	e := newEvent(CompareAndSwap, nodePath, s.CurrentIndex, n.CreatedIndex)
+	e.EtcdIndex = s.CurrentIndex
+	e.PrevNode = n.Repr(false, false, s.clock)
+	eNode := e.Node
+
+	// if the test succeeds, write the value
+	n.Write(value, s.CurrentIndex)
+	n.UpdateTTL(expireOpts.ExpireTime)
+
+	// copy the value for safety
+	valueCopy := value
+	eNode.Value = &valueCopy
+	eNode.Expiration, eNode.TTL = n.expirationAndTTL(s.clock)
+
+	if !expireOpts.Refresh {
+		s.WatcherHub.notify(e)
+	} else {
+		e.SetRefresh()
+		s.WatcherHub.add(e)
+	}
+
+	return e, nil
+}
+
+// Delete deletes the node at the given path.
+// If the node is a directory, recursive must be true to delete it.
+func (s *store) Delete(nodePath string, dir, recursive bool) (*Event, error) {
+	var err *etcdErr.Error
+
+	s.worldLock.Lock()
+	defer s.worldLock.Unlock()
+
+	defer func() {
+		if err == nil {
+			s.Stats.Inc(DeleteSuccess)
+			reportWriteSuccess(Delete)
+			return
+		}
+
+		s.Stats.Inc(DeleteFail)
+		reportWriteFailure(Delete)
+	}()
+
+	nodePath = path.Clean(path.Join("/", nodePath))
+	// we do not allow the user to change "/"
+	if s.readonlySet.Contains(nodePath) {
+		return nil, etcdErr.NewError(etcdErr.EcodeRootROnly, "/", s.CurrentIndex)
+	}
+
+	// recursive implies dir
+	if recursive {
+		dir = true
+	}
+
+	n, err := s.internalGet(nodePath)
+	if err != nil { // if the node does not exist, return error
+		return nil, err
+	}
+
+	nextIndex := s.CurrentIndex + 1
+	e := newEvent(Delete, nodePath, nextIndex, n.CreatedIndex)
+	e.EtcdIndex = nextIndex
+	e.PrevNode = n.Repr(false, false, s.clock)
+	eNode := e.Node
+
+	if n.IsDir() {
+		eNode.Dir = true
+	}
+
+	callback := func(path string) { // notify function
+		// notify the watchers with deleted set true
+		s.WatcherHub.notifyWatchers(e, path, true)
+	}
+
+	err = n.Remove(dir, recursive, callback)
+	if err != nil {
+		return nil, err
+	}
+
+	// update etcd index
+	s.CurrentIndex++
+
+	s.WatcherHub.notify(e)
+
+	return e, nil
+}
+
+func (s *store) CompareAndDelete(nodePath string, prevValue string, prevIndex uint64) (*Event, error) {
+	var err *etcdErr.Error
+
+	s.worldLock.Lock()
+	defer s.worldLock.Unlock()
+
+	defer func() {
+		if err == nil {
+			s.Stats.Inc(CompareAndDeleteSuccess)
+			reportWriteSuccess(CompareAndDelete)
+			return
+		}
+
+		s.Stats.Inc(CompareAndDeleteFail)
+		reportWriteFailure(CompareAndDelete)
+	}()
+
+	nodePath = path.Clean(path.Join("/", nodePath))
+
+	n, err := s.internalGet(nodePath)
+	if err != nil { // if the node does not exist, return error
+		return nil, err
+	}
+	if n.IsDir() { // can only compare and delete file
+		return nil, etcdErr.NewError(etcdErr.EcodeNotFile, nodePath, s.CurrentIndex)
+	}
+
+	// If both prevValue and prevIndex are given, we will test both of them.
+	// The command will be executed only if both tests are successful.
+	if ok, which := n.Compare(prevValue, prevIndex); !ok {
+		cause := getCompareFailCause(n, which, prevValue, prevIndex)
+		return nil, etcdErr.NewError(etcdErr.EcodeTestFailed, cause, s.CurrentIndex)
+	}
+
+	// update etcd index
+	s.CurrentIndex++
+
+	e := newEvent(CompareAndDelete, nodePath, s.CurrentIndex, n.CreatedIndex)
+	e.EtcdIndex = s.CurrentIndex
+	e.PrevNode = n.Repr(false, false, s.clock)
+
+	callback := func(path string) { // notify function
+		// notify the watchers with deleted set true
+		s.WatcherHub.notifyWatchers(e, path, true)
+	}
+
+	err = n.Remove(false, false, callback)
+	if err != nil {
+		return nil, err
+	}
+
+	s.WatcherHub.notify(e)
+
+	return e, nil
+}
+
+func (s *store) Watch(key string, recursive, stream bool, sinceIndex uint64) (Watcher, error) {
+	s.worldLock.RLock()
+	defer s.worldLock.RUnlock()
+
+	key = path.Clean(path.Join("/", key))
+	if sinceIndex == 0 {
+		sinceIndex = s.CurrentIndex + 1
+	}
+	// WatcherHub does not know about the current index, so we need to pass it in
+	w, err := s.WatcherHub.watch(key, recursive, stream, sinceIndex, s.CurrentIndex)
+	if err != nil {
+		return nil, err
+	}
+
+	return w, nil
+}
+
+// walk walks the nodePath and applies the walkFunc on each directory along the way
+func (s *store) walk(nodePath string, walkFunc func(prev *node, component string) (*node, *etcdErr.Error)) (*node, *etcdErr.Error) {
+	components := strings.Split(nodePath, "/")
+
+	curr := s.Root
+	var err *etcdErr.Error
+
+	for i := 1; i < len(components); i++ {
+		if len(components[i]) == 0 { // ignore empty string
+			return curr, nil
+		}
+
+		curr, err = walkFunc(curr, components[i])
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return curr, nil
+}
+
+// Update updates the value/ttl of the node.
+// If the node is a file, the value and the ttl can be updated.
+// If the node is a directory, only the ttl can be updated.
+func (s *store) Update(nodePath string, newValue string, expireOpts TTLOptionSet) (*Event, error) {
+	var err *etcdErr.Error
+
+	s.worldLock.Lock()
+	defer s.worldLock.Unlock()
+
+	defer func() {
+		if err == nil {
+			s.Stats.Inc(UpdateSuccess)
+			reportWriteSuccess(Update)
+			return
+		}
+
+		s.Stats.Inc(UpdateFail)
+		reportWriteFailure(Update)
+	}()
+
+	nodePath = path.Clean(path.Join("/", nodePath))
+	// we do not allow the user to change "/"
+	if s.readonlySet.Contains(nodePath) {
+		return nil, etcdErr.NewError(etcdErr.EcodeRootROnly, "/", s.CurrentIndex)
+	}
+
+	currIndex, nextIndex := s.CurrentIndex, s.CurrentIndex+1
+
+	n, err := s.internalGet(nodePath)
+	if err != nil { // if the node does not exist, return error
+		return nil, err
+	}
+	if n.IsDir() && len(newValue) != 0 {
+		// if the node is a directory, we cannot update its value to a non-empty one
+		return nil, etcdErr.NewError(etcdErr.EcodeNotFile, nodePath, currIndex)
+	}
+
+	if expireOpts.Refresh {
+		newValue = n.Value
+	}
+
+	e := newEvent(Update, nodePath, nextIndex, n.CreatedIndex)
+	e.EtcdIndex = nextIndex
+	e.PrevNode = n.Repr(false, false, s.clock)
+	eNode := e.Node
+
+	n.Write(newValue, nextIndex)
+
+	if n.IsDir() {
+		eNode.Dir = true
+	} else {
+		// copy the value for safety
+		newValueCopy := newValue
+		eNode.Value = &newValueCopy
+	}
+
+	// update ttl
+	n.UpdateTTL(expireOpts.ExpireTime)
+
+	eNode.Expiration, eNode.TTL = n.expirationAndTTL(s.clock)
+
+	if !expireOpts.Refresh {
+		s.WatcherHub.notify(e)
+	} else {
+		e.SetRefresh()
+		s.WatcherHub.add(e)
+	}
+
+	s.CurrentIndex = nextIndex
+
+	return e, nil
+}
+
+func (s *store) internalCreate(nodePath string, dir bool, value string, unique, replace bool,
+	expireTime time.Time, action string) (*Event, *etcdErr.Error) {
+
+	currIndex, nextIndex := s.CurrentIndex, s.CurrentIndex+1
+
+	if unique { // append unique item under the node path
+		nodePath += "/" + fmt.Sprintf("%020s", strconv.FormatUint(nextIndex, 10))
+	}
+
+	nodePath = path.Clean(path.Join("/", nodePath))
+
+	// we do not allow the user to change "/"
+	if s.readonlySet.Contains(nodePath) {
+		return nil, etcdErr.NewError(etcdErr.EcodeRootROnly, "/", currIndex)
+	}
+
+	// Assume expire times that are way in the past are permanent.
+	// This can occur when the time is serialized to JS.
+	if expireTime.Before(minExpireTime) {
+		expireTime = Permanent
+	}
+
+	dirName, nodeName := path.Split(nodePath)
+
+	// walk through the nodePath, create dirs and get the last directory node
+	d, err := s.walk(dirName, s.checkDir)
+
+	if err != nil {
+		s.Stats.Inc(SetFail)
+		reportWriteFailure(action)
+		err.Index = currIndex
+		return nil, err
+	}
+
+	e := newEvent(action, nodePath, nextIndex, nextIndex)
+	eNode := e.Node
+
+	n, _ := d.GetChild(nodeName)
+
+	// replace will try to replace an existing file
+	if n != nil {
+		if replace {
+			if n.IsDir() {
+				return nil, etcdErr.NewError(etcdErr.EcodeNotFile, nodePath, currIndex)
+			}
+			e.PrevNode = n.Repr(false, false, s.clock)
+
+			n.Remove(false, false, nil)
+		} else {
+			return nil, etcdErr.NewError(etcdErr.EcodeNodeExist, nodePath, currIndex)
+		}
+	}
+
+	if !dir { // create file
+		// copy the value for safety
+		valueCopy := value
+		eNode.Value = &valueCopy
+
+		n = newKV(s, nodePath, value, nextIndex, d, expireTime)
+
+	} else { // create directory
+		eNode.Dir = true
+
+		n = newDir(s, nodePath, nextIndex, d, expireTime)
+	}
+
+	// we are sure d is a directory and does not have a child with the name n.Name
+	d.Add(n)
+
+	// node with TTL
+	if !n.IsPermanent() {
+		s.ttlKeyHeap.push(n)
+
+		eNode.Expiration, eNode.TTL = n.expirationAndTTL(s.clock)
+	}
+
+	s.CurrentIndex = nextIndex
+
+	return e, nil
+}
+
+// internalGet gets the node of the given nodePath.
+func (s *store) internalGet(nodePath string) (*node, *etcdErr.Error) {
+	nodePath = path.Clean(path.Join("/", nodePath))
+
+	walkFunc := func(parent *node, name string) (*node, *etcdErr.Error) {
+
+		if !parent.IsDir() {
+			err := etcdErr.NewError(etcdErr.EcodeNotDir, parent.Path, s.CurrentIndex)
+			return nil, err
+		}
+
+		child, ok := parent.Children[name]
+		if ok {
+			return child, nil
+		}
+
+		return nil, etcdErr.NewError(etcdErr.EcodeKeyNotFound, path.Join(parent.Path, name), s.CurrentIndex)
+	}
+
+	f, err := s.walk(nodePath, walkFunc)
+
+	if err != nil {
+		return nil, err
+	}
+	return f, nil
+}
+
+// DeleteExpiredKeys will delete all expired keys
+func (s *store) DeleteExpiredKeys(cutoff time.Time) {
+	s.worldLock.Lock()
+	defer s.worldLock.Unlock()
+
+	for {
+		node := s.ttlKeyHeap.top()
+		if node == nil || node.ExpireTime.After(cutoff) {
+			break
+		}
+
+		s.CurrentIndex++
+		e := newEvent(Expire, node.Path, s.CurrentIndex, node.CreatedIndex)
+		e.EtcdIndex = s.CurrentIndex
+		e.PrevNode = node.Repr(false, false, s.clock)
+		if node.IsDir() {
+			e.Node.Dir = true
+		}
+
+		callback := func(path string) { // notify function
+			// notify the watchers with deleted set true
+			s.WatcherHub.notifyWatchers(e, path, true)
+		}
+
+		s.ttlKeyHeap.pop()
+		node.Remove(true, true, callback)
+
+		reportExpiredKey()
+		s.Stats.Inc(ExpireCount)
+
+		s.WatcherHub.notify(e)
+	}
+
+}
+
+// checkDir will check whether the component is a directory under the parent node.
+// If it is a directory, this function will return the pointer to that node.
+// If it does not exist, this function will create a new directory and return the pointer to that node.
+// If it is a file, this function will return an error.
+func (s *store) checkDir(parent *node, dirName string) (*node, *etcdErr.Error) {
+	node, ok := parent.Children[dirName]
+
+	if ok {
+		if node.IsDir() {
+			return node, nil
+		}
+
+		return nil, etcdErr.NewError(etcdErr.EcodeNotDir, node.Path, s.CurrentIndex)
+	}
+
+	n := newDir(s, path.Join(parent.Path, dirName), s.CurrentIndex+1, parent, Permanent)
+
+	parent.Children[dirName] = n
+
+	return n, nil
+}
+
+// Save saves the static state of the store system.
+// It will not be able to save the state of watchers.
+// It will not save the parent field of the nodes; otherwise there would
+// be a cyclic dependency issue for the json package.
+func (s *store) Save() ([]byte, error) {
+	b, err := json.Marshal(s.Clone())
+	if err != nil {
+		return nil, err
+	}
+
+	return b, nil
+}
+
+func (s *store) SaveNoCopy() ([]byte, error) {
+	b, err := json.Marshal(s)
+	if err != nil {
+		return nil, err
+	}
+
+	return b, nil
+}
+
+func (s *store) Clone() Store {
+	s.worldLock.Lock()
+
+	clonedStore := newStore()
+	clonedStore.CurrentIndex = s.CurrentIndex
+	clonedStore.Root = s.Root.Clone()
+	clonedStore.WatcherHub = s.WatcherHub.clone()
+	clonedStore.Stats = s.Stats.clone()
+	clonedStore.CurrentVersion = s.CurrentVersion
+
+	s.worldLock.Unlock()
+	return clonedStore
+}
+
+// Recovery recovers the store system from a static state.
+// It needs to recover the parent field of the nodes.
+// It needs to delete the nodes that have expired since the saved time and also
+// needs to create monitoring goroutines.
+func (s *store) Recovery(state []byte) error {
+	s.worldLock.Lock()
+	defer s.worldLock.Unlock()
+	err := json.Unmarshal(state, s)
+
+	if err != nil {
+		return err
+	}
+
+	s.ttlKeyHeap = newTtlKeyHeap()
+
+	s.Root.recoverAndclean()
+	return nil
+}
+
+func (s *store) JsonStats() []byte {
+	s.Stats.Watchers = uint64(s.WatcherHub.count)
+	return s.Stats.toJson()
+}
+
+func (s *store) HasTTLKeys() bool {
+	s.worldLock.RLock()
+	defer s.worldLock.RUnlock()
+	return s.ttlKeyHeap.Len() != 0
+}
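
Illustration (not part of the vendored diff): a hedged end-to-end sketch of the Store interface defined above, assuming this vendor tree's import path. A zero TTLOptionSet means the key is permanent, and CompareAndSwap treats an empty prevValue or a zero prevIndex as a wildcard:

package main

import (
	"fmt"
	"time"

	"github.com/coreos/etcd/store"
)

func main() {
	s := store.New()

	// Create a permanent key (zero ExpireTime means no TTL).
	if _, err := s.Create("/foo", false, "bar", false, store.TTLOptionSet{}); err != nil {
		panic(err)
	}

	// CAS: prevValue must match; prevIndex 0 acts as a wildcard.
	if _, err := s.CompareAndSwap("/foo", "bar", 0, "baz", store.TTLOptionSet{}); err != nil {
		panic(err)
	}

	ev, err := s.Get("/foo", false, false)
	if err != nil {
		panic(err)
	}
	fmt.Println(*ev.Node.Value) // baz

	// Set with a TTL; DeleteExpiredKeys(cutoff) will reap it once expired.
	opts := store.TTLOptionSet{ExpireTime: time.Now().Add(time.Minute)}
	if _, err := s.Set("/session", false, "alive", opts); err != nil {
		panic(err)
	}
	fmt.Println(s.HasTTLKeys()) // true
}
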
diff --git a/vendor/github.com/coreos/etcd/store/ttl_key_heap.go b/vendor/github.com/coreos/etcd/store/ttl_key_heap.go
new file mode 100644
index 0000000..21ae9b7
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/store/ttl_key_heap.go
@@ -0,0 +1,99 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package store
+
+import (
+	"container/heap"
+)
+
+// A ttlKeyHeap is a min-heap of TTL keys ordered by expiration time
+type ttlKeyHeap struct {
+	array  []*node
+	keyMap map[*node]int
+}
+
+func newTtlKeyHeap() *ttlKeyHeap {
+	h := &ttlKeyHeap{keyMap: make(map[*node]int)}
+	heap.Init(h)
+	return h
+}
+
+func (h ttlKeyHeap) Len() int {
+	return len(h.array)
+}
+
+func (h ttlKeyHeap) Less(i, j int) bool {
+	return h.array[i].ExpireTime.Before(h.array[j].ExpireTime)
+}
+
+func (h ttlKeyHeap) Swap(i, j int) {
+	// swap node
+	h.array[i], h.array[j] = h.array[j], h.array[i]
+
+	// update map
+	h.keyMap[h.array[i]] = i
+	h.keyMap[h.array[j]] = j
+}
+
+func (h *ttlKeyHeap) Push(x interface{}) {
+	n, _ := x.(*node)
+	h.keyMap[n] = len(h.array)
+	h.array = append(h.array, n)
+}
+
+func (h *ttlKeyHeap) Pop() interface{} {
+	old := h.array
+	n := len(old)
+	x := old[n-1]
+	// Set the slice element to nil, so the GC can recycle the node.
+	// This is because the Go GC doesn't support partial recycling:
+	// https://github.com/golang/go/issues/9618
+	old[n-1] = nil
+	h.array = old[0 : n-1]
+	delete(h.keyMap, x)
+	return x
+}
+
+func (h *ttlKeyHeap) top() *node {
+	if h.Len() != 0 {
+		return h.array[0]
+	}
+	return nil
+}
+
+func (h *ttlKeyHeap) pop() *node {
+	x := heap.Pop(h)
+	n, _ := x.(*node)
+	return n
+}
+
+func (h *ttlKeyHeap) push(x interface{}) {
+	heap.Push(h, x)
+}
+
+func (h *ttlKeyHeap) update(n *node) {
+	index, ok := h.keyMap[n]
+	if ok {
+		heap.Remove(h, index)
+		heap.Push(h, n)
+	}
+}
+
+func (h *ttlKeyHeap) remove(n *node) {
+	index, ok := h.keyMap[n]
+	if ok {
+		heap.Remove(h, index)
+	}
+}
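
Illustration (not part of the vendored diff): ttlKeyHeap pairs container/heap with a node-to-index map so that arbitrary entries can be removed or re-prioritized in O(log n), not just the minimum. The same pattern with plain ints:

package main

import (
	"container/heap"
	"fmt"
)

// intHeap mirrors ttlKeyHeap's pattern: a min-heap plus a position map.
type intHeap struct {
	array []int
	pos   map[int]int
}

func (h intHeap) Len() int           { return len(h.array) }
func (h intHeap) Less(i, j int) bool { return h.array[i] < h.array[j] }

func (h intHeap) Swap(i, j int) {
	h.array[i], h.array[j] = h.array[j], h.array[i]
	h.pos[h.array[i]] = i
	h.pos[h.array[j]] = j
}

func (h *intHeap) Push(x interface{}) {
	n := x.(int)
	h.pos[n] = len(h.array)
	h.array = append(h.array, n)
}

func (h *intHeap) Pop() interface{} {
	n := len(h.array)
	x := h.array[n-1]
	h.array = h.array[:n-1]
	delete(h.pos, x)
	return x
}

func main() {
	h := &intHeap{pos: make(map[int]int)}
	heap.Init(h)
	for _, v := range []int{5, 1, 9} {
		heap.Push(h, v)
	}
	heap.Remove(h, h.pos[5]) // remove an arbitrary element via the position map
	fmt.Println(heap.Pop(h)) // 1 (the minimum of the remaining {1, 9})
}
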
diff --git a/vendor/github.com/coreos/etcd/store/watcher.go b/vendor/github.com/coreos/etcd/store/watcher.go
new file mode 100644
index 0000000..a236ec7
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/store/watcher.go
@@ -0,0 +1,95 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package store
+
+type Watcher interface {
+	EventChan() chan *Event
+	StartIndex() uint64 // The EtcdIndex at which the Watcher was created
+	Remove()
+}
+
+type watcher struct {
+	eventChan  chan *Event
+	stream     bool
+	recursive  bool
+	sinceIndex uint64
+	startIndex uint64
+	hub        *watcherHub
+	removed    bool
+	remove     func()
+}
+
+func (w *watcher) EventChan() chan *Event {
+	return w.eventChan
+}
+
+func (w *watcher) StartIndex() uint64 {
+	return w.startIndex
+}
+
+// notify notifies the watcher. If the watcher is interested in the given path,
+// the function will return true.
+func (w *watcher) notify(e *Event, originalPath bool, deleted bool) bool {
+	// The watcher is interested in the path in three cases and under one condition:
+	// the condition is that the event happens after the watcher's sinceIndex.
+
+	// 1. the path at which the event happens is the path the watcher is watching at.
+	// For example if the watcher is watching at "/foo" and the event happens at "/foo",
+	// the watcher must be interested in that event.
+
+	// 2. the watcher is a recursive watcher and is interested in events happening under
+	// its watching path. For example if watcher A watches at "/foo" and is a recursive
+	// one, it will be interested in an event happening at "/foo/bar".
+
+	// 3. when we delete a directory, we need to force notify all the watchers who watch
+	// the files we are about to delete.
+	// For example a watcher is watching at "/foo/bar" and we delete "/foo". The watcher
+	// should get notified even if "/foo" is not the path it is watching.
+	if (w.recursive || originalPath || deleted) && e.Index() >= w.sinceIndex {
+		// We cannot block here if the eventChan capacity is full, otherwise
+		// etcd will hang. eventChan capacity is full when the rate of
+		// notifications is higher than our send rate.
+		// If this happens, we close the channel.
+		select {
+		case w.eventChan <- e:
+		default:
+			// We have missed a notification. Remove the watcher.
+			// Removing the watcher also closes the eventChan.
+			w.remove()
+		}
+		return true
+	}
+	return false
+}
+
+// Remove removes the watcher from watcherHub
+// The actual remove function is guaranteed to only be executed once
+func (w *watcher) Remove() {
+	w.hub.mutex.Lock()
+	defer w.hub.mutex.Unlock()
+
+	close(w.eventChan)
+	if w.remove != nil {
+		w.remove()
+	}
+}
+
+// nopWatcher is a watcher that receives nothing, always blocking.
+type nopWatcher struct{}
+
+func NewNopWatcher() Watcher                 { return &nopWatcher{} }
+func (w *nopWatcher) EventChan() chan *Event { return nil }
+func (w *nopWatcher) StartIndex() uint64     { return 0 }
+func (w *nopWatcher) Remove()                {}
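
Illustration (not part of the vendored diff): the notify method above relies on Go's select/default for a non-blocking channel send, dropping the watcher rather than stalling the notifier when a consumer falls behind. The pattern in isolation:

package main

import "fmt"

// trySend mirrors watcher.notify's non-blocking send: if the buffered
// channel is full, report failure instead of blocking the notifier.
func trySend(ch chan int, v int) bool {
	select {
	case ch <- v:
		return true
	default:
		return false // consumer fell behind; the caller can drop the receiver
	}
}

func main() {
	ch := make(chan int, 1)
	fmt.Println(trySend(ch, 1)) // true: buffer has room
	fmt.Println(trySend(ch, 2)) // false: buffer full, a blocking send would hang
}
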
diff --git a/vendor/github.com/coreos/etcd/store/watcher_hub.go b/vendor/github.com/coreos/etcd/store/watcher_hub.go
new file mode 100644
index 0000000..13c23e3
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/store/watcher_hub.go
@@ -0,0 +1,200 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package store
+
+import (
+	"container/list"
+	"path"
+	"strings"
+	"sync"
+	"sync/atomic"
+
+	etcdErr "github.com/coreos/etcd/error"
+)
+
+// A watcherHub contains all subscribed watchers.
+// watchers is a map with the watched path as key and watcher as value.
+// EventHistory keeps the old events for the watcherHub. It is used to help
+// a watcher get a continuous event history; otherwise a watcher might miss
+// events that happen between the end of the first watch command and the start
+// of the second command.
+type watcherHub struct {
+	// count must be the first element to keep 64-bit alignment for atomic
+	// access
+
+	count int64 // current number of watchers.
+
+	mutex        sync.Mutex
+	watchers     map[string]*list.List
+	EventHistory *EventHistory
+}
+
+// newWatchHub creates a watcherHub. The capacity determines how many events we will
+// keep in the eventHistory.
+// Typically, we only need to keep a small amount of history (smaller than 20K).
+// Ideally, it should be smaller than 20K/s (max throughput) * 2 * 50ms (RTT) = 2000.
+func newWatchHub(capacity int) *watcherHub {
+	return &watcherHub{
+		watchers:     make(map[string]*list.List),
+		EventHistory: newEventHistory(capacity),
+	}
+}
+
+// watch returns a Watcher.
+// If recursive is true, the first change after index under key will be sent to the event channel of the watcher.
+// If recursive is false, the first change after index at key will be sent to the event channel of the watcher.
+// If index is zero, watch will start from the current index + 1.
+func (wh *watcherHub) watch(key string, recursive, stream bool, index, storeIndex uint64) (Watcher, *etcdErr.Error) {
+	reportWatchRequest()
+	event, err := wh.EventHistory.scan(key, recursive, index)
+
+	if err != nil {
+		err.Index = storeIndex
+		return nil, err
+	}
+
+	w := &watcher{
+		eventChan:  make(chan *Event, 100), // use a buffered channel
+		recursive:  recursive,
+		stream:     stream,
+		sinceIndex: index,
+		startIndex: storeIndex,
+		hub:        wh,
+	}
+
+	wh.mutex.Lock()
+	defer wh.mutex.Unlock()
+	// If the event exists in the known history, set the EtcdIndex and return immediately
+	if event != nil {
+		ne := event.Clone()
+		ne.EtcdIndex = storeIndex
+		w.eventChan <- ne
+		return w, nil
+	}
+
+	l, ok := wh.watchers[key]
+
+	var elem *list.Element
+
+	if ok { // add the new watcher to the back of the list
+		elem = l.PushBack(w)
+	} else { // create a new list and add the new watcher
+		l = list.New()
+		elem = l.PushBack(w)
+		wh.watchers[key] = l
+	}
+
+	w.remove = func() {
+		if w.removed { // avoid removing it twice
+			return
+		}
+		w.removed = true
+		l.Remove(elem)
+		atomic.AddInt64(&wh.count, -1)
+		reportWatcherRemoved()
+		if l.Len() == 0 {
+			delete(wh.watchers, key)
+		}
+	}
+
+	atomic.AddInt64(&wh.count, 1)
+	reportWatcherAdded()
+
+	return w, nil
+}
+
+func (wh *watcherHub) add(e *Event) {
+	wh.EventHistory.addEvent(e)
+}
+
+// notify accepts an event and notifies the watchers.
+func (wh *watcherHub) notify(e *Event) {
+	e = wh.EventHistory.addEvent(e) // add event into the eventHistory
+
+	segments := strings.Split(e.Node.Key, "/")
+
+	currPath := "/"
+
+	// walk through all the segments of the path and notify the watchers
+	// if the path is "/foo/bar", it will notify watchers with path "/",
+	// "/foo" and "/foo/bar"
+
+	for _, segment := range segments {
+		currPath = path.Join(currPath, segment)
+		// notify the watchers who are interested in changes to the current path
+		wh.notifyWatchers(e, currPath, false)
+	}
+}
+
+func (wh *watcherHub) notifyWatchers(e *Event, nodePath string, deleted bool) {
+	wh.mutex.Lock()
+	defer wh.mutex.Unlock()
+
+	l, ok := wh.watchers[nodePath]
+	if ok {
+		curr := l.Front()
+
+		for curr != nil {
+			next := curr.Next() // save reference to the next one in the list
+
+			w, _ := curr.Value.(*watcher)
+
+			originalPath := (e.Node.Key == nodePath)
+			if (originalPath || !isHidden(nodePath, e.Node.Key)) && w.notify(e, originalPath, deleted) {
+				if !w.stream { // do not remove the stream watcher
+					// if we successfully notify a watcher
+					// we need to remove the watcher from the list
+					// and decrease the counter
+					w.removed = true
+					l.Remove(curr)
+					atomic.AddInt64(&wh.count, -1)
+					reportWatcherRemoved()
+				}
+			}
+
+			curr = next // update current to the next element in the list
+		}
+
+		if l.Len() == 0 {
+			// if we have notified all the watchers in the list
+			// we can delete the list
+			delete(wh.watchers, nodePath)
+		}
+	}
+}
+
+// clone clones the watcherHub and returns the cloned one.
+// It only clones the static content; it does not clone the current watchers.
+func (wh *watcherHub) clone() *watcherHub {
+	clonedHistory := wh.EventHistory.clone()
+
+	return &watcherHub{
+		EventHistory: clonedHistory,
+	}
+}
+
+// isHidden checks whether the key path is considered hidden with respect to the watch path, i.e. the
+// last element is hidden or it's within a hidden directory
+func isHidden(watchPath, keyPath string) bool {
+	// When deleting a directory, watchPath might be deeper than the actual keyPath
+	// For example, when deleting /foo we also need to notify watchers on /foo/bar.
+	if len(watchPath) > len(keyPath) {
+		return false
+	}
+	// if watch path is just a "/", after path will start without "/"
+	// add a "/" to deal with the special case when watchPath is "/"
+	afterPath := path.Clean("/" + keyPath[len(watchPath):])
+	return strings.Contains(afterPath, "/_")
+}
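
Illustration (not part of the vendored diff): a standalone mirror of isHidden showing its behavior. A key is hidden from a watch path when some path element below the watch path starts with an underscore, but a watcher already inside a hidden directory still sees its own subtree:

package main

import (
	"fmt"
	"path"
	"strings"
)

// hidden mirrors the vendored isHidden.
func hidden(watchPath, keyPath string) bool {
	if len(watchPath) > len(keyPath) {
		return false
	}
	afterPath := path.Clean("/" + keyPath[len(watchPath):])
	return strings.Contains(afterPath, "/_")
}

func main() {
	fmt.Println(hidden("/foo", "/foo/_bar"))          // true: "_bar" is hidden below "/foo"
	fmt.Println(hidden("/foo", "/foo/bar"))           // false: nothing hidden
	fmt.Println(hidden("/foo/_bar", "/foo/_bar/baz")) // false: the watcher is inside the hidden dir
}
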
diff --git a/vendor/github.com/coreos/etcd/test b/vendor/github.com/coreos/etcd/test
new file mode 100755
index 0000000..da356f5
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/test
@@ -0,0 +1,632 @@
+#!/usr/bin/env bash
+#
+# Run all etcd tests
+# ./test
+# ./test -v
+#
+#
+# Run specified test pass
+#
+# $ PASSES=unit ./test
+# $ PASSES=integration ./test
+#
+#
+# Run tests for one package
+# Each pass has a different default timeout; if you just run tests in one package or one test case then you can set the TIMEOUT
+# flag to a different expectation
+#
+# $ PASSES=unit PKG=./wal TIMEOUT=1m ./test
+# $ PASSES=integration PKG=client/integration TIMEOUT=1m ./test
+#
+#
+# Run specified unit tests in one package
+# To run all the tests with prefix of "TestNew", set "TESTCASE=TestNew ";
+# to run only "TestNew", set "TESTCASE="\bTestNew\b""
+#
+# $ PASSES=unit PKG=./wal TESTCASE=TestNew TIMEOUT=1m ./test
+# $ PASSES=unit PKG=./wal TESTCASE="\bTestNew\b" TIMEOUT=1m ./test
+# $ PASSES=integration PKG=client/integration TESTCASE="\bTestV2NoRetryEOF\b" TIMEOUT=1m ./test
+#
+#
+# Run code coverage
+# COVERDIR must either be an absolute path or a relative path to the etcd root
+# $ COVERDIR=coverage PASSES="build_cov cov" ./test
+set -e
+
+source ./build
+
+# build before setting up test GOPATH
+if [[ "${PASSES}" == *"functional"* ]]; then
+	./functional/build
+fi
+
+# build tests with vendored dependencies
+etcd_setup_gopath
+
+if [ -z "$PASSES" ]; then
+	PASSES="fmt bom dep build unit"
+fi
+
+USERPKG=${PKG:-}
+
+# Invoke ./tests/cover.test.bash for HTML output
+COVER=${COVER:-"-cover"}
+
+# Hack: gofmt ./ will recursively check the .git directory. So use *.go for gofmt.
+IGNORE_PKGS="(cmd/|etcdserverpb|rafttest|gopath.proto|v3lockpb|v3electionpb)"
+INTEGRATION_PKGS="(integration|e2e|contrib|functional)"
+
+# all github.com/coreos/etcd/whatever pkgs that are not auto-generated / tools
+# shellcheck disable=SC1117
+PKGS=$(find . -name \*.go | while read -r a; do dirname "$a"; done | sort | uniq | grep -vE "$IGNORE_PKGS" | grep -vE "(tools/|contrib/|e2e|pb)" | sed "s|\.|${REPO_PATH}|g" | xargs echo)
+# pkg1,pkg2,pkg3
+PKGS_COMMA=${PKGS// /,}
+
+# shellcheck disable=SC1117
+TEST_PKGS=$(find . -name \*_test.go | while read -r a; do dirname "$a"; done | sort | uniq | grep -vE "$IGNORE_PKGS" | sed "s|\./||g")
+
+# shellcheck disable=SC1117
+FORMATTABLE=$(find . -name \*.go | while read -r a; do echo "$(dirname "$a")/*.go"; done | sort | uniq | grep -vE "$IGNORE_PKGS" | sed "s|\./||g")
+
+TESTABLE_AND_FORMATTABLE=$(echo "$TEST_PKGS" | grep -vE "$INTEGRATION_PKGS")
+
+# check if user provided PKG override
+if [ -z "${USERPKG}" ]; then
+	TEST=$TESTABLE_AND_FORMATTABLE
+	FMT=$FORMATTABLE
+else
+	# strip out leading dotslashes and trailing slashes from PKG=./foo/
+	TEST=${USERPKG/#./}
+	TEST=${TEST/#\//}
+	TEST=${TEST/%\//}
+	# only run gofmt on packages provided by user
+	FMT="$TEST"
+fi
+
+# shellcheck disable=SC2206
+FMT=($FMT)
+
+# prepend REPO_PATH to each local package
+split=$TEST
+TEST=""
+for a in $split; do TEST="$TEST ${REPO_PATH}/${a}"; done
+
+# shellcheck disable=SC2206
+TEST=($TEST)
+
+# TODO: 'client' pkg fails with gosimple from generated files
+# TODO: 'rafttest' is failing with unused
+STATIC_ANALYSIS_PATHS=$(find . -name \*.go | while read -r a; do dirname "$a"; done | sort | uniq | grep -vE "$IGNORE_PKGS" | grep -v 'client')
+# shellcheck disable=SC2206
+STATIC_ANALYSIS_PATHS=($STATIC_ANALYSIS_PATHS)
+
+if [ -z "$GOARCH" ]; then
+	GOARCH=$(go env GOARCH);
+fi
+
+# determine whether target supports race detection
+if [ "$GOARCH" == "amd64" ]; then
+	RACE="--race"
+fi
+
+RUN_ARG=""
+if [ ! -z "${TESTCASE}" ]; then
+	RUN_ARG="-run=${TESTCASE}"
+fi
+
+function unit_pass {
+	echo "Running unit tests..."
+	GO_TEST_FLAG=""
+	if [ "${VERBOSE}" == "1" ]; then
+		GO_TEST_FLAG="-v"
+	fi
+	if [ "${VERBOSE}" == "2" ]; then
+		GO_TEST_FLAG="-v"
+		export CLIENT_DEBUG=1
+	fi
+
+	if [ "${RUN_ARG}" == "" ]; then
+	    RUN_ARG="-run=Test"
+	fi
+
+	# check if the user provided a timeout, especially useful when running just one test case
+	# where the expectation could be different
+	USERTIMEOUT=""
+	if [ -z "${TIMEOUT}" ]; then
+		USERTIMEOUT="3m"
+	else
+		USERTIMEOUT="${TIMEOUT}"
+	fi
+	go test ${GO_TEST_FLAG} -timeout "${USERTIMEOUT}"  "${COVER}" ${RACE} -cpu 4 ${RUN_ARG} "$@" "${TEST[@]}"
+}
+
+function integration_pass {
+	echo "Running integration tests..."
+
+	# check if the user provided a timeout, especially useful when running just one test case
+	# where the expectation could be different
+	USERTIMEOUT=""
+	if [ -z "${TIMEOUT}" ]; then
+		USERTIMEOUT="20m"
+	else
+		USERTIMEOUT="${TIMEOUT}"
+	fi
+
+	# if TESTCASE and PKG set, run specified test case in specified PKG
+	# if TESTCASE set, PKG not set, run specified test case in all integration and integration_extra packages
+	# if TESTCASE not set, PKG set, run all test cases in specified package
+	# if TESTCASE not set, PKG not set, run all tests in all integration and integration_extra packages
+	if [ -z "${TESTCASE}" ] && [ -z "${USERPKG}" ]; then
+		go test -timeout "${USERTIMEOUT}" -v -cpu 4 "$@" "${REPO_PATH}/integration"
+		integration_extra "$@"
+	else
+		if [ -z "${USERPKG}" ]; then
+			INTEGTESTPKG=("${REPO_PATH}/integration"
+						  "${REPO_PATH}/client/integration"
+						  "${REPO_PATH}/clientv3/integration"
+						  "${REPO_PATH}/store")
+		else
+			INTEGTESTPKG=("${TEST[@]}")
+		fi
+		go test -timeout "${USERTIMEOUT}" -v -cpu 4 "${RUN_ARG}"  "$@" "${INTEGTESTPKG[@]}"
+	fi
+}
+
+function integration_extra {
+	go test -timeout 1m -v ${RACE} -cpu 4 "$@" "${REPO_PATH}/client/integration"
+	go test -timeout 25m -v ${RACE} -cpu 4 "$@" "${REPO_PATH}/clientv3/integration"
+}
+
+function functional_pass {
+  	# Clean up any data and logs from previous runs
+  	rm -rf /tmp/etcd-functional-* /tmp/etcd-functional-*.backup
+
+	for a in 1 2 3; do
+		./bin/etcd-agent --network tcp --address 127.0.0.1:${a}9027 &
+		pid="$!"
+		agent_pids="${agent_pids} $pid"
+	done
+
+	for a in 1 2 3; do
+		echo "Waiting for 'etcd-agent' on ${a}9027..."
+		while ! nc -z localhost ${a}9027; do
+			sleep 1
+		done
+	done
+
+	echo "functional test START!"
+	./bin/etcd-tester --config ./functional.yaml && echo "'etcd-tester' succeeded"
+	ETCD_TESTER_EXIT_CODE=$?
+	echo "ETCD_TESTER_EXIT_CODE:" ${ETCD_TESTER_EXIT_CODE}
+
+	# shellcheck disable=SC2206
+	agent_pids=($agent_pids)
+	kill -s TERM "${agent_pids[@]}" || true
+
+	if [[ "${ETCD_TESTER_EXIT_CODE}" -ne "0" ]]; then
+		echo "--- FAIL: exit code" ${ETCD_TESTER_EXIT_CODE}
+		exit ${ETCD_TESTER_EXIT_CODE}
+	fi
+	echo "functional test PASS!"
+}
+
+function cov_pass {
+	echo "Running code coverage..."
+	# install gocovmerge before running code coverage from github.com/wadey/gocovmerge
+	# gocovmerge merges coverage files
+	if ! which gocovmerge >/dev/null; then
+		echo "gocovmerge not installed"
+		exit 255
+	fi
+
+	if [ -z "$COVERDIR" ]; then
+		echo "COVERDIR undeclared"
+		exit 255
+	fi
+
+	if [ ! -f "bin/etcd_test" ]; then
+		echo "etcd_test binary not found"
+		exit 255
+	fi
+
+	mkdir -p "$COVERDIR"
+
+	# run code coverage for unit and integration tests
+	GOCOVFLAGS="-covermode=set -coverpkg ${PKGS_COMMA} -v -timeout 20m"
+	# shellcheck disable=SC2206
+	GOCOVFLAGS=($GOCOVFLAGS)
+	failed=""
+	for t in $(echo "${TEST_PKGS}" | grep -vE "(e2e|functional)"); do
+		tf=$(echo "$t" | tr / _)
+		# cache package compilation data for faster repeated builds
+		go test "${GOCOVFLAGS[@]}" -i "${REPO_PATH}/$t" || true
+		# uses -run=Test to skip examples because clientv3/ example tests will leak goroutines
+		go test "${GOCOVFLAGS[@]}" -run=Test -coverprofile "$COVERDIR/${tf}.coverprofile"  "${REPO_PATH}/$t" || failed="$failed $t"
+	done
+
+	# v2v3 tests
+	go test -tags v2v3 "${GOCOVFLAGS[@]}" -coverprofile "$COVERDIR/store-v2v3.coverprofile" "${REPO_PATH}/clientv3/integration" || failed="$failed store-v2v3"
+
+	# proxy tests
+	go test -tags cluster_proxy "${GOCOVFLAGS[@]}" -coverprofile "$COVERDIR/proxy_integration.coverprofile" "${REPO_PATH}/integration" || failed="$failed proxy-integration"
+	go test -tags cluster_proxy "${GOCOVFLAGS[@]}" -coverprofile "$COVERDIR/proxy_clientv3.coverprofile" "${REPO_PATH}/clientv3/integration" || failed="$failed proxy-clientv3/integration"
+
+	# run code coverage for e2e tests
+	# use 30m timeout because e2e coverage takes longer
+	# due to many tests causing the etcd process to wait
+	# on leadership transfer timeout during graceful shutdown
+	echo Testing e2e without proxy...
+	go test -tags cov -timeout 30m -v "${REPO_PATH}/e2e" || failed="$failed e2e"
+	echo Testing e2e with proxy...
+	go test -tags "cov cluster_proxy" -timeout 30m -v "${REPO_PATH}/e2e" || failed="$failed e2e-proxy"
+
+	# incrementally merge to get coverage data even if some coverage files are corrupted
+	# optimistically assume etcdserver package's coverage file is OK since gocovmerge
+	# expects to start with a non-empty file
+	cp "$COVERDIR"/etcdserver.coverprofile "$COVERDIR"/cover.out
+	for f in "$COVERDIR"/*.coverprofile; do
+		echo "merging test coverage file ${f}"
+		gocovmerge "$f" "$COVERDIR"/cover.out  >"$COVERDIR"/cover.tmp || failed="$failed $f"
+		if [ -s "$COVERDIR"/cover.tmp ]; then
+			mv "$COVERDIR"/cover.tmp "$COVERDIR"/cover.out
+		fi
+	done
+	# strip out generated files (using GNU-style sed)
+	sed --in-place '/generated.go/d' "$COVERDIR"/cover.out || true
+
+	# failures were held to generate the full coverage file; now fail
+	if [ -n "$failed" ]; then
+		for f in $failed; do
+			echo "--- FAIL:" "$f"
+		done
+		exit 255
+	fi
+}
+
+function e2e_pass {
+	echo "Running e2e tests..."
+
+	# check if the user provided a timeout, especially useful when running just one test case
+	# where the expectation could be different
+	USERTIMEOUT=""
+	if [ -z "${TIMEOUT}" ]; then
+		USERTIMEOUT="20m"
+	else
+		USERTIMEOUT="${TIMEOUT}"
+	fi
+
+	go test -timeout "${USERTIMEOUT}" -v -cpu 4 "${RUN_ARG}"  "$@" "${REPO_PATH}/e2e"
+}
+
+function integration_e2e_pass {
+	echo "Running integration and e2e tests..."
+
+	go test -timeout 20m -v -cpu 4 "$@" "${REPO_PATH}/e2e" &
+	e2epid="$!"
+	go test -timeout 20m -v -cpu 4 "$@" "${REPO_PATH}/integration" &
+	intpid="$!"
+	wait $e2epid
+	wait $intpid
+	integration_extra "$@"
+}
+
+function grpcproxy_pass {
+	go test -timeout 20m -v ${RACE} -tags cluster_proxy -cpu 4 "$@" "${REPO_PATH}/integration"
+	go test -timeout 20m -v ${RACE} -tags cluster_proxy -cpu 4 "$@" "${REPO_PATH}/clientv3/integration"
+	go test -timeout 20m -v -tags cluster_proxy "$@" "${REPO_PATH}/e2e"
+}
+
+function release_pass {
+	rm -f ./bin/etcd-last-release
+	# to grab latest patch release; bump this up for every minor release
+	UPGRADE_VER=$(git tag -l --sort=-version:refname "v3.3.*" | head -1)
+	if [ -n "$MANUAL_VER" ]; then
+		# in case we need to test against a different version
+		UPGRADE_VER=$MANUAL_VER
+	fi
+	if [[ -z ${UPGRADE_VER} ]]; then
+		UPGRADE_VER="v3.3.0"
+		echo "fallback to" ${UPGRADE_VER}
+	fi
+
+	local file="etcd-$UPGRADE_VER-linux-$GOARCH.tar.gz"
+	echo "Downloading $file"
+
+	set +e
+	curl --fail -L "https://github.com/coreos/etcd/releases/download/$UPGRADE_VER/$file" -o "/tmp/$file"
+	local result=$?
+	set -e
+	case $result in
+		0)	;;
+		*)	echo "--- FAIL:" ${result}
+			exit $result
+			;;
+	esac
+
+	tar xzvf "/tmp/$file" -C /tmp/ --strip-components=1
+	mkdir -p ./bin
+	mv /tmp/etcd ./bin/etcd-last-release
+}
+
+function shellcheck_pass {
+	if which shellcheck >/dev/null; then
+		shellcheckResult=$(shellcheck -fgcc build test scripts/*.sh 2>&1 || true)
+		if [ -n "${shellcheckResult}" ]; then
+			echo -e "shellcheck checking failed:\\n${shellcheckResult}"
+			exit 255
+		fi
+	fi
+}
+
+function markdown_you_pass {
+	# eschew you
+	yous=$(find . -name \*.md -exec grep -E --color "[Yy]ou[r]?[ '.,;]" {} + | grep -v /v2/ || true)
+	if [ ! -z "$yous" ]; then
+		echo -e "found 'you' in documentation:\\n${yous}"
+		exit 255
+	fi
+}
+
+function markdown_marker_pass {
+	# TODO: check other markdown files when marker handles headers with '[]'
+	if which marker >/dev/null; then
+		markerResult=$(marker --skip-http --root ./Documentation 2>&1 || true)
+		if [ -n "${markerResult}" ]; then
+			echo -e "marker checking failed:\\n${markerResult}"
+			exit 255
+		fi
+	else
+		echo "Skipping marker..."
+	fi
+}
+
+function goword_pass {
+	if which goword >/dev/null; then
+		# get all go files to process
+		gofiles=$(find "${FMT[@]}" -iname '*.go' 2>/dev/null)
+		# shellcheck disable=SC2206
+		gofiles_all=($gofiles)
+		# ignore tests and protobuf files
+		# shellcheck disable=SC1117
+		gofiles=$(echo "${gofiles_all[@]}" | sort | uniq | sed "s/ /\n/g" | grep -vE "(\\_test.go|\\.pb\\.go)")
+		# shellcheck disable=SC2206
+		gofiles=($gofiles)
+		# only check for broken exported godocs
+		gowordRes=$(goword -use-spell=false "${gofiles[@]}" | grep godoc-export | sort)
+		if [ ! -z "$gowordRes" ]; then
+			echo -e "goword checking failed:\\n${gowordRes}"
+			exit 255
+		fi
+		# check some spelling
+		gowordRes=$(goword -ignore-file=.words clientv3/{*,*/*}.go 2>&1 | grep spell | sort)
+		if [ ! -z "$gowordRes" ]; then
+			echo -e "goword checking failed:\\n${gowordRes}"
+			exit 255
+		fi
+	else
+		echo "Skipping goword..."
+	fi
+}
+
+function gofmt_pass {
+	fmtRes=$(gofmt -l -s -d "${FMT[@]}")
+	if [ -n "${fmtRes}" ]; then
+		echo -e "gofmt checking failed:\\n${fmtRes}"
+		exit 255
+	fi
+}
+
+function govet_pass {
+	vetRes=$(go vet "${TEST[@]}")
+	if [ -n "${vetRes}" ]; then
+		echo -e "govet checking failed:\\n${vetRes}"
+		exit 255
+	fi
+}
+
+function govet_shadow_pass {
+	fmtpkgs=$(for a in "${FMT[@]}"; do dirname "$a"; done | sort | uniq | grep -v "\\.")
+	# shellcheck disable=SC2206
+	fmtpkgs=($fmtpkgs)
+	vetRes=$(go tool vet -all -shadow "${fmtpkgs[@]}" 2>&1 | grep -v '/gw/' || true)
+	if [ -n "${vetRes}" ]; then
+		echo -e "govet -all -shadow checking failed:\\n${vetRes}"
+		exit 255
+	fi
+}
+
+function gosimple_pass {
+	if which gosimple >/dev/null; then
+		gosimpleResult=$(gosimple "${STATIC_ANALYSIS_PATHS[@]}" 2>&1 || true)
+		if [ -n "${gosimpleResult}" ]; then
+			echo -e "gosimple checking failed:\\n${gosimpleResult}"
+			exit 255
+		fi
+	else
+		echo "Skipping gosimple..."
+	fi
+}
+
+function unused_pass {
+	if which unused >/dev/null; then
+		unusedResult=$(unused "${STATIC_ANALYSIS_PATHS[@]}" 2>&1 || true)
+		if [ -n "${unusedResult}" ]; then
+			echo -e "unused checking failed:\\n${unusedResult}"
+			exit 255
+		fi
+	else
+		echo "Skipping unused..."
+	fi
+}
+
+function staticcheck_pass {
+	if which staticcheck >/dev/null; then
+		staticcheckResult=$(staticcheck "${STATIC_ANALYSIS_PATHS[@]}" 2>&1 || true)
+		if [ -n "${staticcheckResult}" ]; then
+			# TODO: resolve these after go1.8 migration
+			# See https://github.com/dominikh/go-tools/tree/master/cmd/staticcheck
+			STATIC_CHECK_MASK="SA(1012|1019|2002)"
+			if echo "${staticcheckResult}" | grep -vE "$STATIC_CHECK_MASK"; then
+				echo -e "staticcheck checking failed:\\n${staticcheckResult}"
+				exit 255
+			else
+				suppressed=$(echo "${staticcheckResult}" | sed 's/ /\n/g' | grep "(SA" | sort | uniq -c)
+				echo -e "staticcheck suppressed warnings:\\n${suppressed}"
+			fi
+		fi
+	else
+		echo "Skipping staticcheck..."
+	fi
+}
+
+function ineffassign_pass {
+	if which ineffassign >/dev/null; then
+		ineffassignResult=$(ineffassign "${STATIC_ANALYSIS_PATHS[@]}" 2>&1 || true)
+		if [ -n "${ineffassignResult}" ]; then
+			echo -e "ineffassign checking failed:\\n${ineffassignResult}"
+			exit 255
+		fi
+	else
+		echo "Skipping ineffassign..."
+	fi
+}
+
+function nakedret_pass {
+	if which nakedret >/dev/null; then
+		nakedretResult=$(nakedret "${STATIC_ANALYSIS_PATHS[@]}" 2>&1 || true)
+		if [ -n "${nakedretResult}" ]; then
+			echo -e "nakedret checking failed:\\n${nakedretResult}"
+			exit 255
+		fi
+	else
+		echo "Skipping nakedret..."
+	fi
+}
+
+function license_header_pass {
+	licRes=""
+	files=$(find . -type f -iname '*.go' ! -path './cmd/*' ! -path './gopath.proto/*')
+	for file in $files; do
+		if ! head -n3 "${file}" | grep -Eq "(Copyright|generated|GENERATED)" ; then
+			licRes="${licRes}"$(echo -e "  ${file}")
+		fi
+	done
+	if [ -n "${licRes}" ]; then
+		echo -e "license header checking failed:\\n${licRes}"
+		exit 255
+	fi
+}
+
+function receiver_name_pass {
+	# shellcheck disable=SC1117
+	recvs=$(grep 'func ([^*]' {*,*/*,*/*/*}.go  | grep -Ev "(generated|pb/)" | tr  ':' ' ' |  \
+		awk ' { print $2" "$3" "$4" "$1 }' | sed "s/[a-zA-Z\.]*go//g" |  sort  | uniq  | \
+		grep -Ev  "(Descriptor|Proto|_)"  | awk ' { print $3" "$4 } ' | sort | uniq -c | grep -v ' 1 ' | awk ' { print $2 } ')
+	if [ -n "${recvs}" ]; then
+		# shellcheck disable=SC2206
+		recvs=($recvs)
+		for recv in "${recvs[@]}"; do
+			echo "Mismatched receiver for $recv..."
+			grep "$recv" "${FMT[@]}" | grep 'func ('
+		done
+		exit 255
+	fi
+}
+
+function commit_title_pass {
+	git log --oneline "$(git merge-base HEAD master)"...HEAD | while read -r l; do
+		commitMsg=$(echo "$l" | cut -f2- -d' ')
+		if [[ "$commitMsg" == Merge* ]]; then
+			# ignore "Merge pull" commits
+			continue
+		fi
+		if [[ "$commitMsg" == Revert* ]]; then
+			# ignore revert commits
+			continue
+		fi
+
+		pkgPrefix=$(echo "$commitMsg" | cut -f1 -d':')
+		spaceCommas=$(echo "$commitMsg" | sed 's/ /\n/g' | grep -c ',$' || echo 0)
+		commaSpaces=$(echo "$commitMsg" | sed 's/,/\n/g' | grep -c '^ ' || echo 0)
+		if [[ $(echo "$commitMsg" | grep -c ":..*") == 0 || "$commitMsg" == "$pkgPrefix" || "$spaceCommas" != "$commaSpaces" ]]; then
+			echo "$l"...
+			echo "Expected commit title format '<package>{\", \"<package>}: <description>'"
+			echo "Got: $l"
+			exit 255
+		fi
+	done
+}
+
+function fmt_pass {
+	toggle_failpoints disable
+
+	for p in shellcheck \
+			markdown_you \
+			markdown_marker \
+			goword \
+			gofmt \
+			govet \
+			govet_shadow \
+			gosimple \
+			unused \
+			staticcheck \
+			ineffassign \
+			nakedret \
+			license_header \
+			receiver_name \
+			commit_title \
+			; do
+		echo "'$p' started at $(date)"
+		"${p}"_pass "$@"
+		echo "'$p' completed at $(date)"
+	done
+}
+
+function bom_pass {
+	if ! which license-bill-of-materials >/dev/null; then
+		return
+	fi
+	echo "Checking bill of materials..."
+	license-bill-of-materials \
+		--override-file bill-of-materials.override.json \
+		github.com/coreos/etcd github.com/coreos/etcd/etcdctl >bom-now.json || true
+	if ! diff bill-of-materials.json bom-now.json; then
+		echo "vendored licenses do not match given bill of materials"
+		exit 255
+	fi
+	rm bom-now.json
+}
+
+function dep_pass {
+	echo "Checking package dependencies..."
+	# don't pull in etcdserver package
+	pushd clientv3 >/dev/null
+	badpkg="(etcdserver$|mvcc$|backend$|grpc-gateway)"
+	deps=$(go list -f '{{ .Deps }}'  | sed 's/ /\n/g' | grep -E "${badpkg}" || echo "")
+	popd >/dev/null
+	if [ ! -z "$deps" ]; then
+		echo -e "clientv3 has masked dependencies:\\n${deps}"
+		exit 255
+	fi
+}
+
+function build_cov_pass {
+	out="bin"
+	if [ -n "${BINDIR}" ]; then out="${BINDIR}"; fi
+	go test -tags cov -c -covermode=set -coverpkg="$PKGS_COMMA" -o "${out}/etcd_test"
+	go test -tags cov -c -covermode=set -coverpkg="$PKGS_COMMA" -o "${out}/etcdctl_test" "${REPO_PATH}/etcdctl"
+}
+
+# fail fast on static tests
+function build_pass {
+	echo "Checking build..."
+	GO_BUILD_FLAGS="-v" etcd_build
+}
+
+for pass in $PASSES; do
+	echo "Starting '$pass' pass at $(date)"
+	"${pass}"_pass "$@"
+	echo "Finished '$pass' pass at $(date)"
+done
+
+echo "Success"
diff --git a/vendor/github.com/coreos/etcd/version/version.go b/vendor/github.com/coreos/etcd/version/version.go
new file mode 100644
index 0000000..c52accc
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/version/version.go
@@ -0,0 +1,56 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package version implements etcd version parsing and contains latest version
+// information.
+package version
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/coreos/go-semver/semver"
+)
+
+var (
+	// MinClusterVersion is the min cluster version this etcd binary is compatible with.
+	MinClusterVersion = "3.0.0"
+	Version           = "3.3.11"
+	APIVersion        = "unknown"
+
+	// GitSHA is set during build to the git SHA the binary was built from
+	GitSHA = "Not provided (use ./build instead of go build)"
+)
+
+func init() {
+	ver, err := semver.NewVersion(Version)
+	if err == nil {
+		APIVersion = fmt.Sprintf("%d.%d", ver.Major, ver.Minor)
+	}
+}
+
+type Versions struct {
+	Server  string `json:"etcdserver"`
+	Cluster string `json:"etcdcluster"`
+	// TODO: raft state machine version
+}
+
+// Cluster only keeps the major.minor.
+func Cluster(v string) string {
+	vs := strings.Split(v, ".")
+	if len(vs) <= 2 {
+		return v
+	}
+	return fmt.Sprintf("%s.%s", vs[0], vs[1])
+}
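
A minimal sketch of how a consumer reads these values, assuming only the
vendored package above (the printed output is illustrative):

	package main

	import (
		"fmt"

		"github.com/coreos/etcd/version"
	)

	func main() {
		// APIVersion is derived from Version in init() as "<major>.<minor>".
		fmt.Println(version.Version, version.APIVersion) // 3.3.11 3.3
		// Cluster keeps only the major.minor part of a full version.
		fmt.Println(version.Cluster("3.3.11")) // 3.3
	}
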
diff --git a/vendor/github.com/coreos/etcd/wal/decoder.go b/vendor/github.com/coreos/etcd/wal/decoder.go
new file mode 100644
index 0000000..6a217f8
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/wal/decoder.go
@@ -0,0 +1,188 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import (
+	"bufio"
+	"encoding/binary"
+	"hash"
+	"io"
+	"sync"
+
+	"github.com/coreos/etcd/pkg/crc"
+	"github.com/coreos/etcd/pkg/pbutil"
+	"github.com/coreos/etcd/raft/raftpb"
+	"github.com/coreos/etcd/wal/walpb"
+)
+
+const minSectorSize = 512
+
+// frameSizeBytes is the size in bytes of a record's length field, which
+// encodes both the record size and the padding size.
+const frameSizeBytes = 8
+
+type decoder struct {
+	mu  sync.Mutex
+	brs []*bufio.Reader
+
+	// lastValidOff is the file offset following the last valid decoded record
+	lastValidOff int64
+	crc          hash.Hash32
+}
+
+func newDecoder(r ...io.Reader) *decoder {
+	readers := make([]*bufio.Reader, len(r))
+	for i := range r {
+		readers[i] = bufio.NewReader(r[i])
+	}
+	return &decoder{
+		brs: readers,
+		crc: crc.New(0, crcTable),
+	}
+}
+
+func (d *decoder) decode(rec *walpb.Record) error {
+	rec.Reset()
+	d.mu.Lock()
+	defer d.mu.Unlock()
+	return d.decodeRecord(rec)
+}
+
+func (d *decoder) decodeRecord(rec *walpb.Record) error {
+	if len(d.brs) == 0 {
+		return io.EOF
+	}
+
+	l, err := readInt64(d.brs[0])
+	if err == io.EOF || (err == nil && l == 0) {
+		// hit end of file or preallocated space
+		d.brs = d.brs[1:]
+		if len(d.brs) == 0 {
+			return io.EOF
+		}
+		d.lastValidOff = 0
+		return d.decodeRecord(rec)
+	}
+	if err != nil {
+		return err
+	}
+
+	recBytes, padBytes := decodeFrameSize(l)
+
+	data := make([]byte, recBytes+padBytes)
+	if _, err = io.ReadFull(d.brs[0], data); err != nil {
+		// ReadFull returns io.EOF only if no bytes were read;
+		// the decoder should treat this as io.ErrUnexpectedEOF instead.
+		if err == io.EOF {
+			err = io.ErrUnexpectedEOF
+		}
+		return err
+	}
+	if err := rec.Unmarshal(data[:recBytes]); err != nil {
+		if d.isTornEntry(data) {
+			return io.ErrUnexpectedEOF
+		}
+		return err
+	}
+
+	// skip crc checking if the record type is crcType
+	if rec.Type != crcType {
+		d.crc.Write(rec.Data)
+		if err := rec.Validate(d.crc.Sum32()); err != nil {
+			if d.isTornEntry(data) {
+				return io.ErrUnexpectedEOF
+			}
+			return err
+		}
+	}
+	// record decoded as valid; point last valid offset to end of record
+	d.lastValidOff += frameSizeBytes + recBytes + padBytes
+	return nil
+}
+
+func decodeFrameSize(lenField int64) (recBytes int64, padBytes int64) {
+	// the record size is stored in the lower 56 bits of the 64-bit length
+	recBytes = int64(uint64(lenField) & ^(uint64(0xff) << 56))
+	// non-zero padding is indicated by set MSb / a negative length
+	if lenField < 0 {
+		// padding is stored in lower 3 bits of length MSB
+		padBytes = int64((uint64(lenField) >> 56) & 0x7)
+	}
+	return recBytes, padBytes
+}
+
+// isTornEntry determines whether the last entry of the WAL was partially written
+// and corrupted because of a torn write.
+func (d *decoder) isTornEntry(data []byte) bool {
+	if len(d.brs) != 1 {
+		return false
+	}
+
+	fileOff := d.lastValidOff + frameSizeBytes
+	curOff := 0
+	chunks := [][]byte{}
+	// split data on sector boundaries
+	for curOff < len(data) {
+		chunkLen := int(minSectorSize - (fileOff % minSectorSize))
+		if chunkLen > len(data)-curOff {
+			chunkLen = len(data) - curOff
+		}
+		chunks = append(chunks, data[curOff:curOff+chunkLen])
+		fileOff += int64(chunkLen)
+		curOff += chunkLen
+	}
+
+	// if any data for a sector chunk is all 0, it's a torn write
+	for _, sect := range chunks {
+		isZero := true
+		for _, v := range sect {
+			if v != 0 {
+				isZero = false
+				break
+			}
+		}
+		if isZero {
+			return true
+		}
+	}
+	return false
+}
+
+func (d *decoder) updateCRC(prevCrc uint32) {
+	d.crc = crc.New(prevCrc, crcTable)
+}
+
+func (d *decoder) lastCRC() uint32 {
+	return d.crc.Sum32()
+}
+
+func (d *decoder) lastOffset() int64 { return d.lastValidOff }
+
+func mustUnmarshalEntry(d []byte) raftpb.Entry {
+	var e raftpb.Entry
+	pbutil.MustUnmarshal(&e, d)
+	return e
+}
+
+func mustUnmarshalState(d []byte) raftpb.HardState {
+	var s raftpb.HardState
+	pbutil.MustUnmarshal(&s, d)
+	return s
+}
+
+func readInt64(r io.Reader) (int64, error) {
+	var n int64
+	err := binary.Read(r, binary.LittleEndian, &n)
+	return n, err
+}
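
A self-contained sketch of the length-field layout that decodeFrameSize
unpacks; the masks mirror the vendored code and the packed value is
illustrative:

	package main

	import "fmt"

	func main() {
		// Pack a 13-byte record with 3 pad bytes: the pad count goes in
		// the low 3 bits of the MSB, and 0x80 sets the sign bit to flag
		// that padding is present.
		lenField := uint64(13) | uint64(0x80|3)<<56

		// Unpack, mirroring decodeFrameSize above.
		recBytes := int64(lenField & ^(uint64(0xff) << 56))
		var padBytes int64
		if int64(lenField) < 0 { // MSb set => padding present
			padBytes = int64((lenField >> 56) & 0x7)
		}
		fmt.Println(recBytes, padBytes) // 13 3
	}
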
diff --git a/vendor/github.com/coreos/etcd/wal/doc.go b/vendor/github.com/coreos/etcd/wal/doc.go
new file mode 100644
index 0000000..a3abd69
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/wal/doc.go
@@ -0,0 +1,75 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package wal provides an implementation of a write ahead log that is used by
+etcd.
+
+A WAL is created at a particular directory and is made up of a number of
+segmented WAL files. Inside of each file the raft state and entries are appended
+to it with the Save method:
+
+	metadata := []byte{}
+	w, err := wal.Create("/var/lib/etcd", metadata)
+	...
+	err := w.Save(s, ents)
+
+After saving a raft snapshot to disk, the SaveSnapshot method should be called
+to record it, so that the WAL can be matched with the saved snapshot on restart.
+
+	err := w.SaveSnapshot(walpb.Snapshot{Index: 10, Term: 2})
+
+When a user has finished using a WAL it must be closed:
+
+	w.Close()
+
+Each WAL file is a stream of WAL records. A WAL record is a length field and a wal record
+protobuf. The record protobuf contains a CRC, a type, and a data payload. The length field is a
+64-bit packed structure holding the length of the remaining logical record data in its lower
+56 bits and its physical padding in the lower three bits of the most significant byte. Each
+record is 8-byte aligned so that the length field is never torn. The CRC contains the CRC32
+value of all record protobufs preceding the current record.
+
+WAL files are placed inside of the directory in the following format:
+$seq-$index.wal
+
+The first WAL file to be created will be 0000000000000000-0000000000000000.wal
+indicating an initial sequence of 0 and an initial raft index of 0. The first
+entry written to WAL MUST have raft index 0.
+
+WAL will cut its current tail wal file if its size exceeds 64MB. This will increment an internal
+sequence number and cause a new file to be created. If the last raft index saved
+was 0x20 and this is the first time cut has been called on this WAL then the sequence will
+increment from 0x0 to 0x1. The new file will be: 0000000000000001-0000000000000021.wal.
+If a second cut happens after 0x10 more entries have been written with
+incrementing indexes, the next file will be called 0000000000000002-0000000000000031.wal.
+
+At a later time a WAL can be opened at a particular snapshot. If there is no
+snapshot, an empty snapshot should be passed in.
+
+	w, err := wal.Open("/var/lib/etcd", walpb.Snapshot{Index: 10, Term: 2})
+	...
+
+The snapshot must have been written to the WAL.
+
+Additional items cannot be Saved to this WAL until all of the items from the given
+snapshot to the end of the WAL are read first:
+
+	metadata, state, ents, err := w.ReadAll()
+
+This will give you the metadata, the last raftpb.HardState and the slice of
+raftpb.Entry items in the log.
+
+*/
+package wal
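
Putting the package comment together, a minimal end-to-end sketch of the
lifecycle (error handling is abbreviated; the directory path is illustrative):

	package main

	import (
		"github.com/coreos/etcd/raft/raftpb"
		"github.com/coreos/etcd/wal"
		"github.com/coreos/etcd/wal/walpb"
	)

	func main() {
		// Create a WAL in append mode and write a state + one entry.
		w, err := wal.Create("/tmp/waldir", []byte("metadata"))
		if err != nil {
			panic(err)
		}
		st := raftpb.HardState{Term: 1, Commit: 1}
		ents := []raftpb.Entry{{Index: 1, Term: 1, Data: []byte("hello")}}
		if err = w.Save(st, ents); err != nil {
			panic(err)
		}
		if err = w.SaveSnapshot(walpb.Snapshot{Index: 1, Term: 1}); err != nil {
			panic(err)
		}
		w.Close()

		// Reopen at the snapshot; the WAL only becomes appendable again
		// after ReadAll has drained the records following the snapshot.
		w, err = wal.Open("/tmp/waldir", walpb.Snapshot{Index: 1, Term: 1})
		if err != nil {
			panic(err)
		}
		defer w.Close()
		if _, _, _, err = w.ReadAll(); err != nil {
			panic(err)
		}
	}
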
diff --git a/vendor/github.com/coreos/etcd/wal/encoder.go b/vendor/github.com/coreos/etcd/wal/encoder.go
new file mode 100644
index 0000000..e8040b8
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/wal/encoder.go
@@ -0,0 +1,120 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import (
+	"encoding/binary"
+	"hash"
+	"io"
+	"os"
+	"sync"
+
+	"github.com/coreos/etcd/pkg/crc"
+	"github.com/coreos/etcd/pkg/ioutil"
+	"github.com/coreos/etcd/wal/walpb"
+)
+
+// walPageBytes is the alignment for flushing records to the backing Writer.
+// It should be a multiple of the minimum sector size so that WAL can safely
+// distinguish between torn writes and ordinary data corruption.
+const walPageBytes = 8 * minSectorSize
+
+type encoder struct {
+	mu sync.Mutex
+	bw *ioutil.PageWriter
+
+	crc       hash.Hash32
+	buf       []byte
+	uint64buf []byte
+}
+
+func newEncoder(w io.Writer, prevCrc uint32, pageOffset int) *encoder {
+	return &encoder{
+		bw:  ioutil.NewPageWriter(w, walPageBytes, pageOffset),
+		crc: crc.New(prevCrc, crcTable),
+		// 1MB buffer
+		buf:       make([]byte, 1024*1024),
+		uint64buf: make([]byte, 8),
+	}
+}
+
+// newFileEncoder creates a new encoder with the current file offset for the page writer.
+func newFileEncoder(f *os.File, prevCrc uint32) (*encoder, error) {
+	offset, err := f.Seek(0, io.SeekCurrent)
+	if err != nil {
+		return nil, err
+	}
+	return newEncoder(f, prevCrc, int(offset)), nil
+}
+
+func (e *encoder) encode(rec *walpb.Record) error {
+	e.mu.Lock()
+	defer e.mu.Unlock()
+
+	e.crc.Write(rec.Data)
+	rec.Crc = e.crc.Sum32()
+	var (
+		data []byte
+		err  error
+		n    int
+	)
+
+	if rec.Size() > len(e.buf) {
+		data, err = rec.Marshal()
+		if err != nil {
+			return err
+		}
+	} else {
+		n, err = rec.MarshalTo(e.buf)
+		if err != nil {
+			return err
+		}
+		data = e.buf[:n]
+	}
+
+	lenField, padBytes := encodeFrameSize(len(data))
+	if err = writeUint64(e.bw, lenField, e.uint64buf); err != nil {
+		return err
+	}
+
+	if padBytes != 0 {
+		data = append(data, make([]byte, padBytes)...)
+	}
+	_, err = e.bw.Write(data)
+	return err
+}
+
+func encodeFrameSize(dataBytes int) (lenField uint64, padBytes int) {
+	lenField = uint64(dataBytes)
+	// force 8 byte alignment so length never gets a torn write
+	padBytes = (8 - (dataBytes % 8)) % 8
+	if padBytes != 0 {
+		lenField |= uint64(0x80|padBytes) << 56
+	}
+	return lenField, padBytes
+}
+
+func (e *encoder) flush() error {
+	e.mu.Lock()
+	defer e.mu.Unlock()
+	return e.bw.Flush()
+}
+
+func writeUint64(w io.Writer, n uint64, buf []byte) error {
+	// http://golang.org/src/encoding/binary/binary.go
+	binary.LittleEndian.PutUint64(buf, n)
+	_, err := w.Write(buf)
+	return err
+}
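
A quick arithmetic check of the padding rule in encodeFrameSize (a sketch;
the record sizes are arbitrary):

	package main

	import "fmt"

	func main() {
		// Frames are 8-byte aligned so a length field can never be torn:
		// frame = 8-byte length field + record + pad.
		for _, n := range []int{1, 8, 13, 16} {
			pad := (8 - n%8) % 8
			fmt.Printf("record=%2dB pad=%dB frame=%2dB\n", n, pad, 8+n+pad)
		}
	}
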
diff --git a/vendor/github.com/coreos/etcd/wal/file_pipeline.go b/vendor/github.com/coreos/etcd/wal/file_pipeline.go
new file mode 100644
index 0000000..3a1c57c
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/wal/file_pipeline.go
@@ -0,0 +1,97 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/coreos/etcd/pkg/fileutil"
+)
+
+// filePipeline pipelines the allocation of disk space
+type filePipeline struct {
+	// dir to put files
+	dir string
+	// size of files to make, in bytes
+	size int64
+	// count number of files generated
+	count int
+
+	filec chan *fileutil.LockedFile
+	errc  chan error
+	donec chan struct{}
+}
+
+func newFilePipeline(dir string, fileSize int64) *filePipeline {
+	fp := &filePipeline{
+		dir:   dir,
+		size:  fileSize,
+		filec: make(chan *fileutil.LockedFile),
+		errc:  make(chan error, 1),
+		donec: make(chan struct{}),
+	}
+	go fp.run()
+	return fp
+}
+
+// Open returns a fresh file for writing. Rename the file before calling
+// Open again or there will be file collisions.
+func (fp *filePipeline) Open() (f *fileutil.LockedFile, err error) {
+	select {
+	case f = <-fp.filec:
+	case err = <-fp.errc:
+	}
+	return f, err
+}
+
+func (fp *filePipeline) Close() error {
+	close(fp.donec)
+	return <-fp.errc
+}
+
+func (fp *filePipeline) alloc() (f *fileutil.LockedFile, err error) {
+	// count % 2 so this file isn't the same as the one last published
+	fpath := filepath.Join(fp.dir, fmt.Sprintf("%d.tmp", fp.count%2))
+	if f, err = fileutil.LockFile(fpath, os.O_CREATE|os.O_WRONLY, fileutil.PrivateFileMode); err != nil {
+		return nil, err
+	}
+	if err = fileutil.Preallocate(f.File, fp.size, true); err != nil {
+		plog.Errorf("failed to allocate space when creating new wal file (%v)", err)
+		f.Close()
+		return nil, err
+	}
+	fp.count++
+	return f, nil
+}
+
+func (fp *filePipeline) run() {
+	defer close(fp.errc)
+	for {
+		f, err := fp.alloc()
+		if err != nil {
+			fp.errc <- err
+			return
+		}
+		select {
+		case fp.filec <- f:
+		case <-fp.donec:
+			os.Remove(f.Name())
+			f.Close()
+			return
+		}
+	}
+}
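
Since filePipeline is unexported, this usage sketch reads as if it lived
inside package wal (SegmentSizeBytes and walName are defined later in this
package); the rename target is illustrative:

	package wal

	import (
		"os"
		"path/filepath"
	)

	// exampleUsePipeline shows the contract: the pipeline preallocates the
	// next temp file in the background, Open hands it out, and the caller
	// must rename the file away before calling Open again.
	func exampleUsePipeline(dir string) error {
		fp := newFilePipeline(dir, SegmentSizeBytes)
		defer fp.Close()

		f, err := fp.Open()
		if err != nil {
			return err
		}
		return os.Rename(f.Name(), filepath.Join(dir, walName(1, 0x21)))
	}
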
diff --git a/vendor/github.com/coreos/etcd/wal/metrics.go b/vendor/github.com/coreos/etcd/wal/metrics.go
new file mode 100644
index 0000000..9e089d3
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/wal/metrics.go
@@ -0,0 +1,31 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import "github.com/prometheus/client_golang/prometheus"
+
+var (
+	syncDurations = prometheus.NewHistogram(prometheus.HistogramOpts{
+		Namespace: "etcd",
+		Subsystem: "disk",
+		Name:      "wal_fsync_duration_seconds",
+		Help:      "The latency distributions of fsync called by wal.",
+		Buckets:   prometheus.ExponentialBuckets(0.001, 2, 14),
+	})
+)
+
+func init() {
+	prometheus.MustRegister(syncDurations)
+}
diff --git a/vendor/github.com/coreos/etcd/wal/repair.go b/vendor/github.com/coreos/etcd/wal/repair.go
new file mode 100644
index 0000000..091036b
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/wal/repair.go
@@ -0,0 +1,99 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import (
+	"io"
+	"os"
+	"path/filepath"
+
+	"github.com/coreos/etcd/pkg/fileutil"
+	"github.com/coreos/etcd/wal/walpb"
+)
+
+// Repair tries to repair ErrUnexpectedEOF in the
+// last wal file by truncating.
+func Repair(dirpath string) bool {
+	f, err := openLast(dirpath)
+	if err != nil {
+		return false
+	}
+	defer f.Close()
+
+	rec := &walpb.Record{}
+	decoder := newDecoder(f)
+	for {
+		lastOffset := decoder.lastOffset()
+		err := decoder.decode(rec)
+		switch err {
+		case nil:
+			// update crc of the decoder when necessary
+			switch rec.Type {
+			case crcType:
+				crc := decoder.crc.Sum32()
+				// current crc of decoder must match the crc of the record;
+				// no need to match a zero crc, since the decoder is new in this case.
+				if crc != 0 && rec.Validate(crc) != nil {
+					return false
+				}
+				decoder.updateCRC(rec.Crc)
+			}
+			continue
+		case io.EOF:
+			return true
+		case io.ErrUnexpectedEOF:
+			plog.Noticef("repairing %v", f.Name())
+			bf, bferr := os.Create(f.Name() + ".broken")
+			if bferr != nil {
+				plog.Errorf("could not repair %v, failed to create backup file", f.Name())
+				return false
+			}
+			defer bf.Close()
+
+			if _, err = f.Seek(0, io.SeekStart); err != nil {
+				plog.Errorf("could not repair %v, failed to read file", f.Name())
+				return false
+			}
+
+			if _, err = io.Copy(bf, f); err != nil {
+				plog.Errorf("could not repair %v, failed to copy file", f.Name())
+				return false
+			}
+
+			if err = f.Truncate(int64(lastOffset)); err != nil {
+				plog.Errorf("could not repair %v, failed to truncate file", f.Name())
+				return false
+			}
+			if err = fileutil.Fsync(f.File); err != nil {
+				plog.Errorf("could not repair %v, failed to sync file", f.Name())
+				return false
+			}
+			return true
+		default:
+			plog.Errorf("could not repair error (%v)", err)
+			return false
+		}
+	}
+}
+
+// openLast opens the last wal file for read and write.
+func openLast(dirpath string) (*fileutil.LockedFile, error) {
+	names, err := readWalNames(dirpath)
+	if err != nil {
+		return nil, err
+	}
+	last := filepath.Join(dirpath, names[len(names)-1])
+	return fileutil.LockFile(last, os.O_RDWR, fileutil.PrivateFileMode)
+}
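
A sketch of the intended call pattern around Repair, modeled on how callers
typically recover from a torn tail record (the retry loop and path are
illustrative):

	package main

	import (
		"io"

		"github.com/coreos/etcd/wal"
		"github.com/coreos/etcd/wal/walpb"
	)

	func main() {
		for {
			w, err := wal.Open("/tmp/waldir", walpb.Snapshot{})
			if err != nil {
				panic(err)
			}
			_, _, _, err = w.ReadAll()
			if err == io.ErrUnexpectedEOF {
				// torn write at the tail: truncate it and retry
				w.Close()
				if !wal.Repair("/tmp/waldir") {
					panic("repair failed")
				}
				continue
			}
			if err != nil {
				panic(err)
			}
			w.Close()
			return
		}
	}
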
diff --git a/vendor/github.com/coreos/etcd/wal/util.go b/vendor/github.com/coreos/etcd/wal/util.go
new file mode 100644
index 0000000..5c56e22
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/wal/util.go
@@ -0,0 +1,107 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+
+	"github.com/coreos/etcd/pkg/fileutil"
+)
+
+var (
+	badWalName = errors.New("bad wal name")
+)
+
+func Exist(dirpath string) bool {
+	names, err := fileutil.ReadDir(dirpath)
+	if err != nil {
+		return false
+	}
+	return len(names) != 0
+}
+
+// searchIndex returns the last array index of names whose raft index section is
+// equal to or smaller than the given index.
+// The given names MUST be sorted.
+func searchIndex(names []string, index uint64) (int, bool) {
+	for i := len(names) - 1; i >= 0; i-- {
+		name := names[i]
+		_, curIndex, err := parseWalName(name)
+		if err != nil {
+			plog.Panicf("parse correct name should never fail: %v", err)
+		}
+		if index >= curIndex {
+			return i, true
+		}
+	}
+	return -1, false
+}
+
+// isValidSeq checks whether seq increases continuously.
+// The given names should have been sorted based on sequence number.
+func isValidSeq(names []string) bool {
+	var lastSeq uint64
+	for _, name := range names {
+		curSeq, _, err := parseWalName(name)
+		if err != nil {
+			plog.Panicf("parse correct name should never fail: %v", err)
+		}
+		if lastSeq != 0 && lastSeq != curSeq-1 {
+			return false
+		}
+		lastSeq = curSeq
+	}
+	return true
+}
+
+func readWalNames(dirpath string) ([]string, error) {
+	names, err := fileutil.ReadDir(dirpath)
+	if err != nil {
+		return nil, err
+	}
+	wnames := checkWalNames(names)
+	if len(wnames) == 0 {
+		return nil, ErrFileNotFound
+	}
+	return wnames, nil
+}
+
+func checkWalNames(names []string) []string {
+	wnames := make([]string, 0)
+	for _, name := range names {
+		if _, _, err := parseWalName(name); err != nil {
+			// don't complain about left over tmp files
+			if !strings.HasSuffix(name, ".tmp") {
+				plog.Warningf("ignored file %v in wal", name)
+			}
+			continue
+		}
+		wnames = append(wnames, name)
+	}
+	return wnames
+}
+
+func parseWalName(str string) (seq, index uint64, err error) {
+	if !strings.HasSuffix(str, ".wal") {
+		return 0, 0, badWalName
+	}
+	_, err = fmt.Sscanf(str, "%016x-%016x.wal", &seq, &index)
+	return seq, index, err
+}
+
+func walName(seq, index uint64) string {
+	return fmt.Sprintf("%016x-%016x.wal", seq, index)
+}
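
A round-trip check of the naming scheme used by walName and parseWalName
(values are illustrative): sixteen hex digits of sequence, a dash, sixteen
hex digits of raft index:

	package main

	import "fmt"

	func main() {
		name := fmt.Sprintf("%016x-%016x.wal", uint64(1), uint64(0x21))
		fmt.Println(name) // 0000000000000001-0000000000000021.wal

		var seq, index uint64
		fmt.Sscanf(name, "%016x-%016x.wal", &seq, &index)
		fmt.Println(seq, index) // 1 33
	}
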
diff --git a/vendor/github.com/coreos/etcd/wal/wal.go b/vendor/github.com/coreos/etcd/wal/wal.go
new file mode 100644
index 0000000..96d01a2
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/wal/wal.go
@@ -0,0 +1,675 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"hash/crc32"
+	"io"
+	"os"
+	"path/filepath"
+	"sync"
+	"time"
+
+	"github.com/coreos/etcd/pkg/fileutil"
+	"github.com/coreos/etcd/pkg/pbutil"
+	"github.com/coreos/etcd/raft"
+	"github.com/coreos/etcd/raft/raftpb"
+	"github.com/coreos/etcd/wal/walpb"
+
+	"github.com/coreos/pkg/capnslog"
+)
+
+const (
+	metadataType int64 = iota + 1
+	entryType
+	stateType
+	crcType
+	snapshotType
+
+	// warnSyncDuration is the amount of time allotted to an fsync before
+	// logging a warning
+	warnSyncDuration = time.Second
+)
+
+var (
+	// SegmentSizeBytes is the preallocated size of each wal segment file.
+	// The actual size might be larger than this. In general, the default
+	// value should be used, but this is defined as an exported variable
+	// so that tests can set a different segment size.
+	SegmentSizeBytes int64 = 64 * 1000 * 1000 // 64MB
+
+	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "wal")
+
+	ErrMetadataConflict = errors.New("wal: conflicting metadata found")
+	ErrFileNotFound     = errors.New("wal: file not found")
+	ErrCRCMismatch      = errors.New("wal: crc mismatch")
+	ErrSnapshotMismatch = errors.New("wal: snapshot mismatch")
+	ErrSnapshotNotFound = errors.New("wal: snapshot not found")
+	crcTable            = crc32.MakeTable(crc32.Castagnoli)
+)
+
+// WAL is a logical representation of the stable storage.
+// WAL is either in read mode or append mode but not both.
+// A newly created WAL is in append mode, and ready for appending records.
+// A just opened WAL is in read mode, and ready for reading records.
+// The WAL will be ready for appending after reading out all the previous records.
+type WAL struct {
+	dir string // the living directory of the underlying files
+
+	// dirFile is a fd for the wal directory for syncing on Rename
+	dirFile *os.File
+
+	metadata []byte           // metadata recorded at the head of each WAL
+	state    raftpb.HardState // hardstate recorded at the head of WAL
+
+	start     walpb.Snapshot // snapshot to start reading
+	decoder   *decoder       // decoder to decode records
+	readClose func() error   // closer for decode reader
+
+	mu      sync.Mutex
+	enti    uint64   // index of the last entry saved to the wal
+	encoder *encoder // encoder to encode records
+
+	locks []*fileutil.LockedFile // the locked files the WAL holds (the name is increasing)
+	fp    *filePipeline
+}
+
+// Create creates a WAL ready for appending records. The given metadata is
+// recorded at the head of each WAL file, and can be retrieved with ReadAll.
+func Create(dirpath string, metadata []byte) (*WAL, error) {
+	if Exist(dirpath) {
+		return nil, os.ErrExist
+	}
+
+	// keep temporary wal directory so WAL initialization appears atomic
+	tmpdirpath := filepath.Clean(dirpath) + ".tmp"
+	if fileutil.Exist(tmpdirpath) {
+		if err := os.RemoveAll(tmpdirpath); err != nil {
+			return nil, err
+		}
+	}
+	if err := fileutil.CreateDirAll(tmpdirpath); err != nil {
+		return nil, err
+	}
+
+	p := filepath.Join(tmpdirpath, walName(0, 0))
+	f, err := fileutil.LockFile(p, os.O_WRONLY|os.O_CREATE, fileutil.PrivateFileMode)
+	if err != nil {
+		return nil, err
+	}
+	if _, err = f.Seek(0, io.SeekEnd); err != nil {
+		return nil, err
+	}
+	if err = fileutil.Preallocate(f.File, SegmentSizeBytes, true); err != nil {
+		return nil, err
+	}
+
+	w := &WAL{
+		dir:      dirpath,
+		metadata: metadata,
+	}
+	w.encoder, err = newFileEncoder(f.File, 0)
+	if err != nil {
+		return nil, err
+	}
+	w.locks = append(w.locks, f)
+	if err = w.saveCrc(0); err != nil {
+		return nil, err
+	}
+	if err = w.encoder.encode(&walpb.Record{Type: metadataType, Data: metadata}); err != nil {
+		return nil, err
+	}
+	if err = w.SaveSnapshot(walpb.Snapshot{}); err != nil {
+		return nil, err
+	}
+
+	if w, err = w.renameWal(tmpdirpath); err != nil {
+		return nil, err
+	}
+
+	// directory was renamed; sync parent dir to persist rename
+	pdir, perr := fileutil.OpenDir(filepath.Dir(w.dir))
+	if perr != nil {
+		return nil, perr
+	}
+	if perr = fileutil.Fsync(pdir); perr != nil {
+		return nil, perr
+	}
+	if perr = pdir.Close(); perr != nil {
+		return nil, perr
+	}
+
+	return w, nil
+}
+
+func (w *WAL) renameWal(tmpdirpath string) (*WAL, error) {
+	if err := os.RemoveAll(w.dir); err != nil {
+		return nil, err
+	}
+	// On non-Windows platforms, hold the lock while renaming. Releasing
+	// the lock and trying to reacquire it quickly can be flaky because
+	// it's possible the process will fork to spawn a process while this is
+	// happening. The fds are set up as close-on-exec by the Go runtime,
+	// but there is a window between the fork and the exec where another
+	// process holds the lock.
+	if err := os.Rename(tmpdirpath, w.dir); err != nil {
+		if _, ok := err.(*os.LinkError); ok {
+			return w.renameWalUnlock(tmpdirpath)
+		}
+		return nil, err
+	}
+	w.fp = newFilePipeline(w.dir, SegmentSizeBytes)
+	df, err := fileutil.OpenDir(w.dir)
+	w.dirFile = df
+	return w, err
+}
+
+func (w *WAL) renameWalUnlock(tmpdirpath string) (*WAL, error) {
+	// rename of directory with locked files doesn't work on windows/cifs;
+	// close the WAL to release the locks so the directory can be renamed.
+	plog.Infof("releasing file lock to rename %q to %q", tmpdirpath, w.dir)
+	w.Close()
+	if err := os.Rename(tmpdirpath, w.dir); err != nil {
+		return nil, err
+	}
+	// reopen and relock
+	newWAL, oerr := Open(w.dir, walpb.Snapshot{})
+	if oerr != nil {
+		return nil, oerr
+	}
+	if _, _, _, err := newWAL.ReadAll(); err != nil {
+		newWAL.Close()
+		return nil, err
+	}
+	return newWAL, nil
+}
+
+// Open opens the WAL at the given snap.
+// The snap SHOULD have been previously saved to the WAL, or the following
+// ReadAll will fail.
+// The returned WAL is ready to read and the first record will be the one after
+// the given snap. The WAL cannot be appended to before reading out all of its
+// previous records.
+func Open(dirpath string, snap walpb.Snapshot) (*WAL, error) {
+	w, err := openAtIndex(dirpath, snap, true)
+	if err != nil {
+		return nil, err
+	}
+	if w.dirFile, err = fileutil.OpenDir(w.dir); err != nil {
+		return nil, err
+	}
+	return w, nil
+}
+
+// OpenForRead only opens the wal files for read.
+// Write on a read only wal panics.
+func OpenForRead(dirpath string, snap walpb.Snapshot) (*WAL, error) {
+	return openAtIndex(dirpath, snap, false)
+}
+
+func openAtIndex(dirpath string, snap walpb.Snapshot, write bool) (*WAL, error) {
+	names, err := readWalNames(dirpath)
+	if err != nil {
+		return nil, err
+	}
+
+	nameIndex, ok := searchIndex(names, snap.Index)
+	if !ok || !isValidSeq(names[nameIndex:]) {
+		return nil, ErrFileNotFound
+	}
+
+	// open the wal files
+	rcs := make([]io.ReadCloser, 0)
+	rs := make([]io.Reader, 0)
+	ls := make([]*fileutil.LockedFile, 0)
+	for _, name := range names[nameIndex:] {
+		p := filepath.Join(dirpath, name)
+		if write {
+			l, err := fileutil.TryLockFile(p, os.O_RDWR, fileutil.PrivateFileMode)
+			if err != nil {
+				closeAll(rcs...)
+				return nil, err
+			}
+			ls = append(ls, l)
+			rcs = append(rcs, l)
+		} else {
+			rf, err := os.OpenFile(p, os.O_RDONLY, fileutil.PrivateFileMode)
+			if err != nil {
+				closeAll(rcs...)
+				return nil, err
+			}
+			ls = append(ls, nil)
+			rcs = append(rcs, rf)
+		}
+		rs = append(rs, rcs[len(rcs)-1])
+	}
+
+	closer := func() error { return closeAll(rcs...) }
+
+	// create a WAL ready for reading
+	w := &WAL{
+		dir:       dirpath,
+		start:     snap,
+		decoder:   newDecoder(rs...),
+		readClose: closer,
+		locks:     ls,
+	}
+
+	if write {
+		// write reuses the file descriptors from read; don't close so
+		// WAL can append without dropping the file lock
+		w.readClose = nil
+		if _, _, err := parseWalName(filepath.Base(w.tail().Name())); err != nil {
+			closer()
+			return nil, err
+		}
+		w.fp = newFilePipeline(w.dir, SegmentSizeBytes)
+	}
+
+	return w, nil
+}
+
+// ReadAll reads out records of the current WAL.
+// If opened in write mode, it must read out all records until EOF, or an
+// error will be returned.
+// If opened in read mode, it will try to read all records if possible.
+// If it cannot read out the expected snap, it will return ErrSnapshotNotFound.
+// If the loaded snap doesn't match the expected one, it will return all the
+// records along with the error ErrSnapshotMismatch.
+// TODO: detect not-last-snap error.
+// TODO: maybe loosen the matching check.
+// After ReadAll, the WAL will be ready for appending new records.
+func (w *WAL) ReadAll() (metadata []byte, state raftpb.HardState, ents []raftpb.Entry, err error) {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+
+	rec := &walpb.Record{}
+	decoder := w.decoder
+
+	var match bool
+	for err = decoder.decode(rec); err == nil; err = decoder.decode(rec) {
+		switch rec.Type {
+		case entryType:
+			e := mustUnmarshalEntry(rec.Data)
+			if e.Index > w.start.Index {
+				ents = append(ents[:e.Index-w.start.Index-1], e)
+			}
+			w.enti = e.Index
+		case stateType:
+			state = mustUnmarshalState(rec.Data)
+		case metadataType:
+			if metadata != nil && !bytes.Equal(metadata, rec.Data) {
+				state.Reset()
+				return nil, state, nil, ErrMetadataConflict
+			}
+			metadata = rec.Data
+		case crcType:
+			crc := decoder.crc.Sum32()
+			// current crc of decoder must match the crc of the record;
+			// no need to match a zero crc, since the decoder is new in this case.
+			if crc != 0 && rec.Validate(crc) != nil {
+				state.Reset()
+				return nil, state, nil, ErrCRCMismatch
+			}
+			decoder.updateCRC(rec.Crc)
+		case snapshotType:
+			var snap walpb.Snapshot
+			pbutil.MustUnmarshal(&snap, rec.Data)
+			if snap.Index == w.start.Index {
+				if snap.Term != w.start.Term {
+					state.Reset()
+					return nil, state, nil, ErrSnapshotMismatch
+				}
+				match = true
+			}
+		default:
+			state.Reset()
+			return nil, state, nil, fmt.Errorf("unexpected block type %d", rec.Type)
+		}
+	}
+
+	switch w.tail() {
+	case nil:
+		// We do not have to read out all entries in read mode.
+		// The last record may be a partially written one, so
+		// io.ErrUnexpectedEOF might be returned.
+		if err != io.EOF && err != io.ErrUnexpectedEOF {
+			state.Reset()
+			return nil, state, nil, err
+		}
+	default:
+		// We must read all of the entries if WAL is opened in write mode.
+		if err != io.EOF {
+			state.Reset()
+			return nil, state, nil, err
+		}
+		// decodeRecord() will return io.EOF if it detects a zero record,
+		// but this zero record may be followed by non-zero records from
+		// a torn write. Overwriting some of these non-zero records, but
+		// not all, will cause CRC errors on WAL open. Since the records
+		// were never fully synced to disk in the first place, it's safe
+		// to zero them out to avoid any CRC errors from new writes.
+		if _, err = w.tail().Seek(w.decoder.lastOffset(), io.SeekStart); err != nil {
+			return nil, state, nil, err
+		}
+		if err = fileutil.ZeroToEnd(w.tail().File); err != nil {
+			return nil, state, nil, err
+		}
+	}
+
+	err = nil
+	if !match {
+		err = ErrSnapshotNotFound
+	}
+
+	// close decoder, disable reading
+	if w.readClose != nil {
+		w.readClose()
+		w.readClose = nil
+	}
+	w.start = walpb.Snapshot{}
+
+	w.metadata = metadata
+
+	if w.tail() != nil {
+		// create encoder (chain crc with the decoder), enable appending
+		w.encoder, err = newFileEncoder(w.tail().File, w.decoder.lastCRC())
+		if err != nil {
+			return
+		}
+	}
+	w.decoder = nil
+
+	return metadata, state, ents, err
+}
+
+// cut closes the file currently being written and creates a new one ready to append.
+// cut first creates a temp wal file and writes the necessary headers into it.
+// Then cut atomically renames the temp wal file to a wal file.
+func (w *WAL) cut() error {
+	// close the old wal file; truncate to avoid wasting space in case of an early cut
+	off, serr := w.tail().Seek(0, io.SeekCurrent)
+	if serr != nil {
+		return serr
+	}
+	if err := w.tail().Truncate(off); err != nil {
+		return err
+	}
+	if err := w.sync(); err != nil {
+		return err
+	}
+
+	fpath := filepath.Join(w.dir, walName(w.seq()+1, w.enti+1))
+
+	// create a temp wal file with name sequence + 1, or truncate the existing one
+	newTail, err := w.fp.Open()
+	if err != nil {
+		return err
+	}
+
+	// update writer and save the previous crc
+	w.locks = append(w.locks, newTail)
+	prevCrc := w.encoder.crc.Sum32()
+	w.encoder, err = newFileEncoder(w.tail().File, prevCrc)
+	if err != nil {
+		return err
+	}
+	if err = w.saveCrc(prevCrc); err != nil {
+		return err
+	}
+	if err = w.encoder.encode(&walpb.Record{Type: metadataType, Data: w.metadata}); err != nil {
+		return err
+	}
+	if err = w.saveState(&w.state); err != nil {
+		return err
+	}
+	// atomically move temp wal file to wal file
+	if err = w.sync(); err != nil {
+		return err
+	}
+
+	off, err = w.tail().Seek(0, io.SeekCurrent)
+	if err != nil {
+		return err
+	}
+
+	if err = os.Rename(newTail.Name(), fpath); err != nil {
+		return err
+	}
+	if err = fileutil.Fsync(w.dirFile); err != nil {
+		return err
+	}
+
+	// reopen newTail with its new path so calls to Name() match the wal filename format
+	newTail.Close()
+
+	if newTail, err = fileutil.LockFile(fpath, os.O_WRONLY, fileutil.PrivateFileMode); err != nil {
+		return err
+	}
+	if _, err = newTail.Seek(off, io.SeekStart); err != nil {
+		return err
+	}
+
+	w.locks[len(w.locks)-1] = newTail
+
+	prevCrc = w.encoder.crc.Sum32()
+	w.encoder, err = newFileEncoder(w.tail().File, prevCrc)
+	if err != nil {
+		return err
+	}
+
+	plog.Infof("segmented wal file %v is created", fpath)
+	return nil
+}
+
+func (w *WAL) sync() error {
+	if w.encoder != nil {
+		if err := w.encoder.flush(); err != nil {
+			return err
+		}
+	}
+	start := time.Now()
+	err := fileutil.Fdatasync(w.tail().File)
+
+	duration := time.Since(start)
+	if duration > warnSyncDuration {
+		plog.Warningf("sync duration of %v, expected less than %v", duration, warnSyncDuration)
+	}
+	syncDurations.Observe(duration.Seconds())
+
+	return err
+}
+
+// ReleaseLockTo releases the locks whose index is smaller than the given
+// index, except the largest one among them.
+// For example, if the WAL is holding locks 1,2,3,4,5,6, ReleaseLockTo(4) will
+// release locks 1,2 but keep 3. ReleaseLockTo(5) will release 1,2,3 but keep 4.
+func (w *WAL) ReleaseLockTo(index uint64) error {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+
+	if len(w.locks) == 0 {
+		return nil
+	}
+
+	var smaller int
+	found := false
+
+	for i, l := range w.locks {
+		_, lockIndex, err := parseWalName(filepath.Base(l.Name()))
+		if err != nil {
+			return err
+		}
+		if lockIndex >= index {
+			smaller = i - 1
+			found = true
+			break
+		}
+	}
+
+	// if no lock index is greater than the release index, we can
+	// release locks up to the last one (exclusive).
+	if !found {
+		smaller = len(w.locks) - 1
+	}
+
+	if smaller <= 0 {
+		return nil
+	}
+
+	for i := 0; i < smaller; i++ {
+		if w.locks[i] == nil {
+			continue
+		}
+		w.locks[i].Close()
+	}
+	w.locks = w.locks[smaller:]
+
+	return nil
+}
+
+func (w *WAL) Close() error {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+
+	if w.fp != nil {
+		w.fp.Close()
+		w.fp = nil
+	}
+
+	if w.tail() != nil {
+		if err := w.sync(); err != nil {
+			return err
+		}
+	}
+	for _, l := range w.locks {
+		if l == nil {
+			continue
+		}
+		if err := l.Close(); err != nil {
+			plog.Errorf("failed to unlock during closing wal: %s", err)
+		}
+	}
+
+	return w.dirFile.Close()
+}
+
+func (w *WAL) saveEntry(e *raftpb.Entry) error {
+	// TODO: add MustMarshalTo to reduce one allocation.
+	b := pbutil.MustMarshal(e)
+	rec := &walpb.Record{Type: entryType, Data: b}
+	if err := w.encoder.encode(rec); err != nil {
+		return err
+	}
+	w.enti = e.Index
+	return nil
+}
+
+func (w *WAL) saveState(s *raftpb.HardState) error {
+	if raft.IsEmptyHardState(*s) {
+		return nil
+	}
+	w.state = *s
+	b := pbutil.MustMarshal(s)
+	rec := &walpb.Record{Type: stateType, Data: b}
+	return w.encoder.encode(rec)
+}
+
+func (w *WAL) Save(st raftpb.HardState, ents []raftpb.Entry) error {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+
+	// shortcut: do not call sync
+	if raft.IsEmptyHardState(st) && len(ents) == 0 {
+		return nil
+	}
+
+	mustSync := raft.MustSync(st, w.state, len(ents))
+
+	// TODO(xiangli): no more reference operator
+	for i := range ents {
+		if err := w.saveEntry(&ents[i]); err != nil {
+			return err
+		}
+	}
+	if err := w.saveState(&st); err != nil {
+		return err
+	}
+
+	curOff, err := w.tail().Seek(0, io.SeekCurrent)
+	if err != nil {
+		return err
+	}
+	if curOff < SegmentSizeBytes {
+		if mustSync {
+			return w.sync()
+		}
+		return nil
+	}
+
+	return w.cut()
+}
+
+func (w *WAL) SaveSnapshot(e walpb.Snapshot) error {
+	b := pbutil.MustMarshal(&e)
+
+	w.mu.Lock()
+	defer w.mu.Unlock()
+
+	rec := &walpb.Record{Type: snapshotType, Data: b}
+	if err := w.encoder.encode(rec); err != nil {
+		return err
+	}
+	// update enti only when snapshot is ahead of last index
+	if w.enti < e.Index {
+		w.enti = e.Index
+	}
+	return w.sync()
+}
+
+func (w *WAL) saveCrc(prevCrc uint32) error {
+	return w.encoder.encode(&walpb.Record{Type: crcType, Crc: prevCrc})
+}
+
+func (w *WAL) tail() *fileutil.LockedFile {
+	if len(w.locks) > 0 {
+		return w.locks[len(w.locks)-1]
+	}
+	return nil
+}
+
+func (w *WAL) seq() uint64 {
+	t := w.tail()
+	if t == nil {
+		return 0
+	}
+	seq, _, err := parseWalName(filepath.Base(t.Name()))
+	if err != nil {
+		plog.Fatalf("bad wal name %s (%v)", t.Name(), err)
+	}
+	return seq
+}
+
+func closeAll(rcs ...io.ReadCloser) error {
+	for _, f := range rcs {
+		if err := f.Close(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/coreos/etcd/wal/walpb/record.go b/vendor/github.com/coreos/etcd/wal/walpb/record.go
new file mode 100644
index 0000000..30a05e0
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/wal/walpb/record.go
@@ -0,0 +1,29 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package walpb
+
+import "errors"
+
+var (
+	ErrCRCMismatch = errors.New("walpb: crc mismatch")
+)
+
+func (rec *Record) Validate(crc uint32) error {
+	if rec.Crc == crc {
+		return nil
+	}
+	rec.Reset()
+	return ErrCRCMismatch
+}
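
A small sketch of Validate's behavior (field values are arbitrary): on a
mismatch the record is Reset and ErrCRCMismatch is returned:

	package main

	import (
		"fmt"

		"github.com/coreos/etcd/wal/walpb"
	)

	func main() {
		rec := &walpb.Record{Type: 2, Crc: 0x1234, Data: []byte("payload")}
		fmt.Println(rec.Validate(0x1234)) // <nil>

		rec = &walpb.Record{Crc: 0x1234, Data: []byte("payload")}
		fmt.Println(rec.Validate(0x9999)) // walpb: crc mismatch
	}
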
diff --git a/vendor/github.com/coreos/etcd/wal/walpb/record.proto b/vendor/github.com/coreos/etcd/wal/walpb/record.proto
new file mode 100644
index 0000000..b694cb2
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/wal/walpb/record.proto
@@ -0,0 +1,20 @@
+syntax = "proto2";
+package walpb;
+
+import "gogoproto/gogo.proto";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.sizer_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+option (gogoproto.goproto_getters_all) = false;
+
+message Record {
+	optional int64 type  = 1 [(gogoproto.nullable) = false];
+	optional uint32 crc  = 2 [(gogoproto.nullable) = false];
+	optional bytes data  = 3;
+}
+
+message Snapshot {
+	optional uint64 index = 1 [(gogoproto.nullable) = false];
+	optional uint64 term  = 2 [(gogoproto.nullable) = false];
+}
diff --git a/vendor/github.com/coreos/go-semver/LICENSE b/vendor/github.com/coreos/go-semver/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/github.com/coreos/go-semver/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/coreos/go-semver/semver/semver.go b/vendor/github.com/coreos/go-semver/semver/semver.go
new file mode 100644
index 0000000..110fc23
--- /dev/null
+++ b/vendor/github.com/coreos/go-semver/semver/semver.go
@@ -0,0 +1,268 @@
+// Copyright 2013-2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package semver implements Semantic Versions: http://semver.org
+package semver
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+type Version struct {
+	Major      int64
+	Minor      int64
+	Patch      int64
+	PreRelease PreRelease
+	Metadata   string
+}
+
+type PreRelease string
+
+func splitOff(input *string, delim string) (val string) {
+	parts := strings.SplitN(*input, delim, 2)
+
+	if len(parts) == 2 {
+		*input = parts[0]
+		val = parts[1]
+	}
+
+	return val
+}
+
+func New(version string) *Version {
+	return Must(NewVersion(version))
+}
+
+func NewVersion(version string) (*Version, error) {
+	v := Version{}
+
+	if err := v.Set(version); err != nil {
+		return nil, err
+	}
+
+	return &v, nil
+}
+
+// Must is a helper for wrapping NewVersion and will panic if err is not nil.
+func Must(v *Version, err error) *Version {
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Set parses and updates v from the given version string. Implements flag.Value.
+func (v *Version) Set(version string) error {
+	dotted := version
+	metadata := splitOff(&version, "+")
+	preRelease := PreRelease(splitOff(&version, "-"))
+	dotParts := strings.SplitN(version, ".", 3)
+
+	if len(dotParts) != 3 {
+		// Report the original input, not the string with the pre-release
+		// and metadata already split off.
+		return fmt.Errorf("%s is not in dotted-tri format", dotted)
+	}
+
+	parsed := make([]int64, 3)
+
+	for i, v := range dotParts[:3] {
+		val, err := strconv.ParseInt(v, 10, 64)
+		if err != nil {
+			return err
+		}
+		parsed[i] = val
+	}
+
+	v.Metadata = metadata
+	v.PreRelease = preRelease
+	v.Major = parsed[0]
+	v.Minor = parsed[1]
+	v.Patch = parsed[2]
+	return nil
+}
+
+func (v Version) String() string {
+	var buffer bytes.Buffer
+
+	fmt.Fprintf(&buffer, "%d.%d.%d", v.Major, v.Minor, v.Patch)
+
+	if v.PreRelease != "" {
+		fmt.Fprintf(&buffer, "-%s", v.PreRelease)
+	}
+
+	if v.Metadata != "" {
+		fmt.Fprintf(&buffer, "+%s", v.Metadata)
+	}
+
+	return buffer.String()
+}
+
+func (v *Version) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	var data string
+	if err := unmarshal(&data); err != nil {
+		return err
+	}
+	return v.Set(data)
+}
+
+func (v Version) MarshalJSON() ([]byte, error) {
+	return []byte(`"` + v.String() + `"`), nil
+}
+
+func (v *Version) UnmarshalJSON(data []byte) error {
+	l := len(data)
+	if l == 0 || string(data) == `""` {
+		return nil
+	}
+	if l < 2 || data[0] != '"' || data[l-1] != '"' {
+		return errors.New("invalid semver string")
+	}
+	return v.Set(string(data[1 : l-1]))
+}
+
+// Compare tests if v is less than, equal to, or greater than versionB,
+// returning -1, 0, or +1 respectively.
+func (v Version) Compare(versionB Version) int {
+	if cmp := recursiveCompare(v.Slice(), versionB.Slice()); cmp != 0 {
+		return cmp
+	}
+	return preReleaseCompare(v, versionB)
+}
+
+// Equal tests if v is equal to versionB.
+func (v Version) Equal(versionB Version) bool {
+	return v.Compare(versionB) == 0
+}
+
+// LessThan tests if v is less than versionB.
+func (v Version) LessThan(versionB Version) bool {
+	return v.Compare(versionB) < 0
+}
+
+// Slice converts the comparable parts of the semver into a slice of integers.
+func (v Version) Slice() []int64 {
+	return []int64{v.Major, v.Minor, v.Patch}
+}
+
+func (p PreRelease) Slice() []string {
+	preRelease := string(p)
+	return strings.Split(preRelease, ".")
+}
+
+func preReleaseCompare(versionA Version, versionB Version) int {
+	a := versionA.PreRelease
+	b := versionB.PreRelease
+
+	/* Handle the case where if two versions are otherwise equal it is the
+	 * one without a PreRelease that is greater */
+	if len(a) == 0 && (len(b) > 0) {
+		return 1
+	} else if len(b) == 0 && (len(a) > 0) {
+		return -1
+	}
+
+	// If there is a prerelease, check and compare each part.
+	return recursivePreReleaseCompare(a.Slice(), b.Slice())
+}
+
+func recursiveCompare(versionA []int64, versionB []int64) int {
+	if len(versionA) == 0 {
+		return 0
+	}
+
+	a := versionA[0]
+	b := versionB[0]
+
+	if a > b {
+		return 1
+	} else if a < b {
+		return -1
+	}
+
+	return recursiveCompare(versionA[1:], versionB[1:])
+}
+
+func recursivePreReleaseCompare(versionA []string, versionB []string) int {
+	// A larger set of pre-release fields has a higher precedence than a smaller set,
+	// if all of the preceding identifiers are equal.
+	if len(versionA) == 0 {
+		if len(versionB) > 0 {
+			return -1
+		}
+		return 0
+	} else if len(versionB) == 0 {
+		// We're longer than versionB so return 1.
+		return 1
+	}
+
+	a := versionA[0]
+	b := versionB[0]
+
+	aInt := false
+	bInt := false
+
+	aI, err := strconv.Atoi(versionA[0])
+	if err == nil {
+		aInt = true
+	}
+
+	bI, err := strconv.Atoi(versionB[0])
+	if err == nil {
+		bInt = true
+	}
+
+	// Handle Integer Comparison
+	if aInt && bInt {
+		if aI > bI {
+			return 1
+		} else if aI < bI {
+			return -1
+		}
+	}
+
+	// Handle String Comparison
+	if a > b {
+		return 1
+	} else if a < b {
+		return -1
+	}
+
+	return recursivePreReleaseCompare(versionA[1:], versionB[1:])
+}
+
+// BumpMajor increments the Major field by 1 and resets all other fields to their default values
+func (v *Version) BumpMajor() {
+	v.Major += 1
+	v.Minor = 0
+	v.Patch = 0
+	v.PreRelease = PreRelease("")
+	v.Metadata = ""
+}
+
+// BumpMinor increments the Minor field by 1 and resets all other fields to their default values
+func (v *Version) BumpMinor() {
+	v.Minor += 1
+	v.Patch = 0
+	v.PreRelease = PreRelease("")
+	v.Metadata = ""
+}
+
+// BumpPatch increments the Patch field by 1 and resets all other fields to their default values
+func (v *Version) BumpPatch() {
+	v.Patch += 1
+	v.PreRelease = PreRelease("")
+	v.Metadata = ""
+}
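As a quick orientation for this vendored package, here is a minimal usage sketch (illustrative only; the `main` package and the literal version strings are hypothetical, but every call appears in semver.go above):

    package main

    import (
        "fmt"

        "github.com/coreos/go-semver/semver"
    )

    func main() {
        a := semver.New("1.2.3-alpha.1+build.42") // panics on malformed input
        b, err := semver.NewVersion("1.2.10")     // returns an error instead
        if err != nil {
            panic(err)
        }

        fmt.Println(a.Major, a.PreRelease, a.Metadata) // 1 alpha.1 build.42
        fmt.Println(a.LessThan(*b), a.Compare(*b))     // true -1

        b.BumpMinor()  // resets Patch and clears PreRelease/Metadata
        fmt.Println(b) // 1.3.0
    }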
diff --git a/vendor/github.com/coreos/go-semver/semver/sort.go b/vendor/github.com/coreos/go-semver/semver/sort.go
new file mode 100644
index 0000000..e256b41
--- /dev/null
+++ b/vendor/github.com/coreos/go-semver/semver/sort.go
@@ -0,0 +1,38 @@
+// Copyright 2013-2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semver
+
+import (
+	"sort"
+)
+
+type Versions []*Version
+
+func (s Versions) Len() int {
+	return len(s)
+}
+
+func (s Versions) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+func (s Versions) Less(i, j int) bool {
+	return s[i].LessThan(*s[j])
+}
+
+// Sort sorts the given slice of Version in ascending order.
+func Sort(versions []*Version) {
+	sort.Sort(Versions(versions))
+}
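Sort pairs with the comparison logic in semver.go; note that semver ordering differs from plain string ordering, as in this hypothetical snippet:

    vs := []*semver.Version{
        semver.New("2.0.0"),
        semver.New("1.10.0"),
        semver.New("1.2.0"),
    }
    semver.Sort(vs)
    // vs is now 1.2.0, 1.10.0, 2.0.0 (a lexicographic sort would put
    // 1.10.0 before 1.2.0)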
diff --git a/vendor/github.com/coreos/go-systemd/LICENSE b/vendor/github.com/coreos/go-systemd/LICENSE
new file mode 100644
index 0000000..37ec93a
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/LICENSE
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/coreos/go-systemd/NOTICE b/vendor/github.com/coreos/go-systemd/NOTICE
new file mode 100644
index 0000000..23a0ada
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/NOTICE
@@ -0,0 +1,5 @@
+CoreOS Project
+Copyright 2018 CoreOS, Inc
+
+This product includes software developed at CoreOS, Inc.
+(http://www.coreos.com/).
diff --git a/vendor/github.com/coreos/go-systemd/daemon/sdnotify.go b/vendor/github.com/coreos/go-systemd/daemon/sdnotify.go
new file mode 100644
index 0000000..ba4ae31
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/daemon/sdnotify.go
@@ -0,0 +1,84 @@
+// Copyright 2014 Docker, Inc.
+// Copyright 2015-2018 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Package daemon provides a Go implementation of the sd_notify protocol.
+// It can be used to inform systemd of service start-up completion, watchdog
+// events, and other status changes.
+//
+// https://www.freedesktop.org/software/systemd/man/sd_notify.html#Description
+package daemon
+
+import (
+	"net"
+	"os"
+)
+
+const (
+	// SdNotifyReady tells the service manager that service startup is finished
+	// or the service finished loading its configuration.
+	SdNotifyReady = "READY=1"
+
+	// SdNotifyStopping tells the service manager that the service is beginning
+	// its shutdown.
+	SdNotifyStopping = "STOPPING=1"
+
+	// SdNotifyReloading tells the service manager that this service is
+	// reloading its configuration. Note that you must send SdNotifyReady once
+	// reloading has completed.
+	SdNotifyReloading = "RELOADING=1"
+
+	// SdNotifyWatchdog tells the service manager to update the watchdog
+	// timestamp for the service.
+	SdNotifyWatchdog = "WATCHDOG=1"
+)
+
+// SdNotify sends a message to the init daemon. It is common to ignore the error.
+// If `unsetEnvironment` is true, the environment variable `NOTIFY_SOCKET`
+// will be unconditionally unset.
+//
+// It returns one of the following:
+// (false, nil) - notification not supported (i.e. NOTIFY_SOCKET is unset)
+// (false, err) - notification supported, but failure happened (e.g. error connecting to NOTIFY_SOCKET or while sending data)
+// (true, nil) - notification supported, data has been sent
+func SdNotify(unsetEnvironment bool, state string) (bool, error) {
+	socketAddr := &net.UnixAddr{
+		Name: os.Getenv("NOTIFY_SOCKET"),
+		Net:  "unixgram",
+	}
+
+	// NOTIFY_SOCKET not set
+	if socketAddr.Name == "" {
+		return false, nil
+	}
+
+	if unsetEnvironment {
+		if err := os.Unsetenv("NOTIFY_SOCKET"); err != nil {
+			return false, err
+		}
+	}
+
+	conn, err := net.DialUnix(socketAddr.Net, nil, socketAddr)
+	// Error connecting to NOTIFY_SOCKET
+	if err != nil {
+		return false, err
+	}
+	defer conn.Close()
+
+	if _, err = conn.Write([]byte(state)); err != nil {
+		return false, err
+	}
+	return true, nil
+}
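A sketch of how a service might report readiness with this API (the surrounding `main` is hypothetical; SdNotify and SdNotifyReady are defined above):

    package main

    import (
        "log"

        "github.com/coreos/go-systemd/daemon"
    )

    func main() {
        // ... service initialization ...

        // Report readiness, distinguishing "not under systemd" from a real failure.
        sent, err := daemon.SdNotify(false, daemon.SdNotifyReady)
        if err != nil {
            log.Printf("failed to notify systemd: %v", err)
        } else if !sent {
            log.Print("NOTIFY_SOCKET unset; not running under systemd")
        }
    }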
diff --git a/vendor/github.com/coreos/go-systemd/daemon/watchdog.go b/vendor/github.com/coreos/go-systemd/daemon/watchdog.go
new file mode 100644
index 0000000..7a0e0d3
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/daemon/watchdog.go
@@ -0,0 +1,73 @@
+// Copyright 2016 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package daemon
+
+import (
+	"fmt"
+	"os"
+	"strconv"
+	"time"
+)
+
+// SdWatchdogEnabled returns watchdog information for a service.
+// Processes should call daemon.SdNotify(false, daemon.SdNotifyWatchdog) once
+// every time/2 interval.
+// If `unsetEnvironment` is true, the environment variables `WATCHDOG_USEC` and
+// `WATCHDOG_PID` will be unconditionally unset.
+//
+// It returns one of the following:
+// (0, nil) - watchdog isn't enabled or we aren't the watched PID.
+// (0, err) - an error happened (e.g. error converting time).
+// (time, nil) - watchdog is enabled and we can send ping.
+//   time is the delay before an unresponsive service will be killed.
+func SdWatchdogEnabled(unsetEnvironment bool) (time.Duration, error) {
+	wusec := os.Getenv("WATCHDOG_USEC")
+	wpid := os.Getenv("WATCHDOG_PID")
+	if unsetEnvironment {
+		wusecErr := os.Unsetenv("WATCHDOG_USEC")
+		wpidErr := os.Unsetenv("WATCHDOG_PID")
+		if wusecErr != nil {
+			return 0, wusecErr
+		}
+		if wpidErr != nil {
+			return 0, wpidErr
+		}
+	}
+
+	if wusec == "" {
+		return 0, nil
+	}
+	s, err := strconv.Atoi(wusec)
+	if err != nil {
+		return 0, fmt.Errorf("error converting WATCHDOG_USEC: %s", err)
+	}
+	if s <= 0 {
+		return 0, fmt.Errorf("error WATCHDOG_USEC must be a positive number")
+	}
+	interval := time.Duration(s) * time.Microsecond
+
+	if wpid == "" {
+		return interval, nil
+	}
+	p, err := strconv.Atoi(wpid)
+	if err != nil {
+		return 0, fmt.Errorf("error converting WATCHDOG_PID: %s", err)
+	}
+	if os.Getpid() != p {
+		return 0, nil
+	}
+
+	return interval, nil
+}
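Building on SdNotify above, a plausible keep-alive loop that honors the time/2 guidance from the doc comment (a sketch; the function name startWatchdog is hypothetical):

    import (
        "time"

        "github.com/coreos/go-systemd/daemon"
    )

    // startWatchdog pings systemd at half the configured watchdog interval.
    func startWatchdog() {
        interval, err := daemon.SdWatchdogEnabled(false)
        if err != nil || interval == 0 {
            return // watchdog disabled, or the env vars were malformed
        }
        go func() {
            for range time.Tick(interval / 2) {
                daemon.SdNotify(false, daemon.SdNotifyWatchdog) // error commonly ignored
            }
        }()
    }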
diff --git a/vendor/github.com/coreos/go-systemd/journal/journal.go b/vendor/github.com/coreos/go-systemd/journal/journal.go
new file mode 100644
index 0000000..603ad4c
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/journal/journal.go
@@ -0,0 +1,189 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package journal provides write bindings to the local systemd journal.
+// It is implemented in pure Go and connects to the journal directly over its
+// unix socket.
+//
+// To read from the journal, see the "sdjournal" package, which wraps the
+// sd-journal C API.
+//
+// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html
+package journal
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net"
+	"os"
+	"strconv"
+	"strings"
+	"syscall"
+)
+
+// Priority of a journal message
+type Priority int
+
+const (
+	PriEmerg Priority = iota
+	PriAlert
+	PriCrit
+	PriErr
+	PriWarning
+	PriNotice
+	PriInfo
+	PriDebug
+)
+
+var conn net.Conn
+
+func init() {
+	var err error
+	conn, err = net.Dial("unixgram", "/run/systemd/journal/socket")
+	if err != nil {
+		conn = nil
+	}
+}
+
+// Enabled returns true if the local systemd journal is available for logging
+func Enabled() bool {
+	return conn != nil
+}
+
+// Send a message to the local systemd journal. vars is a map of journald
+// fields to values.  Fields must be composed of uppercase letters, numbers,
+// and underscores, but must not start with an underscore. Within these
+// restrictions, any arbitrary field name may be used.  Some names have special
+// significance: see the journalctl documentation
+// (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html)
+// for more details.  vars may be nil.
+func Send(message string, priority Priority, vars map[string]string) error {
+	if conn == nil {
+		return journalError("could not connect to journald socket")
+	}
+
+	data := new(bytes.Buffer)
+	appendVariable(data, "PRIORITY", strconv.Itoa(int(priority)))
+	appendVariable(data, "MESSAGE", message)
+	for k, v := range vars {
+		appendVariable(data, k, v)
+	}
+
+	_, err := io.Copy(conn, data)
+	if err != nil && isSocketSpaceError(err) {
+		file, err := tempFd()
+		if err != nil {
+			return journalError(err.Error())
+		}
+		defer file.Close()
+		_, err = io.Copy(file, data)
+		if err != nil {
+			return journalError(err.Error())
+		}
+
+		rights := syscall.UnixRights(int(file.Fd()))
+
+		/* this connection should always be a UnixConn, but better safe than sorry */
+		unixConn, ok := conn.(*net.UnixConn)
+		if !ok {
+			return journalError("can't send file through non-Unix connection")
+		}
+		_, _, err = unixConn.WriteMsgUnix([]byte{}, rights, nil)
+		if err != nil {
+			return journalError(err.Error())
+		}
+	} else if err != nil {
+		return journalError(err.Error())
+	}
+	return nil
+}
+
+// Print prints a message to the local systemd journal using Send().
+func Print(priority Priority, format string, a ...interface{}) error {
+	return Send(fmt.Sprintf(format, a...), priority, nil)
+}
+
+func appendVariable(w io.Writer, name, value string) {
+	if err := validVarName(name); err != nil {
+		journalError(err.Error())
+	}
+	if strings.ContainsRune(value, '\n') {
+		/* When the value contains a newline, we write:
+		 * - the variable name, followed by a newline
+		 * - the size (in 64bit little endian format)
+		 * - the data, followed by a newline
+		 */
+		fmt.Fprintln(w, name)
+		binary.Write(w, binary.LittleEndian, uint64(len(value)))
+		fmt.Fprintln(w, value)
+	} else {
+		/* just write the variable and value all on one line */
+		fmt.Fprintf(w, "%s=%s\n", name, value)
+	}
+}
+
+// validVarName validates a variable name to make sure journald will accept it.
+// The variable name must consist only of uppercase letters, numbers, and
+// underscores, and may not begin with an underscore (from the docs):
+// https://www.freedesktop.org/software/systemd/man/sd_journal_print.html
+func validVarName(name string) error {
+	if name == "" {
+		return errors.New("Empty variable name")
+	} else if name[0] == '_' {
+		return errors.New("Variable name begins with an underscore")
+	}
+
+	for _, c := range name {
+		if !(('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_') {
+			return errors.New("Variable name contains invalid characters")
+		}
+	}
+	return nil
+}
+
+func isSocketSpaceError(err error) bool {
+	opErr, ok := err.(*net.OpError)
+	if !ok {
+		return false
+	}
+
+	sysErr, ok := opErr.Err.(syscall.Errno)
+	if !ok {
+		return false
+	}
+
+	return sysErr == syscall.EMSGSIZE || sysErr == syscall.ENOBUFS
+}
+
+func tempFd() (*os.File, error) {
+	file, err := ioutil.TempFile("/dev/shm/", "journal.XXXXX")
+	if err != nil {
+		return nil, err
+	}
+	err = syscall.Unlink(file.Name())
+	if err != nil {
+		return nil, err
+	}
+	return file, nil
+}
+
+func journalError(s string) error {
+	s = "journal error: " + s
+	fmt.Fprintln(os.Stderr, s)
+	return errors.New(s)
+}
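A hedged example of sending a structured entry through this package (logRebuild and the CACHE_ENTRIES field are hypothetical; Send, Enabled, and PriInfo are defined above):

    import (
        "strconv"

        "github.com/coreos/go-systemd/journal"
    )

    func logRebuild(entries int) {
        if !journal.Enabled() {
            return // journald socket unavailable; fall back to stderr, etc.
        }
        // Field names must be uppercase letters, digits, and underscores,
        // and must not begin with an underscore (validVarName enforces this).
        journal.Send("cache rebuilt", journal.PriInfo, map[string]string{
            "CACHE_ENTRIES": strconv.Itoa(entries),
        })
    }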
diff --git a/vendor/github.com/coreos/go-systemd/util/util.go b/vendor/github.com/coreos/go-systemd/util/util.go
new file mode 100644
index 0000000..7828ce6
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/util/util.go
@@ -0,0 +1,90 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package util contains utility functions related to systemd that applications
+// can use to check things like whether systemd is running.  Note that some of
+// these functions attempt to manually load systemd libraries at runtime rather
+// than linking against them.
+package util
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"strings"
+)
+
+var (
+	ErrNoCGO = fmt.Errorf("go-systemd built with CGO disabled")
+)
+
+// GetRunningSlice attempts to retrieve the name of the systemd slice in which
+// the current process is running.
+// This function is a wrapper around the libsystemd C library; if it cannot be
+// opened, an error is returned.
+func GetRunningSlice() (string, error) {
+	return getRunningSlice()
+}
+
+// RunningFromSystemService tries to detect whether the current process has
+// been invoked from a system service. The condition for this is whether the
+// process is _not_ a user process. User processes are those running in session
+// scopes or under per-user `systemd --user` instances.
+//
+// To avoid false positives on systems without `pam_systemd` (which is
+// responsible for creating user sessions), this function also uses a heuristic
+// to detect whether it's being invoked from a session leader process. This is
+// the case if the current process is executed directly from a service file
+// (e.g. with `ExecStart=/this/cmd`). Note that this heuristic will fail if the
+// command is instead launched in a subshell or similar so that it is not the
+// session leader (e.g. `ExecStart=/bin/bash -c "/this/cmd"`).
+//
+// This function is a wrapper around the libsystemd C library; if this is
+// unable to successfully open a handle to the library for any reason (e.g. it
+// cannot be found), an error will be returned.
+func RunningFromSystemService() (bool, error) {
+	return runningFromSystemService()
+}
+
+// CurrentUnitName attempts to retrieve the name of the systemd system unit
+// from which the calling process has been invoked. It wraps the systemd
+// `sd_pid_get_unit` call, with the same caveat: for processes not part of a
+// systemd system unit, this function will return an error.
+func CurrentUnitName() (string, error) {
+	return currentUnitName()
+}
+
+// IsRunningSystemd checks whether the host was booted with systemd as its init
+// system. This functions similarly to systemd's `sd_booted(3)`: internally, it
+// checks whether /run/systemd/system/ exists and is a directory.
+// http://www.freedesktop.org/software/systemd/man/sd_booted.html
+func IsRunningSystemd() bool {
+	fi, err := os.Lstat("/run/systemd/system")
+	if err != nil {
+		return false
+	}
+	return fi.IsDir()
+}
+
+// GetMachineID returns a host's 128-bit machine ID as a string. This functions
+// similarly to systemd's `sd_id128_get_machine`: internally, it simply reads
+// the contents of /etc/machine-id.
+// http://www.freedesktop.org/software/systemd/man/sd_id128_get_machine.html
+func GetMachineID() (string, error) {
+	machineID, err := ioutil.ReadFile("/etc/machine-id")
+	if err != nil {
+		return "", fmt.Errorf("failed to read /etc/machine-id: %v", err)
+	}
+	return strings.TrimSpace(string(machineID)), nil
+}
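A short sketch combining these helpers (describeHost is hypothetical; note the cgo caveat in the comment):

    import (
        "fmt"

        "github.com/coreos/go-systemd/util"
    )

    func describeHost() {
        if !util.IsRunningSystemd() {
            fmt.Println("host was not booted with systemd")
            return
        }
        if id, err := util.GetMachineID(); err == nil {
            fmt.Println("machine-id:", id)
        }
        // CurrentUnitName needs the cgo build (util_cgo.go below); the
        // CGO_ENABLED=0 stub returns ErrNoCGO instead.
        if unit, err := util.CurrentUnitName(); err == nil {
            fmt.Println("unit:", unit)
        }
    }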
diff --git a/vendor/github.com/coreos/go-systemd/util/util_cgo.go b/vendor/github.com/coreos/go-systemd/util/util_cgo.go
new file mode 100644
index 0000000..6269bc7
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/util/util_cgo.go
@@ -0,0 +1,175 @@
+// Copyright 2016 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build cgo
+
+package util
+
+// #include <stdlib.h>
+// #include <sys/types.h>
+// #include <unistd.h>
+//
+// int
+// my_sd_pid_get_owner_uid(void *f, pid_t pid, uid_t *uid)
+// {
+//   int (*sd_pid_get_owner_uid)(pid_t, uid_t *);
+//
+//   sd_pid_get_owner_uid = (int (*)(pid_t, uid_t *))f;
+//   return sd_pid_get_owner_uid(pid, uid);
+// }
+//
+// int
+// my_sd_pid_get_unit(void *f, pid_t pid, char **unit)
+// {
+//   int (*sd_pid_get_unit)(pid_t, char **);
+//
+//   sd_pid_get_unit = (int (*)(pid_t, char **))f;
+//   return sd_pid_get_unit(pid, unit);
+// }
+//
+// int
+// my_sd_pid_get_slice(void *f, pid_t pid, char **slice)
+// {
+//   int (*sd_pid_get_slice)(pid_t, char **);
+//
+//   sd_pid_get_slice = (int (*)(pid_t, char **))f;
+//   return sd_pid_get_slice(pid, slice);
+// }
+//
+// int
+// am_session_leader()
+// {
+//   return (getsid(0) == getpid());
+// }
+import "C"
+import (
+	"fmt"
+	"syscall"
+	"unsafe"
+
+	"github.com/coreos/pkg/dlopen"
+)
+
+var libsystemdNames = []string{
+	// systemd < 209
+	"libsystemd-login.so.0",
+	"libsystemd-login.so",
+
+	// systemd >= 209 merged libsystemd-login into libsystemd proper
+	"libsystemd.so.0",
+	"libsystemd.so",
+}
+
+func getRunningSlice() (slice string, err error) {
+	var h *dlopen.LibHandle
+	h, err = dlopen.GetHandle(libsystemdNames)
+	if err != nil {
+		return
+	}
+	defer func() {
+		if err1 := h.Close(); err1 != nil {
+			err = err1
+		}
+	}()
+
+	sd_pid_get_slice, err := h.GetSymbolPointer("sd_pid_get_slice")
+	if err != nil {
+		return
+	}
+
+	var s string
+	sl := C.CString(s)
+	defer C.free(unsafe.Pointer(sl))
+
+	ret := C.my_sd_pid_get_slice(sd_pid_get_slice, 0, &sl)
+	if ret < 0 {
+		err = fmt.Errorf("error calling sd_pid_get_slice: %v", syscall.Errno(-ret))
+		return
+	}
+
+	return C.GoString(sl), nil
+}
+
+func runningFromSystemService() (ret bool, err error) {
+	var h *dlopen.LibHandle
+	h, err = dlopen.GetHandle(libsystemdNames)
+	if err != nil {
+		return
+	}
+	defer func() {
+		if err1 := h.Close(); err1 != nil {
+			err = err1
+		}
+	}()
+
+	sd_pid_get_owner_uid, err := h.GetSymbolPointer("sd_pid_get_owner_uid")
+	if err != nil {
+		return
+	}
+
+	var uid C.uid_t
+	errno := C.my_sd_pid_get_owner_uid(sd_pid_get_owner_uid, 0, &uid)
+	serrno := syscall.Errno(-errno)
+	// when we're running from a unit file, sd_pid_get_owner_uid returns
+	// ENOENT (systemd <220), ENXIO (systemd 220-223), or ENODATA
+	// (systemd >=234)
+	switch {
+	case errno >= 0:
+		ret = false
+	case serrno == syscall.ENOENT, serrno == syscall.ENXIO, serrno == syscall.ENODATA:
+		// Since the implementation of sessions in systemd relies on
+		// the `pam_systemd` module, using the sd_pid_get_owner_uid
+		// heuristic alone can result in false positives if that module
+		// (or PAM itself) is not present or properly configured on the
+		// system. As such, we also check if we're the session leader,
+		// which should be the case if we're invoked from a unit file,
+		// but not if e.g. we're invoked from the command line from a
+		// user's login session
+		ret = C.am_session_leader() == 1
+	default:
+		err = fmt.Errorf("error calling sd_pid_get_owner_uid: %v", syscall.Errno(-errno))
+	}
+	return
+}
+
+func currentUnitName() (unit string, err error) {
+	var h *dlopen.LibHandle
+	h, err = dlopen.GetHandle(libsystemdNames)
+	if err != nil {
+		return
+	}
+	defer func() {
+		if err1 := h.Close(); err1 != nil {
+			err = err1
+		}
+	}()
+
+	sd_pid_get_unit, err := h.GetSymbolPointer("sd_pid_get_unit")
+	if err != nil {
+		return
+	}
+
+	var s string
+	u := C.CString(s)
+	defer C.free(unsafe.Pointer(u))
+
+	ret := C.my_sd_pid_get_unit(sd_pid_get_unit, 0, &u)
+	if ret < 0 {
+		err = fmt.Errorf("error calling sd_pid_get_unit: %v", syscall.Errno(-ret))
+		return
+	}
+
+	unit = C.GoString(u)
+	return
+}
diff --git a/vendor/github.com/coreos/go-systemd/util/util_stub.go b/vendor/github.com/coreos/go-systemd/util/util_stub.go
new file mode 100644
index 0000000..477589e
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/util/util_stub.go
@@ -0,0 +1,23 @@
+// Copyright 2016 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !cgo
+
+package util
+
+func getRunningSlice() (string, error) { return "", ErrNoCGO }
+
+func runningFromSystemService() (bool, error) { return false, ErrNoCGO }
+
+func currentUnitName() (string, error) { return "", ErrNoCGO }
diff --git a/vendor/github.com/coreos/pkg/LICENSE b/vendor/github.com/coreos/pkg/LICENSE
new file mode 100644
index 0000000..e06d208
--- /dev/null
+++ b/vendor/github.com/coreos/pkg/LICENSE
@@ -0,0 +1,202 @@
+Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright {yyyy} {name of copyright owner}
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
diff --git a/vendor/github.com/coreos/pkg/NOTICE b/vendor/github.com/coreos/pkg/NOTICE
new file mode 100644
index 0000000..b39ddfa
--- /dev/null
+++ b/vendor/github.com/coreos/pkg/NOTICE
@@ -0,0 +1,5 @@
+CoreOS Project
+Copyright 2014 CoreOS, Inc
+
+This product includes software developed at CoreOS, Inc.
+(http://www.coreos.com/).
diff --git a/vendor/github.com/coreos/pkg/capnslog/README.md b/vendor/github.com/coreos/pkg/capnslog/README.md
new file mode 100644
index 0000000..f79dbfc
--- /dev/null
+++ b/vendor/github.com/coreos/pkg/capnslog/README.md
@@ -0,0 +1,39 @@
+# capnslog, the CoreOS logging package
+
+There are far too many logging packages out there, with a variety of licenses, far too many features (colorization, all sorts of log frameworks), or APIs that are just a pain to use (lack of `Fatalln()`?).
+capnslog provides a simple but consistent logging interface suitable for all kinds of projects.
+
+### Design Principles
+
+##### `package main` is the place where logging gets turned on and routed
+
+A library should not touch log options, only generate log entries. Libraries are silent until main lets them speak.
+
+##### All log options are runtime-configurable. 
+
+It is still the job of `main` to expose these configurations. `main` may delegate this to, say, a configuration webhook, but does so explicitly.
+
+##### There is one log object per package. It is registered under its repository and package name.
+
+`main` activates logging for its repository and any dependency repositories it would also like to have output in its logstream. `main` also dictates at which level each subpackage logs.
+
+##### There is *one* output stream, and it is an `io.Writer` composed with a formatter.
+
+Splitting streams is probably not the job of your program, but rather, your log aggregation framework. If you must split output streams, again, `main` configures this and you can write a very simple two-output struct that satisfies `io.Writer`.
+
+Fancy colorful formatting and JSON output are beyond the scope of a basic logging framework -- they're application/log-collector dependent. These are, at best, provided as options, but more likely, provided by your application.
+
+##### Log objects are an interface
+
+An object knows best how to print itself. Log objects can collect more interesting metadata if they wish; however, because text isn't going away anytime soon, they must all be marshalable to text. The simplest log object is a string, which returns itself. If you wish to do fancier tricks for printing your log objects, see also JSON output -- introspect and write a formatter which can handle your advanced log interface. Making strings is the only thing guaranteed.
+
+##### Log levels have specific meanings:
+
+  * Critical: Unrecoverable. Must fail.
+  * Error: Data has been lost, a request has failed for a bad reason, or a required resource has been lost
+  * Warning: (Hopefully) Temporary conditions that may cause errors, but may work fine. A replica disappearing (that may reconnect) is a warning.
+  * Notice: Normal, but important (uncommon) log information.
+  * Info: Normal, working log information, everything is fine, but helpful notices for auditing or common operations.
+  * Debug: Everything is still fine, but even common operations may be logged, and the notices are less helpful but more numerous.
+  * Trace: Anything goes, from logging every function call as part of a common operation, to tracing execution of a query.
+
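A sketch of how this wiring typically looks in `package main` (`NewPackageLogger`, `SetFormatter`, and `SetGlobalLogLevel` belong to capnslog but are not part of this diff, so treat the exact calls as assumed; the repo path and address are placeholders):

    package main

    import (
        "os"

        "github.com/coreos/pkg/capnslog"
    )

    var plog = capnslog.NewPackageLogger("github.com/example/project", "main")

    func main() {
        // main turns logging on and routes it; libraries stay silent until then.
        capnslog.SetFormatter(capnslog.NewPrettyFormatter(os.Stderr, false))
        capnslog.SetGlobalLogLevel(capnslog.INFO)
        plog.Infof("listening on %s", ":8080")
    }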
diff --git a/vendor/github.com/coreos/pkg/capnslog/formatters.go b/vendor/github.com/coreos/pkg/capnslog/formatters.go
new file mode 100644
index 0000000..b305a84
--- /dev/null
+++ b/vendor/github.com/coreos/pkg/capnslog/formatters.go
@@ -0,0 +1,157 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package capnslog
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"log"
+	"runtime"
+	"strings"
+	"time"
+)
+
+type Formatter interface {
+	Format(pkg string, level LogLevel, depth int, entries ...interface{})
+	Flush()
+}
+
+func NewStringFormatter(w io.Writer) Formatter {
+	return &StringFormatter{
+		w: bufio.NewWriter(w),
+	}
+}
+
+type StringFormatter struct {
+	w *bufio.Writer
+}
+
+func (s *StringFormatter) Format(pkg string, l LogLevel, i int, entries ...interface{}) {
+	now := time.Now().UTC()
+	s.w.WriteString(now.Format(time.RFC3339))
+	s.w.WriteByte(' ')
+	writeEntries(s.w, pkg, l, i, entries...)
+	s.Flush()
+}
+
+func writeEntries(w *bufio.Writer, pkg string, _ LogLevel, _ int, entries ...interface{}) {
+	if pkg != "" {
+		w.WriteString(pkg + ": ")
+	}
+	str := fmt.Sprint(entries...)
+	endsInNL := strings.HasSuffix(str, "\n")
+	w.WriteString(str)
+	if !endsInNL {
+		w.WriteString("\n")
+	}
+}
+
+func (s *StringFormatter) Flush() {
+	s.w.Flush()
+}
+
+func NewPrettyFormatter(w io.Writer, debug bool) Formatter {
+	return &PrettyFormatter{
+		w:     bufio.NewWriter(w),
+		debug: debug,
+	}
+}
+
+type PrettyFormatter struct {
+	w     *bufio.Writer
+	debug bool
+}
+
+func (c *PrettyFormatter) Format(pkg string, l LogLevel, depth int, entries ...interface{}) {
+	now := time.Now()
+	ts := now.Format("2006-01-02 15:04:05")
+	c.w.WriteString(ts)
+	micros := now.Nanosecond() / 1000 // microseconds, not milliseconds
+	c.w.WriteString(fmt.Sprintf(".%06d", micros))
+	if c.debug {
+		_, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call.
+		if !ok {
+			file = "???"
+			line = 1
+		} else {
+			slash := strings.LastIndex(file, "/")
+			if slash >= 0 {
+				file = file[slash+1:]
+			}
+		}
+		if line < 0 {
+			line = 0 // not a real line number
+		}
+		c.w.WriteString(fmt.Sprintf(" [%s:%d]", file, line))
+	}
+	c.w.WriteString(fmt.Sprint(" ", l.Char(), " | "))
+	writeEntries(c.w, pkg, l, depth, entries...)
+	c.Flush()
+}
+
+func (c *PrettyFormatter) Flush() {
+	c.w.Flush()
+}
+
+// LogFormatter emulates the form of the traditional built-in logger.
+type LogFormatter struct {
+	logger *log.Logger
+	prefix string
+}
+
+// NewLogFormatter is a helper to produce a new LogFormatter struct. It uses the
+// standard library log package to do the actual logging work so that the output looks similar.
+func NewLogFormatter(w io.Writer, prefix string, flag int) Formatter {
+	return &LogFormatter{
+		logger: log.New(w, "", flag), // don't use prefix here
+		prefix: prefix,               // save it instead
+	}
+}
+
+// Format builds a log message for the LogFormatter. The LogLevel is ignored.
+func (lf *LogFormatter) Format(pkg string, _ LogLevel, _ int, entries ...interface{}) {
+	str := fmt.Sprint(entries...)
+	prefix := lf.prefix
+	if pkg != "" {
+		prefix = fmt.Sprintf("%s%s: ", prefix, pkg)
+	}
+	lf.logger.Output(5, fmt.Sprintf("%s%v", prefix, str)) // call depth is 5
+}
+
+// Flush is included so that the interface is complete, but is a no-op.
+func (lf *LogFormatter) Flush() {
+	// noop
+}
+
+// NilFormatter is a no-op log formatter that does nothing.
+type NilFormatter struct {
+}
+
+// NewNilFormatter is a helper to produce a new NilFormatter struct. It logs no
+// messages so that you can cause part of your logging to be silent.
+func NewNilFormatter() Formatter {
+	return &NilFormatter{}
+}
+
+// Format does nothing.
+func (_ *NilFormatter) Format(_ string, _ LogLevel, _ int, _ ...interface{}) {
+	// noop
+}
+
+// Flush is included so that the interface is complete, but is a no-op.
+func (_ *NilFormatter) Flush() {
+	// noop
+}
diff --git a/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go b/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go
new file mode 100644
index 0000000..426603e
--- /dev/null
+++ b/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go
@@ -0,0 +1,96 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package capnslog
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+	"runtime"
+	"strconv"
+	"strings"
+	"time"
+)
+
+var pid = os.Getpid()
+
+type GlogFormatter struct {
+	StringFormatter
+}
+
+func NewGlogFormatter(w io.Writer) *GlogFormatter {
+	g := &GlogFormatter{}
+	g.w = bufio.NewWriter(w)
+	return g
+}
+
+func (g GlogFormatter) Format(pkg string, level LogLevel, depth int, entries ...interface{}) {
+	g.w.Write(GlogHeader(level, depth+1))
+	g.StringFormatter.Format(pkg, level, depth+1, entries...)
+}
+
+func GlogHeader(level LogLevel, depth int) []byte {
+	// Lmmdd hh:mm:ss.uuuuuu threadid file:line]
+	now := time.Now().UTC()
+	_, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call.
+	if !ok {
+		file = "???"
+		line = 1
+	} else {
+		slash := strings.LastIndex(file, "/")
+		if slash >= 0 {
+			file = file[slash+1:]
+		}
+	}
+	if line < 0 {
+		line = 0 // not a real line number
+	}
+	buf := &bytes.Buffer{}
+	buf.Grow(30)
+	_, month, day := now.Date()
+	hour, minute, second := now.Clock()
+	buf.WriteString(level.Char())
+	twoDigits(buf, int(month))
+	twoDigits(buf, day)
+	buf.WriteByte(' ')
+	twoDigits(buf, hour)
+	buf.WriteByte(':')
+	twoDigits(buf, minute)
+	buf.WriteByte(':')
+	twoDigits(buf, second)
+	buf.WriteByte('.')
+	fmt.Fprintf(buf, "%06d", now.Nanosecond()/1000) // microseconds, zero-padded to the .uuuuuu width above
+	buf.WriteByte('Z')
+	buf.WriteByte(' ')
+	buf.WriteString(strconv.Itoa(pid))
+	buf.WriteByte(' ')
+	buf.WriteString(file)
+	buf.WriteByte(':')
+	buf.WriteString(strconv.Itoa(line))
+	buf.WriteByte(']')
+	buf.WriteByte(' ')
+	return buf.Bytes()
+}
+
+const digits = "0123456789"
+
+func twoDigits(b *bytes.Buffer, d int) {
+	c2 := digits[d%10]
+	d /= 10
+	c1 := digits[d%10]
+	b.WriteByte(c1)
+	b.WriteByte(c2)
+}
diff --git a/vendor/github.com/coreos/pkg/capnslog/init.go b/vendor/github.com/coreos/pkg/capnslog/init.go
new file mode 100644
index 0000000..44b8cd3
--- /dev/null
+++ b/vendor/github.com/coreos/pkg/capnslog/init.go
@@ -0,0 +1,49 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// +build !windows
+
+package capnslog
+
+import (
+	"io"
+	"os"
+	"syscall"
+)
+
+// Here's where the opinionation comes in. We need some sensible defaults,
+// especially after taking over the log package. Your project (whatever it may
+// be) may see things differently. That's okay; there should be no defaults in
+// the main package that cannot be controlled or overridden programmatically,
+// otherwise it's a bug. The way to override them is to create your own
+// init_log.go file much like this one.
+
+func init() {
+	initHijack()
+
+	// The Go `log` package uses os.Stderr.
+	SetFormatter(NewDefaultFormatter(os.Stderr))
+	SetGlobalLogLevel(INFO)
+}
+
+func NewDefaultFormatter(out io.Writer) Formatter {
+	if syscall.Getppid() == 1 {
+		// We're running under init, which may be systemd.
+		f, err := NewJournaldFormatter()
+		if err == nil {
+			return f
+		}
+	}
+	return NewPrettyFormatter(out, false)
+}
diff --git a/vendor/github.com/coreos/pkg/capnslog/init_windows.go b/vendor/github.com/coreos/pkg/capnslog/init_windows.go
new file mode 100644
index 0000000..4553050
--- /dev/null
+++ b/vendor/github.com/coreos/pkg/capnslog/init_windows.go
@@ -0,0 +1,25 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package capnslog
+
+import "os"
+
+func init() {
+	initHijack()
+
+	// The Go `log` package uses os.Stderr.
+	SetFormatter(NewPrettyFormatter(os.Stderr, false))
+	SetGlobalLogLevel(INFO)
+}
diff --git a/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go b/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go
new file mode 100644
index 0000000..72e0520
--- /dev/null
+++ b/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go
@@ -0,0 +1,68 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// +build !windows
+
+package capnslog
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/coreos/go-systemd/journal"
+)
+
+func NewJournaldFormatter() (Formatter, error) {
+	if !journal.Enabled() {
+		return nil, errors.New("No systemd detected")
+	}
+	return &journaldFormatter{}, nil
+}
+
+type journaldFormatter struct{}
+
+func (j *journaldFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) {
+	var pri journal.Priority
+	switch l {
+	case CRITICAL:
+		pri = journal.PriCrit
+	case ERROR:
+		pri = journal.PriErr
+	case WARNING:
+		pri = journal.PriWarning
+	case NOTICE:
+		pri = journal.PriNotice
+	case INFO:
+		pri = journal.PriInfo
+	case DEBUG:
+		pri = journal.PriDebug
+	case TRACE:
+		pri = journal.PriDebug
+	default:
+		panic("Unhandled loglevel")
+	}
+	msg := fmt.Sprint(entries...)
+	tags := map[string]string{
+		"PACKAGE":           pkg,
+		"SYSLOG_IDENTIFIER": filepath.Base(os.Args[0]),
+	}
+	err := journal.Send(msg, pri, tags)
+	if err != nil {
+		fmt.Fprintln(os.Stderr, err)
+	}
+}
+
+func (j *journaldFormatter) Flush() {}
diff --git a/vendor/github.com/coreos/pkg/capnslog/log_hijack.go b/vendor/github.com/coreos/pkg/capnslog/log_hijack.go
new file mode 100644
index 0000000..970086b
--- /dev/null
+++ b/vendor/github.com/coreos/pkg/capnslog/log_hijack.go
@@ -0,0 +1,39 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package capnslog
+
+import (
+	"log"
+)
+
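+// initHijack routes the standard library's global logger through a capnslog
+// PackageLogger registered under the "log" repo, so that output from the
+// stdlib log package is formatted and filtered like any other capnslog output.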
+func initHijack() {
+	pkg := NewPackageLogger("log", "")
+	w := packageWriter{pkg}
+	log.SetFlags(0)
+	log.SetPrefix("")
+	log.SetOutput(w)
+}
+
+type packageWriter struct {
+	pl *PackageLogger
+}
+
+func (p packageWriter) Write(b []byte) (int, error) {
+	if p.pl.level < INFO {
+		return 0, nil
+	}
+	p.pl.internalLog(calldepth+2, INFO, string(b))
+	return len(b), nil
+}
diff --git a/vendor/github.com/coreos/pkg/capnslog/logmap.go b/vendor/github.com/coreos/pkg/capnslog/logmap.go
new file mode 100644
index 0000000..226b60c
--- /dev/null
+++ b/vendor/github.com/coreos/pkg/capnslog/logmap.go
@@ -0,0 +1,245 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package capnslog
+
+import (
+	"errors"
+	"strings"
+	"sync"
+)
+
+// LogLevel is the set of all log levels.
+type LogLevel int8
+
+const (
+	// CRITICAL is the lowest log level; only errors which will end the program will be propagated.
+	CRITICAL LogLevel = iota - 1
+	// ERROR is for errors that are not fatal but lead to troubling behavior.
+	ERROR
+	// WARNING is for conditions which are not fatal, but are unusual or unexpected. Often sourced from misconfigurations.
+	WARNING
+	// NOTICE is for normal but significant conditions.
+	NOTICE
+	// INFO is a log level for common, everyday log updates.
+	INFO
+	// DEBUG is the default hidden level for more verbose updates about internal processes.
+	DEBUG
+	// TRACE is for (potentially) call by call tracing of programs.
+	TRACE
+)
+
+// Char returns a single-character representation of the log level.
+func (l LogLevel) Char() string {
+	switch l {
+	case CRITICAL:
+		return "C"
+	case ERROR:
+		return "E"
+	case WARNING:
+		return "W"
+	case NOTICE:
+		return "N"
+	case INFO:
+		return "I"
+	case DEBUG:
+		return "D"
+	case TRACE:
+		return "T"
+	default:
+		panic("Unhandled loglevel")
+	}
+}
+
+// String returns a multi-character representation of the log level.
+func (l LogLevel) String() string {
+	switch l {
+	case CRITICAL:
+		return "CRITICAL"
+	case ERROR:
+		return "ERROR"
+	case WARNING:
+		return "WARNING"
+	case NOTICE:
+		return "NOTICE"
+	case INFO:
+		return "INFO"
+	case DEBUG:
+		return "DEBUG"
+	case TRACE:
+		return "TRACE"
+	default:
+		panic("Unhandled loglevel")
+	}
+}
+
+// Set updates the log level using the given string value. It fulfills the flag.Value interface.
+func (l *LogLevel) Set(s string) error {
+	value, err := ParseLevel(s)
+	if err != nil {
+		return err
+	}
+
+	*l = value
+	return nil
+}
+
+// Type returns an empty string; it exists only to fulfill the pflag.Value interface.
+func (l *LogLevel) Type() string {
+	return ""
+}
+
+// ParseLevel translates some potential loglevel strings into their corresponding levels.
+func ParseLevel(s string) (LogLevel, error) {
+	switch s {
+	case "CRITICAL", "C":
+		return CRITICAL, nil
+	case "ERROR", "0", "E":
+		return ERROR, nil
+	case "WARNING", "1", "W":
+		return WARNING, nil
+	case "NOTICE", "2", "N":
+		return NOTICE, nil
+	case "INFO", "3", "I":
+		return INFO, nil
+	case "DEBUG", "4", "D":
+		return DEBUG, nil
+	case "TRACE", "5", "T":
+		return TRACE, nil
+	}
+	return CRITICAL, errors.New("couldn't parse log level " + s)
+}
+
+type RepoLogger map[string]*PackageLogger
+
+type loggerStruct struct {
+	sync.Mutex
+	repoMap   map[string]RepoLogger
+	formatter Formatter
+}
+
+// logger is the global logger
+var logger = new(loggerStruct)
+
+// SetGlobalLogLevel sets the log level for all packages in all repositories
+// registered with capnslog.
+func SetGlobalLogLevel(l LogLevel) {
+	logger.Lock()
+	defer logger.Unlock()
+	for _, r := range logger.repoMap {
+		r.setRepoLogLevelInternal(l)
+	}
+}
+
+// GetRepoLogger returns the handle to the repository's set of package loggers, or an error if no packages are registered for the repo.
+func GetRepoLogger(repo string) (RepoLogger, error) {
+	logger.Lock()
+	defer logger.Unlock()
+	r, ok := logger.repoMap[repo]
+	if !ok {
+		return nil, errors.New("no packages registered for repo " + repo)
+	}
+	return r, nil
+}
+
+// MustRepoLogger returns the handle to the repository's package loggers, panicking if none are registered.
+func MustRepoLogger(repo string) RepoLogger {
+	r, err := GetRepoLogger(repo)
+	if err != nil {
+		panic(err)
+	}
+	return r
+}
+
+// SetRepoLogLevel sets the log level for all packages in the repository.
+func (r RepoLogger) SetRepoLogLevel(l LogLevel) {
+	logger.Lock()
+	defer logger.Unlock()
+	r.setRepoLogLevelInternal(l)
+}
+
+func (r RepoLogger) setRepoLogLevelInternal(l LogLevel) {
+	for _, v := range r {
+		v.level = l
+	}
+}
+
+// ParseLogLevelConfig parses a comma-separated string of "package=loglevel", in
+// order, and returns a map of the results, for use in SetLogLevel.
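+// For example, the string "*=NOTICE,logmap=DEBUG" yields a map that, when
+// passed to SetLogLevel, sets every package to NOTICE and then raises the
+// "logmap" package to DEBUG. (The package names here are illustrative.)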
+func (r RepoLogger) ParseLogLevelConfig(conf string) (map[string]LogLevel, error) {
+	setlist := strings.Split(conf, ",")
+	out := make(map[string]LogLevel)
+	for _, setstring := range setlist {
+		setting := strings.Split(setstring, "=")
+		if len(setting) != 2 {
+			return nil, errors.New("oddly structured `pkg=level` option: " + setstring)
+		}
+		l, err := ParseLevel(setting[1])
+		if err != nil {
+			return nil, err
+		}
+		out[setting[0]] = l
+	}
+	return out, nil
+}
+
+// SetLogLevel takes a map of package names within a repository to their desired
+// loglevel, and sets the levels appropriately. Unknown packages are ignored.
+// "*" is a special package name that corresponds to all packages, and will be
+// processed first.
+func (r RepoLogger) SetLogLevel(m map[string]LogLevel) {
+	logger.Lock()
+	defer logger.Unlock()
+	if l, ok := m["*"]; ok {
+		r.setRepoLogLevelInternal(l)
+	}
+	for k, v := range m {
+		l, ok := r[k]
+		if !ok {
+			continue
+		}
+		l.level = v
+	}
+}
+
+// SetFormatter sets the formatting function for all logs.
+func SetFormatter(f Formatter) {
+	logger.Lock()
+	defer logger.Unlock()
+	logger.formatter = f
+}
+
+// NewPackageLogger creates a package logger object.
+// This should be defined as a global var in your package, referencing your repo.
+func NewPackageLogger(repo string, pkg string) (p *PackageLogger) {
+	logger.Lock()
+	defer logger.Unlock()
+	if logger.repoMap == nil {
+		logger.repoMap = make(map[string]RepoLogger)
+	}
+	r, rok := logger.repoMap[repo]
+	if !rok {
+		logger.repoMap[repo] = make(RepoLogger)
+		r = logger.repoMap[repo]
+	}
+	p, pok := r[pkg]
+	if !pok {
+		r[pkg] = &PackageLogger{
+			pkg:   pkg,
+			level: INFO,
+		}
+		p = r[pkg]
+	}
+	return
+}
diff --git a/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go b/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go
new file mode 100644
index 0000000..00ff371
--- /dev/null
+++ b/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go
@@ -0,0 +1,191 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package capnslog
+
+import (
+	"fmt"
+	"os"
+)
+
+type PackageLogger struct {
+	pkg   string
+	level LogLevel
+}
+
+const calldepth = 2
+
+func (p *PackageLogger) internalLog(depth int, inLevel LogLevel, entries ...interface{}) {
+	logger.Lock()
+	defer logger.Unlock()
+	if inLevel != CRITICAL && p.level < inLevel {
+		return
+	}
+	if logger.formatter != nil {
+		logger.formatter.Format(p.pkg, inLevel, depth+1, entries...)
+	}
+}
+
+// SetLevel allows users to change the current logging level.
+func (p *PackageLogger) SetLevel(l LogLevel) {
+	logger.Lock()
+	defer logger.Unlock()
+	p.level = l
+}
+
+// LevelAt checks whether the given log level will be output under the current setting.
+func (p *PackageLogger) LevelAt(l LogLevel) bool {
+	logger.Lock()
+	defer logger.Unlock()
+	return p.level >= l
+}
+
+// Logf logs a formatted string at any level between ERROR and TRACE.
+func (p *PackageLogger) Logf(l LogLevel, format string, args ...interface{}) {
+	p.internalLog(calldepth, l, fmt.Sprintf(format, args...))
+}
+
+// Log logs a message at any level between ERROR and TRACE.
+func (p *PackageLogger) Log(l LogLevel, args ...interface{}) {
+	p.internalLog(calldepth, l, fmt.Sprint(args...))
+}
+
+// log stdlib compatibility
+
+func (p *PackageLogger) Println(args ...interface{}) {
+	p.internalLog(calldepth, INFO, fmt.Sprintln(args...))
+}
+
+func (p *PackageLogger) Printf(format string, args ...interface{}) {
+	p.Logf(INFO, format, args...)
+}
+
+func (p *PackageLogger) Print(args ...interface{}) {
+	p.internalLog(calldepth, INFO, fmt.Sprint(args...))
+}
+
+// Panic and fatal
+
+func (p *PackageLogger) Panicf(format string, args ...interface{}) {
+	s := fmt.Sprintf(format, args...)
+	p.internalLog(calldepth, CRITICAL, s)
+	panic(s)
+}
+
+func (p *PackageLogger) Panic(args ...interface{}) {
+	s := fmt.Sprint(args...)
+	p.internalLog(calldepth, CRITICAL, s)
+	panic(s)
+}
+
+func (p *PackageLogger) Panicln(args ...interface{}) {
+	s := fmt.Sprintln(args...)
+	p.internalLog(calldepth, CRITICAL, s)
+	panic(s)
+}
+
+func (p *PackageLogger) Fatalf(format string, args ...interface{}) {
+	p.Logf(CRITICAL, format, args...)
+	os.Exit(1)
+}
+
+func (p *PackageLogger) Fatal(args ...interface{}) {
+	s := fmt.Sprint(args...)
+	p.internalLog(calldepth, CRITICAL, s)
+	os.Exit(1)
+}
+
+func (p *PackageLogger) Fatalln(args ...interface{}) {
+	s := fmt.Sprintln(args...)
+	p.internalLog(calldepth, CRITICAL, s)
+	os.Exit(1)
+}
+
+// Error Functions
+
+func (p *PackageLogger) Errorf(format string, args ...interface{}) {
+	p.Logf(ERROR, format, args...)
+}
+
+func (p *PackageLogger) Error(entries ...interface{}) {
+	p.internalLog(calldepth, ERROR, entries...)
+}
+
+// Warning Functions
+
+func (p *PackageLogger) Warningf(format string, args ...interface{}) {
+	p.Logf(WARNING, format, args...)
+}
+
+func (p *PackageLogger) Warning(entries ...interface{}) {
+	p.internalLog(calldepth, WARNING, entries...)
+}
+
+// Notice Functions
+
+func (p *PackageLogger) Noticef(format string, args ...interface{}) {
+	p.Logf(NOTICE, format, args...)
+}
+
+func (p *PackageLogger) Notice(entries ...interface{}) {
+	p.internalLog(calldepth, NOTICE, entries...)
+}
+
+// Info Functions
+
+func (p *PackageLogger) Infof(format string, args ...interface{}) {
+	p.Logf(INFO, format, args...)
+}
+
+func (p *PackageLogger) Info(entries ...interface{}) {
+	p.internalLog(calldepth, INFO, entries...)
+}
+
+// Debug Functions
+
+func (p *PackageLogger) Debugf(format string, args ...interface{}) {
+	if p.level < DEBUG {
+		return
+	}
+	p.Logf(DEBUG, format, args...)
+}
+
+func (p *PackageLogger) Debug(entries ...interface{}) {
+	if p.level < DEBUG {
+		return
+	}
+	p.internalLog(calldepth, DEBUG, entries...)
+}
+
+// Trace Functions
+
+func (p *PackageLogger) Tracef(format string, args ...interface{}) {
+	if p.level < TRACE {
+		return
+	}
+	p.Logf(TRACE, format, args...)
+}
+
+func (p *PackageLogger) Trace(entries ...interface{}) {
+	if p.level < TRACE {
+		return
+	}
+	p.internalLog(calldepth, TRACE, entries...)
+}
+
+func (p *PackageLogger) Flush() {
+	logger.Lock()
+	defer logger.Unlock()
+	logger.formatter.Flush()
+}
diff --git a/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go b/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go
new file mode 100644
index 0000000..4be5a1f
--- /dev/null
+++ b/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go
@@ -0,0 +1,65 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// +build !windows
+
+package capnslog
+
+import (
+	"fmt"
+	"log/syslog"
+)
+
+func NewSyslogFormatter(w *syslog.Writer) Formatter {
+	return &syslogFormatter{w}
+}
+
+func NewDefaultSyslogFormatter(tag string) (Formatter, error) {
+	w, err := syslog.New(syslog.LOG_DEBUG, tag)
+	if err != nil {
+		return nil, err
+	}
+	return NewSyslogFormatter(w), nil
+}
+
+type syslogFormatter struct {
+	w *syslog.Writer
+}
+
+func (s *syslogFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) {
+	for _, entry := range entries {
+		str := fmt.Sprint(entry)
+		switch l {
+		case CRITICAL:
+			s.w.Crit(str)
+		case ERROR:
+			s.w.Err(str)
+		case WARNING:
+			s.w.Warning(str)
+		case NOTICE:
+			s.w.Notice(str)
+		case INFO:
+			s.w.Info(str)
+		case DEBUG:
+			s.w.Debug(str)
+		case TRACE:
+			s.w.Debug(str)
+		default:
+			panic("Unhandled loglevel")
+		}
+	}
+}
+
+func (s *syslogFormatter) Flush() {
+}
diff --git a/vendor/github.com/coreos/pkg/dlopen/dlopen.go b/vendor/github.com/coreos/pkg/dlopen/dlopen.go
new file mode 100644
index 0000000..23774f6
--- /dev/null
+++ b/vendor/github.com/coreos/pkg/dlopen/dlopen.go
@@ -0,0 +1,82 @@
+// Copyright 2016 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package dlopen provides some convenience functions to dlopen a library and
+// get its symbols.
+package dlopen
+
+// #cgo LDFLAGS: -ldl
+// #include <stdlib.h>
+// #include <dlfcn.h>
+import "C"
+import (
+	"errors"
+	"fmt"
+	"unsafe"
+)
+
+var ErrSoNotFound = errors.New("unable to open a handle to the library")
+
+// LibHandle represents an open handle to a library (.so)
+type LibHandle struct {
+	Handle  unsafe.Pointer
+	Libname string
+}
+
+// GetHandle tries to get a handle to a library (.so), attempting to access it
+// by the names specified in libs and returning the first that is successfully
+// opened. Callers are responsible for closing the handle. If no library can
+// be successfully opened, an error is returned.
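+//
+// Example (the library names are illustrative):
+//
+//	h, err := dlopen.GetHandle([]string{"libsystemd.so.0", "libsystemd.so"})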
+func GetHandle(libs []string) (*LibHandle, error) {
+	for _, name := range libs {
+		libname := C.CString(name)
+		defer C.free(unsafe.Pointer(libname))
+		handle := C.dlopen(libname, C.RTLD_LAZY)
+		if handle != nil {
+			h := &LibHandle{
+				Handle:  handle,
+				Libname: name,
+			}
+			return h, nil
+		}
+	}
+	return nil, ErrSoNotFound
+}
+
+// GetSymbolPointer takes a symbol name and returns a pointer to the symbol.
+func (l *LibHandle) GetSymbolPointer(symbol string) (unsafe.Pointer, error) {
+	sym := C.CString(symbol)
+	defer C.free(unsafe.Pointer(sym))
+
+	C.dlerror()
+	p := C.dlsym(l.Handle, sym)
+	e := C.dlerror()
+	if e != nil {
+		return nil, fmt.Errorf("error resolving symbol %q: %v", symbol, errors.New(C.GoString(e)))
+	}
+
+	return p, nil
+}
+
+// Close closes a LibHandle.
+func (l *LibHandle) Close() error {
+	C.dlerror()
+	C.dlclose(l.Handle)
+	e := C.dlerror()
+	if e != nil {
+		return fmt.Errorf("error closing %v: %v", l.Libname, errors.New(C.GoString(e)))
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/coreos/pkg/dlopen/dlopen_example.go b/vendor/github.com/coreos/pkg/dlopen/dlopen_example.go
new file mode 100644
index 0000000..48a6601
--- /dev/null
+++ b/vendor/github.com/coreos/pkg/dlopen/dlopen_example.go
@@ -0,0 +1,56 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// +build linux
+
+package dlopen
+
+// #include <string.h>
+// #include <stdlib.h>
+//
+// int
+// my_strlen(void *f, const char *s)
+// {
+//   size_t (*strlen)(const char *);
+//
+//   strlen = (size_t (*)(const char *))f;
+//   return strlen(s);
+// }
+import "C"
+
+import (
+	"fmt"
+	"unsafe"
+)
+
+func strlen(libs []string, s string) (int, error) {
+	h, err := GetHandle(libs)
+	if err != nil {
+		return -1, fmt.Errorf(`couldn't get a handle to the library: %v`, err)
+	}
+	defer h.Close()
+
+	f := "strlen"
+	cs := C.CString(s)
+	defer C.free(unsafe.Pointer(cs))
+
+	strlen, err := h.GetSymbolPointer(f)
+	if err != nil {
+		return -1, fmt.Errorf(`couldn't get symbol %q: %v`, f, err)
+	}
+
+	n := C.my_strlen(strlen, cs)
+
+	return int(n), nil
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/.gitignore b/vendor/github.com/dgrijalva/jwt-go/.gitignore
new file mode 100644
index 0000000..80bed65
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/.gitignore
@@ -0,0 +1,4 @@
+.DS_Store
+bin
+
+
diff --git a/vendor/github.com/dgrijalva/jwt-go/.travis.yml b/vendor/github.com/dgrijalva/jwt-go/.travis.yml
new file mode 100644
index 0000000..1027f56
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/.travis.yml
@@ -0,0 +1,13 @@
+language: go
+
+script:
+    - go vet ./...
+    - go test -v ./...
+
+go:
+  - 1.3
+  - 1.4
+  - 1.5
+  - 1.6
+  - 1.7
+  - tip
diff --git a/vendor/github.com/dgrijalva/jwt-go/LICENSE b/vendor/github.com/dgrijalva/jwt-go/LICENSE
new file mode 100644
index 0000000..df83a9c
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/LICENSE
@@ -0,0 +1,8 @@
+Copyright (c) 2012 Dave Grijalva
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
diff --git a/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md b/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md
new file mode 100644
index 0000000..7fc1f79
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md
@@ -0,0 +1,97 @@
+## Migration Guide from v2 -> v3
+
+Version 3 adds several new, frequently requested features.  To do so, it introduces a few breaking changes.  We've worked to keep these as minimal as possible.  This guide explains the breaking changes and how you can quickly update your code.
+
+### `Token.Claims` is now an interface type
+
+The most requested feature from the 2.0 version of this library was the ability to provide a custom type to the JSON parser for claims. This was implemented by introducing a new interface, `Claims`, to replace `map[string]interface{}`.  We also included two concrete implementations of `Claims`: `MapClaims` and `StandardClaims`.
+
+`MapClaims` is an alias for `map[string]interface{}` with built-in validation behavior.  It is the default claims type when using `Parse`.  The usage is unchanged except you must type-assert the claims property.
+
+The old example for parsing a token looked like this:
+
+```go
+	if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil {
+		fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"])
+	}
+```
+
+is now directly mapped to:
+
+```go
+	if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil {
+		claims := token.Claims.(jwt.MapClaims)
+		fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"])
+	}
+```
+
+`StandardClaims` is designed to be embedded in your custom type.  You can supply a custom claims type with the new `ParseWithClaims` function.  Here's an example of using a custom claims type.
+
+```go
+	type MyCustomClaims struct {
+		User string
+		*StandardClaims
+	}
+	
+	if token, err := jwt.ParseWithClaims(tokenString, &MyCustomClaims{}, keyLookupFunc); err == nil {
+		claims := token.Claims.(*MyCustomClaims)
+		fmt.Printf("Token for user %v expires %v", claims.User, claims.StandardClaims.ExpiresAt)
+	}
+```
+
+### `ParseFromRequest` has been moved
+
+To keep this library focused on the tokens without becoming overburdened with complex request processing logic, `ParseFromRequest` and its new companion `ParseFromRequestWithClaims` have been moved to a subpackage, `request`.  The method signatures have also been augmented to receive a new argument: `Extractor`.
+
+`Extractors` do the work of picking the token string out of a request.  The interface is simple and composable.
+
+This simple parsing example:
+
+```go
+	if token, err := jwt.ParseFromRequest(tokenString, req, keyLookupFunc); err == nil {
+		fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"])
+	}
+```
+
+is directly mapped to:
+
+```go
+	if token, err := request.ParseFromRequest(req, request.OAuth2Extractor, keyLookupFunc); err == nil {
+		claims := token.Claims.(jwt.MapClaims)
+		fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"])
+	}
+```
+
+There are several concrete `Extractor` types provided for your convenience (a composition sketch follows the list):
+
+* `HeaderExtractor` will search a list of headers until one contains content.
+* `ArgumentExtractor` will search a list of keys in request query and form arguments until one contains content.
+* `MultiExtractor` will try a list of `Extractors` in order until one returns content.
+* `AuthorizationHeaderExtractor` will look in the `Authorization` header for a `Bearer` token.
+* `OAuth2Extractor` searches the places an OAuth2 token would be specified (per the spec): the `Authorization` header and the `access_token` argument.
+* `PostExtractionFilter` wraps an `Extractor`, allowing you to process the content before it's parsed.  A simple example is stripping the `Bearer ` text from a header.
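+
+As a sketch of how these compose, reusing the `keyLookupFunc` placeholder from above (the header and argument names are illustrative):
+
+```go
+	extractor := request.MultiExtractor{
+		request.HeaderExtractor{"X-Api-Key"},
+		request.ArgumentExtractor{"api_key"},
+	}
+	if token, err := request.ParseFromRequest(req, extractor, keyLookupFunc); err == nil {
+		claims := token.Claims.(jwt.MapClaims)
+		fmt.Printf("Token for user %v", claims["user"])
+	}
+```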
+
+
+### RSA signing methods no longer accept `[]byte` keys
+
+Due to a [critical vulnerability](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/), we've decided the convenience of accepting `[]byte` instead of `rsa.PublicKey` or `rsa.PrivateKey` isn't worth the risk of misuse.
+
+To replace this behavior, we've added two helper methods: `ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error)` and `ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error)`.  These are just simple helpers for unpacking PEM encoded PKCS1 and PKCS8 keys. If your keys are encoded any other way, all you need to do is convert them to the `crypto/rsa` package's types.
+
+```go 
+	func keyLookupFunc(token *jwt.Token) (interface{}, error) {
+		// Don't forget to validate the alg is what you expect:
+		if _, ok := token.Method.(*jwt.SigningMethodRSA); !ok {
+			return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"])
+		}
+		
+		// Look up key 
+		key, err := lookupPublicKey(token.Header["kid"])
+		if err != nil {
+			return nil, err
+		}
+		
+		// Unpack key from PEM encoded PKCS8
+		return jwt.ParseRSAPublicKeyFromPEM(key)
+	}
+```
diff --git a/vendor/github.com/dgrijalva/jwt-go/README.md b/vendor/github.com/dgrijalva/jwt-go/README.md
new file mode 100644
index 0000000..d358d88
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/README.md
@@ -0,0 +1,100 @@
+# jwt-go
+
+[![Build Status](https://travis-ci.org/dgrijalva/jwt-go.svg?branch=master)](https://travis-ci.org/dgrijalva/jwt-go)
+[![GoDoc](https://godoc.org/github.com/dgrijalva/jwt-go?status.svg)](https://godoc.org/github.com/dgrijalva/jwt-go)
+
+A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html)
+
+**NEW VERSION COMING:** There have been a lot of improvements suggested since the version 3.0.0 released in 2016. I'm working now on cutting two different releases: 3.2.0 will contain any non-breaking changes or enhancements. 4.0.0 will follow shortly which will include breaking changes. See the 4.0.0 milestone to get an idea of what's coming. If you have other ideas, or would like to participate in 4.0.0, now's the time. If you depend on this library and don't want to be interrupted, I recommend you use your dependency management tool to pin to version 3.
+
+**SECURITY NOTICE:** Some older versions of Go have a security issue in the crypto/elliptic package. The recommendation is to upgrade to at least 1.8.3. See issue #216 for more detail.
+
+**SECURITY NOTICE:** It's important that you [validate the `alg` presented is what you expect](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/). This library attempts to make it easy to do the right thing by requiring key types match the expected alg, but you should take the extra step to verify it in your usage.  See the examples provided.
+
+## What the heck is a JWT?
+
+JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens.
+
+In short, it's a signed JSON object that does something useful (for example, authentication).  It's commonly used for `Bearer` tokens in OAuth 2.  A token is made of three parts, separated by `.`'s.  The first two parts are JSON objects that have been [base64url](http://tools.ietf.org/html/rfc4648) encoded.  The last part is the signature, encoded the same way.
+
+The first part is called the header.  It contains the necessary information for verifying the last part, the signature.  For example, which signing algorithm was used and what key was used.
+
+The part in the middle is the interesting bit.  It's called the Claims and contains the actual stuff you care about.  Refer to [the RFC](http://self-issued.info/docs/draft-jones-json-web-token.html) for information about reserved keys and the proper way to add your own.
+
+## What's in the box?
+
+This library supports the parsing and verification as well as the generation and signing of JWTs.  Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own.
+
+## Examples
+
+See [the project documentation](https://godoc.org/github.com/dgrijalva/jwt-go) for examples of usage:
+
+* [Simple example of parsing and validating a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-Parse--Hmac)
+* [Simple example of building and signing a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-New--Hmac)
+* [Directory of Examples](https://godoc.org/github.com/dgrijalva/jwt-go#pkg-examples)
+
+## Extensions
+
+This library publishes all the necessary components for adding your own signing methods.  Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`.  
+
+Here's an example of an extension that integrates with the Google App Engine signing tools: https://github.com/someone1/gcp-jwt-go
+
+## Compliance
+
+This library was last reviewed to comply with [RFC 7519](http://www.rfc-editor.org/info/rfc7519) dated May 2015 with a few notable differences:
+
+* In order to protect against accidental use of [Unsecured JWTs](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#UnsecuredJWT), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key.
+
+## Project Status & Versioning
+
+This library is considered production ready.  Feedback and feature requests are appreciated.  The API should be considered stable.  There should be very few backwards-incompatible changes outside of major version updates (and only with good reason).
+
+This project uses [Semantic Versioning 2.0.0](http://semver.org).  Accepted pull requests will land on `master`.  Periodically, versions will be tagged from `master`.  You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases).
+
+While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users.  You may want to use this alternative package path instead: `gopkg.in/dgrijalva/jwt-go.v3`.  It will do the right thing with respect to semantic versioning.
+
+**BREAKING CHANGES:**
+* Version 3.0.0 includes _a lot_ of changes from the 2.x line, including a few that break the API.  We've tried to break as few things as possible, so there should just be a few type signature changes.  A full list of breaking changes is available in `VERSION_HISTORY.md`.  See `MIGRATION_GUIDE.md` for more information on updating your code.
+
+## Usage Tips
+
+### Signing vs Encryption
+
+A token is simply a JSON object that is signed by its author. This tells you exactly two things about the data:
+
+* The author of the token was in the possession of the signing secret
+* The data has not been modified since it was signed
+
+It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. JWE is currently outside the scope of this library.
+
+### Choosing a Signing Method
+
+There are several signing methods available, and you should probably take the time to learn about the various options before choosing one.  The principal design decision is most likely going to be symmetric vs asymmetric.
+
+Symmetric signing methods, such as HMAC, use only a single secret. This is probably the simplest signing method to use since any `[]byte` can be used as a valid secret. They are also slightly faster computationally, though this is rarely enough to matter. Symmetric signing methods work best when both producers and consumers of tokens are trusted, or are even the same system. Since the same secret is used to both sign and validate tokens, you can't easily distribute the key for validation.
+
+Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification.
+
+### Signing Methods and Key Types
+
+Each signing method expects a different object type for its signing keys. See the package documentation for details. Here are the most common ones (a short signing sketch follows):
+
+* The [HMAC signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expects `[]byte` values for signing and validation
+* The [RSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expects `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation
+* The [ECDSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expects `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation
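+
+A minimal HS256 signing sketch under those expectations (the secret and claims are placeholders):
+
+```go
+	key := []byte("my-shared-secret") // HS256 takes a []byte key
+
+	token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{
+		"user": "alice",
+		"exp":  time.Now().Add(time.Hour).Unix(),
+	})
+	signed, err := token.SignedString(key) // the compact, signed JWT string
+```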
+
+### JWT and OAuth
+
+It's worth mentioning that OAuth and JWT are not the same thing. A JWT token is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication.
+
+Without going too far down the rabbit hole, here's a description of the interaction of these technologies:
+
+* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth.
+* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token.
+* Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL.
+
+## More
+
+Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go).
+
+The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation.
diff --git a/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md b/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md
new file mode 100644
index 0000000..6370298
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md
@@ -0,0 +1,118 @@
+## `jwt-go` Version History
+
+#### 3.2.0
+
+* Added method `ParseUnverified` to allow users to split up the tasks of parsing and validation
+* HMAC signing method returns `ErrInvalidKeyType` instead of `ErrInvalidKey` where appropriate
+* Added options to `request.ParseFromRequest`, which allows for an arbitrary list of modifiers to parsing behavior. Initial set include `WithClaims` and `WithParser`. Existing usage of this function will continue to work as before.
+* Deprecated `ParseFromRequestWithClaims` to simplify API in the future.
+
+#### 3.1.0
+
+* Improvements to `jwt` command line tool
+* Added `SkipClaimsValidation` option to `Parser`
+* Documentation updates
+
+#### 3.0.0
+
+* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code
+	* Dropped support for `[]byte` keys when using RSA signing methods.  This convenience feature could contribute to security vulnerabilities involving mismatched key types with signing methods.
+	* `ParseFromRequest` has been moved to `request` subpackage and usage has changed
+	* The `Claims` property on `Token` is now type `Claims` instead of `map[string]interface{}`.  The default value is type `MapClaims`, which is an alias to `map[string]interface{}`.  This makes it possible to use a custom type when decoding claims.
+* Other Additions and Changes
+	* Added `Claims` interface type to allow users to decode the claims into a custom type
+	* Added `ParseWithClaims`, which takes a third argument of type `Claims`.  Use this function instead of `Parse` if you have a custom type you'd like to decode into.
+	* Dramatically improved the functionality and flexibility of `ParseFromRequest`, which is now in the `request` subpackage
+	* Added `ParseFromRequestWithClaims` which is the `FromRequest` equivalent of `ParseWithClaims`
+	* Added new interface type `Extractor`, which is used for extracting JWT strings from http requests.  Used with `ParseFromRequest` and `ParseFromRequestWithClaims`.
+	* Added several new, more specific, validation errors to error type bitmask
+	* Moved examples from README to executable example files
+	* Signing method registry is now thread safe
+	* Added new property to `ValidationError`, which contains the raw error returned by calls made by parse/verify (such as those returned by keyfunc or json parser)
+
+#### 2.7.0
+
+This will likely be the last backwards compatible release before 3.0.0, excluding essential bug fixes.
+
+* Added new option `-show` to the `jwt` command that will just output the decoded token without verifying
+* Error text for expired tokens includes how long it's been expired
+* Fixed incorrect error returned from `ParseRSAPublicKeyFromPEM`
+* Documentation updates
+
+#### 2.6.0
+
+* Exposed inner error within ValidationError
+* Fixed validation errors when using UseJSONNumber flag
+* Added several unit tests
+
+#### 2.5.0
+
+* Added support for signing method none.  You shouldn't use this.  The API tries to make this clear.
+* Updated/fixed some documentation
+* Added more helpful error message when trying to parse tokens that begin with `BEARER `
+
+#### 2.4.0
+
+* Added new type, Parser, to allow for configuration of various parsing parameters
+	* You can now specify a list of valid signing methods.  Anything outside this set will be rejected.
+	* You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON
+* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go)
+* Fixed some bugs with ECDSA parsing
+
+#### 2.3.0
+
+* Added support for ECDSA signing methods
+* Added support for RSA PSS signing methods (requires go v1.4)
+
+#### 2.2.0
+
+* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`.  Result will now be the parsed token and an error, instead of a panic.
+
+#### 2.1.0
+
+Backwards compatible API change that was missed in 2.0.0.
+
+* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte`
+
+#### 2.0.0
+
+There were two major reasons for breaking backwards compatibility with this update.  The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations.  There will likely be no required code changes to support this change.
+
+The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods.  Not all keys used for all signing methods have a single standard on-disk representation.  Requiring `[]byte` as the type for all keys proved too limiting.  Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys.  Backwards compatibility has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`.
+
+It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`.
+
+* **Compatibility Breaking Changes**
+	* `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct`
+	* `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct`
+	* `KeyFunc` now returns `interface{}` instead of `[]byte`
+	* `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key
+	* `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key
+* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`.  Specific sizes are now just instances of this type.
+    * Added public package global `SigningMethodHS256`
+    * Added public package global `SigningMethodHS384`
+    * Added public package global `SigningMethodHS512`
+* Renamed type `SigningMethodRS256` to `SigningMethodRSA`.  Specific sizes are now just instances of this type.
+    * Added public package global `SigningMethodRS256`
+    * Added public package global `SigningMethodRS384`
+    * Added public package global `SigningMethodRS512`
+* Moved sample private key for HMAC tests from an inline value to a file on disk.  Value is unchanged.
+* Refactored the RSA implementation to be easier to read
+* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM`
+
+#### 1.0.2
+
+* Fixed bug in parsing public keys from certificates
+* Added more tests around the parsing of keys for RS256
+* Code refactoring in RS256 implementation.  No functional changes
+
+#### 1.0.1
+
+* Fixed panic if RS256 signing method was passed an invalid key
+
+#### 1.0.0
+
+* First versioned release
+* API stabilized
+* Supports creating, signing, parsing, and validating JWT tokens
+* Supports RS256 and HS256 signing methods
\ No newline at end of file
diff --git a/vendor/github.com/dgrijalva/jwt-go/claims.go b/vendor/github.com/dgrijalva/jwt-go/claims.go
new file mode 100644
index 0000000..f0228f0
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/claims.go
@@ -0,0 +1,134 @@
+package jwt
+
+import (
+	"crypto/subtle"
+	"fmt"
+	"time"
+)
+
+// For a type to be a Claims object, it need only have a Valid method that
+// determines if the token is invalid for any supported reason.
+type Claims interface {
+	Valid() error
+}
+
+// Structured version of Claims Section, as referenced at
+// https://tools.ietf.org/html/rfc7519#section-4.1
+// See examples for how to use this with your own claim types
+type StandardClaims struct {
+	Audience  string `json:"aud,omitempty"`
+	ExpiresAt int64  `json:"exp,omitempty"`
+	Id        string `json:"jti,omitempty"`
+	IssuedAt  int64  `json:"iat,omitempty"`
+	Issuer    string `json:"iss,omitempty"`
+	NotBefore int64  `json:"nbf,omitempty"`
+	Subject   string `json:"sub,omitempty"`
+}
+
+// Valid validates the time-based claims "exp", "iat", and "nbf".
+// There is no accounting for clock skew.
+// Note that if any of the above claims are absent from the token, it will
+// still be considered valid.
+func (c StandardClaims) Valid() error {
+	vErr := new(ValidationError)
+	now := TimeFunc().Unix()
+
+	// The claims below are optional, by default, so if they are set to the
+	// default value in Go, let's not fail the verification for them.
+	if !c.VerifyExpiresAt(now, false) {
+		delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0))
+		vErr.Inner = fmt.Errorf("token is expired by %v", delta)
+		vErr.Errors |= ValidationErrorExpired
+	}
+
+	if !c.VerifyIssuedAt(now, false) {
+		vErr.Inner = fmt.Errorf("Token used before issued")
+		vErr.Errors |= ValidationErrorIssuedAt
+	}
+
+	if !c.VerifyNotBefore(now, false) {
+		vErr.Inner = fmt.Errorf("token is not valid yet")
+		vErr.Errors |= ValidationErrorNotValidYet
+	}
+
+	if vErr.valid() {
+		return nil
+	}
+
+	return vErr
+}
+
+// Compares the aud claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool {
+	return verifyAud(c.Audience, cmp, req)
+}
+
+// Compares the exp claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (c *StandardClaims) VerifyExpiresAt(cmp int64, req bool) bool {
+	return verifyExp(c.ExpiresAt, cmp, req)
+}
+
+// Compares the iat claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (c *StandardClaims) VerifyIssuedAt(cmp int64, req bool) bool {
+	return verifyIat(c.IssuedAt, cmp, req)
+}
+
+// Compares the iss claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (c *StandardClaims) VerifyIssuer(cmp string, req bool) bool {
+	return verifyIss(c.Issuer, cmp, req)
+}
+
+// Compares the nbf claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool {
+	return verifyNbf(c.NotBefore, cmp, req)
+}
+
+// ----- helpers
+
+func verifyAud(aud string, cmp string, required bool) bool {
+	if aud == "" {
+		return !required
+	}
+	return subtle.ConstantTimeCompare([]byte(aud), []byte(cmp)) != 0
+}
+
+func verifyExp(exp int64, now int64, required bool) bool {
+	if exp == 0 {
+		return !required
+	}
+	return now <= exp
+}
+
+func verifyIat(iat int64, now int64, required bool) bool {
+	if iat == 0 {
+		return !required
+	}
+	return now >= iat
+}
+
+func verifyIss(iss string, cmp string, required bool) bool {
+	if iss == "" {
+		return !required
+	}
+	return subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0
+}
+
+func verifyNbf(nbf int64, now int64, required bool) bool {
+	if nbf == 0 {
+		return !required
+	}
+	return now >= nbf
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/doc.go b/vendor/github.com/dgrijalva/jwt-go/doc.go
new file mode 100644
index 0000000..a86dc1a
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/doc.go
@@ -0,0 +1,4 @@
+// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html
+//
+// See README.md for more info.
+package jwt
diff --git a/vendor/github.com/dgrijalva/jwt-go/ecdsa.go b/vendor/github.com/dgrijalva/jwt-go/ecdsa.go
new file mode 100644
index 0000000..f977381
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/ecdsa.go
@@ -0,0 +1,148 @@
+package jwt
+
+import (
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/rand"
+	"errors"
+	"math/big"
+)
+
+var (
+	// Sadly this is missing from crypto/ecdsa compared to crypto/rsa
+	ErrECDSAVerification = errors.New("crypto/ecdsa: verification error")
+)
+
+// Implements the ECDSA family of signing methods
+// Expects *ecdsa.PrivateKey for signing and *ecdsa.PublicKey for verification
+type SigningMethodECDSA struct {
+	Name      string
+	Hash      crypto.Hash
+	KeySize   int
+	CurveBits int
+}
+
+// Specific instances for EC256 and company
+var (
+	SigningMethodES256 *SigningMethodECDSA
+	SigningMethodES384 *SigningMethodECDSA
+	SigningMethodES512 *SigningMethodECDSA
+)
+
+func init() {
+	// ES256
+	SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256}
+	RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod {
+		return SigningMethodES256
+	})
+
+	// ES384
+	SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384}
+	RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod {
+		return SigningMethodES384
+	})
+
+	// ES512
+	SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521}
+	RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod {
+		return SigningMethodES512
+	})
+}
+
+func (m *SigningMethodECDSA) Alg() string {
+	return m.Name
+}
+
+// Implements the Verify method from SigningMethod
+// For this verify method, key must be an *ecdsa.PublicKey
+func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error {
+	var err error
+
+	// Decode the signature
+	var sig []byte
+	if sig, err = DecodeSegment(signature); err != nil {
+		return err
+	}
+
+	// Get the key
+	var ecdsaKey *ecdsa.PublicKey
+	switch k := key.(type) {
+	case *ecdsa.PublicKey:
+		ecdsaKey = k
+	default:
+		return ErrInvalidKeyType
+	}
+
+	if len(sig) != 2*m.KeySize {
+		return ErrECDSAVerification
+	}
+
+	r := big.NewInt(0).SetBytes(sig[:m.KeySize])
+	s := big.NewInt(0).SetBytes(sig[m.KeySize:])
+
+	// Create hasher
+	if !m.Hash.Available() {
+		return ErrHashUnavailable
+	}
+	hasher := m.Hash.New()
+	hasher.Write([]byte(signingString))
+
+	// Verify the signature
+	if ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s) {
+		return nil
+	}
+	return ErrECDSAVerification
+}
+
+// Implements the Sign method from SigningMethod
+// For this signing method, key must be an *ecdsa.PrivateKey
+func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) {
+	// Get the key
+	var ecdsaKey *ecdsa.PrivateKey
+	switch k := key.(type) {
+	case *ecdsa.PrivateKey:
+		ecdsaKey = k
+	default:
+		return "", ErrInvalidKeyType
+	}
+
+	// Create the hasher
+	if !m.Hash.Available() {
+		return "", ErrHashUnavailable
+	}
+
+	hasher := m.Hash.New()
+	hasher.Write([]byte(signingString))
+
+	// Sign the string and return r, s
+	if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil {
+		curveBits := ecdsaKey.Curve.Params().BitSize
+
+		if m.CurveBits != curveBits {
+			return "", ErrInvalidKey
+		}
+
+		keyBytes := curveBits / 8
+		if curveBits%8 > 0 {
+			keyBytes += 1
+		}
+
+		// We serialize the outpus (r and s) into big-endian byte arrays and pad
+		// them with zeros on the left to make sure the sizes work out. Both arrays
+		// must be keyBytes long, and the output must be 2*keyBytes long.
+		rBytes := r.Bytes()
+		rBytesPadded := make([]byte, keyBytes)
+		copy(rBytesPadded[keyBytes-len(rBytes):], rBytes)
+
+		sBytes := s.Bytes()
+		sBytesPadded := make([]byte, keyBytes)
+		copy(sBytesPadded[keyBytes-len(sBytes):], sBytes)
+
+		out := append(rBytesPadded, sBytesPadded...)
+
+		return EncodeSegment(out), nil
+	} else {
+		return "", err
+	}
+}
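A hedged usage sketch for this method (the signing input `"header.payload"` is a stand-in; real callers go through `Token.SignedString`, and the key would normally be loaded from PEM):

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	// Generate a throwaway P-256 key; ES256 requires a 256-bit curve.
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}

	sig, err := jwt.SigningMethodES256.Sign("header.payload", key)
	if err != nil {
		panic(err)
	}

	// Verify takes the corresponding public key; nil means the
	// signature checked out.
	fmt.Println(jwt.SigningMethodES256.Verify("header.payload", sig, &key.PublicKey))
}
```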
diff --git a/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go b/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go
new file mode 100644
index 0000000..d19624b
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go
@@ -0,0 +1,67 @@
+package jwt
+
+import (
+	"crypto/ecdsa"
+	"crypto/x509"
+	"encoding/pem"
+	"errors"
+)
+
+var (
+	ErrNotECPublicKey  = errors.New("key is not a valid ECDSA public key")
+	ErrNotECPrivateKey = errors.New("key is not a valid ECDSA private key")
+)
+
+// Parse PEM encoded Elliptic Curve Private Key Structure
+func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) {
+	var err error
+
+	// Parse PEM block
+	var block *pem.Block
+	if block, _ = pem.Decode(key); block == nil {
+		return nil, ErrKeyMustBePEMEncoded
+	}
+
+	// Parse the key
+	var parsedKey interface{}
+	if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil {
+		return nil, err
+	}
+
+	var pkey *ecdsa.PrivateKey
+	var ok bool
+	if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok {
+		return nil, ErrNotECPrivateKey
+	}
+
+	return pkey, nil
+}
+
+// Parse PEM encoded public key (PKIX format or an X.509 certificate)
+func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) {
+	var err error
+
+	// Parse PEM block
+	var block *pem.Block
+	if block, _ = pem.Decode(key); block == nil {
+		return nil, ErrKeyMustBePEMEncoded
+	}
+
+	// Parse the key
+	var parsedKey interface{}
+	if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
+		if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
+			parsedKey = cert.PublicKey
+		} else {
+			return nil, err
+		}
+	}
+
+	var pkey *ecdsa.PublicKey
+	var ok bool
+	if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok {
+		return nil, ErrNotECPublicKey
+	}
+
+	return pkey, nil
+}
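A small sketch of loading a key with these helpers (the file name is hypothetical; any PEM-encoded EC private key, e.g. one generated with `openssl ecparam`, would do):

```go
package main

import (
	"io/ioutil"
	"log"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	pemBytes, err := ioutil.ReadFile("ec256-private.pem") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	key, err := jwt.ParseECPrivateKeyFromPEM(pemBytes)
	if err != nil {
		log.Fatal(err)
	}
	// The result is a *ecdsa.PrivateKey, ready for SigningMethodES256.Sign.
	log.Printf("loaded key on curve %s", key.Curve.Params().Name)
}
```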
diff --git a/vendor/github.com/dgrijalva/jwt-go/errors.go b/vendor/github.com/dgrijalva/jwt-go/errors.go
new file mode 100644
index 0000000..1c93024
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/errors.go
@@ -0,0 +1,59 @@
+package jwt
+
+import (
+	"errors"
+)
+
+// Error constants
+var (
+	ErrInvalidKey      = errors.New("key is invalid")
+	ErrInvalidKeyType  = errors.New("key is of invalid type")
+	ErrHashUnavailable = errors.New("the requested hash function is unavailable")
+)
+
+// The errors that might occur when parsing and validating a token
+const (
+	ValidationErrorMalformed        uint32 = 1 << iota // Token is malformed
+	ValidationErrorUnverifiable                        // Token could not be verified because of signing problems
+	ValidationErrorSignatureInvalid                    // Signature validation failed
+
+	// Standard Claim validation errors
+	ValidationErrorAudience      // AUD validation failed
+	ValidationErrorExpired       // EXP validation failed
+	ValidationErrorIssuedAt      // IAT validation failed
+	ValidationErrorIssuer        // ISS validation failed
+	ValidationErrorNotValidYet   // NBF validation failed
+	ValidationErrorId            // JTI validation failed
+	ValidationErrorClaimsInvalid // Generic claims validation error
+)
+
+// Helper for constructing a ValidationError with a string error message
+func NewValidationError(errorText string, errorFlags uint32) *ValidationError {
+	return &ValidationError{
+		text:   errorText,
+		Errors: errorFlags,
+	}
+}
+
+// The error from Parse if token is not valid
+type ValidationError struct {
+	Inner  error  // stores the error returned by external dependencies, i.e.: KeyFunc
+	Errors uint32 // bitfield.  see ValidationError... constants
+	text   string // errors that do not have a valid error just have text
+}
+
+// ValidationError satisfies the error interface
+func (e ValidationError) Error() string {
+	if e.Inner != nil {
+		return e.Inner.Error()
+	} else if e.text != "" {
+		return e.text
+	} else {
+		return "token is invalid"
+	}
+}
+
+// valid reports whether no validation errors are set
+func (e *ValidationError) valid() bool {
+	return e.Errors == 0
+}
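Because `Errors` is a bitfield, callers can distinguish failure causes after a parse. A minimal sketch (the token string and secret are placeholders):

```go
package main

import (
	"fmt"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	_, err := jwt.Parse("not.a.token", func(t *jwt.Token) (interface{}, error) {
		return []byte("example-secret"), nil // placeholder key
	})
	if ve, ok := err.(*jwt.ValidationError); ok {
		switch {
		case ve.Errors&jwt.ValidationErrorMalformed != 0:
			fmt.Println("that's not even a token")
		case ve.Errors&(jwt.ValidationErrorExpired|jwt.ValidationErrorNotValidYet) != 0:
			fmt.Println("timing is everything")
		default:
			fmt.Println("couldn't handle this token:", err)
		}
	}
}
```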
diff --git a/vendor/github.com/dgrijalva/jwt-go/hmac.go b/vendor/github.com/dgrijalva/jwt-go/hmac.go
new file mode 100644
index 0000000..addbe5d
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/hmac.go
@@ -0,0 +1,95 @@
+package jwt
+
+import (
+	"crypto"
+	"crypto/hmac"
+	"errors"
+)
+
+// Implements the HMAC-SHA family of signing methods
+// Expects key type of []byte for both signing and validation
+type SigningMethodHMAC struct {
+	Name string
+	Hash crypto.Hash
+}
+
+// Specific instances for HS256 and company
+var (
+	SigningMethodHS256  *SigningMethodHMAC
+	SigningMethodHS384  *SigningMethodHMAC
+	SigningMethodHS512  *SigningMethodHMAC
+	ErrSignatureInvalid = errors.New("signature is invalid")
+)
+
+func init() {
+	// HS256
+	SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256}
+	RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod {
+		return SigningMethodHS256
+	})
+
+	// HS384
+	SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384}
+	RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod {
+		return SigningMethodHS384
+	})
+
+	// HS512
+	SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512}
+	RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod {
+		return SigningMethodHS512
+	})
+}
+
+func (m *SigningMethodHMAC) Alg() string {
+	return m.Name
+}
+
+// Verify the signature of HSXXX tokens.  Returns nil if the signature is valid.
+func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error {
+	// Verify the key is the right type
+	keyBytes, ok := key.([]byte)
+	if !ok {
+		return ErrInvalidKeyType
+	}
+
+	// Decode signature, for comparison
+	sig, err := DecodeSegment(signature)
+	if err != nil {
+		return err
+	}
+
+	// Can we use the specified hashing method?
+	if !m.Hash.Available() {
+		return ErrHashUnavailable
+	}
+
+	// This signing method is symmetric, so we validate the signature
+	// by reproducing the signature from the signing string and key, then
+	// comparing that against the provided signature.
+	hasher := hmac.New(m.Hash.New, keyBytes)
+	hasher.Write([]byte(signingString))
+	if !hmac.Equal(sig, hasher.Sum(nil)) {
+		return ErrSignatureInvalid
+	}
+
+	// No validation errors.  Signature is good.
+	return nil
+}
+
+// Implements the Sign method from SigningMethod for this signing method.
+// Key must be []byte
+func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) {
+	if keyBytes, ok := key.([]byte); ok {
+		if !m.Hash.Available() {
+			return "", ErrHashUnavailable
+		}
+
+		hasher := hmac.New(m.Hash.New, keyBytes)
+		hasher.Write([]byte(signingString))
+
+		return EncodeSegment(hasher.Sum(nil)), nil
+	}
+
+	return "", ErrInvalidKeyType
+}
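A round-trip sketch at the SigningMethod level (the secret is a placeholder; real code signs whole tokens via `Token.SignedString`):

```go
package main

import (
	"fmt"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	key := []byte("example-hmac-secret") // placeholder secret

	sig, err := jwt.SigningMethodHS256.Sign("header.payload", key)
	if err != nil {
		panic(err)
	}

	// The same key verifies; any other key fails with ErrSignatureInvalid.
	fmt.Println(jwt.SigningMethodHS256.Verify("header.payload", sig, key))             // <nil>
	fmt.Println(jwt.SigningMethodHS256.Verify("header.payload", sig, []byte("other"))) // signature is invalid
}
```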
diff --git a/vendor/github.com/dgrijalva/jwt-go/map_claims.go b/vendor/github.com/dgrijalva/jwt-go/map_claims.go
new file mode 100644
index 0000000..291213c
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/map_claims.go
@@ -0,0 +1,94 @@
+package jwt
+
+import (
+	"encoding/json"
+	"errors"
+	// "fmt"
+)
+
+// Claims type that uses the map[string]interface{} for JSON decoding
+// This is the default claims type if you don't supply one
+type MapClaims map[string]interface{}
+
+// Compares the aud claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (m MapClaims) VerifyAudience(cmp string, req bool) bool {
+	aud, _ := m["aud"].(string)
+	return verifyAud(aud, cmp, req)
+}
+
+// Compares the exp claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (m MapClaims) VerifyExpiresAt(cmp int64, req bool) bool {
+	switch exp := m["exp"].(type) {
+	case float64:
+		return verifyExp(int64(exp), cmp, req)
+	case json.Number:
+		v, _ := exp.Int64()
+		return verifyExp(v, cmp, req)
+	}
+	return !req
+}
+
+// Compares the iat claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (m MapClaims) VerifyIssuedAt(cmp int64, req bool) bool {
+	switch iat := m["iat"].(type) {
+	case float64:
+		return verifyIat(int64(iat), cmp, req)
+	case json.Number:
+		v, _ := iat.Int64()
+		return verifyIat(v, cmp, req)
+	}
+	return !req
+}
+
+// Compares the iss claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (m MapClaims) VerifyIssuer(cmp string, req bool) bool {
+	iss, _ := m["iss"].(string)
+	return verifyIss(iss, cmp, req)
+}
+
+// Compares the nbf claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (m MapClaims) VerifyNotBefore(cmp int64, req bool) bool {
+	switch nbf := m["nbf"].(type) {
+	case float64:
+		return verifyNbf(int64(nbf), cmp, req)
+	case json.Number:
+		v, _ := nbf.Int64()
+		return verifyNbf(v, cmp, req)
+	}
+	return !req
+}
+
+// Validates the time-based claims ("exp", "iat", "nbf").
+// There is no accounting for clock skew.
+// If any of these claims is absent from the token, it is still
+// considered valid.
+func (m MapClaims) Valid() error {
+	vErr := new(ValidationError)
+	now := TimeFunc().Unix()
+
+	if !m.VerifyExpiresAt(now, false) {
+		vErr.Inner = errors.New("token is expired")
+		vErr.Errors |= ValidationErrorExpired
+	}
+
+	if !m.VerifyIssuedAt(now, false) {
+		vErr.Inner = errors.New("token used before issued")
+		vErr.Errors |= ValidationErrorIssuedAt
+	}
+
+	if !m.VerifyNotBefore(now, false) {
+		vErr.Inner = errors.New("token is not valid yet")
+		vErr.Errors |= ValidationErrorNotValidYet
+	}
+
+	if vErr.valid() {
+		return nil
+	}
+
+	return vErr
+}
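A sketch of MapClaims in use (claim values are made up; note that numeric claims decoded from JSON arrive as float64, which is why the type switches above matter):

```go
package main

import (
	"fmt"
	"time"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	claims := jwt.MapClaims{
		"iss": "example-issuer", // hypothetical issuer
		"exp": float64(time.Now().Add(time.Hour).Unix()),
	}

	fmt.Println(claims.Valid())                              // <nil>: not expired
	fmt.Println(claims.VerifyIssuer("example-issuer", true)) // true
	fmt.Println(claims.VerifyIssuer("someone-else", true))   // false
}
```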
diff --git a/vendor/github.com/dgrijalva/jwt-go/none.go b/vendor/github.com/dgrijalva/jwt-go/none.go
new file mode 100644
index 0000000..f04d189
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/none.go
@@ -0,0 +1,52 @@
+package jwt
+
+// Implements the none signing method.  This is required by the spec
+// but you probably should never use it.
+var SigningMethodNone *signingMethodNone
+
+const UnsafeAllowNoneSignatureType unsafeNoneMagicConstant = "none signing method allowed"
+
+var NoneSignatureTypeDisallowedError error
+
+type signingMethodNone struct{}
+type unsafeNoneMagicConstant string
+
+func init() {
+	SigningMethodNone = &signingMethodNone{}
+	NoneSignatureTypeDisallowedError = NewValidationError("'none' signature type is not allowed", ValidationErrorSignatureInvalid)
+
+	RegisterSigningMethod(SigningMethodNone.Alg(), func() SigningMethod {
+		return SigningMethodNone
+	})
+}
+
+func (m *signingMethodNone) Alg() string {
+	return "none"
+}
+
+// Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key
+func (m *signingMethodNone) Verify(signingString, signature string, key interface{}) (err error) {
+	// Key must be UnsafeAllowNoneSignatureType to prevent accidentally
+	// accepting 'none' signing method
+	if _, ok := key.(unsafeNoneMagicConstant); !ok {
+		return NoneSignatureTypeDisallowedError
+	}
+	// If signing method is none, signature must be an empty string
+	if signature != "" {
+		return NewValidationError(
+			"'none' signing method with non-empty signature",
+			ValidationErrorSignatureInvalid,
+		)
+	}
+
+	// Accept 'none' signing method.
+	return nil
+}
+
+// Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key
+func (m *signingMethodNone) Sign(signingString string, key interface{}) (string, error) {
+	if _, ok := key.(unsafeNoneMagicConstant); ok {
+		return "", nil
+	}
+	return "", NoneSignatureTypeDisallowedError
+}
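The magic-constant guard in action, as a sketch (the claims are illustrative):

```go
package main

import (
	"fmt"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	token := jwt.NewWithClaims(jwt.SigningMethodNone, jwt.MapClaims{"foo": "bar"})

	// Any ordinary key is refused, so 'none' can't be produced by accident.
	if _, err := token.SignedString([]byte("a normal key")); err != nil {
		fmt.Println(err) // 'none' signature type is not allowed
	}

	// Only the explicit opt-in constant unlocks it; the result has an
	// empty third segment.
	s, _ := token.SignedString(jwt.UnsafeAllowNoneSignatureType)
	fmt.Println(s)
}
```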
diff --git a/vendor/github.com/dgrijalva/jwt-go/parser.go b/vendor/github.com/dgrijalva/jwt-go/parser.go
new file mode 100644
index 0000000..d6901d9
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/parser.go
@@ -0,0 +1,148 @@
+package jwt
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"strings"
+)
+
+type Parser struct {
+	ValidMethods         []string // If populated, only these methods will be considered valid
+	UseJSONNumber        bool     // Use JSON Number format in JSON decoder
+	SkipClaimsValidation bool     // Skip claims validation during token parsing
+}
+
+// Parse, validate, and return a token.
+// keyFunc will receive the parsed token and should return the key for validating.
+// If everything is kosher, err will be nil
+func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) {
+	return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc)
+}
+
+func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) {
+	token, parts, err := p.ParseUnverified(tokenString, claims)
+	if err != nil {
+		return token, err
+	}
+
+	// Verify signing method is in the required set
+	if p.ValidMethods != nil {
+		var signingMethodValid = false
+		var alg = token.Method.Alg()
+		for _, m := range p.ValidMethods {
+			if m == alg {
+				signingMethodValid = true
+				break
+			}
+		}
+		if !signingMethodValid {
+			// signing method is not in the listed set
+			return token, NewValidationError(fmt.Sprintf("signing method %v is invalid", alg), ValidationErrorSignatureInvalid)
+		}
+	}
+
+	// Lookup key
+	var key interface{}
+	if keyFunc == nil {
+		// keyFunc was not provided; short-circuit validation
+		return token, NewValidationError("no Keyfunc was provided", ValidationErrorUnverifiable)
+	}
+	if key, err = keyFunc(token); err != nil {
+		// keyFunc returned an error
+		if ve, ok := err.(*ValidationError); ok {
+			return token, ve
+		}
+		return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable}
+	}
+
+	vErr := &ValidationError{}
+
+	// Validate Claims
+	if !p.SkipClaimsValidation {
+		if err := token.Claims.Valid(); err != nil {
+
+			// If Claims.Valid returned an error, check whether it is a *ValidationError;
+			// if it is some other error type, wrap it in a ValidationError with the
+			// generic ClaimsInvalid flag set
+			if e, ok := err.(*ValidationError); !ok {
+				vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid}
+			} else {
+				vErr = e
+			}
+		}
+	}
+
+	// Validate the signature
+	token.Signature = parts[2]
+	if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil {
+		vErr.Inner = err
+		vErr.Errors |= ValidationErrorSignatureInvalid
+	}
+
+	if vErr.valid() {
+		token.Valid = true
+		return token, nil
+	}
+
+	return token, vErr
+}
+
+// WARNING: Don't use this method unless you know what you're doing
+//
+// This method parses the token but doesn't validate the signature. It's only
+// ever useful in cases where you know the signature is valid (because it has
+// been checked previously in the stack) and you want to extract values from
+// it.
+func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) {
+	parts = strings.Split(tokenString, ".")
+	if len(parts) != 3 {
+		return nil, parts, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed)
+	}
+
+	token = &Token{Raw: tokenString}
+
+	// parse Header
+	var headerBytes []byte
+	if headerBytes, err = DecodeSegment(parts[0]); err != nil {
+		if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") {
+			return token, parts, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed)
+		}
+		return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
+	}
+	if err = json.Unmarshal(headerBytes, &token.Header); err != nil {
+		return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
+	}
+
+	// parse Claims
+	var claimBytes []byte
+	token.Claims = claims
+
+	if claimBytes, err = DecodeSegment(parts[1]); err != nil {
+		return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
+	}
+	dec := json.NewDecoder(bytes.NewBuffer(claimBytes))
+	if p.UseJSONNumber {
+		dec.UseNumber()
+	}
+	// JSON Decode.  Special case for map type to avoid weird pointer behavior
+	if c, ok := token.Claims.(MapClaims); ok {
+		err = dec.Decode(&c)
+	} else {
+		err = dec.Decode(&claims)
+	}
+	// Handle decode error
+	if err != nil {
+		return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
+	}
+
+	// Lookup signature method
+	if method, ok := token.Header["alg"].(string); ok {
+		if token.Method = GetSigningMethod(method); token.Method == nil {
+			return token, parts, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable)
+		}
+	} else {
+		return token, parts, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable)
+	}
+
+	return token, parts, nil
+}
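A sketch of why `ValidMethods` matters: pinning the expected algorithm up front blocks algorithm-substitution tricks before the key is even consulted (the secret is a placeholder):

```go
package main

import (
	"fmt"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	key := []byte("example-hmac-secret") // placeholder secret
	t := jwt.New(jwt.SigningMethodHS256)
	signed, _ := t.SignedString(key)

	// Only HS256 tokens get past this parser; an RS256 or 'none' header
	// would fail with ValidationErrorSignatureInvalid.
	parser := &jwt.Parser{ValidMethods: []string{"HS256"}}
	token, err := parser.Parse(signed, func(t *jwt.Token) (interface{}, error) {
		return key, nil
	})
	fmt.Println(token.Valid, err) // true <nil>
}
```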
diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa.go b/vendor/github.com/dgrijalva/jwt-go/rsa.go
new file mode 100644
index 0000000..e4caf1c
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/rsa.go
@@ -0,0 +1,101 @@
+package jwt
+
+import (
+	"crypto"
+	"crypto/rand"
+	"crypto/rsa"
+)
+
+// Implements the RSA family of signing methods
+// Expects *rsa.PrivateKey for signing and *rsa.PublicKey for validation
+type SigningMethodRSA struct {
+	Name string
+	Hash crypto.Hash
+}
+
+// Specific instances for RS256 and company
+var (
+	SigningMethodRS256 *SigningMethodRSA
+	SigningMethodRS384 *SigningMethodRSA
+	SigningMethodRS512 *SigningMethodRSA
+)
+
+func init() {
+	// RS256
+	SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256}
+	RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod {
+		return SigningMethodRS256
+	})
+
+	// RS384
+	SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384}
+	RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod {
+		return SigningMethodRS384
+	})
+
+	// RS512
+	SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512}
+	RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod {
+		return SigningMethodRS512
+	})
+}
+
+func (m *SigningMethodRSA) Alg() string {
+	return m.Name
+}
+
+// Implements the Verify method from SigningMethod
+// For this verify method, key must be an *rsa.PublicKey
+func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error {
+	var err error
+
+	// Decode the signature
+	var sig []byte
+	if sig, err = DecodeSegment(signature); err != nil {
+		return err
+	}
+
+	var rsaKey *rsa.PublicKey
+	var ok bool
+
+	if rsaKey, ok = key.(*rsa.PublicKey); !ok {
+		return ErrInvalidKeyType
+	}
+
+	// Create hasher
+	if !m.Hash.Available() {
+		return ErrHashUnavailable
+	}
+	hasher := m.Hash.New()
+	hasher.Write([]byte(signingString))
+
+	// Verify the signature
+	return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig)
+}
+
+// Implements the Sign method from SigningMethod
+// For this signing method, key must be an *rsa.PrivateKey
+func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) {
+	var rsaKey *rsa.PrivateKey
+	var ok bool
+
+	// Validate type of key
+	if rsaKey, ok = key.(*rsa.PrivateKey); !ok {
+		return "", ErrInvalidKey
+	}
+
+	// Create the hasher
+	if !m.Hash.Available() {
+		return "", ErrHashUnavailable
+	}
+
+	hasher := m.Hash.New()
+	hasher.Write([]byte(signingString))
+
+	// Sign the string and return the encoded bytes
+	sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil))
+	if err != nil {
+		return "", err
+	}
+	return EncodeSegment(sigBytes), nil
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go b/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go
new file mode 100644
index 0000000..10ee9db
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go
@@ -0,0 +1,126 @@
+// +build go1.4
+
+package jwt
+
+import (
+	"crypto"
+	"crypto/rand"
+	"crypto/rsa"
+)
+
+// Implements the RSAPSS family of signing methods
+type SigningMethodRSAPSS struct {
+	*SigningMethodRSA
+	Options *rsa.PSSOptions
+}
+
+// Specific instances for RS/PS and company
+var (
+	SigningMethodPS256 *SigningMethodRSAPSS
+	SigningMethodPS384 *SigningMethodRSAPSS
+	SigningMethodPS512 *SigningMethodRSAPSS
+)
+
+func init() {
+	// PS256
+	SigningMethodPS256 = &SigningMethodRSAPSS{
+		&SigningMethodRSA{
+			Name: "PS256",
+			Hash: crypto.SHA256,
+		},
+		&rsa.PSSOptions{
+			SaltLength: rsa.PSSSaltLengthAuto,
+			Hash:       crypto.SHA256,
+		},
+	}
+	RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod {
+		return SigningMethodPS256
+	})
+
+	// PS384
+	SigningMethodPS384 = &SigningMethodRSAPSS{
+		&SigningMethodRSA{
+			Name: "PS384",
+			Hash: crypto.SHA384,
+		},
+		&rsa.PSSOptions{
+			SaltLength: rsa.PSSSaltLengthAuto,
+			Hash:       crypto.SHA384,
+		},
+	}
+	RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod {
+		return SigningMethodPS384
+	})
+
+	// PS512
+	SigningMethodPS512 = &SigningMethodRSAPSS{
+		&SigningMethodRSA{
+			Name: "PS512",
+			Hash: crypto.SHA512,
+		},
+		&rsa.PSSOptions{
+			SaltLength: rsa.PSSSaltLengthAuto,
+			Hash:       crypto.SHA512,
+		},
+	}
+	RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod {
+		return SigningMethodPS512
+	})
+}
+
+// Implements the Verify method from SigningMethod
+// For this verify method, key must be an *rsa.PublicKey
+func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error {
+	var err error
+
+	// Decode the signature
+	var sig []byte
+	if sig, err = DecodeSegment(signature); err != nil {
+		return err
+	}
+
+	var rsaKey *rsa.PublicKey
+	switch k := key.(type) {
+	case *rsa.PublicKey:
+		rsaKey = k
+	default:
+		return ErrInvalidKey
+	}
+
+	// Create hasher
+	if !m.Hash.Available() {
+		return ErrHashUnavailable
+	}
+	hasher := m.Hash.New()
+	hasher.Write([]byte(signingString))
+
+	return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, m.Options)
+}
+
+// Implements the Sign method from SigningMethod
+// For this signing method, key must be an *rsa.PrivateKey
+func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) (string, error) {
+	var rsaKey *rsa.PrivateKey
+
+	switch k := key.(type) {
+	case *rsa.PrivateKey:
+		rsaKey = k
+	default:
+		return "", ErrInvalidKeyType
+	}
+
+	// Create the hasher
+	if !m.Hash.Available() {
+		return "", ErrHashUnavailable
+	}
+
+	hasher := m.Hash.New()
+	hasher.Write([]byte(signingString))
+
+	// Sign the string and return the encoded bytes
+	sigBytes, err := rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options)
+	if err != nil {
+		return "", err
+	}
+	return EncodeSegment(sigBytes), nil
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go b/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go
new file mode 100644
index 0000000..a5ababf
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go
@@ -0,0 +1,101 @@
+package jwt
+
+import (
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/pem"
+	"errors"
+)
+
+var (
+	ErrKeyMustBePEMEncoded = errors.New("invalid key: key must be a PEM encoded PKCS1 or PKCS8 key")
+	ErrNotRSAPrivateKey    = errors.New("key is not a valid RSA private key")
+	ErrNotRSAPublicKey     = errors.New("key is not a valid RSA public key")
+)
+
+// Parse PEM encoded PKCS1 or PKCS8 private key
+func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) {
+	var err error
+
+	// Parse PEM block
+	var block *pem.Block
+	if block, _ = pem.Decode(key); block == nil {
+		return nil, ErrKeyMustBePEMEncoded
+	}
+
+	var parsedKey interface{}
+	if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil {
+		if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
+			return nil, err
+		}
+	}
+
+	var pkey *rsa.PrivateKey
+	var ok bool
+	if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok {
+		return nil, ErrNotRSAPrivateKey
+	}
+
+	return pkey, nil
+}
+
+// Parse PEM encoded PKCS1 or PKCS8 private key protected with password
+func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.PrivateKey, error) {
+	var err error
+
+	// Parse PEM block
+	var block *pem.Block
+	if block, _ = pem.Decode(key); block == nil {
+		return nil, ErrKeyMustBePEMEncoded
+	}
+
+	var parsedKey interface{}
+
+	var blockDecrypted []byte
+	if blockDecrypted, err = x509.DecryptPEMBlock(block, []byte(password)); err != nil {
+		return nil, err
+	}
+
+	if parsedKey, err = x509.ParsePKCS1PrivateKey(blockDecrypted); err != nil {
+		if parsedKey, err = x509.ParsePKCS8PrivateKey(blockDecrypted); err != nil {
+			return nil, err
+		}
+	}
+
+	var pkey *rsa.PrivateKey
+	var ok bool
+	if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok {
+		return nil, ErrNotRSAPrivateKey
+	}
+
+	return pkey, nil
+}
+
+// Parse PEM encoded public key (PKIX format or an X.509 certificate)
+func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) {
+	var err error
+
+	// Parse PEM block
+	var block *pem.Block
+	if block, _ = pem.Decode(key); block == nil {
+		return nil, ErrKeyMustBePEMEncoded
+	}
+
+	// Parse the key
+	var parsedKey interface{}
+	if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
+		if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
+			parsedKey = cert.PublicKey
+		} else {
+			return nil, err
+		}
+	}
+
+	var pkey *rsa.PublicKey
+	var ok bool
+	if pkey, ok = parsedKey.(*rsa.PublicKey); !ok {
+		return nil, ErrNotRSAPublicKey
+	}
+
+	return pkey, nil
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/signing_method.go b/vendor/github.com/dgrijalva/jwt-go/signing_method.go
new file mode 100644
index 0000000..ed1f212
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/signing_method.go
@@ -0,0 +1,35 @@
+package jwt
+
+import (
+	"sync"
+)
+
+var signingMethods = map[string]func() SigningMethod{}
+var signingMethodLock = new(sync.RWMutex)
+
+// Implement SigningMethod to add new methods for signing or verifying tokens.
+type SigningMethod interface {
+	Verify(signingString, signature string, key interface{}) error // Returns nil if signature is valid
+	Sign(signingString string, key interface{}) (string, error)    // Returns encoded signature or error
+	Alg() string                                                   // returns the alg identifier for this method (example: 'HS256')
+}
+
+// Register the "alg" name and a factory function for signing method.
+// This is typically done during init() in the method's implementation
+func RegisterSigningMethod(alg string, f func() SigningMethod) {
+	signingMethodLock.Lock()
+	defer signingMethodLock.Unlock()
+
+	signingMethods[alg] = f
+}
+
+// Get a signing method from an "alg" string
+func GetSigningMethod(alg string) (method SigningMethod) {
+	signingMethodLock.RLock()
+	defer signingMethodLock.RUnlock()
+
+	if methodF, ok := signingMethods[alg]; ok {
+		method = methodF()
+	}
+	return
+}
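A sketch of the registry in action; the built-in methods register themselves from their `init` functions, so lookups by the header's `alg` value resolve without extra wiring:

```go
package main

import (
	"fmt"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	fmt.Println(jwt.GetSigningMethod("HS256").Alg()) // HS256
	fmt.Println(jwt.GetSigningMethod("ES384").Alg()) // ES384

	// Unknown algs return nil, which the Parser reports as unverifiable.
	fmt.Println(jwt.GetSigningMethod("XX999")) // <nil>
}
```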
diff --git a/vendor/github.com/dgrijalva/jwt-go/token.go b/vendor/github.com/dgrijalva/jwt-go/token.go
new file mode 100644
index 0000000..d637e08
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/token.go
@@ -0,0 +1,108 @@
+package jwt
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"strings"
+	"time"
+)
+
+// TimeFunc provides the current time when parsing a token to validate the "exp" claim (expiration time).
+// You can override it to use another time source.  This is useful for testing or if your
+// server's clock is skewed relative to the token issuer's.
+var TimeFunc = time.Now
+
+// Parse methods use this callback function to supply
+// the key for verification.  The function receives the parsed,
+// but unverified Token.  This allows you to use properties in the
+// Header of the token (such as `kid`) to identify which key to use.
+type Keyfunc func(*Token) (interface{}, error)
+
+// A JWT Token.  Different fields will be used depending on whether you're
+// creating or parsing/verifying a token.
+type Token struct {
+	Raw       string                 // The raw token.  Populated when you Parse a token
+	Method    SigningMethod          // The signing method used or to be used
+	Header    map[string]interface{} // The first segment of the token
+	Claims    Claims                 // The second segment of the token
+	Signature string                 // The third segment of the token.  Populated when you Parse a token
+	Valid     bool                   // Is the token valid?  Populated when you Parse/Verify a token
+}
+
+// Create a new Token.  Takes a signing method
+func New(method SigningMethod) *Token {
+	return NewWithClaims(method, MapClaims{})
+}
+
+func NewWithClaims(method SigningMethod, claims Claims) *Token {
+	return &Token{
+		Header: map[string]interface{}{
+			"typ": "JWT",
+			"alg": method.Alg(),
+		},
+		Claims: claims,
+		Method: method,
+	}
+}
+
+// Get the complete, signed token
+func (t *Token) SignedString(key interface{}) (string, error) {
+	var sig, sstr string
+	var err error
+	if sstr, err = t.SigningString(); err != nil {
+		return "", err
+	}
+	if sig, err = t.Method.Sign(sstr, key); err != nil {
+		return "", err
+	}
+	return strings.Join([]string{sstr, sig}, "."), nil
+}
+
+// Generate the signing string.  This is the
+// most expensive part of the whole deal.  Unless you
+// need this for something special, just go straight for
+// the SignedString.
+func (t *Token) SigningString() (string, error) {
+	var err error
+	parts := make([]string, 2)
+	for i := range parts {
+		var jsonValue []byte
+		if i == 0 {
+			if jsonValue, err = json.Marshal(t.Header); err != nil {
+				return "", err
+			}
+		} else {
+			if jsonValue, err = json.Marshal(t.Claims); err != nil {
+				return "", err
+			}
+		}
+
+		parts[i] = EncodeSegment(jsonValue)
+	}
+	return strings.Join(parts, "."), nil
+}
+
+// Parse, validate, and return a token.
+// keyFunc will receive the parsed token and should return the key for validating.
+// If everything is kosher, err will be nil
+func Parse(tokenString string, keyFunc Keyfunc) (*Token, error) {
+	return new(Parser).Parse(tokenString, keyFunc)
+}
+
+func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) {
+	return new(Parser).ParseWithClaims(tokenString, claims, keyFunc)
+}
+
+// Encode a segment using JWT-specific base64url encoding, with the padding stripped
+func EncodeSegment(seg []byte) string {
+	return strings.TrimRight(base64.URLEncoding.EncodeToString(seg), "=")
+}
+
+// Decode a JWT-specific base64url-encoded segment, restoring stripped padding first
+func DecodeSegment(seg string) ([]byte, error) {
+	if l := len(seg) % 4; l > 0 {
+		seg += strings.Repeat("=", 4-l)
+	}
+
+	return base64.URLEncoding.DecodeString(seg)
+}
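Tying the pieces together, a minimal end-to-end sketch (secret and claims are placeholders):

```go
package main

import (
	"fmt"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	key := []byte("example-hmac-secret") // placeholder secret

	// Build and sign a token.
	token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{
		"sub": "1234567890", // hypothetical subject
	})
	signed, err := token.SignedString(key)
	if err != nil {
		panic(err)
	}

	// Parse and verify it again.
	parsed, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
		return key, nil
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed.Valid, parsed.Claims.(jwt.MapClaims)["sub"]) // true 1234567890
}
```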
diff --git a/vendor/github.com/ghodss/yaml/.gitignore b/vendor/github.com/ghodss/yaml/.gitignore
new file mode 100644
index 0000000..e256a31
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/.gitignore
@@ -0,0 +1,20 @@
+# OSX leaves these everywhere on SMB shares
+._*
+
+# Eclipse files
+.classpath
+.project
+.settings/**
+
+# Emacs save files
+*~
+
+# Vim-related files
+[._]*.s[a-w][a-z]
+[._]s[a-w][a-z]
+*.un~
+Session.vim
+.netrwhist
+
+# Go test binaries
+*.test
diff --git a/vendor/github.com/ghodss/yaml/.travis.yml b/vendor/github.com/ghodss/yaml/.travis.yml
new file mode 100644
index 0000000..0e9d6ed
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/.travis.yml
@@ -0,0 +1,7 @@
+language: go
+go:
+  - 1.3
+  - 1.4
+script:
+  - go test
+  - go build
diff --git a/vendor/github.com/ghodss/yaml/LICENSE b/vendor/github.com/ghodss/yaml/LICENSE
new file mode 100644
index 0000000..7805d36
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/LICENSE
@@ -0,0 +1,50 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Sam Ghods
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/ghodss/yaml/README.md b/vendor/github.com/ghodss/yaml/README.md
new file mode 100644
index 0000000..0200f75
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/README.md
@@ -0,0 +1,121 @@
+# YAML marshaling and unmarshaling support for Go
+
+[![Build Status](https://travis-ci.org/ghodss/yaml.svg)](https://travis-ci.org/ghodss/yaml)
+
+## Introduction
+
+A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs.
+
+In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON`, unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/).
+
+## Compatibility
+
+This package uses [go-yaml](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility).
+
+## Caveats
+
+**Caveat #1:** When using `yaml.Marshal` and `yaml.Unmarshal`, binary data should NOT be preceded with the `!!binary` YAML tag. If you do, go-yaml will convert the binary data from base64 to native binary data, which is not compatible with JSON. You can still use binary in your YAML files, though - just store it without the `!!binary` tag and decode the base64 in your code (e.g. in the custom JSON methods `MarshalJSON` and `UnmarshalJSON`). This also has the benefit that your YAML and your JSON binary data will be decoded in exactly the same way. As an example:
+
+```
+BAD:
+	exampleKey: !!binary gIGC
+
+GOOD:
+	exampleKey: gIGC
+... and decode the base64 data in your code.
+```
+
+**Caveat #2:** When using `YAMLToJSON` directly, maps with keys that are themselves maps will result in an error, since JSON does not support non-string map keys. The same error occurs in `Unmarshal`, since such keys cannot be unmarshaled into struct fields anyway.
+
+## Installation and usage
+
+To install, run:
+
+```
+$ go get github.com/ghodss/yaml
+```
+
+And import using:
+
+```
+import "github.com/ghodss/yaml"
+```
+
+Usage is very similar to the JSON library:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/ghodss/yaml"
+)
+
+type Person struct {
+	Name string `json:"name"` // Affects YAML field names too.
+	Age  int    `json:"age"`
+}
+
+func main() {
+	// Marshal a Person struct to YAML.
+	p := Person{"John", 30}
+	y, err := yaml.Marshal(p)
+	if err != nil {
+		fmt.Printf("err: %v\n", err)
+		return
+	}
+	fmt.Println(string(y))
+	/* Output:
+	age: 30
+	name: John
+	*/
+
+	// Unmarshal the YAML back into a Person struct.
+	var p2 Person
+	err = yaml.Unmarshal(y, &p2)
+	if err != nil {
+		fmt.Printf("err: %v\n", err)
+		return
+	}
+	fmt.Println(p2)
+	/* Output:
+	{John 30}
+	*/
+}
+```
+
+`yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/ghodss/yaml"
+)
+
+func main() {
+	j := []byte(`{"name": "John", "age": 30}`)
+	y, err := yaml.JSONToYAML(j)
+	if err != nil {
+		fmt.Printf("err: %v\n", err)
+		return
+	}
+	fmt.Println(string(y))
+	/* Output:
+	name: John
+	age: 30
+	*/
+	j2, err := yaml.YAMLToJSON(y)
+	if err != nil {
+		fmt.Printf("err: %v\n", err)
+		return
+	}
+	fmt.Println(string(j2))
+	/* Output:
+	{"age":30,"name":"John"}
+	*/
+}
+```
diff --git a/vendor/github.com/ghodss/yaml/fields.go b/vendor/github.com/ghodss/yaml/fields.go
new file mode 100644
index 0000000..5860074
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/fields.go
@@ -0,0 +1,501 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+package yaml
+
+import (
+	"bytes"
+	"encoding"
+	"encoding/json"
+	"reflect"
+	"sort"
+	"strings"
+	"sync"
+	"unicode"
+	"unicode/utf8"
+)
+
+// indirect walks down v allocating pointers as needed,
+// until it gets to a non-pointer.
+// if it encounters an Unmarshaler, indirect stops and returns that.
+// if decodingNull is true, indirect stops at the last pointer so it can be set to nil.
+func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
+	// If v is a named type and is addressable,
+	// start with its address, so that if the type has pointer methods,
+	// we find them.
+	if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
+		v = v.Addr()
+	}
+	for {
+		// Load value from interface, but only if the result will be
+		// usefully addressable.
+		if v.Kind() == reflect.Interface && !v.IsNil() {
+			e := v.Elem()
+			if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
+				v = e
+				continue
+			}
+		}
+
+		if v.Kind() != reflect.Ptr {
+			break
+		}
+
+		if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
+			break
+		}
+		if v.IsNil() {
+			if v.CanSet() {
+				v.Set(reflect.New(v.Type().Elem()))
+			} else {
+				v = reflect.New(v.Type().Elem())
+			}
+		}
+		if v.Type().NumMethod() > 0 {
+			if u, ok := v.Interface().(json.Unmarshaler); ok {
+				return u, nil, reflect.Value{}
+			}
+			if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
+				return nil, u, reflect.Value{}
+			}
+		}
+		v = v.Elem()
+	}
+	return nil, nil, v
+}
+
+// A field represents a single field found in a struct.
+type field struct {
+	name      string
+	nameBytes []byte                 // []byte(name)
+	equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent
+
+	tag       bool
+	index     []int
+	typ       reflect.Type
+	omitEmpty bool
+	quoted    bool
+}
+
+func fillField(f field) field {
+	f.nameBytes = []byte(f.name)
+	f.equalFold = foldFunc(f.nameBytes)
+	return f
+}
+
+// byName sorts field by name, breaking ties with depth,
+// then breaking ties with "name came from json tag", then
+// breaking ties with index sequence.
+type byName []field
+
+func (x byName) Len() int { return len(x) }
+
+func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byName) Less(i, j int) bool {
+	if x[i].name != x[j].name {
+		return x[i].name < x[j].name
+	}
+	if len(x[i].index) != len(x[j].index) {
+		return len(x[i].index) < len(x[j].index)
+	}
+	if x[i].tag != x[j].tag {
+		return x[i].tag
+	}
+	return byIndex(x).Less(i, j)
+}
+
+// byIndex sorts field by index sequence.
+type byIndex []field
+
+func (x byIndex) Len() int { return len(x) }
+
+func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byIndex) Less(i, j int) bool {
+	for k, xik := range x[i].index {
+		if k >= len(x[j].index) {
+			return false
+		}
+		if xik != x[j].index[k] {
+			return xik < x[j].index[k]
+		}
+	}
+	return len(x[i].index) < len(x[j].index)
+}
+
+// typeFields returns a list of fields that JSON should recognize for the given type.
+// The algorithm is breadth-first search over the set of structs to include - the top struct
+// and then any reachable anonymous structs.
+func typeFields(t reflect.Type) []field {
+	// Anonymous fields to explore at the current level and the next.
+	current := []field{}
+	next := []field{{typ: t}}
+
+	// Count of queued names for current level and the next.
+	count := map[reflect.Type]int{}
+	nextCount := map[reflect.Type]int{}
+
+	// Types already visited at an earlier level.
+	visited := map[reflect.Type]bool{}
+
+	// Fields found.
+	var fields []field
+
+	for len(next) > 0 {
+		current, next = next, current[:0]
+		count, nextCount = nextCount, map[reflect.Type]int{}
+
+		for _, f := range current {
+			if visited[f.typ] {
+				continue
+			}
+			visited[f.typ] = true
+
+			// Scan f.typ for fields to include.
+			for i := 0; i < f.typ.NumField(); i++ {
+				sf := f.typ.Field(i)
+				if sf.PkgPath != "" { // unexported
+					continue
+				}
+				tag := sf.Tag.Get("json")
+				if tag == "-" {
+					continue
+				}
+				name, opts := parseTag(tag)
+				if !isValidTag(name) {
+					name = ""
+				}
+				index := make([]int, len(f.index)+1)
+				copy(index, f.index)
+				index[len(f.index)] = i
+
+				ft := sf.Type
+				if ft.Name() == "" && ft.Kind() == reflect.Ptr {
+					// Follow pointer.
+					ft = ft.Elem()
+				}
+
+				// Record found field and index sequence.
+				if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
+					tagged := name != ""
+					if name == "" {
+						name = sf.Name
+					}
+					fields = append(fields, fillField(field{
+						name:      name,
+						tag:       tagged,
+						index:     index,
+						typ:       ft,
+						omitEmpty: opts.Contains("omitempty"),
+						quoted:    opts.Contains("string"),
+					}))
+					if count[f.typ] > 1 {
+						// If there were multiple instances, add a second,
+						// so that the annihilation code will see a duplicate.
+						// It only cares about the distinction between 1 or 2,
+						// so don't bother generating any more copies.
+						fields = append(fields, fields[len(fields)-1])
+					}
+					continue
+				}
+
+				// Record new anonymous struct to explore in next round.
+				nextCount[ft]++
+				if nextCount[ft] == 1 {
+					next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft}))
+				}
+			}
+		}
+	}
+
+	sort.Sort(byName(fields))
+
+	// Delete all fields that are hidden by the Go rules for embedded fields,
+	// except that fields with JSON tags are promoted.
+
+	// The fields are sorted in primary order of name, secondary order
+	// of field index length. Loop over names; for each name, delete
+	// hidden fields by choosing the one dominant field that survives.
+	out := fields[:0]
+	for advance, i := 0, 0; i < len(fields); i += advance {
+		// One iteration per name.
+		// Find the sequence of fields with the name of this first field.
+		fi := fields[i]
+		name := fi.name
+		for advance = 1; i+advance < len(fields); advance++ {
+			fj := fields[i+advance]
+			if fj.name != name {
+				break
+			}
+		}
+		if advance == 1 { // Only one field with this name
+			out = append(out, fi)
+			continue
+		}
+		dominant, ok := dominantField(fields[i : i+advance])
+		if ok {
+			out = append(out, dominant)
+		}
+	}
+
+	fields = out
+	sort.Sort(byIndex(fields))
+
+	return fields
+}
+
+// dominantField looks through the fields, all of which are known to
+// have the same name, to find the single field that dominates the
+// others using Go's embedding rules, modified by the presence of
+// JSON tags. If there are multiple top-level fields, the boolean
+// will be false: This condition is an error in Go and we skip all
+// the fields.
+func dominantField(fields []field) (field, bool) {
+	// The fields are sorted in increasing index-length order. The winner
+	// must therefore be one with the shortest index length. Drop all
+	// longer entries, which is easy: just truncate the slice.
+	length := len(fields[0].index)
+	tagged := -1 // Index of first tagged field.
+	for i, f := range fields {
+		if len(f.index) > length {
+			fields = fields[:i]
+			break
+		}
+		if f.tag {
+			if tagged >= 0 {
+				// Multiple tagged fields at the same level: conflict.
+				// Return no field.
+				return field{}, false
+			}
+			tagged = i
+		}
+	}
+	if tagged >= 0 {
+		return fields[tagged], true
+	}
+	// All remaining fields have the same length. If there's more than one,
+	// we have a conflict (two fields named "X" at the same level) and we
+	// return no field.
+	if len(fields) > 1 {
+		return field{}, false
+	}
+	return fields[0], true
+}
+
+var fieldCache struct {
+	sync.RWMutex
+	m map[reflect.Type][]field
+}
+
+// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
+func cachedTypeFields(t reflect.Type) []field {
+	fieldCache.RLock()
+	f := fieldCache.m[t]
+	fieldCache.RUnlock()
+	if f != nil {
+		return f
+	}
+
+	// Compute fields without lock.
+	// Might duplicate effort but won't hold other computations back.
+	f = typeFields(t)
+	if f == nil {
+		f = []field{}
+	}
+
+	fieldCache.Lock()
+	if fieldCache.m == nil {
+		fieldCache.m = map[reflect.Type][]field{}
+	}
+	fieldCache.m[t] = f
+	fieldCache.Unlock()
+	return f
+}
+
+func isValidTag(s string) bool {
+	if s == "" {
+		return false
+	}
+	for _, c := range s {
+		switch {
+		case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
+			// Backslash and quote chars are reserved, but
+			// otherwise any punctuation chars are allowed
+			// in a tag name.
+		default:
+			if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+const (
+	caseMask     = ^byte(0x20) // Mask to ignore case in ASCII.
+	kelvin       = '\u212a'
+	smallLongEss = '\u017f'
+)
+
+// foldFunc returns one of four different case folding equivalence
+// functions, from most general (and slow) to fastest:
+//
+// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
+// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
+// 3) asciiEqualFold, no special, but includes non-letters (including _)
+// 4) simpleLetterEqualFold, no specials, no non-letters.
+//
+// The letters S and K are special because they map to 3 runes, not just 2:
+//  * S maps to s and to U+017F 'ſ' Latin small letter long s
+//  * k maps to K and to U+212A 'K' Kelvin sign
+// See http://play.golang.org/p/tTxjOc0OGo
+//
+// The returned function is specialized for matching against s and
+// should only be given s. It's not curried for performance reasons.
+func foldFunc(s []byte) func(s, t []byte) bool {
+	nonLetter := false
+	special := false // special letter
+	for _, b := range s {
+		if b >= utf8.RuneSelf {
+			return bytes.EqualFold
+		}
+		upper := b & caseMask
+		if upper < 'A' || upper > 'Z' {
+			nonLetter = true
+		} else if upper == 'K' || upper == 'S' {
+			// See above for why these letters are special.
+			special = true
+		}
+	}
+	if special {
+		return equalFoldRight
+	}
+	if nonLetter {
+		return asciiEqualFold
+	}
+	return simpleLetterEqualFold
+}
+
+// equalFoldRight is a specialization of bytes.EqualFold when s is
+// known to be all ASCII (including punctuation), but contains an 's',
+// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
+// See comments on foldFunc.
+func equalFoldRight(s, t []byte) bool {
+	for _, sb := range s {
+		if len(t) == 0 {
+			return false
+		}
+		tb := t[0]
+		if tb < utf8.RuneSelf {
+			if sb != tb {
+				sbUpper := sb & caseMask
+				if 'A' <= sbUpper && sbUpper <= 'Z' {
+					if sbUpper != tb&caseMask {
+						return false
+					}
+				} else {
+					return false
+				}
+			}
+			t = t[1:]
+			continue
+		}
+		// sb is ASCII and t is not. t must be either kelvin
+		// sign or long s; sb must be s, S, k, or K.
+		tr, size := utf8.DecodeRune(t)
+		switch sb {
+		case 's', 'S':
+			if tr != smallLongEss {
+				return false
+			}
+		case 'k', 'K':
+			if tr != kelvin {
+				return false
+			}
+		default:
+			return false
+		}
+		t = t[size:]
+
+	}
+	if len(t) > 0 {
+		return false
+	}
+	return true
+}
+
+// asciiEqualFold is a specialization of bytes.EqualFold for use when
+// s is all ASCII (but may contain non-letters) and contains no
+// special-folding letters.
+// See comments on foldFunc.
+func asciiEqualFold(s, t []byte) bool {
+	if len(s) != len(t) {
+		return false
+	}
+	for i, sb := range s {
+		tb := t[i]
+		if sb == tb {
+			continue
+		}
+		if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
+			if sb&caseMask != tb&caseMask {
+				return false
+			}
+		} else {
+			return false
+		}
+	}
+	return true
+}
+
+// simpleLetterEqualFold is a specialization of bytes.EqualFold for
+// use when s is all ASCII letters (no underscores, etc) and also
+// doesn't contain 'k', 'K', 's', or 'S'.
+// See comments on foldFunc.
+func simpleLetterEqualFold(s, t []byte) bool {
+	if len(s) != len(t) {
+		return false
+	}
+	for i, b := range s {
+		if b&caseMask != t[i]&caseMask {
+			return false
+		}
+	}
+	return true
+}
+
+// tagOptions is the string following a comma in a struct field's "json"
+// tag, or the empty string. It does not include the leading comma.
+type tagOptions string
+
+// parseTag splits a struct field's json tag into its name and
+// comma-separated options.
+func parseTag(tag string) (string, tagOptions) {
+	if idx := strings.Index(tag, ","); idx != -1 {
+		return tag[:idx], tagOptions(tag[idx+1:])
+	}
+	return tag, tagOptions("")
+}
+
+// Contains reports whether a comma-separated list of options
+// contains a particular substr flag. substr must be surrounded by a
+// string boundary or commas.
+func (o tagOptions) Contains(optionName string) bool {
+	if len(o) == 0 {
+		return false
+	}
+	s := string(o)
+	for s != "" {
+		var next string
+		i := strings.Index(s, ",")
+		if i >= 0 {
+			s, next = s[:i], s[i+1:]
+		}
+		if s == optionName {
+			return true
+		}
+		s = next
+	}
+	return false
+}
diff --git a/vendor/github.com/ghodss/yaml/yaml.go b/vendor/github.com/ghodss/yaml/yaml.go
new file mode 100644
index 0000000..4fb4054
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/yaml.go
@@ -0,0 +1,277 @@
+package yaml
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"strconv"
+
+	"gopkg.in/yaml.v2"
+)
+
+// Marshals the object into JSON then converts JSON to YAML and returns the
+// YAML.
+func Marshal(o interface{}) ([]byte, error) {
+	j, err := json.Marshal(o)
+	if err != nil {
+		return nil, fmt.Errorf("error marshaling into JSON: %v", err)
+	}
+
+	y, err := JSONToYAML(j)
+	if err != nil {
+		return nil, fmt.Errorf("error converting JSON to YAML: %v", err)
+	}
+
+	return y, nil
+}
+
+// Converts YAML to JSON then uses JSON to unmarshal into an object.
+func Unmarshal(y []byte, o interface{}) error {
+	vo := reflect.ValueOf(o)
+	j, err := yamlToJSON(y, &vo)
+	if err != nil {
+		return fmt.Errorf("error converting YAML to JSON: %v", err)
+	}
+
+	err = json.Unmarshal(j, o)
+	if err != nil {
+		return fmt.Errorf("error unmarshaling JSON: %v", err)
+	}
+
+	return nil
+}
+
+// Convert JSON to YAML.
+func JSONToYAML(j []byte) ([]byte, error) {
+	// Convert the JSON to an object.
+	var jsonObj interface{}
+	// We are using yaml.Unmarshal here (instead of json.Unmarshal) because the
+	// Go JSON library doesn't try to pick the right number type (int, float,
+	// etc.) when unmarshaling to interface{}, it just picks float64
+	// universally. go-yaml does go through the effort of picking the right
+	// number type, so we can preserve number type throughout this process.
+	err := yaml.Unmarshal(j, &jsonObj)
+	if err != nil {
+		return nil, err
+	}
+
+	// Marshal this object into YAML.
+	return yaml.Marshal(jsonObj)
+}
+
+// Convert YAML to JSON. Since JSON is a subset of YAML, passing JSON through
+// this method should be a no-op.
+//
+// Things YAML can do that are not supported by JSON:
+// * In YAML you can have binary and null keys in your maps. These are invalid
+//   in JSON. (int and float keys are converted to strings.)
+// * Binary data in YAML with the !!binary tag is not supported. If you want to
+//   use binary data with this library, encode the data as base64 as usual but do
+//   not use the !!binary tag in your YAML. This will ensure the original base64
+//   encoded data makes it all the way through to the JSON.
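+//
+// For example, the YAML mapping "1: one" (an int key) becomes the JSON
+// {"1":"one"}.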
+func YAMLToJSON(y []byte) ([]byte, error) {
+	return yamlToJSON(y, nil)
+}
+
+func yamlToJSON(y []byte, jsonTarget *reflect.Value) ([]byte, error) {
+	// Convert the YAML to an object.
+	var yamlObj interface{}
+	err := yaml.Unmarshal(y, &yamlObj)
+	if err != nil {
+		return nil, err
+	}
+
+	// YAML objects are not completely compatible with JSON objects (e.g. you
+	// can have non-string keys in YAML). So, convert the YAML-compatible object
+	// to a JSON-compatible object, failing with an error if irrecoverable
+	// incompatibilities happen along the way.
+	jsonObj, err := convertToJSONableObject(yamlObj, jsonTarget)
+	if err != nil {
+		return nil, err
+	}
+
+	// Convert this object to JSON and return the data.
+	return json.Marshal(jsonObj)
+}
+
+func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) {
+	var err error
+
+	// Resolve jsonTarget to a concrete value (i.e. not a pointer or an
+	// interface). We pass decodingNull as false because we're not actually
+	// decoding into the value, we're just checking if the ultimate target is a
+	// string.
+	if jsonTarget != nil {
+		ju, tu, pv := indirect(*jsonTarget, false)
+		// We have a JSON or Text Unmarshaler at this level, so we can't be trying
+		// to decode into a string.
+		if ju != nil || tu != nil {
+			jsonTarget = nil
+		} else {
+			jsonTarget = &pv
+		}
+	}
+
+	// If yamlObj is a number or a boolean, check if jsonTarget is a string -
+	// if so, coerce.  Else return normal.
+	// If yamlObj is a map or array, find the field that each key is
+	// unmarshaling to, and when you recurse pass the reflect.Value for that
+	// field back into this function.
+	switch typedYAMLObj := yamlObj.(type) {
+	case map[interface{}]interface{}:
+		// JSON does not support arbitrary keys in a map, so we must convert
+		// these keys to strings.
+		//
+		// From my reading of go-yaml v2 (specifically the resolve function),
+		// keys can only have the types string, int, int64, float64, binary
+		// (unsupported), or null (unsupported).
+		strMap := make(map[string]interface{})
+		for k, v := range typedYAMLObj {
+			// Resolve the key to a string first.
+			var keyString string
+			switch typedKey := k.(type) {
+			case string:
+				keyString = typedKey
+			case int:
+				keyString = strconv.Itoa(typedKey)
+			case int64:
+				// go-yaml only returns an int64 key when the platform's int is
+				// 32 bits and the key's value fits in 64 bits but not in 32.
+				// Otherwise the key type will simply be int.
+				keyString = strconv.FormatInt(typedKey, 10)
+			case float64:
+				// Borrowed from go-yaml so that floats are converted to strings
+				// exactly the way go-yaml converts them when marshaling.
+				s := strconv.FormatFloat(typedKey, 'g', -1, 32)
+				switch s {
+				case "+Inf":
+					s = ".inf"
+				case "-Inf":
+					s = "-.inf"
+				case "NaN":
+					s = ".nan"
+				}
+				keyString = s
+			case bool:
+				if typedKey {
+					keyString = "true"
+				} else {
+					keyString = "false"
+				}
+			default:
+				return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v",
+					reflect.TypeOf(k), k, v)
+			}
+
+			// jsonTarget should be a struct or a map. If it's a struct, find
+			// the field it's going to map to and pass its reflect.Value. If
+			// it's a map, find the element type of the map and pass the
+			// reflect.Value created from that type. If it's neither, just pass
+			// nil - JSON conversion will error for us if it's a real issue.
+			if jsonTarget != nil {
+				t := *jsonTarget
+				if t.Kind() == reflect.Struct {
+					keyBytes := []byte(keyString)
+					// Find the field that the JSON library would use.
+					var f *field
+					fields := cachedTypeFields(t.Type())
+					for i := range fields {
+						ff := &fields[i]
+						if bytes.Equal(ff.nameBytes, keyBytes) {
+							f = ff
+							break
+						}
+						// Do case-insensitive comparison.
+						if f == nil && ff.equalFold(ff.nameBytes, keyBytes) {
+							f = ff
+						}
+					}
+					if f != nil {
+						// Find the reflect.Value of the most preferential
+						// struct field.
+						jtf := t.Field(f.index[0])
+						strMap[keyString], err = convertToJSONableObject(v, &jtf)
+						if err != nil {
+							return nil, err
+						}
+						continue
+					}
+				} else if t.Kind() == reflect.Map {
+					// Create a zero value of the map's element type to use as
+					// the JSON target.
+					jtv := reflect.Zero(t.Type().Elem())
+					strMap[keyString], err = convertToJSONableObject(v, &jtv)
+					if err != nil {
+						return nil, err
+					}
+					continue
+				}
+			}
+			strMap[keyString], err = convertToJSONableObject(v, nil)
+			if err != nil {
+				return nil, err
+			}
+		}
+		return strMap, nil
+	case []interface{}:
+		// We need to recurse into arrays in case there are any
+		// map[interface{}]interface{}'s inside and to convert any
+		// numbers to strings.
+
+		// If jsonTarget is a slice (which it really should be), find the
+		// thing it's going to map to. If it's not a slice, just pass nil
+		// - JSON conversion will error for us if it's a real issue.
+		var jsonSliceElemValue *reflect.Value
+		if jsonTarget != nil {
+			t := *jsonTarget
+			if t.Kind() == reflect.Slice {
+				// By default slices point to nil, but we need a reflect.Value
+				// pointing to a value of the slice type, so we create one here.
+				ev := reflect.Indirect(reflect.New(t.Type().Elem()))
+				jsonSliceElemValue = &ev
+			}
+		}
+
+		// Make and use a new array.
+		arr := make([]interface{}, len(typedYAMLObj))
+		for i, v := range typedYAMLObj {
+			arr[i], err = convertToJSONableObject(v, jsonSliceElemValue)
+			if err != nil {
+				return nil, err
+			}
+		}
+		return arr, nil
+	default:
+		// If the target type is a string and the YAML type is a number,
+		// convert the YAML type to a string.
+		if jsonTarget != nil && (*jsonTarget).Kind() == reflect.String {
+			// Based on my reading of go-yaml, it may return int, int64,
+			// float64, or uint64.
+			var s string
+			switch typedVal := typedYAMLObj.(type) {
+			case int:
+				s = strconv.FormatInt(int64(typedVal), 10)
+			case int64:
+				s = strconv.FormatInt(typedVal, 10)
+			case float64:
+				s = strconv.FormatFloat(typedVal, 'g', -1, 32)
+			case uint64:
+				s = strconv.FormatUint(typedVal, 10)
+			case bool:
+				if typedVal {
+					s = "true"
+				} else {
+					s = "false"
+				}
+			}
+			if len(s) > 0 {
+				yamlObj = interface{}(s)
+			}
+		}
+		return yamlObj, nil
+	}
+
+	return nil, nil
+}
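+
+// unmarshalSketch is an editorial usage sketch (not upstream code): because
+// Unmarshal round-trips through JSON, it honors `json` struct tags rather
+// than `yaml` ones. The config struct and its values are hypothetical.
+func unmarshalSketch() error {
+	type config struct {
+		Name  string `json:"name"`
+		Ports []int  `json:"ports"`
+	}
+	data := []byte("name: router\nports:\n- 8080\n- 9090\n")
+	var c config
+	// On success c holds {Name: "router", Ports: [8080, 9090]}.
+	return Unmarshal(data, &c)
+}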
diff --git a/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go b/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go
new file mode 100644
index 0000000..ceadde6
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go
@@ -0,0 +1,101 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package sortkeys
+
+import (
+	"sort"
+)
+
+func Strings(l []string) {
+	sort.Strings(l)
+}
+
+func Float64s(l []float64) {
+	sort.Float64s(l)
+}
+
+func Float32s(l []float32) {
+	sort.Sort(Float32Slice(l))
+}
+
+func Int64s(l []int64) {
+	sort.Sort(Int64Slice(l))
+}
+
+func Int32s(l []int32) {
+	sort.Sort(Int32Slice(l))
+}
+
+func Uint64s(l []uint64) {
+	sort.Sort(Uint64Slice(l))
+}
+
+func Uint32s(l []uint32) {
+	sort.Sort(Uint32Slice(l))
+}
+
+func Bools(l []bool) {
+	sort.Sort(BoolSlice(l))
+}
+
+type BoolSlice []bool
+
+func (p BoolSlice) Len() int           { return len(p) }
+func (p BoolSlice) Less(i, j int) bool { return !p[i] && p[j] } // false sorts before true
+func (p BoolSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+
+type Int64Slice []int64
+
+func (p Int64Slice) Len() int           { return len(p) }
+func (p Int64Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p Int64Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+
+type Int32Slice []int32
+
+func (p Int32Slice) Len() int           { return len(p) }
+func (p Int32Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p Int32Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+
+type Uint64Slice []uint64
+
+func (p Uint64Slice) Len() int           { return len(p) }
+func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p Uint64Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+
+type Uint32Slice []uint32
+
+func (p Uint32Slice) Len() int           { return len(p) }
+func (p Uint32Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p Uint32Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+
+type Float32Slice []float32
+
+func (p Float32Slice) Len() int           { return len(p) }
+func (p Float32Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p Float32Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
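+
+// sortSketch is an editorial usage sketch (not upstream code).
+func sortSketch() {
+	keys := []uint32{42, 7, 19}
+	Uint32s(keys)
+	// keys is now [7 19 42]
+}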
diff --git a/vendor/github.com/golang/groupcache/LICENSE b/vendor/github.com/golang/groupcache/LICENSE
new file mode 100644
index 0000000..37ec93a
--- /dev/null
+++ b/vendor/github.com/golang/groupcache/LICENSE
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/golang/groupcache/lru/lru.go b/vendor/github.com/golang/groupcache/lru/lru.go
new file mode 100644
index 0000000..532cc45
--- /dev/null
+++ b/vendor/github.com/golang/groupcache/lru/lru.go
@@ -0,0 +1,133 @@
+/*
+Copyright 2013 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package lru implements an LRU cache.
+package lru
+
+import "container/list"
+
+// Cache is an LRU cache. It is not safe for concurrent access.
+type Cache struct {
+	// MaxEntries is the maximum number of cache entries before
+	// an item is evicted. Zero means no limit.
+	MaxEntries int
+
+	// OnEvicted optionally specifies a callback function to be
+	// executed when an entry is purged from the cache.
+	OnEvicted func(key Key, value interface{})
+
+	ll    *list.List
+	cache map[interface{}]*list.Element
+}
+
+// A Key may be any value that is comparable. See http://golang.org/ref/spec#Comparison_operators
+type Key interface{}
+
+type entry struct {
+	key   Key
+	value interface{}
+}
+
+// New creates a new Cache.
+// If maxEntries is zero, the cache has no limit and it's assumed
+// that eviction is done by the caller.
+func New(maxEntries int) *Cache {
+	return &Cache{
+		MaxEntries: maxEntries,
+		ll:         list.New(),
+		cache:      make(map[interface{}]*list.Element),
+	}
+}
+
+// Add adds a value to the cache.
+func (c *Cache) Add(key Key, value interface{}) {
+	if c.cache == nil {
+		c.cache = make(map[interface{}]*list.Element)
+		c.ll = list.New()
+	}
+	if ee, ok := c.cache[key]; ok {
+		c.ll.MoveToFront(ee)
+		ee.Value.(*entry).value = value
+		return
+	}
+	ele := c.ll.PushFront(&entry{key, value})
+	c.cache[key] = ele
+	if c.MaxEntries != 0 && c.ll.Len() > c.MaxEntries {
+		c.RemoveOldest()
+	}
+}
+
+// Get looks up a key's value from the cache.
+func (c *Cache) Get(key Key) (value interface{}, ok bool) {
+	if c.cache == nil {
+		return
+	}
+	if ele, hit := c.cache[key]; hit {
+		c.ll.MoveToFront(ele)
+		return ele.Value.(*entry).value, true
+	}
+	return
+}
+
+// Remove removes the provided key from the cache.
+func (c *Cache) Remove(key Key) {
+	if c.cache == nil {
+		return
+	}
+	if ele, hit := c.cache[key]; hit {
+		c.removeElement(ele)
+	}
+}
+
+// RemoveOldest removes the oldest item from the cache.
+func (c *Cache) RemoveOldest() {
+	if c.cache == nil {
+		return
+	}
+	ele := c.ll.Back()
+	if ele != nil {
+		c.removeElement(ele)
+	}
+}
+
+func (c *Cache) removeElement(e *list.Element) {
+	c.ll.Remove(e)
+	kv := e.Value.(*entry)
+	delete(c.cache, kv.key)
+	if c.OnEvicted != nil {
+		c.OnEvicted(kv.key, kv.value)
+	}
+}
+
+// Len returns the number of items in the cache.
+func (c *Cache) Len() int {
+	if c.cache == nil {
+		return 0
+	}
+	return c.ll.Len()
+}
+
+// Clear purges all stored items from the cache.
+func (c *Cache) Clear() {
+	if c.OnEvicted != nil {
+		for _, e := range c.cache {
+			kv := e.Value.(*entry)
+			c.OnEvicted(kv.key, kv.value)
+		}
+	}
+	c.ll = nil
+	c.cache = nil
+}
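+
+// usageSketch is an editorial usage sketch (not upstream code). Note that
+// Cache is not safe for concurrent access; callers must add their own locking.
+func usageSketch() {
+	c := New(2) // hold at most two entries
+	c.Add("a", 1)
+	c.Add("b", 2)
+	c.Get("a")    // touch "a" so "b" becomes the oldest entry
+	c.Add("c", 3) // exceeds MaxEntries and evicts "b"
+}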
diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go
new file mode 100644
index 0000000..ada2b78
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go
@@ -0,0 +1,1271 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2015 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package jsonpb provides marshaling and unmarshaling between protocol buffers and JSON.
+It follows the specification at https://developers.google.com/protocol-buffers/docs/proto3#json.
+
+This package produces a different output than the standard "encoding/json" package,
+which does not operate correctly on protocol buffers.
+*/
+package jsonpb
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+
+	stpb "github.com/golang/protobuf/ptypes/struct"
+)
+
+const secondInNanos = int64(time.Second / time.Nanosecond)
+
+// Marshaler is a configurable object for converting between
+// protocol buffer objects and a JSON representation for them.
+type Marshaler struct {
+	// Whether to render enum values as integers, as opposed to string values.
+	EnumsAsInts bool
+
+	// Whether to render fields with zero values.
+	EmitDefaults bool
+
+	// A string to indent each level by. The presence of this field will
+	// also cause a space to appear between the field separator and
+	// value, and newlines to appear between fields and array
+	// elements.
+	Indent string
+
+	// Whether to use the original (.proto) name for fields.
+	OrigName bool
+
+	// A custom URL resolver to use when marshaling Any messages to JSON.
+	// If unset, the default resolution strategy is to extract the
+	// fully-qualified type name from the type URL and pass that to
+	// proto.MessageType(string).
+	AnyResolver AnyResolver
+}
+
+// AnyResolver takes a type URL, present in an Any message, and resolves it into
+// an instance of the associated message.
+type AnyResolver interface {
+	Resolve(typeUrl string) (proto.Message, error)
+}
+
+func defaultResolveAny(typeUrl string) (proto.Message, error) {
+	// Only the part of typeUrl after the last slash is relevant.
+	mname := typeUrl
+	if slash := strings.LastIndex(mname, "/"); slash >= 0 {
+		mname = mname[slash+1:]
+	}
+	mt := proto.MessageType(mname)
+	if mt == nil {
+		return nil, fmt.Errorf("unknown message type %q", mname)
+	}
+	return reflect.New(mt.Elem()).Interface().(proto.Message), nil
+}
+
+// JSONPBMarshaler is implemented by protobuf messages that customize the
+// way they are marshaled to JSON. Messages that implement this should
+// also implement JSONPBUnmarshaler so that the custom format can be
+// parsed.
+//
+// The JSON marshaling must follow the proto to JSON specification:
+//	https://developers.google.com/protocol-buffers/docs/proto3#json
+type JSONPBMarshaler interface {
+	MarshalJSONPB(*Marshaler) ([]byte, error)
+}
+
+// JSONPBUnmarshaler is implemented by protobuf messages that customize
+// the way they are unmarshaled from JSON. Messages that implement this
+// should also implement JSONPBMarshaler so that the custom format can be
+// produced.
+//
+// The JSON unmarshaling must follow the JSON to proto specification:
+//	https://developers.google.com/protocol-buffers/docs/proto3#json
+type JSONPBUnmarshaler interface {
+	UnmarshalJSONPB(*Unmarshaler, []byte) error
+}
+
+// Marshal marshals a protocol buffer into JSON.
+func (m *Marshaler) Marshal(out io.Writer, pb proto.Message) error {
+	v := reflect.ValueOf(pb)
+	if pb == nil || (v.Kind() == reflect.Ptr && v.IsNil()) {
+		return errors.New("Marshal called with nil")
+	}
+	// Check for unset required fields first.
+	if err := checkRequiredFields(pb); err != nil {
+		return err
+	}
+	writer := &errWriter{writer: out}
+	return m.marshalObject(writer, pb, "", "")
+}
+
+// MarshalToString converts a protocol buffer object to a JSON string.
+func (m *Marshaler) MarshalToString(pb proto.Message) (string, error) {
+	var buf bytes.Buffer
+	if err := m.Marshal(&buf, pb); err != nil {
+		return "", err
+	}
+	return buf.String(), nil
+}
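+
+// printSketch is an editorial usage sketch (not upstream code): it applies
+// the Marshaler knobs above to an arbitrary generated message.
+func printSketch(msg proto.Message) {
+	m := Marshaler{
+		EmitDefaults: true, // keep zero-valued fields in the output
+		Indent:       "  ", // pretty-print with a two-space indent
+		OrigName:     true, // use the original .proto field names
+	}
+	if s, err := m.MarshalToString(msg); err == nil {
+		fmt.Println(s)
+	}
+}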
+
+type int32Slice []int32
+
+var nonFinite = map[string]float64{
+	`"NaN"`:       math.NaN(),
+	`"Infinity"`:  math.Inf(1),
+	`"-Infinity"`: math.Inf(-1),
+}
+
+// For sorting extensions ids to ensure stable output.
+func (s int32Slice) Len() int           { return len(s) }
+func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
+func (s int32Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+
+type wkt interface {
+	XXX_WellKnownType() string
+}
+
+// marshalObject writes a struct to the Writer.
+func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeURL string) error {
+	if jsm, ok := v.(JSONPBMarshaler); ok {
+		b, err := jsm.MarshalJSONPB(m)
+		if err != nil {
+			return err
+		}
+		if typeURL != "" {
+			// we are marshaling this object to an Any type
+			var js map[string]*json.RawMessage
+			if err = json.Unmarshal(b, &js); err != nil {
+				return fmt.Errorf("type %T produced invalid JSON: %v", v, err)
+			}
+			turl, err := json.Marshal(typeURL)
+			if err != nil {
+				return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err)
+			}
+			js["@type"] = (*json.RawMessage)(&turl)
+			if b, err = json.Marshal(js); err != nil {
+				return err
+			}
+		}
+
+		out.write(string(b))
+		return out.err
+	}
+
+	s := reflect.ValueOf(v).Elem()
+
+	// Handle well-known types.
+	if wkt, ok := v.(wkt); ok {
+		switch wkt.XXX_WellKnownType() {
+		case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value",
+			"Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue":
+			// "Wrappers use the same representation in JSON
+			//  as the wrapped primitive type, ..."
+			sprop := proto.GetProperties(s.Type())
+			return m.marshalValue(out, sprop.Prop[0], s.Field(0), indent)
+		case "Any":
+			// Any is a bit more involved.
+			return m.marshalAny(out, v, indent)
+		case "Duration":
+			// "Generated output always contains 0, 3, 6, or 9 fractional digits,
+			//  depending on required precision."
+			s, ns := s.Field(0).Int(), s.Field(1).Int()
+			if ns <= -secondInNanos || ns >= secondInNanos {
+				return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos)
+			}
+			if (s > 0 && ns < 0) || (s < 0 && ns > 0) {
+				return errors.New("signs of seconds and nanos do not match")
+			}
+			if s < 0 {
+				ns = -ns
+			}
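+			// Format all nine fractional digits, then trim zeros three at a
+			// time so the output keeps 0, 3, 6, or 9 digits,
+			// e.g. "1.500000000" -> "1.500" and "2.000000000" -> "2".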
+			x := fmt.Sprintf("%d.%09d", s, ns)
+			x = strings.TrimSuffix(x, "000")
+			x = strings.TrimSuffix(x, "000")
+			x = strings.TrimSuffix(x, ".000")
+			out.write(`"`)
+			out.write(x)
+			out.write(`s"`)
+			return out.err
+		case "Struct", "ListValue":
+			// Let marshalValue handle the `Struct.fields` map or the `ListValue.values` slice.
+			// TODO: pass the correct Properties if needed.
+			return m.marshalValue(out, &proto.Properties{}, s.Field(0), indent)
+		case "Timestamp":
+			// "RFC 3339, where generated output will always be Z-normalized
+			//  and uses 0, 3, 6 or 9 fractional digits."
+			s, ns := s.Field(0).Int(), s.Field(1).Int()
+			if ns < 0 || ns >= secondInNanos {
+				return fmt.Errorf("ns out of range [0, %v)", secondInNanos)
+			}
+			t := time.Unix(s, ns).UTC()
+			// time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits).
+			x := t.Format("2006-01-02T15:04:05.000000000")
+			x = strings.TrimSuffix(x, "000")
+			x = strings.TrimSuffix(x, "000")
+			x = strings.TrimSuffix(x, ".000")
+			out.write(`"`)
+			out.write(x)
+			out.write(`Z"`)
+			return out.err
+		case "Value":
+			// Value has a single oneof.
+			kind := s.Field(0)
+			if kind.IsNil() {
+				// "absence of any variant indicates an error"
+				return errors.New("nil Value")
+			}
+			// oneof -> *T -> T -> T.F
+			x := kind.Elem().Elem().Field(0)
+			// TODO: pass the correct Properties if needed.
+			return m.marshalValue(out, &proto.Properties{}, x, indent)
+		}
+	}
+
+	out.write("{")
+	if m.Indent != "" {
+		out.write("\n")
+	}
+
+	firstField := true
+
+	if typeURL != "" {
+		if err := m.marshalTypeURL(out, indent, typeURL); err != nil {
+			return err
+		}
+		firstField = false
+	}
+
+	for i := 0; i < s.NumField(); i++ {
+		value := s.Field(i)
+		valueField := s.Type().Field(i)
+		if strings.HasPrefix(valueField.Name, "XXX_") {
+			continue
+		}
+
+		// IsNil will panic on most value kinds.
+		switch value.Kind() {
+		case reflect.Chan, reflect.Func, reflect.Interface:
+			if value.IsNil() {
+				continue
+			}
+		}
+
+		if !m.EmitDefaults {
+			switch value.Kind() {
+			case reflect.Bool:
+				if !value.Bool() {
+					continue
+				}
+			case reflect.Int32, reflect.Int64:
+				if value.Int() == 0 {
+					continue
+				}
+			case reflect.Uint32, reflect.Uint64:
+				if value.Uint() == 0 {
+					continue
+				}
+			case reflect.Float32, reflect.Float64:
+				if value.Float() == 0 {
+					continue
+				}
+			case reflect.String:
+				if value.Len() == 0 {
+					continue
+				}
+			case reflect.Map, reflect.Ptr, reflect.Slice:
+				if value.IsNil() {
+					continue
+				}
+			}
+		}
+
+		// Oneof fields need special handling.
+		if valueField.Tag.Get("protobuf_oneof") != "" {
+			// value is an interface containing &T{real_value}.
+			sv := value.Elem().Elem() // interface -> *T -> T
+			value = sv.Field(0)
+			valueField = sv.Type().Field(0)
+		}
+		prop := jsonProperties(valueField, m.OrigName)
+		if !firstField {
+			m.writeSep(out)
+		}
+		if err := m.marshalField(out, prop, value, indent); err != nil {
+			return err
+		}
+		firstField = false
+	}
+
+	// Handle proto2 extensions.
+	if ep, ok := v.(proto.Message); ok {
+		extensions := proto.RegisteredExtensions(v)
+		// Sort extensions for stable output.
+		ids := make([]int32, 0, len(extensions))
+		for id, desc := range extensions {
+			if !proto.HasExtension(ep, desc) {
+				continue
+			}
+			ids = append(ids, id)
+		}
+		sort.Sort(int32Slice(ids))
+		for _, id := range ids {
+			desc := extensions[id]
+			if desc == nil {
+				// unknown extension
+				continue
+			}
+			ext, extErr := proto.GetExtension(ep, desc)
+			if extErr != nil {
+				return extErr
+			}
+			value := reflect.ValueOf(ext)
+			var prop proto.Properties
+			prop.Parse(desc.Tag)
+			prop.JSONName = fmt.Sprintf("[%s]", desc.Name)
+			if !firstField {
+				m.writeSep(out)
+			}
+			if err := m.marshalField(out, &prop, value, indent); err != nil {
+				return err
+			}
+			firstField = false
+		}
+
+	}
+
+	if m.Indent != "" {
+		out.write("\n")
+		out.write(indent)
+	}
+	out.write("}")
+	return out.err
+}
+
+func (m *Marshaler) writeSep(out *errWriter) {
+	if m.Indent != "" {
+		out.write(",\n")
+	} else {
+		out.write(",")
+	}
+}
+
+func (m *Marshaler) marshalAny(out *errWriter, any proto.Message, indent string) error {
+	// "If the Any contains a value that has a special JSON mapping,
+	//  it will be converted as follows: {"@type": xxx, "value": yyy}.
+	//  Otherwise, the value will be converted into a JSON object,
+	//  and the "@type" field will be inserted to indicate the actual data type."
+	v := reflect.ValueOf(any).Elem()
+	turl := v.Field(0).String()
+	val := v.Field(1).Bytes()
+
+	var msg proto.Message
+	var err error
+	if m.AnyResolver != nil {
+		msg, err = m.AnyResolver.Resolve(turl)
+	} else {
+		msg, err = defaultResolveAny(turl)
+	}
+	if err != nil {
+		return err
+	}
+
+	if err := proto.Unmarshal(val, msg); err != nil {
+		return err
+	}
+
+	if _, ok := msg.(wkt); ok {
+		out.write("{")
+		if m.Indent != "" {
+			out.write("\n")
+		}
+		if err := m.marshalTypeURL(out, indent, turl); err != nil {
+			return err
+		}
+		m.writeSep(out)
+		if m.Indent != "" {
+			out.write(indent)
+			out.write(m.Indent)
+			out.write(`"value": `)
+		} else {
+			out.write(`"value":`)
+		}
+		if err := m.marshalObject(out, msg, indent+m.Indent, ""); err != nil {
+			return err
+		}
+		if m.Indent != "" {
+			out.write("\n")
+			out.write(indent)
+		}
+		out.write("}")
+		return out.err
+	}
+
+	return m.marshalObject(out, msg, indent, turl)
+}
+
+func (m *Marshaler) marshalTypeURL(out *errWriter, indent, typeURL string) error {
+	if m.Indent != "" {
+		out.write(indent)
+		out.write(m.Indent)
+	}
+	out.write(`"@type":`)
+	if m.Indent != "" {
+		out.write(" ")
+	}
+	b, err := json.Marshal(typeURL)
+	if err != nil {
+		return err
+	}
+	out.write(string(b))
+	return out.err
+}
+
+// marshalField writes field description and value to the Writer.
+func (m *Marshaler) marshalField(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error {
+	if m.Indent != "" {
+		out.write(indent)
+		out.write(m.Indent)
+	}
+	out.write(`"`)
+	out.write(prop.JSONName)
+	out.write(`":`)
+	if m.Indent != "" {
+		out.write(" ")
+	}
+	if err := m.marshalValue(out, prop, v, indent); err != nil {
+		return err
+	}
+	return nil
+}
+
+// marshalValue writes the value to the Writer.
+func (m *Marshaler) marshalValue(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error {
+	var err error
+	v = reflect.Indirect(v)
+
+	// Handle nil pointer
+	if v.Kind() == reflect.Invalid {
+		out.write("null")
+		return out.err
+	}
+
+	// Handle repeated elements.
+	if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 {
+		out.write("[")
+		comma := ""
+		for i := 0; i < v.Len(); i++ {
+			sliceVal := v.Index(i)
+			out.write(comma)
+			if m.Indent != "" {
+				out.write("\n")
+				out.write(indent)
+				out.write(m.Indent)
+				out.write(m.Indent)
+			}
+			if err := m.marshalValue(out, prop, sliceVal, indent+m.Indent); err != nil {
+				return err
+			}
+			comma = ","
+		}
+		if m.Indent != "" {
+			out.write("\n")
+			out.write(indent)
+			out.write(m.Indent)
+		}
+		out.write("]")
+		return out.err
+	}
+
+	// Handle well-known types.
+	// Most are handled up in marshalObject (because 99% are messages).
+	if wkt, ok := v.Interface().(wkt); ok {
+		switch wkt.XXX_WellKnownType() {
+		case "NullValue":
+			out.write("null")
+			return out.err
+		}
+	}
+
+	// Handle enumerations.
+	if !m.EnumsAsInts && prop.Enum != "" {
+		// Unknown enum values are stringified by the proto library as their
+		// value. Such values should _not_ be quoted or they will be interpreted
+		// as an enum string instead of their value.
+		enumStr := v.Interface().(fmt.Stringer).String()
+		var valStr string
+		if v.Kind() == reflect.Ptr {
+			valStr = strconv.Itoa(int(v.Elem().Int()))
+		} else {
+			valStr = strconv.Itoa(int(v.Int()))
+		}
+		isKnownEnum := enumStr != valStr
+		if isKnownEnum {
+			out.write(`"`)
+		}
+		out.write(enumStr)
+		if isKnownEnum {
+			out.write(`"`)
+		}
+		return out.err
+	}
+
+	// Handle nested messages.
+	if v.Kind() == reflect.Struct {
+		return m.marshalObject(out, v.Addr().Interface().(proto.Message), indent+m.Indent, "")
+	}
+
+	// Handle maps.
+	// Since Go randomizes map iteration, we sort keys for stable output.
+	if v.Kind() == reflect.Map {
+		out.write(`{`)
+		keys := v.MapKeys()
+		sort.Sort(mapKeys(keys))
+		for i, k := range keys {
+			if i > 0 {
+				out.write(`,`)
+			}
+			if m.Indent != "" {
+				out.write("\n")
+				out.write(indent)
+				out.write(m.Indent)
+				out.write(m.Indent)
+			}
+
+			// TODO handle map key prop properly
+			b, err := json.Marshal(k.Interface())
+			if err != nil {
+				return err
+			}
+			s := string(b)
+
+			// If the JSON is not a string value, encode it again to make it one.
+			if !strings.HasPrefix(s, `"`) {
+				b, err := json.Marshal(s)
+				if err != nil {
+					return err
+				}
+				s = string(b)
+			}
+
+			out.write(s)
+			out.write(`:`)
+			if m.Indent != "" {
+				out.write(` `)
+			}
+
+			vprop := prop
+			if prop != nil && prop.MapValProp != nil {
+				vprop = prop.MapValProp
+			}
+			if err := m.marshalValue(out, vprop, v.MapIndex(k), indent+m.Indent); err != nil {
+				return err
+			}
+		}
+		if m.Indent != "" {
+			out.write("\n")
+			out.write(indent)
+			out.write(m.Indent)
+		}
+		out.write(`}`)
+		return out.err
+	}
+
+	// Handle non-finite floats, e.g. NaN, Infinity and -Infinity.
+	if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
+		f := v.Float()
+		var sval string
+		switch {
+		case math.IsInf(f, 1):
+			sval = `"Infinity"`
+		case math.IsInf(f, -1):
+			sval = `"-Infinity"`
+		case math.IsNaN(f):
+			sval = `"NaN"`
+		}
+		if sval != "" {
+			out.write(sval)
+			return out.err
+		}
+	}
+
+	// Default handling defers to the encoding/json library.
+	b, err := json.Marshal(v.Interface())
+	if err != nil {
+		return err
+	}
+	needToQuote := string(b[0]) != `"` && (v.Kind() == reflect.Int64 || v.Kind() == reflect.Uint64)
+	if needToQuote {
+		out.write(`"`)
+	}
+	out.write(string(b))
+	if needToQuote {
+		out.write(`"`)
+	}
+	return out.err
+}
+
+// Unmarshaler is a configurable object for converting from a JSON
+// representation to a protocol buffer object.
+type Unmarshaler struct {
+	// Whether to allow messages to contain unknown fields, as opposed to
+	// failing to unmarshal.
+	AllowUnknownFields bool
+
+	// A custom URL resolver to use when unmarshaling Any messages from JSON.
+	// If unset, the default resolution strategy is to extract the
+	// fully-qualified type name from the type URL and pass that to
+	// proto.MessageType(string).
+	AnyResolver AnyResolver
+}
+
+// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream.
+// This function is lenient and will decode any permutation of the related
+// Marshaler's options.
+func (u *Unmarshaler) UnmarshalNext(dec *json.Decoder, pb proto.Message) error {
+	inputValue := json.RawMessage{}
+	if err := dec.Decode(&inputValue); err != nil {
+		return err
+	}
+	if err := u.unmarshalValue(reflect.ValueOf(pb).Elem(), inputValue, nil); err != nil {
+		return err
+	}
+	return checkRequiredFields(pb)
+}
+
+// Unmarshal unmarshals a JSON object stream into a protocol
+// buffer. This function is lenient and will decode any permutation of the
+// related Marshaler's options.
+func (u *Unmarshaler) Unmarshal(r io.Reader, pb proto.Message) error {
+	dec := json.NewDecoder(r)
+	return u.UnmarshalNext(dec, pb)
+}
+
+// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream.
+// This function is lenient and will decode any permutation of the related
+// Marshaler's options.
+func UnmarshalNext(dec *json.Decoder, pb proto.Message) error {
+	return new(Unmarshaler).UnmarshalNext(dec, pb)
+}
+
+// Unmarshal unmarshals a JSON object stream into a protocol
+// buffer. This function is lenient and will decode any permutation of the
+// related Marshaler's options.
+func Unmarshal(r io.Reader, pb proto.Message) error {
+	return new(Unmarshaler).Unmarshal(r, pb)
+}
+
+// UnmarshalString will populate the fields of a protocol buffer based
+// on a JSON string. This function is lenient and will decode any permutation
+// of the related Marshaler's options.
+func UnmarshalString(str string, pb proto.Message) error {
+	return new(Unmarshaler).Unmarshal(strings.NewReader(str), pb)
+}
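+
+// fromJSONSketch is an editorial usage sketch (not upstream code); target
+// must be a pointer to a generated message.
+func fromJSONSketch(jsonStr string, target proto.Message) error {
+	u := Unmarshaler{AllowUnknownFields: true} // tolerate unknown JSON fields
+	return u.Unmarshal(strings.NewReader(jsonStr), target)
+}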
+
+// unmarshalValue converts/copies a value into the target.
+// prop may be nil.
+func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMessage, prop *proto.Properties) error {
+	targetType := target.Type()
+
+	// Allocate memory for pointer fields.
+	if targetType.Kind() == reflect.Ptr {
+		// If input value is "null" and target is a pointer type, then the field should be treated as not set
+		// UNLESS the target is structpb.Value, in which case it should be set to structpb.NullValue.
+		_, isJSONPBUnmarshaler := target.Interface().(JSONPBUnmarshaler)
+		if string(inputValue) == "null" && targetType != reflect.TypeOf(&stpb.Value{}) && !isJSONPBUnmarshaler {
+			return nil
+		}
+		target.Set(reflect.New(targetType.Elem()))
+
+		return u.unmarshalValue(target.Elem(), inputValue, prop)
+	}
+
+	if jsu, ok := target.Addr().Interface().(JSONPBUnmarshaler); ok {
+		return jsu.UnmarshalJSONPB(u, []byte(inputValue))
+	}
+
+	// Handle well-known types that are not pointers.
+	if w, ok := target.Addr().Interface().(wkt); ok {
+		switch w.XXX_WellKnownType() {
+		case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value",
+			"Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue":
+			return u.unmarshalValue(target.Field(0), inputValue, prop)
+		case "Any":
+			// Use the json.RawMessage pointer type instead of the value type to
+			// support Go versions before 1.8: Go 1.8 changed RawMessage.MarshalJSON
+			// from a pointer receiver to a value receiver, see
+			// https://github.com/golang/go/issues/14493
+			var jsonFields map[string]*json.RawMessage
+			if err := json.Unmarshal(inputValue, &jsonFields); err != nil {
+				return err
+			}
+
+			val, ok := jsonFields["@type"]
+			if !ok || val == nil {
+				return errors.New("Any JSON doesn't have '@type'")
+			}
+
+			var turl string
+			if err := json.Unmarshal([]byte(*val), &turl); err != nil {
+				return fmt.Errorf("can't unmarshal Any's '@type': %q", *val)
+			}
+			target.Field(0).SetString(turl)
+
+			var m proto.Message
+			var err error
+			if u.AnyResolver != nil {
+				m, err = u.AnyResolver.Resolve(turl)
+			} else {
+				m, err = defaultResolveAny(turl)
+			}
+			if err != nil {
+				return err
+			}
+
+			if _, ok := m.(wkt); ok {
+				val, ok := jsonFields["value"]
+				if !ok {
+					return errors.New("Any JSON doesn't have 'value'")
+				}
+
+				if err := u.unmarshalValue(reflect.ValueOf(m).Elem(), *val, nil); err != nil {
+					return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err)
+				}
+			} else {
+				delete(jsonFields, "@type")
+				nestedProto, err := json.Marshal(jsonFields)
+				if err != nil {
+					return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err)
+				}
+
+				if err = u.unmarshalValue(reflect.ValueOf(m).Elem(), nestedProto, nil); err != nil {
+					return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err)
+				}
+			}
+
+			b, err := proto.Marshal(m)
+			if err != nil {
+				return fmt.Errorf("can't marshal proto %T into Any.Value: %v", m, err)
+			}
+			target.Field(1).SetBytes(b)
+
+			return nil
+		case "Duration":
+			unq, err := unquote(string(inputValue))
+			if err != nil {
+				return err
+			}
+
+			d, err := time.ParseDuration(unq)
+			if err != nil {
+				return fmt.Errorf("bad Duration: %v", err)
+			}
+
+			ns := d.Nanoseconds()
+			s := ns / 1e9
+			ns %= 1e9
+			target.Field(0).SetInt(s)
+			target.Field(1).SetInt(ns)
+			return nil
+		case "Timestamp":
+			unq, err := unquote(string(inputValue))
+			if err != nil {
+				return err
+			}
+
+			t, err := time.Parse(time.RFC3339Nano, unq)
+			if err != nil {
+				return fmt.Errorf("bad Timestamp: %v", err)
+			}
+
+			target.Field(0).SetInt(t.Unix())
+			target.Field(1).SetInt(int64(t.Nanosecond()))
+			return nil
+		case "Struct":
+			var m map[string]json.RawMessage
+			if err := json.Unmarshal(inputValue, &m); err != nil {
+				return fmt.Errorf("bad StructValue: %v", err)
+			}
+
+			target.Field(0).Set(reflect.ValueOf(map[string]*stpb.Value{}))
+			for k, jv := range m {
+				pv := &stpb.Value{}
+				if err := u.unmarshalValue(reflect.ValueOf(pv).Elem(), jv, prop); err != nil {
+					return fmt.Errorf("bad value in StructValue for key %q: %v", k, err)
+				}
+				target.Field(0).SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(pv))
+			}
+			return nil
+		case "ListValue":
+			var s []json.RawMessage
+			if err := json.Unmarshal(inputValue, &s); err != nil {
+				return fmt.Errorf("bad ListValue: %v", err)
+			}
+
+			target.Field(0).Set(reflect.ValueOf(make([]*stpb.Value, len(s))))
+			for i, sv := range s {
+				if err := u.unmarshalValue(target.Field(0).Index(i), sv, prop); err != nil {
+					return err
+				}
+			}
+			return nil
+		case "Value":
+			ivStr := string(inputValue)
+			if ivStr == "null" {
+				target.Field(0).Set(reflect.ValueOf(&stpb.Value_NullValue{}))
+			} else if v, err := strconv.ParseFloat(ivStr, 0); err == nil {
+				target.Field(0).Set(reflect.ValueOf(&stpb.Value_NumberValue{v}))
+			} else if v, err := unquote(ivStr); err == nil {
+				target.Field(0).Set(reflect.ValueOf(&stpb.Value_StringValue{v}))
+			} else if v, err := strconv.ParseBool(ivStr); err == nil {
+				target.Field(0).Set(reflect.ValueOf(&stpb.Value_BoolValue{v}))
+			} else if err := json.Unmarshal(inputValue, &[]json.RawMessage{}); err == nil {
+				lv := &stpb.ListValue{}
+				target.Field(0).Set(reflect.ValueOf(&stpb.Value_ListValue{lv}))
+				return u.unmarshalValue(reflect.ValueOf(lv).Elem(), inputValue, prop)
+			} else if err := json.Unmarshal(inputValue, &map[string]json.RawMessage{}); err == nil {
+				sv := &stpb.Struct{}
+				target.Field(0).Set(reflect.ValueOf(&stpb.Value_StructValue{sv}))
+				return u.unmarshalValue(reflect.ValueOf(sv).Elem(), inputValue, prop)
+			} else {
+				return fmt.Errorf("unrecognized type for Value %q", ivStr)
+			}
+			return nil
+		}
+	}
+
+	// Handle enums, which have an underlying type of int32,
+	// and may appear as strings.
+	// The case of an enum appearing as a number is handled
+	// at the bottom of this function.
+	if inputValue[0] == '"' && prop != nil && prop.Enum != "" {
+		vmap := proto.EnumValueMap(prop.Enum)
+		// Don't need to do unquoting; valid enum names
+		// are from a limited character set.
+		s := inputValue[1 : len(inputValue)-1]
+		n, ok := vmap[string(s)]
+		if !ok {
+			return fmt.Errorf("unknown value %q for enum %s", s, prop.Enum)
+		}
+		if target.Kind() == reflect.Ptr { // proto2
+			target.Set(reflect.New(targetType.Elem()))
+			target = target.Elem()
+		}
+		if targetType.Kind() != reflect.Int32 {
+			return fmt.Errorf("invalid target %q for enum %s", targetType.Kind(), prop.Enum)
+		}
+		target.SetInt(int64(n))
+		return nil
+	}
+
+	// Handle nested messages.
+	if targetType.Kind() == reflect.Struct {
+		var jsonFields map[string]json.RawMessage
+		if err := json.Unmarshal(inputValue, &jsonFields); err != nil {
+			return err
+		}
+
+		consumeField := func(prop *proto.Properties) (json.RawMessage, bool) {
+			// Be liberal in what names we accept; both orig_name and camelName are okay.
+			fieldNames := acceptedJSONFieldNames(prop)
+
+			vOrig, okOrig := jsonFields[fieldNames.orig]
+			vCamel, okCamel := jsonFields[fieldNames.camel]
+			if !okOrig && !okCamel {
+				return nil, false
+			}
+			// If, for some reason, both are present in the data, favour the camelName.
+			var raw json.RawMessage
+			if okOrig {
+				raw = vOrig
+				delete(jsonFields, fieldNames.orig)
+			}
+			if okCamel {
+				raw = vCamel
+				delete(jsonFields, fieldNames.camel)
+			}
+			return raw, true
+		}
+
+		sprops := proto.GetProperties(targetType)
+		for i := 0; i < target.NumField(); i++ {
+			ft := target.Type().Field(i)
+			if strings.HasPrefix(ft.Name, "XXX_") {
+				continue
+			}
+
+			valueForField, ok := consumeField(sprops.Prop[i])
+			if !ok {
+				continue
+			}
+
+			if err := u.unmarshalValue(target.Field(i), valueForField, sprops.Prop[i]); err != nil {
+				return err
+			}
+		}
+		// Check for any oneof fields.
+		if len(jsonFields) > 0 {
+			for _, oop := range sprops.OneofTypes {
+				raw, ok := consumeField(oop.Prop)
+				if !ok {
+					continue
+				}
+				nv := reflect.New(oop.Type.Elem())
+				target.Field(oop.Field).Set(nv)
+				if err := u.unmarshalValue(nv.Elem().Field(0), raw, oop.Prop); err != nil {
+					return err
+				}
+			}
+		}
+		// Handle proto2 extensions.
+		if len(jsonFields) > 0 {
+			if ep, ok := target.Addr().Interface().(proto.Message); ok {
+				for _, ext := range proto.RegisteredExtensions(ep) {
+					name := fmt.Sprintf("[%s]", ext.Name)
+					raw, ok := jsonFields[name]
+					if !ok {
+						continue
+					}
+					delete(jsonFields, name)
+					nv := reflect.New(reflect.TypeOf(ext.ExtensionType).Elem())
+					if err := u.unmarshalValue(nv.Elem(), raw, nil); err != nil {
+						return err
+					}
+					if err := proto.SetExtension(ep, ext, nv.Interface()); err != nil {
+						return err
+					}
+				}
+			}
+		}
+		if !u.AllowUnknownFields && len(jsonFields) > 0 {
+			// Pick any field to be the scapegoat.
+			var f string
+			for fname := range jsonFields {
+				f = fname
+				break
+			}
+			return fmt.Errorf("unknown field %q in %v", f, targetType)
+		}
+		return nil
+	}
+
+	// Handle arrays (which aren't encoded bytes)
+	if targetType.Kind() == reflect.Slice && targetType.Elem().Kind() != reflect.Uint8 {
+		var slc []json.RawMessage
+		if err := json.Unmarshal(inputValue, &slc); err != nil {
+			return err
+		}
+		if slc != nil {
+			l := len(slc)
+			target.Set(reflect.MakeSlice(targetType, l, l))
+			for i := 0; i < l; i++ {
+				if err := u.unmarshalValue(target.Index(i), slc[i], prop); err != nil {
+					return err
+				}
+			}
+		}
+		return nil
+	}
+
+	// Handle maps (whose keys are always strings)
+	if targetType.Kind() == reflect.Map {
+		var mp map[string]json.RawMessage
+		if err := json.Unmarshal(inputValue, &mp); err != nil {
+			return err
+		}
+		if mp != nil {
+			target.Set(reflect.MakeMap(targetType))
+			for ks, raw := range mp {
+				// Unmarshal map key. The core json library already decoded the key into a
+				// string, so we handle that specially. Other types were quoted post-serialization.
+				var k reflect.Value
+				if targetType.Key().Kind() == reflect.String {
+					k = reflect.ValueOf(ks)
+				} else {
+					k = reflect.New(targetType.Key()).Elem()
+					var kprop *proto.Properties
+					if prop != nil && prop.MapKeyProp != nil {
+						kprop = prop.MapKeyProp
+					}
+					if err := u.unmarshalValue(k, json.RawMessage(ks), kprop); err != nil {
+						return err
+					}
+				}
+
+				// Unmarshal map value.
+				v := reflect.New(targetType.Elem()).Elem()
+				var vprop *proto.Properties
+				if prop != nil && prop.MapValProp != nil {
+					vprop = prop.MapValProp
+				}
+				if err := u.unmarshalValue(v, raw, vprop); err != nil {
+					return err
+				}
+				target.SetMapIndex(k, v)
+			}
+		}
+		return nil
+	}
+
+	// Non-finite numbers can be encoded as strings.
+	isFloat := targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64
+	if isFloat {
+		if num, ok := nonFinite[string(inputValue)]; ok {
+			target.SetFloat(num)
+			return nil
+		}
+	}
+
+	// Integers and floats can be encoded as strings. In this case we drop
+	// the quotes and proceed as normal.
+	isNum := targetType.Kind() == reflect.Int64 || targetType.Kind() == reflect.Uint64 ||
+		targetType.Kind() == reflect.Int32 || targetType.Kind() == reflect.Uint32 ||
+		targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64
+	if isNum && strings.HasPrefix(string(inputValue), `"`) {
+		inputValue = inputValue[1 : len(inputValue)-1]
+	}
+
+	// Use the encoding/json library for parsing other value types.
+	return json.Unmarshal(inputValue, target.Addr().Interface())
+}
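+
+// As a usage sketch (pb.MyMessage is a hypothetical generated message type;
+// Unmarshaler and its AllowUnknownFields field are defined earlier in this
+// file):
+//
+//	u := jsonpb.Unmarshaler{AllowUnknownFields: true}
+//	var msg pb.MyMessage
+//	if err := u.Unmarshal(strings.NewReader(`{"name":"x"}`), &msg); err != nil {
+//		// with AllowUnknownFields set, unrecognized JSON fields are
+//		// skipped instead of producing an "unknown field" error
+//	}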
+
+func unquote(s string) (string, error) {
+	var ret string
+	err := json.Unmarshal([]byte(s), &ret)
+	return ret, err
+}
+
+// jsonProperties returns the parsed proto.Properties for the field and corrects the JSONName attribute.
+func jsonProperties(f reflect.StructField, origName bool) *proto.Properties {
+	var prop proto.Properties
+	prop.Init(f.Type, f.Name, f.Tag.Get("protobuf"), &f)
+	if origName || prop.JSONName == "" {
+		prop.JSONName = prop.OrigName
+	}
+	return &prop
+}
+
+type fieldNames struct {
+	orig, camel string
+}
+
+func acceptedJSONFieldNames(prop *proto.Properties) fieldNames {
+	opts := fieldNames{orig: prop.OrigName, camel: prop.OrigName}
+	if prop.JSONName != "" {
+		opts.camel = prop.JSONName
+	}
+	return opts
+}
+
+// Writer wrapper inspired by https://blog.golang.org/errors-are-values
+type errWriter struct {
+	writer io.Writer
+	err    error
+}
+
+func (w *errWriter) write(str string) {
+	if w.err != nil {
+		return
+	}
+	_, w.err = w.writer.Write([]byte(str))
+}
+
+// Map fields may have key types of non-float scalars, strings and enums.
+// The easiest way to sort them in some deterministic order is to use fmt.
+// If this turns out to be inefficient we can always consider other options,
+// such as doing a Schwartzian transform.
+//
+// Numeric keys are sorted in numeric order per
+// https://developers.google.com/protocol-buffers/docs/proto#maps.
+type mapKeys []reflect.Value
+
+func (s mapKeys) Len() int      { return len(s) }
+func (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s mapKeys) Less(i, j int) bool {
+	if k := s[i].Kind(); k == s[j].Kind() {
+		switch k {
+		case reflect.String:
+			return s[i].String() < s[j].String()
+		case reflect.Int32, reflect.Int64:
+			return s[i].Int() < s[j].Int()
+		case reflect.Uint32, reflect.Uint64:
+			return s[i].Uint() < s[j].Uint()
+		}
+	}
+	return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface())
+}
+
+// checkRequiredFields returns an error if any required field in the given proto message is not set.
+// This function is used by both Marshal and Unmarshal.  While required fields only exist in a
+// proto2 message, a proto3 message can contain proto2 message(s).
+func checkRequiredFields(pb proto.Message) error {
+	// Most well-known type messages do not contain required fields.  The "Any" type may contain
+	// a message that has required fields.
+	//
+	// When an Any message is being marshaled, the code will have invoked proto.Unmarshal on the
+	// Any.Value field in order to transform it into JSON, and that call should have returned an
+	// error if a required field is not set in the embedded message.
+	//
+	// When an Any message is being unmarshaled, the code will have invoked proto.Marshal on the
+	// embedded message to store the serialized message in Any.Value field, and that should have
+	// returned an error if a required field is not set.
+	if _, ok := pb.(wkt); ok {
+		return nil
+	}
+
+	v := reflect.ValueOf(pb)
+	// Skip message if it is not a struct pointer.
+	if v.Kind() != reflect.Ptr {
+		return nil
+	}
+	v = v.Elem()
+	if v.Kind() != reflect.Struct {
+		return nil
+	}
+
+	for i := 0; i < v.NumField(); i++ {
+		field := v.Field(i)
+		sfield := v.Type().Field(i)
+
+		if sfield.PkgPath != "" {
+			// a non-blank PkgPath means the field is unexported; skip it
+			continue
+		}
+
+		if strings.HasPrefix(sfield.Name, "XXX_") {
+			continue
+		}
+
+		// Oneof field is an interface implemented by wrapper structs containing the actual oneof
+		// field, i.e. an interface containing &T{real_value}.
+		if sfield.Tag.Get("protobuf_oneof") != "" {
+			if field.Kind() != reflect.Interface {
+				continue
+			}
+			v := field.Elem()
+			if v.Kind() != reflect.Ptr || v.IsNil() {
+				continue
+			}
+			v = v.Elem()
+			if v.Kind() != reflect.Struct || v.NumField() < 1 {
+				continue
+			}
+			field = v.Field(0)
+			sfield = v.Type().Field(0)
+		}
+
+		protoTag := sfield.Tag.Get("protobuf")
+		if protoTag == "" {
+			continue
+		}
+		var prop proto.Properties
+		prop.Init(sfield.Type, sfield.Name, protoTag, &sfield)
+
+		switch field.Kind() {
+		case reflect.Map:
+			if field.IsNil() {
+				continue
+			}
+			// Check each map value.
+			keys := field.MapKeys()
+			for _, k := range keys {
+				v := field.MapIndex(k)
+				if err := checkRequiredFieldsInValue(v); err != nil {
+					return err
+				}
+			}
+		case reflect.Slice:
+			// Handle non-repeated type, e.g. bytes.
+			if !prop.Repeated {
+				if prop.Required && field.IsNil() {
+					return fmt.Errorf("required field %q is not set", prop.Name)
+				}
+				continue
+			}
+
+			// Handle repeated type.
+			if field.IsNil() {
+				continue
+			}
+			// Check each slice item.
+			for i := 0; i < field.Len(); i++ {
+				v := field.Index(i)
+				if err := checkRequiredFieldsInValue(v); err != nil {
+					return err
+				}
+			}
+		case reflect.Ptr:
+			if field.IsNil() {
+				if prop.Required {
+					return fmt.Errorf("required field %q is not set", prop.Name)
+				}
+				continue
+			}
+			if err := checkRequiredFieldsInValue(field); err != nil {
+				return err
+			}
+		}
+	}
+
+	// Handle proto2 extensions.
+	for _, ext := range proto.RegisteredExtensions(pb) {
+		if !proto.HasExtension(pb, ext) {
+			continue
+		}
+		ep, err := proto.GetExtension(pb, ext)
+		if err != nil {
+			return err
+		}
+		err = checkRequiredFieldsInValue(reflect.ValueOf(ep))
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func checkRequiredFieldsInValue(v reflect.Value) error {
+	if pm, ok := v.Interface().(proto.Message); ok {
+		return checkRequiredFields(pm)
+	}
+	return nil
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto b/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto
new file mode 100644
index 0000000..7d7808e
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto
@@ -0,0 +1,96 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option cc_enable_arenas = true;
+option go_package = "github.com/golang/protobuf/ptypes/struct;structpb";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "StructProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+
+
+// `Struct` represents a structured data value, consisting of fields
+// which map to dynamically typed values. In some languages, `Struct`
+// might be supported by a native representation. For example, in
+// scripting languages like JS a struct is represented as an
+// object. The details of that representation are described together
+// with the proto support for the language.
+//
+// The JSON representation for `Struct` is a JSON object.
+message Struct {
+  // Unordered map of dynamically typed values.
+  map<string, Value> fields = 1;
+}
+
+// `Value` represents a dynamically typed value which can be either
+// null, a number, a string, a boolean, a recursive struct value, or a
+// list of values. A producer of value is expected to set one of these
+// variants; absence of any variant indicates an error.
+//
+// The JSON representation for `Value` is a JSON value.
+message Value {
+  // The kind of value.
+  oneof kind {
+    // Represents a null value.
+    NullValue null_value = 1;
+    // Represents a double value.
+    double number_value = 2;
+    // Represents a string value.
+    string string_value = 3;
+    // Represents a boolean value.
+    bool bool_value = 4;
+    // Represents a structured value.
+    Struct struct_value = 5;
+    // Represents a repeated `Value`.
+    ListValue list_value = 6;
+  }
+}
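+
+// For example, the JSON object `{"x": 1.5, "ok": true}` corresponds to a
+// `Struct` whose text-format sketch looks like:
+//
+//   fields {
+//     key: "x"
+//     value { number_value: 1.5 }
+//   }
+//   fields {
+//     key: "ok"
+//     value { bool_value: true }
+//   }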
+
+// `NullValue` is a singleton enumeration to represent the null value for the
+// `Value` type union.
+//
+//  The JSON representation for `NullValue` is JSON `null`.
+enum NullValue {
+  // Null value.
+  NULL_VALUE = 0;
+}
+
+// `ListValue` is a wrapper around a repeated field of values.
+//
+// The JSON representation for `ListValue` is a JSON array.
+message ListValue {
+  // Repeated field of dynamically typed values.
+  repeated Value values = 1;
+}
diff --git a/vendor/github.com/google/btree/.travis.yml b/vendor/github.com/google/btree/.travis.yml
new file mode 100644
index 0000000..4f2ee4d
--- /dev/null
+++ b/vendor/github.com/google/btree/.travis.yml
@@ -0,0 +1 @@
+language: go
diff --git a/vendor/github.com/google/btree/LICENSE b/vendor/github.com/google/btree/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/github.com/google/btree/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/google/btree/README.md b/vendor/github.com/google/btree/README.md
new file mode 100644
index 0000000..6062a4d
--- /dev/null
+++ b/vendor/github.com/google/btree/README.md
@@ -0,0 +1,12 @@
+# BTree implementation for Go
+
+![Travis CI Build Status](https://api.travis-ci.org/google/btree.svg?branch=master)
+
+This package provides an in-memory B-Tree implementation for Go, useful as
+an ordered, mutable data structure.
+
+The API is based on the wonderful
+http://godoc.org/github.com/petar/GoLLRB/llrb, and is meant to allow btree to
+act as a drop-in replacement for gollrb trees.
+
+See http://godoc.org/github.com/google/btree for documentation.
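+
+A minimal usage sketch (using the package's built-in `Int` item type; see the
+godoc above for the full API):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/google/btree"
+)
+
+func main() {
+	tr := btree.New(8) // a B-Tree of degree 8
+	for i := 0; i < 10; i++ {
+		tr.ReplaceOrInsert(btree.Int(i))
+	}
+	fmt.Println(tr.Get(btree.Int(3))) // prints 3
+	fmt.Println(tr.Max())             // prints 9
+	tr.Ascend(func(i btree.Item) bool {
+		fmt.Println(i) // items in ascending order
+		return true    // return false to stop early
+	})
+}
+```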
diff --git a/vendor/github.com/google/btree/btree.go b/vendor/github.com/google/btree/btree.go
new file mode 100644
index 0000000..6ff062f
--- /dev/null
+++ b/vendor/github.com/google/btree/btree.go
@@ -0,0 +1,890 @@
+// Copyright 2014 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package btree implements in-memory B-Trees of arbitrary degree.
+//
+// btree implements an in-memory B-Tree for use as an ordered data structure.
+// It is not meant for persistent storage solutions.
+//
+// It has a flatter structure than an equivalent red-black or other binary tree,
+// which in some cases yields better memory usage and/or performance.
+// See some discussion on the matter here:
+//   http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html
+// Note, though, that this project is in no way related to the C++ B-Tree
+// implementation written about there.
+//
+// Within this tree, each node contains a slice of items and a (possibly nil)
+// slice of children.  For basic numeric values or raw structs, this can cause
+// efficiency differences when compared to equivalent C++ template code that
+// stores values in arrays within the node:
+//   * The overhead of storing values as interfaces (each
+//     value needs to be stored as the value itself, then 2 words for the
+//     interface pointing to that value and its type) results in higher
+//     memory use.
+//   * Since interfaces can point to values anywhere in memory, values are
+//     most likely not stored in contiguous blocks, resulting in a higher
+//     number of cache misses.
+// These issues don't tend to matter, though, when working with strings or other
+// heap-allocated structures, since C++-equivalent structures also must store
+// pointers and also distribute their values across the heap.
+//
+// This implementation is designed to be a drop-in replacement for gollrb.LLRB
+// trees (http://github.com/petar/gollrb), an excellent and probably the most
+// widely used ordered tree implementation in the Go ecosystem currently.
+// Its functions, therefore, exactly mirror those of
+// llrb.LLRB where possible.  Unlike gollrb, though, we currently don't
+// support storing multiple equivalent values.
+package btree
+
+import (
+	"fmt"
+	"io"
+	"sort"
+	"strings"
+	"sync"
+)
+
+// Item represents a single object in the tree.
+type Item interface {
+	// Less tests whether the current item is less than the given argument.
+	//
+	// This must provide a strict weak ordering.
+	// If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only
+	// hold one of either a or b in the tree).
+	Less(than Item) bool
+}
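+
+// For illustration, a user-defined item keyed by an integer could look like
+// this (a sketch; byKey is hypothetical and not part of this package):
+//
+//	type byKey struct{ key int }
+//
+//	func (a byKey) Less(than Item) bool { return a.key < than.(byKey).key }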
+
+const (
+	DefaultFreeListSize = 32
+)
+
+var (
+	nilItems    = make(items, 16)
+	nilChildren = make(children, 16)
+)
+
+// FreeList represents a free list of btree nodes. By default each
+// BTree has its own FreeList, but multiple BTrees can share the same
+// FreeList.
+// Two Btrees using the same freelist are safe for concurrent write access.
+type FreeList struct {
+	mu       sync.Mutex
+	freelist []*node
+}
+
+// NewFreeList creates a new free list.
+// size is the maximum size of the returned free list.
+func NewFreeList(size int) *FreeList {
+	return &FreeList{freelist: make([]*node, 0, size)}
+}
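+
+// For example, two trees can share a single free list (a sketch; see
+// NewWithFreeList below):
+//
+//	fl := NewFreeList(DefaultFreeListSize)
+//	t1 := NewWithFreeList(8, fl)
+//	t2 := NewWithFreeList(8, fl)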
+
+func (f *FreeList) newNode() (n *node) {
+	f.mu.Lock()
+	index := len(f.freelist) - 1
+	if index < 0 {
+		f.mu.Unlock()
+		return new(node)
+	}
+	n = f.freelist[index]
+	f.freelist[index] = nil
+	f.freelist = f.freelist[:index]
+	f.mu.Unlock()
+	return
+}
+
+// freeNode adds the given node to the list, returning true if it was added
+// and false if it was discarded.
+func (f *FreeList) freeNode(n *node) (out bool) {
+	f.mu.Lock()
+	if len(f.freelist) < cap(f.freelist) {
+		f.freelist = append(f.freelist, n)
+		out = true
+	}
+	f.mu.Unlock()
+	return
+}
+
+// ItemIterator allows callers of Ascend* to iterate in-order over portions of
+// the tree.  When this function returns false, iteration will stop and the
+// associated Ascend* function will immediately return.
+type ItemIterator func(i Item) bool
+
+// New creates a new B-Tree with the given degree.
+//
+// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items
+// and 2-4 children).
+func New(degree int) *BTree {
+	return NewWithFreeList(degree, NewFreeList(DefaultFreeListSize))
+}
+
+// NewWithFreeList creates a new B-Tree that uses the given node free list.
+func NewWithFreeList(degree int, f *FreeList) *BTree {
+	if degree <= 1 {
+		panic("bad degree")
+	}
+	return &BTree{
+		degree: degree,
+		cow:    &copyOnWriteContext{freelist: f},
+	}
+}
+
+// items stores items in a node.
+type items []Item
+
+// insertAt inserts a value into the given index, pushing all subsequent values
+// forward.
+func (s *items) insertAt(index int, item Item) {
+	*s = append(*s, nil)
+	if index < len(*s) {
+		copy((*s)[index+1:], (*s)[index:])
+	}
+	(*s)[index] = item
+}
+
+// removeAt removes a value at a given index, pulling all subsequent values
+// back.
+func (s *items) removeAt(index int) Item {
+	item := (*s)[index]
+	copy((*s)[index:], (*s)[index+1:])
+	(*s)[len(*s)-1] = nil
+	*s = (*s)[:len(*s)-1]
+	return item
+}
+
+// pop removes and returns the last element in the list.
+func (s *items) pop() (out Item) {
+	index := len(*s) - 1
+	out = (*s)[index]
+	(*s)[index] = nil
+	*s = (*s)[:index]
+	return
+}
+
+// truncate truncates this instance at index so that it contains only the
+// first index items. index must be less than or equal to length.
+func (s *items) truncate(index int) {
+	var toClear items
+	*s, toClear = (*s)[:index], (*s)[index:]
+	for len(toClear) > 0 {
+		toClear = toClear[copy(toClear, nilItems):]
+	}
+}
+
+// find returns the index where the given item should be inserted into this
+// list.  'found' is true if the item already exists in the list at the given
+// index.
+func (s items) find(item Item) (index int, found bool) {
+	i := sort.Search(len(s), func(i int) bool {
+		return item.Less(s[i])
+	})
+	if i > 0 && !s[i-1].Less(item) {
+		return i - 1, true
+	}
+	return i, false
+}
+
+// children stores child nodes in a node.
+type children []*node
+
+// insertAt inserts a value into the given index, pushing all subsequent values
+// forward.
+func (s *children) insertAt(index int, n *node) {
+	*s = append(*s, nil)
+	if index < len(*s) {
+		copy((*s)[index+1:], (*s)[index:])
+	}
+	(*s)[index] = n
+}
+
+// removeAt removes a value at a given index, pulling all subsequent values
+// back.
+func (s *children) removeAt(index int) *node {
+	n := (*s)[index]
+	copy((*s)[index:], (*s)[index+1:])
+	(*s)[len(*s)-1] = nil
+	*s = (*s)[:len(*s)-1]
+	return n
+}
+
+// pop removes and returns the last element in the list.
+func (s *children) pop() (out *node) {
+	index := len(*s) - 1
+	out = (*s)[index]
+	(*s)[index] = nil
+	*s = (*s)[:index]
+	return
+}
+
+// truncate truncates this instance at index so that it contains only the
+// first index children. index must be less than or equal to length.
+func (s *children) truncate(index int) {
+	var toClear children
+	*s, toClear = (*s)[:index], (*s)[index:]
+	for len(toClear) > 0 {
+		toClear = toClear[copy(toClear, nilChildren):]
+	}
+}
+
+// node is an internal node in a tree.
+//
+// It must at all times maintain the invariant that either
+//   * len(children) == 0, len(items) unconstrained
+//   * len(children) == len(items) + 1
+type node struct {
+	items    items
+	children children
+	cow      *copyOnWriteContext
+}
+
+func (n *node) mutableFor(cow *copyOnWriteContext) *node {
+	if n.cow == cow {
+		return n
+	}
+	out := cow.newNode()
+	if cap(out.items) >= len(n.items) {
+		out.items = out.items[:len(n.items)]
+	} else {
+		out.items = make(items, len(n.items), cap(n.items))
+	}
+	copy(out.items, n.items)
+	// Copy children
+	if cap(out.children) >= len(n.children) {
+		out.children = out.children[:len(n.children)]
+	} else {
+		out.children = make(children, len(n.children), cap(n.children))
+	}
+	copy(out.children, n.children)
+	return out
+}
+
+func (n *node) mutableChild(i int) *node {
+	c := n.children[i].mutableFor(n.cow)
+	n.children[i] = c
+	return c
+}
+
+// split splits the given node at the given index.  The current node shrinks,
+// and this function returns the item that existed at that index and a new node
+// containing all items/children after it.
+func (n *node) split(i int) (Item, *node) {
+	item := n.items[i]
+	next := n.cow.newNode()
+	next.items = append(next.items, n.items[i+1:]...)
+	n.items.truncate(i)
+	if len(n.children) > 0 {
+		next.children = append(next.children, n.children[i+1:]...)
+		n.children.truncate(i + 1)
+	}
+	return item, next
+}
+
+// maybeSplitChild checks if a child should be split, and if so splits it.
+// Returns whether or not a split occurred.
+func (n *node) maybeSplitChild(i, maxItems int) bool {
+	if len(n.children[i].items) < maxItems {
+		return false
+	}
+	first := n.mutableChild(i)
+	item, second := first.split(maxItems / 2)
+	n.items.insertAt(i, item)
+	n.children.insertAt(i+1, second)
+	return true
+}
+
+// insert inserts an item into the subtree rooted at this node, making sure
+// no nodes in the subtree exceed maxItems items.  Should an equivalent item
+// be found/replaced by insert, it will be returned.
+func (n *node) insert(item Item, maxItems int) Item {
+	i, found := n.items.find(item)
+	if found {
+		out := n.items[i]
+		n.items[i] = item
+		return out
+	}
+	if len(n.children) == 0 {
+		n.items.insertAt(i, item)
+		return nil
+	}
+	if n.maybeSplitChild(i, maxItems) {
+		inTree := n.items[i]
+		switch {
+		case item.Less(inTree):
+			// no change, we want first split node
+		case inTree.Less(item):
+			i++ // we want second split node
+		default:
+			out := n.items[i]
+			n.items[i] = item
+			return out
+		}
+	}
+	return n.mutableChild(i).insert(item, maxItems)
+}
+
+// get finds the given key in the subtree and returns it.
+func (n *node) get(key Item) Item {
+	i, found := n.items.find(key)
+	if found {
+		return n.items[i]
+	} else if len(n.children) > 0 {
+		return n.children[i].get(key)
+	}
+	return nil
+}
+
+// min returns the first item in the subtree.
+func min(n *node) Item {
+	if n == nil {
+		return nil
+	}
+	for len(n.children) > 0 {
+		n = n.children[0]
+	}
+	if len(n.items) == 0 {
+		return nil
+	}
+	return n.items[0]
+}
+
+// max returns the last item in the subtree.
+func max(n *node) Item {
+	if n == nil {
+		return nil
+	}
+	for len(n.children) > 0 {
+		n = n.children[len(n.children)-1]
+	}
+	if len(n.items) == 0 {
+		return nil
+	}
+	return n.items[len(n.items)-1]
+}
+
+// toRemove details what item to remove in a node.remove call.
+type toRemove int
+
+const (
+	removeItem toRemove = iota // removes the given item
+	removeMin                  // removes smallest item in the subtree
+	removeMax                  // removes largest item in the subtree
+)
+
+// remove removes an item from the subtree rooted at this node.
+func (n *node) remove(item Item, minItems int, typ toRemove) Item {
+	var i int
+	var found bool
+	switch typ {
+	case removeMax:
+		if len(n.children) == 0 {
+			return n.items.pop()
+		}
+		i = len(n.items)
+	case removeMin:
+		if len(n.children) == 0 {
+			return n.items.removeAt(0)
+		}
+		i = 0
+	case removeItem:
+		i, found = n.items.find(item)
+		if len(n.children) == 0 {
+			if found {
+				return n.items.removeAt(i)
+			}
+			return nil
+		}
+	default:
+		panic("invalid type")
+	}
+	// If we get to here, we have children.
+	if len(n.children[i].items) <= minItems {
+		return n.growChildAndRemove(i, item, minItems, typ)
+	}
+	child := n.mutableChild(i)
+	// Either we had enough items to begin with, or we've done some
+	// merging/stealing, because we've got enough now and we're ready to return
+	// stuff.
+	if found {
+		// The item exists at index 'i', and the child we've selected can give us a
+		// predecessor, since if we've gotten here it's got > minItems items in it.
+		out := n.items[i]
+		// We use our special-case 'remove' call with typ=removeMax to pull the
+		// predecessor of item i (the rightmost leaf of our immediate left child)
+		// and set it into where we pulled the item from.
+		n.items[i] = child.remove(nil, minItems, removeMax)
+		return out
+	}
+	// Final recursive call.  Once we're here, we know that the item isn't in this
+	// node and that the child is big enough to remove from.
+	return child.remove(item, minItems, typ)
+}
+
+// growChildAndRemove grows child 'i' to make sure it's possible to remove an
+// item from it while keeping it at minItems, then calls remove to actually
+// remove it.
+//
+// Most documentation says we have to do two sets of special casing:
+//   1) item is in this node
+//   2) item is in child
+// In both cases, we need to handle the two subcases:
+//   A) node has enough values that it can spare one
+//   B) node doesn't have enough values
+// For the latter, we have to check:
+//   a) left sibling has node to spare
+//   b) right sibling has node to spare
+//   c) we must merge
+// To simplify our code here, we handle cases #1 and #2 the same:
+// If a node doesn't have enough items, we make sure it does (using a,b,c).
+// We then simply redo our remove call, and the second time (regardless of
+// whether we're in case 1 or 2), we'll have enough items and can guarantee
+// that we hit case A.
+func (n *node) growChildAndRemove(i int, item Item, minItems int, typ toRemove) Item {
+	if i > 0 && len(n.children[i-1].items) > minItems {
+		// Steal from left child
+		child := n.mutableChild(i)
+		stealFrom := n.mutableChild(i - 1)
+		stolenItem := stealFrom.items.pop()
+		child.items.insertAt(0, n.items[i-1])
+		n.items[i-1] = stolenItem
+		if len(stealFrom.children) > 0 {
+			child.children.insertAt(0, stealFrom.children.pop())
+		}
+	} else if i < len(n.items) && len(n.children[i+1].items) > minItems {
+		// steal from right child
+		child := n.mutableChild(i)
+		stealFrom := n.mutableChild(i + 1)
+		stolenItem := stealFrom.items.removeAt(0)
+		child.items = append(child.items, n.items[i])
+		n.items[i] = stolenItem
+		if len(stealFrom.children) > 0 {
+			child.children = append(child.children, stealFrom.children.removeAt(0))
+		}
+	} else {
+		if i >= len(n.items) {
+			i--
+		}
+		child := n.mutableChild(i)
+		// merge with right child
+		mergeItem := n.items.removeAt(i)
+		mergeChild := n.children.removeAt(i + 1)
+		child.items = append(child.items, mergeItem)
+		child.items = append(child.items, mergeChild.items...)
+		child.children = append(child.children, mergeChild.children...)
+		n.cow.freeNode(mergeChild)
+	}
+	return n.remove(item, minItems, typ)
+}
+
+type direction int
+
+const (
+	descend = direction(-1)
+	ascend  = direction(+1)
+)
+
+// iterate provides a simple method for iterating over elements in the tree.
+//
+// When ascending, the 'start' should be less than 'stop' and when descending,
+// the 'start' should be greater than 'stop'. Setting 'includeStart' to true
+// will force the iterator to include the first item when it equals 'start',
+// thus creating "greaterOrEqual" or "lessThanEqual" queries rather than just
+// "greaterThan" or "lessThan" ones.
+func (n *node) iterate(dir direction, start, stop Item, includeStart bool, hit bool, iter ItemIterator) (bool, bool) {
+	var ok, found bool
+	var index int
+	switch dir {
+	case ascend:
+		if start != nil {
+			index, _ = n.items.find(start)
+		}
+		for i := index; i < len(n.items); i++ {
+			if len(n.children) > 0 {
+				if hit, ok = n.children[i].iterate(dir, start, stop, includeStart, hit, iter); !ok {
+					return hit, false
+				}
+			}
+			if !includeStart && !hit && start != nil && !start.Less(n.items[i]) {
+				hit = true
+				continue
+			}
+			hit = true
+			if stop != nil && !n.items[i].Less(stop) {
+				return hit, false
+			}
+			if !iter(n.items[i]) {
+				return hit, false
+			}
+		}
+		if len(n.children) > 0 {
+			if hit, ok = n.children[len(n.children)-1].iterate(dir, start, stop, includeStart, hit, iter); !ok {
+				return hit, false
+			}
+		}
+	case descend:
+		if start != nil {
+			index, found = n.items.find(start)
+			if !found {
+				index = index - 1
+			}
+		} else {
+			index = len(n.items) - 1
+		}
+		for i := index; i >= 0; i-- {
+			if start != nil && !n.items[i].Less(start) {
+				if !includeStart || hit || start.Less(n.items[i]) {
+					continue
+				}
+			}
+			if len(n.children) > 0 {
+				if hit, ok = n.children[i+1].iterate(dir, start, stop, includeStart, hit, iter); !ok {
+					return hit, false
+				}
+			}
+			if stop != nil && !stop.Less(n.items[i]) {
+				return hit, false
+			}
+			hit = true
+			if !iter(n.items[i]) {
+				return hit, false
+			}
+		}
+		if len(n.children) > 0 {
+			if hit, ok = n.children[0].iterate(dir, start, stop, includeStart, hit, iter); !ok {
+				return hit, false
+			}
+		}
+	}
+	return hit, true
+}
+
+// Used for testing/debugging purposes.
+func (n *node) print(w io.Writer, level int) {
+	fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat("  ", level), n.items)
+	for _, c := range n.children {
+		c.print(w, level+1)
+	}
+}
+
+// BTree is an implementation of a B-Tree.
+//
+// BTree stores Item instances in an ordered structure, allowing easy insertion,
+// removal, and iteration.
+//
+// Write operations are not safe for concurrent mutation by multiple
+// goroutines, but Read operations are.
+type BTree struct {
+	degree int
+	length int
+	root   *node
+	cow    *copyOnWriteContext
+}
+
+// copyOnWriteContext pointers determine node ownership... a tree with a write
+// context equivalent to a node's write context is allowed to modify that node.
+// A tree whose write context does not match a node's is not allowed to modify
+// it, and must create a new, writable copy (i.e. it's a Clone).
+//
+// When doing any write operation, we maintain the invariant that the current
+// node's context is equal to the context of the tree that requested the write.
+// We do this by, before we descend into any node, creating a copy with the
+// correct context if the contexts don't match.
+//
+// Since the node we're currently visiting on any write has the requesting
+// tree's context, that node is modifiable in place.  Children of that node may
+// not share context, but before we descend into them, we'll make a mutable
+// copy.
+type copyOnWriteContext struct {
+	freelist *FreeList
+}
+
+// Clone clones the btree, lazily.  Clone should not be called concurrently,
+// but the original tree (t) and the new tree (t2) can be used concurrently
+// once the Clone call completes.
+//
+// The internal tree structure of t is marked read-only and shared between t and
+// t2.  Writes to both t and t2 use copy-on-write logic, creating new nodes
+// whenever one of t's original nodes would have been modified.  Read operations
+// should see no performance degradation.  Write operations for both t and t2
+// will initially experience minor slow-downs caused by additional allocs and
+// copies due to the aforementioned copy-on-write logic, but should converge to
+// the performance characteristics of the original tree.
+func (t *BTree) Clone() (t2 *BTree) {
+	// Create two entirely new copy-on-write contexts.
+	// This operation effectively creates three trees:
+	//   the original, shared nodes (old t.cow)
+	//   the new t.cow nodes
+	//   the new out.cow nodes
+	cow1, cow2 := *t.cow, *t.cow
+	out := *t
+	t.cow = &cow1
+	out.cow = &cow2
+	return &out
+}
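+
+// For example (a sketch): after Clone, writes to each tree are isolated:
+//
+//	t1 := New(8)
+//	t1.ReplaceOrInsert(Int(1))
+//	t2 := t1.Clone()
+//	t2.ReplaceOrInsert(Int(2))
+//	// t1 now holds only Int(1); t2 holds Int(1) and Int(2).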
+
+// maxItems returns the max number of items to allow per node.
+func (t *BTree) maxItems() int {
+	return t.degree*2 - 1
+}
+
+// minItems returns the min number of items to allow per node (ignored for the
+// root node).
+func (t *BTree) minItems() int {
+	return t.degree - 1
+}
+
+func (c *copyOnWriteContext) newNode() (n *node) {
+	n = c.freelist.newNode()
+	n.cow = c
+	return
+}
+
+type freeType int
+
+const (
+	ftFreelistFull freeType = iota // node was freed (available for GC, not stored in freelist)
+	ftStored                       // node was stored in the freelist for later use
+	ftNotOwned                     // node was ignored by COW, since it's owned by another one
+)
+
+// freeNode frees a node within a given COW context, if it's owned by that
+// context.  It returns what happened to the node (see freeType const
+// documentation).
+func (c *copyOnWriteContext) freeNode(n *node) freeType {
+	if n.cow == c {
+		// clear to allow GC
+		n.items.truncate(0)
+		n.children.truncate(0)
+		n.cow = nil
+		if c.freelist.freeNode(n) {
+			return ftStored
+		} else {
+			return ftFreelistFull
+		}
+	} else {
+		return ftNotOwned
+	}
+}
+
+// ReplaceOrInsert adds the given item to the tree.  If an item in the tree
+// already equals the given one, it is removed from the tree and returned.
+// Otherwise, nil is returned.
+//
+// nil cannot be added to the tree (will panic).
+func (t *BTree) ReplaceOrInsert(item Item) Item {
+	if item == nil {
+		panic("nil item being added to BTree")
+	}
+	if t.root == nil {
+		t.root = t.cow.newNode()
+		t.root.items = append(t.root.items, item)
+		t.length++
+		return nil
+	} else {
+		t.root = t.root.mutableFor(t.cow)
+		if len(t.root.items) >= t.maxItems() {
+			item2, second := t.root.split(t.maxItems() / 2)
+			oldroot := t.root
+			t.root = t.cow.newNode()
+			t.root.items = append(t.root.items, item2)
+			t.root.children = append(t.root.children, oldroot, second)
+		}
+	}
+	out := t.root.insert(item, t.maxItems())
+	if out == nil {
+		t.length++
+	}
+	return out
+}
+
+// Delete removes an item equal to the passed in item from the tree, returning
+// it.  If no such item exists, returns nil.
+func (t *BTree) Delete(item Item) Item {
+	return t.deleteItem(item, removeItem)
+}
+
+// DeleteMin removes the smallest item in the tree and returns it.
+// If no such item exists, returns nil.
+func (t *BTree) DeleteMin() Item {
+	return t.deleteItem(nil, removeMin)
+}
+
+// DeleteMax removes the largest item in the tree and returns it.
+// If no such item exists, returns nil.
+func (t *BTree) DeleteMax() Item {
+	return t.deleteItem(nil, removeMax)
+}
+
+func (t *BTree) deleteItem(item Item, typ toRemove) Item {
+	if t.root == nil || len(t.root.items) == 0 {
+		return nil
+	}
+	t.root = t.root.mutableFor(t.cow)
+	out := t.root.remove(item, t.minItems(), typ)
+	if len(t.root.items) == 0 && len(t.root.children) > 0 {
+		oldroot := t.root
+		t.root = t.root.children[0]
+		t.cow.freeNode(oldroot)
+	}
+	if out != nil {
+		t.length--
+	}
+	return out
+}
+
+// AscendRange calls the iterator for every value in the tree within the range
+// [greaterOrEqual, lessThan), until iterator returns false.
+func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) {
+	if t.root == nil {
+		return
+	}
+	t.root.iterate(ascend, greaterOrEqual, lessThan, true, false, iterator)
+}
+
+// AscendLessThan calls the iterator for every value in the tree within the range
+// [first, pivot), until iterator returns false.
+func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) {
+	if t.root == nil {
+		return
+	}
+	t.root.iterate(ascend, nil, pivot, false, false, iterator)
+}
+
+// AscendGreaterOrEqual calls the iterator for every value in the tree within
+// the range [pivot, last], until iterator returns false.
+func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) {
+	if t.root == nil {
+		return
+	}
+	t.root.iterate(ascend, pivot, nil, true, false, iterator)
+}
+
+// Ascend calls the iterator for every value in the tree within the range
+// [first, last], until iterator returns false.
+func (t *BTree) Ascend(iterator ItemIterator) {
+	if t.root == nil {
+		return
+	}
+	t.root.iterate(ascend, nil, nil, false, false, iterator)
+}
+
+// DescendRange calls the iterator for every value in the tree within the range
+// [lessOrEqual, greaterThan), until iterator returns false.
+func (t *BTree) DescendRange(lessOrEqual, greaterThan Item, iterator ItemIterator) {
+	if t.root == nil {
+		return
+	}
+	t.root.iterate(descend, lessOrEqual, greaterThan, true, false, iterator)
+}
+
+// DescendLessOrEqual calls the iterator for every value in the tree within the range
+// [pivot, first], until iterator returns false.
+func (t *BTree) DescendLessOrEqual(pivot Item, iterator ItemIterator) {
+	if t.root == nil {
+		return
+	}
+	t.root.iterate(descend, pivot, nil, true, false, iterator)
+}
+
+// DescendGreaterThan calls the iterator for every value in the tree within
+// the range (pivot, last], until iterator returns false.
+func (t *BTree) DescendGreaterThan(pivot Item, iterator ItemIterator) {
+	if t.root == nil {
+		return
+	}
+	t.root.iterate(descend, nil, pivot, false, false, iterator)
+}
+
+// Descend calls the iterator for every value in the tree within the range
+// [last, first], until iterator returns false.
+func (t *BTree) Descend(iterator ItemIterator) {
+	if t.root == nil {
+		return
+	}
+	t.root.iterate(descend, nil, nil, false, false, iterator)
+}
+
+// Get looks for the key item in the tree, returning it.  It returns nil if
+// unable to find that item.
+func (t *BTree) Get(key Item) Item {
+	if t.root == nil {
+		return nil
+	}
+	return t.root.get(key)
+}
+
+// Min returns the smallest item in the tree, or nil if the tree is empty.
+func (t *BTree) Min() Item {
+	return min(t.root)
+}
+
+// Max returns the largest item in the tree, or nil if the tree is empty.
+func (t *BTree) Max() Item {
+	return max(t.root)
+}
+
+// Has returns true if the given key is in the tree.
+func (t *BTree) Has(key Item) bool {
+	return t.Get(key) != nil
+}
+
+// Len returns the number of items currently in the tree.
+func (t *BTree) Len() int {
+	return t.length
+}
+
+// Clear removes all items from the btree.  If addNodesToFreelist is true,
+// t's nodes are added to its freelist as part of this call, until the freelist
+// is full.  Otherwise, the root node is simply dereferenced and the subtree
+// left to Go's normal GC processes.
+//
+// This can be much faster
+// than calling Delete on all elements, because that requires finding/removing
+// each element in the tree and updating the tree accordingly.  It also is
+// somewhat faster than creating a new tree to replace the old one, because
+// nodes from the old tree are reclaimed into the freelist for use by the new
+// one, instead of being lost to the garbage collector.
+//
+// This call takes:
+//   O(1): when addNodesToFreelist is false, this is a single operation.
+//   O(1): when the freelist is already full, it breaks out immediately
+//   O(freelist size):  when the freelist is empty and the nodes are all owned
+//       by this tree, nodes are added to the freelist until full.
+//   O(tree size):  when all nodes are owned by another tree, all nodes are
+//       iterated over looking for nodes to add to the freelist, and due to
+//       ownership, none are.
+func (t *BTree) Clear(addNodesToFreelist bool) {
+	if t.root != nil && addNodesToFreelist {
+		t.root.reset(t.cow)
+	}
+	t.root, t.length = nil, 0
+}
+
+// reset returns a subtree to the freelist.  It breaks out immediately if the
+// freelist is full, since the only benefit of iterating is to fill that
+// freelist up.  Returns true if parent reset call should continue.
+func (n *node) reset(c *copyOnWriteContext) bool {
+	for _, child := range n.children {
+		if !child.reset(c) {
+			return false
+		}
+	}
+	return c.freeNode(n) != ftFreelistFull
+}
+
+// Int implements the Item interface for integers.
+type Int int
+
+// Less returns true if int(a) < int(b).
+func (a Int) Less(b Item) bool {
+	return a < b.(Int)
+}
diff --git a/vendor/github.com/google/btree/btree_mem.go b/vendor/github.com/google/btree/btree_mem.go
new file mode 100644
index 0000000..cb95b7f
--- /dev/null
+++ b/vendor/github.com/google/btree/btree_mem.go
@@ -0,0 +1,76 @@
+// Copyright 2014 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build ignore
+
+// This binary compares memory usage between btree and gollrb.
+package main
+
+import (
+	"flag"
+	"fmt"
+	"math/rand"
+	"runtime"
+	"time"
+
+	"github.com/google/btree"
+	"github.com/petar/GoLLRB/llrb"
+)
+
+var (
+	size   = flag.Int("size", 1000000, "size of the tree to build")
+	degree = flag.Int("degree", 8, "degree of btree")
+	gollrb = flag.Bool("llrb", false, "use llrb instead of btree")
+)
+
+func main() {
+	flag.Parse()
+	vals := rand.Perm(*size)
+	var t, v interface{}
+	v = vals
+	var stats runtime.MemStats
+	for i := 0; i < 10; i++ {
+		runtime.GC()
+	}
+	fmt.Println("-------- BEFORE ----------")
+	runtime.ReadMemStats(&stats)
+	fmt.Printf("%+v\n", stats)
+	start := time.Now()
+	if *gollrb {
+		tr := llrb.New()
+		for _, v := range vals {
+			tr.ReplaceOrInsert(llrb.Int(v))
+		}
+		t = tr // keep it around
+	} else {
+		tr := btree.New(*degree)
+		for _, v := range vals {
+			tr.ReplaceOrInsert(btree.Int(v))
+		}
+		t = tr // keep it around
+	}
+	fmt.Printf("%v inserts in %v\n", *size, time.Since(start))
+	fmt.Println("-------- AFTER ----------")
+	runtime.ReadMemStats(&stats)
+	fmt.Printf("%+v\n", stats)
+	for i := 0; i < 10; i++ {
+		runtime.GC()
+	}
+	fmt.Println("-------- AFTER GC ----------")
+	runtime.ReadMemStats(&stats)
+	fmt.Printf("%+v\n", stats)
+	if t == v {
+		fmt.Println("to make sure vals and tree aren't GC'd")
+	}
+}
diff --git a/vendor/github.com/google/gofuzz/.travis.yml b/vendor/github.com/google/gofuzz/.travis.yml
new file mode 100644
index 0000000..f8684d9
--- /dev/null
+++ b/vendor/github.com/google/gofuzz/.travis.yml
@@ -0,0 +1,13 @@
+language: go
+
+go:
+  - 1.4
+  - 1.3
+  - 1.2
+  - tip
+
+install:
+  - if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi
+
+script:
+  - go test -cover
diff --git a/vendor/github.com/google/gofuzz/CONTRIBUTING.md b/vendor/github.com/google/gofuzz/CONTRIBUTING.md
new file mode 100644
index 0000000..51cf5cd
--- /dev/null
+++ b/vendor/github.com/google/gofuzz/CONTRIBUTING.md
@@ -0,0 +1,67 @@
+# How to contribute #
+
+We'd love to accept your patches and contributions to this project.  There are
+just a few small guidelines you need to follow.
+
+
+## Contributor License Agreement ##
+
+Contributions to any Google project must be accompanied by a Contributor
+License Agreement.  This is not a copyright **assignment**, it simply gives
+Google permission to use and redistribute your contributions as part of the
+project.
+
+  * If you are an individual writing original source code and you're sure you
+    own the intellectual property, then you'll need to sign an [individual
+    CLA][].
+
+  * If you work for a company that wants to allow you to contribute your work,
+    then you'll need to sign a [corporate CLA][].
+
+You generally only need to submit a CLA once, so if you've already submitted
+one (even if it was for a different project), you probably don't need to do it
+again.
+
+[individual CLA]: https://developers.google.com/open-source/cla/individual
+[corporate CLA]: https://developers.google.com/open-source/cla/corporate
+
+
+## Submitting a patch ##
+
+  1. It's generally best to start by opening a new issue describing the bug or
+     feature you're intending to fix.  Even if you think it's relatively minor,
+     it's helpful to know what people are working on.  Mention in the initial
+     issue that you are planning to work on that bug or feature so that it can
+     be assigned to you.
+
+  1. Follow the normal process of [forking][] the project, and setup a new
+     branch to work in.  It's important that each group of changes be done in
+     separate branches in order to ensure that a pull request only includes the
+     commits related to that bug or feature.
+
+  1. Go makes it very simple to ensure properly formatted code, so always run
+     `go fmt` on your code before committing it.  You should also run
+     [golint][] over your code.  As noted in the [golint readme][], it's not
+     strictly necessary that your code be completely "lint-free", but this will
+     help you find common style issues.
+
+  1. Any significant changes should almost always be accompanied by tests.  The
+     project already has good test coverage, so look at some of the existing
+     tests if you're unsure how to go about it.  [gocov][] and [gocov-html][]
+     are invaluable tools for seeing which parts of your code aren't being
+     exercised by your tests.
+
+  1. Do your best to have [well-formed commit messages][] for each change.
+     This provides consistency throughout the project, and ensures that commit
+     messages are able to be formatted properly by various git tools.
+
+  1. Finally, push the commits to your fork and submit a [pull request][].
+
+[forking]: https://help.github.com/articles/fork-a-repo
+[golint]: https://github.com/golang/lint
+[golint readme]: https://github.com/golang/lint/blob/master/README
+[gocov]: https://github.com/axw/gocov
+[gocov-html]: https://github.com/matm/gocov-html
+[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html
+[squash]: http://git-scm.com/book/en/Git-Tools-Rewriting-History#Squashing-Commits
+[pull request]: https://help.github.com/articles/creating-a-pull-request
diff --git a/vendor/github.com/google/gofuzz/LICENSE b/vendor/github.com/google/gofuzz/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/github.com/google/gofuzz/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/google/gofuzz/README.md b/vendor/github.com/google/gofuzz/README.md
new file mode 100644
index 0000000..64869af
--- /dev/null
+++ b/vendor/github.com/google/gofuzz/README.md
@@ -0,0 +1,71 @@
+gofuzz
+======
+
+gofuzz is a library for populating Go objects with random values.
+
+[![GoDoc](https://godoc.org/github.com/google/gofuzz?status.png)](https://godoc.org/github.com/google/gofuzz)
+[![Travis](https://travis-ci.org/google/gofuzz.svg?branch=master)](https://travis-ci.org/google/gofuzz)
+
+This is useful for testing:
+
+* Do your project's objects really serialize/unserialize correctly in all cases?
+* Is there an incorrectly formatted object that will cause your project to panic?
+
+Import with ```import "github.com/google/gofuzz"```
+
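+For instance, the serialization question above can be turned into a quick
+round-trip test. This is an illustrative sketch, not part of the library:
+`MyType` is a placeholder, and `encoding/json` is just one possible codec.
+```go
+func TestRoundTrip(t *testing.T) {
+	f := fuzz.New()
+	for i := 0; i < 100; i++ {
+		var in MyType // hypothetical serializable type
+		f.Fuzz(&in)
+		data, err := json.Marshal(in)
+		if err != nil {
+			t.Fatal(err)
+		}
+		var out MyType
+		if err := json.Unmarshal(data, &out); err != nil {
+			t.Fatal(err)
+		}
+		if !reflect.DeepEqual(in, out) {
+			t.Errorf("round trip changed value: %#v != %#v", in, out)
+		}
+	}
+}
+```
+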
+You can use it on single variables:
+```go
+f := fuzz.New()
+var myInt int
+f.Fuzz(&myInt) // myInt gets a random value.
+```
+
+You can use it on maps:
+```go
+f := fuzz.New().NilChance(0).NumElements(1, 1)
+var myMap map[ComplexKeyType]string
+f.Fuzz(&myMap) // myMap will have exactly one element.
+```
+
+Customize the chance of getting a nil pointer:
+```go
+f := fuzz.New().NilChance(.5)
+var fancyStruct struct {
+  A, B, C, D *string
+}
+f.Fuzz(&fancyStruct) // About half the pointers should be set.
+```
+
+You can even customize the randomization completely if needed:
+```go
+type MyEnum string
+const (
+        A MyEnum = "A"
+        B MyEnum = "B"
+)
+type MyInfo struct {
+        Type MyEnum
+        AInfo *string
+        BInfo *string
+}
+
+f := fuzz.New().NilChance(0).Funcs(
+        func(e *MyInfo, c fuzz.Continue) {
+                switch c.Intn(2) {
+                case 0:
+                        e.Type = A
+                        c.Fuzz(&e.AInfo)
+                case 1:
+                        e.Type = B
+                        c.Fuzz(&e.BInfo)
+                }
+        },
+)
+
+var myObject MyInfo
+f.Fuzz(&myObject) // Type will correspond to whether A or B info is set.
+```
+
+See more examples in ```example_test.go```.
+
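+Types can also take full control of their own randomization by implementing
+the package's `Interface`. A minimal sketch (`MyType` is illustrative):
+```go
+type MyType struct {
+	Value string
+}
+
+// Fuzz satisfies fuzz.Interface, so the fuzzer delegates to it.
+func (m *MyType) Fuzz(c fuzz.Continue) {
+	m.Value = "custom-" + c.RandString()
+}
+```
+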
+Happy testing!
diff --git a/vendor/github.com/google/gofuzz/doc.go b/vendor/github.com/google/gofuzz/doc.go
new file mode 100644
index 0000000..9f9956d
--- /dev/null
+++ b/vendor/github.com/google/gofuzz/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package fuzz is a library for populating Go objects with random values.
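+//
+// A typical use is to create a Fuzzer and point it at a value (a sketch;
+// MyStruct stands in for any type with exported fields):
+//
+//	f := fuzz.New()
+//	var obj MyStruct
+//	f.Fuzz(&obj) // exported fields of obj now hold random values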
+package fuzz
diff --git a/vendor/github.com/google/gofuzz/fuzz.go b/vendor/github.com/google/gofuzz/fuzz.go
new file mode 100644
index 0000000..1dfa80a
--- /dev/null
+++ b/vendor/github.com/google/gofuzz/fuzz.go
@@ -0,0 +1,487 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fuzz
+
+import (
+	"fmt"
+	"math/rand"
+	"reflect"
+	"time"
+)
+
+// fuzzFuncMap is a map from a type to a fuzzFunc that handles that type.
+type fuzzFuncMap map[reflect.Type]reflect.Value
+
+// Fuzzer knows how to fill any object with random fields.
+type Fuzzer struct {
+	fuzzFuncs        fuzzFuncMap
+	defaultFuzzFuncs fuzzFuncMap
+	r                *rand.Rand
+	nilChance        float64
+	minElements      int
+	maxElements      int
+	maxDepth         int
+}
+
+// New returns a new Fuzzer. Customize your Fuzzer further by calling Funcs,
+// RandSource, NilChance, or NumElements in any order.
+func New() *Fuzzer {
+	return NewWithSeed(time.Now().UnixNano())
+}
+
+// NewWithSeed returns a new Fuzzer that uses the given value to seed its
+// source of randomness, making runs reproducible.
+func NewWithSeed(seed int64) *Fuzzer {
+	f := &Fuzzer{
+		defaultFuzzFuncs: fuzzFuncMap{
+			reflect.TypeOf(&time.Time{}): reflect.ValueOf(fuzzTime),
+		},
+
+		fuzzFuncs:   fuzzFuncMap{},
+		r:           rand.New(rand.NewSource(seed)),
+		nilChance:   .2,
+		minElements: 1,
+		maxElements: 10,
+		maxDepth:    100,
+	}
+	return f
+}
+
+// Funcs adds each entry in fuzzFuncs as a custom fuzzing function.
+//
+// Each entry in fuzzFuncs must be a function taking two parameters.
+// The first parameter must be a pointer or map. It is the variable that
+// function will fill with random data. The second parameter must be a
+// fuzz.Continue, which will provide a source of randomness and a way
+// to automatically continue fuzzing smaller pieces of the first parameter.
+//
+// These functions are called sensibly, e.g., if you wanted custom string
+// fuzzing, the function `func(s *string, c fuzz.Continue)` would get
+// called and passed the address of strings. Maps and pointers will always
+// be made/new'd for you, ignoring the NilChance option. For slices, it
+// doesn't make much sense to pre-create them--Fuzzer doesn't know how
+// long you want your slice--so take a pointer to a slice, and make it
+// yourself. (If you don't want your map/pointer type pre-made, take a
+// pointer to it, and make it yourself.) See the examples for a range of
+// custom functions.
+func (f *Fuzzer) Funcs(fuzzFuncs ...interface{}) *Fuzzer {
+	for i := range fuzzFuncs {
+		v := reflect.ValueOf(fuzzFuncs[i])
+		if v.Kind() != reflect.Func {
+			panic("Need only funcs!")
+		}
+		t := v.Type()
+		if t.NumIn() != 2 || t.NumOut() != 0 {
+			panic("Need 2 in and 0 out params!")
+		}
+		argT := t.In(0)
+		switch argT.Kind() {
+		case reflect.Ptr, reflect.Map:
+		default:
+			panic("fuzzFunc must take pointer or map type")
+		}
+		if t.In(1) != reflect.TypeOf(Continue{}) {
+			panic("fuzzFunc's second parameter must be type fuzz.Continue")
+		}
+		f.fuzzFuncs[argT] = v
+	}
+	return f
+}
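+
+// For example (an editor's sketch; the string logic is arbitrary), a custom
+// string function could be registered like this:
+//
+//	f := fuzz.New().Funcs(func(s *string, c fuzz.Continue) {
+//		*s = "prefix-" + c.RandString()
+//	})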
+
+// RandSource causes f to get values from the given source of randomness.
+// Use if you want deterministic fuzzing.
+func (f *Fuzzer) RandSource(s rand.Source) *Fuzzer {
+	f.r = rand.New(s)
+	return f
+}
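+
+// For instance (a sketch), runs become reproducible with a fixed source:
+//
+//	f := fuzz.New().RandSource(rand.NewSource(1234))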
+
+// NilChance sets the probability of creating a nil pointer, map, or slice to
+// 'p'. 'p' should be between 0 (no nils) and 1 (all nils), inclusive.
+func (f *Fuzzer) NilChance(p float64) *Fuzzer {
+	if p < 0 || p > 1 {
+		panic("p should be between 0 and 1, inclusive.")
+	}
+	f.nilChance = p
+	return f
+}
+
+// NumElements sets the minimum and maximum number of elements that will be
+// added to a non-nil map or slice.
+func (f *Fuzzer) NumElements(atLeast, atMost int) *Fuzzer {
+	if atLeast > atMost {
+		panic("atLeast must be <= atMost")
+	}
+	if atLeast < 0 {
+		panic("atLeast must be >= 0")
+	}
+	f.minElements = atLeast
+	f.maxElements = atMost
+	return f
+}
+
+func (f *Fuzzer) genElementCount() int {
+	if f.minElements == f.maxElements {
+		return f.minElements
+	}
+	return f.minElements + f.r.Intn(f.maxElements-f.minElements+1)
+}
+
+func (f *Fuzzer) genShouldFill() bool {
+	return f.r.Float64() > f.nilChance
+}
+
+// MaxDepth sets the maximum number of recursive fuzz calls that will be made
+// before stopping.  This includes struct members, pointers, and map and slice
+// elements.
+func (f *Fuzzer) MaxDepth(d int) *Fuzzer {
+	f.maxDepth = d
+	return f
+}
+
+// Fuzz recursively fills all of obj's fields with something random.  First
+// this tries to find a custom fuzz function (see Funcs).  If there is no
+// custom function this tests whether the object implements fuzz.Interface and,
+// if so, calls Fuzz on it to fuzz itself.  If that fails, this will see if
+// there is a default fuzz function provided by this package.  If all of that
+// fails, this will generate random values for all primitive fields and then
+// recurse for all non-primitives.
+//
+// This is safe for cyclic or tree-like structs, up to a limit.  Use the
+// MaxDepth method to adjust how deep you need it to recurse.
+//
+// obj must be a pointer. Only exported (public) fields can be set (thanks,
+// golang :/ ). Intended for tests, so will panic on bad input or unimplemented
+// fields.
+func (f *Fuzzer) Fuzz(obj interface{}) {
+	v := reflect.ValueOf(obj)
+	if v.Kind() != reflect.Ptr {
+		panic("needed ptr!")
+	}
+	v = v.Elem()
+	f.fuzzWithContext(v, 0)
+}
+
+// FuzzNoCustom is just like Fuzz, except that any custom fuzz function for
+// obj's type will not be called and obj will not be tested for fuzz.Interface
+// conformance.  This applies only to obj and not other instances of obj's
+// type.
+// Not safe for cyclic or tree-like structs!
+// obj must be a pointer. Only exported (public) fields can be set (thanks, golang :/ ).
+// Intended for tests, so will panic on bad input or unimplemented fields.
+func (f *Fuzzer) FuzzNoCustom(obj interface{}) {
+	v := reflect.ValueOf(obj)
+	if v.Kind() != reflect.Ptr {
+		panic("needed ptr!")
+	}
+	v = v.Elem()
+	f.fuzzWithContext(v, flagNoCustomFuzz)
+}
+
+const (
+	// Do not try to find a custom fuzz function.  Does not apply recursively.
+	flagNoCustomFuzz uint64 = 1 << iota
+)
+
+func (f *Fuzzer) fuzzWithContext(v reflect.Value, flags uint64) {
+	fc := &fuzzerContext{fuzzer: f}
+	fc.doFuzz(v, flags)
+}
+
+// fuzzerContext carries context about a single fuzzing run, which lets Fuzzer
+// be thread-safe.
+type fuzzerContext struct {
+	fuzzer   *Fuzzer
+	curDepth int
+}
+
+func (fc *fuzzerContext) doFuzz(v reflect.Value, flags uint64) {
+	if fc.curDepth >= fc.fuzzer.maxDepth {
+		return
+	}
+	fc.curDepth++
+	defer func() { fc.curDepth-- }()
+
+	if !v.CanSet() {
+		return
+	}
+
+	if flags&flagNoCustomFuzz == 0 {
+		// Check for both pointer and non-pointer custom functions.
+		if v.CanAddr() && fc.tryCustom(v.Addr()) {
+			return
+		}
+		if fc.tryCustom(v) {
+			return
+		}
+	}
+
+	if fn, ok := fillFuncMap[v.Kind()]; ok {
+		fn(v, fc.fuzzer.r)
+		return
+	}
+	switch v.Kind() {
+	case reflect.Map:
+		if fc.fuzzer.genShouldFill() {
+			v.Set(reflect.MakeMap(v.Type()))
+			n := fc.fuzzer.genElementCount()
+			for i := 0; i < n; i++ {
+				key := reflect.New(v.Type().Key()).Elem()
+				fc.doFuzz(key, 0)
+				val := reflect.New(v.Type().Elem()).Elem()
+				fc.doFuzz(val, 0)
+				v.SetMapIndex(key, val)
+			}
+			return
+		}
+		v.Set(reflect.Zero(v.Type()))
+	case reflect.Ptr:
+		if fc.fuzzer.genShouldFill() {
+			v.Set(reflect.New(v.Type().Elem()))
+			fc.doFuzz(v.Elem(), 0)
+			return
+		}
+		v.Set(reflect.Zero(v.Type()))
+	case reflect.Slice:
+		if fc.fuzzer.genShouldFill() {
+			n := fc.fuzzer.genElementCount()
+			v.Set(reflect.MakeSlice(v.Type(), n, n))
+			for i := 0; i < n; i++ {
+				fc.doFuzz(v.Index(i), 0)
+			}
+			return
+		}
+		v.Set(reflect.Zero(v.Type()))
+	case reflect.Array:
+		if fc.fuzzer.genShouldFill() {
+			n := v.Len()
+			for i := 0; i < n; i++ {
+				fc.doFuzz(v.Index(i), 0)
+			}
+			return
+		}
+		v.Set(reflect.Zero(v.Type()))
+	case reflect.Struct:
+		for i := 0; i < v.NumField(); i++ {
+			fc.doFuzz(v.Field(i), 0)
+		}
+	case reflect.Chan:
+		fallthrough
+	case reflect.Func:
+		fallthrough
+	case reflect.Interface:
+		fallthrough
+	default:
+		panic(fmt.Sprintf("Can't handle %#v", v.Interface()))
+	}
+}
+
+// tryCustom searches for custom handlers, and returns true iff it finds a match
+// and successfully randomizes v.
+func (fc *fuzzerContext) tryCustom(v reflect.Value) bool {
+	// First: see if we have a fuzz function for it.
+	doCustom, ok := fc.fuzzer.fuzzFuncs[v.Type()]
+	if !ok {
+		// Second: see if it can fuzz itself.
+		if v.CanInterface() {
+			intf := v.Interface()
+			if fuzzable, ok := intf.(Interface); ok {
+				fuzzable.Fuzz(Continue{fc: fc, Rand: fc.fuzzer.r})
+				return true
+			}
+		}
+		// Finally: see if there is a default fuzz function.
+		doCustom, ok = fc.fuzzer.defaultFuzzFuncs[v.Type()]
+		if !ok {
+			return false
+		}
+	}
+
+	switch v.Kind() {
+	case reflect.Ptr:
+		if v.IsNil() {
+			if !v.CanSet() {
+				return false
+			}
+			v.Set(reflect.New(v.Type().Elem()))
+		}
+	case reflect.Map:
+		if v.IsNil() {
+			if !v.CanSet() {
+				return false
+			}
+			v.Set(reflect.MakeMap(v.Type()))
+		}
+	default:
+		return false
+	}
+
+	doCustom.Call([]reflect.Value{v, reflect.ValueOf(Continue{
+		fc:   fc,
+		Rand: fc.fuzzer.r,
+	})})
+	return true
+}
+
+// Interface represents an object that knows how to fuzz itself.  Any time we
+// find a type that implements this interface we will delegate the act of
+// fuzzing to the object itself.
+type Interface interface {
+	Fuzz(c Continue)
+}
+
+// Continue can be passed to custom fuzzing functions to allow them to use
+// the correct source of randomness and to continue fuzzing their members.
+type Continue struct {
+	fc *fuzzerContext
+
+	// For convenience, Continue implements rand.Rand via embedding.
+	// Use this for generating any randomness if you want your fuzzing
+	// to be repeatable for a given seed.
+	*rand.Rand
+}
+
+// Fuzz continues fuzzing obj. obj must be a pointer.
+func (c Continue) Fuzz(obj interface{}) {
+	v := reflect.ValueOf(obj)
+	if v.Kind() != reflect.Ptr {
+		panic("needed ptr!")
+	}
+	v = v.Elem()
+	c.fc.doFuzz(v, 0)
+}
+
+// FuzzNoCustom continues fuzzing obj, except that any custom fuzz function for
+// obj's type will not be called and obj will not be tested for fuzz.Interface
+// conformance.  This applies only to obj and not other instances of obj's
+// type.
+func (c Continue) FuzzNoCustom(obj interface{}) {
+	v := reflect.ValueOf(obj)
+	if v.Kind() != reflect.Ptr {
+		panic("needed ptr!")
+	}
+	v = v.Elem()
+	c.fc.doFuzz(v, flagNoCustomFuzz)
+}
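+
+// A common pattern (an editor's sketch; MyObj and computeChecksum are
+// hypothetical) is to let FuzzNoCustom fill every field and then repair
+// invariants by hand:
+//
+//	f := fuzz.New().Funcs(func(o *MyObj, c fuzz.Continue) {
+//		c.FuzzNoCustom(o) // fuzz fields without re-invoking this function
+//		o.Checksum = computeChecksum(o)
+//	})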
+
+// RandString makes a random string up to 20 characters long. The returned string
+// may include a variety of (valid) UTF-8 encodings.
+func (c Continue) RandString() string {
+	return randString(c.Rand)
+}
+
+// RandUint64 makes random 64 bit numbers.
+// Weirdly, rand doesn't have a function that gives you 64 random bits.
+func (c Continue) RandUint64() uint64 {
+	return randUint64(c.Rand)
+}
+
+// RandBool returns true or false randomly.
+func (c Continue) RandBool() bool {
+	return randBool(c.Rand)
+}
+
+func fuzzInt(v reflect.Value, r *rand.Rand) {
+	v.SetInt(int64(randUint64(r)))
+}
+
+func fuzzUint(v reflect.Value, r *rand.Rand) {
+	v.SetUint(randUint64(r))
+}
+
+func fuzzTime(t *time.Time, c Continue) {
+	var sec, nsec int64
+	// Allow for about 1000 years of random time values, which keeps things
+	// like JSON parsing reasonably happy.
+	sec = c.Rand.Int63n(1000 * 365 * 24 * 60 * 60)
+	c.Fuzz(&nsec)
+	*t = time.Unix(sec, nsec)
+}
+
+var fillFuncMap = map[reflect.Kind]func(reflect.Value, *rand.Rand){
+	reflect.Bool: func(v reflect.Value, r *rand.Rand) {
+		v.SetBool(randBool(r))
+	},
+	reflect.Int:     fuzzInt,
+	reflect.Int8:    fuzzInt,
+	reflect.Int16:   fuzzInt,
+	reflect.Int32:   fuzzInt,
+	reflect.Int64:   fuzzInt,
+	reflect.Uint:    fuzzUint,
+	reflect.Uint8:   fuzzUint,
+	reflect.Uint16:  fuzzUint,
+	reflect.Uint32:  fuzzUint,
+	reflect.Uint64:  fuzzUint,
+	reflect.Uintptr: fuzzUint,
+	reflect.Float32: func(v reflect.Value, r *rand.Rand) {
+		v.SetFloat(float64(r.Float32()))
+	},
+	reflect.Float64: func(v reflect.Value, r *rand.Rand) {
+		v.SetFloat(r.Float64())
+	},
+	reflect.Complex64: func(v reflect.Value, r *rand.Rand) {
+		panic("unimplemented")
+	},
+	reflect.Complex128: func(v reflect.Value, r *rand.Rand) {
+		panic("unimplemented")
+	},
+	reflect.String: func(v reflect.Value, r *rand.Rand) {
+		v.SetString(randString(r))
+	},
+	reflect.UnsafePointer: func(v reflect.Value, r *rand.Rand) {
+		panic("unimplemented")
+	},
+}
+
+// randBool returns true or false randomly.
+func randBool(r *rand.Rand) bool {
+	return r.Int()&1 == 1
+}
+
+type charRange struct {
+	first, last rune
+}
+
+// choose returns a random Unicode character from the given inclusive range,
+// using the given randomness source.
+func (r *charRange) choose(rand *rand.Rand) rune {
+	// +1 makes the upper bound inclusive of r.last.
+	count := int64(r.last - r.first) + 1
+	return r.first + rune(rand.Int63n(count))
+}
+
+var unicodeRanges = []charRange{
+	{' ', '~'},           // ASCII characters
+	{'\u00a0', '\u02af'}, // Multi-byte encoded characters
+	{'\u4e00', '\u9fff'}, // Common CJK (even longer encodings)
+}
+
+// randString makes a random string up to 20 characters long. The returned string
+// may include a variety of (valid) UTF-8 encodings.
+func randString(r *rand.Rand) string {
+	n := r.Intn(20)
+	runes := make([]rune, n)
+	for i := range runes {
+		runes[i] = unicodeRanges[r.Intn(len(unicodeRanges))].choose(r)
+	}
+	return string(runes)
+}
+
+// randUint64 makes random 64 bit numbers.
+// Weirdly, rand doesn't have a function that gives you 64 random bits.
+func randUint64(r *rand.Rand) uint64 {
+	return uint64(r.Uint32())<<32 | uint64(r.Uint32())
+}
diff --git a/vendor/github.com/googleapis/gnostic/LICENSE b/vendor/github.com/googleapis/gnostic/LICENSE
new file mode 100644
index 0000000..6b0b127
--- /dev/null
+++ b/vendor/github.com/googleapis/gnostic/LICENSE
@@ -0,0 +1,203 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go
new file mode 100644
index 0000000..5351f36
--- /dev/null
+++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go
@@ -0,0 +1,8728 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// THIS FILE IS AUTOMATICALLY GENERATED.
+
+package openapi_v2
+
+import (
+	"fmt"
+	"github.com/googleapis/gnostic/compiler"
+	"gopkg.in/yaml.v2"
+	"regexp"
+	"strings"
+)
+
+// Version returns the package name (and OpenAPI version).
+func Version() string {
+	return "openapi_v2"
+}
+
+// NewAdditionalPropertiesItem creates an object of type AdditionalPropertiesItem if possible, returning an error if not.
+func NewAdditionalPropertiesItem(in interface{}, context *compiler.Context) (*AdditionalPropertiesItem, error) {
+	errors := make([]error, 0)
+	x := &AdditionalPropertiesItem{}
+	matched := false
+	// Schema schema = 1;
+	{
+		m, ok := compiler.UnpackMap(in)
+		if ok {
+			// errors might be ok here, they mean we just don't have the right subtype
+			t, matchingError := NewSchema(m, compiler.NewContext("schema", context))
+			if matchingError == nil {
+				x.Oneof = &AdditionalPropertiesItem_Schema{Schema: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	// bool boolean = 2;
+	boolValue, ok := in.(bool)
+	if ok {
+		x.Oneof = &AdditionalPropertiesItem_Boolean{Boolean: boolValue}
+	}
+	if matched {
+		// since the oneof matched one of its possibilities, discard any matching errors
+		errors = make([]error, 0)
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewAny creates an object of type Any if possible, returning an error if not.
+func NewAny(in interface{}, context *compiler.Context) (*Any, error) {
+	errors := make([]error, 0)
+	x := &Any{}
+	bytes, _ := yaml.Marshal(in)
+	x.Yaml = string(bytes)
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewApiKeySecurity creates an object of type ApiKeySecurity if possible, returning an error if not.
+func NewApiKeySecurity(in interface{}, context *compiler.Context) (*ApiKeySecurity, error) {
+	errors := make([]error, 0)
+	x := &ApiKeySecurity{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		requiredKeys := []string{"in", "name", "type"}
+		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
+		if len(missingKeys) > 0 {
+			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		allowedKeys := []string{"description", "in", "name", "type"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string type = 1;
+		v1 := compiler.MapValueForKey(m, "type")
+		if v1 != nil {
+			x.Type, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [apiKey]
+			if ok && !compiler.StringArrayContainsValue([]string{"apiKey"}, x.Type) {
+				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string name = 2;
+		v2 := compiler.MapValueForKey(m, "name")
+		if v2 != nil {
+			x.Name, ok = v2.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string in = 3;
+		v3 := compiler.MapValueForKey(m, "in")
+		if v3 != nil {
+			x.In, ok = v3.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [header query]
+			if ok && !compiler.StringArrayContainsValue([]string{"header", "query"}, x.In) {
+				message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string description = 4;
+		v4 := compiler.MapValueForKey(m, "description")
+		if v4 != nil {
+			x.Description, ok = v4.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v4, v4)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated NamedAny vendor_extension = 5;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewBasicAuthenticationSecurity creates an object of type BasicAuthenticationSecurity if possible, returning an error if not.
+func NewBasicAuthenticationSecurity(in interface{}, context *compiler.Context) (*BasicAuthenticationSecurity, error) {
+	errors := make([]error, 0)
+	x := &BasicAuthenticationSecurity{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		requiredKeys := []string{"type"}
+		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
+		if len(missingKeys) > 0 {
+			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		allowedKeys := []string{"description", "type"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string type = 1;
+		v1 := compiler.MapValueForKey(m, "type")
+		if v1 != nil {
+			x.Type, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [basic]
+			if ok && !compiler.StringArrayContainsValue([]string{"basic"}, x.Type) {
+				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string description = 2;
+		v2 := compiler.MapValueForKey(m, "description")
+		if v2 != nil {
+			x.Description, ok = v2.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated NamedAny vendor_extension = 3;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewBodyParameter creates an object of type BodyParameter if possible, returning an error if not.
+func NewBodyParameter(in interface{}, context *compiler.Context) (*BodyParameter, error) {
+	errors := make([]error, 0)
+	x := &BodyParameter{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		requiredKeys := []string{"in", "name", "schema"}
+		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
+		if len(missingKeys) > 0 {
+			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		allowedKeys := []string{"description", "in", "name", "required", "schema"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string description = 1;
+		v1 := compiler.MapValueForKey(m, "description")
+		if v1 != nil {
+			x.Description, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string name = 2;
+		v2 := compiler.MapValueForKey(m, "name")
+		if v2 != nil {
+			x.Name, ok = v2.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string in = 3;
+		v3 := compiler.MapValueForKey(m, "in")
+		if v3 != nil {
+			x.In, ok = v3.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [body]
+			if ok && !compiler.StringArrayContainsValue([]string{"body"}, x.In) {
+				message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool required = 4;
+		v4 := compiler.MapValueForKey(m, "required")
+		if v4 != nil {
+			x.Required, ok = v4.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v4, v4)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// Schema schema = 5;
+		v5 := compiler.MapValueForKey(m, "schema")
+		if v5 != nil {
+			var err error
+			x.Schema, err = NewSchema(v5, compiler.NewContext("schema", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// repeated NamedAny vendor_extension = 6;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewContact creates an object of type Contact if possible, returning an error if not.
+func NewContact(in interface{}, context *compiler.Context) (*Contact, error) {
+	errors := make([]error, 0)
+	x := &Contact{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		allowedKeys := []string{"email", "name", "url"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string name = 1;
+		v1 := compiler.MapValueForKey(m, "name")
+		if v1 != nil {
+			x.Name, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string url = 2;
+		v2 := compiler.MapValueForKey(m, "url")
+		if v2 != nil {
+			x.Url, ok = v2.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for url: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string email = 3;
+		v3 := compiler.MapValueForKey(m, "email")
+		if v3 != nil {
+			x.Email, ok = v3.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for email: %+v (%T)", v3, v3)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated NamedAny vendor_extension = 4;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewDefault creates an object of type Default if possible, returning an error if not.
+func NewDefault(in interface{}, context *compiler.Context) (*Default, error) {
+	errors := make([]error, 0)
+	x := &Default{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		// repeated NamedAny additional_properties = 1;
+		// MAP: Any
+		x.AdditionalProperties = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				pair := &NamedAny{}
+				pair.Name = k
+				result := &Any{}
+				handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+				if handled {
+					if err != nil {
+						errors = append(errors, err)
+					} else {
+						bytes, _ := yaml.Marshal(v)
+						result.Yaml = string(bytes)
+						result.Value = resultFromExt
+						pair.Value = result
+					}
+				} else {
+					pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+					if err != nil {
+						errors = append(errors, err)
+					}
+				}
+				x.AdditionalProperties = append(x.AdditionalProperties, pair)
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewDefinitions creates an object of type Definitions if possible, returning an error if not.
+func NewDefinitions(in interface{}, context *compiler.Context) (*Definitions, error) {
+	errors := make([]error, 0)
+	x := &Definitions{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		// repeated NamedSchema additional_properties = 1;
+		// MAP: Schema
+		x.AdditionalProperties = make([]*NamedSchema, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				pair := &NamedSchema{}
+				pair.Name = k
+				var err error
+				pair.Value, err = NewSchema(v, compiler.NewContext(k, context))
+				if err != nil {
+					errors = append(errors, err)
+				}
+				x.AdditionalProperties = append(x.AdditionalProperties, pair)
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
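+// Note: objects with free-form keys (definitions, headers, examples, and so
+// on) are represented as slices of Named* pairs rather than Go maps; since
+// the input arrives as an ordered key/value sequence, this preserves the key
+// order of the source document. NewDefinitions above builds one NamedSchema
+// pair per entry.
+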
+// NewDocument creates an object of type Document if possible, returning an error if not.
+func NewDocument(in interface{}, context *compiler.Context) (*Document, error) {
+	errors := make([]error, 0)
+	x := &Document{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		requiredKeys := []string{"info", "paths", "swagger"}
+		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
+		if len(missingKeys) > 0 {
+			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		allowedKeys := []string{"basePath", "consumes", "definitions", "externalDocs", "host", "info", "parameters", "paths", "produces", "responses", "schemes", "security", "securityDefinitions", "swagger", "tags"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string swagger = 1;
+		v1 := compiler.MapValueForKey(m, "swagger")
+		if v1 != nil {
+			x.Swagger, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for swagger: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [2.0]
+			if ok && !compiler.StringArrayContainsValue([]string{"2.0"}, x.Swagger) {
+				message := fmt.Sprintf("has unexpected value for swagger: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// Info info = 2;
+		v2 := compiler.MapValueForKey(m, "info")
+		if v2 != nil {
+			var err error
+			x.Info, err = NewInfo(v2, compiler.NewContext("info", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// string host = 3;
+		v3 := compiler.MapValueForKey(m, "host")
+		if v3 != nil {
+			x.Host, ok = v3.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for host: %+v (%T)", v3, v3)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string base_path = 4;
+		v4 := compiler.MapValueForKey(m, "basePath")
+		if v4 != nil {
+			x.BasePath, ok = v4.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for basePath: %+v (%T)", v4, v4)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated string schemes = 5;
+		v5 := compiler.MapValueForKey(m, "schemes")
+		if v5 != nil {
+			v, ok := v5.([]interface{})
+			if ok {
+				x.Schemes = compiler.ConvertInterfaceArrayToStringArray(v)
+			} else {
+				message := fmt.Sprintf("has unexpected value for schemes: %+v (%T)", v5, v5)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [http https ws wss]
+			if ok && !compiler.StringArrayContainsValues([]string{"http", "https", "ws", "wss"}, x.Schemes) {
+				message := fmt.Sprintf("has unexpected value for schemes: %+v (%T)", v5, v5)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated string consumes = 6;
+		v6 := compiler.MapValueForKey(m, "consumes")
+		if v6 != nil {
+			v, ok := v6.([]interface{})
+			if ok {
+				x.Consumes = compiler.ConvertInterfaceArrayToStringArray(v)
+			} else {
+				message := fmt.Sprintf("has unexpected value for consumes: %+v (%T)", v6, v6)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated string produces = 7;
+		v7 := compiler.MapValueForKey(m, "produces")
+		if v7 != nil {
+			v, ok := v7.([]interface{})
+			if ok {
+				x.Produces = compiler.ConvertInterfaceArrayToStringArray(v)
+			} else {
+				message := fmt.Sprintf("has unexpected value for produces: %+v (%T)", v7, v7)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// Paths paths = 8;
+		v8 := compiler.MapValueForKey(m, "paths")
+		if v8 != nil {
+			var err error
+			x.Paths, err = NewPaths(v8, compiler.NewContext("paths", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// Definitions definitions = 9;
+		v9 := compiler.MapValueForKey(m, "definitions")
+		if v9 != nil {
+			var err error
+			x.Definitions, err = NewDefinitions(v9, compiler.NewContext("definitions", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// ParameterDefinitions parameters = 10;
+		v10 := compiler.MapValueForKey(m, "parameters")
+		if v10 != nil {
+			var err error
+			x.Parameters, err = NewParameterDefinitions(v10, compiler.NewContext("parameters", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// ResponseDefinitions responses = 11;
+		v11 := compiler.MapValueForKey(m, "responses")
+		if v11 != nil {
+			var err error
+			x.Responses, err = NewResponseDefinitions(v11, compiler.NewContext("responses", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// repeated SecurityRequirement security = 12;
+		v12 := compiler.MapValueForKey(m, "security")
+		if v12 != nil {
+			// repeated SecurityRequirement
+			x.Security = make([]*SecurityRequirement, 0)
+			a, ok := v12.([]interface{})
+			if ok {
+				for _, item := range a {
+					y, err := NewSecurityRequirement(item, compiler.NewContext("security", context))
+					if err != nil {
+						errors = append(errors, err)
+					}
+					x.Security = append(x.Security, y)
+				}
+			}
+		}
+		// SecurityDefinitions security_definitions = 13;
+		v13 := compiler.MapValueForKey(m, "securityDefinitions")
+		if v13 != nil {
+			var err error
+			x.SecurityDefinitions, err = NewSecurityDefinitions(v13, compiler.NewContext("securityDefinitions", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// repeated Tag tags = 14;
+		v14 := compiler.MapValueForKey(m, "tags")
+		if v14 != nil {
+			// repeated Tag
+			x.Tags = make([]*Tag, 0)
+			a, ok := v14.([]interface{})
+			if ok {
+				for _, item := range a {
+					y, err := NewTag(item, compiler.NewContext("tags", context))
+					if err != nil {
+						errors = append(errors, err)
+					}
+					x.Tags = append(x.Tags, y)
+				}
+			}
+		}
+		// ExternalDocs external_docs = 15;
+		v15 := compiler.MapValueForKey(m, "externalDocs")
+		if v15 != nil {
+			var err error
+			x.ExternalDocs, err = NewExternalDocs(v15, compiler.NewContext("externalDocs", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// repeated NamedAny vendor_extension = 16;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
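+// Example (illustrative sketch, not part of the generated API): a Document
+// is normally built from YAML that was unmarshaled into an ordered map
+// (yaml.MapSlice) so that key order survives, with a fresh root context:
+//
+//	var root yaml.MapSlice
+//	if err := yaml.Unmarshal(specBytes, &root); err != nil {
+//		// handle YAML parse error
+//	}
+//	doc, err := NewDocument(root, compiler.NewContext("$root", nil))
+//
+// The name specBytes and the "$root" context label are assumptions of this
+// sketch; the returned error, if any, is an error group aggregating every
+// validation failure found.
+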
+// NewExamples creates an object of type Examples if possible, returning an error if not.
+func NewExamples(in interface{}, context *compiler.Context) (*Examples, error) {
+	errors := make([]error, 0)
+	x := &Examples{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		// repeated NamedAny additional_properties = 1;
+		// MAP: Any
+		x.AdditionalProperties = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				pair := &NamedAny{}
+				pair.Name = k
+				result := &Any{}
+				handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+				if handled {
+					if err != nil {
+						errors = append(errors, err)
+					} else {
+						bytes, _ := yaml.Marshal(v)
+						result.Yaml = string(bytes)
+						result.Value = resultFromExt
+						pair.Value = result
+					}
+				} else {
+					pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+					if err != nil {
+						errors = append(errors, err)
+					}
+				}
+				x.AdditionalProperties = append(x.AdditionalProperties, pair)
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewExternalDocs creates an object of type ExternalDocs if possible, returning an error if not.
+func NewExternalDocs(in interface{}, context *compiler.Context) (*ExternalDocs, error) {
+	errors := make([]error, 0)
+	x := &ExternalDocs{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		requiredKeys := []string{"url"}
+		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
+		if len(missingKeys) > 0 {
+			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		allowedKeys := []string{"description", "url"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string description = 1;
+		v1 := compiler.MapValueForKey(m, "description")
+		if v1 != nil {
+			x.Description, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string url = 2;
+		v2 := compiler.MapValueForKey(m, "url")
+		if v2 != nil {
+			x.Url, ok = v2.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for url: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated NamedAny vendor_extension = 3;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewFileSchema creates an object of type FileSchema if possible, returning an error if not.
+func NewFileSchema(in interface{}, context *compiler.Context) (*FileSchema, error) {
+	errors := make([]error, 0)
+	x := &FileSchema{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		requiredKeys := []string{"type"}
+		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
+		if len(missingKeys) > 0 {
+			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		allowedKeys := []string{"default", "description", "example", "externalDocs", "format", "readOnly", "required", "title", "type"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string format = 1;
+		v1 := compiler.MapValueForKey(m, "format")
+		if v1 != nil {
+			x.Format, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string title = 2;
+		v2 := compiler.MapValueForKey(m, "title")
+		if v2 != nil {
+			x.Title, ok = v2.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for title: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string description = 3;
+		v3 := compiler.MapValueForKey(m, "description")
+		if v3 != nil {
+			x.Description, ok = v3.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// Any default = 4;
+		v4 := compiler.MapValueForKey(m, "default")
+		if v4 != nil {
+			var err error
+			x.Default, err = NewAny(v4, compiler.NewContext("default", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// repeated string required = 5;
+		v5 := compiler.MapValueForKey(m, "required")
+		if v5 != nil {
+			v, ok := v5.([]interface{})
+			if ok {
+				x.Required = compiler.ConvertInterfaceArrayToStringArray(v)
+			} else {
+				message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v5, v5)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string type = 6;
+		v6 := compiler.MapValueForKey(m, "type")
+		if v6 != nil {
+			x.Type, ok = v6.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [file]
+			if ok && !compiler.StringArrayContainsValue([]string{"file"}, x.Type) {
+				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool read_only = 7;
+		v7 := compiler.MapValueForKey(m, "readOnly")
+		if v7 != nil {
+			x.ReadOnly, ok = v7.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for readOnly: %+v (%T)", v7, v7)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// ExternalDocs external_docs = 8;
+		v8 := compiler.MapValueForKey(m, "externalDocs")
+		if v8 != nil {
+			var err error
+			x.ExternalDocs, err = NewExternalDocs(v8, compiler.NewContext("externalDocs", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// Any example = 9;
+		v9 := compiler.MapValueForKey(m, "example")
+		if v9 != nil {
+			var err error
+			x.Example, err = NewAny(v9, compiler.NewContext("example", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// repeated NamedAny vendor_extension = 10;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewFormDataParameterSubSchema creates an object of type FormDataParameterSubSchema if possible, returning an error if not.
+func NewFormDataParameterSubSchema(in interface{}, context *compiler.Context) (*FormDataParameterSubSchema, error) {
+	errors := make([]error, 0)
+	x := &FormDataParameterSubSchema{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		allowedKeys := []string{"allowEmptyValue", "collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// bool required = 1;
+		v1 := compiler.MapValueForKey(m, "required")
+		if v1 != nil {
+			x.Required, ok = v1.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string in = 2;
+		v2 := compiler.MapValueForKey(m, "in")
+		if v2 != nil {
+			x.In, ok = v2.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [formData]
+			if ok && !compiler.StringArrayContainsValue([]string{"formData"}, x.In) {
+				message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string description = 3;
+		v3 := compiler.MapValueForKey(m, "description")
+		if v3 != nil {
+			x.Description, ok = v3.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string name = 4;
+		v4 := compiler.MapValueForKey(m, "name")
+		if v4 != nil {
+			x.Name, ok = v4.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool allow_empty_value = 5;
+		v5 := compiler.MapValueForKey(m, "allowEmptyValue")
+		if v5 != nil {
+			x.AllowEmptyValue, ok = v5.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for allowEmptyValue: %+v (%T)", v5, v5)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string type = 6;
+		v6 := compiler.MapValueForKey(m, "type")
+		if v6 != nil {
+			x.Type, ok = v6.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [string number boolean integer array file]
+			if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array", "file"}, x.Type) {
+				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string format = 7;
+		v7 := compiler.MapValueForKey(m, "format")
+		if v7 != nil {
+			x.Format, ok = v7.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v7, v7)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// PrimitivesItems items = 8;
+		v8 := compiler.MapValueForKey(m, "items")
+		if v8 != nil {
+			var err error
+			x.Items, err = NewPrimitivesItems(v8, compiler.NewContext("items", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// string collection_format = 9;
+		v9 := compiler.MapValueForKey(m, "collectionFormat")
+		if v9 != nil {
+			x.CollectionFormat, ok = v9.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [csv ssv tsv pipes multi]
+			if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes", "multi"}, x.CollectionFormat) {
+				message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// Any default = 10;
+		v10 := compiler.MapValueForKey(m, "default")
+		if v10 != nil {
+			var err error
+			x.Default, err = NewAny(v10, compiler.NewContext("default", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// float maximum = 11;
+		v11 := compiler.MapValueForKey(m, "maximum")
+		if v11 != nil {
+			switch v11 := v11.(type) {
+			case float64:
+				x.Maximum = v11
+			case float32:
+				x.Maximum = float64(v11)
+			case uint64:
+				x.Maximum = float64(v11)
+			case uint32:
+				x.Maximum = float64(v11)
+			case int64:
+				x.Maximum = float64(v11)
+			case int32:
+				x.Maximum = float64(v11)
+			case int:
+				x.Maximum = float64(v11)
+			default:
+				message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v11, v11)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool exclusive_maximum = 12;
+		v12 := compiler.MapValueForKey(m, "exclusiveMaximum")
+		if v12 != nil {
+			x.ExclusiveMaximum, ok = v12.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v12, v12)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// float minimum = 13;
+		v13 := compiler.MapValueForKey(m, "minimum")
+		if v13 != nil {
+			switch v13 := v13.(type) {
+			case float64:
+				x.Minimum = v13
+			case float32:
+				x.Minimum = float64(v13)
+			case uint64:
+				x.Minimum = float64(v13)
+			case uint32:
+				x.Minimum = float64(v13)
+			case int64:
+				x.Minimum = float64(v13)
+			case int32:
+				x.Minimum = float64(v13)
+			case int:
+				x.Minimum = float64(v13)
+			default:
+				message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v13, v13)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool exclusive_minimum = 14;
+		v14 := compiler.MapValueForKey(m, "exclusiveMinimum")
+		if v14 != nil {
+			x.ExclusiveMinimum, ok = v14.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v14, v14)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 max_length = 15;
+		v15 := compiler.MapValueForKey(m, "maxLength")
+		if v15 != nil {
+			t, ok := v15.(int)
+			if ok {
+				x.MaxLength = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v15, v15)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 min_length = 16;
+		v16 := compiler.MapValueForKey(m, "minLength")
+		if v16 != nil {
+			t, ok := v16.(int)
+			if ok {
+				x.MinLength = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v16, v16)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string pattern = 17;
+		v17 := compiler.MapValueForKey(m, "pattern")
+		if v17 != nil {
+			x.Pattern, ok = v17.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v17, v17)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 max_items = 18;
+		v18 := compiler.MapValueForKey(m, "maxItems")
+		if v18 != nil {
+			t, ok := v18.(int)
+			if ok {
+				x.MaxItems = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v18, v18)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 min_items = 19;
+		v19 := compiler.MapValueForKey(m, "minItems")
+		if v19 != nil {
+			t, ok := v19.(int)
+			if ok {
+				x.MinItems = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v19, v19)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool unique_items = 20;
+		v20 := compiler.MapValueForKey(m, "uniqueItems")
+		if v20 != nil {
+			x.UniqueItems, ok = v20.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v20, v20)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated Any enum = 21;
+		v21 := compiler.MapValueForKey(m, "enum")
+		if v21 != nil {
+			// repeated Any
+			x.Enum = make([]*Any, 0)
+			a, ok := v21.([]interface{})
+			if ok {
+				for _, item := range a {
+					y, err := NewAny(item, compiler.NewContext("enum", context))
+					if err != nil {
+						errors = append(errors, err)
+					}
+					x.Enum = append(x.Enum, y)
+				}
+			}
+		}
+		// float multiple_of = 22;
+		v22 := compiler.MapValueForKey(m, "multipleOf")
+		if v22 != nil {
+			switch v22 := v22.(type) {
+			case float64:
+				x.MultipleOf = v22
+			case float32:
+				x.MultipleOf = float64(v22)
+			case uint64:
+				x.MultipleOf = float64(v22)
+			case uint32:
+				x.MultipleOf = float64(v22)
+			case int64:
+				x.MultipleOf = float64(v22)
+			case int32:
+				x.MultipleOf = float64(v22)
+			case int:
+				x.MultipleOf = float64(v22)
+			default:
+				message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v22, v22)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated NamedAny vendor_extension = 23;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
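+// Note on the numeric type switches above (maximum, minimum, multipleOf):
+// depending on the literal in the source document, a YAML decoder may
+// surface a number as float64, float32, a sized or unsized integer, so each
+// case normalizes the value to float64. The integer-valued fields such as
+// maxLength and maxItems instead expect an int and widen it to int64.
+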
+// NewHeader creates an object of type Header if possible, returning an error if not.
+func NewHeader(in interface{}, context *compiler.Context) (*Header, error) {
+	errors := make([]error, 0)
+	x := &Header{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		requiredKeys := []string{"type"}
+		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
+		if len(missingKeys) > 0 {
+			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "pattern", "type", "uniqueItems"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string type = 1;
+		v1 := compiler.MapValueForKey(m, "type")
+		if v1 != nil {
+			x.Type, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [string number integer boolean array]
+			if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "integer", "boolean", "array"}, x.Type) {
+				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string format = 2;
+		v2 := compiler.MapValueForKey(m, "format")
+		if v2 != nil {
+			x.Format, ok = v2.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// PrimitivesItems items = 3;
+		v3 := compiler.MapValueForKey(m, "items")
+		if v3 != nil {
+			var err error
+			x.Items, err = NewPrimitivesItems(v3, compiler.NewContext("items", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// string collection_format = 4;
+		v4 := compiler.MapValueForKey(m, "collectionFormat")
+		if v4 != nil {
+			x.CollectionFormat, ok = v4.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [csv ssv tsv pipes]
+			if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) {
+				message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// Any default = 5;
+		v5 := compiler.MapValueForKey(m, "default")
+		if v5 != nil {
+			var err error
+			x.Default, err = NewAny(v5, compiler.NewContext("default", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// float maximum = 6;
+		v6 := compiler.MapValueForKey(m, "maximum")
+		if v6 != nil {
+			switch v6 := v6.(type) {
+			case float64:
+				x.Maximum = v6
+			case float32:
+				x.Maximum = float64(v6)
+			case uint64:
+				x.Maximum = float64(v6)
+			case uint32:
+				x.Maximum = float64(v6)
+			case int64:
+				x.Maximum = float64(v6)
+			case int32:
+				x.Maximum = float64(v6)
+			case int:
+				x.Maximum = float64(v6)
+			default:
+				message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v6, v6)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool exclusive_maximum = 7;
+		v7 := compiler.MapValueForKey(m, "exclusiveMaximum")
+		if v7 != nil {
+			x.ExclusiveMaximum, ok = v7.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v7, v7)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// float minimum = 8;
+		v8 := compiler.MapValueForKey(m, "minimum")
+		if v8 != nil {
+			switch v8 := v8.(type) {
+			case float64:
+				x.Minimum = v8
+			case float32:
+				x.Minimum = float64(v8)
+			case uint64:
+				x.Minimum = float64(v8)
+			case uint32:
+				x.Minimum = float64(v8)
+			case int64:
+				x.Minimum = float64(v8)
+			case int32:
+				x.Minimum = float64(v8)
+			case int:
+				x.Minimum = float64(v8)
+			default:
+				message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v8, v8)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool exclusive_minimum = 9;
+		v9 := compiler.MapValueForKey(m, "exclusiveMinimum")
+		if v9 != nil {
+			x.ExclusiveMinimum, ok = v9.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v9, v9)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 max_length = 10;
+		v10 := compiler.MapValueForKey(m, "maxLength")
+		if v10 != nil {
+			t, ok := v10.(int)
+			if ok {
+				x.MaxLength = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v10, v10)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 min_length = 11;
+		v11 := compiler.MapValueForKey(m, "minLength")
+		if v11 != nil {
+			t, ok := v11.(int)
+			if ok {
+				x.MinLength = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v11, v11)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string pattern = 12;
+		v12 := compiler.MapValueForKey(m, "pattern")
+		if v12 != nil {
+			x.Pattern, ok = v12.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v12, v12)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 max_items = 13;
+		v13 := compiler.MapValueForKey(m, "maxItems")
+		if v13 != nil {
+			t, ok := v13.(int)
+			if ok {
+				x.MaxItems = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v13, v13)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 min_items = 14;
+		v14 := compiler.MapValueForKey(m, "minItems")
+		if v14 != nil {
+			t, ok := v14.(int)
+			if ok {
+				x.MinItems = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v14, v14)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool unique_items = 15;
+		v15 := compiler.MapValueForKey(m, "uniqueItems")
+		if v15 != nil {
+			x.UniqueItems, ok = v15.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v15, v15)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated Any enum = 16;
+		v16 := compiler.MapValueForKey(m, "enum")
+		if v16 != nil {
+			// repeated Any
+			x.Enum = make([]*Any, 0)
+			a, ok := v16.([]interface{})
+			if ok {
+				for _, item := range a {
+					y, err := NewAny(item, compiler.NewContext("enum", context))
+					if err != nil {
+						errors = append(errors, err)
+					}
+					x.Enum = append(x.Enum, y)
+				}
+			}
+		}
+		// float multiple_of = 17;
+		v17 := compiler.MapValueForKey(m, "multipleOf")
+		if v17 != nil {
+			switch v17 := v17.(type) {
+			case float64:
+				x.MultipleOf = v17
+			case float32:
+				x.MultipleOf = float64(v17)
+			case uint64:
+				x.MultipleOf = float64(v17)
+			case uint32:
+				x.MultipleOf = float64(v17)
+			case int64:
+				x.MultipleOf = float64(v17)
+			case int32:
+				x.MultipleOf = float64(v17)
+			case int:
+				x.MultipleOf = float64(v17)
+			default:
+				message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v17, v17)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string description = 18;
+		v18 := compiler.MapValueForKey(m, "description")
+		if v18 != nil {
+			x.Description, ok = v18.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v18, v18)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated NamedAny vendor_extension = 19;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewHeaderParameterSubSchema creates an object of type HeaderParameterSubSchema if possible, returning an error if not.
+func NewHeaderParameterSubSchema(in interface{}, context *compiler.Context) (*HeaderParameterSubSchema, error) {
+	errors := make([]error, 0)
+	x := &HeaderParameterSubSchema{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// bool required = 1;
+		v1 := compiler.MapValueForKey(m, "required")
+		if v1 != nil {
+			x.Required, ok = v1.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string in = 2;
+		v2 := compiler.MapValueForKey(m, "in")
+		if v2 != nil {
+			x.In, ok = v2.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [header]
+			if ok && !compiler.StringArrayContainsValue([]string{"header"}, x.In) {
+				message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string description = 3;
+		v3 := compiler.MapValueForKey(m, "description")
+		if v3 != nil {
+			x.Description, ok = v3.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string name = 4;
+		v4 := compiler.MapValueForKey(m, "name")
+		if v4 != nil {
+			x.Name, ok = v4.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string type = 5;
+		v5 := compiler.MapValueForKey(m, "type")
+		if v5 != nil {
+			x.Type, ok = v5.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [string number boolean integer array]
+			if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) {
+				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string format = 6;
+		v6 := compiler.MapValueForKey(m, "format")
+		if v6 != nil {
+			x.Format, ok = v6.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v6, v6)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// PrimitivesItems items = 7;
+		v7 := compiler.MapValueForKey(m, "items")
+		if v7 != nil {
+			var err error
+			x.Items, err = NewPrimitivesItems(v7, compiler.NewContext("items", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// string collection_format = 8;
+		v8 := compiler.MapValueForKey(m, "collectionFormat")
+		if v8 != nil {
+			x.CollectionFormat, ok = v8.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [csv ssv tsv pipes]
+			if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) {
+				message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// Any default = 9;
+		v9 := compiler.MapValueForKey(m, "default")
+		if v9 != nil {
+			var err error
+			x.Default, err = NewAny(v9, compiler.NewContext("default", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// float maximum = 10;
+		v10 := compiler.MapValueForKey(m, "maximum")
+		if v10 != nil {
+			switch v10 := v10.(type) {
+			case float64:
+				x.Maximum = v10
+			case float32:
+				x.Maximum = float64(v10)
+			case uint64:
+				x.Maximum = float64(v10)
+			case uint32:
+				x.Maximum = float64(v10)
+			case int64:
+				x.Maximum = float64(v10)
+			case int32:
+				x.Maximum = float64(v10)
+			case int:
+				x.Maximum = float64(v10)
+			default:
+				message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v10, v10)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool exclusive_maximum = 11;
+		v11 := compiler.MapValueForKey(m, "exclusiveMaximum")
+		if v11 != nil {
+			x.ExclusiveMaximum, ok = v11.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v11, v11)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// float minimum = 12;
+		v12 := compiler.MapValueForKey(m, "minimum")
+		if v12 != nil {
+			switch v12 := v12.(type) {
+			case float64:
+				x.Minimum = v12
+			case float32:
+				x.Minimum = float64(v12)
+			case uint64:
+				x.Minimum = float64(v12)
+			case uint32:
+				x.Minimum = float64(v12)
+			case int64:
+				x.Minimum = float64(v12)
+			case int32:
+				x.Minimum = float64(v12)
+			case int:
+				x.Minimum = float64(v12)
+			default:
+				message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v12, v12)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool exclusive_minimum = 13;
+		v13 := compiler.MapValueForKey(m, "exclusiveMinimum")
+		if v13 != nil {
+			x.ExclusiveMinimum, ok = v13.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v13, v13)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 max_length = 14;
+		v14 := compiler.MapValueForKey(m, "maxLength")
+		if v14 != nil {
+			t, ok := v14.(int)
+			if ok {
+				x.MaxLength = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v14, v14)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 min_length = 15;
+		v15 := compiler.MapValueForKey(m, "minLength")
+		if v15 != nil {
+			t, ok := v15.(int)
+			if ok {
+				x.MinLength = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v15, v15)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string pattern = 16;
+		v16 := compiler.MapValueForKey(m, "pattern")
+		if v16 != nil {
+			x.Pattern, ok = v16.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v16, v16)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 max_items = 17;
+		v17 := compiler.MapValueForKey(m, "maxItems")
+		if v17 != nil {
+			t, ok := v17.(int)
+			if ok {
+				x.MaxItems = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v17, v17)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 min_items = 18;
+		v18 := compiler.MapValueForKey(m, "minItems")
+		if v18 != nil {
+			t, ok := v18.(int)
+			if ok {
+				x.MinItems = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v18, v18)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool unique_items = 19;
+		v19 := compiler.MapValueForKey(m, "uniqueItems")
+		if v19 != nil {
+			x.UniqueItems, ok = v19.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v19, v19)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated Any enum = 20;
+		v20 := compiler.MapValueForKey(m, "enum")
+		if v20 != nil {
+			// repeated Any
+			x.Enum = make([]*Any, 0)
+			a, ok := v20.([]interface{})
+			if ok {
+				for _, item := range a {
+					y, err := NewAny(item, compiler.NewContext("enum", context))
+					if err != nil {
+						errors = append(errors, err)
+					}
+					x.Enum = append(x.Enum, y)
+				}
+			}
+		}
+		// float multiple_of = 21;
+		v21 := compiler.MapValueForKey(m, "multipleOf")
+		if v21 != nil {
+			switch v21 := v21.(type) {
+			case float64:
+				x.MultipleOf = v21
+			case float32:
+				x.MultipleOf = float64(v21)
+			case uint64:
+				x.MultipleOf = float64(v21)
+			case uint32:
+				x.MultipleOf = float64(v21)
+			case int64:
+				x.MultipleOf = float64(v21)
+			case int32:
+				x.MultipleOf = float64(v21)
+			case int:
+				x.MultipleOf = float64(v21)
+			default:
+				message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v21, v21)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated NamedAny vendor_extension = 22;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewHeaders creates an object of type Headers if possible, returning an error if not.
+func NewHeaders(in interface{}, context *compiler.Context) (*Headers, error) {
+	errors := make([]error, 0)
+	x := &Headers{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		// repeated NamedHeader additional_properties = 1;
+		// MAP: Header
+		x.AdditionalProperties = make([]*NamedHeader, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				pair := &NamedHeader{}
+				pair.Name = k
+				var err error
+				pair.Value, err = NewHeader(v, compiler.NewContext(k, context))
+				if err != nil {
+					errors = append(errors, err)
+				}
+				x.AdditionalProperties = append(x.AdditionalProperties, pair)
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewInfo creates an object of type Info if possible, returning an error if not.
+func NewInfo(in interface{}, context *compiler.Context) (*Info, error) {
+	errors := make([]error, 0)
+	x := &Info{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		requiredKeys := []string{"title", "version"}
+		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
+		if len(missingKeys) > 0 {
+			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		allowedKeys := []string{"contact", "description", "license", "termsOfService", "title", "version"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string title = 1;
+		v1 := compiler.MapValueForKey(m, "title")
+		if v1 != nil {
+			x.Title, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for title: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string version = 2;
+		v2 := compiler.MapValueForKey(m, "version")
+		if v2 != nil {
+			x.Version, ok = v2.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for version: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string description = 3;
+		v3 := compiler.MapValueForKey(m, "description")
+		if v3 != nil {
+			x.Description, ok = v3.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string terms_of_service = 4;
+		v4 := compiler.MapValueForKey(m, "termsOfService")
+		if v4 != nil {
+			x.TermsOfService, ok = v4.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for termsOfService: %+v (%T)", v4, v4)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// Contact contact = 5;
+		v5 := compiler.MapValueForKey(m, "contact")
+		if v5 != nil {
+			var err error
+			x.Contact, err = NewContact(v5, compiler.NewContext("contact", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// License license = 6;
+		v6 := compiler.MapValueForKey(m, "license")
+		if v6 != nil {
+			var err error
+			x.License, err = NewLicense(v6, compiler.NewContext("license", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// repeated NamedAny vendor_extension = 7;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
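+// For example, the smallest map accepted above without error is one that
+// supplies both required keys, e.g. {"title": "Example API", "version":
+// "1.0.0"}; every other Info field is optional.
+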
+// NewItemsItem creates an object of type ItemsItem if possible, returning an error if not.
+func NewItemsItem(in interface{}, context *compiler.Context) (*ItemsItem, error) {
+	errors := make([]error, 0)
+	x := &ItemsItem{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value for item array: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		x.Schema = make([]*Schema, 0)
+		y, err := NewSchema(m, compiler.NewContext("<array>", context))
+		if err != nil {
+			return nil, err
+		}
+		x.Schema = append(x.Schema, y)
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewJsonReference creates an object of type JsonReference if possible, returning an error if not.
+func NewJsonReference(in interface{}, context *compiler.Context) (*JsonReference, error) {
+	errors := make([]error, 0)
+	x := &JsonReference{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		requiredKeys := []string{"$ref"}
+		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
+		if len(missingKeys) > 0 {
+			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		allowedKeys := []string{"$ref", "description"}
+		var allowedPatterns []*regexp.Regexp
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string _ref = 1;
+		v1 := compiler.MapValueForKey(m, "$ref")
+		if v1 != nil {
+			x.XRef, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for $ref: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string description = 2;
+		v2 := compiler.MapValueForKey(m, "description")
+		if v2 != nil {
+			x.Description, ok = v2.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewLicense creates an object of type License if possible, returning an error if not.
+func NewLicense(in interface{}, context *compiler.Context) (*License, error) {
+	errors := make([]error, 0)
+	x := &License{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		requiredKeys := []string{"name"}
+		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
+		if len(missingKeys) > 0 {
+			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		allowedKeys := []string{"name", "url"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string name = 1;
+		v1 := compiler.MapValueForKey(m, "name")
+		if v1 != nil {
+			x.Name, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string url = 2;
+		v2 := compiler.MapValueForKey(m, "url")
+		if v2 != nil {
+			x.Url, ok = v2.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for url: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated NamedAny vendor_extension = 3;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
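+// Illustrative sketch (not part of the generated API): these constructors
+// are driven from YAML parsed with gopkg.in/yaml.v2. Assuming a document
+// already decoded into a yaml.MapSlice and a nil parent context, building
+// a License might look roughly like this:
+//
+//	var node yaml.MapSlice
+//	data := []byte("name: Apache 2.0\nurl: https://www.apache.org/licenses/LICENSE-2.0")
+//	if err := yaml.Unmarshal(data, &node); err == nil {
+//		license, err := NewLicense(node, compiler.NewContext("license", nil))
+//		_, _ = license, err
+//	}
+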
+// NewNamedAny creates an object of type NamedAny if possible, returning an error if not.
+func NewNamedAny(in interface{}, context *compiler.Context) (*NamedAny, error) {
+	errors := make([]error, 0)
+	x := &NamedAny{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		allowedKeys := []string{"name", "value"}
+		var allowedPatterns []*regexp.Regexp
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string name = 1;
+		v1 := compiler.MapValueForKey(m, "name")
+		if v1 != nil {
+			x.Name, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// Any value = 2;
+		v2 := compiler.MapValueForKey(m, "value")
+		if v2 != nil {
+			var err error
+			x.Value, err = NewAny(v2, compiler.NewContext("value", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
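+// NewNamedAny above and the remaining Named* constructors below share one
+// shape: each decodes a {name, value} pair representing a single map entry
+// in protobuf form, differing only in the concrete type of the value.
+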
+// NewNamedHeader creates an object of type NamedHeader if possible, returning an error if not.
+func NewNamedHeader(in interface{}, context *compiler.Context) (*NamedHeader, error) {
+	errors := make([]error, 0)
+	x := &NamedHeader{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		allowedKeys := []string{"name", "value"}
+		var allowedPatterns []*regexp.Regexp
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string name = 1;
+		v1 := compiler.MapValueForKey(m, "name")
+		if v1 != nil {
+			x.Name, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// Header value = 2;
+		v2 := compiler.MapValueForKey(m, "value")
+		if v2 != nil {
+			var err error
+			x.Value, err = NewHeader(v2, compiler.NewContext("value", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewNamedParameter creates an object of type NamedParameter if possible, returning an error if not.
+func NewNamedParameter(in interface{}, context *compiler.Context) (*NamedParameter, error) {
+	errors := make([]error, 0)
+	x := &NamedParameter{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		allowedKeys := []string{"name", "value"}
+		var allowedPatterns []*regexp.Regexp
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string name = 1;
+		v1 := compiler.MapValueForKey(m, "name")
+		if v1 != nil {
+			x.Name, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// Parameter value = 2;
+		v2 := compiler.MapValueForKey(m, "value")
+		if v2 != nil {
+			var err error
+			x.Value, err = NewParameter(v2, compiler.NewContext("value", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewNamedPathItem creates an object of type NamedPathItem if possible, returning an error if not.
+func NewNamedPathItem(in interface{}, context *compiler.Context) (*NamedPathItem, error) {
+	errors := make([]error, 0)
+	x := &NamedPathItem{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		allowedKeys := []string{"name", "value"}
+		var allowedPatterns []*regexp.Regexp
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string name = 1;
+		v1 := compiler.MapValueForKey(m, "name")
+		if v1 != nil {
+			x.Name, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// PathItem value = 2;
+		v2 := compiler.MapValueForKey(m, "value")
+		if v2 != nil {
+			var err error
+			x.Value, err = NewPathItem(v2, compiler.NewContext("value", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewNamedResponse creates an object of type NamedResponse if possible, returning an error if not.
+func NewNamedResponse(in interface{}, context *compiler.Context) (*NamedResponse, error) {
+	errors := make([]error, 0)
+	x := &NamedResponse{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		allowedKeys := []string{"name", "value"}
+		var allowedPatterns []*regexp.Regexp
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string name = 1;
+		v1 := compiler.MapValueForKey(m, "name")
+		if v1 != nil {
+			x.Name, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// Response value = 2;
+		v2 := compiler.MapValueForKey(m, "value")
+		if v2 != nil {
+			var err error
+			x.Value, err = NewResponse(v2, compiler.NewContext("value", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewNamedResponseValue creates an object of type NamedResponseValue if possible, returning an error if not.
+func NewNamedResponseValue(in interface{}, context *compiler.Context) (*NamedResponseValue, error) {
+	errors := make([]error, 0)
+	x := &NamedResponseValue{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		allowedKeys := []string{"name", "value"}
+		var allowedPatterns []*regexp.Regexp
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string name = 1;
+		v1 := compiler.MapValueForKey(m, "name")
+		if v1 != nil {
+			x.Name, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// ResponseValue value = 2;
+		v2 := compiler.MapValueForKey(m, "value")
+		if v2 != nil {
+			var err error
+			x.Value, err = NewResponseValue(v2, compiler.NewContext("value", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewNamedSchema creates an object of type NamedSchema if possible, returning an error if not.
+func NewNamedSchema(in interface{}, context *compiler.Context) (*NamedSchema, error) {
+	errors := make([]error, 0)
+	x := &NamedSchema{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		allowedKeys := []string{"name", "value"}
+		var allowedPatterns []*regexp.Regexp
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string name = 1;
+		v1 := compiler.MapValueForKey(m, "name")
+		if v1 != nil {
+			x.Name, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// Schema value = 2;
+		v2 := compiler.MapValueForKey(m, "value")
+		if v2 != nil {
+			var err error
+			x.Value, err = NewSchema(v2, compiler.NewContext("value", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewNamedSecurityDefinitionsItem creates an object of type NamedSecurityDefinitionsItem if possible, returning an error if not.
+func NewNamedSecurityDefinitionsItem(in interface{}, context *compiler.Context) (*NamedSecurityDefinitionsItem, error) {
+	errors := make([]error, 0)
+	x := &NamedSecurityDefinitionsItem{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		allowedKeys := []string{"name", "value"}
+		var allowedPatterns []*regexp.Regexp
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string name = 1;
+		v1 := compiler.MapValueForKey(m, "name")
+		if v1 != nil {
+			x.Name, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// SecurityDefinitionsItem value = 2;
+		v2 := compiler.MapValueForKey(m, "value")
+		if v2 != nil {
+			var err error
+			x.Value, err = NewSecurityDefinitionsItem(v2, compiler.NewContext("value", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewNamedString creates an object of type NamedString if possible, returning an error if not.
+func NewNamedString(in interface{}, context *compiler.Context) (*NamedString, error) {
+	errors := make([]error, 0)
+	x := &NamedString{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		allowedKeys := []string{"name", "value"}
+		var allowedPatterns []*regexp.Regexp
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string name = 1;
+		v1 := compiler.MapValueForKey(m, "name")
+		if v1 != nil {
+			x.Name, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string value = 2;
+		v2 := compiler.MapValueForKey(m, "value")
+		if v2 != nil {
+			x.Value, ok = v2.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for value: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewNamedStringArray creates an object of type NamedStringArray if possible, returning an error if not.
+func NewNamedStringArray(in interface{}, context *compiler.Context) (*NamedStringArray, error) {
+	errors := make([]error, 0)
+	x := &NamedStringArray{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		allowedKeys := []string{"name", "value"}
+		var allowedPatterns []*regexp.Regexp
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string name = 1;
+		v1 := compiler.MapValueForKey(m, "name")
+		if v1 != nil {
+			x.Name, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// StringArray value = 2;
+		v2 := compiler.MapValueForKey(m, "value")
+		if v2 != nil {
+			var err error
+			x.Value, err = NewStringArray(v2, compiler.NewContext("value", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewNonBodyParameter creates an object of type NonBodyParameter if possible, returning an error if not.
+func NewNonBodyParameter(in interface{}, context *compiler.Context) (*NonBodyParameter, error) {
+	errors := make([]error, 0)
+	x := &NonBodyParameter{}
+	matched := false
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		requiredKeys := []string{"in", "name", "type"}
+		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
+		if len(missingKeys) > 0 {
+			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
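+		// This message is a oneof: try each parameter subtype against the
+		// same map and keep the first that parses cleanly; errors from
+		// non-matching subtypes are discarded once a match is found.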
+		// HeaderParameterSubSchema header_parameter_sub_schema = 1;
+		{
+			// an error here is acceptable; it just means the input is not this subtype
+			t, matchingError := NewHeaderParameterSubSchema(m, compiler.NewContext("headerParameterSubSchema", context))
+			if matchingError == nil {
+				x.Oneof = &NonBodyParameter_HeaderParameterSubSchema{HeaderParameterSubSchema: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+		// FormDataParameterSubSchema form_data_parameter_sub_schema = 2;
+		{
+			// an error here is acceptable; it just means the input is not this subtype
+			t, matchingError := NewFormDataParameterSubSchema(m, compiler.NewContext("formDataParameterSubSchema", context))
+			if matchingError == nil {
+				x.Oneof = &NonBodyParameter_FormDataParameterSubSchema{FormDataParameterSubSchema: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+		// QueryParameterSubSchema query_parameter_sub_schema = 3;
+		{
+			// an error here is acceptable; it just means the input is not this subtype
+			t, matchingError := NewQueryParameterSubSchema(m, compiler.NewContext("queryParameterSubSchema", context))
+			if matchingError == nil {
+				x.Oneof = &NonBodyParameter_QueryParameterSubSchema{QueryParameterSubSchema: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+		// PathParameterSubSchema path_parameter_sub_schema = 4;
+		{
+			// an error here is acceptable; it just means the input is not this subtype
+			t, matchingError := NewPathParameterSubSchema(m, compiler.NewContext("pathParameterSubSchema", context))
+			if matchingError == nil {
+				x.Oneof = &NonBodyParameter_PathParameterSubSchema{PathParameterSubSchema: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	if matched {
+		// since the oneof matched one of its possibilities, discard any matching errors
+		errors = make([]error, 0)
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewOauth2AccessCodeSecurity creates an object of type Oauth2AccessCodeSecurity if possible, returning an error if not.
+func NewOauth2AccessCodeSecurity(in interface{}, context *compiler.Context) (*Oauth2AccessCodeSecurity, error) {
+	errors := make([]error, 0)
+	x := &Oauth2AccessCodeSecurity{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		requiredKeys := []string{"authorizationUrl", "flow", "tokenUrl", "type"}
+		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
+		if len(missingKeys) > 0 {
+			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		allowedKeys := []string{"authorizationUrl", "description", "flow", "scopes", "tokenUrl", "type"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string type = 1;
+		v1 := compiler.MapValueForKey(m, "type")
+		if v1 != nil {
+			x.Type, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [oauth2]
+			if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) {
+				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string flow = 2;
+		v2 := compiler.MapValueForKey(m, "flow")
+		if v2 != nil {
+			x.Flow, ok = v2.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [accessCode]
+			if ok && !compiler.StringArrayContainsValue([]string{"accessCode"}, x.Flow) {
+				message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// Oauth2Scopes scopes = 3;
+		v3 := compiler.MapValueForKey(m, "scopes")
+		if v3 != nil {
+			var err error
+			x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// string authorization_url = 4;
+		v4 := compiler.MapValueForKey(m, "authorizationUrl")
+		if v4 != nil {
+			x.AuthorizationUrl, ok = v4.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for authorizationUrl: %+v (%T)", v4, v4)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string token_url = 5;
+		v5 := compiler.MapValueForKey(m, "tokenUrl")
+		if v5 != nil {
+			x.TokenUrl, ok = v5.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for tokenUrl: %+v (%T)", v5, v5)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string description = 6;
+		v6 := compiler.MapValueForKey(m, "description")
+		if v6 != nil {
+			x.Description, ok = v6.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v6, v6)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated NamedAny vendor_extension = 7;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewOauth2ApplicationSecurity creates an object of type Oauth2ApplicationSecurity if possible, returning an error if not.
+func NewOauth2ApplicationSecurity(in interface{}, context *compiler.Context) (*Oauth2ApplicationSecurity, error) {
+	errors := make([]error, 0)
+	x := &Oauth2ApplicationSecurity{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		requiredKeys := []string{"flow", "tokenUrl", "type"}
+		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
+		if len(missingKeys) > 0 {
+			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		allowedKeys := []string{"description", "flow", "scopes", "tokenUrl", "type"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string type = 1;
+		v1 := compiler.MapValueForKey(m, "type")
+		if v1 != nil {
+			x.Type, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [oauth2]
+			if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) {
+				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string flow = 2;
+		v2 := compiler.MapValueForKey(m, "flow")
+		if v2 != nil {
+			x.Flow, ok = v2.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [application]
+			if ok && !compiler.StringArrayContainsValue([]string{"application"}, x.Flow) {
+				message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// Oauth2Scopes scopes = 3;
+		v3 := compiler.MapValueForKey(m, "scopes")
+		if v3 != nil {
+			var err error
+			x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// string token_url = 4;
+		v4 := compiler.MapValueForKey(m, "tokenUrl")
+		if v4 != nil {
+			x.TokenUrl, ok = v4.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for tokenUrl: %+v (%T)", v4, v4)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string description = 5;
+		v5 := compiler.MapValueForKey(m, "description")
+		if v5 != nil {
+			x.Description, ok = v5.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v5, v5)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated NamedAny vendor_extension = 6;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewOauth2ImplicitSecurity creates an object of type Oauth2ImplicitSecurity if possible, returning an error if not.
+func NewOauth2ImplicitSecurity(in interface{}, context *compiler.Context) (*Oauth2ImplicitSecurity, error) {
+	errors := make([]error, 0)
+	x := &Oauth2ImplicitSecurity{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		requiredKeys := []string{"authorizationUrl", "flow", "type"}
+		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
+		if len(missingKeys) > 0 {
+			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		allowedKeys := []string{"authorizationUrl", "description", "flow", "scopes", "type"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string type = 1;
+		v1 := compiler.MapValueForKey(m, "type")
+		if v1 != nil {
+			x.Type, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [oauth2]
+			if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) {
+				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string flow = 2;
+		v2 := compiler.MapValueForKey(m, "flow")
+		if v2 != nil {
+			x.Flow, ok = v2.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [implicit]
+			if ok && !compiler.StringArrayContainsValue([]string{"implicit"}, x.Flow) {
+				message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// Oauth2Scopes scopes = 3;
+		v3 := compiler.MapValueForKey(m, "scopes")
+		if v3 != nil {
+			var err error
+			x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// string authorization_url = 4;
+		v4 := compiler.MapValueForKey(m, "authorizationUrl")
+		if v4 != nil {
+			x.AuthorizationUrl, ok = v4.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for authorizationUrl: %+v (%T)", v4, v4)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string description = 5;
+		v5 := compiler.MapValueForKey(m, "description")
+		if v5 != nil {
+			x.Description, ok = v5.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v5, v5)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated NamedAny vendor_extension = 6;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewOauth2PasswordSecurity creates an object of type Oauth2PasswordSecurity if possible, returning an error if not.
+func NewOauth2PasswordSecurity(in interface{}, context *compiler.Context) (*Oauth2PasswordSecurity, error) {
+	errors := make([]error, 0)
+	x := &Oauth2PasswordSecurity{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		requiredKeys := []string{"flow", "tokenUrl", "type"}
+		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
+		if len(missingKeys) > 0 {
+			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		allowedKeys := []string{"description", "flow", "scopes", "tokenUrl", "type"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string type = 1;
+		v1 := compiler.MapValueForKey(m, "type")
+		if v1 != nil {
+			x.Type, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [oauth2]
+			if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) {
+				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string flow = 2;
+		v2 := compiler.MapValueForKey(m, "flow")
+		if v2 != nil {
+			x.Flow, ok = v2.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [password]
+			if ok && !compiler.StringArrayContainsValue([]string{"password"}, x.Flow) {
+				message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// Oauth2Scopes scopes = 3;
+		v3 := compiler.MapValueForKey(m, "scopes")
+		if v3 != nil {
+			var err error
+			x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// string token_url = 4;
+		v4 := compiler.MapValueForKey(m, "tokenUrl")
+		if v4 != nil {
+			x.TokenUrl, ok = v4.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for tokenUrl: %+v (%T)", v4, v4)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string description = 5;
+		v5 := compiler.MapValueForKey(m, "description")
+		if v5 != nil {
+			x.Description, ok = v5.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v5, v5)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated NamedAny vendor_extension = 6;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewOauth2Scopes creates an object of type Oauth2Scopes if possible, returning an error if not.
+func NewOauth2Scopes(in interface{}, context *compiler.Context) (*Oauth2Scopes, error) {
+	errors := make([]error, 0)
+	x := &Oauth2Scopes{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		// repeated NamedString additional_properties = 1;
+		// MAP: string
+		x.AdditionalProperties = make([]*NamedString, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				pair := &NamedString{}
+				pair.Name = k
+				// checked assertion: report non-string scope values instead of panicking
+				if value, ok := v.(string); ok {
+					pair.Value = value
+					x.AdditionalProperties = append(x.AdditionalProperties, pair)
+				} else {
+					errors = append(errors, compiler.NewError(context, fmt.Sprintf("has unexpected value for scope %s: %+v (%T)", k, v, v)))
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewOperation creates an object of type Operation if possible, returning an error if not.
+func NewOperation(in interface{}, context *compiler.Context) (*Operation, error) {
+	errors := make([]error, 0)
+	x := &Operation{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		requiredKeys := []string{"responses"}
+		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
+		if len(missingKeys) > 0 {
+			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		allowedKeys := []string{"consumes", "deprecated", "description", "externalDocs", "operationId", "parameters", "produces", "responses", "schemes", "security", "summary", "tags"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// repeated string tags = 1;
+		v1 := compiler.MapValueForKey(m, "tags")
+		if v1 != nil {
+			v, ok := v1.([]interface{})
+			if ok {
+				x.Tags = compiler.ConvertInterfaceArrayToStringArray(v)
+			} else {
+				message := fmt.Sprintf("has unexpected value for tags: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string summary = 2;
+		v2 := compiler.MapValueForKey(m, "summary")
+		if v2 != nil {
+			x.Summary, ok = v2.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for summary: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string description = 3;
+		v3 := compiler.MapValueForKey(m, "description")
+		if v3 != nil {
+			x.Description, ok = v3.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// ExternalDocs external_docs = 4;
+		v4 := compiler.MapValueForKey(m, "externalDocs")
+		if v4 != nil {
+			var err error
+			x.ExternalDocs, err = NewExternalDocs(v4, compiler.NewContext("externalDocs", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// string operation_id = 5;
+		v5 := compiler.MapValueForKey(m, "operationId")
+		if v5 != nil {
+			x.OperationId, ok = v5.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for operationId: %+v (%T)", v5, v5)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated string produces = 6;
+		v6 := compiler.MapValueForKey(m, "produces")
+		if v6 != nil {
+			v, ok := v6.([]interface{})
+			if ok {
+				x.Produces = compiler.ConvertInterfaceArrayToStringArray(v)
+			} else {
+				message := fmt.Sprintf("has unexpected value for produces: %+v (%T)", v6, v6)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated string consumes = 7;
+		v7 := compiler.MapValueForKey(m, "consumes")
+		if v7 != nil {
+			v, ok := v7.([]interface{})
+			if ok {
+				x.Consumes = compiler.ConvertInterfaceArrayToStringArray(v)
+			} else {
+				message := fmt.Sprintf("has unexpected value for consumes: %+v (%T)", v7, v7)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated ParametersItem parameters = 8;
+		v8 := compiler.MapValueForKey(m, "parameters")
+		if v8 != nil {
+			// repeated ParametersItem
+			x.Parameters = make([]*ParametersItem, 0)
+			a, ok := v8.([]interface{})
+			if ok {
+				for _, item := range a {
+					y, err := NewParametersItem(item, compiler.NewContext("parameters", context))
+					if err != nil {
+						errors = append(errors, err)
+					}
+					x.Parameters = append(x.Parameters, y)
+				}
+			}
+		}
+		// Responses responses = 9;
+		v9 := compiler.MapValueForKey(m, "responses")
+		if v9 != nil {
+			var err error
+			x.Responses, err = NewResponses(v9, compiler.NewContext("responses", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// repeated string schemes = 10;
+		v10 := compiler.MapValueForKey(m, "schemes")
+		if v10 != nil {
+			v, ok := v10.([]interface{})
+			if ok {
+				x.Schemes = compiler.ConvertInterfaceArrayToStringArray(v)
+			} else {
+				message := fmt.Sprintf("has unexpected value for schemes: %+v (%T)", v10, v10)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [http https ws wss]
+			if ok && !compiler.StringArrayContainsValues([]string{"http", "https", "ws", "wss"}, x.Schemes) {
+				message := fmt.Sprintf("has unexpected value for schemes: %+v", v10)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool deprecated = 11;
+		v11 := compiler.MapValueForKey(m, "deprecated")
+		if v11 != nil {
+			x.Deprecated, ok = v11.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for deprecated: %+v (%T)", v11, v11)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated SecurityRequirement security = 12;
+		v12 := compiler.MapValueForKey(m, "security")
+		if v12 != nil {
+			// repeated SecurityRequirement
+			x.Security = make([]*SecurityRequirement, 0)
+			a, ok := v12.([]interface{})
+			if ok {
+				for _, item := range a {
+					y, err := NewSecurityRequirement(item, compiler.NewContext("security", context))
+					if err != nil {
+						errors = append(errors, err)
+					}
+					x.Security = append(x.Security, y)
+				}
+			}
+		}
+		// repeated NamedAny vendor_extension = 13;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewParameter creates an object of type Parameter if possible, returning an error if not.
+func NewParameter(in interface{}, context *compiler.Context) (*Parameter, error) {
+	errors := make([]error, 0)
+	x := &Parameter{}
+	matched := false
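+	// A Parameter is a oneof over BodyParameter and NonBodyParameter;
+	// both candidates are attempted against the same input below.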
+	// BodyParameter body_parameter = 1;
+	{
+		m, ok := compiler.UnpackMap(in)
+		if ok {
+			// an error here is acceptable; it just means the input is not this subtype
+			t, matchingError := NewBodyParameter(m, compiler.NewContext("bodyParameter", context))
+			if matchingError == nil {
+				x.Oneof = &Parameter_BodyParameter{BodyParameter: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	// NonBodyParameter non_body_parameter = 2;
+	{
+		m, ok := compiler.UnpackMap(in)
+		if ok {
+			// an error here is acceptable; it just means the input is not this subtype
+			t, matchingError := NewNonBodyParameter(m, compiler.NewContext("nonBodyParameter", context))
+			if matchingError == nil {
+				x.Oneof = &Parameter_NonBodyParameter{NonBodyParameter: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	if matched {
+		// since the oneof matched one of its possibilities, discard any matching errors
+		errors = make([]error, 0)
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewParameterDefinitions creates an object of type ParameterDefinitions if possible, returning an error if not.
+func NewParameterDefinitions(in interface{}, context *compiler.Context) (*ParameterDefinitions, error) {
+	errors := make([]error, 0)
+	x := &ParameterDefinitions{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		// repeated NamedParameter additional_properties = 1;
+		// MAP: Parameter
+		x.AdditionalProperties = make([]*NamedParameter, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				pair := &NamedParameter{}
+				pair.Name = k
+				var err error
+				pair.Value, err = NewParameter(v, compiler.NewContext(k, context))
+				if err != nil {
+					errors = append(errors, err)
+				}
+				x.AdditionalProperties = append(x.AdditionalProperties, pair)
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewParametersItem creates an object of type ParametersItem if possible, returning an error if not.
+func NewParametersItem(in interface{}, context *compiler.Context) (*ParametersItem, error) {
+	errors := make([]error, 0)
+	x := &ParametersItem{}
+	matched := false
+	// Parameter parameter = 1;
+	{
+		m, ok := compiler.UnpackMap(in)
+		if ok {
+			// an error here is acceptable; it just means the input is not this subtype
+			t, matchingError := NewParameter(m, compiler.NewContext("parameter", context))
+			if matchingError == nil {
+				x.Oneof = &ParametersItem_Parameter{Parameter: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	// JsonReference json_reference = 2;
+	{
+		m, ok := compiler.UnpackMap(in)
+		if ok {
+			// an error here is acceptable; it just means the input is not this subtype
+			t, matchingError := NewJsonReference(m, compiler.NewContext("jsonReference", context))
+			if matchingError == nil {
+				x.Oneof = &ParametersItem_JsonReference{JsonReference: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	if matched {
+		// since the oneof matched one of its possibilities, discard any matching errors
+		errors = make([]error, 0)
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewPathItem creates an object of type PathItem if possible, returning an error if not.
+func NewPathItem(in interface{}, context *compiler.Context) (*PathItem, error) {
+	errors := make([]error, 0)
+	x := &PathItem{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		allowedKeys := []string{"$ref", "delete", "get", "head", "options", "parameters", "patch", "post", "put"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string _ref = 1;
+		v1 := compiler.MapValueForKey(m, "$ref")
+		if v1 != nil {
+			x.XRef, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for $ref: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// Operation get = 2;
+		v2 := compiler.MapValueForKey(m, "get")
+		if v2 != nil {
+			var err error
+			x.Get, err = NewOperation(v2, compiler.NewContext("get", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// Operation put = 3;
+		v3 := compiler.MapValueForKey(m, "put")
+		if v3 != nil {
+			var err error
+			x.Put, err = NewOperation(v3, compiler.NewContext("put", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// Operation post = 4;
+		v4 := compiler.MapValueForKey(m, "post")
+		if v4 != nil {
+			var err error
+			x.Post, err = NewOperation(v4, compiler.NewContext("post", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// Operation delete = 5;
+		v5 := compiler.MapValueForKey(m, "delete")
+		if v5 != nil {
+			var err error
+			x.Delete, err = NewOperation(v5, compiler.NewContext("delete", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// Operation options = 6;
+		v6 := compiler.MapValueForKey(m, "options")
+		if v6 != nil {
+			var err error
+			x.Options, err = NewOperation(v6, compiler.NewContext("options", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// Operation head = 7;
+		v7 := compiler.MapValueForKey(m, "head")
+		if v7 != nil {
+			var err error
+			x.Head, err = NewOperation(v7, compiler.NewContext("head", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// Operation patch = 8;
+		v8 := compiler.MapValueForKey(m, "patch")
+		if v8 != nil {
+			var err error
+			x.Patch, err = NewOperation(v8, compiler.NewContext("patch", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// repeated ParametersItem parameters = 9;
+		v9 := compiler.MapValueForKey(m, "parameters")
+		if v9 != nil {
+			// repeated ParametersItem
+			x.Parameters = make([]*ParametersItem, 0)
+			a, ok := v9.([]interface{})
+			if ok {
+				for _, item := range a {
+					y, err := NewParametersItem(item, compiler.NewContext("parameters", context))
+					if err != nil {
+						errors = append(errors, err)
+					}
+					x.Parameters = append(x.Parameters, y)
+				}
+			}
+		}
+		// repeated NamedAny vendor_extension = 10;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewPathParameterSubSchema creates an object of type PathParameterSubSchema if possible, returning an error if not.
+func NewPathParameterSubSchema(in interface{}, context *compiler.Context) (*PathParameterSubSchema, error) {
+	errors := make([]error, 0)
+	x := &PathParameterSubSchema{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		requiredKeys := []string{"required"}
+		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
+		if len(missingKeys) > 0 {
+			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// bool required = 1;
+		v1 := compiler.MapValueForKey(m, "required")
+		if v1 != nil {
+			x.Required, ok = v1.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string in = 2;
+		v2 := compiler.MapValueForKey(m, "in")
+		if v2 != nil {
+			x.In, ok = v2.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [path]
+			if ok && !compiler.StringArrayContainsValue([]string{"path"}, x.In) {
+				message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string description = 3;
+		v3 := compiler.MapValueForKey(m, "description")
+		if v3 != nil {
+			x.Description, ok = v3.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string name = 4;
+		v4 := compiler.MapValueForKey(m, "name")
+		if v4 != nil {
+			x.Name, ok = v4.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string type = 5;
+		v5 := compiler.MapValueForKey(m, "type")
+		if v5 != nil {
+			x.Type, ok = v5.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [string number boolean integer array]
+			if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) {
+				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string format = 6;
+		v6 := compiler.MapValueForKey(m, "format")
+		if v6 != nil {
+			x.Format, ok = v6.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v6, v6)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// PrimitivesItems items = 7;
+		v7 := compiler.MapValueForKey(m, "items")
+		if v7 != nil {
+			var err error
+			x.Items, err = NewPrimitivesItems(v7, compiler.NewContext("items", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// string collection_format = 8;
+		v8 := compiler.MapValueForKey(m, "collectionFormat")
+		if v8 != nil {
+			x.CollectionFormat, ok = v8.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [csv ssv tsv pipes]
+			if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) {
+				message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// Any default = 9;
+		v9 := compiler.MapValueForKey(m, "default")
+		if v9 != nil {
+			var err error
+			x.Default, err = NewAny(v9, compiler.NewContext("default", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// float maximum = 10;
+		v10 := compiler.MapValueForKey(m, "maximum")
+		if v10 != nil {
+			switch v10 := v10.(type) {
+			case float64:
+				x.Maximum = v10
+			case float32:
+				x.Maximum = float64(v10)
+			case uint64:
+				x.Maximum = float64(v10)
+			case uint32:
+				x.Maximum = float64(v10)
+			case int64:
+				x.Maximum = float64(v10)
+			case int32:
+				x.Maximum = float64(v10)
+			case int:
+				x.Maximum = float64(v10)
+			default:
+				message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v10, v10)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool exclusive_maximum = 11;
+		v11 := compiler.MapValueForKey(m, "exclusiveMaximum")
+		if v11 != nil {
+			x.ExclusiveMaximum, ok = v11.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v11, v11)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// float minimum = 12;
+		v12 := compiler.MapValueForKey(m, "minimum")
+		if v12 != nil {
+			switch v12 := v12.(type) {
+			case float64:
+				x.Minimum = v12
+			case float32:
+				x.Minimum = float64(v12)
+			case uint64:
+				x.Minimum = float64(v12)
+			case uint32:
+				x.Minimum = float64(v12)
+			case int64:
+				x.Minimum = float64(v12)
+			case int32:
+				x.Minimum = float64(v12)
+			case int:
+				x.Minimum = float64(v12)
+			default:
+				message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v12, v12)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool exclusive_minimum = 13;
+		v13 := compiler.MapValueForKey(m, "exclusiveMinimum")
+		if v13 != nil {
+			x.ExclusiveMinimum, ok = v13.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v13, v13)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 max_length = 14;
+		v14 := compiler.MapValueForKey(m, "maxLength")
+		if v14 != nil {
+			t, ok := v14.(int)
+			if ok {
+				x.MaxLength = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v14, v14)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 min_length = 15;
+		v15 := compiler.MapValueForKey(m, "minLength")
+		if v15 != nil {
+			t, ok := v15.(int)
+			if ok {
+				x.MinLength = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v15, v15)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string pattern = 16;
+		v16 := compiler.MapValueForKey(m, "pattern")
+		if v16 != nil {
+			x.Pattern, ok = v16.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v16, v16)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 max_items = 17;
+		v17 := compiler.MapValueForKey(m, "maxItems")
+		if v17 != nil {
+			t, ok := v17.(int)
+			if ok {
+				x.MaxItems = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v17, v17)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 min_items = 18;
+		v18 := compiler.MapValueForKey(m, "minItems")
+		if v18 != nil {
+			t, ok := v18.(int)
+			if ok {
+				x.MinItems = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v18, v18)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool unique_items = 19;
+		v19 := compiler.MapValueForKey(m, "uniqueItems")
+		if v19 != nil {
+			x.UniqueItems, ok = v19.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v19, v19)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated Any enum = 20;
+		v20 := compiler.MapValueForKey(m, "enum")
+		if v20 != nil {
+			// repeated Any
+			x.Enum = make([]*Any, 0)
+			a, ok := v20.([]interface{})
+			if ok {
+				for _, item := range a {
+					y, err := NewAny(item, compiler.NewContext("enum", context))
+					if err != nil {
+						errors = append(errors, err)
+					}
+					x.Enum = append(x.Enum, y)
+				}
+			}
+		}
+		// float multiple_of = 21;
+		v21 := compiler.MapValueForKey(m, "multipleOf")
+		if v21 != nil {
+			switch v21 := v21.(type) {
+			case float64:
+				x.MultipleOf = v21
+			case float32:
+				x.MultipleOf = float64(v21)
+			case uint64:
+				x.MultipleOf = float64(v21)
+			case uint32:
+				x.MultipleOf = float64(v21)
+			case int64:
+				x.MultipleOf = float64(v21)
+			case int32:
+				x.MultipleOf = float64(v21)
+			case int:
+				x.MultipleOf = float64(v21)
+			default:
+				message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v21, v21)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated NamedAny vendor_extension = 22;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
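+// Note on the numeric type switches above (maximum, minimum, multipleOf): the
+// YAML unmarshaller may deliver a scalar as any of several Go numeric types,
+// so the generated code normalizes each one to float64. A minimal stand-alone
+// sketch of the same coercion (toFloat64 is a hypothetical helper, not part
+// of this package):
+//
+//	func toFloat64(v interface{}) (float64, bool) {
+//		switch n := v.(type) {
+//		case float64:
+//			return n, true
+//		case float32:
+//			return float64(n), true
+//		case int:
+//			return float64(n), true
+//		case int32:
+//			return float64(n), true
+//		case int64:
+//			return float64(n), true
+//		case uint32:
+//			return float64(n), true
+//		case uint64:
+//			return float64(n), true
+//		default:
+//			return 0, false
+//		}
+//	}
+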
+// NewPaths creates an object of type Paths if possible, returning an error if not.
+func NewPaths(in interface{}, context *compiler.Context) (*Paths, error) {
+	errors := make([]error, 0)
+	x := &Paths{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		allowedKeys := []string{}
+		allowedPatterns := []*regexp.Regexp{pattern0, pattern1}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// repeated NamedAny vendor_extension = 1;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+		// repeated NamedPathItem path = 2;
+		// MAP: PathItem ^/
+		x.Path = make([]*NamedPathItem, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "/") {
+					pair := &NamedPathItem{}
+					pair.Name = k
+					var err error
+					pair.Value, err = NewPathItem(v, compiler.NewContext(k, context))
+					if err != nil {
+						errors = append(errors, err)
+					}
+					x.Path = append(x.Path, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
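+// Illustrative input for NewPaths (not part of the generated code): given a
+// YAML fragment such as
+//
+//	/pets:
+//	  get:
+//	    responses:
+//	      "200":
+//	        description: ok
+//	x-origin: generated
+//
+// the "/pets" key is routed through NewPathItem into x.Path, the "x-origin"
+// key is captured as a vendor extension, and any other top-level key is
+// reported as an invalid property.
+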
+// NewPrimitivesItems creates an object of type PrimitivesItems if possible, returning an error if not.
+func NewPrimitivesItems(in interface{}, context *compiler.Context) (*PrimitivesItems, error) {
+	errors := make([]error, 0)
+	x := &PrimitivesItems{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		allowedKeys := []string{"collectionFormat", "default", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "pattern", "type", "uniqueItems"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string type = 1;
+		v1 := compiler.MapValueForKey(m, "type")
+		if v1 != nil {
+			x.Type, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [string number integer boolean array]
+			if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "integer", "boolean", "array"}, x.Type) {
+				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string format = 2;
+		v2 := compiler.MapValueForKey(m, "format")
+		if v2 != nil {
+			x.Format, ok = v2.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// PrimitivesItems items = 3;
+		v3 := compiler.MapValueForKey(m, "items")
+		if v3 != nil {
+			var err error
+			x.Items, err = NewPrimitivesItems(v3, compiler.NewContext("items", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// string collection_format = 4;
+		v4 := compiler.MapValueForKey(m, "collectionFormat")
+		if v4 != nil {
+			x.CollectionFormat, ok = v4.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [csv ssv tsv pipes]
+			if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) {
+				message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// Any default = 5;
+		v5 := compiler.MapValueForKey(m, "default")
+		if v5 != nil {
+			var err error
+			x.Default, err = NewAny(v5, compiler.NewContext("default", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// float maximum = 6;
+		v6 := compiler.MapValueForKey(m, "maximum")
+		if v6 != nil {
+			switch v6 := v6.(type) {
+			case float64:
+				x.Maximum = v6
+			case float32:
+				x.Maximum = float64(v6)
+			case uint64:
+				x.Maximum = float64(v6)
+			case uint32:
+				x.Maximum = float64(v6)
+			case int64:
+				x.Maximum = float64(v6)
+			case int32:
+				x.Maximum = float64(v6)
+			case int:
+				x.Maximum = float64(v6)
+			default:
+				message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v6, v6)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool exclusive_maximum = 7;
+		v7 := compiler.MapValueForKey(m, "exclusiveMaximum")
+		if v7 != nil {
+			x.ExclusiveMaximum, ok = v7.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v7, v7)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// float minimum = 8;
+		v8 := compiler.MapValueForKey(m, "minimum")
+		if v8 != nil {
+			switch v8 := v8.(type) {
+			case float64:
+				x.Minimum = v8
+			case float32:
+				x.Minimum = float64(v8)
+			case uint64:
+				x.Minimum = float64(v8)
+			case uint32:
+				x.Minimum = float64(v8)
+			case int64:
+				x.Minimum = float64(v8)
+			case int32:
+				x.Minimum = float64(v8)
+			case int:
+				x.Minimum = float64(v8)
+			default:
+				message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v8, v8)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool exclusive_minimum = 9;
+		v9 := compiler.MapValueForKey(m, "exclusiveMinimum")
+		if v9 != nil {
+			x.ExclusiveMinimum, ok = v9.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v9, v9)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 max_length = 10;
+		v10 := compiler.MapValueForKey(m, "maxLength")
+		if v10 != nil {
+			t, ok := v10.(int)
+			if ok {
+				x.MaxLength = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v10, v10)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 min_length = 11;
+		v11 := compiler.MapValueForKey(m, "minLength")
+		if v11 != nil {
+			t, ok := v11.(int)
+			if ok {
+				x.MinLength = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v11, v11)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string pattern = 12;
+		v12 := compiler.MapValueForKey(m, "pattern")
+		if v12 != nil {
+			x.Pattern, ok = v12.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v12, v12)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 max_items = 13;
+		v13 := compiler.MapValueForKey(m, "maxItems")
+		if v13 != nil {
+			t, ok := v13.(int)
+			if ok {
+				x.MaxItems = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v13, v13)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 min_items = 14;
+		v14 := compiler.MapValueForKey(m, "minItems")
+		if v14 != nil {
+			t, ok := v14.(int)
+			if ok {
+				x.MinItems = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v14, v14)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool unique_items = 15;
+		v15 := compiler.MapValueForKey(m, "uniqueItems")
+		if v15 != nil {
+			x.UniqueItems, ok = v15.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v15, v15)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated Any enum = 16;
+		v16 := compiler.MapValueForKey(m, "enum")
+		if v16 != nil {
+			// repeated Any
+			x.Enum = make([]*Any, 0)
+			a, ok := v16.([]interface{})
+			if ok {
+				for _, item := range a {
+					y, err := NewAny(item, compiler.NewContext("enum", context))
+					if err != nil {
+						errors = append(errors, err)
+					}
+					x.Enum = append(x.Enum, y)
+				}
+			}
+		}
+		// float multiple_of = 17;
+		v17 := compiler.MapValueForKey(m, "multipleOf")
+		if v17 != nil {
+			switch v17 := v17.(type) {
+			case float64:
+				x.MultipleOf = v17
+			case float32:
+				x.MultipleOf = float64(v17)
+			case uint64:
+				x.MultipleOf = float64(v17)
+			case uint32:
+				x.MultipleOf = float64(v17)
+			case int64:
+				x.MultipleOf = float64(v17)
+			case int32:
+				x.MultipleOf = float64(v17)
+			case int:
+				x.MultipleOf = float64(v17)
+			default:
+				message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v17, v17)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated NamedAny vendor_extension = 18;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewProperties creates an object of type Properties if possible, returning an error if not.
+func NewProperties(in interface{}, context *compiler.Context) (*Properties, error) {
+	errors := make([]error, 0)
+	x := &Properties{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		// repeated NamedSchema additional_properties = 1;
+		// MAP: Schema
+		x.AdditionalProperties = make([]*NamedSchema, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				pair := &NamedSchema{}
+				pair.Name = k
+				var err error
+				pair.Value, err = NewSchema(v, compiler.NewContext(k, context))
+				if err != nil {
+					errors = append(errors, err)
+				}
+				x.AdditionalProperties = append(x.AdditionalProperties, pair)
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewQueryParameterSubSchema creates an object of type QueryParameterSubSchema if possible, returning an error if not.
+func NewQueryParameterSubSchema(in interface{}, context *compiler.Context) (*QueryParameterSubSchema, error) {
+	errors := make([]error, 0)
+	x := &QueryParameterSubSchema{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		allowedKeys := []string{"allowEmptyValue", "collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// bool required = 1;
+		v1 := compiler.MapValueForKey(m, "required")
+		if v1 != nil {
+			x.Required, ok = v1.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string in = 2;
+		v2 := compiler.MapValueForKey(m, "in")
+		if v2 != nil {
+			x.In, ok = v2.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [query]
+			if ok && !compiler.StringArrayContainsValue([]string{"query"}, x.In) {
+				message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string description = 3;
+		v3 := compiler.MapValueForKey(m, "description")
+		if v3 != nil {
+			x.Description, ok = v3.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string name = 4;
+		v4 := compiler.MapValueForKey(m, "name")
+		if v4 != nil {
+			x.Name, ok = v4.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool allow_empty_value = 5;
+		v5 := compiler.MapValueForKey(m, "allowEmptyValue")
+		if v5 != nil {
+			x.AllowEmptyValue, ok = v5.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for allowEmptyValue: %+v (%T)", v5, v5)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string type = 6;
+		v6 := compiler.MapValueForKey(m, "type")
+		if v6 != nil {
+			x.Type, ok = v6.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [string number boolean integer array]
+			if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) {
+				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string format = 7;
+		v7 := compiler.MapValueForKey(m, "format")
+		if v7 != nil {
+			x.Format, ok = v7.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v7, v7)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// PrimitivesItems items = 8;
+		v8 := compiler.MapValueForKey(m, "items")
+		if v8 != nil {
+			var err error
+			x.Items, err = NewPrimitivesItems(v8, compiler.NewContext("items", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// string collection_format = 9;
+		v9 := compiler.MapValueForKey(m, "collectionFormat")
+		if v9 != nil {
+			x.CollectionFormat, ok = v9.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [csv ssv tsv pipes multi]
+			if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes", "multi"}, x.CollectionFormat) {
+				message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// Any default = 10;
+		v10 := compiler.MapValueForKey(m, "default")
+		if v10 != nil {
+			var err error
+			x.Default, err = NewAny(v10, compiler.NewContext("default", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// float maximum = 11;
+		v11 := compiler.MapValueForKey(m, "maximum")
+		if v11 != nil {
+			switch v11 := v11.(type) {
+			case float64:
+				x.Maximum = v11
+			case float32:
+				x.Maximum = float64(v11)
+			case uint64:
+				x.Maximum = float64(v11)
+			case uint32:
+				x.Maximum = float64(v11)
+			case int64:
+				x.Maximum = float64(v11)
+			case int32:
+				x.Maximum = float64(v11)
+			case int:
+				x.Maximum = float64(v11)
+			default:
+				message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v11, v11)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool exclusive_maximum = 12;
+		v12 := compiler.MapValueForKey(m, "exclusiveMaximum")
+		if v12 != nil {
+			x.ExclusiveMaximum, ok = v12.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v12, v12)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// float minimum = 13;
+		v13 := compiler.MapValueForKey(m, "minimum")
+		if v13 != nil {
+			switch v13 := v13.(type) {
+			case float64:
+				x.Minimum = v13
+			case float32:
+				x.Minimum = float64(v13)
+			case uint64:
+				x.Minimum = float64(v13)
+			case uint32:
+				x.Minimum = float64(v13)
+			case int64:
+				x.Minimum = float64(v13)
+			case int32:
+				x.Minimum = float64(v13)
+			case int:
+				x.Minimum = float64(v13)
+			default:
+				message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v13, v13)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool exclusive_minimum = 14;
+		v14 := compiler.MapValueForKey(m, "exclusiveMinimum")
+		if v14 != nil {
+			x.ExclusiveMinimum, ok = v14.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v14, v14)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 max_length = 15;
+		v15 := compiler.MapValueForKey(m, "maxLength")
+		if v15 != nil {
+			t, ok := v15.(int)
+			if ok {
+				x.MaxLength = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v15, v15)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 min_length = 16;
+		v16 := compiler.MapValueForKey(m, "minLength")
+		if v16 != nil {
+			t, ok := v16.(int)
+			if ok {
+				x.MinLength = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v16, v16)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string pattern = 17;
+		v17 := compiler.MapValueForKey(m, "pattern")
+		if v17 != nil {
+			x.Pattern, ok = v17.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v17, v17)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 max_items = 18;
+		v18 := compiler.MapValueForKey(m, "maxItems")
+		if v18 != nil {
+			t, ok := v18.(int)
+			if ok {
+				x.MaxItems = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v18, v18)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 min_items = 19;
+		v19 := compiler.MapValueForKey(m, "minItems")
+		if v19 != nil {
+			t, ok := v19.(int)
+			if ok {
+				x.MinItems = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v19, v19)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool unique_items = 20;
+		v20 := compiler.MapValueForKey(m, "uniqueItems")
+		if v20 != nil {
+			x.UniqueItems, ok = v20.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v20, v20)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated Any enum = 21;
+		v21 := compiler.MapValueForKey(m, "enum")
+		if v21 != nil {
+			// repeated Any
+			x.Enum = make([]*Any, 0)
+			a, ok := v21.([]interface{})
+			if ok {
+				for _, item := range a {
+					y, err := NewAny(item, compiler.NewContext("enum", context))
+					if err != nil {
+						errors = append(errors, err)
+					}
+					x.Enum = append(x.Enum, y)
+				}
+			}
+		}
+		// float multiple_of = 22;
+		v22 := compiler.MapValueForKey(m, "multipleOf")
+		if v22 != nil {
+			switch v22 := v22.(type) {
+			case float64:
+				x.MultipleOf = v22
+			case float32:
+				x.MultipleOf = float64(v22)
+			case uint64:
+				x.MultipleOf = float64(v22)
+			case uint32:
+				x.MultipleOf = float64(v22)
+			case int64:
+				x.MultipleOf = float64(v22)
+			case int32:
+				x.MultipleOf = float64(v22)
+			case int:
+				x.MultipleOf = float64(v22)
+			default:
+				message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v22, v22)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated NamedAny vendor_extension = 23;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewResponse creates an object of type Response if possible, returning an error if not.
+func NewResponse(in interface{}, context *compiler.Context) (*Response, error) {
+	errors := make([]error, 0)
+	x := &Response{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		requiredKeys := []string{"description"}
+		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
+		if len(missingKeys) > 0 {
+			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		allowedKeys := []string{"description", "examples", "headers", "schema"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string description = 1;
+		v1 := compiler.MapValueForKey(m, "description")
+		if v1 != nil {
+			x.Description, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// SchemaItem schema = 2;
+		v2 := compiler.MapValueForKey(m, "schema")
+		if v2 != nil {
+			var err error
+			x.Schema, err = NewSchemaItem(v2, compiler.NewContext("schema", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// Headers headers = 3;
+		v3 := compiler.MapValueForKey(m, "headers")
+		if v3 != nil {
+			var err error
+			x.Headers, err = NewHeaders(v3, compiler.NewContext("headers", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// Examples examples = 4;
+		v4 := compiler.MapValueForKey(m, "examples")
+		if v4 != nil {
+			var err error
+			x.Examples, err = NewExamples(v4, compiler.NewContext("examples", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// repeated NamedAny vendor_extension = 5;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
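+// Note on the vendor-extension loops ("MAP: Any ^x-") used throughout: each
+// "x-" key is first offered to compiler.HandleExtension, which may pass the
+// value to a registered extension handler; if the key is not handled, the raw
+// value is wrapped via NewAny so the original YAML is preserved. A hedged
+// sketch of reading an extension back out of a parsed Response (resp is
+// assumed to be a *Response produced by NewResponse):
+//
+//	for _, ext := range resp.VendorExtension {
+//		if ext.Name == "x-origin" {
+//			fmt.Println(ext.Value.Yaml) // raw YAML of the extension value
+//		}
+//	}
+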
+// NewResponseDefinitions creates an object of type ResponseDefinitions if possible, returning an error if not.
+func NewResponseDefinitions(in interface{}, context *compiler.Context) (*ResponseDefinitions, error) {
+	errors := make([]error, 0)
+	x := &ResponseDefinitions{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		// repeated NamedResponse additional_properties = 1;
+		// MAP: Response
+		x.AdditionalProperties = make([]*NamedResponse, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				pair := &NamedResponse{}
+				pair.Name = k
+				var err error
+				pair.Value, err = NewResponse(v, compiler.NewContext(k, context))
+				if err != nil {
+					errors = append(errors, err)
+				}
+				x.AdditionalProperties = append(x.AdditionalProperties, pair)
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewResponseValue creates an object of type ResponseValue if possible, returning an error if not.
+func NewResponseValue(in interface{}, context *compiler.Context) (*ResponseValue, error) {
+	errors := make([]error, 0)
+	x := &ResponseValue{}
+	matched := false
+	// Response response = 1;
+	{
+		m, ok := compiler.UnpackMap(in)
+		if ok {
+			// errors might be ok here; they just mean we don't have the right subtype
+			t, matchingError := NewResponse(m, compiler.NewContext("response", context))
+			if matchingError == nil {
+				x.Oneof = &ResponseValue_Response{Response: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	// JsonReference json_reference = 2;
+	{
+		m, ok := compiler.UnpackMap(in)
+		if ok {
+			// errors might be ok here; they just mean we don't have the right subtype
+			t, matchingError := NewJsonReference(m, compiler.NewContext("jsonReference", context))
+			if matchingError == nil {
+				x.Oneof = &ResponseValue_JsonReference{JsonReference: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	if matched {
+		// since the oneof matched one of its possibilities, discard any matching errors
+		errors = make([]error, 0)
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
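+// The oneof constructors (NewResponseValue above, NewSchemaItem and
+// NewSecurityDefinitionsItem below) share a trial-parse pattern: every
+// candidate message type is attempted against the same input, a successful
+// parse sets the oneof (a later success overwrites an earlier one), and once
+// anything matched, the accumulated mismatch errors are discarded as expected
+// probing failures. Condensed sketch (candidates is a hypothetical slice of
+// parse functions with types elided; the generated code unrolls the loop):
+//
+//	matched := false
+//	for _, try := range candidates {
+//		if t, err := try(in); err == nil {
+//			x.Oneof = t
+//			matched = true
+//		} else {
+//			errors = append(errors, err)
+//		}
+//	}
+//	if matched {
+//		errors = make([]error, 0)
+//	}
+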
+// NewResponses creates an object of type Responses if possible, returning an error if not.
+func NewResponses(in interface{}, context *compiler.Context) (*Responses, error) {
+	errors := make([]error, 0)
+	x := &Responses{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		allowedKeys := []string{}
+		allowedPatterns := []*regexp.Regexp{pattern2, pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// repeated NamedResponseValue response_code = 1;
+		// MAP: ResponseValue ^([0-9]{3})$|^(default)$
+		x.ResponseCode = make([]*NamedResponseValue, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if pattern2.MatchString(k) {
+					pair := &NamedResponseValue{}
+					pair.Name = k
+					var err error
+					pair.Value, err = NewResponseValue(v, compiler.NewContext(k, context))
+					if err != nil {
+						errors = append(errors, err)
+					}
+					x.ResponseCode = append(x.ResponseCode, pair)
+				}
+			}
+		}
+		// repeated NamedAny vendor_extension = 2;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewSchema creates an object of type Schema if possible, returning an error if not.
+func NewSchema(in interface{}, context *compiler.Context) (*Schema, error) {
+	errors := make([]error, 0)
+	x := &Schema{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		allowedKeys := []string{"$ref", "additionalProperties", "allOf", "default", "description", "discriminator", "enum", "example", "exclusiveMaximum", "exclusiveMinimum", "externalDocs", "format", "items", "maxItems", "maxLength", "maxProperties", "maximum", "minItems", "minLength", "minProperties", "minimum", "multipleOf", "pattern", "properties", "readOnly", "required", "title", "type", "uniqueItems", "xml"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string _ref = 1;
+		v1 := compiler.MapValueForKey(m, "$ref")
+		if v1 != nil {
+			x.XRef, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for $ref: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string format = 2;
+		v2 := compiler.MapValueForKey(m, "format")
+		if v2 != nil {
+			x.Format, ok = v2.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string title = 3;
+		v3 := compiler.MapValueForKey(m, "title")
+		if v3 != nil {
+			x.Title, ok = v3.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for title: %+v (%T)", v3, v3)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string description = 4;
+		v4 := compiler.MapValueForKey(m, "description")
+		if v4 != nil {
+			x.Description, ok = v4.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v4, v4)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// Any default = 5;
+		v5 := compiler.MapValueForKey(m, "default")
+		if v5 != nil {
+			var err error
+			x.Default, err = NewAny(v5, compiler.NewContext("default", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// float multiple_of = 6;
+		v6 := compiler.MapValueForKey(m, "multipleOf")
+		if v6 != nil {
+			switch v6 := v6.(type) {
+			case float64:
+				x.MultipleOf = v6
+			case float32:
+				x.MultipleOf = float64(v6)
+			case uint64:
+				x.MultipleOf = float64(v6)
+			case uint32:
+				x.MultipleOf = float64(v6)
+			case int64:
+				x.MultipleOf = float64(v6)
+			case int32:
+				x.MultipleOf = float64(v6)
+			case int:
+				x.MultipleOf = float64(v6)
+			default:
+				message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v6, v6)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// float maximum = 7;
+		v7 := compiler.MapValueForKey(m, "maximum")
+		if v7 != nil {
+			switch v7 := v7.(type) {
+			case float64:
+				x.Maximum = v7
+			case float32:
+				x.Maximum = float64(v7)
+			case uint64:
+				x.Maximum = float64(v7)
+			case uint32:
+				x.Maximum = float64(v7)
+			case int64:
+				x.Maximum = float64(v7)
+			case int32:
+				x.Maximum = float64(v7)
+			case int:
+				x.Maximum = float64(v7)
+			default:
+				message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v7, v7)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool exclusive_maximum = 8;
+		v8 := compiler.MapValueForKey(m, "exclusiveMaximum")
+		if v8 != nil {
+			x.ExclusiveMaximum, ok = v8.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v8, v8)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// float minimum = 9;
+		v9 := compiler.MapValueForKey(m, "minimum")
+		if v9 != nil {
+			switch v9 := v9.(type) {
+			case float64:
+				x.Minimum = v9
+			case float32:
+				x.Minimum = float64(v9)
+			case uint64:
+				x.Minimum = float64(v9)
+			case uint32:
+				x.Minimum = float64(v9)
+			case int64:
+				x.Minimum = float64(v9)
+			case int32:
+				x.Minimum = float64(v9)
+			case int:
+				x.Minimum = float64(v9)
+			default:
+				message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v9, v9)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool exclusive_minimum = 10;
+		v10 := compiler.MapValueForKey(m, "exclusiveMinimum")
+		if v10 != nil {
+			x.ExclusiveMinimum, ok = v10.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v10, v10)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 max_length = 11;
+		v11 := compiler.MapValueForKey(m, "maxLength")
+		if v11 != nil {
+			t, ok := v11.(int)
+			if ok {
+				x.MaxLength = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v11, v11)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 min_length = 12;
+		v12 := compiler.MapValueForKey(m, "minLength")
+		if v12 != nil {
+			t, ok := v12.(int)
+			if ok {
+				x.MinLength = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v12, v12)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string pattern = 13;
+		v13 := compiler.MapValueForKey(m, "pattern")
+		if v13 != nil {
+			x.Pattern, ok = v13.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v13, v13)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 max_items = 14;
+		v14 := compiler.MapValueForKey(m, "maxItems")
+		if v14 != nil {
+			t, ok := v14.(int)
+			if ok {
+				x.MaxItems = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v14, v14)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 min_items = 15;
+		v15 := compiler.MapValueForKey(m, "minItems")
+		if v15 != nil {
+			t, ok := v15.(int)
+			if ok {
+				x.MinItems = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v15, v15)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool unique_items = 16;
+		v16 := compiler.MapValueForKey(m, "uniqueItems")
+		if v16 != nil {
+			x.UniqueItems, ok = v16.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v16, v16)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 max_properties = 17;
+		v17 := compiler.MapValueForKey(m, "maxProperties")
+		if v17 != nil {
+			t, ok := v17.(int)
+			if ok {
+				x.MaxProperties = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for maxProperties: %+v (%T)", v17, v17)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 min_properties = 18;
+		v18 := compiler.MapValueForKey(m, "minProperties")
+		if v18 != nil {
+			t, ok := v18.(int)
+			if ok {
+				x.MinProperties = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for minProperties: %+v (%T)", v18, v18)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated string required = 19;
+		v19 := compiler.MapValueForKey(m, "required")
+		if v19 != nil {
+			v, ok := v19.([]interface{})
+			if ok {
+				x.Required = compiler.ConvertInterfaceArrayToStringArray(v)
+			} else {
+				message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v19, v19)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated Any enum = 20;
+		v20 := compiler.MapValueForKey(m, "enum")
+		if v20 != nil {
+			// repeated Any
+			x.Enum = make([]*Any, 0)
+			a, ok := v20.([]interface{})
+			if ok {
+				for _, item := range a {
+					y, err := NewAny(item, compiler.NewContext("enum", context))
+					if err != nil {
+						errors = append(errors, err)
+					}
+					x.Enum = append(x.Enum, y)
+				}
+			}
+		}
+		// AdditionalPropertiesItem additional_properties = 21;
+		v21 := compiler.MapValueForKey(m, "additionalProperties")
+		if v21 != nil {
+			var err error
+			x.AdditionalProperties, err = NewAdditionalPropertiesItem(v21, compiler.NewContext("additionalProperties", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// TypeItem type = 22;
+		v22 := compiler.MapValueForKey(m, "type")
+		if v22 != nil {
+			var err error
+			x.Type, err = NewTypeItem(v22, compiler.NewContext("type", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// ItemsItem items = 23;
+		v23 := compiler.MapValueForKey(m, "items")
+		if v23 != nil {
+			var err error
+			x.Items, err = NewItemsItem(v23, compiler.NewContext("items", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// repeated Schema all_of = 24;
+		v24 := compiler.MapValueForKey(m, "allOf")
+		if v24 != nil {
+			// repeated Schema
+			x.AllOf = make([]*Schema, 0)
+			a, ok := v24.([]interface{})
+			if ok {
+				for _, item := range a {
+					y, err := NewSchema(item, compiler.NewContext("allOf", context))
+					if err != nil {
+						errors = append(errors, err)
+					}
+					x.AllOf = append(x.AllOf, y)
+				}
+			}
+		}
+		// Properties properties = 25;
+		v25 := compiler.MapValueForKey(m, "properties")
+		if v25 != nil {
+			var err error
+			x.Properties, err = NewProperties(v25, compiler.NewContext("properties", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// string discriminator = 26;
+		v26 := compiler.MapValueForKey(m, "discriminator")
+		if v26 != nil {
+			x.Discriminator, ok = v26.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for discriminator: %+v (%T)", v26, v26)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool read_only = 27;
+		v27 := compiler.MapValueForKey(m, "readOnly")
+		if v27 != nil {
+			x.ReadOnly, ok = v27.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for readOnly: %+v (%T)", v27, v27)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// Xml xml = 28;
+		v28 := compiler.MapValueForKey(m, "xml")
+		if v28 != nil {
+			var err error
+			x.Xml, err = NewXml(v28, compiler.NewContext("xml", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// ExternalDocs external_docs = 29;
+		v29 := compiler.MapValueForKey(m, "externalDocs")
+		if v29 != nil {
+			var err error
+			x.ExternalDocs, err = NewExternalDocs(v29, compiler.NewContext("externalDocs", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// Any example = 30;
+		v30 := compiler.MapValueForKey(m, "example")
+		if v30 != nil {
+			var err error
+			x.Example, err = NewAny(v30, compiler.NewContext("example", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// repeated NamedAny vendor_extension = 31;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
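+// NewSchema descends recursively: "properties" re-enters NewSchema through
+// NewProperties, "items" is delegated to NewItemsItem, and each "allOf"
+// element calls NewSchema directly, so a deeply nested schema is parsed in a
+// single pass with all errors accumulated into one group.
+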
+// NewSchemaItem creates an object of type SchemaItem if possible, returning an error if not.
+func NewSchemaItem(in interface{}, context *compiler.Context) (*SchemaItem, error) {
+	errors := make([]error, 0)
+	x := &SchemaItem{}
+	matched := false
+	// Schema schema = 1;
+	{
+		m, ok := compiler.UnpackMap(in)
+		if ok {
+			// errors might be ok here; they just mean we don't have the right subtype
+			t, matchingError := NewSchema(m, compiler.NewContext("schema", context))
+			if matchingError == nil {
+				x.Oneof = &SchemaItem_Schema{Schema: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	// FileSchema file_schema = 2;
+	{
+		m, ok := compiler.UnpackMap(in)
+		if ok {
+			// errors might be ok here; they just mean we don't have the right subtype
+			t, matchingError := NewFileSchema(m, compiler.NewContext("fileSchema", context))
+			if matchingError == nil {
+				x.Oneof = &SchemaItem_FileSchema{FileSchema: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	if matched {
+		// since the oneof matched one of its possibilities, discard any matching errors
+		errors = make([]error, 0)
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewSecurityDefinitions creates an object of type SecurityDefinitions if possible, returning an error if not.
+func NewSecurityDefinitions(in interface{}, context *compiler.Context) (*SecurityDefinitions, error) {
+	errors := make([]error, 0)
+	x := &SecurityDefinitions{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		// repeated NamedSecurityDefinitionsItem additional_properties = 1;
+		// MAP: SecurityDefinitionsItem
+		x.AdditionalProperties = make([]*NamedSecurityDefinitionsItem, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				pair := &NamedSecurityDefinitionsItem{}
+				pair.Name = k
+				var err error
+				pair.Value, err = NewSecurityDefinitionsItem(v, compiler.NewContext(k, context))
+				if err != nil {
+					errors = append(errors, err)
+				}
+				x.AdditionalProperties = append(x.AdditionalProperties, pair)
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewSecurityDefinitionsItem creates an object of type SecurityDefinitionsItem if possible, returning an error if not.
+func NewSecurityDefinitionsItem(in interface{}, context *compiler.Context) (*SecurityDefinitionsItem, error) {
+	errors := make([]error, 0)
+	x := &SecurityDefinitionsItem{}
+	matched := false
+	// BasicAuthenticationSecurity basic_authentication_security = 1;
+	{
+		m, ok := compiler.UnpackMap(in)
+		if ok {
+			// errors might be ok here; they just mean we don't have the right subtype
+			t, matchingError := NewBasicAuthenticationSecurity(m, compiler.NewContext("basicAuthenticationSecurity", context))
+			if matchingError == nil {
+				x.Oneof = &SecurityDefinitionsItem_BasicAuthenticationSecurity{BasicAuthenticationSecurity: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	// ApiKeySecurity api_key_security = 2;
+	{
+		m, ok := compiler.UnpackMap(in)
+		if ok {
+			// errors might be ok here; they just mean we don't have the right subtype
+			t, matchingError := NewApiKeySecurity(m, compiler.NewContext("apiKeySecurity", context))
+			if matchingError == nil {
+				x.Oneof = &SecurityDefinitionsItem_ApiKeySecurity{ApiKeySecurity: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	// Oauth2ImplicitSecurity oauth2_implicit_security = 3;
+	{
+		m, ok := compiler.UnpackMap(in)
+		if ok {
+			// errors might be ok here; they just mean we don't have the right subtype
+			t, matchingError := NewOauth2ImplicitSecurity(m, compiler.NewContext("oauth2ImplicitSecurity", context))
+			if matchingError == nil {
+				x.Oneof = &SecurityDefinitionsItem_Oauth2ImplicitSecurity{Oauth2ImplicitSecurity: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	// Oauth2PasswordSecurity oauth2_password_security = 4;
+	{
+		m, ok := compiler.UnpackMap(in)
+		if ok {
+			// errors might be ok here; they just mean we don't have the right subtype
+			t, matchingError := NewOauth2PasswordSecurity(m, compiler.NewContext("oauth2PasswordSecurity", context))
+			if matchingError == nil {
+				x.Oneof = &SecurityDefinitionsItem_Oauth2PasswordSecurity{Oauth2PasswordSecurity: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	// Oauth2ApplicationSecurity oauth2_application_security = 5;
+	{
+		m, ok := compiler.UnpackMap(in)
+		if ok {
+			// errors might be ok here; they just mean we don't have the right subtype
+			t, matchingError := NewOauth2ApplicationSecurity(m, compiler.NewContext("oauth2ApplicationSecurity", context))
+			if matchingError == nil {
+				x.Oneof = &SecurityDefinitionsItem_Oauth2ApplicationSecurity{Oauth2ApplicationSecurity: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	// Oauth2AccessCodeSecurity oauth2_access_code_security = 6;
+	{
+		m, ok := compiler.UnpackMap(in)
+		if ok {
+			// errors might be ok here; they just mean we don't have the right subtype
+			t, matchingError := NewOauth2AccessCodeSecurity(m, compiler.NewContext("oauth2AccessCodeSecurity", context))
+			if matchingError == nil {
+				x.Oneof = &SecurityDefinitionsItem_Oauth2AccessCodeSecurity{Oauth2AccessCodeSecurity: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	if matched {
+		// since the oneof matched one of its possibilities, discard any matching errors
+		errors = make([]error, 0)
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewSecurityRequirement creates an object of type SecurityRequirement if possible, returning an error if not.
+func NewSecurityRequirement(in interface{}, context *compiler.Context) (*SecurityRequirement, error) {
+	errors := make([]error, 0)
+	x := &SecurityRequirement{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		// repeated NamedStringArray additional_properties = 1;
+		// MAP: StringArray
+		x.AdditionalProperties = make([]*NamedStringArray, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				pair := &NamedStringArray{}
+				pair.Name = k
+				var err error
+				pair.Value, err = NewStringArray(v, compiler.NewContext(k, context))
+				if err != nil {
+					errors = append(errors, err)
+				}
+				x.AdditionalProperties = append(x.AdditionalProperties, pair)
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewStringArray creates an object of type StringArray if possible, returning an error if not.
+func NewStringArray(in interface{}, context *compiler.Context) (*StringArray, error) {
+	errors := make([]error, 0)
+	x := &StringArray{}
+	a, ok := in.([]interface{})
+	if !ok {
+		message := fmt.Sprintf("has unexpected value for StringArray: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		x.Value = make([]string, 0)
+		for _, s := range a {
+			value, ok := s.(string)
+			if ok {
+				x.Value = append(x.Value, value)
+			} else {
+				// mirror NewTypeItem: report the offending element instead of panicking
+				message := fmt.Sprintf("has unexpected value for string array element: %+v (%T)", s, s)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewTag creates an object of type Tag if possible, returning an error if not.
+func NewTag(in interface{}, context *compiler.Context) (*Tag, error) {
+	errors := make([]error, 0)
+	x := &Tag{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		requiredKeys := []string{"name"}
+		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
+		if len(missingKeys) > 0 {
+			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		allowedKeys := []string{"description", "externalDocs", "name"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string name = 1;
+		v1 := compiler.MapValueForKey(m, "name")
+		if v1 != nil {
+			x.Name, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string description = 2;
+		v2 := compiler.MapValueForKey(m, "description")
+		if v2 != nil {
+			x.Description, ok = v2.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// ExternalDocs external_docs = 3;
+		v3 := compiler.MapValueForKey(m, "externalDocs")
+		if v3 != nil {
+			var err error
+			x.ExternalDocs, err = NewExternalDocs(v3, compiler.NewContext("externalDocs", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// repeated NamedAny vendor_extension = 4;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewTypeItem creates an object of type TypeItem if possible, returning an error if not.
+func NewTypeItem(in interface{}, context *compiler.Context) (*TypeItem, error) {
+	errors := make([]error, 0)
+	x := &TypeItem{}
+	switch in := in.(type) {
+	case string:
+		x.Value = make([]string, 0)
+		x.Value = append(x.Value, in)
+	case []interface{}:
+		x.Value = make([]string, 0)
+		for _, v := range in {
+			value, ok := v.(string)
+			if ok {
+				x.Value = append(x.Value, value)
+			} else {
+				message := fmt.Sprintf("has unexpected value for string array element: %+v (%T)", value, value)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+	default:
+		message := fmt.Sprintf("has unexpected value for string array: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
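+// A sketch of the two input shapes NewTypeItem accepts, in YAML:
+//
+//	type: string            # -> Value == ["string"]
+//	type: [string, integer] # -> Value == ["string", "integer"]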
+
+// NewVendorExtension creates an object of type VendorExtension if possible, returning an error if not.
+func NewVendorExtension(in interface{}, context *compiler.Context) (*VendorExtension, error) {
+	errors := make([]error, 0)
+	x := &VendorExtension{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		// repeated NamedAny additional_properties = 1;
+		// MAP: Any
+		x.AdditionalProperties = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				pair := &NamedAny{}
+				pair.Name = k
+				result := &Any{}
+				handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+				if handled {
+					if err != nil {
+						errors = append(errors, err)
+					} else {
+						bytes, _ := yaml.Marshal(v)
+						result.Yaml = string(bytes)
+						result.Value = resultFromExt
+						pair.Value = result
+					}
+				} else {
+					pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+					if err != nil {
+						errors = append(errors, err)
+					}
+				}
+				x.AdditionalProperties = append(x.AdditionalProperties, pair)
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewXml creates an object of type Xml if possible, returning an error if not.
+func NewXml(in interface{}, context *compiler.Context) (*Xml, error) {
+	errors := make([]error, 0)
+	x := &Xml{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		allowedKeys := []string{"attribute", "name", "namespace", "prefix", "wrapped"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string name = 1;
+		v1 := compiler.MapValueForKey(m, "name")
+		if v1 != nil {
+			x.Name, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string namespace = 2;
+		v2 := compiler.MapValueForKey(m, "namespace")
+		if v2 != nil {
+			x.Namespace, ok = v2.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for namespace: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string prefix = 3;
+		v3 := compiler.MapValueForKey(m, "prefix")
+		if v3 != nil {
+			x.Prefix, ok = v3.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for prefix: %+v (%T)", v3, v3)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool attribute = 4;
+		v4 := compiler.MapValueForKey(m, "attribute")
+		if v4 != nil {
+			x.Attribute, ok = v4.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for attribute: %+v (%T)", v4, v4)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool wrapped = 5;
+		v5 := compiler.MapValueForKey(m, "wrapped")
+		if v5 != nil {
+			x.Wrapped, ok = v5.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for wrapped: %+v (%T)", v5, v5)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated NamedAny vendor_extension = 6;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
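+// The ResolveReferences methods below share a common pattern: each walks the
+// children that may contain a $ref, resolves them recursively, and
+// accumulates errors rather than stopping at the first one. Types that can
+// themselves be a reference (JsonReference, PathItem, Schema) load the
+// referenced document with compiler.ReadInfoForRef and replace their own
+// contents with the resolved target.
+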
+// ResolveReferences resolves references found inside AdditionalPropertiesItem objects.
+func (m *AdditionalPropertiesItem) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	{
+		p, ok := m.Oneof.(*AdditionalPropertiesItem_Schema)
+		if ok {
+			_, err := p.Schema.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Any objects.
+func (m *Any) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside ApiKeySecurity objects.
+func (m *ApiKeySecurity) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside BasicAuthenticationSecurity objects.
+func (m *BasicAuthenticationSecurity) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside BodyParameter objects.
+func (m *BodyParameter) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Schema != nil {
+		_, err := m.Schema.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Contact objects.
+func (m *Contact) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Default objects.
+func (m *Default) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	for _, item := range m.AdditionalProperties {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Definitions objects.
+func (m *Definitions) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	for _, item := range m.AdditionalProperties {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Document objects.
+func (m *Document) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Info != nil {
+		_, err := m.Info.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Paths != nil {
+		_, err := m.Paths.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Definitions != nil {
+		_, err := m.Definitions.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Parameters != nil {
+		_, err := m.Parameters.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Responses != nil {
+		_, err := m.Responses.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.Security {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	if m.SecurityDefinitions != nil {
+		_, err := m.SecurityDefinitions.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.Tags {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	if m.ExternalDocs != nil {
+		_, err := m.ExternalDocs.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Examples objects.
+func (m *Examples) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	for _, item := range m.AdditionalProperties {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside ExternalDocs objects.
+func (m *ExternalDocs) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside FileSchema objects.
+func (m *FileSchema) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Default != nil {
+		_, err := m.Default.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.ExternalDocs != nil {
+		_, err := m.ExternalDocs.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Example != nil {
+		_, err := m.Example.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside FormDataParameterSubSchema objects.
+func (m *FormDataParameterSubSchema) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Items != nil {
+		_, err := m.Items.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Default != nil {
+		_, err := m.Default.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.Enum {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Header objects.
+func (m *Header) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Items != nil {
+		_, err := m.Items.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Default != nil {
+		_, err := m.Default.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.Enum {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside HeaderParameterSubSchema objects.
+func (m *HeaderParameterSubSchema) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Items != nil {
+		_, err := m.Items.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Default != nil {
+		_, err := m.Default.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.Enum {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Headers objects.
+func (m *Headers) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	for _, item := range m.AdditionalProperties {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Info objects.
+func (m *Info) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Contact != nil {
+		_, err := m.Contact.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.License != nil {
+		_, err := m.License.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside ItemsItem objects.
+func (m *ItemsItem) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	for _, item := range m.Schema {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside JsonReference objects.
+func (m *JsonReference) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.XRef != "" {
+		info, err := compiler.ReadInfoForRef(root, m.XRef)
+		if err != nil {
+			return nil, err
+		}
+		if info != nil {
+			replacement, err := NewJsonReference(info, nil)
+			if err == nil {
+				*m = *replacement
+				return m.ResolveReferences(root)
+			}
+		}
+		return info, nil
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
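+// Note that a successfully resolved reference replaces the JsonReference in
+// place (*m = *replacement) and is then resolved again, so chains of $ref
+// are followed until they reach a non-reference value.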
+
+// ResolveReferences resolves references found inside License objects.
+func (m *License) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside NamedAny objects.
+func (m *NamedAny) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Value != nil {
+		_, err := m.Value.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside NamedHeader objects.
+func (m *NamedHeader) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Value != nil {
+		_, err := m.Value.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside NamedParameter objects.
+func (m *NamedParameter) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Value != nil {
+		_, err := m.Value.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside NamedPathItem objects.
+func (m *NamedPathItem) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Value != nil {
+		_, err := m.Value.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside NamedResponse objects.
+func (m *NamedResponse) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Value != nil {
+		_, err := m.Value.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside NamedResponseValue objects.
+func (m *NamedResponseValue) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Value != nil {
+		_, err := m.Value.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside NamedSchema objects.
+func (m *NamedSchema) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Value != nil {
+		_, err := m.Value.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside NamedSecurityDefinitionsItem objects.
+func (m *NamedSecurityDefinitionsItem) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Value != nil {
+		_, err := m.Value.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside NamedString objects.
+func (m *NamedString) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside NamedStringArray objects.
+func (m *NamedStringArray) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Value != nil {
+		_, err := m.Value.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside NonBodyParameter objects.
+func (m *NonBodyParameter) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	{
+		p, ok := m.Oneof.(*NonBodyParameter_HeaderParameterSubSchema)
+		if ok {
+			_, err := p.HeaderParameterSubSchema.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	{
+		p, ok := m.Oneof.(*NonBodyParameter_FormDataParameterSubSchema)
+		if ok {
+			_, err := p.FormDataParameterSubSchema.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	{
+		p, ok := m.Oneof.(*NonBodyParameter_QueryParameterSubSchema)
+		if ok {
+			_, err := p.QueryParameterSubSchema.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	{
+		p, ok := m.Oneof.(*NonBodyParameter_PathParameterSubSchema)
+		if ok {
+			_, err := p.PathParameterSubSchema.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Oauth2AccessCodeSecurity objects.
+func (m *Oauth2AccessCodeSecurity) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Scopes != nil {
+		_, err := m.Scopes.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Oauth2ApplicationSecurity objects.
+func (m *Oauth2ApplicationSecurity) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Scopes != nil {
+		_, err := m.Scopes.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Oauth2ImplicitSecurity objects.
+func (m *Oauth2ImplicitSecurity) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Scopes != nil {
+		_, err := m.Scopes.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Oauth2PasswordSecurity objects.
+func (m *Oauth2PasswordSecurity) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Scopes != nil {
+		_, err := m.Scopes.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Oauth2Scopes objects.
+func (m *Oauth2Scopes) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	for _, item := range m.AdditionalProperties {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Operation objects.
+func (m *Operation) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.ExternalDocs != nil {
+		_, err := m.ExternalDocs.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.Parameters {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	if m.Responses != nil {
+		_, err := m.Responses.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.Security {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Parameter objects.
+func (m *Parameter) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	{
+		p, ok := m.Oneof.(*Parameter_BodyParameter)
+		if ok {
+			_, err := p.BodyParameter.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	{
+		p, ok := m.Oneof.(*Parameter_NonBodyParameter)
+		if ok {
+			_, err := p.NonBodyParameter.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside ParameterDefinitions objects.
+func (m *ParameterDefinitions) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	for _, item := range m.AdditionalProperties {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside ParametersItem objects.
+func (m *ParametersItem) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	{
+		p, ok := m.Oneof.(*ParametersItem_Parameter)
+		if ok {
+			_, err := p.Parameter.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	{
+		p, ok := m.Oneof.(*ParametersItem_JsonReference)
+		if ok {
+			info, err := p.JsonReference.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			} else if info != nil {
+				n, err := NewParametersItem(info, nil)
+				if err != nil {
+					return nil, err
+				} else if n != nil {
+					*m = *n
+					return nil, nil
+				}
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside PathItem objects.
+func (m *PathItem) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.XRef != "" {
+		info, err := compiler.ReadInfoForRef(root, m.XRef)
+		if err != nil {
+			return nil, err
+		}
+		if info != nil {
+			replacement, err := NewPathItem(info, nil)
+			if err == nil {
+				*m = *replacement
+				return m.ResolveReferences(root)
+			}
+		}
+		return info, nil
+	}
+	if m.Get != nil {
+		_, err := m.Get.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Put != nil {
+		_, err := m.Put.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Post != nil {
+		_, err := m.Post.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Delete != nil {
+		_, err := m.Delete.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Options != nil {
+		_, err := m.Options.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Head != nil {
+		_, err := m.Head.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Patch != nil {
+		_, err := m.Patch.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.Parameters {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside PathParameterSubSchema objects.
+func (m *PathParameterSubSchema) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Items != nil {
+		_, err := m.Items.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Default != nil {
+		_, err := m.Default.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.Enum {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Paths objects.
+func (m *Paths) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	for _, item := range m.Path {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside PrimitivesItems objects.
+func (m *PrimitivesItems) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Items != nil {
+		_, err := m.Items.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Default != nil {
+		_, err := m.Default.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.Enum {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Properties objects.
+func (m *Properties) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	for _, item := range m.AdditionalProperties {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside QueryParameterSubSchema objects.
+func (m *QueryParameterSubSchema) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Items != nil {
+		_, err := m.Items.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Default != nil {
+		_, err := m.Default.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.Enum {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Response objects.
+func (m *Response) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Schema != nil {
+		_, err := m.Schema.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Headers != nil {
+		_, err := m.Headers.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Examples != nil {
+		_, err := m.Examples.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside ResponseDefinitions objects.
+func (m *ResponseDefinitions) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	for _, item := range m.AdditionalProperties {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside ResponseValue objects.
+func (m *ResponseValue) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	{
+		p, ok := m.Oneof.(*ResponseValue_Response)
+		if ok {
+			_, err := p.Response.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	{
+		p, ok := m.Oneof.(*ResponseValue_JsonReference)
+		if ok {
+			info, err := p.JsonReference.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			} else if info != nil {
+				n, err := NewResponseValue(info, nil)
+				if err != nil {
+					return nil, err
+				} else if n != nil {
+					*m = *n
+					return nil, nil
+				}
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Responses objects.
+func (m *Responses) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	for _, item := range m.ResponseCode {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Schema objects.
+func (m *Schema) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.XRef != "" {
+		info, err := compiler.ReadInfoForRef(root, m.XRef)
+		if err != nil {
+			return nil, err
+		}
+		if info != nil {
+			replacement, err := NewSchema(info, nil)
+			if err == nil {
+				*m = *replacement
+				return m.ResolveReferences(root)
+			}
+		}
+		return info, nil
+	}
+	if m.Default != nil {
+		_, err := m.Default.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.Enum {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	if m.AdditionalProperties != nil {
+		_, err := m.AdditionalProperties.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Type != nil {
+		_, err := m.Type.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Items != nil {
+		_, err := m.Items.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.AllOf {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	if m.Properties != nil {
+		_, err := m.Properties.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Xml != nil {
+		_, err := m.Xml.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.ExternalDocs != nil {
+		_, err := m.ExternalDocs.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Example != nil {
+		_, err := m.Example.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside SchemaItem objects.
+func (m *SchemaItem) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	{
+		p, ok := m.Oneof.(*SchemaItem_Schema)
+		if ok {
+			_, err := p.Schema.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	{
+		p, ok := m.Oneof.(*SchemaItem_FileSchema)
+		if ok {
+			_, err := p.FileSchema.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside SecurityDefinitions objects.
+func (m *SecurityDefinitions) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	for _, item := range m.AdditionalProperties {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside SecurityDefinitionsItem objects.
+func (m *SecurityDefinitionsItem) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	{
+		p, ok := m.Oneof.(*SecurityDefinitionsItem_BasicAuthenticationSecurity)
+		if ok {
+			_, err := p.BasicAuthenticationSecurity.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	{
+		p, ok := m.Oneof.(*SecurityDefinitionsItem_ApiKeySecurity)
+		if ok {
+			_, err := p.ApiKeySecurity.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	{
+		p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2ImplicitSecurity)
+		if ok {
+			_, err := p.Oauth2ImplicitSecurity.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	{
+		p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2PasswordSecurity)
+		if ok {
+			_, err := p.Oauth2PasswordSecurity.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	{
+		p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2ApplicationSecurity)
+		if ok {
+			_, err := p.Oauth2ApplicationSecurity.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	{
+		p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2AccessCodeSecurity)
+		if ok {
+			_, err := p.Oauth2AccessCodeSecurity.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside SecurityRequirement objects.
+func (m *SecurityRequirement) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	for _, item := range m.AdditionalProperties {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside StringArray objects.
+func (m *StringArray) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Tag objects.
+func (m *Tag) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.ExternalDocs != nil {
+		_, err := m.ExternalDocs.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside TypeItem objects.
+func (m *TypeItem) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside VendorExtension objects.
+func (m *VendorExtension) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	for _, item := range m.AdditionalProperties {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Xml objects.
+func (m *Xml) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
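+// The ToRawInfo methods below convert parsed structures back into untyped
+// yaml.MapSlice values for JSON or YAML export. Each emits only fields that
+// differ from their zero values, and yaml.MapSlice preserves key order.
+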
+// ToRawInfo returns a description of AdditionalPropertiesItem suitable for JSON or YAML export.
+func (m *AdditionalPropertiesItem) ToRawInfo() interface{} {
+	// ONE OF WRAPPER
+	// AdditionalPropertiesItem
+	// {Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v0 := m.GetSchema()
+	if v0 != nil {
+		return v0.ToRawInfo()
+	}
+	// {Name:boolean Type:bool StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if v1, ok := m.GetOneof().(*AdditionalPropertiesItem_Boolean); ok {
+		return v1.Boolean
+	}
+	return nil
+}
+
+// ToRawInfo returns a description of Any suitable for JSON or YAML export.
+func (m *Any) ToRawInfo() interface{} {
+	var err error
+	var info1 []yaml.MapSlice
+	err = yaml.Unmarshal([]byte(m.Yaml), &info1)
+	if err == nil {
+		return info1
+	}
+	var info2 yaml.MapSlice
+	err = yaml.Unmarshal([]byte(m.Yaml), &info2)
+	if err == nil {
+		return info2
+	}
+	var info3 interface{}
+	err = yaml.Unmarshal([]byte(m.Yaml), &info3)
+	if err == nil {
+		return info3
+	}
+	return nil
+}
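+// The three-stage fallback above prefers the most structured reading of the
+// stored YAML: a sequence of maps, then a single map, then any other value.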
+
+// ToRawInfo returns a description of ApiKeySecurity suitable for JSON or YAML export.
+func (m *ApiKeySecurity) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Type != "" {
+		info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
+	}
+	if m.Name != "" {
+		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+	}
+	if m.In != "" {
+		info = append(info, yaml.MapItem{Key: "in", Value: m.In})
+	}
+	if m.Description != "" {
+		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of BasicAuthenticationSecurity suitable for JSON or YAML export.
+func (m *BasicAuthenticationSecurity) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Type != "" {
+		info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
+	}
+	if m.Description != "" {
+		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of BodyParameter suitable for JSON or YAML export.
+func (m *BodyParameter) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Description != "" {
+		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+	}
+	if m.Name != "" {
+		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+	}
+	if m.In != "" {
+		info = append(info, yaml.MapItem{Key: "in", Value: m.In})
+	}
+	if m.Required != false {
+		info = append(info, yaml.MapItem{Key: "required", Value: m.Required})
+	}
+	if m.Schema != nil {
+		info = append(info, yaml.MapItem{Key: "schema", Value: m.Schema.ToRawInfo()})
+	}
+	// &{Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of Contact suitable for JSON or YAML export.
+func (m *Contact) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Name != "" {
+		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+	}
+	if m.Url != "" {
+		info = append(info, yaml.MapItem{Key: "url", Value: m.Url})
+	}
+	if m.Email != "" {
+		info = append(info, yaml.MapItem{Key: "email", Value: m.Email})
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of Default suitable for JSON or YAML export.
+func (m *Default) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.AdditionalProperties != nil {
+		for _, item := range m.AdditionalProperties {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:false Description:}
+	return info
+}
+
+// ToRawInfo returns a description of Definitions suitable for JSON or YAML export.
+func (m *Definitions) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.AdditionalProperties != nil {
+		for _, item := range m.AdditionalProperties {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:additionalProperties Type:NamedSchema StringEnumValues:[] MapType:Schema Repeated:true Pattern: Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of Document suitable for JSON or YAML export.
+func (m *Document) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Swagger != "" {
+		info = append(info, yaml.MapItem{Key: "swagger", Value: m.Swagger})
+	}
+	if m.Info != nil {
+		info = append(info, yaml.MapItem{Key: "info", Value: m.Info.ToRawInfo()})
+	}
+	// &{Name:info Type:Info StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Host != "" {
+		info = append(info, yaml.MapItem{Key: "host", Value: m.Host})
+	}
+	if m.BasePath != "" {
+		info = append(info, yaml.MapItem{Key: "basePath", Value: m.BasePath})
+	}
+	if len(m.Schemes) != 0 {
+		info = append(info, yaml.MapItem{Key: "schemes", Value: m.Schemes})
+	}
+	if len(m.Consumes) != 0 {
+		info = append(info, yaml.MapItem{Key: "consumes", Value: m.Consumes})
+	}
+	if len(m.Produces) != 0 {
+		info = append(info, yaml.MapItem{Key: "produces", Value: m.Produces})
+	}
+	if m.Paths != nil {
+		info = append(info, yaml.MapItem{Key: "paths", Value: m.Paths.ToRawInfo()})
+	}
+	// &{Name:paths Type:Paths StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Definitions != nil {
+		info = append(info, yaml.MapItem{Key: "definitions", Value: m.Definitions.ToRawInfo()})
+	}
+	// &{Name:definitions Type:Definitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Parameters != nil {
+		info = append(info, yaml.MapItem{Key: "parameters", Value: m.Parameters.ToRawInfo()})
+	}
+	// &{Name:parameters Type:ParameterDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Responses != nil {
+		info = append(info, yaml.MapItem{Key: "responses", Value: m.Responses.ToRawInfo()})
+	}
+	// &{Name:responses Type:ResponseDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if len(m.Security) != 0 {
+		items := make([]interface{}, 0)
+		for _, item := range m.Security {
+			items = append(items, item.ToRawInfo())
+		}
+		info = append(info, yaml.MapItem{Key: "security", Value: items})
+	}
+	// &{Name:security Type:SecurityRequirement StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
+	if m.SecurityDefinitions != nil {
+		info = append(info, yaml.MapItem{Key: "securityDefinitions", Value: m.SecurityDefinitions.ToRawInfo()})
+	}
+	// &{Name:securityDefinitions Type:SecurityDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if len(m.Tags) != 0 {
+		items := make([]interface{}, 0)
+		for _, item := range m.Tags {
+			items = append(items, item.ToRawInfo())
+		}
+		info = append(info, yaml.MapItem{Key: "tags", Value: items})
+	}
+	// &{Name:tags Type:Tag StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
+	if m.ExternalDocs != nil {
+		info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()})
+	}
+	// &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of Examples suitable for JSON or YAML export.
+func (m *Examples) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.AdditionalProperties != nil {
+		for _, item := range m.AdditionalProperties {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of ExternalDocs suitable for JSON or YAML export.
+func (m *ExternalDocs) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Description != "" {
+		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+	}
+	if m.Url != "" {
+		info = append(info, yaml.MapItem{Key: "url", Value: m.Url})
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of FileSchema suitable for JSON or YAML export.
+func (m *FileSchema) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Format != "" {
+		info = append(info, yaml.MapItem{Key: "format", Value: m.Format})
+	}
+	if m.Title != "" {
+		info = append(info, yaml.MapItem{Key: "title", Value: m.Title})
+	}
+	if m.Description != "" {
+		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+	}
+	if m.Default != nil {
+		info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()})
+	}
+	// &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if len(m.Required) != 0 {
+		info = append(info, yaml.MapItem{Key: "required", Value: m.Required})
+	}
+	if m.Type != "" {
+		info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
+	}
+	if m.ReadOnly != false {
+		info = append(info, yaml.MapItem{Key: "readOnly", Value: m.ReadOnly})
+	}
+	if m.ExternalDocs != nil {
+		info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()})
+	}
+	// &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Example != nil {
+		info = append(info, yaml.MapItem{Key: "example", Value: m.Example.ToRawInfo()})
+	}
+	// &{Name:example Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of FormDataParameterSubSchema suitable for JSON or YAML export.
+func (m *FormDataParameterSubSchema) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Required != false {
+		info = append(info, yaml.MapItem{Key: "required", Value: m.Required})
+	}
+	if m.In != "" {
+		info = append(info, yaml.MapItem{Key: "in", Value: m.In})
+	}
+	if m.Description != "" {
+		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+	}
+	if m.Name != "" {
+		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+	}
+	if m.AllowEmptyValue != false {
+		info = append(info, yaml.MapItem{Key: "allowEmptyValue", Value: m.AllowEmptyValue})
+	}
+	if m.Type != "" {
+		info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
+	}
+	if m.Format != "" {
+		info = append(info, yaml.MapItem{Key: "format", Value: m.Format})
+	}
+	if m.Items != nil {
+		info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()})
+	}
+	// &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.CollectionFormat != "" {
+		info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat})
+	}
+	if m.Default != nil {
+		info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()})
+	}
+	// &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Maximum != 0.0 {
+		info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum})
+	}
+	if m.ExclusiveMaximum != false {
+		info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum})
+	}
+	if m.Minimum != 0.0 {
+		info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum})
+	}
+	if m.ExclusiveMinimum != false {
+		info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum})
+	}
+	if m.MaxLength != 0 {
+		info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength})
+	}
+	if m.MinLength != 0 {
+		info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength})
+	}
+	if m.Pattern != "" {
+		info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern})
+	}
+	if m.MaxItems != 0 {
+		info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems})
+	}
+	if m.MinItems != 0 {
+		info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems})
+	}
+	if m.UniqueItems != false {
+		info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems})
+	}
+	if len(m.Enum) != 0 {
+		items := make([]interface{}, 0)
+		for _, item := range m.Enum {
+			items = append(items, item.ToRawInfo())
+		}
+		info = append(info, yaml.MapItem{Key: "enum", Value: items})
+	}
+	// &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
+	if m.MultipleOf != 0.0 {
+		info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf})
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of Header suitable for JSON or YAML export.
+func (m *Header) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Type != "" {
+		info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
+	}
+	if m.Format != "" {
+		info = append(info, yaml.MapItem{Key: "format", Value: m.Format})
+	}
+	if m.Items != nil {
+		info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()})
+	}
+	// &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.CollectionFormat != "" {
+		info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat})
+	}
+	if m.Default != nil {
+		info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()})
+	}
+	// &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Maximum != 0.0 {
+		info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum})
+	}
+	if m.ExclusiveMaximum != false {
+		info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum})
+	}
+	if m.Minimum != 0.0 {
+		info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum})
+	}
+	if m.ExclusiveMinimum != false {
+		info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum})
+	}
+	if m.MaxLength != 0 {
+		info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength})
+	}
+	if m.MinLength != 0 {
+		info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength})
+	}
+	if m.Pattern != "" {
+		info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern})
+	}
+	if m.MaxItems != 0 {
+		info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems})
+	}
+	if m.MinItems != 0 {
+		info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems})
+	}
+	if m.UniqueItems != false {
+		info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems})
+	}
+	if len(m.Enum) != 0 {
+		items := make([]interface{}, 0)
+		for _, item := range m.Enum {
+			items = append(items, item.ToRawInfo())
+		}
+		info = append(info, yaml.MapItem{Key: "enum", Value: items})
+	}
+	// &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
+	if m.MultipleOf != 0.0 {
+		info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf})
+	}
+	if m.Description != "" {
+		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of HeaderParameterSubSchema suitable for JSON or YAML export.
+func (m *HeaderParameterSubSchema) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Required != false {
+		info = append(info, yaml.MapItem{Key: "required", Value: m.Required})
+	}
+	if m.In != "" {
+		info = append(info, yaml.MapItem{Key: "in", Value: m.In})
+	}
+	if m.Description != "" {
+		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+	}
+	if m.Name != "" {
+		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+	}
+	if m.Type != "" {
+		info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
+	}
+	if m.Format != "" {
+		info = append(info, yaml.MapItem{Key: "format", Value: m.Format})
+	}
+	if m.Items != nil {
+		info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()})
+	}
+	// &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.CollectionFormat != "" {
+		info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat})
+	}
+	if m.Default != nil {
+		info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()})
+	}
+	// &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Maximum != 0.0 {
+		info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum})
+	}
+	if m.ExclusiveMaximum != false {
+		info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum})
+	}
+	if m.Minimum != 0.0 {
+		info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum})
+	}
+	if m.ExclusiveMinimum != false {
+		info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum})
+	}
+	if m.MaxLength != 0 {
+		info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength})
+	}
+	if m.MinLength != 0 {
+		info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength})
+	}
+	if m.Pattern != "" {
+		info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern})
+	}
+	if m.MaxItems != 0 {
+		info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems})
+	}
+	if m.MinItems != 0 {
+		info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems})
+	}
+	if m.UniqueItems != false {
+		info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems})
+	}
+	if len(m.Enum) != 0 {
+		items := make([]interface{}, 0)
+		for _, item := range m.Enum {
+			items = append(items, item.ToRawInfo())
+		}
+		info = append(info, yaml.MapItem{Key: "enum", Value: items})
+	}
+	// &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
+	if m.MultipleOf != 0.0 {
+		info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf})
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of Headers suitable for JSON or YAML export.
+func (m *Headers) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.AdditionalProperties != nil {
+		for _, item := range m.AdditionalProperties {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:additionalProperties Type:NamedHeader StringEnumValues:[] MapType:Header Repeated:true Pattern: Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of Info suitable for JSON or YAML export.
+func (m *Info) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Title != "" {
+		info = append(info, yaml.MapItem{Key: "title", Value: m.Title})
+	}
+	if m.Version != "" {
+		info = append(info, yaml.MapItem{Key: "version", Value: m.Version})
+	}
+	if m.Description != "" {
+		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+	}
+	if m.TermsOfService != "" {
+		info = append(info, yaml.MapItem{Key: "termsOfService", Value: m.TermsOfService})
+	}
+	if m.Contact != nil {
+		info = append(info, yaml.MapItem{Key: "contact", Value: m.Contact.ToRawInfo()})
+	}
+	// &{Name:contact Type:Contact StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.License != nil {
+		info = append(info, yaml.MapItem{Key: "license", Value: m.License.ToRawInfo()})
+	}
+	// &{Name:license Type:License StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of ItemsItem suitable for JSON or YAML export.
+func (m *ItemsItem) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if len(m.Schema) != 0 {
+		items := make([]interface{}, 0)
+		for _, item := range m.Schema {
+			items = append(items, item.ToRawInfo())
+		}
+		info = append(info, yaml.MapItem{Key: "schema", Value: items})
+	}
+	// &{Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
+	return info
+}
+
+// ToRawInfo returns a description of JsonReference suitable for JSON or YAML export.
+func (m *JsonReference) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.XRef != "" {
+		info = append(info, yaml.MapItem{Key: "$ref", Value: m.XRef})
+	}
+	if m.Description != "" {
+		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+	}
+	return info
+}
+
+// ToRawInfo returns a description of License suitable for JSON or YAML export.
+func (m *License) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Name != "" {
+		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+	}
+	if m.Url != "" {
+		info = append(info, yaml.MapItem{Key: "url", Value: m.Url})
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of NamedAny suitable for JSON or YAML export.
+func (m *NamedAny) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Name != "" {
+		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+	}
+	// &{Name:value Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
+	return info
+}
+
+// ToRawInfo returns a description of NamedHeader suitable for JSON or YAML export.
+func (m *NamedHeader) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Name != "" {
+		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+	}
+	// &{Name:value Type:Header StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
+	return info
+}
+
+// ToRawInfo returns a description of NamedParameter suitable for JSON or YAML export.
+func (m *NamedParameter) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Name != "" {
+		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+	}
+	// &{Name:value Type:Parameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
+	return info
+}
+
+// ToRawInfo returns a description of NamedPathItem suitable for JSON or YAML export.
+func (m *NamedPathItem) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Name != "" {
+		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+	}
+	// &{Name:value Type:PathItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
+	return info
+}
+
+// ToRawInfo returns a description of NamedResponse suitable for JSON or YAML export.
+func (m *NamedResponse) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Name != "" {
+		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+	}
+	// &{Name:value Type:Response StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
+	return info
+}
+
+// ToRawInfo returns a description of NamedResponseValue suitable for JSON or YAML export.
+func (m *NamedResponseValue) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Name != "" {
+		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+	}
+	// &{Name:value Type:ResponseValue StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
+	return info
+}
+
+// ToRawInfo returns a description of NamedSchema suitable for JSON or YAML export.
+func (m *NamedSchema) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Name != "" {
+		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+	}
+	// &{Name:value Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
+	return info
+}
+
+// ToRawInfo returns a description of NamedSecurityDefinitionsItem suitable for JSON or YAML export.
+func (m *NamedSecurityDefinitionsItem) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Name != "" {
+		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+	}
+	// &{Name:value Type:SecurityDefinitionsItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
+	return info
+}
+
+// ToRawInfo returns a description of NamedString suitable for JSON or YAML export.
+func (m *NamedString) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Name != "" {
+		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+	}
+	if m.Value != "" {
+		info = append(info, yaml.MapItem{Key: "value", Value: m.Value})
+	}
+	return info
+}
+
+// ToRawInfo returns a description of NamedStringArray suitable for JSON or YAML export.
+func (m *NamedStringArray) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Name != "" {
+		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+	}
+	// &{Name:value Type:StringArray StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
+	return info
+}
+
+// ToRawInfo returns a description of NonBodyParameter suitable for JSON or YAML export.
+func (m *NonBodyParameter) ToRawInfo() interface{} {
+	// ONE OF WRAPPER
+	// NonBodyParameter
+	// {Name:headerParameterSubSchema Type:HeaderParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v0 := m.GetHeaderParameterSubSchema()
+	if v0 != nil {
+		return v0.ToRawInfo()
+	}
+	// {Name:formDataParameterSubSchema Type:FormDataParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v1 := m.GetFormDataParameterSubSchema()
+	if v1 != nil {
+		return v1.ToRawInfo()
+	}
+	// {Name:queryParameterSubSchema Type:QueryParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v2 := m.GetQueryParameterSubSchema()
+	if v2 != nil {
+		return v2.ToRawInfo()
+	}
+	// {Name:pathParameterSubSchema Type:PathParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v3 := m.GetPathParameterSubSchema()
+	if v3 != nil {
+		return v3.ToRawInfo()
+	}
+	return nil
+}
+
+// ToRawInfo returns a description of Oauth2AccessCodeSecurity suitable for JSON or YAML export.
+func (m *Oauth2AccessCodeSecurity) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Type != "" {
+		info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
+	}
+	if m.Flow != "" {
+		info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow})
+	}
+	if m.Scopes != nil {
+		info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()})
+	}
+	// &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.AuthorizationUrl != "" {
+		info = append(info, yaml.MapItem{Key: "authorizationUrl", Value: m.AuthorizationUrl})
+	}
+	if m.TokenUrl != "" {
+		info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl})
+	}
+	if m.Description != "" {
+		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of Oauth2ApplicationSecurity suitable for JSON or YAML export.
+func (m *Oauth2ApplicationSecurity) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Type != "" {
+		info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
+	}
+	if m.Flow != "" {
+		info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow})
+	}
+	if m.Scopes != nil {
+		info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()})
+	}
+	// &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.TokenUrl != "" {
+		info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl})
+	}
+	if m.Description != "" {
+		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of Oauth2ImplicitSecurity suitable for JSON or YAML export.
+func (m *Oauth2ImplicitSecurity) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Type != "" {
+		info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
+	}
+	if m.Flow != "" {
+		info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow})
+	}
+	if m.Scopes != nil {
+		info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()})
+	}
+	// &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.AuthorizationUrl != "" {
+		info = append(info, yaml.MapItem{Key: "authorizationUrl", Value: m.AuthorizationUrl})
+	}
+	if m.Description != "" {
+		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of Oauth2PasswordSecurity suitable for JSON or YAML export.
+func (m *Oauth2PasswordSecurity) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Type != "" {
+		info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
+	}
+	if m.Flow != "" {
+		info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow})
+	}
+	if m.Scopes != nil {
+		info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()})
+	}
+	// &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.TokenUrl != "" {
+		info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl})
+	}
+	if m.Description != "" {
+		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of Oauth2Scopes suitable for JSON or YAML export.
+func (m *Oauth2Scopes) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	// &{Name:additionalProperties Type:NamedString StringEnumValues:[] MapType:string Repeated:true Pattern: Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of Operation suitable for JSON or YAML export.
+func (m *Operation) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if len(m.Tags) != 0 {
+		info = append(info, yaml.MapItem{Key: "tags", Value: m.Tags})
+	}
+	if m.Summary != "" {
+		info = append(info, yaml.MapItem{Key: "summary", Value: m.Summary})
+	}
+	if m.Description != "" {
+		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+	}
+	if m.ExternalDocs != nil {
+		info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()})
+	}
+	// &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.OperationId != "" {
+		info = append(info, yaml.MapItem{Key: "operationId", Value: m.OperationId})
+	}
+	if len(m.Produces) != 0 {
+		info = append(info, yaml.MapItem{Key: "produces", Value: m.Produces})
+	}
+	if len(m.Consumes) != 0 {
+		info = append(info, yaml.MapItem{Key: "consumes", Value: m.Consumes})
+	}
+	if len(m.Parameters) != 0 {
+		items := make([]interface{}, 0)
+		for _, item := range m.Parameters {
+			items = append(items, item.ToRawInfo())
+		}
+		info = append(info, yaml.MapItem{Key: "parameters", Value: items})
+	}
+	// &{Name:parameters Type:ParametersItem StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:The parameters needed to send a valid API call.}
+	if m.Responses != nil {
+		info = append(info, yaml.MapItem{Key: "responses", Value: m.Responses.ToRawInfo()})
+	}
+	// &{Name:responses Type:Responses StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if len(m.Schemes) != 0 {
+		info = append(info, yaml.MapItem{Key: "schemes", Value: m.Schemes})
+	}
+	if m.Deprecated != false {
+		info = append(info, yaml.MapItem{Key: "deprecated", Value: m.Deprecated})
+	}
+	if len(m.Security) != 0 {
+		items := make([]interface{}, 0)
+		for _, item := range m.Security {
+			items = append(items, item.ToRawInfo())
+		}
+		info = append(info, yaml.MapItem{Key: "security", Value: items})
+	}
+	// &{Name:security Type:SecurityRequirement StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of Parameter suitable for JSON or YAML export.
+func (m *Parameter) ToRawInfo() interface{} {
+	// ONE OF WRAPPER
+	// Parameter
+	// {Name:bodyParameter Type:BodyParameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v0 := m.GetBodyParameter()
+	if v0 != nil {
+		return v0.ToRawInfo()
+	}
+	// {Name:nonBodyParameter Type:NonBodyParameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v1 := m.GetNonBodyParameter()
+	if v1 != nil {
+		return v1.ToRawInfo()
+	}
+	return nil
+}
+
+// ToRawInfo returns a description of ParameterDefinitions suitable for JSON or YAML export.
+func (m *ParameterDefinitions) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.AdditionalProperties != nil {
+		for _, item := range m.AdditionalProperties {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:additionalProperties Type:NamedParameter StringEnumValues:[] MapType:Parameter Repeated:true Pattern: Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of ParametersItem suitable for JSON or YAML export.
+func (m *ParametersItem) ToRawInfo() interface{} {
+	// ONE OF WRAPPER
+	// ParametersItem
+	// {Name:parameter Type:Parameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v0 := m.GetParameter()
+	if v0 != nil {
+		return v0.ToRawInfo()
+	}
+	// {Name:jsonReference Type:JsonReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v1 := m.GetJsonReference()
+	if v1 != nil {
+		return v1.ToRawInfo()
+	}
+	return nil
+}
+
+// ToRawInfo returns a description of PathItem suitable for JSON or YAML export.
+func (m *PathItem) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.XRef != "" {
+		info = append(info, yaml.MapItem{Key: "$ref", Value: m.XRef})
+	}
+	if m.Get != nil {
+		info = append(info, yaml.MapItem{Key: "get", Value: m.Get.ToRawInfo()})
+	}
+	// &{Name:get Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Put != nil {
+		info = append(info, yaml.MapItem{Key: "put", Value: m.Put.ToRawInfo()})
+	}
+	// &{Name:put Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Post != nil {
+		info = append(info, yaml.MapItem{Key: "post", Value: m.Post.ToRawInfo()})
+	}
+	// &{Name:post Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Delete != nil {
+		info = append(info, yaml.MapItem{Key: "delete", Value: m.Delete.ToRawInfo()})
+	}
+	// &{Name:delete Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Options != nil {
+		info = append(info, yaml.MapItem{Key: "options", Value: m.Options.ToRawInfo()})
+	}
+	// &{Name:options Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Head != nil {
+		info = append(info, yaml.MapItem{Key: "head", Value: m.Head.ToRawInfo()})
+	}
+	// &{Name:head Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Patch != nil {
+		info = append(info, yaml.MapItem{Key: "patch", Value: m.Patch.ToRawInfo()})
+	}
+	// &{Name:patch Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if len(m.Parameters) != 0 {
+		items := make([]interface{}, 0)
+		for _, item := range m.Parameters {
+			items = append(items, item.ToRawInfo())
+		}
+		info = append(info, yaml.MapItem{Key: "parameters", Value: items})
+	}
+	// &{Name:parameters Type:ParametersItem StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:The parameters needed to send a valid API call.}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of PathParameterSubSchema suitable for JSON or YAML export.
+func (m *PathParameterSubSchema) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Required != false {
+		info = append(info, yaml.MapItem{Key: "required", Value: m.Required})
+	}
+	if m.In != "" {
+		info = append(info, yaml.MapItem{Key: "in", Value: m.In})
+	}
+	if m.Description != "" {
+		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+	}
+	if m.Name != "" {
+		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+	}
+	if m.Type != "" {
+		info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
+	}
+	if m.Format != "" {
+		info = append(info, yaml.MapItem{Key: "format", Value: m.Format})
+	}
+	if m.Items != nil {
+		info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()})
+	}
+	// &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.CollectionFormat != "" {
+		info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat})
+	}
+	if m.Default != nil {
+		info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()})
+	}
+	// &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Maximum != 0.0 {
+		info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum})
+	}
+	if m.ExclusiveMaximum != false {
+		info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum})
+	}
+	if m.Minimum != 0.0 {
+		info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum})
+	}
+	if m.ExclusiveMinimum != false {
+		info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum})
+	}
+	if m.MaxLength != 0 {
+		info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength})
+	}
+	if m.MinLength != 0 {
+		info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength})
+	}
+	if m.Pattern != "" {
+		info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern})
+	}
+	if m.MaxItems != 0 {
+		info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems})
+	}
+	if m.MinItems != 0 {
+		info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems})
+	}
+	if m.UniqueItems != false {
+		info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems})
+	}
+	if len(m.Enum) != 0 {
+		items := make([]interface{}, 0)
+		for _, item := range m.Enum {
+			items = append(items, item.ToRawInfo())
+		}
+		info = append(info, yaml.MapItem{Key: "enum", Value: items})
+	}
+	// &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
+	if m.MultipleOf != 0.0 {
+		info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf})
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of Paths suitable for JSON or YAML export.
+func (m *Paths) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	if m.Path != nil {
+		for _, item := range m.Path {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:Path Type:NamedPathItem StringEnumValues:[] MapType:PathItem Repeated:true Pattern:^/ Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of PrimitivesItems suitable for JSON or YAML export.
+func (m *PrimitivesItems) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Type != "" {
+		info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
+	}
+	if m.Format != "" {
+		info = append(info, yaml.MapItem{Key: "format", Value: m.Format})
+	}
+	if m.Items != nil {
+		info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()})
+	}
+	// &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.CollectionFormat != "" {
+		info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat})
+	}
+	if m.Default != nil {
+		info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()})
+	}
+	// &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Maximum != 0.0 {
+		info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum})
+	}
+	if m.ExclusiveMaximum != false {
+		info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum})
+	}
+	if m.Minimum != 0.0 {
+		info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum})
+	}
+	if m.ExclusiveMinimum != false {
+		info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum})
+	}
+	if m.MaxLength != 0 {
+		info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength})
+	}
+	if m.MinLength != 0 {
+		info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength})
+	}
+	if m.Pattern != "" {
+		info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern})
+	}
+	if m.MaxItems != 0 {
+		info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems})
+	}
+	if m.MinItems != 0 {
+		info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems})
+	}
+	if m.UniqueItems != false {
+		info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems})
+	}
+	if len(m.Enum) != 0 {
+		items := make([]interface{}, 0)
+		for _, item := range m.Enum {
+			items = append(items, item.ToRawInfo())
+		}
+		info = append(info, yaml.MapItem{Key: "enum", Value: items})
+	}
+	// &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
+	if m.MultipleOf != 0.0 {
+		info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf})
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of Properties suitable for JSON or YAML export.
+func (m *Properties) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.AdditionalProperties != nil {
+		for _, item := range m.AdditionalProperties {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:additionalProperties Type:NamedSchema StringEnumValues:[] MapType:Schema Repeated:true Pattern: Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of QueryParameterSubSchema suitable for JSON or YAML export.
+func (m *QueryParameterSubSchema) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Required != false {
+		info = append(info, yaml.MapItem{Key: "required", Value: m.Required})
+	}
+	if m.In != "" {
+		info = append(info, yaml.MapItem{Key: "in", Value: m.In})
+	}
+	if m.Description != "" {
+		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+	}
+	if m.Name != "" {
+		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+	}
+	if m.AllowEmptyValue != false {
+		info = append(info, yaml.MapItem{Key: "allowEmptyValue", Value: m.AllowEmptyValue})
+	}
+	if m.Type != "" {
+		info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
+	}
+	if m.Format != "" {
+		info = append(info, yaml.MapItem{Key: "format", Value: m.Format})
+	}
+	if m.Items != nil {
+		info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()})
+	}
+	// &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.CollectionFormat != "" {
+		info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat})
+	}
+	if m.Default != nil {
+		info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()})
+	}
+	// &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Maximum != 0.0 {
+		info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum})
+	}
+	if m.ExclusiveMaximum != false {
+		info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum})
+	}
+	if m.Minimum != 0.0 {
+		info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum})
+	}
+	if m.ExclusiveMinimum != false {
+		info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum})
+	}
+	if m.MaxLength != 0 {
+		info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength})
+	}
+	if m.MinLength != 0 {
+		info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength})
+	}
+	if m.Pattern != "" {
+		info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern})
+	}
+	if m.MaxItems != 0 {
+		info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems})
+	}
+	if m.MinItems != 0 {
+		info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems})
+	}
+	if m.UniqueItems != false {
+		info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems})
+	}
+	if len(m.Enum) != 0 {
+		items := make([]interface{}, 0)
+		for _, item := range m.Enum {
+			items = append(items, item.ToRawInfo())
+		}
+		info = append(info, yaml.MapItem{Key: "enum", Value: items})
+	}
+	// &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
+	if m.MultipleOf != 0.0 {
+		info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf})
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of Response suitable for JSON or YAML export.
+func (m *Response) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Description != "" {
+		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+	}
+	if m.Schema != nil {
+		info = append(info, yaml.MapItem{Key: "schema", Value: m.Schema.ToRawInfo()})
+	}
+	// &{Name:schema Type:SchemaItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Headers != nil {
+		info = append(info, yaml.MapItem{Key: "headers", Value: m.Headers.ToRawInfo()})
+	}
+	// &{Name:headers Type:Headers StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Examples != nil {
+		info = append(info, yaml.MapItem{Key: "examples", Value: m.Examples.ToRawInfo()})
+	}
+	// &{Name:examples Type:Examples StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of ResponseDefinitions suitable for JSON or YAML export.
+func (m *ResponseDefinitions) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.AdditionalProperties != nil {
+		for _, item := range m.AdditionalProperties {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:additionalProperties Type:NamedResponse StringEnumValues:[] MapType:Response Repeated:true Pattern: Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of ResponseValue suitable for JSON or YAML export.
+func (m *ResponseValue) ToRawInfo() interface{} {
+	// ONE OF WRAPPER
+	// ResponseValue
+	// {Name:response Type:Response StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v0 := m.GetResponse()
+	if v0 != nil {
+		return v0.ToRawInfo()
+	}
+	// {Name:jsonReference Type:JsonReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v1 := m.GetJsonReference()
+	if v1 != nil {
+		return v1.ToRawInfo()
+	}
+	return nil
+}
+
+// ToRawInfo returns a description of Responses suitable for JSON or YAML export.
+func (m *Responses) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.ResponseCode != nil {
+		for _, item := range m.ResponseCode {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:ResponseCode Type:NamedResponseValue StringEnumValues:[] MapType:ResponseValue Repeated:true Pattern:^([0-9]{3})$|^(default)$ Implicit:true Description:}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of Schema suitable for JSON or YAML export.
+func (m *Schema) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.XRef != "" {
+		info = append(info, yaml.MapItem{Key: "$ref", Value: m.XRef})
+	}
+	if m.Format != "" {
+		info = append(info, yaml.MapItem{Key: "format", Value: m.Format})
+	}
+	if m.Title != "" {
+		info = append(info, yaml.MapItem{Key: "title", Value: m.Title})
+	}
+	if m.Description != "" {
+		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+	}
+	if m.Default != nil {
+		info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()})
+	}
+	// &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.MultipleOf != 0.0 {
+		info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf})
+	}
+	if m.Maximum != 0.0 {
+		info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum})
+	}
+	if m.ExclusiveMaximum != false {
+		info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum})
+	}
+	if m.Minimum != 0.0 {
+		info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum})
+	}
+	if m.ExclusiveMinimum != false {
+		info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum})
+	}
+	if m.MaxLength != 0 {
+		info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength})
+	}
+	if m.MinLength != 0 {
+		info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength})
+	}
+	if m.Pattern != "" {
+		info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern})
+	}
+	if m.MaxItems != 0 {
+		info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems})
+	}
+	if m.MinItems != 0 {
+		info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems})
+	}
+	if m.UniqueItems != false {
+		info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems})
+	}
+	if m.MaxProperties != 0 {
+		info = append(info, yaml.MapItem{Key: "maxProperties", Value: m.MaxProperties})
+	}
+	if m.MinProperties != 0 {
+		info = append(info, yaml.MapItem{Key: "minProperties", Value: m.MinProperties})
+	}
+	if len(m.Required) != 0 {
+		info = append(info, yaml.MapItem{Key: "required", Value: m.Required})
+	}
+	if len(m.Enum) != 0 {
+		items := make([]interface{}, 0)
+		for _, item := range m.Enum {
+			items = append(items, item.ToRawInfo())
+		}
+		info = append(info, yaml.MapItem{Key: "enum", Value: items})
+	}
+	// &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
+	if m.AdditionalProperties != nil {
+		info = append(info, yaml.MapItem{Key: "additionalProperties", Value: m.AdditionalProperties.ToRawInfo()})
+	}
+	// &{Name:additionalProperties Type:AdditionalPropertiesItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Type != nil {
+		if len(m.Type.Value) == 1 {
+			info = append(info, yaml.MapItem{Key: "type", Value: m.Type.Value[0]})
+		} else {
+			info = append(info, yaml.MapItem{Key: "type", Value: m.Type.Value})
+		}
+	}
+	// &{Name:type Type:TypeItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Items != nil {
+		items := make([]interface{}, 0)
+		for _, item := range m.Items.Schema {
+			items = append(items, item.ToRawInfo())
+		}
+		info = append(info, yaml.MapItem{Key: "items", Value: items[0]})
+	}
+	// &{Name:items Type:ItemsItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if len(m.AllOf) != 0 {
+		items := make([]interface{}, 0)
+		for _, item := range m.AllOf {
+			items = append(items, item.ToRawInfo())
+		}
+		info = append(info, yaml.MapItem{Key: "allOf", Value: items})
+	}
+	// &{Name:allOf Type:Schema StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
+	if m.Properties != nil {
+		info = append(info, yaml.MapItem{Key: "properties", Value: m.Properties.ToRawInfo()})
+	}
+	// &{Name:properties Type:Properties StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Discriminator != "" {
+		info = append(info, yaml.MapItem{Key: "discriminator", Value: m.Discriminator})
+	}
+	if m.ReadOnly != false {
+		info = append(info, yaml.MapItem{Key: "readOnly", Value: m.ReadOnly})
+	}
+	if m.Xml != nil {
+		info = append(info, yaml.MapItem{Key: "xml", Value: m.Xml.ToRawInfo()})
+	}
+	// &{Name:xml Type:Xml StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.ExternalDocs != nil {
+		info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()})
+	}
+	// &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Example != nil {
+		info = append(info, yaml.MapItem{Key: "example", Value: m.Example.ToRawInfo()})
+	}
+	// &{Name:example Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of SchemaItem suitable for JSON or YAML export.
+func (m *SchemaItem) ToRawInfo() interface{} {
+	// ONE OF WRAPPER
+	// SchemaItem
+	// {Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v0 := m.GetSchema()
+	if v0 != nil {
+		return v0.ToRawInfo()
+	}
+	// {Name:fileSchema Type:FileSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v1 := m.GetFileSchema()
+	if v1 != nil {
+		return v1.ToRawInfo()
+	}
+	return nil
+}
+
+// ToRawInfo returns a description of SecurityDefinitions suitable for JSON or YAML export.
+func (m *SecurityDefinitions) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.AdditionalProperties != nil {
+		for _, item := range m.AdditionalProperties {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:additionalProperties Type:NamedSecurityDefinitionsItem StringEnumValues:[] MapType:SecurityDefinitionsItem Repeated:true Pattern: Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of SecurityDefinitionsItem suitable for JSON or YAML export.
+func (m *SecurityDefinitionsItem) ToRawInfo() interface{} {
+	// ONE OF WRAPPER
+	// SecurityDefinitionsItem
+	// {Name:basicAuthenticationSecurity Type:BasicAuthenticationSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v0 := m.GetBasicAuthenticationSecurity()
+	if v0 != nil {
+		return v0.ToRawInfo()
+	}
+	// {Name:apiKeySecurity Type:ApiKeySecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v1 := m.GetApiKeySecurity()
+	if v1 != nil {
+		return v1.ToRawInfo()
+	}
+	// {Name:oauth2ImplicitSecurity Type:Oauth2ImplicitSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v2 := m.GetOauth2ImplicitSecurity()
+	if v2 != nil {
+		return v2.ToRawInfo()
+	}
+	// {Name:oauth2PasswordSecurity Type:Oauth2PasswordSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v3 := m.GetOauth2PasswordSecurity()
+	if v3 != nil {
+		return v3.ToRawInfo()
+	}
+	// {Name:oauth2ApplicationSecurity Type:Oauth2ApplicationSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v4 := m.GetOauth2ApplicationSecurity()
+	if v4 != nil {
+		return v4.ToRawInfo()
+	}
+	// {Name:oauth2AccessCodeSecurity Type:Oauth2AccessCodeSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v5 := m.GetOauth2AccessCodeSecurity()
+	if v5 != nil {
+		return v5.ToRawInfo()
+	}
+	return nil
+}
+
+// ToRawInfo returns a description of SecurityRequirement suitable for JSON or YAML export.
+func (m *SecurityRequirement) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.AdditionalProperties != nil {
+		for _, item := range m.AdditionalProperties {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:additionalProperties Type:NamedStringArray StringEnumValues:[] MapType:StringArray Repeated:true Pattern: Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of StringArray suitable for JSON or YAML export.
+func (m *StringArray) ToRawInfo() interface{} {
+	return m.Value
+}
+
+// ToRawInfo returns a description of Tag suitable for JSON or YAML export.
+func (m *Tag) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Name != "" {
+		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+	}
+	if m.Description != "" {
+		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+	}
+	if m.ExternalDocs != nil {
+		info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()})
+	}
+	// &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of TypeItem suitable for JSON or YAML export.
+func (m *TypeItem) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if len(m.Value) != 0 {
+		info = append(info, yaml.MapItem{Key: "value", Value: m.Value})
+	}
+	return info
+}
+
+// ToRawInfo returns a description of VendorExtension suitable for JSON or YAML export.
+func (m *VendorExtension) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.AdditionalProperties != nil {
+		for _, item := range m.AdditionalProperties {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of Xml suitable for JSON or YAML export.
+func (m *Xml) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Name != "" {
+		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+	}
+	if m.Namespace != "" {
+		info = append(info, yaml.MapItem{Key: "namespace", Value: m.Namespace})
+	}
+	if m.Prefix != "" {
+		info = append(info, yaml.MapItem{Key: "prefix", Value: m.Prefix})
+	}
+	if m.Attribute != false {
+		info = append(info, yaml.MapItem{Key: "attribute", Value: m.Attribute})
+	}
+	if m.Wrapped != false {
+		info = append(info, yaml.MapItem{Key: "wrapped", Value: m.Wrapped})
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+var (
+	pattern0 = regexp.MustCompile("^x-")
+	pattern1 = regexp.MustCompile("^/")
+	pattern2 = regexp.MustCompile("^([0-9]{3})$|^(default)$")
+)
diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto
new file mode 100644
index 0000000..557c880
--- /dev/null
+++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto
@@ -0,0 +1,663 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// THIS FILE IS AUTOMATICALLY GENERATED.
+
+syntax = "proto3";
+
+package openapi.v2;
+
+import "google/protobuf/any.proto";
+
+// This option lets the proto compiler generate Java code inside the package
+// name (see below) instead of inside an outer class. It creates a simpler
+// developer experience by reducing one level of name nesting and being
+// consistent with most programming languages that don't support outer classes.
+option java_multiple_files = true;
+
+// The Java outer classname should be the filename in UpperCamelCase. This
+// class is only used to hold the proto descriptor, so developers don't need to
+// work with it directly.
+option java_outer_classname = "OpenAPIProto";
+
+// The Java package name must be the proto package name with a proper prefix.
+option java_package = "org.openapi_v2";
+
+// A reasonable prefix for the Objective-C symbols generated from the package.
+// It should at a minimum be 3 characters long, all uppercase, and convention
+// is to use an abbreviation of the package name. Something short, but
+// hopefully unique enough to not conflict with things that may come along in
+// the future. 'GPB' is reserved for the protocol buffer implementation itself.
+option objc_class_prefix = "OAS";
+
+message AdditionalPropertiesItem {
+  oneof oneof {
+    Schema schema = 1;
+    bool boolean = 2;
+  }
+}
+
+message Any {
+  google.protobuf.Any value = 1;
+  string yaml = 2;
+}
+
+message ApiKeySecurity {
+  string type = 1;
+  string name = 2;
+  string in = 3;
+  string description = 4;
+  repeated NamedAny vendor_extension = 5;
+}
+
+message BasicAuthenticationSecurity {
+  string type = 1;
+  string description = 2;
+  repeated NamedAny vendor_extension = 3;
+}
+
+message BodyParameter {
+  // A brief description of the parameter. This could contain examples of use.  GitHub Flavored Markdown is allowed.
+  string description = 1;
+  // The name of the parameter.
+  string name = 2;
+  // Determines the location of the parameter.
+  string in = 3;
+  // Determines whether or not this parameter is required or optional.
+  bool required = 4;
+  Schema schema = 5;
+  repeated NamedAny vendor_extension = 6;
+}
+
+// Contact information for the owners of the API.
+message Contact {
+  // The identifying name of the contact person/organization.
+  string name = 1;
+  // The URL pointing to the contact information.
+  string url = 2;
+  // The email address of the contact person/organization.
+  string email = 3;
+  repeated NamedAny vendor_extension = 4;
+}
+
+message Default {
+  repeated NamedAny additional_properties = 1;
+}
+
+// One or more JSON objects describing the schemas being consumed and produced by the API.
+message Definitions {
+  repeated NamedSchema additional_properties = 1;
+}
+
+message Document {
+  // The Swagger version of this document.
+  string swagger = 1;
+  Info info = 2;
+  // The host (name or ip) of the API. Example: 'swagger.io'
+  string host = 3;
+  // The base path to the API. Example: '/api'.
+  string base_path = 4;
+  // The transfer protocol of the API.
+  repeated string schemes = 5;
+  // A list of MIME types accepted by the API.
+  repeated string consumes = 6;
+  // A list of MIME types the API can produce.
+  repeated string produces = 7;
+  Paths paths = 8;
+  Definitions definitions = 9;
+  ParameterDefinitions parameters = 10;
+  ResponseDefinitions responses = 11;
+  repeated SecurityRequirement security = 12;
+  SecurityDefinitions security_definitions = 13;
+  repeated Tag tags = 14;
+  ExternalDocs external_docs = 15;
+  repeated NamedAny vendor_extension = 16;
+}
+
+message Examples {
+  repeated NamedAny additional_properties = 1;
+}
+
+// information about external documentation
+message ExternalDocs {
+  string description = 1;
+  string url = 2;
+  repeated NamedAny vendor_extension = 3;
+}
+
+// A deterministic version of a JSON Schema object.
+message FileSchema {
+  string format = 1;
+  string title = 2;
+  string description = 3;
+  Any default = 4;
+  repeated string required = 5;
+  string type = 6;
+  bool read_only = 7;
+  ExternalDocs external_docs = 8;
+  Any example = 9;
+  repeated NamedAny vendor_extension = 10;
+}
+
+message FormDataParameterSubSchema {
+  // Determines whether or not this parameter is required or optional.
+  bool required = 1;
+  // Determines the location of the parameter.
+  string in = 2;
+  // A brief description of the parameter. This could contain examples of use.  GitHub Flavored Markdown is allowed.
+  string description = 3;
+  // The name of the parameter.
+  string name = 4;
+  // allows sending a parameter by name only or with an empty value.
+  bool allow_empty_value = 5;
+  string type = 6;
+  string format = 7;
+  PrimitivesItems items = 8;
+  string collection_format = 9;
+  Any default = 10;
+  double maximum = 11;
+  bool exclusive_maximum = 12;
+  double minimum = 13;
+  bool exclusive_minimum = 14;
+  int64 max_length = 15;
+  int64 min_length = 16;
+  string pattern = 17;
+  int64 max_items = 18;
+  int64 min_items = 19;
+  bool unique_items = 20;
+  repeated Any enum = 21;
+  double multiple_of = 22;
+  repeated NamedAny vendor_extension = 23;
+}
+
+message Header {
+  string type = 1;
+  string format = 2;
+  PrimitivesItems items = 3;
+  string collection_format = 4;
+  Any default = 5;
+  double maximum = 6;
+  bool exclusive_maximum = 7;
+  double minimum = 8;
+  bool exclusive_minimum = 9;
+  int64 max_length = 10;
+  int64 min_length = 11;
+  string pattern = 12;
+  int64 max_items = 13;
+  int64 min_items = 14;
+  bool unique_items = 15;
+  repeated Any enum = 16;
+  double multiple_of = 17;
+  string description = 18;
+  repeated NamedAny vendor_extension = 19;
+}
+
+message HeaderParameterSubSchema {
+  // Determines whether or not this parameter is required or optional.
+  bool required = 1;
+  // Determines the location of the parameter.
+  string in = 2;
+  // A brief description of the parameter. This could contain examples of use.  GitHub Flavored Markdown is allowed.
+  string description = 3;
+  // The name of the parameter.
+  string name = 4;
+  string type = 5;
+  string format = 6;
+  PrimitivesItems items = 7;
+  string collection_format = 8;
+  Any default = 9;
+  double maximum = 10;
+  bool exclusive_maximum = 11;
+  double minimum = 12;
+  bool exclusive_minimum = 13;
+  int64 max_length = 14;
+  int64 min_length = 15;
+  string pattern = 16;
+  int64 max_items = 17;
+  int64 min_items = 18;
+  bool unique_items = 19;
+  repeated Any enum = 20;
+  double multiple_of = 21;
+  repeated NamedAny vendor_extension = 22;
+}
+
+message Headers {
+  repeated NamedHeader additional_properties = 1;
+}
+
+// General information about the API.
+message Info {
+  // A unique and precise title of the API.
+  string title = 1;
+  // A semantic version number of the API.
+  string version = 2;
+  // A longer description of the API. Should be different from the title.  GitHub Flavored Markdown is allowed.
+  string description = 3;
+  // The terms of service for the API.
+  string terms_of_service = 4;
+  Contact contact = 5;
+  License license = 6;
+  repeated NamedAny vendor_extension = 7;
+}
+
+message ItemsItem {
+  repeated Schema schema = 1;
+}
+
+message JsonReference {
+  string _ref = 1;
+  string description = 2;
+}
+
+message License {
+  // The name of the license type. It's encouraged to use an OSI compatible license.
+  string name = 1;
+  // The URL pointing to the license.
+  string url = 2;
+  repeated NamedAny vendor_extension = 3;
+}
+
+// Automatically-generated message used to represent maps of Any as ordered (name,value) pairs.
+message NamedAny {
+  // Map key
+  string name = 1;
+  // Mapped value
+  Any value = 2;
+}
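+// For example, a YAML mapping such as {x-logo: {url: ...}} is carried as a
+// single NamedAny with name "x-logo"; using repeated (name,value) pairs
+// instead of a proto map preserves the key order of the source document.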
+
+// Automatically-generated message used to represent maps of Header as ordered (name,value) pairs.
+message NamedHeader {
+  // Map key
+  string name = 1;
+  // Mapped value
+  Header value = 2;
+}
+
+// Automatically-generated message used to represent maps of Parameter as ordered (name,value) pairs.
+message NamedParameter {
+  // Map key
+  string name = 1;
+  // Mapped value
+  Parameter value = 2;
+}
+
+// Automatically-generated message used to represent maps of PathItem as ordered (name,value) pairs.
+message NamedPathItem {
+  // Map key
+  string name = 1;
+  // Mapped value
+  PathItem value = 2;
+}
+
+// Automatically-generated message used to represent maps of Response as ordered (name,value) pairs.
+message NamedResponse {
+  // Map key
+  string name = 1;
+  // Mapped value
+  Response value = 2;
+}
+
+// Automatically-generated message used to represent maps of ResponseValue as ordered (name,value) pairs.
+message NamedResponseValue {
+  // Map key
+  string name = 1;
+  // Mapped value
+  ResponseValue value = 2;
+}
+
+// Automatically-generated message used to represent maps of Schema as ordered (name,value) pairs.
+message NamedSchema {
+  // Map key
+  string name = 1;
+  // Mapped value
+  Schema value = 2;
+}
+
+// Automatically-generated message used to represent maps of SecurityDefinitionsItem as ordered (name,value) pairs.
+message NamedSecurityDefinitionsItem {
+  // Map key
+  string name = 1;
+  // Mapped value
+  SecurityDefinitionsItem value = 2;
+}
+
+// Automatically-generated message used to represent maps of string as ordered (name,value) pairs.
+message NamedString {
+  // Map key
+  string name = 1;
+  // Mapped value
+  string value = 2;
+}
+
+// Automatically-generated message used to represent maps of StringArray as ordered (name,value) pairs.
+message NamedStringArray {
+  // Map key
+  string name = 1;
+  // Mapped value
+  StringArray value = 2;
+}
+
+message NonBodyParameter {
+  oneof oneof {
+    HeaderParameterSubSchema header_parameter_sub_schema = 1;
+    FormDataParameterSubSchema form_data_parameter_sub_schema = 2;
+    QueryParameterSubSchema query_parameter_sub_schema = 3;
+    PathParameterSubSchema path_parameter_sub_schema = 4;
+  }
+}
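+// The oneof wrappers above (and Parameter, ParametersItem, ResponseValue,
+// SchemaItem, SecurityDefinitionsItem below) mirror the "oneOf" alternatives
+// in openapi-2.0.json; proto3 has no direct equivalent of JSON Schema's
+// oneOf, so each alternative becomes a field in a oneof group.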
+
+message Oauth2AccessCodeSecurity {
+  string type = 1;
+  string flow = 2;
+  Oauth2Scopes scopes = 3;
+  string authorization_url = 4;
+  string token_url = 5;
+  string description = 6;
+  repeated NamedAny vendor_extension = 7;
+}
+
+message Oauth2ApplicationSecurity {
+  string type = 1;
+  string flow = 2;
+  Oauth2Scopes scopes = 3;
+  string token_url = 4;
+  string description = 5;
+  repeated NamedAny vendor_extension = 6;
+}
+
+message Oauth2ImplicitSecurity {
+  string type = 1;
+  string flow = 2;
+  Oauth2Scopes scopes = 3;
+  string authorization_url = 4;
+  string description = 5;
+  repeated NamedAny vendor_extension = 6;
+}
+
+message Oauth2PasswordSecurity {
+  string type = 1;
+  string flow = 2;
+  Oauth2Scopes scopes = 3;
+  string token_url = 4;
+  string description = 5;
+  repeated NamedAny vendor_extension = 6;
+}
+
+message Oauth2Scopes {
+  repeated NamedString additional_properties = 1;
+}
+
+message Operation {
+  repeated string tags = 1;
+  // A brief summary of the operation.
+  string summary = 2;
+  // A longer description of the operation, GitHub Flavored Markdown is allowed.
+  string description = 3;
+  ExternalDocs external_docs = 4;
+  // A unique identifier of the operation.
+  string operation_id = 5;
+  // A list of MIME types the API can produce.
+  repeated string produces = 6;
+  // A list of MIME types the API can consume.
+  repeated string consumes = 7;
+  // The parameters needed to send a valid API call.
+  repeated ParametersItem parameters = 8;
+  Responses responses = 9;
+  // The transfer protocol of the API.
+  repeated string schemes = 10;
+  bool deprecated = 11;
+  repeated SecurityRequirement security = 12;
+  repeated NamedAny vendor_extension = 13;
+}
+
+message Parameter {
+  oneof oneof {
+    BodyParameter body_parameter = 1;
+    NonBodyParameter non_body_parameter = 2;
+  }
+}
+
+// One or more JSON representations for parameters
+message ParameterDefinitions {
+  repeated NamedParameter additional_properties = 1;
+}
+
+message ParametersItem {
+  oneof oneof {
+    Parameter parameter = 1;
+    JsonReference json_reference = 2;
+  }
+}
+
+message PathItem {
+  string _ref = 1;
+  Operation get = 2;
+  Operation put = 3;
+  Operation post = 4;
+  Operation delete = 5;
+  Operation options = 6;
+  Operation head = 7;
+  Operation patch = 8;
+  // The parameters needed to send a valid API call.
+  repeated ParametersItem parameters = 9;
+  repeated NamedAny vendor_extension = 10;
+}
+
+message PathParameterSubSchema {
+  // Determines whether or not this parameter is required or optional.
+  bool required = 1;
+  // Determines the location of the parameter.
+  string in = 2;
+  // A brief description of the parameter. This could contain examples of use.  GitHub Flavored Markdown is allowed.
+  string description = 3;
+  // The name of the parameter.
+  string name = 4;
+  string type = 5;
+  string format = 6;
+  PrimitivesItems items = 7;
+  string collection_format = 8;
+  Any default = 9;
+  double maximum = 10;
+  bool exclusive_maximum = 11;
+  double minimum = 12;
+  bool exclusive_minimum = 13;
+  int64 max_length = 14;
+  int64 min_length = 15;
+  string pattern = 16;
+  int64 max_items = 17;
+  int64 min_items = 18;
+  bool unique_items = 19;
+  repeated Any enum = 20;
+  double multiple_of = 21;
+  repeated NamedAny vendor_extension = 22;
+}
+
+// Relative paths to the individual endpoints. They must be relative to the 'basePath'.
+message Paths {
+  repeated NamedAny vendor_extension = 1;
+  repeated NamedPathItem path = 2;
+}
+
+message PrimitivesItems {
+  string type = 1;
+  string format = 2;
+  PrimitivesItems items = 3;
+  string collection_format = 4;
+  Any default = 5;
+  double maximum = 6;
+  bool exclusive_maximum = 7;
+  double minimum = 8;
+  bool exclusive_minimum = 9;
+  int64 max_length = 10;
+  int64 min_length = 11;
+  string pattern = 12;
+  int64 max_items = 13;
+  int64 min_items = 14;
+  bool unique_items = 15;
+  repeated Any enum = 16;
+  double multiple_of = 17;
+  repeated NamedAny vendor_extension = 18;
+}
+
+message Properties {
+  repeated NamedSchema additional_properties = 1;
+}
+
+message QueryParameterSubSchema {
+  // Determines whether or not this parameter is required or optional.
+  bool required = 1;
+  // Determines the location of the parameter.
+  string in = 2;
+  // A brief description of the parameter. This could contain examples of use.  GitHub Flavored Markdown is allowed.
+  string description = 3;
+  // The name of the parameter.
+  string name = 4;
+  // allows sending a parameter by name only or with an empty value.
+  bool allow_empty_value = 5;
+  string type = 6;
+  string format = 7;
+  PrimitivesItems items = 8;
+  string collection_format = 9;
+  Any default = 10;
+  double maximum = 11;
+  bool exclusive_maximum = 12;
+  double minimum = 13;
+  bool exclusive_minimum = 14;
+  int64 max_length = 15;
+  int64 min_length = 16;
+  string pattern = 17;
+  int64 max_items = 18;
+  int64 min_items = 19;
+  bool unique_items = 20;
+  repeated Any enum = 21;
+  double multiple_of = 22;
+  repeated NamedAny vendor_extension = 23;
+}
+
+message Response {
+  string description = 1;
+  SchemaItem schema = 2;
+  Headers headers = 3;
+  Examples examples = 4;
+  repeated NamedAny vendor_extension = 5;
+}
+
+// One or more JSON representations for responses
+message ResponseDefinitions {
+  repeated NamedResponse additional_properties = 1;
+}
+
+message ResponseValue {
+  oneof oneof {
+    Response response = 1;
+    JsonReference json_reference = 2;
+  }
+}
+
+// Response object names can be any valid HTTP status code or 'default'.
+message Responses {
+  repeated NamedResponseValue response_code = 1;
+  repeated NamedAny vendor_extension = 2;
+}
+
+// A deterministic version of a JSON Schema object.
+message Schema {
+  string _ref = 1;
+  string format = 2;
+  string title = 3;
+  string description = 4;
+  Any default = 5;
+  double multiple_of = 6;
+  double maximum = 7;
+  bool exclusive_maximum = 8;
+  double minimum = 9;
+  bool exclusive_minimum = 10;
+  int64 max_length = 11;
+  int64 min_length = 12;
+  string pattern = 13;
+  int64 max_items = 14;
+  int64 min_items = 15;
+  bool unique_items = 16;
+  int64 max_properties = 17;
+  int64 min_properties = 18;
+  repeated string required = 19;
+  repeated Any enum = 20;
+  AdditionalPropertiesItem additional_properties = 21;
+  TypeItem type = 22;
+  ItemsItem items = 23;
+  repeated Schema all_of = 24;
+  Properties properties = 25;
+  string discriminator = 26;
+  bool read_only = 27;
+  Xml xml = 28;
+  ExternalDocs external_docs = 29;
+  Any example = 30;
+  repeated NamedAny vendor_extension = 31;
+}
+
+message SchemaItem {
+  oneof oneof {
+    Schema schema = 1;
+    FileSchema file_schema = 2;
+  }
+}
+
+message SecurityDefinitions {
+  repeated NamedSecurityDefinitionsItem additional_properties = 1;
+}
+
+message SecurityDefinitionsItem {
+  oneof oneof {
+    BasicAuthenticationSecurity basic_authentication_security = 1;
+    ApiKeySecurity api_key_security = 2;
+    Oauth2ImplicitSecurity oauth2_implicit_security = 3;
+    Oauth2PasswordSecurity oauth2_password_security = 4;
+    Oauth2ApplicationSecurity oauth2_application_security = 5;
+    Oauth2AccessCodeSecurity oauth2_access_code_security = 6;
+  }
+}
+
+message SecurityRequirement {
+  repeated NamedStringArray additional_properties = 1;
+}
+
+message StringArray {
+  repeated string value = 1;
+}
+
+message Tag {
+  string name = 1;
+  string description = 2;
+  ExternalDocs external_docs = 3;
+  repeated NamedAny vendor_extension = 4;
+}
+
+message TypeItem {
+  repeated string value = 1;
+}
+
+// Any property starting with x- is valid.
+message VendorExtension {
+  repeated NamedAny additional_properties = 1;
+}
+
+message Xml {
+  string name = 1;
+  string namespace = 2;
+  string prefix = 3;
+  bool attribute = 4;
+  bool wrapped = 5;
+  repeated NamedAny vendor_extension = 6;
+}
+
diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md b/vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md
new file mode 100644
index 0000000..836fb32
--- /dev/null
+++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md
@@ -0,0 +1,16 @@
+# OpenAPI v2 Protocol Buffer Models
+
+This directory contains a Protocol Buffer-language model
+and related code for supporting OpenAPI v2.
+
+Gnostic applications and plugins can use OpenAPIv2.proto
+to generate Protocol Buffer support code for their preferred languages.
+
+OpenAPIv2.go is used by Gnostic to read JSON and YAML OpenAPI
+descriptions into the Protocol Buffer-based data structures
+generated from OpenAPIv2.proto.
+
+OpenAPIv2.proto and OpenAPIv2.go are generated by the Gnostic
+compiler generator, and OpenAPIv2.pb.go is generated by
+protoc, the Protocol Buffer compiler, and protoc-gen-go, the
+Protocol Buffer Go code generation plugin.
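+
+As a minimal sketch of how these pieces fit together (this snippet is an
+illustration, not part of the vendored sources; the input file name is
+hypothetical and error handling is abbreviated), an application might read a
+description using the compiler helpers vendored alongside (ReadInfoFromBytes,
+NewContext) and the generated NewDocument constructor:
+
+```go
+package main
+
+import (
+	"fmt"
+	"io/ioutil"
+	"log"
+
+	openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2"
+	"github.com/googleapis/gnostic/compiler"
+)
+
+func main() {
+	// Read a Swagger 2.0 description; JSON and YAML are both accepted.
+	b, err := ioutil.ReadFile("swagger.yaml")
+	if err != nil {
+		log.Fatal(err)
+	}
+	// Unmarshal into the generic YAML structures the generated reader expects.
+	info, err := compiler.ReadInfoFromBytes("swagger.yaml", b)
+	if err != nil {
+		log.Fatal(err)
+	}
+	// Build the typed Document, validating against the OpenAPI v2 model.
+	document, err := openapi_v2.NewDocument(info, compiler.NewContext("$root", nil))
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println(document.Swagger, document.Info.Title)
+}
+```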
diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/openapi-2.0.json b/vendor/github.com/googleapis/gnostic/OpenAPIv2/openapi-2.0.json
new file mode 100644
index 0000000..2815a26
--- /dev/null
+++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/openapi-2.0.json
@@ -0,0 +1,1610 @@
+{
+  "title": "A JSON Schema for Swagger 2.0 API.",
+  "id": "http://swagger.io/v2/schema.json#",
+  "$schema": "http://json-schema.org/draft-04/schema#",
+  "type": "object",
+  "required": [
+    "swagger",
+    "info",
+    "paths"
+  ],
+  "additionalProperties": false,
+  "patternProperties": {
+    "^x-": {
+      "$ref": "#/definitions/vendorExtension"
+    }
+  },
+  "properties": {
+    "swagger": {
+      "type": "string",
+      "enum": [
+        "2.0"
+      ],
+      "description": "The Swagger version of this document."
+    },
+    "info": {
+      "$ref": "#/definitions/info"
+    },
+    "host": {
+      "type": "string",
+      "pattern": "^[^{}/ :\\\\]+(?::\\d+)?$",
+      "description": "The host (name or ip) of the API. Example: 'swagger.io'"
+    },
+    "basePath": {
+      "type": "string",
+      "pattern": "^/",
+      "description": "The base path to the API. Example: '/api'."
+    },
+    "schemes": {
+      "$ref": "#/definitions/schemesList"
+    },
+    "consumes": {
+      "description": "A list of MIME types accepted by the API.",
+      "allOf": [
+        {
+          "$ref": "#/definitions/mediaTypeList"
+        }
+      ]
+    },
+    "produces": {
+      "description": "A list of MIME types the API can produce.",
+      "allOf": [
+        {
+          "$ref": "#/definitions/mediaTypeList"
+        }
+      ]
+    },
+    "paths": {
+      "$ref": "#/definitions/paths"
+    },
+    "definitions": {
+      "$ref": "#/definitions/definitions"
+    },
+    "parameters": {
+      "$ref": "#/definitions/parameterDefinitions"
+    },
+    "responses": {
+      "$ref": "#/definitions/responseDefinitions"
+    },
+    "security": {
+      "$ref": "#/definitions/security"
+    },
+    "securityDefinitions": {
+      "$ref": "#/definitions/securityDefinitions"
+    },
+    "tags": {
+      "type": "array",
+      "items": {
+        "$ref": "#/definitions/tag"
+      },
+      "uniqueItems": true
+    },
+    "externalDocs": {
+      "$ref": "#/definitions/externalDocs"
+    }
+  },
+  "definitions": {
+    "info": {
+      "type": "object",
+      "description": "General information about the API.",
+      "required": [
+        "version",
+        "title"
+      ],
+      "additionalProperties": false,
+      "patternProperties": {
+        "^x-": {
+          "$ref": "#/definitions/vendorExtension"
+        }
+      },
+      "properties": {
+        "title": {
+          "type": "string",
+          "description": "A unique and precise title of the API."
+        },
+        "version": {
+          "type": "string",
+          "description": "A semantic version number of the API."
+        },
+        "description": {
+          "type": "string",
+          "description": "A longer description of the API. Should be different from the title.  GitHub Flavored Markdown is allowed."
+        },
+        "termsOfService": {
+          "type": "string",
+          "description": "The terms of service for the API."
+        },
+        "contact": {
+          "$ref": "#/definitions/contact"
+        },
+        "license": {
+          "$ref": "#/definitions/license"
+        }
+      }
+    },
+    "contact": {
+      "type": "object",
+      "description": "Contact information for the owners of the API.",
+      "additionalProperties": false,
+      "properties": {
+        "name": {
+          "type": "string",
+          "description": "The identifying name of the contact person/organization."
+        },
+        "url": {
+          "type": "string",
+          "description": "The URL pointing to the contact information.",
+          "format": "uri"
+        },
+        "email": {
+          "type": "string",
+          "description": "The email address of the contact person/organization.",
+          "format": "email"
+        }
+      },
+      "patternProperties": {
+        "^x-": {
+          "$ref": "#/definitions/vendorExtension"
+        }
+      }
+    },
+    "license": {
+      "type": "object",
+      "required": [
+        "name"
+      ],
+      "additionalProperties": false,
+      "properties": {
+        "name": {
+          "type": "string",
+          "description": "The name of the license type. It's encouraged to use an OSI compatible license."
+        },
+        "url": {
+          "type": "string",
+          "description": "The URL pointing to the license.",
+          "format": "uri"
+        }
+      },
+      "patternProperties": {
+        "^x-": {
+          "$ref": "#/definitions/vendorExtension"
+        }
+      }
+    },
+    "paths": {
+      "type": "object",
+      "description": "Relative paths to the individual endpoints. They must be relative to the 'basePath'.",
+      "patternProperties": {
+        "^x-": {
+          "$ref": "#/definitions/vendorExtension"
+        },
+        "^/": {
+          "$ref": "#/definitions/pathItem"
+        }
+      },
+      "additionalProperties": false
+    },
+    "definitions": {
+      "type": "object",
+      "additionalProperties": {
+        "$ref": "#/definitions/schema"
+      },
+      "description": "One or more JSON objects describing the schemas being consumed and produced by the API."
+    },
+    "parameterDefinitions": {
+      "type": "object",
+      "additionalProperties": {
+        "$ref": "#/definitions/parameter"
+      },
+      "description": "One or more JSON representations for parameters"
+    },
+    "responseDefinitions": {
+      "type": "object",
+      "additionalProperties": {
+        "$ref": "#/definitions/response"
+      },
+      "description": "One or more JSON representations for parameters"
+    },
+    "externalDocs": {
+      "type": "object",
+      "additionalProperties": false,
+      "description": "information about external documentation",
+      "required": [
+        "url"
+      ],
+      "properties": {
+        "description": {
+          "type": "string"
+        },
+        "url": {
+          "type": "string",
+          "format": "uri"
+        }
+      },
+      "patternProperties": {
+        "^x-": {
+          "$ref": "#/definitions/vendorExtension"
+        }
+      }
+    },
+    "examples": {
+      "type": "object",
+      "additionalProperties": true
+    },
+    "mimeType": {
+      "type": "string",
+      "description": "The MIME type of the HTTP message."
+    },
+    "operation": {
+      "type": "object",
+      "required": [
+        "responses"
+      ],
+      "additionalProperties": false,
+      "patternProperties": {
+        "^x-": {
+          "$ref": "#/definitions/vendorExtension"
+        }
+      },
+      "properties": {
+        "tags": {
+          "type": "array",
+          "items": {
+            "type": "string"
+          },
+          "uniqueItems": true
+        },
+        "summary": {
+          "type": "string",
+          "description": "A brief summary of the operation."
+        },
+        "description": {
+          "type": "string",
+          "description": "A longer description of the operation, GitHub Flavored Markdown is allowed."
+        },
+        "externalDocs": {
+          "$ref": "#/definitions/externalDocs"
+        },
+        "operationId": {
+          "type": "string",
+          "description": "A unique identifier of the operation."
+        },
+        "produces": {
+          "description": "A list of MIME types the API can produce.",
+          "allOf": [
+            {
+              "$ref": "#/definitions/mediaTypeList"
+            }
+          ]
+        },
+        "consumes": {
+          "description": "A list of MIME types the API can consume.",
+          "allOf": [
+            {
+              "$ref": "#/definitions/mediaTypeList"
+            }
+          ]
+        },
+        "parameters": {
+          "$ref": "#/definitions/parametersList"
+        },
+        "responses": {
+          "$ref": "#/definitions/responses"
+        },
+        "schemes": {
+          "$ref": "#/definitions/schemesList"
+        },
+        "deprecated": {
+          "type": "boolean",
+          "default": false
+        },
+        "security": {
+          "$ref": "#/definitions/security"
+        }
+      }
+    },
+    "pathItem": {
+      "type": "object",
+      "additionalProperties": false,
+      "patternProperties": {
+        "^x-": {
+          "$ref": "#/definitions/vendorExtension"
+        }
+      },
+      "properties": {
+        "$ref": {
+          "type": "string"
+        },
+        "get": {
+          "$ref": "#/definitions/operation"
+        },
+        "put": {
+          "$ref": "#/definitions/operation"
+        },
+        "post": {
+          "$ref": "#/definitions/operation"
+        },
+        "delete": {
+          "$ref": "#/definitions/operation"
+        },
+        "options": {
+          "$ref": "#/definitions/operation"
+        },
+        "head": {
+          "$ref": "#/definitions/operation"
+        },
+        "patch": {
+          "$ref": "#/definitions/operation"
+        },
+        "parameters": {
+          "$ref": "#/definitions/parametersList"
+        }
+      }
+    },
+    "responses": {
+      "type": "object",
+      "description": "Response objects names can either be any valid HTTP status code or 'default'.",
+      "minProperties": 1,
+      "additionalProperties": false,
+      "patternProperties": {
+        "^([0-9]{3})$|^(default)$": {
+          "$ref": "#/definitions/responseValue"
+        },
+        "^x-": {
+          "$ref": "#/definitions/vendorExtension"
+        }
+      },
+      "not": {
+        "type": "object",
+        "additionalProperties": false,
+        "patternProperties": {
+          "^x-": {
+            "$ref": "#/definitions/vendorExtension"
+          }
+        }
+      }
+    },
+    "responseValue": {
+      "oneOf": [
+        {
+          "$ref": "#/definitions/response"
+        },
+        {
+          "$ref": "#/definitions/jsonReference"
+        }
+      ]
+    },
+    "response": {
+      "type": "object",
+      "required": [
+        "description"
+      ],
+      "properties": {
+        "description": {
+          "type": "string"
+        },
+        "schema": {
+          "oneOf": [
+            {
+              "$ref": "#/definitions/schema"
+            },
+            {
+              "$ref": "#/definitions/fileSchema"
+            }
+          ]
+        },
+        "headers": {
+          "$ref": "#/definitions/headers"
+        },
+        "examples": {
+          "$ref": "#/definitions/examples"
+        }
+      },
+      "additionalProperties": false,
+      "patternProperties": {
+        "^x-": {
+          "$ref": "#/definitions/vendorExtension"
+        }
+      }
+    },
+    "headers": {
+      "type": "object",
+      "additionalProperties": {
+        "$ref": "#/definitions/header"
+      }
+    },
+    "header": {
+      "type": "object",
+      "additionalProperties": false,
+      "required": [
+        "type"
+      ],
+      "properties": {
+        "type": {
+          "type": "string",
+          "enum": [
+            "string",
+            "number",
+            "integer",
+            "boolean",
+            "array"
+          ]
+        },
+        "format": {
+          "type": "string"
+        },
+        "items": {
+          "$ref": "#/definitions/primitivesItems"
+        },
+        "collectionFormat": {
+          "$ref": "#/definitions/collectionFormat"
+        },
+        "default": {
+          "$ref": "#/definitions/default"
+        },
+        "maximum": {
+          "$ref": "#/definitions/maximum"
+        },
+        "exclusiveMaximum": {
+          "$ref": "#/definitions/exclusiveMaximum"
+        },
+        "minimum": {
+          "$ref": "#/definitions/minimum"
+        },
+        "exclusiveMinimum": {
+          "$ref": "#/definitions/exclusiveMinimum"
+        },
+        "maxLength": {
+          "$ref": "#/definitions/maxLength"
+        },
+        "minLength": {
+          "$ref": "#/definitions/minLength"
+        },
+        "pattern": {
+          "$ref": "#/definitions/pattern"
+        },
+        "maxItems": {
+          "$ref": "#/definitions/maxItems"
+        },
+        "minItems": {
+          "$ref": "#/definitions/minItems"
+        },
+        "uniqueItems": {
+          "$ref": "#/definitions/uniqueItems"
+        },
+        "enum": {
+          "$ref": "#/definitions/enum"
+        },
+        "multipleOf": {
+          "$ref": "#/definitions/multipleOf"
+        },
+        "description": {
+          "type": "string"
+        }
+      },
+      "patternProperties": {
+        "^x-": {
+          "$ref": "#/definitions/vendorExtension"
+        }
+      }
+    },
+    "vendorExtension": {
+      "description": "Any property starting with x- is valid.",
+      "additionalProperties": true,
+      "additionalItems": true
+    },
+    "bodyParameter": {
+      "type": "object",
+      "required": [
+        "name",
+        "in",
+        "schema"
+      ],
+      "patternProperties": {
+        "^x-": {
+          "$ref": "#/definitions/vendorExtension"
+        }
+      },
+      "properties": {
+        "description": {
+          "type": "string",
+          "description": "A brief description of the parameter. This could contain examples of use.  GitHub Flavored Markdown is allowed."
+        },
+        "name": {
+          "type": "string",
+          "description": "The name of the parameter."
+        },
+        "in": {
+          "type": "string",
+          "description": "Determines the location of the parameter.",
+          "enum": [
+            "body"
+          ]
+        },
+        "required": {
+          "type": "boolean",
+          "description": "Determines whether or not this parameter is required or optional.",
+          "default": false
+        },
+        "schema": {
+          "$ref": "#/definitions/schema"
+        }
+      },
+      "additionalProperties": false
+    },
+    "headerParameterSubSchema": {
+      "additionalProperties": false,
+      "patternProperties": {
+        "^x-": {
+          "$ref": "#/definitions/vendorExtension"
+        }
+      },
+      "properties": {
+        "required": {
+          "type": "boolean",
+          "description": "Determines whether or not this parameter is required or optional.",
+          "default": false
+        },
+        "in": {
+          "type": "string",
+          "description": "Determines the location of the parameter.",
+          "enum": [
+            "header"
+          ]
+        },
+        "description": {
+          "type": "string",
+          "description": "A brief description of the parameter. This could contain examples of use.  GitHub Flavored Markdown is allowed."
+        },
+        "name": {
+          "type": "string",
+          "description": "The name of the parameter."
+        },
+        "type": {
+          "type": "string",
+          "enum": [
+            "string",
+            "number",
+            "boolean",
+            "integer",
+            "array"
+          ]
+        },
+        "format": {
+          "type": "string"
+        },
+        "items": {
+          "$ref": "#/definitions/primitivesItems"
+        },
+        "collectionFormat": {
+          "$ref": "#/definitions/collectionFormat"
+        },
+        "default": {
+          "$ref": "#/definitions/default"
+        },
+        "maximum": {
+          "$ref": "#/definitions/maximum"
+        },
+        "exclusiveMaximum": {
+          "$ref": "#/definitions/exclusiveMaximum"
+        },
+        "minimum": {
+          "$ref": "#/definitions/minimum"
+        },
+        "exclusiveMinimum": {
+          "$ref": "#/definitions/exclusiveMinimum"
+        },
+        "maxLength": {
+          "$ref": "#/definitions/maxLength"
+        },
+        "minLength": {
+          "$ref": "#/definitions/minLength"
+        },
+        "pattern": {
+          "$ref": "#/definitions/pattern"
+        },
+        "maxItems": {
+          "$ref": "#/definitions/maxItems"
+        },
+        "minItems": {
+          "$ref": "#/definitions/minItems"
+        },
+        "uniqueItems": {
+          "$ref": "#/definitions/uniqueItems"
+        },
+        "enum": {
+          "$ref": "#/definitions/enum"
+        },
+        "multipleOf": {
+          "$ref": "#/definitions/multipleOf"
+        }
+      }
+    },
+    "queryParameterSubSchema": {
+      "additionalProperties": false,
+      "patternProperties": {
+        "^x-": {
+          "$ref": "#/definitions/vendorExtension"
+        }
+      },
+      "properties": {
+        "required": {
+          "type": "boolean",
+          "description": "Determines whether or not this parameter is required or optional.",
+          "default": false
+        },
+        "in": {
+          "type": "string",
+          "description": "Determines the location of the parameter.",
+          "enum": [
+            "query"
+          ]
+        },
+        "description": {
+          "type": "string",
+          "description": "A brief description of the parameter. This could contain examples of use.  GitHub Flavored Markdown is allowed."
+        },
+        "name": {
+          "type": "string",
+          "description": "The name of the parameter."
+        },
+        "allowEmptyValue": {
+          "type": "boolean",
+          "default": false,
+          "description": "allows sending a parameter by name only or with an empty value."
+        },
+        "type": {
+          "type": "string",
+          "enum": [
+            "string",
+            "number",
+            "boolean",
+            "integer",
+            "array"
+          ]
+        },
+        "format": {
+          "type": "string"
+        },
+        "items": {
+          "$ref": "#/definitions/primitivesItems"
+        },
+        "collectionFormat": {
+          "$ref": "#/definitions/collectionFormatWithMulti"
+        },
+        "default": {
+          "$ref": "#/definitions/default"
+        },
+        "maximum": {
+          "$ref": "#/definitions/maximum"
+        },
+        "exclusiveMaximum": {
+          "$ref": "#/definitions/exclusiveMaximum"
+        },
+        "minimum": {
+          "$ref": "#/definitions/minimum"
+        },
+        "exclusiveMinimum": {
+          "$ref": "#/definitions/exclusiveMinimum"
+        },
+        "maxLength": {
+          "$ref": "#/definitions/maxLength"
+        },
+        "minLength": {
+          "$ref": "#/definitions/minLength"
+        },
+        "pattern": {
+          "$ref": "#/definitions/pattern"
+        },
+        "maxItems": {
+          "$ref": "#/definitions/maxItems"
+        },
+        "minItems": {
+          "$ref": "#/definitions/minItems"
+        },
+        "uniqueItems": {
+          "$ref": "#/definitions/uniqueItems"
+        },
+        "enum": {
+          "$ref": "#/definitions/enum"
+        },
+        "multipleOf": {
+          "$ref": "#/definitions/multipleOf"
+        }
+      }
+    },
+    "formDataParameterSubSchema": {
+      "additionalProperties": false,
+      "patternProperties": {
+        "^x-": {
+          "$ref": "#/definitions/vendorExtension"
+        }
+      },
+      "properties": {
+        "required": {
+          "type": "boolean",
+          "description": "Determines whether or not this parameter is required or optional.",
+          "default": false
+        },
+        "in": {
+          "type": "string",
+          "description": "Determines the location of the parameter.",
+          "enum": [
+            "formData"
+          ]
+        },
+        "description": {
+          "type": "string",
+          "description": "A brief description of the parameter. This could contain examples of use.  GitHub Flavored Markdown is allowed."
+        },
+        "name": {
+          "type": "string",
+          "description": "The name of the parameter."
+        },
+        "allowEmptyValue": {
+          "type": "boolean",
+          "default": false,
+          "description": "allows sending a parameter by name only or with an empty value."
+        },
+        "type": {
+          "type": "string",
+          "enum": [
+            "string",
+            "number",
+            "boolean",
+            "integer",
+            "array",
+            "file"
+          ]
+        },
+        "format": {
+          "type": "string"
+        },
+        "items": {
+          "$ref": "#/definitions/primitivesItems"
+        },
+        "collectionFormat": {
+          "$ref": "#/definitions/collectionFormatWithMulti"
+        },
+        "default": {
+          "$ref": "#/definitions/default"
+        },
+        "maximum": {
+          "$ref": "#/definitions/maximum"
+        },
+        "exclusiveMaximum": {
+          "$ref": "#/definitions/exclusiveMaximum"
+        },
+        "minimum": {
+          "$ref": "#/definitions/minimum"
+        },
+        "exclusiveMinimum": {
+          "$ref": "#/definitions/exclusiveMinimum"
+        },
+        "maxLength": {
+          "$ref": "#/definitions/maxLength"
+        },
+        "minLength": {
+          "$ref": "#/definitions/minLength"
+        },
+        "pattern": {
+          "$ref": "#/definitions/pattern"
+        },
+        "maxItems": {
+          "$ref": "#/definitions/maxItems"
+        },
+        "minItems": {
+          "$ref": "#/definitions/minItems"
+        },
+        "uniqueItems": {
+          "$ref": "#/definitions/uniqueItems"
+        },
+        "enum": {
+          "$ref": "#/definitions/enum"
+        },
+        "multipleOf": {
+          "$ref": "#/definitions/multipleOf"
+        }
+      }
+    },
+    "pathParameterSubSchema": {
+      "additionalProperties": false,
+      "patternProperties": {
+        "^x-": {
+          "$ref": "#/definitions/vendorExtension"
+        }
+      },
+      "required": [
+        "required"
+      ],
+      "properties": {
+        "required": {
+          "type": "boolean",
+          "enum": [
+            true
+          ],
+          "description": "Determines whether or not this parameter is required or optional."
+        },
+        "in": {
+          "type": "string",
+          "description": "Determines the location of the parameter.",
+          "enum": [
+            "path"
+          ]
+        },
+        "description": {
+          "type": "string",
+          "description": "A brief description of the parameter. This could contain examples of use.  GitHub Flavored Markdown is allowed."
+        },
+        "name": {
+          "type": "string",
+          "description": "The name of the parameter."
+        },
+        "type": {
+          "type": "string",
+          "enum": [
+            "string",
+            "number",
+            "boolean",
+            "integer",
+            "array"
+          ]
+        },
+        "format": {
+          "type": "string"
+        },
+        "items": {
+          "$ref": "#/definitions/primitivesItems"
+        },
+        "collectionFormat": {
+          "$ref": "#/definitions/collectionFormat"
+        },
+        "default": {
+          "$ref": "#/definitions/default"
+        },
+        "maximum": {
+          "$ref": "#/definitions/maximum"
+        },
+        "exclusiveMaximum": {
+          "$ref": "#/definitions/exclusiveMaximum"
+        },
+        "minimum": {
+          "$ref": "#/definitions/minimum"
+        },
+        "exclusiveMinimum": {
+          "$ref": "#/definitions/exclusiveMinimum"
+        },
+        "maxLength": {
+          "$ref": "#/definitions/maxLength"
+        },
+        "minLength": {
+          "$ref": "#/definitions/minLength"
+        },
+        "pattern": {
+          "$ref": "#/definitions/pattern"
+        },
+        "maxItems": {
+          "$ref": "#/definitions/maxItems"
+        },
+        "minItems": {
+          "$ref": "#/definitions/minItems"
+        },
+        "uniqueItems": {
+          "$ref": "#/definitions/uniqueItems"
+        },
+        "enum": {
+          "$ref": "#/definitions/enum"
+        },
+        "multipleOf": {
+          "$ref": "#/definitions/multipleOf"
+        }
+      }
+    },
+    "nonBodyParameter": {
+      "type": "object",
+      "required": [
+        "name",
+        "in",
+        "type"
+      ],
+      "oneOf": [
+        {
+          "$ref": "#/definitions/headerParameterSubSchema"
+        },
+        {
+          "$ref": "#/definitions/formDataParameterSubSchema"
+        },
+        {
+          "$ref": "#/definitions/queryParameterSubSchema"
+        },
+        {
+          "$ref": "#/definitions/pathParameterSubSchema"
+        }
+      ]
+    },
+    "parameter": {
+      "oneOf": [
+        {
+          "$ref": "#/definitions/bodyParameter"
+        },
+        {
+          "$ref": "#/definitions/nonBodyParameter"
+        }
+      ]
+    },
+    "schema": {
+      "type": "object",
+      "description": "A deterministic version of a JSON Schema object.",
+      "patternProperties": {
+        "^x-": {
+          "$ref": "#/definitions/vendorExtension"
+        }
+      },
+      "properties": {
+        "$ref": {
+          "type": "string"
+        },
+        "format": {
+          "type": "string"
+        },
+        "title": {
+          "$ref": "http://json-schema.org/draft-04/schema#/properties/title"
+        },
+        "description": {
+          "$ref": "http://json-schema.org/draft-04/schema#/properties/description"
+        },
+        "default": {
+          "$ref": "http://json-schema.org/draft-04/schema#/properties/default"
+        },
+        "multipleOf": {
+          "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf"
+        },
+        "maximum": {
+          "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum"
+        },
+        "exclusiveMaximum": {
+          "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum"
+        },
+        "minimum": {
+          "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum"
+        },
+        "exclusiveMinimum": {
+          "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum"
+        },
+        "maxLength": {
+          "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger"
+        },
+        "minLength": {
+          "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0"
+        },
+        "pattern": {
+          "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern"
+        },
+        "maxItems": {
+          "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger"
+        },
+        "minItems": {
+          "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0"
+        },
+        "uniqueItems": {
+          "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems"
+        },
+        "maxProperties": {
+          "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger"
+        },
+        "minProperties": {
+          "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0"
+        },
+        "required": {
+          "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray"
+        },
+        "enum": {
+          "$ref": "http://json-schema.org/draft-04/schema#/properties/enum"
+        },
+        "additionalProperties": {
+          "oneOf": [
+            {
+              "$ref": "#/definitions/schema"
+            },
+            {
+              "type": "boolean"
+            }
+          ],
+          "default": {}
+        },
+        "type": {
+          "$ref": "http://json-schema.org/draft-04/schema#/properties/type"
+        },
+        "items": {
+          "anyOf": [
+            {
+              "$ref": "#/definitions/schema"
+            },
+            {
+              "type": "array",
+              "minItems": 1,
+              "items": {
+                "$ref": "#/definitions/schema"
+              }
+            }
+          ],
+          "default": {}
+        },
+        "allOf": {
+          "type": "array",
+          "minItems": 1,
+          "items": {
+            "$ref": "#/definitions/schema"
+          }
+        },
+        "properties": {
+          "type": "object",
+          "additionalProperties": {
+            "$ref": "#/definitions/schema"
+          },
+          "default": {}
+        },
+        "discriminator": {
+          "type": "string"
+        },
+        "readOnly": {
+          "type": "boolean",
+          "default": false
+        },
+        "xml": {
+          "$ref": "#/definitions/xml"
+        },
+        "externalDocs": {
+          "$ref": "#/definitions/externalDocs"
+        },
+        "example": {}
+      },
+      "additionalProperties": false
+    },
+    "fileSchema": {
+      "type": "object",
+      "description": "A deterministic version of a JSON Schema object.",
+      "patternProperties": {
+        "^x-": {
+          "$ref": "#/definitions/vendorExtension"
+        }
+      },
+      "required": [
+        "type"
+      ],
+      "properties": {
+        "format": {
+          "type": "string"
+        },
+        "title": {
+          "$ref": "http://json-schema.org/draft-04/schema#/properties/title"
+        },
+        "description": {
+          "$ref": "http://json-schema.org/draft-04/schema#/properties/description"
+        },
+        "default": {
+          "$ref": "http://json-schema.org/draft-04/schema#/properties/default"
+        },
+        "required": {
+          "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray"
+        },
+        "type": {
+          "type": "string",
+          "enum": [
+            "file"
+          ]
+        },
+        "readOnly": {
+          "type": "boolean",
+          "default": false
+        },
+        "externalDocs": {
+          "$ref": "#/definitions/externalDocs"
+        },
+        "example": {}
+      },
+      "additionalProperties": false
+    },
+    "primitivesItems": {
+      "type": "object",
+      "additionalProperties": false,
+      "properties": {
+        "type": {
+          "type": "string",
+          "enum": [
+            "string",
+            "number",
+            "integer",
+            "boolean",
+            "array"
+          ]
+        },
+        "format": {
+          "type": "string"
+        },
+        "items": {
+          "$ref": "#/definitions/primitivesItems"
+        },
+        "collectionFormat": {
+          "$ref": "#/definitions/collectionFormat"
+        },
+        "default": {
+          "$ref": "#/definitions/default"
+        },
+        "maximum": {
+          "$ref": "#/definitions/maximum"
+        },
+        "exclusiveMaximum": {
+          "$ref": "#/definitions/exclusiveMaximum"
+        },
+        "minimum": {
+          "$ref": "#/definitions/minimum"
+        },
+        "exclusiveMinimum": {
+          "$ref": "#/definitions/exclusiveMinimum"
+        },
+        "maxLength": {
+          "$ref": "#/definitions/maxLength"
+        },
+        "minLength": {
+          "$ref": "#/definitions/minLength"
+        },
+        "pattern": {
+          "$ref": "#/definitions/pattern"
+        },
+        "maxItems": {
+          "$ref": "#/definitions/maxItems"
+        },
+        "minItems": {
+          "$ref": "#/definitions/minItems"
+        },
+        "uniqueItems": {
+          "$ref": "#/definitions/uniqueItems"
+        },
+        "enum": {
+          "$ref": "#/definitions/enum"
+        },
+        "multipleOf": {
+          "$ref": "#/definitions/multipleOf"
+        }
+      },
+      "patternProperties": {
+        "^x-": {
+          "$ref": "#/definitions/vendorExtension"
+        }
+      }
+    },
+    "security": {
+      "type": "array",
+      "items": {
+        "$ref": "#/definitions/securityRequirement"
+      },
+      "uniqueItems": true
+    },
+    "securityRequirement": {
+      "type": "object",
+      "additionalProperties": {
+        "type": "array",
+        "items": {
+          "type": "string"
+        },
+        "uniqueItems": true
+      }
+    },
+    "xml": {
+      "type": "object",
+      "additionalProperties": false,
+      "properties": {
+        "name": {
+          "type": "string"
+        },
+        "namespace": {
+          "type": "string"
+        },
+        "prefix": {
+          "type": "string"
+        },
+        "attribute": {
+          "type": "boolean",
+          "default": false
+        },
+        "wrapped": {
+          "type": "boolean",
+          "default": false
+        }
+      },
+      "patternProperties": {
+        "^x-": {
+          "$ref": "#/definitions/vendorExtension"
+        }
+      }
+    },
+    "tag": {
+      "type": "object",
+      "additionalProperties": false,
+      "required": [
+        "name"
+      ],
+      "properties": {
+        "name": {
+          "type": "string"
+        },
+        "description": {
+          "type": "string"
+        },
+        "externalDocs": {
+          "$ref": "#/definitions/externalDocs"
+        }
+      },
+      "patternProperties": {
+        "^x-": {
+          "$ref": "#/definitions/vendorExtension"
+        }
+      }
+    },
+    "securityDefinitions": {
+      "type": "object",
+      "additionalProperties": {
+        "oneOf": [
+          {
+            "$ref": "#/definitions/basicAuthenticationSecurity"
+          },
+          {
+            "$ref": "#/definitions/apiKeySecurity"
+          },
+          {
+            "$ref": "#/definitions/oauth2ImplicitSecurity"
+          },
+          {
+            "$ref": "#/definitions/oauth2PasswordSecurity"
+          },
+          {
+            "$ref": "#/definitions/oauth2ApplicationSecurity"
+          },
+          {
+            "$ref": "#/definitions/oauth2AccessCodeSecurity"
+          }
+        ]
+      }
+    },
+    "basicAuthenticationSecurity": {
+      "type": "object",
+      "additionalProperties": false,
+      "required": [
+        "type"
+      ],
+      "properties": {
+        "type": {
+          "type": "string",
+          "enum": [
+            "basic"
+          ]
+        },
+        "description": {
+          "type": "string"
+        }
+      },
+      "patternProperties": {
+        "^x-": {
+          "$ref": "#/definitions/vendorExtension"
+        }
+      }
+    },
+    "apiKeySecurity": {
+      "type": "object",
+      "additionalProperties": false,
+      "required": [
+        "type",
+        "name",
+        "in"
+      ],
+      "properties": {
+        "type": {
+          "type": "string",
+          "enum": [
+            "apiKey"
+          ]
+        },
+        "name": {
+          "type": "string"
+        },
+        "in": {
+          "type": "string",
+          "enum": [
+            "header",
+            "query"
+          ]
+        },
+        "description": {
+          "type": "string"
+        }
+      },
+      "patternProperties": {
+        "^x-": {
+          "$ref": "#/definitions/vendorExtension"
+        }
+      }
+    },
+    "oauth2ImplicitSecurity": {
+      "type": "object",
+      "additionalProperties": false,
+      "required": [
+        "type",
+        "flow",
+        "authorizationUrl"
+      ],
+      "properties": {
+        "type": {
+          "type": "string",
+          "enum": [
+            "oauth2"
+          ]
+        },
+        "flow": {
+          "type": "string",
+          "enum": [
+            "implicit"
+          ]
+        },
+        "scopes": {
+          "$ref": "#/definitions/oauth2Scopes"
+        },
+        "authorizationUrl": {
+          "type": "string",
+          "format": "uri"
+        },
+        "description": {
+          "type": "string"
+        }
+      },
+      "patternProperties": {
+        "^x-": {
+          "$ref": "#/definitions/vendorExtension"
+        }
+      }
+    },
+    "oauth2PasswordSecurity": {
+      "type": "object",
+      "additionalProperties": false,
+      "required": [
+        "type",
+        "flow",
+        "tokenUrl"
+      ],
+      "properties": {
+        "type": {
+          "type": "string",
+          "enum": [
+            "oauth2"
+          ]
+        },
+        "flow": {
+          "type": "string",
+          "enum": [
+            "password"
+          ]
+        },
+        "scopes": {
+          "$ref": "#/definitions/oauth2Scopes"
+        },
+        "tokenUrl": {
+          "type": "string",
+          "format": "uri"
+        },
+        "description": {
+          "type": "string"
+        }
+      },
+      "patternProperties": {
+        "^x-": {
+          "$ref": "#/definitions/vendorExtension"
+        }
+      }
+    },
+    "oauth2ApplicationSecurity": {
+      "type": "object",
+      "additionalProperties": false,
+      "required": [
+        "type",
+        "flow",
+        "tokenUrl"
+      ],
+      "properties": {
+        "type": {
+          "type": "string",
+          "enum": [
+            "oauth2"
+          ]
+        },
+        "flow": {
+          "type": "string",
+          "enum": [
+            "application"
+          ]
+        },
+        "scopes": {
+          "$ref": "#/definitions/oauth2Scopes"
+        },
+        "tokenUrl": {
+          "type": "string",
+          "format": "uri"
+        },
+        "description": {
+          "type": "string"
+        }
+      },
+      "patternProperties": {
+        "^x-": {
+          "$ref": "#/definitions/vendorExtension"
+        }
+      }
+    },
+    "oauth2AccessCodeSecurity": {
+      "type": "object",
+      "additionalProperties": false,
+      "required": [
+        "type",
+        "flow",
+        "authorizationUrl",
+        "tokenUrl"
+      ],
+      "properties": {
+        "type": {
+          "type": "string",
+          "enum": [
+            "oauth2"
+          ]
+        },
+        "flow": {
+          "type": "string",
+          "enum": [
+            "accessCode"
+          ]
+        },
+        "scopes": {
+          "$ref": "#/definitions/oauth2Scopes"
+        },
+        "authorizationUrl": {
+          "type": "string",
+          "format": "uri"
+        },
+        "tokenUrl": {
+          "type": "string",
+          "format": "uri"
+        },
+        "description": {
+          "type": "string"
+        }
+      },
+      "patternProperties": {
+        "^x-": {
+          "$ref": "#/definitions/vendorExtension"
+        }
+      }
+    },
+    "oauth2Scopes": {
+      "type": "object",
+      "additionalProperties": {
+        "type": "string"
+      }
+    },
+    "mediaTypeList": {
+      "type": "array",
+      "items": {
+        "$ref": "#/definitions/mimeType"
+      },
+      "uniqueItems": true
+    },
+    "parametersList": {
+      "type": "array",
+      "description": "The parameters needed to send a valid API call.",
+      "additionalItems": false,
+      "items": {
+        "oneOf": [
+          {
+            "$ref": "#/definitions/parameter"
+          },
+          {
+            "$ref": "#/definitions/jsonReference"
+          }
+        ]
+      },
+      "uniqueItems": true
+    },
+    "schemesList": {
+      "type": "array",
+      "description": "The transfer protocol of the API.",
+      "items": {
+        "type": "string",
+        "enum": [
+          "http",
+          "https",
+          "ws",
+          "wss"
+        ]
+      },
+      "uniqueItems": true
+    },
+    "collectionFormat": {
+      "type": "string",
+      "enum": [
+        "csv",
+        "ssv",
+        "tsv",
+        "pipes"
+      ],
+      "default": "csv"
+    },
+    "collectionFormatWithMulti": {
+      "type": "string",
+      "enum": [
+        "csv",
+        "ssv",
+        "tsv",
+        "pipes",
+        "multi"
+      ],
+      "default": "csv"
+    },
+    "title": {
+      "$ref": "http://json-schema.org/draft-04/schema#/properties/title"
+    },
+    "description": {
+      "$ref": "http://json-schema.org/draft-04/schema#/properties/description"
+    },
+    "default": {
+      "$ref": "http://json-schema.org/draft-04/schema#/properties/default"
+    },
+    "multipleOf": {
+      "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf"
+    },
+    "maximum": {
+      "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum"
+    },
+    "exclusiveMaximum": {
+      "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum"
+    },
+    "minimum": {
+      "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum"
+    },
+    "exclusiveMinimum": {
+      "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum"
+    },
+    "maxLength": {
+      "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger"
+    },
+    "minLength": {
+      "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0"
+    },
+    "pattern": {
+      "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern"
+    },
+    "maxItems": {
+      "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger"
+    },
+    "minItems": {
+      "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0"
+    },
+    "uniqueItems": {
+      "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems"
+    },
+    "enum": {
+      "$ref": "http://json-schema.org/draft-04/schema#/properties/enum"
+    },
+    "jsonReference": {
+      "type": "object",
+      "required": [
+        "$ref"
+      ],
+      "additionalProperties": false,
+      "properties": {
+        "$ref": {
+          "type": "string"
+        },
+        "description": {
+          "type": "string"
+        }
+      }
+    }
+  }
+}
diff --git a/vendor/github.com/googleapis/gnostic/compiler/README.md b/vendor/github.com/googleapis/gnostic/compiler/README.md
new file mode 100644
index 0000000..848b16c
--- /dev/null
+++ b/vendor/github.com/googleapis/gnostic/compiler/README.md
@@ -0,0 +1,3 @@
+# Compiler support code
+
+This directory contains compiler support code used by Gnostic and Gnostic extensions.
\ No newline at end of file
diff --git a/vendor/github.com/googleapis/gnostic/compiler/context.go b/vendor/github.com/googleapis/gnostic/compiler/context.go
new file mode 100644
index 0000000..a64c1b7
--- /dev/null
+++ b/vendor/github.com/googleapis/gnostic/compiler/context.go
@@ -0,0 +1,43 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package compiler
+
+// Context contains state of the compiler as it traverses a document.
+type Context struct {
+	Parent            *Context
+	Name              string
+	ExtensionHandlers *[]ExtensionHandler
+}
+
+// NewContextWithExtensions returns a new object representing the compiler state.
+func NewContextWithExtensions(name string, parent *Context, extensionHandlers *[]ExtensionHandler) *Context {
+	return &Context{Name: name, Parent: parent, ExtensionHandlers: extensionHandlers}
+}
+
+// NewContext returns a new object representing the compiler state.
+func NewContext(name string, parent *Context) *Context {
+	if parent != nil {
+		return &Context{Name: name, Parent: parent, ExtensionHandlers: parent.ExtensionHandlers}
+	}
+	return &Context{Name: name, Parent: parent, ExtensionHandlers: nil}
+}
+
+// Description returns a text description of the compiler state.
+func (context *Context) Description() string {
+	if context.Parent != nil {
+		return context.Parent.Description() + "." + context.Name
+	}
+	return context.Name
+}
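+
+// For illustration (an assumed example, not part of the original source): a
+// chain of contexts built as
+//
+//	root := NewContext("document", nil)
+//	paths := NewContext("paths", root)
+//
+// yields paths.Description() == "document.paths", which is how compiler
+// errors are located within a document.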
diff --git a/vendor/github.com/googleapis/gnostic/compiler/error.go b/vendor/github.com/googleapis/gnostic/compiler/error.go
new file mode 100644
index 0000000..d8672c1
--- /dev/null
+++ b/vendor/github.com/googleapis/gnostic/compiler/error.go
@@ -0,0 +1,61 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package compiler
+
+// Error represents compiler errors and their location in the document.
+type Error struct {
+	Context *Context
+	Message string
+}
+
+// NewError creates an Error.
+func NewError(context *Context, message string) *Error {
+	return &Error{Context: context, Message: message}
+}
+
+// Error returns the string value of an Error.
+func (err *Error) Error() string {
+	if err.Context == nil {
+		return "ERROR " + err.Message
+	}
+	return "ERROR " + err.Context.Description() + " " + err.Message
+}
+
+// ErrorGroup is a container for groups of Error values.
+type ErrorGroup struct {
+	Errors []error
+}
+
+// NewErrorGroupOrNil returns a new ErrorGroup for a slice of errors or nil if the slice is empty.
+func NewErrorGroupOrNil(errors []error) error {
+	if len(errors) == 0 {
+		return nil
+	} else if len(errors) == 1 {
+		return errors[0]
+	} else {
+		return &ErrorGroup{Errors: errors}
+	}
+}
+
+func (group *ErrorGroup) Error() string {
+	result := ""
+	for i, err := range group.Errors {
+		if i > 0 {
+			result += "\n"
+		}
+		result += err.Error()
+	}
+	return result
+}
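+
+// A usage sketch (illustrative, not part of the original source):
+//
+//	var errs []error
+//	// ... collect validation errors ...
+//	if err := NewErrorGroupOrNil(errs); err != nil {
+//		// zero errors yield nil, one error is returned as-is, and
+//		// several are joined by Error() with newlines
+//		log.Println(err)
+//	}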
diff --git a/vendor/github.com/googleapis/gnostic/compiler/extension-handler.go b/vendor/github.com/googleapis/gnostic/compiler/extension-handler.go
new file mode 100644
index 0000000..1f85b65
--- /dev/null
+++ b/vendor/github.com/googleapis/gnostic/compiler/extension-handler.go
@@ -0,0 +1,101 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package compiler
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"os/exec"
+	"strings"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/ptypes/any"
+	ext_plugin "github.com/googleapis/gnostic/extensions"
+	yaml "gopkg.in/yaml.v2"
+)
+
+// ExtensionHandler describes a binary that is called by the compiler to handle specification extensions.
+type ExtensionHandler struct {
+	Name string
+}
+
+// HandleExtension calls a binary extension handler.
+func HandleExtension(context *Context, in interface{}, extensionName string) (bool, *any.Any, error) {
+	handled := false
+	var errFromPlugin error
+	var outFromPlugin *any.Any
+
+	if context != nil && context.ExtensionHandlers != nil && len(*(context.ExtensionHandlers)) != 0 {
+		for _, customAnyProtoGenerator := range *(context.ExtensionHandlers) {
+			outFromPlugin, errFromPlugin = customAnyProtoGenerator.handle(in, extensionName)
+			if outFromPlugin == nil {
+				continue
+			} else {
+				handled = true
+				break
+			}
+		}
+	}
+	return handled, outFromPlugin, errFromPlugin
+}
+
+func (extensionHandlers *ExtensionHandler) handle(in interface{}, extensionName string) (*any.Any, error) {
+	if extensionHandlers.Name != "" {
+		binary, _ := yaml.Marshal(in)
+
+		request := &ext_plugin.ExtensionHandlerRequest{}
+
+		version := &ext_plugin.Version{}
+		version.Major = 0
+		version.Minor = 1
+		version.Patch = 0
+		request.CompilerVersion = version
+
+		request.Wrapper = &ext_plugin.Wrapper{}
+
+		request.Wrapper.Version = "v2"
+		request.Wrapper.Yaml = string(binary)
+		request.Wrapper.ExtensionName = extensionName
+
+		requestBytes, _ := proto.Marshal(request)
+		cmd := exec.Command(extensionHandlers.Name)
+		cmd.Stdin = bytes.NewReader(requestBytes)
+		output, err := cmd.Output()
+
+		if err != nil {
+			fmt.Printf("Error: %+v\n", err)
+			return nil, err
+		}
+		response := &ext_plugin.ExtensionHandlerResponse{}
+		err = proto.Unmarshal(output, response)
+		if err != nil {
+			fmt.Printf("Error: %+v\n", err)
+			fmt.Printf("%s\n", string(output))
+			return nil, err
+		}
+		if !response.Handled {
+			return nil, nil
+		}
+		if len(response.Error) != 0 {
+			message := fmt.Sprintf("Errors when parsing: %+v for field %s by vendor extension handler %s. Details %+v", in, extensionName, extensionHandlers.Name, strings.Join(response.Error, ","))
+			return nil, errors.New(message)
+		}
+		return response.Value, nil
+	}
+	return nil, nil
+}
diff --git a/vendor/github.com/googleapis/gnostic/compiler/helpers.go b/vendor/github.com/googleapis/gnostic/compiler/helpers.go
new file mode 100644
index 0000000..76df635
--- /dev/null
+++ b/vendor/github.com/googleapis/gnostic/compiler/helpers.go
@@ -0,0 +1,197 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package compiler
+
+import (
+	"fmt"
+	"regexp"
+	"sort"
+	"strconv"
+
+	"gopkg.in/yaml.v2"
+)
+
+// compiler helper functions, usually called from generated code
+
+// UnpackMap gets a yaml.MapSlice if possible.
+func UnpackMap(in interface{}) (yaml.MapSlice, bool) {
+	m, ok := in.(yaml.MapSlice)
+	if ok {
+		return m, true
+	}
+	// do we have an empty array?
+	a, ok := in.([]interface{})
+	if ok && len(a) == 0 {
+		// if so, return an empty map
+		return yaml.MapSlice{}, true
+	}
+	return nil, false
+}
+
+// SortedKeysForMap returns the sorted keys of a yaml.MapSlice.
+func SortedKeysForMap(m yaml.MapSlice) []string {
+	keys := make([]string, 0)
+	for _, item := range m {
+		keys = append(keys, item.Key.(string))
+	}
+	sort.Strings(keys)
+	return keys
+}
+
+// MapHasKey returns true if a yaml.MapSlice contains a specified key.
+func MapHasKey(m yaml.MapSlice, key string) bool {
+	for _, item := range m {
+		itemKey, ok := item.Key.(string)
+		if ok && key == itemKey {
+			return true
+		}
+	}
+	return false
+}
+
+// MapValueForKey returns the value for a specified key in a yaml.MapSlice, or nil if the key is not present.
+func MapValueForKey(m yaml.MapSlice, key string) interface{} {
+	for _, item := range m {
+		itemKey, ok := item.Key.(string)
+		if ok && key == itemKey {
+			return item.Value
+		}
+	}
+	return nil
+}
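+
+// A usage sketch for the key helpers (illustrative, not part of the original
+// source):
+//
+//	m := yaml.MapSlice{{Key: "type", Value: "string"}}
+//	if MapHasKey(m, "type") {
+//		v := MapValueForKey(m, "type") // interface{} holding "string"
+//		_ = v
+//	}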
+
+// ConvertInterfaceArrayToStringArray converts an array of interfaces to an array of strings, if possible.
+func ConvertInterfaceArrayToStringArray(interfaceArray []interface{}) []string {
+	stringArray := make([]string, 0)
+	for _, item := range interfaceArray {
+		v, ok := item.(string)
+		if ok {
+			stringArray = append(stringArray, v)
+		}
+	}
+	return stringArray
+}
+
+// MissingKeysInMap identifies which keys from a list of required keys are not in a map.
+func MissingKeysInMap(m yaml.MapSlice, requiredKeys []string) []string {
+	missingKeys := make([]string, 0)
+	for _, k := range requiredKeys {
+		if !MapHasKey(m, k) {
+			missingKeys = append(missingKeys, k)
+		}
+	}
+	return missingKeys
+}
+
+// InvalidKeysInMap returns keys in a map that don't match a list of allowed keys and patterns.
+func InvalidKeysInMap(m yaml.MapSlice, allowedKeys []string, allowedPatterns []*regexp.Regexp) []string {
+	invalidKeys := make([]string, 0)
+	for _, item := range m {
+		itemKey, ok := item.Key.(string)
+		if ok {
+			key := itemKey
+			found := false
+			// does the key match an allowed key?
+			for _, allowedKey := range allowedKeys {
+				if key == allowedKey {
+					found = true
+					break
+				}
+			}
+			if !found {
+				// does the key match an allowed pattern?
+				for _, allowedPattern := range allowedPatterns {
+					if allowedPattern.MatchString(key) {
+						found = true
+						break
+					}
+				}
+				if !found {
+					invalidKeys = append(invalidKeys, key)
+				}
+			}
+		}
+	}
+	return invalidKeys
+}
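+
+// For illustration (an assumed example): with allowedKeys = []string{"name"}
+// and allowedPatterns = []*regexp.Regexp{regexp.MustCompile("^x-")}, a map
+// with the keys "name", "x-custom", and "title" yields ["title"].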
+
+// DescribeMap describes a map (for debugging purposes).
+func DescribeMap(in interface{}, indent string) string {
+	description := ""
+	m, ok := in.(map[string]interface{})
+	if ok {
+		keys := make([]string, 0)
+		for k := range m {
+			keys = append(keys, k)
+		}
+		sort.Strings(keys)
+		for _, k := range keys {
+			v := m[k]
+			description += fmt.Sprintf("%s%s:\n", indent, k)
+			description += DescribeMap(v, indent+"  ")
+		}
+		return description
+	}
+	a, ok := in.([]interface{})
+	if ok {
+		for i, v := range a {
+			description += fmt.Sprintf("%s%d:\n", indent, i)
+			description += DescribeMap(v, indent+"  ")
+		}
+		return description
+	}
+	description += fmt.Sprintf("%s%+v\n", indent, in)
+	return description
+}
+
+// PluralProperties returns the string "properties" pluralized.
+func PluralProperties(count int) string {
+	if count == 1 {
+		return "property"
+	}
+	return "properties"
+}
+
+// StringArrayContainsValue returns true if a string array contains a specified value.
+func StringArrayContainsValue(array []string, value string) bool {
+	for _, item := range array {
+		if item == value {
+			return true
+		}
+	}
+	return false
+}
+
+// StringArrayContainsValues returns true if a string array contains all of a list of specified values.
+func StringArrayContainsValues(array []string, values []string) bool {
+	for _, value := range values {
+		if !StringArrayContainsValue(array, value) {
+			return false
+		}
+	}
+	return true
+}
+
+// StringValue returns the string value of an item.
+func StringValue(item interface{}) (value string, ok bool) {
+	value, ok = item.(string)
+	if ok {
+		return value, ok
+	}
+	intValue, ok := item.(int)
+	if ok {
+		return strconv.Itoa(intValue), true
+	}
+	return "", false
+}
diff --git a/vendor/github.com/googleapis/gnostic/compiler/main.go b/vendor/github.com/googleapis/gnostic/compiler/main.go
new file mode 100644
index 0000000..9713a21
--- /dev/null
+++ b/vendor/github.com/googleapis/gnostic/compiler/main.go
@@ -0,0 +1,16 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package compiler provides support functions to generated compiler code.
+package compiler
diff --git a/vendor/github.com/googleapis/gnostic/compiler/reader.go b/vendor/github.com/googleapis/gnostic/compiler/reader.go
new file mode 100644
index 0000000..c954a2d
--- /dev/null
+++ b/vendor/github.com/googleapis/gnostic/compiler/reader.go
@@ -0,0 +1,175 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package compiler
+
+import (
+	"fmt"
+	"io/ioutil"
+	"log"
+	"net/http"
+	"net/url"
+	"path/filepath"
+	"strings"
+
+	"gopkg.in/yaml.v2"
+)
+
+var fileCache map[string][]byte
+var infoCache map[string]interface{}
+var count int64
+
+var verboseReader = false
+
+func initializeFileCache() {
+	if fileCache == nil {
+		fileCache = make(map[string][]byte, 0)
+	}
+}
+
+func initializeInfoCache() {
+	if infoCache == nil {
+		infoCache = make(map[string]interface{}, 0)
+	}
+}
+
+// FetchFile gets a specified file from the local filesystem or a remote location.
+func FetchFile(fileurl string) ([]byte, error) {
+	initializeFileCache()
+	bytes, ok := fileCache[fileurl]
+	if ok {
+		if verboseReader {
+			log.Printf("Cache hit %s", fileurl)
+		}
+		return bytes, nil
+	}
+	if verboseReader {
+		log.Printf("Fetching %s", fileurl)
+	}
+	response, err := http.Get(fileurl)
+	if err != nil {
+		return nil, err
+	}
+	defer response.Body.Close()
+	if response.StatusCode != 200 {
+		return nil, fmt.Errorf("Error downloading %s: %s", fileurl, response.Status)
+	}
+	bytes, err = ioutil.ReadAll(response.Body)
+	if err == nil {
+		fileCache[fileurl] = bytes
+	}
+	return bytes, err
+}
+
+// ReadBytesForFile reads the bytes of a file.
+func ReadBytesForFile(filename string) ([]byte, error) {
+	// is the filename a url?
+	fileurl, _ := url.Parse(filename)
+	if fileurl.Scheme != "" {
+		// yes, fetch it
+		bytes, err := FetchFile(filename)
+		if err != nil {
+			return nil, err
+		}
+		return bytes, nil
+	}
+	// no, it's a local filename
+	bytes, err := ioutil.ReadFile(filename)
+	if err != nil {
+		return nil, err
+	}
+	return bytes, nil
+}
+
+// ReadInfoFromBytes unmarshals a file as a yaml.MapSlice.
+func ReadInfoFromBytes(filename string, bytes []byte) (interface{}, error) {
+	initializeInfoCache()
+	cachedInfo, ok := infoCache[filename]
+	if ok {
+		if verboseReader {
+			log.Printf("Cache hit info for file %s", filename)
+		}
+		return cachedInfo, nil
+	}
+	if verboseReader {
+		log.Printf("Reading info for file %s", filename)
+	}
+	var info yaml.MapSlice
+	err := yaml.Unmarshal(bytes, &info)
+	if err != nil {
+		return nil, err
+	}
+	if len(filename) > 0 {
+		infoCache[filename] = info
+	}
+	return info, nil
+}
+
+// ReadInfoForRef reads a file and returns the fragment needed to resolve a $ref.
+func ReadInfoForRef(basefile string, ref string) (interface{}, error) {
+	initializeInfoCache()
+	{
+		info, ok := infoCache[ref]
+		if ok {
+			if verboseReader {
+				log.Printf("Cache hit for ref %s#%s", basefile, ref)
+			}
+			return info, nil
+		}
+	}
+	if verboseReader {
+		log.Printf("Reading info for ref %s#%s", basefile, ref)
+	}
+	count = count + 1
+	basedir, _ := filepath.Split(basefile)
+	parts := strings.Split(ref, "#")
+	var filename string
+	if parts[0] != "" {
+		filename = basedir + parts[0]
+	} else {
+		filename = basefile
+	}
+	bytes, err := ReadBytesForFile(filename)
+	if err != nil {
+		return nil, err
+	}
+	info, err := ReadInfoFromBytes(filename, bytes)
+	if err != nil {
+		log.Printf("File error: %v\n", err)
+	} else {
+		if len(parts) > 1 {
+			path := strings.Split(parts[1], "/")
+			for i, key := range path {
+				if i > 0 {
+					m, ok := info.(yaml.MapSlice)
+					if ok {
+						found := false
+						for _, section := range m {
+							if section.Key == key {
+								info = section.Value
+								found = true
+							}
+						}
+						if !found {
+							infoCache[ref] = nil
+							return nil, NewError(nil, fmt.Sprintf("could not resolve %s", ref))
+						}
+					}
+				}
+			}
+		}
+	}
+	infoCache[ref] = info
+	return info, nil
+}
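+
+// For example (illustrative): ReadInfoForRef("api.yaml", "models.yaml#/definitions/Pet")
+// reads models.yaml from the directory of api.yaml and returns the node at
+// /definitions/Pet within it.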
diff --git a/vendor/github.com/googleapis/gnostic/extensions/COMPILE-EXTENSION.sh b/vendor/github.com/googleapis/gnostic/extensions/COMPILE-EXTENSION.sh
new file mode 100755
index 0000000..68d02a0
--- /dev/null
+++ b/vendor/github.com/googleapis/gnostic/extensions/COMPILE-EXTENSION.sh
@@ -0,0 +1,5 @@
+go get github.com/golang/protobuf/protoc-gen-go
+
+protoc \
+--go_out=Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. *.proto
diff --git a/vendor/github.com/googleapis/gnostic/extensions/README.md b/vendor/github.com/googleapis/gnostic/extensions/README.md
new file mode 100644
index 0000000..ff1c2eb
--- /dev/null
+++ b/vendor/github.com/googleapis/gnostic/extensions/README.md
@@ -0,0 +1,5 @@
+# Extensions
+
+This directory contains support code for building Gnostic extensions and associated examples.
+
+Extensions are used to compile vendor or specification extensions into protocol buffer structures.
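+
+For example (illustrative), a document carrying a vendor extension such as
+
+    info:
+      x-sample-extension:
+        name: example
+
+can be processed by a handler that compiles the `x-sample-extension` value into a protocol buffer message.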
diff --git a/vendor/github.com/googleapis/gnostic/extensions/extension.proto b/vendor/github.com/googleapis/gnostic/extensions/extension.proto
new file mode 100644
index 0000000..04856f9
--- /dev/null
+++ b/vendor/github.com/googleapis/gnostic/extensions/extension.proto
@@ -0,0 +1,93 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+import "google/protobuf/any.proto";
+package openapiextension.v1;
+
+// This option lets the proto compiler generate Java code inside the package
+// name (see below) instead of inside an outer class. It creates a simpler
+// developer experience by reducing one level of name nesting and being
+// consistent with most programming languages that don't support outer classes.
+option java_multiple_files = true;
+
+// The Java outer classname should be the filename in UpperCamelCase. This
+// class is only used to hold proto descriptor, so developers don't need to
+// work with it directly.
+option java_outer_classname = "OpenAPIExtensionV1";
+
+// The Java package name must be proto package name with proper prefix.
+option java_package = "org.gnostic.v1";
+
+// A reasonable prefix for the Objective-C symbols generated from the package.
+// It should at a minimum be 3 characters long, all uppercase, and convention
+// is to use an abbreviation of the package name. Something short, but
+// hopefully unique enough to not conflict with things that may come along in
+// the future. 'GPB' is reserved for the protocol buffer implementation itself.
+//
+option objc_class_prefix = "OAE"; // "OpenAPI Extension"
+
+// The version number of the OpenAPI compiler.
+message Version {
+  int32 major = 1;
+  int32 minor = 2;
+  int32 patch = 3;
+  // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should
+  // be empty for mainline stable releases.
+  string suffix = 4;
+}
+
+// An encoded ExtensionHandlerRequest is written to the extension handler's stdin.
+message ExtensionHandlerRequest {
+
+  // The OpenAPI descriptions that were explicitly listed on the command line.
+  // The specifications will appear in the order they are specified to gnostic.
+  Wrapper wrapper = 1;
+
+  // The version number of the OpenAPI compiler.
+  Version compiler_version = 3;
+}
+
+// The extension handler writes an encoded ExtensionHandlerResponse to stdout.
+message ExtensionHandlerResponse {
+
+  // true if the extension is handled by the extension handler; false otherwise
+  bool handled = 1;
+
+  // Error message.  If non-empty, the extension handling failed.
+  // The extension handler process should exit with status code zero
+  // even if it reports an error in this way.
+  //
+  // This should be used to indicate errors which prevent the extension from
+  // operating as intended.  Errors which indicate a problem in gnostic
+  // itself -- such as the input Document being unparseable -- should be
+  // reported by writing a message to stderr and exiting with a non-zero
+  // status code.
+  repeated string error = 2;
+
+  // text output
+  google.protobuf.Any value = 3;
+}
+
+message Wrapper {
+  // version of the OpenAPI specification in which this extension was written.
+  string version = 1;
+
+  // Name of the extension
+  string extension_name = 2;
+
+  // Must be valid YAML for the proto.
+  string yaml = 3;
+}
diff --git a/vendor/github.com/googleapis/gnostic/extensions/extensions.go b/vendor/github.com/googleapis/gnostic/extensions/extensions.go
new file mode 100644
index 0000000..94a8e62
--- /dev/null
+++ b/vendor/github.com/googleapis/gnostic/extensions/extensions.go
@@ -0,0 +1,82 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package openapiextension_v1
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/ptypes"
+)
+
+type documentHandler func(version string, extensionName string, document string)
+type extensionHandler func(name string, yamlInput string) (bool, proto.Message, error)
+
+func forInputYamlFromOpenapic(handler documentHandler) {
+	data, err := ioutil.ReadAll(os.Stdin)
+	if err != nil {
+		fmt.Println("File error:", err.Error())
+		os.Exit(1)
+	}
+	if len(data) == 0 {
+		fmt.Println("No input data.")
+		os.Exit(1)
+	}
+	request := &ExtensionHandlerRequest{}
+	err = proto.Unmarshal(data, request)
+	if err != nil {
+		fmt.Println("Input error:", err.Error())
+		os.Exit(1)
+	}
+	handler(request.Wrapper.Version, request.Wrapper.ExtensionName, request.Wrapper.Yaml)
+}
+
+// ProcessExtension calls the handler for a specified extension.
+func ProcessExtension(handleExtension extensionHandler) {
+	response := &ExtensionHandlerResponse{}
+	forInputYamlFromOpenapic(
+		func(version string, extensionName string, yamlInput string) {
+			handled, newObject, err := handleExtension(extensionName, yamlInput)
+			if !handled {
+				responseBytes, _ := proto.Marshal(response)
+				os.Stdout.Write(responseBytes)
+				os.Exit(0)
+			}
+
+			// If we reach here, then the extension is handled
+			response.Handled = true
+			if err != nil {
+				response.Error = append(response.Error, err.Error())
+				responseBytes, _ := proto.Marshal(response)
+				os.Stdout.Write(responseBytes)
+				os.Exit(0)
+			}
+			response.Value, err = ptypes.MarshalAny(newObject)
+			if err != nil {
+				response.Error = append(response.Error, err.Error())
+				responseBytes, _ := proto.Marshal(response)
+				os.Stdout.Write(responseBytes)
+				os.Exit(0)
+			}
+		})
+
+	responseBytes, _ := proto.Marshal(response)
+	os.Stdout.Write(responseBytes)
+}
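+
+// A minimal extension handler built on ProcessExtension might look like the
+// following sketch (illustrative only; parseMyExtension is a hypothetical
+// helper that builds a proto.Message from the YAML input):
+//
+//	func main() {
+//		ProcessExtension(func(name string, yamlInput string) (bool, proto.Message, error) {
+//			if name != "x-my-extension" {
+//				return false, nil, nil // not ours; an unhandled response is emitted
+//			}
+//			msg, err := parseMyExtension(yamlInput)
+//			return true, msg, err
+//		})
+//	}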
diff --git a/vendor/github.com/gorilla/websocket/.gitignore b/vendor/github.com/gorilla/websocket/.gitignore
new file mode 100644
index 0000000..cd3fcd1
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/.gitignore
@@ -0,0 +1,25 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+
+.idea/
+*.iml
diff --git a/vendor/github.com/gorilla/websocket/.travis.yml b/vendor/github.com/gorilla/websocket/.travis.yml
new file mode 100644
index 0000000..a49db51
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/.travis.yml
@@ -0,0 +1,19 @@
+language: go
+sudo: false
+
+matrix:
+  include:
+    - go: 1.7.x
+    - go: 1.8.x
+    - go: 1.9.x
+    - go: 1.10.x
+    - go: 1.11.x
+    - go: tip
+  allow_failures:
+    - go: tip
+
+script:
+  - go get -t -v ./...
+  - diff -u <(echo -n) <(gofmt -d .)
+  - go vet $(go list ./... | grep -v /vendor/)
+  - go test -v -race ./...
diff --git a/vendor/github.com/gorilla/websocket/AUTHORS b/vendor/github.com/gorilla/websocket/AUTHORS
new file mode 100644
index 0000000..1931f40
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/AUTHORS
@@ -0,0 +1,9 @@
+# This is the official list of Gorilla WebSocket authors for copyright
+# purposes.
+#
+# Please keep the list sorted.
+
+Gary Burd <gary@beagledreams.com>
+Google LLC (https://opensource.google.com/)
+Joachim Bauch <mail@joachim-bauch.de>
+
diff --git a/vendor/github.com/gorilla/websocket/LICENSE b/vendor/github.com/gorilla/websocket/LICENSE
new file mode 100644
index 0000000..9171c97
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/LICENSE
@@ -0,0 +1,22 @@
+Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+  Redistributions of source code must retain the above copyright notice, this
+  list of conditions and the following disclaimer.
+
+  Redistributions in binary form must reproduce the above copyright notice,
+  this list of conditions and the following disclaimer in the documentation
+  and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/gorilla/websocket/README.md b/vendor/github.com/gorilla/websocket/README.md
new file mode 100644
index 0000000..20e391f
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/README.md
@@ -0,0 +1,64 @@
+# Gorilla WebSocket
+
+Gorilla WebSocket is a [Go](http://golang.org/) implementation of the
+[WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol.
+
+[![Build Status](https://travis-ci.org/gorilla/websocket.svg?branch=master)](https://travis-ci.org/gorilla/websocket)
+[![GoDoc](https://godoc.org/github.com/gorilla/websocket?status.svg)](https://godoc.org/github.com/gorilla/websocket)
+
+### Documentation
+
+* [API Reference](http://godoc.org/github.com/gorilla/websocket)
+* [Chat example](https://github.com/gorilla/websocket/tree/master/examples/chat)
+* [Command example](https://github.com/gorilla/websocket/tree/master/examples/command)
+* [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo)
+* [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch)
+
+### Status
+
+The Gorilla WebSocket package provides a complete and tested implementation of
+the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. The
+package API is stable.
+
+### Installation
+
+    go get github.com/gorilla/websocket
+
+### Protocol Compliance
+
+The Gorilla WebSocket package passes the server tests in the [Autobahn Test
+Suite](http://autobahn.ws/testsuite) using the application in the [examples/autobahn
+subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn).
+
+### Gorilla WebSocket compared with other packages
+
+<table>
+<tr>
+<th></th>
+<th><a href="http://godoc.org/github.com/gorilla/websocket">github.com/gorilla</a></th>
+<th><a href="http://godoc.org/golang.org/x/net/websocket">golang.org/x/net</a></th>
+</tr>
+<tr><td colspan="3"><a href="http://tools.ietf.org/html/rfc6455">RFC 6455</a> Features</td></tr>
+<tr><td>Passes <a href="http://autobahn.ws/testsuite/">Autobahn Test Suite</a></td><td><a href="https://github.com/gorilla/websocket/tree/master/examples/autobahn">Yes</a></td><td>No</td></tr>
+<tr><td>Receive <a href="https://tools.ietf.org/html/rfc6455#section-5.4">fragmented</a> message</td><td>Yes</td><td><a href="https://code.google.com/p/go/issues/detail?id=7632">No</a>, see note 1</td></tr>
+<tr><td>Send <a href="https://tools.ietf.org/html/rfc6455#section-5.5.1">close</a> message</td><td><a href="http://godoc.org/github.com/gorilla/websocket#hdr-Control_Messages">Yes</a></td><td><a href="https://code.google.com/p/go/issues/detail?id=4588">No</a></td></tr>
+<tr><td>Send <a href="https://tools.ietf.org/html/rfc6455#section-5.5.2">pings</a> and receive <a href="https://tools.ietf.org/html/rfc6455#section-5.5.3">pongs</a></td><td><a href="http://godoc.org/github.com/gorilla/websocket#hdr-Control_Messages">Yes</a></td><td>No</td></tr>
+<tr><td>Get the <a href="https://tools.ietf.org/html/rfc6455#section-5.6">type</a> of a received data message</td><td>Yes</td><td>Yes, see note 2</td></tr>
+<tr><td colspan="3">Other Features</tr></td>
+<tr><td><a href="https://tools.ietf.org/html/rfc7692">Compression Extensions</a></td><td>Experimental</td><td>No</td></tr>
+<tr><td>Read message using io.Reader</td><td><a href="http://godoc.org/github.com/gorilla/websocket#Conn.NextReader">Yes</a></td><td>No, see note 3</td></tr>
+<tr><td>Write message using io.WriteCloser</td><td><a href="http://godoc.org/github.com/gorilla/websocket#Conn.NextWriter">Yes</a></td><td>No, see note 3</td></tr>
+</table>
+
+Notes:
+
+1. Large messages are fragmented in [Chrome's new WebSocket implementation](http://www.ietf.org/mail-archive/web/hybi/current/msg10503.html).
+2. The application can get the type of a received data message by implementing
+   a [Codec marshal](http://godoc.org/golang.org/x/net/websocket#Codec.Marshal)
+   function.
+3. The go.net io.Reader and io.Writer operate across WebSocket frame boundaries.
+   Read returns when the input buffer is full or a frame boundary is
+   encountered. Each call to Write sends a single frame message. The Gorilla
+   io.Reader and io.WriteCloser operate on a single WebSocket message.
+
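+### Server example
+
+A minimal handler sketch (illustrative; see the linked chat and echo examples
+for complete, tested programs):
+
+    var upgrader = websocket.Upgrader{}
+
+    func handler(w http.ResponseWriter, r *http.Request) {
+        c, err := upgrader.Upgrade(w, r, nil)
+        if err != nil {
+            log.Print("upgrade:", err)
+            return
+        }
+        defer c.Close()
+    }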
diff --git a/vendor/github.com/gorilla/websocket/client.go b/vendor/github.com/gorilla/websocket/client.go
new file mode 100644
index 0000000..2e32fd5
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/client.go
@@ -0,0 +1,395 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+	"bytes"
+	"context"
+	"crypto/tls"
+	"errors"
+	"io"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"net/http/httptrace"
+	"net/url"
+	"strings"
+	"time"
+)
+
+// ErrBadHandshake is returned when the server response to opening handshake is
+// invalid.
+var ErrBadHandshake = errors.New("websocket: bad handshake")
+
+var errInvalidCompression = errors.New("websocket: invalid compression negotiation")
+
+// NewClient creates a new client connection using the given net connection.
+// The URL u specifies the host and request URI. Use requestHeader to specify
+// the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies
+// (Cookie). Use the response.Header to get the selected subprotocol
+// (Sec-WebSocket-Protocol) and cookies (Set-Cookie).
+//
+// If the WebSocket handshake fails, ErrBadHandshake is returned along with a
+// non-nil *http.Response so that callers can handle redirects, authentication,
+// etc.
+//
+// Deprecated: Use Dialer instead.
+func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) {
+	d := Dialer{
+		ReadBufferSize:  readBufSize,
+		WriteBufferSize: writeBufSize,
+		NetDial: func(net, addr string) (net.Conn, error) {
+			return netConn, nil
+		},
+	}
+	return d.Dial(u.String(), requestHeader)
+}
+
+// A Dialer contains options for connecting to a WebSocket server.
+type Dialer struct {
+	// NetDial specifies the dial function for creating TCP connections. If
+	// NetDial is nil, net.Dial is used.
+	NetDial func(network, addr string) (net.Conn, error)
+
+	// NetDialContext specifies the dial function for creating TCP connections. If
+	// NetDialContext is nil, net.DialContext is used.
+	NetDialContext func(ctx context.Context, network, addr string) (net.Conn, error)
+
+	// Proxy specifies a function to return a proxy for a given
+	// Request. If the function returns a non-nil error, the
+	// request is aborted with the provided error.
+	// If Proxy is nil or returns a nil *URL, no proxy is used.
+	Proxy func(*http.Request) (*url.URL, error)
+
+	// TLSClientConfig specifies the TLS configuration to use with tls.Client.
+	// If nil, the default configuration is used.
+	TLSClientConfig *tls.Config
+
+	// HandshakeTimeout specifies the duration for the handshake to complete.
+	HandshakeTimeout time.Duration
+
+	// ReadBufferSize and WriteBufferSize specify I/O buffer sizes. If a buffer
+	// size is zero, then a useful default size is used. The I/O buffer sizes
+	// do not limit the size of the messages that can be sent or received.
+	ReadBufferSize, WriteBufferSize int
+
+	// WriteBufferPool is a pool of buffers for write operations. If the value
+	// is not set, then write buffers are allocated to the connection for the
+	// lifetime of the connection.
+	//
+	// A pool is most useful when the application has a modest volume of writes
+	// across a large number of connections.
+	//
+	// Applications should use a single pool for each unique value of
+	// WriteBufferSize.
+	WriteBufferPool BufferPool
+
+	// Subprotocols specifies the client's requested subprotocols.
+	Subprotocols []string
+
+	// EnableCompression specifies if the client should attempt to negotiate
+	// per message compression (RFC 7692). Setting this value to true does not
+	// guarantee that compression will be supported. Currently only "no context
+	// takeover" modes are supported.
+	EnableCompression bool
+
+	// Jar specifies the cookie jar.
+	// If Jar is nil, cookies are not sent in requests and ignored
+	// in responses.
+	Jar http.CookieJar
+}
+
+// Dial creates a new client connection by calling DialContext with a background context.
+func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) {
+	return d.DialContext(context.Background(), urlStr, requestHeader)
+}
+
+var errMalformedURL = errors.New("malformed ws or wss URL")
+
+func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) {
+	hostPort = u.Host
+	hostNoPort = u.Host
+	if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") {
+		hostNoPort = hostNoPort[:i]
+	} else {
+		switch u.Scheme {
+		case "wss":
+			hostPort += ":443"
+		case "https":
+			hostPort += ":443"
+		default:
+			hostPort += ":80"
+		}
+	}
+	return hostPort, hostNoPort
+}
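+
+// For example (illustrative): for u parsed from "wss://example.com/chat",
+// hostPortNoPort returns ("example.com:443", "example.com"); an explicit
+// port is kept in hostPort and stripped from hostNoPort.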
+
+// DefaultDialer is a dialer with all fields set to the default values.
+var DefaultDialer = &Dialer{
+	Proxy:            http.ProxyFromEnvironment,
+	HandshakeTimeout: 45 * time.Second,
+}
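+
+// A typical client connection using the default dialer (a usage sketch, not
+// part of the original source):
+//
+//	c, _, err := DefaultDialer.Dial("wss://example.com/socket", nil)
+//	if err != nil {
+//		log.Fatal("dial:", err)
+//	}
+//	defer c.Close()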
+
+// nilDialer is the dialer to use when the receiver is nil.
+var nilDialer = *DefaultDialer
+
+// DialContext creates a new client connection. Use requestHeader to specify the
+// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie).
+// Use the response.Header to get the selected subprotocol
+// (Sec-WebSocket-Protocol) and cookies (Set-Cookie).
+//
+// The context will be used in the request and in the Dialer.
+//
+// If the WebSocket handshake fails, ErrBadHandshake is returned along with a
+// non-nil *http.Response so that callers can handle redirects, authentication,
+// etcetera. The response body may not contain the entire response and does not
+// need to be closed by the application.
+func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) {
+	if d == nil {
+		d = &nilDialer
+	}
+
+	challengeKey, err := generateChallengeKey()
+	if err != nil {
+		return nil, nil, err
+	}
+
+	u, err := url.Parse(urlStr)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	switch u.Scheme {
+	case "ws":
+		u.Scheme = "http"
+	case "wss":
+		u.Scheme = "https"
+	default:
+		return nil, nil, errMalformedURL
+	}
+
+	if u.User != nil {
+		// User name and password are not allowed in websocket URIs.
+		return nil, nil, errMalformedURL
+	}
+
+	req := &http.Request{
+		Method:     "GET",
+		URL:        u,
+		Proto:      "HTTP/1.1",
+		ProtoMajor: 1,
+		ProtoMinor: 1,
+		Header:     make(http.Header),
+		Host:       u.Host,
+	}
+	req = req.WithContext(ctx)
+
+	// Set the cookies present in the cookie jar of the dialer
+	if d.Jar != nil {
+		for _, cookie := range d.Jar.Cookies(u) {
+			req.AddCookie(cookie)
+		}
+	}
+
+	// Set the request headers using the capitalization for names and values in
+	// RFC examples. Although the capitalization shouldn't matter, there are
+	// servers that depend on it. The Header.Set method is not used because the
+	// method canonicalizes the header names.
+	req.Header["Upgrade"] = []string{"websocket"}
+	req.Header["Connection"] = []string{"Upgrade"}
+	req.Header["Sec-WebSocket-Key"] = []string{challengeKey}
+	req.Header["Sec-WebSocket-Version"] = []string{"13"}
+	if len(d.Subprotocols) > 0 {
+		req.Header["Sec-WebSocket-Protocol"] = []string{strings.Join(d.Subprotocols, ", ")}
+	}
+	for k, vs := range requestHeader {
+		switch {
+		case k == "Host":
+			if len(vs) > 0 {
+				req.Host = vs[0]
+			}
+		case k == "Upgrade" ||
+			k == "Connection" ||
+			k == "Sec-Websocket-Key" ||
+			k == "Sec-Websocket-Version" ||
+			k == "Sec-Websocket-Extensions" ||
+			(k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0):
+			return nil, nil, errors.New("websocket: duplicate header not allowed: " + k)
+		case k == "Sec-Websocket-Protocol":
+			req.Header["Sec-WebSocket-Protocol"] = vs
+		default:
+			req.Header[k] = vs
+		}
+	}
+
+	if d.EnableCompression {
+		req.Header["Sec-WebSocket-Extensions"] = []string{"permessage-deflate; server_no_context_takeover; client_no_context_takeover"}
+	}
+
+	if d.HandshakeTimeout != 0 {
+		var cancel func()
+		ctx, cancel = context.WithTimeout(ctx, d.HandshakeTimeout)
+		defer cancel()
+	}
+
+	// Get network dial function.
+	var netDial func(network, addr string) (net.Conn, error)
+
+	if d.NetDialContext != nil {
+		netDial = func(network, addr string) (net.Conn, error) {
+			return d.NetDialContext(ctx, network, addr)
+		}
+	} else if d.NetDial != nil {
+		netDial = d.NetDial
+	} else {
+		netDialer := &net.Dialer{}
+		netDial = func(network, addr string) (net.Conn, error) {
+			return netDialer.DialContext(ctx, network, addr)
+		}
+	}
+
+	// If needed, wrap the dial function to set the connection deadline.
+	if deadline, ok := ctx.Deadline(); ok {
+		forwardDial := netDial
+		netDial = func(network, addr string) (net.Conn, error) {
+			c, err := forwardDial(network, addr)
+			if err != nil {
+				return nil, err
+			}
+			err = c.SetDeadline(deadline)
+			if err != nil {
+				c.Close()
+				return nil, err
+			}
+			return c, nil
+		}
+	}
+
+	// If needed, wrap the dial function to connect through a proxy.
+	if d.Proxy != nil {
+		proxyURL, err := d.Proxy(req)
+		if err != nil {
+			return nil, nil, err
+		}
+		if proxyURL != nil {
+			dialer, err := proxy_FromURL(proxyURL, netDialerFunc(netDial))
+			if err != nil {
+				return nil, nil, err
+			}
+			netDial = dialer.Dial
+		}
+	}
+
+	hostPort, hostNoPort := hostPortNoPort(u)
+	trace := httptrace.ContextClientTrace(ctx)
+	if trace != nil && trace.GetConn != nil {
+		trace.GetConn(hostPort)
+	}
+
+	netConn, err := netDial("tcp", hostPort)
+	if trace != nil && trace.GotConn != nil {
+		trace.GotConn(httptrace.GotConnInfo{
+			Conn: netConn,
+		})
+	}
+	if err != nil {
+		return nil, nil, err
+	}
+
+	defer func() {
+		if netConn != nil {
+			netConn.Close()
+		}
+	}()
+
+	if u.Scheme == "https" {
+		cfg := cloneTLSConfig(d.TLSClientConfig)
+		if cfg.ServerName == "" {
+			cfg.ServerName = hostNoPort
+		}
+		tlsConn := tls.Client(netConn, cfg)
+		netConn = tlsConn
+
+		var err error
+		if trace != nil {
+			err = doHandshakeWithTrace(trace, tlsConn, cfg)
+		} else {
+			err = doHandshake(tlsConn, cfg)
+		}
+
+		if err != nil {
+			return nil, nil, err
+		}
+	}
+
+	conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize, d.WriteBufferPool, nil, nil)
+
+	if err := req.Write(netConn); err != nil {
+		return nil, nil, err
+	}
+
+	if trace != nil && trace.GotFirstResponseByte != nil {
+		if peek, err := conn.br.Peek(1); err == nil && len(peek) == 1 {
+			trace.GotFirstResponseByte()
+		}
+	}
+
+	resp, err := http.ReadResponse(conn.br, req)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if d.Jar != nil {
+		if rc := resp.Cookies(); len(rc) > 0 {
+			d.Jar.SetCookies(u, rc)
+		}
+	}
+
+	if resp.StatusCode != 101 ||
+		!strings.EqualFold(resp.Header.Get("Upgrade"), "websocket") ||
+		!strings.EqualFold(resp.Header.Get("Connection"), "upgrade") ||
+		resp.Header.Get("Sec-Websocket-Accept") != computeAcceptKey(challengeKey) {
+		// Before closing the network connection on return from this
+		// function, slurp up some of the response to aid application
+		// debugging.
+		buf := make([]byte, 1024)
+		n, _ := io.ReadFull(resp.Body, buf)
+		resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n]))
+		return nil, resp, ErrBadHandshake
+	}
+
+	for _, ext := range parseExtensions(resp.Header) {
+		if ext[""] != "permessage-deflate" {
+			continue
+		}
+		_, snct := ext["server_no_context_takeover"]
+		_, cnct := ext["client_no_context_takeover"]
+		if !snct || !cnct {
+			return nil, resp, errInvalidCompression
+		}
+		conn.newCompressionWriter = compressNoContextTakeover
+		conn.newDecompressionReader = decompressNoContextTakeover
+		break
+	}
+
+	resp.Body = ioutil.NopCloser(bytes.NewReader([]byte{}))
+	conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol")
+
+	netConn.SetDeadline(time.Time{})
+	netConn = nil // to avoid close in defer.
+	return conn, resp, nil
+}
+
+func doHandshake(tlsConn *tls.Conn, cfg *tls.Config) error {
+	if err := tlsConn.Handshake(); err != nil {
+		return err
+	}
+	if !cfg.InsecureSkipVerify {
+		if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/gorilla/websocket/client_clone.go b/vendor/github.com/gorilla/websocket/client_clone.go
new file mode 100644
index 0000000..4f0d943
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/client_clone.go
@@ -0,0 +1,16 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.8
+
+package websocket
+
+import "crypto/tls"
+
+func cloneTLSConfig(cfg *tls.Config) *tls.Config {
+	if cfg == nil {
+		return &tls.Config{}
+	}
+	return cfg.Clone()
+}
diff --git a/vendor/github.com/gorilla/websocket/client_clone_legacy.go b/vendor/github.com/gorilla/websocket/client_clone_legacy.go
new file mode 100644
index 0000000..babb007
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/client_clone_legacy.go
@@ -0,0 +1,38 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.8
+
+package websocket
+
+import "crypto/tls"
+
+// cloneTLSConfig clones all public fields except the fields
+// SessionTicketsDisabled and SessionTicketKey. This avoids copying the
+// sync.Mutex in the sync.Once and makes it safe to call cloneTLSConfig on a
+// config in active use.
+func cloneTLSConfig(cfg *tls.Config) *tls.Config {
+	if cfg == nil {
+		return &tls.Config{}
+	}
+	return &tls.Config{
+		Rand:                     cfg.Rand,
+		Time:                     cfg.Time,
+		Certificates:             cfg.Certificates,
+		NameToCertificate:        cfg.NameToCertificate,
+		GetCertificate:           cfg.GetCertificate,
+		RootCAs:                  cfg.RootCAs,
+		NextProtos:               cfg.NextProtos,
+		ServerName:               cfg.ServerName,
+		ClientAuth:               cfg.ClientAuth,
+		ClientCAs:                cfg.ClientCAs,
+		InsecureSkipVerify:       cfg.InsecureSkipVerify,
+		CipherSuites:             cfg.CipherSuites,
+		PreferServerCipherSuites: cfg.PreferServerCipherSuites,
+		ClientSessionCache:       cfg.ClientSessionCache,
+		MinVersion:               cfg.MinVersion,
+		MaxVersion:               cfg.MaxVersion,
+		CurvePreferences:         cfg.CurvePreferences,
+	}
+}
diff --git a/vendor/github.com/gorilla/websocket/compression.go b/vendor/github.com/gorilla/websocket/compression.go
new file mode 100644
index 0000000..813ffb1
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/compression.go
@@ -0,0 +1,148 @@
+// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+	"compress/flate"
+	"errors"
+	"io"
+	"strings"
+	"sync"
+)
+
+const (
+	minCompressionLevel     = -2 // flate.HuffmanOnly not defined in Go < 1.6
+	maxCompressionLevel     = flate.BestCompression
+	defaultCompressionLevel = 1
+)
+
+var (
+	flateWriterPools [maxCompressionLevel - minCompressionLevel + 1]sync.Pool
+	flateReaderPool  = sync.Pool{New: func() interface{} {
+		return flate.NewReader(nil)
+	}}
+)
+
+func decompressNoContextTakeover(r io.Reader) io.ReadCloser {
+	const tail =
+	// Add four bytes as specified in RFC
+	"\x00\x00\xff\xff" +
+		// Add final block to squelch unexpected EOF error from flate reader.
+		"\x01\x00\x00\xff\xff"
+
+	fr, _ := flateReaderPool.Get().(io.ReadCloser)
+	fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil)
+	return &flateReadWrapper{fr}
+}
+
+func isValidCompressionLevel(level int) bool {
+	return minCompressionLevel <= level && level <= maxCompressionLevel
+}
+
+func compressNoContextTakeover(w io.WriteCloser, level int) io.WriteCloser {
+	p := &flateWriterPools[level-minCompressionLevel]
+	tw := &truncWriter{w: w}
+	fw, _ := p.Get().(*flate.Writer)
+	if fw == nil {
+		fw, _ = flate.NewWriter(tw, level)
+	} else {
+		fw.Reset(tw)
+	}
+	return &flateWriteWrapper{fw: fw, tw: tw, p: p}
+}
+
+// truncWriter is an io.Writer that writes all but the last four bytes of the
+// stream to another io.Writer.
+type truncWriter struct {
+	w io.WriteCloser
+	n int
+	p [4]byte
+}
+
+func (w *truncWriter) Write(p []byte) (int, error) {
+	n := 0
+
+	// fill buffer first for simplicity.
+	if w.n < len(w.p) {
+		n = copy(w.p[w.n:], p)
+		p = p[n:]
+		w.n += n
+		if len(p) == 0 {
+			return n, nil
+		}
+	}
+
+	m := len(p)
+	if m > len(w.p) {
+		m = len(w.p)
+	}
+
+	if nn, err := w.w.Write(w.p[:m]); err != nil {
+		return n + nn, err
+	}
+
+	copy(w.p[:], w.p[m:])
+	copy(w.p[len(w.p)-m:], p[len(p)-m:])
+	nn, err := w.w.Write(p[:len(p)-m])
+	return n + nn, err
+}
+
+type flateWriteWrapper struct {
+	fw *flate.Writer
+	tw *truncWriter
+	p  *sync.Pool
+}
+
+func (w *flateWriteWrapper) Write(p []byte) (int, error) {
+	if w.fw == nil {
+		return 0, errWriteClosed
+	}
+	return w.fw.Write(p)
+}
+
+func (w *flateWriteWrapper) Close() error {
+	if w.fw == nil {
+		return errWriteClosed
+	}
+	err1 := w.fw.Flush()
+	w.p.Put(w.fw)
+	w.fw = nil
+	if w.tw.p != [4]byte{0, 0, 0xff, 0xff} {
+		return errors.New("websocket: internal error, unexpected bytes at end of flate stream")
+	}
+	err2 := w.tw.w.Close()
+	if err1 != nil {
+		return err1
+	}
+	return err2
+}
+
+type flateReadWrapper struct {
+	fr io.ReadCloser
+}
+
+func (r *flateReadWrapper) Read(p []byte) (int, error) {
+	if r.fr == nil {
+		return 0, io.ErrClosedPipe
+	}
+	n, err := r.fr.Read(p)
+	if err == io.EOF {
+		// Preemptively place the reader back in the pool. This helps with
+		// scenarios where the application does not call NextReader() soon after
+		// this final read.
+		r.Close()
+	}
+	return n, err
+}
+
+func (r *flateReadWrapper) Close() error {
+	if r.fr == nil {
+		return io.ErrClosedPipe
+	}
+	err := r.fr.Close()
+	flateReaderPool.Put(r.fr)
+	r.fr = nil
+	return err
+}
diff --git a/vendor/github.com/gorilla/websocket/conn.go b/vendor/github.com/gorilla/websocket/conn.go
new file mode 100644
index 0000000..d2a21c1
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/conn.go
@@ -0,0 +1,1165 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+	"bufio"
+	"encoding/binary"
+	"errors"
+	"io"
+	"io/ioutil"
+	"math/rand"
+	"net"
+	"strconv"
+	"sync"
+	"time"
+	"unicode/utf8"
+)
+
+const (
+	// Frame header byte 0 bits from Section 5.2 of RFC 6455
+	finalBit = 1 << 7
+	rsv1Bit  = 1 << 6
+	rsv2Bit  = 1 << 5
+	rsv3Bit  = 1 << 4
+
+	// Frame header byte 1 bits from Section 5.2 of RFC 6455
+	maskBit = 1 << 7
+
+	maxFrameHeaderSize         = 2 + 8 + 4 // Fixed header + length + mask
+	maxControlFramePayloadSize = 125
+
+	writeWait = time.Second
+
+	defaultReadBufferSize  = 4096
+	defaultWriteBufferSize = 4096
+
+	continuationFrame = 0
+	noFrame           = -1
+)
+
+// Close codes defined in RFC 6455, section 11.7.
+const (
+	CloseNormalClosure           = 1000
+	CloseGoingAway               = 1001
+	CloseProtocolError           = 1002
+	CloseUnsupportedData         = 1003
+	CloseNoStatusReceived        = 1005
+	CloseAbnormalClosure         = 1006
+	CloseInvalidFramePayloadData = 1007
+	ClosePolicyViolation         = 1008
+	CloseMessageTooBig           = 1009
+	CloseMandatoryExtension      = 1010
+	CloseInternalServerErr       = 1011
+	CloseServiceRestart          = 1012
+	CloseTryAgainLater           = 1013
+	CloseTLSHandshake            = 1015
+)
+
+// The message types are defined in RFC 6455, section 11.8.
+const (
+	// TextMessage denotes a text data message. The text message payload is
+	// interpreted as UTF-8 encoded text data.
+	TextMessage = 1
+
+	// BinaryMessage denotes a binary data message.
+	BinaryMessage = 2
+
+	// CloseMessage denotes a close control message. The optional message
+	// payload contains a numeric code and text. Use the FormatCloseMessage
+	// function to format a close message payload.
+	CloseMessage = 8
+
+	// PingMessage denotes a ping control message. The optional message payload
+	// is UTF-8 encoded text.
+	PingMessage = 9
+
+	// PongMessage denotes a pong control message. The optional message payload
+	// is UTF-8 encoded text.
+	PongMessage = 10
+)
+
+// ErrCloseSent is returned when the application writes a message to the
+// connection after sending a close message.
+var ErrCloseSent = errors.New("websocket: close sent")
+
+// ErrReadLimit is returned when reading a message that is larger than the
+// read limit set for the connection.
+var ErrReadLimit = errors.New("websocket: read limit exceeded")
+
+// netError satisfies the net Error interface.
+type netError struct {
+	msg       string
+	temporary bool
+	timeout   bool
+}
+
+func (e *netError) Error() string   { return e.msg }
+func (e *netError) Temporary() bool { return e.temporary }
+func (e *netError) Timeout() bool   { return e.timeout }
+
+// CloseError represents a close message.
+type CloseError struct {
+	// Code is defined in RFC 6455, section 11.7.
+	Code int
+
+	// Text is the optional text payload.
+	Text string
+}
+
+func (e *CloseError) Error() string {
+	s := []byte("websocket: close ")
+	s = strconv.AppendInt(s, int64(e.Code), 10)
+	switch e.Code {
+	case CloseNormalClosure:
+		s = append(s, " (normal)"...)
+	case CloseGoingAway:
+		s = append(s, " (going away)"...)
+	case CloseProtocolError:
+		s = append(s, " (protocol error)"...)
+	case CloseUnsupportedData:
+		s = append(s, " (unsupported data)"...)
+	case CloseNoStatusReceived:
+		s = append(s, " (no status)"...)
+	case CloseAbnormalClosure:
+		s = append(s, " (abnormal closure)"...)
+	case CloseInvalidFramePayloadData:
+		s = append(s, " (invalid payload data)"...)
+	case ClosePolicyViolation:
+		s = append(s, " (policy violation)"...)
+	case CloseMessageTooBig:
+		s = append(s, " (message too big)"...)
+	case CloseMandatoryExtension:
+		s = append(s, " (mandatory extension missing)"...)
+	case CloseInternalServerErr:
+		s = append(s, " (internal server error)"...)
+	case CloseTLSHandshake:
+		s = append(s, " (TLS handshake error)"...)
+	}
+	if e.Text != "" {
+		s = append(s, ": "...)
+		s = append(s, e.Text...)
+	}
+	return string(s)
+}
+
+// IsCloseError returns a boolean indicating whether the error is a *CloseError
+// with one of the specified codes.
+func IsCloseError(err error, codes ...int) bool {
+	if e, ok := err.(*CloseError); ok {
+		for _, code := range codes {
+			if e.Code == code {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// IsUnexpectedCloseError returns a boolean indicating whether the error is a
+// *CloseError with a code not in the list of expected codes.
+func IsUnexpectedCloseError(err error, expectedCodes ...int) bool {
+	if e, ok := err.(*CloseError); ok {
+		for _, code := range expectedCodes {
+			if e.Code == code {
+				return false
+			}
+		}
+		return true
+	}
+	return false
+}
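+
+// Usage sketch (editorial, not upstream code): a read loop typically filters
+// the expected shutdown codes with IsUnexpectedCloseError and logs anything
+// else. conn is an established *Conn; process is an assumed callback:
+//
+//  func readLoop(conn *websocket.Conn, process func([]byte)) {
+//      for {
+//          _, msg, err := conn.ReadMessage()
+//          if err != nil {
+//              if websocket.IsUnexpectedCloseError(err, websocket.CloseNormalClosure, websocket.CloseGoingAway) {
+//                  log.Printf("unexpected close: %v", err)
+//              }
+//              return
+//          }
+//          process(msg)
+//      }
+//  }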
+
+var (
+	errWriteTimeout        = &netError{msg: "websocket: write timeout", timeout: true, temporary: true}
+	errUnexpectedEOF       = &CloseError{Code: CloseAbnormalClosure, Text: io.ErrUnexpectedEOF.Error()}
+	errBadWriteOpCode      = errors.New("websocket: bad write message type")
+	errWriteClosed         = errors.New("websocket: write closed")
+	errInvalidControlFrame = errors.New("websocket: invalid control frame")
+)
+
+func newMaskKey() [4]byte {
+	n := rand.Uint32()
+	return [4]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)}
+}
+
+func hideTempErr(err error) error {
+	if e, ok := err.(net.Error); ok && e.Temporary() {
+		err = &netError{msg: e.Error(), timeout: e.Timeout()}
+	}
+	return err
+}
+
+func isControl(frameType int) bool {
+	return frameType == CloseMessage || frameType == PingMessage || frameType == PongMessage
+}
+
+func isData(frameType int) bool {
+	return frameType == TextMessage || frameType == BinaryMessage
+}
+
+var validReceivedCloseCodes = map[int]bool{
+	// see http://www.iana.org/assignments/websocket/websocket.xhtml#close-code-number
+
+	CloseNormalClosure:           true,
+	CloseGoingAway:               true,
+	CloseProtocolError:           true,
+	CloseUnsupportedData:         true,
+	CloseNoStatusReceived:        false,
+	CloseAbnormalClosure:         false,
+	CloseInvalidFramePayloadData: true,
+	ClosePolicyViolation:         true,
+	CloseMessageTooBig:           true,
+	CloseMandatoryExtension:      true,
+	CloseInternalServerErr:       true,
+	CloseServiceRestart:          true,
+	CloseTryAgainLater:           true,
+	CloseTLSHandshake:            false,
+}
+
+func isValidReceivedCloseCode(code int) bool {
+	return validReceivedCloseCodes[code] || (code >= 3000 && code <= 4999)
+}
+
+// BufferPool represents a pool of buffers. The *sync.Pool type satisfies this
+// interface.  The type of the value stored in a pool is not specified.
+type BufferPool interface {
+	// Get gets a value from the pool or returns nil if the pool is empty.
+	Get() interface{}
+	// Put adds a value to the pool.
+	Put(interface{})
+}
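+
+// Configuration sketch (editorial): *sync.Pool satisfies BufferPool directly,
+// so one shared write-buffer pool can serve many connections. The Upgrader
+// fields referenced here are defined in server.go later in this diff:
+//
+//  var upgrader = websocket.Upgrader{
+//      WriteBufferPool: &sync.Pool{},
+//      WriteBufferSize: 4096,
+//  }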
+
+// writePoolData is the type added to the write buffer pool. This wrapper is
+// used to prevent applications from peeking at and depending on the values
+// added to the pool.
+type writePoolData struct{ buf []byte }
+
+// The Conn type represents a WebSocket connection.
+type Conn struct {
+	conn        net.Conn
+	isServer    bool
+	subprotocol string
+
+	// Write fields
+	mu            chan bool // used as mutex to protect write to conn
+	writeBuf      []byte    // frame is constructed in this buffer.
+	writePool     BufferPool
+	writeBufSize  int
+	writeDeadline time.Time
+	writer        io.WriteCloser // the current writer returned to the application
+	isWriting     bool           // for best-effort concurrent write detection
+
+	writeErrMu sync.Mutex
+	writeErr   error
+
+	enableWriteCompression bool
+	compressionLevel       int
+	newCompressionWriter   func(io.WriteCloser, int) io.WriteCloser
+
+	// Read fields
+	reader        io.ReadCloser // the current reader returned to the application
+	readErr       error
+	br            *bufio.Reader
+	readRemaining int64 // bytes remaining in current frame.
+	readFinal     bool  // true when there are no more frames in the current message.
+	readLength    int64 // Message size.
+	readLimit     int64 // Maximum message size.
+	readMaskPos   int
+	readMaskKey   [4]byte
+	handlePong    func(string) error
+	handlePing    func(string) error
+	handleClose   func(int, string) error
+	readErrCount  int
+	messageReader *messageReader // the current low-level reader
+
+	readDecompress         bool // whether last read frame had RSV1 set
+	newDecompressionReader func(io.Reader) io.ReadCloser
+}
+
+func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int, writeBufferPool BufferPool, br *bufio.Reader, writeBuf []byte) *Conn {
+
+	if br == nil {
+		if readBufferSize == 0 {
+			readBufferSize = defaultReadBufferSize
+		} else if readBufferSize < maxControlFramePayloadSize {
+			// must be large enough for control frame
+			readBufferSize = maxControlFramePayloadSize
+		}
+		br = bufio.NewReaderSize(conn, readBufferSize)
+	}
+
+	if writeBufferSize <= 0 {
+		writeBufferSize = defaultWriteBufferSize
+	}
+	writeBufferSize += maxFrameHeaderSize
+
+	if writeBuf == nil && writeBufferPool == nil {
+		writeBuf = make([]byte, writeBufferSize)
+	}
+
+	mu := make(chan bool, 1)
+	mu <- true
+	c := &Conn{
+		isServer:               isServer,
+		br:                     br,
+		conn:                   conn,
+		mu:                     mu,
+		readFinal:              true,
+		writeBuf:               writeBuf,
+		writePool:              writeBufferPool,
+		writeBufSize:           writeBufferSize,
+		enableWriteCompression: true,
+		compressionLevel:       defaultCompressionLevel,
+	}
+	c.SetCloseHandler(nil)
+	c.SetPingHandler(nil)
+	c.SetPongHandler(nil)
+	return c
+}
+
+// Subprotocol returns the negotiated protocol for the connection.
+func (c *Conn) Subprotocol() string {
+	return c.subprotocol
+}
+
+// Close closes the underlying network connection without sending or waiting
+// for a close message.
+func (c *Conn) Close() error {
+	return c.conn.Close()
+}
+
+// LocalAddr returns the local network address.
+func (c *Conn) LocalAddr() net.Addr {
+	return c.conn.LocalAddr()
+}
+
+// RemoteAddr returns the remote network address.
+func (c *Conn) RemoteAddr() net.Addr {
+	return c.conn.RemoteAddr()
+}
+
+// Write methods
+
+func (c *Conn) writeFatal(err error) error {
+	err = hideTempErr(err)
+	c.writeErrMu.Lock()
+	if c.writeErr == nil {
+		c.writeErr = err
+	}
+	c.writeErrMu.Unlock()
+	return err
+}
+
+func (c *Conn) read(n int) ([]byte, error) {
+	p, err := c.br.Peek(n)
+	if err == io.EOF {
+		err = errUnexpectedEOF
+	}
+	c.br.Discard(len(p))
+	return p, err
+}
+
+func (c *Conn) write(frameType int, deadline time.Time, buf0, buf1 []byte) error {
+	<-c.mu
+	defer func() { c.mu <- true }()
+
+	c.writeErrMu.Lock()
+	err := c.writeErr
+	c.writeErrMu.Unlock()
+	if err != nil {
+		return err
+	}
+
+	c.conn.SetWriteDeadline(deadline)
+	if len(buf1) == 0 {
+		_, err = c.conn.Write(buf0)
+	} else {
+		err = c.writeBufs(buf0, buf1)
+	}
+	if err != nil {
+		return c.writeFatal(err)
+	}
+	if frameType == CloseMessage {
+		c.writeFatal(ErrCloseSent)
+	}
+	return nil
+}
+
+// WriteControl writes a control message with the given deadline. The allowed
+// message types are CloseMessage, PingMessage and PongMessage.
+func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) error {
+	if !isControl(messageType) {
+		return errBadWriteOpCode
+	}
+	if len(data) > maxControlFramePayloadSize {
+		return errInvalidControlFrame
+	}
+
+	b0 := byte(messageType) | finalBit
+	b1 := byte(len(data))
+	if !c.isServer {
+		b1 |= maskBit
+	}
+
+	buf := make([]byte, 0, maxFrameHeaderSize+maxControlFramePayloadSize)
+	buf = append(buf, b0, b1)
+
+	if c.isServer {
+		buf = append(buf, data...)
+	} else {
+		key := newMaskKey()
+		buf = append(buf, key[:]...)
+		buf = append(buf, data...)
+		maskBytes(key, 0, buf[6:])
+	}
+
+	d := time.Hour * 1000
+	if !deadline.IsZero() {
+		d = deadline.Sub(time.Now())
+		if d < 0 {
+			return errWriteTimeout
+		}
+	}
+
+	timer := time.NewTimer(d)
+	select {
+	case <-c.mu:
+		timer.Stop()
+	case <-timer.C:
+		return errWriteTimeout
+	}
+	defer func() { c.mu <- true }()
+
+	c.writeErrMu.Lock()
+	err := c.writeErr
+	c.writeErrMu.Unlock()
+	if err != nil {
+		return err
+	}
+
+	c.conn.SetWriteDeadline(deadline)
+	_, err = c.conn.Write(buf)
+	if err != nil {
+		return c.writeFatal(err)
+	}
+	if messageType == CloseMessage {
+		c.writeFatal(ErrCloseSent)
+	}
+	return err
+}
+
+func (c *Conn) prepWrite(messageType int) error {
+	// Close previous writer if not already closed by the application. It's
+	// probably better to return an error in this situation, but we cannot
+	// change this without breaking existing applications.
+	if c.writer != nil {
+		c.writer.Close()
+		c.writer = nil
+	}
+
+	if !isControl(messageType) && !isData(messageType) {
+		return errBadWriteOpCode
+	}
+
+	c.writeErrMu.Lock()
+	err := c.writeErr
+	c.writeErrMu.Unlock()
+	if err != nil {
+		return err
+	}
+
+	if c.writeBuf == nil {
+		wpd, ok := c.writePool.Get().(writePoolData)
+		if ok {
+			c.writeBuf = wpd.buf
+		} else {
+			c.writeBuf = make([]byte, c.writeBufSize)
+		}
+	}
+	return nil
+}
+
+// NextWriter returns a writer for the next message to send. The writer's Close
+// method flushes the complete message to the network.
+//
+// There can be at most one open writer on a connection. NextWriter closes the
+// previous writer if the application has not already done so.
+//
+// All message types (TextMessage, BinaryMessage, CloseMessage, PingMessage and
+// PongMessage) are supported.
+func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) {
+	if err := c.prepWrite(messageType); err != nil {
+		return nil, err
+	}
+
+	mw := &messageWriter{
+		c:         c,
+		frameType: messageType,
+		pos:       maxFrameHeaderSize,
+	}
+	c.writer = mw
+	if c.newCompressionWriter != nil && c.enableWriteCompression && isData(messageType) {
+		w := c.newCompressionWriter(c.writer, c.compressionLevel)
+		mw.compress = true
+		c.writer = w
+	}
+	return c.writer, nil
+}
+
+type messageWriter struct {
+	c         *Conn
+	compress  bool // whether next call to flushFrame should set RSV1
+	pos       int  // end of data in writeBuf.
+	frameType int  // type of the current frame.
+	err       error
+}
+
+func (w *messageWriter) fatal(err error) error {
+	// Record only the first fatal error and detach the writer.
+	if w.err == nil {
+		w.err = err
+		w.c.writer = nil
+	}
+	return err
+}
+
+// flushFrame writes buffered data and extra as a frame to the network. The
+// final argument indicates that this is the last frame in the message.
+func (w *messageWriter) flushFrame(final bool, extra []byte) error {
+	c := w.c
+	length := w.pos - maxFrameHeaderSize + len(extra)
+
+	// Check for invalid control frames.
+	if isControl(w.frameType) &&
+		(!final || length > maxControlFramePayloadSize) {
+		return w.fatal(errInvalidControlFrame)
+	}
+
+	b0 := byte(w.frameType)
+	if final {
+		b0 |= finalBit
+	}
+	if w.compress {
+		b0 |= rsv1Bit
+	}
+	w.compress = false
+
+	b1 := byte(0)
+	if !c.isServer {
+		b1 |= maskBit
+	}
+
+	// Assume that the frame starts at beginning of c.writeBuf.
+	framePos := 0
+	if c.isServer {
+		// Adjust up: server frames omit the 4-byte mask from the header.
+		framePos = 4
+	}
+
+	switch {
+	case length >= 65536:
+		c.writeBuf[framePos] = b0
+		c.writeBuf[framePos+1] = b1 | 127
+		binary.BigEndian.PutUint64(c.writeBuf[framePos+2:], uint64(length))
+	case length > 125:
+		framePos += 6
+		c.writeBuf[framePos] = b0
+		c.writeBuf[framePos+1] = b1 | 126
+		binary.BigEndian.PutUint16(c.writeBuf[framePos+2:], uint16(length))
+	default:
+		framePos += 8
+		c.writeBuf[framePos] = b0
+		c.writeBuf[framePos+1] = b1 | byte(length)
+	}
+
+	if !c.isServer {
+		key := newMaskKey()
+		copy(c.writeBuf[maxFrameHeaderSize-4:], key[:])
+		maskBytes(key, 0, c.writeBuf[maxFrameHeaderSize:w.pos])
+		if len(extra) > 0 {
+			return c.writeFatal(errors.New("websocket: internal error, extra used in client mode"))
+		}
+	}
+
+	// Write the buffers to the connection with best-effort detection of
+	// concurrent writes. See the concurrency section in the package
+	// documentation for more info.
+
+	if c.isWriting {
+		panic("concurrent write to websocket connection")
+	}
+	c.isWriting = true
+
+	err := c.write(w.frameType, c.writeDeadline, c.writeBuf[framePos:w.pos], extra)
+
+	if !c.isWriting {
+		panic("concurrent write to websocket connection")
+	}
+	c.isWriting = false
+
+	if err != nil {
+		return w.fatal(err)
+	}
+
+	if final {
+		c.writer = nil
+		if c.writePool != nil {
+			c.writePool.Put(writePoolData{buf: c.writeBuf})
+			c.writeBuf = nil
+		}
+		return nil
+	}
+
+	// Setup for next frame.
+	w.pos = maxFrameHeaderSize
+	w.frameType = continuationFrame
+	return nil
+}
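+
+// Worked example (editorial): for a final, unmasked (server-side) binary
+// frame with a 300-byte payload, the switch above takes the middle branch
+// and emits a 4-byte header immediately before the payload:
+//
+//  0x82       FIN bit | binary opcode (2)
+//  0x7e       126: the next two bytes hold a 16-bit length
+//  0x01 0x2c  300 in big-endian
+//
+// Lengths up to 125 fit in the second header byte itself; lengths of 65536
+// and above use marker 127 followed by a 64-bit length.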
+
+func (w *messageWriter) ncopy(max int) (int, error) {
+	n := len(w.c.writeBuf) - w.pos
+	if n <= 0 {
+		if err := w.flushFrame(false, nil); err != nil {
+			return 0, err
+		}
+		n = len(w.c.writeBuf) - w.pos
+	}
+	if n > max {
+		n = max
+	}
+	return n, nil
+}
+
+func (w *messageWriter) Write(p []byte) (int, error) {
+	if w.err != nil {
+		return 0, w.err
+	}
+
+	if len(p) > 2*len(w.c.writeBuf) && w.c.isServer {
+		// Don't buffer large messages.
+		err := w.flushFrame(false, p)
+		if err != nil {
+			return 0, err
+		}
+		return len(p), nil
+	}
+
+	nn := len(p)
+	for len(p) > 0 {
+		n, err := w.ncopy(len(p))
+		if err != nil {
+			return 0, err
+		}
+		copy(w.c.writeBuf[w.pos:], p[:n])
+		w.pos += n
+		p = p[n:]
+	}
+	return nn, nil
+}
+
+func (w *messageWriter) WriteString(p string) (int, error) {
+	if w.err != nil {
+		return 0, w.err
+	}
+
+	nn := len(p)
+	for len(p) > 0 {
+		n, err := w.ncopy(len(p))
+		if err != nil {
+			return 0, err
+		}
+		copy(w.c.writeBuf[w.pos:], p[:n])
+		w.pos += n
+		p = p[n:]
+	}
+	return nn, nil
+}
+
+func (w *messageWriter) ReadFrom(r io.Reader) (nn int64, err error) {
+	if w.err != nil {
+		return 0, w.err
+	}
+	for {
+		if w.pos == len(w.c.writeBuf) {
+			err = w.flushFrame(false, nil)
+			if err != nil {
+				break
+			}
+		}
+		var n int
+		n, err = r.Read(w.c.writeBuf[w.pos:])
+		w.pos += n
+		nn += int64(n)
+		if err != nil {
+			if err == io.EOF {
+				err = nil
+			}
+			break
+		}
+	}
+	return nn, err
+}
+
+func (w *messageWriter) Close() error {
+	if w.err != nil {
+		return w.err
+	}
+	if err := w.flushFrame(true, nil); err != nil {
+		return err
+	}
+	w.err = errWriteClosed
+	return nil
+}
+
+// WritePreparedMessage writes a prepared message to the connection.
+func (c *Conn) WritePreparedMessage(pm *PreparedMessage) error {
+	frameType, frameData, err := pm.frame(prepareKey{
+		isServer:         c.isServer,
+		compress:         c.newCompressionWriter != nil && c.enableWriteCompression && isData(pm.messageType),
+		compressionLevel: c.compressionLevel,
+	})
+	if err != nil {
+		return err
+	}
+	if c.isWriting {
+		panic("concurrent write to websocket connection")
+	}
+	c.isWriting = true
+	err = c.write(frameType, c.writeDeadline, frameData, nil)
+	if !c.isWriting {
+		panic("concurrent write to websocket connection")
+	}
+	c.isWriting = false
+	return err
+}
+
+// WriteMessage is a helper method for getting a writer using NextWriter,
+// writing the message and closing the writer.
+func (c *Conn) WriteMessage(messageType int, data []byte) error {
+
+	if c.isServer && (c.newCompressionWriter == nil || !c.enableWriteCompression) {
+		// Fast path with no allocations and single frame.
+
+		if err := c.prepWrite(messageType); err != nil {
+			return err
+		}
+		mw := messageWriter{c: c, frameType: messageType, pos: maxFrameHeaderSize}
+		n := copy(c.writeBuf[mw.pos:], data)
+		mw.pos += n
+		data = data[n:]
+		return mw.flushFrame(true, data)
+	}
+
+	w, err := c.NextWriter(messageType)
+	if err != nil {
+		return err
+	}
+	if _, err = w.Write(data); err != nil {
+		return err
+	}
+	return w.Close()
+}
+
+// SetWriteDeadline sets the write deadline on the underlying network
+// connection. After a write has timed out, the websocket state is corrupt and
+// all future writes will return an error. A zero value for t means writes will
+// not time out.
+func (c *Conn) SetWriteDeadline(t time.Time) error {
+	c.writeDeadline = t
+	return nil
+}
+
+// Read methods
+
+func (c *Conn) advanceFrame() (int, error) {
+	// 1. Skip remainder of previous frame.
+
+	if c.readRemaining > 0 {
+		if _, err := io.CopyN(ioutil.Discard, c.br, c.readRemaining); err != nil {
+			return noFrame, err
+		}
+	}
+
+	// 2. Read and parse first two bytes of frame header.
+
+	p, err := c.read(2)
+	if err != nil {
+		return noFrame, err
+	}
+
+	final := p[0]&finalBit != 0
+	frameType := int(p[0] & 0xf)
+	mask := p[1]&maskBit != 0
+	c.readRemaining = int64(p[1] & 0x7f)
+
+	c.readDecompress = false
+	if c.newDecompressionReader != nil && (p[0]&rsv1Bit) != 0 {
+		c.readDecompress = true
+		p[0] &^= rsv1Bit
+	}
+
+	if rsv := p[0] & (rsv1Bit | rsv2Bit | rsv3Bit); rsv != 0 {
+		return noFrame, c.handleProtocolError("unexpected reserved bits 0x" + strconv.FormatInt(int64(rsv), 16))
+	}
+
+	switch frameType {
+	case CloseMessage, PingMessage, PongMessage:
+		if c.readRemaining > maxControlFramePayloadSize {
+			return noFrame, c.handleProtocolError("control frame length > 125")
+		}
+		if !final {
+			return noFrame, c.handleProtocolError("control frame not final")
+		}
+	case TextMessage, BinaryMessage:
+		if !c.readFinal {
+			return noFrame, c.handleProtocolError("message start before final message frame")
+		}
+		c.readFinal = final
+	case continuationFrame:
+		if c.readFinal {
+			return noFrame, c.handleProtocolError("continuation after final message frame")
+		}
+		c.readFinal = final
+	default:
+		return noFrame, c.handleProtocolError("unknown opcode " + strconv.Itoa(frameType))
+	}
+
+	// 3. Read and parse frame length.
+
+	switch c.readRemaining {
+	case 126:
+		p, err := c.read(2)
+		if err != nil {
+			return noFrame, err
+		}
+		c.readRemaining = int64(binary.BigEndian.Uint16(p))
+	case 127:
+		p, err := c.read(8)
+		if err != nil {
+			return noFrame, err
+		}
+		c.readRemaining = int64(binary.BigEndian.Uint64(p))
+	}
+
+	// 4. Handle frame masking.
+
+	if mask != c.isServer {
+		return noFrame, c.handleProtocolError("incorrect mask flag")
+	}
+
+	if mask {
+		c.readMaskPos = 0
+		p, err := c.read(len(c.readMaskKey))
+		if err != nil {
+			return noFrame, err
+		}
+		copy(c.readMaskKey[:], p)
+	}
+
+	// 5. For text and binary messages, enforce read limit and return.
+
+	if frameType == continuationFrame || frameType == TextMessage || frameType == BinaryMessage {
+
+		c.readLength += c.readRemaining
+		if c.readLimit > 0 && c.readLength > c.readLimit {
+			c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait))
+			return noFrame, ErrReadLimit
+		}
+
+		return frameType, nil
+	}
+
+	// 6. Read control frame payload.
+
+	var payload []byte
+	if c.readRemaining > 0 {
+		payload, err = c.read(int(c.readRemaining))
+		c.readRemaining = 0
+		if err != nil {
+			return noFrame, err
+		}
+		if c.isServer {
+			maskBytes(c.readMaskKey, 0, payload)
+		}
+	}
+
+	// 7. Process control frame payload.
+
+	switch frameType {
+	case PongMessage:
+		if err := c.handlePong(string(payload)); err != nil {
+			return noFrame, err
+		}
+	case PingMessage:
+		if err := c.handlePing(string(payload)); err != nil {
+			return noFrame, err
+		}
+	case CloseMessage:
+		closeCode := CloseNoStatusReceived
+		closeText := ""
+		if len(payload) >= 2 {
+			closeCode = int(binary.BigEndian.Uint16(payload))
+			if !isValidReceivedCloseCode(closeCode) {
+				return noFrame, c.handleProtocolError("invalid close code")
+			}
+			closeText = string(payload[2:])
+			if !utf8.ValidString(closeText) {
+				return noFrame, c.handleProtocolError("invalid utf8 payload in close frame")
+			}
+		}
+		if err := c.handleClose(closeCode, closeText); err != nil {
+			return noFrame, err
+		}
+		return noFrame, &CloseError{Code: closeCode, Text: closeText}
+	}
+
+	return frameType, nil
+}
+
+func (c *Conn) handleProtocolError(message string) error {
+	c.WriteControl(CloseMessage, FormatCloseMessage(CloseProtocolError, message), time.Now().Add(writeWait))
+	return errors.New("websocket: " + message)
+}
+
+// NextReader returns the next data message received from the peer. The
+// returned messageType is either TextMessage or BinaryMessage.
+//
+// There can be at most one open reader on a connection. NextReader discards
+// the previous message if the application has not already consumed it.
+//
+// Applications must break out of the application's read loop when this method
+// returns a non-nil error value. Errors returned from this method are
+// permanent. Once this method returns a non-nil error, all subsequent calls to
+// this method return the same error.
+func (c *Conn) NextReader() (messageType int, r io.Reader, err error) {
+	// Close previous reader, only relevant for decompression.
+	if c.reader != nil {
+		c.reader.Close()
+		c.reader = nil
+	}
+
+	c.messageReader = nil
+	c.readLength = 0
+
+	for c.readErr == nil {
+		frameType, err := c.advanceFrame()
+		if err != nil {
+			c.readErr = hideTempErr(err)
+			break
+		}
+		if frameType == TextMessage || frameType == BinaryMessage {
+			c.messageReader = &messageReader{c}
+			c.reader = c.messageReader
+			if c.readDecompress {
+				c.reader = c.newDecompressionReader(c.reader)
+			}
+			return frameType, c.reader, nil
+		}
+	}
+
+	// Applications that do not handle the error returned from this method
+	// spin in a tight loop on connection failure. To help application
+	// developers detect this mistake, panic on repeated reads to the failed
+	// connection.
+	c.readErrCount++
+	if c.readErrCount >= 1000 {
+		panic("repeated read on failed websocket connection")
+	}
+
+	return noFrame, nil, c.readErr
+}
+
+type messageReader struct{ c *Conn }
+
+func (r *messageReader) Read(b []byte) (int, error) {
+	c := r.c
+	if c.messageReader != r {
+		return 0, io.EOF
+	}
+
+	for c.readErr == nil {
+
+		if c.readRemaining > 0 {
+			if int64(len(b)) > c.readRemaining {
+				b = b[:c.readRemaining]
+			}
+			n, err := c.br.Read(b)
+			c.readErr = hideTempErr(err)
+			if c.isServer {
+				c.readMaskPos = maskBytes(c.readMaskKey, c.readMaskPos, b[:n])
+			}
+			c.readRemaining -= int64(n)
+			if c.readRemaining > 0 && c.readErr == io.EOF {
+				c.readErr = errUnexpectedEOF
+			}
+			return n, c.readErr
+		}
+
+		if c.readFinal {
+			c.messageReader = nil
+			return 0, io.EOF
+		}
+
+		frameType, err := c.advanceFrame()
+		switch {
+		case err != nil:
+			c.readErr = hideTempErr(err)
+		case frameType == TextMessage || frameType == BinaryMessage:
+			c.readErr = errors.New("websocket: internal error, unexpected text or binary in Reader")
+		}
+	}
+
+	err := c.readErr
+	if err == io.EOF && c.messageReader == r {
+		err = errUnexpectedEOF
+	}
+	return 0, err
+}
+
+func (r *messageReader) Close() error {
+	return nil
+}
+
+// ReadMessage is a helper method for getting a reader using NextReader and
+// reading from that reader to a buffer.
+func (c *Conn) ReadMessage() (messageType int, p []byte, err error) {
+	var r io.Reader
+	messageType, r, err = c.NextReader()
+	if err != nil {
+		return messageType, nil, err
+	}
+	p, err = ioutil.ReadAll(r)
+	return messageType, p, err
+}
+
+// SetReadDeadline sets the read deadline on the underlying network connection.
+// After a read has timed out, the websocket connection state is corrupt and
+// all future reads will return an error. A zero value for t means reads will
+// not time out.
+func (c *Conn) SetReadDeadline(t time.Time) error {
+	return c.conn.SetReadDeadline(t)
+}
+
+// SetReadLimit sets the maximum size for a message read from the peer. If a
+// message exceeds the limit, the connection sends a close message to the peer
+// and returns ErrReadLimit to the application.
+func (c *Conn) SetReadLimit(limit int64) {
+	c.readLimit = limit
+}
+
+// CloseHandler returns the current close handler.
+func (c *Conn) CloseHandler() func(code int, text string) error {
+	return c.handleClose
+}
+
+// SetCloseHandler sets the handler for close messages received from the peer.
+// The code argument to h is the received close code or CloseNoStatusReceived
+// if the close message is empty. The default close handler sends a close
+// message back to the peer.
+//
+// The handler function is called from the NextReader, ReadMessage and message
+// reader Read methods. The application must read the connection to process
+// close messages as described in the section on Control Messages above.
+//
+// The connection read methods return a CloseError when a close message is
+// received. Most applications should handle close messages as part of their
+// normal error handling. Applications should only set a close handler when the
+// application must perform some action before sending a close message back to
+// the peer.
+func (c *Conn) SetCloseHandler(h func(code int, text string) error) {
+	if h == nil {
+		h = func(code int, text string) error {
+			message := FormatCloseMessage(code, "")
+			c.WriteControl(CloseMessage, message, time.Now().Add(writeWait))
+			return nil
+		}
+	}
+	c.handleClose = h
+}
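+
+// Sketch (editorial) of a custom close handler that logs before performing
+// the same echo the default handler does; the log call stands in for an
+// assumed application-specific action:
+//
+//  conn.SetCloseHandler(func(code int, text string) error {
+//      log.Printf("peer closing: %d %q", code, text)
+//      msg := websocket.FormatCloseMessage(code, "")
+//      return conn.WriteControl(websocket.CloseMessage, msg, time.Now().Add(time.Second))
+//  })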
+
+// PingHandler returns the current ping handler.
+func (c *Conn) PingHandler() func(appData string) error {
+	return c.handlePing
+}
+
+// SetPingHandler sets the handler for ping messages received from the peer.
+// The appData argument to h is the PING message application data. The default
+// ping handler sends a pong to the peer.
+//
+// The handler function is called from the NextReader, ReadMessage and message
+// reader Read methods. The application must read the connection to process
+// ping messages as described in the section on Control Messages above.
+func (c *Conn) SetPingHandler(h func(appData string) error) {
+	if h == nil {
+		h = func(message string) error {
+			err := c.WriteControl(PongMessage, []byte(message), time.Now().Add(writeWait))
+			if err == ErrCloseSent {
+				return nil
+			} else if e, ok := err.(net.Error); ok && e.Temporary() {
+				return nil
+			}
+			return err
+		}
+	}
+	c.handlePing = h
+}
+
+// PongHandler returns the current pong handler.
+func (c *Conn) PongHandler() func(appData string) error {
+	return c.handlePong
+}
+
+// SetPongHandler sets the handler for pong messages received from the peer.
+// The appData argument to h is the PONG message application data. The default
+// pong handler does nothing.
+//
+// The handler function is called from the NextReader, ReadMessage and message
+// reader Read methods. The application must read the connection to process
+// pong messages as described in the section on Control Messages above.
+func (c *Conn) SetPongHandler(h func(appData string) error) {
+	if h == nil {
+		h = func(string) error { return nil }
+	}
+	c.handlePong = h
+}
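+
+// Keepalive sketch (editorial): the handlers above combine into the common
+// idiom of extending the read deadline on every pong while a writer-side
+// ticker sends pings. The 60s/50s intervals are assumptions, not package
+// defaults:
+//
+//  conn.SetReadDeadline(time.Now().Add(60 * time.Second))
+//  conn.SetPongHandler(func(string) error {
+//      return conn.SetReadDeadline(time.Now().Add(60 * time.Second))
+//  })
+//  go func() {
+//      t := time.NewTicker(50 * time.Second)
+//      defer t.Stop()
+//      for range t.C {
+//          if err := conn.WriteControl(websocket.PingMessage, nil, time.Now().Add(time.Second)); err != nil {
+//              return
+//          }
+//      }
+//  }()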
+
+// UnderlyingConn returns the internal net.Conn. This can be used to make
+// further modifications to connection-specific flags.
+func (c *Conn) UnderlyingConn() net.Conn {
+	return c.conn
+}
+
+// EnableWriteCompression enables and disables write compression of
+// subsequent text and binary messages. This function is a noop if
+// compression was not negotiated with the peer.
+func (c *Conn) EnableWriteCompression(enable bool) {
+	c.enableWriteCompression = enable
+}
+
+// SetCompressionLevel sets the flate compression level for subsequent text and
+// binary messages. This function is a noop if compression was not negotiated
+// with the peer. See the compress/flate package for a description of
+// compression levels.
+func (c *Conn) SetCompressionLevel(level int) error {
+	if !isValidCompressionLevel(level) {
+		return errors.New("websocket: invalid compression level")
+	}
+	c.compressionLevel = level
+	return nil
+}
+
+// FormatCloseMessage formats closeCode and text as a WebSocket close message.
+// An empty message is returned for code CloseNoStatusReceived.
+func FormatCloseMessage(closeCode int, text string) []byte {
+	if closeCode == CloseNoStatusReceived {
+		// Return empty message because it's illegal to send
+		// CloseNoStatusReceived. Return non-nil value in case application
+		// checks for nil.
+		return []byte{}
+	}
+	buf := make([]byte, 2+len(text))
+	binary.BigEndian.PutUint16(buf, uint16(closeCode))
+	copy(buf[2:], text)
+	return buf
+}
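+
+// Closing-handshake sketch (editorial): send a close frame, then drain reads
+// until the peer's close surfaces as a *CloseError. The one-second budget is
+// an assumption:
+//
+//  func closeGracefully(conn *websocket.Conn) error {
+//      msg := websocket.FormatCloseMessage(websocket.CloseNormalClosure, "done")
+//      conn.WriteControl(websocket.CloseMessage, msg, time.Now().Add(time.Second))
+//      conn.SetReadDeadline(time.Now().Add(time.Second))
+//      for {
+//          if _, _, err := conn.NextReader(); err != nil {
+//              break
+//          }
+//      }
+//      return conn.Close()
+//  }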
diff --git a/vendor/github.com/gorilla/websocket/conn_write.go b/vendor/github.com/gorilla/websocket/conn_write.go
new file mode 100644
index 0000000..a509a21
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/conn_write.go
@@ -0,0 +1,15 @@
+// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.8
+
+package websocket
+
+import "net"
+
+func (c *Conn) writeBufs(bufs ...[]byte) error {
+	b := net.Buffers(bufs)
+	_, err := b.WriteTo(c.conn)
+	return err
+}
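+
+// net.Buffers performs a gathering write (writev where the platform supports
+// it), so the frame header and payload passed to write go out in one syscall
+// without being copied into a single buffer. Standalone illustration
+// (editorial):
+//
+//  bufs := net.Buffers{[]byte("frame-header|"), []byte("payload\n")}
+//  n, err := bufs.WriteTo(os.Stdout) // WriteTo consumes bufs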
diff --git a/vendor/github.com/gorilla/websocket/conn_write_legacy.go b/vendor/github.com/gorilla/websocket/conn_write_legacy.go
new file mode 100644
index 0000000..37edaff
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/conn_write_legacy.go
@@ -0,0 +1,18 @@
+// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.8
+
+package websocket
+
+func (c *Conn) writeBufs(bufs ...[]byte) error {
+	for _, buf := range bufs {
+		if len(buf) > 0 {
+			if _, err := c.conn.Write(buf); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/gorilla/websocket/doc.go b/vendor/github.com/gorilla/websocket/doc.go
new file mode 100644
index 0000000..dcce1a6
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/doc.go
@@ -0,0 +1,180 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package websocket implements the WebSocket protocol defined in RFC 6455.
+//
+// Overview
+//
+// The Conn type represents a WebSocket connection. A server application calls
+// the Upgrader.Upgrade method from an HTTP request handler to get a *Conn:
+//
+//  var upgrader = websocket.Upgrader{
+//      ReadBufferSize:  1024,
+//      WriteBufferSize: 1024,
+//  }
+//
+//  func handler(w http.ResponseWriter, r *http.Request) {
+//      conn, err := upgrader.Upgrade(w, r, nil)
+//      if err != nil {
+//          log.Println(err)
+//          return
+//      }
+//      ... Use conn to send and receive messages.
+//  }
+//
+// Call the connection's WriteMessage and ReadMessage methods to send and
+// receive messages as a slice of bytes. This snippet of code shows how to echo
+// messages using these methods:
+//
+//  for {
+//      messageType, p, err := conn.ReadMessage()
+//      if err != nil {
+//          log.Println(err)
+//          return
+//      }
+//      if err := conn.WriteMessage(messageType, p); err != nil {
+//          log.Println(err)
+//          return
+//      }
+//  }
+//
+// In the snippet above, p is a []byte and messageType is an int with value
+// websocket.BinaryMessage or websocket.TextMessage.
+//
+// An application can also send and receive messages using the io.WriteCloser
+// and io.Reader interfaces. To send a message, call the connection NextWriter
+// method to get an io.WriteCloser, write the message to the writer and close
+// the writer when done. To receive a message, call the connection NextReader
+// method to get an io.Reader and read until io.EOF is returned. This snippet
+// shows how to echo messages using the NextWriter and NextReader methods:
+//
+//  for {
+//      messageType, r, err := conn.NextReader()
+//      if err != nil {
+//          return
+//      }
+//      w, err := conn.NextWriter(messageType)
+//      if err != nil {
+//          return err
+//      }
+//      if _, err := io.Copy(w, r); err != nil {
+//          return err
+//      }
+//      if err := w.Close(); err != nil {
+//          return err
+//      }
+//  }
+//
+// Data Messages
+//
+// The WebSocket protocol distinguishes between text and binary data messages.
+// Text messages are interpreted as UTF-8 encoded text. The interpretation of
+// binary messages is left to the application.
+//
+// This package uses the TextMessage and BinaryMessage integer constants to
+// identify the two data message types. The ReadMessage and NextReader methods
+// return the type of the received message. The messageType argument to the
+// WriteMessage and NextWriter methods specifies the type of a sent message.
+//
+// It is the application's responsibility to ensure that text messages are
+// valid UTF-8 encoded text.
+//
+// Control Messages
+//
+// The WebSocket protocol defines three types of control messages: close, ping
+// and pong. Call the connection WriteControl, WriteMessage or NextWriter
+// methods to send a control message to the peer.
+//
+// Connections handle received close messages by calling the handler function
+// set with the SetCloseHandler method and by returning a *CloseError from the
+// NextReader, ReadMessage or the message Read method. The default close
+// handler sends a close message to the peer.
+//
+// Connections handle received ping messages by calling the handler function
+// set with the SetPingHandler method. The default ping handler sends a pong
+// message to the peer.
+//
+// Connections handle received pong messages by calling the handler function
+// set with the SetPongHandler method. The default pong handler does nothing.
+// If an application sends ping messages, then the application should set a
+// pong handler to receive the corresponding pong.
+//
+// The control message handler functions are called from the NextReader,
+// ReadMessage and message reader Read methods. The default close and ping
+// handlers can block these methods for a short time when the handler writes to
+// the connection.
+//
+// The application must read the connection to process close, ping and pong
+// messages sent from the peer. If the application is not otherwise interested
+// in messages from the peer, then the application should start a goroutine to
+// read and discard messages from the peer. A simple example is:
+//
+//  func readLoop(c *websocket.Conn) {
+//      for {
+//          if _, _, err := c.NextReader(); err != nil {
+//              c.Close()
+//              break
+//          }
+//      }
+//  }
+//
+// Concurrency
+//
+// Connections support one concurrent reader and one concurrent writer.
+//
+// Applications are responsible for ensuring that no more than one goroutine
+// calls the write methods (NextWriter, SetWriteDeadline, WriteMessage,
+// WriteJSON, EnableWriteCompression, SetCompressionLevel) concurrently and
+// that no more than one goroutine calls the read methods (NextReader,
+// SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler)
+// concurrently.
+//
+// The Close and WriteControl methods can be called concurrently with all other
+// methods.
+//
+// Origin Considerations
+//
+// Web browsers allow Javascript applications to open a WebSocket connection to
+// any host. It's up to the server to enforce an origin policy using the Origin
+// request header sent by the browser.
+//
+// The Upgrader calls the function specified in the CheckOrigin field to check
+// the origin. If the CheckOrigin function returns false, then the Upgrade
+// method fails the WebSocket handshake with HTTP status 403.
+//
+// If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail
+// the handshake if the Origin request header is present and the Origin host is
+// not equal to the Host request header.
+//
+// The deprecated package-level Upgrade function does not perform origin
+// checking. The application is responsible for checking the Origin header
+// before calling the Upgrade function.
+//
+// Compression EXPERIMENTAL
+//
+// Per message compression extensions (RFC 7692) are experimentally supported
+// by this package in a limited capacity. Setting the EnableCompression option
+// to true in Dialer or Upgrader will attempt to negotiate per message deflate
+// support.
+//
+//  var upgrader = websocket.Upgrader{
+//      EnableCompression: true,
+//  }
+//
+// If compression was successfully negotiated with the connection's peer, any
+// message received in compressed form will be automatically decompressed.
+// All Read methods will return uncompressed bytes.
+//
+// Per message compression of messages written to a connection can be enabled
+// or disabled by calling the corresponding Conn method:
+//
+//  conn.EnableWriteCompression(false)
+//
+// Currently this package does not support compression with "context takeover".
+// This means that messages must be compressed and decompressed in isolation,
+// without retaining sliding window or dictionary state across messages. For
+// more details refer to RFC 7692.
+//
+// Use of compression is experimental and may result in decreased performance.
+package websocket
diff --git a/vendor/github.com/gorilla/websocket/json.go b/vendor/github.com/gorilla/websocket/json.go
new file mode 100644
index 0000000..dc2c1f6
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/json.go
@@ -0,0 +1,60 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+	"encoding/json"
+	"io"
+)
+
+// WriteJSON writes the JSON encoding of v as a message.
+//
+// Deprecated: Use c.WriteJSON instead.
+func WriteJSON(c *Conn, v interface{}) error {
+	return c.WriteJSON(v)
+}
+
+// WriteJSON writes the JSON encoding of v as a message.
+//
+// See the documentation for encoding/json Marshal for details about the
+// conversion of Go values to JSON.
+func (c *Conn) WriteJSON(v interface{}) error {
+	w, err := c.NextWriter(TextMessage)
+	if err != nil {
+		return err
+	}
+	err1 := json.NewEncoder(w).Encode(v)
+	err2 := w.Close()
+	if err1 != nil {
+		return err1
+	}
+	return err2
+}
+
+// ReadJSON reads the next JSON-encoded message from the connection and stores
+// it in the value pointed to by v.
+//
+// Deprecated: Use c.ReadJSON instead.
+func ReadJSON(c *Conn, v interface{}) error {
+	return c.ReadJSON(v)
+}
+
+// ReadJSON reads the next JSON-encoded message from the connection and stores
+// it in the value pointed to by v.
+//
+// See the documentation for the encoding/json Unmarshal function for details
+// about the conversion of JSON to a Go value.
+func (c *Conn) ReadJSON(v interface{}) error {
+	_, r, err := c.NextReader()
+	if err != nil {
+		return err
+	}
+	err = json.NewDecoder(r).Decode(v)
+	if err == io.EOF {
+		// One value is expected in the message.
+		err = io.ErrUnexpectedEOF
+	}
+	return err
+}
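+
+// Usage sketch (editorial): a typical request/response flow with the helpers
+// above. Message and echoJSON are illustrative names, not package API:
+//
+//  type Message struct {
+//      Op   string `json:"op"`
+//      Body string `json:"body"`
+//  }
+//
+//  func echoJSON(conn *websocket.Conn) error {
+//      var m Message
+//      if err := conn.ReadJSON(&m); err != nil {
+//          return err
+//      }
+//      return conn.WriteJSON(m)
+//  }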
diff --git a/vendor/github.com/gorilla/websocket/mask.go b/vendor/github.com/gorilla/websocket/mask.go
new file mode 100644
index 0000000..577fce9
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/mask.go
@@ -0,0 +1,54 @@
+// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved.  Use of
+// this source code is governed by a BSD-style license that can be found in the
+// LICENSE file.
+
+// +build !appengine
+
+package websocket
+
+import "unsafe"
+
+const wordSize = int(unsafe.Sizeof(uintptr(0)))
+
+func maskBytes(key [4]byte, pos int, b []byte) int {
+	// Mask one byte at a time for small buffers.
+	if len(b) < 2*wordSize {
+		for i := range b {
+			b[i] ^= key[pos&3]
+			pos++
+		}
+		return pos & 3
+	}
+
+	// Mask one byte at a time to word boundary.
+	if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 {
+		n = wordSize - n
+		for i := range b[:n] {
+			b[i] ^= key[pos&3]
+			pos++
+		}
+		b = b[n:]
+	}
+
+	// Create aligned word size key.
+	var k [wordSize]byte
+	for i := range k {
+		k[i] = key[(pos+i)&3]
+	}
+	kw := *(*uintptr)(unsafe.Pointer(&k))
+
+	// Mask one word at a time.
+	n := (len(b) / wordSize) * wordSize
+	for i := 0; i < n; i += wordSize {
+		*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw
+	}
+
+	// Mask one byte at a time for remaining bytes.
+	b = b[n:]
+	for i := range b {
+		b[i] ^= key[pos&3]
+		pos++
+	}
+
+	return pos & 3
+}
diff --git a/vendor/github.com/gorilla/websocket/mask_safe.go b/vendor/github.com/gorilla/websocket/mask_safe.go
new file mode 100644
index 0000000..2aac060
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/mask_safe.go
@@ -0,0 +1,15 @@
+// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved.  Use of
+// this source code is governed by a BSD-style license that can be found in the
+// LICENSE file.
+
+// +build appengine
+
+package websocket
+
+func maskBytes(key [4]byte, pos int, b []byte) int {
+	for i := range b {
+		b[i] ^= key[pos&3]
+		pos++
+	}
+	return pos & 3
+}
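+
+// Both maskBytes variants return the updated key position so masking can
+// resume across split buffers, and XOR with the repeating 4-byte key is an
+// involution: masking the same bytes twice restores them. Editorial check:
+//
+//  key := [4]byte{1, 2, 3, 4}
+//  b := []byte("hello websocket")
+//  pos := maskBytes(key, 0, b[:7]) // mask the first chunk
+//  maskBytes(key, pos, b[7:])      // resume at the carried position
+//  maskBytes(key, 0, b)            // one full pass restores the original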
diff --git a/vendor/github.com/gorilla/websocket/prepared.go b/vendor/github.com/gorilla/websocket/prepared.go
new file mode 100644
index 0000000..74ec565
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/prepared.go
@@ -0,0 +1,102 @@
+// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+	"bytes"
+	"net"
+	"sync"
+	"time"
+)
+
+// PreparedMessage caches on-the-wire representations of a message payload.
+// Use PreparedMessage to efficiently send a message payload to multiple
+// connections. PreparedMessage is especially useful when compression is used
+// because the CPU and memory expensive compression operation can be executed
+// once for a given set of compression options.
+type PreparedMessage struct {
+	messageType int
+	data        []byte
+	mu          sync.Mutex
+	frames      map[prepareKey]*preparedFrame
+}
+
+// prepareKey defines a unique set of options to cache prepared frames in PreparedMessage.
+type prepareKey struct {
+	isServer         bool
+	compress         bool
+	compressionLevel int
+}
+
+// preparedFrame contains data in wire representation.
+type preparedFrame struct {
+	once sync.Once
+	data []byte
+}
+
+// NewPreparedMessage returns an initialized PreparedMessage. You can then send
+// it to a connection using the WritePreparedMessage method. A valid wire
+// representation is calculated lazily, only once for each set of connection
+// options.
+func NewPreparedMessage(messageType int, data []byte) (*PreparedMessage, error) {
+	pm := &PreparedMessage{
+		messageType: messageType,
+		frames:      make(map[prepareKey]*preparedFrame),
+		data:        data,
+	}
+
+	// Prepare a plain server frame.
+	_, frameData, err := pm.frame(prepareKey{isServer: true, compress: false})
+	if err != nil {
+		return nil, err
+	}
+
+	// To protect against caller modifying the data argument, remember the data
+	// copied to the plain server frame.
+	pm.data = frameData[len(frameData)-len(data):]
+	return pm, nil
+}
+
+func (pm *PreparedMessage) frame(key prepareKey) (int, []byte, error) {
+	pm.mu.Lock()
+	frame, ok := pm.frames[key]
+	if !ok {
+		frame = &preparedFrame{}
+		pm.frames[key] = frame
+	}
+	pm.mu.Unlock()
+
+	var err error
+	frame.once.Do(func() {
+		// Prepare a frame using a 'fake' connection.
+		// TODO: Refactor code in conn.go to allow more direct construction of
+		// the frame.
+		mu := make(chan bool, 1)
+		mu <- true
+		var nc prepareConn
+		c := &Conn{
+			conn:                   &nc,
+			mu:                     mu,
+			isServer:               key.isServer,
+			compressionLevel:       key.compressionLevel,
+			enableWriteCompression: true,
+			writeBuf:               make([]byte, defaultWriteBufferSize+maxFrameHeaderSize),
+		}
+		if key.compress {
+			c.newCompressionWriter = compressNoContextTakeover
+		}
+		err = c.WriteMessage(pm.messageType, pm.data)
+		frame.data = nc.buf.Bytes()
+	})
+	return pm.messageType, frame.data, err
+}
+
+type prepareConn struct {
+	buf bytes.Buffer
+	net.Conn
+}
+
+func (pc *prepareConn) Write(p []byte) (int, error)        { return pc.buf.Write(p) }
+func (pc *prepareConn) SetWriteDeadline(t time.Time) error { return nil }
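+
+// Broadcast sketch (editorial): prepare once, write to many connections; each
+// Conn fetches (or lazily builds) the cached frame matching its own masking
+// and compression settings. conns is an assumed slice of connections:
+//
+//  pm, err := websocket.NewPreparedMessage(websocket.TextMessage, payload)
+//  if err != nil {
+//      return err
+//  }
+//  for _, c := range conns {
+//      c.WritePreparedMessage(pm) // per-connection errors ignored here
+//  }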
diff --git a/vendor/github.com/gorilla/websocket/proxy.go b/vendor/github.com/gorilla/websocket/proxy.go
new file mode 100644
index 0000000..bf2478e
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/proxy.go
@@ -0,0 +1,77 @@
+// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+	"bufio"
+	"encoding/base64"
+	"errors"
+	"net"
+	"net/http"
+	"net/url"
+	"strings"
+)
+
+type netDialerFunc func(network, addr string) (net.Conn, error)
+
+func (fn netDialerFunc) Dial(network, addr string) (net.Conn, error) {
+	return fn(network, addr)
+}
+
+func init() {
+	proxy_RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy_Dialer) (proxy_Dialer, error) {
+		return &httpProxyDialer{proxyURL: proxyURL, forwardDial: forwardDialer.Dial}, nil
+	})
+}
+
+type httpProxyDialer struct {
+	proxyURL   *url.URL
+	forwardDial func(network, addr string) (net.Conn, error)
+}
+
+func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) {
+	hostPort, _ := hostPortNoPort(hpd.proxyURL)
+	conn, err := hpd.forwardDial(network, hostPort)
+	if err != nil {
+		return nil, err
+	}
+
+	connectHeader := make(http.Header)
+	if user := hpd.proxyURL.User; user != nil {
+		proxyUser := user.Username()
+		if proxyPassword, passwordSet := user.Password(); passwordSet {
+			credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword))
+			connectHeader.Set("Proxy-Authorization", "Basic "+credential)
+		}
+	}
+
+	connectReq := &http.Request{
+		Method: "CONNECT",
+		URL:    &url.URL{Opaque: addr},
+		Host:   addr,
+		Header: connectHeader,
+	}
+
+	if err := connectReq.Write(conn); err != nil {
+		conn.Close()
+		return nil, err
+	}
+
+	// Read response. It's OK to use and discard the buffered reader here because
+	// the remote server does not speak until spoken to.
+	br := bufio.NewReader(conn)
+	resp, err := http.ReadResponse(br, connectReq)
+	if err != nil {
+		conn.Close()
+		return nil, err
+	}
+
+	if resp.StatusCode != 200 {
+		conn.Close()
+		// Guard against a status line with no reason phrase before indexing.
+		f := strings.SplitN(resp.Status, " ", 2)
+		if len(f) < 2 {
+			return nil, errors.New("websocket: proxy CONNECT failed: " + resp.Status)
+		}
+		return nil, errors.New(f[1])
+	}
+	return conn, nil
+}
diff --git a/vendor/github.com/gorilla/websocket/server.go b/vendor/github.com/gorilla/websocket/server.go
new file mode 100644
index 0000000..a761824
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/server.go
@@ -0,0 +1,363 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+	"bufio"
+	"errors"
+	"io"
+	"net/http"
+	"net/url"
+	"strings"
+	"time"
+)
+
+// HandshakeError describes an error with the handshake from the peer.
+type HandshakeError struct {
+	message string
+}
+
+func (e HandshakeError) Error() string { return e.message }
+
+// Upgrader specifies parameters for upgrading an HTTP connection to a
+// WebSocket connection.
+type Upgrader struct {
+	// HandshakeTimeout specifies the duration for the handshake to complete.
+	HandshakeTimeout time.Duration
+
+	// ReadBufferSize and WriteBufferSize specify I/O buffer sizes. If a buffer
+	// size is zero, then buffers allocated by the HTTP server are used. The
+	// I/O buffer sizes do not limit the size of the messages that can be sent
+	// or received.
+	ReadBufferSize, WriteBufferSize int
+
+	// WriteBufferPool is a pool of buffers for write operations. If the value
+	// is not set, then write buffers are allocated to the connection for the
+	// lifetime of the connection.
+	//
+	// A pool is most useful when the application has a modest volume of writes
+	// across a large number of connections.
+	//
+	// Applications should use a single pool for each unique value of
+	// WriteBufferSize.
+	WriteBufferPool BufferPool
+
+	// Subprotocols specifies the server's supported protocols in order of
+	// preference. If this field is not nil, then the Upgrade method negotiates a
+	// subprotocol by selecting the first match in this list with a protocol
+	// requested by the client. If there's no match, then no protocol is
+	// negotiated (the Sec-Websocket-Protocol header is not included in the
+	// handshake response).
+	Subprotocols []string
+
+	// Error specifies the function for generating HTTP error responses. If Error
+	// is nil, then http.Error is used to generate the HTTP response.
+	Error func(w http.ResponseWriter, r *http.Request, status int, reason error)
+
+	// CheckOrigin returns true if the request Origin header is acceptable. If
+	// CheckOrigin is nil, then a safe default is used: return false if the
+	// Origin request header is present and the origin host is not equal to
+	// request Host header.
+	//
+	// A CheckOrigin function should carefully validate the request origin to
+	// prevent cross-site request forgery.
+	CheckOrigin func(r *http.Request) bool
+
+	// EnableCompression specifies whether the server should attempt to
+	// negotiate per-message compression (RFC 7692). Setting this value to
+	// true does not guarantee that compression will be supported. Currently
+	// only "no context takeover" modes are supported.
+	EnableCompression bool
+}
+
+func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status int, reason string) (*Conn, error) {
+	err := HandshakeError{reason}
+	if u.Error != nil {
+		u.Error(w, r, status, err)
+	} else {
+		w.Header().Set("Sec-Websocket-Version", "13")
+		http.Error(w, http.StatusText(status), status)
+	}
+	return nil, err
+}
+
+// checkSameOrigin returns true if the origin is not set or is equal to the request host.
+func checkSameOrigin(r *http.Request) bool {
+	origin := r.Header["Origin"]
+	if len(origin) == 0 {
+		return true
+	}
+	u, err := url.Parse(origin[0])
+	if err != nil {
+		return false
+	}
+	return equalASCIIFold(u.Host, r.Host)
+}
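+
+// Override sketch (editorial): when the same-origin default is too strict, a
+// CheckOrigin function can consult an allow-list. allowedOrigins and the
+// hostname are assumptions for illustration:
+//
+//  var allowedOrigins = map[string]bool{"app.example.com": true}
+//
+//  upgrader.CheckOrigin = func(r *http.Request) bool {
+//      origin := r.Header.Get("Origin")
+//      if origin == "" {
+//          return true // mirror the default: no Origin header, no check
+//      }
+//      u, err := url.Parse(origin)
+//      if err != nil {
+//          return false
+//      }
+//      return allowedOrigins[u.Host] || u.Host == r.Host
+//  }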
+
+func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string {
+	if u.Subprotocols != nil {
+		clientProtocols := Subprotocols(r)
+		for _, serverProtocol := range u.Subprotocols {
+			for _, clientProtocol := range clientProtocols {
+				if clientProtocol == serverProtocol {
+					return clientProtocol
+				}
+			}
+		}
+	} else if responseHeader != nil {
+		return responseHeader.Get("Sec-Websocket-Protocol")
+	}
+	return ""
+}
+
+// Upgrade upgrades the HTTP server connection to the WebSocket protocol.
+//
+// The responseHeader is included in the response to the client's upgrade
+// request. Use the responseHeader to specify cookies (Set-Cookie) and the
+// application negotiated subprotocol (Sec-WebSocket-Protocol).
+//
+// If the upgrade fails, then Upgrade replies to the client with an HTTP error
+// response.
+func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) {
+	const badHandshake = "websocket: the client is not using the websocket protocol: "
+
+	if !tokenListContainsValue(r.Header, "Connection", "upgrade") {
+		return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'upgrade' token not found in 'Connection' header")
+	}
+
+	if !tokenListContainsValue(r.Header, "Upgrade", "websocket") {
+		return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'websocket' token not found in 'Upgrade' header")
+	}
+
+	if r.Method != "GET" {
+		return u.returnError(w, r, http.StatusMethodNotAllowed, badHandshake+"request method is not GET")
+	}
+
+	if !tokenListContainsValue(r.Header, "Sec-Websocket-Version", "13") {
+		return u.returnError(w, r, http.StatusBadRequest, "websocket: unsupported version: 13 not found in 'Sec-Websocket-Version' header")
+	}
+
+	if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok {
+		return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-WebSocket-Extensions' headers are unsupported")
+	}
+
+	checkOrigin := u.CheckOrigin
+	if checkOrigin == nil {
+		checkOrigin = checkSameOrigin
+	}
+	if !checkOrigin(r) {
+		return u.returnError(w, r, http.StatusForbidden, "websocket: request origin not allowed by Upgrader.CheckOrigin")
+	}
+
+	challengeKey := r.Header.Get("Sec-Websocket-Key")
+	if challengeKey == "" {
+		return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: `Sec-WebSocket-Key' header is missing or blank")
+	}
+
+	subprotocol := u.selectSubprotocol(r, responseHeader)
+
+	// Negotiate PMCE
+	var compress bool
+	if u.EnableCompression {
+		for _, ext := range parseExtensions(r.Header) {
+			if ext[""] != "permessage-deflate" {
+				continue
+			}
+			compress = true
+			break
+		}
+	}
+
+	h, ok := w.(http.Hijacker)
+	if !ok {
+		return u.returnError(w, r, http.StatusInternalServerError, "websocket: response does not implement http.Hijacker")
+	}
+	var brw *bufio.ReadWriter
+	netConn, brw, err := h.Hijack()
+	if err != nil {
+		return u.returnError(w, r, http.StatusInternalServerError, err.Error())
+	}
+
+	if brw.Reader.Buffered() > 0 {
+		netConn.Close()
+		return nil, errors.New("websocket: client sent data before handshake is complete")
+	}
+
+	var br *bufio.Reader
+	if u.ReadBufferSize == 0 && bufioReaderSize(netConn, brw.Reader) > 256 {
+		// Reuse hijacked buffered reader as connection reader.
+		br = brw.Reader
+	}
+
+	buf := bufioWriterBuffer(netConn, brw.Writer)
+
+	var writeBuf []byte
+	if u.WriteBufferPool == nil && u.WriteBufferSize == 0 && len(buf) >= maxFrameHeaderSize+256 {
+		// Reuse hijacked write buffer as connection buffer.
+		writeBuf = buf
+	}
+
+	c := newConn(netConn, true, u.ReadBufferSize, u.WriteBufferSize, u.WriteBufferPool, br, writeBuf)
+	c.subprotocol = subprotocol
+
+	if compress {
+		c.newCompressionWriter = compressNoContextTakeover
+		c.newDecompressionReader = decompressNoContextTakeover
+	}
+
+	// Use larger of hijacked buffer and connection write buffer for header.
+	p := buf
+	if len(c.writeBuf) > len(p) {
+		p = c.writeBuf
+	}
+	p = p[:0]
+
+	p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...)
+	p = append(p, computeAcceptKey(challengeKey)...)
+	p = append(p, "\r\n"...)
+	if c.subprotocol != "" {
+		p = append(p, "Sec-WebSocket-Protocol: "...)
+		p = append(p, c.subprotocol...)
+		p = append(p, "\r\n"...)
+	}
+	if compress {
+		p = append(p, "Sec-WebSocket-Extensions: permessage-deflate; server_no_context_takeover; client_no_context_takeover\r\n"...)
+	}
+	for k, vs := range responseHeader {
+		if k == "Sec-Websocket-Protocol" {
+			continue
+		}
+		for _, v := range vs {
+			p = append(p, k...)
+			p = append(p, ": "...)
+			for i := 0; i < len(v); i++ {
+				b := v[i]
+				if b <= 31 {
+					// prevent response splitting.
+					b = ' '
+				}
+				p = append(p, b)
+			}
+			p = append(p, "\r\n"...)
+		}
+	}
+	p = append(p, "\r\n"...)
+
+	// Clear deadlines set by HTTP server.
+	netConn.SetDeadline(time.Time{})
+
+	if u.HandshakeTimeout > 0 {
+		netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout))
+	}
+	if _, err = netConn.Write(p); err != nil {
+		netConn.Close()
+		return nil, err
+	}
+	if u.HandshakeTimeout > 0 {
+		netConn.SetWriteDeadline(time.Time{})
+	}
+
+	return c, nil
+}
+
+// Upgrade upgrades the HTTP server connection to the WebSocket protocol.
+//
+// Deprecated: Use websocket.Upgrader instead.
+//
+// Upgrade does not perform origin checking. The application is responsible for
+// checking the Origin header before calling Upgrade. An example implementation
+// of the same origin policy check is:
+//
+//	if req.Header.Get("Origin") != "http://"+req.Host {
+//		http.Error(w, "Origin not allowed", http.StatusForbidden)
+//		return
+//	}
+//
+// If the endpoint supports subprotocols, then the application is responsible
+// for negotiating the protocol used on the connection. Use the Subprotocols()
+// function to get the subprotocols requested by the client. Use the
+// Sec-Websocket-Protocol response header to specify the subprotocol selected
+// by the application.
+//
+// The responseHeader is included in the response to the client's upgrade
+// request. Use the responseHeader to specify cookies (Set-Cookie) and the
+// negotiated subprotocol (Sec-Websocket-Protocol).
+//
+// The connection buffers IO to the underlying network connection. The
+// readBufSize and writeBufSize parameters specify the size of the buffers to
+// use. Messages can be larger than the buffers.
+//
+// If the request is not a valid WebSocket handshake, then Upgrade returns an
+// error of type HandshakeError. Applications should handle this error by
+// replying to the client with an HTTP error response.
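+//
+// A minimal sketch of the non-deprecated equivalent (the handler wiring below
+// is illustrative, not part of this package):
+//
+//	var upgrader = Upgrader{ReadBufferSize: 1024, WriteBufferSize: 1024}
+//
+//	func serveWs(w http.ResponseWriter, r *http.Request) {
+//		conn, err := upgrader.Upgrade(w, r, nil)
+//		if err != nil {
+//			log.Println(err) // assumes the standard library log package
+//			return
+//		}
+//		defer conn.Close()
+//	}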
+func Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header, readBufSize, writeBufSize int) (*Conn, error) {
+	u := Upgrader{ReadBufferSize: readBufSize, WriteBufferSize: writeBufSize}
+	u.Error = func(w http.ResponseWriter, r *http.Request, status int, reason error) {
+		// don't return errors to maintain backwards compatibility
+	}
+	u.CheckOrigin = func(r *http.Request) bool {
+		// allow all connections by default
+		return true
+	}
+	return u.Upgrade(w, r, responseHeader)
+}
+
+// Subprotocols returns the subprotocols requested by the client in the
+// Sec-Websocket-Protocol header.
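+//
+// For example (illustrative), a request carrying the header
+//
+//	Sec-Websocket-Protocol: chat, superchat
+//
+// yields []string{"chat", "superchat"}.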
+func Subprotocols(r *http.Request) []string {
+	h := strings.TrimSpace(r.Header.Get("Sec-Websocket-Protocol"))
+	if h == "" {
+		return nil
+	}
+	protocols := strings.Split(h, ",")
+	for i := range protocols {
+		protocols[i] = strings.TrimSpace(protocols[i])
+	}
+	return protocols
+}
+
+// IsWebSocketUpgrade returns true if the client requested upgrade to the
+// WebSocket protocol.
+func IsWebSocketUpgrade(r *http.Request) bool {
+	return tokenListContainsValue(r.Header, "Connection", "upgrade") &&
+		tokenListContainsValue(r.Header, "Upgrade", "websocket")
+}
+
+// bufioReaderSize returns the size of a bufio.Reader.
+func bufioReaderSize(originalReader io.Reader, br *bufio.Reader) int {
+	// This code assumes that peek on a reset reader returns
+	// bufio.Reader.buf[:0].
+	// TODO: Use bufio.Reader.Size() after Go 1.10
+	br.Reset(originalReader)
+	if p, err := br.Peek(0); err == nil {
+		return cap(p)
+	}
+	return 0
+}
+
+// writeHook is an io.Writer that records the last slice passed to it via
+// io.Writer.Write.
+type writeHook struct {
+	p []byte
+}
+
+func (wh *writeHook) Write(p []byte) (int, error) {
+	wh.p = p
+	return len(p), nil
+}
+
+// bufioWriterBuffer grabs the buffer from a bufio.Writer.
+func bufioWriterBuffer(originalWriter io.Writer, bw *bufio.Writer) []byte {
+	// This code assumes that bufio.Writer.buf[:1] is passed to the
+	// bufio.Writer's underlying writer.
+	var wh writeHook
+	bw.Reset(&wh)
+	bw.WriteByte(0)
+	bw.Flush()
+
+	bw.Reset(originalWriter)
+
+	return wh.p[:cap(wh.p)]
+}
diff --git a/vendor/github.com/gorilla/websocket/trace.go b/vendor/github.com/gorilla/websocket/trace.go
new file mode 100644
index 0000000..834f122
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/trace.go
@@ -0,0 +1,19 @@
+// +build go1.8
+
+package websocket
+
+import (
+	"crypto/tls"
+	"net/http/httptrace"
+)
+
+func doHandshakeWithTrace(trace *httptrace.ClientTrace, tlsConn *tls.Conn, cfg *tls.Config) error {
+	if trace.TLSHandshakeStart != nil {
+		trace.TLSHandshakeStart()
+	}
+	err := doHandshake(tlsConn, cfg)
+	if trace.TLSHandshakeDone != nil {
+		trace.TLSHandshakeDone(tlsConn.ConnectionState(), err)
+	}
+	return err
+}
diff --git a/vendor/github.com/gorilla/websocket/trace_17.go b/vendor/github.com/gorilla/websocket/trace_17.go
new file mode 100644
index 0000000..77d05a0
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/trace_17.go
@@ -0,0 +1,12 @@
+// +build !go1.8
+
+package websocket
+
+import (
+	"crypto/tls"
+	"net/http/httptrace"
+)
+
+func doHandshakeWithTrace(trace *httptrace.ClientTrace, tlsConn *tls.Conn, cfg *tls.Config) error {
+	return doHandshake(tlsConn, cfg)
+}
diff --git a/vendor/github.com/gorilla/websocket/util.go b/vendor/github.com/gorilla/websocket/util.go
new file mode 100644
index 0000000..354001e
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/util.go
@@ -0,0 +1,237 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+	"crypto/rand"
+	"crypto/sha1"
+	"encoding/base64"
+	"io"
+	"net/http"
+	"strings"
+	"unicode/utf8"
+)
+
+var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11")
+
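+// computeAcceptKey returns the Sec-WebSocket-Accept header value for the given
+// challenge key, per RFC 6455: the base64 encoding of SHA-1(key + GUID).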
+func computeAcceptKey(challengeKey string) string {
+	h := sha1.New()
+	h.Write([]byte(challengeKey))
+	h.Write(keyGUID)
+	return base64.StdEncoding.EncodeToString(h.Sum(nil))
+}
+
+func generateChallengeKey() (string, error) {
+	p := make([]byte, 16)
+	if _, err := io.ReadFull(rand.Reader, p); err != nil {
+		return "", err
+	}
+	return base64.StdEncoding.EncodeToString(p), nil
+}
+
+// Octet types from RFC 2616.
+var octetTypes [256]byte
+
+const (
+	isTokenOctet = 1 << iota
+	isSpaceOctet
+)
+
+func init() {
+	// From RFC 2616
+	//
+	// OCTET      = <any 8-bit sequence of data>
+	// CHAR       = <any US-ASCII character (octets 0 - 127)>
+	// CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+	// CR         = <US-ASCII CR, carriage return (13)>
+	// LF         = <US-ASCII LF, linefeed (10)>
+	// SP         = <US-ASCII SP, space (32)>
+	// HT         = <US-ASCII HT, horizontal-tab (9)>
+	// <">        = <US-ASCII double-quote mark (34)>
+	// CRLF       = CR LF
+	// LWS        = [CRLF] 1*( SP | HT )
+	// TEXT       = <any OCTET except CTLs, but including LWS>
+	// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
+	//              | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
+	// token      = 1*<any CHAR except CTLs or separators>
+	// qdtext     = <any TEXT except <">>
+
+	for c := 0; c < 256; c++ {
+		var t byte
+		isCtl := c <= 31 || c == 127
+		isChar := 0 <= c && c <= 127
+		isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0
+		if strings.IndexRune(" \t\r\n", rune(c)) >= 0 {
+			t |= isSpaceOctet
+		}
+		if isChar && !isCtl && !isSeparator {
+			t |= isTokenOctet
+		}
+		octetTypes[c] = t
+	}
+}
+
+func skipSpace(s string) (rest string) {
+	i := 0
+	for ; i < len(s); i++ {
+		if octetTypes[s[i]]&isSpaceOctet == 0 {
+			break
+		}
+	}
+	return s[i:]
+}
+
+func nextToken(s string) (token, rest string) {
+	i := 0
+	for ; i < len(s); i++ {
+		if octetTypes[s[i]]&isTokenOctet == 0 {
+			break
+		}
+	}
+	return s[:i], s[i:]
+}
+
+func nextTokenOrQuoted(s string) (value string, rest string) {
+	if !strings.HasPrefix(s, "\"") {
+		return nextToken(s)
+	}
+	s = s[1:]
+	for i := 0; i < len(s); i++ {
+		switch s[i] {
+		case '"':
+			return s[:i], s[i+1:]
+		case '\\':
+			p := make([]byte, len(s)-1)
+			j := copy(p, s[:i])
+			escape := true
+			for i = i + 1; i < len(s); i++ {
+				b := s[i]
+				switch {
+				case escape:
+					escape = false
+					p[j] = b
+					j++
+				case b == '\\':
+					escape = true
+				case b == '"':
+					return string(p[:j]), s[i+1:]
+				default:
+					p[j] = b
+					j++
+				}
+			}
+			return "", ""
+		}
+	}
+	return "", ""
+}
+
+// equalASCIIFold returns true if s is equal to t with ASCII case folding.
+func equalASCIIFold(s, t string) bool {
+	for s != "" && t != "" {
+		sr, size := utf8.DecodeRuneInString(s)
+		s = s[size:]
+		tr, size := utf8.DecodeRuneInString(t)
+		t = t[size:]
+		if sr == tr {
+			continue
+		}
+		if 'A' <= sr && sr <= 'Z' {
+			sr = sr + 'a' - 'A'
+		}
+		if 'A' <= tr && tr <= 'Z' {
+			tr = tr + 'a' - 'A'
+		}
+		if sr != tr {
+			return false
+		}
+	}
+	return s == t
+}
+
+// tokenListContainsValue returns true if the 1#token header with the given
+// name contains a token equal to value with ASCII case folding.
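+//
+// For example (illustrative), tokenListContainsValue(header, "Connection", "upgrade")
+// is true when the request carries "Connection: keep-alive, Upgrade", since
+// tokens are compared with ASCII case folding.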
+func tokenListContainsValue(header http.Header, name string, value string) bool {
+headers:
+	for _, s := range header[name] {
+		for {
+			var t string
+			t, s = nextToken(skipSpace(s))
+			if t == "" {
+				continue headers
+			}
+			s = skipSpace(s)
+			if s != "" && s[0] != ',' {
+				continue headers
+			}
+			if equalASCIIFold(t, value) {
+				return true
+			}
+			if s == "" {
+				continue headers
+			}
+			s = s[1:]
+		}
+	}
+	return false
+}
+
+// parseExtensions parses WebSocket extensions from a header.
+func parseExtensions(header http.Header) []map[string]string {
+	// From RFC 6455:
+	//
+	//  Sec-WebSocket-Extensions = extension-list
+	//  extension-list = 1#extension
+	//  extension = extension-token *( ";" extension-param )
+	//  extension-token = registered-token
+	//  registered-token = token
+	//  extension-param = token [ "=" (token | quoted-string) ]
+	//     ;When using the quoted-string syntax variant, the value
+	//     ;after quoted-string unescaping MUST conform to the
+	//     ;'token' ABNF.
+
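+	// As an illustrative example, a header value of
+	//
+	//   permessage-deflate; client_no_context_takeover
+	//
+	// parses to []map[string]string{{"": "permessage-deflate", "client_no_context_takeover": ""}}.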
+	var result []map[string]string
+headers:
+	for _, s := range header["Sec-Websocket-Extensions"] {
+		for {
+			var t string
+			t, s = nextToken(skipSpace(s))
+			if t == "" {
+				continue headers
+			}
+			ext := map[string]string{"": t}
+			for {
+				s = skipSpace(s)
+				if !strings.HasPrefix(s, ";") {
+					break
+				}
+				var k string
+				k, s = nextToken(skipSpace(s[1:]))
+				if k == "" {
+					continue headers
+				}
+				s = skipSpace(s)
+				var v string
+				if strings.HasPrefix(s, "=") {
+					v, s = nextTokenOrQuoted(skipSpace(s[1:]))
+					s = skipSpace(s)
+				}
+				if s != "" && s[0] != ',' && s[0] != ';' {
+					continue headers
+				}
+				ext[k] = v
+			}
+			if s != "" && s[0] != ',' {
+				continue headers
+			}
+			result = append(result, ext)
+			if s == "" {
+				continue headers
+			}
+			s = s[1:]
+		}
+	}
+	return result
+}
diff --git a/vendor/github.com/gorilla/websocket/x_net_proxy.go b/vendor/github.com/gorilla/websocket/x_net_proxy.go
new file mode 100644
index 0000000..2e668f6
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/x_net_proxy.go
@@ -0,0 +1,473 @@
+// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT.
+//go:generate bundle -o x_net_proxy.go golang.org/x/net/proxy
+
+// Package proxy provides support for a variety of protocols to proxy network
+// data.
+//
+
+package websocket
+
+import (
+	"errors"
+	"io"
+	"net"
+	"net/url"
+	"os"
+	"strconv"
+	"strings"
+	"sync"
+)
+
+type proxy_direct struct{}
+
+// Direct is a direct proxy: one that makes network connections directly.
+var proxy_Direct = proxy_direct{}
+
+func (proxy_direct) Dial(network, addr string) (net.Conn, error) {
+	return net.Dial(network, addr)
+}
+
+// A PerHost directs connections to a default Dialer unless the host name
+// requested matches one of a number of exceptions.
+type proxy_PerHost struct {
+	def, bypass proxy_Dialer
+
+	bypassNetworks []*net.IPNet
+	bypassIPs      []net.IP
+	bypassZones    []string
+	bypassHosts    []string
+}
+
+// NewPerHost returns a PerHost Dialer that directs connections to either
+// defaultDialer or bypass, depending on whether the connection matches one of
+// the configured rules.
+func proxy_NewPerHost(defaultDialer, bypass proxy_Dialer) *proxy_PerHost {
+	return &proxy_PerHost{
+		def:    defaultDialer,
+		bypass: bypass,
+	}
+}
+
+// Dial connects to the address addr on the given network through either
+// defaultDialer or bypass.
+func (p *proxy_PerHost) Dial(network, addr string) (c net.Conn, err error) {
+	host, _, err := net.SplitHostPort(addr)
+	if err != nil {
+		return nil, err
+	}
+
+	return p.dialerForRequest(host).Dial(network, addr)
+}
+
+func (p *proxy_PerHost) dialerForRequest(host string) proxy_Dialer {
+	if ip := net.ParseIP(host); ip != nil {
+		for _, net := range p.bypassNetworks {
+			if net.Contains(ip) {
+				return p.bypass
+			}
+		}
+		for _, bypassIP := range p.bypassIPs {
+			if bypassIP.Equal(ip) {
+				return p.bypass
+			}
+		}
+		return p.def
+	}
+
+	for _, zone := range p.bypassZones {
+		if strings.HasSuffix(host, zone) {
+			return p.bypass
+		}
+		if host == zone[1:] {
+			// For a zone ".example.com", we match "example.com"
+			// too.
+			return p.bypass
+		}
+	}
+	for _, bypassHost := range p.bypassHosts {
+		if bypassHost == host {
+			return p.bypass
+		}
+	}
+	return p.def
+}
+
+// AddFromString parses a string that contains comma-separated values
+// specifying hosts that should use the bypass proxy. Each value is either an
+// IP address, a CIDR range, a zone (*.example.com) or a host name
+// (localhost). A best effort is made to parse the string and errors are
+// ignored.
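+// For example (illustrative): "10.0.0.0/8,*.example.com,localhost".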
+func (p *proxy_PerHost) AddFromString(s string) {
+	hosts := strings.Split(s, ",")
+	for _, host := range hosts {
+		host = strings.TrimSpace(host)
+		if len(host) == 0 {
+			continue
+		}
+		if strings.Contains(host, "/") {
+			// We assume that it's a CIDR address like 127.0.0.0/8
+			if _, net, err := net.ParseCIDR(host); err == nil {
+				p.AddNetwork(net)
+			}
+			continue
+		}
+		if ip := net.ParseIP(host); ip != nil {
+			p.AddIP(ip)
+			continue
+		}
+		if strings.HasPrefix(host, "*.") {
+			p.AddZone(host[1:])
+			continue
+		}
+		p.AddHost(host)
+	}
+}
+
+// AddIP specifies an IP address that will use the bypass proxy. Note that
+// this will only take effect if a literal IP address is dialed. A connection
+// to a named host will never match an IP.
+func (p *proxy_PerHost) AddIP(ip net.IP) {
+	p.bypassIPs = append(p.bypassIPs, ip)
+}
+
+// AddNetwork specifies an IP range that will use the bypass proxy. Note that
+// this will only take effect if a literal IP address is dialed. A connection
+// to a named host will never match.
+func (p *proxy_PerHost) AddNetwork(net *net.IPNet) {
+	p.bypassNetworks = append(p.bypassNetworks, net)
+}
+
+// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of
+// "example.com" matches "example.com" and all of its subdomains.
+func (p *proxy_PerHost) AddZone(zone string) {
+	if strings.HasSuffix(zone, ".") {
+		zone = zone[:len(zone)-1]
+	}
+	if !strings.HasPrefix(zone, ".") {
+		zone = "." + zone
+	}
+	p.bypassZones = append(p.bypassZones, zone)
+}
+
+// AddHost specifies a host name that will use the bypass proxy.
+func (p *proxy_PerHost) AddHost(host string) {
+	if strings.HasSuffix(host, ".") {
+		host = host[:len(host)-1]
+	}
+	p.bypassHosts = append(p.bypassHosts, host)
+}
+
+// A Dialer is a means to establish a connection.
+type proxy_Dialer interface {
+	// Dial connects to the given address via the proxy.
+	Dial(network, addr string) (c net.Conn, err error)
+}
+
+// Auth contains authentication parameters that specific Dialers may require.
+type proxy_Auth struct {
+	User, Password string
+}
+
+// FromEnvironment returns the dialer specified by the proxy related variables in
+// the environment.
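+// For example (illustrative), ALL_PROXY=socks5://127.0.0.1:1080 selects a SOCKS5
+// dialer, and NO_PROXY=localhost,*.internal bypasses it for those hosts.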
+func proxy_FromEnvironment() proxy_Dialer {
+	allProxy := proxy_allProxyEnv.Get()
+	if len(allProxy) == 0 {
+		return proxy_Direct
+	}
+
+	proxyURL, err := url.Parse(allProxy)
+	if err != nil {
+		return proxy_Direct
+	}
+	proxy, err := proxy_FromURL(proxyURL, proxy_Direct)
+	if err != nil {
+		return proxy_Direct
+	}
+
+	noProxy := proxy_noProxyEnv.Get()
+	if len(noProxy) == 0 {
+		return proxy
+	}
+
+	perHost := proxy_NewPerHost(proxy, proxy_Direct)
+	perHost.AddFromString(noProxy)
+	return perHost
+}
+
+// proxySchemes is a map from URL schemes to a function that creates a Dialer
+// from a URL with such a scheme.
+var proxy_proxySchemes map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error)
+
+// RegisterDialerType takes a URL scheme and a function to generate Dialers from
+// a URL with that scheme and a forwarding Dialer. Registered schemes are used
+// by FromURL.
+func proxy_RegisterDialerType(scheme string, f func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) {
+	if proxy_proxySchemes == nil {
+		proxy_proxySchemes = make(map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error))
+	}
+	proxy_proxySchemes[scheme] = f
+}
+
+// FromURL returns a Dialer given a URL specification and an underlying
+// Dialer for it to make network requests.
+func proxy_FromURL(u *url.URL, forward proxy_Dialer) (proxy_Dialer, error) {
+	var auth *proxy_Auth
+	if u.User != nil {
+		auth = new(proxy_Auth)
+		auth.User = u.User.Username()
+		if p, ok := u.User.Password(); ok {
+			auth.Password = p
+		}
+	}
+
+	switch u.Scheme {
+	case "socks5":
+		return proxy_SOCKS5("tcp", u.Host, auth, forward)
+	}
+
+	// If the scheme doesn't match any of the built-in schemes, see if it
+	// was registered by another package.
+	if proxy_proxySchemes != nil {
+		if f, ok := proxy_proxySchemes[u.Scheme]; ok {
+			return f(u, forward)
+		}
+	}
+
+	return nil, errors.New("proxy: unknown scheme: " + u.Scheme)
+}
+
+var (
+	proxy_allProxyEnv = &proxy_envOnce{
+		names: []string{"ALL_PROXY", "all_proxy"},
+	}
+	proxy_noProxyEnv = &proxy_envOnce{
+		names: []string{"NO_PROXY", "no_proxy"},
+	}
+)
+
+// envOnce looks up an environment variable (optionally by multiple
+// names) once. It mitigates expensive lookups on some platforms
+// (e.g. Windows).
+// (Borrowed from net/http/transport.go)
+type proxy_envOnce struct {
+	names []string
+	once  sync.Once
+	val   string
+}
+
+func (e *proxy_envOnce) Get() string {
+	e.once.Do(e.init)
+	return e.val
+}
+
+func (e *proxy_envOnce) init() {
+	for _, n := range e.names {
+		e.val = os.Getenv(n)
+		if e.val != "" {
+			return
+		}
+	}
+}
+
+// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address
+// with an optional username and password. See RFC 1928 and RFC 1929.
+func proxy_SOCKS5(network, addr string, auth *proxy_Auth, forward proxy_Dialer) (proxy_Dialer, error) {
+	s := &proxy_socks5{
+		network: network,
+		addr:    addr,
+		forward: forward,
+	}
+	if auth != nil {
+		s.user = auth.User
+		s.password = auth.Password
+	}
+
+	return s, nil
+}
+
+type proxy_socks5 struct {
+	user, password string
+	network, addr  string
+	forward        proxy_Dialer
+}
+
+const proxy_socks5Version = 5
+
+const (
+	proxy_socks5AuthNone     = 0
+	proxy_socks5AuthPassword = 2
+)
+
+const proxy_socks5Connect = 1
+
+const (
+	proxy_socks5IP4    = 1
+	proxy_socks5Domain = 3
+	proxy_socks5IP6    = 4
+)
+
+var proxy_socks5Errors = []string{
+	"",
+	"general failure",
+	"connection forbidden",
+	"network unreachable",
+	"host unreachable",
+	"connection refused",
+	"TTL expired",
+	"command not supported",
+	"address type not supported",
+}
+
+// Dial connects to the address addr on the given network via the SOCKS5 proxy.
+func (s *proxy_socks5) Dial(network, addr string) (net.Conn, error) {
+	switch network {
+	case "tcp", "tcp6", "tcp4":
+	default:
+		return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network)
+	}
+
+	conn, err := s.forward.Dial(s.network, s.addr)
+	if err != nil {
+		return nil, err
+	}
+	if err := s.connect(conn, addr); err != nil {
+		conn.Close()
+		return nil, err
+	}
+	return conn, nil
+}
+
+// connect takes an existing connection to a socks5 proxy server,
+// and commands the server to extend that connection to target,
+// which must be a canonical address with a host and port.
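+//
+// The connect request wire format (RFC 1928, shown for illustration) is
+// VER CMD RSV ATYP DST.ADDR DST.PORT; for example.com:80 with a domain-name
+// address this is 05 01 00 03, a length-prefixed host, then 00 50.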
+func (s *proxy_socks5) connect(conn net.Conn, target string) error {
+	host, portStr, err := net.SplitHostPort(target)
+	if err != nil {
+		return err
+	}
+
+	port, err := strconv.Atoi(portStr)
+	if err != nil {
+		return errors.New("proxy: failed to parse port number: " + portStr)
+	}
+	if port < 1 || port > 0xffff {
+		return errors.New("proxy: port number out of range: " + portStr)
+	}
+
+	// the size here is just an estimate
+	buf := make([]byte, 0, 6+len(host))
+
+	buf = append(buf, proxy_socks5Version)
+	if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 {
+		buf = append(buf, 2 /* num auth methods */, proxy_socks5AuthNone, proxy_socks5AuthPassword)
+	} else {
+		buf = append(buf, 1 /* num auth methods */, proxy_socks5AuthNone)
+	}
+
+	if _, err := conn.Write(buf); err != nil {
+		return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error())
+	}
+
+	if _, err := io.ReadFull(conn, buf[:2]); err != nil {
+		return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+	}
+	if buf[0] != 5 {
+		return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0])))
+	}
+	if buf[1] == 0xff {
+		return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication")
+	}
+
+	// See RFC 1929
+	if buf[1] == proxy_socks5AuthPassword {
+		buf = buf[:0]
+		buf = append(buf, 1 /* password protocol version */)
+		buf = append(buf, uint8(len(s.user)))
+		buf = append(buf, s.user...)
+		buf = append(buf, uint8(len(s.password)))
+		buf = append(buf, s.password...)
+
+		if _, err := conn.Write(buf); err != nil {
+			return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
+		}
+
+		if _, err := io.ReadFull(conn, buf[:2]); err != nil {
+			return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+		}
+
+		if buf[1] != 0 {
+			return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password")
+		}
+	}
+
+	buf = buf[:0]
+	buf = append(buf, proxy_socks5Version, proxy_socks5Connect, 0 /* reserved */)
+
+	if ip := net.ParseIP(host); ip != nil {
+		if ip4 := ip.To4(); ip4 != nil {
+			buf = append(buf, proxy_socks5IP4)
+			ip = ip4
+		} else {
+			buf = append(buf, proxy_socks5IP6)
+		}
+		buf = append(buf, ip...)
+	} else {
+		if len(host) > 255 {
+			return errors.New("proxy: destination host name too long: " + host)
+		}
+		buf = append(buf, proxy_socks5Domain)
+		buf = append(buf, byte(len(host)))
+		buf = append(buf, host...)
+	}
+	buf = append(buf, byte(port>>8), byte(port))
+
+	if _, err := conn.Write(buf); err != nil {
+		return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
+	}
+
+	if _, err := io.ReadFull(conn, buf[:4]); err != nil {
+		return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+	}
+
+	failure := "unknown error"
+	if int(buf[1]) < len(proxy_socks5Errors) {
+		failure = proxy_socks5Errors[buf[1]]
+	}
+
+	if len(failure) > 0 {
+		return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure)
+	}
+
+	bytesToDiscard := 0
+	switch buf[3] {
+	case proxy_socks5IP4:
+		bytesToDiscard = net.IPv4len
+	case proxy_socks5IP6:
+		bytesToDiscard = net.IPv6len
+	case proxy_socks5Domain:
+		_, err := io.ReadFull(conn, buf[:1])
+		if err != nil {
+			return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+		}
+		bytesToDiscard = int(buf[0])
+	default:
+		return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr)
+	}
+
+	if cap(buf) < bytesToDiscard {
+		buf = make([]byte, bytesToDiscard)
+	} else {
+		buf = buf[:bytesToDiscard]
+	}
+	if _, err := io.ReadFull(conn, buf); err != nil {
+		return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+	}
+
+	// Also need to discard the port number
+	if _, err := io.ReadFull(conn, buf[:2]); err != nil {
+		return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/gregjones/httpcache/.travis.yml b/vendor/github.com/gregjones/httpcache/.travis.yml
new file mode 100644
index 0000000..b5ffbe0
--- /dev/null
+++ b/vendor/github.com/gregjones/httpcache/.travis.yml
@@ -0,0 +1,19 @@
+sudo: false
+language: go
+go:
+  - 1.6.x
+  - 1.7.x
+  - 1.8.x
+  - 1.9.x
+  - master
+matrix:
+  allow_failures:
+    - go: master
+  fast_finish: true
+install:
+  - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
+script:
+  - go get -t -v ./...
+  - diff -u <(echo -n) <(gofmt -d .)
+  - go tool vet .
+  - go test -v -race ./...
diff --git a/vendor/github.com/gregjones/httpcache/LICENSE.txt b/vendor/github.com/gregjones/httpcache/LICENSE.txt
new file mode 100644
index 0000000..81316be
--- /dev/null
+++ b/vendor/github.com/gregjones/httpcache/LICENSE.txt
@@ -0,0 +1,7 @@
+Copyright © 2012 Greg Jones (greg.jones@gmail.com)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/gregjones/httpcache/README.md b/vendor/github.com/gregjones/httpcache/README.md
new file mode 100644
index 0000000..09c9e7c
--- /dev/null
+++ b/vendor/github.com/gregjones/httpcache/README.md
@@ -0,0 +1,25 @@
+httpcache
+=========
+
+[![Build Status](https://travis-ci.org/gregjones/httpcache.svg?branch=master)](https://travis-ci.org/gregjones/httpcache) [![GoDoc](https://godoc.org/github.com/gregjones/httpcache?status.svg)](https://godoc.org/github.com/gregjones/httpcache)
+
+Package httpcache provides a http.RoundTripper implementation that works as a mostly [RFC 7234](https://tools.ietf.org/html/rfc7234) compliant cache for http responses.
+
+It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client and not for a shared proxy).
+
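+A minimal usage sketch (the URL is illustrative; error handling is abbreviated):
+
+```go
+package main
+
+import (
+	"io/ioutil"
+	"log"
+
+	"github.com/gregjones/httpcache"
+)
+
+func main() {
+	// An http.Client whose RoundTripper serves repeat requests from an in-memory cache.
+	client := httpcache.NewMemoryCacheTransport().Client()
+	resp, err := client.Get("https://example.com/")
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer resp.Body.Close()
+	body, _ := ioutil.ReadAll(resp.Body)
+	log.Printf("read %d bytes (X-From-Cache=%q)", len(body), resp.Header.Get(httpcache.XFromCache))
+}
+```
+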
+Cache Backends
+--------------
+
+- The built-in 'memory' cache stores responses in an in-memory map.
+- [`github.com/gregjones/httpcache/diskcache`](https://github.com/gregjones/httpcache/tree/master/diskcache) provides a filesystem-backed cache using the [diskv](https://github.com/peterbourgon/diskv) library.
+- [`github.com/gregjones/httpcache/memcache`](https://github.com/gregjones/httpcache/tree/master/memcache) provides memcache implementations, for both App Engine and 'normal' memcache servers.
+- [`sourcegraph.com/sourcegraph/s3cache`](https://sourcegraph.com/github.com/sourcegraph/s3cache) uses Amazon S3 for storage.
+- [`github.com/gregjones/httpcache/leveldbcache`](https://github.com/gregjones/httpcache/tree/master/leveldbcache) provides a filesystem-backed cache using [leveldb](https://github.com/syndtr/goleveldb/leveldb).
+- [`github.com/die-net/lrucache`](https://github.com/die-net/lrucache) provides an in-memory cache that will evict least-recently used entries.
+- [`github.com/die-net/lrucache/twotier`](https://github.com/die-net/lrucache/tree/master/twotier) allows caches to be combined, for example to use lrucache above with a persistent disk-cache.
+- [`github.com/birkelund/boltdbcache`](https://github.com/birkelund/boltdbcache) provides a BoltDB implementation (based on the [bbolt](https://github.com/coreos/bbolt) fork).
+
+License
+-------
+
+-	[MIT License](LICENSE.txt)
diff --git a/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go b/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go
new file mode 100644
index 0000000..42e3129
--- /dev/null
+++ b/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go
@@ -0,0 +1,61 @@
+// Package diskcache provides an implementation of httpcache.Cache that uses the diskv package
+// to supplement an in-memory map with persistent storage.
+//
+package diskcache
+
+import (
+	"bytes"
+	"crypto/md5"
+	"encoding/hex"
+	"io"
+
+	"github.com/peterbourgon/diskv"
+)
+
+// Cache is an implementation of httpcache.Cache that supplements the in-memory map with persistent storage
+type Cache struct {
+	d *diskv.Diskv
+}
+
+// Get returns the response corresponding to key if present
+func (c *Cache) Get(key string) (resp []byte, ok bool) {
+	key = keyToFilename(key)
+	resp, err := c.d.Read(key)
+	if err != nil {
+		return []byte{}, false
+	}
+	return resp, true
+}
+
+// Set saves a response to the cache as key
+func (c *Cache) Set(key string, resp []byte) {
+	key = keyToFilename(key)
+	c.d.WriteStream(key, bytes.NewReader(resp), true)
+}
+
+// Delete removes the response with key from the cache
+func (c *Cache) Delete(key string) {
+	key = keyToFilename(key)
+	c.d.Erase(key)
+}
+
+func keyToFilename(key string) string {
+	h := md5.New()
+	io.WriteString(h, key)
+	return hex.EncodeToString(h.Sum(nil))
+}
+
+// New returns a new Cache that will store files in basePath
+func New(basePath string) *Cache {
+	return &Cache{
+		d: diskv.New(diskv.Options{
+			BasePath:     basePath,
+			CacheSizeMax: 100 * 1024 * 1024, // 100MB
+		}),
+	}
+}
+
+// NewWithDiskv returns a new Cache using the provided Diskv as underlying
+// storage.
+func NewWithDiskv(d *diskv.Diskv) *Cache {
+	return &Cache{d}
+}
diff --git a/vendor/github.com/gregjones/httpcache/httpcache.go b/vendor/github.com/gregjones/httpcache/httpcache.go
new file mode 100644
index 0000000..f6a2ec4
--- /dev/null
+++ b/vendor/github.com/gregjones/httpcache/httpcache.go
@@ -0,0 +1,551 @@
+// Package httpcache provides a http.RoundTripper implementation that works as a
+// mostly RFC-compliant cache for http responses.
+//
+// It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client
+// and not for a shared proxy).
+//
+package httpcache
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/http/httputil"
+	"strings"
+	"sync"
+	"time"
+)
+
+const (
+	stale = iota
+	fresh
+	transparent
+	// XFromCache is the header added to responses that are returned from the cache
+	XFromCache = "X-From-Cache"
+)
+
+// A Cache interface is used by the Transport to store and retrieve responses.
+type Cache interface {
+	// Get returns the []byte representation of a cached response and a bool
+	// set to true if the value isn't empty
+	Get(key string) (responseBytes []byte, ok bool)
+	// Set stores the []byte representation of a response against a key
+	Set(key string, responseBytes []byte)
+	// Delete removes the value associated with the key
+	Delete(key string)
+}
+
+// cacheKey returns the cache key for req.
+func cacheKey(req *http.Request) string {
+	if req.Method == http.MethodGet {
+		return req.URL.String()
+	} else {
+		return req.Method + " " + req.URL.String()
+	}
+}
+
+// CachedResponse returns the cached http.Response for req if present, and nil
+// otherwise.
+func CachedResponse(c Cache, req *http.Request) (resp *http.Response, err error) {
+	cachedVal, ok := c.Get(cacheKey(req))
+	if !ok {
+		return
+	}
+
+	b := bytes.NewBuffer(cachedVal)
+	return http.ReadResponse(bufio.NewReader(b), req)
+}
+
+// MemoryCache is an implementation of Cache that stores responses in an in-memory map.
+type MemoryCache struct {
+	mu    sync.RWMutex
+	items map[string][]byte
+}
+
+// Get returns the []byte representation of the response and true if present, false if not
+func (c *MemoryCache) Get(key string) (resp []byte, ok bool) {
+	c.mu.RLock()
+	resp, ok = c.items[key]
+	c.mu.RUnlock()
+	return resp, ok
+}
+
+// Set saves response resp to the cache with key
+func (c *MemoryCache) Set(key string, resp []byte) {
+	c.mu.Lock()
+	c.items[key] = resp
+	c.mu.Unlock()
+}
+
+// Delete removes key from the cache
+func (c *MemoryCache) Delete(key string) {
+	c.mu.Lock()
+	delete(c.items, key)
+	c.mu.Unlock()
+}
+
+// NewMemoryCache returns a new Cache that will store items in an in-memory map
+func NewMemoryCache() *MemoryCache {
+	c := &MemoryCache{items: map[string][]byte{}}
+	return c
+}
+
+// Transport is an implementation of http.RoundTripper that will return values from a cache
+// where possible (avoiding a network request) and will additionally add validators (etag/if-modified-since)
+// to repeated requests allowing servers to return 304 / Not Modified
+type Transport struct {
+	// The RoundTripper interface actually used to make requests
+	// If nil, http.DefaultTransport is used
+	Transport http.RoundTripper
+	Cache     Cache
+	// If true, responses returned from the cache will be given an extra header, X-From-Cache
+	MarkCachedResponses bool
+}
+
+// NewTransport returns a new Transport with the
+// provided Cache implementation and MarkCachedResponses set to true
+func NewTransport(c Cache) *Transport {
+	return &Transport{Cache: c, MarkCachedResponses: true}
+}
+
+// Client returns an *http.Client that caches responses.
+func (t *Transport) Client() *http.Client {
+	return &http.Client{Transport: t}
+}
+
+// varyMatches will return false unless all of the cached values for the headers listed in Vary
+// match the new request
+func varyMatches(cachedResp *http.Response, req *http.Request) bool {
+	for _, header := range headerAllCommaSepValues(cachedResp.Header, "vary") {
+		header = http.CanonicalHeaderKey(header)
+		if header != "" && req.Header.Get(header) != cachedResp.Header.Get("X-Varied-"+header) {
+			return false
+		}
+	}
+	return true
+}
+
+// RoundTrip takes a Request and returns a Response
+//
+// If there is a fresh Response already in cache, then it will be returned without connecting to
+// the server.
+//
+// If there is a stale Response, then any validators it contains will be set on the new request
+// to give the server a chance to respond with NotModified. If this happens, then the cached Response
+// will be returned.
+func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
+	cacheKey := cacheKey(req)
+	cacheable := (req.Method == "GET" || req.Method == "HEAD") && req.Header.Get("range") == ""
+	var cachedResp *http.Response
+	if cacheable {
+		cachedResp, err = CachedResponse(t.Cache, req)
+	} else {
+		// Need to invalidate an existing value
+		t.Cache.Delete(cacheKey)
+	}
+
+	transport := t.Transport
+	if transport == nil {
+		transport = http.DefaultTransport
+	}
+
+	if cacheable && cachedResp != nil && err == nil {
+		if t.MarkCachedResponses {
+			cachedResp.Header.Set(XFromCache, "1")
+		}
+
+		if varyMatches(cachedResp, req) {
+			// Can only use cached value if the new request doesn't Vary significantly
+			freshness := getFreshness(cachedResp.Header, req.Header)
+			if freshness == fresh {
+				return cachedResp, nil
+			}
+
+			if freshness == stale {
+				var req2 *http.Request
+				// Add validators if caller hasn't already done so
+				etag := cachedResp.Header.Get("etag")
+				if etag != "" && req.Header.Get("etag") == "" {
+					req2 = cloneRequest(req)
+					req2.Header.Set("if-none-match", etag)
+				}
+				lastModified := cachedResp.Header.Get("last-modified")
+				if lastModified != "" && req.Header.Get("last-modified") == "" {
+					if req2 == nil {
+						req2 = cloneRequest(req)
+					}
+					req2.Header.Set("if-modified-since", lastModified)
+				}
+				if req2 != nil {
+					req = req2
+				}
+			}
+		}
+
+		resp, err = transport.RoundTrip(req)
+		if err == nil && req.Method == "GET" && resp.StatusCode == http.StatusNotModified {
+			// Replace the 304 response with the one from cache, but update with some new headers
+			endToEndHeaders := getEndToEndHeaders(resp.Header)
+			for _, header := range endToEndHeaders {
+				cachedResp.Header[header] = resp.Header[header]
+			}
+			resp = cachedResp
+		} else if (err != nil || (cachedResp != nil && resp.StatusCode >= 500)) &&
+			req.Method == "GET" && canStaleOnError(cachedResp.Header, req.Header) {
+			// In case of transport failure and stale-if-error activated, returns cached content
+			// when available
+			return cachedResp, nil
+		} else {
+			if err != nil || resp.StatusCode != http.StatusOK {
+				t.Cache.Delete(cacheKey)
+			}
+			if err != nil {
+				return nil, err
+			}
+		}
+	} else {
+		reqCacheControl := parseCacheControl(req.Header)
+		if _, ok := reqCacheControl["only-if-cached"]; ok {
+			resp = newGatewayTimeoutResponse(req)
+		} else {
+			resp, err = transport.RoundTrip(req)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+
+	if cacheable && canStore(parseCacheControl(req.Header), parseCacheControl(resp.Header)) {
+		for _, varyKey := range headerAllCommaSepValues(resp.Header, "vary") {
+			varyKey = http.CanonicalHeaderKey(varyKey)
+			fakeHeader := "X-Varied-" + varyKey
+			reqValue := req.Header.Get(varyKey)
+			if reqValue != "" {
+				resp.Header.Set(fakeHeader, reqValue)
+			}
+		}
+		switch req.Method {
+		case "GET":
+			// Delay caching until EOF is reached.
+			resp.Body = &cachingReadCloser{
+				R: resp.Body,
+				OnEOF: func(r io.Reader) {
+					resp := *resp
+					resp.Body = ioutil.NopCloser(r)
+					respBytes, err := httputil.DumpResponse(&resp, true)
+					if err == nil {
+						t.Cache.Set(cacheKey, respBytes)
+					}
+				},
+			}
+		default:
+			respBytes, err := httputil.DumpResponse(resp, true)
+			if err == nil {
+				t.Cache.Set(cacheKey, respBytes)
+			}
+		}
+	} else {
+		t.Cache.Delete(cacheKey)
+	}
+	return resp, nil
+}
+
+// ErrNoDateHeader indicates that the HTTP headers contained no Date header.
+var ErrNoDateHeader = errors.New("no Date header")
+
+// Date parses and returns the value of the Date header.
+func Date(respHeaders http.Header) (date time.Time, err error) {
+	dateHeader := respHeaders.Get("date")
+	if dateHeader == "" {
+		err = ErrNoDateHeader
+		return
+	}
+
+	return time.Parse(time.RFC1123, dateHeader)
+}
+
+type realClock struct{}
+
+func (c *realClock) since(d time.Time) time.Duration {
+	return time.Since(d)
+}
+
+type timer interface {
+	since(d time.Time) time.Duration
+}
+
+var clock timer = &realClock{}
+
+// getFreshness will return one of fresh/stale/transparent based on the cache-control
+// values of the request and the response
+//
+// fresh indicates the response can be returned
+// stale indicates that the response needs validating before it is returned
+// transparent indicates the response should not be used to fulfil the request
+//
+// Because this is only a private cache, 'public' and 'private' in cache-control aren't
+// significant. Similarly, s-maxage isn't used.
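+//
+// For example (illustrative): a response carrying "Cache-Control: max-age=60"
+// whose Date header is 30 seconds old is fresh; the same response at 90 seconds
+// old is stale and will be revalidated.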
+func getFreshness(respHeaders, reqHeaders http.Header) (freshness int) {
+	respCacheControl := parseCacheControl(respHeaders)
+	reqCacheControl := parseCacheControl(reqHeaders)
+	if _, ok := reqCacheControl["no-cache"]; ok {
+		return transparent
+	}
+	if _, ok := respCacheControl["no-cache"]; ok {
+		return stale
+	}
+	if _, ok := reqCacheControl["only-if-cached"]; ok {
+		return fresh
+	}
+
+	date, err := Date(respHeaders)
+	if err != nil {
+		return stale
+	}
+	currentAge := clock.since(date)
+
+	var lifetime time.Duration
+	var zeroDuration time.Duration
+
+	// If a response includes both an Expires header and a max-age directive,
+	// the max-age directive overrides the Expires header, even if the Expires header is more restrictive.
+	if maxAge, ok := respCacheControl["max-age"]; ok {
+		lifetime, err = time.ParseDuration(maxAge + "s")
+		if err != nil {
+			lifetime = zeroDuration
+		}
+	} else {
+		expiresHeader := respHeaders.Get("Expires")
+		if expiresHeader != "" {
+			expires, err := time.Parse(time.RFC1123, expiresHeader)
+			if err != nil {
+				lifetime = zeroDuration
+			} else {
+				lifetime = expires.Sub(date)
+			}
+		}
+	}
+
+	if maxAge, ok := reqCacheControl["max-age"]; ok {
+		// the client is willing to accept a response whose age is no greater than the specified time in seconds
+		lifetime, err = time.ParseDuration(maxAge + "s")
+		if err != nil {
+			lifetime = zeroDuration
+		}
+	}
+	if minfresh, ok := reqCacheControl["min-fresh"]; ok {
+		//  the client wants a response that will still be fresh for at least the specified number of seconds.
+		minfreshDuration, err := time.ParseDuration(minfresh + "s")
+		if err == nil {
+			currentAge = time.Duration(currentAge + minfreshDuration)
+		}
+	}
+
+	if maxstale, ok := reqCacheControl["max-stale"]; ok {
+		// Indicates that the client is willing to accept a response that has exceeded its expiration time.
+		// If max-stale is assigned a value, then the client is willing to accept a response that has exceeded
+		// its expiration time by no more than the specified number of seconds.
+		// If no value is assigned to max-stale, then the client is willing to accept a stale response of any age.
+		//
+		// Responses served only because of a max-stale value are supposed to have a Warning header added to them,
+// but that seems like a hassle, and is it actually useful? If so, then there needs to be a different
+		// return-value available here.
+		if maxstale == "" {
+			return fresh
+		}
+		maxstaleDuration, err := time.ParseDuration(maxstale + "s")
+		if err == nil {
+			currentAge = time.Duration(currentAge - maxstaleDuration)
+		}
+	}
+
+	if lifetime > currentAge {
+		return fresh
+	}
+
+	return stale
+}
+
+// Returns true if either the request or the response includes the stale-if-error
+// cache control extension: https://tools.ietf.org/html/rfc5861
+func canStaleOnError(respHeaders, reqHeaders http.Header) bool {
+	respCacheControl := parseCacheControl(respHeaders)
+	reqCacheControl := parseCacheControl(reqHeaders)
+
+	var err error
+	lifetime := time.Duration(-1)
+
+	if staleMaxAge, ok := respCacheControl["stale-if-error"]; ok {
+		if staleMaxAge != "" {
+			lifetime, err = time.ParseDuration(staleMaxAge + "s")
+			if err != nil {
+				return false
+			}
+		} else {
+			return true
+		}
+	}
+	if staleMaxAge, ok := reqCacheControl["stale-if-error"]; ok {
+		if staleMaxAge != "" {
+			lifetime, err = time.ParseDuration(staleMaxAge + "s")
+			if err != nil {
+				return false
+			}
+		} else {
+			return true
+		}
+	}
+
+	if lifetime >= 0 {
+		date, err := Date(respHeaders)
+		if err != nil {
+			return false
+		}
+		currentAge := clock.since(date)
+		if lifetime > currentAge {
+			return true
+		}
+	}
+
+	return false
+}
+
+func getEndToEndHeaders(respHeaders http.Header) []string {
+	// These headers are always hop-by-hop
+	hopByHopHeaders := map[string]struct{}{
+		"Connection":          struct{}{},
+		"Keep-Alive":          struct{}{},
+		"Proxy-Authenticate":  struct{}{},
+		"Proxy-Authorization": struct{}{},
+		"Te":                struct{}{},
+		"Trailers":          struct{}{},
+		"Transfer-Encoding": struct{}{},
+		"Upgrade":           struct{}{},
+	}
+
+	for _, extra := range strings.Split(respHeaders.Get("connection"), ",") {
+		// any header listed in connection, if present, is also considered hop-by-hop
+		if strings.Trim(extra, " ") != "" {
+			hopByHopHeaders[http.CanonicalHeaderKey(extra)] = struct{}{}
+		}
+	}
+	endToEndHeaders := []string{}
+	for respHeader := range respHeaders {
+		if _, ok := hopByHopHeaders[respHeader]; !ok {
+			endToEndHeaders = append(endToEndHeaders, respHeader)
+		}
+	}
+	return endToEndHeaders
+}
+
+func canStore(reqCacheControl, respCacheControl cacheControl) (canStore bool) {
+	if _, ok := respCacheControl["no-store"]; ok {
+		return false
+	}
+	if _, ok := reqCacheControl["no-store"]; ok {
+		return false
+	}
+	return true
+}
+
+func newGatewayTimeoutResponse(req *http.Request) *http.Response {
+	var braw bytes.Buffer
+	braw.WriteString("HTTP/1.1 504 Gateway Timeout\r\n\r\n")
+	resp, err := http.ReadResponse(bufio.NewReader(&braw), req)
+	if err != nil {
+		panic(err)
+	}
+	return resp
+}
+
+// cloneRequest returns a clone of the provided *http.Request.
+// The clone is a shallow copy of the struct and its Header map.
+// (This function copyright goauth2 authors: https://code.google.com/p/goauth2)
+func cloneRequest(r *http.Request) *http.Request {
+	// shallow copy of the struct
+	r2 := new(http.Request)
+	*r2 = *r
+	// deep copy of the Header
+	r2.Header = make(http.Header)
+	for k, s := range r.Header {
+		r2.Header[k] = s
+	}
+	return r2
+}
+
+type cacheControl map[string]string
+
+func parseCacheControl(headers http.Header) cacheControl {
+	cc := cacheControl{}
+	ccHeader := headers.Get("Cache-Control")
+	for _, part := range strings.Split(ccHeader, ",") {
+		part = strings.Trim(part, " ")
+		if part == "" {
+			continue
+		}
+		if strings.ContainsRune(part, '=') {
+			keyval := strings.Split(part, "=")
+			cc[strings.Trim(keyval[0], " ")] = strings.Trim(keyval[1], ",")
+		} else {
+			cc[part] = ""
+		}
+	}
+	return cc
+}
+
+// headerAllCommaSepValues returns all comma-separated values (each
+// with whitespace trimmed) for header name in headers. According to
+// Section 4.2 of the HTTP/1.1 spec
+// (http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2),
+// values from multiple occurrences of a header should be concatenated, if
+// the header's value is a comma-separated list.
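+//
+// For example (illustrative), two Vary header lines "Accept-Encoding" and
+// "Cookie, User-Agent" yield ["Accept-Encoding", "Cookie", "User-Agent"].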
+func headerAllCommaSepValues(headers http.Header, name string) []string {
+	var vals []string
+	for _, val := range headers[http.CanonicalHeaderKey(name)] {
+		fields := strings.Split(val, ",")
+		for i, f := range fields {
+			fields[i] = strings.TrimSpace(f)
+		}
+		vals = append(vals, fields...)
+	}
+	return vals
+}
+
+// cachingReadCloser is a wrapper around ReadCloser R that calls OnEOF
+// handler with a full copy of the content read from R when EOF is
+// reached.
+type cachingReadCloser struct {
+	// Underlying ReadCloser.
+	R io.ReadCloser
+	// OnEOF is called with a copy of the content of R when EOF is reached.
+	OnEOF func(io.Reader)
+
+	buf bytes.Buffer // buf stores a copy of the content of R.
+}
+
+// Read reads the next len(p) bytes from R or until R is drained. The
+// return value n is the number of bytes read. If R has no data to
+// return, err is io.EOF and OnEOF is called with a full copy of what
+// has been read so far.
+func (r *cachingReadCloser) Read(p []byte) (n int, err error) {
+	n, err = r.R.Read(p)
+	r.buf.Write(p[:n])
+	if err == io.EOF {
+		r.OnEOF(bytes.NewReader(r.buf.Bytes()))
+	}
+	return n, err
+}
+
+func (r *cachingReadCloser) Close() error {
+	return r.R.Close()
+}
+
+// NewMemoryCacheTransport returns a new Transport using the in-memory cache implementation
+func NewMemoryCacheTransport() *Transport {
+	c := NewMemoryCache()
+	t := NewTransport(c)
+	return t
+}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/.gitignore b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/.gitignore
new file mode 100644
index 0000000..c198e6a
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/.gitignore
@@ -0,0 +1,202 @@
+# Created by .ignore support plugin (hsz.mobi)
+### Go template
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+### Windows template
+# Windows image file caches
+Thumbs.db
+ehthumbs.db
+
+# Folder config file
+Desktop.ini
+
+# Recycle Bin used on file shares
+$RECYCLE.BIN/
+
+# Windows Installer files
+*.cab
+*.msi
+*.msm
+*.msp
+
+# Windows shortcuts
+*.lnk
+### Kate template
+# Swap Files #
+.*.kate-swp
+.swp.*
+### SublimeText template
+# cache files for sublime text
+*.tmlanguage.cache
+*.tmPreferences.cache
+*.stTheme.cache
+
+# workspace files are user-specific
+*.sublime-workspace
+
+# project files should be checked into the repository, unless a significant
+# proportion of contributors will probably not be using SublimeText
+# *.sublime-project
+
+# sftp configuration file
+sftp-config.json
+### Linux template
+*~
+
+# temporary files which can be created if a process still has a handle open of a deleted file
+.fuse_hidden*
+
+# KDE directory preferences
+.directory
+
+# Linux trash folder which might appear on any partition or disk
+.Trash-*
+### JetBrains template
+# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm
+# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
+
+# User-specific stuff:
+.idea
+.idea/tasks.xml
+.idea/dictionaries
+.idea/vcs.xml
+.idea/jsLibraryMappings.xml
+
+# Sensitive or high-churn files:
+.idea/dataSources.ids
+.idea/dataSources.xml
+.idea/dataSources.local.xml
+.idea/sqlDataSources.xml
+.idea/dynamic.xml
+.idea/uiDesigner.xml
+
+# Gradle:
+.idea/gradle.xml
+.idea/libraries
+
+# Mongo Explorer plugin:
+.idea/mongoSettings.xml
+
+## File-based project format:
+*.iws
+
+## Plugin-specific files:
+
+# IntelliJ
+/out/
+
+# mpeltonen/sbt-idea plugin
+.idea_modules/
+
+# JIRA plugin
+atlassian-ide-plugin.xml
+
+# Crashlytics plugin (for Android Studio and IntelliJ)
+com_crashlytics_export_strings.xml
+crashlytics.properties
+crashlytics-build.properties
+fabric.properties
+### Xcode template
+# Xcode
+#
+# gitignore contributors: remember to update Global/Xcode.gitignore, Objective-C.gitignore & Swift.gitignore
+
+## Build generated
+build/
+DerivedData/
+
+## Various settings
+*.pbxuser
+!default.pbxuser
+*.mode1v3
+!default.mode1v3
+*.mode2v3
+!default.mode2v3
+*.perspectivev3
+!default.perspectivev3
+xcuserdata/
+
+## Other
+*.moved-aside
+*.xccheckout
+*.xcscmblueprint
+### Eclipse template
+
+.metadata
+bin/
+tmp/
+*.tmp
+*.bak
+*.swp
+*~.nib
+local.properties
+.settings/
+.loadpath
+.recommenders
+
+# Eclipse Core
+.project
+
+# External tool builders
+.externalToolBuilders/
+
+# Locally stored "Eclipse launch configurations"
+*.launch
+
+# PyDev specific (Python IDE for Eclipse)
+*.pydevproject
+
+# CDT-specific (C/C++ Development Tooling)
+.cproject
+
+# JDT-specific (Eclipse Java Development Tools)
+.classpath
+
+# Java annotation processor (APT)
+.factorypath
+
+# PDT-specific (PHP Development Tools)
+.buildpath
+
+# sbteclipse plugin
+.target
+
+# Tern plugin
+.tern-project
+
+# TeXlipse plugin
+.texlipse
+
+# STS (Spring Tool Suite)
+.springBeans
+
+# Code Recommenders
+.recommenders/
+
+
+coverage.txt
+
+#vendor
+vendor/
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/.travis.yml b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/.travis.yml
new file mode 100644
index 0000000..2fc21dd
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/.travis.yml
@@ -0,0 +1,22 @@
+sudo: false
+language: go
+go:
+  - 1.8.x
+env:
+  - DEP_VERSION="0.3.2"
+
+before_install:
+  # Download the binary to bin folder in $GOPATH
+  - curl -L -s https://github.com/golang/dep/releases/download/v${DEP_VERSION}/dep-linux-amd64 -o $GOPATH/bin/dep
+  # Make the binary executable
+  - chmod +x $GOPATH/bin/dep
+
+install:
+  - dep ensure
+
+script:
+ - make checkdocs
+ - make test
+ 
+after_success:
+  - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/CONTRIBUTING.md b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/CONTRIBUTING.md
new file mode 100644
index 0000000..dd52ab8
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/CONTRIBUTING.md
@@ -0,0 +1,20 @@
+# Contributing
+
+We would love to have people submit pull requests and help make `grpc-ecosystem/go-grpc-middleware` even better 👍.
+
+Fork, then clone the repo:
+
+```bash
+git clone git@github.com:your-username/go-grpc-middleware.git
+```    
+
+Before checking in please run the following:
+
+```bash
+make all
+```
+
+This will `vet`, `fmt`, regenerate documentation and run all tests.
+
+
+Push to your fork and open a pull request.
\ No newline at end of file
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/DOC.md b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/DOC.md
new file mode 100644
index 0000000..511d953
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/DOC.md
@@ -0,0 +1,166 @@
+# grpc_middleware
+`import "github.com/grpc-ecosystem/go-grpc-middleware"`
+
+* [Overview](#pkg-overview)
+* [Imported Packages](#pkg-imports)
+* [Index](#pkg-index)
+
+## <a name="pkg-overview">Overview</a>
+`grpc_middleware` is a collection of gRPC middleware packages: interceptors, helpers and tools.
+
+### Middleware
+gRPC is a fantastic RPC framework, which sees a lot of adoption in the Golang world. However, the
+upstream gRPC codebase is relatively bare bones.
+
+This package, and most of its child packages, provides commonly needed middleware for gRPC:
+client-side interceptors for retries, server-side interceptors for input validation and auth,
+functions for chaining said interceptors, metadata convenience methods and more.
+
+### Chaining
+By default, gRPC doesn't allow more than one interceptor on either the client or the
+server side. `grpc_middleware` provides convenient chaining methods: a simple way of
+turning multiple interceptors into a single interceptor.
+
+Here's an example for server chaining:
+
+	myServer := grpc.NewServer(
+	    grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(loggingStream, monitoringStream, authStream)),
+	    grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(loggingUnary, monitoringUnary, authUnary)),
+	)
+
+These interceptors will be executed from left to right: logging, monitoring and auth.
+
+Here's an example for client side chaining:
+
+	clientConn, err = grpc.Dial(
+	    address,
+	    grpc.WithUnaryInterceptor(grpc_middleware.ChainUnaryClient(monitoringClientUnary, retryUnary)),
+	    grpc.WithStreamInterceptor(grpc_middleware.ChainStreamClient(monitoringClientStream, retryStream)),
+	)
+	client = pb_testproto.NewTestServiceClient(clientConn)
+	resp, err := client.PingEmpty(s.ctx, &myservice.Request{Msg: "hello"})
+
+These interceptors will be executed from left to right: monitoring and then retry logic.
+
+The retry interceptor will call every interceptor that follows it whenever a retry happens.
+
+### Writing Your Own
+Implementing your own interceptor is pretty trivial: there are interfaces for that. But the interesting
+bit is exposing common data to handlers (and other middleware), similar to HTTP middleware design.
+For example, you may want to pass the identity of the caller from the auth interceptor all the way
+to the handling function.
+
+A server-side unary interceptor for auth, for instance, looks like:
+
+	func FakeAuthUnaryInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
+	   newCtx := context.WithValue(ctx, "user_id", "john@example.com")
+	   return handler(newCtx, req)
+	}
+
+Unfortunately, it's not as easy for streaming RPCs. These have the `context.Context` embedded within
+the `grpc.ServerStream` object. To pass values through context, a wrapper (`WrappedServerStream`) is
+needed. For example:
+
+	func FakeAuthStreamingInterceptor(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+	   newStream := grpc_middleware.WrapServerStream(stream)
+	   newStream.WrappedContext = context.WithValue(stream.Context(), "user_id", "john@example.com")
+	   return handler(srv, newStream)
+	}
+
+## <a name="pkg-imports">Imported Packages</a>
+
+- [golang.org/x/net/context](https://godoc.org/golang.org/x/net/context)
+- [google.golang.org/grpc](https://godoc.org/google.golang.org/grpc)
+
+## <a name="pkg-index">Index</a>
+* [func ChainStreamClient(interceptors ...grpc.StreamClientInterceptor) grpc.StreamClientInterceptor](#ChainStreamClient)
+* [func ChainStreamServer(interceptors ...grpc.StreamServerInterceptor) grpc.StreamServerInterceptor](#ChainStreamServer)
+* [func ChainUnaryClient(interceptors ...grpc.UnaryClientInterceptor) grpc.UnaryClientInterceptor](#ChainUnaryClient)
+* [func ChainUnaryServer(interceptors ...grpc.UnaryServerInterceptor) grpc.UnaryServerInterceptor](#ChainUnaryServer)
+* [func WithStreamServerChain(interceptors ...grpc.StreamServerInterceptor) grpc.ServerOption](#WithStreamServerChain)
+* [func WithUnaryServerChain(interceptors ...grpc.UnaryServerInterceptor) grpc.ServerOption](#WithUnaryServerChain)
+* [type WrappedServerStream](#WrappedServerStream)
+  * [func WrapServerStream(stream grpc.ServerStream) \*WrappedServerStream](#WrapServerStream)
+  * [func (w \*WrappedServerStream) Context() context.Context](#WrappedServerStream.Context)
+
+#### <a name="pkg-files">Package files</a>
+[chain.go](./chain.go) [doc.go](./doc.go) [wrappers.go](./wrappers.go) 
+
+## <a name="ChainStreamClient">func</a> [ChainStreamClient](./chain.go#L136)
+``` go
+func ChainStreamClient(interceptors ...grpc.StreamClientInterceptor) grpc.StreamClientInterceptor
+```
+ChainStreamClient creates a single interceptor out of a chain of many interceptors.
+
+Execution is done in left-to-right order, including passing of context.
+For example ChainStreamClient(one, two, three) will execute one before two before three.
+
+## <a name="ChainStreamServer">func</a> [ChainStreamServer](./chain.go#L58)
+``` go
+func ChainStreamServer(interceptors ...grpc.StreamServerInterceptor) grpc.StreamServerInterceptor
+```
+ChainStreamServer creates a single interceptor out of a chain of many interceptors.
+
+Execution is done in left-to-right order, including passing of context.
+For example ChainStreamServer(one, two, three) will execute one before two before three.
+If you want to pass context between interceptors, use WrapServerStream.
+
+## <a name="ChainUnaryClient">func</a> [ChainUnaryClient](./chain.go#L97)
+``` go
+func ChainUnaryClient(interceptors ...grpc.UnaryClientInterceptor) grpc.UnaryClientInterceptor
+```
+ChainUnaryClient creates a single interceptor out of a chain of many interceptors.
+
+Execution is done in left-to-right order, including passing of context.
+For example ChainUnaryClient(one, two, three) will execute one before two before three.
+
+## <a name="ChainUnaryServer">func</a> [ChainUnaryServer](./chain.go#L18)
+``` go
+func ChainUnaryServer(interceptors ...grpc.UnaryServerInterceptor) grpc.UnaryServerInterceptor
+```
+ChainUnaryServer creates a single interceptor out of a chain of many interceptors.
+
+Execution is done in left-to-right order, including passing of context.
+For example ChainUnaryServer(one, two, three) will execute one before two before three, and three
+will see context changes of one and two.
+
+## <a name="WithStreamServerChain">func</a> [WithStreamServerChain](./chain.go#L181)
+``` go
+func WithStreamServerChain(interceptors ...grpc.StreamServerInterceptor) grpc.ServerOption
+```
+WithStreamServerChain is a grpc.Server config option that accepts multiple stream interceptors.
+Basically syntactic sugar.
+
+## <a name="WithUnaryServerChain">func</a> [WithUnaryServerChain](./chain.go#L175)
+``` go
+func WithUnaryServerChain(interceptors ...grpc.UnaryServerInterceptor) grpc.ServerOption
+```
+WithUnaryServerChain is a grpc.Server config option that accepts multiple unary interceptors.
+Basically syntactic sugar.
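+
+For instance, a minimal sketch combining this with its stream counterpart (the interceptor
+names are placeholders):
+
+	myServer := grpc.NewServer(
+	    grpc_middleware.WithUnaryServerChain(loggingUnary, authUnary),
+	    grpc_middleware.WithStreamServerChain(loggingStream, authStream),
+	)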
+
+## <a name="WrappedServerStream">type</a> [WrappedServerStream](./wrappers.go#L12-L16)
+``` go
+type WrappedServerStream struct {
+    grpc.ServerStream
+    // WrappedContext is the wrapper's own Context. You can assign it.
+    WrappedContext context.Context
+}
+```
+WrappedServerStream is a thin wrapper around grpc.ServerStream that allows modifying context.
+
+### <a name="WrapServerStream">func</a> [WrapServerStream](./wrappers.go#L24)
+``` go
+func WrapServerStream(stream grpc.ServerStream) *WrappedServerStream
+```
+WrapServerStream returns a ServerStream that has the ability to overwrite context.
+
+### <a name="WrappedServerStream.Context">func</a> (\*WrappedServerStream) [Context](./wrappers.go#L19)
+``` go
+func (w *WrappedServerStream) Context() context.Context
+```
+Context returns the wrapper's WrappedContext, overwriting the nested grpc.ServerStream.Context().
+
+- - -
+Generated by [godoc2ghmd](https://github.com/GandalfUK/godoc2ghmd)
\ No newline at end of file
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/Gopkg.lock b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/Gopkg.lock
new file mode 100644
index 0000000..ebdcb75
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/Gopkg.lock
@@ -0,0 +1,123 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+
+[[projects]]
+  name = "cloud.google.com/go"
+  packages = ["compute/metadata"]
+  revision = "2d3a6656c17a60b0815b7e06ab0be04eacb6e613"
+  version = "v0.16.0"
+
+[[projects]]
+  name = "github.com/davecgh/go-spew"
+  packages = ["spew"]
+  revision = "346938d642f2ec3594ed81d874461961cd0faa76"
+  version = "v1.1.0"
+
+[[projects]]
+  name = "github.com/gogo/protobuf"
+  packages = ["gogoproto","proto","protoc-gen-gogo/descriptor"]
+  revision = "342cbe0a04158f6dcb03ca0079991a51a4248c02"
+  version = "v0.5"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/golang/protobuf"
+  packages = ["jsonpb","proto","ptypes","ptypes/any","ptypes/duration","ptypes/struct","ptypes/timestamp"]
+  revision = "1e59b77b52bf8e4b449a57e6f79f21226d571845"
+
+[[projects]]
+  name = "github.com/opentracing/opentracing-go"
+  packages = [".","ext","log","mocktracer"]
+  revision = "1949ddbfd147afd4d964a9f00b24eb291e0e7c38"
+  version = "v1.0.2"
+
+[[projects]]
+  name = "github.com/pmezard/go-difflib"
+  packages = ["difflib"]
+  revision = "792786c7400a136282c1664665ae0a8db921c6c2"
+  version = "v1.0.0"
+
+[[projects]]
+  name = "github.com/sirupsen/logrus"
+  packages = ["."]
+  revision = "f006c2ac4710855cf0f916dd6b77acf6b048dc6e"
+  version = "v1.0.3"
+
+[[projects]]
+  name = "github.com/stretchr/testify"
+  packages = ["assert","require","suite"]
+  revision = "69483b4bd14f5845b5a1e55bca19e954e827f1d0"
+  version = "v1.1.4"
+
+[[projects]]
+  name = "go.uber.org/atomic"
+  packages = ["."]
+  revision = "8474b86a5a6f79c443ce4b2992817ff32cf208b8"
+  version = "v1.3.1"
+
+[[projects]]
+  name = "go.uber.org/multierr"
+  packages = ["."]
+  revision = "3c4937480c32f4c13a875a1829af76c98ca3d40a"
+  version = "v1.1.0"
+
+[[projects]]
+  name = "go.uber.org/zap"
+  packages = [".","buffer","internal/bufferpool","internal/color","internal/exit","zapcore"]
+  revision = "35aad584952c3e7020db7b839f6b102de6271f89"
+  version = "v1.7.1"
+
+[[projects]]
+  branch = "master"
+  name = "golang.org/x/crypto"
+  packages = ["ssh/terminal"]
+  revision = "94eea52f7b742c7cbe0b03b22f0c4c8631ece122"
+
+[[projects]]
+  branch = "master"
+  name = "golang.org/x/net"
+  packages = ["context","context/ctxhttp","http2","http2/hpack","idna","internal/timeseries","lex/httplex","trace"]
+  revision = "a8b9294777976932365dabb6640cf1468d95c70f"
+
+[[projects]]
+  branch = "master"
+  name = "golang.org/x/oauth2"
+  packages = [".","google","internal","jws","jwt"]
+  revision = "f95fa95eaa936d9d87489b15d1d18b97c1ba9c28"
+
+[[projects]]
+  branch = "master"
+  name = "golang.org/x/sys"
+  packages = ["unix","windows"]
+  revision = "13fcbd661c8ececa8807a29b48407d674b1d8ed8"
+
+[[projects]]
+  branch = "master"
+  name = "golang.org/x/text"
+  packages = ["collate","collate/build","internal/colltab","internal/gen","internal/tag","internal/triegen","internal/ucd","language","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable"]
+  revision = "75cc3cad82b5f47d3fb229ddda8c5167da14f294"
+
+[[projects]]
+  name = "google.golang.org/appengine"
+  packages = [".","internal","internal/app_identity","internal/base","internal/datastore","internal/log","internal/modules","internal/remote_api","internal/urlfetch","urlfetch"]
+  revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a"
+  version = "v1.0.0"
+
+[[projects]]
+  branch = "master"
+  name = "google.golang.org/genproto"
+  packages = ["googleapis/rpc/status"]
+  revision = "7f0da29060c682909f650ad8ed4e515bd74fa12a"
+
+[[projects]]
+  name = "google.golang.org/grpc"
+  packages = [".","balancer","balancer/roundrobin","codes","connectivity","credentials","credentials/oauth","encoding","grpclb/grpc_lb_v1/messages","grpclog","internal","keepalive","metadata","naming","peer","resolver","resolver/dns","resolver/passthrough","stats","status","tap","transport"]
+  revision = "5a9f7b402fe85096d2e1d0383435ee1876e863d0"
+  version = "v1.8.0"
+
+[solve-meta]
+  analyzer-name = "dep"
+  analyzer-version = 1
+  inputs-digest = "b24c6670412eb0bc44ed1db77fecc52333f8725f3e3272bdc568f5683a63031f"
+  solver-name = "gps-cdcl"
+  solver-version = 1
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/Gopkg.toml b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/Gopkg.toml
new file mode 100644
index 0000000..0a7d4c1
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/Gopkg.toml
@@ -0,0 +1,35 @@
+[[constraint]]
+  name = "github.com/gogo/protobuf"
+  version = "0.5.0"
+
+[[constraint]]
+  branch = "master"
+  name = "github.com/golang/protobuf"
+
+[[constraint]]
+  name = "github.com/opentracing/opentracing-go"
+  version = "1.0.2"
+
+[[constraint]]
+  name = "github.com/sirupsen/logrus"
+  version = "1.0.3"
+
+[[constraint]]
+  name = "github.com/stretchr/testify"
+  version = "1.1.4"
+
+[[constraint]]
+  name = "go.uber.org/zap"
+  version = "1.7.1"
+
+[[constraint]]
+  branch = "master"
+  name = "golang.org/x/net"
+
+[[constraint]]
+  branch = "master"
+  name = "golang.org/x/oauth2"
+
+[[constraint]]
+  name = "google.golang.org/grpc"
+  version = "1.8.0"
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/LICENSE b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/LICENSE
new file mode 100644
index 0000000..b2b0650
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/LICENSE
@@ -0,0 +1,201 @@
+                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
\ No newline at end of file
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/README.md b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/README.md
new file mode 100644
index 0000000..52e5373
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/README.md
@@ -0,0 +1,86 @@
+# Go gRPC Middleware
+
+[![Travis Build](https://travis-ci.org/grpc-ecosystem/go-grpc-middleware.svg?branch=master)](https://travis-ci.org/grpc-ecosystem/go-grpc-middleware)
+[![Go Report Card](https://goreportcard.com/badge/github.com/grpc-ecosystem/go-grpc-middleware)](https://goreportcard.com/report/github.com/grpc-ecosystem/go-grpc-middleware)
+[![GoDoc](http://img.shields.io/badge/GoDoc-Reference-blue.svg)](https://godoc.org/github.com/grpc-ecosystem/go-grpc-middleware)
+[![SourceGraph](https://sourcegraph.com/github.com/grpc-ecosystem/go-grpc-middleware/-/badge.svg)](https://sourcegraph.com/github.com/grpc-ecosystem/go-grpc-middleware/?badge)
+[![codecov](https://codecov.io/gh/grpc-ecosystem/go-grpc-middleware/branch/master/graph/badge.svg)](https://codecov.io/gh/grpc-ecosystem/go-grpc-middleware)
+[![Apache 2.0 License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](LICENSE)
+[![quality: production](https://img.shields.io/badge/quality-production-orange.svg)](#status)
+[![Slack](slack.png)](https://join.slack.com/t/improbable-eng/shared_invite/enQtMzQ1ODcyMzQ5MjM4LWY5ZWZmNGM2ODc5MmViNmQ3ZTA3ZTY3NzQwOTBlMTkzZmIxZTIxODk0OWU3YjZhNWVlNDU3MDlkZGViZjhkMjc)
+
+[gRPC Go](https://github.com/grpc/grpc-go) Middleware: interceptors, helpers, utilities.
+
+**Important:** The repo recently moved to `github.com/grpc-ecosystem/go-grpc-middleware`; please update your import paths.
+
+## Middleware
+
+[gRPC Go](https://github.com/grpc/grpc-go) recently acquired support for
+Interceptors, i.e. [middleware](https://medium.com/@matryer/writing-middleware-in-golang-and-how-go-makes-it-so-much-fun-4375c1246e81#.gv7tdlghs)
+that is executed either on the gRPC server, before the request is passed on to the user's application logic, or on the gRPC client, around the user call. It is a perfect way to implement
+common patterns: auth, logging, message validation, retries or monitoring.
+
+These are generic building blocks that make it easy to build multiple microservices.
+The purpose of this repository is to act as a go-to point for such reusable functionality. It contains
+some of that functionality itself, and also links to useful external repos.
+
+`grpc_middleware` itself provides support for chaining interceptors. See [Documentation](DOC.md), but here's an example:
+
+```go
+import "github.com/grpc-ecosystem/go-grpc-middleware"
+
+myServer := grpc.NewServer(
+    grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(
+        grpc_ctxtags.StreamServerInterceptor(),
+        grpc_opentracing.StreamServerInterceptor(),
+        grpc_prometheus.StreamServerInterceptor,
+        grpc_zap.StreamServerInterceptor(zapLogger),
+        grpc_auth.StreamServerInterceptor(myAuthFunction),
+        grpc_recovery.StreamServerInterceptor(),
+    )),
+    grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(
+        grpc_ctxtags.UnaryServerInterceptor(),
+        grpc_opentracing.UnaryServerInterceptor(),
+        grpc_prometheus.UnaryServerInterceptor,
+        grpc_zap.UnaryServerInterceptor(zapLogger),
+        grpc_auth.UnaryServerInterceptor(myAuthFunction),
+        grpc_recovery.UnaryServerInterceptor(),
+    )),
+)
+```
+
+## Interceptors
+
+*Please send a PR to add new interceptors or middleware to this list*
+
+#### Auth
+   * [`grpc_auth`](auth) - a customizable (via `AuthFunc`) piece of auth middleware 
+
+#### Logging
+   * [`grpc_ctxtags`](tags/) - a library that adds a `Tag` map to context, with data populated from request body
+   * [`grpc_zap`](logging/zap/) - integration of [zap](https://github.com/uber-go/zap) logging library into gRPC handlers.
+   * [`grpc_logrus`](logging/logrus/) - integration of [logrus](https://github.com/sirupsen/logrus) logging library into gRPC handlers.
+
+
+#### Monitoring
+   * [`grpc_prometheus`⚡](https://github.com/grpc-ecosystem/go-grpc-prometheus) - Prometheus client-side and server-side monitoring middleware
+   * [`otgrpc`⚡](https://github.com/grpc-ecosystem/grpc-opentracing/tree/master/go/otgrpc) - [OpenTracing](http://opentracing.io/) client-side and server-side interceptors
+   * [`grpc_opentracing`](tracing/opentracing) - [OpenTracing](http://opentracing.io/) client-side and server-side interceptors with support for streaming and handler-returned tags
+
+#### Client
+   * [`grpc_retry`](retry/) - a generic gRPC response code retry mechanism, client-side middleware
+
+#### Server
+   * [`grpc_validator`](validator/) - codegen inbound message validation from `.proto` options
+   * [`grpc_recovery`](recovery/) - turn panics into gRPC errors
+
+
+## Status
+
+This code has been running in *production* since May 2016 as the basis of the gRPC micro services stack at [Improbable](https://improbable.io).
+
+Additional tooling will be added, and contributions are welcome.
+
+## License
+
+`go-grpc-middleware` is released under the Apache 2.0 license. See the [LICENSE](LICENSE) file for details.
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/auth/README.md b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/auth/README.md
new file mode 120000
index 0000000..71bfc07
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/auth/README.md
@@ -0,0 +1 @@
+DOC.md
\ No newline at end of file
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/chain.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/chain.go
new file mode 100644
index 0000000..45a2f5f
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/chain.go
@@ -0,0 +1,183 @@
+// Copyright 2016 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+
+// gRPC Server Interceptor chaining middleware.
+
+package grpc_middleware
+
+import (
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
+)
+
+// ChainUnaryServer creates a single interceptor out of a chain of many interceptors.
+//
+// Execution is done in left-to-right order, including passing of context.
+// For example ChainUnaryServer(one, two, three) will execute one before two before three, and three
+// will see context changes of one and two.
+func ChainUnaryServer(interceptors ...grpc.UnaryServerInterceptor) grpc.UnaryServerInterceptor {
+	n := len(interceptors)
+
+	if n > 1 {
+		lastI := n - 1
+		return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
+			var (
+				chainHandler grpc.UnaryHandler
+				curI         int
+			)
+
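+			// chainHandler invokes the next interceptor in the slice, or the final
+			// handler once the end is reached. curI is incremented before each
+			// nested call and restored afterwards, so the same closure can safely
+			// be invoked again (e.g. by an interceptor that calls its handler more
+			// than once). The other Chain* functions below use the same pattern.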
+			chainHandler = func(currentCtx context.Context, currentReq interface{}) (interface{}, error) {
+				if curI == lastI {
+					return handler(currentCtx, currentReq)
+				}
+				curI++
+				resp, err := interceptors[curI](currentCtx, currentReq, info, chainHandler)
+				curI--
+				return resp, err
+			}
+
+			return interceptors[0](ctx, req, info, chainHandler)
+		}
+	}
+
+	if n == 1 {
+		return interceptors[0]
+	}
+
+	// n == 0; Dummy interceptor maintained for backward compatibility to avoid returning nil.
+	return func(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
+		return handler(ctx, req)
+	}
+}
+
+// ChainStreamServer creates a single interceptor out of a chain of many interceptors.
+//
+// Execution is done in left-to-right order, including passing of context.
+// For example ChainStreamServer(one, two, three) will execute one before two before three.
+// If you want to pass context between interceptors, use WrapServerStream.
+func ChainStreamServer(interceptors ...grpc.StreamServerInterceptor) grpc.StreamServerInterceptor {
+	n := len(interceptors)
+
+	if n > 1 {
+		lastI := n - 1
+		return func(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+			var (
+				chainHandler grpc.StreamHandler
+				curI         int
+			)
+
+			chainHandler = func(currentSrv interface{}, currentStream grpc.ServerStream) error {
+				if curI == lastI {
+					return handler(currentSrv, currentStream)
+				}
+				curI++
+				err := interceptors[curI](currentSrv, currentStream, info, chainHandler)
+				curI--
+				return err
+			}
+
+			return interceptors[0](srv, stream, info, chainHandler)
+		}
+	}
+
+	if n == 1 {
+		return interceptors[0]
+	}
+
+	// n == 0; Dummy interceptor maintained for backward compatibility to avoid returning nil.
+	return func(srv interface{}, stream grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+		return handler(srv, stream)
+	}
+}
+
+// ChainUnaryClient creates a single interceptor out of a chain of many interceptors.
+//
+// Execution is done in left-to-right order, including passing of context.
+// For example ChainUnaryClient(one, two, three) will execute one before two before three.
+func ChainUnaryClient(interceptors ...grpc.UnaryClientInterceptor) grpc.UnaryClientInterceptor {
+	n := len(interceptors)
+
+	if n > 1 {
+		lastI := n - 1
+		return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+			var (
+				chainHandler grpc.UnaryInvoker
+				curI         int
+			)
+
+			chainHandler = func(currentCtx context.Context, currentMethod string, currentReq, currentRepl interface{}, currentConn *grpc.ClientConn, currentOpts ...grpc.CallOption) error {
+				if curI == lastI {
+					return invoker(currentCtx, currentMethod, currentReq, currentRepl, currentConn, currentOpts...)
+				}
+				curI++
+				err := interceptors[curI](currentCtx, currentMethod, currentReq, currentRepl, currentConn, chainHandler, currentOpts...)
+				curI--
+				return err
+			}
+
+			return interceptors[0](ctx, method, req, reply, cc, chainHandler, opts...)
+		}
+	}
+
+	if n == 1 {
+		return interceptors[0]
+	}
+
+	// n == 0; Dummy interceptor maintained for backward compatibility to avoid returning nil.
+	return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+		return invoker(ctx, method, req, reply, cc, opts...)
+	}
+}
+
+// ChainStreamClient creates a single interceptor out of a chain of many interceptors.
+//
+// Execution is done in left-to-right order, including passing of context.
+// For example ChainStreamClient(one, two, three) will execute one before two before three.
+func ChainStreamClient(interceptors ...grpc.StreamClientInterceptor) grpc.StreamClientInterceptor {
+	n := len(interceptors)
+
+	if n > 1 {
+		lastI := n - 1
+		return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
+			var (
+				chainHandler grpc.Streamer
+				curI         int
+			)
+
+			chainHandler = func(currentCtx context.Context, currentDesc *grpc.StreamDesc, currentConn *grpc.ClientConn, currentMethod string, currentOpts ...grpc.CallOption) (grpc.ClientStream, error) {
+				if curI == lastI {
+					return streamer(currentCtx, currentDesc, currentConn, currentMethod, currentOpts...)
+				}
+				curI++
+				stream, err := interceptors[curI](currentCtx, currentDesc, currentConn, currentMethod, chainHandler, currentOpts...)
+				curI--
+				return stream, err
+			}
+
+			return interceptors[0](ctx, desc, cc, method, chainHandler, opts...)
+		}
+	}
+
+	if n == 1 {
+		return interceptors[0]
+	}
+
+	// n == 0; Dummy interceptor maintained for backward compatibility to avoid returning nil.
+	return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
+		return streamer(ctx, desc, cc, method, opts...)
+	}
+}
+
+// WithUnaryServerChain is a grpc.Server config option that accepts multiple unary interceptors.
+// Basically syntactic sugar.
+func WithUnaryServerChain(interceptors ...grpc.UnaryServerInterceptor) grpc.ServerOption {
+	return grpc.UnaryInterceptor(ChainUnaryServer(interceptors...))
+}
+
+// WithStreamServerChain is a grpc.Server config option that accepts multiple stream interceptors.
+// Basically syntactic sugar.
+func WithStreamServerChain(interceptors ...grpc.StreamServerInterceptor) grpc.ServerOption {
+	return grpc.StreamInterceptor(ChainStreamServer(interceptors...))
+}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/doc.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/doc.go
new file mode 100644
index 0000000..7168950
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/doc.go
@@ -0,0 +1,69 @@
+// Copyright 2016 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+
+/*
+`grpc_middleware` is a collection of gRPC middleware packages: interceptors, helpers and tools.
+
+Middleware
+
+gRPC is a fantastic RPC framework, which sees a lot of adoption in the Golang world. However, the
+upstream gRPC codebase is relatively bare bones.
+
+This package, and most of its child packages, provides commonly needed middleware for gRPC:
+client-side interceptors for retries, server-side interceptors for input validation and auth,
+functions for chaining said interceptors, metadata convenience methods and more.
+
+Chaining
+
+By default, gRPC doesn't allow one to have more than one interceptor on either the client or the
+server side. `grpc_middleware` provides convenient chaining methods: a simple way of turning
+multiple interceptors into a single interceptor. Here's an example for server chaining:
+
+	myServer := grpc.NewServer(
+	    grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(loggingStream, monitoringStream, authStream)),
+	    grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(loggingUnary, monitoringUnary, authUnary)),
+	)
+
+These interceptors will be executed from left to right: logging, monitoring and auth.
+
+Here's an example for client side chaining:
+
+	clientConn, err = grpc.Dial(
+	    address,
+	    grpc.WithUnaryInterceptor(grpc_middleware.ChainUnaryClient(monitoringClientUnary, retryUnary)),
+	    grpc.WithStreamInterceptor(grpc_middleware.ChainStreamClient(monitoringClientStream, retryStream)),
+	)
+	client = pb_testproto.NewTestServiceClient(clientConn)
+	resp, err := client.PingEmpty(s.ctx, &myservice.Request{Msg: "hello"})
+
+These interceptors will be executed from left to right: monitoring and then retry logic.
+
+The retry interceptor will call every interceptor that follows it whenever a retry happens.
+
+Writing Your Own
+
+Implementing your own interceptor is pretty trivial: there are interfaces for that. The interesting
+bit is exposing common data to handlers (and other middleware), similarly to HTTP middleware design.
+For example, you may want to pass the identity of the caller from the auth interceptor all the way
+to the handling function.
+
+A server-side unary interceptor for auth, for example, looks like:
+
+	func FakeAuthUnaryInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
+	   newCtx := context.WithValue(ctx, "user_id", "john@example.com")
+	   return handler(newCtx, req)
+	}
+
+Unfortunately, it's not as easy for streaming RPCs. These have the `context.Context` embedded within
+the `grpc.ServerStream` object. To pass values through context, a wrapper (`WrappedServerStream`) is
+needed. For example:
+
+	func FakeAuthStreamingInterceptor(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+	   newStream := grpc_middleware.WrapServerStream(stream)
+	   newStream.WrappedContext = context.WithValue(stream.Context(), "user_id", "john@example.com")
+	   return handler(srv, newStream)
+	}
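+
+On the handler side, the value can be read back from the context. Below is a minimal sketch;
+the service type, method and protobuf messages are hypothetical, not part of this package:
+
+	func (s *myService) Ping(ctx context.Context, req *pb.Request) (*pb.Response, error) {
+	   // "user_id" was set by the auth interceptor above.
+	   userID, _ := ctx.Value("user_id").(string)
+	   return &pb.Response{Msg: "hello " + userID}, nil
+	}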
+*/
+package grpc_middleware
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/README.md b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/README.md
new file mode 120000
index 0000000..71bfc07
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/README.md
@@ -0,0 +1 @@
+DOC.md
\ No newline at end of file
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/logrus/README.md b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/logrus/README.md
new file mode 120000
index 0000000..71bfc07
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/logrus/README.md
@@ -0,0 +1 @@
+DOC.md
\ No newline at end of file
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/README.md b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/README.md
new file mode 120000
index 0000000..71bfc07
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/README.md
@@ -0,0 +1 @@
+DOC.md
\ No newline at end of file
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/makefile b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/makefile
new file mode 100644
index 0000000..3e0f296
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/makefile
@@ -0,0 +1,22 @@
+SHELL="/bin/bash"
+
+GOFILES_NOVENDOR = $(shell go list ./... | grep -v /vendor/)
+
+all: vet fmt docs test
+
+docs:
+	./scripts/docs.sh generate
+
+checkdocs:
+	./scripts/docs.sh check
+
+fmt:
+	go fmt $(GOFILES_NOVENDOR)
+
+vet:
+	go vet $(GOFILES_NOVENDOR)
+
+test: vet
+	./scripts/test_all.sh
+
+.PHONY: all docs checkdocs fmt vet test
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/recovery/README.md b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/recovery/README.md
new file mode 120000
index 0000000..71bfc07
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/recovery/README.md
@@ -0,0 +1 @@
+DOC.md
\ No newline at end of file
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/README.md b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/README.md
new file mode 120000
index 0000000..71bfc07
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/README.md
@@ -0,0 +1 @@
+DOC.md
\ No newline at end of file
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/slack.png b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/slack.png
new file mode 100644
index 0000000..cc8f9a6
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/slack.png
Binary files differ
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/README.md b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/README.md
new file mode 120000
index 0000000..71bfc07
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/README.md
@@ -0,0 +1 @@
+DOC.md
\ No newline at end of file
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/README.md b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/README.md
new file mode 120000
index 0000000..71bfc07
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/README.md
@@ -0,0 +1 @@
+DOC.md
\ No newline at end of file
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/README.md b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/README.md
new file mode 120000
index 0000000..71bfc07
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/README.md
@@ -0,0 +1 @@
+DOC.md
\ No newline at end of file
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/wrappers.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/wrappers.go
new file mode 100644
index 0000000..597b862
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/wrappers.go
@@ -0,0 +1,29 @@
+// Copyright 2016 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+
+package grpc_middleware
+
+import (
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
+)
+
+// WrappedServerStream is a thin wrapper around grpc.ServerStream that allows modifying context.
+type WrappedServerStream struct {
+	grpc.ServerStream
+	// WrappedContext is the wrapper's own Context. You can assign it.
+	WrappedContext context.Context
+}
+
+// Context returns the wrapper's WrappedContext, overwriting the nested grpc.ServerStream.Context().
+func (w *WrappedServerStream) Context() context.Context {
+	return w.WrappedContext
+}
+
+// WrapServerStream returns a ServerStream that has the ability to overwrite context.
+func WrapServerStream(stream grpc.ServerStream) *WrappedServerStream {
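+	// Don't wrap twice: if the stream is already a *WrappedServerStream, reuse it.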
+	if existing, ok := stream.(*WrappedServerStream); ok {
+		return existing
+	}
+	return &WrappedServerStream{ServerStream: stream, WrappedContext: stream.Context()}
+}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.gitignore b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.gitignore
new file mode 100644
index 0000000..2233cff
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.gitignore
@@ -0,0 +1,201 @@
+#vendor
+vendor/
+
+# Created by .ignore support plugin (hsz.mobi)
+coverage.txt
+### Go template
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+### Windows template
+# Windows image file caches
+Thumbs.db
+ehthumbs.db
+
+# Folder config file
+Desktop.ini
+
+# Recycle Bin used on file shares
+$RECYCLE.BIN/
+
+# Windows Installer files
+*.cab
+*.msi
+*.msm
+*.msp
+
+# Windows shortcuts
+*.lnk
+### Kate template
+# Swap Files #
+.*.kate-swp
+.swp.*
+### SublimeText template
+# cache files for sublime text
+*.tmlanguage.cache
+*.tmPreferences.cache
+*.stTheme.cache
+
+# workspace files are user-specific
+*.sublime-workspace
+
+# project files should be checked into the repository, unless a significant
+# proportion of contributors will probably not be using SublimeText
+# *.sublime-project
+
+# sftp configuration file
+sftp-config.json
+### Linux template
+*~
+
+# temporary files which can be created if a process still has a handle open of a deleted file
+.fuse_hidden*
+
+# KDE directory preferences
+.directory
+
+# Linux trash folder which might appear on any partition or disk
+.Trash-*
+### JetBrains template
+# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm
+# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
+
+# User-specific stuff:
+.idea
+.idea/tasks.xml
+.idea/dictionaries
+.idea/vcs.xml
+.idea/jsLibraryMappings.xml
+
+# Sensitive or high-churn files:
+.idea/dataSources.ids
+.idea/dataSources.xml
+.idea/dataSources.local.xml
+.idea/sqlDataSources.xml
+.idea/dynamic.xml
+.idea/uiDesigner.xml
+
+# Gradle:
+.idea/gradle.xml
+.idea/libraries
+
+# Mongo Explorer plugin:
+.idea/mongoSettings.xml
+
+## File-based project format:
+*.iws
+
+## Plugin-specific files:
+
+# IntelliJ
+/out/
+
+# mpeltonen/sbt-idea plugin
+.idea_modules/
+
+# JIRA plugin
+atlassian-ide-plugin.xml
+
+# Crashlytics plugin (for Android Studio and IntelliJ)
+com_crashlytics_export_strings.xml
+crashlytics.properties
+crashlytics-build.properties
+fabric.properties
+### Xcode template
+# Xcode
+#
+# gitignore contributors: remember to update Global/Xcode.gitignore, Objective-C.gitignore & Swift.gitignore
+
+## Build generated
+build/
+DerivedData/
+
+## Various settings
+*.pbxuser
+!default.pbxuser
+*.mode1v3
+!default.mode1v3
+*.mode2v3
+!default.mode2v3
+*.perspectivev3
+!default.perspectivev3
+xcuserdata/
+
+## Other
+*.moved-aside
+*.xccheckout
+*.xcscmblueprint
+### Eclipse template
+
+.metadata
+bin/
+tmp/
+*.tmp
+*.bak
+*.swp
+*~.nib
+local.properties
+.settings/
+.loadpath
+.recommenders
+
+# Eclipse Core
+.project
+
+# External tool builders
+.externalToolBuilders/
+
+# Locally stored "Eclipse launch configurations"
+*.launch
+
+# PyDev specific (Python IDE for Eclipse)
+*.pydevproject
+
+# CDT-specific (C/C++ Development Tooling)
+.cproject
+
+# JDT-specific (Eclipse Java Development Tools)
+.classpath
+
+# Java annotation processor (APT)
+.factorypath
+
+# PDT-specific (PHP Development Tools)
+.buildpath
+
+# sbteclipse plugin
+.target
+
+# Tern plugin
+.tern-project
+
+# TeXlipse plugin
+.texlipse
+
+# STS (Spring Tool Suite)
+.springBeans
+
+# Code Recommenders
+.recommenders/
+
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.travis.yml b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.travis.yml
new file mode 100644
index 0000000..2a845b9
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.travis.yml
@@ -0,0 +1,25 @@
+sudo: false
+language: go
+# * github.com/grpc/grpc-go still supports go1.6
+#   - When we drop support for go1.6 we can remove golang.org/x/net/context
+#     below as it is part of the Go std library since go1.7
+# * github.com/prometheus/client_golang already requires at least go1.7 since
+#   September 2017
+go:
+  - 1.6.x
+  - 1.7.x
+  - 1.8.x
+  - 1.9.x
+  - 1.10.x
+  - master
+
+install:
+  - go get github.com/prometheus/client_golang/prometheus
+  - go get google.golang.org/grpc
+  - go get golang.org/x/net/context
+  - go get github.com/stretchr/testify
+script:
+  - make test
+
+after_success:
+  - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/CHANGELOG.md b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/CHANGELOG.md
new file mode 100644
index 0000000..19a8059
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/CHANGELOG.md
@@ -0,0 +1,24 @@
+# Changelog
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
+and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
+
+## [Unreleased]
+
+## [1.2.0](https://github.com/grpc-ecosystem/go-grpc-prometheus/releases/tag/v1.2.0) - 2018-06-04
+
+### Added
+
+* Provide metrics object as `prometheus.Collector`, for conventional metric registration.
+* Support non-default/global Prometheus registry.
+* Allow configuring counters with `prometheus.CounterOpts`.
+
+### Changed
+
+* Remove usage of deprecated `grpc.Code()`.
+* Remove usage of deprecated `grpc.Errorf` and replace with `status.Errorf`.
+
+---
+
+This changelog was started with version `v1.2.0`, for earlier versions refer to the respective [GitHub releases](https://github.com/grpc-ecosystem/go-grpc-prometheus/releases).
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/LICENSE b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/LICENSE
new file mode 100644
index 0000000..b2b0650
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/LICENSE
@@ -0,0 +1,201 @@
+                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
\ No newline at end of file
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/README.md b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/README.md
new file mode 100644
index 0000000..499c583
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/README.md
@@ -0,0 +1,247 @@
+# Go gRPC Interceptors for Prometheus monitoring 
+
+[![Travis Build](https://travis-ci.org/grpc-ecosystem/go-grpc-prometheus.svg)](https://travis-ci.org/grpc-ecosystem/go-grpc-prometheus)
+[![Go Report Card](https://goreportcard.com/badge/github.com/grpc-ecosystem/go-grpc-prometheus)](http://goreportcard.com/report/grpc-ecosystem/go-grpc-prometheus)
+[![GoDoc](http://img.shields.io/badge/GoDoc-Reference-blue.svg)](https://godoc.org/github.com/grpc-ecosystem/go-grpc-prometheus)
+[![SourceGraph](https://sourcegraph.com/github.com/grpc-ecosystem/go-grpc-prometheus/-/badge.svg)](https://sourcegraph.com/github.com/grpc-ecosystem/go-grpc-prometheus/?badge)
+[![codecov](https://codecov.io/gh/grpc-ecosystem/go-grpc-prometheus/branch/master/graph/badge.svg)](https://codecov.io/gh/grpc-ecosystem/go-grpc-prometheus)
+[![Apache 2.0 License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](LICENSE)
+
+[Prometheus](https://prometheus.io/) monitoring for your [gRPC Go](https://github.com/grpc/grpc-go) servers and clients.
+
+A sister implementation for [gRPC Java](https://github.com/grpc/grpc-java) (same metrics, same semantics) is in [grpc-ecosystem/java-grpc-prometheus](https://github.com/grpc-ecosystem/java-grpc-prometheus).
+
+## Interceptors
+
+[gRPC Go](https://github.com/grpc/grpc-go) recently acquired support for Interceptors, i.e. middleware that is executed
+by a gRPC Server before the request is passed on to the user's application logic. Interceptors are a perfect way to
+implement common patterns: auth, logging and... monitoring.
+
+To use Interceptors in chains, please see [`go-grpc-middleware`](https://github.com/mwitkow/go-grpc-middleware).
+
+## Usage
+
+There are two types of interceptors: client-side and server-side. This package provides monitoring Interceptors for both.
+
+### Server-side
+
+```go
+import "github.com/grpc-ecosystem/go-grpc-prometheus"
+...
+    // Initialize your gRPC server's interceptor.
+    myServer := grpc.NewServer(
+        grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor),
+        grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor),
+    )
+    // Register your gRPC service implementations.
+    myservice.RegisterMyServiceServer(myServer, &myServiceImpl{})
+    // After all your registrations, make sure all of the Prometheus metrics are initialized.
+    grpc_prometheus.Register(myServer)
+    // Register Prometheus metrics handler.    
+    http.Handle("/metrics", promhttp.Handler())
+...
+```
+
+### Client-side
+
+```go
+import "github.com/grpc-ecosystem/go-grpc-prometheus"
+...
+   clientConn, err = grpc.Dial(
+       address,
+       grpc.WithUnaryInterceptor(grpc_prometheus.UnaryClientInterceptor),
+       grpc.WithStreamInterceptor(grpc_prometheus.StreamClientInterceptor),
+   )
+   client = pb_testproto.NewTestServiceClient(clientConn)
+   resp, err := client.PingEmpty(s.ctx, &myservice.Request{Msg: "hello"})
+...
+```
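+
+The default client metrics are registered on the default Prometheus registry by this
+package's `init()`. If the client process does not already expose a metrics endpoint,
+the metrics can be served the same way as on the server side; a minimal sketch (the
+`:9090` listen address is chosen purely for illustration):
+
+```go
+import (
+    "net/http"
+
+    "github.com/prometheus/client_golang/prometheus/promhttp"
+)
+...
+    // Expose the default registry, which already contains the client metrics.
+    http.Handle("/metrics", promhttp.Handler())
+    // Serve the metrics endpoint on a side port.
+    go http.ListenAndServe(":9090", nil)
+...
+```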
+
+# Metrics
+
+## Labels
+
+All server-side metrics start with `grpc_server` as the Prometheus subsystem name, and all client-side metrics start with
+`grpc_client`. The two are mirror concepts, and all methods carry the same rich labels:
+  
+  * `grpc_service` - the [gRPC service](http://www.grpc.io/docs/#defining-a-service) name, which is the combination of the protobuf
+    `package` and the `service` section name. E.g. for `package = mwitkow.testproto` and
+    `service TestService` the label will be `grpc_service="mwitkow.testproto.TestService"` (see the sketch after this list)
+  * `grpc_method` - the name of the method called on the gRPC service. E.g.  
+    `grpc_method="Ping"`
+  * `grpc_type` - the gRPC [type of request](http://www.grpc.io/docs/guides/concepts.html#rpc-life-cycle).
+    Differentiating between these is important, especially for latency measurements.
+
+     - `unary` is a single-request, single-response RPC
+     - `client_stream` is a multi-request, single-response RPC
+     - `server_stream` is a single-request, multi-response RPC
+     - `bidi_stream` is a multi-request, multi-response RPC
+    
+
+Additionally for completed RPCs, the following labels are used:
+
+  * `grpc_code` - the human-readable [gRPC status code](https://github.com/grpc/grpc-go/blob/master/codes/codes.go).
+    The list of all statuses is too long to include here, but some common ones are:
+
+      - `OK` - the RPC was successful
+      - `InvalidArgument` - the RPC contained bad values
+      - `Internal` - a server-side error not disclosed to the clients
+      
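+The `grpc_service` and `grpc_method` labels are derived from the full method name of the
+RPC. A small sketch of the mapping, mirroring this package's internal `splitMethodName`
+helper:
+
+```go
+package main
+
+import (
+    "fmt"
+    "strings"
+)
+
+// splitMethodName turns "/package.Service/Method" into its two label values.
+func splitMethodName(fullMethodName string) (string, string) {
+    fullMethodName = strings.TrimPrefix(fullMethodName, "/")
+    if i := strings.Index(fullMethodName, "/"); i >= 0 {
+        return fullMethodName[:i], fullMethodName[i+1:]
+    }
+    return "unknown", "unknown"
+}
+
+func main() {
+    service, method := splitMethodName("/mwitkow.testproto.TestService/Ping")
+    fmt.Println(service, method) // mwitkow.testproto.TestService Ping
+}
+```
+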
+## Counters
+
+The counters and their up-to-date documentation are in [server_reporter.go](server_reporter.go) and [client_reporter.go](client_reporter.go);
+their values are exposed via the respective Prometheus handler (usually `/metrics`).
+
+For the purpose of this documentation we will only discuss `grpc_server` metrics; the `grpc_client` ones mirror them.
+
+For simplicity, let's assume we're tracking a single server-side RPC call of [`mwitkow.testproto.TestService`](examples/testproto/test.proto),
+calling the method `PingList`. The call succeeds and returns 20 messages in the stream.
+
+First, immediately after the server receives the call, it increments
+`grpc_server_started_total` and starts the handling-time clock (if histograms are enabled).
+
+```jsoniq
+grpc_server_started_total{grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 1
+```
+
+Then the user logic gets invoked. It receives one message from the client containing the request 
+(it's a `server_stream`):
+
+```jsoniq
+grpc_server_msg_received_total{grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 1
+```
+
+The user logic may return an error, or send multiple messages back to the client. In this case,
+a counter is incremented for each of the 20 messages sent back:
+
+```jsoniq
+grpc_server_msg_sent_total{grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 20
+```
+
+After the call completes, its status (`OK` or other [gRPC status code](https://github.com/grpc/grpc-go/blob/master/codes/codes.go)) 
+and the relevant call labels increment the `grpc_server_handled_total` counter.
+
+```jsoniq
+grpc_server_handled_total{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 1
+```
+
+## Histograms
+
+[Prometheus histograms](https://prometheus.io/docs/concepts/metric_types/#histogram) are a great way
+to measure latency distributions of your RPCs. However, since it is bad practice to have metrics
+of [high cardinality](https://prometheus.io/docs/practices/instrumentation/#do-not-overuse-labels),
+the latency monitoring metrics are disabled by default. To enable them, call the following
+in your server initialization code:
+
+```go
+grpc_prometheus.EnableHandlingTimeHistogram()
+```
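+
+The default buckets (`prom.DefBuckets`) can be overridden when enabling the histogram.
+A sketch using the `WithHistogramBuckets` option provided by this package (the bucket
+boundaries here are illustrative, not a recommendation):
+
+```go
+grpc_prometheus.EnableHandlingTimeHistogram(
+    grpc_prometheus.WithHistogramBuckets([]float64{0.001, 0.01, 0.1, 0.3, 1.2, 5}),
+)
+```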
+
+After the call completes, its handling time will be recorded in a [Prometheus histogram](https://prometheus.io/docs/concepts/metric_types/#histogram)
+variable `grpc_server_handling_seconds`. The histogram variable contains three sub-metrics:
+
+ * `grpc_server_handling_seconds_count` - the count of all completed RPCs by status and method 
+ * `grpc_server_handling_seconds_sum` - cumulative time of RPCs by status and method, useful for 
+   calculating average handling times
+ * `grpc_server_handling_seconds_bucket` - contains the counts of RPCs by status and method in respective
+   handling-time buckets. These buckets can be used by Prometheus to estimate SLAs (see [here](https://prometheus.io/docs/practices/histograms/))
+
+The counter values will look as follows:
+
+```jsoniq
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.005"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.01"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.025"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.05"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.1"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.25"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.5"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="1"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="2.5"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="5"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="10"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="+Inf"} 1
+grpc_server_handling_seconds_sum{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 0.0003866430000000001
+grpc_server_handling_seconds_count{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 1
+```
+
+
+## Useful query examples
+
+The Prometheus philosophy is to provide raw metrics to the monitoring system and
+let the aggregations be handled there. The verbosity of the above metrics makes that
+flexibility possible. Here are a couple of useful monitoring queries:
+
+
+### request inbound rate
+```jsoniq
+sum(rate(grpc_server_started_total{job="foo"}[1m])) by (grpc_service)
+```
+For `job="foo"` (common label to differentiate between Prometheus monitoring targets), calculate the
+rate of requests per second (1 minute window) for each gRPC `grpc_service` that the job has. Please note
+how the `grpc_method` is being omitted here: all methods of a given gRPC service will be summed together.
+
+### unary request error rate
+```jsoniq
+sum(rate(grpc_server_handled_total{job="foo",grpc_type="unary",grpc_code!="OK"}[1m])) by (grpc_service)
+```
+For `job="foo"`, calculate the per-`grpc_service` rate of `unary` (1:1) RPCs that failed, i.e. the 
+ones that didn't finish with `OK` code.
+
+### unary request error percentage
+```jsoniq
+sum(rate(grpc_server_handled_total{job="foo",grpc_type="unary",grpc_code!="OK"}[1m])) by (grpc_service)
+ / 
+sum(rate(grpc_server_started_total{job="foo",grpc_type="unary"}[1m])) by (grpc_service)
+ * 100.0
+```
+For `job="foo"`, calculate the percentage of failed requests by service. It's easy to notice that
+this is a combination of the two above examples. This is an example of a query you would like to
+[alert on](https://prometheus.io/docs/alerting/rules/) in your system for SLA violations, e.g.
+"no more than 1% requests should fail".
+
+### average response stream size
+```jsoniq
+sum(rate(grpc_server_msg_sent_total{job="foo",grpc_type="server_stream"}[10m])) by (grpc_service)
+ /
+sum(rate(grpc_server_started_total{job="foo",grpc_type="server_stream"}[10m])) by (grpc_service)
+```
+For `job="foo"` what is the `grpc_service`-wide `10m` average of messages returned for all `
+server_stream` RPCs. This allows you to track the stream sizes returned by your system, e.g. allows 
+you to track when clients started to send "wide" queries that ret
+Note the divisor is the number of started RPCs, in order to account for in-flight requests.
+
+### 99th-percentile latency of unary requests
+```jsoniq
+histogram_quantile(0.99, 
+  sum(rate(grpc_server_handling_seconds_bucket{job="foo",grpc_type="unary"}[5m])) by (grpc_service,le)
+)
+```
+For `job="foo"`, returns an 99%-tile [quantile estimation](https://prometheus.io/docs/practices/histograms/#quantiles)
+of the handling time of RPCs per service. Please note the `5m` rate, this means that the quantile
+estimation will take samples in a rolling `5m` window. When combined with other quantiles
+(e.g. 50%, 90%), this query gives you tremendous insight into the responsiveness of your system 
+(e.g. impact of caching).
+
+### percentage of slow unary queries (>250ms)
+```jsoniq
+100.0 - (
+sum(rate(grpc_server_handling_seconds_bucket{job="foo",grpc_type="unary",le="0.25"}[5m])) by (grpc_service)
+ / 
+sum(rate(grpc_server_handling_seconds_count{job="foo",grpc_type="unary"}[5m])) by (grpc_service)
+) * 100.0
+```
+For `job="foo"` calculate the by-`grpc_service` fraction of slow requests that took longer than `0.25` 
+seconds. This query is relatively complex, since the Prometheus aggregations use `le` (less or equal)
+buckets, meaning that counting "fast" requests fractions is easier. However, simple maths helps.
+This is an example of a query you would like to alert on in your system for SLA violations, 
+e.g. "less than 1% of requests are slower than 250ms".
+
+
+## Status
+
+This code has been used since August 2015 as the basis for monitoring of *production* gRPC microservices at [Improbable](https://improbable.io).
+
+## License
+
+`go-grpc-prometheus` is released under the Apache 2.0 license. See the [LICENSE](LICENSE) file for details.
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client.go
new file mode 100644
index 0000000..751a4c7
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client.go
@@ -0,0 +1,39 @@
+// Copyright 2016 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+
+// gRPC Prometheus monitoring interceptors for client-side gRPC.
+
+package grpc_prometheus
+
+import (
+	prom "github.com/prometheus/client_golang/prometheus"
+)
+
+var (
+	// DefaultClientMetrics is the default instance of ClientMetrics. It is
+	// intended to be used in conjunction with the default Prometheus metrics
+	// registry.
+	DefaultClientMetrics = NewClientMetrics()
+
+	// UnaryClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Unary RPCs.
+	UnaryClientInterceptor = DefaultClientMetrics.UnaryClientInterceptor()
+
+	// StreamClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Streaming RPCs.
+	StreamClientInterceptor = DefaultClientMetrics.StreamClientInterceptor()
+)
+
+func init() {
+	prom.MustRegister(DefaultClientMetrics.clientStartedCounter)
+	prom.MustRegister(DefaultClientMetrics.clientHandledCounter)
+	prom.MustRegister(DefaultClientMetrics.clientStreamMsgReceived)
+	prom.MustRegister(DefaultClientMetrics.clientStreamMsgSent)
+}
+
+// EnableClientHandlingTimeHistogram turns on recording of handling time of
+// RPCs. Histogram metrics can be very expensive for Prometheus to retain and
+// query. This function acts on the DefaultClientMetrics variable and the
+// default Prometheus metrics registry.
+func EnableClientHandlingTimeHistogram(opts ...HistogramOption) {
+	DefaultClientMetrics.EnableClientHandlingTimeHistogram(opts...)
+	prom.Register(DefaultClientMetrics.clientHandledHistogram)
+}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_metrics.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_metrics.go
new file mode 100644
index 0000000..9b476f9
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_metrics.go
@@ -0,0 +1,170 @@
+package grpc_prometheus
+
+import (
+	"io"
+
+	prom "github.com/prometheus/client_golang/prometheus"
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+// ClientMetrics represents a collection of metrics to be registered on a
+// Prometheus metrics registry for a gRPC client.
+type ClientMetrics struct {
+	clientStartedCounter          *prom.CounterVec
+	clientHandledCounter          *prom.CounterVec
+	clientStreamMsgReceived       *prom.CounterVec
+	clientStreamMsgSent           *prom.CounterVec
+	clientHandledHistogramEnabled bool
+	clientHandledHistogramOpts    prom.HistogramOpts
+	clientHandledHistogram        *prom.HistogramVec
+}
+
+// NewClientMetrics returns a ClientMetrics object. Use a new instance of
+// ClientMetrics when not using the default Prometheus metrics registry, for
+// example when wanting to control which metrics are added to a registry as
+// opposed to automatically adding metrics via init functions.
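+//
+// A custom-registry setup might look like this (sketch):
+//
+//	reg := prom.NewRegistry()
+//	clientMetrics := NewClientMetrics()
+//	reg.MustRegister(clientMetrics)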
+func NewClientMetrics(counterOpts ...CounterOption) *ClientMetrics {
+	opts := counterOptions(counterOpts)
+	return &ClientMetrics{
+		clientStartedCounter: prom.NewCounterVec(
+			opts.apply(prom.CounterOpts{
+				Name: "grpc_client_started_total",
+				Help: "Total number of RPCs started on the client.",
+			}), []string{"grpc_type", "grpc_service", "grpc_method"}),
+
+		clientHandledCounter: prom.NewCounterVec(
+			opts.apply(prom.CounterOpts{
+				Name: "grpc_client_handled_total",
+				Help: "Total number of RPCs completed by the client, regardless of success or failure.",
+			}), []string{"grpc_type", "grpc_service", "grpc_method", "grpc_code"}),
+
+		clientStreamMsgReceived: prom.NewCounterVec(
+			opts.apply(prom.CounterOpts{
+				Name: "grpc_client_msg_received_total",
+				Help: "Total number of RPC stream messages received by the client.",
+			}), []string{"grpc_type", "grpc_service", "grpc_method"}),
+
+		clientStreamMsgSent: prom.NewCounterVec(
+			opts.apply(prom.CounterOpts{
+				Name: "grpc_client_msg_sent_total",
+				Help: "Total number of gRPC stream messages sent by the client.",
+			}), []string{"grpc_type", "grpc_service", "grpc_method"}),
+
+		clientHandledHistogramEnabled: false,
+		clientHandledHistogramOpts: prom.HistogramOpts{
+			Name:    "grpc_client_handling_seconds",
+			Help:    "Histogram of response latency (seconds) of the gRPC until it is finished by the application.",
+			Buckets: prom.DefBuckets,
+		},
+		clientHandledHistogram: nil,
+	}
+}
+
+// Describe sends the super-set of all possible descriptors of metrics
+// collected by this Collector to the provided channel and returns once
+// the last descriptor has been sent.
+func (m *ClientMetrics) Describe(ch chan<- *prom.Desc) {
+	m.clientStartedCounter.Describe(ch)
+	m.clientHandledCounter.Describe(ch)
+	m.clientStreamMsgReceived.Describe(ch)
+	m.clientStreamMsgSent.Describe(ch)
+	if m.clientHandledHistogramEnabled {
+		m.clientHandledHistogram.Describe(ch)
+	}
+}
+
+// Collect is called by the Prometheus registry when collecting
+// metrics. The implementation sends each collected metric via the
+// provided channel and returns once the last metric has been sent.
+func (m *ClientMetrics) Collect(ch chan<- prom.Metric) {
+	m.clientStartedCounter.Collect(ch)
+	m.clientHandledCounter.Collect(ch)
+	m.clientStreamMsgReceived.Collect(ch)
+	m.clientStreamMsgSent.Collect(ch)
+	if m.clientHandledHistogramEnabled {
+		m.clientHandledHistogram.Collect(ch)
+	}
+}
+
+// EnableClientHandlingTimeHistogram turns on recording of handling time of RPCs.
+// Histogram metrics can be very expensive for Prometheus to retain and query.
+func (m *ClientMetrics) EnableClientHandlingTimeHistogram(opts ...HistogramOption) {
+	for _, o := range opts {
+		o(&m.clientHandledHistogramOpts)
+	}
+	if !m.clientHandledHistogramEnabled {
+		m.clientHandledHistogram = prom.NewHistogramVec(
+			m.clientHandledHistogramOpts,
+			[]string{"grpc_type", "grpc_service", "grpc_method"},
+		)
+	}
+	m.clientHandledHistogramEnabled = true
+}
+
+// UnaryClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Unary RPCs.
+func (m *ClientMetrics) UnaryClientInterceptor() func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+	return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+		monitor := newClientReporter(m, Unary, method)
+		monitor.SentMessage()
+		err := invoker(ctx, method, req, reply, cc, opts...)
+		// Count the reply as received only on success; on error no reply
+		// message is delivered for a unary RPC.
+		if err == nil {
+			monitor.ReceivedMessage()
+		}
+		st, _ := status.FromError(err)
+		monitor.Handled(st.Code())
+		return err
+	}
+}
+
+// StreamClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Streaming RPCs.
+func (m *ClientMetrics) StreamClientInterceptor() func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
+	return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
+		monitor := newClientReporter(m, clientStreamType(desc), method)
+		clientStream, err := streamer(ctx, desc, cc, method, opts...)
+		if err != nil {
+			st, _ := status.FromError(err)
+			monitor.Handled(st.Code())
+			return nil, err
+		}
+		return &monitoredClientStream{clientStream, monitor}, nil
+	}
+}
+
+func clientStreamType(desc *grpc.StreamDesc) grpcType {
+	if desc.ClientStreams && !desc.ServerStreams {
+		return ClientStream
+	} else if !desc.ClientStreams && desc.ServerStreams {
+		return ServerStream
+	}
+	return BidiStream
+}
+
+// monitoredClientStream wraps grpc.ClientStream allowing each Sent/Recv of message to increment counters.
+type monitoredClientStream struct {
+	grpc.ClientStream
+	monitor *clientReporter
+}
+
+func (s *monitoredClientStream) SendMsg(m interface{}) error {
+	err := s.ClientStream.SendMsg(m)
+	if err == nil {
+		s.monitor.SentMessage()
+	}
+	return err
+}
+
+func (s *monitoredClientStream) RecvMsg(m interface{}) error {
+	err := s.ClientStream.RecvMsg(m)
+	if err == nil {
+		s.monitor.ReceivedMessage()
+	} else if err == io.EOF {
+		s.monitor.Handled(codes.OK)
+	} else {
+		st, _ := status.FromError(err)
+		s.monitor.Handled(st.Code())
+	}
+	return err
+}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_reporter.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_reporter.go
new file mode 100644
index 0000000..cbf1532
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_reporter.go
@@ -0,0 +1,46 @@
+// Copyright 2016 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+
+package grpc_prometheus
+
+import (
+	"time"
+
+	"google.golang.org/grpc/codes"
+)
+
+type clientReporter struct {
+	metrics     *ClientMetrics
+	rpcType     grpcType
+	serviceName string
+	methodName  string
+	startTime   time.Time
+}
+
+func newClientReporter(m *ClientMetrics, rpcType grpcType, fullMethod string) *clientReporter {
+	r := &clientReporter{
+		metrics: m,
+		rpcType: rpcType,
+	}
+	if r.metrics.clientHandledHistogramEnabled {
+		r.startTime = time.Now()
+	}
+	r.serviceName, r.methodName = splitMethodName(fullMethod)
+	r.metrics.clientStartedCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
+	return r
+}
+
+func (r *clientReporter) ReceivedMessage() {
+	r.metrics.clientStreamMsgReceived.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
+}
+
+func (r *clientReporter) SentMessage() {
+	r.metrics.clientStreamMsgSent.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
+}
+
+func (r *clientReporter) Handled(code codes.Code) {
+	r.metrics.clientHandledCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName, code.String()).Inc()
+	if r.metrics.clientHandledHistogramEnabled {
+		r.metrics.clientHandledHistogram.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Observe(time.Since(r.startTime).Seconds())
+	}
+}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/makefile b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/makefile
new file mode 100644
index 0000000..74c0842
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/makefile
@@ -0,0 +1,16 @@
+SHELL="/bin/bash"
+
+GOFILES_NOVENDOR = $(shell go list ./... | grep -v /vendor/)
+
+all: vet fmt test
+
+fmt:
+	go fmt $(GOFILES_NOVENDOR)
+
+vet:
+	go vet $(GOFILES_NOVENDOR)
+
+test: vet
+	./scripts/test_all.sh
+
+.PHONY: all vet fmt test
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/metric_options.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/metric_options.go
new file mode 100644
index 0000000..9d51aec
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/metric_options.go
@@ -0,0 +1,41 @@
+package grpc_prometheus
+
+import (
+	prom "github.com/prometheus/client_golang/prometheus"
+)
+
+// A CounterOption lets you add options to Counter metrics using With* funcs.
+type CounterOption func(*prom.CounterOpts)
+
+type counterOptions []CounterOption
+
+func (co counterOptions) apply(o prom.CounterOpts) prom.CounterOpts {
+	for _, f := range co {
+		f(&o)
+	}
+	return o
+}
+
+// WithConstLabels allows you to add ConstLabels to Counter metrics.
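+// For example (sketch): NewServerMetrics(WithConstLabels(prom.Labels{"app": "my_app"})).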
+func WithConstLabels(labels prom.Labels) CounterOption {
+	return func(o *prom.CounterOpts) {
+		o.ConstLabels = labels
+	}
+}
+
+// A HistogramOption lets you add options to Histogram metrics using With*
+// funcs.
+type HistogramOption func(*prom.HistogramOpts)
+
+// WithHistogramBuckets allows you to specify custom bucket ranges for histograms if EnableHandlingTimeHistogram is on.
+func WithHistogramBuckets(buckets []float64) HistogramOption {
+	return func(o *prom.HistogramOpts) { o.Buckets = buckets }
+}
+
+// WithHistogramConstLabels allows you to add custom ConstLabels to
+// histograms metrics.
+func WithHistogramConstLabels(labels prom.Labels) HistogramOption {
+	return func(o *prom.HistogramOpts) {
+		o.ConstLabels = labels
+	}
+}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server.go
new file mode 100644
index 0000000..322f990
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server.go
@@ -0,0 +1,48 @@
+// Copyright 2016 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+
+// gRPC Prometheus monitoring interceptors for server-side gRPC.
+
+package grpc_prometheus
+
+import (
+	prom "github.com/prometheus/client_golang/prometheus"
+	"google.golang.org/grpc"
+)
+
+var (
+	// DefaultServerMetrics is the default instance of ServerMetrics. It is
+	// intended to be used in conjunction with the default Prometheus metrics
+	// registry.
+	DefaultServerMetrics = NewServerMetrics()
+
+	// UnaryServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Unary RPCs.
+	UnaryServerInterceptor = DefaultServerMetrics.UnaryServerInterceptor()
+
+	// StreamServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Streaming RPCs.
+	StreamServerInterceptor = DefaultServerMetrics.StreamServerInterceptor()
+)
+
+func init() {
+	prom.MustRegister(DefaultServerMetrics.serverStartedCounter)
+	prom.MustRegister(DefaultServerMetrics.serverHandledCounter)
+	prom.MustRegister(DefaultServerMetrics.serverStreamMsgReceived)
+	prom.MustRegister(DefaultServerMetrics.serverStreamMsgSent)
+}
+
+// Register takes a gRPC server and pre-initializes all counters to 0. This
+// allows for easier monitoring in Prometheus (no missing metrics), and should
+// be called *after* all services have been registered with the server. This
+// function acts on the DefaultServerMetrics variable.
+func Register(server *grpc.Server) {
+	DefaultServerMetrics.InitializeMetrics(server)
+}
+
+// EnableHandlingTimeHistogram turns on recording of handling time
+// of RPCs. Histogram metrics can be very expensive for Prometheus
+// to retain and query. This function acts on the DefaultServerMetrics
+// variable and the default Prometheus metrics registry.
+func EnableHandlingTimeHistogram(opts ...HistogramOption) {
+	DefaultServerMetrics.EnableHandlingTimeHistogram(opts...)
+	prom.Register(DefaultServerMetrics.serverHandledHistogram)
+}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_metrics.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_metrics.go
new file mode 100644
index 0000000..5b1467e
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_metrics.go
@@ -0,0 +1,185 @@
+package grpc_prometheus
+
+import (
+	prom "github.com/prometheus/client_golang/prometheus"
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/status"
+)
+
+// ServerMetrics represents a collection of metrics to be registered on a
+// Prometheus metrics registry for a gRPC server.
+type ServerMetrics struct {
+	serverStartedCounter          *prom.CounterVec
+	serverHandledCounter          *prom.CounterVec
+	serverStreamMsgReceived       *prom.CounterVec
+	serverStreamMsgSent           *prom.CounterVec
+	serverHandledHistogramEnabled bool
+	serverHandledHistogramOpts    prom.HistogramOpts
+	serverHandledHistogram        *prom.HistogramVec
+}
+
+// NewServerMetrics returns a ServerMetrics object. Use a new instance of
+// ServerMetrics when not using the default Prometheus metrics registry, for
+// example when wanting to control which metrics are added to a registry as
+// opposed to automatically adding metrics via init functions.
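+//
+// A custom-registry setup might look like this (sketch):
+//
+//	reg := prom.NewRegistry()
+//	serverMetrics := NewServerMetrics()
+//	reg.MustRegister(serverMetrics)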
+func NewServerMetrics(counterOpts ...CounterOption) *ServerMetrics {
+	opts := counterOptions(counterOpts)
+	return &ServerMetrics{
+		serverStartedCounter: prom.NewCounterVec(
+			opts.apply(prom.CounterOpts{
+				Name: "grpc_server_started_total",
+				Help: "Total number of RPCs started on the server.",
+			}), []string{"grpc_type", "grpc_service", "grpc_method"}),
+		serverHandledCounter: prom.NewCounterVec(
+			opts.apply(prom.CounterOpts{
+				Name: "grpc_server_handled_total",
+				Help: "Total number of RPCs completed on the server, regardless of success or failure.",
+			}), []string{"grpc_type", "grpc_service", "grpc_method", "grpc_code"}),
+		serverStreamMsgReceived: prom.NewCounterVec(
+			opts.apply(prom.CounterOpts{
+				Name: "grpc_server_msg_received_total",
+				Help: "Total number of RPC stream messages received on the server.",
+			}), []string{"grpc_type", "grpc_service", "grpc_method"}),
+		serverStreamMsgSent: prom.NewCounterVec(
+			opts.apply(prom.CounterOpts{
+				Name: "grpc_server_msg_sent_total",
+				Help: "Total number of gRPC stream messages sent by the server.",
+			}), []string{"grpc_type", "grpc_service", "grpc_method"}),
+		serverHandledHistogramEnabled: false,
+		serverHandledHistogramOpts: prom.HistogramOpts{
+			Name:    "grpc_server_handling_seconds",
+			Help:    "Histogram of response latency (seconds) of gRPC that had been application-level handled by the server.",
+			Buckets: prom.DefBuckets,
+		},
+		serverHandledHistogram: nil,
+	}
+}
+
+// EnableHandlingTimeHistogram enables histograms being registered when
+// registering the ServerMetrics on a Prometheus registry. Histograms can be
+// expensive on Prometheus servers. It accepts options to configure the
+// histogram, such as the bucket boundaries.
+func (m *ServerMetrics) EnableHandlingTimeHistogram(opts ...HistogramOption) {
+	for _, o := range opts {
+		o(&m.serverHandledHistogramOpts)
+	}
+	if !m.serverHandledHistogramEnabled {
+		m.serverHandledHistogram = prom.NewHistogramVec(
+			m.serverHandledHistogramOpts,
+			[]string{"grpc_type", "grpc_service", "grpc_method"},
+		)
+	}
+	m.serverHandledHistogramEnabled = true
+}
+
+// Describe sends the super-set of all possible descriptors of metrics
+// collected by this Collector to the provided channel and returns once
+// the last descriptor has been sent.
+func (m *ServerMetrics) Describe(ch chan<- *prom.Desc) {
+	m.serverStartedCounter.Describe(ch)
+	m.serverHandledCounter.Describe(ch)
+	m.serverStreamMsgReceived.Describe(ch)
+	m.serverStreamMsgSent.Describe(ch)
+	if m.serverHandledHistogramEnabled {
+		m.serverHandledHistogram.Describe(ch)
+	}
+}
+
+// Collect is called by the Prometheus registry when collecting
+// metrics. The implementation sends each collected metric via the
+// provided channel and returns once the last metric has been sent.
+func (m *ServerMetrics) Collect(ch chan<- prom.Metric) {
+	m.serverStartedCounter.Collect(ch)
+	m.serverHandledCounter.Collect(ch)
+	m.serverStreamMsgReceived.Collect(ch)
+	m.serverStreamMsgSent.Collect(ch)
+	if m.serverHandledHistogramEnabled {
+		m.serverHandledHistogram.Collect(ch)
+	}
+}
+
+// UnaryServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Unary RPCs.
+func (m *ServerMetrics) UnaryServerInterceptor() func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
+	return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
+		monitor := newServerReporter(m, Unary, info.FullMethod)
+		monitor.ReceivedMessage()
+		resp, err := handler(ctx, req)
+		st, _ := status.FromError(err)
+		monitor.Handled(st.Code())
+		if err == nil {
+			monitor.SentMessage()
+		}
+		return resp, err
+	}
+}
+
+// StreamServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Streaming RPCs.
+func (m *ServerMetrics) StreamServerInterceptor() func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+	return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+		monitor := newServerReporter(m, streamRPCType(info), info.FullMethod)
+		err := handler(srv, &monitoredServerStream{ss, monitor})
+		st, _ := status.FromError(err)
+		monitor.Handled(st.Code())
+		return err
+	}
+}
+
+// InitializeMetrics initializes all metrics, with their appropriate null
+// value, for all gRPC methods registered on a gRPC server. This is useful to
+// ensure that all metrics exist when collecting and querying.
+func (m *ServerMetrics) InitializeMetrics(server *grpc.Server) {
+	serviceInfo := server.GetServiceInfo()
+	for serviceName, info := range serviceInfo {
+		for _, mInfo := range info.Methods {
+			preRegisterMethod(m, serviceName, &mInfo)
+		}
+	}
+}
+
+func streamRPCType(info *grpc.StreamServerInfo) grpcType {
+	if info.IsClientStream && !info.IsServerStream {
+		return ClientStream
+	} else if !info.IsClientStream && info.IsServerStream {
+		return ServerStream
+	}
+	return BidiStream
+}
+
+// monitoredServerStream wraps grpc.ServerStream allowing each Sent/Recv of message to increment counters.
+type monitoredServerStream struct {
+	grpc.ServerStream
+	monitor *serverReporter
+}
+
+func (s *monitoredServerStream) SendMsg(m interface{}) error {
+	err := s.ServerStream.SendMsg(m)
+	if err == nil {
+		s.monitor.SentMessage()
+	}
+	return err
+}
+
+func (s *monitoredServerStream) RecvMsg(m interface{}) error {
+	err := s.ServerStream.RecvMsg(m)
+	if err == nil {
+		s.monitor.ReceivedMessage()
+	}
+	return err
+}
+
+// preRegisterMethod is invoked on Register of a Server, allowing all gRPC service labels to be pre-populated.
+func preRegisterMethod(metrics *ServerMetrics, serviceName string, mInfo *grpc.MethodInfo) {
+	methodName := mInfo.Name
+	methodType := string(typeFromMethodInfo(mInfo))
+	// These are just references (no increments), as just referencing will create the labels but not set values.
+	metrics.serverStartedCounter.GetMetricWithLabelValues(methodType, serviceName, methodName)
+	metrics.serverStreamMsgReceived.GetMetricWithLabelValues(methodType, serviceName, methodName)
+	metrics.serverStreamMsgSent.GetMetricWithLabelValues(methodType, serviceName, methodName)
+	if metrics.serverHandledHistogramEnabled {
+		metrics.serverHandledHistogram.GetMetricWithLabelValues(methodType, serviceName, methodName)
+	}
+	for _, code := range allCodes {
+		metrics.serverHandledCounter.GetMetricWithLabelValues(methodType, serviceName, methodName, code.String())
+	}
+}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_reporter.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_reporter.go
new file mode 100644
index 0000000..aa9db54
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_reporter.go
@@ -0,0 +1,46 @@
+// Copyright 2016 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+
+package grpc_prometheus
+
+import (
+	"time"
+
+	"google.golang.org/grpc/codes"
+)
+
+type serverReporter struct {
+	metrics     *ServerMetrics
+	rpcType     grpcType
+	serviceName string
+	methodName  string
+	startTime   time.Time
+}
+
+func newServerReporter(m *ServerMetrics, rpcType grpcType, fullMethod string) *serverReporter {
+	r := &serverReporter{
+		metrics: m,
+		rpcType: rpcType,
+	}
+	if r.metrics.serverHandledHistogramEnabled {
+		r.startTime = time.Now()
+	}
+	r.serviceName, r.methodName = splitMethodName(fullMethod)
+	r.metrics.serverStartedCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
+	return r
+}
+
+func (r *serverReporter) ReceivedMessage() {
+	r.metrics.serverStreamMsgReceived.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
+}
+
+func (r *serverReporter) SentMessage() {
+	r.metrics.serverStreamMsgSent.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
+}
+
+func (r *serverReporter) Handled(code codes.Code) {
+	r.metrics.serverHandledCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName, code.String()).Inc()
+	if r.metrics.serverHandledHistogramEnabled {
+		r.metrics.serverHandledHistogram.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Observe(time.Since(r.startTime).Seconds())
+	}
+}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/util.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/util.go
new file mode 100644
index 0000000..7987de3
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/util.go
@@ -0,0 +1,50 @@
+// Copyright 2016 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+
+package grpc_prometheus
+
+import (
+	"strings"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+)
+
+type grpcType string
+
+const (
+	Unary        grpcType = "unary"
+	ClientStream grpcType = "client_stream"
+	ServerStream grpcType = "server_stream"
+	BidiStream   grpcType = "bidi_stream"
+)
+
+var (
+	allCodes = []codes.Code{
+		codes.OK, codes.Canceled, codes.Unknown, codes.InvalidArgument, codes.DeadlineExceeded, codes.NotFound,
+		codes.AlreadyExists, codes.PermissionDenied, codes.Unauthenticated, codes.ResourceExhausted,
+		codes.FailedPrecondition, codes.Aborted, codes.OutOfRange, codes.Unimplemented, codes.Internal,
+		codes.Unavailable, codes.DataLoss,
+	}
+)
+
+func splitMethodName(fullMethodName string) (string, string) {
+	fullMethodName = strings.TrimPrefix(fullMethodName, "/") // remove leading slash
+	if i := strings.Index(fullMethodName, "/"); i >= 0 {
+		return fullMethodName[:i], fullMethodName[i+1:]
+	}
+	return "unknown", "unknown"
+}
+
+func typeFromMethodInfo(mInfo *grpc.MethodInfo) grpcType {
+	if !mInfo.IsClientStream && !mInfo.IsServerStream {
+		return Unary
+	}
+	if mInfo.IsClientStream && !mInfo.IsServerStream {
+		return ClientStream
+	}
+	if !mInfo.IsClientStream && mInfo.IsServerStream {
+		return ServerStream
+	}
+	return BidiStream
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt b/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt
new file mode 100644
index 0000000..3645162
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt
@@ -0,0 +1,27 @@
+Copyright (c) 2015, Gengo, Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    * Redistributions of source code must retain the above copyright notice,
+      this list of conditions and the following disclaimer.
+
+    * Redistributions in binary form must reproduce the above copyright notice,
+      this list of conditions and the following disclaimer in the documentation
+      and/or other materials provided with the distribution.
+
+    * Neither the name of Gengo, Inc. nor the names of its
+      contributors may be used to endorse or promote products derived from this
+      software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel
new file mode 100644
index 0000000..9fce044
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel
@@ -0,0 +1,83 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(default_visibility = ["//visibility:public"])
+
+go_library(
+    name = "go_default_library",
+    srcs = [
+        "context.go",
+        "convert.go",
+        "doc.go",
+        "errors.go",
+        "handler.go",
+        "marshal_json.go",
+        "marshal_jsonpb.go",
+        "marshal_proto.go",
+        "marshaler.go",
+        "marshaler_registry.go",
+        "mux.go",
+        "pattern.go",
+        "proto2_convert.go",
+        "proto_errors.go",
+        "query.go",
+    ],
+    importpath = "github.com/grpc-ecosystem/grpc-gateway/runtime",
+    deps = [
+        "//runtime/internal:go_default_library",
+        "//utilities:go_default_library",
+        "@com_github_golang_protobuf//jsonpb:go_default_library",
+        "@com_github_golang_protobuf//proto:go_default_library",
+        "@com_github_golang_protobuf//ptypes:go_default_library",
+        "@com_github_golang_protobuf//ptypes/any:go_default_library",
+        "@com_github_golang_protobuf//ptypes/duration:go_default_library",
+        "@com_github_golang_protobuf//ptypes/timestamp:go_default_library",
+        "@org_golang_google_grpc//codes:go_default_library",
+        "@org_golang_google_grpc//grpclog:go_default_library",
+        "@org_golang_google_grpc//metadata:go_default_library",
+        "@org_golang_google_grpc//status:go_default_library",
+    ],
+)
+
+go_test(
+    name = "go_default_test",
+    size = "small",
+    srcs = ["pattern_test.go"],
+    embed = [":go_default_library"],
+    deps = ["//utilities:go_default_library"],
+)
+
+go_test(
+    name = "go_default_xtest",
+    size = "small",
+    srcs = [
+        "context_test.go",
+        "errors_test.go",
+        "handler_test.go",
+        "marshal_json_test.go",
+        "marshal_jsonpb_test.go",
+        "marshal_proto_test.go",
+        "marshaler_registry_test.go",
+        "mux_test.go",
+        "query_test.go",
+    ],
+    deps = [
+        ":go_default_library",
+        "//examples/proto/examplepb:go_default_library",
+        "//runtime/internal:go_default_library",
+        "//utilities:go_default_library",
+        "@com_github_golang_protobuf//jsonpb:go_default_library",
+        "@com_github_golang_protobuf//proto:go_default_library",
+        "@com_github_golang_protobuf//ptypes:go_default_library",
+        "@com_github_golang_protobuf//ptypes/duration:go_default_library",
+        "@com_github_golang_protobuf//ptypes/empty:go_default_library",
+        "@com_github_golang_protobuf//ptypes/struct:go_default_library",
+        "@com_github_golang_protobuf//ptypes/timestamp:go_default_library",
+        "@com_github_golang_protobuf//ptypes/wrappers:go_default_library",
+        "@org_golang_google_genproto//protobuf/field_mask:go_default_library",
+        "@org_golang_google_genproto//googleapis/rpc/errdetails:go_default_library",
+        "@org_golang_google_grpc//:go_default_library",
+        "@org_golang_google_grpc//codes:go_default_library",
+        "@org_golang_google_grpc//metadata:go_default_library",
+        "@org_golang_google_grpc//status:go_default_library",
+    ],
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go
new file mode 100644
index 0000000..896057e
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go
@@ -0,0 +1,210 @@
+package runtime
+
+import (
+	"context"
+	"encoding/base64"
+	"fmt"
+	"net"
+	"net/http"
+	"net/textproto"
+	"strconv"
+	"strings"
+	"time"
+
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/status"
+)
+
+// MetadataHeaderPrefix is the http prefix that represents custom metadata
+// parameters to or from a gRPC call.
+const MetadataHeaderPrefix = "Grpc-Metadata-"
+
+// MetadataPrefix is prepended to permanent HTTP header keys (as specified
+// by the IANA) when added to the gRPC context.
+const MetadataPrefix = "grpcgateway-"
+
+// MetadataTrailerPrefix is prepended to gRPC metadata as it is converted to
+// HTTP headers in a response handled by grpc-gateway
+const MetadataTrailerPrefix = "Grpc-Trailer-"
+
+const metadataGrpcTimeout = "Grpc-Timeout"
+const metadataHeaderBinarySuffix = "-Bin"
+
+const xForwardedFor = "X-Forwarded-For"
+const xForwardedHost = "X-Forwarded-Host"
+
+var (
+	// DefaultContextTimeout is used for gRPC call context.WithTimeout whenever a Grpc-Timeout inbound
+	// header isn't present. If the value is 0 the sent `context` will not have a timeout.
+	DefaultContextTimeout = 0 * time.Second
+)
+
+func decodeBinHeader(v string) ([]byte, error) {
+	if len(v)%4 == 0 {
+		// Input was padded, or padding was not necessary.
+		return base64.StdEncoding.DecodeString(v)
+	}
+	return base64.RawStdEncoding.DecodeString(v)
+}
+
+/*
+AnnotateContext adds context information such as metadata from the request.
+
+At a minimum, the RemoteAddr is included in the fashion of "X-Forwarded-For",
+except that the forwarded destination is not another HTTP service but rather
+a gRPC service.
+*/
+func AnnotateContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, error) {
+	var pairs []string
+	timeout := DefaultContextTimeout
+	if tm := req.Header.Get(metadataGrpcTimeout); tm != "" {
+		var err error
+		timeout, err = timeoutDecode(tm)
+		if err != nil {
+			return nil, status.Errorf(codes.InvalidArgument, "invalid grpc-timeout: %s", tm)
+		}
+	}
+
+	for key, vals := range req.Header {
+		for _, val := range vals {
+			key = textproto.CanonicalMIMEHeaderKey(key)
+			// For backwards-compatibility, pass through 'authorization' header with no prefix.
+			if key == "Authorization" {
+				pairs = append(pairs, "authorization", val)
+			}
+			if h, ok := mux.incomingHeaderMatcher(key); ok {
+				// Handles "-bin" metadata in grpc, since grpc will do another base64
+				// encode before sending to server, we need to decode it first.
+				if strings.HasSuffix(key, metadataHeaderBinarySuffix) {
+					b, err := decodeBinHeader(val)
+					if err != nil {
+						return nil, status.Errorf(codes.InvalidArgument, "invalid binary header %s: %s", key, err)
+					}
+
+					val = string(b)
+				}
+				pairs = append(pairs, h, val)
+			}
+		}
+	}
+	if host := req.Header.Get(xForwardedHost); host != "" {
+		pairs = append(pairs, strings.ToLower(xForwardedHost), host)
+	} else if req.Host != "" {
+		pairs = append(pairs, strings.ToLower(xForwardedHost), req.Host)
+	}
+
+	if addr := req.RemoteAddr; addr != "" {
+		if remoteIP, _, err := net.SplitHostPort(addr); err == nil {
+			if fwd := req.Header.Get(xForwardedFor); fwd == "" {
+				pairs = append(pairs, strings.ToLower(xForwardedFor), remoteIP)
+			} else {
+				pairs = append(pairs, strings.ToLower(xForwardedFor), fmt.Sprintf("%s, %s", fwd, remoteIP))
+			}
+		} else {
+			grpclog.Infof("invalid remote addr: %s", addr)
+		}
+	}
+
+	if timeout != 0 {
+		ctx, _ = context.WithTimeout(ctx, timeout)
+	}
+	if len(pairs) == 0 {
+		return ctx, nil
+	}
+	md := metadata.Pairs(pairs...)
+	for _, mda := range mux.metadataAnnotators {
+		md = metadata.Join(md, mda(ctx, req))
+	}
+	return metadata.NewOutgoingContext(ctx, md), nil
+}
+
+// ServerMetadata consists of metadata sent from gRPC server.
+type ServerMetadata struct {
+	HeaderMD  metadata.MD
+	TrailerMD metadata.MD
+}
+
+type serverMetadataKey struct{}
+
+// NewServerMetadataContext creates a new context with ServerMetadata
+func NewServerMetadataContext(ctx context.Context, md ServerMetadata) context.Context {
+	return context.WithValue(ctx, serverMetadataKey{}, md)
+}
+
+// ServerMetadataFromContext returns the ServerMetadata in ctx
+func ServerMetadataFromContext(ctx context.Context) (md ServerMetadata, ok bool) {
+	md, ok = ctx.Value(serverMetadataKey{}).(ServerMetadata)
+	return
+}
+
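+// timeoutDecode parses a gRPC wire-format timeout value such as "100m"
+// (100 milliseconds) or "5S" (5 seconds): an integer followed by a single
+// unit character, mapped to a time.Duration by timeoutUnitToDuration.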
+func timeoutDecode(s string) (time.Duration, error) {
+	size := len(s)
+	if size < 2 {
+		return 0, fmt.Errorf("timeout string is too short: %q", s)
+	}
+	d, ok := timeoutUnitToDuration(s[size-1])
+	if !ok {
+		return 0, fmt.Errorf("timeout unit is not recognized: %q", s)
+	}
+	t, err := strconv.ParseInt(s[:size-1], 10, 64)
+	if err != nil {
+		return 0, err
+	}
+	return d * time.Duration(t), nil
+}
+
+func timeoutUnitToDuration(u uint8) (d time.Duration, ok bool) {
+	switch u {
+	case 'H':
+		return time.Hour, true
+	case 'M':
+		return time.Minute, true
+	case 'S':
+		return time.Second, true
+	case 'm':
+		return time.Millisecond, true
+	case 'u':
+		return time.Microsecond, true
+	case 'n':
+		return time.Nanosecond, true
+	default:
+	}
+	return
+}
+
+// isPermanentHTTPHeader checks whether hdr belongs to the list of
+// permanent request headers maintained by IANA.
+// http://www.iana.org/assignments/message-headers/message-headers.xml
+func isPermanentHTTPHeader(hdr string) bool {
+	switch hdr {
+	case
+		"Accept",
+		"Accept-Charset",
+		"Accept-Language",
+		"Accept-Ranges",
+		"Authorization",
+		"Cache-Control",
+		"Content-Type",
+		"Cookie",
+		"Date",
+		"Expect",
+		"From",
+		"Host",
+		"If-Match",
+		"If-Modified-Since",
+		"If-None-Match",
+		"If-Schedule-Tag-Match",
+		"If-Unmodified-Since",
+		"Max-Forwards",
+		"Origin",
+		"Pragma",
+		"Referer",
+		"User-Agent",
+		"Via",
+		"Warning":
+		return true
+	}
+	return false
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go
new file mode 100644
index 0000000..bd54ba1
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go
@@ -0,0 +1,254 @@
+package runtime
+
+import (
+	"encoding/base64"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/golang/protobuf/jsonpb"
+	"github.com/golang/protobuf/ptypes/duration"
+	"github.com/golang/protobuf/ptypes/timestamp"
+)
+
+// String just returns the given string.
+// It exists only for compatibility with the other converter types.
+func String(val string) (string, error) {
+	return val, nil
+}
+
+// StringSlice converts 'val' where individual strings are separated by
+// 'sep' into a string slice.
+func StringSlice(val, sep string) ([]string, error) {
+	return strings.Split(val, sep), nil
+}
+
+// Bool converts the given string representation of a boolean value into bool.
+func Bool(val string) (bool, error) {
+	return strconv.ParseBool(val)
+}
+
+// BoolSlice converts 'val' where individual booleans are separated by
+// 'sep' into a bool slice.
+func BoolSlice(val, sep string) ([]bool, error) {
+	s := strings.Split(val, sep)
+	values := make([]bool, len(s))
+	for i, v := range s {
+		value, err := Bool(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Float64 converts the given string representation of a floating point number into float64.
+func Float64(val string) (float64, error) {
+	return strconv.ParseFloat(val, 64)
+}
+
+// Float64Slice converts 'val' where individual floating point numbers are separated by
+// 'sep' into a float64 slice.
+func Float64Slice(val, sep string) ([]float64, error) {
+	s := strings.Split(val, sep)
+	values := make([]float64, len(s))
+	for i, v := range s {
+		value, err := Float64(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Float32 converts the given string representation of a floating point number into float32.
+func Float32(val string) (float32, error) {
+	f, err := strconv.ParseFloat(val, 32)
+	if err != nil {
+		return 0, err
+	}
+	return float32(f), nil
+}
+
+// Float32Slice converts 'val' where individual floating point numbers are separated by
+// 'sep' into a float32 slice.
+func Float32Slice(val, sep string) ([]float32, error) {
+	s := strings.Split(val, sep)
+	values := make([]float32, len(s))
+	for i, v := range s {
+		value, err := Float32(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Int64 converts the given string representation of an integer into int64.
+func Int64(val string) (int64, error) {
+	return strconv.ParseInt(val, 0, 64)
+}
+
+// Int64Slice converts 'val' where individual integers are separated by
+// 'sep' into an int64 slice.
+func Int64Slice(val, sep string) ([]int64, error) {
+	s := strings.Split(val, sep)
+	values := make([]int64, len(s))
+	for i, v := range s {
+		value, err := Int64(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Int32 converts the given string representation of an integer into int32.
+func Int32(val string) (int32, error) {
+	i, err := strconv.ParseInt(val, 0, 32)
+	if err != nil {
+		return 0, err
+	}
+	return int32(i), nil
+}
+
+// Int32Slice converts 'val' where individual integers are separated by
+// 'sep' into an int32 slice.
+func Int32Slice(val, sep string) ([]int32, error) {
+	s := strings.Split(val, sep)
+	values := make([]int32, len(s))
+	for i, v := range s {
+		value, err := Int32(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Uint64 converts the given string representation of an integer into uint64.
+func Uint64(val string) (uint64, error) {
+	return strconv.ParseUint(val, 0, 64)
+}
+
+// Uint64Slice converts 'val' where individual integers are separated by
+// 'sep' into a uint64 slice.
+func Uint64Slice(val, sep string) ([]uint64, error) {
+	s := strings.Split(val, sep)
+	values := make([]uint64, len(s))
+	for i, v := range s {
+		value, err := Uint64(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Uint32 converts the given string representation of an integer into uint32.
+func Uint32(val string) (uint32, error) {
+	i, err := strconv.ParseUint(val, 0, 32)
+	if err != nil {
+		return 0, err
+	}
+	return uint32(i), nil
+}
+
+// Uint32Slice converts 'val' where individual integers are separated by
+// 'sep' into a uint32 slice.
+func Uint32Slice(val, sep string) ([]uint32, error) {
+	s := strings.Split(val, sep)
+	values := make([]uint32, len(s))
+	for i, v := range s {
+		value, err := Uint32(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Bytes converts the given string representation of a byte sequence into a slice of bytes.
+// The byte sequence may be encoded in standard or URL-safe base64.
+func Bytes(val string) ([]byte, error) {
+	b, err := base64.StdEncoding.DecodeString(val)
+	if err != nil {
+		b, err = base64.URLEncoding.DecodeString(val)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return b, nil
+}
+
+// BytesSlice converts 'val' where individual byte sequences, encoded in standard or
+// URL-safe base64, are separated by 'sep' into a slice of byte slices.
+func BytesSlice(val, sep string) ([][]byte, error) {
+	s := strings.Split(val, sep)
+	values := make([][]byte, len(s))
+	for i, v := range s {
+		value, err := Bytes(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Timestamp converts the given RFC3339 formatted string into a timestamp.Timestamp.
+func Timestamp(val string) (*timestamp.Timestamp, error) {
+	var r timestamp.Timestamp
+	err := jsonpb.UnmarshalString(val, &r)
+	return &r, err
+}
+
+// Duration converts the given string into a duration.Duration.
+func Duration(val string) (*duration.Duration, error) {
+	var r duration.Duration
+	err := jsonpb.UnmarshalString(val, &r)
+	return &r, err
+}
+
+// Enum converts the given string into an int32 that should be type-cast to the
+// correct enum proto type.
+func Enum(val string, enumValMap map[string]int32) (int32, error) {
+	e, ok := enumValMap[val]
+	if ok {
+		return e, nil
+	}
+
+	i, err := Int32(val)
+	if err != nil {
+		return 0, fmt.Errorf("%s is not valid", val)
+	}
+	for _, v := range enumValMap {
+		if v == i {
+			return i, nil
+		}
+	}
+	return 0, fmt.Errorf("%s is not valid", val)
+}
+
+// EnumSlice converts 'val' where individual enums are separated by 'sep'
+// into an int32 slice. Each individual int32 should be type-cast to the
+// correct enum proto type.
+func EnumSlice(val, sep string, enumValMap map[string]int32) ([]int32, error) {
+	s := strings.Split(val, sep)
+	values := make([]int32, len(s))
+	for i, v := range s {
+		value, err := Enum(v, enumValMap)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
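
These converters are what generated gateway stubs call to decode path and query parameters; they can also be used directly. A small sketch (import path as vendored here):

```go
package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
)

func main() {
	// Comma-separated repeated parameters, as they appear in query strings.
	ids, err := runtime.Int32Slice("1,2,3", ",")
	fmt.Println(ids, err) // [1 2 3] <nil>

	// Bytes accepts standard or URL-safe base64.
	b, err := runtime.Bytes("aGVsbG8=")
	fmt.Println(string(b), err) // hello <nil>
}
```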
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go
new file mode 100644
index 0000000..b6e5ddf
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go
@@ -0,0 +1,5 @@
+/*
+Package runtime contains runtime helper functions used by
+servers which protoc-gen-grpc-gateway generates.
+*/
+package runtime
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go
new file mode 100644
index 0000000..b39f55e
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go
@@ -0,0 +1,136 @@
+package runtime
+
+import (
+	"context"
+	"io"
+	"net/http"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/ptypes/any"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/status"
+)
+
+// HTTPStatusFromCode converts a gRPC error code into the corresponding HTTP response status.
+// See: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto
+func HTTPStatusFromCode(code codes.Code) int {
+	switch code {
+	case codes.OK:
+		return http.StatusOK
+	case codes.Canceled:
+		return http.StatusRequestTimeout
+	case codes.Unknown:
+		return http.StatusInternalServerError
+	case codes.InvalidArgument:
+		return http.StatusBadRequest
+	case codes.DeadlineExceeded:
+		return http.StatusGatewayTimeout
+	case codes.NotFound:
+		return http.StatusNotFound
+	case codes.AlreadyExists:
+		return http.StatusConflict
+	case codes.PermissionDenied:
+		return http.StatusForbidden
+	case codes.Unauthenticated:
+		return http.StatusUnauthorized
+	case codes.ResourceExhausted:
+		return http.StatusTooManyRequests
+	case codes.FailedPrecondition:
+		return http.StatusPreconditionFailed
+	case codes.Aborted:
+		return http.StatusConflict
+	case codes.OutOfRange:
+		return http.StatusBadRequest
+	case codes.Unimplemented:
+		return http.StatusNotImplemented
+	case codes.Internal:
+		return http.StatusInternalServerError
+	case codes.Unavailable:
+		return http.StatusServiceUnavailable
+	case codes.DataLoss:
+		return http.StatusInternalServerError
+	}
+
+	grpclog.Infof("Unknown gRPC error code: %v", code)
+	return http.StatusInternalServerError
+}
+
+var (
+	// HTTPError replies to the request with the error.
+	// You can assign a custom function to this variable to customize the error format.
+	HTTPError = DefaultHTTPError
+	// OtherErrorHandler handles the following errors used by the gateway: StatusMethodNotAllowed, StatusNotFound, and StatusBadRequest.
+	OtherErrorHandler = DefaultOtherErrorHandler
+)
+
+type errorBody struct {
+	Error   string     `protobuf:"bytes,1,name=error" json:"error"`
+	// This is to make the error more compatible with users that expect errors to be Status objects:
+	// https://github.com/grpc/grpc/blob/master/src/proto/grpc/status/status.proto
+	// It should be the exact same message as the Error field.
+	Message string     `protobuf:"bytes,1,name=message" json:"message"`
+	Code    int32      `protobuf:"varint,2,name=code" json:"code"`
+	Details []*any.Any `protobuf:"bytes,3,rep,name=details" json:"details,omitempty"`
+}
+
+// Make this also conform to proto.Message for builtin JSONPb Marshaler
+func (e *errorBody) Reset()         { *e = errorBody{} }
+func (e *errorBody) String() string { return proto.CompactTextString(e) }
+func (*errorBody) ProtoMessage()    {}
+
+// DefaultHTTPError is the default implementation of HTTPError.
+// If "err" is an error from gRPC system, the function replies with the status code mapped by HTTPStatusFromCode.
+// If otherwise, it replies with http.StatusInternalServerError.
+//
+// The response body returned by this function is a JSON object,
+// which contains a member whose key is "error" and whose value is err.Error().
+func DefaultHTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) {
+	const fallback = `{"error": "failed to marshal error message"}`
+
+	w.Header().Del("Trailer")
+	w.Header().Set("Content-Type", marshaler.ContentType())
+
+	s, ok := status.FromError(err)
+	if !ok {
+		s = status.New(codes.Unknown, err.Error())
+	}
+
+	body := &errorBody{
+		Error:   s.Message(),
+		Message: s.Message(),
+		Code:    int32(s.Code()),
+		Details: s.Proto().GetDetails(),
+	}
+
+	buf, merr := marshaler.Marshal(body)
+	if merr != nil {
+		grpclog.Infof("Failed to marshal error message %q: %v", body, merr)
+		w.WriteHeader(http.StatusInternalServerError)
+		if _, err := io.WriteString(w, fallback); err != nil {
+			grpclog.Infof("Failed to write response: %v", err)
+		}
+		return
+	}
+
+	md, ok := ServerMetadataFromContext(ctx)
+	if !ok {
+		grpclog.Infof("Failed to extract ServerMetadata from context")
+	}
+
+	handleForwardResponseServerMetadata(w, mux, md)
+	handleForwardResponseTrailerHeader(w, md)
+	st := HTTPStatusFromCode(s.Code())
+	w.WriteHeader(st)
+	if _, err := w.Write(buf); err != nil {
+		grpclog.Infof("Failed to write response: %v", err)
+	}
+
+	handleForwardResponseTrailer(w, md)
+}
+
+// DefaultOtherErrorHandler is the default implementation of OtherErrorHandler.
+// It simply writes a string representation of the given error into "w".
+func DefaultOtherErrorHandler(w http.ResponseWriter, _ *http.Request, msg string, code int) {
+	http.Error(w, msg, code)
+}
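
Since HTTPError is a package-level variable, an application can install its own formatter before serving. A minimal sketch; the JSON shape and the package name gatewayconfig are illustrative, not part of the library:

```go
package gatewayconfig // hypothetical helper package

import (
	"context"
	"fmt"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"google.golang.org/grpc/status"
)

// InstallErrorFormatter replaces the default error formatter with one that
// emits only a "message" member, reusing HTTPStatusFromCode for the status.
func InstallErrorFormatter() {
	runtime.HTTPError = func(ctx context.Context, mux *runtime.ServeMux, m runtime.Marshaler,
		w http.ResponseWriter, r *http.Request, err error) {
		s, _ := status.FromError(err) // yields an Unknown status for non-gRPC errors
		w.Header().Set("Content-Type", m.ContentType())
		w.WriteHeader(runtime.HTTPStatusFromCode(s.Code()))
		fmt.Fprintf(w, `{"message": %q}`, s.Message())
	}
}
```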
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go
new file mode 100644
index 0000000..8ad9d76
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go
@@ -0,0 +1,206 @@
+package runtime
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"net/http"
+	"net/textproto"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/ptypes/any"
+	"github.com/grpc-ecosystem/grpc-gateway/runtime/internal"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/status"
+)
+
+// ForwardResponseStream forwards the stream from the gRPC server to the REST client.
+func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
+	f, ok := w.(http.Flusher)
+	if !ok {
+		grpclog.Infof("Flush not supported in %T", w)
+		http.Error(w, "unexpected type of web server", http.StatusInternalServerError)
+		return
+	}
+
+	md, ok := ServerMetadataFromContext(ctx)
+	if !ok {
+		grpclog.Infof("Failed to extract ServerMetadata from context")
+		http.Error(w, "unexpected error", http.StatusInternalServerError)
+		return
+	}
+	handleForwardResponseServerMetadata(w, mux, md)
+
+	w.Header().Set("Transfer-Encoding", "chunked")
+	w.Header().Set("Content-Type", marshaler.ContentType())
+	if err := handleForwardResponseOptions(ctx, w, nil, opts); err != nil {
+		HTTPError(ctx, mux, marshaler, w, req, err)
+		return
+	}
+
+	var delimiter []byte
+	if d, ok := marshaler.(Delimited); ok {
+		delimiter = d.Delimiter()
+	} else {
+		delimiter = []byte("\n")
+	}
+
+	var wroteHeader bool
+	for {
+		resp, err := recv()
+		if err == io.EOF {
+			return
+		}
+		if err != nil {
+			handleForwardResponseStreamError(wroteHeader, marshaler, w, err)
+			return
+		}
+		if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil {
+			handleForwardResponseStreamError(wroteHeader, marshaler, w, err)
+			return
+		}
+
+		buf, err := marshaler.Marshal(streamChunk(resp, nil))
+		if err != nil {
+			grpclog.Infof("Failed to marshal response chunk: %v", err)
+			handleForwardResponseStreamError(wroteHeader, marshaler, w, err)
+			return
+		}
+		if _, err = w.Write(buf); err != nil {
+			grpclog.Infof("Failed to send response chunk: %v", err)
+			return
+		}
+		wroteHeader = true
+		if _, err = w.Write(delimiter); err != nil {
+			grpclog.Infof("Failed to send delimiter chunk: %v", err)
+			return
+		}
+		f.Flush()
+	}
+}
+
+func handleForwardResponseServerMetadata(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) {
+	for k, vs := range md.HeaderMD {
+		if h, ok := mux.outgoingHeaderMatcher(k); ok {
+			for _, v := range vs {
+				w.Header().Add(h, v)
+			}
+		}
+	}
+}
+
+func handleForwardResponseTrailerHeader(w http.ResponseWriter, md ServerMetadata) {
+	for k := range md.TrailerMD {
+		tKey := textproto.CanonicalMIMEHeaderKey(fmt.Sprintf("%s%s", MetadataTrailerPrefix, k))
+		w.Header().Add("Trailer", tKey)
+	}
+}
+
+func handleForwardResponseTrailer(w http.ResponseWriter, md ServerMetadata) {
+	for k, vs := range md.TrailerMD {
+		tKey := fmt.Sprintf("%s%s", MetadataTrailerPrefix, k)
+		for _, v := range vs {
+			w.Header().Add(tKey, v)
+		}
+	}
+}
+
+// responseBody is implemented by response types that expose a dedicated field to
+// marshal into the HTTP response body. The method is generated for a response struct
+// from the value of `response_body` in the `google.api.HttpRule`.
+type responseBody interface {
+	XXX_ResponseBody() interface{}
+}
+
+// ForwardResponseMessage forwards the message "resp" from the gRPC server to the REST client.
+func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
+	md, ok := ServerMetadataFromContext(ctx)
+	if !ok {
+		grpclog.Infof("Failed to extract ServerMetadata from context")
+	}
+
+	handleForwardResponseServerMetadata(w, mux, md)
+	handleForwardResponseTrailerHeader(w, md)
+	w.Header().Set("Content-Type", marshaler.ContentType())
+	if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil {
+		HTTPError(ctx, mux, marshaler, w, req, err)
+		return
+	}
+	var buf []byte
+	var err error
+	if rb, ok := resp.(responseBody); ok {
+		buf, err = marshaler.Marshal(rb.XXX_ResponseBody())
+	} else {
+		buf, err = marshaler.Marshal(resp)
+	}
+	if err != nil {
+		grpclog.Infof("Marshal error: %v", err)
+		HTTPError(ctx, mux, marshaler, w, req, err)
+		return
+	}
+
+	if _, err = w.Write(buf); err != nil {
+		grpclog.Infof("Failed to write response: %v", err)
+	}
+
+	handleForwardResponseTrailer(w, md)
+}
+
+func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, resp proto.Message, opts []func(context.Context, http.ResponseWriter, proto.Message) error) error {
+	if len(opts) == 0 {
+		return nil
+	}
+	for _, opt := range opts {
+		if err := opt(ctx, w, resp); err != nil {
+			grpclog.Infof("Error handling ForwardResponseOptions: %v", err)
+			return err
+		}
+	}
+	return nil
+}
+
+func handleForwardResponseStreamError(wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, err error) {
+	buf, merr := marshaler.Marshal(streamChunk(nil, err))
+	if merr != nil {
+		grpclog.Infof("Failed to marshal an error: %v", merr)
+		return
+	}
+	if !wroteHeader {
+		s, ok := status.FromError(err)
+		if !ok {
+			s = status.New(codes.Unknown, err.Error())
+		}
+		w.WriteHeader(HTTPStatusFromCode(s.Code()))
+	}
+	if _, werr := w.Write(buf); werr != nil {
+		grpclog.Infof("Failed to notify error to client: %v", werr)
+		return
+	}
+}
+
+func streamChunk(result proto.Message, err error) map[string]proto.Message {
+	if err != nil {
+		grpcCode := codes.Unknown
+		grpcMessage := err.Error()
+		var grpcDetails []*any.Any
+		if s, ok := status.FromError(err); ok {
+			grpcCode = s.Code()
+			grpcMessage = s.Message()
+			grpcDetails = s.Proto().GetDetails()
+		}
+		httpCode := HTTPStatusFromCode(grpcCode)
+		return map[string]proto.Message{
+			"error": &internal.StreamError{
+				GrpcCode:   int32(grpcCode),
+				HttpCode:   int32(httpCode),
+				Message:    grpcMessage,
+				HttpStatus: http.StatusText(httpCode),
+				Details:    grpcDetails,
+			},
+		}
+	}
+	if result == nil {
+		return streamChunk(nil, fmt.Errorf("empty response"))
+	}
+	return map[string]proto.Message{"result": result}
+}
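
With the default JSONPb marshaler (OrigName: true, see marshaler_registry.go) and the default newline delimiter, a client of a streaming endpoint therefore sees one JSON object per line, wrapping either a result or a StreamError. Illustrative payloads (field values invented for the example):

```json
{"result": {"name": "projects/1"}}
{"error": {"grpc_code": 5, "http_code": 404, "message": "not found", "http_status": "Not Found"}}
```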
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/internal/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/internal/BUILD.bazel
new file mode 100644
index 0000000..937e660
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/internal/BUILD.bazel
@@ -0,0 +1,23 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
+
+package(default_visibility = ["//runtime:__subpackages__"])
+
+proto_library(
+    name = "internal_proto",
+    srcs = ["stream_chunk.proto"],
+    deps = ["@com_google_protobuf//:any_proto"],
+)
+
+go_proto_library(
+    name = "internal_go_proto",
+    importpath = "github.com/grpc-ecosystem/grpc-gateway/runtime/internal",
+    proto = ":internal_proto",
+    deps = ["@com_github_golang_protobuf//ptypes/any:go_default_library"],
+)
+
+go_library(
+    name = "go_default_library",
+    embed = [":internal_go_proto"],
+    importpath = "github.com/grpc-ecosystem/grpc-gateway/runtime/internal",
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/internal/stream_chunk.proto b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/internal/stream_chunk.proto
new file mode 100644
index 0000000..55f42ce
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/internal/stream_chunk.proto
@@ -0,0 +1,15 @@
+syntax = "proto3";
+package grpc.gateway.runtime;
+option go_package = "internal";
+
+import "google/protobuf/any.proto";
+
+// StreamError is a response type which is returned when a
+// streaming RPC returns an error.
+message StreamError {
+	int32 grpc_code = 1;
+	int32 http_code = 2;
+	string message = 3;
+	string http_status = 4;
+	repeated google.protobuf.Any details = 5;
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go
new file mode 100644
index 0000000..f9d3a58
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go
@@ -0,0 +1,45 @@
+package runtime
+
+import (
+	"encoding/json"
+	"io"
+)
+
+// JSONBuiltin is a Marshaler which marshals/unmarshals into/from JSON
+// with the standard "encoding/json" package of Golang.
+// Although it is generally faster for simple proto messages than JSONPb,
+// it does not support advanced protobuf features such as maps and oneofs.
+//
+// The NewEncoder and NewDecoder methods return *json.Encoder and
+// *json.Decoder respectively.
+type JSONBuiltin struct{}
+
+// ContentType always returns "application/json".
+func (*JSONBuiltin) ContentType() string {
+	return "application/json"
+}
+
+// Marshal marshals "v" into JSON
+func (j *JSONBuiltin) Marshal(v interface{}) ([]byte, error) {
+	return json.Marshal(v)
+}
+
+// Unmarshal unmarshals JSON data into "v".
+func (j *JSONBuiltin) Unmarshal(data []byte, v interface{}) error {
+	return json.Unmarshal(data, v)
+}
+
+// NewDecoder returns a Decoder which reads JSON stream from "r".
+func (j *JSONBuiltin) NewDecoder(r io.Reader) Decoder {
+	return json.NewDecoder(r)
+}
+
+// NewEncoder returns an Encoder which writes JSON stream into "w".
+func (j *JSONBuiltin) NewEncoder(w io.Writer) Encoder {
+	return json.NewEncoder(w)
+}
+
+// Delimiter returns the record separator for newline-delimited JSON streams.
+func (j *JSONBuiltin) Delimiter() []byte {
+	return []byte("\n")
+}
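
A quick sketch of JSONBuiltin on a plain Go value (no proto message involved):

```go
package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
)

func main() {
	var m runtime.JSONBuiltin
	buf, err := m.Marshal(map[string]int{"a": 1})
	fmt.Println(string(buf), err) // {"a":1} <nil>
}
```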
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go
new file mode 100644
index 0000000..f56072a
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go
@@ -0,0 +1,203 @@
+package runtime
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"reflect"
+
+	"github.com/golang/protobuf/jsonpb"
+	"github.com/golang/protobuf/proto"
+)
+
+// JSONPb is a Marshaler which marshals/unmarshals into/from JSON
+// with the "github.com/golang/protobuf/jsonpb".
+// Unlike JSONBuiltin, it supports the full functionality of protobuf.
+//
+// The NewDecoder method returns a DecoderWrapper, so the underlying
+// *json.Decoder methods can be used.
+type JSONPb jsonpb.Marshaler
+
+// ContentType always returns "application/json".
+func (*JSONPb) ContentType() string {
+	return "application/json"
+}
+
+// Marshal marshals "v" into JSON.
+func (j *JSONPb) Marshal(v interface{}) ([]byte, error) {
+	if _, ok := v.(proto.Message); !ok {
+		return j.marshalNonProtoField(v)
+	}
+
+	var buf bytes.Buffer
+	if err := j.marshalTo(&buf, v); err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
+
+func (j *JSONPb) marshalTo(w io.Writer, v interface{}) error {
+	p, ok := v.(proto.Message)
+	if !ok {
+		buf, err := j.marshalNonProtoField(v)
+		if err != nil {
+			return err
+		}
+		_, err = w.Write(buf)
+		return err
+	}
+	return (*jsonpb.Marshaler)(j).Marshal(w, p)
+}
+
+// marshalNonProtoField marshals a non-message field of a protobuf message.
+// This function does not correctly marshal arbitrary data structures into JSON;
+// but it is only capable of marshaling non-message field values of protobuf,
+// i.e. primitive types, enums; pointers to primitives or enums; maps from
+// integer/string types to primitives/enums/pointers to messages.
+func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) {
+	if v == nil {
+		return []byte("null"), nil
+	}
+	rv := reflect.ValueOf(v)
+	for rv.Kind() == reflect.Ptr {
+		if rv.IsNil() {
+			return []byte("null"), nil
+		}
+		rv = rv.Elem()
+	}
+
+	if rv.Kind() == reflect.Map {
+		m := make(map[string]*json.RawMessage)
+		for _, k := range rv.MapKeys() {
+			buf, err := j.Marshal(rv.MapIndex(k).Interface())
+			if err != nil {
+				return nil, err
+			}
+			m[fmt.Sprintf("%v", k.Interface())] = (*json.RawMessage)(&buf)
+		}
+		if j.Indent != "" {
+			return json.MarshalIndent(m, "", j.Indent)
+		}
+		return json.Marshal(m)
+	}
+	if enum, ok := rv.Interface().(protoEnum); ok && !j.EnumsAsInts {
+		return json.Marshal(enum.String())
+	}
+	return json.Marshal(rv.Interface())
+}
+
+// Unmarshal unmarshals JSON "data" into "v"
+func (j *JSONPb) Unmarshal(data []byte, v interface{}) error {
+	return unmarshalJSONPb(data, v)
+}
+
+// NewDecoder returns a Decoder which reads JSON stream from "r".
+func (j *JSONPb) NewDecoder(r io.Reader) Decoder {
+	d := json.NewDecoder(r)
+	return DecoderWrapper{Decoder: d}
+}
+
+// DecoderWrapper is a wrapper around a *json.Decoder that adds
+// support for protos to the Decode method.
+type DecoderWrapper struct {
+	*json.Decoder
+}
+
+// Decode wraps the embedded decoder's Decode method to support
+// protos using a jsonpb.Unmarshaler.
+func (d DecoderWrapper) Decode(v interface{}) error {
+	return decodeJSONPb(d.Decoder, v)
+}
+
+// NewEncoder returns an Encoder which writes JSON stream into "w".
+func (j *JSONPb) NewEncoder(w io.Writer) Encoder {
+	return EncoderFunc(func(v interface{}) error { return j.marshalTo(w, v) })
+}
+
+func unmarshalJSONPb(data []byte, v interface{}) error {
+	d := json.NewDecoder(bytes.NewReader(data))
+	return decodeJSONPb(d, v)
+}
+
+func decodeJSONPb(d *json.Decoder, v interface{}) error {
+	p, ok := v.(proto.Message)
+	if !ok {
+		return decodeNonProtoField(d, v)
+	}
+	unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: true}
+	return unmarshaler.UnmarshalNext(d, p)
+}
+
+func decodeNonProtoField(d *json.Decoder, v interface{}) error {
+	rv := reflect.ValueOf(v)
+	if rv.Kind() != reflect.Ptr {
+		return fmt.Errorf("%T is not a pointer", v)
+	}
+	for rv.Kind() == reflect.Ptr {
+		if rv.IsNil() {
+			rv.Set(reflect.New(rv.Type().Elem()))
+		}
+		if rv.Type().ConvertibleTo(typeProtoMessage) {
+			unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: true}
+			return unmarshaler.UnmarshalNext(d, rv.Interface().(proto.Message))
+		}
+		rv = rv.Elem()
+	}
+	if rv.Kind() == reflect.Map {
+		if rv.IsNil() {
+			rv.Set(reflect.MakeMap(rv.Type()))
+		}
+		conv, ok := convFromType[rv.Type().Key().Kind()]
+		if !ok {
+			return fmt.Errorf("unsupported type of map field key: %v", rv.Type().Key())
+		}
+
+		m := make(map[string]*json.RawMessage)
+		if err := d.Decode(&m); err != nil {
+			return err
+		}
+		for k, v := range m {
+			result := conv.Call([]reflect.Value{reflect.ValueOf(k)})
+			if err := result[1].Interface(); err != nil {
+				return err.(error)
+			}
+			bk := result[0]
+			bv := reflect.New(rv.Type().Elem())
+			if err := unmarshalJSONPb([]byte(*v), bv.Interface()); err != nil {
+				return err
+			}
+			rv.SetMapIndex(bk, bv.Elem())
+		}
+		return nil
+	}
+	if _, ok := rv.Interface().(protoEnum); ok {
+		var repr interface{}
+		if err := d.Decode(&repr); err != nil {
+			return err
+		}
+		switch repr.(type) {
+		case string:
+			// TODO(yugui) Should use proto.StructProperties?
+			return fmt.Errorf("unmarshaling of symbolic enum %q not supported: %T", repr, rv.Interface())
+		case float64:
+			rv.Set(reflect.ValueOf(int32(repr.(float64))).Convert(rv.Type()))
+			return nil
+		default:
+			return fmt.Errorf("cannot assign %#v into Go type %T", repr, rv.Interface())
+		}
+	}
+	return d.Decode(v)
+}
+
+type protoEnum interface {
+	fmt.Stringer
+	EnumDescriptor() ([]byte, []int)
+}
+
+var typeProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem()
+
+// Delimiter returns the record separator for newline-delimited JSON streams.
+func (j *JSONPb) Delimiter() []byte {
+	return []byte("\n")
+}
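
Because JSONPb is just a renamed jsonpb.Marshaler, its struct fields configure the output directly. A small sketch using a non-proto value, which routes through marshalNonProtoField:

```go
package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
)

func main() {
	// Fields come straight from jsonpb.Marshaler.
	m := &runtime.JSONPb{OrigName: true, EmitDefaults: true, Indent: "  "}
	// A plain map falls back to encoding/json via marshalNonProtoField.
	buf, err := m.Marshal(map[string]string{"greeting": "hello"})
	fmt.Println(string(buf), err)
}
```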
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go
new file mode 100644
index 0000000..f65d1a2
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go
@@ -0,0 +1,62 @@
+package runtime
+
+import (
+	"errors"
+	"io"
+	"io/ioutil"
+
+	"github.com/golang/protobuf/proto"
+)
+
+// ProtoMarshaller is a Marshaller which marshals/unmarshals into/from serialized proto bytes.
+type ProtoMarshaller struct{}
+
+// ContentType always returns "application/octet-stream".
+func (*ProtoMarshaller) ContentType() string {
+	return "application/octet-stream"
+}
+
+// Marshal marshals "value" into Proto
+func (*ProtoMarshaller) Marshal(value interface{}) ([]byte, error) {
+	message, ok := value.(proto.Message)
+	if !ok {
+		return nil, errors.New("unable to marshal non proto field")
+	}
+	return proto.Marshal(message)
+}
+
+// Unmarshal unmarshals proto "data" into "value"
+func (*ProtoMarshaller) Unmarshal(data []byte, value interface{}) error {
+	message, ok := value.(proto.Message)
+	if !ok {
+		return errors.New("unable to unmarshal non proto field")
+	}
+	return proto.Unmarshal(data, message)
+}
+
+// NewDecoder returns a Decoder which reads proto stream from "reader".
+func (marshaller *ProtoMarshaller) NewDecoder(reader io.Reader) Decoder {
+	return DecoderFunc(func(value interface{}) error {
+		buffer, err := ioutil.ReadAll(reader)
+		if err != nil {
+			return err
+		}
+		return marshaller.Unmarshal(buffer, value)
+	})
+}
+
+// NewEncoder returns an Encoder which writes proto stream into "writer".
+func (marshaller *ProtoMarshaller) NewEncoder(writer io.Writer) Encoder {
+	return EncoderFunc(func(value interface{}) error {
+		buffer, err := marshaller.Marshal(value)
+		if err != nil {
+			return err
+		}
+		_, err = writer.Write(buffer)
+		if err != nil {
+			return err
+		}
+
+		return nil
+	})
+}
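
A round-trip sketch with ProtoMarshaller, using the well-known Duration message (aliased durpb here) as a stand-in for any generated type:

```go
package main

import (
	"fmt"

	durpb "github.com/golang/protobuf/ptypes/duration"
	"github.com/grpc-ecosystem/grpc-gateway/runtime"
)

func main() {
	var m runtime.ProtoMarshaller
	in := &durpb.Duration{Seconds: 3}

	// Marshal to raw proto bytes, then back into a fresh message.
	buf, err := m.Marshal(in)
	if err != nil {
		panic(err)
	}
	out := &durpb.Duration{}
	if err := m.Unmarshal(buf, out); err != nil {
		panic(err)
	}
	fmt.Println(out.Seconds) // 3
}
```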
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go
new file mode 100644
index 0000000..98fe6e8
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go
@@ -0,0 +1,48 @@
+package runtime
+
+import (
+	"io"
+)
+
+// Marshaler defines a conversion between byte sequence and gRPC payloads / fields.
+type Marshaler interface {
+	// Marshal marshals "v" into byte sequence.
+	Marshal(v interface{}) ([]byte, error)
+	// Unmarshal unmarshals "data" into "v".
+	// "v" must be a pointer value.
+	Unmarshal(data []byte, v interface{}) error
+	// NewDecoder returns a Decoder which reads byte sequence from "r".
+	NewDecoder(r io.Reader) Decoder
+	// NewEncoder returns an Encoder which writes a byte sequence into "w".
+	NewEncoder(w io.Writer) Encoder
+	// ContentType returns the Content-Type which this marshaler is responsible for.
+	ContentType() string
+}
+
+// Decoder decodes a byte sequence
+type Decoder interface {
+	Decode(v interface{}) error
+}
+
+// Encoder encodes gRPC payloads / fields into byte sequence.
+type Encoder interface {
+	Encode(v interface{}) error
+}
+
+// DecoderFunc adapts a decoder function into a Decoder.
+type DecoderFunc func(v interface{}) error
+
+// Decode delegates invocations to the underlying function itself.
+func (f DecoderFunc) Decode(v interface{}) error { return f(v) }
+
+// EncoderFunc adapts an encoder function into an Encoder.
+type EncoderFunc func(v interface{}) error
+
+// Encode delegates invocations to the underlying function itself.
+func (f EncoderFunc) Encode(v interface{}) error { return f(v) }
+
+// Delimited defines the streaming delimiter.
+type Delimited interface {
+	// Delimiter returns the record separator for the stream.
+	Delimiter() []byte
+}
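
The Func adapters mirror http.HandlerFunc: any closure with the right signature becomes an Encoder or Decoder without declaring a new type. A minimal sketch:

```go
package main

import (
	"fmt"
	"os"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
)

func main() {
	// Adapt a closure into an Encoder; Encode delegates to the closure.
	enc := runtime.EncoderFunc(func(v interface{}) error {
		_, err := fmt.Fprintln(os.Stdout, v)
		return err
	})
	_ = enc.Encode("hello") // prints "hello"
}
```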
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go
new file mode 100644
index 0000000..5cc53ae
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go
@@ -0,0 +1,91 @@
+package runtime
+
+import (
+	"errors"
+	"net/http"
+)
+
+// MIMEWildcard is the fallback MIME type used for requests which do not match
+// a registered MIME type.
+const MIMEWildcard = "*"
+
+var (
+	acceptHeader      = http.CanonicalHeaderKey("Accept")
+	contentTypeHeader = http.CanonicalHeaderKey("Content-Type")
+
+	defaultMarshaler = &JSONPb{OrigName: true}
+)
+
+// MarshalerForRequest returns the inbound/outbound marshalers for this request.
+// It checks the registry on the ServeMux for the MIME types set by the Accept
+// and Content-Type headers; when several values are present, the first one with
+// an exact match in the registry wins. If no Content-Type value matches (or none
+// is set), the inbound marshaler falls back to the "*" entry; if no Accept value
+// matches, the outbound marshaler defaults to the inbound one.
+func MarshalerForRequest(mux *ServeMux, r *http.Request) (inbound Marshaler, outbound Marshaler) {
+	for _, acceptVal := range r.Header[acceptHeader] {
+		if m, ok := mux.marshalers.mimeMap[acceptVal]; ok {
+			outbound = m
+			break
+		}
+	}
+
+	for _, contentTypeVal := range r.Header[contentTypeHeader] {
+		if m, ok := mux.marshalers.mimeMap[contentTypeVal]; ok {
+			inbound = m
+			break
+		}
+	}
+
+	if inbound == nil {
+		inbound = mux.marshalers.mimeMap[MIMEWildcard]
+	}
+	if outbound == nil {
+		outbound = inbound
+	}
+
+	return inbound, outbound
+}
+
+// marshalerRegistry is a mapping from MIME types to Marshalers.
+type marshalerRegistry struct {
+	mimeMap map[string]Marshaler
+}
+
+// add adds a marshaler for a case-sensitive MIME type string ("*" to match any
+// MIME type).
+func (m marshalerRegistry) add(mime string, marshaler Marshaler) error {
+	if len(mime) == 0 {
+		return errors.New("empty MIME type")
+	}
+
+	m.mimeMap[mime] = marshaler
+
+	return nil
+}
+
+// makeMarshalerMIMERegistry returns a new registry of marshalers.
+// It allows for a mapping of case-sensitive Content-Type MIME type string to runtime.Marshaler interfaces.
+//
+// For example, you could allow the client to specify the use of the runtime.JSONPb marshaler
+// with a "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler
+// with a "application/json" Content-Type.
+// "*" can be used to match any Content-Type.
+// This can be attached to a ServeMux with WithMarshalerOption.
+func makeMarshalerMIMERegistry() marshalerRegistry {
+	return marshalerRegistry{
+		mimeMap: map[string]Marshaler{
+			MIMEWildcard: defaultMarshaler,
+		},
+	}
+}
+
+// WithMarshalerOption returns a ServeMuxOption which associates inbound and outbound
+// Marshalers to a MIME type in mux.
+func WithMarshalerOption(mime string, marshaler Marshaler) ServeMuxOption {
+	return func(mux *ServeMux) {
+		if err := mux.marshalers.add(mime, marshaler); err != nil {
+			panic(err)
+		}
+	}
+}
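
A sketch of registering an additional marshaler at mux construction; the MIME type string must match the request's Content-Type exactly:

```go
package main

import "github.com/grpc-ecosystem/grpc-gateway/runtime"

func main() {
	// Raw protobuf for clients that send "application/octet-stream";
	// the "*" wildcard keeps the default JSONPb marshaler for everyone else.
	mux := runtime.NewServeMux(
		runtime.WithMarshalerOption("application/octet-stream", &runtime.ProtoMarshaller{}),
	)
	_ = mux
}
```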
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go
new file mode 100644
index 0000000..463084a
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go
@@ -0,0 +1,258 @@
+package runtime
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"strings"
+
+	"github.com/golang/protobuf/proto"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/status"
+)
+
+// A HandlerFunc handles a specific pair of path pattern and HTTP method.
+type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string)
+
+// ServeMux is a request multiplexer for grpc-gateway.
+// It matches http requests to patterns and invokes the corresponding handler.
+type ServeMux struct {
+	// handlers maps HTTP method to a list of handlers.
+	handlers               map[string][]handler
+	forwardResponseOptions []func(context.Context, http.ResponseWriter, proto.Message) error
+	marshalers             marshalerRegistry
+	incomingHeaderMatcher  HeaderMatcherFunc
+	outgoingHeaderMatcher  HeaderMatcherFunc
+	metadataAnnotators     []func(context.Context, *http.Request) metadata.MD
+	protoErrorHandler      ProtoErrorHandlerFunc
+}
+
+// ServeMuxOption is an option that can be given to a ServeMux on construction.
+type ServeMuxOption func(*ServeMux)
+
+// WithForwardResponseOption returns a ServeMuxOption representing the forwardResponseOption.
+//
+// forwardResponseOption is an option that will be called on the relevant context.Context,
+// http.ResponseWriter, and proto.Message before every forwarded response.
+//
+// The message may be nil in the case where just a header is being sent.
+func WithForwardResponseOption(forwardResponseOption func(context.Context, http.ResponseWriter, proto.Message) error) ServeMuxOption {
+	return func(serveMux *ServeMux) {
+		serveMux.forwardResponseOptions = append(serveMux.forwardResponseOptions, forwardResponseOption)
+	}
+}
+
+// HeaderMatcherFunc checks whether a header key should be forwarded to/from gRPC context.
+type HeaderMatcherFunc func(string) (string, bool)
+
+// DefaultHeaderMatcher is used to pass http request headers to/from gRPC context. This adds permanent HTTP header
+// keys (as specified by the IANA) to the gRPC context with the grpcgateway- prefix. HTTP headers that start with
+// 'Grpc-Metadata-' are mapped to gRPC metadata after removing the prefix 'Grpc-Metadata-'.
+func DefaultHeaderMatcher(key string) (string, bool) {
+	if isPermanentHTTPHeader(key) {
+		return MetadataPrefix + key, true
+	} else if strings.HasPrefix(key, MetadataHeaderPrefix) {
+		return key[len(MetadataHeaderPrefix):], true
+	}
+	return "", false
+}
+
+// WithIncomingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for incoming request to gateway.
+//
+// This matcher will be called with each header in http.Request. If matcher returns true, that header will be
+// passed to gRPC context. To transform the header before passing to gRPC context, matcher should return modified header.
+func WithIncomingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
+	return func(mux *ServeMux) {
+		mux.incomingHeaderMatcher = fn
+	}
+}
+
+// WithOutgoingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for outgoing response from gateway.
+//
+// This matcher will be called with each header in response header metadata. If matcher returns true, that header will be
+// passed to http response returned from gateway. To transform the header before passing to response,
+// matcher should return modified header.
+func WithOutgoingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
+	return func(mux *ServeMux) {
+		mux.outgoingHeaderMatcher = fn
+	}
+}
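
A sketch of a custom incoming matcher that forwards one extra header (X-Request-Id is a hypothetical example) and otherwise defers to DefaultHeaderMatcher:

```go
package main

import (
	"strings"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
)

func main() {
	// Forward X-Request-Id in addition to the default header set.
	matcher := func(key string) (string, bool) {
		if strings.EqualFold(key, "X-Request-Id") {
			return key, true
		}
		return runtime.DefaultHeaderMatcher(key)
	}
	mux := runtime.NewServeMux(runtime.WithIncomingHeaderMatcher(matcher))
	_ = mux
}
```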
+
+// WithMetadata returns a ServeMuxOption for passing metadata to a gRPC context.
+//
+// This can be used by services that need to read from http.Request and modify gRPC context. A common use case
+// is reading token from cookie and adding it in gRPC context.
+func WithMetadata(annotator func(context.Context, *http.Request) metadata.MD) ServeMuxOption {
+	return func(serveMux *ServeMux) {
+		serveMux.metadataAnnotators = append(serveMux.metadataAnnotators, annotator)
+	}
+}
+
+// WithProtoErrorHandler returns a ServeMuxOption for configuring a custom error handler.
+//
+// This can be used to handle an error as general proto message defined by gRPC.
+// The response including body and status is not backward compatible with the default error handler.
+// When this option is used, HTTPError and OtherErrorHandler are overwritten on initialization.
+func WithProtoErrorHandler(fn ProtoErrorHandlerFunc) ServeMuxOption {
+	return func(serveMux *ServeMux) {
+		serveMux.protoErrorHandler = fn
+	}
+}
+
+// NewServeMux returns a new ServeMux whose internal mapping is empty.
+func NewServeMux(opts ...ServeMuxOption) *ServeMux {
+	serveMux := &ServeMux{
+		handlers:               make(map[string][]handler),
+		forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0),
+		marshalers:             makeMarshalerMIMERegistry(),
+	}
+
+	for _, opt := range opts {
+		opt(serveMux)
+	}
+
+	if serveMux.protoErrorHandler != nil {
+		HTTPError = serveMux.protoErrorHandler
+		// OtherErrorHandler is no longer used when protoErrorHandler is set.
+		// It is overwritten by a special error handler that returns Unknown.
+		OtherErrorHandler = func(w http.ResponseWriter, r *http.Request, _ string, _ int) {
+			ctx := context.Background()
+			_, outboundMarshaler := MarshalerForRequest(serveMux, r)
+			sterr := status.Error(codes.Unknown, "unexpected use of OtherErrorHandler")
+			serveMux.protoErrorHandler(ctx, serveMux, outboundMarshaler, w, r, sterr)
+		}
+	}
+
+	if serveMux.incomingHeaderMatcher == nil {
+		serveMux.incomingHeaderMatcher = DefaultHeaderMatcher
+	}
+
+	if serveMux.outgoingHeaderMatcher == nil {
+		serveMux.outgoingHeaderMatcher = func(key string) (string, bool) {
+			return fmt.Sprintf("%s%s", MetadataHeaderPrefix, key), true
+		}
+	}
+
+	return serveMux
+}
+
+// Handle associates "h" to the pair of HTTP method and path pattern.
+func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) {
+	s.handlers[meth] = append(s.handlers[meth], handler{pat: pat, h: h})
+}
+
+// ServeHTTP dispatches the request to the first handler whose pattern matches r.Method and r.URL.Path.
+func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx := r.Context()
+
+	path := r.URL.Path
+	if !strings.HasPrefix(path, "/") {
+		if s.protoErrorHandler != nil {
+			_, outboundMarshaler := MarshalerForRequest(s, r)
+			sterr := status.Error(codes.InvalidArgument, http.StatusText(http.StatusBadRequest))
+			s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr)
+		} else {
+			OtherErrorHandler(w, r, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
+		}
+		return
+	}
+
+	components := strings.Split(path[1:], "/")
+	l := len(components)
+	var verb string
+	if idx := strings.LastIndex(components[l-1], ":"); idx == 0 {
+		if s.protoErrorHandler != nil {
+			_, outboundMarshaler := MarshalerForRequest(s, r)
+			sterr := status.Error(codes.Unimplemented, http.StatusText(http.StatusNotImplemented))
+			s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr)
+		} else {
+			OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound)
+		}
+		return
+	} else if idx > 0 {
+		c := components[l-1]
+		components[l-1], verb = c[:idx], c[idx+1:]
+	}
+
+	if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && isPathLengthFallback(r) {
+		r.Method = strings.ToUpper(override)
+		if err := r.ParseForm(); err != nil {
+			if s.protoErrorHandler != nil {
+				_, outboundMarshaler := MarshalerForRequest(s, r)
+				sterr := status.Error(codes.InvalidArgument, err.Error())
+				s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr)
+			} else {
+				OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest)
+			}
+			return
+		}
+	}
+	for _, h := range s.handlers[r.Method] {
+		pathParams, err := h.pat.Match(components, verb)
+		if err != nil {
+			continue
+		}
+		h.h(w, r, pathParams)
+		return
+	}
+
+	// look up other methods to handle fallback from GET to POST and
+	// to determine if it is MethodNotAllowed or NotFound.
+	for m, handlers := range s.handlers {
+		if m == r.Method {
+			continue
+		}
+		for _, h := range handlers {
+			pathParams, err := h.pat.Match(components, verb)
+			if err != nil {
+				continue
+			}
+			// X-HTTP-Method-Override is optional. Always allow fallback to POST.
+			if isPathLengthFallback(r) {
+				if err := r.ParseForm(); err != nil {
+					if s.protoErrorHandler != nil {
+						_, outboundMarshaler := MarshalerForRequest(s, r)
+						sterr := status.Error(codes.InvalidArgument, err.Error())
+						s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr)
+					} else {
+						OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest)
+					}
+					return
+				}
+				h.h(w, r, pathParams)
+				return
+			}
+			if s.protoErrorHandler != nil {
+				_, outboundMarshaler := MarshalerForRequest(s, r)
+				sterr := status.Error(codes.Unimplemented, http.StatusText(http.StatusMethodNotAllowed))
+				s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr)
+			} else {
+				OtherErrorHandler(w, r, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed)
+			}
+			return
+		}
+	}
+
+	if s.protoErrorHandler != nil {
+		_, outboundMarshaler := MarshalerForRequest(s, r)
+		sterr := status.Error(codes.Unimplemented, http.StatusText(http.StatusNotImplemented))
+		s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr)
+	} else {
+		OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound)
+	}
+}
+
+// GetForwardResponseOptions returns the ForwardResponseOptions associated with this ServeMux.
+func (s *ServeMux) GetForwardResponseOptions() []func(context.Context, http.ResponseWriter, proto.Message) error {
+	return s.forwardResponseOptions
+}
+
+func isPathLengthFallback(r *http.Request) bool {
+	return r.Method == "POST" && r.Header.Get("Content-Type") == "application/x-www-form-urlencoded"
+}
+
+type handler struct {
+	pat Pattern
+	h   HandlerFunc
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go
new file mode 100644
index 0000000..f16a84a
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go
@@ -0,0 +1,227 @@
+package runtime
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+
+	"github.com/grpc-ecosystem/grpc-gateway/utilities"
+	"google.golang.org/grpc/grpclog"
+)
+
+var (
+	// ErrNotMatch indicates that the given HTTP request path does not match the pattern.
+	ErrNotMatch = errors.New("not match to the path pattern")
+	// ErrInvalidPattern indicates that the given definition of Pattern is not valid.
+	ErrInvalidPattern = errors.New("invalid pattern")
+)
+
+type op struct {
+	code    utilities.OpCode
+	operand int
+}
+
+// Pattern is a template pattern of http request paths defined in github.com/googleapis/googleapis/google/api/http.proto.
+type Pattern struct {
+	// ops is a list of operations
+	ops []op
+	// pool is a constant pool indexed by the operands or vars.
+	pool []string
+	// vars is a list of variable names to be bound by this pattern
+	vars []string
+	// stacksize is the max depth of the stack
+	stacksize int
+	// tailLen is the length of the fixed-size segments after a deep wildcard
+	tailLen int
+	// verb is the VERB part of the path pattern. It is empty if the pattern does not have VERB part.
+	verb string
+}
+
+// NewPattern returns a new Pattern from the given definition values.
+// "ops" is a sequence of op codes. "pool" is a constant pool.
+// "verb" is the verb part of the pattern. It is empty if the pattern does not have the part.
+// "version" must be 1 for now.
+// It returns an error if the given definition is invalid.
+func NewPattern(version int, ops []int, pool []string, verb string) (Pattern, error) {
+	if version != 1 {
+		grpclog.Infof("unsupported version: %d", version)
+		return Pattern{}, ErrInvalidPattern
+	}
+
+	l := len(ops)
+	if l%2 != 0 {
+		grpclog.Infof("odd number of ops codes: %d", l)
+		return Pattern{}, ErrInvalidPattern
+	}
+
+	var (
+		typedOps        []op
+		stack, maxstack int
+		tailLen         int
+		pushMSeen       bool
+		vars            []string
+	)
+	for i := 0; i < l; i += 2 {
+		op := op{code: utilities.OpCode(ops[i]), operand: ops[i+1]}
+		switch op.code {
+		case utilities.OpNop:
+			continue
+		case utilities.OpPush:
+			if pushMSeen {
+				tailLen++
+			}
+			stack++
+		case utilities.OpPushM:
+			if pushMSeen {
+				grpclog.Infof("pushM appears twice")
+				return Pattern{}, ErrInvalidPattern
+			}
+			pushMSeen = true
+			stack++
+		case utilities.OpLitPush:
+			if op.operand < 0 || len(pool) <= op.operand {
+				grpclog.Infof("negative literal index: %d", op.operand)
+				return Pattern{}, ErrInvalidPattern
+			}
+			if pushMSeen {
+				tailLen++
+			}
+			stack++
+		case utilities.OpConcatN:
+			if op.operand <= 0 {
+				grpclog.Infof("negative concat size: %d", op.operand)
+				return Pattern{}, ErrInvalidPattern
+			}
+			stack -= op.operand
+			if stack < 0 {
+				grpclog.Infof("stack underflow")
+				return Pattern{}, ErrInvalidPattern
+			}
+			stack++
+		case utilities.OpCapture:
+			if op.operand < 0 || len(pool) <= op.operand {
+				grpclog.Infof("variable name index out of bound: %d", op.operand)
+				return Pattern{}, ErrInvalidPattern
+			}
+			v := pool[op.operand]
+			op.operand = len(vars)
+			vars = append(vars, v)
+			stack--
+			if stack < 0 {
+				grpclog.Infof("stack underflow")
+				return Pattern{}, ErrInvalidPattern
+			}
+		default:
+			grpclog.Infof("invalid opcode: %d", op.code)
+			return Pattern{}, ErrInvalidPattern
+		}
+
+		if maxstack < stack {
+			maxstack = stack
+		}
+		typedOps = append(typedOps, op)
+	}
+	return Pattern{
+		ops:       typedOps,
+		pool:      pool,
+		vars:      vars,
+		stacksize: maxstack,
+		tailLen:   tailLen,
+		verb:      verb,
+	}, nil
+}
+
+// MustPattern is a helper function which makes it easier to call NewPattern in variable initialization.
+func MustPattern(p Pattern, err error) Pattern {
+	if err != nil {
+		grpclog.Fatalf("Pattern initialization failed: %v", err)
+	}
+	return p
+}
+
+// Match examines whether the given components match the Pattern.
+// If they match, the function returns a mapping from field paths to their captured values.
+// Otherwise, it returns an error.
+func (p Pattern) Match(components []string, verb string) (map[string]string, error) {
+	if p.verb != verb {
+		return nil, ErrNotMatch
+	}
+
+	var pos int
+	stack := make([]string, 0, p.stacksize)
+	captured := make([]string, len(p.vars))
+	l := len(components)
+	for _, op := range p.ops {
+		switch op.code {
+		case utilities.OpNop:
+			continue
+		case utilities.OpPush, utilities.OpLitPush:
+			if pos >= l {
+				return nil, ErrNotMatch
+			}
+			c := components[pos]
+			if op.code == utilities.OpLitPush {
+				if lit := p.pool[op.operand]; c != lit {
+					return nil, ErrNotMatch
+				}
+			}
+			stack = append(stack, c)
+			pos++
+		case utilities.OpPushM:
+			end := len(components)
+			if end < pos+p.tailLen {
+				return nil, ErrNotMatch
+			}
+			end -= p.tailLen
+			stack = append(stack, strings.Join(components[pos:end], "/"))
+			pos = end
+		case utilities.OpConcatN:
+			n := op.operand
+			l := len(stack) - n
+			stack = append(stack[:l], strings.Join(stack[l:], "/"))
+		case utilities.OpCapture:
+			n := len(stack) - 1
+			captured[op.operand] = stack[n]
+			stack = stack[:n]
+		}
+	}
+	if pos < l {
+		return nil, ErrNotMatch
+	}
+	bindings := make(map[string]string)
+	for i, val := range captured {
+		bindings[p.vars[i]] = val
+	}
+	return bindings, nil
+}
+
+// Verb returns the verb part of the Pattern.
+func (p Pattern) Verb() string { return p.verb }
+
+func (p Pattern) String() string {
+	var stack []string
+	for _, op := range p.ops {
+		switch op.code {
+		case utilities.OpNop:
+			continue
+		case utilities.OpPush:
+			stack = append(stack, "*")
+		case utilities.OpLitPush:
+			stack = append(stack, p.pool[op.operand])
+		case utilities.OpPushM:
+			stack = append(stack, "**")
+		case utilities.OpConcatN:
+			n := op.operand
+			l := len(stack) - n
+			stack = append(stack[:l], strings.Join(stack[l:], "/"))
+		case utilities.OpCapture:
+			n := len(stack) - 1
+			stack[n] = fmt.Sprintf("{%s=%s}", p.vars[op.operand], stack[n])
+		}
+	}
+	segs := strings.Join(stack, "/")
+	if p.verb != "" {
+		return fmt.Sprintf("/%s:%s", segs, p.verb)
+	}
+	return "/" + segs
+}
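
A worked sketch of the opcode machine for the path template /v1/{id}: push the literal "v1", push one wildcard segment, concatenate that one segment, and capture it into the variable "id" (opcode constants from the utilities package):

```go
package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"github.com/grpc-ecosystem/grpc-gateway/utilities"
)

func main() {
	p := runtime.MustPattern(runtime.NewPattern(1,
		[]int{
			int(utilities.OpLitPush), 0, // match the literal pool[0] = "v1"
			int(utilities.OpPush), 0, // match any single segment
			int(utilities.OpConcatN), 1, // join the top 1 stack entry
			int(utilities.OpCapture), 1, // bind it to pool[1] = "id"
		},
		[]string{"v1", "id"}, ""))

	fmt.Println(p) // /v1/{id=*}

	bindings, err := p.Match([]string{"v1", "123"}, "")
	fmt.Println(bindings, err) // map[id:123] <nil>
}
```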
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go
new file mode 100644
index 0000000..a3151e2
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go
@@ -0,0 +1,80 @@
+package runtime
+
+import (
+	"github.com/golang/protobuf/proto"
+)
+
+// StringP returns a pointer to a string whose pointee is the same as the given string value.
+func StringP(val string) (*string, error) {
+	return proto.String(val), nil
+}
+
+// BoolP parses the given string representation of a boolean value,
+// and returns a pointer to a bool whose value is the same as the parsed value.
+func BoolP(val string) (*bool, error) {
+	b, err := Bool(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Bool(b), nil
+}
+
+// Float64P parses the given string representation of a floating point number,
+// and returns a pointer to a float64 whose value is the same as the parsed number.
+func Float64P(val string) (*float64, error) {
+	f, err := Float64(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Float64(f), nil
+}
+
+// Float32P parses the given string representation of a floating point number,
+// and returns a pointer to a float32 whose value is the same as the parsed number.
+func Float32P(val string) (*float32, error) {
+	f, err := Float32(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Float32(f), nil
+}
+
+// Int64P parses the given string representation of an integer
+// and returns a pointer to an int64 whose value is the same as the parsed integer.
+func Int64P(val string) (*int64, error) {
+	i, err := Int64(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Int64(i), nil
+}
+
+// Int32P parses the given string representation of an integer
+// and returns a pointer to an int32 whose value is the same as the parsed integer.
+func Int32P(val string) (*int32, error) {
+	i, err := Int32(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Int32(i), err
+}
+
+// Uint64P parses the given string representation of an integer
+// and returns a pointer to a uint64 whose value is the same as the parsed integer.
+func Uint64P(val string) (*uint64, error) {
+	i, err := Uint64(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Uint64(i), err
+}
+
+// Uint32P parses the given string representation of an integer
+// and returns a pointer to a uint32 whose value is the same as the parsed integer.
+func Uint32P(val string) (*uint32, error) {
+	i, err := Uint32(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Uint32(i), err
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go
new file mode 100644
index 0000000..43fafca
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go
@@ -0,0 +1,61 @@
+package runtime
+
+import (
+	"context"
+	"io"
+	"net/http"
+
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/status"
+)
+
+// ProtoErrorHandlerFunc handles the error as a gRPC error generated via the status package and replies to the request.
+type ProtoErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, error)
+
+var _ ProtoErrorHandlerFunc = DefaultHTTPProtoErrorHandler
+
+// DefaultHTTPProtoErrorHandler is an implementation of HTTPError.
+// If "err" is an error from gRPC system, the function replies with the status code mapped by HTTPStatusFromCode.
+// If otherwise, it replies with http.StatusInternalServerError.
+//
+// The response body returned by this function is a Status message marshaled by a Marshaler.
+//
+// Do not assign this function to the HTTPError variable directly; use the WithProtoErrorHandler option instead.
+func DefaultHTTPProtoErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) {
+	// return Internal when Marshal failed
+	const fallback = `{"code": 13, "message": "failed to marshal error message"}`
+
+	w.Header().Del("Trailer")
+	w.Header().Set("Content-Type", marshaler.ContentType())
+
+	s, ok := status.FromError(err)
+	if !ok {
+		s = status.New(codes.Unknown, err.Error())
+	}
+
+	buf, merr := marshaler.Marshal(s.Proto())
+	if merr != nil {
+		grpclog.Infof("Failed to marshal error message %q: %v", s.Proto(), merr)
+		w.WriteHeader(http.StatusInternalServerError)
+		if _, err := io.WriteString(w, fallback); err != nil {
+			grpclog.Infof("Failed to write response: %v", err)
+		}
+		return
+	}
+
+	md, ok := ServerMetadataFromContext(ctx)
+	if !ok {
+		grpclog.Infof("Failed to extract ServerMetadata from context")
+	}
+
+	handleForwardResponseServerMetadata(w, mux, md)
+	handleForwardResponseTrailerHeader(w, md)
+	st := HTTPStatusFromCode(s.Code())
+	w.WriteHeader(st)
+	if _, err := w.Write(buf); err != nil {
+		grpclog.Infof("Failed to write response: %v", err)
+	}
+
+	handleForwardResponseTrailer(w, md)
+}
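
A sketch of opting into this handler; per the warning above it goes through the ServeMuxOption rather than assigning HTTPError directly:

```go
package main

import "github.com/grpc-ecosystem/grpc-gateway/runtime"

func main() {
	// Errors are now rendered as google.rpc.Status payloads, and
	// OtherErrorHandler is rewired on construction (see mux.go).
	mux := runtime.NewServeMux(
		runtime.WithProtoErrorHandler(runtime.DefaultHTTPProtoErrorHandler),
	)
	_ = mux
}
```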
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go
new file mode 100644
index 0000000..bb9359f
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go
@@ -0,0 +1,392 @@
+package runtime
+
+import (
+	"encoding/base64"
+	"fmt"
+	"net/url"
+	"reflect"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/grpc-ecosystem/grpc-gateway/utilities"
+	"google.golang.org/grpc/grpclog"
+)
+
+// valuesKeyRegexp matches map-style query keys of the form "key[subkey]".
+var valuesKeyRegexp = regexp.MustCompile(`^(.*)\[(.*)\]$`)
+
+// PopulateQueryParameters populates "values" into "msg".
+// A value is ignored if its key starts with one of the elements in "filter".
+func PopulateQueryParameters(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
+	for key, values := range values {
+		match := valuesKeyRegexp.FindStringSubmatch(key)
+		if len(match) == 3 {
+			key = match[1]
+			values = append([]string{match[2]}, values...)
+		}
+		fieldPath := strings.Split(key, ".")
+		if filter.HasCommonPrefix(fieldPath) {
+			continue
+		}
+		if err := populateFieldValueFromPath(msg, fieldPath, values); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// PopulateFieldFromPath sets a value in a nested Protobuf structure.
+// It instantiates missing protobuf fields as it goes.
+func PopulateFieldFromPath(msg proto.Message, fieldPathString string, value string) error {
+	fieldPath := strings.Split(fieldPathString, ".")
+	return populateFieldValueFromPath(msg, fieldPath, []string{value})
+}
+
+func populateFieldValueFromPath(msg proto.Message, fieldPath []string, values []string) error {
+	m := reflect.ValueOf(msg)
+	if m.Kind() != reflect.Ptr {
+		return fmt.Errorf("unexpected type %T: %v", msg, msg)
+	}
+	var props *proto.Properties
+	m = m.Elem()
+	for i, fieldName := range fieldPath {
+		isLast := i == len(fieldPath)-1
+		if !isLast && m.Kind() != reflect.Struct {
+			return fmt.Errorf("non-aggregate type in the mid of path: %s", strings.Join(fieldPath, "."))
+		}
+		var f reflect.Value
+		var err error
+		f, props, err = fieldByProtoName(m, fieldName)
+		if err != nil {
+			return err
+		} else if !f.IsValid() {
+			grpclog.Infof("field not found in %T: %s", msg, strings.Join(fieldPath, "."))
+			return nil
+		}
+
+		switch f.Kind() {
+		case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, reflect.String, reflect.Uint32, reflect.Uint64:
+			if !isLast {
+				return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], "."))
+			}
+			m = f
+		case reflect.Slice:
+			if !isLast {
+				return fmt.Errorf("unexpected repeated field in %s", strings.Join(fieldPath, "."))
+			}
+			// Handle []byte
+			if f.Type().Elem().Kind() == reflect.Uint8 {
+				m = f
+				break
+			}
+			return populateRepeatedField(f, values, props)
+		case reflect.Ptr:
+			if f.IsNil() {
+				m = reflect.New(f.Type().Elem())
+				f.Set(m.Convert(f.Type()))
+			}
+			m = f.Elem()
+			continue
+		case reflect.Struct:
+			m = f
+			continue
+		case reflect.Map:
+			if !isLast {
+				return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], "."))
+			}
+			return populateMapField(f, values, props)
+		default:
+			return fmt.Errorf("unexpected type %s in %T", f.Type(), msg)
+		}
+	}
+	switch len(values) {
+	case 0:
+		return fmt.Errorf("no value of field: %s", strings.Join(fieldPath, "."))
+	case 1:
+	default:
+		grpclog.Infof("too many field values: %s", strings.Join(fieldPath, "."))
+	}
+	return populateField(m, values[0], props)
+}
+
+// fieldByProtoName looks up a field whose corresponding protobuf field name is "name".
+// "m" must be a struct value. It returns zero reflect.Value if no such field found.
+func fieldByProtoName(m reflect.Value, name string) (reflect.Value, *proto.Properties, error) {
+	props := proto.GetProperties(m.Type())
+
+	// look up field name in oneof map
+	if op, ok := props.OneofTypes[name]; ok {
+		v := reflect.New(op.Type.Elem())
+		field := m.Field(op.Field)
+		if !field.IsNil() {
+			return reflect.Value{}, nil, fmt.Errorf("field already set for %s oneof", props.Prop[op.Field].OrigName)
+		}
+		field.Set(v)
+		return v.Elem().Field(0), op.Prop, nil
+	}
+
+	for _, p := range props.Prop {
+		if p.OrigName == name {
+			return m.FieldByName(p.Name), p, nil
+		}
+		if p.JSONName == name {
+			return m.FieldByName(p.Name), p, nil
+		}
+	}
+	return reflect.Value{}, nil, nil
+}
+
+func populateMapField(f reflect.Value, values []string, props *proto.Properties) error {
+	if len(values) != 2 {
+		return fmt.Errorf("more than one value provided for key %s in map %s", values[0], props.Name)
+	}
+
+	key, value := values[0], values[1]
+	keyType := f.Type().Key()
+	valueType := f.Type().Elem()
+	if f.IsNil() {
+		f.Set(reflect.MakeMap(f.Type()))
+	}
+
+	keyConv, ok := convFromType[keyType.Kind()]
+	if !ok {
+		return fmt.Errorf("unsupported key type %s in map %s", keyType, props.Name)
+	}
+	valueConv, ok := convFromType[valueType.Kind()]
+	if !ok {
+		return fmt.Errorf("unsupported value type %s in map %s", valueType, props.Name)
+	}
+
+	keyV := keyConv.Call([]reflect.Value{reflect.ValueOf(key)})
+	if err := keyV[1].Interface(); err != nil {
+		return err.(error)
+	}
+	valueV := valueConv.Call([]reflect.Value{reflect.ValueOf(value)})
+	if err := valueV[1].Interface(); err != nil {
+		return err.(error)
+	}
+
+	f.SetMapIndex(keyV[0].Convert(keyType), valueV[0].Convert(valueType))
+
+	return nil
+}
+
+func populateRepeatedField(f reflect.Value, values []string, props *proto.Properties) error {
+	elemType := f.Type().Elem()
+
+	// is the destination field a slice of an enumeration type?
+	if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil {
+		return populateFieldEnumRepeated(f, values, enumValMap)
+	}
+
+	conv, ok := convFromType[elemType.Kind()]
+	if !ok {
+		return fmt.Errorf("unsupported field type %s", elemType)
+	}
+	f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type()))
+	for i, v := range values {
+		result := conv.Call([]reflect.Value{reflect.ValueOf(v)})
+		if err := result[1].Interface(); err != nil {
+			return err.(error)
+		}
+		f.Index(i).Set(result[0].Convert(f.Index(i).Type()))
+	}
+	return nil
+}
+
+func populateField(f reflect.Value, value string, props *proto.Properties) error {
+	i := f.Addr().Interface()
+
+	// Handle protobuf well known types
+	type wkt interface {
+		XXX_WellKnownType() string
+	}
+	if wkt, ok := i.(wkt); ok {
+		switch wkt.XXX_WellKnownType() {
+		case "Timestamp":
+			if value == "null" {
+				f.Field(0).SetInt(0)
+				f.Field(1).SetInt(0)
+				return nil
+			}
+
+			t, err := time.Parse(time.RFC3339Nano, value)
+			if err != nil {
+				return fmt.Errorf("bad Timestamp: %v", err)
+			}
+			f.Field(0).SetInt(int64(t.Unix()))
+			f.Field(1).SetInt(int64(t.Nanosecond()))
+			return nil
+		case "Duration":
+			if value == "null" {
+				f.Field(0).SetInt(0)
+				f.Field(1).SetInt(0)
+				return nil
+			}
+			d, err := time.ParseDuration(value)
+			if err != nil {
+				return fmt.Errorf("bad Duration: %v", err)
+			}
+
+			ns := d.Nanoseconds()
+			s := ns / 1e9
+			ns %= 1e9
+			f.Field(0).SetInt(s)
+			f.Field(1).SetInt(ns)
+			return nil
+		case "DoubleValue":
+			fallthrough
+		case "FloatValue":
+			float64Val, err := strconv.ParseFloat(value, 64)
+			if err != nil {
+				return fmt.Errorf("bad DoubleValue: %s", value)
+			}
+			f.Field(0).SetFloat(float64Val)
+			return nil
+		case "Int64Value":
+			fallthrough
+		case "Int32Value":
+			int64Val, err := strconv.ParseInt(value, 10, 64)
+			if err != nil {
+				return fmt.Errorf("bad DoubleValue: %s", value)
+			}
+			f.Field(0).SetInt(int64Val)
+			return nil
+		case "UInt64Value":
+			fallthrough
+		case "UInt32Value":
+			uint64Val, err := strconv.ParseUint(value, 10, 64)
+			if err != nil {
+				return fmt.Errorf("bad DoubleValue: %s", value)
+			}
+			f.Field(0).SetUint(uint64Val)
+			return nil
+		case "BoolValue":
+			if value == "true" {
+				f.Field(0).SetBool(true)
+			} else if value == "false" {
+				f.Field(0).SetBool(false)
+			} else {
+				return fmt.Errorf("bad BoolValue: %s", value)
+			}
+			return nil
+		case "StringValue":
+			f.Field(0).SetString(value)
+			return nil
+		case "BytesValue":
+			bytesVal, err := base64.StdEncoding.DecodeString(value)
+			if err != nil {
+				return fmt.Errorf("bad BytesValue: %s", value)
+			}
+			f.Field(0).SetBytes(bytesVal)
+			return nil
+		}
+	}
+
+	// Handle google well known types
+	if gwkt, ok := i.(proto.Message); ok {
+		switch proto.MessageName(gwkt) {
+		case "google.protobuf.FieldMask":
+			p := f.Field(0)
+			for _, v := range strings.Split(value, ",") {
+				if v != "" {
+					p.Set(reflect.Append(p, reflect.ValueOf(v)))
+				}
+			}
+			return nil
+		}
+	}
+
+	// Handle Time and Duration stdlib types
+	switch t := i.(type) {
+	case *time.Time:
+		pt, err := time.Parse(time.RFC3339Nano, value)
+		if err != nil {
+			return fmt.Errorf("bad Timestamp: %v", err)
+		}
+		*t = pt
+		return nil
+	case *time.Duration:
+		d, err := time.ParseDuration(value)
+		if err != nil {
+			return fmt.Errorf("bad Duration: %v", err)
+		}
+		*t = d
+		return nil
+	}
+
+	// is the destination field an enumeration type?
+	if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil {
+		return populateFieldEnum(f, value, enumValMap)
+	}
+
+	conv, ok := convFromType[f.Kind()]
+	if !ok {
+		return fmt.Errorf("field type %T is not supported in query parameters", i)
+	}
+	result := conv.Call([]reflect.Value{reflect.ValueOf(value)})
+	if err := result[1].Interface(); err != nil {
+		return err.(error)
+	}
+	f.Set(result[0].Convert(f.Type()))
+	return nil
+}
+
+func convertEnum(value string, t reflect.Type, enumValMap map[string]int32) (reflect.Value, error) {
+	// see if it's an enumeration string
+	if enumVal, ok := enumValMap[value]; ok {
+		return reflect.ValueOf(enumVal).Convert(t), nil
+	}
+
+	// check for an integer that matches an enumeration value
+	eVal, err := strconv.Atoi(value)
+	if err != nil {
+		return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t)
+	}
+	for _, v := range enumValMap {
+		if v == int32(eVal) {
+			return reflect.ValueOf(eVal).Convert(t), nil
+		}
+	}
+	return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t)
+}
+
+func populateFieldEnum(f reflect.Value, value string, enumValMap map[string]int32) error {
+	cval, err := convertEnum(value, f.Type(), enumValMap)
+	if err != nil {
+		return err
+	}
+	f.Set(cval)
+	return nil
+}
+
+func populateFieldEnumRepeated(f reflect.Value, values []string, enumValMap map[string]int32) error {
+	elemType := f.Type().Elem()
+	f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type()))
+	for i, v := range values {
+		result, err := convertEnum(v, elemType, enumValMap)
+		if err != nil {
+			return err
+		}
+		f.Index(i).Set(result)
+	}
+	return nil
+}
+
+var (
+	convFromType = map[reflect.Kind]reflect.Value{
+		reflect.String:  reflect.ValueOf(String),
+		reflect.Bool:    reflect.ValueOf(Bool),
+		reflect.Float64: reflect.ValueOf(Float64),
+		reflect.Float32: reflect.ValueOf(Float32),
+		reflect.Int64:   reflect.ValueOf(Int64),
+		reflect.Int32:   reflect.ValueOf(Int32),
+		reflect.Uint64:  reflect.ValueOf(Uint64),
+		reflect.Uint32:  reflect.ValueOf(Uint32),
+		reflect.Slice:   reflect.ValueOf(Bytes),
+	}
+)
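To make the population logic above concrete, here is a minimal sketch of driving PopulateQueryParameters by hand. `pb.GetUserRequest` is a hypothetical generated message with a string field `name` and a repeated int64 field `ids`; only the two grpc-gateway calls come from the files vendored here:

```
package main

import (
	"fmt"
	"net/url"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"github.com/grpc-ecosystem/grpc-gateway/utilities"

	pb "example.com/gen/userpb" // hypothetical generated package
)

func main() {
	// Query string as it would arrive on a GET request.
	values, err := url.ParseQuery("name=alice&ids=1&ids=2")
	if err != nil {
		panic(err)
	}

	// An empty DoubleArray filters nothing, so every key is eligible.
	filter := utilities.NewDoubleArray(nil)

	var req pb.GetUserRequest
	if err := runtime.PopulateQueryParameters(&req, values, filter); err != nil {
		panic(err)
	}
	fmt.Println(req.Name, req.Ids) // alice [1 2]
}
```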
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis/LICENSE b/vendor/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel
new file mode 100644
index 0000000..58d291c
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel
@@ -0,0 +1,20 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(default_visibility = ["//visibility:public"])
+
+go_library(
+    name = "go_default_library",
+    srcs = [
+        "doc.go",
+        "pattern.go",
+        "trie.go",
+    ],
+    importpath = "github.com/grpc-ecosystem/grpc-gateway/utilities",
+)
+
+go_test(
+    name = "go_default_xtest",
+    size = "small",
+    srcs = ["trie_test.go"],
+    deps = [":go_default_library"],
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go
new file mode 100644
index 0000000..cf79a4d
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go
@@ -0,0 +1,2 @@
+// Package utilities provides members for internal use in grpc-gateway.
+package utilities
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go
new file mode 100644
index 0000000..dfe7de4
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go
@@ -0,0 +1,22 @@
+package utilities
+
+// An OpCode is an opcode of compiled path patterns.
+type OpCode int
+
+// These constants are the valid values of OpCode.
+const (
+	// OpNop does nothing
+	OpNop = OpCode(iota)
+	// OpPush pushes a component to stack
+	OpPush
+	// OpLitPush pushes a component to stack if it matches to the literal
+	OpLitPush
+	// OpPushM concatenates the remaining components and pushes it to stack
+	OpPushM
+	// OpConcatN pops N items from stack, concatenates them and pushes it back to stack
+	OpConcatN
+	// OpCapture pops an item and binds it to the variable
+	OpCapture
+	// OpEnd is the least positive invalid opcode.
+	OpEnd
+)
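For orientation, these opcodes form small matching programs. The following is an illustrative sketch only, not the authoritative encoding; the real opcode streams and their operand pools are emitted by the grpc-gateway compiler, and the operand values below are assumed for the example:

```
package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/utilities"
)

func main() {
	// Illustrative only: opcodes interleaved with hypothetical
	// operands for the path template "/v1/users/{name}".
	ops := []utilities.OpCode{
		utilities.OpLitPush, 0, // match pool literal 0 ("v1"), push it
		utilities.OpLitPush, 1, // match pool literal 1 ("users"), push it
		utilities.OpPush, 0, // push the next component unconditionally
		utilities.OpConcatN, 1, // pop one item and concatenate
		utilities.OpCapture, 2, // bind the result to the variable "name"
	}
	fmt.Println(ops)
}
```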
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go
new file mode 100644
index 0000000..c2b7b30
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go
@@ -0,0 +1,179 @@
+package utilities
+
+import (
+	"sort"
+)
+
+// DoubleArray is a double-array trie implementation over sequences of strings.
+type DoubleArray struct {
+	// Encoding keeps an encoding from string to int
+	Encoding map[string]int
+	// Base is the base array of Double Array
+	Base []int
+	// Check is the check array of Double Array
+	Check []int
+}
+
+// NewDoubleArray builds a DoubleArray from a set of sequences of strings.
+func NewDoubleArray(seqs [][]string) *DoubleArray {
+	da := &DoubleArray{Encoding: make(map[string]int)}
+	if len(seqs) == 0 {
+		return da
+	}
+
+	encoded := registerTokens(da, seqs)
+	sort.Sort(byLex(encoded))
+
+	root := node{row: -1, col: -1, left: 0, right: len(encoded)}
+	addSeqs(da, encoded, 0, root)
+
+	for i := len(da.Base); i > 0; i-- {
+		if da.Check[i-1] != 0 {
+			da.Base = da.Base[:i]
+			da.Check = da.Check[:i]
+			break
+		}
+	}
+	return da
+}
+
+func registerTokens(da *DoubleArray, seqs [][]string) [][]int {
+	var result [][]int
+	for _, seq := range seqs {
+		var encoded []int
+		for _, token := range seq {
+			if _, ok := da.Encoding[token]; !ok {
+				da.Encoding[token] = len(da.Encoding)
+			}
+			encoded = append(encoded, da.Encoding[token])
+		}
+		result = append(result, encoded)
+	}
+	for i := range result {
+		result[i] = append(result[i], len(da.Encoding))
+	}
+	return result
+}
+
+type node struct {
+	row, col    int
+	left, right int
+}
+
+func (n node) value(seqs [][]int) int {
+	return seqs[n.row][n.col]
+}
+
+func (n node) children(seqs [][]int) []*node {
+	var result []*node
+	lastVal := -1
+	last := new(node)
+	for i := n.left; i < n.right; i++ {
+		val := seqs[i][n.col+1]
+		if lastVal == val {
+			continue
+		}
+		lastVal = val
+		last.right = i
+		last = &node{
+			row:  i,
+			col:  n.col + 1,
+			left: i,
+		}
+		result = append(result, last)
+	}
+	last.right = n.right
+	return result
+}
+
+func addSeqs(da *DoubleArray, seqs [][]int, pos int, n node) {
+	ensureSize(da, pos)
+
+	children := n.children(seqs)
+	var i int
+	for i = 1; ; i++ {
+		ok := func() bool {
+			for _, child := range children {
+				code := child.value(seqs)
+				j := i + code
+				ensureSize(da, j)
+				if da.Check[j] != 0 {
+					return false
+				}
+			}
+			return true
+		}()
+		if ok {
+			break
+		}
+	}
+	da.Base[pos] = i
+	for _, child := range children {
+		code := child.value(seqs)
+		j := i + code
+		da.Check[j] = pos + 1
+	}
+	terminator := len(da.Encoding)
+	for _, child := range children {
+		code := child.value(seqs)
+		if code == terminator {
+			continue
+		}
+		j := i + code
+		addSeqs(da, seqs, j, *child)
+	}
+}
+
+func ensureSize(da *DoubleArray, i int) {
+	for i >= len(da.Base) {
+		da.Base = append(da.Base, make([]int, len(da.Base)+1)...)
+		da.Check = append(da.Check, make([]int, len(da.Check)+1)...)
+	}
+}
+
+type byLex [][]int
+
+func (l byLex) Len() int      { return len(l) }
+func (l byLex) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
+func (l byLex) Less(i, j int) bool {
+	si := l[i]
+	sj := l[j]
+	var k int
+	for k = 0; k < len(si) && k < len(sj); k++ {
+		if si[k] < sj[k] {
+			return true
+		}
+		if si[k] > sj[k] {
+			return false
+		}
+	}
+	if k < len(sj) {
+		return true
+	}
+	return false
+}
+
+// HasCommonPrefix determines if any sequence in the DoubleArray is a prefix of the given sequence.
+func (da *DoubleArray) HasCommonPrefix(seq []string) bool {
+	if len(da.Base) == 0 {
+		return false
+	}
+
+	var i int
+	for _, t := range seq {
+		code, ok := da.Encoding[t]
+		if !ok {
+			break
+		}
+		j := da.Base[i] + code
+		if len(da.Check) <= j || da.Check[j] != i+1 {
+			break
+		}
+		i = j
+	}
+	j := da.Base[i] + len(da.Encoding)
+	if len(da.Check) <= j || da.Check[j] != i+1 {
+		return false
+	}
+	return true
+}
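The double array is easiest to understand through HasCommonPrefix, which is how PopulateQueryParameters decides which query keys to skip. A small sketch using only the API defined above:

```
package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/utilities"
)

func main() {
	// Register the token sequences that should be filtered out.
	filter := utilities.NewDoubleArray([][]string{
		{"user", "name"},
		{"page"},
	})

	// true: the registered sequence {"user","name"} is a prefix.
	fmt.Println(filter.HasCommonPrefix([]string{"user", "name", "first"}))
	// false: no registered sequence is a prefix of {"user"} alone.
	fmt.Println(filter.HasCommonPrefix([]string{"user"}))
	// true: {"page"} is registered in full.
	fmt.Println(filter.HasCommonPrefix([]string{"page"}))
}
```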
diff --git a/vendor/github.com/inconshreveable/mousetrap/LICENSE b/vendor/github.com/inconshreveable/mousetrap/LICENSE
new file mode 100644
index 0000000..5f0d1fb
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/LICENSE
@@ -0,0 +1,13 @@
+Copyright 2014 Alan Shreve
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/github.com/inconshreveable/mousetrap/README.md b/vendor/github.com/inconshreveable/mousetrap/README.md
new file mode 100644
index 0000000..7a950d1
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/README.md
@@ -0,0 +1,23 @@
+# mousetrap
+
+mousetrap is a tiny library that answers a single question.
+
+On a Windows machine, was the process invoked by someone double-clicking on
+the executable file while browsing in explorer?
+
+### Motivation
+
+Windows developers unfamiliar with command line tools will often "double-click"
+the executable for a tool. Because most CLI tools print the help and then exit
+when invoked without arguments, this is often very frustrating for those users.
+
+mousetrap provides a way to detect these invocations so that you can provide
+more helpful behavior and instructions on how to run the CLI tool. To see what
+this looks like, both from an organizational and a technical perspective, see
+https://inconshreveable.com/09-09-2014/sweat-the-small-stuff/
+
+### The interface
+
+The library exposes a single interface:
+
+    func StartedByExplorer() bool
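A sketch of the startup guard a CLI can build on this call; the message and delay are illustrative, not part of the library:

```
package main

import (
	"fmt"
	"os"
	"time"

	"github.com/inconshreveable/mousetrap"
)

func main() {
	if mousetrap.StartedByExplorer() {
		fmt.Println("This is a command-line tool; please run it from a terminal.")
		// Keep the console window open long enough to read the message.
		time.Sleep(5 * time.Second)
		os.Exit(1)
	}
	// ... normal CLI behavior ...
}
```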
diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_others.go b/vendor/github.com/inconshreveable/mousetrap/trap_others.go
new file mode 100644
index 0000000..9d2d8a4
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/trap_others.go
@@ -0,0 +1,15 @@
+// +build !windows
+
+package mousetrap
+
+// StartedByExplorer returns true if the program was invoked by the user
+// double-clicking on the executable from explorer.exe
+//
+// It is conservative and returns false if any of the internal calls fail.
+// It does not guarantee that the program was run from a terminal. It can only tell you
+// whether it was launched from explorer.exe.
+//
+// On non-Windows platforms, it always returns false.
+func StartedByExplorer() bool {
+	return false
+}
diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go
new file mode 100644
index 0000000..336142a
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go
@@ -0,0 +1,98 @@
+// +build windows
+// +build !go1.4
+
+package mousetrap
+
+import (
+	"fmt"
+	"os"
+	"syscall"
+	"unsafe"
+)
+
+const (
+	// defined by the Win32 API
+	th32cs_snapprocess uintptr = 0x2
+)
+
+var (
+	kernel                   = syscall.MustLoadDLL("kernel32.dll")
+	CreateToolhelp32Snapshot = kernel.MustFindProc("CreateToolhelp32Snapshot")
+	Process32First           = kernel.MustFindProc("Process32FirstW")
+	Process32Next            = kernel.MustFindProc("Process32NextW")
+)
+
+// processEntry32 mirrors the PROCESSENTRY32 structure defined by the Win32 API
+type processEntry32 struct {
+	dwSize              uint32
+	cntUsage            uint32
+	th32ProcessID       uint32
+	th32DefaultHeapID   int
+	th32ModuleID        uint32
+	cntThreads          uint32
+	th32ParentProcessID uint32
+	pcPriClassBase      int32
+	dwFlags             uint32
+	szExeFile           [syscall.MAX_PATH]uint16
+}
+
+func getProcessEntry(pid int) (pe *processEntry32, err error) {
+	snapshot, _, e1 := CreateToolhelp32Snapshot.Call(th32cs_snapprocess, uintptr(0))
+	if snapshot == uintptr(syscall.InvalidHandle) {
+		err = fmt.Errorf("CreateToolhelp32Snapshot: %v", e1)
+		return
+	}
+	defer syscall.CloseHandle(syscall.Handle(snapshot))
+
+	var processEntry processEntry32
+	processEntry.dwSize = uint32(unsafe.Sizeof(processEntry))
+	ok, _, e1 := Process32First.Call(snapshot, uintptr(unsafe.Pointer(&processEntry)))
+	if ok == 0 {
+		err = fmt.Errorf("Process32First: %v", e1)
+		return
+	}
+
+	for {
+		if processEntry.th32ProcessID == uint32(pid) {
+			pe = &processEntry
+			return
+		}
+
+		ok, _, e1 = Process32Next.Call(snapshot, uintptr(unsafe.Pointer(&processEntry)))
+		if ok == 0 {
+			err = fmt.Errorf("Process32Next: %v", e1)
+			return
+		}
+	}
+}
+
+func getppid() (pid int, err error) {
+	pe, err := getProcessEntry(os.Getpid())
+	if err != nil {
+		return
+	}
+
+	pid = int(pe.th32ParentProcessID)
+	return
+}
+
+// StartedByExplorer returns true if the program was invoked by the user double-clicking
+// on the executable from explorer.exe
+//
+// It is conservative and returns false if any of the internal calls fail.
+// It does not guarantee that the program was run from a terminal. It can only tell you
+// whether it was launched from explorer.exe.
+func StartedByExplorer() bool {
+	ppid, err := getppid()
+	if err != nil {
+		return false
+	}
+
+	pe, err := getProcessEntry(ppid)
+	if err != nil {
+		return false
+	}
+
+	name := syscall.UTF16ToString(pe.szExeFile[:])
+	return name == "explorer.exe"
+}
diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go
new file mode 100644
index 0000000..9a28e57
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go
@@ -0,0 +1,46 @@
+// +build windows
+// +build go1.4
+
+package mousetrap
+
+import (
+	"os"
+	"syscall"
+	"unsafe"
+)
+
+func getProcessEntry(pid int) (*syscall.ProcessEntry32, error) {
+	snapshot, err := syscall.CreateToolhelp32Snapshot(syscall.TH32CS_SNAPPROCESS, 0)
+	if err != nil {
+		return nil, err
+	}
+	defer syscall.CloseHandle(snapshot)
+	var procEntry syscall.ProcessEntry32
+	procEntry.Size = uint32(unsafe.Sizeof(procEntry))
+	if err = syscall.Process32First(snapshot, &procEntry); err != nil {
+		return nil, err
+	}
+	for {
+		if procEntry.ProcessID == uint32(pid) {
+			return &procEntry, nil
+		}
+		err = syscall.Process32Next(snapshot, &procEntry)
+		if err != nil {
+			return nil, err
+		}
+	}
+}
+
+// StartedByExplorer returns true if the program was invoked by the user double-clicking
+// on the executable from explorer.exe
+//
+// It is conservative and returns false if any of the internal calls fail.
+// It does not guarantee that the program was run from a terminal. It can only tell you
+// whether it was launched from explorer.exe.
+func StartedByExplorer() bool {
+	pe, err := getProcessEntry(os.Getppid())
+	if err != nil {
+		return false
+	}
+	return "explorer.exe" == syscall.UTF16ToString(pe.ExeFile[:])
+}
diff --git a/vendor/github.com/jonboulle/clockwork/.gitignore b/vendor/github.com/jonboulle/clockwork/.gitignore
new file mode 100644
index 0000000..010c242
--- /dev/null
+++ b/vendor/github.com/jonboulle/clockwork/.gitignore
@@ -0,0 +1,25 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+
+*.swp
diff --git a/vendor/github.com/jonboulle/clockwork/.travis.yml b/vendor/github.com/jonboulle/clockwork/.travis.yml
new file mode 100644
index 0000000..aefda90
--- /dev/null
+++ b/vendor/github.com/jonboulle/clockwork/.travis.yml
@@ -0,0 +1,5 @@
+language: go
+go:
+  - 1.3
+
+sudo: false
diff --git a/vendor/github.com/jonboulle/clockwork/LICENSE b/vendor/github.com/jonboulle/clockwork/LICENSE
new file mode 100644
index 0000000..5c304d1
--- /dev/null
+++ b/vendor/github.com/jonboulle/clockwork/LICENSE
@@ -0,0 +1,201 @@
+Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright {yyyy} {name of copyright owner}
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/jonboulle/clockwork/README.md b/vendor/github.com/jonboulle/clockwork/README.md
new file mode 100644
index 0000000..d43a6c7
--- /dev/null
+++ b/vendor/github.com/jonboulle/clockwork/README.md
@@ -0,0 +1,61 @@
+clockwork
+=========
+
+[![Build Status](https://travis-ci.org/jonboulle/clockwork.png?branch=master)](https://travis-ci.org/jonboulle/clockwork)
+[![godoc](https://godoc.org/github.com/jonboulle/clockwork?status.svg)](http://godoc.org/github.com/jonboulle/clockwork) 
+
+a simple fake clock for Go
+
+# Usage
+
+Replace uses of the `time` package with the `clockwork.Clock` interface instead.
+
+For example, instead of using `time.Sleep` directly:
+
+```
+func my_func() {
+	time.Sleep(3 * time.Second)
+	do_something()
+}
+```
+
+inject a clock and use its `Sleep` method instead:
+
+```
+func my_func(clock clockwork.Clock) {
+	clock.Sleep(3 * time.Second)
+	do_something()
+}
+```
+
+Now you can easily test `my_func` with a `FakeClock`:
+
+```
+func TestMyFunc(t *testing.T) {
+	c := clockwork.NewFakeClock()
+
+	// Start our sleepy function concurrently so the test can drive the clock
+	go my_func(c)
+
+	// Ensure we wait until my_func is sleeping
+	c.BlockUntil(1)
+
+	assert_state()
+
+	// Advance the FakeClock forward in time
+	c.Advance(3 * time.Second)
+
+	assert_state()
+}
+```
+
+and in production builds, simply inject the real clock instead:
+```
+my_func(clockwork.NewRealClock())
+```
+
+See [example_test.go](example_test.go) for a full example.
+
+# Credits
+
+clockwork is inspired by @wickman's [threaded fake clock](https://gist.github.com/wickman/3840816), and the [Golang playground](http://blog.golang.org/playground#Faking%20time)
diff --git a/vendor/github.com/jonboulle/clockwork/clockwork.go b/vendor/github.com/jonboulle/clockwork/clockwork.go
new file mode 100644
index 0000000..9ec96ed
--- /dev/null
+++ b/vendor/github.com/jonboulle/clockwork/clockwork.go
@@ -0,0 +1,169 @@
+package clockwork
+
+import (
+	"sync"
+	"time"
+)
+
+// Clock provides an interface that packages can use instead of directly
+// using the time package, so that chronology-related behavior can be tested.
+type Clock interface {
+	After(d time.Duration) <-chan time.Time
+	Sleep(d time.Duration)
+	Now() time.Time
+}
+
+// FakeClock provides an interface for a clock which can be
+// manually advanced through time
+type FakeClock interface {
+	Clock
+	// Advance advances the FakeClock to a new point in time, ensuring any existing
+	// sleepers are notified appropriately before returning
+	Advance(d time.Duration)
+	// BlockUntil will block until the FakeClock has the given number of
+	// sleepers (callers of Sleep or After)
+	BlockUntil(n int)
+}
+
+// NewRealClock returns a Clock which simply delegates calls to the actual time
+// package; it should be used by packages in production.
+func NewRealClock() Clock {
+	return &realClock{}
+}
+
+// NewFakeClock returns a FakeClock implementation which can be
+// manually advanced through time for testing. The initial time of the
+// FakeClock will be an arbitrary non-zero time.
+func NewFakeClock() FakeClock {
+	// use a fixture that does not fulfill Time.IsZero()
+	return NewFakeClockAt(time.Date(1984, time.April, 4, 0, 0, 0, 0, time.UTC))
+}
+
+// NewFakeClockAt returns a FakeClock initialised at the given time.Time.
+func NewFakeClockAt(t time.Time) FakeClock {
+	return &fakeClock{
+		time: t,
+	}
+}
+
+type realClock struct{}
+
+func (rc *realClock) After(d time.Duration) <-chan time.Time {
+	return time.After(d)
+}
+
+func (rc *realClock) Sleep(d time.Duration) {
+	time.Sleep(d)
+}
+
+func (rc *realClock) Now() time.Time {
+	return time.Now()
+}
+
+type fakeClock struct {
+	sleepers []*sleeper
+	blockers []*blocker
+	time     time.Time
+
+	l sync.RWMutex
+}
+
+// sleeper represents a caller of After or Sleep
+type sleeper struct {
+	until time.Time
+	done  chan time.Time
+}
+
+// blocker represents a caller of BlockUntil
+type blocker struct {
+	count int
+	ch    chan struct{}
+}
+
+// After mimics time.After; it waits for the given duration to elapse on the
+// fakeClock, then sends the current time on the returned channel.
+func (fc *fakeClock) After(d time.Duration) <-chan time.Time {
+	fc.l.Lock()
+	defer fc.l.Unlock()
+	now := fc.time
+	done := make(chan time.Time, 1)
+	if d.Nanoseconds() == 0 {
+		// special case - trigger immediately
+		done <- now
+	} else {
+		// otherwise, add to the set of sleepers
+		s := &sleeper{
+			until: now.Add(d),
+			done:  done,
+		}
+		fc.sleepers = append(fc.sleepers, s)
+		// and notify any blockers
+		fc.blockers = notifyBlockers(fc.blockers, len(fc.sleepers))
+	}
+	return done
+}
+
+// notifyBlockers notifies all the blockers waiting until the
+// given number of sleepers are waiting on the fakeClock. It
+// returns an updated slice of blockers (i.e. those still waiting)
+func notifyBlockers(blockers []*blocker, count int) (newBlockers []*blocker) {
+	for _, b := range blockers {
+		if b.count == count {
+			close(b.ch)
+		} else {
+			newBlockers = append(newBlockers, b)
+		}
+	}
+	return
+}
+
+// Sleep blocks until the given duration has passed on the fakeClock
+func (fc *fakeClock) Sleep(d time.Duration) {
+	<-fc.After(d)
+}
+
+// Now returns the current time of the fakeClock
+func (fc *fakeClock) Now() time.Time {
+	fc.l.RLock()
+	t := fc.time
+	fc.l.RUnlock()
+	return t
+}
+
+// Advance advances fakeClock to a new point in time, ensuring channels from any
+// previous invocations of After are notified appropriately before returning
+func (fc *fakeClock) Advance(d time.Duration) {
+	fc.l.Lock()
+	defer fc.l.Unlock()
+	end := fc.time.Add(d)
+	var newSleepers []*sleeper
+	for _, s := range fc.sleepers {
+		if end.Sub(s.until) >= 0 {
+			s.done <- end
+		} else {
+			newSleepers = append(newSleepers, s)
+		}
+	}
+	fc.sleepers = newSleepers
+	fc.blockers = notifyBlockers(fc.blockers, len(fc.sleepers))
+	fc.time = end
+}
+
+// BlockUntil will block until the fakeClock has the given number of sleepers
+// (callers of Sleep or After)
+func (fc *fakeClock) BlockUntil(n int) {
+	fc.l.Lock()
+	// Fast path: current number of sleepers is what we're looking for
+	if len(fc.sleepers) == n {
+		fc.l.Unlock()
+		return
+	}
+	// Otherwise, set up a new blocker
+	b := &blocker{
+		count: n,
+		ch:    make(chan struct{}),
+	}
+	fc.blockers = append(fc.blockers, b)
+	fc.l.Unlock()
+	<-b.ch
+}
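A sketch of the Advance/BlockUntil handshake from a test's point of view, using only the API defined above (the test name and durations are illustrative):

```
package clockwork_test

import (
	"testing"
	"time"

	"github.com/jonboulle/clockwork"
)

func TestAfterFires(t *testing.T) {
	fc := clockwork.NewFakeClock()

	ch := fc.After(10 * time.Second) // registers one sleeper
	fc.BlockUntil(1)                 // returns once that sleeper exists

	// Advancing short of the deadline must not fire the channel.
	fc.Advance(9 * time.Second)
	select {
	case <-ch:
		t.Fatal("After fired too early")
	default:
	}

	// Crossing the deadline delivers the fake clock's new time.
	fc.Advance(time.Second)
	<-ch
}
```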
diff --git a/vendor/github.com/json-iterator/go/.codecov.yml b/vendor/github.com/json-iterator/go/.codecov.yml
new file mode 100644
index 0000000..955dc0b
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/.codecov.yml
@@ -0,0 +1,3 @@
+ignore:
+    - "output_tests/.*"
+
diff --git a/vendor/github.com/json-iterator/go/.gitignore b/vendor/github.com/json-iterator/go/.gitignore
new file mode 100644
index 0000000..1555653
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/.gitignore
@@ -0,0 +1,4 @@
+/vendor
+/bug_test.go
+/coverage.txt
+/.idea
diff --git a/vendor/github.com/json-iterator/go/.travis.yml b/vendor/github.com/json-iterator/go/.travis.yml
new file mode 100644
index 0000000..449e67c
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/.travis.yml
@@ -0,0 +1,14 @@
+language: go
+
+go:
+  - 1.8.x
+  - 1.x
+
+before_install:
+  - go get -t -v ./...
+
+script:
+  - ./test.sh
+
+after_success:
+  - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/json-iterator/go/Gopkg.lock b/vendor/github.com/json-iterator/go/Gopkg.lock
new file mode 100644
index 0000000..c8a9fbb
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/Gopkg.lock
@@ -0,0 +1,21 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+
+[[projects]]
+  name = "github.com/modern-go/concurrent"
+  packages = ["."]
+  revision = "e0a39a4cb4216ea8db28e22a69f4ec25610d513a"
+  version = "1.0.0"
+
+[[projects]]
+  name = "github.com/modern-go/reflect2"
+  packages = ["."]
+  revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd"
+  version = "1.0.1"
+
+[solve-meta]
+  analyzer-name = "dep"
+  analyzer-version = 1
+  inputs-digest = "ea54a775e5a354cb015502d2e7aa4b74230fc77e894f34a838b268c25ec8eeb8"
+  solver-name = "gps-cdcl"
+  solver-version = 1
diff --git a/vendor/github.com/json-iterator/go/Gopkg.toml b/vendor/github.com/json-iterator/go/Gopkg.toml
new file mode 100644
index 0000000..313a0f8
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/Gopkg.toml
@@ -0,0 +1,26 @@
+# Gopkg.toml example
+#
+# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
+# for detailed Gopkg.toml documentation.
+#
+# required = ["github.com/user/thing/cmd/thing"]
+# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
+#
+# [[constraint]]
+#   name = "github.com/user/project"
+#   version = "1.0.0"
+#
+# [[constraint]]
+#   name = "github.com/user/project2"
+#   branch = "dev"
+#   source = "github.com/myfork/project2"
+#
+# [[override]]
+#  name = "github.com/x/y"
+#  version = "2.4.0"
+
+ignored = ["github.com/davecgh/go-spew*","github.com/google/gofuzz*","github.com/stretchr/testify*"]
+
+[[constraint]]
+  name = "github.com/modern-go/reflect2"
+  version = "1.0.1"
diff --git a/vendor/github.com/json-iterator/go/LICENSE b/vendor/github.com/json-iterator/go/LICENSE
new file mode 100644
index 0000000..2cf4f5a
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2016 json-iterator
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/json-iterator/go/README.md b/vendor/github.com/json-iterator/go/README.md
new file mode 100644
index 0000000..54d5afe
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/README.md
@@ -0,0 +1,91 @@
+[![Sourcegraph](https://sourcegraph.com/github.com/json-iterator/go/-/badge.svg)](https://sourcegraph.com/github.com/json-iterator/go?badge)
+[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/json-iterator/go)
+[![Build Status](https://travis-ci.org/json-iterator/go.svg?branch=master)](https://travis-ci.org/json-iterator/go)
+[![codecov](https://codecov.io/gh/json-iterator/go/branch/master/graph/badge.svg)](https://codecov.io/gh/json-iterator/go)
+[![rcard](https://goreportcard.com/badge/github.com/json-iterator/go)](https://goreportcard.com/report/github.com/json-iterator/go)
+[![License](http://img.shields.io/badge/license-mit-blue.svg?style=flat-square)](https://raw.githubusercontent.com/json-iterator/go/master/LICENSE)
+[![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby)
+
+A high-performance, 100% compatible drop-in replacement for "encoding/json".
+
+You can also process Thrift the way you process JSON, using [thrift-iterator](https://github.com/thrift-iterator/go)
+
+```
+Go developers, come join us: DiDi Chuxing Platform Technology Department, taowen@didichuxing.com
+```
+
+# Benchmark
+
+![benchmark](http://jsoniter.com/benchmarks/go-benchmark.png)
+
+Source code: https://github.com/json-iterator/go-benchmark/blob/master/src/github.com/json-iterator/go-benchmark/benchmark_medium_payload_test.go
+
+Raw results (note that easyjson requires static code generation):
+
+| | ns/op | B/op | allocs/op |
+| --- | --- | --- | --- |
+| std decode | 35510 ns/op | 1960 B/op | 99 allocs/op |
+| easyjson decode | 8499 ns/op | 160 B/op | 4 allocs/op |
+| jsoniter decode | 5623 ns/op | 160 B/op | 3 allocs/op |
+| std encode | 2213 ns/op | 712 B/op | 5 allocs/op |
+| easyjson encode | 883 ns/op | 576 B/op | 3 allocs/op |
+| jsoniter encode | 837 ns/op | 384 B/op | 4 allocs/op |
+
+Always benchmark with your own workload. 
+The result depends heavily on the data input.
+
+# Usage
+
+jsoniter is 100% compatible with the standard library.
+
+Replace
+
+```go
+import "encoding/json"
+json.Marshal(&data)
+```
+
+with 
+
+```go
+import "github.com/json-iterator/go"
+
+var json = jsoniter.ConfigCompatibleWithStandardLibrary
+json.Marshal(&data)
+```
+
+Replace
+
+```go
+import "encoding/json"
+json.Unmarshal(input, &data)
+```
+
+with
+
+```go
+import "github.com/json-iterator/go"
+
+var json = jsoniter.ConfigCompatibleWithStandardLibrary
+json.Unmarshal(input, &data)
+```
+
+[More documentation](http://jsoniter.com/migrate-from-go-std.html)
+
+# How to get
+
+```
+go get github.com/json-iterator/go
+```
+
+# Contributions Welcome!
+
+Contributors
+
+* [thockin](https://github.com/thockin) 
+* [mattn](https://github.com/mattn)
+* [cch123](https://github.com/cch123)
+* [Oleg Shaldybin](https://github.com/olegshaldybin)
+* [Jason Toffaletti](https://github.com/toffaletti)
+
+Report an issue or open a pull request, email taowen@gmail.com, or join the [![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby)
diff --git a/vendor/github.com/json-iterator/go/adapter.go b/vendor/github.com/json-iterator/go/adapter.go
new file mode 100644
index 0000000..e674d0f
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/adapter.go
@@ -0,0 +1,150 @@
+package jsoniter
+
+import (
+	"bytes"
+	"io"
+)
+
+// RawMessage mirrors json.RawMessage so encoding/json can be replaced with jsoniter.
+type RawMessage []byte
+
+// Unmarshal adapts to the encoding/json Unmarshal API.
+//
+// Unmarshal parses the JSON-encoded data and stores the result in the value pointed to by v.
+// Refer to https://godoc.org/encoding/json#Unmarshal for more information
+func Unmarshal(data []byte, v interface{}) error {
+	return ConfigDefault.Unmarshal(data, v)
+}
+
+// UnmarshalFromString is a convenient method to read from a string instead of a []byte.
+func UnmarshalFromString(str string, v interface{}) error {
+	return ConfigDefault.UnmarshalFromString(str, v)
+}
+
+// Get is a quick method to fetch a value from a deeply nested JSON structure.
+func Get(data []byte, path ...interface{}) Any {
+	return ConfigDefault.Get(data, path...)
+}
+
+// Marshal adapts to the encoding/json Marshal API.
+//
+// Marshal returns the JSON encoding of v.
+// Refer to https://godoc.org/encoding/json#Marshal for more information
+func Marshal(v interface{}) ([]byte, error) {
+	return ConfigDefault.Marshal(v)
+}
+
+// MarshalIndent is the same as json.MarshalIndent. A non-empty prefix is not supported.
+func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
+	return ConfigDefault.MarshalIndent(v, prefix, indent)
+}
+
+// MarshalToString is a convenient method to write to a string instead of a []byte.
+func MarshalToString(v interface{}) (string, error) {
+	return ConfigDefault.MarshalToString(v)
+}
+
+// NewDecoder adapts to the encoding/json NewDecoder API.
+//
+// NewDecoder returns a new decoder that reads from reader.
+//
+// Instead of an encoding/json Decoder, a jsoniter Decoder is returned.
+// Refer to https://godoc.org/encoding/json#NewDecoder for more information.
+func NewDecoder(reader io.Reader) *Decoder {
+	return ConfigDefault.NewDecoder(reader)
+}
+
+// Decoder reads and decodes JSON values from an input stream.
+// Decoder provides an API identical to encoding/json's Decoder (Token() is still in progress).
+type Decoder struct {
+	iter *Iterator
+}
+
+// Decode reads the next JSON-encoded value from the input and stores it in obj.
+func (adapter *Decoder) Decode(obj interface{}) error {
+	if adapter.iter.head == adapter.iter.tail && adapter.iter.reader != nil {
+		if !adapter.iter.loadMore() {
+			return io.EOF
+		}
+	}
+	adapter.iter.ReadVal(obj)
+	err := adapter.iter.Error
+	if err == io.EOF {
+		return nil
+	}
+	return err
+}
+
+// More reports whether there is another element in the current array or object being decoded.
+func (adapter *Decoder) More() bool {
+	iter := adapter.iter
+	if iter.Error != nil {
+		return false
+	}
+	c := iter.nextToken()
+	if c == 0 {
+		return false
+	}
+	iter.unreadByte()
+	return c != ']' && c != '}'
+}
+
+// Buffered returns a reader over the data remaining in the Decoder's buffer.
+func (adapter *Decoder) Buffered() io.Reader {
+	remaining := adapter.iter.buf[adapter.iter.head:adapter.iter.tail]
+	return bytes.NewReader(remaining)
+}
+
+// UseNumber causes the Decoder to unmarshal a number into an interface{} as a
+// Number instead of as a float64.
+func (adapter *Decoder) UseNumber() {
+	cfg := adapter.iter.cfg.configBeforeFrozen
+	cfg.UseNumber = true
+	adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions)
+}
+
+// DisallowUnknownFields causes the Decoder to return an error when the destination
+// is a struct and the input contains object keys which do not match any
+// non-ignored, exported fields in the destination.
+func (adapter *Decoder) DisallowUnknownFields() {
+	cfg := adapter.iter.cfg.configBeforeFrozen
+	cfg.DisallowUnknownFields = true
+	adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions)
+}
+
+// NewEncoder is the same as json.NewEncoder.
+func NewEncoder(writer io.Writer) *Encoder {
+	return ConfigDefault.NewEncoder(writer)
+}
+
+// Encoder is the same as json.Encoder.
+type Encoder struct {
+	stream *Stream
+}
+
+// Encode writes the JSON encoding of val to the underlying io.Writer, followed by a newline.
+func (adapter *Encoder) Encode(val interface{}) error {
+	adapter.stream.WriteVal(val)
+	adapter.stream.WriteRaw("\n")
+	adapter.stream.Flush()
+	return adapter.stream.Error
+}
+
+// SetIndent sets the indentation step. A non-empty prefix is not supported.
+func (adapter *Encoder) SetIndent(prefix, indent string) {
+	config := adapter.stream.cfg.configBeforeFrozen
+	config.IndentionStep = len(indent)
+	adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions)
+}
+
+// SetEscapeHTML controls HTML escaping (enabled by default); pass false to disable it.
+func (adapter *Encoder) SetEscapeHTML(escapeHTML bool) {
+	config := adapter.stream.cfg.configBeforeFrozen
+	config.EscapeHTML = escapeHTML
+	adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions)
+}
+
+// Valid reports whether data is a valid JSON encoding.
+func Valid(data []byte) bool {
+	return ConfigDefault.Valid(data)
+}
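
The adapter above mirrors the encoding/json streaming API. Below is a self-contained sketch of the Decoder and Encoder halves working together; the event type and the input payload are illustrative.

```go
package main

import (
	"fmt"
	"os"
	"strings"

	jsoniter "github.com/json-iterator/go"
)

type event struct {
	Name string `json:"name"`
}

func main() {
	// Decoder side: stream several whitespace-separated JSON values.
	dec := jsoniter.NewDecoder(strings.NewReader(`{"name":"a"} {"name":"b"}`))
	var events []event
	for dec.More() {
		var e event
		if err := dec.Decode(&e); err != nil {
			panic(err)
		}
		events = append(events, e)
	}
	fmt.Println(len(events)) // 2

	// Encoder side: each Encode call writes one value plus a newline.
	enc := jsoniter.NewEncoder(os.Stdout)
	enc.SetEscapeHTML(false) // keep <, >, & literal
	for _, e := range events {
		if err := enc.Encode(e); err != nil {
			panic(err)
		}
	}
}
```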
diff --git a/vendor/github.com/json-iterator/go/any.go b/vendor/github.com/json-iterator/go/any.go
new file mode 100644
index 0000000..daecfed
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any.go
@@ -0,0 +1,321 @@
+package jsoniter
+
+import (
+	"errors"
+	"fmt"
+	"github.com/modern-go/reflect2"
+	"io"
+	"reflect"
+	"strconv"
+	"unsafe"
+)
+
+// Any is a generic representation of a JSON value.
+// The lazy implementations hold raw []byte and parse it on demand.
+type Any interface {
+	LastError() error
+	ValueType() ValueType
+	MustBeValid() Any
+	ToBool() bool
+	ToInt() int
+	ToInt32() int32
+	ToInt64() int64
+	ToUint() uint
+	ToUint32() uint32
+	ToUint64() uint64
+	ToFloat32() float32
+	ToFloat64() float64
+	ToString() string
+	ToVal(val interface{})
+	Get(path ...interface{}) Any
+	Size() int
+	Keys() []string
+	GetInterface() interface{}
+	WriteTo(stream *Stream)
+}
+
+type baseAny struct{}
+
+func (any *baseAny) Get(path ...interface{}) Any {
+	return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)}
+}
+
+func (any *baseAny) Size() int {
+	return 0
+}
+
+func (any *baseAny) Keys() []string {
+	return []string{}
+}
+
+func (any *baseAny) ToVal(obj interface{}) {
+	panic("not implemented")
+}
+
+// WrapInt32 turns an int32 into the Any interface.
+func WrapInt32(val int32) Any {
+	return &int32Any{baseAny{}, val}
+}
+
+// WrapInt64 turns an int64 into the Any interface.
+func WrapInt64(val int64) Any {
+	return &int64Any{baseAny{}, val}
+}
+
+// WrapUint32 turns a uint32 into the Any interface.
+func WrapUint32(val uint32) Any {
+	return &uint32Any{baseAny{}, val}
+}
+
+// WrapUint64 turns a uint64 into the Any interface.
+func WrapUint64(val uint64) Any {
+	return &uint64Any{baseAny{}, val}
+}
+
+// WrapFloat64 turns a float64 into the Any interface.
+func WrapFloat64(val float64) Any {
+	return &floatAny{baseAny{}, val}
+}
+
+// WrapString turns a string into the Any interface.
+func WrapString(val string) Any {
+	return &stringAny{baseAny{}, val}
+}
+
+// Wrap turns a Go object into the Any interface.
+func Wrap(val interface{}) Any {
+	if val == nil {
+		return &nilAny{}
+	}
+	asAny, isAny := val.(Any)
+	if isAny {
+		return asAny
+	}
+	typ := reflect2.TypeOf(val)
+	switch typ.Kind() {
+	case reflect.Slice:
+		return wrapArray(val)
+	case reflect.Struct:
+		return wrapStruct(val)
+	case reflect.Map:
+		return wrapMap(val)
+	case reflect.String:
+		return WrapString(val.(string))
+	case reflect.Int:
+		if strconv.IntSize == 32 {
+			return WrapInt32(int32(val.(int)))
+		}
+		return WrapInt64(int64(val.(int)))
+	case reflect.Int8:
+		return WrapInt32(int32(val.(int8)))
+	case reflect.Int16:
+		return WrapInt32(int32(val.(int16)))
+	case reflect.Int32:
+		return WrapInt32(val.(int32))
+	case reflect.Int64:
+		return WrapInt64(val.(int64))
+	case reflect.Uint:
+		if strconv.IntSize == 32 {
+			return WrapUint32(uint32(val.(uint)))
+		}
+		return WrapUint64(uint64(val.(uint)))
+	case reflect.Uintptr:
+		if ptrSize == 32 {
+			return WrapUint32(uint32(val.(uintptr)))
+		}
+		return WrapUint64(uint64(val.(uintptr)))
+	case reflect.Uint8:
+		return WrapUint32(uint32(val.(uint8)))
+	case reflect.Uint16:
+		return WrapUint32(uint32(val.(uint16)))
+	case reflect.Uint32:
+		return WrapUint32(uint32(val.(uint32)))
+	case reflect.Uint64:
+		return WrapUint64(val.(uint64))
+	case reflect.Float32:
+		return WrapFloat64(float64(val.(float32)))
+	case reflect.Float64:
+		return WrapFloat64(val.(float64))
+	case reflect.Bool:
+		if val.(bool) {
+			return &trueAny{}
+		}
+		return &falseAny{}
+	}
+	return &invalidAny{baseAny{}, fmt.Errorf("unsupported type: %v", typ)}
+}
+
+// ReadAny reads the next JSON element as an Any object. It is a more capable json.RawMessage.
+func (iter *Iterator) ReadAny() Any {
+	return iter.readAny()
+}
+
+func (iter *Iterator) readAny() Any {
+	c := iter.nextToken()
+	switch c {
+	case '"':
+		iter.unreadByte()
+		return &stringAny{baseAny{}, iter.ReadString()}
+	case 'n':
+		iter.skipThreeBytes('u', 'l', 'l') // null
+		return &nilAny{}
+	case 't':
+		iter.skipThreeBytes('r', 'u', 'e') // true
+		return &trueAny{}
+	case 'f':
+		iter.skipFourBytes('a', 'l', 's', 'e') // false
+		return &falseAny{}
+	case '{':
+		return iter.readObjectAny()
+	case '[':
+		return iter.readArrayAny()
+	case '-':
+		return iter.readNumberAny(false)
+	case 0:
+		return &invalidAny{baseAny{}, errors.New("input is empty")}
+	default:
+		return iter.readNumberAny(true)
+	}
+}
+
+func (iter *Iterator) readNumberAny(positive bool) Any {
+	iter.startCapture(iter.head - 1)
+	iter.skipNumber()
+	lazyBuf := iter.stopCapture()
+	return &numberLazyAny{baseAny{}, iter.cfg, lazyBuf, nil}
+}
+
+func (iter *Iterator) readObjectAny() Any {
+	iter.startCapture(iter.head - 1)
+	iter.skipObject()
+	lazyBuf := iter.stopCapture()
+	return &objectLazyAny{baseAny{}, iter.cfg, lazyBuf, nil}
+}
+
+func (iter *Iterator) readArrayAny() Any {
+	iter.startCapture(iter.head - 1)
+	iter.skipArray()
+	lazyBuf := iter.stopCapture()
+	return &arrayLazyAny{baseAny{}, iter.cfg, lazyBuf, nil}
+}
+
+func locateObjectField(iter *Iterator, target string) []byte {
+	var found []byte
+	iter.ReadObjectCB(func(iter *Iterator, field string) bool {
+		if field == target {
+			found = iter.SkipAndReturnBytes()
+			return false
+		}
+		iter.Skip()
+		return true
+	})
+	return found
+}
+
+func locateArrayElement(iter *Iterator, target int) []byte {
+	var found []byte
+	n := 0
+	iter.ReadArrayCB(func(iter *Iterator) bool {
+		if n == target {
+			found = iter.SkipAndReturnBytes()
+			return false
+		}
+		iter.Skip()
+		n++
+		return true
+	})
+	return found
+}
+
+func locatePath(iter *Iterator, path []interface{}) Any {
+	for i, pathKeyObj := range path {
+		switch pathKey := pathKeyObj.(type) {
+		case string:
+			valueBytes := locateObjectField(iter, pathKey)
+			if valueBytes == nil {
+				return newInvalidAny(path[i:])
+			}
+			iter.ResetBytes(valueBytes)
+		case int:
+			valueBytes := locateArrayElement(iter, pathKey)
+			if valueBytes == nil {
+				return newInvalidAny(path[i:])
+			}
+			iter.ResetBytes(valueBytes)
+		case int32:
+			if pathKey == '*' {
+				return iter.readAny().Get(path[i:]...)
+			}
+			return newInvalidAny(path[i:])
+		default:
+			return newInvalidAny(path[i:])
+		}
+	}
+	if iter.Error != nil && iter.Error != io.EOF {
+		return &invalidAny{baseAny{}, iter.Error}
+	}
+	return iter.readAny()
+}
+
+var anyType = reflect2.TypeOfPtr((*Any)(nil)).Elem()
+
+func createDecoderOfAny(ctx *ctx, typ reflect2.Type) ValDecoder {
+	if typ == anyType {
+		return &directAnyCodec{}
+	}
+	if typ.Implements(anyType) {
+		return &anyCodec{
+			valType: typ,
+		}
+	}
+	return nil
+}
+
+func createEncoderOfAny(ctx *ctx, typ reflect2.Type) ValEncoder {
+	if typ == anyType {
+		return &directAnyCodec{}
+	}
+	if typ.Implements(anyType) {
+		return &anyCodec{
+			valType: typ,
+		}
+	}
+	return nil
+}
+
+type anyCodec struct {
+	valType reflect2.Type
+}
+
+func (codec *anyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	panic("not implemented")
+}
+
+func (codec *anyCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
+	obj := codec.valType.UnsafeIndirect(ptr)
+	any := obj.(Any)
+	any.WriteTo(stream)
+}
+
+func (codec *anyCodec) IsEmpty(ptr unsafe.Pointer) bool {
+	obj := codec.valType.UnsafeIndirect(ptr)
+	any := obj.(Any)
+	return any.Size() == 0
+}
+
+type directAnyCodec struct {
+}
+
+func (codec *directAnyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	*(*Any)(ptr) = iter.readAny()
+}
+
+func (codec *directAnyCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
+	any := *(*Any)(ptr)
+	any.WriteTo(stream)
+}
+
+func (codec *directAnyCodec) IsEmpty(ptr unsafe.Pointer) bool {
+	any := *(*Any)(ptr)
+	return any.Size() == 0
+}
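
A short sketch of the path lookup that locatePath implements above: string keys walk objects, ints index arrays, and the rune '*' (an int32) fans out over every element. The payload is illustrative.

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	data := []byte(`{"users":[{"name":"ann"},{"name":"bob"}]}`)

	// String keys walk objects, ints index arrays; nothing else is decoded.
	fmt.Println(jsoniter.Get(data, "users", 1, "name").ToString()) // bob

	// '*' is an int32 path element: collect "name" from every element.
	names := jsoniter.Get(data, "users", '*', "name")
	fmt.Println(names.Size())            // 2
	fmt.Println(names.Get(0).ToString()) // ann
}
```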
diff --git a/vendor/github.com/json-iterator/go/any_array.go b/vendor/github.com/json-iterator/go/any_array.go
new file mode 100644
index 0000000..0449e9a
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_array.go
@@ -0,0 +1,278 @@
+package jsoniter
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+type arrayLazyAny struct {
+	baseAny
+	cfg *frozenConfig
+	buf []byte
+	err error
+}
+
+func (any *arrayLazyAny) ValueType() ValueType {
+	return ArrayValue
+}
+
+func (any *arrayLazyAny) MustBeValid() Any {
+	return any
+}
+
+func (any *arrayLazyAny) LastError() error {
+	return any.err
+}
+
+func (any *arrayLazyAny) ToBool() bool {
+	iter := any.cfg.BorrowIterator(any.buf)
+	defer any.cfg.ReturnIterator(iter)
+	return iter.ReadArray()
+}
+
+func (any *arrayLazyAny) ToInt() int {
+	if any.ToBool() {
+		return 1
+	}
+	return 0
+}
+
+func (any *arrayLazyAny) ToInt32() int32 {
+	if any.ToBool() {
+		return 1
+	}
+	return 0
+}
+
+func (any *arrayLazyAny) ToInt64() int64 {
+	if any.ToBool() {
+		return 1
+	}
+	return 0
+}
+
+func (any *arrayLazyAny) ToUint() uint {
+	if any.ToBool() {
+		return 1
+	}
+	return 0
+}
+
+func (any *arrayLazyAny) ToUint32() uint32 {
+	if any.ToBool() {
+		return 1
+	}
+	return 0
+}
+
+func (any *arrayLazyAny) ToUint64() uint64 {
+	if any.ToBool() {
+		return 1
+	}
+	return 0
+}
+
+func (any *arrayLazyAny) ToFloat32() float32 {
+	if any.ToBool() {
+		return 1
+	}
+	return 0
+}
+
+func (any *arrayLazyAny) ToFloat64() float64 {
+	if any.ToBool() {
+		return 1
+	}
+	return 0
+}
+
+func (any *arrayLazyAny) ToString() string {
+	return *(*string)(unsafe.Pointer(&any.buf))
+}
+
+func (any *arrayLazyAny) ToVal(val interface{}) {
+	iter := any.cfg.BorrowIterator(any.buf)
+	defer any.cfg.ReturnIterator(iter)
+	iter.ReadVal(val)
+}
+
+func (any *arrayLazyAny) Get(path ...interface{}) Any {
+	if len(path) == 0 {
+		return any
+	}
+	switch firstPath := path[0].(type) {
+	case int:
+		iter := any.cfg.BorrowIterator(any.buf)
+		defer any.cfg.ReturnIterator(iter)
+		valueBytes := locateArrayElement(iter, firstPath)
+		if valueBytes == nil {
+			return newInvalidAny(path)
+		}
+		iter.ResetBytes(valueBytes)
+		return locatePath(iter, path[1:])
+	case int32:
+		if firstPath == '*' {
+			iter := any.cfg.BorrowIterator(any.buf)
+			defer any.cfg.ReturnIterator(iter)
+			arr := make([]Any, 0)
+			iter.ReadArrayCB(func(iter *Iterator) bool {
+				found := iter.readAny().Get(path[1:]...)
+				if found.ValueType() != InvalidValue {
+					arr = append(arr, found)
+				}
+				return true
+			})
+			return wrapArray(arr)
+		}
+		return newInvalidAny(path)
+	default:
+		return newInvalidAny(path)
+	}
+}
+
+func (any *arrayLazyAny) Size() int {
+	size := 0
+	iter := any.cfg.BorrowIterator(any.buf)
+	defer any.cfg.ReturnIterator(iter)
+	iter.ReadArrayCB(func(iter *Iterator) bool {
+		size++
+		iter.Skip()
+		return true
+	})
+	return size
+}
+
+func (any *arrayLazyAny) WriteTo(stream *Stream) {
+	stream.Write(any.buf)
+}
+
+func (any *arrayLazyAny) GetInterface() interface{} {
+	iter := any.cfg.BorrowIterator(any.buf)
+	defer any.cfg.ReturnIterator(iter)
+	return iter.Read()
+}
+
+type arrayAny struct {
+	baseAny
+	val reflect.Value
+}
+
+func wrapArray(val interface{}) *arrayAny {
+	return &arrayAny{baseAny{}, reflect.ValueOf(val)}
+}
+
+func (any *arrayAny) ValueType() ValueType {
+	return ArrayValue
+}
+
+func (any *arrayAny) MustBeValid() Any {
+	return any
+}
+
+func (any *arrayAny) LastError() error {
+	return nil
+}
+
+func (any *arrayAny) ToBool() bool {
+	return any.val.Len() != 0
+}
+
+func (any *arrayAny) ToInt() int {
+	if any.val.Len() == 0 {
+		return 0
+	}
+	return 1
+}
+
+func (any *arrayAny) ToInt32() int32 {
+	if any.val.Len() == 0 {
+		return 0
+	}
+	return 1
+}
+
+func (any *arrayAny) ToInt64() int64 {
+	if any.val.Len() == 0 {
+		return 0
+	}
+	return 1
+}
+
+func (any *arrayAny) ToUint() uint {
+	if any.val.Len() == 0 {
+		return 0
+	}
+	return 1
+}
+
+func (any *arrayAny) ToUint32() uint32 {
+	if any.val.Len() == 0 {
+		return 0
+	}
+	return 1
+}
+
+func (any *arrayAny) ToUint64() uint64 {
+	if any.val.Len() == 0 {
+		return 0
+	}
+	return 1
+}
+
+func (any *arrayAny) ToFloat32() float32 {
+	if any.val.Len() == 0 {
+		return 0
+	}
+	return 1
+}
+
+func (any *arrayAny) ToFloat64() float64 {
+	if any.val.Len() == 0 {
+		return 0
+	}
+	return 1
+}
+
+func (any *arrayAny) ToString() string {
+	str, _ := MarshalToString(any.val.Interface())
+	return str
+}
+
+func (any *arrayAny) Get(path ...interface{}) Any {
+	if len(path) == 0 {
+		return any
+	}
+	switch firstPath := path[0].(type) {
+	case int:
+		if firstPath < 0 || firstPath >= any.val.Len() {
+			return newInvalidAny(path)
+		}
+		return Wrap(any.val.Index(firstPath).Interface())
+	case int32:
+		if firstPath == '*' {
+			mappedAll := make([]Any, 0)
+			for i := 0; i < any.val.Len(); i++ {
+				mapped := Wrap(any.val.Index(i).Interface()).Get(path[1:]...)
+				if mapped.ValueType() != InvalidValue {
+					mappedAll = append(mappedAll, mapped)
+				}
+			}
+			return wrapArray(mappedAll)
+		}
+		return newInvalidAny(path)
+	default:
+		return newInvalidAny(path)
+	}
+}
+
+func (any *arrayAny) Size() int {
+	return any.val.Len()
+}
+
+func (any *arrayAny) WriteTo(stream *Stream) {
+	stream.WriteVal(any.val)
+}
+
+func (any *arrayAny) GetInterface() interface{} {
+	return any.val.Interface()
+}
diff --git a/vendor/github.com/json-iterator/go/any_bool.go b/vendor/github.com/json-iterator/go/any_bool.go
new file mode 100644
index 0000000..9452324
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_bool.go
@@ -0,0 +1,137 @@
+package jsoniter
+
+type trueAny struct {
+	baseAny
+}
+
+func (any *trueAny) LastError() error {
+	return nil
+}
+
+func (any *trueAny) ToBool() bool {
+	return true
+}
+
+func (any *trueAny) ToInt() int {
+	return 1
+}
+
+func (any *trueAny) ToInt32() int32 {
+	return 1
+}
+
+func (any *trueAny) ToInt64() int64 {
+	return 1
+}
+
+func (any *trueAny) ToUint() uint {
+	return 1
+}
+
+func (any *trueAny) ToUint32() uint32 {
+	return 1
+}
+
+func (any *trueAny) ToUint64() uint64 {
+	return 1
+}
+
+func (any *trueAny) ToFloat32() float32 {
+	return 1
+}
+
+func (any *trueAny) ToFloat64() float64 {
+	return 1
+}
+
+func (any *trueAny) ToString() string {
+	return "true"
+}
+
+func (any *trueAny) WriteTo(stream *Stream) {
+	stream.WriteTrue()
+}
+
+func (any *trueAny) Parse() *Iterator {
+	return nil
+}
+
+func (any *trueAny) GetInterface() interface{} {
+	return true
+}
+
+func (any *trueAny) ValueType() ValueType {
+	return BoolValue
+}
+
+func (any *trueAny) MustBeValid() Any {
+	return any
+}
+
+type falseAny struct {
+	baseAny
+}
+
+func (any *falseAny) LastError() error {
+	return nil
+}
+
+func (any *falseAny) ToBool() bool {
+	return false
+}
+
+func (any *falseAny) ToInt() int {
+	return 0
+}
+
+func (any *falseAny) ToInt32() int32 {
+	return 0
+}
+
+func (any *falseAny) ToInt64() int64 {
+	return 0
+}
+
+func (any *falseAny) ToUint() uint {
+	return 0
+}
+
+func (any *falseAny) ToUint32() uint32 {
+	return 0
+}
+
+func (any *falseAny) ToUint64() uint64 {
+	return 0
+}
+
+func (any *falseAny) ToFloat32() float32 {
+	return 0
+}
+
+func (any *falseAny) ToFloat64() float64 {
+	return 0
+}
+
+func (any *falseAny) ToString() string {
+	return "false"
+}
+
+func (any *falseAny) WriteTo(stream *Stream) {
+	stream.WriteFalse()
+}
+
+func (any *falseAny) Parse() *Iterator {
+	return nil
+}
+
+func (any *falseAny) GetInterface() interface{} {
+	return false
+}
+
+func (any *falseAny) ValueType() ValueType {
+	return BoolValue
+}
+
+func (any *falseAny) MustBeValid() Any {
+	return any
+}
diff --git a/vendor/github.com/json-iterator/go/any_float.go b/vendor/github.com/json-iterator/go/any_float.go
new file mode 100644
index 0000000..35fdb09
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_float.go
@@ -0,0 +1,83 @@
+package jsoniter
+
+import (
+	"strconv"
+)
+
+type floatAny struct {
+	baseAny
+	val float64
+}
+
+func (any *floatAny) Parse() *Iterator {
+	return nil
+}
+
+func (any *floatAny) ValueType() ValueType {
+	return NumberValue
+}
+
+func (any *floatAny) MustBeValid() Any {
+	return any
+}
+
+func (any *floatAny) LastError() error {
+	return nil
+}
+
+func (any *floatAny) ToBool() bool {
+	return any.ToFloat64() != 0
+}
+
+func (any *floatAny) ToInt() int {
+	return int(any.val)
+}
+
+func (any *floatAny) ToInt32() int32 {
+	return int32(any.val)
+}
+
+func (any *floatAny) ToInt64() int64 {
+	return int64(any.val)
+}
+
+func (any *floatAny) ToUint() uint {
+	if any.val > 0 {
+		return uint(any.val)
+	}
+	return 0
+}
+
+func (any *floatAny) ToUint32() uint32 {
+	if any.val > 0 {
+		return uint32(any.val)
+	}
+	return 0
+}
+
+func (any *floatAny) ToUint64() uint64 {
+	if any.val > 0 {
+		return uint64(any.val)
+	}
+	return 0
+}
+
+func (any *floatAny) ToFloat32() float32 {
+	return float32(any.val)
+}
+
+func (any *floatAny) ToFloat64() float64 {
+	return any.val
+}
+
+func (any *floatAny) ToString() string {
+	return strconv.FormatFloat(any.val, 'E', -1, 64)
+}
+
+func (any *floatAny) WriteTo(stream *Stream) {
+	stream.WriteFloat64(any.val)
+}
+
+func (any *floatAny) GetInterface() interface{} {
+	return any.val
+}
diff --git a/vendor/github.com/json-iterator/go/any_int32.go b/vendor/github.com/json-iterator/go/any_int32.go
new file mode 100644
index 0000000..1b56f39
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_int32.go
@@ -0,0 +1,74 @@
+package jsoniter
+
+import (
+	"strconv"
+)
+
+type int32Any struct {
+	baseAny
+	val int32
+}
+
+func (any *int32Any) LastError() error {
+	return nil
+}
+
+func (any *int32Any) ValueType() ValueType {
+	return NumberValue
+}
+
+func (any *int32Any) MustBeValid() Any {
+	return any
+}
+
+func (any *int32Any) ToBool() bool {
+	return any.val != 0
+}
+
+func (any *int32Any) ToInt() int {
+	return int(any.val)
+}
+
+func (any *int32Any) ToInt32() int32 {
+	return any.val
+}
+
+func (any *int32Any) ToInt64() int64 {
+	return int64(any.val)
+}
+
+func (any *int32Any) ToUint() uint {
+	return uint(any.val)
+}
+
+func (any *int32Any) ToUint32() uint32 {
+	return uint32(any.val)
+}
+
+func (any *int32Any) ToUint64() uint64 {
+	return uint64(any.val)
+}
+
+func (any *int32Any) ToFloat32() float32 {
+	return float32(any.val)
+}
+
+func (any *int32Any) ToFloat64() float64 {
+	return float64(any.val)
+}
+
+func (any *int32Any) ToString() string {
+	return strconv.FormatInt(int64(any.val), 10)
+}
+
+func (any *int32Any) WriteTo(stream *Stream) {
+	stream.WriteInt32(any.val)
+}
+
+func (any *int32Any) Parse() *Iterator {
+	return nil
+}
+
+func (any *int32Any) GetInterface() interface{} {
+	return any.val
+}
diff --git a/vendor/github.com/json-iterator/go/any_int64.go b/vendor/github.com/json-iterator/go/any_int64.go
new file mode 100644
index 0000000..c440d72
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_int64.go
@@ -0,0 +1,74 @@
+package jsoniter
+
+import (
+	"strconv"
+)
+
+type int64Any struct {
+	baseAny
+	val int64
+}
+
+func (any *int64Any) LastError() error {
+	return nil
+}
+
+func (any *int64Any) ValueType() ValueType {
+	return NumberValue
+}
+
+func (any *int64Any) MustBeValid() Any {
+	return any
+}
+
+func (any *int64Any) ToBool() bool {
+	return any.val != 0
+}
+
+func (any *int64Any) ToInt() int {
+	return int(any.val)
+}
+
+func (any *int64Any) ToInt32() int32 {
+	return int32(any.val)
+}
+
+func (any *int64Any) ToInt64() int64 {
+	return any.val
+}
+
+func (any *int64Any) ToUint() uint {
+	return uint(any.val)
+}
+
+func (any *int64Any) ToUint32() uint32 {
+	return uint32(any.val)
+}
+
+func (any *int64Any) ToUint64() uint64 {
+	return uint64(any.val)
+}
+
+func (any *int64Any) ToFloat32() float32 {
+	return float32(any.val)
+}
+
+func (any *int64Any) ToFloat64() float64 {
+	return float64(any.val)
+}
+
+func (any *int64Any) ToString() string {
+	return strconv.FormatInt(any.val, 10)
+}
+
+func (any *int64Any) WriteTo(stream *Stream) {
+	stream.WriteInt64(any.val)
+}
+
+func (any *int64Any) Parse() *Iterator {
+	return nil
+}
+
+func (any *int64Any) GetInterface() interface{} {
+	return any.val
+}
diff --git a/vendor/github.com/json-iterator/go/any_invalid.go b/vendor/github.com/json-iterator/go/any_invalid.go
new file mode 100644
index 0000000..1d859ea
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_invalid.go
@@ -0,0 +1,82 @@
+package jsoniter
+
+import "fmt"
+
+type invalidAny struct {
+	baseAny
+	err error
+}
+
+func newInvalidAny(path []interface{}) *invalidAny {
+	return &invalidAny{baseAny{}, fmt.Errorf("%v not found", path)}
+}
+
+func (any *invalidAny) LastError() error {
+	return any.err
+}
+
+func (any *invalidAny) ValueType() ValueType {
+	return InvalidValue
+}
+
+func (any *invalidAny) MustBeValid() Any {
+	panic(any.err)
+}
+
+func (any *invalidAny) ToBool() bool {
+	return false
+}
+
+func (any *invalidAny) ToInt() int {
+	return 0
+}
+
+func (any *invalidAny) ToInt32() int32 {
+	return 0
+}
+
+func (any *invalidAny) ToInt64() int64 {
+	return 0
+}
+
+func (any *invalidAny) ToUint() uint {
+	return 0
+}
+
+func (any *invalidAny) ToUint32() uint32 {
+	return 0
+}
+
+func (any *invalidAny) ToUint64() uint64 {
+	return 0
+}
+
+func (any *invalidAny) ToFloat32() float32 {
+	return 0
+}
+
+func (any *invalidAny) ToFloat64() float64 {
+	return 0
+}
+
+func (any *invalidAny) ToString() string {
+	return ""
+}
+
+func (any *invalidAny) WriteTo(stream *Stream) {
+}
+
+func (any *invalidAny) Get(path ...interface{}) Any {
+	if any.err == nil {
+		return &invalidAny{baseAny{}, fmt.Errorf("get %v from invalid", path)}
+	}
+	return &invalidAny{baseAny{}, fmt.Errorf("%v, get %v from invalid", any.err, path)}
+}
+
+func (any *invalidAny) Parse() *Iterator {
+	return nil
+}
+
+func (any *invalidAny) GetInterface() interface{} {
+	return nil
+}
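
When a lookup misses, Get returns this invalid Any rather than a Go error value. A sketch of the two ways a caller can react, using an illustrative payload:

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	data := []byte(`{"a":1}`)

	// A miss yields an invalid Any: zero values plus a recorded error.
	v := jsoniter.Get(data, "missing", "path")
	if v.ValueType() == jsoniter.InvalidValue {
		fmt.Println(v.LastError()) // [missing path] not found
		fmt.Println(v.ToInt())     // 0
	}

	// MustBeValid turns the recorded error into a panic instead.
	defer func() { fmt.Println("recovered:", recover()) }()
	jsoniter.Get(data, "missing").MustBeValid()
}
```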
diff --git a/vendor/github.com/json-iterator/go/any_nil.go b/vendor/github.com/json-iterator/go/any_nil.go
new file mode 100644
index 0000000..d04cb54
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_nil.go
@@ -0,0 +1,69 @@
+package jsoniter
+
+type nilAny struct {
+	baseAny
+}
+
+func (any *nilAny) LastError() error {
+	return nil
+}
+
+func (any *nilAny) ValueType() ValueType {
+	return NilValue
+}
+
+func (any *nilAny) MustBeValid() Any {
+	return any
+}
+
+func (any *nilAny) ToBool() bool {
+	return false
+}
+
+func (any *nilAny) ToInt() int {
+	return 0
+}
+
+func (any *nilAny) ToInt32() int32 {
+	return 0
+}
+
+func (any *nilAny) ToInt64() int64 {
+	return 0
+}
+
+func (any *nilAny) ToUint() uint {
+	return 0
+}
+
+func (any *nilAny) ToUint32() uint32 {
+	return 0
+}
+
+func (any *nilAny) ToUint64() uint64 {
+	return 0
+}
+
+func (any *nilAny) ToFloat32() float32 {
+	return 0
+}
+
+func (any *nilAny) ToFloat64() float64 {
+	return 0
+}
+
+func (any *nilAny) ToString() string {
+	return ""
+}
+
+func (any *nilAny) WriteTo(stream *Stream) {
+	stream.WriteNil()
+}
+
+func (any *nilAny) Parse() *Iterator {
+	return nil
+}
+
+func (any *nilAny) GetInterface() interface{} {
+	return nil
+}
diff --git a/vendor/github.com/json-iterator/go/any_number.go b/vendor/github.com/json-iterator/go/any_number.go
new file mode 100644
index 0000000..9d1e901
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_number.go
@@ -0,0 +1,123 @@
+package jsoniter
+
+import (
+	"io"
+	"unsafe"
+)
+
+type numberLazyAny struct {
+	baseAny
+	cfg *frozenConfig
+	buf []byte
+	err error
+}
+
+func (any *numberLazyAny) ValueType() ValueType {
+	return NumberValue
+}
+
+func (any *numberLazyAny) MustBeValid() Any {
+	return any
+}
+
+func (any *numberLazyAny) LastError() error {
+	return any.err
+}
+
+func (any *numberLazyAny) ToBool() bool {
+	return any.ToFloat64() != 0
+}
+
+func (any *numberLazyAny) ToInt() int {
+	iter := any.cfg.BorrowIterator(any.buf)
+	defer any.cfg.ReturnIterator(iter)
+	val := iter.ReadInt()
+	if iter.Error != nil && iter.Error != io.EOF {
+		any.err = iter.Error
+	}
+	return val
+}
+
+func (any *numberLazyAny) ToInt32() int32 {
+	iter := any.cfg.BorrowIterator(any.buf)
+	defer any.cfg.ReturnIterator(iter)
+	val := iter.ReadInt32()
+	if iter.Error != nil && iter.Error != io.EOF {
+		any.err = iter.Error
+	}
+	return val
+}
+
+func (any *numberLazyAny) ToInt64() int64 {
+	iter := any.cfg.BorrowIterator(any.buf)
+	defer any.cfg.ReturnIterator(iter)
+	val := iter.ReadInt64()
+	if iter.Error != nil && iter.Error != io.EOF {
+		any.err = iter.Error
+	}
+	return val
+}
+
+func (any *numberLazyAny) ToUint() uint {
+	iter := any.cfg.BorrowIterator(any.buf)
+	defer any.cfg.ReturnIterator(iter)
+	val := iter.ReadUint()
+	if iter.Error != nil && iter.Error != io.EOF {
+		any.err = iter.Error
+	}
+	return val
+}
+
+func (any *numberLazyAny) ToUint32() uint32 {
+	iter := any.cfg.BorrowIterator(any.buf)
+	defer any.cfg.ReturnIterator(iter)
+	val := iter.ReadUint32()
+	if iter.Error != nil && iter.Error != io.EOF {
+		any.err = iter.Error
+	}
+	return val
+}
+
+func (any *numberLazyAny) ToUint64() uint64 {
+	iter := any.cfg.BorrowIterator(any.buf)
+	defer any.cfg.ReturnIterator(iter)
+	val := iter.ReadUint64()
+	if iter.Error != nil && iter.Error != io.EOF {
+		any.err = iter.Error
+	}
+	return val
+}
+
+func (any *numberLazyAny) ToFloat32() float32 {
+	iter := any.cfg.BorrowIterator(any.buf)
+	defer any.cfg.ReturnIterator(iter)
+	val := iter.ReadFloat32()
+	if iter.Error != nil && iter.Error != io.EOF {
+		any.err = iter.Error
+	}
+	return val
+}
+
+func (any *numberLazyAny) ToFloat64() float64 {
+	iter := any.cfg.BorrowIterator(any.buf)
+	defer any.cfg.ReturnIterator(iter)
+	val := iter.ReadFloat64()
+	if iter.Error != nil && iter.Error != io.EOF {
+		any.err = iter.Error
+	}
+	return val
+}
+
+func (any *numberLazyAny) ToString() string {
+	return *(*string)(unsafe.Pointer(&any.buf))
+}
+
+func (any *numberLazyAny) WriteTo(stream *Stream) {
+	stream.Write(any.buf)
+}
+
+func (any *numberLazyAny) GetInterface() interface{} {
+	iter := any.cfg.BorrowIterator(any.buf)
+	defer any.cfg.ReturnIterator(iter)
+	return iter.Read()
+}
diff --git a/vendor/github.com/json-iterator/go/any_object.go b/vendor/github.com/json-iterator/go/any_object.go
new file mode 100644
index 0000000..c44ef5c
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_object.go
@@ -0,0 +1,374 @@
+package jsoniter
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+type objectLazyAny struct {
+	baseAny
+	cfg *frozenConfig
+	buf []byte
+	err error
+}
+
+func (any *objectLazyAny) ValueType() ValueType {
+	return ObjectValue
+}
+
+func (any *objectLazyAny) MustBeValid() Any {
+	return any
+}
+
+func (any *objectLazyAny) LastError() error {
+	return any.err
+}
+
+func (any *objectLazyAny) ToBool() bool {
+	return true
+}
+
+func (any *objectLazyAny) ToInt() int {
+	return 0
+}
+
+func (any *objectLazyAny) ToInt32() int32 {
+	return 0
+}
+
+func (any *objectLazyAny) ToInt64() int64 {
+	return 0
+}
+
+func (any *objectLazyAny) ToUint() uint {
+	return 0
+}
+
+func (any *objectLazyAny) ToUint32() uint32 {
+	return 0
+}
+
+func (any *objectLazyAny) ToUint64() uint64 {
+	return 0
+}
+
+func (any *objectLazyAny) ToFloat32() float32 {
+	return 0
+}
+
+func (any *objectLazyAny) ToFloat64() float64 {
+	return 0
+}
+
+func (any *objectLazyAny) ToString() string {
+	return *(*string)(unsafe.Pointer(&any.buf))
+}
+
+func (any *objectLazyAny) ToVal(obj interface{}) {
+	iter := any.cfg.BorrowIterator(any.buf)
+	defer any.cfg.ReturnIterator(iter)
+	iter.ReadVal(obj)
+}
+
+func (any *objectLazyAny) Get(path ...interface{}) Any {
+	if len(path) == 0 {
+		return any
+	}
+	switch firstPath := path[0].(type) {
+	case string:
+		iter := any.cfg.BorrowIterator(any.buf)
+		defer any.cfg.ReturnIterator(iter)
+		valueBytes := locateObjectField(iter, firstPath)
+		if valueBytes == nil {
+			return newInvalidAny(path)
+		}
+		iter.ResetBytes(valueBytes)
+		return locatePath(iter, path[1:])
+	case int32:
+		if firstPath == '*' {
+			mappedAll := map[string]Any{}
+			iter := any.cfg.BorrowIterator(any.buf)
+			defer any.cfg.ReturnIterator(iter)
+			iter.ReadMapCB(func(iter *Iterator, field string) bool {
+				mapped := locatePath(iter, path[1:])
+				if mapped.ValueType() != InvalidValue {
+					mappedAll[field] = mapped
+				}
+				return true
+			})
+			return wrapMap(mappedAll)
+		}
+		return newInvalidAny(path)
+	default:
+		return newInvalidAny(path)
+	}
+}
+
+func (any *objectLazyAny) Keys() []string {
+	keys := []string{}
+	iter := any.cfg.BorrowIterator(any.buf)
+	defer any.cfg.ReturnIterator(iter)
+	iter.ReadMapCB(func(iter *Iterator, field string) bool {
+		iter.Skip()
+		keys = append(keys, field)
+		return true
+	})
+	return keys
+}
+
+func (any *objectLazyAny) Size() int {
+	size := 0
+	iter := any.cfg.BorrowIterator(any.buf)
+	defer any.cfg.ReturnIterator(iter)
+	iter.ReadObjectCB(func(iter *Iterator, field string) bool {
+		iter.Skip()
+		size++
+		return true
+	})
+	return size
+}
+
+func (any *objectLazyAny) WriteTo(stream *Stream) {
+	stream.Write(any.buf)
+}
+
+func (any *objectLazyAny) GetInterface() interface{} {
+	iter := any.cfg.BorrowIterator(any.buf)
+	defer any.cfg.ReturnIterator(iter)
+	return iter.Read()
+}
+
+type objectAny struct {
+	baseAny
+	err error
+	val reflect.Value
+}
+
+func wrapStruct(val interface{}) *objectAny {
+	return &objectAny{baseAny{}, nil, reflect.ValueOf(val)}
+}
+
+func (any *objectAny) ValueType() ValueType {
+	return ObjectValue
+}
+
+func (any *objectAny) MustBeValid() Any {
+	return any
+}
+
+func (any *objectAny) Parse() *Iterator {
+	return nil
+}
+
+func (any *objectAny) LastError() error {
+	return any.err
+}
+
+func (any *objectAny) ToBool() bool {
+	return any.val.NumField() != 0
+}
+
+func (any *objectAny) ToInt() int {
+	return 0
+}
+
+func (any *objectAny) ToInt32() int32 {
+	return 0
+}
+
+func (any *objectAny) ToInt64() int64 {
+	return 0
+}
+
+func (any *objectAny) ToUint() uint {
+	return 0
+}
+
+func (any *objectAny) ToUint32() uint32 {
+	return 0
+}
+
+func (any *objectAny) ToUint64() uint64 {
+	return 0
+}
+
+func (any *objectAny) ToFloat32() float32 {
+	return 0
+}
+
+func (any *objectAny) ToFloat64() float64 {
+	return 0
+}
+
+func (any *objectAny) ToString() string {
+	str, err := MarshalToString(any.val.Interface())
+	any.err = err
+	return str
+}
+
+func (any *objectAny) Get(path ...interface{}) Any {
+	if len(path) == 0 {
+		return any
+	}
+	switch firstPath := path[0].(type) {
+	case string:
+		field := any.val.FieldByName(firstPath)
+		if !field.IsValid() {
+			return newInvalidAny(path)
+		}
+		return Wrap(field.Interface())
+	case int32:
+		if firstPath == '*' {
+			mappedAll := map[string]Any{}
+			for i := 0; i < any.val.NumField(); i++ {
+				field := any.val.Field(i)
+				if field.CanInterface() {
+					mapped := Wrap(field.Interface()).Get(path[1:]...)
+					if mapped.ValueType() != InvalidValue {
+						mappedAll[any.val.Type().Field(i).Name] = mapped
+					}
+				}
+			}
+			return wrapMap(mappedAll)
+		}
+		return newInvalidAny(path)
+	default:
+		return newInvalidAny(path)
+	}
+}
+
+func (any *objectAny) Keys() []string {
+	keys := make([]string, 0, any.val.NumField())
+	for i := 0; i < any.val.NumField(); i++ {
+		keys = append(keys, any.val.Type().Field(i).Name)
+	}
+	return keys
+}
+
+func (any *objectAny) Size() int {
+	return any.val.NumField()
+}
+
+func (any *objectAny) WriteTo(stream *Stream) {
+	stream.WriteVal(any.val)
+}
+
+func (any *objectAny) GetInterface() interface{} {
+	return any.val.Interface()
+}
+
+type mapAny struct {
+	baseAny
+	err error
+	val reflect.Value
+}
+
+func wrapMap(val interface{}) *mapAny {
+	return &mapAny{baseAny{}, nil, reflect.ValueOf(val)}
+}
+
+func (any *mapAny) ValueType() ValueType {
+	return ObjectValue
+}
+
+func (any *mapAny) MustBeValid() Any {
+	return any
+}
+
+func (any *mapAny) Parse() *Iterator {
+	return nil
+}
+
+func (any *mapAny) LastError() error {
+	return any.err
+}
+
+func (any *mapAny) ToBool() bool {
+	return true
+}
+
+func (any *mapAny) ToInt() int {
+	return 0
+}
+
+func (any *mapAny) ToInt32() int32 {
+	return 0
+}
+
+func (any *mapAny) ToInt64() int64 {
+	return 0
+}
+
+func (any *mapAny) ToUint() uint {
+	return 0
+}
+
+func (any *mapAny) ToUint32() uint32 {
+	return 0
+}
+
+func (any *mapAny) ToUint64() uint64 {
+	return 0
+}
+
+func (any *mapAny) ToFloat32() float32 {
+	return 0
+}
+
+func (any *mapAny) ToFloat64() float64 {
+	return 0
+}
+
+func (any *mapAny) ToString() string {
+	str, err := MarshalToString(any.val.Interface())
+	any.err = err
+	return str
+}
+
+func (any *mapAny) Get(path ...interface{}) Any {
+	if len(path) == 0 {
+		return any
+	}
+	switch firstPath := path[0].(type) {
+	case int32:
+		if firstPath == '*' {
+			mappedAll := map[string]Any{}
+			for _, key := range any.val.MapKeys() {
+				keyAsStr := key.String()
+				element := Wrap(any.val.MapIndex(key).Interface())
+				mapped := element.Get(path[1:]...)
+				if mapped.ValueType() != InvalidValue {
+					mappedAll[keyAsStr] = mapped
+				}
+			}
+			return wrapMap(mappedAll)
+		}
+		return newInvalidAny(path)
+	default:
+		value := any.val.MapIndex(reflect.ValueOf(firstPath))
+		if !value.IsValid() {
+			return newInvalidAny(path)
+		}
+		return Wrap(value.Interface())
+	}
+}
+
+func (any *mapAny) Keys() []string {
+	keys := make([]string, 0, any.val.Len())
+	for _, key := range any.val.MapKeys() {
+		keys = append(keys, key.String())
+	}
+	return keys
+}
+
+func (any *mapAny) Size() int {
+	return any.val.Len()
+}
+
+func (any *mapAny) WriteTo(stream *Stream) {
+	stream.WriteVal(any.val)
+}
+
+func (any *mapAny) GetInterface() interface{} {
+	return any.val.Interface()
+}
diff --git a/vendor/github.com/json-iterator/go/any_str.go b/vendor/github.com/json-iterator/go/any_str.go
new file mode 100644
index 0000000..a4b93c7
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_str.go
@@ -0,0 +1,166 @@
+package jsoniter
+
+import (
+	"fmt"
+	"strconv"
+)
+
+type stringAny struct {
+	baseAny
+	val string
+}
+
+func (any *stringAny) Get(path ...interface{}) Any {
+	if len(path) == 0 {
+		return any
+	}
+	return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)}
+}
+
+func (any *stringAny) Parse() *Iterator {
+	return nil
+}
+
+func (any *stringAny) ValueType() ValueType {
+	return StringValue
+}
+
+func (any *stringAny) MustBeValid() Any {
+	return any
+}
+
+func (any *stringAny) LastError() error {
+	return nil
+}
+
+func (any *stringAny) ToBool() bool {
+	str := any.ToString()
+	if str == "0" {
+		return false
+	}
+	for _, c := range str {
+		switch c {
+		case ' ', '\n', '\r', '\t':
+		default:
+			return true
+		}
+	}
+	return false
+}
+
+func (any *stringAny) ToInt() int {
+	return int(any.ToInt64())
+}
+
+func (any *stringAny) ToInt32() int32 {
+	return int32(any.ToInt64())
+}
+
+func (any *stringAny) ToInt64() int64 {
+	if any.val == "" {
+		return 0
+	}
+
+	flag := 1
+	startPos := 0
+	if any.val[0] == '+' || any.val[0] == '-' {
+		startPos = 1
+	}
+
+	if any.val[0] == '-' {
+		flag = -1
+	}
+
+	// Start endPos at startPos so that a lone sign with no digits cannot
+	// produce an inverted slice below.
+	endPos := startPos
+	for i := startPos; i < len(any.val); i++ {
+		if any.val[i] >= '0' && any.val[i] <= '9' {
+			endPos = i + 1
+		} else {
+			break
+		}
+	}
+	parsed, _ := strconv.ParseInt(any.val[startPos:endPos], 10, 64)
+	return int64(flag) * parsed
+}
+
+func (any *stringAny) ToUint() uint {
+	return uint(any.ToUint64())
+}
+
+func (any *stringAny) ToUint32() uint32 {
+	return uint32(any.ToUint64())
+}
+
+func (any *stringAny) ToUint64() uint64 {
+	if any.val == "" {
+		return 0
+	}
+
+	startPos := 0
+
+	if any.val[0] == '-' {
+		return 0
+	}
+	if any.val[0] == '+' {
+		startPos = 1
+	}
+
+	// As in ToInt64, start endPos at startPos to keep the slice below valid
+	// when no digits follow the sign.
+	endPos := startPos
+	for i := startPos; i < len(any.val); i++ {
+		if any.val[i] >= '0' && any.val[i] <= '9' {
+			endPos = i + 1
+		} else {
+			break
+		}
+	}
+	parsed, _ := strconv.ParseUint(any.val[startPos:endPos], 10, 64)
+	return parsed
+}
+
+func (any *stringAny) ToFloat32() float32 {
+	return float32(any.ToFloat64())
+}
+
+func (any *stringAny) ToFloat64() float64 {
+	if len(any.val) == 0 {
+		return 0
+	}
+
+	// first char invalid
+	if any.val[0] != '+' && any.val[0] != '-' && (any.val[0] > '9' || any.val[0] < '0') {
+		return 0
+	}
+
+	// extract valid num expression from string
+	// eg 123true => 123, -12.12xxa => -12.12
+	endPos := 1
+	for i := 1; i < len(any.val); i++ {
+		if any.val[i] == '.' || any.val[i] == 'e' || any.val[i] == 'E' || any.val[i] == '+' || any.val[i] == '-' {
+			endPos = i + 1
+			continue
+		}
+
+		// end position is the first char which is not digit
+		if any.val[i] >= '0' && any.val[i] <= '9' {
+			endPos = i + 1
+		} else {
+			endPos = i
+			break
+		}
+	}
+	parsed, _ := strconv.ParseFloat(any.val[:endPos], 64)
+	return parsed
+}
+
+func (any *stringAny) ToString() string {
+	return any.val
+}
+
+func (any *stringAny) WriteTo(stream *Stream) {
+	stream.WriteString(any.val)
+}
+
+func (any *stringAny) GetInterface() interface{} {
+	return any.val
+}
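
The conversions above coerce a leading numeric prefix instead of failing outright. A sketch via the exported WrapString; the expected values follow directly from the parsing loops above.

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// ToInt64 keeps the optional sign plus the longest leading digit run.
	fmt.Println(jsoniter.WrapString("-42abc").ToInt64()) // -42

	// ToFloat64 also admits '.', 'e', 'E', '+', '-' inside the run,
	// so "-12.12xxa" parses as -12.12.
	fmt.Println(jsoniter.WrapString("-12.12xxa").ToFloat64()) // -12.12

	// ToBool is false only for "0" and all-whitespace strings.
	fmt.Println(jsoniter.WrapString(" \t").ToBool()) // false
	fmt.Println(jsoniter.WrapString("no").ToBool())  // true
}
```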
diff --git a/vendor/github.com/json-iterator/go/any_uint32.go b/vendor/github.com/json-iterator/go/any_uint32.go
new file mode 100644
index 0000000..656bbd3
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_uint32.go
@@ -0,0 +1,74 @@
+package jsoniter
+
+import (
+	"strconv"
+)
+
+type uint32Any struct {
+	baseAny
+	val uint32
+}
+
+func (any *uint32Any) LastError() error {
+	return nil
+}
+
+func (any *uint32Any) ValueType() ValueType {
+	return NumberValue
+}
+
+func (any *uint32Any) MustBeValid() Any {
+	return any
+}
+
+func (any *uint32Any) ToBool() bool {
+	return any.val != 0
+}
+
+func (any *uint32Any) ToInt() int {
+	return int(any.val)
+}
+
+func (any *uint32Any) ToInt32() int32 {
+	return int32(any.val)
+}
+
+func (any *uint32Any) ToInt64() int64 {
+	return int64(any.val)
+}
+
+func (any *uint32Any) ToUint() uint {
+	return uint(any.val)
+}
+
+func (any *uint32Any) ToUint32() uint32 {
+	return any.val
+}
+
+func (any *uint32Any) ToUint64() uint64 {
+	return uint64(any.val)
+}
+
+func (any *uint32Any) ToFloat32() float32 {
+	return float32(any.val)
+}
+
+func (any *uint32Any) ToFloat64() float64 {
+	return float64(any.val)
+}
+
+func (any *uint32Any) ToString() string {
+	return strconv.FormatInt(int64(any.val), 10)
+}
+
+func (any *uint32Any) WriteTo(stream *Stream) {
+	stream.WriteUint32(any.val)
+}
+
+func (any *uint32Any) Parse() *Iterator {
+	return nil
+}
+
+func (any *uint32Any) GetInterface() interface{} {
+	return any.val
+}
diff --git a/vendor/github.com/json-iterator/go/any_uint64.go b/vendor/github.com/json-iterator/go/any_uint64.go
new file mode 100644
index 0000000..7df2fce
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_uint64.go
@@ -0,0 +1,74 @@
+package jsoniter
+
+import (
+	"strconv"
+)
+
+type uint64Any struct {
+	baseAny
+	val uint64
+}
+
+func (any *uint64Any) LastError() error {
+	return nil
+}
+
+func (any *uint64Any) ValueType() ValueType {
+	return NumberValue
+}
+
+func (any *uint64Any) MustBeValid() Any {
+	return any
+}
+
+func (any *uint64Any) ToBool() bool {
+	return any.val != 0
+}
+
+func (any *uint64Any) ToInt() int {
+	return int(any.val)
+}
+
+func (any *uint64Any) ToInt32() int32 {
+	return int32(any.val)
+}
+
+func (any *uint64Any) ToInt64() int64 {
+	return int64(any.val)
+}
+
+func (any *uint64Any) ToUint() uint {
+	return uint(any.val)
+}
+
+func (any *uint64Any) ToUint32() uint32 {
+	return uint32(any.val)
+}
+
+func (any *uint64Any) ToUint64() uint64 {
+	return any.val
+}
+
+func (any *uint64Any) ToFloat32() float32 {
+	return float32(any.val)
+}
+
+func (any *uint64Any) ToFloat64() float64 {
+	return float64(any.val)
+}
+
+func (any *uint64Any) ToString() string {
+	return strconv.FormatUint(any.val, 10)
+}
+
+func (any *uint64Any) WriteTo(stream *Stream) {
+	stream.WriteUint64(any.val)
+}
+
+func (any *uint64Any) Parse() *Iterator {
+	return nil
+}
+
+func (any *uint64Any) GetInterface() interface{} {
+	return any.val
+}
diff --git a/vendor/github.com/json-iterator/go/build.sh b/vendor/github.com/json-iterator/go/build.sh
new file mode 100755
index 0000000..b45ef68
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/build.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+set -e
+set -x
+
+if [ ! -d /tmp/build-golang/src/github.com/json-iterator ]; then
+    mkdir -p /tmp/build-golang/src/github.com/json-iterator
+    ln -s $PWD /tmp/build-golang/src/github.com/json-iterator/go
+fi
+export GOPATH=/tmp/build-golang
+go get -u github.com/golang/dep/cmd/dep
+cd /tmp/build-golang/src/github.com/json-iterator/go
+exec $GOPATH/bin/dep ensure -update
diff --git a/vendor/github.com/json-iterator/go/config.go b/vendor/github.com/json-iterator/go/config.go
new file mode 100644
index 0000000..8c58fcb
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/config.go
@@ -0,0 +1,375 @@
+package jsoniter
+
+import (
+	"encoding/json"
+	"io"
+	"reflect"
+	"sync"
+	"unsafe"
+
+	"github.com/modern-go/concurrent"
+	"github.com/modern-go/reflect2"
+)
+
+// Config customizes how the API behaves.
+// An API instance is created from a Config by calling Froze.
+type Config struct {
+	IndentionStep                 int
+	MarshalFloatWith6Digits       bool
+	EscapeHTML                    bool
+	SortMapKeys                   bool
+	UseNumber                     bool
+	DisallowUnknownFields         bool
+	TagKey                        string
+	OnlyTaggedField               bool
+	ValidateJsonRawMessage        bool
+	ObjectFieldMustBeSimpleString bool
+	CaseSensitive                 bool
+}
+
+// API is the public interface of this package.
+// The primary entry points are Marshal and Unmarshal.
+type API interface {
+	IteratorPool
+	StreamPool
+	MarshalToString(v interface{}) (string, error)
+	Marshal(v interface{}) ([]byte, error)
+	MarshalIndent(v interface{}, prefix, indent string) ([]byte, error)
+	UnmarshalFromString(str string, v interface{}) error
+	Unmarshal(data []byte, v interface{}) error
+	Get(data []byte, path ...interface{}) Any
+	NewEncoder(writer io.Writer) *Encoder
+	NewDecoder(reader io.Reader) *Decoder
+	Valid(data []byte) bool
+	RegisterExtension(extension Extension)
+	DecoderOf(typ reflect2.Type) ValDecoder
+	EncoderOf(typ reflect2.Type) ValEncoder
+}
+
+// ConfigDefault is the default API.
+var ConfigDefault = Config{
+	EscapeHTML: true,
+}.Froze()
+
+// ConfigCompatibleWithStandardLibrary tries to be 100% compatible with standard library behavior
+var ConfigCompatibleWithStandardLibrary = Config{
+	EscapeHTML:             true,
+	SortMapKeys:            true,
+	ValidateJsonRawMessage: true,
+}.Froze()
+
+// ConfigFastest marshals floats with only 6 digits of precision.
+var ConfigFastest = Config{
+	EscapeHTML:                    false,
+	MarshalFloatWith6Digits:       true, // will lose precision
+	ObjectFieldMustBeSimpleString: true, // do not unescape object field
+}.Froze()
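
The three frozen configs above are presets; any Config can be frozen into its own API value. A sketch with illustrative field choices:

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

// A custom API: deterministic map order, 2-space indent, tagged fields only.
var api = jsoniter.Config{
	SortMapKeys:     true,
	IndentionStep:   2,
	OnlyTaggedField: true,
}.Froze()

type point struct {
	X int `json:"x"`
	Y int // untagged: dropped because OnlyTaggedField is set
}

func main() {
	out, err := api.Marshal(point{X: 1, Y: 2})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"x": 1} spread over indented lines; no "Y"
}
```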
+
+type frozenConfig struct {
+	configBeforeFrozen            Config
+	sortMapKeys                   bool
+	indentionStep                 int
+	objectFieldMustBeSimpleString bool
+	onlyTaggedField               bool
+	disallowUnknownFields         bool
+	decoderCache                  *concurrent.Map
+	encoderCache                  *concurrent.Map
+	encoderExtension              Extension
+	decoderExtension              Extension
+	extraExtensions               []Extension
+	streamPool                    *sync.Pool
+	iteratorPool                  *sync.Pool
+	caseSensitive                 bool
+}
+
+func (cfg *frozenConfig) initCache() {
+	cfg.decoderCache = concurrent.NewMap()
+	cfg.encoderCache = concurrent.NewMap()
+}
+
+func (cfg *frozenConfig) addDecoderToCache(cacheKey uintptr, decoder ValDecoder) {
+	cfg.decoderCache.Store(cacheKey, decoder)
+}
+
+func (cfg *frozenConfig) addEncoderToCache(cacheKey uintptr, encoder ValEncoder) {
+	cfg.encoderCache.Store(cacheKey, encoder)
+}
+
+func (cfg *frozenConfig) getDecoderFromCache(cacheKey uintptr) ValDecoder {
+	decoder, found := cfg.decoderCache.Load(cacheKey)
+	if found {
+		return decoder.(ValDecoder)
+	}
+	return nil
+}
+
+func (cfg *frozenConfig) getEncoderFromCache(cacheKey uintptr) ValEncoder {
+	encoder, found := cfg.encoderCache.Load(cacheKey)
+	if found {
+		return encoder.(ValEncoder)
+	}
+	return nil
+}
+
+var cfgCache = concurrent.NewMap()
+
+func getFrozenConfigFromCache(cfg Config) *frozenConfig {
+	obj, found := cfgCache.Load(cfg)
+	if found {
+		return obj.(*frozenConfig)
+	}
+	return nil
+}
+
+func addFrozenConfigToCache(cfg Config, frozenConfig *frozenConfig) {
+	cfgCache.Store(cfg, frozenConfig)
+}
+
+// Froze forges an API instance from the Config.
+func (cfg Config) Froze() API {
+	api := &frozenConfig{
+		sortMapKeys:                   cfg.SortMapKeys,
+		indentionStep:                 cfg.IndentionStep,
+		objectFieldMustBeSimpleString: cfg.ObjectFieldMustBeSimpleString,
+		onlyTaggedField:               cfg.OnlyTaggedField,
+		disallowUnknownFields:         cfg.DisallowUnknownFields,
+		caseSensitive:                 cfg.CaseSensitive,
+	}
+	api.streamPool = &sync.Pool{
+		New: func() interface{} {
+			return NewStream(api, nil, 512)
+		},
+	}
+	api.iteratorPool = &sync.Pool{
+		New: func() interface{} {
+			return NewIterator(api)
+		},
+	}
+	api.initCache()
+	encoderExtension := EncoderExtension{}
+	decoderExtension := DecoderExtension{}
+	if cfg.MarshalFloatWith6Digits {
+		api.marshalFloatWith6Digits(encoderExtension)
+	}
+	if cfg.EscapeHTML {
+		api.escapeHTML(encoderExtension)
+	}
+	if cfg.UseNumber {
+		api.useNumber(decoderExtension)
+	}
+	if cfg.ValidateJsonRawMessage {
+		api.validateJsonRawMessage(encoderExtension)
+	}
+	api.encoderExtension = encoderExtension
+	api.decoderExtension = decoderExtension
+	api.configBeforeFrozen = cfg
+	return api
+}
+
+func (cfg Config) frozeWithCacheReuse(extraExtensions []Extension) *frozenConfig {
+	api := getFrozenConfigFromCache(cfg)
+	if api != nil {
+		return api
+	}
+	api = cfg.Froze().(*frozenConfig)
+	for _, extension := range extraExtensions {
+		api.RegisterExtension(extension)
+	}
+	addFrozenConfigToCache(cfg, api)
+	return api
+}
+
+func (cfg *frozenConfig) validateJsonRawMessage(extension EncoderExtension) {
+	encoder := &funcEncoder{func(ptr unsafe.Pointer, stream *Stream) {
+		rawMessage := *(*json.RawMessage)(ptr)
+		iter := cfg.BorrowIterator([]byte(rawMessage))
+		// Always return the borrowed iterator to the pool, and treat io.EOF
+		// as success: it only means the raw message was fully consumed.
+		defer cfg.ReturnIterator(iter)
+		iter.Read()
+		if iter.Error != nil && iter.Error != io.EOF {
+			stream.WriteRaw("null")
+		} else {
+			stream.WriteRaw(string(rawMessage))
+		}
+	}, func(ptr unsafe.Pointer) bool {
+		return len(*((*json.RawMessage)(ptr))) == 0
+	}}
+	extension[reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem()] = encoder
+	extension[reflect2.TypeOfPtr((*RawMessage)(nil)).Elem()] = encoder
+}
+
+func (cfg *frozenConfig) useNumber(extension DecoderExtension) {
+	extension[reflect2.TypeOfPtr((*interface{})(nil)).Elem()] = &funcDecoder{func(ptr unsafe.Pointer, iter *Iterator) {
+		existingValue := *((*interface{})(ptr))
+		if existingValue != nil && reflect.TypeOf(existingValue).Kind() == reflect.Ptr {
+			iter.ReadVal(existingValue)
+			return
+		}
+		if iter.WhatIsNext() == NumberValue {
+			*((*interface{})(ptr)) = json.Number(iter.readNumberAsString())
+		} else {
+			*((*interface{})(ptr)) = iter.Read()
+		}
+	}}
+}
+func (cfg *frozenConfig) getTagKey() string {
+	tagKey := cfg.configBeforeFrozen.TagKey
+	if tagKey == "" {
+		return "json"
+	}
+	return tagKey
+}
+
+func (cfg *frozenConfig) RegisterExtension(extension Extension) {
+	cfg.extraExtensions = append(cfg.extraExtensions, extension)
+	copied := cfg.configBeforeFrozen
+	cfg.configBeforeFrozen = copied
+}
+
+type lossyFloat32Encoder struct {
+}
+
+func (encoder *lossyFloat32Encoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	stream.WriteFloat32Lossy(*((*float32)(ptr)))
+}
+
+func (encoder *lossyFloat32Encoder) IsEmpty(ptr unsafe.Pointer) bool {
+	return *((*float32)(ptr)) == 0
+}
+
+type lossyFloat64Encoder struct {
+}
+
+func (encoder *lossyFloat64Encoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	stream.WriteFloat64Lossy(*((*float64)(ptr)))
+}
+
+func (encoder *lossyFloat64Encoder) IsEmpty(ptr unsafe.Pointer) bool {
+	return *((*float64)(ptr)) == 0
+}
+
+// marshalFloatWith6Digits keeps at most 10**(-6) precision
+// for float variables, trading exactness for performance.
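+// For example (a sketch of the effect, not an exact spec): with this
+// extension enabled, 0.12345678 is written as roughly 0.123457.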
+func (cfg *frozenConfig) marshalFloatWith6Digits(extension EncoderExtension) {
+	// for better performance
+	extension[reflect2.TypeOfPtr((*float32)(nil)).Elem()] = &lossyFloat32Encoder{}
+	extension[reflect2.TypeOfPtr((*float64)(nil)).Elem()] = &lossyFloat64Encoder{}
+}
+
+type htmlEscapedStringEncoder struct {
+}
+
+func (encoder *htmlEscapedStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	str := *((*string)(ptr))
+	stream.WriteStringWithHTMLEscaped(str)
+}
+
+func (encoder *htmlEscapedStringEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	return *((*string)(ptr)) == ""
+}
+
+func (cfg *frozenConfig) escapeHTML(encoderExtension EncoderExtension) {
+	encoderExtension[reflect2.TypeOfPtr((*string)(nil)).Elem()] = &htmlEscapedStringEncoder{}
+}
+
+func (cfg *frozenConfig) cleanDecoders() {
+	typeDecoders = map[string]ValDecoder{}
+	fieldDecoders = map[string]ValDecoder{}
+	*cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig))
+}
+
+func (cfg *frozenConfig) cleanEncoders() {
+	typeEncoders = map[string]ValEncoder{}
+	fieldEncoders = map[string]ValEncoder{}
+	*cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig))
+}
+
+func (cfg *frozenConfig) MarshalToString(v interface{}) (string, error) {
+	stream := cfg.BorrowStream(nil)
+	defer cfg.ReturnStream(stream)
+	stream.WriteVal(v)
+	if stream.Error != nil {
+		return "", stream.Error
+	}
+	return string(stream.Buffer()), nil
+}
+
+func (cfg *frozenConfig) Marshal(v interface{}) ([]byte, error) {
+	stream := cfg.BorrowStream(nil)
+	defer cfg.ReturnStream(stream)
+	stream.WriteVal(v)
+	if stream.Error != nil {
+		return nil, stream.Error
+	}
+	result := stream.Buffer()
+	copied := make([]byte, len(result))
+	copy(copied, result)
+	return copied, nil
+}
+
+func (cfg *frozenConfig) MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
+	if prefix != "" {
+		panic("prefix is not supported")
+	}
+	for _, r := range indent {
+		if r != ' ' {
+			panic("indent can only be space")
+		}
+	}
+	newCfg := cfg.configBeforeFrozen
+	newCfg.IndentionStep = len(indent)
+	return newCfg.frozeWithCacheReuse(cfg.extraExtensions).Marshal(v)
+}
+
+func (cfg *frozenConfig) UnmarshalFromString(str string, v interface{}) error {
+	data := []byte(str)
+	iter := cfg.BorrowIterator(data)
+	defer cfg.ReturnIterator(iter)
+	iter.ReadVal(v)
+	c := iter.nextToken()
+	if c == 0 {
+		if iter.Error == io.EOF {
+			return nil
+		}
+		return iter.Error
+	}
+	iter.ReportError("Unmarshal", "there are bytes left after unmarshal")
+	return iter.Error
+}
+
+func (cfg *frozenConfig) Get(data []byte, path ...interface{}) Any {
+	iter := cfg.BorrowIterator(data)
+	defer cfg.ReturnIterator(iter)
+	return locatePath(iter, path)
+}
+
+func (cfg *frozenConfig) Unmarshal(data []byte, v interface{}) error {
+	iter := cfg.BorrowIterator(data)
+	defer cfg.ReturnIterator(iter)
+	iter.ReadVal(v)
+	c := iter.nextToken()
+	if c == 0 {
+		if iter.Error == io.EOF {
+			return nil
+		}
+		return iter.Error
+	}
+	iter.ReportError("Unmarshal", "there are bytes left after unmarshal")
+	return iter.Error
+}
+
+func (cfg *frozenConfig) NewEncoder(writer io.Writer) *Encoder {
+	stream := NewStream(cfg, writer, 512)
+	return &Encoder{stream}
+}
+
+func (cfg *frozenConfig) NewDecoder(reader io.Reader) *Decoder {
+	iter := Parse(cfg, reader, 512)
+	return &Decoder{iter}
+}
+
+func (cfg *frozenConfig) Valid(data []byte) bool {
+	iter := cfg.BorrowIterator(data)
+	defer cfg.ReturnIterator(iter)
+	iter.Skip()
+	return iter.Error == nil
+}
diff --git a/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md b/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md
new file mode 100644
index 0000000..3095662
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md
@@ -0,0 +1,7 @@
+| json type \ dest type | bool | int | uint | float | string |
+| --- | --- | --- | --- | --- | --- |
+| number | positive => true <br/> negative => true <br/> zero => false | 23.2 => 23 <br/> -32.1 => -32 | 12.1 => 12 <br/> -12.1 => 0 | as normal | same as original |
+| string | empty string => false <br/> string "0" => false <br/> other strings => true | "123.32" => 123 <br/> "-123.4" => -123 <br/> "123.23xxxw" => 123 <br/> "abcde12" => 0 <br/> "-32.1" => -32 | "13.2" => 13 <br/> "-1.1" => 0 | "12.1" => 12.1 <br/> "-12.3" => -12.3 <br/> "12.4xxa" => 12.4 <br/> "+1.1e2" => 110 | same as original |
+| bool | true => true <br/> false => false | true => 1 <br/> false => 0 | true => 1 <br/> false => 0 | true => 1 <br/> false => 0 | true => "true" <br/> false => "false" |
+| object | true | 0 | 0 | 0 | original json |
+| array | empty array => false <br/> nonempty array => true | [] => 0 <br/> [1,2] => 1 | [] => 0 <br/> [1,2] => 1 | [] => 0 <br/> [1,2] => 1 | original json |
\ No newline at end of file
diff --git a/vendor/github.com/json-iterator/go/iter.go b/vendor/github.com/json-iterator/go/iter.go
new file mode 100644
index 0000000..95ae54f
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/iter.go
@@ -0,0 +1,322 @@
+package jsoniter
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+)
+
+// ValueType the type for JSON element
+type ValueType int
+
+const (
+	// InvalidValue invalid JSON element
+	InvalidValue ValueType = iota
+	// StringValue JSON element "string"
+	StringValue
+	// NumberValue JSON element 100 or 0.10
+	NumberValue
+	// NilValue JSON element null
+	NilValue
+	// BoolValue JSON element true or false
+	BoolValue
+	// ArrayValue JSON element []
+	ArrayValue
+	// ObjectValue JSON element {}
+	ObjectValue
+)
+
+var hexDigits []byte
+var valueTypes []ValueType
+
+func init() {
+	hexDigits = make([]byte, 256)
+	for i := 0; i < len(hexDigits); i++ {
+		hexDigits[i] = 255
+	}
+	for i := '0'; i <= '9'; i++ {
+		hexDigits[i] = byte(i - '0')
+	}
+	for i := 'a'; i <= 'f'; i++ {
+		hexDigits[i] = byte((i - 'a') + 10)
+	}
+	for i := 'A'; i <= 'F'; i++ {
+		hexDigits[i] = byte((i - 'A') + 10)
+	}
+	valueTypes = make([]ValueType, 256)
+	for i := 0; i < len(valueTypes); i++ {
+		valueTypes[i] = InvalidValue
+	}
+	valueTypes['"'] = StringValue
+	valueTypes['-'] = NumberValue
+	valueTypes['0'] = NumberValue
+	valueTypes['1'] = NumberValue
+	valueTypes['2'] = NumberValue
+	valueTypes['3'] = NumberValue
+	valueTypes['4'] = NumberValue
+	valueTypes['5'] = NumberValue
+	valueTypes['6'] = NumberValue
+	valueTypes['7'] = NumberValue
+	valueTypes['8'] = NumberValue
+	valueTypes['9'] = NumberValue
+	valueTypes['t'] = BoolValue
+	valueTypes['f'] = BoolValue
+	valueTypes['n'] = NilValue
+	valueTypes['['] = ArrayValue
+	valueTypes['{'] = ObjectValue
+}
+
+// Iterator is an io.Reader-like object with JSON-specific read functions.
+// Errors are not returned as return values; they are stored in the Error field of the iterator instance.
+type Iterator struct {
+	cfg              *frozenConfig
+	reader           io.Reader
+	buf              []byte
+	head             int
+	tail             int
+	captureStartedAt int
+	captured         []byte
+	Error            error
+	Attachment       interface{} // open for customized decoder
+}
+
+// NewIterator creates an empty Iterator instance
+func NewIterator(cfg API) *Iterator {
+	return &Iterator{
+		cfg:    cfg.(*frozenConfig),
+		reader: nil,
+		buf:    nil,
+		head:   0,
+		tail:   0,
+	}
+}
+
+// Parse creates an Iterator instance from io.Reader
+func Parse(cfg API, reader io.Reader, bufSize int) *Iterator {
+	return &Iterator{
+		cfg:    cfg.(*frozenConfig),
+		reader: reader,
+		buf:    make([]byte, bufSize),
+		head:   0,
+		tail:   0,
+	}
+}
+
+// ParseBytes creates an Iterator instance from byte array
+func ParseBytes(cfg API, input []byte) *Iterator {
+	return &Iterator{
+		cfg:    cfg.(*frozenConfig),
+		reader: nil,
+		buf:    input,
+		head:   0,
+		tail:   len(input),
+	}
+}
+
+// ParseString creates an Iterator instance from string
+func ParseString(cfg API, input string) *Iterator {
+	return ParseBytes(cfg, []byte(input))
+}
+
+// Pool returns a pool that can provide more iterators with the same configuration
+func (iter *Iterator) Pool() IteratorPool {
+	return iter.cfg
+}
+
+// Reset reuses the iterator instance by specifying another reader
+func (iter *Iterator) Reset(reader io.Reader) *Iterator {
+	iter.reader = reader
+	iter.head = 0
+	iter.tail = 0
+	return iter
+}
+
+// ResetBytes reuses the iterator instance by specifying another byte slice as input
+func (iter *Iterator) ResetBytes(input []byte) *Iterator {
+	iter.reader = nil
+	iter.buf = input
+	iter.head = 0
+	iter.tail = len(input)
+	return iter
+}
+
+// WhatIsNext gets the ValueType of the next JSON element without consuming it
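+// For example, for input `  "hello"` it reports StringValue and leaves the
+// string itself unconsumed.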
+func (iter *Iterator) WhatIsNext() ValueType {
+	valueType := valueTypes[iter.nextToken()]
+	iter.unreadByte()
+	return valueType
+}
+
+func (iter *Iterator) skipWhitespacesWithoutLoadMore() bool {
+	for i := iter.head; i < iter.tail; i++ {
+		c := iter.buf[i]
+		switch c {
+		case ' ', '\n', '\t', '\r':
+			continue
+		}
+		iter.head = i
+		return false
+	}
+	return true
+}
+
+func (iter *Iterator) isObjectEnd() bool {
+	c := iter.nextToken()
+	if c == ',' {
+		return false
+	}
+	if c == '}' {
+		return true
+	}
+	iter.ReportError("isObjectEnd", "object ended prematurely, unexpected char "+string([]byte{c}))
+	return true
+}
+
+func (iter *Iterator) nextToken() byte {
+	// a variation of skip whitespaces, returning the next non-whitespace token
+	for {
+		for i := iter.head; i < iter.tail; i++ {
+			c := iter.buf[i]
+			switch c {
+			case ' ', '\n', '\t', '\r':
+				continue
+			}
+			iter.head = i + 1
+			return c
+		}
+		if !iter.loadMore() {
+			return 0
+		}
+	}
+}
+
+// ReportError records an error on the iterator instance, together with the current position.
+func (iter *Iterator) ReportError(operation string, msg string) {
+	if iter.Error != nil {
+		if iter.Error != io.EOF {
+			return
+		}
+	}
+	peekStart := iter.head - 10
+	if peekStart < 0 {
+		peekStart = 0
+	}
+	peekEnd := iter.head + 10
+	if peekEnd > iter.tail {
+		peekEnd = iter.tail
+	}
+	parsing := string(iter.buf[peekStart:peekEnd])
+	contextStart := iter.head - 50
+	if contextStart < 0 {
+		contextStart = 0
+	}
+	contextEnd := iter.head + 50
+	if contextEnd > iter.tail {
+		contextEnd = iter.tail
+	}
+	context := string(iter.buf[contextStart:contextEnd])
+	iter.Error = fmt.Errorf("%s: %s, error found in #%v byte of ...|%s|..., bigger context ...|%s|...",
+		operation, msg, iter.head-peekStart, parsing, context)
+}
+
+// CurrentBuffer gets the current buffer as a string, for debugging purposes
+func (iter *Iterator) CurrentBuffer() string {
+	peekStart := iter.head - 10
+	if peekStart < 0 {
+		peekStart = 0
+	}
+	return fmt.Sprintf("parsing #%v byte, around ...|%s|..., whole buffer ...|%s|...", iter.head,
+		string(iter.buf[peekStart:iter.head]), string(iter.buf[0:iter.tail]))
+}
+
+func (iter *Iterator) readByte() (ret byte) {
+	if iter.head == iter.tail {
+		if iter.loadMore() {
+			ret = iter.buf[iter.head]
+			iter.head++
+			return ret
+		}
+		return 0
+	}
+	ret = iter.buf[iter.head]
+	iter.head++
+	return ret
+}
+
+func (iter *Iterator) loadMore() bool {
+	if iter.reader == nil {
+		if iter.Error == nil {
+			iter.head = iter.tail
+			iter.Error = io.EOF
+		}
+		return false
+	}
+	if iter.captured != nil {
+		iter.captured = append(iter.captured,
+			iter.buf[iter.captureStartedAt:iter.tail]...)
+		iter.captureStartedAt = 0
+	}
+	for {
+		n, err := iter.reader.Read(iter.buf)
+		if n == 0 {
+			if err != nil {
+				if iter.Error == nil {
+					iter.Error = err
+				}
+				return false
+			}
+		} else {
+			iter.head = 0
+			iter.tail = n
+			return true
+		}
+	}
+}
+
+func (iter *Iterator) unreadByte() {
+	if iter.Error != nil {
+		return
+	}
+	iter.head--
+}
+
+// Read reads the next JSON element as a generic interface{}.
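+// For example, with the default config, input `[1,"a",true]` yields
+// []interface{}{float64(1), "a", true}.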
+func (iter *Iterator) Read() interface{} {
+	valueType := iter.WhatIsNext()
+	switch valueType {
+	case StringValue:
+		return iter.ReadString()
+	case NumberValue:
+		if iter.cfg.configBeforeFrozen.UseNumber {
+			return json.Number(iter.readNumberAsString())
+		}
+		return iter.ReadFloat64()
+	case NilValue:
+		iter.skipFourBytes('n', 'u', 'l', 'l')
+		return nil
+	case BoolValue:
+		return iter.ReadBool()
+	case ArrayValue:
+		arr := []interface{}{}
+		iter.ReadArrayCB(func(iter *Iterator) bool {
+			var elem interface{}
+			iter.ReadVal(&elem)
+			arr = append(arr, elem)
+			return true
+		})
+		return arr
+	case ObjectValue:
+		obj := map[string]interface{}{}
+		iter.ReadMapCB(func(_ *Iterator, field string) bool {
+			var elem interface{}
+			iter.ReadVal(&elem)
+			obj[field] = elem
+			return true
+		})
+		return obj
+	default:
+		iter.ReportError("Read", fmt.Sprintf("unexpected value type: %v", valueType))
+		return nil
+	}
+}
diff --git a/vendor/github.com/json-iterator/go/iter_array.go b/vendor/github.com/json-iterator/go/iter_array.go
new file mode 100644
index 0000000..6188cb4
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/iter_array.go
@@ -0,0 +1,58 @@
+package jsoniter
+
+// ReadArray reads one array element and tells whether the array has more elements to read.
+func (iter *Iterator) ReadArray() (ret bool) {
+	c := iter.nextToken()
+	switch c {
+	case 'n':
+		iter.skipThreeBytes('u', 'l', 'l')
+		return false // null
+	case '[':
+		c = iter.nextToken()
+		if c != ']' {
+			iter.unreadByte()
+			return true
+		}
+		return false
+	case ']':
+		return false
+	case ',':
+		return true
+	default:
+		iter.ReportError("ReadArray", "expect [ or , or ] or n, but found "+string([]byte{c}))
+		return
+	}
+}
+
+// ReadArrayCB reads an array with a callback
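+// A usage sketch (iter is assumed to be positioned at an array):
+//
+//	total := 0
+//	iter.ReadArrayCB(func(it *Iterator) bool {
+//		total += it.ReadInt()
+//		return true
+//	})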
+func (iter *Iterator) ReadArrayCB(callback func(*Iterator) bool) (ret bool) {
+	c := iter.nextToken()
+	if c == '[' {
+		c = iter.nextToken()
+		if c != ']' {
+			iter.unreadByte()
+			if !callback(iter) {
+				return false
+			}
+			c = iter.nextToken()
+			for c == ',' {
+				if !callback(iter) {
+					return false
+				}
+				c = iter.nextToken()
+			}
+			if c != ']' {
+				iter.ReportError("ReadArrayCB", "expect ] in the end, but found "+string([]byte{c}))
+				return false
+			}
+			return true
+		}
+		return true
+	}
+	if c == 'n' {
+		iter.skipThreeBytes('u', 'l', 'l')
+		return true // null
+	}
+	iter.ReportError("ReadArrayCB", "expect [ or n, but found "+string([]byte{c}))
+	return false
+}
diff --git a/vendor/github.com/json-iterator/go/iter_float.go b/vendor/github.com/json-iterator/go/iter_float.go
new file mode 100644
index 0000000..4f883c0
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/iter_float.go
@@ -0,0 +1,347 @@
+package jsoniter
+
+import (
+	"encoding/json"
+	"io"
+	"math/big"
+	"strconv"
+	"strings"
+	"unsafe"
+)
+
+var floatDigits []int8
+
+const invalidCharForNumber = int8(-1)
+const endOfNumber = int8(-2)
+const dotInNumber = int8(-3)
+
+func init() {
+	floatDigits = make([]int8, 256)
+	for i := 0; i < len(floatDigits); i++ {
+		floatDigits[i] = invalidCharForNumber
+	}
+	for i := int8('0'); i <= int8('9'); i++ {
+		floatDigits[i] = i - int8('0')
+	}
+	floatDigits[','] = endOfNumber
+	floatDigits[']'] = endOfNumber
+	floatDigits['}'] = endOfNumber
+	floatDigits[' '] = endOfNumber
+	floatDigits['\t'] = endOfNumber
+	floatDigits['\n'] = endOfNumber
+	floatDigits['.'] = dotInNumber
+}
+
+// ReadBigFloat reads a big.Float
+func (iter *Iterator) ReadBigFloat() (ret *big.Float) {
+	str := iter.readNumberAsString()
+	if iter.Error != nil && iter.Error != io.EOF {
+		return nil
+	}
+	prec := 64
+	if len(str) > prec {
+		prec = len(str)
+	}
+	val, _, err := big.ParseFloat(str, 10, uint(prec), big.ToZero)
+	if err != nil {
+		iter.Error = err
+		return nil
+	}
+	return val
+}
+
+// ReadBigInt reads a big.Int
+func (iter *Iterator) ReadBigInt() (ret *big.Int) {
+	str := iter.readNumberAsString()
+	if iter.Error != nil && iter.Error != io.EOF {
+		return nil
+	}
+	ret = big.NewInt(0)
+	var success bool
+	ret, success = ret.SetString(str, 10)
+	if !success {
+		iter.ReportError("ReadBigInt", "invalid big int")
+		return nil
+	}
+	return ret
+}
+
+// ReadFloat32 reads a float32
+func (iter *Iterator) ReadFloat32() (ret float32) {
+	c := iter.nextToken()
+	if c == '-' {
+		return -iter.readPositiveFloat32()
+	}
+	iter.unreadByte()
+	return iter.readPositiveFloat32()
+}
+
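+// readPositiveFloat32 is the fast path: digits are accumulated into a uint64
+// and the fractional part is divided out via pow10; anything unusual
+// (exponents, very long numbers, buffer boundaries) falls back to the
+// strconv-based readFloat32SlowPath.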
+func (iter *Iterator) readPositiveFloat32() (ret float32) {
+	value := uint64(0)
+	c := byte(' ')
+	i := iter.head
+	// first char
+	if i == iter.tail {
+		return iter.readFloat32SlowPath()
+	}
+	c = iter.buf[i]
+	i++
+	ind := floatDigits[c]
+	switch ind {
+	case invalidCharForNumber:
+		return iter.readFloat32SlowPath()
+	case endOfNumber:
+		iter.ReportError("readFloat32", "empty number")
+		return
+	case dotInNumber:
+		iter.ReportError("readFloat32", "leading dot is invalid")
+		return
+	case 0:
+		if i == iter.tail {
+			return iter.readFloat32SlowPath()
+		}
+		c = iter.buf[i]
+		switch c {
+		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+			iter.ReportError("readFloat32", "leading zero is invalid")
+			return
+		}
+	}
+	value = uint64(ind)
+	// chars before dot
+non_decimal_loop:
+	for ; i < iter.tail; i++ {
+		c = iter.buf[i]
+		ind := floatDigits[c]
+		switch ind {
+		case invalidCharForNumber:
+			return iter.readFloat32SlowPath()
+		case endOfNumber:
+			iter.head = i
+			return float32(value)
+		case dotInNumber:
+			break non_decimal_loop
+		}
+		if value > uint64SafeToMultiple10 {
+			return iter.readFloat32SlowPath()
+		}
+		value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind;
+	}
+	// chars after dot
+	if c == '.' {
+		i++
+		decimalPlaces := 0
+		if i == iter.tail {
+			return iter.readFloat32SlowPath()
+		}
+		for ; i < iter.tail; i++ {
+			c = iter.buf[i]
+			ind := floatDigits[c]
+			switch ind {
+			case endOfNumber:
+				if decimalPlaces > 0 && decimalPlaces < len(pow10) {
+					iter.head = i
+					return float32(float64(value) / float64(pow10[decimalPlaces]))
+				}
+				// too many decimal places
+				return iter.readFloat32SlowPath()
+			case invalidCharForNumber:
+				fallthrough
+			case dotInNumber:
+				return iter.readFloat32SlowPath()
+			}
+			decimalPlaces++
+			if value > uint64SafeToMultiple10 {
+				return iter.readFloat32SlowPath()
+			}
+			value = (value << 3) + (value << 1) + uint64(ind)
+		}
+	}
+	return iter.readFloat32SlowPath()
+}
+
+func (iter *Iterator) readNumberAsString() (ret string) {
+	strBuf := [16]byte{}
+	str := strBuf[0:0]
+load_loop:
+	for {
+		for i := iter.head; i < iter.tail; i++ {
+			c := iter.buf[i]
+			switch c {
+			case '+', '-', '.', 'e', 'E', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+				str = append(str, c)
+				continue
+			default:
+				iter.head = i
+				break load_loop
+			}
+		}
+		if !iter.loadMore() {
+			break
+		}
+	}
+	if iter.Error != nil && iter.Error != io.EOF {
+		return
+	}
+	if len(str) == 0 {
+		iter.ReportError("readNumberAsString", "invalid number")
+	}
+	return *(*string)(unsafe.Pointer(&str))
+}
+
+func (iter *Iterator) readFloat32SlowPath() (ret float32) {
+	str := iter.readNumberAsString()
+	if iter.Error != nil && iter.Error != io.EOF {
+		return
+	}
+	errMsg := validateFloat(str)
+	if errMsg != "" {
+		iter.ReportError("readFloat32SlowPath", errMsg)
+		return
+	}
+	val, err := strconv.ParseFloat(str, 32)
+	if err != nil {
+		iter.Error = err
+		return
+	}
+	return float32(val)
+}
+
+// ReadFloat64 reads a float64
+func (iter *Iterator) ReadFloat64() (ret float64) {
+	c := iter.nextToken()
+	if c == '-' {
+		return -iter.readPositiveFloat64()
+	}
+	iter.unreadByte()
+	return iter.readPositiveFloat64()
+}
+
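+// readPositiveFloat64 mirrors readPositiveFloat32 above, producing a float64.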
+func (iter *Iterator) readPositiveFloat64() (ret float64) {
+	value := uint64(0)
+	c := byte(' ')
+	i := iter.head
+	// first char
+	if i == iter.tail {
+		return iter.readFloat64SlowPath()
+	}
+	c = iter.buf[i]
+	i++
+	ind := floatDigits[c]
+	switch ind {
+	case invalidCharForNumber:
+		return iter.readFloat64SlowPath()
+	case endOfNumber:
+		iter.ReportError("readFloat64", "empty number")
+		return
+	case dotInNumber:
+		iter.ReportError("readFloat64", "leading dot is invalid")
+		return
+	case 0:
+		if i == iter.tail {
+			return iter.readFloat64SlowPath()
+		}
+		c = iter.buf[i]
+		switch c {
+		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+			iter.ReportError("readFloat64", "leading zero is invalid")
+			return
+		}
+	}
+	value = uint64(ind)
+	// chars before dot
+non_decimal_loop:
+	for ; i < iter.tail; i++ {
+		c = iter.buf[i]
+		ind := floatDigits[c]
+		switch ind {
+		case invalidCharForNumber:
+			return iter.readFloat64SlowPath()
+		case endOfNumber:
+			iter.head = i
+			return float64(value)
+		case dotInNumber:
+			break non_decimal_loop
+		}
+		if value > uint64SafeToMultiple10 {
+			return iter.readFloat64SlowPath()
+		}
+		value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind;
+	}
+	// chars after dot
+	if c == '.' {
+		i++
+		decimalPlaces := 0
+		if i == iter.tail {
+			return iter.readFloat64SlowPath()
+		}
+		for ; i < iter.tail; i++ {
+			c = iter.buf[i]
+			ind := floatDigits[c]
+			switch ind {
+			case endOfNumber:
+				if decimalPlaces > 0 && decimalPlaces < len(pow10) {
+					iter.head = i
+					return float64(value) / float64(pow10[decimalPlaces])
+				}
+				// too many decimal places
+				return iter.readFloat64SlowPath()
+			case invalidCharForNumber:
+				fallthrough
+			case dotInNumber:
+				return iter.readFloat64SlowPath()
+			}
+			decimalPlaces++
+			if value > uint64SafeToMultiple10 {
+				return iter.readFloat64SlowPath()
+			}
+			value = (value << 3) + (value << 1) + uint64(ind)
+		}
+	}
+	return iter.readFloat64SlowPath()
+}
+
+func (iter *Iterator) readFloat64SlowPath() (ret float64) {
+	str := iter.readNumberAsString()
+	if iter.Error != nil && iter.Error != io.EOF {
+		return
+	}
+	errMsg := validateFloat(str)
+	if errMsg != "" {
+		iter.ReportError("readFloat64SlowPath", errMsg)
+		return
+	}
+	val, err := strconv.ParseFloat(str, 64)
+	if err != nil {
+		iter.Error = err
+		return
+	}
+	return val
+}
+
+func validateFloat(str string) string {
+	// strconv.ParseFloat accepts inputs like `1.` or `1.e1` that are not valid JSON, so validate them here
+	if len(str) == 0 {
+		return "empty number"
+	}
+	if str[0] == '-' {
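+		// the leading sign was already consumed by the Read* caller, so a '-' here means "--"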
+		return "-- is not valid"
+	}
+	dotPos := strings.IndexByte(str, '.')
+	if dotPos != -1 {
+		if dotPos == len(str)-1 {
+			return "dot can not be last character"
+		}
+		switch str[dotPos+1] {
+		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+		default:
+			return "missing digit after dot"
+		}
+	}
+	return ""
+}
+
+// ReadNumber reads a json.Number
+func (iter *Iterator) ReadNumber() (ret json.Number) {
+	return json.Number(iter.readNumberAsString())
+}
diff --git a/vendor/github.com/json-iterator/go/iter_int.go b/vendor/github.com/json-iterator/go/iter_int.go
new file mode 100644
index 0000000..2142320
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/iter_int.go
@@ -0,0 +1,345 @@
+package jsoniter
+
+import (
+	"math"
+	"strconv"
+)
+
+var intDigits []int8
+
+const uint32SafeToMultiply10 = uint32(0xffffffff)/10 - 1
+const uint64SafeToMultiple10 = uint64(0xffffffffffffffff)/10 - 1
+
+func init() {
+	intDigits = make([]int8, 256)
+	for i := 0; i < len(intDigits); i++ {
+		intDigits[i] = invalidCharForNumber
+	}
+	for i := int8('0'); i <= int8('9'); i++ {
+		intDigits[i] = i - int8('0')
+	}
+}
+
+// ReadUint reads a uint
+func (iter *Iterator) ReadUint() uint {
+	if strconv.IntSize == 32 {
+		return uint(iter.ReadUint32())
+	}
+	return uint(iter.ReadUint64())
+}
+
+// ReadInt reads an int
+func (iter *Iterator) ReadInt() int {
+	if strconv.IntSize == 32 {
+		return int(iter.ReadInt32())
+	}
+	return int(iter.ReadInt64())
+}
+
+// ReadInt8 reads an int8
+func (iter *Iterator) ReadInt8() (ret int8) {
+	c := iter.nextToken()
+	if c == '-' {
+		val := iter.readUint32(iter.readByte())
+		if val > math.MaxInt8+1 {
+			iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10))
+			return
+		}
+		return -int8(val)
+	}
+	val := iter.readUint32(c)
+	if val > math.MaxInt8 {
+		iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10))
+		return
+	}
+	return int8(val)
+}
+
+// ReadUint8 reads a uint8
+func (iter *Iterator) ReadUint8() (ret uint8) {
+	val := iter.readUint32(iter.nextToken())
+	if val > math.MaxUint8 {
+		iter.ReportError("ReadUint8", "overflow: "+strconv.FormatInt(int64(val), 10))
+		return
+	}
+	return uint8(val)
+}
+
+// ReadInt16 reads an int16
+func (iter *Iterator) ReadInt16() (ret int16) {
+	c := iter.nextToken()
+	if c == '-' {
+		val := iter.readUint32(iter.readByte())
+		if val > math.MaxInt16+1 {
+			iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10))
+			return
+		}
+		return -int16(val)
+	}
+	val := iter.readUint32(c)
+	if val > math.MaxInt16 {
+		iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10))
+		return
+	}
+	return int16(val)
+}
+
+// ReadUint16 reads a uint16
+func (iter *Iterator) ReadUint16() (ret uint16) {
+	val := iter.readUint32(iter.nextToken())
+	if val > math.MaxUint16 {
+		iter.ReportError("ReadUint16", "overflow: "+strconv.FormatInt(int64(val), 10))
+		return
+	}
+	return uint16(val)
+}
+
+// ReadInt32 reads an int32
+func (iter *Iterator) ReadInt32() (ret int32) {
+	c := iter.nextToken()
+	if c == '-' {
+		val := iter.readUint32(iter.readByte())
+		if val > math.MaxInt32+1 {
+			iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10))
+			return
+		}
+		return -int32(val)
+	}
+	val := iter.readUint32(c)
+	if val > math.MaxInt32 {
+		iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10))
+		return
+	}
+	return int32(val)
+}
+
+// ReadUint32 reads a uint32
+func (iter *Iterator) ReadUint32() (ret uint32) {
+	return iter.readUint32(iter.nextToken())
+}
+
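+// readUint32 hand-unrolls the first nine digits when at least ten bytes are
+// buffered: nine decimal digits always fit in a uint32, so no overflow checks
+// are needed until the generic loop below takes over.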
+func (iter *Iterator) readUint32(c byte) (ret uint32) {
+	ind := intDigits[c]
+	if ind == 0 {
+		iter.assertInteger()
+		return 0 // single zero
+	}
+	if ind == invalidCharForNumber {
+		iter.ReportError("readUint32", "unexpected character: "+string([]byte{c}))
+		return
+	}
+	value := uint32(ind)
+	if iter.tail-iter.head > 10 {
+		i := iter.head
+		ind2 := intDigits[iter.buf[i]]
+		if ind2 == invalidCharForNumber {
+			iter.head = i
+			iter.assertInteger()
+			return value
+		}
+		i++
+		ind3 := intDigits[iter.buf[i]]
+		if ind3 == invalidCharForNumber {
+			iter.head = i
+			iter.assertInteger()
+			return value*10 + uint32(ind2)
+		}
+		//iter.head = i + 1
+		//value = value * 100 + uint32(ind2) * 10 + uint32(ind3)
+		i++
+		ind4 := intDigits[iter.buf[i]]
+		if ind4 == invalidCharForNumber {
+			iter.head = i
+			iter.assertInteger()
+			return value*100 + uint32(ind2)*10 + uint32(ind3)
+		}
+		i++
+		ind5 := intDigits[iter.buf[i]]
+		if ind5 == invalidCharForNumber {
+			iter.head = i
+			iter.assertInteger()
+			return value*1000 + uint32(ind2)*100 + uint32(ind3)*10 + uint32(ind4)
+		}
+		i++
+		ind6 := intDigits[iter.buf[i]]
+		if ind6 == invalidCharForNumber {
+			iter.head = i
+			iter.assertInteger()
+			return value*10000 + uint32(ind2)*1000 + uint32(ind3)*100 + uint32(ind4)*10 + uint32(ind5)
+		}
+		i++
+		ind7 := intDigits[iter.buf[i]]
+		if ind7 == invalidCharForNumber {
+			iter.head = i
+			iter.assertInteger()
+			return value*100000 + uint32(ind2)*10000 + uint32(ind3)*1000 + uint32(ind4)*100 + uint32(ind5)*10 + uint32(ind6)
+		}
+		i++
+		ind8 := intDigits[iter.buf[i]]
+		if ind8 == invalidCharForNumber {
+			iter.head = i
+			iter.assertInteger()
+			return value*1000000 + uint32(ind2)*100000 + uint32(ind3)*10000 + uint32(ind4)*1000 + uint32(ind5)*100 + uint32(ind6)*10 + uint32(ind7)
+		}
+		i++
+		ind9 := intDigits[iter.buf[i]]
+		value = value*10000000 + uint32(ind2)*1000000 + uint32(ind3)*100000 + uint32(ind4)*10000 + uint32(ind5)*1000 + uint32(ind6)*100 + uint32(ind7)*10 + uint32(ind8)
+		iter.head = i
+		if ind9 == invalidCharForNumber {
+			iter.assertInteger()
+			return value
+		}
+	}
+	for {
+		for i := iter.head; i < iter.tail; i++ {
+			ind = intDigits[iter.buf[i]]
+			if ind == invalidCharForNumber {
+				iter.head = i
+				iter.assertInteger()
+				return value
+			}
+			if value > uint32SafeToMultiply10 {
+				value2 := (value << 3) + (value << 1) + uint32(ind)
+				if value2 < value {
+					iter.ReportError("readUint32", "overflow")
+					return
+				}
+				value = value2
+				continue
+			}
+			value = (value << 3) + (value << 1) + uint32(ind)
+		}
+		if !iter.loadMore() {
+			iter.assertInteger()
+			return value
+		}
+	}
+}
+
+// ReadInt64 reads an int64
+func (iter *Iterator) ReadInt64() (ret int64) {
+	c := iter.nextToken()
+	if c == '-' {
+		val := iter.readUint64(iter.readByte())
+		if val > math.MaxInt64+1 {
+			iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10))
+			return
+		}
+		return -int64(val)
+	}
+	val := iter.readUint64(c)
+	if val > math.MaxInt64 {
+		iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10))
+		return
+	}
+	return int64(val)
+}
+
+// ReadUint64 reads a uint64
+func (iter *Iterator) ReadUint64() uint64 {
+	return iter.readUint64(iter.nextToken())
+}
+
+func (iter *Iterator) readUint64(c byte) (ret uint64) {
+	ind := intDigits[c]
+	if ind == 0 {
+		iter.assertInteger()
+		return 0 // single zero
+	}
+	if ind == invalidCharForNumber {
+		iter.ReportError("readUint64", "unexpected character: "+string([]byte{c}))
+		return
+	}
+	value := uint64(ind)
+	if iter.tail-iter.head > 10 {
+		i := iter.head
+		ind2 := intDigits[iter.buf[i]]
+		if ind2 == invalidCharForNumber {
+			iter.head = i
+			iter.assertInteger()
+			return value
+		}
+		i++
+		ind3 := intDigits[iter.buf[i]]
+		if ind3 == invalidCharForNumber {
+			iter.head = i
+			iter.assertInteger()
+			return value*10 + uint64(ind2)
+		}
+		//iter.head = i + 1
+		//value = value * 100 + uint32(ind2) * 10 + uint32(ind3)
+		i++
+		ind4 := intDigits[iter.buf[i]]
+		if ind4 == invalidCharForNumber {
+			iter.head = i
+			iter.assertInteger()
+			return value*100 + uint64(ind2)*10 + uint64(ind3)
+		}
+		i++
+		ind5 := intDigits[iter.buf[i]]
+		if ind5 == invalidCharForNumber {
+			iter.head = i
+			iter.assertInteger()
+			return value*1000 + uint64(ind2)*100 + uint64(ind3)*10 + uint64(ind4)
+		}
+		i++
+		ind6 := intDigits[iter.buf[i]]
+		if ind6 == invalidCharForNumber {
+			iter.head = i
+			iter.assertInteger()
+			return value*10000 + uint64(ind2)*1000 + uint64(ind3)*100 + uint64(ind4)*10 + uint64(ind5)
+		}
+		i++
+		ind7 := intDigits[iter.buf[i]]
+		if ind7 == invalidCharForNumber {
+			iter.head = i
+			iter.assertInteger()
+			return value*100000 + uint64(ind2)*10000 + uint64(ind3)*1000 + uint64(ind4)*100 + uint64(ind5)*10 + uint64(ind6)
+		}
+		i++
+		ind8 := intDigits[iter.buf[i]]
+		if ind8 == invalidCharForNumber {
+			iter.head = i
+			iter.assertInteger()
+			return value*1000000 + uint64(ind2)*100000 + uint64(ind3)*10000 + uint64(ind4)*1000 + uint64(ind5)*100 + uint64(ind6)*10 + uint64(ind7)
+		}
+		i++
+		ind9 := intDigits[iter.buf[i]]
+		value = value*10000000 + uint64(ind2)*1000000 + uint64(ind3)*100000 + uint64(ind4)*10000 + uint64(ind5)*1000 + uint64(ind6)*100 + uint64(ind7)*10 + uint64(ind8)
+		iter.head = i
+		if ind9 == invalidCharForNumber {
+			iter.assertInteger()
+			return value
+		}
+	}
+	for {
+		for i := iter.head; i < iter.tail; i++ {
+			ind = intDigits[iter.buf[i]]
+			if ind == invalidCharForNumber {
+				iter.head = i
+				iter.assertInteger()
+				return value
+			}
+			if value > uint64SafeToMultiple10 {
+				value2 := (value << 3) + (value << 1) + uint64(ind)
+				if value2 < value {
+					iter.ReportError("readUint64", "overflow")
+					return
+				}
+				value = value2
+				continue
+			}
+			value = (value << 3) + (value << 1) + uint64(ind)
+		}
+		if !iter.loadMore() {
+			iter.assertInteger()
+			return value
+		}
+	}
+}
+
+func (iter *Iterator) assertInteger() {
+	if iter.head < len(iter.buf) && iter.buf[iter.head] == '.' {
+		iter.ReportError("assertInteger", "can not decode float as int")
+	}
+}
diff --git a/vendor/github.com/json-iterator/go/iter_object.go b/vendor/github.com/json-iterator/go/iter_object.go
new file mode 100644
index 0000000..1c57576
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/iter_object.go
@@ -0,0 +1,251 @@
+package jsoniter
+
+import (
+	"fmt"
+	"strings"
+)
+
+// ReadObject reads one field from the object.
+// If the object has ended, it returns an empty string.
+// Otherwise, it returns the field name.
+func (iter *Iterator) ReadObject() (ret string) {
+	c := iter.nextToken()
+	switch c {
+	case 'n':
+		iter.skipThreeBytes('u', 'l', 'l')
+		return "" // null
+	case '{':
+		c = iter.nextToken()
+		if c == '"' {
+			iter.unreadByte()
+			field := iter.ReadString()
+			c = iter.nextToken()
+			if c != ':' {
+				iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
+			}
+			return field
+		}
+		if c == '}' {
+			return "" // end of object
+		}
+		iter.ReportError("ReadObject", `expect " after {, but found `+string([]byte{c}))
+		return
+	case ',':
+		field := iter.ReadString()
+		c = iter.nextToken()
+		if c != ':' {
+			iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
+		}
+		return field
+	case '}':
+		return "" // end of object
+	default:
+		iter.ReportError("ReadObject", fmt.Sprintf(`expect { or , or } or n, but found %s`, string([]byte{c})))
+		return
+	}
+}
+
+// readFieldHash reads the field name and returns its FNV-1a hash; ASCII
+// letters are folded to lower case first unless the config is case-sensitive.
+func (iter *Iterator) readFieldHash() int64 {
+	hash := int64(0x811c9dc5)
+	c := iter.nextToken()
+	if c != '"' {
+		iter.ReportError("readFieldHash", `expect ", but found `+string([]byte{c}))
+		return 0
+	}
+	for {
+		for i := iter.head; i < iter.tail; i++ {
+			// require ascii string and no escape
+			b := iter.buf[i]
+			if b == '\\' {
+				iter.head = i
+				for _, b := range iter.readStringSlowPath() {
+					if 'A' <= b && b <= 'Z' && !iter.cfg.caseSensitive {
+						b += 'a' - 'A'
+					}
+					hash ^= int64(b)
+					hash *= 0x1000193
+				}
+				c = iter.nextToken()
+				if c != ':' {
+					iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c}))
+					return 0
+				}
+				return hash
+			}
+			if b == '"' {
+				iter.head = i + 1
+				c = iter.nextToken()
+				if c != ':' {
+					iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c}))
+					return 0
+				}
+				return hash
+			}
+			if 'A' <= b && b <= 'Z' && !iter.cfg.caseSensitive {
+				b += 'a' - 'A'
+			}
+			hash ^= int64(b)
+			hash *= 0x1000193
+		}
+		if !iter.loadMore() {
+			iter.ReportError("readFieldHash", `incomplete field name`)
+			return 0
+		}
+	}
+}
+
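+// calcHash computes the same FNV-1a hash as readFieldHash, so hashes
+// precomputed from struct field names match the hashes computed while parsing.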
+func calcHash(str string, caseSensitive bool) int64 {
+	if !caseSensitive {
+		str = strings.ToLower(str)
+	}
+	hash := int64(0x811c9dc5)
+	for _, b := range []byte(str) {
+		hash ^= int64(b)
+		hash *= 0x1000193
+	}
+	return int64(hash)
+}
+
+// ReadObjectCB reads an object with a callback; the key is expected to be ASCII-only and the field name is not copied
+func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool {
+	c := iter.nextToken()
+	var field string
+	if c == '{' {
+		c = iter.nextToken()
+		if c == '"' {
+			iter.unreadByte()
+			field = iter.ReadString()
+			c = iter.nextToken()
+			if c != ':' {
+				iter.ReportError("ReadObjectCB", "expect : after object field, but found "+string([]byte{c}))
+			}
+			if !callback(iter, field) {
+				return false
+			}
+			c = iter.nextToken()
+			for c == ',' {
+				field = iter.ReadString()
+				c = iter.nextToken()
+				if c != ':' {
+					iter.ReportError("ReadObjectCB", "expect : after object field, but found "+string([]byte{c}))
+				}
+				if !callback(iter, field) {
+					return false
+				}
+				c = iter.nextToken()
+			}
+			if c != '}' {
+				iter.ReportError("ReadObjectCB", `object not ended with }`)
+				return false
+			}
+			return true
+		}
+		if c == '}' {
+			return true
+		}
+		iter.ReportError("ReadObjectCB", `expect " after {, but found `+string([]byte{c}))
+		return false
+	}
+	if c == 'n' {
+		iter.skipThreeBytes('u', 'l', 'l')
+		return true // null
+	}
+	iter.ReportError("ReadObjectCB", `expect { or n, but found `+string([]byte{c}))
+	return false
+}
+
+// ReadMapCB reads a map with a callback; the key can be any string
+func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool {
+	c := iter.nextToken()
+	if c == '{' {
+		c = iter.nextToken()
+		if c == '"' {
+			iter.unreadByte()
+			field := iter.ReadString()
+			if c = iter.nextToken(); c != ':' {
+				iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c}))
+				return false
+			}
+			if !callback(iter, field) {
+				return false
+			}
+			c = iter.nextToken()
+			for c == ',' {
+				field = iter.ReadString()
+				if c = iter.nextToken(); c != ':' {
+					iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c}))
+					return false
+				}
+				if !callback(iter, field) {
+					return false
+				}
+				c = iter.nextToken()
+			}
+			if c != '}' {
+				iter.ReportError("ReadMapCB", `object not ended with }`)
+				return false
+			}
+			return true
+		}
+		if c == '}' {
+			return true
+		}
+		iter.ReportError("ReadMapCB", `expect " after {, but found `+string([]byte{c}))
+		return false
+	}
+	if c == 'n' {
+		iter.skipThreeBytes('u', 'l', 'l')
+		return true // null
+	}
+	iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c}))
+	return false
+}
+
+func (iter *Iterator) readObjectStart() bool {
+	c := iter.nextToken()
+	if c == '{' {
+		c = iter.nextToken()
+		if c == '}' {
+			return false
+		}
+		iter.unreadByte()
+		return true
+	} else if c == 'n' {
+		iter.skipThreeBytes('u', 'l', 'l')
+		return false
+	}
+	iter.ReportError("readObjectStart", "expect { or n, but found "+string([]byte{c}))
+	return false
+}
+
+func (iter *Iterator) readObjectFieldAsBytes() (ret []byte) {
+	str := iter.ReadStringAsSlice()
+	if iter.skipWhitespacesWithoutLoadMore() {
+		if ret == nil {
+			ret = make([]byte, len(str))
+			copy(ret, str)
+		}
+		if !iter.loadMore() {
+			return
+		}
+	}
+	if iter.buf[iter.head] != ':' {
+		iter.ReportError("readObjectFieldAsBytes", "expect : after object field, but found "+string([]byte{iter.buf[iter.head]}))
+		return
+	}
+	iter.head++
+	if iter.skipWhitespacesWithoutLoadMore() {
+		if ret == nil {
+			ret = make([]byte, len(str))
+			copy(ret, str)
+		}
+		if !iter.loadMore() {
+			return
+		}
+	}
+	if ret == nil {
+		return str
+	}
+	return ret
+}
diff --git a/vendor/github.com/json-iterator/go/iter_skip.go b/vendor/github.com/json-iterator/go/iter_skip.go
new file mode 100644
index 0000000..f58beb9
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/iter_skip.go
@@ -0,0 +1,129 @@
+package jsoniter
+
+import "fmt"
+
+// ReadNil reads a JSON null and
+// returns whether a null was read
+func (iter *Iterator) ReadNil() (ret bool) {
+	c := iter.nextToken()
+	if c == 'n' {
+		iter.skipThreeBytes('u', 'l', 'l') // null
+		return true
+	}
+	iter.unreadByte()
+	return false
+}
+
+// ReadBool reads a JSON true or false value as a bool
+func (iter *Iterator) ReadBool() (ret bool) {
+	c := iter.nextToken()
+	if c == 't' {
+		iter.skipThreeBytes('r', 'u', 'e')
+		return true
+	}
+	if c == 'f' {
+		iter.skipFourBytes('a', 'l', 's', 'e')
+		return false
+	}
+	iter.ReportError("ReadBool", "expect t or f, but found "+string([]byte{c}))
+	return
+}
+
+// SkipAndReturnBytes skips the next JSON element and returns its content as []byte.
+// The returned []byte can be kept; it is a copy of the data.
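+// A usage sketch (ConfigDefault is this package's default API):
+//
+//	iter := ParseString(ConfigDefault, `{"a":[1,2]}`)
+//	iter.ReadObject()                // consumes the "a" key and the colon
+//	raw := iter.SkipAndReturnBytes() // raw == []byte(`[1,2]`)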
+func (iter *Iterator) SkipAndReturnBytes() []byte {
+	iter.startCapture(iter.head)
+	iter.Skip()
+	return iter.stopCapture()
+}
+
+type captureBuffer struct {
+	startedAt int
+	captured  []byte
+}
+
+func (iter *Iterator) startCapture(captureStartedAt int) {
+	if iter.captured != nil {
+		panic("already in capture mode")
+	}
+	iter.captureStartedAt = captureStartedAt
+	iter.captured = make([]byte, 0, 32)
+}
+
+func (iter *Iterator) stopCapture() []byte {
+	if iter.captured == nil {
+		panic("not in capture mode")
+	}
+	captured := iter.captured
+	remaining := iter.buf[iter.captureStartedAt:iter.head]
+	iter.captureStartedAt = -1
+	iter.captured = nil
+	if len(captured) == 0 {
+		copied := make([]byte, len(remaining))
+		copy(copied, remaining)
+		return copied
+	}
+	captured = append(captured, remaining...)
+	return captured
+}
+
+// Skip skips one JSON element and positions the iterator at the start of the next element
+func (iter *Iterator) Skip() {
+	c := iter.nextToken()
+	switch c {
+	case '"':
+		iter.skipString()
+	case 'n':
+		iter.skipThreeBytes('u', 'l', 'l') // null
+	case 't':
+		iter.skipThreeBytes('r', 'u', 'e') // true
+	case 'f':
+		iter.skipFourBytes('a', 'l', 's', 'e') // false
+	case '0':
+		iter.unreadByte()
+		iter.ReadFloat32()
+	case '-', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+		iter.skipNumber()
+	case '[':
+		iter.skipArray()
+	case '{':
+		iter.skipObject()
+	default:
+		iter.ReportError("Skip", fmt.Sprintf("do not know how to skip: %v", c))
+		return
+	}
+}
+
+func (iter *Iterator) skipFourBytes(b1, b2, b3, b4 byte) {
+	if iter.readByte() != b1 {
+		iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4})))
+		return
+	}
+	if iter.readByte() != b2 {
+		iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4})))
+		return
+	}
+	if iter.readByte() != b3 {
+		iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4})))
+		return
+	}
+	if iter.readByte() != b4 {
+		iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4})))
+		return
+	}
+}
+
+func (iter *Iterator) skipThreeBytes(b1, b2, b3 byte) {
+	if iter.readByte() != b1 {
+		iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3})))
+		return
+	}
+	if iter.readByte() != b2 {
+		iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3})))
+		return
+	}
+	if iter.readByte() != b3 {
+		iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3})))
+		return
+	}
+}
diff --git a/vendor/github.com/json-iterator/go/iter_skip_sloppy.go b/vendor/github.com/json-iterator/go/iter_skip_sloppy.go
new file mode 100644
index 0000000..8fcdc3b
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/iter_skip_sloppy.go
@@ -0,0 +1,144 @@
+//+build jsoniter_sloppy
+
+package jsoniter
+
+// a sloppy but faster implementation that does not validate the input JSON
+
+func (iter *Iterator) skipNumber() {
+	for {
+		for i := iter.head; i < iter.tail; i++ {
+			c := iter.buf[i]
+			switch c {
+			case ' ', '\n', '\r', '\t', ',', '}', ']':
+				iter.head = i
+				return
+			}
+		}
+		if !iter.loadMore() {
+			return
+		}
+	}
+}
+
+func (iter *Iterator) skipArray() {
+	level := 1
+	for {
+		for i := iter.head; i < iter.tail; i++ {
+			switch iter.buf[i] {
+			case '"': // If inside string, skip it
+				iter.head = i + 1
+				iter.skipString()
+				i = iter.head - 1 // it will be i++ soon
+			case '[': // If open symbol, increase level
+				level++
+			case ']': // If close symbol, decrease level
+				level--
+
+				// If we have returned to the original level, we're done
+				if level == 0 {
+					iter.head = i + 1
+					return
+				}
+			}
+		}
+		if !iter.loadMore() {
+			iter.ReportError("skipArray", "incomplete array")
+			return
+		}
+	}
+}
+
+func (iter *Iterator) skipObject() {
+	level := 1
+	for {
+		for i := iter.head; i < iter.tail; i++ {
+			switch iter.buf[i] {
+			case '"': // If inside string, skip it
+				iter.head = i + 1
+				iter.skipString()
+				i = iter.head - 1 // it will be i++ soon
+			case '{': // If open symbol, increase level
+				level++
+			case '}': // If close symbol, decrease level
+				level--
+
+				// If we have returned to the original level, we're done
+				if level == 0 {
+					iter.head = i + 1
+					return
+				}
+			}
+		}
+		if !iter.loadMore() {
+			iter.ReportError("skipObject", "incomplete object")
+			return
+		}
+	}
+}
+
+func (iter *Iterator) skipString() {
+	for {
+		end, escaped := iter.findStringEnd()
+		if end == -1 {
+			if !iter.loadMore() {
+				iter.ReportError("skipString", "incomplete string")
+				return
+			}
+			if escaped {
+				iter.head = 1 // skip the first char as last char read is \
+			}
+		} else {
+			iter.head = end
+			return
+		}
+	}
+}
+
+// adapted from: https://github.com/buger/jsonparser/blob/master/parser.go
+// Tries to find the end of a string,
+// supporting strings that contain escaped quote symbols.
+func (iter *Iterator) findStringEnd() (int, bool) {
+	escaped := false
+	for i := iter.head; i < iter.tail; i++ {
+		c := iter.buf[i]
+		if c == '"' {
+			if !escaped {
+				return i + 1, false
+			}
+			j := i - 1
+			for {
+				if j < iter.head || iter.buf[j] != '\\' {
+					// even number of backslashes
+					// either end of buffer, or " found
+					return i + 1, true
+				}
+				j--
+				if j < iter.head || iter.buf[j] != '\\' {
+					// odd number of backslashes
+					// it is \" or \\\"
+					break
+				}
+				j--
+			}
+		} else if c == '\\' {
+			escaped = true
+		}
+	}
+	j := iter.tail - 1
+	for {
+		if j < iter.head || iter.buf[j] != '\\' {
+			// even number of backslashes
+			// either end of buffer, or " found
+			return -1, false // do not end with \
+		}
+		j--
+		if j < iter.head || iter.buf[j] != '\\' {
+			// odd number of backslashes
+			// it is \" or \\\"
+			break
+		}
+		j--
+
+	}
+	return -1, true // end with \
+}
diff --git a/vendor/github.com/json-iterator/go/iter_skip_strict.go b/vendor/github.com/json-iterator/go/iter_skip_strict.go
new file mode 100644
index 0000000..f67bc2e
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/iter_skip_strict.go
@@ -0,0 +1,89 @@
+//+build !jsoniter_sloppy
+
+package jsoniter
+
+import "fmt"
+
+func (iter *Iterator) skipNumber() {
+	if !iter.trySkipNumber() {
+		iter.unreadByte()
+		iter.ReadFloat32()
+	}
+}
+
+func (iter *Iterator) trySkipNumber() bool {
+	dotFound := false
+	for i := iter.head; i < iter.tail; i++ {
+		c := iter.buf[i]
+		switch c {
+		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+		case '.':
+			if dotFound {
+				iter.ReportError("validateNumber", `more than one dot found in number`)
+				return true // already failed
+			}
+			if i+1 == iter.tail {
+				return false
+			}
+			c = iter.buf[i+1]
+			switch c {
+			case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+			default:
+				iter.ReportError("validateNumber", `missing digit after dot`)
+				return true // already failed
+			}
+			dotFound = true
+		default:
+			switch c {
+			case ',', ']', '}', ' ', '\t', '\n', '\r':
+				if iter.head == i {
+					return false // nothing consumed: e.g. a bare "-" with no following digits
+				}
+				iter.head = i
+				return true // must be valid
+			}
+			return false // may be invalid
+		}
+	}
+	return false
+}
+
+func (iter *Iterator) skipString() {
+	if !iter.trySkipString() {
+		iter.unreadByte()
+		iter.ReadString()
+	}
+}
+
+func (iter *Iterator) trySkipString() bool {
+	for i := iter.head; i < iter.tail; i++ {
+		c := iter.buf[i]
+		if c == '"' {
+			iter.head = i + 1
+			return true // valid
+		} else if c == '\\' {
+			return false
+		} else if c < ' ' {
+			iter.ReportError("trySkipString",
+				fmt.Sprintf(`invalid control character found: %d`, c))
+			return true // already failed
+		}
+	}
+	return false
+}
+
+func (iter *Iterator) skipObject() {
+	iter.unreadByte()
+	iter.ReadObjectCB(func(iter *Iterator, field string) bool {
+		iter.Skip()
+		return true
+	})
+}
+
+func (iter *Iterator) skipArray() {
+	iter.unreadByte()
+	iter.ReadArrayCB(func(iter *Iterator) bool {
+		iter.Skip()
+		return true
+	})
+}
diff --git a/vendor/github.com/json-iterator/go/iter_str.go b/vendor/github.com/json-iterator/go/iter_str.go
new file mode 100644
index 0000000..adc487e
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/iter_str.go
@@ -0,0 +1,215 @@
+package jsoniter
+
+import (
+	"fmt"
+	"unicode/utf16"
+)
+
+// ReadString reads a string from the iterator
+func (iter *Iterator) ReadString() (ret string) {
+	c := iter.nextToken()
+	if c == '"' {
+		for i := iter.head; i < iter.tail; i++ {
+			c := iter.buf[i]
+			if c == '"' {
+				ret = string(iter.buf[iter.head:i])
+				iter.head = i + 1
+				return ret
+			} else if c == '\\' {
+				break
+			} else if c < ' ' {
+				iter.ReportError("ReadString",
+					fmt.Sprintf(`invalid control character found: %d`, c))
+				return
+			}
+		}
+		return iter.readStringSlowPath()
+	} else if c == 'n' {
+		iter.skipThreeBytes('u', 'l', 'l')
+		return ""
+	}
+	iter.ReportError("ReadString", `expects " or n, but found `+string([]byte{c}))
+	return
+}
+
+func (iter *Iterator) readStringSlowPath() (ret string) {
+	var str []byte
+	var c byte
+	for iter.Error == nil {
+		c = iter.readByte()
+		if c == '"' {
+			return string(str)
+		}
+		if c == '\\' {
+			c = iter.readByte()
+			str = iter.readEscapedChar(c, str)
+		} else {
+			str = append(str, c)
+		}
+	}
+	iter.ReportError("readStringSlowPath", "unexpected end of input")
+	return
+}
+
+func (iter *Iterator) readEscapedChar(c byte, str []byte) []byte {
+	switch c {
+	case 'u':
+		r := iter.readU4()
+		if utf16.IsSurrogate(r) {
+			c = iter.readByte()
+			if iter.Error != nil {
+				return nil
+			}
+			if c != '\\' {
+				iter.unreadByte()
+				str = appendRune(str, r)
+				return str
+			}
+			c = iter.readByte()
+			if iter.Error != nil {
+				return nil
+			}
+			if c != 'u' {
+				str = appendRune(str, r)
+				return iter.readEscapedChar(c, str)
+			}
+			r2 := iter.readU4()
+			if iter.Error != nil {
+				return nil
+			}
+			combined := utf16.DecodeRune(r, r2)
+			if combined == '\uFFFD' {
+				str = appendRune(str, r)
+				str = appendRune(str, r2)
+			} else {
+				str = appendRune(str, combined)
+			}
+		} else {
+			str = appendRune(str, r)
+		}
+	case '"':
+		str = append(str, '"')
+	case '\\':
+		str = append(str, '\\')
+	case '/':
+		str = append(str, '/')
+	case 'b':
+		str = append(str, '\b')
+	case 'f':
+		str = append(str, '\f')
+	case 'n':
+		str = append(str, '\n')
+	case 'r':
+		str = append(str, '\r')
+	case 't':
+		str = append(str, '\t')
+	default:
+		iter.ReportError("readEscapedChar",
+			`invalid escape char after \`)
+		return nil
+	}
+	return str
+}
+
+// ReadStringAsSlice reads a string from the iterator without copying it into string form.
+// The returned []byte cannot be kept, as it will change after the next iterator call.
+func (iter *Iterator) ReadStringAsSlice() (ret []byte) {
+	c := iter.nextToken()
+	if c == '"' {
+		for i := iter.head; i < iter.tail; i++ {
+			// require ascii string and no escape
+			// for: field name, base64, number
+			if iter.buf[i] == '"' {
+				// fast path: reuse the underlying buffer
+				ret = iter.buf[iter.head:i]
+				iter.head = i + 1
+				return ret
+			}
+		}
+		readLen := iter.tail - iter.head
+		copied := make([]byte, readLen, readLen*2)
+		copy(copied, iter.buf[iter.head:iter.tail])
+		iter.head = iter.tail
+		for iter.Error == nil {
+			c := iter.readByte()
+			if c == '"' {
+				return copied
+			}
+			copied = append(copied, c)
+		}
+		return copied
+	}
+	iter.ReportError("ReadStringAsSlice", `expects " or n, but found `+string([]byte{c}))
+	return
+}
+
+func (iter *Iterator) readU4() (ret rune) {
+	for i := 0; i < 4; i++ {
+		c := iter.readByte()
+		if iter.Error != nil {
+			return
+		}
+		if c >= '0' && c <= '9' {
+			ret = ret*16 + rune(c-'0')
+		} else if c >= 'a' && c <= 'f' {
+			ret = ret*16 + rune(c-'a'+10)
+		} else if c >= 'A' && c <= 'F' {
+			ret = ret*16 + rune(c-'A'+10)
+		} else {
+			iter.ReportError("readU4", "expects 0~9 or a~f, but found "+string([]byte{c}))
+			return
+		}
+	}
+	return ret
+}
+
+const (
+	t1 = 0x00 // 0000 0000
+	tx = 0x80 // 1000 0000
+	t2 = 0xC0 // 1100 0000
+	t3 = 0xE0 // 1110 0000
+	t4 = 0xF0 // 1111 0000
+	t5 = 0xF8 // 1111 1000
+
+	maskx = 0x3F // 0011 1111
+	mask2 = 0x1F // 0001 1111
+	mask3 = 0x0F // 0000 1111
+	mask4 = 0x07 // 0000 0111
+
+	rune1Max = 1<<7 - 1
+	rune2Max = 1<<11 - 1
+	rune3Max = 1<<16 - 1
+
+	surrogateMin = 0xD800
+	surrogateMax = 0xDFFF
+
+	maxRune   = '\U0010FFFF' // Maximum valid Unicode code point.
+	runeError = '\uFFFD'     // the "error" Rune or "Unicode replacement character"
+)
+
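+// appendRune appends the UTF-8 encoding of r to p, mirroring the encoding
+// logic of unicode/utf8.EncodeRune while appending directly to the slice.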
+func appendRune(p []byte, r rune) []byte {
+	// Negative values are erroneous. Making it unsigned addresses the problem.
+	switch i := uint32(r); {
+	case i <= rune1Max:
+		p = append(p, byte(r))
+		return p
+	case i <= rune2Max:
+		p = append(p, t2|byte(r>>6))
+		p = append(p, tx|byte(r)&maskx)
+		return p
+	case i > maxRune, surrogateMin <= i && i <= surrogateMax:
+		r = runeError
+		fallthrough
+	case i <= rune3Max:
+		p = append(p, t3|byte(r>>12))
+		p = append(p, tx|byte(r>>6)&maskx)
+		p = append(p, tx|byte(r)&maskx)
+		return p
+	default:
+		p = append(p, t4|byte(r>>18))
+		p = append(p, tx|byte(r>>12)&maskx)
+		p = append(p, tx|byte(r>>6)&maskx)
+		p = append(p, tx|byte(r)&maskx)
+		return p
+	}
+}
diff --git a/vendor/github.com/json-iterator/go/jsoniter.go b/vendor/github.com/json-iterator/go/jsoniter.go
new file mode 100644
index 0000000..c2934f9
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/jsoniter.go
@@ -0,0 +1,18 @@
+// Package jsoniter implements encoding and decoding of JSON as defined in
+// RFC 4627 and provides interfaces whose syntax is identical to that of the
+// standard library's encoding/json. Converting from encoding/json to jsoniter
+// is no more than replacing the imported package
+// and variable type declarations (if any).
+// jsoniter's interfaces give 100% compatibility with code using the standard library.
+//
+// "JSON and Go"
+// (https://golang.org/doc/articles/json_and_go.html)
+// gives a description of how Marshal/Unmarshal operate
+// between arbitrary or predefined json objects and bytes,
+// and it applies to jsoniter.Marshal/Unmarshal as well.
+//
+// In addition, jsoniter.Iterator provides a different set of interfaces
+// for iterating over given bytes/string/reader
+// and yielding parsed elements one by one.
+// This set of interfaces reads input only as required and gives
+// better performance.
+package jsoniter
diff --git a/vendor/github.com/json-iterator/go/pool.go b/vendor/github.com/json-iterator/go/pool.go
new file mode 100644
index 0000000..e2389b5
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/pool.go
@@ -0,0 +1,42 @@
+package jsoniter
+
+import (
+	"io"
+)
+
+// IteratorPool is a thread-safe pool of iterators sharing the same configuration
+type IteratorPool interface {
+	BorrowIterator(data []byte) *Iterator
+	ReturnIterator(iter *Iterator)
+}
+
+// StreamPool is a thread-safe pool of streams sharing the same configuration
+type StreamPool interface {
+	BorrowStream(writer io.Writer) *Stream
+	ReturnStream(stream *Stream)
+}
+
+func (cfg *frozenConfig) BorrowStream(writer io.Writer) *Stream {
+	stream := cfg.streamPool.Get().(*Stream)
+	stream.Reset(writer)
+	return stream
+}
+
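+// ReturnStream clears the stream's writer, error and attachment before
+// putting it back, so pooled streams do not retain stale references.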
+func (cfg *frozenConfig) ReturnStream(stream *Stream) {
+	stream.out = nil
+	stream.Error = nil
+	stream.Attachment = nil
+	cfg.streamPool.Put(stream)
+}
+
+func (cfg *frozenConfig) BorrowIterator(data []byte) *Iterator {
+	iter := cfg.iteratorPool.Get().(*Iterator)
+	iter.ResetBytes(data)
+	return iter
+}
+
+func (cfg *frozenConfig) ReturnIterator(iter *Iterator) {
+	iter.Error = nil
+	iter.Attachment = nil
+	cfg.iteratorPool.Put(iter)
+}
diff --git a/vendor/github.com/json-iterator/go/reflect.go b/vendor/github.com/json-iterator/go/reflect.go
new file mode 100644
index 0000000..4459e20
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect.go
@@ -0,0 +1,332 @@
+package jsoniter
+
+import (
+	"fmt"
+	"reflect"
+	"unsafe"
+
+	"github.com/modern-go/reflect2"
+)
+
+// ValDecoder is an internal type registered to cache as needed.
+// Don't confuse jsoniter.ValDecoder with json.Decoder.
+// For json.Decoder's adapter, refer to jsoniter.AdapterDecoder(todo link).
+//
+// Reflection on the type is used to create decoders, which are then cached.
+// Reflection on values is avoided where possible, because reflect.Value itself allocates, with the following exceptions:
+// 1. creating an instance of a new value: for example, *int needs an int to be allocated
+// 2. appending to a slice: if the existing cap is not enough, allocation is done via reflect.New
+// 3. assignment to a map: both key and value will be reflect.Value
+// For a simple struct binding, decoding is reflect.Value-free and allocation-free.
+type ValDecoder interface {
+	Decode(ptr unsafe.Pointer, iter *Iterator)
+}
+
+// ValEncoder is an internal type registered to cache as needed.
+// Don't confuse jsoniter.ValEncoder with json.Encoder.
+// For json.Encoder's adapter, refer to jsoniter.AdapterEncoder(todo godoc link).
+type ValEncoder interface {
+	IsEmpty(ptr unsafe.Pointer) bool
+	Encode(ptr unsafe.Pointer, stream *Stream)
+}
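+
+// For illustration (not in the original source), a custom ValDecoder that
+// upper-cases decoded strings could look like the sketch below; the name
+// upperDecoder is hypothetical, and registration goes through
+// RegisterTypeDecoder (defined in reflect_extension.go):
+//
+//	type upperDecoder struct{}
+//
+//	func (d *upperDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+//		*(*string)(ptr) = strings.ToUpper(iter.ReadString())
+//	}
+//
+//	// RegisterTypeDecoder("mypkg.MyString", &upperDecoder{})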
+
+type checkIsEmpty interface {
+	IsEmpty(ptr unsafe.Pointer) bool
+}
+
+type ctx struct {
+	*frozenConfig
+	prefix   string
+	encoders map[reflect2.Type]ValEncoder
+	decoders map[reflect2.Type]ValDecoder
+}
+
+func (b *ctx) caseSensitive() bool {
+	if b.frozenConfig == nil {
+		// default is case-insensitive
+		return false
+	}
+	return b.frozenConfig.caseSensitive
+}
+
+func (b *ctx) append(prefix string) *ctx {
+	return &ctx{
+		frozenConfig: b.frozenConfig,
+		prefix:       b.prefix + " " + prefix,
+		encoders:     b.encoders,
+		decoders:     b.decoders,
+	}
+}
+
+// ReadVal copies the underlying JSON into a Go value, like json.Unmarshal
+func (iter *Iterator) ReadVal(obj interface{}) {
+	cacheKey := reflect2.RTypeOf(obj)
+	decoder := iter.cfg.getDecoderFromCache(cacheKey)
+	if decoder == nil {
+		typ := reflect2.TypeOf(obj)
+		if typ.Kind() != reflect.Ptr {
+			iter.ReportError("ReadVal", "can only unmarshal into pointer")
+			return
+		}
+		decoder = iter.cfg.DecoderOf(typ)
+	}
+	ptr := reflect2.PtrOf(obj)
+	if ptr == nil {
+		iter.ReportError("ReadVal", "can not read into nil pointer")
+		return
+	}
+	decoder.Decode(ptr, iter)
+}
+
+// WriteVal copies the Go value into the underlying JSON, like json.Marshal
+func (stream *Stream) WriteVal(val interface{}) {
+	if val == nil {
+		stream.WriteNil()
+		return
+	}
+	cacheKey := reflect2.RTypeOf(val)
+	encoder := stream.cfg.getEncoderFromCache(cacheKey)
+	if encoder == nil {
+		typ := reflect2.TypeOf(val)
+		encoder = stream.cfg.EncoderOf(typ)
+	}
+	encoder.Encode(reflect2.PtrOf(val), stream)
+}
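+
+// Illustrative round trip (not part of the original file), from a caller's
+// perspective, using an Iterator borrowed from a frozen config (cfg is a
+// hypothetical variable here):
+//
+//	iter := cfg.BorrowIterator([]byte(`{"a":1}`))
+//	var v map[string]int
+//	iter.ReadVal(&v) // v == map[string]int{"a": 1}
+//	cfg.ReturnIterator(iter)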
+
+func (cfg *frozenConfig) DecoderOf(typ reflect2.Type) ValDecoder {
+	cacheKey := typ.RType()
+	decoder := cfg.getDecoderFromCache(cacheKey)
+	if decoder != nil {
+		return decoder
+	}
+	ctx := &ctx{
+		frozenConfig: cfg,
+		prefix:       "",
+		decoders:     map[reflect2.Type]ValDecoder{},
+		encoders:     map[reflect2.Type]ValEncoder{},
+	}
+	ptrType := typ.(*reflect2.UnsafePtrType)
+	decoder = decoderOfType(ctx, ptrType.Elem())
+	cfg.addDecoderToCache(cacheKey, decoder)
+	return decoder
+}
+
+func decoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder {
+	decoder := getTypeDecoderFromExtension(ctx, typ)
+	if decoder != nil {
+		return decoder
+	}
+	decoder = createDecoderOfType(ctx, typ)
+	for _, extension := range extensions {
+		decoder = extension.DecorateDecoder(typ, decoder)
+	}
+	decoder = ctx.decoderExtension.DecorateDecoder(typ, decoder)
+	for _, extension := range ctx.extraExtensions {
+		decoder = extension.DecorateDecoder(typ, decoder)
+	}
+	return decoder
+}
+
+func createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder {
+	decoder := ctx.decoders[typ]
+	if decoder != nil {
+		return decoder
+	}
+	placeholder := &placeholderDecoder{}
+	ctx.decoders[typ] = placeholder
+	decoder = _createDecoderOfType(ctx, typ)
+	placeholder.decoder = decoder
+	return decoder
+}
+
+func _createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder {
+	decoder := createDecoderOfJsonRawMessage(ctx, typ)
+	if decoder != nil {
+		return decoder
+	}
+	decoder = createDecoderOfJsonNumber(ctx, typ)
+	if decoder != nil {
+		return decoder
+	}
+	decoder = createDecoderOfMarshaler(ctx, typ)
+	if decoder != nil {
+		return decoder
+	}
+	decoder = createDecoderOfAny(ctx, typ)
+	if decoder != nil {
+		return decoder
+	}
+	decoder = createDecoderOfNative(ctx, typ)
+	if decoder != nil {
+		return decoder
+	}
+	switch typ.Kind() {
+	case reflect.Interface:
+		ifaceType, isIFace := typ.(*reflect2.UnsafeIFaceType)
+		if isIFace {
+			return &ifaceDecoder{valType: ifaceType}
+		}
+		return &efaceDecoder{}
+	case reflect.Struct:
+		return decoderOfStruct(ctx, typ)
+	case reflect.Array:
+		return decoderOfArray(ctx, typ)
+	case reflect.Slice:
+		return decoderOfSlice(ctx, typ)
+	case reflect.Map:
+		return decoderOfMap(ctx, typ)
+	case reflect.Ptr:
+		return decoderOfOptional(ctx, typ)
+	default:
+		return &lazyErrorDecoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())}
+	}
+}
+
+func (cfg *frozenConfig) EncoderOf(typ reflect2.Type) ValEncoder {
+	cacheKey := typ.RType()
+	encoder := cfg.getEncoderFromCache(cacheKey)
+	if encoder != nil {
+		return encoder
+	}
+	ctx := &ctx{
+		frozenConfig: cfg,
+		prefix:       "",
+		decoders:     map[reflect2.Type]ValDecoder{},
+		encoders:     map[reflect2.Type]ValEncoder{},
+	}
+	encoder = encoderOfType(ctx, typ)
+	if typ.LikePtr() {
+		encoder = &onePtrEncoder{encoder}
+	}
+	cfg.addEncoderToCache(cacheKey, encoder)
+	return encoder
+}
+
+type onePtrEncoder struct {
+	encoder ValEncoder
+}
+
+func (encoder *onePtrEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr))
+}
+
+func (encoder *onePtrEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	encoder.encoder.Encode(unsafe.Pointer(&ptr), stream)
+}
+
+func encoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder {
+	encoder := getTypeEncoderFromExtension(ctx, typ)
+	if encoder != nil {
+		return encoder
+	}
+	encoder = createEncoderOfType(ctx, typ)
+	for _, extension := range extensions {
+		encoder = extension.DecorateEncoder(typ, encoder)
+	}
+	encoder = ctx.encoderExtension.DecorateEncoder(typ, encoder)
+	for _, extension := range ctx.extraExtensions {
+		encoder = extension.DecorateEncoder(typ, encoder)
+	}
+	return encoder
+}
+
+func createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder {
+	encoder := ctx.encoders[typ]
+	if encoder != nil {
+		return encoder
+	}
+	placeholder := &placeholderEncoder{}
+	ctx.encoders[typ] = placeholder
+	encoder = _createEncoderOfType(ctx, typ)
+	placeholder.encoder = encoder
+	return encoder
+}
+
+func _createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder {
+	encoder := createEncoderOfJsonRawMessage(ctx, typ)
+	if encoder != nil {
+		return encoder
+	}
+	encoder = createEncoderOfJsonNumber(ctx, typ)
+	if encoder != nil {
+		return encoder
+	}
+	encoder = createEncoderOfMarshaler(ctx, typ)
+	if encoder != nil {
+		return encoder
+	}
+	encoder = createEncoderOfAny(ctx, typ)
+	if encoder != nil {
+		return encoder
+	}
+	encoder = createEncoderOfNative(ctx, typ)
+	if encoder != nil {
+		return encoder
+	}
+	kind := typ.Kind()
+	switch kind {
+	case reflect.Interface:
+		return &dynamicEncoder{typ}
+	case reflect.Struct:
+		return encoderOfStruct(ctx, typ)
+	case reflect.Array:
+		return encoderOfArray(ctx, typ)
+	case reflect.Slice:
+		return encoderOfSlice(ctx, typ)
+	case reflect.Map:
+		return encoderOfMap(ctx, typ)
+	case reflect.Ptr:
+		return encoderOfOptional(ctx, typ)
+	default:
+		return &lazyErrorEncoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())}
+	}
+}
+
+type lazyErrorDecoder struct {
+	err error
+}
+
+func (decoder *lazyErrorDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if iter.WhatIsNext() != NilValue {
+		if iter.Error == nil {
+			iter.Error = decoder.err
+		}
+	} else {
+		iter.Skip()
+	}
+}
+
+type lazyErrorEncoder struct {
+	err error
+}
+
+func (encoder *lazyErrorEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	if ptr == nil {
+		stream.WriteNil()
+	} else if stream.Error == nil {
+		stream.Error = encoder.err
+	}
+}
+
+func (encoder *lazyErrorEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	return false
+}
+
+type placeholderDecoder struct {
+	decoder ValDecoder
+}
+
+func (decoder *placeholderDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	decoder.decoder.Decode(ptr, iter)
+}
+
+type placeholderEncoder struct {
+	encoder ValEncoder
+}
+
+func (encoder *placeholderEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	encoder.encoder.Encode(ptr, stream)
+}
+
+func (encoder *placeholderEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	return encoder.encoder.IsEmpty(ptr)
+}
diff --git a/vendor/github.com/json-iterator/go/reflect_array.go b/vendor/github.com/json-iterator/go/reflect_array.go
new file mode 100644
index 0000000..13a0b7b
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_array.go
@@ -0,0 +1,104 @@
+package jsoniter
+
+import (
+	"fmt"
+	"github.com/modern-go/reflect2"
+	"io"
+	"unsafe"
+)
+
+func decoderOfArray(ctx *ctx, typ reflect2.Type) ValDecoder {
+	arrayType := typ.(*reflect2.UnsafeArrayType)
+	decoder := decoderOfType(ctx.append("[arrayElem]"), arrayType.Elem())
+	return &arrayDecoder{arrayType, decoder}
+}
+
+func encoderOfArray(ctx *ctx, typ reflect2.Type) ValEncoder {
+	arrayType := typ.(*reflect2.UnsafeArrayType)
+	if arrayType.Len() == 0 {
+		return emptyArrayEncoder{}
+	}
+	encoder := encoderOfType(ctx.append("[arrayElem]"), arrayType.Elem())
+	return &arrayEncoder{arrayType, encoder}
+}
+
+type emptyArrayEncoder struct{}
+
+func (encoder emptyArrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	stream.WriteEmptyArray()
+}
+
+func (encoder emptyArrayEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	return true
+}
+
+type arrayEncoder struct {
+	arrayType   *reflect2.UnsafeArrayType
+	elemEncoder ValEncoder
+}
+
+func (encoder *arrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	stream.WriteArrayStart()
+	elemPtr := unsafe.Pointer(ptr)
+	encoder.elemEncoder.Encode(elemPtr, stream)
+	for i := 1; i < encoder.arrayType.Len(); i++ {
+		stream.WriteMore()
+		elemPtr = encoder.arrayType.UnsafeGetIndex(ptr, i)
+		encoder.elemEncoder.Encode(elemPtr, stream)
+	}
+	stream.WriteArrayEnd()
+	if stream.Error != nil && stream.Error != io.EOF {
+		stream.Error = fmt.Errorf("%v: %s", encoder.arrayType, stream.Error.Error())
+	}
+}
+
+func (encoder *arrayEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	return false
+}
+
+type arrayDecoder struct {
+	arrayType   *reflect2.UnsafeArrayType
+	elemDecoder ValDecoder
+}
+
+func (decoder *arrayDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	decoder.doDecode(ptr, iter)
+	if iter.Error != nil && iter.Error != io.EOF {
+		iter.Error = fmt.Errorf("%v: %s", decoder.arrayType, iter.Error.Error())
+	}
+}
+
+func (decoder *arrayDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) {
+	c := iter.nextToken()
+	arrayType := decoder.arrayType
+	if c == 'n' {
+		iter.skipThreeBytes('u', 'l', 'l')
+		return
+	}
+	if c != '[' {
+		iter.ReportError("decode array", "expect [ or n, but found "+string([]byte{c}))
+		return
+	}
+	c = iter.nextToken()
+	if c == ']' {
+		return
+	}
+	iter.unreadByte()
+	elemPtr := arrayType.UnsafeGetIndex(ptr, 0)
+	decoder.elemDecoder.Decode(elemPtr, iter)
+	length := 1
+	for c = iter.nextToken(); c == ','; c = iter.nextToken() {
+		if length >= arrayType.Len() {
+			iter.Skip()
+			continue
+		}
+		idx := length
+		length++
+		elemPtr = arrayType.UnsafeGetIndex(ptr, idx)
+		decoder.elemDecoder.Decode(elemPtr, iter)
+	}
+	if c != ']' {
+		iter.ReportError("decode array", "expect ], but found "+string([]byte{c}))
+		return
+	}
+}
diff --git a/vendor/github.com/json-iterator/go/reflect_dynamic.go b/vendor/github.com/json-iterator/go/reflect_dynamic.go
new file mode 100644
index 0000000..8b6bc8b
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_dynamic.go
@@ -0,0 +1,70 @@
+package jsoniter
+
+import (
+	"github.com/modern-go/reflect2"
+	"reflect"
+	"unsafe"
+)
+
+type dynamicEncoder struct {
+	valType reflect2.Type
+}
+
+func (encoder *dynamicEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	obj := encoder.valType.UnsafeIndirect(ptr)
+	stream.WriteVal(obj)
+}
+
+func (encoder *dynamicEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	return encoder.valType.UnsafeIndirect(ptr) == nil
+}
+
+type efaceDecoder struct {
+}
+
+func (decoder *efaceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	pObj := (*interface{})(ptr)
+	obj := *pObj
+	if obj == nil {
+		*pObj = iter.Read()
+		return
+	}
+	typ := reflect2.TypeOf(obj)
+	if typ.Kind() != reflect.Ptr {
+		*pObj = iter.Read()
+		return
+	}
+	ptrType := typ.(*reflect2.UnsafePtrType)
+	ptrElemType := ptrType.Elem()
+	if iter.WhatIsNext() == NilValue {
+		if ptrElemType.Kind() != reflect.Ptr {
+			iter.skipFourBytes('n', 'u', 'l', 'l')
+			*pObj = nil
+			return
+		}
+	}
+	if reflect2.IsNil(obj) {
+		obj := ptrElemType.New()
+		iter.ReadVal(obj)
+		*pObj = obj
+		return
+	}
+	iter.ReadVal(obj)
+}
+
+type ifaceDecoder struct {
+	valType *reflect2.UnsafeIFaceType
+}
+
+func (decoder *ifaceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if iter.ReadNil() {
+		decoder.valType.UnsafeSet(ptr, decoder.valType.UnsafeNew())
+		return
+	}
+	obj := decoder.valType.UnsafeIndirect(ptr)
+	if reflect2.IsNil(obj) {
+		iter.ReportError("decode non empty interface", "can not unmarshal into nil")
+		return
+	}
+	iter.ReadVal(obj)
+}
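+
+// Decoding into interfaces, a caller-side sketch (illustrative only): an
+// empty interface takes whatever the input parses to, while an interface
+// holding a pointer is decoded through that pointer:
+//
+//	var v interface{}
+//	jsoniter.Unmarshal([]byte(`{"a":1}`), &v) // v is map[string]interface{}
+//
+//	n := 0
+//	v = &n
+//	jsoniter.Unmarshal([]byte(`42`), &v) // decodes into n; n == 42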
diff --git a/vendor/github.com/json-iterator/go/reflect_extension.go b/vendor/github.com/json-iterator/go/reflect_extension.go
new file mode 100644
index 0000000..04f6875
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_extension.go
@@ -0,0 +1,483 @@
+package jsoniter
+
+import (
+	"fmt"
+	"github.com/modern-go/reflect2"
+	"reflect"
+	"sort"
+	"strings"
+	"unicode"
+	"unsafe"
+)
+
+var typeDecoders = map[string]ValDecoder{}
+var fieldDecoders = map[string]ValDecoder{}
+var typeEncoders = map[string]ValEncoder{}
+var fieldEncoders = map[string]ValEncoder{}
+var extensions = []Extension{}
+
+// StructDescriptor describes how the struct should be encoded/decoded
+type StructDescriptor struct {
+	Type   reflect2.Type
+	Fields []*Binding
+}
+
+// GetField returns one field from the descriptor by its name.
+// A map cannot be used here because field order must be preserved.
+func (structDescriptor *StructDescriptor) GetField(fieldName string) *Binding {
+	for _, binding := range structDescriptor.Fields {
+		if binding.Field.Name() == fieldName {
+			return binding
+		}
+	}
+	return nil
+}
+
+// Binding describes how the struct field should be encoded/decoded
+type Binding struct {
+	levels    []int
+	Field     reflect2.StructField
+	FromNames []string
+	ToNames   []string
+	Encoder   ValEncoder
+	Decoder   ValDecoder
+}
+
+// Extension is the single SPI for all customization. Customize
+// encoding/decoding by specifying an alternate encoder/decoder.
+// Fields can also be renamed via UpdateStructDescriptor.
+type Extension interface {
+	UpdateStructDescriptor(structDescriptor *StructDescriptor)
+	CreateMapKeyDecoder(typ reflect2.Type) ValDecoder
+	CreateMapKeyEncoder(typ reflect2.Type) ValEncoder
+	CreateDecoder(typ reflect2.Type) ValDecoder
+	CreateEncoder(typ reflect2.Type) ValEncoder
+	DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder
+	DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder
+}
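+
+// A sketch of a custom extension (illustrative only; the name
+// lowerCaseExtension is hypothetical). Embedding DummyExtension, defined
+// below, supplies no-op defaults for the remaining methods:
+//
+//	type lowerCaseExtension struct {
+//		DummyExtension
+//	}
+//
+//	func (e *lowerCaseExtension) UpdateStructDescriptor(sd *StructDescriptor) {
+//		for _, binding := range sd.Fields {
+//			name := strings.ToLower(binding.Field.Name())
+//			binding.FromNames = []string{name}
+//			binding.ToNames = []string{name}
+//		}
+//	}
+//
+//	// RegisterExtension(&lowerCaseExtension{})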
+
+// DummyExtension provides no-op implementations of all Extension methods; embed it to override only the ones you need
+type DummyExtension struct {
+}
+
+// UpdateStructDescriptor No-op
+func (extension *DummyExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) {
+}
+
+// CreateMapKeyDecoder No-op
+func (extension *DummyExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder {
+	return nil
+}
+
+// CreateMapKeyEncoder No-op
+func (extension *DummyExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder {
+	return nil
+}
+
+// CreateDecoder No-op
+func (extension *DummyExtension) CreateDecoder(typ reflect2.Type) ValDecoder {
+	return nil
+}
+
+// CreateEncoder No-op
+func (extension *DummyExtension) CreateEncoder(typ reflect2.Type) ValEncoder {
+	return nil
+}
+
+// DecorateDecoder No-op
+func (extension *DummyExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder {
+	return decoder
+}
+
+// DecorateEncoder No-op
+func (extension *DummyExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder {
+	return encoder
+}
+
+// EncoderExtension is an Extension backed by a map from type to ValEncoder
+type EncoderExtension map[reflect2.Type]ValEncoder
+
+// UpdateStructDescriptor No-op
+func (extension EncoderExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) {
+}
+
+// CreateDecoder No-op
+func (extension EncoderExtension) CreateDecoder(typ reflect2.Type) ValDecoder {
+	return nil
+}
+
+// CreateEncoder returns the encoder registered in the map, if any
+func (extension EncoderExtension) CreateEncoder(typ reflect2.Type) ValEncoder {
+	return extension[typ]
+}
+
+// CreateMapKeyDecoder No-op
+func (extension EncoderExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder {
+	return nil
+}
+
+// CreateMapKeyEncoder No-op
+func (extension EncoderExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder {
+	return nil
+}
+
+// DecorateDecoder No-op
+func (extension EncoderExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder {
+	return decoder
+}
+
+// DecorateEncoder No-op
+func (extension EncoderExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder {
+	return encoder
+}
+
+// DecoderExtension is an Extension backed by a map from type to ValDecoder
+type DecoderExtension map[reflect2.Type]ValDecoder
+
+// UpdateStructDescriptor No-op
+func (extension DecoderExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) {
+}
+
+// CreateMapKeyDecoder No-op
+func (extension DecoderExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder {
+	return nil
+}
+
+// CreateMapKeyEncoder No-op
+func (extension DecoderExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder {
+	return nil
+}
+
+// CreateDecoder returns the decoder registered in the map, if any
+func (extension DecoderExtension) CreateDecoder(typ reflect2.Type) ValDecoder {
+	return extension[typ]
+}
+
+// CreateEncoder No-op
+func (extension DecoderExtension) CreateEncoder(typ reflect2.Type) ValEncoder {
+	return nil
+}
+
+// DecorateDecoder No-op
+func (extension DecoderExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder {
+	return decoder
+}
+
+// DecorateEncoder No-op
+func (extension DecoderExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder {
+	return encoder
+}
+
+type funcDecoder struct {
+	fun DecoderFunc
+}
+
+func (decoder *funcDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	decoder.fun(ptr, iter)
+}
+
+type funcEncoder struct {
+	fun         EncoderFunc
+	isEmptyFunc func(ptr unsafe.Pointer) bool
+}
+
+func (encoder *funcEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	encoder.fun(ptr, stream)
+}
+
+func (encoder *funcEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	if encoder.isEmptyFunc == nil {
+		return false
+	}
+	return encoder.isEmptyFunc(ptr)
+}
+
+// DecoderFunc is the function form of a type decoder
+type DecoderFunc func(ptr unsafe.Pointer, iter *Iterator)
+
+// EncoderFunc is the function form of a type encoder
+type EncoderFunc func(ptr unsafe.Pointer, stream *Stream)
+
+// RegisterTypeDecoderFunc registers a decoder function for a type
+func RegisterTypeDecoderFunc(typ string, fun DecoderFunc) {
+	typeDecoders[typ] = &funcDecoder{fun}
+}
+
+// RegisterTypeDecoder registers a ValDecoder for a type
+func RegisterTypeDecoder(typ string, decoder ValDecoder) {
+	typeDecoders[typ] = decoder
+}
+
+// RegisterFieldDecoderFunc registers a decoder function for a struct field
+func RegisterFieldDecoderFunc(typ string, field string, fun DecoderFunc) {
+	RegisterFieldDecoder(typ, field, &funcDecoder{fun})
+}
+
+// RegisterFieldDecoder registers a ValDecoder for a struct field
+func RegisterFieldDecoder(typ string, field string, decoder ValDecoder) {
+	fieldDecoders[fmt.Sprintf("%s/%s", typ, field)] = decoder
+}
+
+// RegisterTypeEncoderFunc registers encode/isEmpty functions as the encoder for a type
+func RegisterTypeEncoderFunc(typ string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) {
+	typeEncoders[typ] = &funcEncoder{fun, isEmptyFunc}
+}
+
+// RegisterTypeEncoder registers a ValEncoder for a type
+func RegisterTypeEncoder(typ string, encoder ValEncoder) {
+	typeEncoders[typ] = encoder
+}
+
+// RegisterFieldEncoderFunc registers encode/isEmpty functions as the encoder for a struct field
+func RegisterFieldEncoderFunc(typ string, field string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) {
+	RegisterFieldEncoder(typ, field, &funcEncoder{fun, isEmptyFunc})
+}
+
+// RegisterFieldEncoder registers a ValEncoder for a struct field
+func RegisterFieldEncoder(typ string, field string, encoder ValEncoder) {
+	fieldEncoders[fmt.Sprintf("%s/%s", typ, field)] = encoder
+}
+
+// RegisterExtension registers an extension
+func RegisterExtension(extension Extension) {
+	extensions = append(extensions, extension)
+}
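+
+// Registration usage sketch (illustrative only; decoding time.Duration from
+// a string is a hypothetical behavior, shown from a caller's perspective):
+//
+//	jsoniter.RegisterTypeDecoderFunc("time.Duration",
+//		func(ptr unsafe.Pointer, iter *jsoniter.Iterator) {
+//			d, err := time.ParseDuration(iter.ReadString())
+//			if err != nil {
+//				iter.ReportError("time.Duration", err.Error())
+//				return
+//			}
+//			*(*time.Duration)(ptr) = d
+//		})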
+
+func getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder {
+	decoder := _getTypeDecoderFromExtension(ctx, typ)
+	if decoder != nil {
+		for _, extension := range extensions {
+			decoder = extension.DecorateDecoder(typ, decoder)
+		}
+		decoder = ctx.decoderExtension.DecorateDecoder(typ, decoder)
+		for _, extension := range ctx.extraExtensions {
+			decoder = extension.DecorateDecoder(typ, decoder)
+		}
+	}
+	return decoder
+}
+
+func _getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder {
+	for _, extension := range extensions {
+		decoder := extension.CreateDecoder(typ)
+		if decoder != nil {
+			return decoder
+		}
+	}
+	decoder := ctx.decoderExtension.CreateDecoder(typ)
+	if decoder != nil {
+		return decoder
+	}
+	for _, extension := range ctx.extraExtensions {
+		decoder := extension.CreateDecoder(typ)
+		if decoder != nil {
+			return decoder
+		}
+	}
+	typeName := typ.String()
+	decoder = typeDecoders[typeName]
+	if decoder != nil {
+		return decoder
+	}
+	if typ.Kind() == reflect.Ptr {
+		ptrType := typ.(*reflect2.UnsafePtrType)
+		decoder := typeDecoders[ptrType.Elem().String()]
+		if decoder != nil {
+			return &OptionalDecoder{ptrType.Elem(), decoder}
+		}
+	}
+	return nil
+}
+
+func getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder {
+	encoder := _getTypeEncoderFromExtension(ctx, typ)
+	if encoder != nil {
+		for _, extension := range extensions {
+			encoder = extension.DecorateEncoder(typ, encoder)
+		}
+		encoder = ctx.encoderExtension.DecorateEncoder(typ, encoder)
+		for _, extension := range ctx.extraExtensions {
+			encoder = extension.DecorateEncoder(typ, encoder)
+		}
+	}
+	return encoder
+}
+
+func _getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder {
+	for _, extension := range extensions {
+		encoder := extension.CreateEncoder(typ)
+		if encoder != nil {
+			return encoder
+		}
+	}
+	encoder := ctx.encoderExtension.CreateEncoder(typ)
+	if encoder != nil {
+		return encoder
+	}
+	for _, extension := range ctx.extraExtensions {
+		encoder := extension.CreateEncoder(typ)
+		if encoder != nil {
+			return encoder
+		}
+	}
+	typeName := typ.String()
+	encoder = typeEncoders[typeName]
+	if encoder != nil {
+		return encoder
+	}
+	if typ.Kind() == reflect.Ptr {
+		typePtr := typ.(*reflect2.UnsafePtrType)
+		encoder := typeEncoders[typePtr.Elem().String()]
+		if encoder != nil {
+			return &OptionalEncoder{encoder}
+		}
+	}
+	return nil
+}
+
+func describeStruct(ctx *ctx, typ reflect2.Type) *StructDescriptor {
+	structType := typ.(*reflect2.UnsafeStructType)
+	embeddedBindings := []*Binding{}
+	bindings := []*Binding{}
+	for i := 0; i < structType.NumField(); i++ {
+		field := structType.Field(i)
+		tag, hastag := field.Tag().Lookup(ctx.getTagKey())
+		if ctx.onlyTaggedField && !hastag {
+			continue
+		}
+		tagParts := strings.Split(tag, ",")
+		if tag == "-" {
+			continue
+		}
+		if field.Anonymous() && (tag == "" || tagParts[0] == "") {
+			if field.Type().Kind() == reflect.Struct {
+				structDescriptor := describeStruct(ctx, field.Type())
+				for _, binding := range structDescriptor.Fields {
+					binding.levels = append([]int{i}, binding.levels...)
+					omitempty := binding.Encoder.(*structFieldEncoder).omitempty
+					binding.Encoder = &structFieldEncoder{field, binding.Encoder, omitempty}
+					binding.Decoder = &structFieldDecoder{field, binding.Decoder}
+					embeddedBindings = append(embeddedBindings, binding)
+				}
+				continue
+			} else if field.Type().Kind() == reflect.Ptr {
+				ptrType := field.Type().(*reflect2.UnsafePtrType)
+				if ptrType.Elem().Kind() == reflect.Struct {
+					structDescriptor := describeStruct(ctx, ptrType.Elem())
+					for _, binding := range structDescriptor.Fields {
+						binding.levels = append([]int{i}, binding.levels...)
+						omitempty := binding.Encoder.(*structFieldEncoder).omitempty
+						binding.Encoder = &dereferenceEncoder{binding.Encoder}
+						binding.Encoder = &structFieldEncoder{field, binding.Encoder, omitempty}
+						binding.Decoder = &dereferenceDecoder{ptrType.Elem(), binding.Decoder}
+						binding.Decoder = &structFieldDecoder{field, binding.Decoder}
+						embeddedBindings = append(embeddedBindings, binding)
+					}
+					continue
+				}
+			}
+		}
+		fieldNames := calcFieldNames(field.Name(), tagParts[0], tag)
+		fieldCacheKey := fmt.Sprintf("%s/%s", typ.String(), field.Name())
+		decoder := fieldDecoders[fieldCacheKey]
+		if decoder == nil {
+			decoder = decoderOfType(ctx.append(field.Name()), field.Type())
+		}
+		encoder := fieldEncoders[fieldCacheKey]
+		if encoder == nil {
+			encoder = encoderOfType(ctx.append(field.Name()), field.Type())
+		}
+		binding := &Binding{
+			Field:     field,
+			FromNames: fieldNames,
+			ToNames:   fieldNames,
+			Decoder:   decoder,
+			Encoder:   encoder,
+		}
+		binding.levels = []int{i}
+		bindings = append(bindings, binding)
+	}
+	return createStructDescriptor(ctx, typ, bindings, embeddedBindings)
+}
+
+func createStructDescriptor(ctx *ctx, typ reflect2.Type, bindings []*Binding, embeddedBindings []*Binding) *StructDescriptor {
+	structDescriptor := &StructDescriptor{
+		Type:   typ,
+		Fields: bindings,
+	}
+	for _, extension := range extensions {
+		extension.UpdateStructDescriptor(structDescriptor)
+	}
+	ctx.encoderExtension.UpdateStructDescriptor(structDescriptor)
+	ctx.decoderExtension.UpdateStructDescriptor(structDescriptor)
+	for _, extension := range ctx.extraExtensions {
+		extension.UpdateStructDescriptor(structDescriptor)
+	}
+	processTags(structDescriptor, ctx.frozenConfig)
+	// merge normal & embedded bindings & sort with original order
+	allBindings := sortableBindings(append(embeddedBindings, structDescriptor.Fields...))
+	sort.Sort(allBindings)
+	structDescriptor.Fields = allBindings
+	return structDescriptor
+}
+
+type sortableBindings []*Binding
+
+func (bindings sortableBindings) Len() int {
+	return len(bindings)
+}
+
+func (bindings sortableBindings) Less(i, j int) bool {
+	left := bindings[i].levels
+	right := bindings[j].levels
+	k := 0
+	for {
+		if left[k] < right[k] {
+			return true
+		} else if left[k] > right[k] {
+			return false
+		}
+		k++
+	}
+}
+
+func (bindings sortableBindings) Swap(i, j int) {
+	bindings[i], bindings[j] = bindings[j], bindings[i]
+}
+
+func processTags(structDescriptor *StructDescriptor, cfg *frozenConfig) {
+	for _, binding := range structDescriptor.Fields {
+		shouldOmitEmpty := false
+		tagParts := strings.Split(binding.Field.Tag().Get(cfg.getTagKey()), ",")
+		for _, tagPart := range tagParts[1:] {
+			if tagPart == "omitempty" {
+				shouldOmitEmpty = true
+			} else if tagPart == "string" {
+				if binding.Field.Type().Kind() == reflect.String {
+					binding.Decoder = &stringModeStringDecoder{binding.Decoder, cfg}
+					binding.Encoder = &stringModeStringEncoder{binding.Encoder, cfg}
+				} else {
+					binding.Decoder = &stringModeNumberDecoder{binding.Decoder}
+					binding.Encoder = &stringModeNumberEncoder{binding.Encoder}
+				}
+			}
+		}
+		binding.Decoder = &structFieldDecoder{binding.Field, binding.Decoder}
+		binding.Encoder = &structFieldEncoder{binding.Field, binding.Encoder, shouldOmitEmpty}
+	}
+}
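+
+// Tag handling sketch (illustrative only): given
+//
+//	type T struct {
+//		A int `json:"a,omitempty"` // omitted from output when zero
+//		B int `json:"b,string"`    // number encoded as a JSON string, e.g. "2"
+//	}
+//
+// omitempty wraps the field encoder with the shouldOmitEmpty flag, and
+// string selects the stringMode* codecs referenced above.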
+
+func calcFieldNames(originalFieldName string, tagProvidedFieldName string, wholeTag string) []string {
+	// ignore?
+	if wholeTag == "-" {
+		return []string{}
+	}
+	// rename?
+	var fieldNames []string
+	if tagProvidedFieldName == "" {
+		fieldNames = []string{originalFieldName}
+	} else {
+		fieldNames = []string{tagProvidedFieldName}
+	}
+	// private?
+	isNotExported := unicode.IsLower(rune(originalFieldName[0]))
+	if isNotExported {
+		fieldNames = []string{}
+	}
+	return fieldNames
+}
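+
+// Name resolution sketch (illustrative only):
+//
+//	`json:"-"`                    -> no names (field ignored)
+//	`json:"nick"`                 -> ["nick"]
+//	no tag or `json:",omitempty"` -> [original field name]
+//	unexported field              -> no names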
diff --git a/vendor/github.com/json-iterator/go/reflect_json_number.go b/vendor/github.com/json-iterator/go/reflect_json_number.go
new file mode 100644
index 0000000..98d45c1
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_json_number.go
@@ -0,0 +1,112 @@
+package jsoniter
+
+import (
+	"encoding/json"
+	"github.com/modern-go/reflect2"
+	"strconv"
+	"unsafe"
+)
+
+// Number represents a JSON number literal; it behaves like json.Number.
+type Number string
+
+// String returns the literal text of the number.
+func (n Number) String() string { return string(n) }
+
+// Float64 returns the number as a float64.
+func (n Number) Float64() (float64, error) {
+	return strconv.ParseFloat(string(n), 64)
+}
+
+// Int64 returns the number as an int64.
+func (n Number) Int64() (int64, error) {
+	return strconv.ParseInt(string(n), 10, 64)
+}
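+
+// Usage sketch (illustrative only):
+//
+//	n := Number("3.14")
+//	f, _ := n.Float64() // f == 3.14
+//	_, err := n.Int64() // err != nil: "3.14" is not an integer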
+
+// CastJsonNumber returns the string form of val and true if val is a
+// json.Number or jsoniter.Number; otherwise it returns "" and false.
+func CastJsonNumber(val interface{}) (string, bool) {
+	switch typedVal := val.(type) {
+	case json.Number:
+		return string(typedVal), true
+	case Number:
+		return string(typedVal), true
+	}
+	return "", false
+}
+
+var jsonNumberType = reflect2.TypeOfPtr((*json.Number)(nil)).Elem()
+var jsoniterNumberType = reflect2.TypeOfPtr((*Number)(nil)).Elem()
+
+func createDecoderOfJsonNumber(ctx *ctx, typ reflect2.Type) ValDecoder {
+	if typ.AssignableTo(jsonNumberType) {
+		return &jsonNumberCodec{}
+	}
+	if typ.AssignableTo(jsoniterNumberType) {
+		return &jsoniterNumberCodec{}
+	}
+	return nil
+}
+
+func createEncoderOfJsonNumber(ctx *ctx, typ reflect2.Type) ValEncoder {
+	if typ.AssignableTo(jsonNumberType) {
+		return &jsonNumberCodec{}
+	}
+	if typ.AssignableTo(jsoniterNumberType) {
+		return &jsoniterNumberCodec{}
+	}
+	return nil
+}
+
+type jsonNumberCodec struct {
+}
+
+func (codec *jsonNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	switch iter.WhatIsNext() {
+	case StringValue:
+		*((*json.Number)(ptr)) = json.Number(iter.ReadString())
+	case NilValue:
+		iter.skipFourBytes('n', 'u', 'l', 'l')
+		*((*json.Number)(ptr)) = ""
+	default:
+		*((*json.Number)(ptr)) = json.Number([]byte(iter.readNumberAsString()))
+	}
+}
+
+func (codec *jsonNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
+	number := *((*json.Number)(ptr))
+	if len(number) == 0 {
+		stream.writeByte('0')
+	} else {
+		stream.WriteRaw(string(number))
+	}
+}
+
+func (codec *jsonNumberCodec) IsEmpty(ptr unsafe.Pointer) bool {
+	return len(*((*json.Number)(ptr))) == 0
+}
+
+type jsoniterNumberCodec struct {
+}
+
+func (codec *jsoniterNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	switch iter.WhatIsNext() {
+	case StringValue:
+		*((*Number)(ptr)) = Number(iter.ReadString())
+	case NilValue:
+		iter.skipFourBytes('n', 'u', 'l', 'l')
+		*((*Number)(ptr)) = ""
+	default:
+		*((*Number)(ptr)) = Number([]byte(iter.readNumberAsString()))
+	}
+}
+
+func (codec *jsoniterNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
+	number := *((*Number)(ptr))
+	if len(number) == 0 {
+		stream.writeByte('0')
+	} else {
+		stream.WriteRaw(string(number))
+	}
+}
+
+func (codec *jsoniterNumberCodec) IsEmpty(ptr unsafe.Pointer) bool {
+	return len(*((*Number)(ptr))) == 0
+}
diff --git a/vendor/github.com/json-iterator/go/reflect_json_raw_message.go b/vendor/github.com/json-iterator/go/reflect_json_raw_message.go
new file mode 100644
index 0000000..f261993
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_json_raw_message.go
@@ -0,0 +1,60 @@
+package jsoniter
+
+import (
+	"encoding/json"
+	"github.com/modern-go/reflect2"
+	"unsafe"
+)
+
+var jsonRawMessageType = reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem()
+var jsoniterRawMessageType = reflect2.TypeOfPtr((*RawMessage)(nil)).Elem()
+
+func createEncoderOfJsonRawMessage(ctx *ctx, typ reflect2.Type) ValEncoder {
+	if typ == jsonRawMessageType {
+		return &jsonRawMessageCodec{}
+	}
+	if typ == jsoniterRawMessageType {
+		return &jsoniterRawMessageCodec{}
+	}
+	return nil
+}
+
+func createDecoderOfJsonRawMessage(ctx *ctx, typ reflect2.Type) ValDecoder {
+	if typ == jsonRawMessageType {
+		return &jsonRawMessageCodec{}
+	}
+	if typ == jsoniterRawMessageType {
+		return &jsoniterRawMessageCodec{}
+	}
+	return nil
+}
+
+type jsonRawMessageCodec struct {
+}
+
+func (codec *jsonRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	*((*json.RawMessage)(ptr)) = json.RawMessage(iter.SkipAndReturnBytes())
+}
+
+func (codec *jsonRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
+	stream.WriteRaw(string(*((*json.RawMessage)(ptr))))
+}
+
+func (codec *jsonRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool {
+	return len(*((*json.RawMessage)(ptr))) == 0
+}
+
+type jsoniterRawMessageCodec struct {
+}
+
+func (codec *jsoniterRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	*((*RawMessage)(ptr)) = RawMessage(iter.SkipAndReturnBytes())
+}
+
+func (codec *jsoniterRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
+	stream.WriteRaw(string(*((*RawMessage)(ptr))))
+}
+
+func (codec *jsoniterRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool {
+	return len(*((*RawMessage)(ptr))) == 0
+}
diff --git a/vendor/github.com/json-iterator/go/reflect_map.go b/vendor/github.com/json-iterator/go/reflect_map.go
new file mode 100644
index 0000000..7f66a88
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_map.go
@@ -0,0 +1,326 @@
+package jsoniter
+
+import (
+	"fmt"
+	"github.com/modern-go/reflect2"
+	"io"
+	"reflect"
+	"sort"
+	"unsafe"
+)
+
+func decoderOfMap(ctx *ctx, typ reflect2.Type) ValDecoder {
+	mapType := typ.(*reflect2.UnsafeMapType)
+	keyDecoder := decoderOfMapKey(ctx.append("[mapKey]"), mapType.Key())
+	elemDecoder := decoderOfType(ctx.append("[mapElem]"), mapType.Elem())
+	return &mapDecoder{
+		mapType:     mapType,
+		keyType:     mapType.Key(),
+		elemType:    mapType.Elem(),
+		keyDecoder:  keyDecoder,
+		elemDecoder: elemDecoder,
+	}
+}
+
+func encoderOfMap(ctx *ctx, typ reflect2.Type) ValEncoder {
+	mapType := typ.(*reflect2.UnsafeMapType)
+	if ctx.sortMapKeys {
+		return &sortKeysMapEncoder{
+			mapType:     mapType,
+			keyEncoder:  encoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()),
+			elemEncoder: encoderOfType(ctx.append("[mapElem]"), mapType.Elem()),
+		}
+	}
+	return &mapEncoder{
+		mapType:     mapType,
+		keyEncoder:  encoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()),
+		elemEncoder: encoderOfType(ctx.append("[mapElem]"), mapType.Elem()),
+	}
+}
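+
+// Whether sortKeysMapEncoder or mapEncoder is chosen follows the frozen
+// config. A caller-side sketch (illustrative only):
+//
+//	json := jsoniter.Config{SortMapKeys: true}.Froze()
+//	out, _ := json.Marshal(map[string]int{"b": 2, "a": 1})
+//	// out == []byte(`{"a":1,"b":2}`)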
+
+func decoderOfMapKey(ctx *ctx, typ reflect2.Type) ValDecoder {
+	decoder := ctx.decoderExtension.CreateMapKeyDecoder(typ)
+	if decoder != nil {
+		return decoder
+	}
+	for _, extension := range ctx.extraExtensions {
+		decoder := extension.CreateMapKeyDecoder(typ)
+		if decoder != nil {
+			return decoder
+		}
+	}
+	switch typ.Kind() {
+	case reflect.String:
+		return decoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String))
+	case reflect.Bool,
+		reflect.Uint8, reflect.Int8,
+		reflect.Uint16, reflect.Int16,
+		reflect.Uint32, reflect.Int32,
+		reflect.Uint64, reflect.Int64,
+		reflect.Uint, reflect.Int,
+		reflect.Float32, reflect.Float64,
+		reflect.Uintptr:
+		typ = reflect2.DefaultTypeOfKind(typ.Kind())
+		return &numericMapKeyDecoder{decoderOfType(ctx, typ)}
+	default:
+		ptrType := reflect2.PtrTo(typ)
+		if ptrType.Implements(textMarshalerType) {
+			return &referenceDecoder{
+				&textUnmarshalerDecoder{
+					valType: ptrType,
+				},
+			}
+		}
+		if typ.Implements(textMarshalerType) {
+			return &textUnmarshalerDecoder{
+				valType: typ,
+			}
+		}
+		return &lazyErrorDecoder{err: fmt.Errorf("unsupported map key type: %v", typ)}
+	}
+}
+
+func encoderOfMapKey(ctx *ctx, typ reflect2.Type) ValEncoder {
+	encoder := ctx.encoderExtension.CreateMapKeyEncoder(typ)
+	if encoder != nil {
+		return encoder
+	}
+	for _, extension := range ctx.extraExtensions {
+		encoder := extension.CreateMapKeyEncoder(typ)
+		if encoder != nil {
+			return encoder
+		}
+	}
+	switch typ.Kind() {
+	case reflect.String:
+		return encoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String))
+	case reflect.Bool,
+		reflect.Uint8, reflect.Int8,
+		reflect.Uint16, reflect.Int16,
+		reflect.Uint32, reflect.Int32,
+		reflect.Uint64, reflect.Int64,
+		reflect.Uint, reflect.Int,
+		reflect.Float32, reflect.Float64,
+		reflect.Uintptr:
+		typ = reflect2.DefaultTypeOfKind(typ.Kind())
+		return &numericMapKeyEncoder{encoderOfType(ctx, typ)}
+	default:
+		if typ == textMarshalerType {
+			return &directTextMarshalerEncoder{
+				stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
+			}
+		}
+		if typ.Implements(textMarshalerType) {
+			return &textMarshalerEncoder{
+				valType:       typ,
+				stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
+			}
+		}
+		if typ.Kind() == reflect.Interface {
+			return &dynamicMapKeyEncoder{ctx, typ}
+		}
+		return &lazyErrorEncoder{err: fmt.Errorf("unsupported map key type: %v", typ)}
+	}
+}
+
+type mapDecoder struct {
+	mapType     *reflect2.UnsafeMapType
+	keyType     reflect2.Type
+	elemType    reflect2.Type
+	keyDecoder  ValDecoder
+	elemDecoder ValDecoder
+}
+
+func (decoder *mapDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	mapType := decoder.mapType
+	c := iter.nextToken()
+	if c == 'n' {
+		iter.skipThreeBytes('u', 'l', 'l')
+		*(*unsafe.Pointer)(ptr) = nil
+		mapType.UnsafeSet(ptr, mapType.UnsafeNew())
+		return
+	}
+	if mapType.UnsafeIsNil(ptr) {
+		mapType.UnsafeSet(ptr, mapType.UnsafeMakeMap(0))
+	}
+	if c != '{' {
+		iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c}))
+		return
+	}
+	c = iter.nextToken()
+	if c == '}' {
+		return
+	}
+	if c != '"' {
+		iter.ReportError("ReadMapCB", `expect " after }, but found `+string([]byte{c}))
+		return
+	}
+	iter.unreadByte()
+	key := decoder.keyType.UnsafeNew()
+	decoder.keyDecoder.Decode(key, iter)
+	c = iter.nextToken()
+	if c != ':' {
+		iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c}))
+		return
+	}
+	elem := decoder.elemType.UnsafeNew()
+	decoder.elemDecoder.Decode(elem, iter)
+	decoder.mapType.UnsafeSetIndex(ptr, key, elem)
+	for c = iter.nextToken(); c == ','; c = iter.nextToken() {
+		key := decoder.keyType.UnsafeNew()
+		decoder.keyDecoder.Decode(key, iter)
+		c = iter.nextToken()
+		if c != ':' {
+			iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c}))
+			return
+		}
+		elem := decoder.elemType.UnsafeNew()
+		decoder.elemDecoder.Decode(elem, iter)
+		decoder.mapType.UnsafeSetIndex(ptr, key, elem)
+	}
+	if c != '}' {
+		iter.ReportError("ReadMapCB", `expect }, but found `+string([]byte{c}))
+	}
+}
+
+type numericMapKeyDecoder struct {
+	decoder ValDecoder
+}
+
+func (decoder *numericMapKeyDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	c := iter.nextToken()
+	if c != '"' {
+		iter.ReportError("ReadMapCB", `expect ", but found `+string([]byte{c}))
+		return
+	}
+	decoder.decoder.Decode(ptr, iter)
+	c = iter.nextToken()
+	if c != '"' {
+		iter.ReportError("ReadMapCB", `expect ", but found `+string([]byte{c}))
+		return
+	}
+}
+
+type numericMapKeyEncoder struct {
+	encoder ValEncoder
+}
+
+func (encoder *numericMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	stream.writeByte('"')
+	encoder.encoder.Encode(ptr, stream)
+	stream.writeByte('"')
+}
+
+func (encoder *numericMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	return false
+}
+
+type dynamicMapKeyEncoder struct {
+	ctx     *ctx
+	valType reflect2.Type
+}
+
+func (encoder *dynamicMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	obj := encoder.valType.UnsafeIndirect(ptr)
+	encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).Encode(reflect2.PtrOf(obj), stream)
+}
+
+func (encoder *dynamicMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	obj := encoder.valType.UnsafeIndirect(ptr)
+	return encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).IsEmpty(reflect2.PtrOf(obj))
+}
+
+type mapEncoder struct {
+	mapType     *reflect2.UnsafeMapType
+	keyEncoder  ValEncoder
+	elemEncoder ValEncoder
+}
+
+func (encoder *mapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	stream.WriteObjectStart()
+	iter := encoder.mapType.UnsafeIterate(ptr)
+	for i := 0; iter.HasNext(); i++ {
+		if i != 0 {
+			stream.WriteMore()
+		}
+		key, elem := iter.UnsafeNext()
+		encoder.keyEncoder.Encode(key, stream)
+		if stream.indention > 0 {
+			stream.writeTwoBytes(byte(':'), byte(' '))
+		} else {
+			stream.writeByte(':')
+		}
+		encoder.elemEncoder.Encode(elem, stream)
+	}
+	stream.WriteObjectEnd()
+}
+
+func (encoder *mapEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	iter := encoder.mapType.UnsafeIterate(ptr)
+	return !iter.HasNext()
+}
+
+type sortKeysMapEncoder struct {
+	mapType     *reflect2.UnsafeMapType
+	keyEncoder  ValEncoder
+	elemEncoder ValEncoder
+}
+
+func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	if *(*unsafe.Pointer)(ptr) == nil {
+		stream.WriteNil()
+		return
+	}
+	stream.WriteObjectStart()
+	mapIter := encoder.mapType.UnsafeIterate(ptr)
+	subStream := stream.cfg.BorrowStream(nil)
+	subIter := stream.cfg.BorrowIterator(nil)
+	keyValues := encodedKeyValues{}
+	for mapIter.HasNext() {
+		subStream.buf = make([]byte, 0, 64)
+		key, elem := mapIter.UnsafeNext()
+		encoder.keyEncoder.Encode(key, subStream)
+		if subStream.Error != nil && subStream.Error != io.EOF && stream.Error == nil {
+			stream.Error = subStream.Error
+		}
+		encodedKey := subStream.Buffer()
+		subIter.ResetBytes(encodedKey)
+		decodedKey := subIter.ReadString()
+		if stream.indention > 0 {
+			subStream.writeTwoBytes(byte(':'), byte(' '))
+		} else {
+			subStream.writeByte(':')
+		}
+		encoder.elemEncoder.Encode(elem, subStream)
+		keyValues = append(keyValues, encodedKV{
+			key:      decodedKey,
+			keyValue: subStream.Buffer(),
+		})
+	}
+	sort.Sort(keyValues)
+	for i, keyValue := range keyValues {
+		if i != 0 {
+			stream.WriteMore()
+		}
+		stream.Write(keyValue.keyValue)
+	}
+	stream.WriteObjectEnd()
+	stream.cfg.ReturnStream(subStream)
+	stream.cfg.ReturnIterator(subIter)
+}
+
+func (encoder *sortKeysMapEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	iter := encoder.mapType.UnsafeIterate(ptr)
+	return !iter.HasNext()
+}
+
+type encodedKeyValues []encodedKV
+
+type encodedKV struct {
+	key      string
+	keyValue []byte
+}
+
+func (sv encodedKeyValues) Len() int           { return len(sv) }
+func (sv encodedKeyValues) Swap(i, j int)      { sv[i], sv[j] = sv[j], sv[i] }
+func (sv encodedKeyValues) Less(i, j int) bool { return sv[i].key < sv[j].key }
diff --git a/vendor/github.com/json-iterator/go/reflect_marshaler.go b/vendor/github.com/json-iterator/go/reflect_marshaler.go
new file mode 100644
index 0000000..58ac959
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_marshaler.go
@@ -0,0 +1,218 @@
+package jsoniter
+
+import (
+	"encoding"
+	"encoding/json"
+	"github.com/modern-go/reflect2"
+	"unsafe"
+)
+
+var marshalerType = reflect2.TypeOfPtr((*json.Marshaler)(nil)).Elem()
+var unmarshalerType = reflect2.TypeOfPtr((*json.Unmarshaler)(nil)).Elem()
+var textMarshalerType = reflect2.TypeOfPtr((*encoding.TextMarshaler)(nil)).Elem()
+var textUnmarshalerType = reflect2.TypeOfPtr((*encoding.TextUnmarshaler)(nil)).Elem()
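+
+// Types implementing these interfaces round-trip through their own
+// MarshalJSON/MarshalText methods, matching encoding/json. A sketch
+// (illustrative only; Celsius is a hypothetical type):
+//
+//	type Celsius float64
+//
+//	func (c Celsius) MarshalText() ([]byte, error) {
+//		return []byte(fmt.Sprintf("%.1fC", float64(c))), nil
+//	}
+//
+//	// jsoniter.Marshal(Celsius(21.5)) => []byte(`"21.5C"`)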
+
+func createDecoderOfMarshaler(ctx *ctx, typ reflect2.Type) ValDecoder {
+	ptrType := reflect2.PtrTo(typ)
+	if ptrType.Implements(unmarshalerType) {
+		return &referenceDecoder{
+			&unmarshalerDecoder{ptrType},
+		}
+	}
+	if ptrType.Implements(textUnmarshalerType) {
+		return &referenceDecoder{
+			&textUnmarshalerDecoder{ptrType},
+		}
+	}
+	return nil
+}
+
+func createEncoderOfMarshaler(ctx *ctx, typ reflect2.Type) ValEncoder {
+	if typ == marshalerType {
+		checkIsEmpty := createCheckIsEmpty(ctx, typ)
+		var encoder ValEncoder = &directMarshalerEncoder{
+			checkIsEmpty: checkIsEmpty,
+		}
+		return encoder
+	}
+	if typ.Implements(marshalerType) {
+		checkIsEmpty := createCheckIsEmpty(ctx, typ)
+		var encoder ValEncoder = &marshalerEncoder{
+			valType:      typ,
+			checkIsEmpty: checkIsEmpty,
+		}
+		return encoder
+	}
+	ptrType := reflect2.PtrTo(typ)
+	if ctx.prefix != "" && ptrType.Implements(marshalerType) {
+		checkIsEmpty := createCheckIsEmpty(ctx, ptrType)
+		var encoder ValEncoder = &marshalerEncoder{
+			valType:      ptrType,
+			checkIsEmpty: checkIsEmpty,
+		}
+		return &referenceEncoder{encoder}
+	}
+	if typ == textMarshalerType {
+		checkIsEmpty := createCheckIsEmpty(ctx, typ)
+		var encoder ValEncoder = &directTextMarshalerEncoder{
+			checkIsEmpty:  checkIsEmpty,
+			stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
+		}
+		return encoder
+	}
+	if typ.Implements(textMarshalerType) {
+		checkIsEmpty := createCheckIsEmpty(ctx, typ)
+		var encoder ValEncoder = &textMarshalerEncoder{
+			valType:       typ,
+			stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
+			checkIsEmpty:  checkIsEmpty,
+		}
+		return encoder
+	}
+	// if prefix is empty, the type is the root type
+	if ctx.prefix != "" && ptrType.Implements(textMarshalerType) {
+		checkIsEmpty := createCheckIsEmpty(ctx, ptrType)
+		var encoder ValEncoder = &textMarshalerEncoder{
+			valType:       ptrType,
+			stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
+			checkIsEmpty:  checkIsEmpty,
+		}
+		return &referenceEncoder{encoder}
+	}
+	return nil
+}
+
+type marshalerEncoder struct {
+	checkIsEmpty checkIsEmpty
+	valType      reflect2.Type
+}
+
+func (encoder *marshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	obj := encoder.valType.UnsafeIndirect(ptr)
+	if encoder.valType.IsNullable() && reflect2.IsNil(obj) {
+		stream.WriteNil()
+		return
+	}
+	marshaler := obj.(json.Marshaler)
+	bytes, err := marshaler.MarshalJSON()
+	if err != nil {
+		stream.Error = err
+	} else {
+		stream.Write(bytes)
+	}
+}
+
+func (encoder *marshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	return encoder.checkIsEmpty.IsEmpty(ptr)
+}
+
+type directMarshalerEncoder struct {
+	checkIsEmpty checkIsEmpty
+}
+
+func (encoder *directMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	marshaler := *(*json.Marshaler)(ptr)
+	if marshaler == nil {
+		stream.WriteNil()
+		return
+	}
+	bytes, err := marshaler.MarshalJSON()
+	if err != nil {
+		stream.Error = err
+	} else {
+		stream.Write(bytes)
+	}
+}
+
+func (encoder *directMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	return encoder.checkIsEmpty.IsEmpty(ptr)
+}
+
+type textMarshalerEncoder struct {
+	valType       reflect2.Type
+	stringEncoder ValEncoder
+	checkIsEmpty  checkIsEmpty
+}
+
+func (encoder *textMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	obj := encoder.valType.UnsafeIndirect(ptr)
+	if encoder.valType.IsNullable() && reflect2.IsNil(obj) {
+		stream.WriteNil()
+		return
+	}
+	marshaler := (obj).(encoding.TextMarshaler)
+	bytes, err := marshaler.MarshalText()
+	if err != nil {
+		stream.Error = err
+	} else {
+		str := string(bytes)
+		encoder.stringEncoder.Encode(unsafe.Pointer(&str), stream)
+	}
+}
+
+func (encoder *textMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	return encoder.checkIsEmpty.IsEmpty(ptr)
+}
+
+type directTextMarshalerEncoder struct {
+	stringEncoder ValEncoder
+	checkIsEmpty  checkIsEmpty
+}
+
+func (encoder *directTextMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	marshaler := *(*encoding.TextMarshaler)(ptr)
+	if marshaler == nil {
+		stream.WriteNil()
+		return
+	}
+	bytes, err := marshaler.MarshalText()
+	if err != nil {
+		stream.Error = err
+	} else {
+		str := string(bytes)
+		encoder.stringEncoder.Encode(unsafe.Pointer(&str), stream)
+	}
+}
+
+func (encoder *directTextMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	return encoder.checkIsEmpty.IsEmpty(ptr)
+}
+
+type unmarshalerDecoder struct {
+	valType reflect2.Type
+}
+
+func (decoder *unmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	valType := decoder.valType
+	obj := valType.UnsafeIndirect(ptr)
+	unmarshaler := obj.(json.Unmarshaler)
+	iter.nextToken()
+	iter.unreadByte() // skip spaces
+	bytes := iter.SkipAndReturnBytes()
+	err := unmarshaler.UnmarshalJSON(bytes)
+	if err != nil {
+		iter.ReportError("unmarshalerDecoder", err.Error())
+	}
+}
+
+type textUnmarshalerDecoder struct {
+	valType reflect2.Type
+}
+
+func (decoder *textUnmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	valType := decoder.valType
+	obj := valType.UnsafeIndirect(ptr)
+	if reflect2.IsNil(obj) {
+		ptrType := valType.(*reflect2.UnsafePtrType)
+		elemType := ptrType.Elem()
+		elem := elemType.UnsafeNew()
+		ptrType.UnsafeSet(ptr, unsafe.Pointer(&elem))
+		obj = valType.UnsafeIndirect(ptr)
+	}
+	unmarshaler := (obj).(encoding.TextUnmarshaler)
+	str := iter.ReadString()
+	err := unmarshaler.UnmarshalText([]byte(str))
+	if err != nil {
+		iter.ReportError("textUnmarshalerDecoder", err.Error())
+	}
+}
diff --git a/vendor/github.com/json-iterator/go/reflect_native.go b/vendor/github.com/json-iterator/go/reflect_native.go
new file mode 100644
index 0000000..9042eb0
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_native.go
@@ -0,0 +1,451 @@
+package jsoniter
+
+import (
+	"encoding/base64"
+	"reflect"
+	"strconv"
+	"unsafe"
+
+	"github.com/modern-go/reflect2"
+)
+
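+// ptrSize is the width of a pointer in bits: ^uintptr(0)>>63 is 1 on
+// 64-bit platforms and 0 on 32-bit ones, so this evaluates to 64 or 32.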
+const ptrSize = 32 << uintptr(^uintptr(0)>>63)
+
+func createEncoderOfNative(ctx *ctx, typ reflect2.Type) ValEncoder {
+	if typ.Kind() == reflect.Slice && typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 {
+		sliceDecoder := decoderOfSlice(ctx, typ)
+		return &base64Codec{sliceDecoder: sliceDecoder}
+	}
+	typeName := typ.String()
+	kind := typ.Kind()
+	switch kind {
+	case reflect.String:
+		if typeName != "string" {
+			return encoderOfType(ctx, reflect2.TypeOfPtr((*string)(nil)).Elem())
+		}
+		return &stringCodec{}
+	case reflect.Int:
+		if typeName != "int" {
+			return encoderOfType(ctx, reflect2.TypeOfPtr((*int)(nil)).Elem())
+		}
+		if strconv.IntSize == 32 {
+			return &int32Codec{}
+		}
+		return &int64Codec{}
+	case reflect.Int8:
+		if typeName != "int8" {
+			return encoderOfType(ctx, reflect2.TypeOfPtr((*int8)(nil)).Elem())
+		}
+		return &int8Codec{}
+	case reflect.Int16:
+		if typeName != "int16" {
+			return encoderOfType(ctx, reflect2.TypeOfPtr((*int16)(nil)).Elem())
+		}
+		return &int16Codec{}
+	case reflect.Int32:
+		if typeName != "int32" {
+			return encoderOfType(ctx, reflect2.TypeOfPtr((*int32)(nil)).Elem())
+		}
+		return &int32Codec{}
+	case reflect.Int64:
+		if typeName != "int64" {
+			return encoderOfType(ctx, reflect2.TypeOfPtr((*int64)(nil)).Elem())
+		}
+		return &int64Codec{}
+	case reflect.Uint:
+		if typeName != "uint" {
+			return encoderOfType(ctx, reflect2.TypeOfPtr((*uint)(nil)).Elem())
+		}
+		if strconv.IntSize == 32 {
+			return &uint32Codec{}
+		}
+		return &uint64Codec{}
+	case reflect.Uint8:
+		if typeName != "uint8" {
+			return encoderOfType(ctx, reflect2.TypeOfPtr((*uint8)(nil)).Elem())
+		}
+		return &uint8Codec{}
+	case reflect.Uint16:
+		if typeName != "uint16" {
+			return encoderOfType(ctx, reflect2.TypeOfPtr((*uint16)(nil)).Elem())
+		}
+		return &uint16Codec{}
+	case reflect.Uint32:
+		if typeName != "uint32" {
+			return encoderOfType(ctx, reflect2.TypeOfPtr((*uint32)(nil)).Elem())
+		}
+		return &uint32Codec{}
+	case reflect.Uintptr:
+		if typeName != "uintptr" {
+			return encoderOfType(ctx, reflect2.TypeOfPtr((*uintptr)(nil)).Elem())
+		}
+		if ptrSize == 32 {
+			return &uint32Codec{}
+		}
+		return &uint64Codec{}
+	case reflect.Uint64:
+		if typeName != "uint64" {
+			return encoderOfType(ctx, reflect2.TypeOfPtr((*uint64)(nil)).Elem())
+		}
+		return &uint64Codec{}
+	case reflect.Float32:
+		if typeName != "float32" {
+			return encoderOfType(ctx, reflect2.TypeOfPtr((*float32)(nil)).Elem())
+		}
+		return &float32Codec{}
+	case reflect.Float64:
+		if typeName != "float64" {
+			return encoderOfType(ctx, reflect2.TypeOfPtr((*float64)(nil)).Elem())
+		}
+		return &float64Codec{}
+	case reflect.Bool:
+		if typeName != "bool" {
+			return encoderOfType(ctx, reflect2.TypeOfPtr((*bool)(nil)).Elem())
+		}
+		return &boolCodec{}
+	}
+	return nil
+}
+
+func createDecoderOfNative(ctx *ctx, typ reflect2.Type) ValDecoder {
+	if typ.Kind() == reflect.Slice && typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 {
+		sliceDecoder := decoderOfSlice(ctx, typ)
+		return &base64Codec{sliceDecoder: sliceDecoder}
+	}
+	typeName := typ.String()
+	switch typ.Kind() {
+	case reflect.String:
+		if typeName != "string" {
+			return decoderOfType(ctx, reflect2.TypeOfPtr((*string)(nil)).Elem())
+		}
+		return &stringCodec{}
+	case reflect.Int:
+		if typeName != "int" {
+			return decoderOfType(ctx, reflect2.TypeOfPtr((*int)(nil)).Elem())
+		}
+		if strconv.IntSize == 32 {
+			return &int32Codec{}
+		}
+		return &int64Codec{}
+	case reflect.Int8:
+		if typeName != "int8" {
+			return decoderOfType(ctx, reflect2.TypeOfPtr((*int8)(nil)).Elem())
+		}
+		return &int8Codec{}
+	case reflect.Int16:
+		if typeName != "int16" {
+			return decoderOfType(ctx, reflect2.TypeOfPtr((*int16)(nil)).Elem())
+		}
+		return &int16Codec{}
+	case reflect.Int32:
+		if typeName != "int32" {
+			return decoderOfType(ctx, reflect2.TypeOfPtr((*int32)(nil)).Elem())
+		}
+		return &int32Codec{}
+	case reflect.Int64:
+		if typeName != "int64" {
+			return decoderOfType(ctx, reflect2.TypeOfPtr((*int64)(nil)).Elem())
+		}
+		return &int64Codec{}
+	case reflect.Uint:
+		if typeName != "uint" {
+			return decoderOfType(ctx, reflect2.TypeOfPtr((*uint)(nil)).Elem())
+		}
+		if strconv.IntSize == 32 {
+			return &uint32Codec{}
+		}
+		return &uint64Codec{}
+	case reflect.Uint8:
+		if typeName != "uint8" {
+			return decoderOfType(ctx, reflect2.TypeOfPtr((*uint8)(nil)).Elem())
+		}
+		return &uint8Codec{}
+	case reflect.Uint16:
+		if typeName != "uint16" {
+			return decoderOfType(ctx, reflect2.TypeOfPtr((*uint16)(nil)).Elem())
+		}
+		return &uint16Codec{}
+	case reflect.Uint32:
+		if typeName != "uint32" {
+			return decoderOfType(ctx, reflect2.TypeOfPtr((*uint32)(nil)).Elem())
+		}
+		return &uint32Codec{}
+	case reflect.Uintptr:
+		if typeName != "uintptr" {
+			return decoderOfType(ctx, reflect2.TypeOfPtr((*uintptr)(nil)).Elem())
+		}
+		if ptrSize == 32 {
+			return &uint32Codec{}
+		}
+		return &uint64Codec{}
+	case reflect.Uint64:
+		if typeName != "uint64" {
+			return decoderOfType(ctx, reflect2.TypeOfPtr((*uint64)(nil)).Elem())
+		}
+		return &uint64Codec{}
+	case reflect.Float32:
+		if typeName != "float32" {
+			return decoderOfType(ctx, reflect2.TypeOfPtr((*float32)(nil)).Elem())
+		}
+		return &float32Codec{}
+	case reflect.Float64:
+		if typeName != "float64" {
+			return decoderOfType(ctx, reflect2.TypeOfPtr((*float64)(nil)).Elem())
+		}
+		return &float64Codec{}
+	case reflect.Bool:
+		if typeName != "bool" {
+			return decoderOfType(ctx, reflect2.TypeOfPtr((*bool)(nil)).Elem())
+		}
+		return &boolCodec{}
+	}
+	return nil
+}
+
+type stringCodec struct {
+}
+
+func (codec *stringCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	*((*string)(ptr)) = iter.ReadString()
+}
+
+func (codec *stringCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
+	str := *((*string)(ptr))
+	stream.WriteString(str)
+}
+
+func (codec *stringCodec) IsEmpty(ptr unsafe.Pointer) bool {
+	return *((*string)(ptr)) == ""
+}
+
+type int8Codec struct {
+}
+
+func (codec *int8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if !iter.ReadNil() {
+		*((*int8)(ptr)) = iter.ReadInt8()
+	}
+}
+
+func (codec *int8Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+	stream.WriteInt8(*((*int8)(ptr)))
+}
+
+func (codec *int8Codec) IsEmpty(ptr unsafe.Pointer) bool {
+	return *((*int8)(ptr)) == 0
+}
+
+type int16Codec struct {
+}
+
+func (codec *int16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if !iter.ReadNil() {
+		*((*int16)(ptr)) = iter.ReadInt16()
+	}
+}
+
+func (codec *int16Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+	stream.WriteInt16(*((*int16)(ptr)))
+}
+
+func (codec *int16Codec) IsEmpty(ptr unsafe.Pointer) bool {
+	return *((*int16)(ptr)) == 0
+}
+
+type int32Codec struct {
+}
+
+func (codec *int32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if !iter.ReadNil() {
+		*((*int32)(ptr)) = iter.ReadInt32()
+	}
+}
+
+func (codec *int32Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+	stream.WriteInt32(*((*int32)(ptr)))
+}
+
+func (codec *int32Codec) IsEmpty(ptr unsafe.Pointer) bool {
+	return *((*int32)(ptr)) == 0
+}
+
+type int64Codec struct {
+}
+
+func (codec *int64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if !iter.ReadNil() {
+		*((*int64)(ptr)) = iter.ReadInt64()
+	}
+}
+
+func (codec *int64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+	stream.WriteInt64(*((*int64)(ptr)))
+}
+
+func (codec *int64Codec) IsEmpty(ptr unsafe.Pointer) bool {
+	return *((*int64)(ptr)) == 0
+}
+
+type uint8Codec struct {
+}
+
+func (codec *uint8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if !iter.ReadNil() {
+		*((*uint8)(ptr)) = iter.ReadUint8()
+	}
+}
+
+func (codec *uint8Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+	stream.WriteUint8(*((*uint8)(ptr)))
+}
+
+func (codec *uint8Codec) IsEmpty(ptr unsafe.Pointer) bool {
+	return *((*uint8)(ptr)) == 0
+}
+
+type uint16Codec struct {
+}
+
+func (codec *uint16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if !iter.ReadNil() {
+		*((*uint16)(ptr)) = iter.ReadUint16()
+	}
+}
+
+func (codec *uint16Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+	stream.WriteUint16(*((*uint16)(ptr)))
+}
+
+func (codec *uint16Codec) IsEmpty(ptr unsafe.Pointer) bool {
+	return *((*uint16)(ptr)) == 0
+}
+
+type uint32Codec struct {
+}
+
+func (codec *uint32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if !iter.ReadNil() {
+		*((*uint32)(ptr)) = iter.ReadUint32()
+	}
+}
+
+func (codec *uint32Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+	stream.WriteUint32(*((*uint32)(ptr)))
+}
+
+func (codec *uint32Codec) IsEmpty(ptr unsafe.Pointer) bool {
+	return *((*uint32)(ptr)) == 0
+}
+
+type uint64Codec struct {
+}
+
+func (codec *uint64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if !iter.ReadNil() {
+		*((*uint64)(ptr)) = iter.ReadUint64()
+	}
+}
+
+func (codec *uint64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+	stream.WriteUint64(*((*uint64)(ptr)))
+}
+
+func (codec *uint64Codec) IsEmpty(ptr unsafe.Pointer) bool {
+	return *((*uint64)(ptr)) == 0
+}
+
+type float32Codec struct {
+}
+
+func (codec *float32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if !iter.ReadNil() {
+		*((*float32)(ptr)) = iter.ReadFloat32()
+	}
+}
+
+func (codec *float32Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+	stream.WriteFloat32(*((*float32)(ptr)))
+}
+
+func (codec *float32Codec) IsEmpty(ptr unsafe.Pointer) bool {
+	return *((*float32)(ptr)) == 0
+}
+
+type float64Codec struct {
+}
+
+func (codec *float64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if !iter.ReadNil() {
+		*((*float64)(ptr)) = iter.ReadFloat64()
+	}
+}
+
+func (codec *float64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+	stream.WriteFloat64(*((*float64)(ptr)))
+}
+
+func (codec *float64Codec) IsEmpty(ptr unsafe.Pointer) bool {
+	return *((*float64)(ptr)) == 0
+}
+
+type boolCodec struct {
+}
+
+func (codec *boolCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if !iter.ReadNil() {
+		*((*bool)(ptr)) = iter.ReadBool()
+	}
+}
+
+func (codec *boolCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
+	stream.WriteBool(*((*bool)(ptr)))
+}
+
+func (codec *boolCodec) IsEmpty(ptr unsafe.Pointer) bool {
+	return !(*((*bool)(ptr)))
+}
+
+type base64Codec struct {
+	sliceType    *reflect2.UnsafeSliceType
+	sliceDecoder ValDecoder
+}
+
+func (codec *base64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if iter.ReadNil() {
+		codec.sliceType.UnsafeSetNil(ptr)
+		return
+	}
+	switch iter.WhatIsNext() {
+	case StringValue:
+		src := iter.ReadString()
+		dst, err := base64.StdEncoding.DecodeString(src)
+		if err != nil {
+			iter.ReportError("decode base64", err.Error())
+		} else {
+			codec.sliceType.UnsafeSet(ptr, unsafe.Pointer(&dst))
+		}
+	case ArrayValue:
+		codec.sliceDecoder.Decode(ptr, iter)
+	default:
+		iter.ReportError("base64Codec", "invalid input")
+	}
+}
+
+func (codec *base64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+	src := *((*[]byte)(ptr))
+	if len(src) == 0 {
+		stream.WriteNil()
+		return
+	}
+	encoding := base64.StdEncoding
+	stream.writeByte('"')
+	size := encoding.EncodedLen(len(src))
+	buf := make([]byte, size)
+	encoding.Encode(buf, src)
+	stream.buf = append(stream.buf, buf...)
+	stream.writeByte('"')
+}
+
+func (codec *base64Codec) IsEmpty(ptr unsafe.Pointer) bool {
+	return len(*((*[]byte)(ptr))) == 0
+}
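
The base64Codec above is what gives []byte values their encoding/json-compatible behavior: bytes are written as a single base64 string, and on decode either a string or a JSON array is accepted. A minimal sketch of that round trip through the public API (assuming the vendored import path github.com/json-iterator/go):

    package main

    import (
        "fmt"

        jsoniter "github.com/json-iterator/go"
    )

    func main() {
        // Encode: base64Codec writes the bytes as one base64 string.
        out, err := jsoniter.Marshal([]byte("hello"))
        if err != nil {
            panic(err)
        }
        fmt.Println(string(out)) // "aGVsbG8="

        // Decode: the StringValue branch base64-decodes it back.
        var decoded []byte
        if err := jsoniter.Unmarshal(out, &decoded); err != nil {
            panic(err)
        }
        fmt.Println(string(decoded)) // hello
    }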
diff --git a/vendor/github.com/json-iterator/go/reflect_optional.go b/vendor/github.com/json-iterator/go/reflect_optional.go
new file mode 100644
index 0000000..43ec71d
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_optional.go
@@ -0,0 +1,133 @@
+package jsoniter
+
+import (
+	"github.com/modern-go/reflect2"
+	"reflect"
+	"unsafe"
+)
+
+func decoderOfOptional(ctx *ctx, typ reflect2.Type) ValDecoder {
+	ptrType := typ.(*reflect2.UnsafePtrType)
+	elemType := ptrType.Elem()
+	decoder := decoderOfType(ctx, elemType)
+	if ctx.prefix == "" && elemType.Kind() == reflect.Ptr {
+		return &dereferenceDecoder{elemType, decoder}
+	}
+	return &OptionalDecoder{elemType, decoder}
+}
+
+func encoderOfOptional(ctx *ctx, typ reflect2.Type) ValEncoder {
+	ptrType := typ.(*reflect2.UnsafePtrType)
+	elemType := ptrType.Elem()
+	elemEncoder := encoderOfType(ctx, elemType)
+	encoder := &OptionalEncoder{elemEncoder}
+	return encoder
+}
+
+type OptionalDecoder struct {
+	ValueType    reflect2.Type
+	ValueDecoder ValDecoder
+}
+
+func (decoder *OptionalDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if iter.ReadNil() {
+		*((*unsafe.Pointer)(ptr)) = nil
+	} else {
+		if *((*unsafe.Pointer)(ptr)) == nil {
+			// the pointer is nil; we must allocate memory to hold the value
+			newPtr := decoder.ValueType.UnsafeNew()
+			decoder.ValueDecoder.Decode(newPtr, iter)
+			*((*unsafe.Pointer)(ptr)) = newPtr
+		} else {
+			// reuse the existing instance
+			decoder.ValueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter)
+		}
+	}
+}
+
+type dereferenceDecoder struct {
+	// used only to dereference a pointer
+	valueType    reflect2.Type
+	valueDecoder ValDecoder
+}
+
+func (decoder *dereferenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if *((*unsafe.Pointer)(ptr)) == nil {
+		// the pointer is nil; we must allocate memory to hold the value
+		newPtr := decoder.valueType.UnsafeNew()
+		decoder.valueDecoder.Decode(newPtr, iter)
+		*((*unsafe.Pointer)(ptr)) = newPtr
+	} else {
+		// reuse the existing instance
+		decoder.valueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter)
+	}
+}
+
+type OptionalEncoder struct {
+	ValueEncoder ValEncoder
+}
+
+func (encoder *OptionalEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	if *((*unsafe.Pointer)(ptr)) == nil {
+		stream.WriteNil()
+	} else {
+		encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream)
+	}
+}
+
+func (encoder *OptionalEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	return *((*unsafe.Pointer)(ptr)) == nil
+}
+
+type dereferenceEncoder struct {
+	ValueEncoder ValEncoder
+}
+
+func (encoder *dereferenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	if *((*unsafe.Pointer)(ptr)) == nil {
+		stream.WriteNil()
+	} else {
+		encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream)
+	}
+}
+
+func (encoder *dereferenceEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	dePtr := *((*unsafe.Pointer)(ptr))
+	if dePtr == nil {
+		return true
+	}
+	return encoder.ValueEncoder.IsEmpty(dePtr)
+}
+
+func (encoder *dereferenceEncoder) IsEmbeddedPtrNil(ptr unsafe.Pointer) bool {
+	deReferenced := *((*unsafe.Pointer)(ptr))
+	if deReferenced == nil {
+		return true
+	}
+	isEmbeddedPtrNil, converted := encoder.ValueEncoder.(IsEmbeddedPtrNil)
+	if !converted {
+		return false
+	}
+	fieldPtr := unsafe.Pointer(deReferenced)
+	return isEmbeddedPtrNil.IsEmbeddedPtrNil(fieldPtr)
+}
+
+type referenceEncoder struct {
+	encoder ValEncoder
+}
+
+func (encoder *referenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	encoder.encoder.Encode(unsafe.Pointer(&ptr), stream)
+}
+
+func (encoder *referenceEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr))
+}
+
+type referenceDecoder struct {
+	decoder ValDecoder
+}
+
+func (decoder *referenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	decoder.decoder.Decode(unsafe.Pointer(&ptr), iter)
+}
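
OptionalDecoder and dereferenceDecoder above drive pointer fields: JSON null stores nil, while a concrete value either allocates fresh storage via UnsafeNew or reuses the existing pointee. A small illustration against the public API (the Config type is hypothetical):

    package main

    import (
        "fmt"

        jsoniter "github.com/json-iterator/go"
    )

    type Config struct {
        Port *int `json:"port"`
    }

    func main() {
        var a, b Config

        // null takes the ReadNil branch and leaves the pointer nil.
        if err := jsoniter.Unmarshal([]byte(`{"port":null}`), &a); err != nil {
            panic(err)
        }
        fmt.Println(a.Port == nil) // true

        // A value allocates a fresh int for the pointer to hold.
        if err := jsoniter.Unmarshal([]byte(`{"port":8080}`), &b); err != nil {
            panic(err)
        }
        fmt.Println(*b.Port) // 8080
    }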
diff --git a/vendor/github.com/json-iterator/go/reflect_slice.go b/vendor/github.com/json-iterator/go/reflect_slice.go
new file mode 100644
index 0000000..9441d79
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_slice.go
@@ -0,0 +1,99 @@
+package jsoniter
+
+import (
+	"fmt"
+	"github.com/modern-go/reflect2"
+	"io"
+	"unsafe"
+)
+
+func decoderOfSlice(ctx *ctx, typ reflect2.Type) ValDecoder {
+	sliceType := typ.(*reflect2.UnsafeSliceType)
+	decoder := decoderOfType(ctx.append("[sliceElem]"), sliceType.Elem())
+	return &sliceDecoder{sliceType, decoder}
+}
+
+func encoderOfSlice(ctx *ctx, typ reflect2.Type) ValEncoder {
+	sliceType := typ.(*reflect2.UnsafeSliceType)
+	encoder := encoderOfType(ctx.append("[sliceElem]"), sliceType.Elem())
+	return &sliceEncoder{sliceType, encoder}
+}
+
+type sliceEncoder struct {
+	sliceType   *reflect2.UnsafeSliceType
+	elemEncoder ValEncoder
+}
+
+func (encoder *sliceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	if encoder.sliceType.UnsafeIsNil(ptr) {
+		stream.WriteNil()
+		return
+	}
+	length := encoder.sliceType.UnsafeLengthOf(ptr)
+	if length == 0 {
+		stream.WriteEmptyArray()
+		return
+	}
+	stream.WriteArrayStart()
+	encoder.elemEncoder.Encode(encoder.sliceType.UnsafeGetIndex(ptr, 0), stream)
+	for i := 1; i < length; i++ {
+		stream.WriteMore()
+		elemPtr := encoder.sliceType.UnsafeGetIndex(ptr, i)
+		encoder.elemEncoder.Encode(elemPtr, stream)
+	}
+	stream.WriteArrayEnd()
+	if stream.Error != nil && stream.Error != io.EOF {
+		stream.Error = fmt.Errorf("%v: %s", encoder.sliceType, stream.Error.Error())
+	}
+}
+
+func (encoder *sliceEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	return encoder.sliceType.UnsafeLengthOf(ptr) == 0
+}
+
+type sliceDecoder struct {
+	sliceType   *reflect2.UnsafeSliceType
+	elemDecoder ValDecoder
+}
+
+func (decoder *sliceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	decoder.doDecode(ptr, iter)
+	if iter.Error != nil && iter.Error != io.EOF {
+		iter.Error = fmt.Errorf("%v: %s", decoder.sliceType, iter.Error.Error())
+	}
+}
+
+func (decoder *sliceDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) {
+	c := iter.nextToken()
+	sliceType := decoder.sliceType
+	if c == 'n' {
+		iter.skipThreeBytes('u', 'l', 'l')
+		sliceType.UnsafeSetNil(ptr)
+		return
+	}
+	if c != '[' {
+		iter.ReportError("decode slice", "expect [ or n, but found "+string([]byte{c}))
+		return
+	}
+	c = iter.nextToken()
+	if c == ']' {
+		sliceType.UnsafeSet(ptr, sliceType.UnsafeMakeSlice(0, 0))
+		return
+	}
+	iter.unreadByte()
+	sliceType.UnsafeGrow(ptr, 1)
+	elemPtr := sliceType.UnsafeGetIndex(ptr, 0)
+	decoder.elemDecoder.Decode(elemPtr, iter)
+	length := 1
+	for c = iter.nextToken(); c == ','; c = iter.nextToken() {
+		idx := length
+		length += 1
+		sliceType.UnsafeGrow(ptr, length)
+		elemPtr = sliceType.UnsafeGetIndex(ptr, idx)
+		decoder.elemDecoder.Decode(elemPtr, iter)
+	}
+	if c != ']' {
+		iter.ReportError("decode slice", "expect ], but found "+string([]byte{c}))
+		return
+	}
+}
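
doDecode above distinguishes three inputs: null resets the slice to nil, [] yields an empty but non-nil slice, and anything else grows the slice one element at a time. Sketched through the public API:

    package main

    import (
        "fmt"

        jsoniter "github.com/json-iterator/go"
    )

    func main() {
        var s []int

        // 'n' branch: null sets the slice to nil (UnsafeSetNil).
        if err := jsoniter.Unmarshal([]byte(`null`), &s); err != nil {
            panic(err)
        }
        fmt.Println(s == nil) // true

        // ']' right after '[': empty but non-nil (UnsafeMakeSlice(0, 0)).
        if err := jsoniter.Unmarshal([]byte(`[]`), &s); err != nil {
            panic(err)
        }
        fmt.Println(s == nil, len(s)) // false 0

        // Otherwise elements are decoded one by one (UnsafeGrow).
        if err := jsoniter.Unmarshal([]byte(`[1,2,3]`), &s); err != nil {
            panic(err)
        }
        fmt.Println(s) // [1 2 3]
    }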
diff --git a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go
new file mode 100644
index 0000000..355d2d1
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go
@@ -0,0 +1,1048 @@
+package jsoniter
+
+import (
+	"fmt"
+	"io"
+	"strings"
+	"unsafe"
+
+	"github.com/modern-go/reflect2"
+)
+
+func decoderOfStruct(ctx *ctx, typ reflect2.Type) ValDecoder {
+	bindings := map[string]*Binding{}
+	structDescriptor := describeStruct(ctx, typ)
+	for _, binding := range structDescriptor.Fields {
+		for _, fromName := range binding.FromNames {
+			old := bindings[fromName]
+			if old == nil {
+				bindings[fromName] = binding
+				continue
+			}
+			ignoreOld, ignoreNew := resolveConflictBinding(ctx.frozenConfig, old, binding)
+			if ignoreOld {
+				delete(bindings, fromName)
+			}
+			if !ignoreNew {
+				bindings[fromName] = binding
+			}
+		}
+	}
+	fields := map[string]*structFieldDecoder{}
+	for k, binding := range bindings {
+		fields[k] = binding.Decoder.(*structFieldDecoder)
+	}
+
+	if !ctx.caseSensitive() {
+		for k, binding := range bindings {
+			if _, found := fields[strings.ToLower(k)]; !found {
+				fields[strings.ToLower(k)] = binding.Decoder.(*structFieldDecoder)
+			}
+		}
+	}
+
+	return createStructDecoder(ctx, typ, fields)
+}
+
+func createStructDecoder(ctx *ctx, typ reflect2.Type, fields map[string]*structFieldDecoder) ValDecoder {
+	if ctx.disallowUnknownFields {
+		return &generalStructDecoder{typ: typ, fields: fields, disallowUnknownFields: true}
+	}
+	knownHash := map[int64]struct{}{
+		0: {},
+	}
+
+	switch len(fields) {
+	case 0:
+		return &skipObjectDecoder{typ}
+	case 1:
+		for fieldName, fieldDecoder := range fields {
+			fieldHash := calcHash(fieldName, ctx.caseSensitive())
+			_, known := knownHash[fieldHash]
+			if known {
+				return &generalStructDecoder{typ, fields, false}
+			}
+			knownHash[fieldHash] = struct{}{}
+			return &oneFieldStructDecoder{typ, fieldHash, fieldDecoder}
+		}
+	case 2:
+		var fieldHash1 int64
+		var fieldHash2 int64
+		var fieldDecoder1 *structFieldDecoder
+		var fieldDecoder2 *structFieldDecoder
+		for fieldName, fieldDecoder := range fields {
+			fieldHash := calcHash(fieldName, ctx.caseSensitive())
+			_, known := knownHash[fieldHash]
+			if known {
+				return &generalStructDecoder{typ, fields, false}
+			}
+			knownHash[fieldHash] = struct{}{}
+			if fieldHash1 == 0 {
+				fieldHash1 = fieldHash
+				fieldDecoder1 = fieldDecoder
+			} else {
+				fieldHash2 = fieldHash
+				fieldDecoder2 = fieldDecoder
+			}
+		}
+		return &twoFieldsStructDecoder{typ, fieldHash1, fieldDecoder1, fieldHash2, fieldDecoder2}
+	case 3:
+		var fieldName1 int64
+		var fieldName2 int64
+		var fieldName3 int64
+		var fieldDecoder1 *structFieldDecoder
+		var fieldDecoder2 *structFieldDecoder
+		var fieldDecoder3 *structFieldDecoder
+		for fieldName, fieldDecoder := range fields {
+			fieldHash := calcHash(fieldName, ctx.caseSensitive())
+			_, known := knownHash[fieldHash]
+			if known {
+				return &generalStructDecoder{typ, fields, false}
+			}
+			knownHash[fieldHash] = struct{}{}
+			if fieldName1 == 0 {
+				fieldName1 = fieldHash
+				fieldDecoder1 = fieldDecoder
+			} else if fieldName2 == 0 {
+				fieldName2 = fieldHash
+				fieldDecoder2 = fieldDecoder
+			} else {
+				fieldName3 = fieldHash
+				fieldDecoder3 = fieldDecoder
+			}
+		}
+		return &threeFieldsStructDecoder{typ,
+			fieldName1, fieldDecoder1,
+			fieldName2, fieldDecoder2,
+			fieldName3, fieldDecoder3}
+	case 4:
+		var fieldName1 int64
+		var fieldName2 int64
+		var fieldName3 int64
+		var fieldName4 int64
+		var fieldDecoder1 *structFieldDecoder
+		var fieldDecoder2 *structFieldDecoder
+		var fieldDecoder3 *structFieldDecoder
+		var fieldDecoder4 *structFieldDecoder
+		for fieldName, fieldDecoder := range fields {
+			fieldHash := calcHash(fieldName, ctx.caseSensitive())
+			_, known := knownHash[fieldHash]
+			if known {
+				return &generalStructDecoder{typ, fields, false}
+			}
+			knownHash[fieldHash] = struct{}{}
+			if fieldName1 == 0 {
+				fieldName1 = fieldHash
+				fieldDecoder1 = fieldDecoder
+			} else if fieldName2 == 0 {
+				fieldName2 = fieldHash
+				fieldDecoder2 = fieldDecoder
+			} else if fieldName3 == 0 {
+				fieldName3 = fieldHash
+				fieldDecoder3 = fieldDecoder
+			} else {
+				fieldName4 = fieldHash
+				fieldDecoder4 = fieldDecoder
+			}
+		}
+		return &fourFieldsStructDecoder{typ,
+			fieldName1, fieldDecoder1,
+			fieldName2, fieldDecoder2,
+			fieldName3, fieldDecoder3,
+			fieldName4, fieldDecoder4}
+	case 5:
+		var fieldName1 int64
+		var fieldName2 int64
+		var fieldName3 int64
+		var fieldName4 int64
+		var fieldName5 int64
+		var fieldDecoder1 *structFieldDecoder
+		var fieldDecoder2 *structFieldDecoder
+		var fieldDecoder3 *structFieldDecoder
+		var fieldDecoder4 *structFieldDecoder
+		var fieldDecoder5 *structFieldDecoder
+		for fieldName, fieldDecoder := range fields {
+			fieldHash := calcHash(fieldName, ctx.caseSensitive())
+			_, known := knownHash[fieldHash]
+			if known {
+				return &generalStructDecoder{typ, fields, false}
+			}
+			knownHash[fieldHash] = struct{}{}
+			if fieldName1 == 0 {
+				fieldName1 = fieldHash
+				fieldDecoder1 = fieldDecoder
+			} else if fieldName2 == 0 {
+				fieldName2 = fieldHash
+				fieldDecoder2 = fieldDecoder
+			} else if fieldName3 == 0 {
+				fieldName3 = fieldHash
+				fieldDecoder3 = fieldDecoder
+			} else if fieldName4 == 0 {
+				fieldName4 = fieldHash
+				fieldDecoder4 = fieldDecoder
+			} else {
+				fieldName5 = fieldHash
+				fieldDecoder5 = fieldDecoder
+			}
+		}
+		return &fiveFieldsStructDecoder{typ,
+			fieldName1, fieldDecoder1,
+			fieldName2, fieldDecoder2,
+			fieldName3, fieldDecoder3,
+			fieldName4, fieldDecoder4,
+			fieldName5, fieldDecoder5}
+	case 6:
+		var fieldName1 int64
+		var fieldName2 int64
+		var fieldName3 int64
+		var fieldName4 int64
+		var fieldName5 int64
+		var fieldName6 int64
+		var fieldDecoder1 *structFieldDecoder
+		var fieldDecoder2 *structFieldDecoder
+		var fieldDecoder3 *structFieldDecoder
+		var fieldDecoder4 *structFieldDecoder
+		var fieldDecoder5 *structFieldDecoder
+		var fieldDecoder6 *structFieldDecoder
+		for fieldName, fieldDecoder := range fields {
+			fieldHash := calcHash(fieldName, ctx.caseSensitive())
+			_, known := knownHash[fieldHash]
+			if known {
+				return &generalStructDecoder{typ, fields, false}
+			}
+			knownHash[fieldHash] = struct{}{}
+			if fieldName1 == 0 {
+				fieldName1 = fieldHash
+				fieldDecoder1 = fieldDecoder
+			} else if fieldName2 == 0 {
+				fieldName2 = fieldHash
+				fieldDecoder2 = fieldDecoder
+			} else if fieldName3 == 0 {
+				fieldName3 = fieldHash
+				fieldDecoder3 = fieldDecoder
+			} else if fieldName4 == 0 {
+				fieldName4 = fieldHash
+				fieldDecoder4 = fieldDecoder
+			} else if fieldName5 == 0 {
+				fieldName5 = fieldHash
+				fieldDecoder5 = fieldDecoder
+			} else {
+				fieldName6 = fieldHash
+				fieldDecoder6 = fieldDecoder
+			}
+		}
+		return &sixFieldsStructDecoder{typ,
+			fieldName1, fieldDecoder1,
+			fieldName2, fieldDecoder2,
+			fieldName3, fieldDecoder3,
+			fieldName4, fieldDecoder4,
+			fieldName5, fieldDecoder5,
+			fieldName6, fieldDecoder6}
+	case 7:
+		var fieldName1 int64
+		var fieldName2 int64
+		var fieldName3 int64
+		var fieldName4 int64
+		var fieldName5 int64
+		var fieldName6 int64
+		var fieldName7 int64
+		var fieldDecoder1 *structFieldDecoder
+		var fieldDecoder2 *structFieldDecoder
+		var fieldDecoder3 *structFieldDecoder
+		var fieldDecoder4 *structFieldDecoder
+		var fieldDecoder5 *structFieldDecoder
+		var fieldDecoder6 *structFieldDecoder
+		var fieldDecoder7 *structFieldDecoder
+		for fieldName, fieldDecoder := range fields {
+			fieldHash := calcHash(fieldName, ctx.caseSensitive())
+			_, known := knownHash[fieldHash]
+			if known {
+				return &generalStructDecoder{typ, fields, false}
+			}
+			knownHash[fieldHash] = struct{}{}
+			if fieldName1 == 0 {
+				fieldName1 = fieldHash
+				fieldDecoder1 = fieldDecoder
+			} else if fieldName2 == 0 {
+				fieldName2 = fieldHash
+				fieldDecoder2 = fieldDecoder
+			} else if fieldName3 == 0 {
+				fieldName3 = fieldHash
+				fieldDecoder3 = fieldDecoder
+			} else if fieldName4 == 0 {
+				fieldName4 = fieldHash
+				fieldDecoder4 = fieldDecoder
+			} else if fieldName5 == 0 {
+				fieldName5 = fieldHash
+				fieldDecoder5 = fieldDecoder
+			} else if fieldName6 == 0 {
+				fieldName6 = fieldHash
+				fieldDecoder6 = fieldDecoder
+			} else {
+				fieldName7 = fieldHash
+				fieldDecoder7 = fieldDecoder
+			}
+		}
+		return &sevenFieldsStructDecoder{typ,
+			fieldName1, fieldDecoder1,
+			fieldName2, fieldDecoder2,
+			fieldName3, fieldDecoder3,
+			fieldName4, fieldDecoder4,
+			fieldName5, fieldDecoder5,
+			fieldName6, fieldDecoder6,
+			fieldName7, fieldDecoder7}
+	case 8:
+		var fieldName1 int64
+		var fieldName2 int64
+		var fieldName3 int64
+		var fieldName4 int64
+		var fieldName5 int64
+		var fieldName6 int64
+		var fieldName7 int64
+		var fieldName8 int64
+		var fieldDecoder1 *structFieldDecoder
+		var fieldDecoder2 *structFieldDecoder
+		var fieldDecoder3 *structFieldDecoder
+		var fieldDecoder4 *structFieldDecoder
+		var fieldDecoder5 *structFieldDecoder
+		var fieldDecoder6 *structFieldDecoder
+		var fieldDecoder7 *structFieldDecoder
+		var fieldDecoder8 *structFieldDecoder
+		for fieldName, fieldDecoder := range fields {
+			fieldHash := calcHash(fieldName, ctx.caseSensitive())
+			_, known := knownHash[fieldHash]
+			if known {
+				return &generalStructDecoder{typ, fields, false}
+			}
+			knownHash[fieldHash] = struct{}{}
+			if fieldName1 == 0 {
+				fieldName1 = fieldHash
+				fieldDecoder1 = fieldDecoder
+			} else if fieldName2 == 0 {
+				fieldName2 = fieldHash
+				fieldDecoder2 = fieldDecoder
+			} else if fieldName3 == 0 {
+				fieldName3 = fieldHash
+				fieldDecoder3 = fieldDecoder
+			} else if fieldName4 == 0 {
+				fieldName4 = fieldHash
+				fieldDecoder4 = fieldDecoder
+			} else if fieldName5 == 0 {
+				fieldName5 = fieldHash
+				fieldDecoder5 = fieldDecoder
+			} else if fieldName6 == 0 {
+				fieldName6 = fieldHash
+				fieldDecoder6 = fieldDecoder
+			} else if fieldName7 == 0 {
+				fieldName7 = fieldHash
+				fieldDecoder7 = fieldDecoder
+			} else {
+				fieldName8 = fieldHash
+				fieldDecoder8 = fieldDecoder
+			}
+		}
+		return &eightFieldsStructDecoder{typ,
+			fieldName1, fieldDecoder1,
+			fieldName2, fieldDecoder2,
+			fieldName3, fieldDecoder3,
+			fieldName4, fieldDecoder4,
+			fieldName5, fieldDecoder5,
+			fieldName6, fieldDecoder6,
+			fieldName7, fieldDecoder7,
+			fieldName8, fieldDecoder8}
+	case 9:
+		var fieldName1 int64
+		var fieldName2 int64
+		var fieldName3 int64
+		var fieldName4 int64
+		var fieldName5 int64
+		var fieldName6 int64
+		var fieldName7 int64
+		var fieldName8 int64
+		var fieldName9 int64
+		var fieldDecoder1 *structFieldDecoder
+		var fieldDecoder2 *structFieldDecoder
+		var fieldDecoder3 *structFieldDecoder
+		var fieldDecoder4 *structFieldDecoder
+		var fieldDecoder5 *structFieldDecoder
+		var fieldDecoder6 *structFieldDecoder
+		var fieldDecoder7 *structFieldDecoder
+		var fieldDecoder8 *structFieldDecoder
+		var fieldDecoder9 *structFieldDecoder
+		for fieldName, fieldDecoder := range fields {
+			fieldHash := calcHash(fieldName, ctx.caseSensitive())
+			_, known := knownHash[fieldHash]
+			if known {
+				return &generalStructDecoder{typ, fields, false}
+			}
+			knownHash[fieldHash] = struct{}{}
+			if fieldName1 == 0 {
+				fieldName1 = fieldHash
+				fieldDecoder1 = fieldDecoder
+			} else if fieldName2 == 0 {
+				fieldName2 = fieldHash
+				fieldDecoder2 = fieldDecoder
+			} else if fieldName3 == 0 {
+				fieldName3 = fieldHash
+				fieldDecoder3 = fieldDecoder
+			} else if fieldName4 == 0 {
+				fieldName4 = fieldHash
+				fieldDecoder4 = fieldDecoder
+			} else if fieldName5 == 0 {
+				fieldName5 = fieldHash
+				fieldDecoder5 = fieldDecoder
+			} else if fieldName6 == 0 {
+				fieldName6 = fieldHash
+				fieldDecoder6 = fieldDecoder
+			} else if fieldName7 == 0 {
+				fieldName7 = fieldHash
+				fieldDecoder7 = fieldDecoder
+			} else if fieldName8 == 0 {
+				fieldName8 = fieldHash
+				fieldDecoder8 = fieldDecoder
+			} else {
+				fieldName9 = fieldHash
+				fieldDecoder9 = fieldDecoder
+			}
+		}
+		return &nineFieldsStructDecoder{typ,
+			fieldName1, fieldDecoder1,
+			fieldName2, fieldDecoder2,
+			fieldName3, fieldDecoder3,
+			fieldName4, fieldDecoder4,
+			fieldName5, fieldDecoder5,
+			fieldName6, fieldDecoder6,
+			fieldName7, fieldDecoder7,
+			fieldName8, fieldDecoder8,
+			fieldName9, fieldDecoder9}
+	case 10:
+		var fieldName1 int64
+		var fieldName2 int64
+		var fieldName3 int64
+		var fieldName4 int64
+		var fieldName5 int64
+		var fieldName6 int64
+		var fieldName7 int64
+		var fieldName8 int64
+		var fieldName9 int64
+		var fieldName10 int64
+		var fieldDecoder1 *structFieldDecoder
+		var fieldDecoder2 *structFieldDecoder
+		var fieldDecoder3 *structFieldDecoder
+		var fieldDecoder4 *structFieldDecoder
+		var fieldDecoder5 *structFieldDecoder
+		var fieldDecoder6 *structFieldDecoder
+		var fieldDecoder7 *structFieldDecoder
+		var fieldDecoder8 *structFieldDecoder
+		var fieldDecoder9 *structFieldDecoder
+		var fieldDecoder10 *structFieldDecoder
+		for fieldName, fieldDecoder := range fields {
+			fieldHash := calcHash(fieldName, ctx.caseSensitive())
+			_, known := knownHash[fieldHash]
+			if known {
+				return &generalStructDecoder{typ, fields, false}
+			}
+			knownHash[fieldHash] = struct{}{}
+			if fieldName1 == 0 {
+				fieldName1 = fieldHash
+				fieldDecoder1 = fieldDecoder
+			} else if fieldName2 == 0 {
+				fieldName2 = fieldHash
+				fieldDecoder2 = fieldDecoder
+			} else if fieldName3 == 0 {
+				fieldName3 = fieldHash
+				fieldDecoder3 = fieldDecoder
+			} else if fieldName4 == 0 {
+				fieldName4 = fieldHash
+				fieldDecoder4 = fieldDecoder
+			} else if fieldName5 == 0 {
+				fieldName5 = fieldHash
+				fieldDecoder5 = fieldDecoder
+			} else if fieldName6 == 0 {
+				fieldName6 = fieldHash
+				fieldDecoder6 = fieldDecoder
+			} else if fieldName7 == 0 {
+				fieldName7 = fieldHash
+				fieldDecoder7 = fieldDecoder
+			} else if fieldName8 == 0 {
+				fieldName8 = fieldHash
+				fieldDecoder8 = fieldDecoder
+			} else if fieldName9 == 0 {
+				fieldName9 = fieldHash
+				fieldDecoder9 = fieldDecoder
+			} else {
+				fieldName10 = fieldHash
+				fieldDecoder10 = fieldDecoder
+			}
+		}
+		return &tenFieldsStructDecoder{typ,
+			fieldName1, fieldDecoder1,
+			fieldName2, fieldDecoder2,
+			fieldName3, fieldDecoder3,
+			fieldName4, fieldDecoder4,
+			fieldName5, fieldDecoder5,
+			fieldName6, fieldDecoder6,
+			fieldName7, fieldDecoder7,
+			fieldName8, fieldDecoder8,
+			fieldName9, fieldDecoder9,
+			fieldName10, fieldDecoder10}
+	}
+	return &generalStructDecoder{typ, fields, false}
+}
+
+type generalStructDecoder struct {
+	typ                   reflect2.Type
+	fields                map[string]*structFieldDecoder
+	disallowUnknownFields bool
+}
+
+func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if !iter.readObjectStart() {
+		return
+	}
+	var c byte
+	for c = ','; c == ','; c = iter.nextToken() {
+		decoder.decodeOneField(ptr, iter)
+	}
+	if iter.Error != nil && iter.Error != io.EOF {
+		iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+	}
+	if c != '}' {
+		iter.ReportError("struct Decode", `expect }, but found `+string([]byte{c}))
+	}
+}
+
+func (decoder *generalStructDecoder) decodeOneField(ptr unsafe.Pointer, iter *Iterator) {
+	var field string
+	var fieldDecoder *structFieldDecoder
+	if iter.cfg.objectFieldMustBeSimpleString {
+		fieldBytes := iter.ReadStringAsSlice()
+		field = *(*string)(unsafe.Pointer(&fieldBytes))
+		fieldDecoder = decoder.fields[field]
+		if fieldDecoder == nil && !iter.cfg.caseSensitive {
+			fieldDecoder = decoder.fields[strings.ToLower(field)]
+		}
+	} else {
+		field = iter.ReadString()
+		fieldDecoder = decoder.fields[field]
+		if fieldDecoder == nil && !iter.cfg.caseSensitive {
+			fieldDecoder = decoder.fields[strings.ToLower(field)]
+		}
+	}
+	if fieldDecoder == nil {
+		msg := "found unknown field: " + field
+		if decoder.disallowUnknownFields {
+			iter.ReportError("ReadObject", msg)
+		}
+		c := iter.nextToken()
+		if c != ':' {
+			iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
+		}
+		iter.Skip()
+		return
+	}
+	c := iter.nextToken()
+	if c != ':' {
+		iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
+	}
+	fieldDecoder.Decode(ptr, iter)
+}
+
+type skipObjectDecoder struct {
+	typ reflect2.Type
+}
+
+func (decoder *skipObjectDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	valueType := iter.WhatIsNext()
+	if valueType != ObjectValue && valueType != NilValue {
+		iter.ReportError("skipObjectDecoder", "expect object or null")
+		return
+	}
+	iter.Skip()
+}
+
+type oneFieldStructDecoder struct {
+	typ          reflect2.Type
+	fieldHash    int64
+	fieldDecoder *structFieldDecoder
+}
+
+func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if !iter.readObjectStart() {
+		return
+	}
+	for {
+		if iter.readFieldHash() == decoder.fieldHash {
+			decoder.fieldDecoder.Decode(ptr, iter)
+		} else {
+			iter.Skip()
+		}
+		if iter.isObjectEnd() {
+			break
+		}
+	}
+	if iter.Error != nil && iter.Error != io.EOF {
+		iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+	}
+}
+
+type twoFieldsStructDecoder struct {
+	typ           reflect2.Type
+	fieldHash1    int64
+	fieldDecoder1 *structFieldDecoder
+	fieldHash2    int64
+	fieldDecoder2 *structFieldDecoder
+}
+
+func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if !iter.readObjectStart() {
+		return
+	}
+	for {
+		switch iter.readFieldHash() {
+		case decoder.fieldHash1:
+			decoder.fieldDecoder1.Decode(ptr, iter)
+		case decoder.fieldHash2:
+			decoder.fieldDecoder2.Decode(ptr, iter)
+		default:
+			iter.Skip()
+		}
+		if iter.isObjectEnd() {
+			break
+		}
+	}
+	if iter.Error != nil && iter.Error != io.EOF {
+		iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+	}
+}
+
+type threeFieldsStructDecoder struct {
+	typ           reflect2.Type
+	fieldHash1    int64
+	fieldDecoder1 *structFieldDecoder
+	fieldHash2    int64
+	fieldDecoder2 *structFieldDecoder
+	fieldHash3    int64
+	fieldDecoder3 *structFieldDecoder
+}
+
+func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if !iter.readObjectStart() {
+		return
+	}
+	for {
+		switch iter.readFieldHash() {
+		case decoder.fieldHash1:
+			decoder.fieldDecoder1.Decode(ptr, iter)
+		case decoder.fieldHash2:
+			decoder.fieldDecoder2.Decode(ptr, iter)
+		case decoder.fieldHash3:
+			decoder.fieldDecoder3.Decode(ptr, iter)
+		default:
+			iter.Skip()
+		}
+		if iter.isObjectEnd() {
+			break
+		}
+	}
+	if iter.Error != nil && iter.Error != io.EOF {
+		iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+	}
+}
+
+type fourFieldsStructDecoder struct {
+	typ           reflect2.Type
+	fieldHash1    int64
+	fieldDecoder1 *structFieldDecoder
+	fieldHash2    int64
+	fieldDecoder2 *structFieldDecoder
+	fieldHash3    int64
+	fieldDecoder3 *structFieldDecoder
+	fieldHash4    int64
+	fieldDecoder4 *structFieldDecoder
+}
+
+func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if !iter.readObjectStart() {
+		return
+	}
+	for {
+		switch iter.readFieldHash() {
+		case decoder.fieldHash1:
+			decoder.fieldDecoder1.Decode(ptr, iter)
+		case decoder.fieldHash2:
+			decoder.fieldDecoder2.Decode(ptr, iter)
+		case decoder.fieldHash3:
+			decoder.fieldDecoder3.Decode(ptr, iter)
+		case decoder.fieldHash4:
+			decoder.fieldDecoder4.Decode(ptr, iter)
+		default:
+			iter.Skip()
+		}
+		if iter.isObjectEnd() {
+			break
+		}
+	}
+	if iter.Error != nil && iter.Error != io.EOF {
+		iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+	}
+}
+
+type fiveFieldsStructDecoder struct {
+	typ           reflect2.Type
+	fieldHash1    int64
+	fieldDecoder1 *structFieldDecoder
+	fieldHash2    int64
+	fieldDecoder2 *structFieldDecoder
+	fieldHash3    int64
+	fieldDecoder3 *structFieldDecoder
+	fieldHash4    int64
+	fieldDecoder4 *structFieldDecoder
+	fieldHash5    int64
+	fieldDecoder5 *structFieldDecoder
+}
+
+func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if !iter.readObjectStart() {
+		return
+	}
+	for {
+		switch iter.readFieldHash() {
+		case decoder.fieldHash1:
+			decoder.fieldDecoder1.Decode(ptr, iter)
+		case decoder.fieldHash2:
+			decoder.fieldDecoder2.Decode(ptr, iter)
+		case decoder.fieldHash3:
+			decoder.fieldDecoder3.Decode(ptr, iter)
+		case decoder.fieldHash4:
+			decoder.fieldDecoder4.Decode(ptr, iter)
+		case decoder.fieldHash5:
+			decoder.fieldDecoder5.Decode(ptr, iter)
+		default:
+			iter.Skip()
+		}
+		if iter.isObjectEnd() {
+			break
+		}
+	}
+	if iter.Error != nil && iter.Error != io.EOF {
+		iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+	}
+}
+
+type sixFieldsStructDecoder struct {
+	typ           reflect2.Type
+	fieldHash1    int64
+	fieldDecoder1 *structFieldDecoder
+	fieldHash2    int64
+	fieldDecoder2 *structFieldDecoder
+	fieldHash3    int64
+	fieldDecoder3 *structFieldDecoder
+	fieldHash4    int64
+	fieldDecoder4 *structFieldDecoder
+	fieldHash5    int64
+	fieldDecoder5 *structFieldDecoder
+	fieldHash6    int64
+	fieldDecoder6 *structFieldDecoder
+}
+
+func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if !iter.readObjectStart() {
+		return
+	}
+	for {
+		switch iter.readFieldHash() {
+		case decoder.fieldHash1:
+			decoder.fieldDecoder1.Decode(ptr, iter)
+		case decoder.fieldHash2:
+			decoder.fieldDecoder2.Decode(ptr, iter)
+		case decoder.fieldHash3:
+			decoder.fieldDecoder3.Decode(ptr, iter)
+		case decoder.fieldHash4:
+			decoder.fieldDecoder4.Decode(ptr, iter)
+		case decoder.fieldHash5:
+			decoder.fieldDecoder5.Decode(ptr, iter)
+		case decoder.fieldHash6:
+			decoder.fieldDecoder6.Decode(ptr, iter)
+		default:
+			iter.Skip()
+		}
+		if iter.isObjectEnd() {
+			break
+		}
+	}
+	if iter.Error != nil && iter.Error != io.EOF {
+		iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+	}
+}
+
+type sevenFieldsStructDecoder struct {
+	typ           reflect2.Type
+	fieldHash1    int64
+	fieldDecoder1 *structFieldDecoder
+	fieldHash2    int64
+	fieldDecoder2 *structFieldDecoder
+	fieldHash3    int64
+	fieldDecoder3 *structFieldDecoder
+	fieldHash4    int64
+	fieldDecoder4 *structFieldDecoder
+	fieldHash5    int64
+	fieldDecoder5 *structFieldDecoder
+	fieldHash6    int64
+	fieldDecoder6 *structFieldDecoder
+	fieldHash7    int64
+	fieldDecoder7 *structFieldDecoder
+}
+
+func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if !iter.readObjectStart() {
+		return
+	}
+	for {
+		switch iter.readFieldHash() {
+		case decoder.fieldHash1:
+			decoder.fieldDecoder1.Decode(ptr, iter)
+		case decoder.fieldHash2:
+			decoder.fieldDecoder2.Decode(ptr, iter)
+		case decoder.fieldHash3:
+			decoder.fieldDecoder3.Decode(ptr, iter)
+		case decoder.fieldHash4:
+			decoder.fieldDecoder4.Decode(ptr, iter)
+		case decoder.fieldHash5:
+			decoder.fieldDecoder5.Decode(ptr, iter)
+		case decoder.fieldHash6:
+			decoder.fieldDecoder6.Decode(ptr, iter)
+		case decoder.fieldHash7:
+			decoder.fieldDecoder7.Decode(ptr, iter)
+		default:
+			iter.Skip()
+		}
+		if iter.isObjectEnd() {
+			break
+		}
+	}
+	if iter.Error != nil && iter.Error != io.EOF {
+		iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+	}
+}
+
+type eightFieldsStructDecoder struct {
+	typ           reflect2.Type
+	fieldHash1    int64
+	fieldDecoder1 *structFieldDecoder
+	fieldHash2    int64
+	fieldDecoder2 *structFieldDecoder
+	fieldHash3    int64
+	fieldDecoder3 *structFieldDecoder
+	fieldHash4    int64
+	fieldDecoder4 *structFieldDecoder
+	fieldHash5    int64
+	fieldDecoder5 *structFieldDecoder
+	fieldHash6    int64
+	fieldDecoder6 *structFieldDecoder
+	fieldHash7    int64
+	fieldDecoder7 *structFieldDecoder
+	fieldHash8    int64
+	fieldDecoder8 *structFieldDecoder
+}
+
+func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if !iter.readObjectStart() {
+		return
+	}
+	for {
+		switch iter.readFieldHash() {
+		case decoder.fieldHash1:
+			decoder.fieldDecoder1.Decode(ptr, iter)
+		case decoder.fieldHash2:
+			decoder.fieldDecoder2.Decode(ptr, iter)
+		case decoder.fieldHash3:
+			decoder.fieldDecoder3.Decode(ptr, iter)
+		case decoder.fieldHash4:
+			decoder.fieldDecoder4.Decode(ptr, iter)
+		case decoder.fieldHash5:
+			decoder.fieldDecoder5.Decode(ptr, iter)
+		case decoder.fieldHash6:
+			decoder.fieldDecoder6.Decode(ptr, iter)
+		case decoder.fieldHash7:
+			decoder.fieldDecoder7.Decode(ptr, iter)
+		case decoder.fieldHash8:
+			decoder.fieldDecoder8.Decode(ptr, iter)
+		default:
+			iter.Skip()
+		}
+		if iter.isObjectEnd() {
+			break
+		}
+	}
+	if iter.Error != nil && iter.Error != io.EOF {
+		iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+	}
+}
+
+type nineFieldsStructDecoder struct {
+	typ           reflect2.Type
+	fieldHash1    int64
+	fieldDecoder1 *structFieldDecoder
+	fieldHash2    int64
+	fieldDecoder2 *structFieldDecoder
+	fieldHash3    int64
+	fieldDecoder3 *structFieldDecoder
+	fieldHash4    int64
+	fieldDecoder4 *structFieldDecoder
+	fieldHash5    int64
+	fieldDecoder5 *structFieldDecoder
+	fieldHash6    int64
+	fieldDecoder6 *structFieldDecoder
+	fieldHash7    int64
+	fieldDecoder7 *structFieldDecoder
+	fieldHash8    int64
+	fieldDecoder8 *structFieldDecoder
+	fieldHash9    int64
+	fieldDecoder9 *structFieldDecoder
+}
+
+func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if !iter.readObjectStart() {
+		return
+	}
+	for {
+		switch iter.readFieldHash() {
+		case decoder.fieldHash1:
+			decoder.fieldDecoder1.Decode(ptr, iter)
+		case decoder.fieldHash2:
+			decoder.fieldDecoder2.Decode(ptr, iter)
+		case decoder.fieldHash3:
+			decoder.fieldDecoder3.Decode(ptr, iter)
+		case decoder.fieldHash4:
+			decoder.fieldDecoder4.Decode(ptr, iter)
+		case decoder.fieldHash5:
+			decoder.fieldDecoder5.Decode(ptr, iter)
+		case decoder.fieldHash6:
+			decoder.fieldDecoder6.Decode(ptr, iter)
+		case decoder.fieldHash7:
+			decoder.fieldDecoder7.Decode(ptr, iter)
+		case decoder.fieldHash8:
+			decoder.fieldDecoder8.Decode(ptr, iter)
+		case decoder.fieldHash9:
+			decoder.fieldDecoder9.Decode(ptr, iter)
+		default:
+			iter.Skip()
+		}
+		if iter.isObjectEnd() {
+			break
+		}
+	}
+	if iter.Error != nil && iter.Error != io.EOF {
+		iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+	}
+}
+
+type tenFieldsStructDecoder struct {
+	typ            reflect2.Type
+	fieldHash1     int64
+	fieldDecoder1  *structFieldDecoder
+	fieldHash2     int64
+	fieldDecoder2  *structFieldDecoder
+	fieldHash3     int64
+	fieldDecoder3  *structFieldDecoder
+	fieldHash4     int64
+	fieldDecoder4  *structFieldDecoder
+	fieldHash5     int64
+	fieldDecoder5  *structFieldDecoder
+	fieldHash6     int64
+	fieldDecoder6  *structFieldDecoder
+	fieldHash7     int64
+	fieldDecoder7  *structFieldDecoder
+	fieldHash8     int64
+	fieldDecoder8  *structFieldDecoder
+	fieldHash9     int64
+	fieldDecoder9  *structFieldDecoder
+	fieldHash10    int64
+	fieldDecoder10 *structFieldDecoder
+}
+
+func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if !iter.readObjectStart() {
+		return
+	}
+	for {
+		switch iter.readFieldHash() {
+		case decoder.fieldHash1:
+			decoder.fieldDecoder1.Decode(ptr, iter)
+		case decoder.fieldHash2:
+			decoder.fieldDecoder2.Decode(ptr, iter)
+		case decoder.fieldHash3:
+			decoder.fieldDecoder3.Decode(ptr, iter)
+		case decoder.fieldHash4:
+			decoder.fieldDecoder4.Decode(ptr, iter)
+		case decoder.fieldHash5:
+			decoder.fieldDecoder5.Decode(ptr, iter)
+		case decoder.fieldHash6:
+			decoder.fieldDecoder6.Decode(ptr, iter)
+		case decoder.fieldHash7:
+			decoder.fieldDecoder7.Decode(ptr, iter)
+		case decoder.fieldHash8:
+			decoder.fieldDecoder8.Decode(ptr, iter)
+		case decoder.fieldHash9:
+			decoder.fieldDecoder9.Decode(ptr, iter)
+		case decoder.fieldHash10:
+			decoder.fieldDecoder10.Decode(ptr, iter)
+		default:
+			iter.Skip()
+		}
+		if iter.isObjectEnd() {
+			break
+		}
+	}
+	if iter.Error != nil && iter.Error != io.EOF {
+		iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+	}
+}
+
+type structFieldDecoder struct {
+	field        reflect2.StructField
+	fieldDecoder ValDecoder
+}
+
+func (decoder *structFieldDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	fieldPtr := decoder.field.UnsafeGet(ptr)
+	decoder.fieldDecoder.Decode(fieldPtr, iter)
+	if iter.Error != nil && iter.Error != io.EOF {
+		iter.Error = fmt.Errorf("%s: %s", decoder.field.Name(), iter.Error.Error())
+	}
+}
+
+type stringModeStringDecoder struct {
+	elemDecoder ValDecoder
+	cfg         *frozenConfig
+}
+
+func (decoder *stringModeStringDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	decoder.elemDecoder.Decode(ptr, iter)
+	str := *((*string)(ptr))
+	tempIter := decoder.cfg.BorrowIterator([]byte(str))
+	defer decoder.cfg.ReturnIterator(tempIter)
+	*((*string)(ptr)) = tempIter.ReadString()
+}
+
+type stringModeNumberDecoder struct {
+	elemDecoder ValDecoder
+}
+
+func (decoder *stringModeNumberDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	c := iter.nextToken()
+	if c != '"' {
+		iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c}))
+		return
+	}
+	decoder.elemDecoder.Decode(ptr, iter)
+	if iter.Error != nil {
+		return
+	}
+	c = iter.readByte()
+	if c != '"' {
+		iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c}))
+		return
+	}
+}
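
The stringModeNumberDecoder at the end of this file backs the standard ",string" tag option: it requires the number to arrive wrapped in quotes before delegating to the element decoder. A short sketch (the Account type is hypothetical):

    package main

    import (
        "fmt"

        jsoniter "github.com/json-iterator/go"
    )

    type Account struct {
        // ",string" routes decoding through stringModeNumberDecoder.
        ID int64 `json:"id,string"`
    }

    func main() {
        var acct Account
        if err := jsoniter.Unmarshal([]byte(`{"id":"9007199254740993"}`), &acct); err != nil {
            panic(err)
        }
        fmt.Println(acct.ID) // 9007199254740993
    }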
diff --git a/vendor/github.com/json-iterator/go/reflect_struct_encoder.go b/vendor/github.com/json-iterator/go/reflect_struct_encoder.go
new file mode 100644
index 0000000..d0759cf
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_struct_encoder.go
@@ -0,0 +1,210 @@
+package jsoniter
+
+import (
+	"fmt"
+	"github.com/modern-go/reflect2"
+	"io"
+	"reflect"
+	"unsafe"
+)
+
+func encoderOfStruct(ctx *ctx, typ reflect2.Type) ValEncoder {
+	type bindingTo struct {
+		binding *Binding
+		toName  string
+		ignored bool
+	}
+	orderedBindings := []*bindingTo{}
+	structDescriptor := describeStruct(ctx, typ)
+	for _, binding := range structDescriptor.Fields {
+		for _, toName := range binding.ToNames {
+			new := &bindingTo{
+				binding: binding,
+				toName:  toName,
+			}
+			for _, old := range orderedBindings {
+				if old.toName != toName {
+					continue
+				}
+				old.ignored, new.ignored = resolveConflictBinding(ctx.frozenConfig, old.binding, new.binding)
+			}
+			orderedBindings = append(orderedBindings, new)
+		}
+	}
+	if len(orderedBindings) == 0 {
+		return &emptyStructEncoder{}
+	}
+	finalOrderedFields := []structFieldTo{}
+	for _, bindingTo := range orderedBindings {
+		if !bindingTo.ignored {
+			finalOrderedFields = append(finalOrderedFields, structFieldTo{
+				encoder: bindingTo.binding.Encoder.(*structFieldEncoder),
+				toName:  bindingTo.toName,
+			})
+		}
+	}
+	return &structEncoder{typ, finalOrderedFields}
+}
+
+func createCheckIsEmpty(ctx *ctx, typ reflect2.Type) checkIsEmpty {
+	encoder := createEncoderOfNative(ctx, typ)
+	if encoder != nil {
+		return encoder
+	}
+	kind := typ.Kind()
+	switch kind {
+	case reflect.Interface:
+		return &dynamicEncoder{typ}
+	case reflect.Struct:
+		return &structEncoder{typ: typ}
+	case reflect.Array:
+		return &arrayEncoder{}
+	case reflect.Slice:
+		return &sliceEncoder{}
+	case reflect.Map:
+		return encoderOfMap(ctx, typ)
+	case reflect.Ptr:
+		return &OptionalEncoder{}
+	default:
+		return &lazyErrorEncoder{err: fmt.Errorf("unsupported type: %v", typ)}
+	}
+}
+
+func resolveConflictBinding(cfg *frozenConfig, old, new *Binding) (ignoreOld, ignoreNew bool) {
+	newTagged := new.Field.Tag().Get(cfg.getTagKey()) != ""
+	oldTagged := old.Field.Tag().Get(cfg.getTagKey()) != ""
+	if newTagged {
+		if oldTagged {
+			if len(old.levels) > len(new.levels) {
+				return true, false
+			} else if len(new.levels) > len(old.levels) {
+				return false, true
+			} else {
+				return true, true
+			}
+		} else {
+			return true, false
+		}
+	} else {
+		if oldTagged {
+			return true, false
+		}
+		if len(old.levels) > len(new.levels) {
+			return true, false
+		} else if len(new.levels) > len(old.levels) {
+			return false, true
+		} else {
+			return true, true
+		}
+	}
+}
+
+type structFieldEncoder struct {
+	field        reflect2.StructField
+	fieldEncoder ValEncoder
+	omitempty    bool
+}
+
+func (encoder *structFieldEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	fieldPtr := encoder.field.UnsafeGet(ptr)
+	encoder.fieldEncoder.Encode(fieldPtr, stream)
+	if stream.Error != nil && stream.Error != io.EOF {
+		stream.Error = fmt.Errorf("%s: %s", encoder.field.Name(), stream.Error.Error())
+	}
+}
+
+func (encoder *structFieldEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	fieldPtr := encoder.field.UnsafeGet(ptr)
+	return encoder.fieldEncoder.IsEmpty(fieldPtr)
+}
+
+func (encoder *structFieldEncoder) IsEmbeddedPtrNil(ptr unsafe.Pointer) bool {
+	isEmbeddedPtrNil, converted := encoder.fieldEncoder.(IsEmbeddedPtrNil)
+	if !converted {
+		return false
+	}
+	fieldPtr := encoder.field.UnsafeGet(ptr)
+	return isEmbeddedPtrNil.IsEmbeddedPtrNil(fieldPtr)
+}
+
+type IsEmbeddedPtrNil interface {
+	IsEmbeddedPtrNil(ptr unsafe.Pointer) bool
+}
+
+type structEncoder struct {
+	typ    reflect2.Type
+	fields []structFieldTo
+}
+
+type structFieldTo struct {
+	encoder *structFieldEncoder
+	toName  string
+}
+
+func (encoder *structEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	stream.WriteObjectStart()
+	isNotFirst := false
+	for _, field := range encoder.fields {
+		if field.encoder.omitempty && field.encoder.IsEmpty(ptr) {
+			continue
+		}
+		if field.encoder.IsEmbeddedPtrNil(ptr) {
+			continue
+		}
+		if isNotFirst {
+			stream.WriteMore()
+		}
+		stream.WriteObjectField(field.toName)
+		field.encoder.Encode(ptr, stream)
+		isNotFirst = true
+	}
+	stream.WriteObjectEnd()
+	if stream.Error != nil && stream.Error != io.EOF {
+		stream.Error = fmt.Errorf("%v.%s", encoder.typ, stream.Error.Error())
+	}
+}
+
+func (encoder *structEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	return false
+}
+
+type emptyStructEncoder struct {
+}
+
+func (encoder *emptyStructEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	stream.WriteEmptyObject()
+}
+
+func (encoder *emptyStructEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	return false
+}
+
+type stringModeNumberEncoder struct {
+	elemEncoder ValEncoder
+}
+
+func (encoder *stringModeNumberEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	stream.writeByte('"')
+	encoder.elemEncoder.Encode(ptr, stream)
+	stream.writeByte('"')
+}
+
+func (encoder *stringModeNumberEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	return encoder.elemEncoder.IsEmpty(ptr)
+}
+
+type stringModeStringEncoder struct {
+	elemEncoder ValEncoder
+	cfg         *frozenConfig
+}
+
+func (encoder *stringModeStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	tempStream := encoder.cfg.BorrowStream(nil)
+	defer encoder.cfg.ReturnStream(tempStream)
+	encoder.elemEncoder.Encode(ptr, tempStream)
+	stream.WriteString(string(tempStream.Buffer()))
+}
+
+func (encoder *stringModeStringEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	return encoder.elemEncoder.IsEmpty(ptr)
+}
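
structEncoder above checks each field's omitempty flag against its IsEmpty result before writing, which is exactly what the omitempty tag option relies on. A quick illustration (the User type is hypothetical):

    package main

    import (
        "fmt"

        jsoniter "github.com/json-iterator/go"
    )

    type User struct {
        Name  string `json:"name"`
        Email string `json:"email,omitempty"`
    }

    func main() {
        // IsEmpty reports true for "", so Email is skipped here.
        out, err := jsoniter.Marshal(User{Name: "alice"})
        if err != nil {
            panic(err)
        }
        fmt.Println(string(out)) // {"name":"alice"}

        // A non-empty field is written normally.
        out, err = jsoniter.Marshal(User{Name: "alice", Email: "a@example.com"})
        if err != nil {
            panic(err)
        }
        fmt.Println(string(out)) // {"name":"alice","email":"a@example.com"}
    }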
diff --git a/vendor/github.com/json-iterator/go/stream.go b/vendor/github.com/json-iterator/go/stream.go
new file mode 100644
index 0000000..17662fd
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/stream.go
@@ -0,0 +1,211 @@
+package jsoniter
+
+import (
+	"io"
+)
+
+// Stream is an io.Writer-like object with JSON-specific write functions.
+// Errors are not returned as return values, but stored in the Error field of the stream instance.
+type Stream struct {
+	cfg        *frozenConfig
+	out        io.Writer
+	buf        []byte
+	Error      error
+	indention  int
+	Attachment interface{} // open for customized encoder
+}
+
+// NewStream creates a new stream instance.
+// cfg can be jsoniter.ConfigDefault.
+// out can be nil when writing to the internal buffer only.
+// bufSize is the initial size of the internal buffer in bytes.
+func NewStream(cfg API, out io.Writer, bufSize int) *Stream {
+	return &Stream{
+		cfg:       cfg.(*frozenConfig),
+		out:       out,
+		buf:       make([]byte, 0, bufSize),
+		Error:     nil,
+		indention: 0,
+	}
+}
+
+// Pool returns a pool that can provide more streams with the same configuration.
+func (stream *Stream) Pool() StreamPool {
+	return stream.cfg
+}
+
+// Reset reuses this stream instance by assigning a new writer.
+func (stream *Stream) Reset(out io.Writer) {
+	stream.out = out
+	stream.buf = stream.buf[:0]
+}
+
+// Available returns how many bytes are unused in the buffer.
+func (stream *Stream) Available() int {
+	return cap(stream.buf) - len(stream.buf)
+}
+
+// Buffered returns the number of bytes that have been written into the current buffer.
+func (stream *Stream) Buffered() int {
+	return len(stream.buf)
+}
+
+// Buffer returns the internal buffer; if the writer is nil, use this method to take the result.
+func (stream *Stream) Buffer() []byte {
+	return stream.buf
+}
+
+// SetBuffer allows appending to the internal buffer directly.
+func (stream *Stream) SetBuffer(buf []byte) {
+	stream.buf = buf
+}
+
+// Write writes the contents of p into the buffer.
+// It returns the number of bytes written.
+// If nn < len(p), it also returns an error explaining
+// why the write is short.
+func (stream *Stream) Write(p []byte) (nn int, err error) {
+	stream.buf = append(stream.buf, p...)
+	if stream.out != nil {
+		nn, err = stream.out.Write(stream.buf)
+		stream.buf = stream.buf[nn:]
+		return
+	}
+	return len(p), nil
+}
+
+// writeByte appends a single byte to the buffer.
+func (stream *Stream) writeByte(c byte) {
+	stream.buf = append(stream.buf, c)
+}
+
+func (stream *Stream) writeTwoBytes(c1 byte, c2 byte) {
+	stream.buf = append(stream.buf, c1, c2)
+}
+
+func (stream *Stream) writeThreeBytes(c1 byte, c2 byte, c3 byte) {
+	stream.buf = append(stream.buf, c1, c2, c3)
+}
+
+func (stream *Stream) writeFourBytes(c1 byte, c2 byte, c3 byte, c4 byte) {
+	stream.buf = append(stream.buf, c1, c2, c3, c4)
+}
+
+func (stream *Stream) writeFiveBytes(c1 byte, c2 byte, c3 byte, c4 byte, c5 byte) {
+	stream.buf = append(stream.buf, c1, c2, c3, c4, c5)
+}
+
+// Flush writes any buffered data to the underlying io.Writer.
+func (stream *Stream) Flush() error {
+	if stream.out == nil {
+		return nil
+	}
+	if stream.Error != nil {
+		return stream.Error
+	}
+	n, err := stream.out.Write(stream.buf)
+	if err != nil {
+		if stream.Error == nil {
+			stream.Error = err
+		}
+		return err
+	}
+	stream.buf = stream.buf[n:]
+	return nil
+}
+
+// WriteRaw writes the string out without quotes, as raw bytes.
+func (stream *Stream) WriteRaw(s string) {
+	stream.buf = append(stream.buf, s...)
+}
+
+// WriteNil writes null to the stream.
+func (stream *Stream) WriteNil() {
+	stream.writeFourBytes('n', 'u', 'l', 'l')
+}
+
+// WriteTrue writes true to the stream.
+func (stream *Stream) WriteTrue() {
+	stream.writeFourBytes('t', 'r', 'u', 'e')
+}
+
+// WriteFalse writes false to the stream.
+func (stream *Stream) WriteFalse() {
+	stream.writeFiveBytes('f', 'a', 'l', 's', 'e')
+}
+
+// WriteBool writes true or false to the stream.
+func (stream *Stream) WriteBool(val bool) {
+	if val {
+		stream.WriteTrue()
+	} else {
+		stream.WriteFalse()
+	}
+}
+
+// WriteObjectStart writes { with indentation if configured.
+func (stream *Stream) WriteObjectStart() {
+	stream.indention += stream.cfg.indentionStep
+	stream.writeByte('{')
+	stream.writeIndention(0)
+}
+
+// WriteObjectField writes "field": with indentation if configured.
+func (stream *Stream) WriteObjectField(field string) {
+	stream.WriteString(field)
+	if stream.indention > 0 {
+		stream.writeTwoBytes(':', ' ')
+	} else {
+		stream.writeByte(':')
+	}
+}
+
+// WriteObjectEnd writes } with indentation if configured.
+func (stream *Stream) WriteObjectEnd() {
+	stream.writeIndention(stream.cfg.indentionStep)
+	stream.indention -= stream.cfg.indentionStep
+	stream.writeByte('}')
+}
+
+// WriteEmptyObject writes {}.
+func (stream *Stream) WriteEmptyObject() {
+	stream.writeByte('{')
+	stream.writeByte('}')
+}
+
+// WriteMore writes , with indentation if configured.
+func (stream *Stream) WriteMore() {
+	stream.writeByte(',')
+	stream.writeIndention(0)
+	stream.Flush()
+}
+
+// WriteArrayStart writes [ with indentation if configured.
+func (stream *Stream) WriteArrayStart() {
+	stream.indention += stream.cfg.indentionStep
+	stream.writeByte('[')
+	stream.writeIndention(0)
+}
+
+// WriteEmptyArray writes [].
+func (stream *Stream) WriteEmptyArray() {
+	stream.writeTwoBytes('[', ']')
+}
+
+// WriteArrayEnd writes ] with indentation if configured.
+func (stream *Stream) WriteArrayEnd() {
+	stream.writeIndention(stream.cfg.indentionStep)
+	stream.indention -= stream.cfg.indentionStep
+	stream.writeByte(']')
+}
+
+func (stream *Stream) writeIndention(delta int) {
+	if stream.indention == 0 {
+		return
+	}
+	stream.writeByte('\n')
+	toWrite := stream.indention - delta
+	for i := 0; i < toWrite; i++ {
+		stream.buf = append(stream.buf, ' ')
+	}
+}
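
A Stream accumulates output in its internal buf and only touches the io.Writer on Write or Flush. A minimal sketch of driving one directly with the write helpers defined above:

    package main

    import (
        "fmt"
        "os"

        jsoniter "github.com/json-iterator/go"
    )

    func main() {
        // Bytes pile up in Stream.buf; Flush pushes them to os.Stdout.
        stream := jsoniter.NewStream(jsoniter.ConfigDefault, os.Stdout, 512)
        stream.WriteObjectStart()
        stream.WriteObjectField("ok")
        stream.WriteBool(true)
        stream.WriteObjectEnd()
        if err := stream.Flush(); err != nil {
            fmt.Fprintln(os.Stderr, err)
        }
        // prints {"ok":true}
    }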
diff --git a/vendor/github.com/json-iterator/go/stream_float.go b/vendor/github.com/json-iterator/go/stream_float.go
new file mode 100644
index 0000000..f318d2c
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/stream_float.go
@@ -0,0 +1,94 @@
+package jsoniter
+
+import (
+	"math"
+	"strconv"
+)
+
+var pow10 []uint64
+
+func init() {
+	pow10 = []uint64{1, 10, 100, 1000, 10000, 100000, 1000000}
+}
+
+// WriteFloat32 writes a float32 to the stream.
+func (stream *Stream) WriteFloat32(val float32) {
+	abs := math.Abs(float64(val))
+	fmt := byte('f')
+	// Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right.
+	if abs != 0 {
+		if float32(abs) < 1e-6 || float32(abs) >= 1e21 {
+			fmt = 'e'
+		}
+	}
+	stream.buf = strconv.AppendFloat(stream.buf, float64(val), fmt, -1, 32)
+}
+
+// WriteFloat32Lossy writes a float32 to the stream with only 6 digits of precision, but is much faster.
+func (stream *Stream) WriteFloat32Lossy(val float32) {
+	if val < 0 {
+		stream.writeByte('-')
+		val = -val
+	}
+	if val > 0x4ffffff {
+		stream.WriteFloat32(val)
+		return
+	}
+	precision := 6
+	exp := uint64(1000000) // 6
+	lval := uint64(float64(val)*float64(exp) + 0.5)
+	stream.WriteUint64(lval / exp)
+	fval := lval % exp
+	if fval == 0 {
+		return
+	}
+	stream.writeByte('.')
+	for p := precision - 1; p > 0 && fval < pow10[p]; p-- {
+		stream.writeByte('0')
+	}
+	stream.WriteUint64(fval)
+	for stream.buf[len(stream.buf)-1] == '0' {
+		stream.buf = stream.buf[:len(stream.buf)-1]
+	}
+}
+
+// WriteFloat64 writes a float64 to the stream.
+func (stream *Stream) WriteFloat64(val float64) {
+	abs := math.Abs(val)
+	fmt := byte('f')
	+	// Note: same cutoffs as encoding/json; exponent format is used outside [1e-6, 1e21).
+	if abs != 0 {
+		if abs < 1e-6 || abs >= 1e21 {
+			fmt = 'e'
+		}
+	}
+	stream.buf = strconv.AppendFloat(stream.buf, float64(val), fmt, -1, 64)
+}
+
+// WriteFloat64Lossy writes a float64 to the stream with only 6 digits of precision, but is much faster.
+func (stream *Stream) WriteFloat64Lossy(val float64) {
+	if val < 0 {
+		stream.writeByte('-')
+		val = -val
+	}
+	if val > 0x4ffffff {
+		stream.WriteFloat64(val)
+		return
+	}
+	precision := 6
+	exp := uint64(1000000) // 6
+	lval := uint64(val*float64(exp) + 0.5)
+	stream.WriteUint64(lval / exp)
+	fval := lval % exp
+	if fval == 0 {
+		return
+	}
+	stream.writeByte('.')
+	for p := precision - 1; p > 0 && fval < pow10[p]; p-- {
+		stream.writeByte('0')
+	}
+	stream.WriteUint64(fval)
+	for stream.buf[len(stream.buf)-1] == '0' {
+		stream.buf = stream.buf[:len(stream.buf)-1]
+	}
+}
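
The Lossy variants trade precision for speed: the value is scaled by 10^6, emitted as integer and fraction, and trailing zeros are trimmed, so at most six decimal digits survive. Comparing the two paths on the same value:

    package main

    import (
        "fmt"

        jsoniter "github.com/json-iterator/go"
    )

    func main() {
        stream := jsoniter.NewStream(jsoniter.ConfigDefault, nil, 64)

        // Lossy path: rounded at the sixth decimal digit.
        stream.WriteFloat64Lossy(3.14159265)
        fmt.Println(string(stream.Buffer())) // 3.141593

        stream.Reset(nil)

        // Exact path: strconv.AppendFloat, shortest round-trip form.
        stream.WriteFloat64(3.14159265)
        fmt.Println(string(stream.Buffer())) // 3.14159265
    }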
diff --git a/vendor/github.com/json-iterator/go/stream_int.go b/vendor/github.com/json-iterator/go/stream_int.go
new file mode 100644
index 0000000..d1059ee
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/stream_int.go
@@ -0,0 +1,190 @@
+package jsoniter
+
+var digits []uint32
+
+func init() {
+	digits = make([]uint32, 1000)
+	for i := uint32(0); i < 1000; i++ {
+		digits[i] = (((i / 100) + '0') << 16) + ((((i / 10) % 10) + '0') << 8) + i%10 + '0'
+		if i < 10 {
+			digits[i] += 2 << 24
+		} else if i < 100 {
+			digits[i] += 1 << 24
+		}
+	}
+}
+
+func writeFirstBuf(space []byte, v uint32) []byte {
+	start := v >> 24
+	if start == 0 {
+		space = append(space, byte(v>>16), byte(v>>8))
+	} else if start == 1 {
+		space = append(space, byte(v>>8))
+	}
+	space = append(space, byte(v))
+	return space
+}
+
+func writeBuf(buf []byte, v uint32) []byte {
+	return append(buf, byte(v>>16), byte(v>>8), byte(v))
+}
+
+// WriteUint8 writes a uint8 to the stream.
+func (stream *Stream) WriteUint8(val uint8) {
+	stream.buf = writeFirstBuf(stream.buf, digits[val])
+}
+
+// WriteInt8 writes an int8 to the stream.
+func (stream *Stream) WriteInt8(nval int8) {
+	var val uint8
+	if nval < 0 {
+		val = uint8(-nval)
+		stream.buf = append(stream.buf, '-')
+	} else {
+		val = uint8(nval)
+	}
+	stream.buf = writeFirstBuf(stream.buf, digits[val])
+}
+
+// WriteUint16 writes a uint16 to the stream.
+func (stream *Stream) WriteUint16(val uint16) {
+	q1 := val / 1000
+	if q1 == 0 {
+		stream.buf = writeFirstBuf(stream.buf, digits[val])
+		return
+	}
+	r1 := val - q1*1000
+	stream.buf = writeFirstBuf(stream.buf, digits[q1])
+	stream.buf = writeBuf(stream.buf, digits[r1])
+}
+
+// WriteInt16 writes an int16 to the stream.
+func (stream *Stream) WriteInt16(nval int16) {
+	var val uint16
+	if nval < 0 {
+		val = uint16(-nval)
+		stream.buf = append(stream.buf, '-')
+	} else {
+		val = uint16(nval)
+	}
+	stream.WriteUint16(val)
+}
+
+// WriteUint32 writes a uint32 to the stream.
+func (stream *Stream) WriteUint32(val uint32) {
+	q1 := val / 1000
+	if q1 == 0 {
+		stream.buf = writeFirstBuf(stream.buf, digits[val])
+		return
+	}
+	r1 := val - q1*1000
+	q2 := q1 / 1000
+	if q2 == 0 {
+		stream.buf = writeFirstBuf(stream.buf, digits[q1])
+		stream.buf = writeBuf(stream.buf, digits[r1])
+		return
+	}
+	r2 := q1 - q2*1000
+	q3 := q2 / 1000
+	if q3 == 0 {
+		stream.buf = writeFirstBuf(stream.buf, digits[q2])
+	} else {
+		r3 := q2 - q3*1000
+		stream.buf = append(stream.buf, byte(q3+'0'))
+		stream.buf = writeBuf(stream.buf, digits[r3])
+	}
+	stream.buf = writeBuf(stream.buf, digits[r2])
+	stream.buf = writeBuf(stream.buf, digits[r1])
+}
+
+// WriteInt32 writes an int32 to the stream
+func (stream *Stream) WriteInt32(nval int32) {
+	var val uint32
+	if nval < 0 {
+		val = uint32(-nval)
+		stream.buf = append(stream.buf, '-')
+	} else {
+		val = uint32(nval)
+	}
+	stream.WriteUint32(val)
+}
+
+// WriteUint64 writes a uint64 to the stream
+func (stream *Stream) WriteUint64(val uint64) {
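+	// Peel off up to seven groups of three decimal digits (one digits[]
+	// entry per division by 1000), fully unrolled to cover the at most
+	// 20 digits of a uint64.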
+	q1 := val / 1000
+	if q1 == 0 {
+		stream.buf = writeFirstBuf(stream.buf, digits[val])
+		return
+	}
+	r1 := val - q1*1000
+	q2 := q1 / 1000
+	if q2 == 0 {
+		stream.buf = writeFirstBuf(stream.buf, digits[q1])
+		stream.buf = writeBuf(stream.buf, digits[r1])
+		return
+	}
+	r2 := q1 - q2*1000
+	q3 := q2 / 1000
+	if q3 == 0 {
+		stream.buf = writeFirstBuf(stream.buf, digits[q2])
+		stream.buf = writeBuf(stream.buf, digits[r2])
+		stream.buf = writeBuf(stream.buf, digits[r1])
+		return
+	}
+	r3 := q2 - q3*1000
+	q4 := q3 / 1000
+	if q4 == 0 {
+		stream.buf = writeFirstBuf(stream.buf, digits[q3])
+		stream.buf = writeBuf(stream.buf, digits[r3])
+		stream.buf = writeBuf(stream.buf, digits[r2])
+		stream.buf = writeBuf(stream.buf, digits[r1])
+		return
+	}
+	r4 := q3 - q4*1000
+	q5 := q4 / 1000
+	if q5 == 0 {
+		stream.buf = writeFirstBuf(stream.buf, digits[q4])
+		stream.buf = writeBuf(stream.buf, digits[r4])
+		stream.buf = writeBuf(stream.buf, digits[r3])
+		stream.buf = writeBuf(stream.buf, digits[r2])
+		stream.buf = writeBuf(stream.buf, digits[r1])
+		return
+	}
+	r5 := q4 - q5*1000
+	q6 := q5 / 1000
+	if q6 == 0 {
+		stream.buf = writeFirstBuf(stream.buf, digits[q5])
+	} else {
+		stream.buf = writeFirstBuf(stream.buf, digits[q6])
+		r6 := q5 - q6*1000
+		stream.buf = writeBuf(stream.buf, digits[r6])
+	}
+	stream.buf = writeBuf(stream.buf, digits[r5])
+	stream.buf = writeBuf(stream.buf, digits[r4])
+	stream.buf = writeBuf(stream.buf, digits[r3])
+	stream.buf = writeBuf(stream.buf, digits[r2])
+	stream.buf = writeBuf(stream.buf, digits[r1])
+}
+
+// WriteInt64 writes an int64 to the stream
+func (stream *Stream) WriteInt64(nval int64) {
+	var val uint64
+	if nval < 0 {
+		val = uint64(-nval)
+		stream.buf = append(stream.buf, '-')
+	} else {
+		val = uint64(nval)
+	}
+	stream.WriteUint64(val)
+}
+
+// WriteInt writes an int to the stream
+func (stream *Stream) WriteInt(val int) {
+	stream.WriteInt64(int64(val))
+}
+
+// WriteUint writes a uint to the stream
+func (stream *Stream) WriteUint(val uint) {
+	stream.WriteUint64(uint64(val))
+}
diff --git a/vendor/github.com/json-iterator/go/stream_str.go b/vendor/github.com/json-iterator/go/stream_str.go
new file mode 100644
index 0000000..54c2ba0
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/stream_str.go
@@ -0,0 +1,372 @@
+package jsoniter
+
+import (
+	"unicode/utf8"
+)
+
+// htmlSafeSet holds the value true if the ASCII character with the given
+// array position can be safely represented inside a JSON string, embedded
+// inside of HTML <script> tags, without any additional escaping.
+//
+// All values are true except for the ASCII control characters (0-31), the
+// double quote ("), the backslash character ("\"), HTML opening and closing
+// tags ("<" and ">"), and the ampersand ("&").
+var htmlSafeSet = [utf8.RuneSelf]bool{
+	' ':      true,
+	'!':      true,
+	'"':      false,
+	'#':      true,
+	'$':      true,
+	'%':      true,
+	'&':      false,
+	'\'':     true,
+	'(':      true,
+	')':      true,
+	'*':      true,
+	'+':      true,
+	',':      true,
+	'-':      true,
+	'.':      true,
+	'/':      true,
+	'0':      true,
+	'1':      true,
+	'2':      true,
+	'3':      true,
+	'4':      true,
+	'5':      true,
+	'6':      true,
+	'7':      true,
+	'8':      true,
+	'9':      true,
+	':':      true,
+	';':      true,
+	'<':      false,
+	'=':      true,
+	'>':      false,
+	'?':      true,
+	'@':      true,
+	'A':      true,
+	'B':      true,
+	'C':      true,
+	'D':      true,
+	'E':      true,
+	'F':      true,
+	'G':      true,
+	'H':      true,
+	'I':      true,
+	'J':      true,
+	'K':      true,
+	'L':      true,
+	'M':      true,
+	'N':      true,
+	'O':      true,
+	'P':      true,
+	'Q':      true,
+	'R':      true,
+	'S':      true,
+	'T':      true,
+	'U':      true,
+	'V':      true,
+	'W':      true,
+	'X':      true,
+	'Y':      true,
+	'Z':      true,
+	'[':      true,
+	'\\':     false,
+	']':      true,
+	'^':      true,
+	'_':      true,
+	'`':      true,
+	'a':      true,
+	'b':      true,
+	'c':      true,
+	'd':      true,
+	'e':      true,
+	'f':      true,
+	'g':      true,
+	'h':      true,
+	'i':      true,
+	'j':      true,
+	'k':      true,
+	'l':      true,
+	'm':      true,
+	'n':      true,
+	'o':      true,
+	'p':      true,
+	'q':      true,
+	'r':      true,
+	's':      true,
+	't':      true,
+	'u':      true,
+	'v':      true,
+	'w':      true,
+	'x':      true,
+	'y':      true,
+	'z':      true,
+	'{':      true,
+	'|':      true,
+	'}':      true,
+	'~':      true,
+	'\u007f': true,
+}
+
+// safeSet holds the value true if the ASCII character with the given array
+// position can be represented inside a JSON string without any further
+// escaping.
+//
+// All values are true except for the ASCII control characters (0-31), the
+// double quote ("), and the backslash character ("\").
+var safeSet = [utf8.RuneSelf]bool{
+	' ':      true,
+	'!':      true,
+	'"':      false,
+	'#':      true,
+	'$':      true,
+	'%':      true,
+	'&':      true,
+	'\'':     true,
+	'(':      true,
+	')':      true,
+	'*':      true,
+	'+':      true,
+	',':      true,
+	'-':      true,
+	'.':      true,
+	'/':      true,
+	'0':      true,
+	'1':      true,
+	'2':      true,
+	'3':      true,
+	'4':      true,
+	'5':      true,
+	'6':      true,
+	'7':      true,
+	'8':      true,
+	'9':      true,
+	':':      true,
+	';':      true,
+	'<':      true,
+	'=':      true,
+	'>':      true,
+	'?':      true,
+	'@':      true,
+	'A':      true,
+	'B':      true,
+	'C':      true,
+	'D':      true,
+	'E':      true,
+	'F':      true,
+	'G':      true,
+	'H':      true,
+	'I':      true,
+	'J':      true,
+	'K':      true,
+	'L':      true,
+	'M':      true,
+	'N':      true,
+	'O':      true,
+	'P':      true,
+	'Q':      true,
+	'R':      true,
+	'S':      true,
+	'T':      true,
+	'U':      true,
+	'V':      true,
+	'W':      true,
+	'X':      true,
+	'Y':      true,
+	'Z':      true,
+	'[':      true,
+	'\\':     false,
+	']':      true,
+	'^':      true,
+	'_':      true,
+	'`':      true,
+	'a':      true,
+	'b':      true,
+	'c':      true,
+	'd':      true,
+	'e':      true,
+	'f':      true,
+	'g':      true,
+	'h':      true,
+	'i':      true,
+	'j':      true,
+	'k':      true,
+	'l':      true,
+	'm':      true,
+	'n':      true,
+	'o':      true,
+	'p':      true,
+	'q':      true,
+	'r':      true,
+	's':      true,
+	't':      true,
+	'u':      true,
+	'v':      true,
+	'w':      true,
+	'x':      true,
+	'y':      true,
+	'z':      true,
+	'{':      true,
+	'|':      true,
+	'}':      true,
+	'~':      true,
+	'\u007f': true,
+}
+
+var hex = "0123456789abcdef"
+
+// WriteStringWithHTMLEscaped writes a string to the stream with HTML special characters escaped
+func (stream *Stream) WriteStringWithHTMLEscaped(s string) {
+	valLen := len(s)
+	stream.buf = append(stream.buf, '"')
+	// fast path: copy bytes that need neither UTF-8 decoding nor escaping
+	i := 0
+	for ; i < valLen; i++ {
+		c := s[i]
+		if c < utf8.RuneSelf && htmlSafeSet[c] {
+			stream.buf = append(stream.buf, c)
+		} else {
+			break
+		}
+	}
+	if i == valLen {
+		stream.buf = append(stream.buf, '"')
+		return
+	}
+	writeStringSlowPathWithHTMLEscaped(stream, i, s, valLen)
+}
+
+func writeStringSlowPathWithHTMLEscaped(stream *Stream, i int, s string, valLen int) {
+	start := i
+	// for the remaining parts, we process them char by char
+	for i < valLen {
+		if b := s[i]; b < utf8.RuneSelf {
+			if htmlSafeSet[b] {
+				i++
+				continue
+			}
+			if start < i {
+				stream.WriteRaw(s[start:i])
+			}
+			switch b {
+			case '\\', '"':
+				stream.writeTwoBytes('\\', b)
+			case '\n':
+				stream.writeTwoBytes('\\', 'n')
+			case '\r':
+				stream.writeTwoBytes('\\', 'r')
+			case '\t':
+				stream.writeTwoBytes('\\', 't')
+			default:
+				// This encodes bytes < 0x20 except for \t, \n and \r.
+				// It also escapes <, > and & because they can lead to
+				// security holes when user-controlled strings are
+				// rendered into JSON and served to some browsers.
+				stream.WriteRaw(`\u00`)
+				stream.writeTwoBytes(hex[b>>4], hex[b&0xF])
+			}
+			i++
+			start = i
+			continue
+		}
+		c, size := utf8.DecodeRuneInString(s[i:])
+		if c == utf8.RuneError && size == 1 {
+			if start < i {
+				stream.WriteRaw(s[start:i])
+			}
+			stream.WriteRaw(`\ufffd`)
+			i++
+			start = i
+			continue
+		}
+		// U+2028 is LINE SEPARATOR.
+		// U+2029 is PARAGRAPH SEPARATOR.
+		// They are both technically valid characters in JSON strings,
+		// but don't work in JSONP, which has to be evaluated as JavaScript,
+		// and can lead to security holes there. It is valid JSON to
+		// escape them, so we do so unconditionally.
+		// See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
+		if c == '\u2028' || c == '\u2029' {
+			if start < i {
+				stream.WriteRaw(s[start:i])
+			}
+			stream.WriteRaw(`\u202`)
+			stream.writeByte(hex[c&0xF])
+			i += size
+			start = i
+			continue
+		}
+		i += size
+	}
+	if start < len(s) {
+		stream.WriteRaw(s[start:])
+	}
+	stream.writeByte('"')
+}
+
+// WriteString writes a string to the stream without HTML escaping
+func (stream *Stream) WriteString(s string) {
+	valLen := len(s)
+	stream.buf = append(stream.buf, '"')
+	// fast path: copy bytes that need neither UTF-8 decoding nor escaping
+	i := 0
+	for ; i < valLen; i++ {
+		c := s[i]
+		if c > 31 && c != '"' && c != '\\' {
+			stream.buf = append(stream.buf, c)
+		} else {
+			break
+		}
+	}
+	if i == valLen {
+		stream.buf = append(stream.buf, '"')
+		return
+	}
+	writeStringSlowPath(stream, i, s, valLen)
+}
+
+func writeStringSlowPath(stream *Stream, i int, s string, valLen int) {
+	start := i
+	// for the remaining parts, we process them char by char
+	for i < valLen {
+		if b := s[i]; b < utf8.RuneSelf {
+			if safeSet[b] {
+				i++
+				continue
+			}
+			if start < i {
+				stream.WriteRaw(s[start:i])
+			}
+			switch b {
+			case '\\', '"':
+				stream.writeTwoBytes('\\', b)
+			case '\n':
+				stream.writeTwoBytes('\\', 'n')
+			case '\r':
+				stream.writeTwoBytes('\\', 'r')
+			case '\t':
+				stream.writeTwoBytes('\\', 't')
+			default:
+				// This encodes bytes < 0x20 except for \t, \n and \r.
+				// Unlike writeStringSlowPathWithHTMLEscaped, this path
+				// leaves <, > and & unescaped.
+				stream.WriteRaw(`\u00`)
+				stream.writeTwoBytes(hex[b>>4], hex[b&0xF])
+			}
+			i++
+			start = i
+			continue
+		}
+		i++
+	}
+	if start < len(s) {
+		stream.WriteRaw(s[start:])
+	}
+	stream.writeByte('"')
+}
diff --git a/vendor/github.com/json-iterator/go/test.sh b/vendor/github.com/json-iterator/go/test.sh
new file mode 100755
index 0000000..f4e7c0b
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/test.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+
+set -e
+echo "" > coverage.txt
+
+for d in $(go list ./... | grep -v vendor); do
+    go test -coverprofile=profile.out -coverpkg=github.com/json-iterator/go $d
+    if [ -f profile.out ]; then
+        cat profile.out >> coverage.txt
+        rm profile.out
+    fi
+done
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE b/vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE
new file mode 100644
index 0000000..14127cd
--- /dev/null
+++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE
@@ -0,0 +1,9 @@
+(The MIT License)
+
+Copyright (c) 2017 marvin + konsorten GmbH (open-source@konsorten.de)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md b/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md
new file mode 100644
index 0000000..949b77e
--- /dev/null
+++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md
@@ -0,0 +1,40 @@
+# Windows Terminal Sequences
+
+This library allows enabling Windows terminal color support for Go programs.
+
+See [Console Virtual Terminal Sequences](https://docs.microsoft.com/en-us/windows/console/console-virtual-terminal-sequences) for details.
+
+## Usage
+
+```go
+import (
+	"syscall"
+	
+	sequences "github.com/konsorten/go-windows-terminal-sequences"
+)
+
+func main() {
+	sequences.EnableVirtualTerminalProcessing(syscall.Stdout, true)
+}
+
+```
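+
+`EnableVirtualTerminalProcessing` returns an error, which the example above
+ignores; a more careful sketch (assuming the standard `log` package is
+imported) would be:
+
+```go
+if err := sequences.EnableVirtualTerminalProcessing(syscall.Stdout, true); err != nil {
+	log.Fatal(err)
+}
+```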
+
+## Authors
+
+This library is sponsored by the [marvin + konsorten GmbH](http://www.konsorten.de).
+
+We thank all the authors who provided code to this library:
+
+* Felix Kollmann
+
+## License
+
+(The MIT License)
+
+Copyright (c) 2018 marvin + konsorten GmbH (open-source@konsorten.de)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod b/vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod
new file mode 100644
index 0000000..716c613
--- /dev/null
+++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod
@@ -0,0 +1 @@
+module github.com/konsorten/go-windows-terminal-sequences
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go
new file mode 100644
index 0000000..ef18d8f
--- /dev/null
+++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go
@@ -0,0 +1,36 @@
+// +build windows
+
+package sequences
+
+import "syscall"
+
+var (
+	kernel32Dll    *syscall.LazyDLL  = syscall.NewLazyDLL("Kernel32.dll")
+	setConsoleMode *syscall.LazyProc = kernel32Dll.NewProc("SetConsoleMode")
+)
+
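+// EnableVirtualTerminalProcessing toggles the
+// ENABLE_VIRTUAL_TERMINAL_PROCESSING console mode flag of the given stream
+// handle, turning processing of ANSI escape sequences on or off.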
+func EnableVirtualTerminalProcessing(stream syscall.Handle, enable bool) error {
+	const ENABLE_VIRTUAL_TERMINAL_PROCESSING uint32 = 0x4
+
+	var mode uint32
+	// Query the mode of the handle that was passed in, not stdout.
+	err := syscall.GetConsoleMode(stream, &mode)
+	if err != nil {
+		return err
+	}
+
+	if enable {
+		mode |= ENABLE_VIRTUAL_TERMINAL_PROCESSING
+	} else {
+		mode &^= ENABLE_VIRTUAL_TERMINAL_PROCESSING
+	}
+
+	ret, _, err := setConsoleMode.Call(uintptr(stream), uintptr(mode))
+	if ret == 0 {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE b/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE
new file mode 100644
index 0000000..8dada3e
--- /dev/null
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright {yyyy} {name of copyright owner}
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE b/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE
new file mode 100644
index 0000000..5d8cb5b
--- /dev/null
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE
@@ -0,0 +1 @@
+Copyright 2012 Matt T. Proud (matt.proud@gmail.com)
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore
new file mode 100644
index 0000000..e16fb94
--- /dev/null
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore
@@ -0,0 +1 @@
+cover.dat
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile
new file mode 100644
index 0000000..81be214
--- /dev/null
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile
@@ -0,0 +1,7 @@
+all:
+
+cover:
+	go test -cover -v -coverprofile=cover.dat ./...
+	go tool cover -func cover.dat
+
+.PHONY: cover
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go
new file mode 100644
index 0000000..258c063
--- /dev/null
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go
@@ -0,0 +1,75 @@
+// Copyright 2013 Matt T. Proud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pbutil
+
+import (
+	"encoding/binary"
+	"errors"
+	"io"
+
+	"github.com/golang/protobuf/proto"
+)
+
+var errInvalidVarint = errors.New("invalid varint32 encountered")
+
+// ReadDelimited decodes a message from the provided length-delimited stream,
+// where the length is encoded as 32-bit varint prefix to the message body.
+// It returns the total number of bytes read and any applicable error.  This is
+// roughly equivalent to the companion Java API's
+// MessageLite#parseDelimitedFrom.  As per the reader contract, this function
+// calls r.Read repeatedly as required until exactly one message including its
+// prefix is read and decoded (or an error has occurred).  The function never
+// reads more bytes from the stream than required.  The function never returns
+// an error if a message has been read and decoded correctly, even if the end
+// of the stream has been reached in doing so.  In that case, any subsequent
+// calls return (0, io.EOF).
+func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) {
+	// Per AbstractParser#parsePartialDelimitedFrom with
+	// CodedInputStream#readRawVarint32.
+	var headerBuf [binary.MaxVarintLen32]byte
+	var bytesRead, varIntBytes int
+	var messageLength uint64
+	for varIntBytes == 0 { // i.e. no varint has been decoded yet.
+		if bytesRead >= len(headerBuf) {
+			return bytesRead, errInvalidVarint
+		}
+		// We have to read byte by byte here to avoid reading more bytes
+		// than required. Each read byte is appended to what we have
+		// read before.
+		newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1])
+		if newBytesRead == 0 {
+			if err != nil {
+				return bytesRead, err
+			}
+			// A Reader should not return (0, nil), but if it does,
+			// it should be treated as no-op (according to the
+			// Reader contract). So let's go on...
+			continue
+		}
+		bytesRead += newBytesRead
+		// Now present everything read so far to the varint decoder and
+		// see if a varint can be decoded already.
+		messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead])
+	}
+
+	messageBuf := make([]byte, messageLength)
+	newBytesRead, err := io.ReadFull(r, messageBuf)
+	bytesRead += newBytesRead
+	if err != nil {
+		return bytesRead, err
+	}
+
+	return bytesRead, proto.Unmarshal(messageBuf, m)
+}
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go
new file mode 100644
index 0000000..c318385
--- /dev/null
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2013 Matt T. Proud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package pbutil provides record length-delimited Protocol Buffer streaming.
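+//
+// A minimal round-trip sketch (assuming msg is some proto.Message value and
+// out is an empty message of the same type to decode into):
+//
+//	var buf bytes.Buffer
+//	if _, err := pbutil.WriteDelimited(&buf, msg); err != nil {
+//		// handle error
+//	}
+//	if _, err := pbutil.ReadDelimited(&buf, out); err != nil {
+//		// handle error
+//	}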
+package pbutil
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go
new file mode 100644
index 0000000..8fb59ad
--- /dev/null
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go
@@ -0,0 +1,46 @@
+// Copyright 2013 Matt T. Proud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pbutil
+
+import (
+	"encoding/binary"
+	"io"
+
+	"github.com/golang/protobuf/proto"
+)
+
+// WriteDelimited encodes and dumps a message to the provided writer prefixed
+// with a 32-bit varint indicating the length of the encoded message, producing
+// a length-delimited record stream, which can be used to chain together
+// encoded messages of the same type together in a file.  It returns the total
+// number of bytes written and any applicable error.  This is roughly
+// equivalent to the companion Java API's MessageLite#writeDelimitedTo.
+func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) {
+	buffer, err := proto.Marshal(m)
+	if err != nil {
+		return 0, err
+	}
+
+	var buf [binary.MaxVarintLen32]byte
+	encodedLength := binary.PutUvarint(buf[:], uint64(len(buffer)))
+
+	sync, err := w.Write(buf[:encodedLength])
+	if err != nil {
+		return sync, err
+	}
+
+	n, err = w.Write(buffer)
+	return n + sync, err
+}
diff --git a/vendor/github.com/modern-go/concurrent/.gitignore b/vendor/github.com/modern-go/concurrent/.gitignore
new file mode 100644
index 0000000..3f2bc47
--- /dev/null
+++ b/vendor/github.com/modern-go/concurrent/.gitignore
@@ -0,0 +1 @@
+/coverage.txt
diff --git a/vendor/github.com/modern-go/concurrent/.travis.yml b/vendor/github.com/modern-go/concurrent/.travis.yml
new file mode 100644
index 0000000..449e67c
--- /dev/null
+++ b/vendor/github.com/modern-go/concurrent/.travis.yml
@@ -0,0 +1,14 @@
+language: go
+
+go:
+  - 1.8.x
+  - 1.x
+
+before_install:
+  - go get -t -v ./...
+
+script:
+  - ./test.sh
+
+after_success:
+  - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/modern-go/concurrent/LICENSE b/vendor/github.com/modern-go/concurrent/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/vendor/github.com/modern-go/concurrent/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/modern-go/concurrent/README.md b/vendor/github.com/modern-go/concurrent/README.md
new file mode 100644
index 0000000..acab320
--- /dev/null
+++ b/vendor/github.com/modern-go/concurrent/README.md
@@ -0,0 +1,49 @@
+# concurrent
+
+[![Sourcegraph](https://sourcegraph.com/github.com/modern-go/concurrent/-/badge.svg)](https://sourcegraph.com/github.com/modern-go/concurrent?badge)
+[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/modern-go/concurrent)
+[![Build Status](https://travis-ci.org/modern-go/concurrent.svg?branch=master)](https://travis-ci.org/modern-go/concurrent)
+[![codecov](https://codecov.io/gh/modern-go/concurrent/branch/master/graph/badge.svg)](https://codecov.io/gh/modern-go/concurrent)
+[![rcard](https://goreportcard.com/badge/github.com/modern-go/concurrent)](https://goreportcard.com/report/github.com/modern-go/concurrent)
+[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://raw.githubusercontent.com/modern-go/concurrent/master/LICENSE)
+
+* concurrent.Map: a backport of sync.Map for Go versions below 1.9
+* concurrent.Executor: goroutines with explicit ownership and cancellation
+
+# concurrent.Map
+
+Because sync.Map is only available from Go 1.9 onwards, concurrent.Map can be used to keep code portable across Go versions.
+
+```go
+m := concurrent.NewMap()
+m.Store("hello", "world")
+elem, found := m.Load("hello")
+// elem will be "world"
+// found will be true
+```
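+
+Load on a key that was never stored returns `found == false`, mirroring the
+`sync.Map` API on Go 1.9 and above.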
+
+# concurrent.Executor
+
+```go
+executor := concurrent.NewUnboundedExecutor()
+executor.Go(func(ctx context.Context) {
+    everyMillisecond := time.NewTicker(time.Millisecond)
+    for {
+        select {
+        case <-ctx.Done():
+            fmt.Println("goroutine exited")
+            return
+        case <-everyMillisecond.C:
+            // do something
+        }
+    }
+})
+time.Sleep(time.Second)
+executor.StopAndWaitForever()
+fmt.Println("executor stopped")
+```
+
+Attaching a goroutine to an executor instance means we can:
+
+* cancel it by stopping the executor with Stop/StopAndWait/StopAndWaitForever
+* handle panics via a callback: by default a panic is logged instead of crashing the application
\ No newline at end of file
diff --git a/vendor/github.com/modern-go/concurrent/executor.go b/vendor/github.com/modern-go/concurrent/executor.go
new file mode 100644
index 0000000..623dba1
--- /dev/null
+++ b/vendor/github.com/modern-go/concurrent/executor.go
@@ -0,0 +1,14 @@
+package concurrent
+
+import "context"
+
+// Executor replaces the go keyword for starting a new goroutine.
+// The goroutine should cancel itself when the context passed in is cancelled.
+// A goroutine started by an executor is owned by that executor,
+// so all goroutines it owns can be cancelled just by stopping the executor itself.
+// However, the Executor interface has no Stop method; whoever starts and owns the
+// executor should use its concrete type rather than this interface.
+type Executor interface {
+	// Go starts a new goroutine controlled by the context
+	Go(handler func(ctx context.Context))
+}
diff --git a/vendor/github.com/modern-go/concurrent/go_above_19.go b/vendor/github.com/modern-go/concurrent/go_above_19.go
new file mode 100644
index 0000000..aeabf8c
--- /dev/null
+++ b/vendor/github.com/modern-go/concurrent/go_above_19.go
@@ -0,0 +1,15 @@
+// +build go1.9
+
+package concurrent
+
+import "sync"
+
+// Map is a wrapper around sync.Map, which was introduced in Go 1.9
+type Map struct {
+	sync.Map
+}
+
+// NewMap creates a thread-safe Map
+func NewMap() *Map {
+	return &Map{}
+}
diff --git a/vendor/github.com/modern-go/concurrent/go_below_19.go b/vendor/github.com/modern-go/concurrent/go_below_19.go
new file mode 100644
index 0000000..b9c8df7
--- /dev/null
+++ b/vendor/github.com/modern-go/concurrent/go_below_19.go
@@ -0,0 +1,33 @@
+// +build !go1.9
+
+package concurrent
+
+import "sync"
+
+// Map implements a thread-safe map for Go versions below 1.9 using a mutex
+type Map struct {
+	lock sync.RWMutex
+	data map[interface{}]interface{}
+}
+
+// NewMap creates a thread-safe map
+func NewMap() *Map {
+	return &Map{
+		data: make(map[interface{}]interface{}, 32),
+	}
+}
+
+// Load is the same as sync.Map's Load
+func (m *Map) Load(key interface{}) (elem interface{}, found bool) {
+	m.lock.RLock()
+	elem, found = m.data[key]
+	m.lock.RUnlock()
+	return
+}
+
+// Store is the same as sync.Map's Store
+func (m *Map) Store(key interface{}, elem interface{}) {
+	m.lock.Lock()
+	m.data[key] = elem
+	m.lock.Unlock()
+}
diff --git a/vendor/github.com/modern-go/concurrent/log.go b/vendor/github.com/modern-go/concurrent/log.go
new file mode 100644
index 0000000..9756fcc
--- /dev/null
+++ b/vendor/github.com/modern-go/concurrent/log.go
@@ -0,0 +1,13 @@
+package concurrent
+
+import (
+	"io/ioutil"
+	"log"
+	"os"
+)
+
+// ErrorLogger is used to print out errors; it can be set to a writer other than stderr
+var ErrorLogger = log.New(os.Stderr, "", 0)
+
+// InfoLogger is used to print informational messages; it defaults to off (ioutil.Discard)
+var InfoLogger = log.New(ioutil.Discard, "", 0)
\ No newline at end of file
diff --git a/vendor/github.com/modern-go/concurrent/test.sh b/vendor/github.com/modern-go/concurrent/test.sh
new file mode 100755
index 0000000..d1e6b2e
--- /dev/null
+++ b/vendor/github.com/modern-go/concurrent/test.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+
+set -e
+echo "" > coverage.txt
+
+for d in $(go list ./... | grep -v vendor); do
+    go test -coverprofile=profile.out -coverpkg=github.com/modern-go/concurrent $d
+    if [ -f profile.out ]; then
+        cat profile.out >> coverage.txt
+        rm profile.out
+    fi
+done
diff --git a/vendor/github.com/modern-go/concurrent/unbounded_executor.go b/vendor/github.com/modern-go/concurrent/unbounded_executor.go
new file mode 100644
index 0000000..05a77dc
--- /dev/null
+++ b/vendor/github.com/modern-go/concurrent/unbounded_executor.go
@@ -0,0 +1,119 @@
+package concurrent
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+	"runtime"
+	"runtime/debug"
+	"sync"
+	"time"
+)
+
+// HandlePanic logs goroutine panics by default
+var HandlePanic = func(recovered interface{}, funcName string) {
+	ErrorLogger.Println(fmt.Sprintf("%s panic: %v", funcName, recovered))
+	ErrorLogger.Println(string(debug.Stack()))
+}
+
+// UnboundedExecutor is an executor without any limit on the number of live goroutines.
+// It tracks the goroutines it starts and can cancel them on shutdown.
+type UnboundedExecutor struct {
+	ctx                   context.Context
+	cancel                context.CancelFunc
+	activeGoroutinesMutex *sync.Mutex
+	activeGoroutines      map[string]int
+	HandlePanic           func(recovered interface{}, funcName string)
+}
+
+// GlobalUnboundedExecutor has the lifecycle of the program itself.
+// Any goroutine that needs to be shut down before main exits can be started from this executor.
+// GlobalUnboundedExecutor expects the main function to call Stop;
+// it does not magically know when the main function exits.
+var GlobalUnboundedExecutor = NewUnboundedExecutor()
+
+// NewUnboundedExecutor creates a new UnboundedExecutor.
+// An UnboundedExecutor cannot be created via &UnboundedExecutor{};
+// its HandlePanic field can be set to a callback to override the global HandlePanic.
+func NewUnboundedExecutor() *UnboundedExecutor {
+	ctx, cancel := context.WithCancel(context.TODO())
+	return &UnboundedExecutor{
+		ctx:                   ctx,
+		cancel:                cancel,
+		activeGoroutinesMutex: &sync.Mutex{},
+		activeGoroutines:      map[string]int{},
+	}
+}
+
+// Go starts a new goroutine and tracks its lifecycle.
+// A panic is recovered and logged automatically; use runtime.Goexit to quit
+// a goroutine without triggering HandlePanic.
+func (executor *UnboundedExecutor) Go(handler func(ctx context.Context)) {
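+	// Resolve the handler's function name and source location up front so
+	// panic reports and shutdown logging can identify the goroutine.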
+	pc := reflect.ValueOf(handler).Pointer()
+	f := runtime.FuncForPC(pc)
+	funcName := f.Name()
+	file, line := f.FileLine(pc)
+	executor.activeGoroutinesMutex.Lock()
+	defer executor.activeGoroutinesMutex.Unlock()
+	startFrom := fmt.Sprintf("%s:%d", file, line)
+	executor.activeGoroutines[startFrom]++
+	go func() {
+		defer func() {
+			recovered := recover()
+			// if you want to quit a goroutine without trigger HandlePanic
+			// use runtime.Goexit() to quit
+			if recovered != nil {
+				if executor.HandlePanic == nil {
+					HandlePanic(recovered, funcName)
+				} else {
+					executor.HandlePanic(recovered, funcName)
+				}
+			}
+			executor.activeGoroutinesMutex.Lock()
+			executor.activeGoroutines[startFrom]--
+			executor.activeGoroutinesMutex.Unlock()
+		}()
+		handler(executor.ctx)
+	}()
+}
+
+// Stop cancels all goroutines started by this executor without waiting
+func (executor *UnboundedExecutor) Stop() {
+	executor.cancel()
+}
+
+// StopAndWaitForever cancels all goroutines started by this executor and
+// waits until all of them have exited
+func (executor *UnboundedExecutor) StopAndWaitForever() {
+	executor.StopAndWait(context.Background())
+}
+
+// StopAndWait cancels all goroutines started by this executor and waits.
+// The wait can be cancelled by the context passed in.
+func (executor *UnboundedExecutor) StopAndWait(ctx context.Context) {
+	executor.cancel()
+	for {
+		oneHundredMilliseconds := time.NewTimer(time.Millisecond * 100)
+		select {
+		case <-oneHundredMilliseconds.C:
+			if executor.checkNoActiveGoroutines() {
+				return
+			}
+		case <-ctx.Done():
+			return
+		}
+	}
+}
+
+func (executor *UnboundedExecutor) checkNoActiveGoroutines() bool {
+	executor.activeGoroutinesMutex.Lock()
+	defer executor.activeGoroutinesMutex.Unlock()
+	for startFrom, count := range executor.activeGoroutines {
+		if count > 0 {
+			InfoLogger.Println("UnboundedExecutor is still waiting goroutines to quit",
+				"startFrom", startFrom,
+				"count", count)
+			return false
+		}
+	}
+	return true
+}
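A minimal usage sketch of the UnboundedExecutor added above (illustrative only: the worker body and timings are hypothetical, but the calls match the API in this file):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/modern-go/concurrent"
)

func main() {
	executor := concurrent.NewUnboundedExecutor()
	executor.Go(func(ctx context.Context) {
		for {
			select {
			case <-ctx.Done():
				fmt.Println("worker cancelled, exiting")
				return
			case <-time.After(10 * time.Millisecond):
				fmt.Println("working")
			}
		}
	})
	time.Sleep(30 * time.Millisecond)
	// Cancel the executor's context, then block until every tracked
	// goroutine has exited (polled every 100ms by StopAndWait).
	executor.StopAndWaitForever()
}
```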
diff --git a/vendor/github.com/modern-go/reflect2/.gitignore b/vendor/github.com/modern-go/reflect2/.gitignore
new file mode 100644
index 0000000..7b26c94
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/.gitignore
@@ -0,0 +1,2 @@
+/vendor
+/coverage.txt
diff --git a/vendor/github.com/modern-go/reflect2/.travis.yml b/vendor/github.com/modern-go/reflect2/.travis.yml
new file mode 100644
index 0000000..fbb4374
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/.travis.yml
@@ -0,0 +1,15 @@
+language: go
+
+go:
+  - 1.8.x
+  - 1.x
+
+before_install:
+  - go get -t -v ./...
+  - go get -t -v github.com/modern-go/reflect2-tests/...
+
+script:
+  - ./test.sh
+
+after_success:
+  - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/modern-go/reflect2/Gopkg.lock b/vendor/github.com/modern-go/reflect2/Gopkg.lock
new file mode 100644
index 0000000..2a3a698
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/Gopkg.lock
@@ -0,0 +1,15 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+
+[[projects]]
+  name = "github.com/modern-go/concurrent"
+  packages = ["."]
+  revision = "e0a39a4cb4216ea8db28e22a69f4ec25610d513a"
+  version = "1.0.0"
+
+[solve-meta]
+  analyzer-name = "dep"
+  analyzer-version = 1
+  inputs-digest = "daee8a88b3498b61c5640056665b8b9eea062006f5e596bbb6a3ed9119a11ec7"
+  solver-name = "gps-cdcl"
+  solver-version = 1
diff --git a/vendor/github.com/modern-go/reflect2/Gopkg.toml b/vendor/github.com/modern-go/reflect2/Gopkg.toml
new file mode 100644
index 0000000..2f4f4db
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/Gopkg.toml
@@ -0,0 +1,35 @@
+# Gopkg.toml example
+#
+# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html
+# for detailed Gopkg.toml documentation.
+#
+# required = ["github.com/user/thing/cmd/thing"]
+# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
+#
+# [[constraint]]
+#   name = "github.com/user/project"
+#   version = "1.0.0"
+#
+# [[constraint]]
+#   name = "github.com/user/project2"
+#   branch = "dev"
+#   source = "github.com/myfork/project2"
+#
+# [[override]]
+#   name = "github.com/x/y"
+#   version = "2.4.0"
+#
+# [prune]
+#   non-go = false
+#   go-tests = true
+#   unused-packages = true
+
+ignored = []
+
+[[constraint]]
+  name = "github.com/modern-go/concurrent"
+  version = "1.0.0"
+
+[prune]
+  go-tests = true
+  unused-packages = true
diff --git a/vendor/github.com/modern-go/reflect2/LICENSE b/vendor/github.com/modern-go/reflect2/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/modern-go/reflect2/README.md b/vendor/github.com/modern-go/reflect2/README.md
new file mode 100644
index 0000000..6f968aa
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/README.md
@@ -0,0 +1,71 @@
+# reflect2
+
+[![Sourcegraph](https://sourcegraph.com/github.com/modern-go/reflect2/-/badge.svg)](https://sourcegraph.com/github.com/modern-go/reflect2?badge)
+[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/modern-go/reflect2)
+[![Build Status](https://travis-ci.org/modern-go/reflect2.svg?branch=master)](https://travis-ci.org/modern-go/reflect2)
+[![codecov](https://codecov.io/gh/modern-go/reflect2/branch/master/graph/badge.svg)](https://codecov.io/gh/modern-go/reflect2)
+[![rcard](https://goreportcard.com/badge/github.com/modern-go/reflect2)](https://goreportcard.com/report/github.com/modern-go/reflect2)
+[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://raw.githubusercontent.com/modern-go/reflect2/master/LICENSE)
+
+A reflect API that avoids the runtime cost of reflect.Value.
+
+* reflect get/set interface{}, with type checking
+* reflect get/set unsafe.Pointer, without type checking
+* `reflect2.TypeByName` works like `Class.forName` in Java
+
+[json-iterator](https://github.com/json-iterator/go) uses this package to save the cost of runtime dispatching.
+This package is designed for low-level libraries that need to optimize reflection performance.
+General applications should still use the standard reflect library.
+
+# reflect2.TypeByName
+
+```go
+// given package is github.com/your/awesome-package
+type MyStruct struct {
+	// ...
+}
+
+// will return the type
+reflect2.TypeByName("awesome-package.MyStruct")
+// however, if the type has never been used,
+// the compiler eliminates it, so it cannot be looked up at runtime
+```
+
+# reflect2 get/set interface{}
+
+```go
+valType := reflect2.TypeOf(1)
+i := 1
+j := 10
+valType.Set(&i, &j)
+// i will be 10
+```
+
+To get or set a value of type `T`, always pass its pointer `*T`.
+
+# reflect2 get/set unsafe.Pointer
+
+```go
+valType := reflect2.TypeOf(1)
+i := 1
+j := 10
+valType.UnsafeSet(unsafe.Pointer(&i), unsafe.Pointer(&j))
+// i will be 10
+```
+
+To get or set a value of type `T`, always pass its pointer `*T`.
+
+# benchmark
+
+Benchmarks are not necessary for this package: it does almost nothing itself,
+being just a thin wrapper that makes the go runtime public.
+Both `reflect2` and `reflect` call the same functions
+provided by the `runtime` package exposed by the go language.
+
+# unsafe safety
+
+Instead of casting `[]byte` to `sliceHeader` in your application using unsafe,
+use reflect2. This way, if `sliceHeader` changes in the future,
+only reflect2 needs to be upgraded.
+
+reflect2 tries its best to keep the implementation the same as reflect (verified by testing).
\ No newline at end of file
diff --git a/vendor/github.com/modern-go/reflect2/go_above_17.go b/vendor/github.com/modern-go/reflect2/go_above_17.go
new file mode 100644
index 0000000..5c1cea8
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/go_above_17.go
@@ -0,0 +1,8 @@
+//+build go1.7
+
+package reflect2
+
+import "unsafe"
+
+//go:linkname resolveTypeOff reflect.resolveTypeOff
+func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer
diff --git a/vendor/github.com/modern-go/reflect2/go_above_19.go b/vendor/github.com/modern-go/reflect2/go_above_19.go
new file mode 100644
index 0000000..c7e3b78
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/go_above_19.go
@@ -0,0 +1,14 @@
+//+build go1.9
+
+package reflect2
+
+import (
+	"unsafe"
+)
+
+//go:linkname makemap reflect.makemap
+func makemap(rtype unsafe.Pointer, cap int) (m unsafe.Pointer)
+
+func makeMapWithSize(rtype unsafe.Pointer, cap int) unsafe.Pointer {
+	return makemap(rtype, cap)
+}
diff --git a/vendor/github.com/modern-go/reflect2/go_below_17.go b/vendor/github.com/modern-go/reflect2/go_below_17.go
new file mode 100644
index 0000000..65a93c8
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/go_below_17.go
@@ -0,0 +1,9 @@
+//+build !go1.7
+
+package reflect2
+
+import "unsafe"
+
+func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
+	return nil
+}
diff --git a/vendor/github.com/modern-go/reflect2/go_below_19.go b/vendor/github.com/modern-go/reflect2/go_below_19.go
new file mode 100644
index 0000000..b050ef7
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/go_below_19.go
@@ -0,0 +1,14 @@
+//+build !go1.9
+
+package reflect2
+
+import (
+	"unsafe"
+)
+
+//go:linkname makemap reflect.makemap
+func makemap(rtype unsafe.Pointer) (m unsafe.Pointer)
+
+func makeMapWithSize(rtype unsafe.Pointer, cap int) unsafe.Pointer {
+	return makemap(rtype)
+}
diff --git a/vendor/github.com/modern-go/reflect2/reflect2.go b/vendor/github.com/modern-go/reflect2/reflect2.go
new file mode 100644
index 0000000..63b49c7
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/reflect2.go
@@ -0,0 +1,298 @@
+package reflect2
+
+import (
+	"github.com/modern-go/concurrent"
+	"reflect"
+	"unsafe"
+)
+
+type Type interface {
+	Kind() reflect.Kind
+	// New returns a pointer to a newly allocated value of this type
+	New() interface{}
+	// UnsafeNew returns the newly allocated space as an unsafe.Pointer
+	UnsafeNew() unsafe.Pointer
+	// PackEFace packs an unsafe pointer to the data into an interface{} of the pointer type
+	PackEFace(ptr unsafe.Pointer) interface{}
+	// Indirect dereferences an object holding a pointer to this type
+	Indirect(obj interface{}) interface{}
+	// UnsafeIndirect dereferences an unsafe pointer to this type
+	UnsafeIndirect(ptr unsafe.Pointer) interface{}
+	// Type1 returns the underlying reflect.Type
+	Type1() reflect.Type
+	Implements(thatType Type) bool
+	String() string
+	RType() uintptr
+	// LikePtr reports whether an interface{} of this type has pointer-like behavior
+	LikePtr() bool
+	IsNullable() bool
+	IsNil(obj interface{}) bool
+	UnsafeIsNil(ptr unsafe.Pointer) bool
+	Set(obj interface{}, val interface{})
+	UnsafeSet(ptr unsafe.Pointer, val unsafe.Pointer)
+	AssignableTo(anotherType Type) bool
+}
+
+type ListType interface {
+	Type
+	Elem() Type
+	SetIndex(obj interface{}, index int, elem interface{})
+	UnsafeSetIndex(obj unsafe.Pointer, index int, elem unsafe.Pointer)
+	GetIndex(obj interface{}, index int) interface{}
+	UnsafeGetIndex(obj unsafe.Pointer, index int) unsafe.Pointer
+}
+
+type ArrayType interface {
+	ListType
+	Len() int
+}
+
+type SliceType interface {
+	ListType
+	MakeSlice(length int, cap int) interface{}
+	UnsafeMakeSlice(length int, cap int) unsafe.Pointer
+	Grow(obj interface{}, newLength int)
+	UnsafeGrow(ptr unsafe.Pointer, newLength int)
+	Append(obj interface{}, elem interface{})
+	UnsafeAppend(obj unsafe.Pointer, elem unsafe.Pointer)
+	LengthOf(obj interface{}) int
+	UnsafeLengthOf(ptr unsafe.Pointer) int
+	SetNil(obj interface{})
+	UnsafeSetNil(ptr unsafe.Pointer)
+	Cap(obj interface{}) int
+	UnsafeCap(ptr unsafe.Pointer) int
+}
+
+type StructType interface {
+	Type
+	NumField() int
+	Field(i int) StructField
+	FieldByName(name string) StructField
+	FieldByIndex(index []int) StructField
+	FieldByNameFunc(match func(string) bool) StructField
+}
+
+type StructField interface {
+	Offset() uintptr
+	Name() string
+	PkgPath() string
+	Type() Type
+	Tag() reflect.StructTag
+	Index() []int
+	Anonymous() bool
+	Set(obj interface{}, value interface{})
+	UnsafeSet(obj unsafe.Pointer, value unsafe.Pointer)
+	Get(obj interface{}) interface{}
+	UnsafeGet(obj unsafe.Pointer) unsafe.Pointer
+}
+
+type MapType interface {
+	Type
+	Key() Type
+	Elem() Type
+	MakeMap(cap int) interface{}
+	UnsafeMakeMap(cap int) unsafe.Pointer
+	SetIndex(obj interface{}, key interface{}, elem interface{})
+	UnsafeSetIndex(obj unsafe.Pointer, key unsafe.Pointer, elem unsafe.Pointer)
+	TryGetIndex(obj interface{}, key interface{}) (interface{}, bool)
+	GetIndex(obj interface{}, key interface{}) interface{}
+	UnsafeGetIndex(obj unsafe.Pointer, key unsafe.Pointer) unsafe.Pointer
+	Iterate(obj interface{}) MapIterator
+	UnsafeIterate(obj unsafe.Pointer) MapIterator
+}
+
+type MapIterator interface {
+	HasNext() bool
+	Next() (key interface{}, elem interface{})
+	UnsafeNext() (key unsafe.Pointer, elem unsafe.Pointer)
+}
+
+type PtrType interface {
+	Type
+	Elem() Type
+}
+
+type InterfaceType interface {
+	NumMethod() int
+}
+
+type Config struct {
+	UseSafeImplementation bool
+}
+
+type API interface {
+	TypeOf(obj interface{}) Type
+	Type2(type1 reflect.Type) Type
+}
+
+var ConfigUnsafe = Config{UseSafeImplementation: false}.Froze()
+var ConfigSafe = Config{UseSafeImplementation: true}.Froze()
+
+type frozenConfig struct {
+	useSafeImplementation bool
+	cache                 *concurrent.Map
+}
+
+func (cfg Config) Froze() *frozenConfig {
+	return &frozenConfig{
+		useSafeImplementation: cfg.UseSafeImplementation,
+		cache:                 concurrent.NewMap(),
+	}
+}
+
+func (cfg *frozenConfig) TypeOf(obj interface{}) Type {
+	cacheKey := uintptr(unpackEFace(obj).rtype)
+	typeObj, found := cfg.cache.Load(cacheKey)
+	if found {
+		return typeObj.(Type)
+	}
+	return cfg.Type2(reflect.TypeOf(obj))
+}
+
+func (cfg *frozenConfig) Type2(type1 reflect.Type) Type {
+	if type1 == nil {
+		return nil
+	}
+	cacheKey := uintptr(unpackEFace(type1).data)
+	typeObj, found := cfg.cache.Load(cacheKey)
+	if found {
+		return typeObj.(Type)
+	}
+	type2 := cfg.wrapType(type1)
+	cfg.cache.Store(cacheKey, type2)
+	return type2
+}
+
+func (cfg *frozenConfig) wrapType(type1 reflect.Type) Type {
+	safeType := safeType{Type: type1, cfg: cfg}
+	switch type1.Kind() {
+	case reflect.Struct:
+		if cfg.useSafeImplementation {
+			return &safeStructType{safeType}
+		}
+		return newUnsafeStructType(cfg, type1)
+	case reflect.Array:
+		if cfg.useSafeImplementation {
+			return &safeSliceType{safeType}
+		}
+		return newUnsafeArrayType(cfg, type1)
+	case reflect.Slice:
+		if cfg.useSafeImplementation {
+			return &safeSliceType{safeType}
+		}
+		return newUnsafeSliceType(cfg, type1)
+	case reflect.Map:
+		if cfg.useSafeImplementation {
+			return &safeMapType{safeType}
+		}
+		return newUnsafeMapType(cfg, type1)
+	case reflect.Ptr, reflect.Chan, reflect.Func:
+		if cfg.useSafeImplementation {
+			return &safeMapType{safeType}
+		}
+		return newUnsafePtrType(cfg, type1)
+	case reflect.Interface:
+		if cfg.useSafeImplementation {
+			return &safeMapType{safeType}
+		}
+		if type1.NumMethod() == 0 {
+			return newUnsafeEFaceType(cfg, type1)
+		}
+		return newUnsafeIFaceType(cfg, type1)
+	default:
+		if cfg.useSafeImplementation {
+			return &safeType
+		}
+		return newUnsafeType(cfg, type1)
+	}
+}
+
+func TypeOf(obj interface{}) Type {
+	return ConfigUnsafe.TypeOf(obj)
+}
+
+func TypeOfPtr(obj interface{}) PtrType {
+	return TypeOf(obj).(PtrType)
+}
+
+func Type2(type1 reflect.Type) Type {
+	if type1 == nil {
+		return nil
+	}
+	return ConfigUnsafe.Type2(type1)
+}
+
+func PtrTo(typ Type) Type {
+	return Type2(reflect.PtrTo(typ.Type1()))
+}
+
+func PtrOf(obj interface{}) unsafe.Pointer {
+	return unpackEFace(obj).data
+}
+
+func RTypeOf(obj interface{}) uintptr {
+	return uintptr(unpackEFace(obj).rtype)
+}
+
+func IsNil(obj interface{}) bool {
+	if obj == nil {
+		return true
+	}
+	return unpackEFace(obj).data == nil
+}
+
+func IsNullable(kind reflect.Kind) bool {
+	switch kind {
+	case reflect.Ptr, reflect.Map, reflect.Chan, reflect.Func, reflect.Slice, reflect.Interface:
+		return true
+	}
+	return false
+}
+
+func likePtrKind(kind reflect.Kind) bool {
+	switch kind {
+	case reflect.Ptr, reflect.Map, reflect.Chan, reflect.Func:
+		return true
+	}
+	return false
+}
+
+func likePtrType(typ reflect.Type) bool {
+	if likePtrKind(typ.Kind()) {
+		return true
+	}
+	if typ.Kind() == reflect.Struct {
+		if typ.NumField() != 1 {
+			return false
+		}
+		return likePtrType(typ.Field(0).Type)
+	}
+	if typ.Kind() == reflect.Array {
+		if typ.Len() != 1 {
+			return false
+		}
+		return likePtrType(typ.Elem())
+	}
+	return false
+}
+
+// NoEscape hides a pointer from escape analysis. NoEscape is
+// the identity function, but escape analysis doesn't think the
+// output depends on the input. NoEscape is inlined and currently
+// compiles down to zero instructions.
+// USE CAREFULLY!
+//go:nosplit
+func NoEscape(p unsafe.Pointer) unsafe.Pointer {
+	x := uintptr(p)
+	return unsafe.Pointer(x ^ 0)
+}
+
+func UnsafeCastString(str string) []byte {
+	stringHeader := (*reflect.StringHeader)(unsafe.Pointer(&str))
+	sliceHeader := &reflect.SliceHeader{
+		Data: stringHeader.Data,
+		Cap:  stringHeader.Len,
+		Len:  stringHeader.Len,
+	}
+	return *(*[]byte)(unsafe.Pointer(sliceHeader))
+}
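A minimal sketch of the entry points defined above (the package-level helpers delegate to ConfigUnsafe; ConfigSafe exposes the same API backed by reflect.Value):

```go
package main

import (
	"fmt"

	"github.com/modern-go/reflect2"
)

func main() {
	// Set works through pointers: both the target and the new
	// value are passed as *int here.
	valType := reflect2.TypeOf(1)
	i := 1
	j := 10
	valType.Set(&i, &j)
	fmt.Println(i) // 10

	// The safe configuration routes the same call through reflect.Value.
	reflect2.ConfigSafe.TypeOf(1).Set(&i, &j)

	// IsNil treats a nil pointer wrapped in a non-nil interface as nil.
	var p *int
	fmt.Println(reflect2.IsNil(p)) // true
}
```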
diff --git a/vendor/github.com/modern-go/reflect2/reflect2_amd64.s b/vendor/github.com/modern-go/reflect2/reflect2_amd64.s
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/reflect2_amd64.s
diff --git a/vendor/github.com/modern-go/reflect2/reflect2_kind.go b/vendor/github.com/modern-go/reflect2/reflect2_kind.go
new file mode 100644
index 0000000..62f299e
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/reflect2_kind.go
@@ -0,0 +1,30 @@
+package reflect2
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+// DefaultTypeOfKind returns the non-aliased default type for the kind
+func DefaultTypeOfKind(kind reflect.Kind) Type {
+	return kindTypes[kind]
+}
+
+var kindTypes = map[reflect.Kind]Type{
+	reflect.Bool:          TypeOf(true),
+	reflect.Uint8:         TypeOf(uint8(0)),
+	reflect.Int8:          TypeOf(int8(0)),
+	reflect.Uint16:        TypeOf(uint16(0)),
+	reflect.Int16:         TypeOf(int16(0)),
+	reflect.Uint32:        TypeOf(uint32(0)),
+	reflect.Int32:         TypeOf(int32(0)),
+	reflect.Uint64:        TypeOf(uint64(0)),
+	reflect.Int64:         TypeOf(int64(0)),
+	reflect.Uint:          TypeOf(uint(0)),
+	reflect.Int:           TypeOf(int(0)),
+	reflect.Float32:       TypeOf(float32(0)),
+	reflect.Float64:       TypeOf(float64(0)),
+	reflect.Uintptr:       TypeOf(uintptr(0)),
+	reflect.String:        TypeOf(""),
+	reflect.UnsafePointer: TypeOf(unsafe.Pointer(nil)),
+}
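A short sketch of the lookup table above; aliases such as byte and rune resolve through their non-aliased kinds uint8 and int32:

```go
package main

import (
	"fmt"
	"reflect"

	"github.com/modern-go/reflect2"
)

func main() {
	fmt.Println(reflect2.DefaultTypeOfKind(reflect.Int).String())   // int
	fmt.Println(reflect2.DefaultTypeOfKind(reflect.Uint8).String()) // uint8
}
```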
diff --git a/vendor/github.com/modern-go/reflect2/relfect2_386.s b/vendor/github.com/modern-go/reflect2/relfect2_386.s
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/relfect2_386.s
diff --git a/vendor/github.com/modern-go/reflect2/relfect2_amd64p32.s b/vendor/github.com/modern-go/reflect2/relfect2_amd64p32.s
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/relfect2_amd64p32.s
diff --git a/vendor/github.com/modern-go/reflect2/relfect2_arm.s b/vendor/github.com/modern-go/reflect2/relfect2_arm.s
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/relfect2_arm.s
diff --git a/vendor/github.com/modern-go/reflect2/relfect2_arm64.s b/vendor/github.com/modern-go/reflect2/relfect2_arm64.s
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/relfect2_arm64.s
diff --git a/vendor/github.com/modern-go/reflect2/relfect2_mips64x.s b/vendor/github.com/modern-go/reflect2/relfect2_mips64x.s
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/relfect2_mips64x.s
diff --git a/vendor/github.com/modern-go/reflect2/relfect2_mipsx.s b/vendor/github.com/modern-go/reflect2/relfect2_mipsx.s
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/relfect2_mipsx.s
diff --git a/vendor/github.com/modern-go/reflect2/relfect2_ppc64x.s b/vendor/github.com/modern-go/reflect2/relfect2_ppc64x.s
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/relfect2_ppc64x.s
diff --git a/vendor/github.com/modern-go/reflect2/relfect2_s390x.s b/vendor/github.com/modern-go/reflect2/relfect2_s390x.s
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/relfect2_s390x.s
diff --git a/vendor/github.com/modern-go/reflect2/safe_field.go b/vendor/github.com/modern-go/reflect2/safe_field.go
new file mode 100644
index 0000000..d4ba1f4
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/safe_field.go
@@ -0,0 +1,58 @@
+package reflect2
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+type safeField struct {
+	reflect.StructField
+}
+
+func (field *safeField) Offset() uintptr {
+	return field.StructField.Offset
+}
+
+func (field *safeField) Name() string {
+	return field.StructField.Name
+}
+
+func (field *safeField) PkgPath() string {
+	return field.StructField.PkgPath
+}
+
+func (field *safeField) Type() Type {
+	panic("not implemented")
+}
+
+func (field *safeField) Tag() reflect.StructTag {
+	return field.StructField.Tag
+}
+
+func (field *safeField) Index() []int {
+	return field.StructField.Index
+}
+
+func (field *safeField) Anonymous() bool {
+	return field.StructField.Anonymous
+}
+
+func (field *safeField) Set(obj interface{}, value interface{}) {
+	val := reflect.ValueOf(obj).Elem()
+	val.FieldByIndex(field.Index()).Set(reflect.ValueOf(value).Elem())
+}
+
+func (field *safeField) UnsafeSet(obj unsafe.Pointer, value unsafe.Pointer) {
+	panic("unsafe operation is not supported")
+}
+
+func (field *safeField) Get(obj interface{}) interface{} {
+	val := reflect.ValueOf(obj).Elem().FieldByIndex(field.Index())
+	ptr := reflect.New(val.Type())
+	ptr.Elem().Set(val)
+	return ptr.Interface()
+}
+
+func (field *safeField) UnsafeGet(obj unsafe.Pointer) unsafe.Pointer {
+	panic("does not support unsafe operation")
+}
diff --git a/vendor/github.com/modern-go/reflect2/safe_map.go b/vendor/github.com/modern-go/reflect2/safe_map.go
new file mode 100644
index 0000000..8836220
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/safe_map.go
@@ -0,0 +1,101 @@
+package reflect2
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+type safeMapType struct {
+	safeType
+}
+
+func (type2 *safeMapType) Key() Type {
+	return type2.safeType.cfg.Type2(type2.Type.Key())
+}
+
+func (type2 *safeMapType) MakeMap(cap int) interface{} {
+	ptr := reflect.New(type2.Type)
+	ptr.Elem().Set(reflect.MakeMap(type2.Type))
+	return ptr.Interface()
+}
+
+func (type2 *safeMapType) UnsafeMakeMap(cap int) unsafe.Pointer {
+	panic("does not support unsafe operation")
+}
+
+func (type2 *safeMapType) SetIndex(obj interface{}, key interface{}, elem interface{}) {
+	keyVal := reflect.ValueOf(key)
+	elemVal := reflect.ValueOf(elem)
+	val := reflect.ValueOf(obj)
+	val.Elem().SetMapIndex(keyVal.Elem(), elemVal.Elem())
+}
+
+func (type2 *safeMapType) UnsafeSetIndex(obj unsafe.Pointer, key unsafe.Pointer, elem unsafe.Pointer) {
+	panic("does not support unsafe operation")
+}
+
+func (type2 *safeMapType) TryGetIndex(obj interface{}, key interface{}) (interface{}, bool) {
+	keyVal := reflect.ValueOf(key)
+	if key == nil {
+		keyVal = reflect.New(type2.Type.Key()).Elem()
+	}
+	val := reflect.ValueOf(obj).MapIndex(keyVal)
+	if !val.IsValid() {
+		return nil, false
+	}
+	return val.Interface(), true
+}
+
+func (type2 *safeMapType) GetIndex(obj interface{}, key interface{}) interface{} {
+	val := reflect.ValueOf(obj).Elem()
+	keyVal := reflect.ValueOf(key).Elem()
+	elemVal := val.MapIndex(keyVal)
+	if !elemVal.IsValid() {
+		ptr := reflect.New(reflect.PtrTo(val.Type().Elem()))
+		return ptr.Elem().Interface()
+	}
+	ptr := reflect.New(elemVal.Type())
+	ptr.Elem().Set(elemVal)
+	return ptr.Interface()
+}
+
+func (type2 *safeMapType) UnsafeGetIndex(obj unsafe.Pointer, key unsafe.Pointer) unsafe.Pointer {
+	panic("does not support unsafe operation")
+}
+
+func (type2 *safeMapType) Iterate(obj interface{}) MapIterator {
+	m := reflect.ValueOf(obj).Elem()
+	return &safeMapIterator{
+		m:    m,
+		keys: m.MapKeys(),
+	}
+}
+
+func (type2 *safeMapType) UnsafeIterate(obj unsafe.Pointer) MapIterator {
+	panic("does not support unsafe operation")
+}
+
+type safeMapIterator struct {
+	i    int
+	m    reflect.Value
+	keys []reflect.Value
+}
+
+func (iter *safeMapIterator) HasNext() bool {
+	return iter.i != len(iter.keys)
+}
+
+func (iter *safeMapIterator) Next() (interface{}, interface{}) {
+	key := iter.keys[iter.i]
+	elem := iter.m.MapIndex(key)
+	iter.i += 1
+	keyPtr := reflect.New(key.Type())
+	keyPtr.Elem().Set(key)
+	elemPtr := reflect.New(elem.Type())
+	elemPtr.Elem().Set(elem)
+	return keyPtr.Interface(), elemPtr.Interface()
+}
+
+func (iter *safeMapIterator) UnsafeNext() (unsafe.Pointer, unsafe.Pointer) {
+	panic("does not support unsafe operation")
+}
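A sketch of the safe iterator above. The safe implementation expects a pointer to the map, and Next returns pointers to copies of the key and element (*string and *int for this hypothetical map):

```go
package main

import (
	"fmt"
	"reflect"

	"github.com/modern-go/reflect2"
)

func main() {
	obj := map[string]int{"a": 1, "b": 2}
	mapType := reflect2.ConfigSafe.Type2(reflect.TypeOf(obj)).(reflect2.MapType)
	iter := mapType.Iterate(&obj)
	for iter.HasNext() {
		key, elem := iter.Next()
		fmt.Println(*(key.(*string)), *(elem.(*int)))
	}
}
```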
diff --git a/vendor/github.com/modern-go/reflect2/safe_slice.go b/vendor/github.com/modern-go/reflect2/safe_slice.go
new file mode 100644
index 0000000..bcce6fd
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/safe_slice.go
@@ -0,0 +1,92 @@
+package reflect2
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+type safeSliceType struct {
+	safeType
+}
+
+func (type2 *safeSliceType) SetIndex(obj interface{}, index int, value interface{}) {
+	val := reflect.ValueOf(obj).Elem()
+	elem := reflect.ValueOf(value).Elem()
+	val.Index(index).Set(elem)
+}
+
+func (type2 *safeSliceType) UnsafeSetIndex(obj unsafe.Pointer, index int, value unsafe.Pointer) {
+	panic("does not support unsafe operation")
+}
+
+func (type2 *safeSliceType) GetIndex(obj interface{}, index int) interface{} {
+	val := reflect.ValueOf(obj).Elem()
+	elem := val.Index(index)
+	ptr := reflect.New(elem.Type())
+	ptr.Elem().Set(elem)
+	return ptr.Interface()
+}
+
+func (type2 *safeSliceType) UnsafeGetIndex(obj unsafe.Pointer, index int) unsafe.Pointer {
+	panic("does not support unsafe operation")
+}
+
+func (type2 *safeSliceType) MakeSlice(length int, cap int) interface{} {
+	val := reflect.MakeSlice(type2.Type, length, cap)
+	ptr := reflect.New(val.Type())
+	ptr.Elem().Set(val)
+	return ptr.Interface()
+}
+
+func (type2 *safeSliceType) UnsafeMakeSlice(length int, cap int) unsafe.Pointer {
+	panic("does not support unsafe operation")
+}
+
+func (type2 *safeSliceType) Grow(obj interface{}, newLength int) {
+	oldCap := type2.Cap(obj)
+	oldSlice := reflect.ValueOf(obj).Elem()
+	delta := newLength - oldCap
+	deltaVals := make([]reflect.Value, delta)
+	newSlice := reflect.Append(oldSlice, deltaVals...)
+	oldSlice.Set(newSlice)
+}
+
+func (type2 *safeSliceType) UnsafeGrow(ptr unsafe.Pointer, newLength int) {
+	panic("does not support unsafe operation")
+}
+
+func (type2 *safeSliceType) Append(obj interface{}, elem interface{}) {
+	val := reflect.ValueOf(obj).Elem()
+	elemVal := reflect.ValueOf(elem).Elem()
+	newVal := reflect.Append(val, elemVal)
+	val.Set(newVal)
+}
+
+func (type2 *safeSliceType) UnsafeAppend(obj unsafe.Pointer, elem unsafe.Pointer) {
+	panic("does not support unsafe operation")
+}
+
+func (type2 *safeSliceType) SetNil(obj interface{}) {
+	val := reflect.ValueOf(obj).Elem()
+	val.Set(reflect.Zero(val.Type()))
+}
+
+func (type2 *safeSliceType) UnsafeSetNil(ptr unsafe.Pointer) {
+	panic("does not support unsafe operation")
+}
+
+func (type2 *safeSliceType) LengthOf(obj interface{}) int {
+	return reflect.ValueOf(obj).Elem().Len()
+}
+
+func (type2 *safeSliceType) UnsafeLengthOf(ptr unsafe.Pointer) int {
+	panic("does not support unsafe operation")
+}
+
+func (type2 *safeSliceType) Cap(obj interface{}) int {
+	return reflect.ValueOf(obj).Elem().Cap()
+}
+
+func (type2 *safeSliceType) UnsafeCap(ptr unsafe.Pointer) int {
+	panic("does not support unsafe operation")
+}
diff --git a/vendor/github.com/modern-go/reflect2/safe_struct.go b/vendor/github.com/modern-go/reflect2/safe_struct.go
new file mode 100644
index 0000000..e5fb9b3
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/safe_struct.go
@@ -0,0 +1,29 @@
+package reflect2
+
+type safeStructType struct {
+	safeType
+}
+
+func (type2 *safeStructType) FieldByName(name string) StructField {
+	field, found := type2.Type.FieldByName(name)
+	if !found {
+		panic("field " + name + " not found")
+	}
+	return &safeField{StructField: field}
+}
+
+func (type2 *safeStructType) Field(i int) StructField {
+	return &safeField{StructField: type2.Type.Field(i)}
+}
+
+func (type2 *safeStructType) FieldByIndex(index []int) StructField {
+	return &safeField{StructField: type2.Type.FieldByIndex(index)}
+}
+
+func (type2 *safeStructType) FieldByNameFunc(match func(string) bool) StructField {
+	field, found := type2.Type.FieldByNameFunc(match)
+	if !found {
+		panic("field match condition not found in " + type2.Type.String())
+	}
+	return &safeField{StructField: field}
+}
diff --git a/vendor/github.com/modern-go/reflect2/safe_type.go b/vendor/github.com/modern-go/reflect2/safe_type.go
new file mode 100644
index 0000000..ee4e7bb
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/safe_type.go
@@ -0,0 +1,78 @@
+package reflect2
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+type safeType struct {
+	reflect.Type
+	cfg *frozenConfig
+}
+
+func (type2 *safeType) New() interface{} {
+	return reflect.New(type2.Type).Interface()
+}
+
+func (type2 *safeType) UnsafeNew() unsafe.Pointer {
+	panic("does not support unsafe operation")
+}
+
+func (type2 *safeType) Elem() Type {
+	return type2.cfg.Type2(type2.Type.Elem())
+}
+
+func (type2 *safeType) Type1() reflect.Type {
+	return type2.Type
+}
+
+func (type2 *safeType) PackEFace(ptr unsafe.Pointer) interface{} {
+	panic("does not support unsafe operation")
+}
+
+func (type2 *safeType) Implements(thatType Type) bool {
+	return type2.Type.Implements(thatType.Type1())
+}
+
+func (type2 *safeType) RType() uintptr {
+	panic("does not support unsafe operation")
+}
+
+func (type2 *safeType) Indirect(obj interface{}) interface{} {
+	return reflect.Indirect(reflect.ValueOf(obj)).Interface()
+}
+
+func (type2 *safeType) UnsafeIndirect(ptr unsafe.Pointer) interface{} {
+	panic("does not support unsafe operation")
+}
+
+func (type2 *safeType) LikePtr() bool {
+	panic("does not support unsafe operation")
+}
+
+func (type2 *safeType) IsNullable() bool {
+	return IsNullable(type2.Kind())
+}
+
+func (type2 *safeType) IsNil(obj interface{}) bool {
+	if obj == nil {
+		return true
+	}
+	return reflect.ValueOf(obj).Elem().IsNil()
+}
+
+func (type2 *safeType) UnsafeIsNil(ptr unsafe.Pointer) bool {
+	panic("does not support unsafe operation")
+}
+
+func (type2 *safeType) Set(obj interface{}, val interface{}) {
+	reflect.ValueOf(obj).Elem().Set(reflect.ValueOf(val).Elem())
+}
+
+func (type2 *safeType) UnsafeSet(ptr unsafe.Pointer, val unsafe.Pointer) {
+	panic("does not support unsafe operation")
+}
+
+func (type2 *safeType) AssignableTo(anotherType Type) bool {
+	return type2.Type1().AssignableTo(anotherType.Type1())
+}
diff --git a/vendor/github.com/modern-go/reflect2/test.sh b/vendor/github.com/modern-go/reflect2/test.sh
new file mode 100755
index 0000000..3d2b976
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/test.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+
+set -e
+echo "" > coverage.txt
+
+for d in $(go list github.com/modern-go/reflect2-tests/... | grep -v vendor); do
+    go test -coverprofile=profile.out -coverpkg=github.com/modern-go/reflect2 $d
+    if [ -f profile.out ]; then
+        cat profile.out >> coverage.txt
+        rm profile.out
+    fi
+done
diff --git a/vendor/github.com/modern-go/reflect2/type_map.go b/vendor/github.com/modern-go/reflect2/type_map.go
new file mode 100644
index 0000000..6d48911
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/type_map.go
@@ -0,0 +1,103 @@
+package reflect2
+
+import (
+	"reflect"
+	"runtime"
+	"strings"
+	"unsafe"
+)
+
+// typelinks1 for 1.5 ~ 1.6
+//go:linkname typelinks1 reflect.typelinks
+func typelinks1() [][]unsafe.Pointer
+
+// typelinks2 for 1.7 ~
+//go:linkname typelinks2 reflect.typelinks
+func typelinks2() (sections []unsafe.Pointer, offset [][]int32)
+
+var types = map[string]reflect.Type{}
+var packages = map[string]map[string]reflect.Type{}
+
+func init() {
+	ver := runtime.Version()
+	if ver == "go1.5" || strings.HasPrefix(ver, "go1.5.") {
+		loadGo15Types()
+	} else if ver == "go1.6" || strings.HasPrefix(ver, "go1.6.") {
+		loadGo15Types()
+	} else {
+		loadGo17Types()
+	}
+}
+
+func loadGo15Types() {
+	var obj interface{} = reflect.TypeOf(0)
+	typePtrss := typelinks1()
+	for _, typePtrs := range typePtrss {
+		for _, typePtr := range typePtrs {
+			(*emptyInterface)(unsafe.Pointer(&obj)).word = typePtr
+			typ := obj.(reflect.Type)
+			if typ.Kind() == reflect.Ptr && typ.Elem().Kind() == reflect.Struct {
+				loadedType := typ.Elem()
+				pkgTypes := packages[loadedType.PkgPath()]
+				if pkgTypes == nil {
+					pkgTypes = map[string]reflect.Type{}
+					packages[loadedType.PkgPath()] = pkgTypes
+				}
+				types[loadedType.String()] = loadedType
+				pkgTypes[loadedType.Name()] = loadedType
+			}
+			if typ.Kind() == reflect.Slice && typ.Elem().Kind() == reflect.Ptr &&
+				typ.Elem().Elem().Kind() == reflect.Struct {
+				loadedType := typ.Elem().Elem()
+				pkgTypes := packages[loadedType.PkgPath()]
+				if pkgTypes == nil {
+					pkgTypes = map[string]reflect.Type{}
+					packages[loadedType.PkgPath()] = pkgTypes
+				}
+				types[loadedType.String()] = loadedType
+				pkgTypes[loadedType.Name()] = loadedType
+			}
+		}
+	}
+}
+
+func loadGo17Types() {
+	var obj interface{} = reflect.TypeOf(0)
+	sections, offset := typelinks2()
+	for i, offs := range offset {
+		rodata := sections[i]
+		for _, off := range offs {
+			(*emptyInterface)(unsafe.Pointer(&obj)).word = resolveTypeOff(unsafe.Pointer(rodata), off)
+			typ := obj.(reflect.Type)
+			if typ.Kind() == reflect.Ptr && typ.Elem().Kind() == reflect.Struct {
+				loadedType := typ.Elem()
+				pkgTypes := packages[loadedType.PkgPath()]
+				if pkgTypes == nil {
+					pkgTypes = map[string]reflect.Type{}
+					packages[loadedType.PkgPath()] = pkgTypes
+				}
+				types[loadedType.String()] = loadedType
+				pkgTypes[loadedType.Name()] = loadedType
+			}
+		}
+	}
+}
+
+type emptyInterface struct {
+	typ  unsafe.Pointer
+	word unsafe.Pointer
+}
+
+// TypeByName returns the type by its name, just like Class.forName in Java
+func TypeByName(typeName string) Type {
+	return Type2(types[typeName])
+}
+
+// TypeByPackageName returns the type by its package path and name
+func TypeByPackageName(pkgPath string, name string) Type {
+	pkgTypes := packages[pkgPath]
+	if pkgTypes == nil {
+		return nil
+	}
+	return Type2(pkgTypes[name])
+}
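A sketch of the name lookup above. Whether a type survives in the typelinks table depends on it actually being used, so the Point type and lookup key here are illustrative:

```go
package main

import (
	"fmt"

	"github.com/modern-go/reflect2"
)

type Point struct{ X, Y int }

func main() {
	_ = &Point{} // keep *main.Point alive for the typelinks scan
	typ := reflect2.TypeByName("main.Point")
	if typ == nil {
		fmt.Println("type not found")
		return
	}
	fmt.Println(typ.String()) // main.Point
}
```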
diff --git a/vendor/github.com/modern-go/reflect2/unsafe_array.go b/vendor/github.com/modern-go/reflect2/unsafe_array.go
new file mode 100644
index 0000000..76cbdba
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/unsafe_array.go
@@ -0,0 +1,65 @@
+package reflect2
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+type UnsafeArrayType struct {
+	unsafeType
+	elemRType  unsafe.Pointer
+	pElemRType unsafe.Pointer
+	elemSize   uintptr
+	likePtr    bool
+}
+
+func newUnsafeArrayType(cfg *frozenConfig, type1 reflect.Type) *UnsafeArrayType {
+	return &UnsafeArrayType{
+		unsafeType: *newUnsafeType(cfg, type1),
+		elemRType:  unpackEFace(type1.Elem()).data,
+		pElemRType: unpackEFace(reflect.PtrTo(type1.Elem())).data,
+		elemSize:   type1.Elem().Size(),
+		likePtr:    likePtrType(type1),
+	}
+}
+
+func (type2 *UnsafeArrayType) LikePtr() bool {
+	return type2.likePtr
+}
+
+func (type2 *UnsafeArrayType) Indirect(obj interface{}) interface{} {
+	objEFace := unpackEFace(obj)
+	assertType("Type.Indirect argument 1", type2.ptrRType, objEFace.rtype)
+	return type2.UnsafeIndirect(objEFace.data)
+}
+
+func (type2 *UnsafeArrayType) UnsafeIndirect(ptr unsafe.Pointer) interface{} {
+	if type2.likePtr {
+		return packEFace(type2.rtype, *(*unsafe.Pointer)(ptr))
+	}
+	return packEFace(type2.rtype, ptr)
+}
+
+func (type2 *UnsafeArrayType) SetIndex(obj interface{}, index int, elem interface{}) {
+	objEFace := unpackEFace(obj)
+	assertType("ArrayType.SetIndex argument 1", type2.ptrRType, objEFace.rtype)
+	elemEFace := unpackEFace(elem)
+	assertType("ArrayType.SetIndex argument 3", type2.pElemRType, elemEFace.rtype)
+	type2.UnsafeSetIndex(objEFace.data, index, elemEFace.data)
+}
+
+func (type2 *UnsafeArrayType) UnsafeSetIndex(obj unsafe.Pointer, index int, elem unsafe.Pointer) {
+	elemPtr := arrayAt(obj, index, type2.elemSize, "i < s.Len")
+	typedmemmove(type2.elemRType, elemPtr, elem)
+}
+
+func (type2 *UnsafeArrayType) GetIndex(obj interface{}, index int) interface{} {
+	objEFace := unpackEFace(obj)
+	assertType("ArrayType.GetIndex argument 1", type2.ptrRType, objEFace.rtype)
+	elemPtr := type2.UnsafeGetIndex(objEFace.data, index)
+	return packEFace(type2.pElemRType, elemPtr)
+}
+
+func (type2 *UnsafeArrayType) UnsafeGetIndex(obj unsafe.Pointer, index int) unsafe.Pointer {
+	return arrayAt(obj, index, type2.elemSize, "i < s.Len")
+}
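A sketch of the array accessors above, assuming UnsafeArrayType satisfies the ArrayType interface through its embedded unsafeType (defined elsewhere in this change). Both the array and the element are passed by pointer, matching the rtype assertions in this file:

```go
package main

import (
	"fmt"

	"github.com/modern-go/reflect2"
)

func main() {
	arr := [3]int{1, 2, 3}
	arrType := reflect2.TypeOf(arr).(reflect2.ArrayType)
	elem := 42
	arrType.SetIndex(&arr, 1, &elem)
	v := arrType.GetIndex(&arr, 1) // *int pointing into the array
	fmt.Println(*(v.(*int)), arr)  // 42 [1 42 3]
}
```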
diff --git a/vendor/github.com/modern-go/reflect2/unsafe_eface.go b/vendor/github.com/modern-go/reflect2/unsafe_eface.go
new file mode 100644
index 0000000..805010f
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/unsafe_eface.go
@@ -0,0 +1,59 @@
+package reflect2
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+type eface struct {
+	rtype unsafe.Pointer
+	data  unsafe.Pointer
+}
+
+func unpackEFace(obj interface{}) *eface {
+	return (*eface)(unsafe.Pointer(&obj))
+}
+
+func packEFace(rtype unsafe.Pointer, data unsafe.Pointer) interface{} {
+	var i interface{}
+	e := (*eface)(unsafe.Pointer(&i))
+	e.rtype = rtype
+	e.data = data
+	return i
+}
+
+type UnsafeEFaceType struct {
+	unsafeType
+}
+
+func newUnsafeEFaceType(cfg *frozenConfig, type1 reflect.Type) *UnsafeEFaceType {
+	return &UnsafeEFaceType{
+		unsafeType: *newUnsafeType(cfg, type1),
+	}
+}
+
+func (type2 *UnsafeEFaceType) IsNil(obj interface{}) bool {
+	if obj == nil {
+		return true
+	}
+	objEFace := unpackEFace(obj)
+	assertType("Type.IsNil argument 1", type2.ptrRType, objEFace.rtype)
+	return type2.UnsafeIsNil(objEFace.data)
+}
+
+func (type2 *UnsafeEFaceType) UnsafeIsNil(ptr unsafe.Pointer) bool {
+	if ptr == nil {
+		return true
+	}
+	return unpackEFace(*(*interface{})(ptr)).data == nil
+}
+
+func (type2 *UnsafeEFaceType) Indirect(obj interface{}) interface{} {
+	objEFace := unpackEFace(obj)
+	assertType("Type.Indirect argument 1", type2.ptrRType, objEFace.rtype)
+	return type2.UnsafeIndirect(objEFace.data)
+}
+
+func (type2 *UnsafeEFaceType) UnsafeIndirect(ptr unsafe.Pointer) interface{} {
+	return *(*interface{})(ptr)
+}
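A package-internal sketch of the eface round trip above; unpackEFace and packEFace are unexported, so this hypothetical helper only compiles inside reflect2:

```go
// hypothetical helper inside package reflect2
func efaceRoundTrip() {
	i := 42
	var obj interface{} = &i
	e := unpackEFace(obj) // e.rtype describes *int, e.data points at i
	same := packEFace(e.rtype, e.data)
	println(*(same.(*int))) // 42
}
```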
diff --git a/vendor/github.com/modern-go/reflect2/unsafe_field.go b/vendor/github.com/modern-go/reflect2/unsafe_field.go
new file mode 100644
index 0000000..5eb5313
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/unsafe_field.go
@@ -0,0 +1,74 @@
+package reflect2
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+type UnsafeStructField struct {
+	reflect.StructField
+	structType *UnsafeStructType
+	rtype      unsafe.Pointer
+	ptrRType   unsafe.Pointer
+}
+
+func newUnsafeStructField(structType *UnsafeStructType, structField reflect.StructField) *UnsafeStructField {
+	return &UnsafeStructField{
+		StructField: structField,
+		rtype:       unpackEFace(structField.Type).data,
+		ptrRType:    unpackEFace(reflect.PtrTo(structField.Type)).data,
+		structType:  structType,
+	}
+}
+
+func (field *UnsafeStructField) Offset() uintptr {
+	return field.StructField.Offset
+}
+
+func (field *UnsafeStructField) Name() string {
+	return field.StructField.Name
+}
+
+func (field *UnsafeStructField) PkgPath() string {
+	return field.StructField.PkgPath
+}
+
+func (field *UnsafeStructField) Type() Type {
+	return field.structType.cfg.Type2(field.StructField.Type)
+}
+
+func (field *UnsafeStructField) Tag() reflect.StructTag {
+	return field.StructField.Tag
+}
+
+func (field *UnsafeStructField) Index() []int {
+	return field.StructField.Index
+}
+
+func (field *UnsafeStructField) Anonymous() bool {
+	return field.StructField.Anonymous
+}
+
+func (field *UnsafeStructField) Set(obj interface{}, value interface{}) {
+	objEFace := unpackEFace(obj)
+	assertType("StructField.SetIndex argument 1", field.structType.ptrRType, objEFace.rtype)
+	valueEFace := unpackEFace(value)
+	assertType("StructField.SetIndex argument 2", field.ptrRType, valueEFace.rtype)
+	field.UnsafeSet(objEFace.data, valueEFace.data)
+}
+
+func (field *UnsafeStructField) UnsafeSet(obj unsafe.Pointer, value unsafe.Pointer) {
+	fieldPtr := add(obj, field.StructField.Offset, "same as non-reflect &v.field")
+	typedmemmove(field.rtype, fieldPtr, value)
+}
+
+func (field *UnsafeStructField) Get(obj interface{}) interface{} {
+	objEFace := unpackEFace(obj)
+	assertType("StructField.GetIndex argument 1", field.structType.ptrRType, objEFace.rtype)
+	value := field.UnsafeGet(objEFace.data)
+	return packEFace(field.ptrRType, value)
+}
+
+func (field *UnsafeStructField) UnsafeGet(obj unsafe.Pointer) unsafe.Pointer {
+	return add(obj, field.StructField.Offset, "same as non-reflect &v.field")
+}
diff --git a/vendor/github.com/modern-go/reflect2/unsafe_iface.go b/vendor/github.com/modern-go/reflect2/unsafe_iface.go
new file mode 100644
index 0000000..b601955
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/unsafe_iface.go
@@ -0,0 +1,64 @@
+package reflect2
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+type iface struct {
+	itab *itab
+	data unsafe.Pointer
+}
+
+type itab struct {
+	ignore unsafe.Pointer
+	rtype  unsafe.Pointer
+}
+
+func IFaceToEFace(ptr unsafe.Pointer) interface{} {
+	iface := (*iface)(ptr)
+	if iface.itab == nil {
+		return nil
+	}
+	return packEFace(iface.itab.rtype, iface.data)
+}
+
+type UnsafeIFaceType struct {
+	unsafeType
+}
+
+func newUnsafeIFaceType(cfg *frozenConfig, type1 reflect.Type) *UnsafeIFaceType {
+	return &UnsafeIFaceType{
+		unsafeType: *newUnsafeType(cfg, type1),
+	}
+}
+
+func (type2 *UnsafeIFaceType) Indirect(obj interface{}) interface{} {
+	objEFace := unpackEFace(obj)
+	assertType("Type.Indirect argument 1", type2.ptrRType, objEFace.rtype)
+	return type2.UnsafeIndirect(objEFace.data)
+}
+
+func (type2 *UnsafeIFaceType) UnsafeIndirect(ptr unsafe.Pointer) interface{} {
+	return IFaceToEFace(ptr)
+}
+
+func (type2 *UnsafeIFaceType) IsNil(obj interface{}) bool {
+	if obj == nil {
+		return true
+	}
+	objEFace := unpackEFace(obj)
+	assertType("Type.IsNil argument 1", type2.ptrRType, objEFace.rtype)
+	return type2.UnsafeIsNil(objEFace.data)
+}
+
+func (type2 *UnsafeIFaceType) UnsafeIsNil(ptr unsafe.Pointer) bool {
+	if ptr == nil {
+		return true
+	}
+	iface := (*iface)(ptr)
+	if iface.itab == nil {
+		return true
+	}
+	return false
+}
diff --git a/vendor/github.com/modern-go/reflect2/unsafe_link.go b/vendor/github.com/modern-go/reflect2/unsafe_link.go
new file mode 100644
index 0000000..57229c8
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/unsafe_link.go
@@ -0,0 +1,70 @@
+package reflect2
+
+import "unsafe"
+
+//go:linkname unsafe_New reflect.unsafe_New
+func unsafe_New(rtype unsafe.Pointer) unsafe.Pointer
+
+//go:linkname typedmemmove reflect.typedmemmove
+func typedmemmove(rtype unsafe.Pointer, dst, src unsafe.Pointer)
+
+//go:linkname unsafe_NewArray reflect.unsafe_NewArray
+func unsafe_NewArray(rtype unsafe.Pointer, length int) unsafe.Pointer
+
+// typedslicecopy copies a slice of elemType values from src to dst,
+// returning the number of elements copied.
+//go:linkname typedslicecopy reflect.typedslicecopy
+//go:noescape
+func typedslicecopy(elemType unsafe.Pointer, dst, src sliceHeader) int
+
+//go:linkname mapassign reflect.mapassign
+//go:noescape
+func mapassign(rtype unsafe.Pointer, m unsafe.Pointer, key, val unsafe.Pointer)
+
+//go:linkname mapaccess reflect.mapaccess
+//go:noescape
+func mapaccess(rtype unsafe.Pointer, m unsafe.Pointer, key unsafe.Pointer) (val unsafe.Pointer)
+
+// m escapes into the return value, but the caller of mapiterinit
+// doesn't let the return value escape.
+//go:noescape
+//go:linkname mapiterinit reflect.mapiterinit
+func mapiterinit(rtype unsafe.Pointer, m unsafe.Pointer) *hiter
+
+//go:noescape
+//go:linkname mapiternext reflect.mapiternext
+func mapiternext(it *hiter)
+
+//go:linkname ifaceE2I reflect.ifaceE2I
+func ifaceE2I(rtype unsafe.Pointer, src interface{}, dst unsafe.Pointer)
+
+// A hash iteration structure.
+// If you modify hiter, also change cmd/internal/gc/reflect.go to indicate
+// the layout of this structure.
+type hiter struct {
+	key   unsafe.Pointer // Must be in first position.  Write nil to indicate iteration end (see cmd/internal/gc/range.go).
+	value unsafe.Pointer // Must be in second position (see cmd/internal/gc/range.go).
+	// rest fields are ignored
+}
+
+// add returns p+x.
+//
+// The whySafe string is ignored, so that the function still inlines
+// as efficiently as p+x, but all call sites should use the string to
+// record why the addition is safe, which is to say why the addition
+// does not cause x to advance to the very end of p's allocation
+// and therefore point incorrectly at the next block in memory.
+func add(p unsafe.Pointer, x uintptr, whySafe string) unsafe.Pointer {
+	return unsafe.Pointer(uintptr(p) + x)
+}
+
+// arrayAt returns the i-th element of p,
+// an array whose elements are eltSize bytes wide.
+// The array pointed at by p must have at least i+1 elements:
+// it is invalid (but impossible to check here) to pass i >= len,
+// because then the result will point outside the array.
+// whySafe must explain why i < len. (Passing "i < len" is fine;
+// the benefit is to surface this assumption at the call site.)
+func arrayAt(p unsafe.Pointer, i int, eltSize uintptr, whySafe string) unsafe.Pointer {
+	return add(p, uintptr(i)*eltSize, "i < len")
+}
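A package-internal sketch of arrayAt above; it is plain pointer arithmetic, with whySafe recording the caller's bounds argument (hypothetical helper, compiles only inside reflect2):

```go
// hypothetical helper inside package reflect2
func arrayAtExample() {
	arr := [4]int32{10, 20, 30, 40}
	p := unsafe.Pointer(&arr)
	// Element 2 lives at p + 2*sizeof(int32); "i < s.Len" documents why
	// the addition stays inside the allocation.
	third := arrayAt(p, 2, unsafe.Sizeof(arr[0]), "i < s.Len")
	println(*(*int32)(third)) // 30
}
```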
diff --git a/vendor/github.com/modern-go/reflect2/unsafe_map.go b/vendor/github.com/modern-go/reflect2/unsafe_map.go
new file mode 100644
index 0000000..f2e76e6
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/unsafe_map.go
@@ -0,0 +1,138 @@
+package reflect2
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+type UnsafeMapType struct {
+	unsafeType
+	pKeyRType  unsafe.Pointer
+	pElemRType unsafe.Pointer
+}
+
+func newUnsafeMapType(cfg *frozenConfig, type1 reflect.Type) MapType {
+	return &UnsafeMapType{
+		unsafeType: *newUnsafeType(cfg, type1),
+		pKeyRType:  unpackEFace(reflect.PtrTo(type1.Key())).data,
+		pElemRType: unpackEFace(reflect.PtrTo(type1.Elem())).data,
+	}
+}
+
+func (type2 *UnsafeMapType) IsNil(obj interface{}) bool {
+	if obj == nil {
+		return true
+	}
+	objEFace := unpackEFace(obj)
+	assertType("Type.IsNil argument 1", type2.ptrRType, objEFace.rtype)
+	return type2.UnsafeIsNil(objEFace.data)
+}
+
+func (type2 *UnsafeMapType) UnsafeIsNil(ptr unsafe.Pointer) bool {
+	if ptr == nil {
+		return true
+	}
+	return *(*unsafe.Pointer)(ptr) == nil
+}
+
+func (type2 *UnsafeMapType) LikePtr() bool {
+	return true
+}
+
+func (type2 *UnsafeMapType) Indirect(obj interface{}) interface{} {
+	objEFace := unpackEFace(obj)
+	assertType("MapType.Indirect argument 1", type2.ptrRType, objEFace.rtype)
+	return type2.UnsafeIndirect(objEFace.data)
+}
+
+func (type2 *UnsafeMapType) UnsafeIndirect(ptr unsafe.Pointer) interface{} {
+	return packEFace(type2.rtype, *(*unsafe.Pointer)(ptr))
+}
+
+func (type2 *UnsafeMapType) Key() Type {
+	return type2.cfg.Type2(type2.Type.Key())
+}
+
+func (type2 *UnsafeMapType) MakeMap(cap int) interface{} {
+	return packEFace(type2.ptrRType, type2.UnsafeMakeMap(cap))
+}
+
+func (type2 *UnsafeMapType) UnsafeMakeMap(cap int) unsafe.Pointer {
+	m := makeMapWithSize(type2.rtype, cap)
+	return unsafe.Pointer(&m)
+}
+
+func (type2 *UnsafeMapType) SetIndex(obj interface{}, key interface{}, elem interface{}) {
+	objEFace := unpackEFace(obj)
+	assertType("MapType.SetIndex argument 1", type2.ptrRType, objEFace.rtype)
+	keyEFace := unpackEFace(key)
+	assertType("MapType.SetIndex argument 2", type2.pKeyRType, keyEFace.rtype)
+	elemEFace := unpackEFace(elem)
+	assertType("MapType.SetIndex argument 3", type2.pElemRType, elemEFace.rtype)
+	type2.UnsafeSetIndex(objEFace.data, keyEFace.data, elemEFace.data)
+}
+
+func (type2 *UnsafeMapType) UnsafeSetIndex(obj unsafe.Pointer, key unsafe.Pointer, elem unsafe.Pointer) {
+	mapassign(type2.rtype, *(*unsafe.Pointer)(obj), key, elem)
+}
+
+func (type2 *UnsafeMapType) TryGetIndex(obj interface{}, key interface{}) (interface{}, bool) {
+	objEFace := unpackEFace(obj)
+	assertType("MapType.TryGetIndex argument 1", type2.ptrRType, objEFace.rtype)
+	keyEFace := unpackEFace(key)
+	assertType("MapType.TryGetIndex argument 2", type2.pKeyRType, keyEFace.rtype)
+	elemPtr := type2.UnsafeGetIndex(objEFace.data, keyEFace.data)
+	if elemPtr == nil {
+		return nil, false
+	}
+	return packEFace(type2.pElemRType, elemPtr), true
+}
+
+func (type2 *UnsafeMapType) GetIndex(obj interface{}, key interface{}) interface{} {
+	objEFace := unpackEFace(obj)
+	assertType("MapType.GetIndex argument 1", type2.ptrRType, objEFace.rtype)
+	keyEFace := unpackEFace(key)
+	assertType("MapType.GetIndex argument 2", type2.pKeyRType, keyEFace.rtype)
+	elemPtr := type2.UnsafeGetIndex(objEFace.data, keyEFace.data)
+	return packEFace(type2.pElemRType, elemPtr)
+}
+
+func (type2 *UnsafeMapType) UnsafeGetIndex(obj unsafe.Pointer, key unsafe.Pointer) unsafe.Pointer {
+	return mapaccess(type2.rtype, *(*unsafe.Pointer)(obj), key)
+}
+
+func (type2 *UnsafeMapType) Iterate(obj interface{}) MapIterator {
+	objEFace := unpackEFace(obj)
+	assertType("MapType.Iterate argument 1", type2.ptrRType, objEFace.rtype)
+	return type2.UnsafeIterate(objEFace.data)
+}
+
+func (type2 *UnsafeMapType) UnsafeIterate(obj unsafe.Pointer) MapIterator {
+	return &UnsafeMapIterator{
+		hiter:      mapiterinit(type2.rtype, *(*unsafe.Pointer)(obj)),
+		pKeyRType:  type2.pKeyRType,
+		pElemRType: type2.pElemRType,
+	}
+}
+
+type UnsafeMapIterator struct {
+	*hiter
+	pKeyRType  unsafe.Pointer
+	pElemRType unsafe.Pointer
+}
+
+func (iter *UnsafeMapIterator) HasNext() bool {
+	return iter.key != nil
+}
+
+func (iter *UnsafeMapIterator) Next() (interface{}, interface{}) {
+	key, elem := iter.UnsafeNext()
+	return packEFace(iter.pKeyRType, key), packEFace(iter.pElemRType, elem)
+}
+
+func (iter *UnsafeMapIterator) UnsafeNext() (unsafe.Pointer, unsafe.Pointer) {
+	key := iter.key
+	elem := iter.value
+	mapiternext(iter.hiter)
+	return key, elem
+}
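+
+// Illustrative usage sketch (not part of the original source); reflect2.TypeOf
+// is assumed from this package's public API. Note that Next returns the key
+// and element as pointers packed into interface{} values:
+//
+//	m := map[string]int{"a": 1}
+//	mapType := reflect2.TypeOf(m).(reflect2.MapType)
+//	iter := mapType.Iterate(&m)
+//	for iter.HasNext() {
+//		key, elem := iter.Next() // *string and *int packed as interface{}
+//		_, _ = key, elem
+//	}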
diff --git a/vendor/github.com/modern-go/reflect2/unsafe_ptr.go b/vendor/github.com/modern-go/reflect2/unsafe_ptr.go
new file mode 100644
index 0000000..8e5ec9c
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/unsafe_ptr.go
@@ -0,0 +1,46 @@
+package reflect2
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+type UnsafePtrType struct {
+	unsafeType
+}
+
+func newUnsafePtrType(cfg *frozenConfig, type1 reflect.Type) *UnsafePtrType {
+	return &UnsafePtrType{
+		unsafeType: *newUnsafeType(cfg, type1),
+	}
+}
+
+func (type2 *UnsafePtrType) IsNil(obj interface{}) bool {
+	if obj == nil {
+		return true
+	}
+	objEFace := unpackEFace(obj)
+	assertType("Type.IsNil argument 1", type2.ptrRType, objEFace.rtype)
+	return type2.UnsafeIsNil(objEFace.data)
+}
+
+func (type2 *UnsafePtrType) UnsafeIsNil(ptr unsafe.Pointer) bool {
+	if ptr == nil {
+		return true
+	}
+	return *(*unsafe.Pointer)(ptr) == nil
+}
+
+func (type2 *UnsafePtrType) LikePtr() bool {
+	return true
+}
+
+func (type2 *UnsafePtrType) Indirect(obj interface{}) interface{} {
+	objEFace := unpackEFace(obj)
+	assertType("Type.Indirect argument 1", type2.ptrRType, objEFace.rtype)
+	return type2.UnsafeIndirect(objEFace.data)
+}
+
+func (type2 *UnsafePtrType) UnsafeIndirect(ptr unsafe.Pointer) interface{} {
+	return packEFace(type2.rtype, *(*unsafe.Pointer)(ptr))
+}
diff --git a/vendor/github.com/modern-go/reflect2/unsafe_slice.go b/vendor/github.com/modern-go/reflect2/unsafe_slice.go
new file mode 100644
index 0000000..1c6d876
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/unsafe_slice.go
@@ -0,0 +1,177 @@
+package reflect2
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+// sliceHeader is a safe version of SliceHeader used within this package.
+type sliceHeader struct {
+	Data unsafe.Pointer
+	Len  int
+	Cap  int
+}
+
+type UnsafeSliceType struct {
+	unsafeType
+	elemRType  unsafe.Pointer
+	pElemRType unsafe.Pointer
+	elemSize   uintptr
+}
+
+func newUnsafeSliceType(cfg *frozenConfig, type1 reflect.Type) SliceType {
+	elemType := type1.Elem()
+	return &UnsafeSliceType{
+		unsafeType: *newUnsafeType(cfg, type1),
+		pElemRType: unpackEFace(reflect.PtrTo(elemType)).data,
+		elemRType:  unpackEFace(elemType).data,
+		elemSize:   elemType.Size(),
+	}
+}
+
+func (type2 *UnsafeSliceType) Set(obj interface{}, val interface{}) {
+	objEFace := unpackEFace(obj)
+	assertType("Type.Set argument 1", type2.ptrRType, objEFace.rtype)
+	valEFace := unpackEFace(val)
+	assertType("Type.Set argument 2", type2.ptrRType, valEFace.rtype)
+	type2.UnsafeSet(objEFace.data, valEFace.data)
+}
+
+func (type2 *UnsafeSliceType) UnsafeSet(ptr unsafe.Pointer, val unsafe.Pointer) {
+	*(*sliceHeader)(ptr) = *(*sliceHeader)(val)
+}
+
+func (type2 *UnsafeSliceType) IsNil(obj interface{}) bool {
+	if obj == nil {
+		return true
+	}
+	objEFace := unpackEFace(obj)
+	assertType("Type.IsNil argument 1", type2.ptrRType, objEFace.rtype)
+	return type2.UnsafeIsNil(objEFace.data)
+}
+
+func (type2 *UnsafeSliceType) UnsafeIsNil(ptr unsafe.Pointer) bool {
+	if ptr == nil {
+		return true
+	}
+	return (*sliceHeader)(ptr).Data == nil
+}
+
+func (type2 *UnsafeSliceType) SetNil(obj interface{}) {
+	objEFace := unpackEFace(obj)
+	assertType("SliceType.SetNil argument 1", type2.ptrRType, objEFace.rtype)
+	type2.UnsafeSetNil(objEFace.data)
+}
+
+func (type2 *UnsafeSliceType) UnsafeSetNil(ptr unsafe.Pointer) {
+	header := (*sliceHeader)(ptr)
+	header.Len = 0
+	header.Cap = 0
+	header.Data = nil
+}
+
+func (type2 *UnsafeSliceType) MakeSlice(length int, cap int) interface{} {
+	return packEFace(type2.ptrRType, type2.UnsafeMakeSlice(length, cap))
+}
+
+func (type2 *UnsafeSliceType) UnsafeMakeSlice(length int, cap int) unsafe.Pointer {
+	header := &sliceHeader{unsafe_NewArray(type2.elemRType, cap), length, cap}
+	return unsafe.Pointer(header)
+}
+
+func (type2 *UnsafeSliceType) LengthOf(obj interface{}) int {
+	objEFace := unpackEFace(obj)
+	assertType("SliceType.Len argument 1", type2.ptrRType, objEFace.rtype)
+	return type2.UnsafeLengthOf(objEFace.data)
+}
+
+func (type2 *UnsafeSliceType) UnsafeLengthOf(obj unsafe.Pointer) int {
+	header := (*sliceHeader)(obj)
+	return header.Len
+}
+
+func (type2 *UnsafeSliceType) SetIndex(obj interface{}, index int, elem interface{}) {
+	objEFace := unpackEFace(obj)
+	assertType("SliceType.SetIndex argument 1", type2.ptrRType, objEFace.rtype)
+	elemEFace := unpackEFace(elem)
+	assertType("SliceType.SetIndex argument 3", type2.pElemRType, elemEFace.rtype)
+	type2.UnsafeSetIndex(objEFace.data, index, elemEFace.data)
+}
+
+func (type2 *UnsafeSliceType) UnsafeSetIndex(obj unsafe.Pointer, index int, elem unsafe.Pointer) {
+	header := (*sliceHeader)(obj)
+	elemPtr := arrayAt(header.Data, index, type2.elemSize, "i < s.Len")
+	typedmemmove(type2.elemRType, elemPtr, elem)
+}
+
+func (type2 *UnsafeSliceType) GetIndex(obj interface{}, index int) interface{} {
+	objEFace := unpackEFace(obj)
+	assertType("SliceType.GetIndex argument 1", type2.ptrRType, objEFace.rtype)
+	elemPtr := type2.UnsafeGetIndex(objEFace.data, index)
+	return packEFace(type2.pElemRType, elemPtr)
+}
+
+func (type2 *UnsafeSliceType) UnsafeGetIndex(obj unsafe.Pointer, index int) unsafe.Pointer {
+	header := (*sliceHeader)(obj)
+	return arrayAt(header.Data, index, type2.elemSize, "i < s.Len")
+}
+
+func (type2 *UnsafeSliceType) Append(obj interface{}, elem interface{}) {
+	objEFace := unpackEFace(obj)
+	assertType("SliceType.Append argument 1", type2.ptrRType, objEFace.rtype)
+	elemEFace := unpackEFace(elem)
+	assertType("SliceType.Append argument 2", type2.pElemRType, elemEFace.rtype)
+	type2.UnsafeAppend(objEFace.data, elemEFace.data)
+}
+
+func (type2 *UnsafeSliceType) UnsafeAppend(obj unsafe.Pointer, elem unsafe.Pointer) {
+	header := (*sliceHeader)(obj)
+	oldLen := header.Len
+	type2.UnsafeGrow(obj, oldLen+1)
+	type2.UnsafeSetIndex(obj, oldLen, elem)
+}
+
+func (type2 *UnsafeSliceType) Cap(obj interface{}) int {
+	objEFace := unpackEFace(obj)
+	assertType("SliceType.Cap argument 1", type2.ptrRType, objEFace.rtype)
+	return type2.UnsafeCap(objEFace.data)
+}
+
+func (type2 *UnsafeSliceType) UnsafeCap(ptr unsafe.Pointer) int {
+	return (*sliceHeader)(ptr).Cap
+}
+
+func (type2 *UnsafeSliceType) Grow(obj interface{}, newLength int) {
+	objEFace := unpackEFace(obj)
+	assertType("SliceType.Grow argument 1", type2.ptrRType, objEFace.rtype)
+	type2.UnsafeGrow(objEFace.data, newLength)
+}
+
+func (type2 *UnsafeSliceType) UnsafeGrow(obj unsafe.Pointer, newLength int) {
+	header := (*sliceHeader)(obj)
+	if newLength <= header.Cap {
+		header.Len = newLength
+		return
+	}
+	newCap := calcNewCap(header.Cap, newLength)
+	newHeader := (*sliceHeader)(type2.UnsafeMakeSlice(header.Len, newCap))
+	typedslicecopy(type2.elemRType, *newHeader, *header)
+	header.Data = newHeader.Data
+	header.Cap = newHeader.Cap
+	header.Len = newLength
+}
+
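+// calcNewCap mirrors the usual slice growth policy: a zero capacity jumps
+// straight to expectedCap; otherwise the capacity doubles while below 1024,
+// then grows by 25% at a time until it reaches expectedCap.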
+func calcNewCap(cap int, expectedCap int) int {
+	if cap == 0 {
+		cap = expectedCap
+	} else {
+		for cap < expectedCap {
+			if cap < 1024 {
+				cap += cap
+			} else {
+				cap += cap / 4
+			}
+		}
+	}
+	return cap
+}
diff --git a/vendor/github.com/modern-go/reflect2/unsafe_struct.go b/vendor/github.com/modern-go/reflect2/unsafe_struct.go
new file mode 100644
index 0000000..804d916
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/unsafe_struct.go
@@ -0,0 +1,59 @@
+package reflect2
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+type UnsafeStructType struct {
+	unsafeType
+	likePtr bool
+}
+
+func newUnsafeStructType(cfg *frozenConfig, type1 reflect.Type) *UnsafeStructType {
+	return &UnsafeStructType{
+		unsafeType: *newUnsafeType(cfg, type1),
+		likePtr:    likePtrType(type1),
+	}
+}
+
+func (type2 *UnsafeStructType) LikePtr() bool {
+	return type2.likePtr
+}
+
+func (type2 *UnsafeStructType) Indirect(obj interface{}) interface{} {
+	objEFace := unpackEFace(obj)
+	assertType("Type.Indirect argument 1", type2.ptrRType, objEFace.rtype)
+	return type2.UnsafeIndirect(objEFace.data)
+}
+
+func (type2 *UnsafeStructType) UnsafeIndirect(ptr unsafe.Pointer) interface{} {
+	if type2.likePtr {
+		return packEFace(type2.rtype, *(*unsafe.Pointer)(ptr))
+	}
+	return packEFace(type2.rtype, ptr)
+}
+
+func (type2 *UnsafeStructType) FieldByName(name string) StructField {
+	structField, found := type2.Type.FieldByName(name)
+	if !found {
+		return nil
+	}
+	return newUnsafeStructField(type2, structField)
+}
+
+func (type2 *UnsafeStructType) Field(i int) StructField {
+	return newUnsafeStructField(type2, type2.Type.Field(i))
+}
+
+func (type2 *UnsafeStructType) FieldByIndex(index []int) StructField {
+	return newUnsafeStructField(type2, type2.Type.FieldByIndex(index))
+}
+
+func (type2 *UnsafeStructType) FieldByNameFunc(match func(string) bool) StructField {
+	structField, found := type2.Type.FieldByNameFunc(match)
+	if !found {
+		panic("field match condition not found in " + type2.Type.String())
+	}
+	return newUnsafeStructField(type2, structField)
+}
diff --git a/vendor/github.com/modern-go/reflect2/unsafe_type.go b/vendor/github.com/modern-go/reflect2/unsafe_type.go
new file mode 100644
index 0000000..1394171
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/unsafe_type.go
@@ -0,0 +1,85 @@
+package reflect2
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+type unsafeType struct {
+	safeType
+	rtype    unsafe.Pointer
+	ptrRType unsafe.Pointer
+}
+
+func newUnsafeType(cfg *frozenConfig, type1 reflect.Type) *unsafeType {
+	return &unsafeType{
+		safeType: safeType{
+			Type: type1,
+			cfg:  cfg,
+		},
+		rtype:    unpackEFace(type1).data,
+		ptrRType: unpackEFace(reflect.PtrTo(type1)).data,
+	}
+}
+
+func (type2 *unsafeType) Set(obj interface{}, val interface{}) {
+	objEFace := unpackEFace(obj)
+	assertType("Type.Set argument 1", type2.ptrRType, objEFace.rtype)
+	valEFace := unpackEFace(val)
+	assertType("Type.Set argument 2", type2.ptrRType, valEFace.rtype)
+	type2.UnsafeSet(objEFace.data, valEFace.data)
+}
+
+func (type2 *unsafeType) UnsafeSet(ptr unsafe.Pointer, val unsafe.Pointer) {
+	typedmemmove(type2.rtype, ptr, val)
+}
+
+func (type2 *unsafeType) IsNil(obj interface{}) bool {
+	objEFace := unpackEFace(obj)
+	assertType("Type.IsNil argument 1", type2.ptrRType, objEFace.rtype)
+	return type2.UnsafeIsNil(objEFace.data)
+}
+
+func (type2 *unsafeType) UnsafeIsNil(ptr unsafe.Pointer) bool {
+	return ptr == nil
+}
+
+func (type2 *unsafeType) UnsafeNew() unsafe.Pointer {
+	return unsafe_New(type2.rtype)
+}
+
+func (type2 *unsafeType) New() interface{} {
+	return packEFace(type2.ptrRType, type2.UnsafeNew())
+}
+
+func (type2 *unsafeType) PackEFace(ptr unsafe.Pointer) interface{} {
+	return packEFace(type2.ptrRType, ptr)
+}
+
+func (type2 *unsafeType) RType() uintptr {
+	return uintptr(type2.rtype)
+}
+
+func (type2 *unsafeType) Indirect(obj interface{}) interface{} {
+	objEFace := unpackEFace(obj)
+	assertType("Type.Indirect argument 1", type2.ptrRType, objEFace.rtype)
+	return type2.UnsafeIndirect(objEFace.data)
+}
+
+func (type2 *unsafeType) UnsafeIndirect(obj unsafe.Pointer) interface{} {
+	return packEFace(type2.rtype, obj)
+}
+
+func (type2 *unsafeType) LikePtr() bool {
+	return false
+}
+
+func assertType(where string, expectRType unsafe.Pointer, actualRType unsafe.Pointer) {
+	if expectRType != actualRType {
+		expectType := reflect.TypeOf(0)
+		(*iface)(unsafe.Pointer(&expectType)).data = expectRType
+		actualType := reflect.TypeOf(0)
+		(*iface)(unsafe.Pointer(&actualType)).data = actualRType
+		panic(where + ": expect " + expectType.String() + ", actual " + actualType.String())
+	}
+}
diff --git a/vendor/github.com/petar/GoLLRB/AUTHORS b/vendor/github.com/petar/GoLLRB/AUTHORS
new file mode 100644
index 0000000..78d1de4
--- /dev/null
+++ b/vendor/github.com/petar/GoLLRB/AUTHORS
@@ -0,0 +1,4 @@
+Petar Maymounkov <petar@5ttt.org>
+Vadim Vygonets <vadik@vygo.net>
+Ian Smith <iansmith@acm.org>
+Martin Bruse
diff --git a/vendor/github.com/petar/GoLLRB/LICENSE b/vendor/github.com/petar/GoLLRB/LICENSE
new file mode 100644
index 0000000..b75312c
--- /dev/null
+++ b/vendor/github.com/petar/GoLLRB/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2010, Petar Maymounkov
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+(*) Redistributions of source code must retain the above copyright notice, this list
+of conditions and the following disclaimer.
+
+(*) Redistributions in binary form must reproduce the above copyright notice, this
+list of conditions and the following disclaimer in the documentation and/or
+other materials provided with the distribution.
+
+(*) Neither the name of Petar Maymounkov nor the names of its contributors may be
+used to endorse or promote products derived from this software without specific
+prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/petar/GoLLRB/llrb/avgvar.go b/vendor/github.com/petar/GoLLRB/llrb/avgvar.go
new file mode 100644
index 0000000..2d7e2a3
--- /dev/null
+++ b/vendor/github.com/petar/GoLLRB/llrb/avgvar.go
@@ -0,0 +1,39 @@
+// Copyright 2010 Petar Maymounkov. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package llrb
+
+import "math"
+
+// avgVar maintains the average and variance of a stream of numbers
+// in a space-efficient manner.
+type avgVar struct {
+	count      int64
+	sum, sumsq float64
+}
+
+func (av *avgVar) Init() {
+	av.count = 0
+	av.sum = 0.0
+	av.sumsq = 0.0
+}
+
+func (av *avgVar) Add(sample float64) {
+	av.count++
+	av.sum += sample
+	av.sumsq += sample * sample
+}
+
+func (av *avgVar) GetCount() int64 { return av.count }
+
+func (av *avgVar) GetAvg() float64 { return av.sum / float64(av.count) }
+
+func (av *avgVar) GetTotal() float64 { return av.sum }
+
+func (av *avgVar) GetVar() float64 {
+	a := av.GetAvg()
+	return av.sumsq/float64(av.count) - a*a
+}
+
+func (av *avgVar) GetStdDev() float64 { return math.Sqrt(av.GetVar()) }
diff --git a/vendor/github.com/petar/GoLLRB/llrb/iterator.go b/vendor/github.com/petar/GoLLRB/llrb/iterator.go
new file mode 100644
index 0000000..ee7b27f
--- /dev/null
+++ b/vendor/github.com/petar/GoLLRB/llrb/iterator.go
@@ -0,0 +1,93 @@
+package llrb
+
+type ItemIterator func(i Item) bool
+
+//func (t *Tree) Ascend(iterator ItemIterator) {
+//	t.AscendGreaterOrEqual(Inf(-1), iterator)
+//}
+
+func (t *LLRB) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) {
+	t.ascendRange(t.root, greaterOrEqual, lessThan, iterator)
+}
+
+func (t *LLRB) ascendRange(h *Node, inf, sup Item, iterator ItemIterator) bool {
+	if h == nil {
+		return true
+	}
+	if !less(h.Item, sup) {
+		return t.ascendRange(h.Left, inf, sup, iterator)
+	}
+	if less(h.Item, inf) {
+		return t.ascendRange(h.Right, inf, sup, iterator)
+	}
+
+	if !t.ascendRange(h.Left, inf, sup, iterator) {
+		return false
+	}
+	if !iterator(h.Item) {
+		return false
+	}
+	return t.ascendRange(h.Right, inf, sup, iterator)
+}
+
+// AscendGreaterOrEqual will call iterator once for each element greater than
+// or equal to pivot in ascending order. It will stop whenever the iterator
+// returns false.
+func (t *LLRB) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) {
+	t.ascendGreaterOrEqual(t.root, pivot, iterator)
+}
+
+func (t *LLRB) ascendGreaterOrEqual(h *Node, pivot Item, iterator ItemIterator) bool {
+	if h == nil {
+		return true
+	}
+	if !less(h.Item, pivot) {
+		if !t.ascendGreaterOrEqual(h.Left, pivot, iterator) {
+			return false
+		}
+		if !iterator(h.Item) {
+			return false
+		}
+	}
+	return t.ascendGreaterOrEqual(h.Right, pivot, iterator)
+}
+
+func (t *LLRB) AscendLessThan(pivot Item, iterator ItemIterator) {
+	t.ascendLessThan(t.root, pivot, iterator)
+}
+
+func (t *LLRB) ascendLessThan(h *Node, pivot Item, iterator ItemIterator) bool {
+	if h == nil {
+		return true
+	}
+	if !t.ascendLessThan(h.Left, pivot, iterator) {
+		return false
+	}
+	if less(h.Item, pivot) {
+		if !iterator(h.Item) {
+			return false
+		}
+		return t.ascendLessThan(h.Right, pivot, iterator)
+	}
+	return true
+}
+
+// DescendLessOrEqual will call iterator once for each element less than the
+// pivot in descending order. It will stop whenever the iterator returns false.
+func (t *LLRB) DescendLessOrEqual(pivot Item, iterator ItemIterator) {
+	t.descendLessOrEqual(t.root, pivot, iterator)
+}
+
+func (t *LLRB) descendLessOrEqual(h *Node, pivot Item, iterator ItemIterator) bool {
+	if h == nil {
+		return true
+	}
+	if less(h.Item, pivot) || !less(pivot, h.Item) {
+		if !t.descendLessOrEqual(h.Right, pivot, iterator) {
+			return false
+		}
+		if !iterator(h.Item) {
+			return false
+		}
+	}
+	return t.descendLessOrEqual(h.Left, pivot, iterator)
+}
diff --git a/vendor/github.com/petar/GoLLRB/llrb/llrb-stats.go b/vendor/github.com/petar/GoLLRB/llrb/llrb-stats.go
new file mode 100644
index 0000000..47126a3
--- /dev/null
+++ b/vendor/github.com/petar/GoLLRB/llrb/llrb-stats.go
@@ -0,0 +1,46 @@
+// Copyright 2010 Petar Maymounkov. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package llrb
+
+// GetHeight returns an item in the tree with key @key, and its depth in the tree.
+func (t *LLRB) GetHeight(key Item) (result Item, depth int) {
+	return t.getHeight(t.root, key)
+}
+
+func (t *LLRB) getHeight(h *Node, item Item) (Item, int) {
+	if h == nil {
+		return nil, 0
+	}
+	if less(item, h.Item) {
+		result, depth := t.getHeight(h.Left, item)
+		return result, depth + 1
+	}
+	if less(h.Item, item) {
+		result, depth := t.getHeight(h.Right, item)
+		return result, depth + 1
+	}
+	return h.Item, 0
+}
+
+// HeightStats returns the average and standard deviation of the depth
+// of elements in the tree.
+func (t *LLRB) HeightStats() (avg, stddev float64) {
+	av := &avgVar{}
+	heightStats(t.root, 0, av)
+	return av.GetAvg(), av.GetStdDev()
+}
+
+func heightStats(h *Node, d int, av *avgVar) {
+	if h == nil {
+		return
+	}
+	av.Add(float64(d))
+	if h.Left != nil {
+		heightStats(h.Left, d+1, av)
+	}
+	if h.Right != nil {
+		heightStats(h.Right, d+1, av)
+	}
+}
diff --git a/vendor/github.com/petar/GoLLRB/llrb/llrb.go b/vendor/github.com/petar/GoLLRB/llrb/llrb.go
new file mode 100644
index 0000000..81373fb
--- /dev/null
+++ b/vendor/github.com/petar/GoLLRB/llrb/llrb.go
@@ -0,0 +1,456 @@
+// Copyright 2010 Petar Maymounkov. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// A Left-Leaning Red-Black (LLRB) implementation of 2-3 balanced binary search trees,
+// based on the following work:
+//
+//   http://www.cs.princeton.edu/~rs/talks/LLRB/08Penn.pdf
+//   http://www.cs.princeton.edu/~rs/talks/LLRB/LLRB.pdf
+//   http://www.cs.princeton.edu/~rs/talks/LLRB/Java/RedBlackBST.java
+//
+//  2-3 trees (and the run-time equivalent 2-3-4 trees) are the de facto standard BST
+//  algorithms found in implementations of Python, Java, and other libraries. The LLRB
+//  implementation of 2-3 trees is a recent improvement on the traditional implementation,
+//  observed and documented by Robert Sedgewick.
+//
+package llrb
+
+// LLRB is a Left-Leaning Red-Black (LLRB) implementation of 2-3 trees.
+type LLRB struct {
+	count int
+	root  *Node
+}
+
+type Node struct {
+	Item
+	Left, Right *Node // Pointers to left and right child nodes
+	Black       bool  // If set, the color of the link (incoming from the parent) is black
+	// In the LLRB, new nodes are always red, so the zero value of Black is correct for a new node.
+}
+
+type Item interface {
+	Less(than Item) bool
+}
+
+// less reports whether x sorts before y, handling the pinf and ninf
+// sentinels when they appear as x.
+func less(x, y Item) bool {
+	if x == pinf {
+		return false
+	}
+	if x == ninf {
+		return true
+	}
+	return x.Less(y)
+}
+
+// Inf returns an Item that is "bigger than" any other item, if sign is positive.
+// Otherwise  it returns an Item that is "smaller than" any other item.
+func Inf(sign int) Item {
+	if sign == 0 {
+		panic("sign")
+	}
+	if sign > 0 {
+		return pinf
+	}
+	return ninf
+}
+
+var (
+	ninf = nInf{}
+	pinf = pInf{}
+)
+
+type nInf struct{}
+
+func (nInf) Less(Item) bool {
+	return true
+}
+
+type pInf struct{}
+
+func (pInf) Less(Item) bool {
+	return false
+}
+
+// New allocates a new tree.
+func New() *LLRB {
+	return &LLRB{}
+}
+
+// SetRoot sets the root node of the tree.
+// It is intended to be used by functions that deserialize the tree.
+func (t *LLRB) SetRoot(r *Node) {
+	t.root = r
+}
+
+// Root returns the root node of the tree.
+// It is intended to be used by functions that serialize the tree.
+func (t *LLRB) Root() *Node {
+	return t.root
+}
+
+// Len returns the number of nodes in the tree.
+func (t *LLRB) Len() int { return t.count }
+
+// Has returns true if the tree contains an element whose order is the same as that of key.
+func (t *LLRB) Has(key Item) bool {
+	return t.Get(key) != nil
+}
+
+// Get retrieves an element from the tree whose order is the same as that of key.
+func (t *LLRB) Get(key Item) Item {
+	h := t.root
+	for h != nil {
+		switch {
+		case less(key, h.Item):
+			h = h.Left
+		case less(h.Item, key):
+			h = h.Right
+		default:
+			return h.Item
+		}
+	}
+	return nil
+}
+
+// Min returns the minimum element in the tree.
+func (t *LLRB) Min() Item {
+	h := t.root
+	if h == nil {
+		return nil
+	}
+	for h.Left != nil {
+		h = h.Left
+	}
+	return h.Item
+}
+
+// Max returns the maximum element in the tree.
+func (t *LLRB) Max() Item {
+	h := t.root
+	if h == nil {
+		return nil
+	}
+	for h.Right != nil {
+		h = h.Right
+	}
+	return h.Item
+}
+
+func (t *LLRB) ReplaceOrInsertBulk(items ...Item) {
+	for _, i := range items {
+		t.ReplaceOrInsert(i)
+	}
+}
+
+func (t *LLRB) InsertNoReplaceBulk(items ...Item) {
+	for _, i := range items {
+		t.InsertNoReplace(i)
+	}
+}
+
+// ReplaceOrInsert inserts item into the tree. If an existing
+// element has the same order, it is removed from the tree and returned.
+func (t *LLRB) ReplaceOrInsert(item Item) Item {
+	if item == nil {
+		panic("inserting nil item")
+	}
+	var replaced Item
+	t.root, replaced = t.replaceOrInsert(t.root, item)
+	t.root.Black = true
+	if replaced == nil {
+		t.count++
+	}
+	return replaced
+}
+
+func (t *LLRB) replaceOrInsert(h *Node, item Item) (*Node, Item) {
+	if h == nil {
+		return newNode(item), nil
+	}
+
+	h = walkDownRot23(h)
+
+	var replaced Item
+	if less(item, h.Item) { // BUG
+		h.Left, replaced = t.replaceOrInsert(h.Left, item)
+	} else if less(h.Item, item) {
+		h.Right, replaced = t.replaceOrInsert(h.Right, item)
+	} else {
+		replaced, h.Item = h.Item, item
+	}
+
+	h = walkUpRot23(h)
+
+	return h, replaced
+}
+
+// InsertNoReplace inserts item into the tree. If an existing
+// element has the same order, both elements remain in the tree.
+func (t *LLRB) InsertNoReplace(item Item) {
+	if item == nil {
+		panic("inserting nil item")
+	}
+	t.root = t.insertNoReplace(t.root, item)
+	t.root.Black = true
+	t.count++
+}
+
+func (t *LLRB) insertNoReplace(h *Node, item Item) *Node {
+	if h == nil {
+		return newNode(item)
+	}
+
+	h = walkDownRot23(h)
+
+	if less(item, h.Item) {
+		h.Left = t.insertNoReplace(h.Left, item)
+	} else {
+		h.Right = t.insertNoReplace(h.Right, item)
+	}
+
+	return walkUpRot23(h)
+}
+
+// Rotation driver routines for 2-3 algorithm
+
+func walkDownRot23(h *Node) *Node { return h }
+
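+// walkUpRot23 restores the LLRB invariants on the way back up the tree:
+// left-rotate a right-leaning red link, right-rotate two consecutive red
+// links on the left, and color-flip when both children are red.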
+func walkUpRot23(h *Node) *Node {
+	if isRed(h.Right) && !isRed(h.Left) {
+		h = rotateLeft(h)
+	}
+
+	if isRed(h.Left) && isRed(h.Left.Left) {
+		h = rotateRight(h)
+	}
+
+	if isRed(h.Left) && isRed(h.Right) {
+		flip(h)
+	}
+
+	return h
+}
+
+// Rotation driver routines for 2-3-4 algorithm
+
+func walkDownRot234(h *Node) *Node {
+	if isRed(h.Left) && isRed(h.Right) {
+		flip(h)
+	}
+
+	return h
+}
+
+func walkUpRot234(h *Node) *Node {
+	if isRed(h.Right) && !isRed(h.Left) {
+		h = rotateLeft(h)
+	}
+
+	if isRed(h.Left) && isRed(h.Left.Left) {
+		h = rotateRight(h)
+	}
+
+	return h
+}
+
+// DeleteMin deletes the minimum element in the tree and returns the
+// deleted item or nil otherwise.
+func (t *LLRB) DeleteMin() Item {
+	var deleted Item
+	t.root, deleted = deleteMin(t.root)
+	if t.root != nil {
+		t.root.Black = true
+	}
+	if deleted != nil {
+		t.count--
+	}
+	return deleted
+}
+
+// deleteMin code for LLRB 2-3 trees
+func deleteMin(h *Node) (*Node, Item) {
+	if h == nil {
+		return nil, nil
+	}
+	if h.Left == nil {
+		return nil, h.Item
+	}
+
+	if !isRed(h.Left) && !isRed(h.Left.Left) {
+		h = moveRedLeft(h)
+	}
+
+	var deleted Item
+	h.Left, deleted = deleteMin(h.Left)
+
+	return fixUp(h), deleted
+}
+
+// DeleteMax deletes the maximum element in the tree and returns
+// the deleted item or nil otherwise
+func (t *LLRB) DeleteMax() Item {
+	var deleted Item
+	t.root, deleted = deleteMax(t.root)
+	if t.root != nil {
+		t.root.Black = true
+	}
+	if deleted != nil {
+		t.count--
+	}
+	return deleted
+}
+
+func deleteMax(h *Node) (*Node, Item) {
+	if h == nil {
+		return nil, nil
+	}
+	if isRed(h.Left) {
+		h = rotateRight(h)
+	}
+	if h.Right == nil {
+		return nil, h.Item
+	}
+	if !isRed(h.Right) && !isRed(h.Right.Left) {
+		h = moveRedRight(h)
+	}
+	var deleted Item
+	h.Right, deleted = deleteMax(h.Right)
+
+	return fixUp(h), deleted
+}
+
+// Delete deletes an item from the tree whose key equals key.
+// The deleted item is returned; otherwise nil is returned.
+func (t *LLRB) Delete(key Item) Item {
+	var deleted Item
+	t.root, deleted = t.delete(t.root, key)
+	if t.root != nil {
+		t.root.Black = true
+	}
+	if deleted != nil {
+		t.count--
+	}
+	return deleted
+}
+
+func (t *LLRB) delete(h *Node, item Item) (*Node, Item) {
+	var deleted Item
+	if h == nil {
+		return nil, nil
+	}
+	if less(item, h.Item) {
+		if h.Left == nil { // item not present. Nothing to delete
+			return h, nil
+		}
+		if !isRed(h.Left) && !isRed(h.Left.Left) {
+			h = moveRedLeft(h)
+		}
+		h.Left, deleted = t.delete(h.Left, item)
+	} else {
+		if isRed(h.Left) {
+			h = rotateRight(h)
+		}
+		// If @item equals @h.Item and no right children at @h
+		if !less(h.Item, item) && h.Right == nil {
+			return nil, h.Item
+		}
+		// PETAR: Added 'h.Right != nil' below
+		if h.Right != nil && !isRed(h.Right) && !isRed(h.Right.Left) {
+			h = moveRedRight(h)
+		}
+		// If @item equals @h.Item, and (from above) 'h.Right != nil'
+		if !less(h.Item, item) {
+			var subDeleted Item
+			h.Right, subDeleted = deleteMin(h.Right)
+			if subDeleted == nil {
+				panic("logic")
+			}
+			deleted, h.Item = h.Item, subDeleted
+		} else { // Else, @item is bigger than @h.Item
+			h.Right, deleted = t.delete(h.Right, item)
+		}
+	}
+
+	return fixUp(h), deleted
+}
+
+// Internal node manipulation routines
+
+func newNode(item Item) *Node { return &Node{Item: item} }
+
+func isRed(h *Node) bool {
+	if h == nil {
+		return false
+	}
+	return !h.Black
+}
+
+func rotateLeft(h *Node) *Node {
+	x := h.Right
+	if x.Black {
+		panic("rotating a black link")
+	}
+	h.Right = x.Left
+	x.Left = h
+	x.Black = h.Black
+	h.Black = false
+	return x
+}
+
+func rotateRight(h *Node) *Node {
+	x := h.Left
+	if x.Black {
+		panic("rotating a black link")
+	}
+	h.Left = x.Right
+	x.Right = h
+	x.Black = h.Black
+	h.Black = false
+	return x
+}
+
+// REQUIRE: Left and Right children must be present
+func flip(h *Node) {
+	h.Black = !h.Black
+	h.Left.Black = !h.Left.Black
+	h.Right.Black = !h.Right.Black
+}
+
+// REQUIRE: Left and Right children must be present
+func moveRedLeft(h *Node) *Node {
+	flip(h)
+	if isRed(h.Right.Left) {
+		h.Right = rotateRight(h.Right)
+		h = rotateLeft(h)
+		flip(h)
+	}
+	return h
+}
+
+// REQUIRE: Left and Right children must be present
+func moveRedRight(h *Node) *Node {
+	flip(h)
+	if isRed(h.Left.Left) {
+		h = rotateRight(h)
+		flip(h)
+	}
+	return h
+}
+
+func fixUp(h *Node) *Node {
+	if isRed(h.Right) {
+		h = rotateLeft(h)
+	}
+
+	if isRed(h.Left) && isRed(h.Left.Left) {
+		h = rotateRight(h)
+	}
+
+	if isRed(h.Left) && isRed(h.Right) {
+		flip(h)
+	}
+
+	return h
+}
diff --git a/vendor/github.com/petar/GoLLRB/llrb/util.go b/vendor/github.com/petar/GoLLRB/llrb/util.go
new file mode 100644
index 0000000..63dbdb2
--- /dev/null
+++ b/vendor/github.com/petar/GoLLRB/llrb/util.go
@@ -0,0 +1,17 @@
+// Copyright 2010 Petar Maymounkov. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package llrb
+
+type Int int
+
+func (x Int) Less(than Item) bool {
+	return x < than.(Int)
+}
+
+type String string
+
+func (x String) Less(than Item) bool {
+	return x < than.(String)
+}
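+
+// Illustrative usage sketch (not part of the original source), exercising
+// the Int helper against the LLRB API defined in llrb.go:
+//
+//	tree := New()
+//	tree.ReplaceOrInsert(Int(1))
+//	tree.InsertNoReplace(Int(2))
+//	min := tree.DeleteMin()  // Int(1)
+//	item := tree.Get(Int(2)) // Int(2), or nil if absent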
diff --git a/vendor/github.com/peterbourgon/diskv/LICENSE b/vendor/github.com/peterbourgon/diskv/LICENSE
new file mode 100644
index 0000000..41ce7f1
--- /dev/null
+++ b/vendor/github.com/peterbourgon/diskv/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2011-2012 Peter Bourgon
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/peterbourgon/diskv/README.md b/vendor/github.com/peterbourgon/diskv/README.md
new file mode 100644
index 0000000..3474739
--- /dev/null
+++ b/vendor/github.com/peterbourgon/diskv/README.md
@@ -0,0 +1,141 @@
+# What is diskv?
+
+Diskv (disk-vee) is a simple, persistent key-value store written in the Go
+language. It starts with an incredibly simple API for storing arbitrary data on
+a filesystem by key, and builds several layers of performance-enhancing
+abstraction on top.  The end result is a conceptually simple, but highly
+performant, disk-backed storage system.
+
+[![Build Status][1]][2]
+
+[1]: https://drone.io/github.com/peterbourgon/diskv/status.png
+[2]: https://drone.io/github.com/peterbourgon/diskv/latest
+
+
+# Installing
+
+Install [Go 1][3], either [from source][4] or [with a prepackaged binary][5].
+Then,
+
+```bash
+$ go get github.com/peterbourgon/diskv
+```
+
+[3]: http://golang.org
+[4]: http://golang.org/doc/install/source
+[5]: http://golang.org/doc/install
+
+
+# Usage
+
+```go
+package main
+
+import (
+	"fmt"
+	"github.com/peterbourgon/diskv"
+)
+
+func main() {
+	// Simplest transform function: put all the data files into the base dir.
+	flatTransform := func(s string) []string { return []string{} }
+
+	// Initialize a new diskv store, rooted at "my-data-dir", with a 1MB cache.
+	d := diskv.New(diskv.Options{
+		BasePath:     "my-data-dir",
+		Transform:    flatTransform,
+		CacheSizeMax: 1024 * 1024,
+	})
+
+	// Write three bytes to the key "alpha".
+	key := "alpha"
+	d.Write(key, []byte{'1', '2', '3'})
+
+	// Read the value back out of the store.
+	value, _ := d.Read(key)
+	fmt.Printf("%v\n", value)
+
+	// Erase the key+value from the store (and the disk).
+	d.Erase(key)
+}
+```
+
+More complex examples can be found in the "examples" subdirectory.
+
+
+# Theory
+
+## Basic idea
+
+At its core, diskv is a map of a key (`string`) to arbitrary data (`[]byte`).
+The data is written to a single file on disk, with the same name as the key.
+The key determines where that file will be stored, via a user-provided
+`TransformFunc`, which takes a key and returns a slice (`[]string`)
+corresponding to a path list where the key file will be stored. The simplest
+TransformFunc,
+
+```go
+func SimpleTransform(key string) []string {
+    return []string{}
+}
+```
+
+will place all keys in the same, base directory. The design is inspired by
+[Redis diskstore][6]; a TransformFunc which emulates the default diskstore
+behavior is available in the content-addressable-storage example.
+
+[6]: http://groups.google.com/group/redis-db/browse_thread/thread/d444bc786689bde9?pli=1
+
+**Note** that your TransformFunc should ensure that one valid key doesn't
+transform to a subset of another valid key. That is, it shouldn't be possible
+to construct valid keys that resolve to directory names. As a concrete example,
+if your TransformFunc splits on every 3 characters, then
+
+```go
+d.Write("abcabc", val) // OK: written to <base>/abc/abc/abcabc
+d.Write("abc", val)    // Error: attempted write to <base>/abc/abc, but it's a directory
+```
+
+This will be addressed in an upcoming version of diskv.
+
+Probably the most important design principle behind diskv is that your data is
+always flatly available on the disk. diskv will never do anything that would
+prevent you from accessing, copying, backing up, or otherwise interacting with
+your data via common UNIX commandline tools.
+
+## Adding a cache
+
+An in-memory caching layer is provided by combining the BasicStore
+functionality with a simple map structure, and keeping it up-to-date as
+appropriate. Since the map structure in Go is not threadsafe, it's combined
+with an RWMutex to provide safe concurrent access.
+
+## Adding order
+
+diskv is a key-value store and therefore inherently unordered. An ordering
+system can be injected into the store by passing something which satisfies the
+diskv.Index interface. (A default implementation, using Google's
+[btree][7] package, is provided.) Basically, diskv keeps an ordered (by a
+user-provided Less function) index of the keys, which can be queried.
+
+[7]: https://github.com/google/btree
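+
+For example (a sketch, assuming the provided `BTreeIndex` default
+implementation and lexicographic string ordering):
+
+```go
+d := diskv.New(diskv.Options{
+	BasePath:  "my-data-dir",
+	Index:     &diskv.BTreeIndex{},
+	IndexLess: func(a, b string) bool { return a < b },
+})
+
+keys := d.Index.Keys("", 10) // up to 10 keys, in order, from the start
+```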
+
+## Adding compression
+
+Something which implements the diskv.Compression interface may be passed
+during store creation, so that all Writes and Reads are filtered through
+a compression/decompression pipeline. Several default implementations,
+using stdlib compression algorithms, are provided. Note that data is cached
+compressed; the cost of decompression is borne with each Read.
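+
+For example (a minimal sketch; "my-data-dir" is a placeholder path):
+
+```go
+d := diskv.New(diskv.Options{
+	BasePath:    "my-data-dir",
+	Compression: diskv.NewGzipCompression(),
+})
+```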
+
+## Streaming
+
+diskv also provides ReadStream and WriteStream methods, to allow very large
+data to be handled efficiently.
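+
+A sketch of both calls, reusing the store `d` from the Usage example
+(`"big-key"` and `bigReader` are placeholder names):
+
+```go
+err := d.WriteStream("big-key", bigReader, true) // sync the file after writing
+rc, err := d.ReadStream("big-key", true)         // direct read, bypassing the cache
+```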
+
+
+# Future plans
+
+ * Needs plenty of robust testing: huge datasets, etc...
+ * More thorough benchmarking
+ * Your suggestions for use-cases I haven't thought of
diff --git a/vendor/github.com/peterbourgon/diskv/compression.go b/vendor/github.com/peterbourgon/diskv/compression.go
new file mode 100644
index 0000000..5192b02
--- /dev/null
+++ b/vendor/github.com/peterbourgon/diskv/compression.go
@@ -0,0 +1,64 @@
+package diskv
+
+import (
+	"compress/flate"
+	"compress/gzip"
+	"compress/zlib"
+	"io"
+)
+
+// Compression is an interface that Diskv uses to implement compression of
+// data. Writer takes a destination io.Writer and returns a WriteCloser that
+// compresses all data written through it. Reader takes a source io.Reader and
+// returns a ReadCloser that decompresses all data read through it. You may
+// define these methods on your own type, or use one of the NewCompression
+// helpers.
+type Compression interface {
+	Writer(dst io.Writer) (io.WriteCloser, error)
+	Reader(src io.Reader) (io.ReadCloser, error)
+}
+
+// NewGzipCompression returns a Gzip-based Compression.
+func NewGzipCompression() Compression {
+	return NewGzipCompressionLevel(flate.DefaultCompression)
+}
+
+// NewGzipCompressionLevel returns a Gzip-based Compression with the given level.
+func NewGzipCompressionLevel(level int) Compression {
+	return &genericCompression{
+		wf: func(w io.Writer) (io.WriteCloser, error) { return gzip.NewWriterLevel(w, level) },
+		rf: func(r io.Reader) (io.ReadCloser, error) { return gzip.NewReader(r) },
+	}
+}
+
+// NewZlibCompression returns a Zlib-based Compression.
+func NewZlibCompression() Compression {
+	return NewZlibCompressionLevel(flate.DefaultCompression)
+}
+
+// NewZlibCompressionLevel returns a Zlib-based Compression with the given level.
+func NewZlibCompressionLevel(level int) Compression {
+	return NewZlibCompressionLevelDict(level, nil)
+}
+
+// NewZlibCompressionLevelDict returns a Zlib-based Compression with the given
+// level, based on the given dictionary.
+func NewZlibCompressionLevelDict(level int, dict []byte) Compression {
+	return &genericCompression{
+		func(w io.Writer) (io.WriteCloser, error) { return zlib.NewWriterLevelDict(w, level, dict) },
+		func(r io.Reader) (io.ReadCloser, error) { return zlib.NewReaderDict(r, dict) },
+	}
+}
+
+type genericCompression struct {
+	wf func(w io.Writer) (io.WriteCloser, error)
+	rf func(r io.Reader) (io.ReadCloser, error)
+}
+
+func (g *genericCompression) Writer(dst io.Writer) (io.WriteCloser, error) {
+	return g.wf(dst)
+}
+
+func (g *genericCompression) Reader(src io.Reader) (io.ReadCloser, error) {
+	return g.rf(src)
+}
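+
+// Illustrative sketch (not part of the original source): a flate-based
+// Compression could be assembled from the same building blocks, since
+// compress/flate is already imported above:
+//
+//	func NewFlateCompressionLevel(level int) Compression {
+//		return &genericCompression{
+//			wf: func(w io.Writer) (io.WriteCloser, error) { return flate.NewWriter(w, level) },
+//			rf: func(r io.Reader) (io.ReadCloser, error) { return flate.NewReader(r), nil },
+//		}
+//	}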
diff --git a/vendor/github.com/peterbourgon/diskv/diskv.go b/vendor/github.com/peterbourgon/diskv/diskv.go
new file mode 100644
index 0000000..524dc0a
--- /dev/null
+++ b/vendor/github.com/peterbourgon/diskv/diskv.go
@@ -0,0 +1,624 @@
+// Package diskv (disk-vee) implements a simple, persistent key-value store.
+// It stores all data flatly on the filesystem.
+package diskv
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+	"sync"
+	"syscall"
+)
+
+const (
+	defaultBasePath             = "diskv"
+	defaultFilePerm os.FileMode = 0666
+	defaultPathPerm os.FileMode = 0777
+)
+
+var (
+	defaultTransform   = func(s string) []string { return []string{} }
+	errCanceled        = errors.New("canceled")
+	errEmptyKey        = errors.New("empty key")
+	errBadKey          = errors.New("bad key")
+	errImportDirectory = errors.New("can't import a directory")
+)
+
+// TransformFunction transforms a key into a slice of strings, with each
+// element in the slice representing a directory in the file path where the
+// key's entry will eventually be stored.
+//
+// For example, if TransformFunc transforms "abcdef" to ["ab", "cde", "f"],
+// the final location of the data file will be <basedir>/ab/cde/f/abcdef
+type TransformFunction func(s string) []string
+
+// Options define a set of properties that dictate Diskv behavior.
+// All values are optional.
+type Options struct {
+	BasePath     string
+	Transform    TransformFunction
+	CacheSizeMax uint64 // bytes
+	PathPerm     os.FileMode
+	FilePerm     os.FileMode
+	// If TempDir is set, it will enable filesystem atomic writes by
+	// writing temporary files to that location before being moved
+	// to BasePath.
+	// Note that TempDir MUST be on the same device/partition as
+	// BasePath.
+	TempDir string
+
+	Index     Index
+	IndexLess LessFunction
+
+	Compression Compression
+}
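+
+// For example (an illustrative sketch, not part of the original source; the
+// paths are placeholders and must live on the same partition):
+//
+//	d := diskv.New(diskv.Options{
+//		BasePath: "my-data-dir",
+//		TempDir:  "my-data-tmp", // enables atomic writes via rename
+//	})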
+
+// Diskv is a disk-backed key-value store. You shouldn't construct Diskv
+// structures directly; instead, use the New constructor.
+type Diskv struct {
+	Options
+	mu        sync.RWMutex
+	cache     map[string][]byte
+	cacheSize uint64
+}
+
+// New returns an initialized Diskv structure, ready to use.
+// If the path identified by baseDir already contains data,
+// it will be accessible, but not yet cached.
+func New(o Options) *Diskv {
+	if o.BasePath == "" {
+		o.BasePath = defaultBasePath
+	}
+	if o.Transform == nil {
+		o.Transform = defaultTransform
+	}
+	if o.PathPerm == 0 {
+		o.PathPerm = defaultPathPerm
+	}
+	if o.FilePerm == 0 {
+		o.FilePerm = defaultFilePerm
+	}
+
+	d := &Diskv{
+		Options:   o,
+		cache:     map[string][]byte{},
+		cacheSize: 0,
+	}
+
+	if d.Index != nil && d.IndexLess != nil {
+		d.Index.Initialize(d.IndexLess, d.Keys(nil))
+	}
+
+	return d
+}
+
+// Write synchronously writes the key-value pair to disk, making it immediately
+// available for reads. Write relies on the filesystem to perform an eventual
+// sync to physical media. If you need stronger guarantees, see WriteStream.
+func (d *Diskv) Write(key string, val []byte) error {
+	return d.WriteStream(key, bytes.NewBuffer(val), false)
+}
+
+// WriteStream writes the data represented by the io.Reader to the disk, under
+// the provided key. If sync is true, WriteStream performs an explicit sync on
+// the file as soon as it's written.
+//
+// bytes.Buffer provides io.Reader semantics for basic data types.
+func (d *Diskv) WriteStream(key string, r io.Reader, sync bool) error {
+	if len(key) <= 0 {
+		return errEmptyKey
+	}
+
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	return d.writeStreamWithLock(key, r, sync)
+}
+
+// createKeyFileWithLock either creates the key file directly, or
+// creates a temporary file in TempDir if it is set.
+func (d *Diskv) createKeyFileWithLock(key string) (*os.File, error) {
+	if d.TempDir != "" {
+		if err := os.MkdirAll(d.TempDir, d.PathPerm); err != nil {
+			return nil, fmt.Errorf("temp mkdir: %s", err)
+		}
+		f, err := ioutil.TempFile(d.TempDir, "")
+		if err != nil {
+			return nil, fmt.Errorf("temp file: %s", err)
+		}
+
+		if err := f.Chmod(d.FilePerm); err != nil {
+			f.Close()           // error deliberately ignored
+			os.Remove(f.Name()) // error deliberately ignored
+			return nil, fmt.Errorf("chmod: %s", err)
+		}
+		return f, nil
+	}
+
+	mode := os.O_WRONLY | os.O_CREATE | os.O_TRUNC // overwrite if exists
+	f, err := os.OpenFile(d.completeFilename(key), mode, d.FilePerm)
+	if err != nil {
+		return nil, fmt.Errorf("open file: %s", err)
+	}
+	return f, nil
+}
+
+// writeStream does no input validation checking.
+func (d *Diskv) writeStreamWithLock(key string, r io.Reader, sync bool) error {
+	if err := d.ensurePathWithLock(key); err != nil {
+		return fmt.Errorf("ensure path: %s", err)
+	}
+
+	f, err := d.createKeyFileWithLock(key)
+	if err != nil {
+		return fmt.Errorf("create key file: %s", err)
+	}
+
+	wc := io.WriteCloser(&nopWriteCloser{f})
+	if d.Compression != nil {
+		wc, err = d.Compression.Writer(f)
+		if err != nil {
+			f.Close()           // error deliberately ignored
+			os.Remove(f.Name()) // error deliberately ignored
+			return fmt.Errorf("compression writer: %s", err)
+		}
+	}
+
+	if _, err := io.Copy(wc, r); err != nil {
+		f.Close()           // error deliberately ignored
+		os.Remove(f.Name()) // error deliberately ignored
+		return fmt.Errorf("i/o copy: %s", err)
+	}
+
+	if err := wc.Close(); err != nil {
+		f.Close()           // error deliberately ignored
+		os.Remove(f.Name()) // error deliberately ignored
+		return fmt.Errorf("compression close: %s", err)
+	}
+
+	if sync {
+		if err := f.Sync(); err != nil {
+			f.Close()           // error deliberately ignored
+			os.Remove(f.Name()) // error deliberately ignored
+			return fmt.Errorf("file sync: %s", err)
+		}
+	}
+
+	if err := f.Close(); err != nil {
+		return fmt.Errorf("file close: %s", err)
+	}
+
+	if f.Name() != d.completeFilename(key) {
+		if err := os.Rename(f.Name(), d.completeFilename(key)); err != nil {
+			os.Remove(f.Name()) // error deliberately ignored
+			return fmt.Errorf("rename: %s", err)
+		}
+	}
+
+	if d.Index != nil {
+		d.Index.Insert(key)
+	}
+
+	d.bustCacheWithLock(key) // cache only on read
+
+	return nil
+}
+
+// Import imports the source file into diskv under the destination key. If the
+// destination key already exists, it's overwritten. If move is true, the
+// source file is removed after a successful import.
+func (d *Diskv) Import(srcFilename, dstKey string, move bool) (err error) {
+	if dstKey == "" {
+		return errEmptyKey
+	}
+
+	if fi, err := os.Stat(srcFilename); err != nil {
+		return err
+	} else if fi.IsDir() {
+		return errImportDirectory
+	}
+
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	if err := d.ensurePathWithLock(dstKey); err != nil {
+		return fmt.Errorf("ensure path: %s", err)
+	}
+
+	if move {
+		if err := syscall.Rename(srcFilename, d.completeFilename(dstKey)); err == nil {
+			d.bustCacheWithLock(dstKey)
+			return nil
+		} else if err != syscall.EXDEV {
+			// The rename failed for a reason other than the source and
+			// destination being on different devices (EXDEV); give up.
+			// EXDEV falls through to the copy path below.
+			return err
+		}
+	}
+
+	f, err := os.Open(srcFilename)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	err = d.writeStreamWithLock(dstKey, f, false)
+	if err == nil && move {
+		err = os.Remove(srcFilename)
+	}
+	return err
+}
+
+// Read reads the key and returns the value.
+// If the key is available in the cache, Read won't touch the disk.
+// If the key is not in the cache, Read will have the side-effect of
+// lazily caching the value.
+func (d *Diskv) Read(key string) ([]byte, error) {
+	rc, err := d.ReadStream(key, false)
+	if err != nil {
+		return []byte{}, err
+	}
+	defer rc.Close()
+	return ioutil.ReadAll(rc)
+}
+
+// ReadStream reads the key and returns the value (data) as an io.ReadCloser.
+// If the value is cached from a previous read, and direct is false,
+// ReadStream will use the cached value. Otherwise, it will return a handle to
+// the file on disk, and cache the data on read.
+//
+// If direct is true, ReadStream will lazily delete any cached value for the
+// key, and return a direct handle to the file on disk.
+//
+// If compression is enabled, ReadStream taps into the io.Reader stream prior
+// to decompression, and caches the compressed data.
+func (d *Diskv) ReadStream(key string, direct bool) (io.ReadCloser, error) {
+	d.mu.RLock()
+	defer d.mu.RUnlock()
+
+	if val, ok := d.cache[key]; ok {
+		if !direct {
+			buf := bytes.NewBuffer(val)
+			if d.Compression != nil {
+				return d.Compression.Reader(buf)
+			}
+			return ioutil.NopCloser(buf), nil
+		}
+
+		go func() {
+			d.mu.Lock()
+			defer d.mu.Unlock()
+			d.uncacheWithLock(key, uint64(len(val)))
+		}()
+	}
+
+	return d.readWithRLock(key)
+}
+
+// read ignores the cache, and returns an io.ReadCloser representing the
+// decompressed data for the given key, streamed from the disk. Clients should
+// acquire a read lock on the Diskv and check the cache themselves before
+// calling read.
+func (d *Diskv) readWithRLock(key string) (io.ReadCloser, error) {
+	filename := d.completeFilename(key)
+
+	fi, err := os.Stat(filename)
+	if err != nil {
+		return nil, err
+	}
+	if fi.IsDir() {
+		return nil, os.ErrNotExist
+	}
+
+	f, err := os.Open(filename)
+	if err != nil {
+		return nil, err
+	}
+
+	var r io.Reader
+	if d.CacheSizeMax > 0 {
+		r = newSiphon(f, d, key)
+	} else {
+		r = &closingReader{f}
+	}
+
+	var rc = io.ReadCloser(ioutil.NopCloser(r))
+	if d.Compression != nil {
+		rc, err = d.Compression.Reader(r)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return rc, nil
+}
+
+// closingReader provides a Reader that automatically closes the
+// embedded ReadCloser when it reaches EOF.
+type closingReader struct {
+	rc io.ReadCloser
+}
+
+func (cr closingReader) Read(p []byte) (int, error) {
+	n, err := cr.rc.Read(p)
+	if err == io.EOF {
+		if closeErr := cr.rc.Close(); closeErr != nil {
+			return n, closeErr // close must succeed for Read to succeed
+		}
+	}
+	return n, err
+}
+
+// siphon is like a TeeReader: it copies all data read through it to an
+// internal buffer, and moves that buffer to the cache at EOF.
+type siphon struct {
+	f   *os.File
+	d   *Diskv
+	key string
+	buf *bytes.Buffer
+}
+
+// newSiphon constructs a siphoning reader that represents the passed file.
+// When a successful series of reads ends in an EOF, the siphon will write
+// the buffered data to Diskv's cache under the given key.
+func newSiphon(f *os.File, d *Diskv, key string) io.Reader {
+	return &siphon{
+		f:   f,
+		d:   d,
+		key: key,
+		buf: &bytes.Buffer{},
+	}
+}
+
+// Read implements the io.Reader interface for siphon.
+func (s *siphon) Read(p []byte) (int, error) {
+	n, err := s.f.Read(p)
+
+	if err == nil {
+		return s.buf.Write(p[0:n]) // Write must succeed for Read to succeed
+	}
+
+	if err == io.EOF {
+		s.d.cacheWithoutLock(s.key, s.buf.Bytes()) // cache may fail
+		if closeErr := s.f.Close(); closeErr != nil {
+			return n, closeErr // close must succeed for Read to succeed
+		}
+		return n, err
+	}
+
+	return n, err
+}
+
+// Erase synchronously erases the given key from the disk and the cache.
+func (d *Diskv) Erase(key string) error {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	d.bustCacheWithLock(key)
+
+	// erase from index
+	if d.Index != nil {
+		d.Index.Delete(key)
+	}
+
+	// erase from disk
+	filename := d.completeFilename(key)
+	if s, err := os.Stat(filename); err == nil {
+		if s.IsDir() {
+			return errBadKey
+		}
+		if err = os.Remove(filename); err != nil {
+			return err
+		}
+	} else {
+		// Return err as-is so caller can do os.IsNotExist(err).
+		return err
+	}
+
+	// clean up and return
+	d.pruneDirsWithLock(key)
+	return nil
+}
+
+// EraseAll will delete all of the data from the store, both in the cache and on
+// the disk. Note that EraseAll doesn't distinguish diskv-related data from non-
+// diskv-related data. Care should be taken to always specify a diskv base
+// directory that is exclusively for diskv data.
+func (d *Diskv) EraseAll() error {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+	d.cache = make(map[string][]byte)
+	d.cacheSize = 0
+	if d.TempDir != "" {
+		os.RemoveAll(d.TempDir) // errors ignored
+	}
+	return os.RemoveAll(d.BasePath)
+}
+
+// Has returns true if the given key exists.
+func (d *Diskv) Has(key string) bool {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	if _, ok := d.cache[key]; ok {
+		return true
+	}
+
+	filename := d.completeFilename(key)
+	s, err := os.Stat(filename)
+	if err != nil {
+		return false
+	}
+	if s.IsDir() {
+		return false
+	}
+
+	return true
+}
+
+// Keys returns a channel that will yield every key accessible by the store,
+// in undefined order. If a cancel channel is provided, closing it will
+// terminate and close the keys channel.
+func (d *Diskv) Keys(cancel <-chan struct{}) <-chan string {
+	return d.KeysPrefix("", cancel)
+}
+
+// KeysPrefix returns a channel that will yield every key accessible by the
+// store with the given prefix, in undefined order. If a cancel channel is
+// provided, closing it will terminate and close the keys channel. If the
+// provided prefix is the empty string, all keys will be yielded.
+func (d *Diskv) KeysPrefix(prefix string, cancel <-chan struct{}) <-chan string {
+	var prepath string
+	if prefix == "" {
+		prepath = d.BasePath
+	} else {
+		prepath = d.pathFor(prefix)
+	}
+	c := make(chan string)
+	go func() {
+		filepath.Walk(prepath, walker(c, prefix, cancel))
+		close(c)
+	}()
+	return c
+}
+
+// walker returns a function which satisfies the filepath.WalkFunc signature.
+// It sends every non-directory file entry down the channel c.
+func walker(c chan<- string, prefix string, cancel <-chan struct{}) filepath.WalkFunc {
+	return func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+
+		if info.IsDir() || !strings.HasPrefix(info.Name(), prefix) {
+			return nil // "pass"
+		}
+
+		select {
+		case c <- info.Name():
+		case <-cancel:
+			return errCanceled
+		}
+
+		return nil
+	}
+}
+
+// pathFor returns the absolute path for location on the filesystem where the
+// data for the given key will be stored.
+func (d *Diskv) pathFor(key string) string {
+	return filepath.Join(d.BasePath, filepath.Join(d.Transform(key)...))
+}
+
+// ensurePathWithLock is a helper function that generates all necessary
+// directories on the filesystem for the given key.
+func (d *Diskv) ensurePathWithLock(key string) error {
+	return os.MkdirAll(d.pathFor(key), d.PathPerm)
+}
+
+// completeFilename returns the absolute path to the file for the given key.
+func (d *Diskv) completeFilename(key string) string {
+	return filepath.Join(d.pathFor(key), key)
+}
+
+// cacheWithLock attempts to cache the given key-value pair in the store's
+// cache. It can fail if the value is larger than the cache's maximum size.
+func (d *Diskv) cacheWithLock(key string, val []byte) error {
+	valueSize := uint64(len(val))
+	if err := d.ensureCacheSpaceWithLock(valueSize); err != nil {
+		return fmt.Errorf("%s; not caching", err)
+	}
+
+	// be very strict about memory guarantees
+	if (d.cacheSize + valueSize) > d.CacheSizeMax {
+		panic(fmt.Sprintf("failed to make room for value (%d/%d)", valueSize, d.CacheSizeMax))
+	}
+
+	d.cache[key] = val
+	d.cacheSize += valueSize
+	return nil
+}
+
+// cacheWithoutLock acquires the store's (write) mutex and calls cacheWithLock.
+func (d *Diskv) cacheWithoutLock(key string, val []byte) error {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+	return d.cacheWithLock(key, val)
+}
+
+func (d *Diskv) bustCacheWithLock(key string) {
+	if val, ok := d.cache[key]; ok {
+		d.uncacheWithLock(key, uint64(len(val)))
+	}
+}
+
+func (d *Diskv) uncacheWithLock(key string, sz uint64) {
+	d.cacheSize -= sz
+	delete(d.cache, key)
+}
+
+// pruneDirsWithLock deletes empty directories in the path walk leading to the
+// key k. Typically this function is called after an Erase is made.
+func (d *Diskv) pruneDirsWithLock(key string) error {
+	pathlist := d.Transform(key)
+	for i := range pathlist {
+		dir := filepath.Join(d.BasePath, filepath.Join(pathlist[:len(pathlist)-i]...))
+
+		// thanks to Steven Blenkinsop for this snippet
+		switch fi, err := os.Stat(dir); true {
+		case err != nil:
+			return err
+		case !fi.IsDir():
+			panic(fmt.Sprintf("corrupt dirstate at %s", dir))
+		}
+
+		nlinks, err := filepath.Glob(filepath.Join(dir, "*"))
+		if err != nil {
+			return err
+		} else if len(nlinks) > 0 {
+			return nil // directory is not empty -- do not prune
+		}
+		if err = os.Remove(dir); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// ensureCacheSpaceWithLock deletes entries from the cache in arbitrary order
+// until the cache has at least valueSize bytes available.
+func (d *Diskv) ensureCacheSpaceWithLock(valueSize uint64) error {
+	if valueSize > d.CacheSizeMax {
+		return fmt.Errorf("value size (%d bytes) too large for cache (%d bytes)", valueSize, d.CacheSizeMax)
+	}
+
+	safe := func() bool { return (d.cacheSize + valueSize) <= d.CacheSizeMax }
+
+	for key, val := range d.cache {
+		if safe() {
+			break
+		}
+
+		d.uncacheWithLock(key, uint64(len(val)))
+	}
+
+	if !safe() {
+		panic(fmt.Sprintf("%d bytes still won't fit in the cache! (max %d bytes)", valueSize, d.CacheSizeMax))
+	}
+
+	return nil
+}
+
+// nopWriteCloser wraps an io.Writer and provides a no-op Close method to
+// satisfy the io.WriteCloser interface.
+type nopWriteCloser struct {
+	io.Writer
+}
+
+func (wc *nopWriteCloser) Write(p []byte) (int, error) { return wc.Writer.Write(p) }
+func (wc *nopWriteCloser) Close() error                { return nil }
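The store's write/read/erase surface above is easiest to follow end to end with a small driver. A minimal sketch, assuming the exported New constructor and Options fields (BasePath, Transform, CacheSizeMax) defined earlier in diskv.go; the base directory name is hypothetical:

package main

import (
	"fmt"

	"github.com/peterbourgon/diskv"
)

func main() {
	d := diskv.New(diskv.Options{
		BasePath:     "diskv-demo",                              // hypothetical data directory
		Transform:    func(key string) []string { return nil }, // flat layout: every file directly under BasePath
		CacheSizeMax: 1024 * 1024,                              // 1 MiB in-memory cache
	})

	if err := d.Write("alpha", []byte("some value")); err != nil {
		panic(err)
	}

	val, _ := d.Read("alpha")
	fmt.Printf("alpha=%s has=%v\n", val, d.Has("alpha"))

	// Keys streams every key; a nil cancel channel never fires, so the
	// walk always runs to completion.
	for key := range d.Keys(nil) {
		fmt.Println("key:", key)
	}

	// Erase busts the cache, deletes the index entry and the file, then
	// prunes any directories left empty.
	if err := d.Erase("alpha"); err != nil {
		panic(err)
	}
}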
diff --git a/vendor/github.com/peterbourgon/diskv/index.go b/vendor/github.com/peterbourgon/diskv/index.go
new file mode 100644
index 0000000..96fee51
--- /dev/null
+++ b/vendor/github.com/peterbourgon/diskv/index.go
@@ -0,0 +1,115 @@
+package diskv
+
+import (
+	"sync"
+
+	"github.com/google/btree"
+)
+
+// Index is a generic interface for things that can
+// provide an ordered list of keys.
+type Index interface {
+	Initialize(less LessFunction, keys <-chan string)
+	Insert(key string)
+	Delete(key string)
+	Keys(from string, n int) []string
+}
+
+// LessFunction is used to initialize an Index of keys in a specific order.
+type LessFunction func(string, string) bool
+
+// btreeString is a custom data type that satisfies the BTree Less interface,
+// making the strings it wraps sortable by the BTree package.
+type btreeString struct {
+	s string
+	l LessFunction
+}
+
+// Less satisfies the BTree.Less interface using the btreeString's LessFunction.
+func (s btreeString) Less(i btree.Item) bool {
+	return s.l(s.s, i.(btreeString).s)
+}
+
+// BTreeIndex is an implementation of the Index interface using google/btree.
+type BTreeIndex struct {
+	sync.RWMutex
+	LessFunction
+	*btree.BTree
+}
+
+// Initialize populates the BTree tree with data from the keys channel,
+// according to the passed less function. It's destructive to the BTreeIndex.
+func (i *BTreeIndex) Initialize(less LessFunction, keys <-chan string) {
+	i.Lock()
+	defer i.Unlock()
+	i.LessFunction = less
+	i.BTree = rebuild(less, keys)
+}
+
+// Insert inserts the given key (only) into the BTree tree.
+func (i *BTreeIndex) Insert(key string) {
+	i.Lock()
+	defer i.Unlock()
+	if i.BTree == nil || i.LessFunction == nil {
+		panic("uninitialized index")
+	}
+	i.BTree.ReplaceOrInsert(btreeString{s: key, l: i.LessFunction})
+}
+
+// Delete removes the given key (only) from the BTree tree.
+func (i *BTreeIndex) Delete(key string) {
+	i.Lock()
+	defer i.Unlock()
+	if i.BTree == nil || i.LessFunction == nil {
+		panic("uninitialized index")
+	}
+	i.BTree.Delete(btreeString{s: key, l: i.LessFunction})
+}
+
+// Keys yields a maximum of n keys in order. If the passed 'from' key is empty,
+// Keys will return the first n keys. If the passed 'from' key is non-empty, the
+// first key in the returned slice will be the key that immediately follows the
+// passed key, in key order.
+func (i *BTreeIndex) Keys(from string, n int) []string {
+	i.RLock()
+	defer i.RUnlock()
+
+	if i.BTree == nil || i.LessFunction == nil {
+		panic("uninitialized index")
+	}
+
+	if i.BTree.Len() <= 0 {
+		return []string{}
+	}
+
+	btreeFrom := btreeString{s: from, l: i.LessFunction}
+	skipFirst := true
+	if len(from) <= 0 || !i.BTree.Has(btreeFrom) {
+		// no such key, so fabricate an always-smallest item
+		btreeFrom = btreeString{s: "", l: func(string, string) bool { return true }}
+		skipFirst = false
+	}
+
+	keys := []string{}
+	iterator := func(i btree.Item) bool {
+		keys = append(keys, i.(btreeString).s)
+		return len(keys) < n
+	}
+	i.BTree.AscendGreaterOrEqual(btreeFrom, iterator)
+
+	if skipFirst && len(keys) > 0 {
+		keys = keys[1:]
+	}
+
+	return keys
+}
+
+// rebuild does the work of regenerating the index
+// with the given keys.
+func rebuild(less LessFunction, keys <-chan string) *btree.BTree {
+	tree := btree.New(2)
+	for key := range keys {
+		tree.ReplaceOrInsert(btreeString{s: key, l: less})
+	}
+	return tree
+}
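To exercise BTreeIndex, wire it into the store through the Index and IndexLess options; every Write then inserts the key, every Erase deletes it, and Keys(from, n) pages through stored keys in order. A sketch under the same assumptions as the previous example:

package main

import (
	"fmt"

	"github.com/peterbourgon/diskv"
)

func main() {
	d := diskv.New(diskv.Options{
		BasePath:     "indexed-demo", // hypothetical data directory
		Transform:    func(key string) []string { return nil },
		CacheSizeMax: 1024 * 1024,
		Index:        &diskv.BTreeIndex{},
		IndexLess:    func(a, b string) bool { return a < b }, // plain lexicographic order
	})

	for _, k := range []string{"b", "a", "c"} {
		if err := d.Write(k, []byte(k)); err != nil {
			panic(err)
		}
	}

	fmt.Println(d.Index.Keys("", 10))  // [a b c]: the first ten keys in order
	fmt.Println(d.Index.Keys("a", 10)) // [b c]: keys strictly after "a"
}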
diff --git a/vendor/github.com/prometheus/client_golang/LICENSE b/vendor/github.com/prometheus/client_golang/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/prometheus/client_golang/NOTICE b/vendor/github.com/prometheus/client_golang/NOTICE
new file mode 100644
index 0000000..dd878a3
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/NOTICE
@@ -0,0 +1,23 @@
+Prometheus instrumentation library for Go applications
+Copyright 2012-2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
+
+
+The following components are included in this product:
+
+perks - a fork of https://github.com/bmizerany/perks
+https://github.com/beorn7/perks
+Copyright 2013-2015 Blake Mizerany, Björn Rabenstein
+See https://github.com/beorn7/perks/blob/master/README.md for license details.
+
+Go support for Protocol Buffers - Google's data interchange format
+http://github.com/golang/protobuf/
+Copyright 2010 The Go Authors
+See source code for license details.
+
+Support for streaming Protocol Buffer messages for the Go language (golang).
+https://github.com/matttproud/golang_protobuf_extensions
+Copyright 2013 Matt T. Proud
+Licensed under the Apache License, Version 2.0
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/.gitignore b/vendor/github.com/prometheus/client_golang/prometheus/.gitignore
new file mode 100644
index 0000000..3460f03
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/.gitignore
@@ -0,0 +1 @@
+command-line-arguments.test
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/README.md b/vendor/github.com/prometheus/client_golang/prometheus/README.md
new file mode 100644
index 0000000..44986bf
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/README.md
@@ -0,0 +1 @@
+See [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus).
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
new file mode 100644
index 0000000..c0d70b2
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
@@ -0,0 +1,120 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// Collector is the interface implemented by anything that can be used by
+// Prometheus to collect metrics. A Collector has to be registered for
+// collection. See Registerer.Register.
+//
+// The stock metrics provided by this package (Gauge, Counter, Summary,
+// Histogram, Untyped) are also Collectors (which only ever collect one metric,
+// namely itself). An implementer of Collector may, however, collect multiple
+// metrics in a coordinated fashion and/or create metrics on the fly. Examples
+// for collectors already implemented in this library are the metric vectors
+// (i.e. collection of multiple instances of the same Metric but with different
+// label values) like GaugeVec or SummaryVec, and the ExpvarCollector.
+type Collector interface {
+	// Describe sends the super-set of all possible descriptors of metrics
+	// collected by this Collector to the provided channel and returns once
+	// the last descriptor has been sent. The sent descriptors fulfill the
+	// consistency and uniqueness requirements described in the Desc
+	// documentation.
+	//
+	// It is valid if one and the same Collector sends duplicate
+	// descriptors. Those duplicates are simply ignored. However, two
+	// different Collectors must not send duplicate descriptors.
+	//
+	// Sending no descriptor at all marks the Collector as “unchecked”,
+	// i.e. no checks will be performed at registration time, and the
+	// Collector may yield any Metric it sees fit in its Collect method.
+	//
+	// This method idempotently sends the same descriptors throughout the
+	// lifetime of the Collector. It may be called concurrently and
+	// therefore must be implemented in a concurrency safe way.
+	//
+	// If a Collector encounters an error while executing this method, it
+	// must send an invalid descriptor (created with NewInvalidDesc) to
+	// signal the error to the registry.
+	Describe(chan<- *Desc)
+	// Collect is called by the Prometheus registry when collecting
+	// metrics. The implementation sends each collected metric via the
+	// provided channel and returns once the last metric has been sent. The
+	// descriptor of each sent metric is one of those returned by Describe
+	// (unless the Collector is unchecked, see above). Returned metrics that
+	// share the same descriptor must differ in their variable label
+	// values.
+	//
+	// This method may be called concurrently and must therefore be
+	// implemented in a concurrency safe way. Blocking occurs at the expense
+	// of total performance of rendering all registered metrics. Ideally,
+	// Collector implementations support concurrent readers.
+	Collect(chan<- Metric)
+}
+
+// DescribeByCollect is a helper to implement the Describe method of a custom
+// Collector. It collects the metrics from the provided Collector and sends
+// their descriptors to the provided channel.
+//
+// If a Collector collects the same metrics throughout its lifetime, its
+// Describe method can simply be implemented as:
+//
+//   func (c customCollector) Describe(ch chan<- *Desc) {
+//   	DescribeByCollect(c, ch)
+//   }
+//
+// However, this will not work if the metrics collected change dynamically over
+// the lifetime of the Collector in a way that their combined set of descriptors
+// changes as well. The shortcut implementation will then violate the contract
+// of the Describe method. If a Collector sometimes collects no metrics at all
+// (for example vectors like CounterVec, GaugeVec, etc., which only collect
+// metrics after a metric with a fully specified label set has been accessed),
+// it might even get registered as an unchecked Collector (cf. the Register
+// method of the Registerer interface). Hence, only use this shortcut
+// implementation of Describe if you are certain to fulfill the contract.
+//
+// The Collector example demonstrates a use of DescribeByCollect.
+func DescribeByCollect(c Collector, descs chan<- *Desc) {
+	metrics := make(chan Metric)
+	go func() {
+		c.Collect(metrics)
+		close(metrics)
+	}()
+	for m := range metrics {
+		descs <- m.Desc()
+	}
+}
+
+// selfCollector implements Collector for a single Metric so that the Metric
+// collects itself. Add it as an anonymous field to a struct that implements
+// Metric, and call init with the Metric itself as an argument.
+type selfCollector struct {
+	self Metric
+}
+
+// init provides the selfCollector with a reference to the metric it is supposed
+// to collect. It is usually called within the factory function to create a
+// metric. See example.
+func (c *selfCollector) init(self Metric) {
+	c.self = self
+}
+
+// Describe implements Collector.
+func (c *selfCollector) Describe(ch chan<- *Desc) {
+	ch <- c.self.Desc()
+}
+
+// Collect implements Collector.
+func (c *selfCollector) Collect(ch chan<- Metric) {
+	ch <- c.self
+}
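A hedged sketch of a custom Collector tying the interface together with the DescribeByCollect shortcut; the work queue and metric name are invented for illustration:

package main

import "github.com/prometheus/client_golang/prometheus"

// queueCollector reports the depth of an externally managed work queue,
// read through a callback at collect time.
type queueCollector struct {
	depthDesc *prometheus.Desc
	depth     func() float64 // hypothetical hook into the queue
}

// Describe uses the DescribeByCollect shortcut, which is safe here because
// the collector always emits the same single descriptor.
func (c *queueCollector) Describe(ch chan<- *prometheus.Desc) {
	prometheus.DescribeByCollect(c, ch)
}

// Collect builds a throw-away constant metric from the current depth.
func (c *queueCollector) Collect(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(c.depthDesc, prometheus.GaugeValue, c.depth())
}

func main() {
	q := make(chan struct{}, 100)
	prometheus.MustRegister(&queueCollector{
		depthDesc: prometheus.NewDesc(
			"work_queue_depth", "Current depth of the work queue.", nil, nil),
		depth: func() float64 { return float64(len(q)) },
	})
}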
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
new file mode 100644
index 0000000..d463e36
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
@@ -0,0 +1,277 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"errors"
+	"math"
+	"sync/atomic"
+
+	dto "github.com/prometheus/client_model/go"
+)
+
+// Counter is a Metric that represents a single numerical value that only ever
+// goes up. That implies that it cannot be used to count items whose number can
+// also go down, e.g. the number of currently running goroutines. Those
+// "counters" are represented by Gauges.
+//
+// A Counter is typically used to count requests served, tasks completed, errors
+// occurred, etc.
+//
+// To create Counter instances, use NewCounter.
+type Counter interface {
+	Metric
+	Collector
+
+	// Inc increments the counter by 1. Use Add to increment it by arbitrary
+	// non-negative values.
+	Inc()
+	// Add adds the given value to the counter. It panics if the value is <
+	// 0.
+	Add(float64)
+}
+
+// CounterOpts is an alias for Opts. See there for doc comments.
+type CounterOpts Opts
+
+// NewCounter creates a new Counter based on the provided CounterOpts.
+//
+// The returned implementation tracks the counter value in two separate
+// variables, a float64 and a uint64. The latter is used to track calls of the
+// Inc method and calls of the Add method with a value that can be represented
+// as a uint64. This allows atomic increments of the counter with optimal
+// performance. (It is common to have an Inc call in very hot execution paths.)
+// Both internal tracking values are added up in the Write method. This has to
+// be taken into account when it comes to precision and overflow behavior.
+func NewCounter(opts CounterOpts) Counter {
+	desc := NewDesc(
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		opts.Help,
+		nil,
+		opts.ConstLabels,
+	)
+	result := &counter{desc: desc, labelPairs: desc.constLabelPairs}
+	result.init(result) // Init self-collection.
+	return result
+}
+
+type counter struct {
+	// valBits contains the bits of the represented float64 value, while
+	// valInt stores values that are exact integers. Both have to go first
+	// in the struct to guarantee alignment for atomic operations.
+	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+	valBits uint64
+	valInt  uint64
+
+	selfCollector
+	desc *Desc
+
+	labelPairs []*dto.LabelPair
+}
+
+func (c *counter) Desc() *Desc {
+	return c.desc
+}
+
+func (c *counter) Add(v float64) {
+	if v < 0 {
+		panic(errors.New("counter cannot decrease in value"))
+	}
+	ival := uint64(v)
+	if float64(ival) == v {
+		atomic.AddUint64(&c.valInt, ival)
+		return
+	}
+
+	for {
+		oldBits := atomic.LoadUint64(&c.valBits)
+		newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
+		if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) {
+			return
+		}
+	}
+}
+
+func (c *counter) Inc() {
+	atomic.AddUint64(&c.valInt, 1)
+}
+
+func (c *counter) Write(out *dto.Metric) error {
+	fval := math.Float64frombits(atomic.LoadUint64(&c.valBits))
+	ival := atomic.LoadUint64(&c.valInt)
+	val := fval + float64(ival)
+
+	return populateMetric(CounterValue, val, c.labelPairs, out)
+}
+
+// CounterVec is a Collector that bundles a set of Counters that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. number of HTTP requests, partitioned by response code and
+// method). Create instances with NewCounterVec.
+type CounterVec struct {
+	*metricVec
+}
+
+// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
+// partitioned by the given label names.
+func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
+	desc := NewDesc(
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		opts.Help,
+		labelNames,
+		opts.ConstLabels,
+	)
+	return &CounterVec{
+		metricVec: newMetricVec(desc, func(lvs ...string) Metric {
+			if len(lvs) != len(desc.variableLabels) {
+				panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))
+			}
+			result := &counter{desc: desc, labelPairs: makeLabelPairs(desc, lvs)}
+			result.init(result) // Init self-collection.
+			return result
+		}),
+	}
+}
+
+// GetMetricWithLabelValues returns the Counter for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new Counter is created.
+//
+// It is possible to call this method without using the returned Counter to only
+// create the new Counter but leave it at its starting value 0. See also the
+// SummaryVec example.
+//
+// Keeping the Counter for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Counter from the CounterVec. In that case,
+// the Counter will still exist, but it will not be exported anymore, even if a
+// Counter with the same label values is created later.
+//
+// An error is returned if the number of label values is not the same as the
+// number of VariableLabels in Desc (minus any curried labels).
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the GaugeVec example.
+func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
+	metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
+	if metric != nil {
+		return metric.(Counter), err
+	}
+	return nil, err
+}
+
+// GetMetricWith returns the Counter for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Counter is created. Implications of
+// creating a Counter without using it and keeping the Counter for later use are
+// the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc (minus any curried labels).
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
+	metric, err := v.metricVec.getMetricWith(labels)
+	if metric != nil {
+		return metric.(Counter), err
+	}
+	return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. Not returning an
+// error allows shortcuts like
+//     myVec.WithLabelValues("404", "GET").Add(42)
+func (v *CounterVec) WithLabelValues(lvs ...string) Counter {
+	c, err := v.GetMetricWithLabelValues(lvs...)
+	if err != nil {
+		panic(err)
+	}
+	return c
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. Not returning an error allows shortcuts like
+//     myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
+func (v *CounterVec) With(labels Labels) Counter {
+	c, err := v.GetMetricWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return c
+}
+
+// CurryWith returns a vector curried with the provided labels, i.e. the
+// returned vector has those labels pre-set for all labeled operations performed
+// on it. The cardinality of the curried vector is reduced accordingly. The
+// order of the remaining labels stays the same (just with the curried labels
+// taken out of the sequence – which is relevant for the
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
+// vector, but only with labels not yet used for currying before.
+//
+// The metrics contained in the CounterVec are shared between the curried and
+// uncurried vectors. They are just accessed differently. Curried and uncurried
+// vectors behave identically in terms of collection. Only one must be
+// registered with a given registry (usually the uncurried version). The Reset
+// method deletes all metrics, even if called on a curried vector.
+func (v *CounterVec) CurryWith(labels Labels) (*CounterVec, error) {
+	vec, err := v.curryWith(labels)
+	if vec != nil {
+		return &CounterVec{vec}, err
+	}
+	return nil, err
+}
+
+// MustCurryWith works as CurryWith but panics where CurryWith would have
+// returned an error.
+func (v *CounterVec) MustCurryWith(labels Labels) *CounterVec {
+	vec, err := v.CurryWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return vec
+}
+
+// CounterFunc is a Counter whose value is determined at collect time by calling a
+// provided function.
+//
+// To create CounterFunc instances, use NewCounterFunc.
+type CounterFunc interface {
+	Metric
+	Collector
+}
+
+// NewCounterFunc creates a new CounterFunc based on the provided
+// CounterOpts. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where a CounterFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe. The function should also honor
+// the contract for a Counter (values only go up, not down), but compliance will
+// not be checked.
+func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc {
+	return newValueFunc(NewDesc(
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		opts.Help,
+		nil,
+		opts.ConstLabels,
+	), CounterValue, function)
+}
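A short usage sketch of the CounterVec surface documented above; the metric and label names are illustrative only:

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	httpReqs := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "http_requests_total",
			Help: "Total HTTP requests, partitioned by status code and method.",
		},
		[]string{"code", "method"},
	)
	prometheus.MustRegister(httpReqs)

	// Hot path: WithLabelValues is fastest but panics on a label-count
	// mismatch; values must follow the declared order ("code", "method").
	httpReqs.WithLabelValues("200", "GET").Inc()

	// The Labels-map form reads better and catches misordered labels, at
	// the cost of building and checking the map.
	httpReqs.With(prometheus.Labels{"code": "404", "method": "POST"}).Add(3)

	// MustCurryWith pins a label for a whole subsystem, e.g. one handler.
	getOnly := httpReqs.MustCurryWith(prometheus.Labels{"method": "GET"})
	getOnly.WithLabelValues("500").Inc()
}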
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
new file mode 100644
index 0000000..1d034f8
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
@@ -0,0 +1,184 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"errors"
+	"fmt"
+	"sort"
+	"strings"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/prometheus/common/model"
+
+	dto "github.com/prometheus/client_model/go"
+)
+
+// Desc is the descriptor used by every Prometheus Metric. It is essentially
+// the immutable meta-data of a Metric. The normal Metric implementations
+// included in this package manage their Desc under the hood. Users only have to
+// deal with Desc if they use advanced features like the ExpvarCollector or
+// custom Collectors and Metrics.
+//
+// Descriptors registered with the same registry have to fulfill certain
+// consistency and uniqueness criteria if they share the same fully-qualified
+// name: They must have the same help string and the same label names (aka label
+// dimensions) in each, constLabels and variableLabels, but they must differ in
+// the values of the constLabels.
+//
+// Descriptors that share the same fully-qualified names and the same label
+// values of their constLabels are considered equal.
+//
+// Use NewDesc to create new Desc instances.
+type Desc struct {
+	// fqName has been built from Namespace, Subsystem, and Name.
+	fqName string
+	// help provides some helpful information about this metric.
+	help string
+	// constLabelPairs contains precalculated DTO label pairs based on
+	// the constant labels.
+	constLabelPairs []*dto.LabelPair
+	// VariableLabels contains names of labels for which the metric
+	// maintains variable values.
+	variableLabels []string
+	// id is a hash of the values of the ConstLabels and fqName. This
+	// must be unique among all registered descriptors and can therefore be
+	// used as an identifier of the descriptor.
+	id uint64
+	// dimHash is a hash of the label names (preset and variable) and the
+	// Help string. Each Desc with the same fqName must have the same
+	// dimHash.
+	dimHash uint64
+	// err is an error that occurred during construction. It is reported on
+	// registration time.
+	err error
+}
+
+// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
+// and will be reported on registration time. variableLabels and constLabels can
+// be nil if no such labels should be set. fqName must not be empty.
+//
+// variableLabels only contain the label names. Their label values are variable
+// and therefore not part of the Desc. (They are managed within the Metric.)
+//
+// For constLabels, the label values are constant. Therefore, they are fully
+// specified in the Desc. See the Collector example for a usage pattern.
+func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
+	d := &Desc{
+		fqName:         fqName,
+		help:           help,
+		variableLabels: variableLabels,
+	}
+	if !model.IsValidMetricName(model.LabelValue(fqName)) {
+		d.err = fmt.Errorf("%q is not a valid metric name", fqName)
+		return d
+	}
+	// labelValues contains the label values of const labels (in order of
+	// their sorted label names) plus the fqName (at position 0).
+	labelValues := make([]string, 1, len(constLabels)+1)
+	labelValues[0] = fqName
+	labelNames := make([]string, 0, len(constLabels)+len(variableLabels))
+	labelNameSet := map[string]struct{}{}
+	// First add only the const label names and sort them...
+	for labelName := range constLabels {
+		if !checkLabelName(labelName) {
+			d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName)
+			return d
+		}
+		labelNames = append(labelNames, labelName)
+		labelNameSet[labelName] = struct{}{}
+	}
+	sort.Strings(labelNames)
+	// ... so that we can now add const label values in the order of their names.
+	for _, labelName := range labelNames {
+		labelValues = append(labelValues, constLabels[labelName])
+	}
+	// Validate the const label values. They can't have a wrong cardinality, so
+	// use len(labelValues) as expectedNumberOfValues.
+	if err := validateLabelValues(labelValues, len(labelValues)); err != nil {
+		d.err = err
+		return d
+	}
+	// Now add the variable label names, but prefix them with something that
+	// cannot be in a regular label name. That prevents matching the label
+	// dimension with a different mix between preset and variable labels.
+	for _, labelName := range variableLabels {
+		if !checkLabelName(labelName) {
+			d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName)
+			return d
+		}
+		labelNames = append(labelNames, "$"+labelName)
+		labelNameSet[labelName] = struct{}{}
+	}
+	if len(labelNames) != len(labelNameSet) {
+		d.err = errors.New("duplicate label names")
+		return d
+	}
+
+	vh := hashNew()
+	for _, val := range labelValues {
+		vh = hashAdd(vh, val)
+		vh = hashAddByte(vh, separatorByte)
+	}
+	d.id = vh
+	// Sort labelNames so that order doesn't matter for the hash.
+	sort.Strings(labelNames)
+	// Now hash together (in this order) the help string and the sorted
+	// label names.
+	lh := hashNew()
+	lh = hashAdd(lh, help)
+	lh = hashAddByte(lh, separatorByte)
+	for _, labelName := range labelNames {
+		lh = hashAdd(lh, labelName)
+		lh = hashAddByte(lh, separatorByte)
+	}
+	d.dimHash = lh
+
+	d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels))
+	for n, v := range constLabels {
+		d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{
+			Name:  proto.String(n),
+			Value: proto.String(v),
+		})
+	}
+	sort.Sort(labelPairSorter(d.constLabelPairs))
+	return d
+}
+
+// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the
+// provided error set. If a collector returning such a descriptor is registered,
+// registration will fail with the provided error. NewInvalidDesc can be used by
+// a Collector to signal inability to describe itself.
+func NewInvalidDesc(err error) *Desc {
+	return &Desc{
+		err: err,
+	}
+}
+
+func (d *Desc) String() string {
+	lpStrings := make([]string, 0, len(d.constLabelPairs))
+	for _, lp := range d.constLabelPairs {
+		lpStrings = append(
+			lpStrings,
+			fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()),
+		)
+	}
+	return fmt.Sprintf(
+		"Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}",
+		d.fqName,
+		d.help,
+		strings.Join(lpStrings, ","),
+		d.variableLabels,
+	)
+}
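Constructing a Desc directly, as a custom Collector would, looks roughly like the following sketch; the metric name is hypothetical (loosely themed on the affinity router this change vendors dependencies for), and MustNewConstMetric is the constant-metric constructor mentioned in the package docs:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	desc := prometheus.NewDesc(
		"afrouter_backend_up", // hypothetical fully-qualified name
		"Whether a routed backend is reachable.",
		[]string{"backend"},                     // variable label: one series per backend
		prometheus.Labels{"router": "affinity"}, // const label: fixed for the process
	)
	fmt.Println(desc) // String() prints fqName, help, and both label sets

	// The variable label value arrives per sample, never in the Desc itself.
	m := prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, 1, "core-0")
	fmt.Println(m.Desc() == desc) // true: the metric carries its descriptor
}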
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
new file mode 100644
index 0000000..5d9525d
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
@@ -0,0 +1,201 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package prometheus is the core instrumentation package. It provides metrics
+// primitives to instrument code for monitoring. It also offers a registry for
+// metrics. Sub-packages allow to expose the registered metrics via HTTP
+// (package promhttp) or push them to a Pushgateway (package push). There is
+// also a sub-package promauto, which provides metrics constructors with
+// automatic registration.
+//
+// All exported functions and methods are safe to be used concurrently unless
+// specified otherwise.
+//
+// A Basic Example
+//
+// As a starting point, a very basic usage example:
+//
+//    package main
+//
+//    import (
+//    	"log"
+//    	"net/http"
+//
+//    	"github.com/prometheus/client_golang/prometheus"
+//    	"github.com/prometheus/client_golang/prometheus/promhttp"
+//    )
+//
+//    var (
+//    	cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{
+//    		Name: "cpu_temperature_celsius",
+//    		Help: "Current temperature of the CPU.",
+//    	})
+//    	hdFailures = prometheus.NewCounterVec(
+//    		prometheus.CounterOpts{
+//    			Name: "hd_errors_total",
+//    			Help: "Number of hard-disk errors.",
+//    		},
+//    		[]string{"device"},
+//    	)
+//    )
+//
+//    func init() {
+//    	// Metrics have to be registered to be exposed:
+//    	prometheus.MustRegister(cpuTemp)
+//    	prometheus.MustRegister(hdFailures)
+//    }
+//
+//    func main() {
+//    	cpuTemp.Set(65.3)
+//    	hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
+//
+//    	// The Handler function provides a default handler to expose metrics
+//    	// via an HTTP server. "/metrics" is the usual endpoint for that.
+//    	http.Handle("/metrics", promhttp.Handler())
+//    	log.Fatal(http.ListenAndServe(":8080", nil))
+//    }
+//
+//
+// This is a complete program that exports two metrics, a Gauge and a Counter,
+// the latter with a label attached to turn it into a (one-dimensional) vector.
+//
+// Metrics
+//
+// The number of exported identifiers in this package might appear a bit
+// overwhelming. However, in addition to the basic plumbing shown in the example
+// above, you only need to understand the different metric types and their
+// vector versions for basic usage. Furthermore, if you are not concerned with
+// fine-grained control of when and how to register metrics with the registry,
+// have a look at the promauto package, which will effectively allow you to
+// ignore registration altogether in simple cases.
+//
+// Above, you have already touched the Counter and the Gauge. There are two more
+// advanced metric types: the Summary and Histogram. A more thorough description
+// of those four metric types can be found in the Prometheus docs:
+// https://prometheus.io/docs/concepts/metric_types/
+//
+// A fifth "type" of metric is Untyped. It behaves like a Gauge, but signals the
+// Prometheus server not to assume anything about its type.
+//
+// In addition to the fundamental metric types Gauge, Counter, Summary,
+// Histogram, and Untyped, a very important part of the Prometheus data model is
+// the partitioning of samples along dimensions called labels, which results in
+// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec,
+// HistogramVec, and UntypedVec.
+//
+// While only the fundamental metric types implement the Metric interface, both
+// the metrics and their vector versions implement the Collector interface. A
+// Collector manages the collection of a number of Metrics, but for convenience,
+// a Metric can also “collect itself”. Note that Gauge, Counter, Summary,
+// Histogram, and Untyped are interfaces themselves while GaugeVec, CounterVec,
+// SummaryVec, HistogramVec, and UntypedVec are not.
+//
+// To create instances of Metrics and their vector versions, you need a suitable
+// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, HistogramOpts, or
+// UntypedOpts.
+//
+// Custom Collectors and constant Metrics
+//
+// While you could create your own implementations of Metric, most likely you
+// will only ever implement the Collector interface on your own. At a first
+// glance, a custom Collector seems handy to bundle Metrics for common
+// registration (with the prime example of the different metric vectors above,
+// which bundle all the metrics of the same name but with different labels).
+//
+// There is a more involved use case, too: If you already have metrics
+// available, created outside of the Prometheus context, you don't need the
+// interface of the various Metric types. You essentially want to mirror the
+// existing numbers into Prometheus Metrics during collection. Implementing
+// the Collector interface yourself is perfect for that. You can create
+// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and
+// NewConstSummary (and their respective Must… versions). That will happen in
+// the Collect method. The Describe method has to return separate Desc
+// instances, representative of the “throw-away” metrics to be created later.
+// NewDesc comes in handy to create those Desc instances. Alternatively, you
+// could return no Desc at all, which will mark the Collector “unchecked”.  No
+// checks are performed at registration time, but metric consistency will still
+// be ensured at scrape time, i.e. any inconsistencies will lead to scrape
+// errors. Thus, with unchecked Collectors, the responsibility to not collect
+// metrics that lead to inconsistencies in the total scrape result lies with the
+// implementer of the Collector. While this is not a desirable state, it is
+// sometimes necessary. The typical use case is a situation where the exact
+// metrics to be returned by a Collector cannot be predicted at registration
+// time, but the implementer has sufficient knowledge of the whole system to
+// guarantee metric consistency.
+//
+// The Collector example illustrates the use case. You can also look at the
+// source code of the processCollector (mirroring process metrics), the
+// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar
+// metrics) as examples that are used in this package itself.
+//
+// If you just need to call a function to get a single float value to collect as
+// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting
+// shortcuts.
+//
+// Advanced Uses of the Registry
+//
+// While MustRegister is the by far most common way of registering a Collector,
+// sometimes you might want to handle the errors the registration might cause.
+// As suggested by the name, MustRegister panics if an error occurs. With the
+// Register function, the error is returned and can be handled.
+//
+// An error is returned if the registered Collector is incompatible or
+// inconsistent with already registered metrics. The registry aims for
+// consistency of the collected metrics according to the Prometheus data model.
+// Inconsistencies are ideally detected at registration time, not at collect
+// time. The former will usually be detected at start-up time of a program,
+// while the latter will only happen at scrape time, possibly not even on the
+// first scrape if the inconsistency only becomes relevant later. That is the
+// main reason why a Collector and a Metric have to describe themselves to the
+// registry.
+//
+// So far, everything we did operated on the so-called default registry, as it
+// can be found in the global DefaultRegisterer variable. With NewRegistry, you
+// can create a custom registry, or you can even implement the Registerer or
+// Gatherer interfaces yourself. The methods Register and Unregister work in the
+// same way on a custom registry as the global functions Register and Unregister
+// on the default registry.
+//
+// There are a number of uses for custom registries: You can use registries with
+// special properties, see NewPedanticRegistry. You can avoid global state, as
+// it is imposed by the DefaultRegisterer. You can use multiple registries at
+// the same time to expose different metrics in different ways.  You can use
+// separate registries for testing purposes.
+//
+// Also note that the DefaultRegisterer comes registered with a Collector for Go
+// runtime metrics (via NewGoCollector) and a Collector for process metrics (via
+// NewProcessCollector). With a custom registry, you are in control and decide
+// yourself about the Collectors to register.
+//
+// HTTP Exposition
+//
+// The Registry implements the Gatherer interface. The caller of the Gather
+// method can then expose the gathered metrics in some way. Usually, the metrics
+// are served via HTTP on the /metrics endpoint. That's happening in the example
+// above. The tools to expose metrics via HTTP are in the promhttp sub-package.
+// (The top-level functions in the prometheus package are deprecated.)
+//
+// Pushing to the Pushgateway
+//
+// Functions for pushing to the Pushgateway can be found in the push sub-package.
+//
+// Graphite Bridge
+//
+// Functions and examples to push metrics from a Gatherer to Graphite can be
+// found in the graphite sub-package.
+//
+// Other Means of Exposition
+//
+// More ways of exposing metrics can easily be added by following the approaches
+// of the existing implementations.
+package prometheus
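The "Advanced Uses of the Registry" and "HTTP Exposition" sections above combine into the following sketch; it assumes the promhttp sub-package's HandlerFor function, and the metric name is illustrative:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// A custom registry avoids the global default registry's state and
	// lets us choose exactly which collectors are exposed.
	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewGoCollector()) // opt in to Go runtime metrics

	opsProcessed := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "myapp_ops_processed_total", // hypothetical name
		Help: "Total operations processed.",
	})
	reg.MustRegister(opsProcessed)
	opsProcessed.Inc()

	// HandlerFor serves only what this particular registry gathers.
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}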
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go
new file mode 100644
index 0000000..18a99d5
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go
@@ -0,0 +1,119 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"encoding/json"
+	"expvar"
+)
+
+type expvarCollector struct {
+	exports map[string]*Desc
+}
+
+// NewExpvarCollector returns a newly allocated expvar Collector that still has
+// to be registered with a Prometheus registry.
+//
+// An expvar Collector collects metrics from the expvar interface. It provides a
+// quick way to expose numeric values that are already exported via expvar as
+// Prometheus metrics. Note that the data models of expvar and Prometheus are
+// fundamentally different, and that the expvar Collector is inherently slower
+// than native Prometheus metrics. Thus, the expvar Collector is probably great
+// for experiments and prototyping, but you should seriously consider a more
+// direct implementation of Prometheus metrics for monitoring production
+// systems.
+//
+// The exports map has the following meaning:
+//
+// The keys in the map correspond to expvar keys, i.e. for every expvar key you
+// want to export as Prometheus metric, you need an entry in the exports
+// map. The descriptor mapped to each key describes how to export the expvar
+// value. It defines the name and the help string of the Prometheus metric
+// proxying the expvar value. The type will always be Untyped.
+//
+// For descriptors without variable labels, the expvar value must be a number or
+// a bool. The number is then directly exported as the Prometheus sample
+// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values
+// that are not numbers or bools are silently ignored.
+//
+// If the descriptor has one variable label, the expvar value must be an expvar
+// map. The keys in the expvar map become the various values of the one
+// Prometheus label. The values in the expvar map must be numbers or bools again
+// as above.
+//
+// For descriptors with more than one variable label, the expvar must be a
+// nested expvar map, i.e. where the values of the topmost map are maps again
+// etc. until a depth is reached that corresponds to the number of labels. The
+// leaves of that structure must be numbers or bools as above to serve as the
+// sample values.
+//
+// Anything that does not fit into the scheme above is silently ignored.
+func NewExpvarCollector(exports map[string]*Desc) Collector {
+	return &expvarCollector{
+		exports: exports,
+	}
+}
+
+// Describe implements Collector.
+func (e *expvarCollector) Describe(ch chan<- *Desc) {
+	for _, desc := range e.exports {
+		ch <- desc
+	}
+}
+
+// Collect implements Collector.
+func (e *expvarCollector) Collect(ch chan<- Metric) {
+	for name, desc := range e.exports {
+		var m Metric
+		expVar := expvar.Get(name)
+		if expVar == nil {
+			continue
+		}
+		var v interface{}
+		labels := make([]string, len(desc.variableLabels))
+		if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil {
+			ch <- NewInvalidMetric(desc, err)
+			continue
+		}
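+		// processValue walks nested expvar maps depth-first, filling in
+		// one label value per level; once all labels are set, it emits a
+		// sample for numbers and bools and silently ignores anything else.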
+		var processValue func(v interface{}, i int)
+		processValue = func(v interface{}, i int) {
+			if i >= len(labels) {
+				copiedLabels := append(make([]string, 0, len(labels)), labels...)
+				switch v := v.(type) {
+				case float64:
+					m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...)
+				case bool:
+					if v {
+						m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...)
+					} else {
+						m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...)
+					}
+				default:
+					return
+				}
+				ch <- m
+				return
+			}
+			vm, ok := v.(map[string]interface{})
+			if !ok {
+				return
+			}
+			for lv, val := range vm {
+				labels[i] = lv
+				processValue(val, i+1)
+			}
+		}
+		processValue(v, 0)
+	}
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go
new file mode 100644
index 0000000..3d383a7
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go
@@ -0,0 +1,42 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// Inline and byte-free variant of hash/fnv's fnv64a.
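+//
+// A minimal usage sketch of the helpers below; the string and the separator
+// byte are purely illustrative:
+//
+//     h := hashNew()
+//     h = hashAdd(h, "http_requests_total")
+//     h = hashAddByte(h, 0xff)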
+
+const (
+	offset64 = 14695981039346656037
+	prime64  = 1099511628211
+)
+
+// hashNew initializes a new fnv64a hash value.
+func hashNew() uint64 {
+	return offset64
+}
+
+// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
+func hashAdd(h uint64, s string) uint64 {
+	for i := 0; i < len(s); i++ {
+		h ^= uint64(s[i])
+		h *= prime64
+	}
+	return h
+}
+
+// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
+func hashAddByte(h uint64, b byte) uint64 {
+	h ^= uint64(b)
+	h *= prime64
+	return h
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
new file mode 100644
index 0000000..71d406b
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
@@ -0,0 +1,286 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"math"
+	"sync/atomic"
+	"time"
+
+	dto "github.com/prometheus/client_model/go"
+)
+
+// Gauge is a Metric that represents a single numerical value that can
+// arbitrarily go up and down.
+//
+// A Gauge is typically used for measured values like temperatures or current
+// memory usage, but also "counts" that can go up and down, like the number of
+// running goroutines.
+//
+// To create Gauge instances, use NewGauge.
+type Gauge interface {
+	Metric
+	Collector
+
+	// Set sets the Gauge to an arbitrary value.
+	Set(float64)
+	// Inc increments the Gauge by 1. Use Add to increment it by arbitrary
+	// values.
+	Inc()
+	// Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary
+	// values.
+	Dec()
+	// Add adds the given value to the Gauge. (The value can be negative,
+	// resulting in a decrease of the Gauge.)
+	Add(float64)
+	// Sub subtracts the given value from the Gauge. (The value can be
+	// negative, resulting in an increase of the Gauge.)
+	Sub(float64)
+
+	// SetToCurrentTime sets the Gauge to the current Unix time in seconds.
+	SetToCurrentTime()
+}
+
+// GaugeOpts is an alias for Opts. See there for doc comments.
+type GaugeOpts Opts
+
+// NewGauge creates a new Gauge based on the provided GaugeOpts.
+//
+// The returned implementation is optimized for a fast Set method. If you have a
+// choice for managing the value of a Gauge via Set vs. Inc/Dec/Add/Sub, pick
+// the former. For example, the Inc method of the returned Gauge is slower than
+// the Inc method of a Counter returned by NewCounter. This matches the typical
+// scenarios for Gauges and Counters, where the former tends to be Set-heavy and
+// the latter Inc-heavy.
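+//
+// A minimal usage sketch; the metric name and help text are illustrative:
+//
+//     queueDepth := prometheus.NewGauge(prometheus.GaugeOpts{
+//         Name: "queue_depth",
+//         Help: "Current number of items waiting in the queue.",
+//     })
+//     prometheus.MustRegister(queueDepth)
+//     queueDepth.Set(42)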
+func NewGauge(opts GaugeOpts) Gauge {
+	desc := NewDesc(
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		opts.Help,
+		nil,
+		opts.ConstLabels,
+	)
+	result := &gauge{desc: desc, labelPairs: desc.constLabelPairs}
+	result.init(result) // Init self-collection.
+	return result
+}
+
+type gauge struct {
+	// valBits contains the bits of the represented float64 value. It has
+	// to go first in the struct to guarantee alignment for atomic
+	// operations.  http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+	valBits uint64
+
+	selfCollector
+
+	desc       *Desc
+	labelPairs []*dto.LabelPair
+}
+
+func (g *gauge) Desc() *Desc {
+	return g.desc
+}
+
+func (g *gauge) Set(val float64) {
+	atomic.StoreUint64(&g.valBits, math.Float64bits(val))
+}
+
+func (g *gauge) SetToCurrentTime() {
+	g.Set(float64(time.Now().UnixNano()) / 1e9)
+}
+
+func (g *gauge) Inc() {
+	g.Add(1)
+}
+
+func (g *gauge) Dec() {
+	g.Add(-1)
+}
+
+func (g *gauge) Add(val float64) {
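+	// There is no atomic add for float64, so load the current bits, do the
+	// addition in float space, and retry the compare-and-swap until no
+	// concurrent update has interfered.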
+	for {
+		oldBits := atomic.LoadUint64(&g.valBits)
+		newBits := math.Float64bits(math.Float64frombits(oldBits) + val)
+		if atomic.CompareAndSwapUint64(&g.valBits, oldBits, newBits) {
+			return
+		}
+	}
+}
+
+func (g *gauge) Sub(val float64) {
+	g.Add(val * -1)
+}
+
+func (g *gauge) Write(out *dto.Metric) error {
+	val := math.Float64frombits(atomic.LoadUint64(&g.valBits))
+	return populateMetric(GaugeValue, val, g.labelPairs, out)
+}
+
+// GaugeVec is a Collector that bundles a set of Gauges that all share the same
+// Desc, but have different values for their variable labels. This is used if
+// you want to count the same thing partitioned by various dimensions
+// (e.g. number of operations queued, partitioned by user and operation
+// type). Create instances with NewGaugeVec.
+type GaugeVec struct {
+	*metricVec
+}
+
+// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
+// partitioned by the given label names.
+func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
+	desc := NewDesc(
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		opts.Help,
+		labelNames,
+		opts.ConstLabels,
+	)
+	return &GaugeVec{
+		metricVec: newMetricVec(desc, func(lvs ...string) Metric {
+			if len(lvs) != len(desc.variableLabels) {
+				panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))
+			}
+			result := &gauge{desc: desc, labelPairs: makeLabelPairs(desc, lvs)}
+			result.init(result) // Init self-collection.
+			return result
+		}),
+	}
+}
+
+// GetMetricWithLabelValues returns the Gauge for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new Gauge is created.
+//
+// It is possible to call this method without using the returned Gauge, in
+// order to create the Gauge but leave it at its starting value of 0. See also
+// the SummaryVec example.
+//
+// Keeping the Gauge for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Gauge from the GaugeVec. In that case, the
+// Gauge will still exist, but it will not be exported anymore, even if a
+// Gauge with the same label values is created later. See also the CounterVec
+// example.
+//
+// An error is returned if the number of label values is not the same as the
+// number of VariableLabels in Desc (minus any curried labels).
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
+	metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
+	if metric != nil {
+		return metric.(Gauge), err
+	}
+	return nil, err
+}
+
+// GetMetricWith returns the Gauge for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Gauge is created. Implications of
+// creating a Gauge without using it and keeping the Gauge for later use are
+// the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc (minus any curried labels).
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
+	metric, err := v.metricVec.getMetricWith(labels)
+	if metric != nil {
+		return metric.(Gauge), err
+	}
+	return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. Not returning an
+// error allows shortcuts like
+//     myVec.WithLabelValues("404", "GET").Add(42)
+func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge {
+	g, err := v.GetMetricWithLabelValues(lvs...)
+	if err != nil {
+		panic(err)
+	}
+	return g
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. Not returning an error allows shortcuts like
+//     myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
+func (v *GaugeVec) With(labels Labels) Gauge {
+	g, err := v.GetMetricWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return g
+}
+
+// CurryWith returns a vector curried with the provided labels, i.e. the
+// returned vector has those labels pre-set for all labeled operations performed
+// on it. The cardinality of the curried vector is reduced accordingly. The
+// order of the remaining labels stays the same (just with the curried labels
+// taken out of the sequence – which is relevant for the
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
+// vector, but only with labels not yet used for currying before.
+//
+// The metrics contained in the GaugeVec are shared between the curried and
+// uncurried vectors. They are just accessed differently. Curried and uncurried
+// vectors behave identically in terms of collection. Only one must be
+// registered with a given registry (usually the uncurried version). The Reset
+// method deletes all metrics, even if called on a curried vector.
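+//
+// A minimal currying sketch; the metric and label names are illustrative:
+//
+//     v := prometheus.NewGaugeVec(prometheus.GaugeOpts{
+//         Name: "tasks",
+//         Help: "Number of tasks, partitioned by pool and state.",
+//     }, []string{"pool", "state"})
+//     running := v.MustCurryWith(prometheus.Labels{"state": "running"})
+//     running.WithLabelValues("default").Set(3) // Only "pool" is left to set.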
+func (v *GaugeVec) CurryWith(labels Labels) (*GaugeVec, error) {
+	vec, err := v.curryWith(labels)
+	if vec != nil {
+		return &GaugeVec{vec}, err
+	}
+	return nil, err
+}
+
+// MustCurryWith works as CurryWith but panics where CurryWith would have
+// returned an error.
+func (v *GaugeVec) MustCurryWith(labels Labels) *GaugeVec {
+	vec, err := v.CurryWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return vec
+}
+
+// GaugeFunc is a Gauge whose value is determined at collect time by calling a
+// provided function.
+//
+// To create GaugeFunc instances, use NewGaugeFunc.
+type GaugeFunc interface {
+	Metric
+	Collector
+}
+
+// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The
+// value reported is determined by calling the given function from within the
+// Write method. Take into account that metric collection may happen
+// concurrently. If that results in concurrent calls to Write, like in the case
+// where a GaugeFunc is directly registered with Prometheus, the provided
+// function must be concurrency-safe.
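+//
+// A minimal usage sketch; the metric name is illustrative, and the caller is
+// assumed to import the runtime package:
+//
+//     prometheus.MustRegister(prometheus.NewGaugeFunc(prometheus.GaugeOpts{
+//         Name: "goroutines_current",
+//         Help: "Current number of goroutines.",
+//     }, func() float64 { return float64(runtime.NumGoroutine()) }))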
+func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc {
+	return newValueFunc(NewDesc(
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		opts.Help,
+		nil,
+		opts.ConstLabels,
+	), GaugeValue, function)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
new file mode 100644
index 0000000..ba3b933
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
@@ -0,0 +1,301 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"fmt"
+	"runtime"
+	"runtime/debug"
+	"time"
+)
+
+type goCollector struct {
+	goroutinesDesc *Desc
+	threadsDesc    *Desc
+	gcDesc         *Desc
+	goInfoDesc     *Desc
+
+	// metrics to describe and collect
+	metrics memStatsMetrics
+}
+
+// NewGoCollector returns a collector which exports metrics about the current Go
+// process. This includes memory stats. To collect those, runtime.ReadMemStats
+// is called. This causes a stop-the-world, which is very short with Go1.9+
+// (~25µs). However, with older Go versions, the stop-the-world duration depends
+// on the heap size and can be quite significant (~1.7 ms/GiB as per
+// https://go-review.googlesource.com/c/go/+/34937).
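+//
+// A minimal registration sketch. The default registry usually has a Go
+// collector registered already, so the sketch uses a fresh registry:
+//
+//     reg := prometheus.NewRegistry()
+//     reg.MustRegister(prometheus.NewGoCollector())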
+func NewGoCollector() Collector {
+	return &goCollector{
+		goroutinesDesc: NewDesc(
+			"go_goroutines",
+			"Number of goroutines that currently exist.",
+			nil, nil),
+		threadsDesc: NewDesc(
+			"go_threads",
+			"Number of OS threads created.",
+			nil, nil),
+		gcDesc: NewDesc(
+			"go_gc_duration_seconds",
+			"A summary of the GC invocation durations.",
+			nil, nil),
+		goInfoDesc: NewDesc(
+			"go_info",
+			"Information about the Go environment.",
+			nil, Labels{"version": runtime.Version()}),
+		metrics: memStatsMetrics{
+			{
+				desc: NewDesc(
+					memstatNamespace("alloc_bytes"),
+					"Number of bytes allocated and still in use.",
+					nil, nil,
+				),
+				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) },
+				valType: GaugeValue,
+			}, {
+				desc: NewDesc(
+					memstatNamespace("alloc_bytes_total"),
+					"Total number of bytes allocated, even if freed.",
+					nil, nil,
+				),
+				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) },
+				valType: CounterValue,
+			}, {
+				desc: NewDesc(
+					memstatNamespace("sys_bytes"),
+					"Number of bytes obtained from system.",
+					nil, nil,
+				),
+				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },
+				valType: GaugeValue,
+			}, {
+				desc: NewDesc(
+					memstatNamespace("lookups_total"),
+					"Total number of pointer lookups.",
+					nil, nil,
+				),
+				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) },
+				valType: CounterValue,
+			}, {
+				desc: NewDesc(
+					memstatNamespace("mallocs_total"),
+					"Total number of mallocs.",
+					nil, nil,
+				),
+				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) },
+				valType: CounterValue,
+			}, {
+				desc: NewDesc(
+					memstatNamespace("frees_total"),
+					"Total number of frees.",
+					nil, nil,
+				),
+				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.Frees) },
+				valType: CounterValue,
+			}, {
+				desc: NewDesc(
+					memstatNamespace("heap_alloc_bytes"),
+					"Number of heap bytes allocated and still in use.",
+					nil, nil,
+				),
+				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) },
+				valType: GaugeValue,
+			}, {
+				desc: NewDesc(
+					memstatNamespace("heap_sys_bytes"),
+					"Number of heap bytes obtained from system.",
+					nil, nil,
+				),
+				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) },
+				valType: GaugeValue,
+			}, {
+				desc: NewDesc(
+					memstatNamespace("heap_idle_bytes"),
+					"Number of heap bytes waiting to be used.",
+					nil, nil,
+				),
+				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) },
+				valType: GaugeValue,
+			}, {
+				desc: NewDesc(
+					memstatNamespace("heap_inuse_bytes"),
+					"Number of heap bytes that are in use.",
+					nil, nil,
+				),
+				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) },
+				valType: GaugeValue,
+			}, {
+				desc: NewDesc(
+					memstatNamespace("heap_released_bytes"),
+					"Number of heap bytes released to OS.",
+					nil, nil,
+				),
+				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },
+				valType: GaugeValue,
+			}, {
+				desc: NewDesc(
+					memstatNamespace("heap_objects"),
+					"Number of allocated objects.",
+					nil, nil,
+				),
+				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) },
+				valType: GaugeValue,
+			}, {
+				desc: NewDesc(
+					memstatNamespace("stack_inuse_bytes"),
+					"Number of bytes in use by the stack allocator.",
+					nil, nil,
+				),
+				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) },
+				valType: GaugeValue,
+			}, {
+				desc: NewDesc(
+					memstatNamespace("stack_sys_bytes"),
+					"Number of bytes obtained from system for stack allocator.",
+					nil, nil,
+				),
+				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) },
+				valType: GaugeValue,
+			}, {
+				desc: NewDesc(
+					memstatNamespace("mspan_inuse_bytes"),
+					"Number of bytes in use by mspan structures.",
+					nil, nil,
+				),
+				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) },
+				valType: GaugeValue,
+			}, {
+				desc: NewDesc(
+					memstatNamespace("mspan_sys_bytes"),
+					"Number of bytes used for mspan structures obtained from system.",
+					nil, nil,
+				),
+				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) },
+				valType: GaugeValue,
+			}, {
+				desc: NewDesc(
+					memstatNamespace("mcache_inuse_bytes"),
+					"Number of bytes in use by mcache structures.",
+					nil, nil,
+				),
+				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) },
+				valType: GaugeValue,
+			}, {
+				desc: NewDesc(
+					memstatNamespace("mcache_sys_bytes"),
+					"Number of bytes used for mcache structures obtained from system.",
+					nil, nil,
+				),
+				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) },
+				valType: GaugeValue,
+			}, {
+				desc: NewDesc(
+					memstatNamespace("buck_hash_sys_bytes"),
+					"Number of bytes used by the profiling bucket hash table.",
+					nil, nil,
+				),
+				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) },
+				valType: GaugeValue,
+			}, {
+				desc: NewDesc(
+					memstatNamespace("gc_sys_bytes"),
+					"Number of bytes used for garbage collection system metadata.",
+					nil, nil,
+				),
+				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) },
+				valType: GaugeValue,
+			}, {
+				desc: NewDesc(
+					memstatNamespace("other_sys_bytes"),
+					"Number of bytes used for other system allocations.",
+					nil, nil,
+				),
+				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) },
+				valType: GaugeValue,
+			}, {
+				desc: NewDesc(
+					memstatNamespace("next_gc_bytes"),
+					"Number of heap bytes when next garbage collection will take place.",
+					nil, nil,
+				),
+				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
+				valType: GaugeValue,
+			}, {
+				desc: NewDesc(
+					memstatNamespace("last_gc_time_seconds"),
+					"Number of seconds since 1970 of last garbage collection.",
+					nil, nil,
+				),
+				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 },
+				valType: GaugeValue,
+			}, {
+				desc: NewDesc(
+					memstatNamespace("gc_cpu_fraction"),
+					"The fraction of this program's available CPU time used by the GC since the program started.",
+					nil, nil,
+				),
+				eval:    func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction },
+				valType: GaugeValue,
+			},
+		},
+	}
+}
+
+func memstatNamespace(s string) string {
+	return fmt.Sprintf("go_memstats_%s", s)
+}
+
+// Describe returns all descriptions of the collector.
+func (c *goCollector) Describe(ch chan<- *Desc) {
+	ch <- c.goroutinesDesc
+	ch <- c.threadsDesc
+	ch <- c.gcDesc
+	ch <- c.goInfoDesc
+	for _, i := range c.metrics {
+		ch <- i.desc
+	}
+}
+
+// Collect returns the current state of all metrics of the collector.
+func (c *goCollector) Collect(ch chan<- Metric) {
+	ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine()))
+	n, _ := runtime.ThreadCreateProfile(nil)
+	ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n))
+
+	var stats debug.GCStats
+	stats.PauseQuantiles = make([]time.Duration, 5)
+	debug.ReadGCStats(&stats)
+
+	quantiles := make(map[float64]float64)
+	for idx, pq := range stats.PauseQuantiles[1:] {
+		quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds()
+	}
+	quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
+	ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles)
+
+	ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1)
+
+	ms := &runtime.MemStats{}
+	runtime.ReadMemStats(ms)
+	for _, i := range c.metrics {
+		ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))
+	}
+}
+
+// memStatsMetrics provide description, value, and value type for memstat metrics.
+type memStatsMetrics []struct {
+	desc    *Desc
+	eval    func(*runtime.MemStats) float64
+	valType ValueType
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
new file mode 100644
index 0000000..f88da70
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
@@ -0,0 +1,614 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"fmt"
+	"math"
+	"runtime"
+	"sort"
+	"sync"
+	"sync/atomic"
+
+	"github.com/golang/protobuf/proto"
+
+	dto "github.com/prometheus/client_model/go"
+)
+
+// A Histogram counts individual observations from an event or sample stream in
+// configurable buckets. Similar to a summary, it also provides a sum of
+// observations and an observation count.
+//
+// On the Prometheus server, quantiles can be calculated from a Histogram using
+// the histogram_quantile function in the query language.
+//
+// Note that Histograms, in contrast to Summaries, can be aggregated with the
+// Prometheus query language (see the documentation for detailed
+// procedures). However, Histograms require the user to pre-define suitable
+// buckets, and they are in general less accurate. The Observe method of a
+// Histogram has a very low performance overhead in comparison with the Observe
+// method of a Summary.
+//
+// To create Histogram instances, use NewHistogram.
+type Histogram interface {
+	Metric
+	Collector
+
+	// Observe adds a single observation to the histogram.
+	Observe(float64)
+}
+
+// bucketLabel is used for the label that defines the upper bound of a
+// bucket of a histogram ("le" -> "less than or equal").
+const bucketLabel = "le"
+
+// DefBuckets are the default Histogram buckets. The default buckets are
+// tailored to broadly measure the response time (in seconds) of a network
+// service. Most likely, however, you will be required to define buckets
+// customized to your use case.
+var (
+	DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
+
+	errBucketLabelNotAllowed = fmt.Errorf(
+		"%q is not allowed as label name in histograms", bucketLabel,
+	)
+)
+
+// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest
+// bucket has an upper bound of 'start'. The final +Inf bucket is not counted
+// and not included in the returned slice. The returned slice is meant to be
+// used for the Buckets field of HistogramOpts.
+//
+// The function panics if 'count' is zero or negative.
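+//
+// For example, LinearBuckets(10, 5, 4) yields []float64{10, 15, 20, 25}.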
+func LinearBuckets(start, width float64, count int) []float64 {
+	if count < 1 {
+		panic("LinearBuckets needs a positive count")
+	}
+	buckets := make([]float64, count)
+	for i := range buckets {
+		buckets[i] = start
+		start += width
+	}
+	return buckets
+}
+
+// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an
+// upper bound of 'start' and each following bucket's upper bound is 'factor'
+// times the previous bucket's upper bound. The final +Inf bucket is not counted
+// and not included in the returned slice. The returned slice is meant to be
+// used for the Buckets field of HistogramOpts.
+//
+// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative,
+// or if 'factor' is less than or equal to 1.
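+//
+// For example, ExponentialBuckets(1, 2, 5) yields []float64{1, 2, 4, 8, 16}.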
+func ExponentialBuckets(start, factor float64, count int) []float64 {
+	if count < 1 {
+		panic("ExponentialBuckets needs a positive count")
+	}
+	if start <= 0 {
+		panic("ExponentialBuckets needs a positive start value")
+	}
+	if factor <= 1 {
+		panic("ExponentialBuckets needs a factor greater than 1")
+	}
+	buckets := make([]float64, count)
+	for i := range buckets {
+		buckets[i] = start
+		start *= factor
+	}
+	return buckets
+}
+
+// HistogramOpts bundles the options for creating a Histogram metric. It is
+// mandatory to set Name to a non-empty string. All other fields are optional
+// and can safely be left at their zero value, although it is strongly
+// encouraged to set a Help string.
+type HistogramOpts struct {
+	// Namespace, Subsystem, and Name are components of the fully-qualified
+	// name of the Histogram (created by joining these components with
+	// "_"). Only Name is mandatory, the others merely help structuring the
+	// name. Note that the fully-qualified name of the Histogram must be a
+	// valid Prometheus metric name.
+	Namespace string
+	Subsystem string
+	Name      string
+
+	// Help provides information about this Histogram.
+	//
+	// Metrics with the same fully-qualified name must have the same Help
+	// string.
+	Help string
+
+	// ConstLabels are used to attach fixed labels to this metric. Metrics
+	// with the same fully-qualified name must have the same label names in
+	// their ConstLabels.
+	//
+	// ConstLabels are only used rarely. In particular, do not use them to
+	// attach the same labels to all your metrics. Those use cases are
+	// better covered by target labels set by the scraping Prometheus
+	// server, or by one specific metric (e.g. a build_info or a
+	// machine_role metric). See also
+	// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels
+	ConstLabels Labels
+
+	// Buckets defines the buckets into which observations are counted. Each
+	// element in the slice is the upper inclusive bound of a bucket. The
+	// values must be sorted in strictly increasing order. There is no need
+	// to add a highest bucket with +Inf bound, it will be added
+	// implicitly. The default value is DefBuckets.
+	Buckets []float64
+}
+
+// NewHistogram creates a new Histogram based on the provided HistogramOpts. It
+// panics if the buckets in HistogramOpts are not in strictly increasing order.
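+//
+// A minimal usage sketch; the metric name and buckets are illustrative:
+//
+//     reqDur := prometheus.NewHistogram(prometheus.HistogramOpts{
+//         Name:    "request_duration_seconds",
+//         Help:    "Request latency distribution.",
+//         Buckets: prometheus.LinearBuckets(0.05, 0.05, 10), // 0.05s to 0.5s.
+//     })
+//     prometheus.MustRegister(reqDur)
+//     reqDur.Observe(0.21)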
+func NewHistogram(opts HistogramOpts) Histogram {
+	return newHistogram(
+		NewDesc(
+			BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+			opts.Help,
+			nil,
+			opts.ConstLabels,
+		),
+		opts,
+	)
+}
+
+func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram {
+	if len(desc.variableLabels) != len(labelValues) {
+		panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues))
+	}
+
+	for _, n := range desc.variableLabels {
+		if n == bucketLabel {
+			panic(errBucketLabelNotAllowed)
+		}
+	}
+	for _, lp := range desc.constLabelPairs {
+		if lp.GetName() == bucketLabel {
+			panic(errBucketLabelNotAllowed)
+		}
+	}
+
+	if len(opts.Buckets) == 0 {
+		opts.Buckets = DefBuckets
+	}
+
+	h := &histogram{
+		desc:        desc,
+		upperBounds: opts.Buckets,
+		labelPairs:  makeLabelPairs(desc, labelValues),
+		counts:      [2]*histogramCounts{&histogramCounts{}, &histogramCounts{}},
+	}
+	for i, upperBound := range h.upperBounds {
+		if i < len(h.upperBounds)-1 {
+			if upperBound >= h.upperBounds[i+1] {
+				panic(fmt.Errorf(
+					"histogram buckets must be in increasing order: %f >= %f",
+					upperBound, h.upperBounds[i+1],
+				))
+			}
+		} else {
+			if math.IsInf(upperBound, +1) {
+				// The +Inf bucket is implicit. Remove it here.
+				h.upperBounds = h.upperBounds[:i]
+			}
+		}
+	}
+	// Finally we know the final length of h.upperBounds and can make counts
+	// for both states:
+	h.counts[0].buckets = make([]uint64, len(h.upperBounds))
+	h.counts[1].buckets = make([]uint64, len(h.upperBounds))
+
+	h.init(h) // Init self-collection.
+	return h
+}
+
+type histogramCounts struct {
+	// sumBits contains the bits of the float64 representing the sum of all
+	// observations. sumBits and count have to go first in the struct to
+	// guarantee alignment for atomic operations.
+	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+	sumBits uint64
+	count   uint64
+	buckets []uint64
+}
+
+type histogram struct {
+	// countAndHotIdx is a complicated one. For lock-free yet atomic
+	// observations, we need to save the total count of observations again,
+	// combined with the index of the currently-hot counts struct, so that
+	// we can perform the operation on both values atomically. The least
+	// significant bit defines the hot counts struct. The remaining 63 bits
+	// represent the total count of observations. This happens under the
+	// assumption that the 63bit count will never overflow. Rationale: An
+	// observation takes about 30ns. Let's assume it could happen in
+	// 10ns. Overflowing the counter will then take at least (2^63)*10ns,
+	// which is about 3000 years.
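+	//
+	// For example: with counts struct #1 currently hot and three
+	// observations recorded, countAndHotIdx holds 3<<1 | 1 == 7, so that
+	// n>>1 recovers the observation count and n%2 the hot index.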
+	//
+	// This has to be first in the struct for 64bit alignment. See
+	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+	countAndHotIdx uint64
+
+	selfCollector
+	desc     *Desc
+	writeMtx sync.Mutex // Only used in the Write method.
+
+	upperBounds []float64
+
+	// Two counts, one is "hot" for lock-free observations, the other is
+	// "cold" for writing out a dto.Metric. It has to be an array of
+	// pointers to guarantee 64bit alignment of the histogramCounts, see
+	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
+	counts [2]*histogramCounts
+	hotIdx int // Index of currently-hot counts. Only used within Write.
+
+	labelPairs []*dto.LabelPair
+}
+
+func (h *histogram) Desc() *Desc {
+	return h.desc
+}
+
+func (h *histogram) Observe(v float64) {
+	// TODO(beorn7): For small numbers of buckets (<30), a linear search is
+	// slightly faster than the binary search. If we really care, we could
+	// switch from one search strategy to the other depending on the number
+	// of buckets.
+	//
+	// Microbenchmarks (BenchmarkHistogramNoLabels):
+	// 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op
+	// 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
+	// 300 buckets: 154 ns/op linear - binary 61.6 ns/op
+	i := sort.SearchFloat64s(h.upperBounds, v)
+
+	// We increment h.countAndHotIdx by 2 so that the counter in the upper
+	// 63 bits gets incremented by 1. At the same time, we get the new value
+	// back, which we can use to find the currently-hot counts.
+	n := atomic.AddUint64(&h.countAndHotIdx, 2)
+	hotCounts := h.counts[n%2]
+
+	if i < len(h.upperBounds) {
+		atomic.AddUint64(&hotCounts.buckets[i], 1)
+	}
+	for {
+		oldBits := atomic.LoadUint64(&hotCounts.sumBits)
+		newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
+		if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
+			break
+		}
+	}
+	// Increment count last as we take it as a signal that the observation
+	// is complete.
+	atomic.AddUint64(&hotCounts.count, 1)
+}
+
+func (h *histogram) Write(out *dto.Metric) error {
+	var (
+		his                   = &dto.Histogram{}
+		buckets               = make([]*dto.Bucket, len(h.upperBounds))
+		hotCounts, coldCounts *histogramCounts
+		count                 uint64
+	)
+
+	// For simplicity, we mutex the rest of this method. It is not in the
+	// hot path, i.e.  Observe is called much more often than Write. The
+	// complication of making Write lock-free isn't worth it.
+	h.writeMtx.Lock()
+	defer h.writeMtx.Unlock()
+
+	// This is a bit arcane, which is why the following spells out this if
+	// clause in English:
+	//
+	// If the currently-hot counts struct is #0, we atomically increment
+	// h.countAndHotIdx by 1 so that from now on Observe will use the counts
+	// struct #1. Furthermore, the atomic increment gives us the new value,
+	// which, in its most significant 63 bits, tells us the count of
+	// observations done so far up to and including currently ongoing
+	// observations still using the counts struct just changed from hot to
+	// cold. To have a normal uint64 for the count, we bitshift by 1 and
+	// save the result in count. We also set h.hotIdx to 1 for the next
+	// Write call, and we will refer to counts #1 as hotCounts and to counts
+	// #0 as coldCounts.
+	//
+	// If the currently-hot counts struct is #1, we do the corresponding
+	// things the other way round. We have to _decrement_ h.countAndHotIdx
+	// (which is a bit arcane in itself, as we have to express -1 with an
+	// unsigned int...).
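+	//
+	// (Adding ^uint64(0), i.e. a word with all 64 bits set, wraps around
+	// and therefore acts as the unsigned decrement by 1.)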
+	if h.hotIdx == 0 {
+		count = atomic.AddUint64(&h.countAndHotIdx, 1) >> 1
+		h.hotIdx = 1
+		hotCounts = h.counts[1]
+		coldCounts = h.counts[0]
+	} else {
+		count = atomic.AddUint64(&h.countAndHotIdx, ^uint64(0)) >> 1 // Decrement.
+		h.hotIdx = 0
+		hotCounts = h.counts[0]
+		coldCounts = h.counts[1]
+	}
+
+	// Now we have to wait for the now-declared-cold counts to actually cool
+	// down, i.e. wait for all observations still using it to finish. That's
+	// the case once the count in the cold counts struct is the same as the
+	// one atomically retrieved from the upper 63bits of h.countAndHotIdx.
+	for {
+		if count == atomic.LoadUint64(&coldCounts.count) {
+			break
+		}
+		runtime.Gosched() // Let observations get work done.
+	}
+
+	his.SampleCount = proto.Uint64(count)
+	his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits)))
+	var cumCount uint64
+	for i, upperBound := range h.upperBounds {
+		cumCount += atomic.LoadUint64(&coldCounts.buckets[i])
+		buckets[i] = &dto.Bucket{
+			CumulativeCount: proto.Uint64(cumCount),
+			UpperBound:      proto.Float64(upperBound),
+		}
+	}
+
+	his.Bucket = buckets
+	out.Histogram = his
+	out.Label = h.labelPairs
+
+	// Finally add all the cold counts to the new hot counts and reset the cold counts.
+	atomic.AddUint64(&hotCounts.count, count)
+	atomic.StoreUint64(&coldCounts.count, 0)
+	for {
+		oldBits := atomic.LoadUint64(&hotCounts.sumBits)
+		newBits := math.Float64bits(math.Float64frombits(oldBits) + his.GetSampleSum())
+		if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
+			atomic.StoreUint64(&coldCounts.sumBits, 0)
+			break
+		}
+	}
+	for i := range h.upperBounds {
+		atomic.AddUint64(&hotCounts.buckets[i], atomic.LoadUint64(&coldCounts.buckets[i]))
+		atomic.StoreUint64(&coldCounts.buckets[i], 0)
+	}
+	return nil
+}
+
+// HistogramVec is a Collector that bundles a set of Histograms that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. HTTP request latencies, partitioned by status code and method). Create
+// instances with NewHistogramVec.
+type HistogramVec struct {
+	*metricVec
+}
+
+// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
+// partitioned by the given label names.
+func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
+	desc := NewDesc(
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		opts.Help,
+		labelNames,
+		opts.ConstLabels,
+	)
+	return &HistogramVec{
+		metricVec: newMetricVec(desc, func(lvs ...string) Metric {
+			return newHistogram(desc, opts, lvs...)
+		}),
+	}
+}
+
+// GetMetricWithLabelValues returns the Histogram for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new Histogram is created.
+//
+// It is possible to call this method without using the returned Histogram, in
+// order to create the Histogram but leave it at its starting value, i.e. a
+// Histogram without any observations.
+//
+// Keeping the Histogram for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Histogram from the HistogramVec. In that case, the
+// Histogram will still exist, but it will not be exported anymore, even if a
+// Histogram with the same label values is created later. See also the CounterVec
+// example.
+//
+// An error is returned if the number of label values is not the same as the
+// number of VariableLabels in Desc (minus any curried labels).
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the GaugeVec example.
+func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
+	metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
+	if metric != nil {
+		return metric.(Observer), err
+	}
+	return nil, err
+}
+
+// GetMetricWith returns the Histogram for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Histogram is created. Implications of
+// creating a Histogram without using it and keeping the Histogram for later use
+// are the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc (minus any curried labels).
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) {
+	metric, err := v.metricVec.getMetricWith(labels)
+	if metric != nil {
+		return metric.(Observer), err
+	}
+	return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. Not returning an
+// error allows shortcuts like
+//     myVec.WithLabelValues("404", "GET").Observe(42.21)
+func (v *HistogramVec) WithLabelValues(lvs ...string) Observer {
+	h, err := v.GetMetricWithLabelValues(lvs...)
+	if err != nil {
+		panic(err)
+	}
+	return h
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. Not returning an error allows shortcuts like
+//     myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
+func (v *HistogramVec) With(labels Labels) Observer {
+	h, err := v.GetMetricWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return h
+}
+
+// CurryWith returns a vector curried with the provided labels, i.e. the
+// returned vector has those labels pre-set for all labeled operations performed
+// on it. The cardinality of the curried vector is reduced accordingly. The
+// order of the remaining labels stays the same (just with the curried labels
+// taken out of the sequence – which is relevant for the
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
+// vector, but only with labels not yet used for currying before.
+//
+// The metrics contained in the HistogramVec are shared between the curried and
+// uncurried vectors. They are just accessed differently. Curried and uncurried
+// vectors behave identically in terms of collection. Only one must be
+// registered with a given registry (usually the uncurried version). The Reset
+// method deletes all metrics, even if called on a curried vector.
+func (v *HistogramVec) CurryWith(labels Labels) (ObserverVec, error) {
+	vec, err := v.curryWith(labels)
+	if vec != nil {
+		return &HistogramVec{vec}, err
+	}
+	return nil, err
+}
+
+// MustCurryWith works as CurryWith but panics where CurryWith would have
+// returned an error.
+func (v *HistogramVec) MustCurryWith(labels Labels) ObserverVec {
+	vec, err := v.CurryWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return vec
+}
+
+type constHistogram struct {
+	desc       *Desc
+	count      uint64
+	sum        float64
+	buckets    map[float64]uint64
+	labelPairs []*dto.LabelPair
+}
+
+func (h *constHistogram) Desc() *Desc {
+	return h.desc
+}
+
+func (h *constHistogram) Write(out *dto.Metric) error {
+	his := &dto.Histogram{}
+	buckets := make([]*dto.Bucket, 0, len(h.buckets))
+
+	his.SampleCount = proto.Uint64(h.count)
+	his.SampleSum = proto.Float64(h.sum)
+
+	for upperBound, count := range h.buckets {
+		buckets = append(buckets, &dto.Bucket{
+			CumulativeCount: proto.Uint64(count),
+			UpperBound:      proto.Float64(upperBound),
+		})
+	}
+
+	if len(buckets) > 0 {
+		sort.Sort(buckSort(buckets))
+	}
+	his.Bucket = buckets
+
+	out.Histogram = his
+	out.Label = h.labelPairs
+
+	return nil
+}
+
+// NewConstHistogram returns a metric representing a Prometheus histogram with
+// fixed values for the count, sum, and bucket counts. As those parameters
+// cannot be changed, the returned value does not implement the Histogram
+// interface (but only the Metric interface). Users of this package will not
+// have much use for it in regular operations. However, when implementing custom
+// Collectors, it is useful as a throw-away metric that is generated on the fly
+// to send it to Prometheus in the Collect method.
+//
+// buckets is a map of upper bounds to cumulative counts, excluding the +Inf
+// bucket.
+//
+// NewConstHistogram returns an error if the length of labelValues is not
+// consistent with the variable labels in Desc or if Desc is invalid.
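+//
+// A minimal sketch for use inside a custom Collector's Collect method; desc,
+// ch, and all numbers are illustrative:
+//
+//     ch <- prometheus.MustNewConstHistogram(
+//         desc, 42, 12.5, map[float64]uint64{0.1: 10, 0.5: 30, 1: 42},
+//     )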
+func NewConstHistogram(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	buckets map[float64]uint64,
+	labelValues ...string,
+) (Metric, error) {
+	if desc.err != nil {
+		return nil, desc.err
+	}
+	if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
+		return nil, err
+	}
+	return &constHistogram{
+		desc:       desc,
+		count:      count,
+		sum:        sum,
+		buckets:    buckets,
+		labelPairs: makeLabelPairs(desc, labelValues),
+	}, nil
+}
+
+// MustNewConstHistogram is a version of NewConstHistogram that panics where
+// NewConstHistogram would have returned an error.
+func MustNewConstHistogram(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	buckets map[float64]uint64,
+	labelValues ...string,
+) Metric {
+	m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...)
+	if err != nil {
+		panic(err)
+	}
+	return m
+}
+
+type buckSort []*dto.Bucket
+
+func (s buckSort) Len() int {
+	return len(s)
+}
+
+func (s buckSort) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+func (s buckSort) Less(i, j int) bool {
+	return s[i].GetUpperBound() < s[j].GetUpperBound()
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/http.go b/vendor/github.com/prometheus/client_golang/prometheus/http.go
new file mode 100644
index 0000000..9f0875b
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/http.go
@@ -0,0 +1,504 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"bufio"
+	"compress/gzip"
+	"io"
+	"net"
+	"net/http"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/prometheus/common/expfmt"
+)
+
+// TODO(beorn7): Remove this whole file. It is a partial mirror of
+// promhttp/http.go (to avoid circular import chains) where everything HTTP
+// related should live. The functions here exist only to avoid breaking
+// existing users. Everything is deprecated.
+
+const (
+	contentTypeHeader     = "Content-Type"
+	contentLengthHeader   = "Content-Length"
+	contentEncodingHeader = "Content-Encoding"
+	acceptEncodingHeader  = "Accept-Encoding"
+)
+
+var gzipPool = sync.Pool{
+	New: func() interface{} {
+		return gzip.NewWriter(nil)
+	},
+}
+
+// Handler returns an HTTP handler for the DefaultGatherer. It is
+// already instrumented with InstrumentHandler (using "prometheus" as handler
+// name).
+//
+// Deprecated: Please note the issues described in the doc comment of
+// InstrumentHandler. You might want to consider using promhttp.Handler instead.
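+//
+// A minimal migration sketch, assuming the promhttp subpackage is imported:
+//
+//     http.Handle("/metrics", promhttp.Handler())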
+func Handler() http.Handler {
+	return InstrumentHandler("prometheus", UninstrumentedHandler())
+}
+
+// UninstrumentedHandler returns an HTTP handler for the DefaultGatherer.
+//
+// Deprecated: Use promhttp.HandlerFor(DefaultGatherer, promhttp.HandlerOpts{})
+// instead. See there for further documentation.
+func UninstrumentedHandler() http.Handler {
+	return http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) {
+		mfs, err := DefaultGatherer.Gather()
+		if err != nil {
+			httpError(rsp, err)
+			return
+		}
+
+		contentType := expfmt.Negotiate(req.Header)
+		header := rsp.Header()
+		header.Set(contentTypeHeader, string(contentType))
+
+		w := io.Writer(rsp)
+		if gzipAccepted(req.Header) {
+			header.Set(contentEncodingHeader, "gzip")
+			gz := gzipPool.Get().(*gzip.Writer)
+			defer gzipPool.Put(gz)
+
+			gz.Reset(w)
+			defer gz.Close()
+
+			w = gz
+		}
+
+		enc := expfmt.NewEncoder(w, contentType)
+
+		for _, mf := range mfs {
+			if err := enc.Encode(mf); err != nil {
+				httpError(rsp, err)
+				return
+			}
+		}
+	})
+}
+
+var instLabels = []string{"method", "code"}
+
+type nower interface {
+	Now() time.Time
+}
+
+type nowFunc func() time.Time
+
+func (n nowFunc) Now() time.Time {
+	return n()
+}
+
+var now nower = nowFunc(func() time.Time {
+	return time.Now()
+})
+
+// InstrumentHandler wraps the given HTTP handler for instrumentation. It
+// registers four metric collectors (if not already done) and reports HTTP
+// metrics to the (newly or already) registered collectors: http_requests_total
+// (CounterVec), http_request_duration_microseconds (Summary),
+// http_request_size_bytes (Summary), http_response_size_bytes (Summary). Each
+// has a constant label named "handler" with the provided handlerName as
+// value. http_requests_total is a metric vector partitioned by HTTP method
+// (label name "method") and HTTP status code (label name "code").
+//
+// Deprecated: InstrumentHandler has several issues. Use the tooling provided in
+// package promhttp instead. The issues are the following: (1) It uses Summaries
+// rather than Histograms. Summaries are not useful if aggregation across
+// multiple instances is required. (2) It uses microseconds as the unit, which is
+// deprecated and should be replaced by seconds. (3) The size of the request is
+// calculated in a separate goroutine. Since this calculator requires access to
+// the request header, it creates a race with any writes to the header performed
+// during request handling. httputil.ReverseProxy is a prominent example of a
+// handler performing such writes. (4) It has additional issues with HTTP/2, cf.
+// https://github.com/prometheus/client_golang/issues/272.
+func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc {
+	return InstrumentHandlerFunc(handlerName, handler.ServeHTTP)
+}
+
+// InstrumentHandlerFunc wraps the given function for instrumentation. It
+// otherwise works in the same way as InstrumentHandler (and shares the same
+// issues).
+//
+// Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as
+// InstrumentHandler is. Use the tooling provided in package promhttp instead.
+func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
+	return InstrumentHandlerFuncWithOpts(
+		SummaryOpts{
+			Subsystem:   "http",
+			ConstLabels: Labels{"handler": handlerName},
+			Objectives:  map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
+		},
+		handlerFunc,
+	)
+}
+
+// InstrumentHandlerWithOpts works like InstrumentHandler (and shares the same
+// issues) but provides more flexibility (at the cost of a more complex call
+// syntax). As InstrumentHandler, this function registers four metric
+// collectors, but it uses the provided SummaryOpts to create them. However, the
+// fields "Name" and "Help" in the SummaryOpts are ignored. "Name" is replaced
+// by "requests_total", "request_duration_microseconds", "request_size_bytes",
+// and "response_size_bytes", respectively. "Help" is replaced by an appropriate
+// help string. The names of the variable labels of the http_requests_total
+// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code).
+//
+// If InstrumentHandlerWithOpts is called as follows, it mimics exactly the
+// behavior of InstrumentHandler:
+//
+//     prometheus.InstrumentHandlerWithOpts(
+//         prometheus.SummaryOpts{
+//              Subsystem:   "http",
+//              ConstLabels: prometheus.Labels{"handler": handlerName},
+//         },
+//         handler,
+//     )
+//
+// Technical detail: "requests_total" is a CounterVec, not a SummaryVec, so it
+// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally,
+// and all its fields are set to the equally named fields in the provided
+// SummaryOpts.
+//
+// Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as
+// InstrumentHandler is. Use the tooling provided in package promhttp instead.
+func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc {
+	return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP)
+}
+
+// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc (and shares
+// the same issues) but provides more flexibility (at the cost of a more complex
+// call syntax). See InstrumentHandlerWithOpts for details how the provided
+// SummaryOpts are used.
+//
+// Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons
+// as InstrumentHandler is. Use the tooling provided in package promhttp instead.
+func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
+	reqCnt := NewCounterVec(
+		CounterOpts{
+			Namespace:   opts.Namespace,
+			Subsystem:   opts.Subsystem,
+			Name:        "requests_total",
+			Help:        "Total number of HTTP requests made.",
+			ConstLabels: opts.ConstLabels,
+		},
+		instLabels,
+	)
+	if err := Register(reqCnt); err != nil {
+		if are, ok := err.(AlreadyRegisteredError); ok {
+			reqCnt = are.ExistingCollector.(*CounterVec)
+		} else {
+			panic(err)
+		}
+	}
+
+	opts.Name = "request_duration_microseconds"
+	opts.Help = "The HTTP request latencies in microseconds."
+	reqDur := NewSummary(opts)
+	if err := Register(reqDur); err != nil {
+		if are, ok := err.(AlreadyRegisteredError); ok {
+			reqDur = are.ExistingCollector.(Summary)
+		} else {
+			panic(err)
+		}
+	}
+
+	opts.Name = "request_size_bytes"
+	opts.Help = "The HTTP request sizes in bytes."
+	reqSz := NewSummary(opts)
+	if err := Register(reqSz); err != nil {
+		if are, ok := err.(AlreadyRegisteredError); ok {
+			reqSz = are.ExistingCollector.(Summary)
+		} else {
+			panic(err)
+		}
+	}
+
+	opts.Name = "response_size_bytes"
+	opts.Help = "The HTTP response sizes in bytes."
+	resSz := NewSummary(opts)
+	if err := Register(resSz); err != nil {
+		if are, ok := err.(AlreadyRegisteredError); ok {
+			resSz = are.ExistingCollector.(Summary)
+		} else {
+			panic(err)
+		}
+	}
+
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		now := time.Now()
+
+		delegate := &responseWriterDelegator{ResponseWriter: w}
+		out := computeApproximateRequestSize(r)
+
+		_, cn := w.(http.CloseNotifier)
+		_, fl := w.(http.Flusher)
+		_, hj := w.(http.Hijacker)
+		_, rf := w.(io.ReaderFrom)
+		var rw http.ResponseWriter
+		if cn && fl && hj && rf {
+			rw = &fancyResponseWriterDelegator{delegate}
+		} else {
+			rw = delegate
+		}
+		handlerFunc(rw, r)
+
+		elapsed := float64(time.Since(now)) / float64(time.Microsecond)
+
+		method := sanitizeMethod(r.Method)
+		code := sanitizeCode(delegate.status)
+		reqCnt.WithLabelValues(method, code).Inc()
+		reqDur.Observe(elapsed)
+		resSz.Observe(float64(delegate.written))
+		reqSz.Observe(float64(<-out))
+	})
+}
+
+func computeApproximateRequestSize(r *http.Request) <-chan int {
+	// Compute the URL length in the current goroutine to avoid a race:
+	// a HandlerFunc running in parallel may modify the URL.
+	s := 0
+	if r.URL != nil {
+		s += len(r.URL.String())
+	}
+
+	out := make(chan int, 1)
+
+	go func() {
+		s += len(r.Method)
+		s += len(r.Proto)
+		for name, values := range r.Header {
+			s += len(name)
+			for _, value := range values {
+				s += len(value)
+			}
+		}
+		s += len(r.Host)
+
+		// N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
+
+		if r.ContentLength != -1 {
+			s += int(r.ContentLength)
+		}
+		out <- s
+		close(out)
+	}()
+
+	return out
+}
+
+type responseWriterDelegator struct {
+	http.ResponseWriter
+
+	status      int
+	written     int64
+	wroteHeader bool
+}
+
+func (r *responseWriterDelegator) WriteHeader(code int) {
+	r.status = code
+	r.wroteHeader = true
+	r.ResponseWriter.WriteHeader(code)
+}
+
+func (r *responseWriterDelegator) Write(b []byte) (int, error) {
+	if !r.wroteHeader {
+		r.WriteHeader(http.StatusOK)
+	}
+	n, err := r.ResponseWriter.Write(b)
+	r.written += int64(n)
+	return n, err
+}
+
+type fancyResponseWriterDelegator struct {
+	*responseWriterDelegator
+}
+
+func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool {
+	return f.ResponseWriter.(http.CloseNotifier).CloseNotify()
+}
+
+func (f *fancyResponseWriterDelegator) Flush() {
+	f.ResponseWriter.(http.Flusher).Flush()
+}
+
+func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+	return f.ResponseWriter.(http.Hijacker).Hijack()
+}
+
+func (f *fancyResponseWriterDelegator) ReadFrom(r io.Reader) (int64, error) {
+	if !f.wroteHeader {
+		f.WriteHeader(http.StatusOK)
+	}
+	n, err := f.ResponseWriter.(io.ReaderFrom).ReadFrom(r)
+	f.written += n
+	return n, err
+}
+
+func sanitizeMethod(m string) string {
+	switch m {
+	case "GET", "get":
+		return "get"
+	case "PUT", "put":
+		return "put"
+	case "HEAD", "head":
+		return "head"
+	case "POST", "post":
+		return "post"
+	case "DELETE", "delete":
+		return "delete"
+	case "CONNECT", "connect":
+		return "connect"
+	case "OPTIONS", "options":
+		return "options"
+	case "NOTIFY", "notify":
+		return "notify"
+	default:
+		return strings.ToLower(m)
+	}
+}
+
+func sanitizeCode(s int) string {
+	switch s {
+	case 100:
+		return "100"
+	case 101:
+		return "101"
+
+	case 200:
+		return "200"
+	case 201:
+		return "201"
+	case 202:
+		return "202"
+	case 203:
+		return "203"
+	case 204:
+		return "204"
+	case 205:
+		return "205"
+	case 206:
+		return "206"
+
+	case 300:
+		return "300"
+	case 301:
+		return "301"
+	case 302:
+		return "302"
+	case 304:
+		return "304"
+	case 305:
+		return "305"
+	case 307:
+		return "307"
+
+	case 400:
+		return "400"
+	case 401:
+		return "401"
+	case 402:
+		return "402"
+	case 403:
+		return "403"
+	case 404:
+		return "404"
+	case 405:
+		return "405"
+	case 406:
+		return "406"
+	case 407:
+		return "407"
+	case 408:
+		return "408"
+	case 409:
+		return "409"
+	case 410:
+		return "410"
+	case 411:
+		return "411"
+	case 412:
+		return "412"
+	case 413:
+		return "413"
+	case 414:
+		return "414"
+	case 415:
+		return "415"
+	case 416:
+		return "416"
+	case 417:
+		return "417"
+	case 418:
+		return "418"
+
+	case 500:
+		return "500"
+	case 501:
+		return "501"
+	case 502:
+		return "502"
+	case 503:
+		return "503"
+	case 504:
+		return "504"
+	case 505:
+		return "505"
+
+	case 428:
+		return "428"
+	case 429:
+		return "429"
+	case 431:
+		return "431"
+	case 511:
+		return "511"
+
+	default:
+		return strconv.Itoa(s)
+	}
+}
+
+// gzipAccepted returns whether the client will accept gzip-encoded content.
+func gzipAccepted(header http.Header) bool {
+	a := header.Get(acceptEncodingHeader)
+	parts := strings.Split(a, ",")
+	for _, part := range parts {
+		part = strings.TrimSpace(part)
+		if part == "gzip" || strings.HasPrefix(part, "gzip;") {
+			return true
+		}
+	}
+	return false
+}
+
+// httpError removes any content-encoding header and then calls http.Error with
+// the provided error and http.StatusInternalServerError. The error contents
+// are supposed to be uncompressed plain text. However, as with a plain
+// http.Error, any header settings will be void if the header has already been
+// sent. The error message will still be written to the writer, but it will
+// probably be of limited use.
+func httpError(rsp http.ResponseWriter, err error) {
+	rsp.Header().Del(contentEncodingHeader)
+	http.Error(
+		rsp,
+		"An error has occurred while serving metrics:\n\n"+err.Error(),
+		http.StatusInternalServerError,
+	)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go
new file mode 100644
index 0000000..351c26e
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go
@@ -0,0 +1,85 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import (
+	"sort"
+
+	dto "github.com/prometheus/client_model/go"
+)
+
+// metricSorter is a sortable slice of *dto.Metric.
+type metricSorter []*dto.Metric
+
+func (s metricSorter) Len() int {
+	return len(s)
+}
+
+func (s metricSorter) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+func (s metricSorter) Less(i, j int) bool {
+	if len(s[i].Label) != len(s[j].Label) {
+		// This should not happen. The metrics are
+		// inconsistent. However, we have to deal with it, as
+		// people might use custom collectors or metric family injection
+		// to create inconsistent metrics. So let's simply compare the
+		// number of labels in this case. That will still yield
+		// reproducible sorting.
+		return len(s[i].Label) < len(s[j].Label)
+	}
+	for n, lp := range s[i].Label {
+		vi := lp.GetValue()
+		vj := s[j].Label[n].GetValue()
+		if vi != vj {
+			return vi < vj
+		}
+	}
+
+	// We should never arrive here. Multiple metrics with the same
+	// label set in the same scrape will lead to undefined ingestion
+	// behavior. However, as above, we have to provide stable sorting
+	// here, even for inconsistent metrics. So sort equal metrics
+	// by their timestamp, with missing timestamps (implying "now")
+	// coming last.
+	if s[i].TimestampMs == nil {
+		return false
+	}
+	if s[j].TimestampMs == nil {
+		return true
+	}
+	return s[i].GetTimestampMs() < s[j].GetTimestampMs()
+}
+
+// NormalizeMetricFamilies returns a MetricFamily slice with empty
+// MetricFamilies pruned and the remaining MetricFamilies sorted by name within
+// the slice, with the contained Metrics sorted within each MetricFamily.
+func NormalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {
+	for _, mf := range metricFamiliesByName {
+		sort.Sort(metricSorter(mf.Metric))
+	}
+	names := make([]string, 0, len(metricFamiliesByName))
+	for name, mf := range metricFamiliesByName {
+		if len(mf.Metric) > 0 {
+			names = append(names, name)
+		}
+	}
+	sort.Strings(names)
+	result := make([]*dto.MetricFamily, 0, len(names))
+	for _, name := range names {
+		result = append(result, metricFamiliesByName[name])
+	}
+	return result
+}
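
A hedged usage sketch of NormalizeMetricFamilies. Because this is an internal package, it only compiles from within client_golang itself, so the snippet is illustrative; the family names are made up:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/prometheus/client_golang/prometheus/internal"
	dto "github.com/prometheus/client_model/go"
)

func main() {
	byName := map[string]*dto.MetricFamily{
		"b_total": {Name: proto.String("b_total"), Metric: []*dto.Metric{{}}},
		"a_total": {Name: proto.String("a_total"), Metric: []*dto.Metric{{}}},
		"empty":   {Name: proto.String("empty")}, // no metrics: pruned
	}
	for _, mf := range internal.NormalizeMetricFamilies(byName) {
		fmt.Println(mf.GetName()) // prints a_total, then b_total
	}
}
```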
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go
new file mode 100644
index 0000000..2744443
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go
@@ -0,0 +1,87 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+	"unicode/utf8"
+
+	"github.com/prometheus/common/model"
+)
+
+// Labels represents a collection of label name -> value mappings. This type is
+// commonly used with the With(Labels) and GetMetricWith(Labels) methods of
+// metric vector Collectors, e.g.:
+//     myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+//
+// The other use-case is the specification of constant label pairs in Opts or to
+// create a Desc.
+type Labels map[string]string
+
+// reservedLabelPrefix is a prefix which is not legal in user-supplied
+// label names.
+const reservedLabelPrefix = "__"
+
+var errInconsistentCardinality = errors.New("inconsistent label cardinality")
+
+func makeInconsistentCardinalityError(fqName string, labels, labelValues []string) error {
+	return fmt.Errorf(
+		"%s: %q has %d variable labels named %q but %d values %q were provided",
+		errInconsistentCardinality, fqName,
+		len(labels), labels,
+		len(labelValues), labelValues,
+	)
+}
+
+func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error {
+	if len(labels) != expectedNumberOfValues {
+		return fmt.Errorf(
+			"%s: expected %d label values but got %d in %#v",
+			errInconsistentCardinality, expectedNumberOfValues,
+			len(labels), labels,
+		)
+	}
+
+	for name, val := range labels {
+		if !utf8.ValidString(val) {
+			return fmt.Errorf("label %s: value %q is not valid UTF-8", name, val)
+		}
+	}
+
+	return nil
+}
+
+func validateLabelValues(vals []string, expectedNumberOfValues int) error {
+	if len(vals) != expectedNumberOfValues {
+		return fmt.Errorf(
+			"%s: expected %d label values but got %d in %#v",
+			errInconsistentCardinality, expectedNumberOfValues,
+			len(vals), vals,
+		)
+	}
+
+	for _, val := range vals {
+		if !utf8.ValidString(val) {
+			return fmt.Errorf("label value %q is not valid UTF-8", val)
+		}
+	}
+
+	return nil
+}
+
+func checkLabelName(l string) bool {
+	return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix)
+}
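
checkLabelName is unexported, but its two rules are easy to reproduce with the exported github.com/prometheus/common/model API. An illustrative sketch (the helper name is hypothetical):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/common/model"
)

// legalUserLabel mirrors checkLabelName: the name must be a valid
// Prometheus label name and must not use the reserved "__" prefix.
func legalUserLabel(l string) bool {
	return model.LabelName(l).IsValid() && !strings.HasPrefix(l, "__")
}

func main() {
	fmt.Println(legalUserLabel("method"))    // true
	fmt.Println(legalUserLabel("__name__"))  // false: reserved prefix
	fmt.Println(legalUserLabel("1st_label")) // false: cannot start with a digit
}
```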
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
new file mode 100644
index 0000000..55e6d86
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
@@ -0,0 +1,174 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"strings"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+
+	dto "github.com/prometheus/client_model/go"
+)
+
+const separatorByte byte = 255
+
+// A Metric models a single sample value with its metadata being exported to
+// Prometheus. Implementations of Metric in this package are Gauge, Counter,
+// Histogram, Summary, and Untyped.
+type Metric interface {
+	// Desc returns the descriptor for the Metric. This method idempotently
+	// returns the same descriptor throughout the lifetime of the
+	// Metric. The returned descriptor is immutable by contract. A Metric
+	// unable to describe itself must return an invalid descriptor (created
+	// with NewInvalidDesc).
+	Desc() *Desc
+	// Write encodes the Metric into a "Metric" Protocol Buffer data
+	// transmission object.
+	//
+	// Metric implementations must observe concurrency safety as reads of
+	// this metric may occur at any time, and any blocking occurs at the
+	// expense of total performance of rendering all registered
+	// metrics. Ideally, Metric implementations should support concurrent
+	// readers.
+	//
+	// While populating dto.Metric, it is the responsibility of the
+	// implementation to ensure validity of the Metric protobuf (like valid
+	// UTF-8 strings or syntactically valid metric and label names). It is
+	// recommended to sort labels lexicographically. Callers of Write should
+	// still sort the labels themselves if they depend on the order.
+	Write(*dto.Metric) error
+	// TODO(beorn7): The original rationale of passing in a pre-allocated
+	// dto.Metric protobuf to save allocations has disappeared. The
+	// signature of this method should be changed to "Write() (*dto.Metric,
+	// error)".
+}
+
+// Opts bundles the options for creating most Metric types. Each metric
+// implementation XXX has its own XXXOpts type, but in most cases, it is just
+// an alias of this type (which might change when the requirement arises).
+//
+// It is mandatory to set Name to a non-empty string. All other fields are
+// optional and can safely be left at their zero value, although it is strongly
+// encouraged to set a Help string.
+type Opts struct {
+	// Namespace, Subsystem, and Name are components of the fully-qualified
+	// name of the Metric (created by joining these components with
+	// "_"). Only Name is mandatory, the others merely help structuring the
+	// name. Note that the fully-qualified name of the metric must be a
+	// valid Prometheus metric name.
+	Namespace string
+	Subsystem string
+	Name      string
+
+	// Help provides information about this metric.
+	//
+	// Metrics with the same fully-qualified name must have the same Help
+	// string.
+	Help string
+
+	// ConstLabels are used to attach fixed labels to this metric. Metrics
+	// with the same fully-qualified name must have the same label names in
+	// their ConstLabels.
+	//
+	// ConstLabels are rarely used. In particular, do not use them to
+	// attach the same labels to all your metrics. Those use cases are
+	// better covered by target labels set by the scraping Prometheus
+	// server, or by one specific metric (e.g. a build_info or a
+	// machine_role metric). See also
+	// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels
+	ConstLabels Labels
+}
+
+// BuildFQName joins the given three name components by "_". Empty name
+// components are ignored. If the name parameter itself is empty, an empty
+// string is returned, no matter what. Metric implementations included in this
+// library use this function internally to generate the fully-qualified metric
+// name from the name component in their Opts. Users of the library will only
+// need this function if they implement their own Metric or instantiate a Desc
+// (with NewDesc) directly.
+func BuildFQName(namespace, subsystem, name string) string {
+	if name == "" {
+		return ""
+	}
+	switch {
+	case namespace != "" && subsystem != "":
+		return strings.Join([]string{namespace, subsystem, name}, "_")
+	case namespace != "":
+		return strings.Join([]string{namespace, name}, "_")
+	case subsystem != "":
+		return strings.Join([]string{subsystem, name}, "_")
+	}
+	return name
+}
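
A quick check of the join rules above; the expected output is noted inline:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	fmt.Println(prometheus.BuildFQName("ns", "sub", "reqs")) // ns_sub_reqs
	fmt.Println(prometheus.BuildFQName("ns", "", "reqs"))    // ns_reqs
	fmt.Println(prometheus.BuildFQName("", "sub", "reqs"))   // sub_reqs
	fmt.Println(prometheus.BuildFQName("ns", "sub", ""))     // "" (empty name wins)
}
```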
+
+// labelPairSorter implements sort.Interface. It is used to sort a slice of
+// dto.LabelPair pointers.
+type labelPairSorter []*dto.LabelPair
+
+func (s labelPairSorter) Len() int {
+	return len(s)
+}
+
+func (s labelPairSorter) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+func (s labelPairSorter) Less(i, j int) bool {
+	return s[i].GetName() < s[j].GetName()
+}
+
+type invalidMetric struct {
+	desc *Desc
+	err  error
+}
+
+// NewInvalidMetric returns a metric whose Write method always returns the
+// provided error. It is useful if a Collector finds itself unable to collect
+// a metric and wishes to report an error to the registry.
+func NewInvalidMetric(desc *Desc, err error) Metric {
+	return &invalidMetric{desc, err}
+}
+
+func (m *invalidMetric) Desc() *Desc { return m.desc }
+
+func (m *invalidMetric) Write(*dto.Metric) error { return m.err }
+
+type timestampedMetric struct {
+	Metric
+	t time.Time
+}
+
+func (m timestampedMetric) Write(pb *dto.Metric) error {
+	e := m.Metric.Write(pb)
+	pb.TimestampMs = proto.Int64(m.t.Unix()*1000 + int64(m.t.Nanosecond()/1000000))
+	return e
+}
+
+// NewMetricWithTimestamp returns a new Metric wrapping the provided Metric in a
+// way that it has an explicit timestamp set to the provided Time. This is only
+// useful in rare cases as the timestamp of a Prometheus metric should usually
+// be set by the Prometheus server during scraping. Exceptions include mirroring
+// metrics with given timestamps from other metric
+// sources.
+//
+// NewMetricWithTimestamp works best with MustNewConstMetric,
+// MustNewConstHistogram, and MustNewConstSummary, see example.
+//
+// Currently, the exposition formats used by Prometheus are limited to
+// millisecond resolution. Thus, the provided time will be rounded down to the
+// next full millisecond value.
+func NewMetricWithTimestamp(t time.Time, m Metric) Metric {
+	return timestampedMetric{Metric: m, t: t}
+}
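
A hedged sketch of NewMetricWithTimestamp in the mirroring scenario the doc comment describes, from inside a custom Collector. The collector, metric name, value, and timestamp are all made-up examples:

```go
package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var mirroredDesc = prometheus.NewDesc(
	"mirrored_temperature_celsius",
	"Temperature mirrored from an external source.",
	nil, nil,
)

type mirrorCollector struct{}

func (mirrorCollector) Describe(ch chan<- *prometheus.Desc) { ch <- mirroredDesc }

func (mirrorCollector) Collect(ch chan<- prometheus.Metric) {
	m := prometheus.MustNewConstMetric(mirroredDesc, prometheus.GaugeValue, 21.5)
	// Attach the time at which the external source took the sample.
	ch <- prometheus.NewMetricWithTimestamp(time.Unix(1257894000, 0), m)
}

func main() {
	prometheus.MustRegister(mirrorCollector{})
}
```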
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/observer.go b/vendor/github.com/prometheus/client_golang/prometheus/observer.go
new file mode 100644
index 0000000..5806cd0
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/observer.go
@@ -0,0 +1,52 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// Observer is the interface that wraps the Observe method, which is used by
+// Histogram and Summary to add observations.
+type Observer interface {
+	Observe(float64)
+}
+
+// The ObserverFunc type is an adapter to allow the use of ordinary
+// functions as Observers. If f is a function with the appropriate
+// signature, ObserverFunc(f) is an Observer that calls f.
+//
+// This adapter is usually used in connection with the Timer type, and there are
+// two general use cases:
+//
+// The most common one is to use a Gauge as the Observer for a Timer.
+// See the "Gauge" Timer example.
+//
+// The more advanced use case is to create a function that dynamically decides
+// which Observer to use for observing the duration. See the "Complex" Timer
+// example.
+type ObserverFunc func(float64)
+
+// Observe calls f(value). It implements Observer.
+func (f ObserverFunc) Observe(value float64) {
+	f(value)
+}
+
+// ObserverVec is an interface implemented by `HistogramVec` and `SummaryVec`.
+type ObserverVec interface {
+	GetMetricWith(Labels) (Observer, error)
+	GetMetricWithLabelValues(lvs ...string) (Observer, error)
+	With(Labels) Observer
+	WithLabelValues(...string) Observer
+	CurryWith(Labels) (ObserverVec, error)
+	MustCurryWith(Labels) ObserverVec
+
+	Collector
+}
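
The "Gauge as Observer" use case mentioned above, sketched with a made-up gauge name: ObserverFunc adapts Gauge.Set so a Timer can record the duration of the most recent run.

```go
package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var batchDuration = prometheus.NewGauge(prometheus.GaugeOpts{
	Name: "last_batch_duration_seconds",
	Help: "Duration of the most recent batch run.",
})

func runBatch() {
	// ObserverFunc turns the gauge's Set method into an Observer, so the
	// Timer writes the elapsed seconds into the gauge on return.
	timer := prometheus.NewTimer(prometheus.ObserverFunc(batchDuration.Set))
	defer timer.ObserveDuration()
	time.Sleep(50 * time.Millisecond) // stand-in for real work
}

func main() {
	prometheus.MustRegister(batchDuration)
	runBatch()
}
```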
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
new file mode 100644
index 0000000..55176d5
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
@@ -0,0 +1,204 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"errors"
+	"os"
+
+	"github.com/prometheus/procfs"
+)
+
+type processCollector struct {
+	collectFn       func(chan<- Metric)
+	pidFn           func() (int, error)
+	reportErrors    bool
+	cpuTotal        *Desc
+	openFDs, maxFDs *Desc
+	vsize, maxVsize *Desc
+	rss             *Desc
+	startTime       *Desc
+}
+
+// ProcessCollectorOpts defines the behavior of a process metrics collector
+// created with NewProcessCollector.
+type ProcessCollectorOpts struct {
+	// PidFn returns the PID of the process the collector collects metrics
+	// for. It is called upon each collection. By default, the PID of the
+	// current process is used, as determined on construction time by
+	// calling os.Getpid().
+	PidFn func() (int, error)
+	// If non-empty, each of the collected metrics is prefixed by the
+	// provided string and an underscore ("_").
+	Namespace string
+	// If true, any error encountered during collection is reported as an
+	// invalid metric (see NewInvalidMetric). Otherwise, errors are ignored
+	// and the collected metrics will be incomplete. (Possibly, no metrics
+	// will be collected at all.) While that's usually not desired, it is
+	// appropriate for the common "mix-in" of process metrics, where process
+	// metrics are nice to have, but failing to collect them should not
+	// disrupt the collection of the remaining metrics.
+	ReportErrors bool
+}
+
+// NewProcessCollector returns a collector which exports the current state of
+// process metrics including CPU, memory and file descriptor usage as well as
+// the process start time. The detailed behavior is defined by the provided
+// ProcessCollectorOpts. The zero value of ProcessCollectorOpts creates a
+// collector for the current process with an empty namespace string and no error
+// reporting.
+//
+// Currently, the collector depends on a Linux-style proc filesystem and
+// therefore only exports metrics for Linux.
+//
+// Note: An older version of this function had the following signature:
+//
+//     NewProcessCollector(pid int, namespace string) Collector
+//
+// Most commonly, it was called as
+//
+//     NewProcessCollector(os.Getpid(), "")
+//
+// The following call of the current version is equivalent to the above:
+//
+//     NewProcessCollector(ProcessCollectorOpts{})
+func NewProcessCollector(opts ProcessCollectorOpts) Collector {
+	ns := ""
+	if len(opts.Namespace) > 0 {
+		ns = opts.Namespace + "_"
+	}
+
+	c := &processCollector{
+		reportErrors: opts.ReportErrors,
+		cpuTotal: NewDesc(
+			ns+"process_cpu_seconds_total",
+			"Total user and system CPU time spent in seconds.",
+			nil, nil,
+		),
+		openFDs: NewDesc(
+			ns+"process_open_fds",
+			"Number of open file descriptors.",
+			nil, nil,
+		),
+		maxFDs: NewDesc(
+			ns+"process_max_fds",
+			"Maximum number of open file descriptors.",
+			nil, nil,
+		),
+		vsize: NewDesc(
+			ns+"process_virtual_memory_bytes",
+			"Virtual memory size in bytes.",
+			nil, nil,
+		),
+		maxVsize: NewDesc(
+			ns+"process_virtual_memory_max_bytes",
+			"Maximum amount of virtual memory available in bytes.",
+			nil, nil,
+		),
+		rss: NewDesc(
+			ns+"process_resident_memory_bytes",
+			"Resident memory size in bytes.",
+			nil, nil,
+		),
+		startTime: NewDesc(
+			ns+"process_start_time_seconds",
+			"Start time of the process since unix epoch in seconds.",
+			nil, nil,
+		),
+	}
+
+	if opts.PidFn == nil {
+		pid := os.Getpid()
+		c.pidFn = func() (int, error) { return pid, nil }
+	} else {
+		c.pidFn = opts.PidFn
+	}
+
+	// Set up process metric collection if supported by the runtime.
+	if _, err := procfs.NewStat(); err == nil {
+		c.collectFn = c.processCollect
+	} else {
+		c.collectFn = func(ch chan<- Metric) {
+			c.reportError(ch, nil, errors.New("process metrics not supported on this platform"))
+		}
+	}
+
+	return c
+}
+
+// Describe returns all descriptions of the collector.
+func (c *processCollector) Describe(ch chan<- *Desc) {
+	ch <- c.cpuTotal
+	ch <- c.openFDs
+	ch <- c.maxFDs
+	ch <- c.vsize
+	ch <- c.maxVsize
+	ch <- c.rss
+	ch <- c.startTime
+}
+
+// Collect returns the current state of all metrics of the collector.
+func (c *processCollector) Collect(ch chan<- Metric) {
+	c.collectFn(ch)
+}
+
+func (c *processCollector) processCollect(ch chan<- Metric) {
+	pid, err := c.pidFn()
+	if err != nil {
+		c.reportError(ch, nil, err)
+		return
+	}
+
+	p, err := procfs.NewProc(pid)
+	if err != nil {
+		c.reportError(ch, nil, err)
+		return
+	}
+
+	if stat, err := p.NewStat(); err == nil {
+		ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime())
+		ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory()))
+		ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory()))
+		if startTime, err := stat.StartTime(); err == nil {
+			ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime)
+		} else {
+			c.reportError(ch, c.startTime, err)
+		}
+	} else {
+		c.reportError(ch, nil, err)
+	}
+
+	if fds, err := p.FileDescriptorsLen(); err == nil {
+		ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds))
+	} else {
+		c.reportError(ch, c.openFDs, err)
+	}
+
+	if limits, err := p.NewLimits(); err == nil {
+		ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles))
+		ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace))
+	} else {
+		c.reportError(ch, nil, err)
+	}
+}
+
+func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) {
+	if !c.reportErrors {
+		return
+	}
+	if desc == nil {
+		desc = NewInvalidDesc(err)
+	}
+	ch <- NewInvalidMetric(desc, err)
+}
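
Typical registration of the process collector, following the doc comment above (the namespace is an arbitrary example):

```go
package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	prometheus.MustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{
		Namespace:    "myapp", // metrics are exported as myapp_process_*
		ReportErrors: true,    // surface collection failures as invalid metrics
	}))
}
```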
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
new file mode 100644
index 0000000..67b56d3
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
@@ -0,0 +1,199 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package promhttp
+
+import (
+	"bufio"
+	"io"
+	"net"
+	"net/http"
+)
+
+const (
+	closeNotifier = 1 << iota
+	flusher
+	hijacker
+	readerFrom
+	pusher
+)
+
+type delegator interface {
+	http.ResponseWriter
+
+	Status() int
+	Written() int64
+}
+
+type responseWriterDelegator struct {
+	http.ResponseWriter
+
+	handler, method    string
+	status             int
+	written            int64
+	wroteHeader        bool
+	observeWriteHeader func(int)
+}
+
+func (r *responseWriterDelegator) Status() int {
+	return r.status
+}
+
+func (r *responseWriterDelegator) Written() int64 {
+	return r.written
+}
+
+func (r *responseWriterDelegator) WriteHeader(code int) {
+	r.status = code
+	r.wroteHeader = true
+	r.ResponseWriter.WriteHeader(code)
+	if r.observeWriteHeader != nil {
+		r.observeWriteHeader(code)
+	}
+}
+
+func (r *responseWriterDelegator) Write(b []byte) (int, error) {
+	if !r.wroteHeader {
+		r.WriteHeader(http.StatusOK)
+	}
+	n, err := r.ResponseWriter.Write(b)
+	r.written += int64(n)
+	return n, err
+}
+
+type closeNotifierDelegator struct{ *responseWriterDelegator }
+type flusherDelegator struct{ *responseWriterDelegator }
+type hijackerDelegator struct{ *responseWriterDelegator }
+type readerFromDelegator struct{ *responseWriterDelegator }
+
+func (d closeNotifierDelegator) CloseNotify() <-chan bool {
+	return d.ResponseWriter.(http.CloseNotifier).CloseNotify()
+}
+func (d flusherDelegator) Flush() {
+	d.ResponseWriter.(http.Flusher).Flush()
+}
+func (d hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+	return d.ResponseWriter.(http.Hijacker).Hijack()
+}
+func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) {
+	if !d.wroteHeader {
+		d.WriteHeader(http.StatusOK)
+	}
+	n, err := d.ResponseWriter.(io.ReaderFrom).ReadFrom(re)
+	d.written += n
+	return n, err
+}
+
+var pickDelegator = make([]func(*responseWriterDelegator) delegator, 32)
+
+func init() {
+	// TODO(beorn7): Code generation would help here.
+	pickDelegator[0] = func(d *responseWriterDelegator) delegator { // 0
+		return d
+	}
+	pickDelegator[closeNotifier] = func(d *responseWriterDelegator) delegator { // 1
+		return closeNotifierDelegator{d}
+	}
+	pickDelegator[flusher] = func(d *responseWriterDelegator) delegator { // 2
+		return flusherDelegator{d}
+	}
+	pickDelegator[flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 3
+		return struct {
+			*responseWriterDelegator
+			http.Flusher
+			http.CloseNotifier
+		}{d, flusherDelegator{d}, closeNotifierDelegator{d}}
+	}
+	pickDelegator[hijacker] = func(d *responseWriterDelegator) delegator { // 4
+		return hijackerDelegator{d}
+	}
+	pickDelegator[hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 5
+		return struct {
+			*responseWriterDelegator
+			http.Hijacker
+			http.CloseNotifier
+		}{d, hijackerDelegator{d}, closeNotifierDelegator{d}}
+	}
+	pickDelegator[hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 6
+		return struct {
+			*responseWriterDelegator
+			http.Hijacker
+			http.Flusher
+		}{d, hijackerDelegator{d}, flusherDelegator{d}}
+	}
+	pickDelegator[hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 7
+		return struct {
+			*responseWriterDelegator
+			http.Hijacker
+			http.Flusher
+			http.CloseNotifier
+		}{d, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
+	}
+	pickDelegator[readerFrom] = func(d *responseWriterDelegator) delegator { // 8
+		return readerFromDelegator{d}
+	}
+	pickDelegator[readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 9
+		return struct {
+			*responseWriterDelegator
+			io.ReaderFrom
+			http.CloseNotifier
+		}{d, readerFromDelegator{d}, closeNotifierDelegator{d}}
+	}
+	pickDelegator[readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 10
+		return struct {
+			*responseWriterDelegator
+			io.ReaderFrom
+			http.Flusher
+		}{d, readerFromDelegator{d}, flusherDelegator{d}}
+	}
+	pickDelegator[readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 11
+		return struct {
+			*responseWriterDelegator
+			io.ReaderFrom
+			http.Flusher
+			http.CloseNotifier
+		}{d, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
+	}
+	pickDelegator[readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 12
+		return struct {
+			*responseWriterDelegator
+			io.ReaderFrom
+			http.Hijacker
+		}{d, readerFromDelegator{d}, hijackerDelegator{d}}
+	}
+	pickDelegator[readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 13
+		return struct {
+			*responseWriterDelegator
+			io.ReaderFrom
+			http.Hijacker
+			http.CloseNotifier
+		}{d, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
+	}
+	pickDelegator[readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 14
+		return struct {
+			*responseWriterDelegator
+			io.ReaderFrom
+			http.Hijacker
+			http.Flusher
+		}{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
+	}
+	pickDelegator[readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 15
+		return struct {
+			*responseWriterDelegator
+			io.ReaderFrom
+			http.Hijacker
+			http.Flusher
+			http.CloseNotifier
+		}{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
+	}
+}
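
Each optional interface contributes one bit to the index into pickDelegator, so every interface combination is pre-built exactly once. A sketch of the index arithmetic, assuming the same package scope and a hypothetical helper name:

```go
// Hypothetical in-package helper showing the arithmetic only.
func exampleIndex(w http.ResponseWriter) delegator {
	// Suppose w implements http.Flusher and http.Hijacker but nothing else:
	id := flusher + hijacker // 2 + 4 == 6
	// Slot 6 embeds the Hijacker and Flusher delegators alongside the base.
	return pickDelegator[id](&responseWriterDelegator{ResponseWriter: w})
}
```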
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go
new file mode 100644
index 0000000..31a7069
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go
@@ -0,0 +1,181 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.8
+
+package promhttp
+
+import (
+	"io"
+	"net/http"
+)
+
+type pusherDelegator struct{ *responseWriterDelegator }
+
+func (d pusherDelegator) Push(target string, opts *http.PushOptions) error {
+	return d.ResponseWriter.(http.Pusher).Push(target, opts)
+}
+
+func init() {
+	pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16
+		return pusherDelegator{d}
+	}
+	pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			http.CloseNotifier
+		}{d, pusherDelegator{d}, closeNotifierDelegator{d}}
+	}
+	pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			http.Flusher
+		}{d, pusherDelegator{d}, flusherDelegator{d}}
+	}
+	pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			http.Flusher
+			http.CloseNotifier
+		}{d, pusherDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
+	}
+	pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			http.Hijacker
+		}{d, pusherDelegator{d}, hijackerDelegator{d}}
+	}
+	pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			http.Hijacker
+			http.CloseNotifier
+		}{d, pusherDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
+	}
+	pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			http.Hijacker
+			http.Flusher
+		}{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
+	}
+	pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 23
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			http.Hijacker
+			http.Flusher
+			http.CloseNotifier
+		}{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+		}{d, pusherDelegator{d}, readerFromDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+			http.CloseNotifier
+		}{d, pusherDelegator{d}, readerFromDelegator{d}, closeNotifierDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+			http.Flusher
+		}{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+			http.Flusher
+			http.CloseNotifier
+		}{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+			http.Hijacker
+		}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+			http.Hijacker
+			http.CloseNotifier
+		}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+			http.Hijacker
+			http.Flusher
+		}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+			http.Hijacker
+			http.Flusher
+			http.CloseNotifier
+		}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
+	}
+}
+
+func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {
+	d := &responseWriterDelegator{
+		ResponseWriter:     w,
+		observeWriteHeader: observeWriteHeaderFunc,
+	}
+
+	id := 0
+	if _, ok := w.(http.CloseNotifier); ok {
+		id += closeNotifier
+	}
+	if _, ok := w.(http.Flusher); ok {
+		id += flusher
+	}
+	if _, ok := w.(http.Hijacker); ok {
+		id += hijacker
+	}
+	if _, ok := w.(io.ReaderFrom); ok {
+		id += readerFrom
+	}
+	if _, ok := w.(http.Pusher); ok {
+		id += pusher
+	}
+
+	return pickDelegator[id](d)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go
new file mode 100644
index 0000000..8bb9b8b
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go
@@ -0,0 +1,44 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !go1.8
+
+package promhttp
+
+import (
+	"io"
+	"net/http"
+)
+
+func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {
+	d := &responseWriterDelegator{
+		ResponseWriter:     w,
+		observeWriteHeader: observeWriteHeaderFunc,
+	}
+
+	id := 0
+	if _, ok := w.(http.CloseNotifier); ok {
+		id += closeNotifier
+	}
+	if _, ok := w.(http.Flusher); ok {
+		id += flusher
+	}
+	if _, ok := w.(http.Hijacker); ok {
+		id += hijacker
+	}
+	if _, ok := w.(io.ReaderFrom); ok {
+		id += readerFrom
+	}
+
+	return pickDelegator[id](d)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
new file mode 100644
index 0000000..668eb6b
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
@@ -0,0 +1,311 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package promhttp provides tooling around HTTP servers and clients.
+//
+// First, the package allows the creation of http.Handler instances to expose
+// Prometheus metrics via HTTP. promhttp.Handler acts on the
+// prometheus.DefaultGatherer. With HandlerFor, you can create a handler for a
+// custom registry or anything that implements the Gatherer interface. It also
+// allows the creation of handlers that act differently on errors or allow to
+// log errors.
+//
+// Second, the package provides tooling to instrument instances of http.Handler
+// via middleware. Middleware wrappers follow the naming scheme
+// InstrumentHandlerX, where X describes the intended use of the middleware.
+// See each function's doc comment for specific details.
+//
+// Finally, the package allows for an http.RoundTripper to be instrumented via
+// middleware. Middleware wrappers follow the naming scheme
+// InstrumentRoundTripperX, where X describes the intended use of the
+// middleware. See each function's doc comment for specific details.
+package promhttp
+
+import (
+	"compress/gzip"
+	"fmt"
+	"io"
+	"net/http"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/prometheus/common/expfmt"
+
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+const (
+	contentTypeHeader     = "Content-Type"
+	contentLengthHeader   = "Content-Length"
+	contentEncodingHeader = "Content-Encoding"
+	acceptEncodingHeader  = "Accept-Encoding"
+)
+
+var gzipPool = sync.Pool{
+	New: func() interface{} {
+		return gzip.NewWriter(nil)
+	},
+}
+
+// Handler returns an http.Handler for the prometheus.DefaultGatherer, using
+// default HandlerOpts, i.e. it reports the first error as an HTTP error, it has
+// no error logging, and it applies compression if requested by the client.
+//
+// The returned http.Handler is already instrumented using the
+// InstrumentMetricHandler function and the prometheus.DefaultRegisterer. If you
+// create multiple http.Handlers by separate calls of the Handler function, the
+// metrics used for instrumentation will be shared between them, providing
+// global scrape counts.
+//
+// This function is meant to cover the bulk of basic use cases. If you are doing
+// anything that requires more customization (including using a non-default
+// Gatherer, different instrumentation, and non-default HandlerOpts), use the
+// HandlerFor function. See there for details.
+func Handler() http.Handler {
+	return InstrumentMetricHandler(
+		prometheus.DefaultRegisterer, HandlerFor(prometheus.DefaultGatherer, HandlerOpts{}),
+	)
+}
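
The canonical exposition setup using Handler(); the listen address is arbitrary:

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":2112", nil))
}
```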
+
+// HandlerFor returns an uninstrumented http.Handler for the provided
+// Gatherer. The behavior of the Handler is defined by the provided
+// HandlerOpts. Thus, HandlerFor is useful to create http.Handlers for custom
+// Gatherers, with non-default HandlerOpts, and/or with custom (or no)
+// instrumentation. Use the InstrumentMetricHandler function to apply the same
+// kind of instrumentation as it is used by the Handler function.
+func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
+	var inFlightSem chan struct{}
+	if opts.MaxRequestsInFlight > 0 {
+		inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight)
+	}
+
+	h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) {
+		if inFlightSem != nil {
+			select {
+			case inFlightSem <- struct{}{}: // All good, carry on.
+				defer func() { <-inFlightSem }()
+			default:
+				http.Error(rsp, fmt.Sprintf(
+					"Limit of concurrent requests reached (%d), try again later.", opts.MaxRequestsInFlight,
+				), http.StatusServiceUnavailable)
+				return
+			}
+		}
+		mfs, err := reg.Gather()
+		if err != nil {
+			if opts.ErrorLog != nil {
+				opts.ErrorLog.Println("error gathering metrics:", err)
+			}
+			switch opts.ErrorHandling {
+			case PanicOnError:
+				panic(err)
+			case ContinueOnError:
+				if len(mfs) == 0 {
+					// Still report the error if no metrics have been gathered.
+					httpError(rsp, err)
+					return
+				}
+			case HTTPErrorOnError:
+				httpError(rsp, err)
+				return
+			}
+		}
+
+		contentType := expfmt.Negotiate(req.Header)
+		header := rsp.Header()
+		header.Set(contentTypeHeader, string(contentType))
+
+		w := io.Writer(rsp)
+		if !opts.DisableCompression && gzipAccepted(req.Header) {
+			header.Set(contentEncodingHeader, "gzip")
+			gz := gzipPool.Get().(*gzip.Writer)
+			defer gzipPool.Put(gz)
+
+			gz.Reset(w)
+			defer gz.Close()
+
+			w = gz
+		}
+
+		enc := expfmt.NewEncoder(w, contentType)
+
+		var lastErr error
+		for _, mf := range mfs {
+			if err := enc.Encode(mf); err != nil {
+				lastErr = err
+				if opts.ErrorLog != nil {
+					opts.ErrorLog.Println("error encoding and sending metric family:", err)
+				}
+				switch opts.ErrorHandling {
+				case PanicOnError:
+					panic(err)
+				case ContinueOnError:
+					// Handled later.
+				case HTTPErrorOnError:
+					httpError(rsp, err)
+					return
+				}
+			}
+		}
+
+		if lastErr != nil {
+			httpError(rsp, lastErr)
+		}
+	})
+
+	if opts.Timeout <= 0 {
+		return h
+	}
+	return http.TimeoutHandler(h, opts.Timeout, fmt.Sprintf(
+		"Exceeded configured timeout of %v.\n",
+		opts.Timeout,
+	))
+}
+
+// InstrumentMetricHandler is usually used with an http.Handler returned by the
+// HandlerFor function. It instruments the provided http.Handler with two
+// metrics: A counter vector "promhttp_metric_handler_requests_total" to count
+// scrapes partitioned by HTTP status code, and a gauge
+// "promhttp_metric_handler_requests_in_flight" to track the number of
+// simultaneous scrapes. This function idempotently registers collectors for
+// both metrics with the provided Registerer. It panics if the registration
+// fails. The provided metrics are useful to see how many scrapes hit the
+// monitored target (which could be from different Prometheus servers or other
+// scrapers), and how often they overlap (which would result in more than one
+// scrape in flight at the same time). Note that the scrapes-in-flight gauge
+// will contain the scrape by which it is exposed, while the scrape counter will
+// only get incremented after the scrape is complete (as only then is the
+// status code known). For tracking scrape durations, use the
+// "scrape_duration_seconds" gauge created by the Prometheus server upon each
+// scrape.
+func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) http.Handler {
+	cnt := prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Name: "promhttp_metric_handler_requests_total",
+			Help: "Total number of scrapes by HTTP status code.",
+		},
+		[]string{"code"},
+	)
+	// Initialize the most likely HTTP status codes.
+	cnt.WithLabelValues("200")
+	cnt.WithLabelValues("500")
+	cnt.WithLabelValues("503")
+	if err := reg.Register(cnt); err != nil {
+		if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
+			cnt = are.ExistingCollector.(*prometheus.CounterVec)
+		} else {
+			panic(err)
+		}
+	}
+
+	gge := prometheus.NewGauge(prometheus.GaugeOpts{
+		Name: "promhttp_metric_handler_requests_in_flight",
+		Help: "Current number of scrapes being served.",
+	})
+	if err := reg.Register(gge); err != nil {
+		if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
+			gge = are.ExistingCollector.(prometheus.Gauge)
+		} else {
+			panic(err)
+		}
+	}
+
+	return InstrumentHandlerCounter(cnt, InstrumentHandlerInFlight(gge, handler))
+}
+
+// HandlerErrorHandling defines how a Handler serving metrics will handle
+// errors.
+type HandlerErrorHandling int
+
+// These constants cause handlers serving metrics to behave as described if
+// errors are encountered.
+const (
+	// Serve an HTTP status code 500 upon the first error
+	// encountered. Report the error message in the body.
+	HTTPErrorOnError HandlerErrorHandling = iota
+	// Ignore errors and try to serve as many metrics as possible.  However,
+	// if no metrics can be served, serve an HTTP status code 500 and the
+	// last error message in the body. Only use this in deliberate "best
+	// effort" metrics collection scenarios. It is recommended to at least
+	// log errors (by providing an ErrorLog in HandlerOpts) to not mask
+	// errors completely.
+	ContinueOnError
+	// Panic upon the first error encountered (useful for "crash only" apps).
+	PanicOnError
+)
+
+// Logger is the minimal interface HandlerOpts needs for logging. Note that
+// log.Logger from the standard library implements this interface, and it is
+// easy for custom loggers to implement, if they don't do so already.
+type Logger interface {
+	Println(v ...interface{})
+}
+
+// HandlerOpts specifies options for how to serve metrics via an http.Handler.
+// The zero value of HandlerOpts is a reasonable default.
+type HandlerOpts struct {
+	// ErrorLog specifies an optional logger for errors collecting and
+	// serving metrics. If nil, errors are not logged at all.
+	ErrorLog Logger
+	// ErrorHandling defines how errors are handled. Note that errors are
+	// logged regardless of the configured ErrorHandling, provided ErrorLog
+	// is not nil.
+	ErrorHandling HandlerErrorHandling
+	// If DisableCompression is true, the handler will never compress the
+	// response, even if requested by the client.
+	DisableCompression bool
+	// The number of concurrent HTTP requests is limited to
+	// MaxRequestsInFlight. Additional requests are responded to with 503
+	// Service Unavailable and a suitable message in the body. If
+	// MaxRequestsInFlight is 0 or negative, no limit is applied.
+	MaxRequestsInFlight int
+	// If handling a request takes longer than Timeout, it is responded to
+	// with 503 Service Unavailable and a suitable message. No timeout is
+	// applied if Timeout is 0 or negative. Note that with the current
+	// implementation, reaching the timeout simply ends the HTTP request as
+	// described above (and even that only if sending of the body hasn't
+	// started yet), while the bulk work of gathering all the metrics keeps
+	// running in the background (with the eventual result to be thrown
+	// away). Until the implementation is improved, it is recommended to
+	// implement a separate timeout in potentially slow Collectors.
+	Timeout time.Duration
+}
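
A hedged sketch combining HandlerFor with the HandlerOpts above: a dedicated registry, error logging, best-effort serving, and both protective limits. The registry contents and values are arbitrary examples:

```go
package main

import (
	"log"
	"net/http"
	"os"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewGoCollector())

	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{
		ErrorLog:            log.New(os.Stderr, "promhttp: ", log.LstdFlags),
		ErrorHandling:       promhttp.ContinueOnError, // best effort
		MaxRequestsInFlight: 3,                        // 503 beyond this
		Timeout:             10 * time.Second,         // 503 on slow gathers
	}))
	log.Fatal(http.ListenAndServe(":2112", nil))
}
```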
+
+// gzipAccepted returns whether the client will accept gzip-encoded content.
+func gzipAccepted(header http.Header) bool {
+	a := header.Get(acceptEncodingHeader)
+	parts := strings.Split(a, ",")
+	for _, part := range parts {
+		part = strings.TrimSpace(part)
+		if part == "gzip" || strings.HasPrefix(part, "gzip;") {
+			return true
+		}
+	}
+	return false
+}
+
+// httpError removes any content-encoding header and then calls http.Error with
+// the provided error and http.StatusInternalServerError. The error contents
+// are supposed to be uncompressed plain text. However, as with a plain
+// http.Error, any header settings will be void if the header has already been
+// sent. The error message will still be written to the writer, but it will
+// probably be of limited use.
+func httpError(rsp http.ResponseWriter, err error) {
+	rsp.Header().Del(contentEncodingHeader)
+	http.Error(
+		rsp,
+		"An error has occurred while serving metrics:\n\n"+err.Error(),
+		http.StatusInternalServerError,
+	)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
new file mode 100644
index 0000000..86fd564
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
@@ -0,0 +1,97 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package promhttp
+
+import (
+	"net/http"
+	"time"
+
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+// The RoundTripperFunc type is an adapter to allow the use of ordinary
+// functions as RoundTrippers. If f is a function with the appropriate
+// signature, RoundTripperFunc(f) is a RoundTripper that calls f.
+type RoundTripperFunc func(req *http.Request) (*http.Response, error)
+
+// RoundTrip implements the RoundTripper interface.
+func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) {
+	return rt(r)
+}
+
+// InstrumentRoundTripperInFlight is a middleware that wraps the provided
+// http.RoundTripper. It sets the provided prometheus.Gauge to the number of
+// requests currently handled by the wrapped http.RoundTripper.
+//
+// See ExampleInstrumentRoundTripperDuration for example usage.
+func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc {
+	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+		gauge.Inc()
+		defer gauge.Dec()
+		return next.RoundTrip(r)
+	})
+}
+
+// InstrumentRoundTripperCounter is a middleware that wraps the provided
+// http.RoundTripper to observe the request result with the provided CounterVec.
+// The CounterVec must have zero, one, or two non-const non-curried labels. For
+// those, the only allowed label names are "code" and "method". The function
+// panics otherwise. Partitioning of the CounterVec happens by HTTP status code
+// and/or HTTP method if the respective instance label names are present in the
+// CounterVec. For unpartitioned counting, use a CounterVec with zero labels.
+//
+// If the wrapped RoundTripper panics or returns a non-nil error, the Counter
+// is not incremented.
+//
+// See ExampleInstrumentRoundTripperDuration for example usage.
+func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper) RoundTripperFunc {
+	code, method := checkLabels(counter)
+
+	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+		resp, err := next.RoundTrip(r)
+		if err == nil {
+			counter.With(labels(code, method, r.Method, resp.StatusCode)).Inc()
+		}
+		return resp, err
+	})
+}
+
+// InstrumentRoundTripperDuration is a middleware that wraps the provided
+// http.RoundTripper to observe the request duration with the provided
+// ObserverVec.  The ObserverVec must have zero, one, or two non-const
+// non-curried labels. For those, the only allowed label names are "code" and
+// "method". The function panics otherwise. The Observe method of the Observer
+// in the ObserverVec is called with the request duration in
+// seconds. Partitioning happens by HTTP status code and/or HTTP method if the
+// respective instance label names are present in the ObserverVec. For
+// unpartitioned observations, use an ObserverVec with zero labels. Note that
+// partitioning of Histograms is expensive and should be used judiciously.
+//
+// If the wrapped RoundTripper panics or returns a non-nil error, no values are
+// reported.
+//
+// Note that this method is only guaranteed to never observe negative durations
+// if used with Go1.9+.
+func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper) RoundTripperFunc {
+	code, method := checkLabels(obs)
+
+	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+		start := time.Now()
+		resp, err := next.RoundTrip(r)
+		if err == nil {
+			obs.With(labels(code, method, r.Method, resp.StatusCode)).Observe(time.Since(start).Seconds())
+		}
+		return resp, err
+	})
+}
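
The three client middlewares above compose by nesting, innermost first. A sketch along the lines of the upstream example; the metric names are illustrative:

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	inFlight := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "client_in_flight_requests",
		Help: "In-flight outgoing HTTP requests.",
	})
	counter := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "client_api_requests_total",
			Help: "Outgoing API requests by code and method.",
		},
		[]string{"code", "method"},
	)
	duration := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:    "client_request_duration_seconds",
			Help:    "Outgoing request latencies.",
			Buckets: prometheus.DefBuckets,
		},
		nil, // zero labels: unpartitioned, per the doc comment above
	)
	prometheus.MustRegister(inFlight, counter, duration)

	client := &http.Client{
		// The innermost middleware runs closest to the actual transport.
		Transport: promhttp.InstrumentRoundTripperInFlight(inFlight,
			promhttp.InstrumentRoundTripperCounter(counter,
				promhttp.InstrumentRoundTripperDuration(duration, http.DefaultTransport),
			),
		),
	}
	if resp, err := client.Get("https://example.org"); err == nil {
		resp.Body.Close()
	}
}
```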
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go
new file mode 100644
index 0000000..a034d1e
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go
@@ -0,0 +1,144 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.8
+
+package promhttp
+
+import (
+	"context"
+	"crypto/tls"
+	"net/http"
+	"net/http/httptrace"
+	"time"
+)
+
+// InstrumentTrace is used to offer flexibility in instrumenting the available
+// httptrace.ClientTrace hook functions. Each function is passed a float64
+// representing the time in seconds since the start of the HTTP request. A user
+// may choose to use separately bucketed Histograms, or to implement custom
+// instance labels on a per-function basis.
+type InstrumentTrace struct {
+	GotConn              func(float64)
+	PutIdleConn          func(float64)
+	GotFirstResponseByte func(float64)
+	Got100Continue       func(float64)
+	DNSStart             func(float64)
+	DNSDone              func(float64)
+	ConnectStart         func(float64)
+	ConnectDone          func(float64)
+	TLSHandshakeStart    func(float64)
+	TLSHandshakeDone     func(float64)
+	WroteHeaders         func(float64)
+	Wait100Continue      func(float64)
+	WroteRequest         func(float64)
+}
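+
+// A minimal usage sketch (editorial; metric names are illustrative): wiring
+// two of the hooks above to plain Histograms via their Observe methods.
+//
+//	dnsLatency := prometheus.NewHistogram(prometheus.HistogramOpts{
+//		Name: "dns_duration_seconds", Help: "DNS lookup latency.",
+//	})
+//	tlsLatency := prometheus.NewHistogram(prometheus.HistogramOpts{
+//		Name: "tls_duration_seconds", Help: "TLS handshake latency.",
+//	})
+//	prometheus.MustRegister(dnsLatency, tlsLatency)
+//	trace := &promhttp.InstrumentTrace{
+//		DNSDone:          dnsLatency.Observe,
+//		TLSHandshakeDone: tlsLatency.Observe,
+//	}
+//	client := &http.Client{
+//		Transport: promhttp.InstrumentRoundTripperTrace(trace, http.DefaultTransport),
+//	}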
+
+// InstrumentRoundTripperTrace is a middleware that wraps the provided
+// RoundTripper and reports times to hook functions provided in the
+// InstrumentTrace struct. Hook functions that are not present in the provided
+// InstrumentTrace struct are ignored. Times reported to the hook functions are
+// time since the start of the request. Only with Go1.9+, those times are
+// guaranteed to never be negative. (Earlier Go versions are not using a
+// monotonic clock.) Note that partitioning of Histograms is expensive and
+// should be used judiciously.
+//
+// For hook functions that receive an error as an argument, no observations are
+// made in the event of a non-nil error value.
+//
+// See ExampleInstrumentRoundTripperDuration for example usage.
+func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc {
+	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+		start := time.Now()
+
+		trace := &httptrace.ClientTrace{
+			GotConn: func(_ httptrace.GotConnInfo) {
+				if it.GotConn != nil {
+					it.GotConn(time.Since(start).Seconds())
+				}
+			},
+			PutIdleConn: func(err error) {
+				if err != nil {
+					return
+				}
+				if it.PutIdleConn != nil {
+					it.PutIdleConn(time.Since(start).Seconds())
+				}
+			},
+			DNSStart: func(_ httptrace.DNSStartInfo) {
+				if it.DNSStart != nil {
+					it.DNSStart(time.Since(start).Seconds())
+				}
+			},
+			DNSDone: func(_ httptrace.DNSDoneInfo) {
+				if it.DNSDone != nil {
+					it.DNSDone(time.Since(start).Seconds())
+				}
+			},
+			ConnectStart: func(_, _ string) {
+				if it.ConnectStart != nil {
+					it.ConnectStart(time.Since(start).Seconds())
+				}
+			},
+			ConnectDone: func(_, _ string, err error) {
+				if err != nil {
+					return
+				}
+				if it.ConnectDone != nil {
+					it.ConnectDone(time.Since(start).Seconds())
+				}
+			},
+			GotFirstResponseByte: func() {
+				if it.GotFirstResponseByte != nil {
+					it.GotFirstResponseByte(time.Since(start).Seconds())
+				}
+			},
+			Got100Continue: func() {
+				if it.Got100Continue != nil {
+					it.Got100Continue(time.Since(start).Seconds())
+				}
+			},
+			TLSHandshakeStart: func() {
+				if it.TLSHandshakeStart != nil {
+					it.TLSHandshakeStart(time.Since(start).Seconds())
+				}
+			},
+			TLSHandshakeDone: func(_ tls.ConnectionState, err error) {
+				if err != nil {
+					return
+				}
+				if it.TLSHandshakeDone != nil {
+					it.TLSHandshakeDone(time.Since(start).Seconds())
+				}
+			},
+			WroteHeaders: func() {
+				if it.WroteHeaders != nil {
+					it.WroteHeaders(time.Since(start).Seconds())
+				}
+			},
+			Wait100Continue: func() {
+				if it.Wait100Continue != nil {
+					it.Wait100Continue(time.Since(start).Seconds())
+				}
+			},
+			WroteRequest: func(_ httptrace.WroteRequestInfo) {
+				if it.WroteRequest != nil {
+					it.WroteRequest(time.Since(start).Seconds())
+				}
+			},
+		}
+		// Use the request's own context as the parent so deadlines and
+		// cancellation are preserved when attaching the trace.
+		r = r.WithContext(httptrace.WithClientTrace(r.Context(), trace))
+
+		return next.RoundTrip(r)
+	})
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
new file mode 100644
index 0000000..9db2438
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
@@ -0,0 +1,447 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package promhttp
+
+import (
+	"errors"
+	"net/http"
+	"strconv"
+	"strings"
+	"time"
+
+	dto "github.com/prometheus/client_model/go"
+
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+// magicString is used for the hacky label test in checkLabels. Remove once fixed.
+const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa"
+
+// InstrumentHandlerInFlight is a middleware that wraps the provided
+// http.Handler. It sets the provided prometheus.Gauge to the number of
+// requests currently handled by the wrapped http.Handler.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		g.Inc()
+		defer g.Dec()
+		next.ServeHTTP(w, r)
+	})
+}
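+
+// A minimal usage sketch (editorial; apiHandler and the metric names are
+// illustrative): the server-side middlewares in this file nest in the same
+// way as the client-side ones, so a gauge and a counter can be layered
+// around a single handler.
+//
+//	inFlight := prometheus.NewGauge(prometheus.GaugeOpts{
+//		Name: "in_flight_requests", Help: "In-flight HTTP requests.",
+//	})
+//	reqs := prometheus.NewCounterVec(
+//		prometheus.CounterOpts{Name: "api_requests_total", Help: "API requests."},
+//		[]string{"code", "method"},
+//	)
+//	prometheus.MustRegister(inFlight, reqs)
+//	http.Handle("/api", promhttp.InstrumentHandlerInFlight(inFlight,
+//		promhttp.InstrumentHandlerCounter(reqs, apiHandler)))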
+
+// InstrumentHandlerDuration is a middleware that wraps the provided
+// http.Handler to observe the request duration with the provided ObserverVec.
+// The ObserverVec must have zero, one, or two non-const non-curried labels. For
+// those, the only allowed label names are "code" and "method". The function
+// panics otherwise. The Observe method of the Observer in the ObserverVec is
+// called with the request duration in seconds. Partitioning happens by HTTP
+// status code and/or HTTP method if the respective instance label names are
+// present in the ObserverVec. For unpartitioned observations, use an
+// ObserverVec with zero labels. Note that partitioning of Histograms is
+// expensive and should be used judiciously.
+//
+// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
+//
+// If the wrapped Handler panics, no values are reported.
+//
+// Note that this method is only guaranteed to never observe negative durations
+// if used with Go1.9+.
+func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
+	code, method := checkLabels(obs)
+
+	if code {
+		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			now := time.Now()
+			d := newDelegator(w, nil)
+			next.ServeHTTP(d, r)
+
+			obs.With(labels(code, method, r.Method, d.Status())).Observe(time.Since(now).Seconds())
+		})
+	}
+
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		now := time.Now()
+		next.ServeHTTP(w, r)
+		obs.With(labels(code, method, r.Method, 0)).Observe(time.Since(now).Seconds())
+	})
+}
+
+// InstrumentHandlerCounter is a middleware that wraps the provided http.Handler
+// to observe the request result with the provided CounterVec.  The CounterVec
+// must have zero, one, or two non-const non-curried labels. For those, the only
+// allowed label names are "code" and "method". The function panics
+// otherwise. Partitioning of the CounterVec happens by HTTP status code and/or
+// HTTP method if the respective instance label names are present in the
+// CounterVec. For unpartitioned counting, use a CounterVec with zero labels.
+//
+// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
+//
+// If the wrapped Handler panics, the Counter is not incremented.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler) http.HandlerFunc {
+	code, method := checkLabels(counter)
+
+	if code {
+		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			d := newDelegator(w, nil)
+			next.ServeHTTP(d, r)
+			counter.With(labels(code, method, r.Method, d.Status())).Inc()
+		})
+	}
+
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		next.ServeHTTP(w, r)
+		counter.With(labels(code, method, r.Method, 0)).Inc()
+	})
+}
+
+// InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided
+// http.Handler to observe with the provided ObserverVec the request duration
+// until the response headers are written. The ObserverVec must have zero, one,
+// or two non-const non-curried labels. For those, the only allowed label names
+// are "code" and "method". The function panics otherwise. The Observe method of
+// the Observer in the ObserverVec is called with the request duration in
+// seconds. Partitioning happens by HTTP status code and/or HTTP method if the
+// respective instance label names are present in the ObserverVec. For
+// unpartitioned observations, use an ObserverVec with zero labels. Note that
+// partitioning of Histograms is expensive and should be used judiciously.
+//
+// If the wrapped Handler panics before calling WriteHeader, no value is
+// reported.
+//
+// Note that this method is only guaranteed to never observe negative durations
+// if used with Go1.9+.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
+	code, method := checkLabels(obs)
+
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		now := time.Now()
+		d := newDelegator(w, func(status int) {
+			obs.With(labels(code, method, r.Method, status)).Observe(time.Since(now).Seconds())
+		})
+		next.ServeHTTP(d, r)
+	})
+}
+
+// InstrumentHandlerRequestSize is a middleware that wraps the provided
+// http.Handler to observe the request size with the provided ObserverVec.  The
+// ObserverVec must have zero, one, or two non-const non-curried labels. For
+// those, the only allowed label names are "code" and "method". The function
+// panics otherwise. The Observe method of the Observer in the ObserverVec is
+// called with the request size in bytes. Partitioning happens by HTTP status
+// code and/or HTTP method if the respective instance label names are present in
+// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero
+// labels. Note that partitioning of Histograms is expensive and should be used
+// judiciously.
+//
+// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
+//
+// If the wrapped Handler panics, no values are reported.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
+	code, method := checkLabels(obs)
+
+	if code {
+		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			d := newDelegator(w, nil)
+			next.ServeHTTP(d, r)
+			size := computeApproximateRequestSize(r)
+			obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(size))
+		})
+	}
+
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		next.ServeHTTP(w, r)
+		size := computeApproximateRequestSize(r)
+		obs.With(labels(code, method, r.Method, 0)).Observe(float64(size))
+	})
+}
+
+// InstrumentHandlerResponseSize is a middleware that wraps the provided
+// http.Handler to observe the response size with the provided ObserverVec.  The
+// ObserverVec must have zero, one, or two non-const non-curried labels. For
+// those, the only allowed label names are "code" and "method". The function
+// panics otherwise. The Observe method of the Observer in the ObserverVec is
+// called with the response size in bytes. Partitioning happens by HTTP status
+// code and/or HTTP method if the respective instance label names are present in
+// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero
+// labels. Note that partitioning of Histograms is expensive and should be used
+// judiciously.
+//
+// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
+//
+// If the wrapped Handler panics, no values are reported.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler) http.Handler {
+	code, method := checkLabels(obs)
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		d := newDelegator(w, nil)
+		next.ServeHTTP(d, r)
+		obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(d.Written()))
+	})
+}
+
+func checkLabels(c prometheus.Collector) (code bool, method bool) {
+	// TODO(beorn7): Remove this hacky way to check for instance labels
+	// once Descriptors can have their dimensionality queried.
+	var (
+		desc *prometheus.Desc
+		m    prometheus.Metric
+		pm   dto.Metric
+		lvs  []string
+	)
+
+	// Get the Desc from the Collector.
+	descc := make(chan *prometheus.Desc, 1)
+	c.Describe(descc)
+
+	select {
+	case desc = <-descc:
+	default:
+		panic("no description provided by collector")
+	}
+	select {
+	case <-descc:
+		panic("more than one description provided by collector")
+	default:
+	}
+
+	close(descc)
+
+	// Create a ConstMetric with the Desc. Since we don't know how many
+	// variable labels there are, try for as long as it needs.
+	for err := errors.New("dummy"); err != nil; lvs = append(lvs, magicString) {
+		m, err = prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, lvs...)
+	}
+
+	// Write out the metric into a proto message and look at the labels.
+	// If the value is not the magicString, it is a constLabel, which doesn't interest us.
+	// If the label is curried, it doesn't interest us.
+	// In all other cases, only "code" or "method" is allowed.
+	if err := m.Write(&pm); err != nil {
+		panic("error checking metric for labels")
+	}
+	for _, label := range pm.Label {
+		name, value := label.GetName(), label.GetValue()
+		if value != magicString || isLabelCurried(c, name) {
+			continue
+		}
+		switch name {
+		case "code":
+			code = true
+		case "method":
+			method = true
+		default:
+			panic("metric partitioned with non-supported labels")
+		}
+	}
+	return
+}
+
+func isLabelCurried(c prometheus.Collector, label string) bool {
+	// This is even hackier than the label test above.
+	// We essentially try to curry again and see if it works.
+	// But for that, we need to type-convert to the two
+	// types we use here, ObserverVec or *CounterVec.
+	switch v := c.(type) {
+	case *prometheus.CounterVec:
+		if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil {
+			return false
+		}
+	case prometheus.ObserverVec:
+		if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil {
+			return false
+		}
+	default:
+		panic("unsupported metric vec type")
+	}
+	return true
+}
+
+// emptyLabels is a one-time allocation for non-partitioned metrics to avoid
+// unnecessary allocations on each request.
+var emptyLabels = prometheus.Labels{}
+
+func labels(code, method bool, reqMethod string, status int) prometheus.Labels {
+	if !(code || method) {
+		return emptyLabels
+	}
+	labels := prometheus.Labels{}
+
+	if code {
+		labels["code"] = sanitizeCode(status)
+	}
+	if method {
+		labels["method"] = sanitizeMethod(reqMethod)
+	}
+
+	return labels
+}
+
+func computeApproximateRequestSize(r *http.Request) int {
+	s := 0
+	if r.URL != nil {
+		s += len(r.URL.String())
+	}
+
+	s += len(r.Method)
+	s += len(r.Proto)
+	for name, values := range r.Header {
+		s += len(name)
+		for _, value := range values {
+			s += len(value)
+		}
+	}
+	s += len(r.Host)
+
+	// N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
+
+	if r.ContentLength != -1 {
+		s += int(r.ContentLength)
+	}
+	return s
+}
+
+func sanitizeMethod(m string) string {
+	switch m {
+	case "GET", "get":
+		return "get"
+	case "PUT", "put":
+		return "put"
+	case "HEAD", "head":
+		return "head"
+	case "POST", "post":
+		return "post"
+	case "DELETE", "delete":
+		return "delete"
+	case "CONNECT", "connect":
+		return "connect"
+	case "OPTIONS", "options":
+		return "options"
+	case "NOTIFY", "notify":
+		return "notify"
+	default:
+		return strings.ToLower(m)
+	}
+}
+
+// If the wrapped http.Handler has not set a status code, i.e. the value is
+// currently 0, sanitizeCode will return 200, for consistency with behavior in
+// the stdlib.
+func sanitizeCode(s int) string {
+	switch s {
+	case 100:
+		return "100"
+	case 101:
+		return "101"
+
+	case 200, 0:
+		return "200"
+	case 201:
+		return "201"
+	case 202:
+		return "202"
+	case 203:
+		return "203"
+	case 204:
+		return "204"
+	case 205:
+		return "205"
+	case 206:
+		return "206"
+
+	case 300:
+		return "300"
+	case 301:
+		return "301"
+	case 302:
+		return "302"
+	case 304:
+		return "304"
+	case 305:
+		return "305"
+	case 307:
+		return "307"
+
+	case 400:
+		return "400"
+	case 401:
+		return "401"
+	case 402:
+		return "402"
+	case 403:
+		return "403"
+	case 404:
+		return "404"
+	case 405:
+		return "405"
+	case 406:
+		return "406"
+	case 407:
+		return "407"
+	case 408:
+		return "408"
+	case 409:
+		return "409"
+	case 410:
+		return "410"
+	case 411:
+		return "411"
+	case 412:
+		return "412"
+	case 413:
+		return "413"
+	case 414:
+		return "414"
+	case 415:
+		return "415"
+	case 416:
+		return "416"
+	case 417:
+		return "417"
+	case 418:
+		return "418"
+
+	case 500:
+		return "500"
+	case 501:
+		return "501"
+	case 502:
+		return "502"
+	case 503:
+		return "503"
+	case 504:
+		return "504"
+	case 505:
+		return "505"
+
+	case 428:
+		return "428"
+	case 429:
+		return "429"
+	case 431:
+		return "431"
+	case 511:
+		return "511"
+
+	default:
+		return strconv.Itoa(s)
+	}
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
new file mode 100644
index 0000000..b5e70b9
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
@@ -0,0 +1,937 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"runtime"
+	"sort"
+	"strings"
+	"sync"
+	"unicode/utf8"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/prometheus/common/expfmt"
+
+	dto "github.com/prometheus/client_model/go"
+
+	"github.com/prometheus/client_golang/prometheus/internal"
+)
+
+const (
+	// Capacity for the channel to collect metrics and descriptors.
+	capMetricChan = 1000
+	capDescChan   = 10
+)
+
+// DefaultRegisterer and DefaultGatherer are the implementations of the
+// Registerer and Gatherer interface a number of convenience functions in this
+// package act on. Initially, both variables point to the same Registry, which
+// has a process collector (currently on Linux only, see NewProcessCollector)
+// and a Go collector (see NewGoCollector, in particular the note about
+// stop-the-world implication with Go versions older than 1.9) already
+// registered. This approach to keep default instances as global state mirrors
+// the approach of other packages in the Go standard library. Note that there
+// are caveats. Change the variables with caution and only if you understand the
+// consequences. Users who want to avoid global state altogether should not use
+// the convenience functions and act on custom instances instead.
+var (
+	defaultRegistry              = NewRegistry()
+	DefaultRegisterer Registerer = defaultRegistry
+	DefaultGatherer   Gatherer   = defaultRegistry
+)
+
+func init() {
+	MustRegister(NewProcessCollector(ProcessCollectorOpts{}))
+	MustRegister(NewGoCollector())
+}
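+
+// A minimal usage sketch (editorial; the promhttp import is assumed): users
+// who want to avoid the global default instances can gather from a custom
+// Registry instead.
+//
+//	reg := prometheus.NewRegistry()
+//	reg.MustRegister(prometheus.NewGoCollector())
+//	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))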
+
+// NewRegistry creates a new vanilla Registry without any Collectors
+// pre-registered.
+func NewRegistry() *Registry {
+	return &Registry{
+		collectorsByID:  map[uint64]Collector{},
+		descIDs:         map[uint64]struct{}{},
+		dimHashesByName: map[string]uint64{},
+	}
+}
+
+// NewPedanticRegistry returns a registry that checks during collection if each
+// collected Metric is consistent with its reported Desc, and if the Desc has
+// actually been registered with the registry. Unchecked Collectors (those whose
+// Describe method does not yield any descriptors) are excluded from the check.
+//
+// Usually, a Registry will be happy as long as the union of all collected
+// Metrics is consistent and valid even if some metrics are not consistent with
+// their own Desc or a Desc provided by their registered Collector. Well-behaved
+// Collectors and Metrics will only provide consistent Descs. This Registry is
+// useful to test the implementation of Collectors and Metrics.
+func NewPedanticRegistry() *Registry {
+	r := NewRegistry()
+	r.pedanticChecksEnabled = true
+	return r
+}
+
+// Registerer is the interface for the part of a registry in charge of
+// registering and unregistering. Users of custom registries should use
+// Registerer as type for registration purposes (rather than the Registry type
+// directly). In that way, they are free to use custom Registerer implementations
+// (e.g. for testing purposes).
+type Registerer interface {
+	// Register registers a new Collector to be included in metrics
+	// collection. It returns an error if the descriptors provided by the
+	// Collector are invalid or if they — in combination with descriptors of
+	// already registered Collectors — do not fulfill the consistency and
+	// uniqueness criteria described in the documentation of metric.Desc.
+	//
+	// If the provided Collector is equal to a Collector already registered
+	// (which includes the case of re-registering the same Collector), the
+	// returned error is an instance of AlreadyRegisteredError, which
+	// contains the previously registered Collector.
+	//
+	// A Collector whose Describe method does not yield any Desc is treated
+	// as unchecked. Registration will always succeed. No check for
+	// re-registering (see previous paragraph) is performed. Thus, the
+	// caller is responsible for not double-registering the same unchecked
+	// Collector, and for providing a Collector that will not cause
+	// inconsistent metrics on collection. (This would lead to scrape
+	// errors.)
+	Register(Collector) error
+	// MustRegister works like Register but registers any number of
+	// Collectors and panics upon the first registration that causes an
+	// error.
+	MustRegister(...Collector)
+	// Unregister unregisters the Collector that equals the Collector passed
+	// in as an argument.  (Two Collectors are considered equal if their
+	// Describe method yields the same set of descriptors.) The function
+	// returns whether a Collector was unregistered. Note that an unchecked
+	// Collector cannot be unregistered (as its Describe method does not
+	// yield any descriptor).
+	//
+	// Note that even after unregistering, it will not be possible to
+	// register a new Collector that is inconsistent with the unregistered
+	// Collector, e.g. a Collector collecting metrics with the same name but
+	// a different help string. The rationale here is that the same registry
+	// instance must only collect consistent metrics throughout its
+	// lifetime.
+	Unregister(Collector) bool
+}
+
+// Gatherer is the interface for the part of a registry in charge of gathering
+// the collected metrics into a number of MetricFamilies. The Gatherer interface
+// comes with the same general implication as described for the Registerer
+// interface.
+type Gatherer interface {
+	// Gather calls the Collect method of the registered Collectors and then
+	// gathers the collected metrics into a lexicographically sorted slice
+	// of uniquely named MetricFamily protobufs. Gather ensures that the
+	// returned slice is valid and self-consistent so that it can be used
+	// for valid exposition. As an exception to the strict consistency
+	// requirements described for metric.Desc, Gather will tolerate
+	// different sets of label names for metrics of the same metric family.
+	//
+	// Even if an error occurs, Gather attempts to gather as many metrics as
+	// possible. Hence, if a non-nil error is returned, the returned
+	// MetricFamily slice could be nil (in case of a fatal error that
+	// prevented any meaningful metric collection) or contain a number of
+	// MetricFamily protobufs, some of which might be incomplete, and some
+	// might be missing altogether. The returned error (which might be a
+	// MultiError) explains the details. Note that this is mostly useful for
+	// debugging purposes. If the gathered protobufs are to be used for
+	// exposition in actual monitoring, it is almost always better to not
+	// expose an incomplete result and instead disregard the returned
+	// MetricFamily protobufs in case the returned error is non-nil.
+	Gather() ([]*dto.MetricFamily, error)
+}
+
+// Register registers the provided Collector with the DefaultRegisterer.
+//
+// Register is a shortcut for DefaultRegisterer.Register(c). See there for more
+// details.
+func Register(c Collector) error {
+	return DefaultRegisterer.Register(c)
+}
+
+// MustRegister registers the provided Collectors with the DefaultRegisterer and
+// panics if any error occurs.
+//
+// MustRegister is a shortcut for DefaultRegisterer.MustRegister(cs...). See
+// there for more details.
+func MustRegister(cs ...Collector) {
+	DefaultRegisterer.MustRegister(cs...)
+}
+
+// Unregister removes the registration of the provided Collector from the
+// DefaultRegisterer.
+//
+// Unregister is a shortcut for DefaultRegisterer.Unregister(c). See there for
+// more details.
+func Unregister(c Collector) bool {
+	return DefaultRegisterer.Unregister(c)
+}
+
+// GathererFunc turns a function into a Gatherer.
+type GathererFunc func() ([]*dto.MetricFamily, error)
+
+// Gather implements Gatherer.
+func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) {
+	return gf()
+}
+
+// AlreadyRegisteredError is returned by the Register method if the Collector to
+// be registered has already been registered before, or a different Collector
+// that collects the same metrics has been registered before. Registration fails
+// in that case, but you can detect from the kind of error what has
+// happened. The error contains fields for the existing Collector and the
+// (rejected) new Collector that equals the existing one. This can be used to
+// find out if an equal Collector has been registered before and switch over to
+// using the old one, as demonstrated in the example.
+type AlreadyRegisteredError struct {
+	ExistingCollector, NewCollector Collector
+}
+
+func (err AlreadyRegisteredError) Error() string {
+	return "duplicate metrics collector registration attempted"
+}
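+
+// A minimal usage sketch of the switch-over pattern described above
+// (editorial; the metric name is illustrative):
+//
+//	c := prometheus.NewCounter(prometheus.CounterOpts{
+//		Name: "requests_total", Help: "Total requests.",
+//	})
+//	if err := prometheus.Register(c); err != nil {
+//		if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
+//			c = are.ExistingCollector.(prometheus.Counter) // Reuse the old one.
+//		} else {
+//			panic(err)
+//		}
+//	}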
+
+// MultiError is a slice of errors implementing the error interface. It is used
+// by a Gatherer to report multiple errors during MetricFamily gathering.
+type MultiError []error
+
+func (errs MultiError) Error() string {
+	if len(errs) == 0 {
+		return ""
+	}
+	buf := &bytes.Buffer{}
+	fmt.Fprintf(buf, "%d error(s) occurred:", len(errs))
+	for _, err := range errs {
+		fmt.Fprintf(buf, "\n* %s", err)
+	}
+	return buf.String()
+}
+
+// Append appends the provided error if it is not nil.
+func (errs *MultiError) Append(err error) {
+	if err != nil {
+		*errs = append(*errs, err)
+	}
+}
+
+// MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only
+// contained error as error if len(errs) is 1. In all other cases, it returns
+// the MultiError directly. This is helpful for returning a MultiError in a way
+// that only uses the MultiError if needed.
+func (errs MultiError) MaybeUnwrap() error {
+	switch len(errs) {
+	case 0:
+		return nil
+	case 1:
+		return errs[0]
+	default:
+		return errs
+	}
+}
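+
+// A minimal usage sketch (editorial; doA and doB are illustrative helpers
+// returning error):
+//
+//	var errs prometheus.MultiError
+//	errs.Append(doA())
+//	errs.Append(doB())
+//	return errs.MaybeUnwrap() // nil, the single error, or the MultiError.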
+
+// Registry registers Prometheus collectors, collects their metrics, and gathers
+// them into MetricFamilies for exposition. It implements both Registerer and
+// Gatherer. The zero value is not usable. Create instances with NewRegistry or
+// NewPedanticRegistry.
+type Registry struct {
+	mtx                   sync.RWMutex
+	collectorsByID        map[uint64]Collector // ID is a hash of the descIDs.
+	descIDs               map[uint64]struct{}
+	dimHashesByName       map[string]uint64
+	uncheckedCollectors   []Collector
+	pedanticChecksEnabled bool
+}
+
+// Register implements Registerer.
+func (r *Registry) Register(c Collector) error {
+	var (
+		descChan           = make(chan *Desc, capDescChan)
+		newDescIDs         = map[uint64]struct{}{}
+		newDimHashesByName = map[string]uint64{}
+		collectorID        uint64 // Just a sum of all desc IDs.
+		duplicateDescErr   error
+	)
+	go func() {
+		c.Describe(descChan)
+		close(descChan)
+	}()
+	r.mtx.Lock()
+	defer func() {
+		// Drain channel in case of premature return to not leak a goroutine.
+		for range descChan {
+		}
+		r.mtx.Unlock()
+	}()
+	// Conduct various tests...
+	for desc := range descChan {
+
+		// Is the descriptor valid at all?
+		if desc.err != nil {
+			return fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err)
+		}
+
+		// Is the descID unique?
+		// (In other words: Is the fqName + constLabel combination unique?)
+		if _, exists := r.descIDs[desc.id]; exists {
+			duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc)
+		}
+		// If it is not a duplicate desc in this collector, add it to
+		// the collectorID.  (We allow duplicate descs within the same
+		// collector, but their existence must be a no-op.)
+		if _, exists := newDescIDs[desc.id]; !exists {
+			newDescIDs[desc.id] = struct{}{}
+			collectorID += desc.id
+		}
+
+		// Are all the label names and the help string consistent with
+		// previous descriptors of the same name?
+		// First check existing descriptors...
+		if dimHash, exists := r.dimHashesByName[desc.fqName]; exists {
+			if dimHash != desc.dimHash {
+				return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc)
+			}
+		} else {
+			// ...then check the new descriptors already seen.
+			if dimHash, exists := newDimHashesByName[desc.fqName]; exists {
+				if dimHash != desc.dimHash {
+					return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc)
+				}
+			} else {
+				newDimHashesByName[desc.fqName] = desc.dimHash
+			}
+		}
+	}
+	// A Collector yielding no Desc at all is considered unchecked.
+	if len(newDescIDs) == 0 {
+		r.uncheckedCollectors = append(r.uncheckedCollectors, c)
+		return nil
+	}
+	if existing, exists := r.collectorsByID[collectorID]; exists {
+		return AlreadyRegisteredError{
+			ExistingCollector: existing,
+			NewCollector:      c,
+		}
+	}
+	// If the collectorID is new, but at least one of the descs existed
+	// before, we are in trouble.
+	if duplicateDescErr != nil {
+		return duplicateDescErr
+	}
+
+	// Only after all tests have passed, actually register.
+	r.collectorsByID[collectorID] = c
+	for hash := range newDescIDs {
+		r.descIDs[hash] = struct{}{}
+	}
+	for name, dimHash := range newDimHashesByName {
+		r.dimHashesByName[name] = dimHash
+	}
+	return nil
+}
+
+// Unregister implements Registerer.
+func (r *Registry) Unregister(c Collector) bool {
+	var (
+		descChan    = make(chan *Desc, capDescChan)
+		descIDs     = map[uint64]struct{}{}
+		collectorID uint64 // Just a sum of the desc IDs.
+	)
+	go func() {
+		c.Describe(descChan)
+		close(descChan)
+	}()
+	for desc := range descChan {
+		if _, exists := descIDs[desc.id]; !exists {
+			collectorID += desc.id
+			descIDs[desc.id] = struct{}{}
+		}
+	}
+
+	r.mtx.RLock()
+	if _, exists := r.collectorsByID[collectorID]; !exists {
+		r.mtx.RUnlock()
+		return false
+	}
+	r.mtx.RUnlock()
+
+	r.mtx.Lock()
+	defer r.mtx.Unlock()
+
+	delete(r.collectorsByID, collectorID)
+	for id := range descIDs {
+		delete(r.descIDs, id)
+	}
+	// dimHashesByName is left untouched as those must be consistent
+	// throughout the lifetime of a program.
+	return true
+}
+
+// MustRegister implements Registerer.
+func (r *Registry) MustRegister(cs ...Collector) {
+	for _, c := range cs {
+		if err := r.Register(c); err != nil {
+			panic(err)
+		}
+	}
+}
+
+// Gather implements Gatherer.
+func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
+	var (
+		checkedMetricChan   = make(chan Metric, capMetricChan)
+		uncheckedMetricChan = make(chan Metric, capMetricChan)
+		metricHashes        = map[uint64]struct{}{}
+		wg                  sync.WaitGroup
+		errs                MultiError          // The collected errors to return in the end.
+		registeredDescIDs   map[uint64]struct{} // Only used for pedantic checks
+	)
+
+	r.mtx.RLock()
+	goroutineBudget := len(r.collectorsByID) + len(r.uncheckedCollectors)
+	metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName))
+	checkedCollectors := make(chan Collector, len(r.collectorsByID))
+	uncheckedCollectors := make(chan Collector, len(r.uncheckedCollectors))
+	for _, collector := range r.collectorsByID {
+		checkedCollectors <- collector
+	}
+	for _, collector := range r.uncheckedCollectors {
+		uncheckedCollectors <- collector
+	}
+	// In case pedantic checks are enabled, we have to copy the map before
+	// giving up the RLock.
+	if r.pedanticChecksEnabled {
+		registeredDescIDs = make(map[uint64]struct{}, len(r.descIDs))
+		for id := range r.descIDs {
+			registeredDescIDs[id] = struct{}{}
+		}
+	}
+	r.mtx.RUnlock()
+
+	wg.Add(goroutineBudget)
+
+	collectWorker := func() {
+		for {
+			select {
+			case collector := <-checkedCollectors:
+				collector.Collect(checkedMetricChan)
+			case collector := <-uncheckedCollectors:
+				collector.Collect(uncheckedMetricChan)
+			default:
+				return
+			}
+			wg.Done()
+		}
+	}
+
+	// Start the first worker now to make sure at least one is running.
+	go collectWorker()
+	goroutineBudget--
+
+	// Close checkedMetricChan and uncheckedMetricChan once all collectors
+	// are collected.
+	go func() {
+		wg.Wait()
+		close(checkedMetricChan)
+		close(uncheckedMetricChan)
+	}()
+
+	// Drain checkedMetricChan and uncheckedMetricChan in case of premature return.
+	defer func() {
+		if checkedMetricChan != nil {
+			for range checkedMetricChan {
+			}
+		}
+		if uncheckedMetricChan != nil {
+			for range uncheckedMetricChan {
+			}
+		}
+	}()
+
+	// Copy the channel references so we can nil them out later to remove
+	// them from the select statements below.
+	cmc := checkedMetricChan
+	umc := uncheckedMetricChan
+
+	for {
+		select {
+		case metric, ok := <-cmc:
+			if !ok {
+				cmc = nil
+				break
+			}
+			errs.Append(processMetric(
+				metric, metricFamiliesByName,
+				metricHashes,
+				registeredDescIDs,
+			))
+		case metric, ok := <-umc:
+			if !ok {
+				umc = nil
+				break
+			}
+			errs.Append(processMetric(
+				metric, metricFamiliesByName,
+				metricHashes,
+				nil,
+			))
+		default:
+			if goroutineBudget <= 0 || len(checkedCollectors)+len(uncheckedCollectors) == 0 {
+				// All collectors are already being worked on or
+				// we have already as many goroutines started as
+				// there are collectors. Do the same as above,
+				// just without the default.
+				select {
+				case metric, ok := <-cmc:
+					if !ok {
+						cmc = nil
+						break
+					}
+					errs.Append(processMetric(
+						metric, metricFamiliesByName,
+						metricHashes,
+						registeredDescIDs,
+					))
+				case metric, ok := <-umc:
+					if !ok {
+						umc = nil
+						break
+					}
+					errs.Append(processMetric(
+						metric, metricFamiliesByName,
+						metricHashes,
+						nil,
+					))
+				}
+				break
+			}
+			// Start more workers.
+			go collectWorker()
+			goroutineBudget--
+			runtime.Gosched()
+		}
+		// Once both checkedMetricChan and uncheckedMetricChan are closed
+		// and drained, the contraption above will nil out cmc and umc,
+		// and then we can leave the collect loop here.
+		if cmc == nil && umc == nil {
+			break
+		}
+	}
+	return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
+}
+
+// WriteToTextfile calls Gather on the provided Gatherer, encodes the result in the
+// Prometheus text format, and writes it to a temporary file. Upon success, the
+// temporary file is renamed to the provided filename.
+//
+// This is intended for use with the textfile collector of the node exporter.
+// Note that the node exporter expects the filename to be suffixed with ".prom".
+func WriteToTextfile(filename string, g Gatherer) error {
+	tmp, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename))
+	if err != nil {
+		return err
+	}
+	defer os.Remove(tmp.Name())
+
+	mfs, err := g.Gather()
+	if err != nil {
+		return err
+	}
+	for _, mf := range mfs {
+		if _, err := expfmt.MetricFamilyToText(tmp, mf); err != nil {
+			return err
+		}
+	}
+	if err := tmp.Close(); err != nil {
+		return err
+	}
+
+	if err := os.Chmod(tmp.Name(), 0644); err != nil {
+		return err
+	}
+	return os.Rename(tmp.Name(), filename)
+}
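+
+// A minimal usage sketch (editorial; the path is illustrative and must be
+// the directory watched by the node exporter's textfile collector):
+//
+//	if err := prometheus.WriteToTextfile(
+//		"/var/lib/node_exporter/textfile/batchjob.prom",
+//		prometheus.DefaultGatherer,
+//	); err != nil {
+//		log.Fatal(err)
+//	}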
+
+// processMetric is an internal helper method only used by the Gather method.
+func processMetric(
+	metric Metric,
+	metricFamiliesByName map[string]*dto.MetricFamily,
+	metricHashes map[uint64]struct{},
+	registeredDescIDs map[uint64]struct{},
+) error {
+	desc := metric.Desc()
+	// Wrapped metrics collected by an unchecked Collector can have an
+	// invalid Desc.
+	if desc.err != nil {
+		return desc.err
+	}
+	dtoMetric := &dto.Metric{}
+	if err := metric.Write(dtoMetric); err != nil {
+		return fmt.Errorf("error collecting metric %v: %s", desc, err)
+	}
+	metricFamily, ok := metricFamiliesByName[desc.fqName]
+	if ok { // Existing name.
+		if metricFamily.GetHelp() != desc.help {
+			return fmt.Errorf(
+				"collected metric %s %s has help %q but should have %q",
+				desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(),
+			)
+		}
+		// TODO(beorn7): Simplify switch once Desc has type.
+		switch metricFamily.GetType() {
+		case dto.MetricType_COUNTER:
+			if dtoMetric.Counter == nil {
+				return fmt.Errorf(
+					"collected metric %s %s should be a Counter",
+					desc.fqName, dtoMetric,
+				)
+			}
+		case dto.MetricType_GAUGE:
+			if dtoMetric.Gauge == nil {
+				return fmt.Errorf(
+					"collected metric %s %s should be a Gauge",
+					desc.fqName, dtoMetric,
+				)
+			}
+		case dto.MetricType_SUMMARY:
+			if dtoMetric.Summary == nil {
+				return fmt.Errorf(
+					"collected metric %s %s should be a Summary",
+					desc.fqName, dtoMetric,
+				)
+			}
+		case dto.MetricType_UNTYPED:
+			if dtoMetric.Untyped == nil {
+				return fmt.Errorf(
+					"collected metric %s %s should be Untyped",
+					desc.fqName, dtoMetric,
+				)
+			}
+		case dto.MetricType_HISTOGRAM:
+			if dtoMetric.Histogram == nil {
+				return fmt.Errorf(
+					"collected metric %s %s should be a Histogram",
+					desc.fqName, dtoMetric,
+				)
+			}
+		default:
+			panic("encountered MetricFamily with invalid type")
+		}
+	} else { // New name.
+		metricFamily = &dto.MetricFamily{}
+		metricFamily.Name = proto.String(desc.fqName)
+		metricFamily.Help = proto.String(desc.help)
+		// TODO(beorn7): Simplify switch once Desc has type.
+		switch {
+		case dtoMetric.Gauge != nil:
+			metricFamily.Type = dto.MetricType_GAUGE.Enum()
+		case dtoMetric.Counter != nil:
+			metricFamily.Type = dto.MetricType_COUNTER.Enum()
+		case dtoMetric.Summary != nil:
+			metricFamily.Type = dto.MetricType_SUMMARY.Enum()
+		case dtoMetric.Untyped != nil:
+			metricFamily.Type = dto.MetricType_UNTYPED.Enum()
+		case dtoMetric.Histogram != nil:
+			metricFamily.Type = dto.MetricType_HISTOGRAM.Enum()
+		default:
+			return fmt.Errorf("empty metric collected: %s", dtoMetric)
+		}
+		if err := checkSuffixCollisions(metricFamily, metricFamiliesByName); err != nil {
+			return err
+		}
+		metricFamiliesByName[desc.fqName] = metricFamily
+	}
+	if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes); err != nil {
+		return err
+	}
+	if registeredDescIDs != nil {
+		// Is the desc registered at all?
+		if _, exist := registeredDescIDs[desc.id]; !exist {
+			return fmt.Errorf(
+				"collected metric %s %s with unregistered descriptor %s",
+				metricFamily.GetName(), dtoMetric, desc,
+			)
+		}
+		if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil {
+			return err
+		}
+	}
+	metricFamily.Metric = append(metricFamily.Metric, dtoMetric)
+	return nil
+}
+
+// Gatherers is a slice of Gatherer instances that implements the Gatherer
+// interface itself. Its Gather method calls Gather on all Gatherers in the
+// slice in order and returns the merged results. Errors returned from the
+// Gather calls are all returned in a flattened MultiError. Duplicate and
+// inconsistent Metrics are skipped (first occurrence in slice order wins) and
+// reported in the returned error.
+//
+// Gatherers can be used to merge the Gather results from multiple
+// Registries. It also provides a way to directly inject existing MetricFamily
+// protobufs into the gathering by creating a custom Gatherer with a Gather
+// method that simply returns the existing MetricFamily protobufs. Note that no
+// registration is involved (in contrast to Collector registration), so
+// obviously registration-time checks cannot happen. Any inconsistencies between
+// the gathered MetricFamilies are reported as errors by the Gather method, and
+// inconsistent Metrics are dropped. Invalid parts of the MetricFamilies
+// (e.g. syntactically invalid metric or label names) will go undetected.
+type Gatherers []Gatherer
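+
+// A minimal usage sketch (editorial; the promhttp import is assumed):
+// merging two registries into a single exposition endpoint.
+//
+//	regA, regB := prometheus.NewRegistry(), prometheus.NewRegistry()
+//	merged := prometheus.Gatherers{regA, regB}
+//	http.Handle("/metrics", promhttp.HandlerFor(merged, promhttp.HandlerOpts{}))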
+
+// Gather implements Gatherer.
+func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
+	var (
+		metricFamiliesByName = map[string]*dto.MetricFamily{}
+		metricHashes         = map[uint64]struct{}{}
+		errs                 MultiError // The collected errors to return in the end.
+	)
+
+	for i, g := range gs {
+		mfs, err := g.Gather()
+		if err != nil {
+			if multiErr, ok := err.(MultiError); ok {
+				for _, err := range multiErr {
+					errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err))
+				}
+			} else {
+				errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err))
+			}
+		}
+		for _, mf := range mfs {
+			existingMF, exists := metricFamiliesByName[mf.GetName()]
+			if exists {
+				if existingMF.GetHelp() != mf.GetHelp() {
+					errs = append(errs, fmt.Errorf(
+						"gathered metric family %s has help %q but should have %q",
+						mf.GetName(), mf.GetHelp(), existingMF.GetHelp(),
+					))
+					continue
+				}
+				if existingMF.GetType() != mf.GetType() {
+					errs = append(errs, fmt.Errorf(
+						"gathered metric family %s has type %s but should have %s",
+						mf.GetName(), mf.GetType(), existingMF.GetType(),
+					))
+					continue
+				}
+			} else {
+				existingMF = &dto.MetricFamily{}
+				existingMF.Name = mf.Name
+				existingMF.Help = mf.Help
+				existingMF.Type = mf.Type
+				if err := checkSuffixCollisions(existingMF, metricFamiliesByName); err != nil {
+					errs = append(errs, err)
+					continue
+				}
+				metricFamiliesByName[mf.GetName()] = existingMF
+			}
+			for _, m := range mf.Metric {
+				if err := checkMetricConsistency(existingMF, m, metricHashes); err != nil {
+					errs = append(errs, err)
+					continue
+				}
+				existingMF.Metric = append(existingMF.Metric, m)
+			}
+		}
+	}
+	return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
+}
+
+// checkSuffixCollisions checks for collisions with the “magic” suffixes the
+// Prometheus text format and the internal metric representation of the
+// Prometheus server add while flattening Summaries and Histograms.
+func checkSuffixCollisions(mf *dto.MetricFamily, mfs map[string]*dto.MetricFamily) error {
+	var (
+		newName              = mf.GetName()
+		newType              = mf.GetType()
+		newNameWithoutSuffix = ""
+	)
+	switch {
+	case strings.HasSuffix(newName, "_count"):
+		newNameWithoutSuffix = newName[:len(newName)-6]
+	case strings.HasSuffix(newName, "_sum"):
+		newNameWithoutSuffix = newName[:len(newName)-4]
+	case strings.HasSuffix(newName, "_bucket"):
+		newNameWithoutSuffix = newName[:len(newName)-7]
+	}
+	if newNameWithoutSuffix != "" {
+		if existingMF, ok := mfs[newNameWithoutSuffix]; ok {
+			switch existingMF.GetType() {
+			case dto.MetricType_SUMMARY:
+				if !strings.HasSuffix(newName, "_bucket") {
+					return fmt.Errorf(
+						"collected metric named %q collides with previously collected summary named %q",
+						newName, newNameWithoutSuffix,
+					)
+				}
+			case dto.MetricType_HISTOGRAM:
+				return fmt.Errorf(
+					"collected metric named %q collides with previously collected histogram named %q",
+					newName, newNameWithoutSuffix,
+				)
+			}
+		}
+	}
+	if newType == dto.MetricType_SUMMARY || newType == dto.MetricType_HISTOGRAM {
+		if _, ok := mfs[newName+"_count"]; ok {
+			return fmt.Errorf(
+				"collected histogram or summary named %q collides with previously collected metric named %q",
+				newName, newName+"_count",
+			)
+		}
+		if _, ok := mfs[newName+"_sum"]; ok {
+			return fmt.Errorf(
+				"collected histogram or summary named %q collides with previously collected metric named %q",
+				newName, newName+"_sum",
+			)
+		}
+	}
+	if newType == dto.MetricType_HISTOGRAM {
+		if _, ok := mfs[newName+"_bucket"]; ok {
+			return fmt.Errorf(
+				"collected histogram named %q collides with previously collected metric named %q",
+				newName, newName+"_bucket",
+			)
+		}
+	}
+	return nil
+}
+
+// checkMetricConsistency checks if the provided Metric is consistent with the
+// provided MetricFamily. It also hashes the Metric labels and the MetricFamily
+// name. If the resulting hash is already in the provided metricHashes, an error
+// is returned. If not, it is added to metricHashes.
+func checkMetricConsistency(
+	metricFamily *dto.MetricFamily,
+	dtoMetric *dto.Metric,
+	metricHashes map[uint64]struct{},
+) error {
+	name := metricFamily.GetName()
+
+	// Type consistency with metric family.
+	if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil ||
+		metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil ||
+		metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil ||
+		metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil ||
+		metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil {
+		return fmt.Errorf(
+			"collected metric %q { %s} is not a %s",
+			name, dtoMetric, metricFamily.GetType(),
+		)
+	}
+
+	previousLabelName := ""
+	for _, labelPair := range dtoMetric.GetLabel() {
+		labelName := labelPair.GetName()
+		if labelName == previousLabelName {
+			return fmt.Errorf(
+				"collected metric %q { %s} has two or more labels with the same name: %s",
+				name, dtoMetric, labelName,
+			)
+		}
+		if !checkLabelName(labelName) {
+			return fmt.Errorf(
+				"collected metric %q { %s} has a label with an invalid name: %s",
+				name, dtoMetric, labelName,
+			)
+		}
+		if dtoMetric.Summary != nil && labelName == quantileLabel {
+			return fmt.Errorf(
+				"collected metric %q { %s} must not have an explicit %q label",
+				name, dtoMetric, quantileLabel,
+			)
+		}
+		if !utf8.ValidString(labelPair.GetValue()) {
+			return fmt.Errorf(
+				"collected metric %q { %s} has a label named %q whose value is not utf8: %#v",
+				name, dtoMetric, labelName, labelPair.GetValue())
+		}
+		previousLabelName = labelName
+	}
+
+	// Is the metric unique (i.e. no other metric with the same name and the same labels)?
+	h := hashNew()
+	h = hashAdd(h, name)
+	h = hashAddByte(h, separatorByte)
+	// Make sure label pairs are sorted. We depend on it for the consistency
+	// check.
+	if !sort.IsSorted(labelPairSorter(dtoMetric.Label)) {
+		// We cannot sort dtoMetric.Label in place as it is immutable by contract.
+		copiedLabels := make([]*dto.LabelPair, len(dtoMetric.Label))
+		copy(copiedLabels, dtoMetric.Label)
+		sort.Sort(labelPairSorter(copiedLabels))
+		dtoMetric.Label = copiedLabels
+	}
+	for _, lp := range dtoMetric.Label {
+		h = hashAdd(h, lp.GetName())
+		h = hashAddByte(h, separatorByte)
+		h = hashAdd(h, lp.GetValue())
+		h = hashAddByte(h, separatorByte)
+	}
+	if _, exists := metricHashes[h]; exists {
+		return fmt.Errorf(
+			"collected metric %q { %s} was collected before with the same name and label values",
+			name, dtoMetric,
+		)
+	}
+	metricHashes[h] = struct{}{}
+	return nil
+}
+
+func checkDescConsistency(
+	metricFamily *dto.MetricFamily,
+	dtoMetric *dto.Metric,
+	desc *Desc,
+) error {
+	// Desc help consistency with metric family help.
+	if metricFamily.GetHelp() != desc.help {
+		return fmt.Errorf(
+			"collected metric %s %s has help %q but should have %q",
+			metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help,
+		)
+	}
+
+	// Is the desc consistent with the content of the metric?
+	lpsFromDesc := make([]*dto.LabelPair, len(desc.constLabelPairs), len(dtoMetric.Label))
+	copy(lpsFromDesc, desc.constLabelPairs)
+	for _, l := range desc.variableLabels {
+		lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{
+			Name: proto.String(l),
+		})
+	}
+	if len(lpsFromDesc) != len(dtoMetric.Label) {
+		return fmt.Errorf(
+			"labels in collected metric %s %s are inconsistent with descriptor %s",
+			metricFamily.GetName(), dtoMetric, desc,
+		)
+	}
+	sort.Sort(labelPairSorter(lpsFromDesc))
+	for i, lpFromDesc := range lpsFromDesc {
+		lpFromMetric := dtoMetric.Label[i]
+		if lpFromDesc.GetName() != lpFromMetric.GetName() ||
+			lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() {
+			return fmt.Errorf(
+				"labels in collected metric %s %s are inconsistent with descriptor %s",
+				metricFamily.GetName(), dtoMetric, desc,
+			)
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
new file mode 100644
index 0000000..2980614
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
@@ -0,0 +1,626 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"fmt"
+	"math"
+	"sort"
+	"sync"
+	"time"
+
+	"github.com/beorn7/perks/quantile"
+	"github.com/golang/protobuf/proto"
+
+	dto "github.com/prometheus/client_model/go"
+)
+
+// quantileLabel is used for the label that defines the quantile in a
+// summary.
+const quantileLabel = "quantile"
+
+// A Summary captures individual observations from an event or sample stream and
+// summarizes them in a manner similar to traditional summary statistics: 1. sum
+// of observations, 2. observation count, 3. rank estimations.
+//
+// A typical use-case is the observation of request latencies. By default, a
+// Summary provides the median, the 90th and the 99th percentile of the latency
+// as rank estimations. However, the default behavior will change in the
+// upcoming v0.10 of the library. There will be no rank estimations at all by
+// default. For a sane transition, it is recommended to set the desired rank
+// estimations explicitly.
+//
+// Note that the rank estimations cannot be aggregated in a meaningful way with
+// the Prometheus query language (i.e. you cannot average or add them). If you
+// need aggregatable quantiles (e.g. you want the 99th percentile latency of all
+// queries served across all instances of a service), consider the Histogram
+// metric type. See the Prometheus documentation for more details.
+//
+// To create Summary instances, use NewSummary.
+type Summary interface {
+	Metric
+	Collector
+
+	// Observe adds a single observation to the summary.
+	Observe(float64)
+}
+
+// DefObjectives are the default Summary quantile values.
+//
+// Deprecated: DefObjectives will not be used as the default objectives in
+// v0.10 of the library. The default Summary will have no quantiles then.
+var (
+	DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}
+
+	errQuantileLabelNotAllowed = fmt.Errorf(
+		"%q is not allowed as label name in summaries", quantileLabel,
+	)
+)
+
+// Default values for SummaryOpts.
+const (
+	// DefMaxAge is the default duration for which observations stay
+	// relevant.
+	DefMaxAge time.Duration = 10 * time.Minute
+	// DefAgeBuckets is the default number of buckets used to calculate the
+	// age of observations.
+	DefAgeBuckets = 5
+	// DefBufCap is the standard buffer size for collecting Summary observations.
+	DefBufCap = 500
+)
+
+// SummaryOpts bundles the options for creating a Summary metric. It is
+// mandatory to set Name to a non-empty string. While all other fields are
+// optional and can safely be left at their zero value, it is recommended to set
+// a help string and to explicitly set the Objectives field to the desired value
+// as the default value will change in the upcoming v0.10 of the library.
+type SummaryOpts struct {
+	// Namespace, Subsystem, and Name are components of the fully-qualified
+	// name of the Summary (created by joining these components with
+	// "_"). Only Name is mandatory, the others merely help structuring the
+	// name. Note that the fully-qualified name of the Summary must be a
+	// valid Prometheus metric name.
+	Namespace string
+	Subsystem string
+	Name      string
+
+	// Help provides information about this Summary.
+	//
+	// Metrics with the same fully-qualified name must have the same Help
+	// string.
+	Help string
+
+	// ConstLabels are used to attach fixed labels to this metric. Metrics
+	// with the same fully-qualified name must have the same label names in
+	// their ConstLabels.
+	//
+	// Due to the way a Summary is represented in the Prometheus text format
+	// and how it is handled by the Prometheus server internally, “quantile”
+	// is an illegal label name. Construction of a Summary or SummaryVec
+	// will panic if this label name is used in ConstLabels.
+	//
+	// ConstLabels are only used rarely. In particular, do not use them to
+	// attach the same labels to all your metrics. Those use cases are
+	// better covered by target labels set by the scraping Prometheus
+	// server, or by one specific metric (e.g. a build_info or a
+	// machine_role metric). See also
+	// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels
+	ConstLabels Labels
+
+	// Objectives defines the quantile rank estimates with their respective
+	// absolute error. If Objectives[q] = e, then the value reported for q
+	// will be the φ-quantile value for some φ between q-e and q+e.  The
+	// default value is DefObjectives. It is used if Objectives is left at
+	// its zero value (i.e. nil). To create a Summary without Objectives,
+	// set it to an empty map (i.e. map[float64]float64{}).
+	//
+	// Deprecated: Note that the current value of DefObjectives is
+	// deprecated. It will be replaced by an empty map in v0.10 of the
+	// library. Please explicitly set Objectives to the desired value.
+	Objectives map[float64]float64
+
+	// MaxAge defines the duration for which an observation stays relevant
+	// for the summary. Must be positive. The default value is DefMaxAge.
+	MaxAge time.Duration
+
+	// AgeBuckets is the number of buckets used to exclude observations that
+	// are older than MaxAge from the summary. A higher number has a
+	// resource penalty, so only increase it if the higher resolution is
+	// really required. For very high observation rates, you might want to
+	// reduce the number of age buckets. With only one age bucket, you will
+	// effectively see a complete reset of the summary each time MaxAge has
+	// passed. The default value is DefAgeBuckets.
+	AgeBuckets uint32
+
+	// BufCap defines the default sample stream buffer size.  The default
+	// value of DefBufCap should suffice for most uses. If there is a need
+	// to increase the value, a multiple of 500 is recommended (because that
+	// is the internal buffer size of the underlying package
+	// "github.com/beorn7/perks/quantile").
+	BufCap uint32
+}
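+
+// Since the doc comment above recommends setting Objectives explicitly, here
+// is a minimal, hedged sketch of doing so from client code (the metric name
+// and objective values are illustrative assumptions, not part of this file):
+//
+//     latency := prometheus.NewSummary(prometheus.SummaryOpts{
+//         Name:       "request_duration_seconds",
+//         Help:       "Request latencies in seconds.",
+//         Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
+//     })
+//     latency.Observe(0.42) // duration of one handled request, in seconds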
+
+// Problem with the sliding-window decay algorithm: the Merge method of
+// perks/quantile is actually not working as advertised - and it might be
+// unfixable, as the underlying algorithm is apparently not capable of merging
+// summaries in the first place. To avoid using Merge, we are currently adding
+// observations to _each_ age bucket, i.e. the effort to add a sample is
+// essentially multiplied by the number of age buckets. When rotating age
+// buckets, we empty the previous head stream. On scrape time, we simply take
+// the quantiles from the head stream (no merging required). Result: More effort
+// on observation time, less effort on scrape time, which is exactly the
+// opposite of what we try to accomplish, but at least the results are correct.
+//
+// The quite elegant previous contraption to merge the age buckets efficiently
+// on scrape time (see the code up to commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0)
+// can't be used anymore.
+
+// NewSummary creates a new Summary based on the provided SummaryOpts.
+func NewSummary(opts SummaryOpts) Summary {
+	return newSummary(
+		NewDesc(
+			BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+			opts.Help,
+			nil,
+			opts.ConstLabels,
+		),
+		opts,
+	)
+}
+
+func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
+	if len(desc.variableLabels) != len(labelValues) {
+		panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues))
+	}
+
+	for _, n := range desc.variableLabels {
+		if n == quantileLabel {
+			panic(errQuantileLabelNotAllowed)
+		}
+	}
+	for _, lp := range desc.constLabelPairs {
+		if lp.GetName() == quantileLabel {
+			panic(errQuantileLabelNotAllowed)
+		}
+	}
+
+	if opts.Objectives == nil {
+		opts.Objectives = DefObjectives
+	}
+
+	if opts.MaxAge < 0 {
+		panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge))
+	}
+	if opts.MaxAge == 0 {
+		opts.MaxAge = DefMaxAge
+	}
+
+	if opts.AgeBuckets == 0 {
+		opts.AgeBuckets = DefAgeBuckets
+	}
+
+	if opts.BufCap == 0 {
+		opts.BufCap = DefBufCap
+	}
+
+	s := &summary{
+		desc: desc,
+
+		objectives:       opts.Objectives,
+		sortedObjectives: make([]float64, 0, len(opts.Objectives)),
+
+		labelPairs: makeLabelPairs(desc, labelValues),
+
+		hotBuf:         make([]float64, 0, opts.BufCap),
+		coldBuf:        make([]float64, 0, opts.BufCap),
+		streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets),
+	}
+	s.headStreamExpTime = time.Now().Add(s.streamDuration)
+	s.hotBufExpTime = s.headStreamExpTime
+
+	for i := uint32(0); i < opts.AgeBuckets; i++ {
+		s.streams = append(s.streams, s.newStream())
+	}
+	s.headStream = s.streams[0]
+
+	for qu := range s.objectives {
+		s.sortedObjectives = append(s.sortedObjectives, qu)
+	}
+	sort.Float64s(s.sortedObjectives)
+
+	s.init(s) // Init self-collection.
+	return s
+}
+
+type summary struct {
+	selfCollector
+
+	bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime.
+	mtx    sync.Mutex // Protects every other moving part.
+	// Lock bufMtx before mtx if both are needed.
+
+	desc *Desc
+
+	objectives       map[float64]float64
+	sortedObjectives []float64
+
+	labelPairs []*dto.LabelPair
+
+	sum float64
+	cnt uint64
+
+	hotBuf, coldBuf []float64
+
+	streams                          []*quantile.Stream
+	streamDuration                   time.Duration
+	headStream                       *quantile.Stream
+	headStreamIdx                    int
+	headStreamExpTime, hotBufExpTime time.Time
+}
+
+func (s *summary) Desc() *Desc {
+	return s.desc
+}
+
+func (s *summary) Observe(v float64) {
+	s.bufMtx.Lock()
+	defer s.bufMtx.Unlock()
+
+	now := time.Now()
+	if now.After(s.hotBufExpTime) {
+		s.asyncFlush(now)
+	}
+	s.hotBuf = append(s.hotBuf, v)
+	if len(s.hotBuf) == cap(s.hotBuf) {
+		s.asyncFlush(now)
+	}
+}
+
+func (s *summary) Write(out *dto.Metric) error {
+	sum := &dto.Summary{}
+	qs := make([]*dto.Quantile, 0, len(s.objectives))
+
+	s.bufMtx.Lock()
+	s.mtx.Lock()
+	// Swap bufs even if hotBuf is empty to set new hotBufExpTime.
+	s.swapBufs(time.Now())
+	s.bufMtx.Unlock()
+
+	s.flushColdBuf()
+	sum.SampleCount = proto.Uint64(s.cnt)
+	sum.SampleSum = proto.Float64(s.sum)
+
+	for _, rank := range s.sortedObjectives {
+		var q float64
+		if s.headStream.Count() == 0 {
+			q = math.NaN()
+		} else {
+			q = s.headStream.Query(rank)
+		}
+		qs = append(qs, &dto.Quantile{
+			Quantile: proto.Float64(rank),
+			Value:    proto.Float64(q),
+		})
+	}
+
+	s.mtx.Unlock()
+
+	if len(qs) > 0 {
+		sort.Sort(quantSort(qs))
+	}
+	sum.Quantile = qs
+
+	out.Summary = sum
+	out.Label = s.labelPairs
+	return nil
+}
+
+func (s *summary) newStream() *quantile.Stream {
+	return quantile.NewTargeted(s.objectives)
+}
+
+// asyncFlush needs bufMtx locked.
+func (s *summary) asyncFlush(now time.Time) {
+	s.mtx.Lock()
+	s.swapBufs(now)
+
+	// Unblock the original goroutine that was responsible for the mutation
+	// that triggered the compaction.  But hold onto the global non-buffer
+	// state mutex until the operation finishes.
+	go func() {
+		s.flushColdBuf()
+		s.mtx.Unlock()
+	}()
+}
+
+// maybeRotateStreams needs mtx AND bufMtx locked.
+func (s *summary) maybeRotateStreams() {
+	for !s.hotBufExpTime.Equal(s.headStreamExpTime) {
+		s.headStream.Reset()
+		s.headStreamIdx++
+		if s.headStreamIdx >= len(s.streams) {
+			s.headStreamIdx = 0
+		}
+		s.headStream = s.streams[s.headStreamIdx]
+		s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration)
+	}
+}
+
+// flushColdBuf needs mtx locked.
+func (s *summary) flushColdBuf() {
+	for _, v := range s.coldBuf {
+		for _, stream := range s.streams {
+			stream.Insert(v)
+		}
+		s.cnt++
+		s.sum += v
+	}
+	s.coldBuf = s.coldBuf[0:0]
+	s.maybeRotateStreams()
+}
+
+// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty.
+func (s *summary) swapBufs(now time.Time) {
+	if len(s.coldBuf) != 0 {
+		panic("coldBuf is not empty")
+	}
+	s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf
+	// hotBuf is now empty and gets new expiration set.
+	for now.After(s.hotBufExpTime) {
+		s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration)
+	}
+}
+
+type quantSort []*dto.Quantile
+
+func (s quantSort) Len() int {
+	return len(s)
+}
+
+func (s quantSort) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+func (s quantSort) Less(i, j int) bool {
+	return s[i].GetQuantile() < s[j].GetQuantile()
+}
+
+// SummaryVec is a Collector that bundles a set of Summaries that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. HTTP request latencies, partitioned by status code and method). Create
+// instances with NewSummaryVec.
+type SummaryVec struct {
+	*metricVec
+}
+
+// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
+// partitioned by the given label names.
+//
+// Due to the way a Summary is represented in the Prometheus text format and how
+// it is handled by the Prometheus server internally, “quantile” is an illegal
+// label name. NewSummaryVec will panic if this label name is used.
+func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
+	for _, ln := range labelNames {
+		if ln == quantileLabel {
+			panic(errQuantileLabelNotAllowed)
+		}
+	}
+	desc := NewDesc(
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		opts.Help,
+		labelNames,
+		opts.ConstLabels,
+	)
+	return &SummaryVec{
+		metricVec: newMetricVec(desc, func(lvs ...string) Metric {
+			return newSummary(desc, opts, lvs...)
+		}),
+	}
+}
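+
+// A hedged usage sketch for SummaryVec (metric name, label names, and the
+// observed value are illustrative assumptions):
+//
+//     latencies := prometheus.NewSummaryVec(
+//         prometheus.SummaryOpts{
+//             Name: "http_request_duration_seconds",
+//             Help: "HTTP request latencies in seconds.",
+//         },
+//         []string{"code", "method"},
+//     )
+//     latencies.WithLabelValues("200", "GET").Observe(0.12)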
+
+// GetMetricWithLabelValues returns the Summary for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new Summary is created.
+//
+// It is possible to call this method without using the returned Summary to only
+// create the new Summary but leave it at its starting value, a Summary without
+// any observations.
+//
+// Keeping the Summary for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Summary from the SummaryVec. In that case,
+// the Summary will still exist, but it will not be exported anymore, even if a
+// Summary with the same label values is created later. See also the CounterVec
+// example.
+//
+// An error is returned if the number of label values is not the same as the
+// number of VariableLabels in Desc (minus any curried labels).
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the GaugeVec example.
+func (v *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
+	metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
+	if metric != nil {
+		return metric.(Observer), err
+	}
+	return nil, err
+}
+
+// GetMetricWith returns the Summary for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Summary is created. Implications of
+// creating a Summary without using it and keeping the Summary for later use are
+// the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc (minus any curried labels).
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) {
+	metric, err := v.metricVec.getMetricWith(labels)
+	if metric != nil {
+		return metric.(Observer), err
+	}
+	return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. Not returning an
+// error allows shortcuts like
+//     myVec.WithLabelValues("404", "GET").Observe(42.21)
+func (v *SummaryVec) WithLabelValues(lvs ...string) Observer {
+	s, err := v.GetMetricWithLabelValues(lvs...)
+	if err != nil {
+		panic(err)
+	}
+	return s
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. Not returning an error allows shortcuts like
+//     myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
+func (v *SummaryVec) With(labels Labels) Observer {
+	s, err := v.GetMetricWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return s
+}
+
+// CurryWith returns a vector curried with the provided labels, i.e. the
+// returned vector has those labels pre-set for all labeled operations performed
+// on it. The cardinality of the curried vector is reduced accordingly. The
+// order of the remaining labels stays the same (just with the curried labels
+// taken out of the sequence – which is relevant for the
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
+// vector, but only with labels not yet used for currying before.
+//
+// The metrics contained in the SummaryVec are shared between the curried and
+// uncurried vectors. They are just accessed differently. Curried and uncurried
+// vectors behave identically in terms of collection. Only one must be
+// registered with a given registry (usually the uncurried version). The Reset
+// method deletes all metrics, even if called on a curried vector.
+func (v *SummaryVec) CurryWith(labels Labels) (ObserverVec, error) {
+	vec, err := v.curryWith(labels)
+	if vec != nil {
+		return &SummaryVec{vec}, err
+	}
+	return nil, err
+}
+
+// MustCurryWith works as CurryWith but panics where CurryWith would have
+// returned an error.
+func (v *SummaryVec) MustCurryWith(labels Labels) ObserverVec {
+	vec, err := v.CurryWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return vec
+}
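+
+// A hedged currying sketch, assuming the SummaryVec from the NewSummaryVec
+// example above (partitioned by "code" and "method"):
+//
+//     getOnly := latencies.MustCurryWith(prometheus.Labels{"method": "GET"})
+//     // "method" is now pre-set; only the remaining label value is passed.
+//     getOnly.WithLabelValues("200").Observe(0.08)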
+
+type constSummary struct {
+	desc       *Desc
+	count      uint64
+	sum        float64
+	quantiles  map[float64]float64
+	labelPairs []*dto.LabelPair
+}
+
+func (s *constSummary) Desc() *Desc {
+	return s.desc
+}
+
+func (s *constSummary) Write(out *dto.Metric) error {
+	sum := &dto.Summary{}
+	qs := make([]*dto.Quantile, 0, len(s.quantiles))
+
+	sum.SampleCount = proto.Uint64(s.count)
+	sum.SampleSum = proto.Float64(s.sum)
+
+	for rank, q := range s.quantiles {
+		qs = append(qs, &dto.Quantile{
+			Quantile: proto.Float64(rank),
+			Value:    proto.Float64(q),
+		})
+	}
+
+	if len(qs) > 0 {
+		sort.Sort(quantSort(qs))
+	}
+	sum.Quantile = qs
+
+	out.Summary = sum
+	out.Label = s.labelPairs
+
+	return nil
+}
+
+// NewConstSummary returns a metric representing a Prometheus summary with fixed
+// values for the count, sum, and quantiles. As those parameters cannot be
+// changed, the returned value does not implement the Summary interface (but
+// only the Metric interface). Users of this package will not have much use for
+// it in regular operations. However, when implementing custom Collectors, it is
+// useful as a throw-away metric that is generated on the fly to send it to
+// Prometheus in the Collect method.
+//
+// quantiles maps ranks to quantile values. For example, a median latency of
+// 0.23s and a 99th percentile latency of 0.56s would be expressed as:
+//     map[float64]float64{0.5: 0.23, 0.99: 0.56}
+//
+// NewConstSummary returns an error if the length of labelValues is not
+// consistent with the variable labels in Desc or if Desc is invalid.
+func NewConstSummary(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	quantiles map[float64]float64,
+	labelValues ...string,
+) (Metric, error) {
+	if desc.err != nil {
+		return nil, desc.err
+	}
+	if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
+		return nil, err
+	}
+	return &constSummary{
+		desc:       desc,
+		count:      count,
+		sum:        sum,
+		quantiles:  quantiles,
+		labelPairs: makeLabelPairs(desc, labelValues),
+	}, nil
+}
+
+// MustNewConstSummary is a version of NewConstSummary that panics where
+// NewConstSummary would have returned an error.
+func MustNewConstSummary(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	quantiles map[float64]float64,
+	labelValues ...string,
+) Metric {
+	m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...)
+	if err != nil {
+		panic(err)
+	}
+	return m
+}
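+
+// A hedged sketch of emitting a constant summary from a custom Collector's
+// Collect method (the myCollector type, its desc field, and all numbers are
+// illustrative assumptions):
+//
+//     func (c *myCollector) Collect(ch chan<- prometheus.Metric) {
+//         ch <- prometheus.MustNewConstSummary(
+//             c.desc, 4711, 403.42,
+//             map[float64]float64{0.5: 0.23, 0.99: 0.56},
+//         )
+//     }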
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/vendor/github.com/prometheus/client_golang/prometheus/timer.go
new file mode 100644
index 0000000..8d5f105
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/timer.go
@@ -0,0 +1,54 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import "time"
+
+// Timer is a helper type to time functions. Use NewTimer to create new
+// instances.
+type Timer struct {
+	begin    time.Time
+	observer Observer
+}
+
+// NewTimer creates a new Timer. The provided Observer is used to observe a
+// duration in seconds. Timer is usually used to time a function call in the
+// following way:
+//    func TimeMe() {
+//        timer := NewTimer(myHistogram)
+//        defer timer.ObserveDuration()
+//        // Do actual work.
+//    }
+func NewTimer(o Observer) *Timer {
+	return &Timer{
+		begin:    time.Now(),
+		observer: o,
+	}
+}
+
+// ObserveDuration records the duration passed since the Timer was created with
+// NewTimer. It calls the Observe method of the Observer provided during
+// construction with the duration in seconds as an argument. The observed
+// duration is also returned. ObserveDuration is usually called with a defer
+// statement.
+//
+// Note that this method is only guaranteed to never observe negative durations
+// if used with Go1.9+.
+func (t *Timer) ObserveDuration() time.Duration {
+	d := time.Since(t.begin)
+	if t.observer != nil {
+		t.observer.Observe(d.Seconds())
+	}
+	return d
+}
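+
+// A hedged sketch of also using the returned duration (myObserver and doWork
+// are illustrative assumptions):
+//
+//     timer := prometheus.NewTimer(myObserver)
+//     doWork()
+//     elapsed := timer.ObserveDuration() // observes seconds, returns time.Duration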
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
new file mode 100644
index 0000000..0f9ce63
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
@@ -0,0 +1,42 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// UntypedOpts is an alias for Opts. See there for doc comments.
+type UntypedOpts Opts
+
+// UntypedFunc works like GaugeFunc but the collected metric is of type
+// "Untyped". UntypedFunc is useful to mirror an external metric of unknown
+// type.
+//
+// To create UntypedFunc instances, use NewUntypedFunc.
+type UntypedFunc interface {
+	Metric
+	Collector
+}
+
+// NewUntypedFunc creates a new UntypedFunc based on the provided
+// UntypedOpts. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where an UntypedFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe.
+func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc {
+	return newValueFunc(NewDesc(
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		opts.Help,
+		nil,
+		opts.ConstLabels,
+	), UntypedValue, function)
+}
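+
+// A hedged sketch of mirroring an external value of unknown type (the metric
+// name and the readExternalValue helper are illustrative assumptions):
+//
+//     uf := prometheus.NewUntypedFunc(prometheus.UntypedOpts{
+//         Name: "external_queue_depth",
+//         Help: "Depth of an externally managed queue.",
+//     }, func() float64 {
+//         return readExternalValue() // must be concurrency-safe
+//     })
+//     prometheus.MustRegister(uf)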
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go
new file mode 100644
index 0000000..eb248f1
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go
@@ -0,0 +1,162 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"fmt"
+	"sort"
+
+	"github.com/golang/protobuf/proto"
+
+	dto "github.com/prometheus/client_model/go"
+)
+
+// ValueType is an enumeration of metric types that represent a simple value.
+type ValueType int
+
+// Possible values for the ValueType enum.
+const (
+	_ ValueType = iota
+	CounterValue
+	GaugeValue
+	UntypedValue
+)
+
+// valueFunc is a generic metric for simple values retrieved on collect time
+// from a function. It implements Metric and Collector. Its effective type is
+// determined by ValueType. This is a low-level building block used by the
+// library to back the implementations of CounterFunc, GaugeFunc, and
+// UntypedFunc.
+type valueFunc struct {
+	selfCollector
+
+	desc       *Desc
+	valType    ValueType
+	function   func() float64
+	labelPairs []*dto.LabelPair
+}
+
+// newValueFunc returns a newly allocated valueFunc with the given Desc and
+// ValueType. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where a valueFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe.
+func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc {
+	result := &valueFunc{
+		desc:       desc,
+		valType:    valueType,
+		function:   function,
+		labelPairs: makeLabelPairs(desc, nil),
+	}
+	result.init(result)
+	return result
+}
+
+func (v *valueFunc) Desc() *Desc {
+	return v.desc
+}
+
+func (v *valueFunc) Write(out *dto.Metric) error {
+	return populateMetric(v.valType, v.function(), v.labelPairs, out)
+}
+
+// NewConstMetric returns a metric with one fixed value that cannot be
+// changed. Users of this package will not have much use for it in regular
+// operations. However, when implementing custom Collectors, it is useful as a
+// throw-away metric that is generated on the fly to send it to Prometheus in
+// the Collect method. NewConstMetric returns an error if the length of
+// labelValues is not consistent with the variable labels in Desc or if Desc is
+// invalid.
+func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) {
+	if desc.err != nil {
+		return nil, desc.err
+	}
+	if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
+		return nil, err
+	}
+	return &constMetric{
+		desc:       desc,
+		valType:    valueType,
+		val:        value,
+		labelPairs: makeLabelPairs(desc, labelValues),
+	}, nil
+}
+
+// MustNewConstMetric is a version of NewConstMetric that panics where
+// NewConstMetric would have returned an error.
+func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric {
+	m, err := NewConstMetric(desc, valueType, value, labelValues...)
+	if err != nil {
+		panic(err)
+	}
+	return m
+}
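+
+// A hedged sketch of typical use inside a custom Collector's Collect method
+// (the desc field and the value are illustrative assumptions):
+//
+//     ch <- prometheus.MustNewConstMetric(
+//         c.desc, prometheus.CounterValue, 42, "some_label_value",
+//     )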
+
+type constMetric struct {
+	desc       *Desc
+	valType    ValueType
+	val        float64
+	labelPairs []*dto.LabelPair
+}
+
+func (m *constMetric) Desc() *Desc {
+	return m.desc
+}
+
+func (m *constMetric) Write(out *dto.Metric) error {
+	return populateMetric(m.valType, m.val, m.labelPairs, out)
+}
+
+func populateMetric(
+	t ValueType,
+	v float64,
+	labelPairs []*dto.LabelPair,
+	m *dto.Metric,
+) error {
+	m.Label = labelPairs
+	switch t {
+	case CounterValue:
+		m.Counter = &dto.Counter{Value: proto.Float64(v)}
+	case GaugeValue:
+		m.Gauge = &dto.Gauge{Value: proto.Float64(v)}
+	case UntypedValue:
+		m.Untyped = &dto.Untyped{Value: proto.Float64(v)}
+	default:
+		return fmt.Errorf("encountered unknown type %v", t)
+	}
+	return nil
+}
+
+func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
+	totalLen := len(desc.variableLabels) + len(desc.constLabelPairs)
+	if totalLen == 0 {
+		// Super fast path.
+		return nil
+	}
+	if len(desc.variableLabels) == 0 {
+		// Moderately fast path.
+		return desc.constLabelPairs
+	}
+	labelPairs := make([]*dto.LabelPair, 0, totalLen)
+	for i, n := range desc.variableLabels {
+		labelPairs = append(labelPairs, &dto.LabelPair{
+			Name:  proto.String(n),
+			Value: proto.String(labelValues[i]),
+		})
+	}
+	labelPairs = append(labelPairs, desc.constLabelPairs...)
+	sort.Sort(labelPairSorter(labelPairs))
+	return labelPairs
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
new file mode 100644
index 0000000..14ed9e8
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
@@ -0,0 +1,472 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"fmt"
+	"sync"
+
+	"github.com/prometheus/common/model"
+)
+
+// metricVec is a Collector to bundle metrics of the same name that differ in
+// their label values. metricVec is not used directly (and therefore
+// unexported). It is used as a building block for implementations of vectors of
+// a given metric type, like GaugeVec, CounterVec, SummaryVec, and HistogramVec.
+// It also handles label currying. It uses metricMap internally.
+type metricVec struct {
+	*metricMap
+
+	curry []curriedLabelValue
+
+	// hashAdd and hashAddByte can be replaced for testing collision handling.
+	hashAdd     func(h uint64, s string) uint64
+	hashAddByte func(h uint64, b byte) uint64
+}
+
+// newMetricVec returns an initialized metricVec.
+func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *metricVec {
+	return &metricVec{
+		metricMap: &metricMap{
+			metrics:   map[uint64][]metricWithLabelValues{},
+			desc:      desc,
+			newMetric: newMetric,
+		},
+		hashAdd:     hashAdd,
+		hashAddByte: hashAddByte,
+	}
+}
+
+// DeleteLabelValues removes the metric where the variable labels are the same
+// as those passed in as labels (same order as the VariableLabels in Desc). It
+// returns true if a metric was deleted.
+//
+// It is not an error if the number of label values is not the same as the
+// number of VariableLabels in Desc. However, such inconsistent label count can
+// never match an actual metric, so the method will always return false in that
+// case.
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider Delete(Labels) as an
+// alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the CounterVec example.
+func (m *metricVec) DeleteLabelValues(lvs ...string) bool {
+	h, err := m.hashLabelValues(lvs)
+	if err != nil {
+		return false
+	}
+
+	return m.metricMap.deleteByHashWithLabelValues(h, lvs, m.curry)
+}
+
+// Delete deletes the metric where the variable labels are the same as those
+// passed in as labels. It returns true if a metric was deleted.
+//
+// It is not an error if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc. However, such inconsistent Labels
+// can never match an actual metric, so the method will always return false in
+// that case.
+//
+// This method is used for the same purpose as DeleteLabelValues(...string). See
+// there for pros and cons of the two methods.
+func (m *metricVec) Delete(labels Labels) bool {
+	h, err := m.hashLabels(labels)
+	if err != nil {
+		return false
+	}
+
+	return m.metricMap.deleteByHashWithLabels(h, labels, m.curry)
+}
+
+func (m *metricVec) curryWith(labels Labels) (*metricVec, error) {
+	var (
+		newCurry []curriedLabelValue
+		oldCurry = m.curry
+		iCurry   int
+	)
+	for i, label := range m.desc.variableLabels {
+		val, ok := labels[label]
+		if iCurry < len(oldCurry) && oldCurry[iCurry].index == i {
+			if ok {
+				return nil, fmt.Errorf("label name %q is already curried", label)
+			}
+			newCurry = append(newCurry, oldCurry[iCurry])
+			iCurry++
+		} else {
+			if !ok {
+				continue // Label stays uncurried.
+			}
+			newCurry = append(newCurry, curriedLabelValue{i, val})
+		}
+	}
+	if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 {
+		return nil, fmt.Errorf("%d unknown label(s) found during currying", l)
+	}
+
+	return &metricVec{
+		metricMap:   m.metricMap,
+		curry:       newCurry,
+		hashAdd:     m.hashAdd,
+		hashAddByte: m.hashAddByte,
+	}, nil
+}
+
+func (m *metricVec) getMetricWithLabelValues(lvs ...string) (Metric, error) {
+	h, err := m.hashLabelValues(lvs)
+	if err != nil {
+		return nil, err
+	}
+
+	return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil
+}
+
+func (m *metricVec) getMetricWith(labels Labels) (Metric, error) {
+	h, err := m.hashLabels(labels)
+	if err != nil {
+		return nil, err
+	}
+
+	return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil
+}
+
+func (m *metricVec) hashLabelValues(vals []string) (uint64, error) {
+	if err := validateLabelValues(vals, len(m.desc.variableLabels)-len(m.curry)); err != nil {
+		return 0, err
+	}
+
+	var (
+		h             = hashNew()
+		curry         = m.curry
+		iVals, iCurry int
+	)
+	for i := 0; i < len(m.desc.variableLabels); i++ {
+		if iCurry < len(curry) && curry[iCurry].index == i {
+			h = m.hashAdd(h, curry[iCurry].value)
+			iCurry++
+		} else {
+			h = m.hashAdd(h, vals[iVals])
+			iVals++
+		}
+		h = m.hashAddByte(h, model.SeparatorByte)
+	}
+	return h, nil
+}
+
+func (m *metricVec) hashLabels(labels Labels) (uint64, error) {
+	if err := validateValuesInLabels(labels, len(m.desc.variableLabels)-len(m.curry)); err != nil {
+		return 0, err
+	}
+
+	var (
+		h      = hashNew()
+		curry  = m.curry
+		iCurry int
+	)
+	for i, label := range m.desc.variableLabels {
+		val, ok := labels[label]
+		if iCurry < len(curry) && curry[iCurry].index == i {
+			if ok {
+				return 0, fmt.Errorf("label name %q is already curried", label)
+			}
+			h = m.hashAdd(h, curry[iCurry].value)
+			iCurry++
+		} else {
+			if !ok {
+				return 0, fmt.Errorf("label name %q missing in label map", label)
+			}
+			h = m.hashAdd(h, val)
+		}
+		h = m.hashAddByte(h, model.SeparatorByte)
+	}
+	return h, nil
+}
+
+// metricWithLabelValues provides the metric and its label values for
+// disambiguation on hash collision.
+type metricWithLabelValues struct {
+	values []string
+	metric Metric
+}
+
+// curriedLabelValue sets the curried value for a label at the given index.
+type curriedLabelValue struct {
+	index int
+	value string
+}
+
+// metricMap is a helper for metricVec and shared between differently curried
+// metricVecs.
+type metricMap struct {
+	mtx       sync.RWMutex // Protects metrics.
+	metrics   map[uint64][]metricWithLabelValues
+	desc      *Desc
+	newMetric func(labelValues ...string) Metric
+}
+
+// Describe implements Collector. It will send exactly one Desc to the provided
+// channel.
+func (m *metricMap) Describe(ch chan<- *Desc) {
+	ch <- m.desc
+}
+
+// Collect implements Collector.
+func (m *metricMap) Collect(ch chan<- Metric) {
+	m.mtx.RLock()
+	defer m.mtx.RUnlock()
+
+	for _, metrics := range m.metrics {
+		for _, metric := range metrics {
+			ch <- metric.metric
+		}
+	}
+}
+
+// Reset deletes all metrics in this vector.
+func (m *metricMap) Reset() {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+
+	for h := range m.metrics {
+		delete(m.metrics, h)
+	}
+}
+
+// deleteByHashWithLabelValues removes the metric from the hash bucket h. If
+// there are multiple matches in the bucket, use lvs to select a metric and
+// remove only that metric.
+func (m *metricMap) deleteByHashWithLabelValues(
+	h uint64, lvs []string, curry []curriedLabelValue,
+) bool {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+
+	metrics, ok := m.metrics[h]
+	if !ok {
+		return false
+	}
+
+	i := findMetricWithLabelValues(metrics, lvs, curry)
+	if i >= len(metrics) {
+		return false
+	}
+
+	if len(metrics) > 1 {
+		m.metrics[h] = append(metrics[:i], metrics[i+1:]...)
+	} else {
+		delete(m.metrics, h)
+	}
+	return true
+}
+
+// deleteByHashWithLabels removes the metric from the hash bucket h. If there
+// are multiple matches in the bucket, use labels to select a metric and remove
+// only that metric.
+func (m *metricMap) deleteByHashWithLabels(
+	h uint64, labels Labels, curry []curriedLabelValue,
+) bool {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+
+	metrics, ok := m.metrics[h]
+	if !ok {
+		return false
+	}
+	i := findMetricWithLabels(m.desc, metrics, labels, curry)
+	if i >= len(metrics) {
+		return false
+	}
+
+	if len(metrics) > 1 {
+		m.metrics[h] = append(metrics[:i], metrics[i+1:]...)
+	} else {
+		delete(m.metrics, h)
+	}
+	return true
+}
+
+// getOrCreateMetricWithLabelValues retrieves the metric by hash and label values
+// or creates it and returns the new one.
+//
+// This function acquires the mutex itself; callers must not hold it.
+func (m *metricMap) getOrCreateMetricWithLabelValues(
+	hash uint64, lvs []string, curry []curriedLabelValue,
+) Metric {
+	m.mtx.RLock()
+	metric, ok := m.getMetricWithHashAndLabelValues(hash, lvs, curry)
+	m.mtx.RUnlock()
+	if ok {
+		return metric
+	}
+
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+	metric, ok = m.getMetricWithHashAndLabelValues(hash, lvs, curry)
+	if !ok {
+		inlinedLVs := inlineLabelValues(lvs, curry)
+		metric = m.newMetric(inlinedLVs...)
+		m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: inlinedLVs, metric: metric})
+	}
+	return metric
+}
+
+// getOrCreateMetricWithLabels retrieves the metric by hash and label map
+// or creates it and returns the new one.
+//
+// This function acquires the mutex itself; callers must not hold it.
+func (m *metricMap) getOrCreateMetricWithLabels(
+	hash uint64, labels Labels, curry []curriedLabelValue,
+) Metric {
+	m.mtx.RLock()
+	metric, ok := m.getMetricWithHashAndLabels(hash, labels, curry)
+	m.mtx.RUnlock()
+	if ok {
+		return metric
+	}
+
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+	metric, ok = m.getMetricWithHashAndLabels(hash, labels, curry)
+	if !ok {
+		lvs := extractLabelValues(m.desc, labels, curry)
+		metric = m.newMetric(lvs...)
+		m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: lvs, metric: metric})
+	}
+	return metric
+}
+
+// getMetricWithHashAndLabelValues gets a metric while handling possible
+// collisions in the hash space. Must be called while holding the read mutex.
+func (m *metricMap) getMetricWithHashAndLabelValues(
+	h uint64, lvs []string, curry []curriedLabelValue,
+) (Metric, bool) {
+	metrics, ok := m.metrics[h]
+	if ok {
+		if i := findMetricWithLabelValues(metrics, lvs, curry); i < len(metrics) {
+			return metrics[i].metric, true
+		}
+	}
+	return nil, false
+}
+
+// getMetricWithHashAndLabels gets a metric while handling possible collisions in
+// the hash space. Must be called while holding read mutex.
+func (m *metricMap) getMetricWithHashAndLabels(
+	h uint64, labels Labels, curry []curriedLabelValue,
+) (Metric, bool) {
+	metrics, ok := m.metrics[h]
+	if ok {
+		if i := findMetricWithLabels(m.desc, metrics, labels, curry); i < len(metrics) {
+			return metrics[i].metric, true
+		}
+	}
+	return nil, false
+}
+
+// findMetricWithLabelValues returns the index of the matching metric or
+// len(metrics) if not found.
+func findMetricWithLabelValues(
+	metrics []metricWithLabelValues, lvs []string, curry []curriedLabelValue,
+) int {
+	for i, metric := range metrics {
+		if matchLabelValues(metric.values, lvs, curry) {
+			return i
+		}
+	}
+	return len(metrics)
+}
+
+// findMetricWithLabels returns the index of the matching metric or len(metrics)
+// if not found.
+func findMetricWithLabels(
+	desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue,
+) int {
+	for i, metric := range metrics {
+		if matchLabels(desc, metric.values, labels, curry) {
+			return i
+		}
+	}
+	return len(metrics)
+}
+
+func matchLabelValues(values []string, lvs []string, curry []curriedLabelValue) bool {
+	if len(values) != len(lvs)+len(curry) {
+		return false
+	}
+	var iLVs, iCurry int
+	for i, v := range values {
+		if iCurry < len(curry) && curry[iCurry].index == i {
+			if v != curry[iCurry].value {
+				return false
+			}
+			iCurry++
+			continue
+		}
+		if v != lvs[iLVs] {
+			return false
+		}
+		iLVs++
+	}
+	return true
+}
+
+func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool {
+	if len(values) != len(labels)+len(curry) {
+		return false
+	}
+	iCurry := 0
+	for i, k := range desc.variableLabels {
+		if iCurry < len(curry) && curry[iCurry].index == i {
+			if values[i] != curry[iCurry].value {
+				return false
+			}
+			iCurry++
+			continue
+		}
+		if values[i] != labels[k] {
+			return false
+		}
+	}
+	return true
+}
+
+func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string {
+	labelValues := make([]string, len(labels)+len(curry))
+	iCurry := 0
+	for i, k := range desc.variableLabels {
+		if iCurry < len(curry) && curry[iCurry].index == i {
+			labelValues[i] = curry[iCurry].value
+			iCurry++
+			continue
+		}
+		labelValues[i] = labels[k]
+	}
+	return labelValues
+}
+
+func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string {
+	labelValues := make([]string, len(lvs)+len(curry))
+	var iCurry, iLVs int
+	for i := range labelValues {
+		if iCurry < len(curry) && curry[iCurry].index == i {
+			labelValues[i] = curry[iCurry].value
+			iCurry++
+			continue
+		}
+		labelValues[i] = lvs[iLVs]
+		iLVs++
+	}
+	return labelValues
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
new file mode 100644
index 0000000..49159bf
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
@@ -0,0 +1,179 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"fmt"
+	"sort"
+
+	"github.com/golang/protobuf/proto"
+
+	dto "github.com/prometheus/client_model/go"
+)
+
+// WrapRegistererWith returns a Registerer wrapping the provided
+// Registerer. Collectors registered with the returned Registerer will be
+// registered with the wrapped Registerer in a modified way. The modified
+// Collector adds the provided Labels to all Metrics it collects (as
+// ConstLabels). The Metrics collected by the unmodified Collector must not
+// duplicate any of those labels.
+//
+// WrapRegistererWith provides a way to add fixed labels to a subset of
+// Collectors. It should not be used to add fixed labels to all metrics exposed.
+//
+// The Collector example demonstrates a use of WrapRegistererWith.
+func WrapRegistererWith(labels Labels, reg Registerer) Registerer {
+	return &wrappingRegisterer{
+		wrappedRegisterer: reg,
+		labels:            labels,
+	}
+}
+
+// WrapRegistererWithPrefix returns a Registerer wrapping the provided
+// Registerer. Collectors registered with the returned Registerer will be
+// registered with the wrapped Registerer in a modified way. The modified
+// Collector adds the provided prefix to the name of all Metrics it collects.
+//
+// WrapRegistererWithPrefix is useful to have one place to prefix all metrics of
+// a sub-system. To make this work, register metrics of the sub-system with the
+// wrapping Registerer returned by WrapRegistererWithPrefix. It is rarely useful
+// to use the same prefix for all metrics exposed. In particular, do not prefix
+// metric names that are standardized across applications, as that would break
+// horizontal monitoring, for example the metrics provided by the Go collector
+// (see NewGoCollector) and the process collector (see NewProcessCollector). (In
+// fact, those metrics are already prefixed with “go_” or “process_”,
+// respectively.)
+func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer {
+	return &wrappingRegisterer{
+		wrappedRegisterer: reg,
+		prefix:            prefix,
+	}
+}
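+
+// A hedged sketch combining both wrappers (registry, labels, and collector
+// are illustrative assumptions):
+//
+//     reg := prometheus.NewRegistry()
+//     wrapped := prometheus.WrapRegistererWith(
+//         prometheus.Labels{"subsystem": "cache"},
+//         prometheus.WrapRegistererWithPrefix("myapp_", reg),
+//     )
+//     wrapped.MustRegister(myCollector)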
+
+type wrappingRegisterer struct {
+	wrappedRegisterer Registerer
+	prefix            string
+	labels            Labels
+}
+
+func (r *wrappingRegisterer) Register(c Collector) error {
+	return r.wrappedRegisterer.Register(&wrappingCollector{
+		wrappedCollector: c,
+		prefix:           r.prefix,
+		labels:           r.labels,
+	})
+}
+
+func (r *wrappingRegisterer) MustRegister(cs ...Collector) {
+	for _, c := range cs {
+		if err := r.Register(c); err != nil {
+			panic(err)
+		}
+	}
+}
+
+func (r *wrappingRegisterer) Unregister(c Collector) bool {
+	return r.wrappedRegisterer.Unregister(&wrappingCollector{
+		wrappedCollector: c,
+		prefix:           r.prefix,
+		labels:           r.labels,
+	})
+}
+
+type wrappingCollector struct {
+	wrappedCollector Collector
+	prefix           string
+	labels           Labels
+}
+
+func (c *wrappingCollector) Collect(ch chan<- Metric) {
+	wrappedCh := make(chan Metric)
+	go func() {
+		c.wrappedCollector.Collect(wrappedCh)
+		close(wrappedCh)
+	}()
+	for m := range wrappedCh {
+		ch <- &wrappingMetric{
+			wrappedMetric: m,
+			prefix:        c.prefix,
+			labels:        c.labels,
+		}
+	}
+}
+
+func (c *wrappingCollector) Describe(ch chan<- *Desc) {
+	wrappedCh := make(chan *Desc)
+	go func() {
+		c.wrappedCollector.Describe(wrappedCh)
+		close(wrappedCh)
+	}()
+	for desc := range wrappedCh {
+		ch <- wrapDesc(desc, c.prefix, c.labels)
+	}
+}
+
+type wrappingMetric struct {
+	wrappedMetric Metric
+	prefix        string
+	labels        Labels
+}
+
+func (m *wrappingMetric) Desc() *Desc {
+	return wrapDesc(m.wrappedMetric.Desc(), m.prefix, m.labels)
+}
+
+func (m *wrappingMetric) Write(out *dto.Metric) error {
+	if err := m.wrappedMetric.Write(out); err != nil {
+		return err
+	}
+	if len(m.labels) == 0 {
+		// No wrapping labels.
+		return nil
+	}
+	for ln, lv := range m.labels {
+		out.Label = append(out.Label, &dto.LabelPair{
+			Name:  proto.String(ln),
+			Value: proto.String(lv),
+		})
+	}
+	sort.Sort(labelPairSorter(out.Label))
+	return nil
+}
+
+func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc {
+	constLabels := Labels{}
+	for _, lp := range desc.constLabelPairs {
+		constLabels[*lp.Name] = *lp.Value
+	}
+	for ln, lv := range labels {
+		if _, alreadyUsed := constLabels[ln]; alreadyUsed {
+			return &Desc{
+				fqName:          desc.fqName,
+				help:            desc.help,
+				variableLabels:  desc.variableLabels,
+				constLabelPairs: desc.constLabelPairs,
+				err:             fmt.Errorf("attempted wrapping with already existing label name %q", ln),
+			}
+		}
+		constLabels[ln] = lv
+	}
+	// NewDesc will do remaining validations.
+	newDesc := NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels)
+	// Propagate errors if there were any. This will override any error
+	// created by NewDesc above, i.e. earlier errors get precedence.
+	if desc.err != nil {
+		newDesc.err = desc.err
+	}
+	return newDesc
+}
diff --git a/vendor/github.com/prometheus/client_model/LICENSE b/vendor/github.com/prometheus/client_model/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/prometheus/client_model/NOTICE b/vendor/github.com/prometheus/client_model/NOTICE
new file mode 100644
index 0000000..20110e4
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/NOTICE
@@ -0,0 +1,5 @@
+Data model artifacts for Prometheus.
+Copyright 2012-2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/vendor/github.com/prometheus/client_model/ruby/LICENSE b/vendor/github.com/prometheus/client_model/ruby/LICENSE
new file mode 100644
index 0000000..11069ed
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/ruby/LICENSE
@@ -0,0 +1,201 @@
+                              Apache License
+                        Version 2.0, January 2004
+                     http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+   "License" shall mean the terms and conditions for use, reproduction,
+   and distribution as defined by Sections 1 through 9 of this document.
+
+   "Licensor" shall mean the copyright owner or entity authorized by
+   the copyright owner that is granting the License.
+
+   "Legal Entity" shall mean the union of the acting entity and all
+   other entities that control, are controlled by, or are under common
+   control with that entity. For the purposes of this definition,
+   "control" means (i) the power, direct or indirect, to cause the
+   direction or management of such entity, whether by contract or
+   otherwise, or (ii) ownership of fifty percent (50%) or more of the
+   outstanding shares, or (iii) beneficial ownership of such entity.
+
+   "You" (or "Your") shall mean an individual or Legal Entity
+   exercising permissions granted by this License.
+
+   "Source" form shall mean the preferred form for making modifications,
+   including but not limited to software source code, documentation
+   source, and configuration files.
+
+   "Object" form shall mean any form resulting from mechanical
+   transformation or translation of a Source form, including but
+   not limited to compiled object code, generated documentation,
+   and conversions to other media types.
+
+   "Work" shall mean the work of authorship, whether in Source or
+   Object form, made available under the License, as indicated by a
+   copyright notice that is included in or attached to the work
+   (an example is provided in the Appendix below).
+
+   "Derivative Works" shall mean any work, whether in Source or Object
+   form, that is based on (or derived from) the Work and for which the
+   editorial revisions, annotations, elaborations, or other modifications
+   represent, as a whole, an original work of authorship. For the purposes
+   of this License, Derivative Works shall not include works that remain
+   separable from, or merely link (or bind by name) to the interfaces of,
+   the Work and Derivative Works thereof.
+
+   "Contribution" shall mean any work of authorship, including
+   the original version of the Work and any modifications or additions
+   to that Work or Derivative Works thereof, that is intentionally
+   submitted to Licensor for inclusion in the Work by the copyright owner
+   or by an individual or Legal Entity authorized to submit on behalf of
+   the copyright owner. For the purposes of this definition, "submitted"
+   means any form of electronic, verbal, or written communication sent
+   to the Licensor or its representatives, including but not limited to
+   communication on electronic mailing lists, source code control systems,
+   and issue tracking systems that are managed by, or on behalf of, the
+   Licensor for the purpose of discussing and improving the Work, but
+   excluding communication that is conspicuously marked or otherwise
+   designated in writing by the copyright owner as "Not a Contribution."
+
+   "Contributor" shall mean Licensor and any individual or Legal Entity
+   on behalf of whom a Contribution has been received by Licensor and
+   subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   copyright license to reproduce, prepare Derivative Works of,
+   publicly display, publicly perform, sublicense, and distribute the
+   Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   (except as stated in this section) patent license to make, have made,
+   use, offer to sell, sell, import, and otherwise transfer the Work,
+   where such license applies only to those patent claims licensable
+   by such Contributor that are necessarily infringed by their
+   Contribution(s) alone or by combination of their Contribution(s)
+   with the Work to which such Contribution(s) was submitted. If You
+   institute patent litigation against any entity (including a
+   cross-claim or counterclaim in a lawsuit) alleging that the Work
+   or a Contribution incorporated within the Work constitutes direct
+   or contributory patent infringement, then any patent licenses
+   granted to You under this License for that Work shall terminate
+   as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+   Work or Derivative Works thereof in any medium, with or without
+   modifications, and in Source or Object form, provided that You
+   meet the following conditions:
+
+   (a) You must give any other recipients of the Work or
+       Derivative Works a copy of this License; and
+
+   (b) You must cause any modified files to carry prominent notices
+       stating that You changed the files; and
+
+   (c) You must retain, in the Source form of any Derivative Works
+       that You distribute, all copyright, patent, trademark, and
+       attribution notices from the Source form of the Work,
+       excluding those notices that do not pertain to any part of
+       the Derivative Works; and
+
+   (d) If the Work includes a "NOTICE" text file as part of its
+       distribution, then any Derivative Works that You distribute must
+       include a readable copy of the attribution notices contained
+       within such NOTICE file, excluding those notices that do not
+       pertain to any part of the Derivative Works, in at least one
+       of the following places: within a NOTICE text file distributed
+       as part of the Derivative Works; within the Source form or
+       documentation, if provided along with the Derivative Works; or,
+       within a display generated by the Derivative Works, if and
+       wherever such third-party notices normally appear. The contents
+       of the NOTICE file are for informational purposes only and
+       do not modify the License. You may add Your own attribution
+       notices within Derivative Works that You distribute, alongside
+       or as an addendum to the NOTICE text from the Work, provided
+       that such additional attribution notices cannot be construed
+       as modifying the License.
+
+   You may add Your own copyright statement to Your modifications and
+   may provide additional or different license terms and conditions
+   for use, reproduction, or distribution of Your modifications, or
+   for any such Derivative Works as a whole, provided Your use,
+   reproduction, and distribution of the Work otherwise complies with
+   the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+   any Contribution intentionally submitted for inclusion in the Work
+   by You to the Licensor shall be under the terms and conditions of
+   this License, without any additional terms or conditions.
+   Notwithstanding the above, nothing herein shall supersede or modify
+   the terms of any separate license agreement you may have executed
+   with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+   names, trademarks, service marks, or product names of the Licensor,
+   except as required for reasonable and customary use in describing the
+   origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+   agreed to in writing, Licensor provides the Work (and each
+   Contributor provides its Contributions) on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+   implied, including, without limitation, any warranties or conditions
+   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+   PARTICULAR PURPOSE. You are solely responsible for determining the
+   appropriateness of using or redistributing the Work and assume any
+   risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+   whether in tort (including negligence), contract, or otherwise,
+   unless required by applicable law (such as deliberate and grossly
+   negligent acts) or agreed to in writing, shall any Contributor be
+   liable to You for damages, including any direct, indirect, special,
+   incidental, or consequential damages of any character arising as a
+   result of this License or out of the use or inability to use the
+   Work (including but not limited to damages for loss of goodwill,
+   work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses), even if such Contributor
+   has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+   the Work or Derivative Works thereof, You may choose to offer,
+   and charge a fee for, acceptance of support, warranty, indemnity,
+   or other liability obligations and/or rights consistent with this
+   License. However, in accepting such obligations, You may act only
+   on Your own behalf and on Your sole responsibility, not on behalf
+   of any other Contributor, and only if You agree to indemnify,
+   defend, and hold each Contributor harmless for any liability
+   incurred by, or claims asserted against, such Contributor by reason
+   of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+   To apply the Apache License to your work, attach the following
+   boilerplate notice, with the fields enclosed by brackets "[]"
+   replaced with your own identifying information. (Don't include
+   the brackets!)  The text should be enclosed in the appropriate
+   comment syntax for the file format. We also recommend that a
+   file or class name and description of purpose be included on the
+   same "printed page" as the copyright notice for easier
+   identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/github.com/prometheus/common/LICENSE b/vendor/github.com/prometheus/common/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/vendor/github.com/prometheus/common/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/prometheus/common/NOTICE b/vendor/github.com/prometheus/common/NOTICE
new file mode 100644
index 0000000..636a2c1
--- /dev/null
+++ b/vendor/github.com/prometheus/common/NOTICE
@@ -0,0 +1,5 @@
+Common libraries shared by Prometheus Go components.
+Copyright 2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go
new file mode 100644
index 0000000..c092723
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/decode.go
@@ -0,0 +1,429 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+	"fmt"
+	"io"
+	"math"
+	"mime"
+	"net/http"
+
+	dto "github.com/prometheus/client_model/go"
+
+	"github.com/matttproud/golang_protobuf_extensions/pbutil"
+	"github.com/prometheus/common/model"
+)
+
+// Decoder types decode an input stream into metric families.
+type Decoder interface {
+	Decode(*dto.MetricFamily) error
+}
+
+// DecodeOptions contains options used by the Decoder and in sample extraction.
+type DecodeOptions struct {
+	// Timestamp is added to each value from the stream that has no explicit timestamp set.
+	Timestamp model.Time
+}
+
+// ResponseFormat extracts the correct format from an HTTP response header.
+// If no matching format can be found, FmtUnknown is returned.
+func ResponseFormat(h http.Header) Format {
+	ct := h.Get(hdrContentType)
+
+	mediatype, params, err := mime.ParseMediaType(ct)
+	if err != nil {
+		return FmtUnknown
+	}
+
+	const textType = "text/plain"
+
+	switch mediatype {
+	case ProtoType:
+		if p, ok := params["proto"]; ok && p != ProtoProtocol {
+			return FmtUnknown
+		}
+		if e, ok := params["encoding"]; ok && e != "delimited" {
+			return FmtUnknown
+		}
+		return FmtProtoDelim
+
+	case textType:
+		if v, ok := params["version"]; ok && v != TextVersion {
+			return FmtUnknown
+		}
+		return FmtText
+	}
+
+	return FmtUnknown
+}
+
+// NewDecoder returns a new decoder based on the given input format.
+// If the input format does not imply otherwise, a text format decoder is returned.
+func NewDecoder(r io.Reader, format Format) Decoder {
+	switch format {
+	case FmtProtoDelim:
+		return &protoDecoder{r: r}
+	}
+	return &textDecoder{r: r}
+}
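+
+// Illustrative sketch (not part of the upstream file): decoding the body of a
+// scrape response with ResponseFormat and NewDecoder; the URL is a
+// placeholder, and error handling is abbreviated.
+//
+//	resp, err := http.Get("http://localhost:9090/metrics")
+//	if err != nil {
+//		return err
+//	}
+//	defer resp.Body.Close()
+//	dec := NewDecoder(resp.Body, ResponseFormat(resp.Header))
+//	var mf dto.MetricFamily
+//	for {
+//		if err := dec.Decode(&mf); err == io.EOF {
+//			break // Legitimate end of the input stream.
+//		} else if err != nil {
+//			return err
+//		}
+//		fmt.Println(mf.GetName())
+//	}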
+
+// protoDecoder implements the Decoder interface for protocol buffers.
+type protoDecoder struct {
+	r io.Reader
+}
+
+// Decode implements the Decoder interface.
+func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
+	_, err := pbutil.ReadDelimited(d.r, v)
+	if err != nil {
+		return err
+	}
+	if !model.IsValidMetricName(model.LabelValue(v.GetName())) {
+		return fmt.Errorf("invalid metric name %q", v.GetName())
+	}
+	for _, m := range v.GetMetric() {
+		if m == nil {
+			continue
+		}
+		for _, l := range m.GetLabel() {
+			if l == nil {
+				continue
+			}
+			if !model.LabelValue(l.GetValue()).IsValid() {
+				return fmt.Errorf("invalid label value %q", l.GetValue())
+			}
+			if !model.LabelName(l.GetName()).IsValid() {
+				return fmt.Errorf("invalid label name %q", l.GetName())
+			}
+		}
+	}
+	return nil
+}
+
+// textDecoder implements the Decoder interface for the text protocol.
+type textDecoder struct {
+	r    io.Reader
+	p    TextParser
+	fams []*dto.MetricFamily
+}
+
+// Decode implements the Decoder interface.
+func (d *textDecoder) Decode(v *dto.MetricFamily) error {
+	// TODO(fabxc): Wrap this as a line reader to make streaming safer.
+	if len(d.fams) == 0 {
+		// No cached metric families, read everything and parse metrics.
+		fams, err := d.p.TextToMetricFamilies(d.r)
+		if err != nil {
+			return err
+		}
+		if len(fams) == 0 {
+			return io.EOF
+		}
+		d.fams = make([]*dto.MetricFamily, 0, len(fams))
+		for _, f := range fams {
+			d.fams = append(d.fams, f)
+		}
+	}
+
+	*v = *d.fams[0]
+	d.fams = d.fams[1:]
+
+	return nil
+}
+
+// SampleDecoder wraps a Decoder to extract samples from the metric families
+// decoded by the wrapped Decoder.
+type SampleDecoder struct {
+	Dec  Decoder
+	Opts *DecodeOptions
+
+	f dto.MetricFamily
+}
+
+// Decode calls the Decode method of the wrapped Decoder and then extracts the
+// samples from the decoded MetricFamily into the provided model.Vector.
+func (sd *SampleDecoder) Decode(s *model.Vector) error {
+	err := sd.Dec.Decode(&sd.f)
+	if err != nil {
+		return err
+	}
+	*s, err = extractSamples(&sd.f, sd.Opts)
+	return err
+}
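+
+// Illustrative sketch (not part of the upstream file): draining a stream into
+// model samples, stamping values that carry no explicit timestamp with "now".
+// Here r is a placeholder io.Reader holding text-format metrics.
+//
+//	sd := &SampleDecoder{
+//		Dec:  NewDecoder(r, FmtText),
+//		Opts: &DecodeOptions{Timestamp: model.Now()},
+//	}
+//	var all model.Vector
+//	for {
+//		var smpls model.Vector
+//		if err := sd.Decode(&smpls); err == io.EOF {
+//			break
+//		} else if err != nil {
+//			return err
+//		}
+//		all = append(all, smpls...)
+//	}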
+
+// ExtractSamples builds a slice of samples from the provided metric
+// families. If an error occurs during sample extraction, it continues to
+// extract from the remaining metric families. The returned error is the last
+// error that has occurred.
+func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) {
+	var (
+		all     model.Vector
+		lastErr error
+	)
+	for _, f := range fams {
+		some, err := extractSamples(f, o)
+		if err != nil {
+			lastErr = err
+			continue
+		}
+		all = append(all, some...)
+	}
+	return all, lastErr
+}
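+
+// Illustrative sketch (not part of the upstream file): turning
+// already-decoded metric families into samples in one call. Here fams is a
+// placeholder []*dto.MetricFamily; note that samples extracted before (and
+// after) a failing family are still returned alongside the last error.
+//
+//	vec, err := ExtractSamples(&DecodeOptions{Timestamp: model.Now()}, fams...)
+//	if err != nil {
+//		log.Printf("some families could not be extracted: %v", err)
+//	}
+//	fmt.Println(len(vec), "samples extracted")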
+
+func extractSamples(f *dto.MetricFamily, o *DecodeOptions) (model.Vector, error) {
+	switch f.GetType() {
+	case dto.MetricType_COUNTER:
+		return extractCounter(o, f), nil
+	case dto.MetricType_GAUGE:
+		return extractGauge(o, f), nil
+	case dto.MetricType_SUMMARY:
+		return extractSummary(o, f), nil
+	case dto.MetricType_UNTYPED:
+		return extractUntyped(o, f), nil
+	case dto.MetricType_HISTOGRAM:
+		return extractHistogram(o, f), nil
+	}
+	return nil, fmt.Errorf("expfmt.extractSamples: unknown metric family type %v", f.GetType())
+}
+
+func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+	samples := make(model.Vector, 0, len(f.Metric))
+
+	for _, m := range f.Metric {
+		if m.Counter == nil {
+			continue
+		}
+
+		lset := make(model.LabelSet, len(m.Label)+1)
+		for _, p := range m.Label {
+			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+		}
+		lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+		smpl := &model.Sample{
+			Metric: model.Metric(lset),
+			Value:  model.SampleValue(m.Counter.GetValue()),
+		}
+
+		if m.TimestampMs != nil {
+			smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+		} else {
+			smpl.Timestamp = o.Timestamp
+		}
+
+		samples = append(samples, smpl)
+	}
+
+	return samples
+}
+
+func extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+	samples := make(model.Vector, 0, len(f.Metric))
+
+	for _, m := range f.Metric {
+		if m.Gauge == nil {
+			continue
+		}
+
+		lset := make(model.LabelSet, len(m.Label)+1)
+		for _, p := range m.Label {
+			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+		}
+		lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+		smpl := &model.Sample{
+			Metric: model.Metric(lset),
+			Value:  model.SampleValue(m.Gauge.GetValue()),
+		}
+
+		if m.TimestampMs != nil {
+			smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+		} else {
+			smpl.Timestamp = o.Timestamp
+		}
+
+		samples = append(samples, smpl)
+	}
+
+	return samples
+}
+
+func extractUntyped(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+	samples := make(model.Vector, 0, len(f.Metric))
+
+	for _, m := range f.Metric {
+		if m.Untyped == nil {
+			continue
+		}
+
+		lset := make(model.LabelSet, len(m.Label)+1)
+		for _, p := range m.Label {
+			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+		}
+		lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+		smpl := &model.Sample{
+			Metric: model.Metric(lset),
+			Value:  model.SampleValue(m.Untyped.GetValue()),
+		}
+
+		if m.TimestampMs != nil {
+			smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+		} else {
+			smpl.Timestamp = o.Timestamp
+		}
+
+		samples = append(samples, smpl)
+	}
+
+	return samples
+}
+
+func extractSummary(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+	samples := make(model.Vector, 0, len(f.Metric))
+
+	for _, m := range f.Metric {
+		if m.Summary == nil {
+			continue
+		}
+
+		timestamp := o.Timestamp
+		if m.TimestampMs != nil {
+			timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+		}
+
+		for _, q := range m.Summary.Quantile {
+			lset := make(model.LabelSet, len(m.Label)+2)
+			for _, p := range m.Label {
+				lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+			}
+			// BUG(matt): Update other names to "quantile".
+			lset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile()))
+			lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+			samples = append(samples, &model.Sample{
+				Metric:    model.Metric(lset),
+				Value:     model.SampleValue(q.GetValue()),
+				Timestamp: timestamp,
+			})
+		}
+
+		lset := make(model.LabelSet, len(m.Label)+1)
+		for _, p := range m.Label {
+			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+		}
+		lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")
+
+		samples = append(samples, &model.Sample{
+			Metric:    model.Metric(lset),
+			Value:     model.SampleValue(m.Summary.GetSampleSum()),
+			Timestamp: timestamp,
+		})
+
+		lset = make(model.LabelSet, len(m.Label)+1)
+		for _, p := range m.Label {
+			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+		}
+		lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")
+
+		samples = append(samples, &model.Sample{
+			Metric:    model.Metric(lset),
+			Value:     model.SampleValue(m.Summary.GetSampleCount()),
+			Timestamp: timestamp,
+		})
+	}
+
+	return samples
+}
+
+func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+	samples := make(model.Vector, 0, len(f.Metric))
+
+	for _, m := range f.Metric {
+		if m.Histogram == nil {
+			continue
+		}
+
+		timestamp := o.Timestamp
+		if m.TimestampMs != nil {
+			timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+		}
+
+		infSeen := false
+
+		for _, q := range m.Histogram.Bucket {
+			lset := make(model.LabelSet, len(m.Label)+2)
+			for _, p := range m.Label {
+				lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+			}
+			lset[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound()))
+			lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")
+
+			if math.IsInf(q.GetUpperBound(), +1) {
+				infSeen = true
+			}
+
+			samples = append(samples, &model.Sample{
+				Metric:    model.Metric(lset),
+				Value:     model.SampleValue(q.GetCumulativeCount()),
+				Timestamp: timestamp,
+			})
+		}
+
+		lset := make(model.LabelSet, len(m.Label)+1)
+		for _, p := range m.Label {
+			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+		}
+		lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")
+
+		samples = append(samples, &model.Sample{
+			Metric:    model.Metric(lset),
+			Value:     model.SampleValue(m.Histogram.GetSampleSum()),
+			Timestamp: timestamp,
+		})
+
+		lset = make(model.LabelSet, len(m.Label)+1)
+		for _, p := range m.Label {
+			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+		}
+		lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")
+
+		count := &model.Sample{
+			Metric:    model.Metric(lset),
+			Value:     model.SampleValue(m.Histogram.GetSampleCount()),
+			Timestamp: timestamp,
+		}
+		samples = append(samples, count)
+
+		if !infSeen {
+			// Append an infinity bucket sample.
+			lset := make(model.LabelSet, len(m.Label)+2)
+			for _, p := range m.Label {
+				lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+			}
+			lset[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf")
+			lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")
+
+			samples = append(samples, &model.Sample{
+				Metric:    model.Metric(lset),
+				Value:     count.Value,
+				Timestamp: timestamp,
+			})
+		}
+	}
+
+	return samples
+}
diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go
new file mode 100644
index 0000000..11839ed
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/encode.go
@@ -0,0 +1,88 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/matttproud/golang_protobuf_extensions/pbutil"
+	"github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg"
+
+	dto "github.com/prometheus/client_model/go"
+)
+
+// Encoder types encode metric families into an underlying wire protocol.
+type Encoder interface {
+	Encode(*dto.MetricFamily) error
+}
+
+type encoder func(*dto.MetricFamily) error
+
+func (e encoder) Encode(v *dto.MetricFamily) error {
+	return e(v)
+}
+
+// Negotiate returns the Content-Type based on the given Accept header.
+// If no appropriate accepted type is found, FmtText is returned.
+func Negotiate(h http.Header) Format {
+	for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
+		// Check for the protocol buffer format.
+		if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
+			switch ac.Params["encoding"] {
+			case "delimited":
+				return FmtProtoDelim
+			case "text":
+				return FmtProtoText
+			case "compact-text":
+				return FmtProtoCompact
+			}
+		}
+		// Check for text format.
+		ver := ac.Params["version"]
+		if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
+			return FmtText
+		}
+	}
+	return FmtText
+}
+
+// NewEncoder returns a new encoder that encodes into the given format,
+// typically the result of content-type negotiation via Negotiate.
+func NewEncoder(w io.Writer, format Format) Encoder {
+	switch format {
+	case FmtProtoDelim:
+		return encoder(func(v *dto.MetricFamily) error {
+			_, err := pbutil.WriteDelimited(w, v)
+			return err
+		})
+	case FmtProtoCompact:
+		return encoder(func(v *dto.MetricFamily) error {
+			_, err := fmt.Fprintln(w, v.String())
+			return err
+		})
+	case FmtProtoText:
+		return encoder(func(v *dto.MetricFamily) error {
+			_, err := fmt.Fprintln(w, proto.MarshalTextString(v))
+			return err
+		})
+	case FmtText:
+		return encoder(func(v *dto.MetricFamily) error {
+			_, err := MetricFamilyToText(w, v)
+			return err
+		})
+	}
+	panic("expfmt.NewEncoder: unknown format")
+}
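+
+// Illustrative sketch (not part of the upstream file): a minimal HTTP handler
+// that honors the client's Accept header when exposing metric families. The
+// families variable is a placeholder for whatever the caller has gathered.
+//
+//	http.HandleFunc("/metrics", func(w http.ResponseWriter, r *http.Request) {
+//		format := Negotiate(r.Header)
+//		w.Header().Set("Content-Type", string(format))
+//		enc := NewEncoder(w, format)
+//		for _, mf := range families {
+//			if err := enc.Encode(mf); err != nil {
+//				// The response is already partially written, so
+//				// just log and abort.
+//				log.Println(err)
+//				return
+//			}
+//		}
+//	})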
diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go
new file mode 100644
index 0000000..c71bcb9
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go
@@ -0,0 +1,38 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package expfmt contains tools for reading and writing Prometheus metrics.
+package expfmt
+
+// Format specifies the HTTP content type of the different wire protocols.
+type Format string
+
+// Constants to assemble the Content-Type values for the different wire protocols.
+const (
+	TextVersion   = "0.0.4"
+	ProtoType     = `application/vnd.google.protobuf`
+	ProtoProtocol = `io.prometheus.client.MetricFamily`
+	ProtoFmt      = ProtoType + "; proto=" + ProtoProtocol + ";"
+
+	// The Content-Type values for the different wire protocols.
+	FmtUnknown      Format = `<unknown>`
+	FmtText         Format = `text/plain; version=` + TextVersion + `; charset=utf-8`
+	FmtProtoDelim   Format = ProtoFmt + ` encoding=delimited`
+	FmtProtoText    Format = ProtoFmt + ` encoding=text`
+	FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
+)
+
+const (
+	hdrContentType = "Content-Type"
+	hdrAccept      = "Accept"
+)
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go
new file mode 100644
index 0000000..dc2eede
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz.go
@@ -0,0 +1,36 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Build only when actually fuzzing
+// +build gofuzz
+
+package expfmt
+
+import "bytes"
+
+// Fuzz text metric parser with github.com/dvyukov/go-fuzz:
+//
+//     go-fuzz-build github.com/prometheus/common/expfmt
+//     go-fuzz -bin expfmt-fuzz.zip -workdir fuzz
+//
+// Further input samples should go in the folder fuzz/corpus.
+func Fuzz(in []byte) int {
+	parser := TextParser{}
+	_, err := parser.TextToMetricFamilies(bytes.NewReader(in))
+
+	if err != nil {
+		return 0
+	}
+
+	return 1
+}
diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go
new file mode 100644
index 0000000..16655d4
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/text_create.go
@@ -0,0 +1,474 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"math"
+	"strconv"
+	"strings"
+	"sync"
+
+	"github.com/prometheus/common/model"
+
+	dto "github.com/prometheus/client_model/go"
+)
+
+// enhancedWriter has all the enhanced write functions needed here. bytes.Buffer
+// implements it.
+type enhancedWriter interface {
+	io.Writer
+	WriteRune(r rune) (n int, err error)
+	WriteString(s string) (n int, err error)
+	WriteByte(c byte) error
+}
+
+const (
+	initialBufSize    = 512
+	initialNumBufSize = 24
+)
+
+var (
+	bufPool = sync.Pool{
+		New: func() interface{} {
+			return bytes.NewBuffer(make([]byte, 0, initialBufSize))
+		},
+	}
+	numBufPool = sync.Pool{
+		New: func() interface{} {
+			b := make([]byte, 0, initialNumBufSize)
+			return &b
+		},
+	}
+)
+
+// MetricFamilyToText converts a MetricFamily proto message into text format and
+// writes the resulting lines to 'out'. It returns the number of bytes written
+// and any error encountered. The output will have the same order as the input,
+// no further sorting is performed. Furthermore, this function assumes the input
+// is already sanitized and does not perform any sanity checks. If the input
+// contains duplicate metrics or invalid metric or label names, the conversion
+// will result in invalid text format output.
+//
+// This method fulfills the type 'prometheus.encoder'.
+func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err error) {
+	// Fail-fast checks.
+	if len(in.Metric) == 0 {
+		return 0, fmt.Errorf("MetricFamily has no metrics: %s", in)
+	}
+	name := in.GetName()
+	if name == "" {
+		return 0, fmt.Errorf("MetricFamily has no name: %s", in)
+	}
+
+	// Try the interface upgrade. If it doesn't work, we'll use a
+	// bytes.Buffer from the sync.Pool and write out its content to out in a
+	// single go in the end.
+	w, ok := out.(enhancedWriter)
+	if !ok {
+		b := bufPool.Get().(*bytes.Buffer)
+		b.Reset()
+		w = b
+		defer func() {
+			bWritten, bErr := out.Write(b.Bytes())
+			written = bWritten
+			if err == nil {
+				err = bErr
+			}
+			bufPool.Put(b)
+		}()
+	}
+
+	var n int
+
+	// Comments, first HELP, then TYPE.
+	if in.Help != nil {
+		n, err = w.WriteString("# HELP ")
+		written += n
+		if err != nil {
+			return
+		}
+		n, err = w.WriteString(name)
+		written += n
+		if err != nil {
+			return
+		}
+		err = w.WriteByte(' ')
+		written++
+		if err != nil {
+			return
+		}
+		n, err = writeEscapedString(w, *in.Help, false)
+		written += n
+		if err != nil {
+			return
+		}
+		err = w.WriteByte('\n')
+		written++
+		if err != nil {
+			return
+		}
+	}
+	n, err = w.WriteString("# TYPE ")
+	written += n
+	if err != nil {
+		return
+	}
+	n, err = w.WriteString(name)
+	written += n
+	if err != nil {
+		return
+	}
+	metricType := in.GetType()
+	switch metricType {
+	case dto.MetricType_COUNTER:
+		n, err = w.WriteString(" counter\n")
+	case dto.MetricType_GAUGE:
+		n, err = w.WriteString(" gauge\n")
+	case dto.MetricType_SUMMARY:
+		n, err = w.WriteString(" summary\n")
+	case dto.MetricType_UNTYPED:
+		n, err = w.WriteString(" untyped\n")
+	case dto.MetricType_HISTOGRAM:
+		n, err = w.WriteString(" histogram\n")
+	default:
+		return written, fmt.Errorf("unknown metric type %s", metricType.String())
+	}
+	written += n
+	if err != nil {
+		return
+	}
+
+	// Finally the samples, one line for each.
+	for _, metric := range in.Metric {
+		switch metricType {
+		case dto.MetricType_COUNTER:
+			if metric.Counter == nil {
+				return written, fmt.Errorf(
+					"expected counter in metric %s %s", name, metric,
+				)
+			}
+			n, err = writeSample(
+				w, name, "", metric, "", 0,
+				metric.Counter.GetValue(),
+			)
+		case dto.MetricType_GAUGE:
+			if metric.Gauge == nil {
+				return written, fmt.Errorf(
+					"expected gauge in metric %s %s", name, metric,
+				)
+			}
+			n, err = writeSample(
+				w, name, "", metric, "", 0,
+				metric.Gauge.GetValue(),
+			)
+		case dto.MetricType_UNTYPED:
+			if metric.Untyped == nil {
+				return written, fmt.Errorf(
+					"expected untyped in metric %s %s", name, metric,
+				)
+			}
+			n, err = writeSample(
+				w, name, "", metric, "", 0,
+				metric.Untyped.GetValue(),
+			)
+		case dto.MetricType_SUMMARY:
+			if metric.Summary == nil {
+				return written, fmt.Errorf(
+					"expected summary in metric %s %s", name, metric,
+				)
+			}
+			for _, q := range metric.Summary.Quantile {
+				n, err = writeSample(
+					w, name, "", metric,
+					model.QuantileLabel, q.GetQuantile(),
+					q.GetValue(),
+				)
+				written += n
+				if err != nil {
+					return
+				}
+			}
+			n, err = writeSample(
+				w, name, "_sum", metric, "", 0,
+				metric.Summary.GetSampleSum(),
+			)
+			written += n
+			if err != nil {
+				return
+			}
+			n, err = writeSample(
+				w, name, "_count", metric, "", 0,
+				float64(metric.Summary.GetSampleCount()),
+			)
+		case dto.MetricType_HISTOGRAM:
+			if metric.Histogram == nil {
+				return written, fmt.Errorf(
+					"expected histogram in metric %s %s", name, metric,
+				)
+			}
+			infSeen := false
+			for _, b := range metric.Histogram.Bucket {
+				n, err = writeSample(
+					w, name, "_bucket", metric,
+					model.BucketLabel, b.GetUpperBound(),
+					float64(b.GetCumulativeCount()),
+				)
+				written += n
+				if err != nil {
+					return
+				}
+				if math.IsInf(b.GetUpperBound(), +1) {
+					infSeen = true
+				}
+			}
+			if !infSeen {
+				n, err = writeSample(
+					w, name, "_bucket", metric,
+					model.BucketLabel, math.Inf(+1),
+					float64(metric.Histogram.GetSampleCount()),
+				)
+				written += n
+				if err != nil {
+					return
+				}
+			}
+			n, err = writeSample(
+				w, name, "_sum", metric, "", 0,
+				metric.Histogram.GetSampleSum(),
+			)
+			written += n
+			if err != nil {
+				return
+			}
+			n, err = writeSample(
+				w, name, "_count", metric, "", 0,
+				float64(metric.Histogram.GetSampleCount()),
+			)
+		default:
+			return written, fmt.Errorf(
+				"unexpected type in metric %s %s", name, metric,
+			)
+		}
+		written += n
+		if err != nil {
+			return
+		}
+	}
+	return
+}
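+
+// Illustrative sketch (not part of the upstream file): rendering a single
+// gauge family to standard output. proto.String and proto.Float64 are the
+// usual golang/protobuf helpers; since no Help text is set, only the # TYPE
+// comment and the sample line ("queue_length 42.0") are written.
+//
+//	mf := &dto.MetricFamily{
+//		Name: proto.String("queue_length"),
+//		Type: dto.MetricType_GAUGE.Enum(),
+//		Metric: []*dto.Metric{{
+//			Gauge: &dto.Gauge{Value: proto.Float64(42)},
+//		}},
+//	}
+//	if _, err := MetricFamilyToText(os.Stdout, mf); err != nil {
+//		log.Fatal(err)
+//	}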
+
+// writeSample writes a single sample in text format to w, given the metric
+// name, an optional name suffix, the metric proto message itself, optionally
+// an additional label name with a float64 value (use empty string as label
+// name if not required), and the value. It returns the number of bytes
+// written and any error encountered.
+func writeSample(
+	w enhancedWriter,
+	name, suffix string,
+	metric *dto.Metric,
+	additionalLabelName string, additionalLabelValue float64,
+	value float64,
+) (int, error) {
+	var written int
+	n, err := w.WriteString(name)
+	written += n
+	if err != nil {
+		return written, err
+	}
+	if suffix != "" {
+		n, err = w.WriteString(suffix)
+		written += n
+		if err != nil {
+			return written, err
+		}
+	}
+	n, err = writeLabelPairs(
+		w, metric.Label, additionalLabelName, additionalLabelValue,
+	)
+	written += n
+	if err != nil {
+		return written, err
+	}
+	err = w.WriteByte(' ')
+	written++
+	if err != nil {
+		return written, err
+	}
+	n, err = writeFloat(w, value)
+	written += n
+	if err != nil {
+		return written, err
+	}
+	if metric.TimestampMs != nil {
+		err = w.WriteByte(' ')
+		written++
+		if err != nil {
+			return written, err
+		}
+		n, err = writeInt(w, *metric.TimestampMs)
+		written += n
+		if err != nil {
+			return written, err
+		}
+	}
+	err = w.WriteByte('\n')
+	written++
+	if err != nil {
+		return written, err
+	}
+	return written, nil
+}
+
+// writeLabelPairs converts a slice of LabelPair proto messages plus the
+// explicitly given additional label pair into the label syntax required by the
+// text format and writes it to 'w'. An empty slice in combination with an empty
+// string 'additionalLabelName' results in nothing being written. Otherwise, the
+// label pairs are written, escaped as required by the text format, and enclosed
+// in '{...}'. The function returns the number of bytes written and any error
+// encountered.
+func writeLabelPairs(
+	w enhancedWriter,
+	in []*dto.LabelPair,
+	additionalLabelName string, additionalLabelValue float64,
+) (int, error) {
+	if len(in) == 0 && additionalLabelName == "" {
+		return 0, nil
+	}
+	var (
+		written   int
+		separator byte = '{'
+	)
+	for _, lp := range in {
+		err := w.WriteByte(separator)
+		written++
+		if err != nil {
+			return written, err
+		}
+		n, err := w.WriteString(lp.GetName())
+		written += n
+		if err != nil {
+			return written, err
+		}
+		n, err = w.WriteString(`="`)
+		written += n
+		if err != nil {
+			return written, err
+		}
+		n, err = writeEscapedString(w, lp.GetValue(), true)
+		written += n
+		if err != nil {
+			return written, err
+		}
+		err = w.WriteByte('"')
+		written++
+		if err != nil {
+			return written, err
+		}
+		separator = ','
+	}
+	if additionalLabelName != "" {
+		err := w.WriteByte(separator)
+		written++
+		if err != nil {
+			return written, err
+		}
+		n, err := w.WriteString(additionalLabelName)
+		written += n
+		if err != nil {
+			return written, err
+		}
+		n, err = w.WriteString(`="`)
+		written += n
+		if err != nil {
+			return written, err
+		}
+		n, err = writeFloat(w, additionalLabelValue)
+		written += n
+		if err != nil {
+			return written, err
+		}
+		err = w.WriteByte('"')
+		written++
+		if err != nil {
+			return written, err
+		}
+	}
+	err := w.WriteByte('}')
+	written++
+	if err != nil {
+		return written, err
+	}
+	return written, nil
+}
+
+// writeEscapedString replaces '\' by '\\' and the newline character by '\n';
+// if includeDoubleQuote is true, it also replaces '"' by '\"'.
+var (
+	escaper       = strings.NewReplacer("\\", `\\`, "\n", `\n`)
+	quotedEscaper = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`)
+)
+
+func writeEscapedString(w enhancedWriter, v string, includeDoubleQuote bool) (int, error) {
+	if includeDoubleQuote {
+		return quotedEscaper.WriteString(w, v)
+	}
+	return escaper.WriteString(w, v)
+}
+
+// writeFloat is equivalent to fmt.Fprint with a float64 argument but hardcodes
+// a few common cases for increased efficiency. For non-hardcoded cases, it uses
+// strconv.AppendFloat to avoid allocations, similar to writeInt.
+func writeFloat(w enhancedWriter, f float64) (int, error) {
+	switch {
+	case f == 1:
+		return w.WriteString("1.0")
+	case f == 0:
+		return w.WriteString("0.0")
+	case f == -1:
+		return w.WriteString("-1.0")
+	case math.IsNaN(f):
+		return w.WriteString("NaN")
+	case math.IsInf(f, +1):
+		return w.WriteString("+Inf")
+	case math.IsInf(f, -1):
+		return w.WriteString("-Inf")
+	default:
+		bp := numBufPool.Get().(*[]byte)
+		*bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64)
+		// Add a .0 if the fixed-point form was used and there is no
+		// decimal point already. This future-proofs for OpenMetrics,
+		// where floats always contain either an exponent or a decimal point.
+		if !bytes.ContainsAny(*bp, "e.") {
+			*bp = append(*bp, '.', '0')
+		}
+		written, err := w.Write(*bp)
+		numBufPool.Put(bp)
+		return written, err
+	}
+}
+
+// writeInt is equivalent to fmt.Fprint with an int64 argument but uses
+// strconv.AppendInt with a byte slice taken from a sync.Pool to avoid
+// allocations.
+func writeInt(w enhancedWriter, i int64) (int, error) {
+	bp := numBufPool.Get().(*[]byte)
+	*bp = strconv.AppendInt((*bp)[:0], i, 10)
+	written, err := w.Write(*bp)
+	numBufPool.Put(bp)
+	return written, err
+}
diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go
new file mode 100644
index 0000000..ec3d86b
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go
@@ -0,0 +1,757 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"math"
+	"strconv"
+	"strings"
+
+	dto "github.com/prometheus/client_model/go"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/prometheus/common/model"
+)
+
+// A stateFn is a function that represents a state in a state machine. By
+// executing it, the state is progressed to the next state. The stateFn returns
+// another stateFn, which represents the new state. The end state is represented
+// by nil.
+type stateFn func() stateFn
+
+// ParseError signals errors while parsing the simple and flat text-based
+// exchange format.
+type ParseError struct {
+	Line int
+	Msg  string
+}
+
+// Error implements the error interface.
+func (e ParseError) Error() string {
+	return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg)
+}
+
+// TextParser is used to parse the simple and flat text-based exchange format. Its
+// zero value is ready to use.
+type TextParser struct {
+	metricFamiliesByName map[string]*dto.MetricFamily
+	buf                  *bufio.Reader // Where the parsed input is read through.
+	err                  error         // Most recent error.
+	lineCount            int           // Tracks the line count for error messages.
+	currentByte          byte          // The most recent byte read.
+	currentToken         bytes.Buffer  // Re-used each time a token has to be gathered from multiple bytes.
+	currentMF            *dto.MetricFamily
+	currentMetric        *dto.Metric
+	currentLabelPair     *dto.LabelPair
+
+	// The remaining member variables are only used for summaries/histograms.
+	currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le'
+	// Summary specific.
+	summaries       map[uint64]*dto.Metric // Key is created with LabelsToSignature.
+	currentQuantile float64
+	// Histogram specific.
+	histograms    map[uint64]*dto.Metric // Key is created with LabelsToSignature.
+	currentBucket float64
+	// These tell us if the currently processed line ends on '_count' or
+	// '_sum' respectively and belongs to a summary/histogram, representing
+	// the sample count and sum of that summary/histogram.
+	currentIsSummaryCount, currentIsSummarySum     bool
+	currentIsHistogramCount, currentIsHistogramSum bool
+}
+
+// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange
+// format and creates MetricFamily proto messages. It returns the MetricFamily
+// proto messages in a map where the metric names are the keys, along with any
+// error encountered.
+//
+// If the input contains duplicate metrics (i.e. lines with the same metric name
+// and exactly the same label set), the resulting MetricFamily will contain
+// duplicate Metric proto messages. The same is true for duplicate label
+// names. Checks for duplicates have to be performed separately, if required.
+// Also note that neither the metrics within each MetricFamily nor the label
+// pairs within each Metric are sorted. Sorting is not required for the most
+// frequent use of this method, which is sample ingestion in the Prometheus
+// server. However, for presentation purposes, you might want to sort the
+// metrics, and in some cases, you must sort the labels, e.g. for consumption by
+// the metric family injection hook of the Prometheus registry.
+//
+// Summaries and histograms are rather special beasts. You would probably not
+// use them in the simple text format anyway. This method can deal with
+// summaries and histograms if they are presented in exactly the way the
+// text.Create function creates them.
+//
+// This method must not be called concurrently. If you want to parse different
+// input concurrently, instantiate a separate TextParser for each goroutine.
+func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) {
+	p.reset(in)
+	for nextState := p.startOfLine; nextState != nil; nextState = nextState() {
+		// Magic happens here...
+	}
+	// Get rid of empty metric families.
+	for k, mf := range p.metricFamiliesByName {
+		if len(mf.GetMetric()) == 0 {
+			delete(p.metricFamiliesByName, k)
+		}
+	}
+	// If p.err is io.EOF now, we have run into a premature end of the input
+	// stream. Turn this error into something nicer and more
+	// meaningful. (io.EOF is often used as a signal for the legitimate end
+	// of an input stream.)
+	if p.err == io.EOF {
+		p.parseError("unexpected end of input stream")
+	}
+	return p.metricFamiliesByName, p.err
+}
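+
+// A minimal usage sketch (illustrative only; the input string is a made-up
+// example):
+//
+//	var parser TextParser
+//	in := strings.NewReader(
+//		"# TYPE requests_total counter\nrequests_total{path=\"/\"} 42\n")
+//	families, err := parser.TextToMetricFamilies(in)
+//	if err != nil {
+//		// handle the parse error
+//	}
+//	// families["requests_total"].GetMetric()[0].GetCounter().GetValue() == 42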
+
+func (p *TextParser) reset(in io.Reader) {
+	p.metricFamiliesByName = map[string]*dto.MetricFamily{}
+	if p.buf == nil {
+		p.buf = bufio.NewReader(in)
+	} else {
+		p.buf.Reset(in)
+	}
+	p.err = nil
+	p.lineCount = 0
+	if p.summaries == nil || len(p.summaries) > 0 {
+		p.summaries = map[uint64]*dto.Metric{}
+	}
+	if p.histograms == nil || len(p.histograms) > 0 {
+		p.histograms = map[uint64]*dto.Metric{}
+	}
+	p.currentQuantile = math.NaN()
+	p.currentBucket = math.NaN()
+}
+
+// startOfLine represents the state where the next byte read from p.buf is the
+// start of a line (or whitespace leading up to it).
+func (p *TextParser) startOfLine() stateFn {
+	p.lineCount++
+	if p.skipBlankTab(); p.err != nil {
+		// End of input reached. This is the only case where
+		// that is not an error but a signal that we are done.
+		p.err = nil
+		return nil
+	}
+	switch p.currentByte {
+	case '#':
+		return p.startComment
+	case '\n':
+		return p.startOfLine // Empty line, start the next one.
+	}
+	return p.readingMetricName
+}
+
+// startComment represents the state where the next byte read from p.buf is the
+// start of a comment (or whitespace leading up to it).
+func (p *TextParser) startComment() stateFn {
+	if p.skipBlankTab(); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	if p.currentByte == '\n' {
+		return p.startOfLine
+	}
+	if p.readTokenUntilWhitespace(); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	// If we have hit the end of line already, there is nothing left
+	// to do. This is not considered a syntax error.
+	if p.currentByte == '\n' {
+		return p.startOfLine
+	}
+	keyword := p.currentToken.String()
+	if keyword != "HELP" && keyword != "TYPE" {
+		// Generic comment, ignore by fast forwarding to end of line.
+		for p.currentByte != '\n' {
+			if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
+				return nil // Unexpected end of input.
+			}
+		}
+		return p.startOfLine
+	}
+	// There is something. Next has to be a metric name.
+	if p.skipBlankTab(); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	if p.readTokenAsMetricName(); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	if p.currentByte == '\n' {
+		// At the end of the line already.
+		// Again, this is not considered a syntax error.
+		return p.startOfLine
+	}
+	if !isBlankOrTab(p.currentByte) {
+		p.parseError("invalid metric name in comment")
+		return nil
+	}
+	p.setOrCreateCurrentMF()
+	if p.skipBlankTab(); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	if p.currentByte == '\n' {
+		// At the end of the line already.
+		// Again, this is not considered a syntax error.
+		return p.startOfLine
+	}
+	switch keyword {
+	case "HELP":
+		return p.readingHelp
+	case "TYPE":
+		return p.readingType
+	}
+	panic(fmt.Sprintf("code error: unexpected keyword %q", keyword))
+}
+
+// readingMetricName represents the state where the last byte read (now in
+// p.currentByte) is the first byte of a metric name.
+func (p *TextParser) readingMetricName() stateFn {
+	if p.readTokenAsMetricName(); p.err != nil {
+		return nil
+	}
+	if p.currentToken.Len() == 0 {
+		p.parseError("invalid metric name")
+		return nil
+	}
+	p.setOrCreateCurrentMF()
+	// Now is the time to fix the type if it hasn't happened yet.
+	if p.currentMF.Type == nil {
+		p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
+	}
+	p.currentMetric = &dto.Metric{}
+	// Do not append the newly created currentMetric to
+	// currentMF.Metric right now. If this is a summary/histogram,
+	// the metric might already exist, which we can only know after
+	// having read all the labels.
+	if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	return p.readingLabels
+}
+
+// readingLabels represents the state where the last byte read (now in
+// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the
+// first byte of the value (otherwise).
+func (p *TextParser) readingLabels() stateFn {
+	// Summaries/histograms are special. We have to reset the
+	// currentLabels map, currentQuantile and currentBucket before starting to
+	// read labels.
+	if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+		p.currentLabels = map[string]string{}
+		p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName()
+		p.currentQuantile = math.NaN()
+		p.currentBucket = math.NaN()
+	}
+	if p.currentByte != '{' {
+		return p.readingValue
+	}
+	return p.startLabelName
+}
+
+// startLabelName represents the state where the next byte read from p.buf is
+// the start of a label name (or whitespace leading up to it).
+func (p *TextParser) startLabelName() stateFn {
+	if p.skipBlankTab(); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	if p.currentByte == '}' {
+		if p.skipBlankTab(); p.err != nil {
+			return nil // Unexpected end of input.
+		}
+		return p.readingValue
+	}
+	if p.readTokenAsLabelName(); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	if p.currentToken.Len() == 0 {
+		p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName()))
+		return nil
+	}
+	p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())}
+	if p.currentLabelPair.GetName() == string(model.MetricNameLabel) {
+		p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel))
+		return nil
+	}
+	// Special summary/histogram treatment. Don't add 'quantile' and 'le'
+	// labels to 'real' labels.
+	if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) &&
+		!(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) {
+		p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair)
+	}
+	if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	if p.currentByte != '=' {
+		p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte))
+		return nil
+	}
+	return p.startLabelValue
+}
+
+// startLabelValue represents the state where the next byte read from p.buf is
+// the start of a (quoted) label value (or whitespace leading up to it).
+func (p *TextParser) startLabelValue() stateFn {
+	if p.skipBlankTab(); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	if p.currentByte != '"' {
+		p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte))
+		return nil
+	}
+	if p.readTokenAsLabelValue(); p.err != nil {
+		return nil
+	}
+	if !model.LabelValue(p.currentToken.String()).IsValid() {
+		p.parseError(fmt.Sprintf("invalid label value %q", p.currentToken.String()))
+		return nil
+	}
+	p.currentLabelPair.Value = proto.String(p.currentToken.String())
+	// Special treatment of summaries:
+	// - Quantile labels are special, will result in dto.Quantile later.
+	// - Other labels have to be added to currentLabels for signature calculation.
+	if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+		if p.currentLabelPair.GetName() == model.QuantileLabel {
+			if p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
+				// Create a more helpful error message.
+				p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue()))
+				return nil
+			}
+		} else {
+			p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
+		}
+	}
+	// Similar special treatment of histograms.
+	if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+		if p.currentLabelPair.GetName() == model.BucketLabel {
+			if p.currentBucket, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
+				// Create a more helpful error message.
+				p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue()))
+				return nil
+			}
+		} else {
+			p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
+		}
+	}
+	if p.skipBlankTab(); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	switch p.currentByte {
+	case ',':
+		return p.startLabelName
+
+	case '}':
+		if p.skipBlankTab(); p.err != nil {
+			return nil // Unexpected end of input.
+		}
+		return p.readingValue
+	default:
+		p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue()))
+		return nil
+	}
+}
+
+// readingValue represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the sample value (i.e. a float).
+func (p *TextParser) readingValue() stateFn {
+	// When we are here, we have read all the labels, so for the
+	// special case of a summary/histogram, we can finally find out
+	// if the metric already exists.
+	if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+		signature := model.LabelsToSignature(p.currentLabels)
+		if summary := p.summaries[signature]; summary != nil {
+			p.currentMetric = summary
+		} else {
+			p.summaries[signature] = p.currentMetric
+			p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+		}
+	} else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+		signature := model.LabelsToSignature(p.currentLabels)
+		if histogram := p.histograms[signature]; histogram != nil {
+			p.currentMetric = histogram
+		} else {
+			p.histograms[signature] = p.currentMetric
+			p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+		}
+	} else {
+		p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+	}
+	if p.readTokenUntilWhitespace(); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	value, err := strconv.ParseFloat(p.currentToken.String(), 64)
+	if err != nil {
+		// Create a more helpful error message.
+		p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String()))
+		return nil
+	}
+	switch p.currentMF.GetType() {
+	case dto.MetricType_COUNTER:
+		p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)}
+	case dto.MetricType_GAUGE:
+		p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)}
+	case dto.MetricType_UNTYPED:
+		p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)}
+	case dto.MetricType_SUMMARY:
+		// *sigh*
+		if p.currentMetric.Summary == nil {
+			p.currentMetric.Summary = &dto.Summary{}
+		}
+		switch {
+		case p.currentIsSummaryCount:
+			p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value))
+		case p.currentIsSummarySum:
+			p.currentMetric.Summary.SampleSum = proto.Float64(value)
+		case !math.IsNaN(p.currentQuantile):
+			p.currentMetric.Summary.Quantile = append(
+				p.currentMetric.Summary.Quantile,
+				&dto.Quantile{
+					Quantile: proto.Float64(p.currentQuantile),
+					Value:    proto.Float64(value),
+				},
+			)
+		}
+	case dto.MetricType_HISTOGRAM:
+		// *sigh*
+		if p.currentMetric.Histogram == nil {
+			p.currentMetric.Histogram = &dto.Histogram{}
+		}
+		switch {
+		case p.currentIsHistogramCount:
+			p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value))
+		case p.currentIsHistogramSum:
+			p.currentMetric.Histogram.SampleSum = proto.Float64(value)
+		case !math.IsNaN(p.currentBucket):
+			p.currentMetric.Histogram.Bucket = append(
+				p.currentMetric.Histogram.Bucket,
+				&dto.Bucket{
+					UpperBound:      proto.Float64(p.currentBucket),
+					CumulativeCount: proto.Uint64(uint64(value)),
+				},
+			)
+		}
+	default:
+		p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName())
+	}
+	if p.currentByte == '\n' {
+		return p.startOfLine
+	}
+	return p.startTimestamp
+}
+
+// startTimestamp represents the state where the next byte read from p.buf is
+// the start of the timestamp (or whitespace leading up to it).
+func (p *TextParser) startTimestamp() stateFn {
+	if p.skipBlankTab(); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	if p.readTokenUntilWhitespace(); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64)
+	if err != nil {
+		// Create a more helpful error message.
+		p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String()))
+		return nil
+	}
+	p.currentMetric.TimestampMs = proto.Int64(timestamp)
+	if p.readTokenUntilNewline(false); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	if p.currentToken.Len() > 0 {
+		p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String()))
+		return nil
+	}
+	return p.startOfLine
+}
+
+// readingHelp represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the docstring after 'HELP'.
+func (p *TextParser) readingHelp() stateFn {
+	if p.currentMF.Help != nil {
+		p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName()))
+		return nil
+	}
+	// Rest of line is the docstring.
+	if p.readTokenUntilNewline(true); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	p.currentMF.Help = proto.String(p.currentToken.String())
+	return p.startOfLine
+}
+
+// readingType represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the type hint after 'TYPE'.
+func (p *TextParser) readingType() stateFn {
+	if p.currentMF.Type != nil {
+		p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName()))
+		return nil
+	}
+	// Rest of line is the type.
+	if p.readTokenUntilNewline(false); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())]
+	if !ok {
+		p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String()))
+		return nil
+	}
+	p.currentMF.Type = dto.MetricType(metricType).Enum()
+	return p.startOfLine
+}
+
+// parseError sets p.err to a ParseError at the current line with the given
+// message.
+func (p *TextParser) parseError(msg string) {
+	p.err = ParseError{
+		Line: p.lineCount,
+		Msg:  msg,
+	}
+}
+
+// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte
+// that is neither ' ' nor '\t'. That byte is left in p.currentByte.
+func (p *TextParser) skipBlankTab() {
+	for {
+		if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) {
+			return
+		}
+	}
+}
+
+// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do
+// anything if p.currentByte is neither ' ' nor '\t'.
+func (p *TextParser) skipBlankTabIfCurrentBlankTab() {
+	if isBlankOrTab(p.currentByte) {
+		p.skipBlankTab()
+	}
+}
+
+// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken.  The
+// first byte considered is the byte already read (now in p.currentByte).  The
+// first whitespace byte encountered is still copied into p.currentByte, but not
+// into p.currentToken.
+func (p *TextParser) readTokenUntilWhitespace() {
+	p.currentToken.Reset()
+	for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' {
+		p.currentToken.WriteByte(p.currentByte)
+		p.currentByte, p.err = p.buf.ReadByte()
+	}
+}
+
+// readTokenUntilNewline copies bytes from p.buf into p.currentToken.  The first
+// byte considered is the byte already read (now in p.currentByte).  The first
+// newline byte encountered is still copied into p.currentByte, but not into
+// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are
+// recognized: '\\' translates into '\', and '\n' into a line-feed character.
+// All other escape sequences are invalid and cause an error.
+func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) {
+	p.currentToken.Reset()
+	escaped := false
+	for p.err == nil {
+		if recognizeEscapeSequence && escaped {
+			switch p.currentByte {
+			case '\\':
+				p.currentToken.WriteByte(p.currentByte)
+			case 'n':
+				p.currentToken.WriteByte('\n')
+			default:
+				p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+				return
+			}
+			escaped = false
+		} else {
+			switch p.currentByte {
+			case '\n':
+				return
+			case '\\':
+				escaped = true
+			default:
+				p.currentToken.WriteByte(p.currentByte)
+			}
+		}
+		p.currentByte, p.err = p.buf.ReadByte()
+	}
+}
+
+// readTokenAsMetricName copies a metric name from p.buf into p.currentToken.
+// The first byte considered is the byte already read (now in p.currentByte).
+// The first byte not part of a metric name is still copied into p.currentByte,
+// but not into p.currentToken.
+func (p *TextParser) readTokenAsMetricName() {
+	p.currentToken.Reset()
+	if !isValidMetricNameStart(p.currentByte) {
+		return
+	}
+	for {
+		p.currentToken.WriteByte(p.currentByte)
+		p.currentByte, p.err = p.buf.ReadByte()
+		if p.err != nil || !isValidMetricNameContinuation(p.currentByte) {
+			return
+		}
+	}
+}
+
+// readTokenAsLabelName copies a label name from p.buf into p.currentToken.
+// The first byte considered is the byte already read (now in p.currentByte).
+// The first byte not part of a label name is still copied into p.currentByte,
+// but not into p.currentToken.
+func (p *TextParser) readTokenAsLabelName() {
+	p.currentToken.Reset()
+	if !isValidLabelNameStart(p.currentByte) {
+		return
+	}
+	for {
+		p.currentToken.WriteByte(p.currentByte)
+		p.currentByte, p.err = p.buf.ReadByte()
+		if p.err != nil || !isValidLabelNameContinuation(p.currentByte) {
+			return
+		}
+	}
+}
+
+// readTokenAsLabelValue copies a label value from p.buf into p.currentToken.
+// In contrast to the other 'readTokenAs...' functions, which start with the
+// last read byte in p.currentByte, this method ignores p.currentByte and starts
+// with reading a new byte from p.buf. The first byte not part of a label value
+// is still copied into p.currentByte, but not into p.currentToken.
+func (p *TextParser) readTokenAsLabelValue() {
+	p.currentToken.Reset()
+	escaped := false
+	for {
+		if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
+			return
+		}
+		if escaped {
+			switch p.currentByte {
+			case '"', '\\':
+				p.currentToken.WriteByte(p.currentByte)
+			case 'n':
+				p.currentToken.WriteByte('\n')
+			default:
+				p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+				return
+			}
+			escaped = false
+			continue
+		}
+		switch p.currentByte {
+		case '"':
+			return
+		case '\n':
+			p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String()))
+			return
+		case '\\':
+			escaped = true
+		default:
+			p.currentToken.WriteByte(p.currentByte)
+		}
+	}
+}
+
+func (p *TextParser) setOrCreateCurrentMF() {
+	p.currentIsSummaryCount = false
+	p.currentIsSummarySum = false
+	p.currentIsHistogramCount = false
+	p.currentIsHistogramSum = false
+	name := p.currentToken.String()
+	if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil {
+		return
+	}
+	// Check whether this is a _sum or _count for a summary/histogram.
+	summaryName := summaryMetricName(name)
+	if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil {
+		if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+			if isCount(name) {
+				p.currentIsSummaryCount = true
+			}
+			if isSum(name) {
+				p.currentIsSummarySum = true
+			}
+			return
+		}
+	}
+	histogramName := histogramMetricName(name)
+	if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil {
+		if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+			if isCount(name) {
+				p.currentIsHistogramCount = true
+			}
+			if isSum(name) {
+				p.currentIsHistogramSum = true
+			}
+			return
+		}
+	}
+	p.currentMF = &dto.MetricFamily{Name: proto.String(name)}
+	p.metricFamiliesByName[name] = p.currentMF
+}
+
+func isValidLabelNameStart(b byte) bool {
+	return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_'
+}
+
+func isValidLabelNameContinuation(b byte) bool {
+	return isValidLabelNameStart(b) || (b >= '0' && b <= '9')
+}
+
+func isValidMetricNameStart(b byte) bool {
+	return isValidLabelNameStart(b) || b == ':'
+}
+
+func isValidMetricNameContinuation(b byte) bool {
+	return isValidLabelNameContinuation(b) || b == ':'
+}
+
+func isBlankOrTab(b byte) bool {
+	return b == ' ' || b == '\t'
+}
+
+func isCount(name string) bool {
+	return len(name) > 6 && name[len(name)-6:] == "_count"
+}
+
+func isSum(name string) bool {
+	return len(name) > 4 && name[len(name)-4:] == "_sum"
+}
+
+func isBucket(name string) bool {
+	return len(name) > 7 && name[len(name)-7:] == "_bucket"
+}
+
+func summaryMetricName(name string) string {
+	switch {
+	case isCount(name):
+		return name[:len(name)-6]
+	case isSum(name):
+		return name[:len(name)-4]
+	default:
+		return name
+	}
+}
+
+func histogramMetricName(name string) string {
+	switch {
+	case isCount(name):
+		return name[:len(name)-6]
+	case isSum(name):
+		return name[:len(name)-4]
+	case isBucket(name):
+		return name[:len(name)-7]
+	default:
+		return name
+	}
+}
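+
+// For example (illustrative):
+//
+//	summaryMetricName("rpc_latency_count")    // "rpc_latency"
+//	histogramMetricName("rpc_latency_bucket") // "rpc_latency"
+//	histogramMetricName("rpc_latency")        // unchanged: "rpc_latency"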
diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt
new file mode 100644
index 0000000..7723656
--- /dev/null
+++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt
@@ -0,0 +1,67 @@
+PACKAGE
+
+package goautoneg
+import "bitbucket.org/ww/goautoneg"
+
+HTTP Content-Type Autonegotiation.
+
+The functions in this package implement the behaviour specified in
+http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
+
+Copyright (c) 2011, Open Knowledge Foundation Ltd.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+
+    Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in
+    the documentation and/or other materials provided with the
+    distribution.
+
+    Neither the name of the Open Knowledge Foundation Ltd. nor the
+    names of its contributors may be used to endorse or promote
+    products derived from this software without specific prior written
+    permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+FUNCTIONS
+
+func Negotiate(header string, alternatives []string) (content_type string)
+Negotiate the most appropriate content_type given the accept header
+and a list of alternatives.
+
+func ParseAccept(header string) (accept []Accept)
+Parse an Accept Header string returning a sorted list
+of clauses
+
+
+TYPES
+
+type Accept struct {
+    Type, SubType string
+    Q             float32
+    Params        map[string]string
+}
+Structure to represent a clause in an HTTP Accept Header
+
+
+SUBDIRECTORIES
+
+	.hg
diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go
new file mode 100644
index 0000000..26e9228
--- /dev/null
+++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go
@@ -0,0 +1,162 @@
+/*
+Copyright (c) 2011, Open Knowledge Foundation Ltd.
+All rights reserved.
+
+HTTP Content-Type Autonegotiation.
+
+The functions in this package implement the behaviour specified in
+http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+
+    Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in
+    the documentation and/or other materials provided with the
+    distribution.
+
+    Neither the name of the Open Knowledge Foundation Ltd. nor the
+    names of its contributors may be used to endorse or promote
+    products derived from this software without specific prior written
+    permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+*/
+package goautoneg
+
+import (
+	"sort"
+	"strconv"
+	"strings"
+)
+
+// Structure to represent a clause in an HTTP Accept Header
+type Accept struct {
+	Type, SubType string
+	Q             float64
+	Params        map[string]string
+}
+
+// For internal use, so that we can use the sort interface
+type accept_slice []Accept
+
+func (accept accept_slice) Len() int {
+	slice := []Accept(accept)
+	return len(slice)
+}
+
+func (accept accept_slice) Less(i, j int) bool {
+	slice := []Accept(accept)
+	ai, aj := slice[i], slice[j]
+	if ai.Q > aj.Q {
+		return true
+	}
+	if ai.Type != "*" && aj.Type == "*" {
+		return true
+	}
+	if ai.SubType != "*" && aj.SubType == "*" {
+		return true
+	}
+	return false
+}
+
+func (accept accept_slice) Swap(i, j int) {
+	slice := []Accept(accept)
+	slice[i], slice[j] = slice[j], slice[i]
+}
+
+// Parse an Accept Header string returning a sorted list
+// of clauses
+func ParseAccept(header string) (accept []Accept) {
+	parts := strings.Split(header, ",")
+	accept = make([]Accept, 0, len(parts))
+	for _, part := range parts {
+		part := strings.Trim(part, " ")
+
+		a := Accept{}
+		a.Params = make(map[string]string)
+		a.Q = 1.0
+
+		mrp := strings.Split(part, ";")
+
+		media_range := mrp[0]
+		sp := strings.Split(media_range, "/")
+		a.Type = strings.Trim(sp[0], " ")
+
+		switch {
+		case len(sp) == 1 && a.Type == "*":
+			a.SubType = "*"
+		case len(sp) == 2:
+			a.SubType = strings.Trim(sp[1], " ")
+		default:
+			continue
+		}
+
+		if len(mrp) == 1 {
+			accept = append(accept, a)
+			continue
+		}
+
+		for _, param := range mrp[1:] {
+			sp := strings.SplitN(param, "=", 2)
+			if len(sp) != 2 {
+				continue
+			}
+			token := strings.Trim(sp[0], " ")
+			if token == "q" {
+				a.Q, _ = strconv.ParseFloat(sp[1], 32)
+			} else {
+				a.Params[token] = strings.Trim(sp[1], " ")
+			}
+		}
+
+		accept = append(accept, a)
+	}
+
+	slice := accept_slice(accept)
+	sort.Sort(slice)
+
+	return
+}
+
+// Negotiate the most appropriate content_type given the accept header
+// and a list of alternatives.
+func Negotiate(header string, alternatives []string) (content_type string) {
+	asp := make([][]string, 0, len(alternatives))
+	for _, ctype := range alternatives {
+		asp = append(asp, strings.SplitN(ctype, "/", 2))
+	}
+	for _, clause := range ParseAccept(header) {
+		for i, ctsp := range asp {
+			if clause.Type == ctsp[0] && clause.SubType == ctsp[1] {
+				content_type = alternatives[i]
+				return
+			}
+			if clause.Type == ctsp[0] && clause.SubType == "*" {
+				content_type = alternatives[i]
+				return
+			}
+			if clause.Type == "*" && clause.SubType == "*" {
+				content_type = alternatives[i]
+				return
+			}
+		}
+	}
+	return
+}
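+
+// A usage sketch (illustrative; the header and alternatives are made up):
+//
+//	ct := Negotiate("text/html, application/json;q=0.9",
+//		[]string{"application/json", "text/html"})
+//	// ct == "text/html": clauses are tried in descending q-value order.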
diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go
new file mode 100644
index 0000000..35e739c
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/alert.go
@@ -0,0 +1,136 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"fmt"
+	"time"
+)
+
+type AlertStatus string
+
+const (
+	AlertFiring   AlertStatus = "firing"
+	AlertResolved AlertStatus = "resolved"
+)
+
+// Alert is a generic representation of an alert in the Prometheus eco-system.
+type Alert struct {
+	// Label value pairs for purposes of aggregation, matching, and disposition
+	// dispatching. This must minimally include an "alertname" label.
+	Labels LabelSet `json:"labels"`
+
+	// Extra key/value information which does not define alert identity.
+	Annotations LabelSet `json:"annotations"`
+
+	// The known time range for this alert. Both ends are optional.
+	StartsAt     time.Time `json:"startsAt,omitempty"`
+	EndsAt       time.Time `json:"endsAt,omitempty"`
+	GeneratorURL string    `json:"generatorURL"`
+}
+
+// Name returns the name of the alert. It is equivalent to the "alertname" label.
+func (a *Alert) Name() string {
+	return string(a.Labels[AlertNameLabel])
+}
+
+// Fingerprint returns a unique hash for the alert. It is equivalent to
+// the fingerprint of the alert's label set.
+func (a *Alert) Fingerprint() Fingerprint {
+	return a.Labels.Fingerprint()
+}
+
+func (a *Alert) String() string {
+	s := fmt.Sprintf("%s[%s]", a.Name(), a.Fingerprint().String()[:7])
+	if a.Resolved() {
+		return s + "[resolved]"
+	}
+	return s + "[active]"
+}
+
+// Resolved returns true iff the activity interval ended in the past.
+func (a *Alert) Resolved() bool {
+	return a.ResolvedAt(time.Now())
+}
+
+// ResolvedAt returns true iff the activity interval ended before
+// the given timestamp.
+func (a *Alert) ResolvedAt(ts time.Time) bool {
+	if a.EndsAt.IsZero() {
+		return false
+	}
+	return !a.EndsAt.After(ts)
+}
+
+// Status returns the status of the alert.
+func (a *Alert) Status() AlertStatus {
+	if a.Resolved() {
+		return AlertResolved
+	}
+	return AlertFiring
+}
+
+// Validate returns an error if the alert data is inconsistent.
+func (a *Alert) Validate() error {
+	if a.StartsAt.IsZero() {
+		return fmt.Errorf("start time missing")
+	}
+	if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) {
+		return fmt.Errorf("start time must be before end time")
+	}
+	if err := a.Labels.Validate(); err != nil {
+		return fmt.Errorf("invalid label set: %s", err)
+	}
+	if len(a.Labels) == 0 {
+		return fmt.Errorf("at least one label pair required")
+	}
+	if err := a.Annotations.Validate(); err != nil {
+		return fmt.Errorf("invalid annotations: %s", err)
+	}
+	return nil
+}
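+
+// A minimal construction sketch (illustrative; all field values are made up):
+//
+//	a := &Alert{
+//		Labels:   LabelSet{AlertNameLabel: "HighLatency", "job": "api"},
+//		StartsAt: time.Now(),
+//	}
+//	// a.Name() == "HighLatency", a.Status() == AlertFiring, a.Validate() == nil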
+
+// Alerts is a list of alerts that can be sorted in chronological order.
+type Alerts []*Alert
+
+func (as Alerts) Len() int      { return len(as) }
+func (as Alerts) Swap(i, j int) { as[i], as[j] = as[j], as[i] }
+
+func (as Alerts) Less(i, j int) bool {
+	if as[i].StartsAt.Before(as[j].StartsAt) {
+		return true
+	}
+	if as[i].EndsAt.Before(as[j].EndsAt) {
+		return true
+	}
+	return as[i].Fingerprint() < as[j].Fingerprint()
+}
+
+// HasFiring returns true iff one of the alerts is not resolved.
+func (as Alerts) HasFiring() bool {
+	for _, a := range as {
+		if !a.Resolved() {
+			return true
+		}
+	}
+	return false
+}
+
+// Status returns AlertFiring iff at least one of the alerts is firing.
+func (as Alerts) Status() AlertStatus {
+	if as.HasFiring() {
+		return AlertFiring
+	}
+	return AlertResolved
+}
diff --git a/vendor/github.com/prometheus/common/model/fingerprinting.go b/vendor/github.com/prometheus/common/model/fingerprinting.go
new file mode 100644
index 0000000..fc4de41
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/fingerprinting.go
@@ -0,0 +1,105 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"fmt"
+	"strconv"
+)
+
+// Fingerprint provides a hash-capable representation of a Metric.
+// For our purposes, FNV-1A 64-bit is used.
+type Fingerprint uint64
+
+// FingerprintFromString transforms a string representation into a Fingerprint.
+func FingerprintFromString(s string) (Fingerprint, error) {
+	num, err := strconv.ParseUint(s, 16, 64)
+	return Fingerprint(num), err
+}
+
+// ParseFingerprint parses the input string into a fingerprint.
+func ParseFingerprint(s string) (Fingerprint, error) {
+	num, err := strconv.ParseUint(s, 16, 64)
+	if err != nil {
+		return 0, err
+	}
+	return Fingerprint(num), nil
+}
+
+func (f Fingerprint) String() string {
+	return fmt.Sprintf("%016x", uint64(f))
+}
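+
+// Round-trip sketch (illustrative):
+//
+//	fp := Fingerprint(0x1234)
+//	s := fp.String()                   // "0000000000001234"
+//	fp2, _ := FingerprintFromString(s) // fp2 == fp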
+
+// Fingerprints represents a collection of Fingerprint subject to a given
+// natural sorting scheme. It implements sort.Interface.
+type Fingerprints []Fingerprint
+
+// Len implements sort.Interface.
+func (f Fingerprints) Len() int {
+	return len(f)
+}
+
+// Less implements sort.Interface.
+func (f Fingerprints) Less(i, j int) bool {
+	return f[i] < f[j]
+}
+
+// Swap implements sort.Interface.
+func (f Fingerprints) Swap(i, j int) {
+	f[i], f[j] = f[j], f[i]
+}
+
+// FingerprintSet is a set of Fingerprints.
+type FingerprintSet map[Fingerprint]struct{}
+
+// Equal returns true if both sets contain the same elements (and not more).
+func (s FingerprintSet) Equal(o FingerprintSet) bool {
+	if len(s) != len(o) {
+		return false
+	}
+
+	for k := range s {
+		if _, ok := o[k]; !ok {
+			return false
+		}
+	}
+
+	return true
+}
+
+// Intersection returns the elements contained in both sets.
+func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet {
+	myLength, otherLength := len(s), len(o)
+	if myLength == 0 || otherLength == 0 {
+		return FingerprintSet{}
+	}
+
+	subSet := s
+	superSet := o
+
+	if otherLength < myLength {
+		subSet = o
+		superSet = s
+	}
+
+	out := FingerprintSet{}
+
+	for k := range subSet {
+		if _, ok := superSet[k]; ok {
+			out[k] = struct{}{}
+		}
+	}
+
+	return out
+}
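+
+// For example (illustrative):
+//
+//	a := FingerprintSet{1: {}, 2: {}}
+//	b := FingerprintSet{2: {}, 3: {}}
+//	a.Intersection(b) // FingerprintSet{2: {}}
+//	a.Equal(b)        // false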
diff --git a/vendor/github.com/prometheus/common/model/fnv.go b/vendor/github.com/prometheus/common/model/fnv.go
new file mode 100644
index 0000000..038fc1c
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/fnv.go
@@ -0,0 +1,42 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+// Inline and byte-free variant of hash/fnv's fnv64a.
+
+const (
+	offset64 = 14695981039346656037
+	prime64  = 1099511628211
+)
+
+// hashNew initializes a new fnv64a hash value.
+func hashNew() uint64 {
+	return offset64
+}
+
+// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
+func hashAdd(h uint64, s string) uint64 {
+	for i := 0; i < len(s); i++ {
+		h ^= uint64(s[i])
+		h *= prime64
+	}
+	return h
+}
+
+// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
+func hashAddByte(h uint64, b byte) uint64 {
+	h ^= uint64(b)
+	h *= prime64
+	return h
+}
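+
+// Usage sketch (illustrative): feeding "abc" either way yields the same
+// fnv64a value as the standard library's hash/fnv.
+//
+//	h := hashNew()
+//	h = hashAdd(h, "abc")
+//
+//	h2 := hashNew()
+//	for _, b := range []byte("abc") {
+//		h2 = hashAddByte(h2, b)
+//	}
+//	// h == h2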
diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go
new file mode 100644
index 0000000..41051a0
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/labels.go
@@ -0,0 +1,210 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"encoding/json"
+	"fmt"
+	"regexp"
+	"strings"
+	"unicode/utf8"
+)
+
+const (
+	// AlertNameLabel is the name of the label containing an alert's name.
+	AlertNameLabel = "alertname"
+
+	// ExportedLabelPrefix is the prefix to prepend to the label names present in
+	// exported metrics if a label of the same name is added by the server.
+	ExportedLabelPrefix = "exported_"
+
+	// MetricNameLabel is the label name indicating the metric name of a
+	// timeseries.
+	MetricNameLabel = "__name__"
+
+	// SchemeLabel is the name of the label that holds the scheme on which to
+	// scrape a target.
+	SchemeLabel = "__scheme__"
+
+	// AddressLabel is the name of the label that holds the address of
+	// a scrape target.
+	AddressLabel = "__address__"
+
+	// MetricsPathLabel is the name of the label that holds the path on which to
+	// scrape a target.
+	MetricsPathLabel = "__metrics_path__"
+
+	// ReservedLabelPrefix is a prefix which is not legal in user-supplied
+	// label names.
+	ReservedLabelPrefix = "__"
+
+	// MetaLabelPrefix is a prefix for labels that provide meta information.
+	// Labels with this prefix are used for intermediate label processing and
+	// will not be attached to time series.
+	MetaLabelPrefix = "__meta_"
+
+	// TmpLabelPrefix is a prefix for temporary labels as part of relabelling.
+	// Labels with this prefix are used for intermediate label processing and
+	// will not be attached to time series. This is reserved for use in
+	// Prometheus configuration files by users.
+	TmpLabelPrefix = "__tmp_"
+
+	// ParamLabelPrefix is a prefix for labels that provide URL parameters
+	// used to scrape a target.
+	ParamLabelPrefix = "__param_"
+
+	// JobLabel is the label name indicating the job from which a timeseries
+	// was scraped.
+	JobLabel = "job"
+
+	// InstanceLabel is the label name used for the instance label.
+	InstanceLabel = "instance"
+
+	// BucketLabel is used for the label that defines the upper bound of a
+	// bucket of a histogram ("le" -> "less or equal").
+	BucketLabel = "le"
+
+	// QuantileLabel is used for the label that defines the quantile in a
+	// summary.
+	QuantileLabel = "quantile"
+)
+
+// LabelNameRE is a regular expression matching valid label names. Note that the
+// IsValid method of LabelName performs the same check but faster than a match
+// with this regular expression.
+var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
+
+// A LabelName is a key for a LabelSet or Metric.  It has a value associated
+// therewith.
+type LabelName string
+
+// IsValid is true iff the label name matches the pattern of LabelNameRE. This
+// method, however, does not use LabelNameRE for the check but a much faster
+// hardcoded implementation.
+func (ln LabelName) IsValid() bool {
+	if len(ln) == 0 {
+		return false
+	}
+	for i, b := range ln {
+		if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
+			return false
+		}
+	}
+	return true
+}
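+
+// For example (illustrative):
+//
+//	LabelName("job").IsValid()      // true
+//	LabelName("__name__").IsValid() // true
+//	LabelName("0job").IsValid()     // false: must not start with a digit
+//	LabelName("job-name").IsValid() // false: '-' is not allowed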
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	var s string
+	if err := unmarshal(&s); err != nil {
+		return err
+	}
+	if !LabelName(s).IsValid() {
+		return fmt.Errorf("%q is not a valid label name", s)
+	}
+	*ln = LabelName(s)
+	return nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (ln *LabelName) UnmarshalJSON(b []byte) error {
+	var s string
+	if err := json.Unmarshal(b, &s); err != nil {
+		return err
+	}
+	if !LabelName(s).IsValid() {
+		return fmt.Errorf("%q is not a valid label name", s)
+	}
+	*ln = LabelName(s)
+	return nil
+}
+
+// LabelNames is a sortable LabelName slice. It implements sort.Interface.
+type LabelNames []LabelName
+
+func (l LabelNames) Len() int {
+	return len(l)
+}
+
+func (l LabelNames) Less(i, j int) bool {
+	return l[i] < l[j]
+}
+
+func (l LabelNames) Swap(i, j int) {
+	l[i], l[j] = l[j], l[i]
+}
+
+func (l LabelNames) String() string {
+	labelStrings := make([]string, 0, len(l))
+	for _, label := range l {
+		labelStrings = append(labelStrings, string(label))
+	}
+	return strings.Join(labelStrings, ", ")
+}
+
+// A LabelValue is an associated value for a LabelName.
+type LabelValue string
+
+// IsValid returns true iff the string is valid UTF-8.
+func (lv LabelValue) IsValid() bool {
+	return utf8.ValidString(string(lv))
+}
+
+// LabelValues is a sortable LabelValue slice. It implements sort.Interface.
+type LabelValues []LabelValue
+
+func (l LabelValues) Len() int {
+	return len(l)
+}
+
+func (l LabelValues) Less(i, j int) bool {
+	return string(l[i]) < string(l[j])
+}
+
+func (l LabelValues) Swap(i, j int) {
+	l[i], l[j] = l[j], l[i]
+}
+
+// LabelPair pairs a name with a value.
+type LabelPair struct {
+	Name  LabelName
+	Value LabelValue
+}
+
+// LabelPairs is a sortable slice of LabelPair pointers. It implements
+// sort.Interface.
+type LabelPairs []*LabelPair
+
+func (l LabelPairs) Len() int {
+	return len(l)
+}
+
+func (l LabelPairs) Less(i, j int) bool {
+	switch {
+	case l[i].Name > l[j].Name:
+		return false
+	case l[i].Name < l[j].Name:
+		return true
+	case l[i].Value > l[j].Value:
+		return false
+	case l[i].Value < l[j].Value:
+		return true
+	default:
+		return false
+	}
+}
+
+func (l LabelPairs) Swap(i, j int) {
+	l[i], l[j] = l[j], l[i]
+}
diff --git a/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go
new file mode 100644
index 0000000..6eda08a
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/labelset.go
@@ -0,0 +1,169 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"encoding/json"
+	"fmt"
+	"sort"
+	"strings"
+)
+
+// A LabelSet is a collection of LabelName and LabelValue pairs.  The LabelSet
+// may be fully-qualified down to the point where it may resolve to a single
+// Metric in the data store or not.  All operations that occur within the realm
+// of a LabelSet can emit a vector of Metric entities to which the LabelSet may
+// match.
+type LabelSet map[LabelName]LabelValue
+
+// Validate checks whether all names and values in the label set
+// are valid.
+func (ls LabelSet) Validate() error {
+	for ln, lv := range ls {
+		if !ln.IsValid() {
+			return fmt.Errorf("invalid name %q", ln)
+		}
+		if !lv.IsValid() {
+			return fmt.Errorf("invalid value %q", lv)
+		}
+	}
+	return nil
+}
+
+// Equal returns true iff both label sets have exactly the same key/value pairs.
+func (ls LabelSet) Equal(o LabelSet) bool {
+	if len(ls) != len(o) {
+		return false
+	}
+	for ln, lv := range ls {
+		olv, ok := o[ln]
+		if !ok {
+			return false
+		}
+		if olv != lv {
+			return false
+		}
+	}
+	return true
+}
+
+// Before compares the label sets, using the following criteria:
+//
+// If m has fewer labels than o, it is before o. If it has more, it is not.
+//
+// If the number of labels is the same, the superset of all label names is
+// sorted alphanumerically. The first differing label pair found in that order
+// determines the outcome: If the label does not exist at all in m, then m is
+// before o, and vice versa. Otherwise the label value is compared
+// alphanumerically.
+//
+// If m and o are equal, the method returns false.
+func (ls LabelSet) Before(o LabelSet) bool {
+	if len(ls) < len(o) {
+		return true
+	}
+	if len(ls) > len(o) {
+		return false
+	}
+
+	lns := make(LabelNames, 0, len(ls)+len(o))
+	for ln := range ls {
+		lns = append(lns, ln)
+	}
+	for ln := range o {
+		lns = append(lns, ln)
+	}
+	// It's probably not worth it to de-dup lns.
+	sort.Sort(lns)
+	for _, ln := range lns {
+		mlv, ok := ls[ln]
+		if !ok {
+			return true
+		}
+		olv, ok := o[ln]
+		if !ok {
+			return false
+		}
+		if mlv < olv {
+			return true
+		}
+		if mlv > olv {
+			return false
+		}
+	}
+	return false
+}
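+
+// For example (illustrative):
+//
+//	ls := LabelSet{"job": "api"}
+//	ls.Before(LabelSet{"instance": "a", "job": "api"}) // true: fewer labels
+//	ls.Before(LabelSet{"job": "web"})                  // true: "api" < "web"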
+
+// Clone returns a copy of the label set.
+func (ls LabelSet) Clone() LabelSet {
+	lsn := make(LabelSet, len(ls))
+	for ln, lv := range ls {
+		lsn[ln] = lv
+	}
+	return lsn
+}
+
+// Merge is a helper function to non-destructively merge two label sets.
+func (l LabelSet) Merge(other LabelSet) LabelSet {
+	result := make(LabelSet, len(l))
+
+	for k, v := range l {
+		result[k] = v
+	}
+
+	for k, v := range other {
+		result[k] = v
+	}
+
+	return result
+}
+
+func (l LabelSet) String() string {
+	lstrs := make([]string, 0, len(l))
+	for l, v := range l {
+		lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v))
+	}
+
+	sort.Strings(lstrs)
+	return fmt.Sprintf("{%s}", strings.Join(lstrs, ", "))
+}
+
+// Fingerprint returns the LabelSet's fingerprint.
+func (ls LabelSet) Fingerprint() Fingerprint {
+	return labelSetToFingerprint(ls)
+}
+
+// FastFingerprint returns the LabelSet's Fingerprint calculated by a faster hashing
+// algorithm, which is, however, more susceptible to hash collisions.
+func (ls LabelSet) FastFingerprint() Fingerprint {
+	return labelSetToFastFingerprint(ls)
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (l *LabelSet) UnmarshalJSON(b []byte) error {
+	var m map[LabelName]LabelValue
+	if err := json.Unmarshal(b, &m); err != nil {
+		return err
+	}
+	// encoding/json only unmarshals maps of the form map[string]T. It treats
+	// LabelName as a string and does not call its UnmarshalJSON method.
+	// Thus, we have to replicate the behavior here.
+	for ln := range m {
+		if !ln.IsValid() {
+			return fmt.Errorf("%q is not a valid label name", ln)
+		}
+	}
+	*l = LabelSet(m)
+	return nil
+}
diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go
new file mode 100644
index 0000000..00804b7
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/metric.go
@@ -0,0 +1,102 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"fmt"
+	"regexp"
+	"sort"
+	"strings"
+)
+
+var (
+	// MetricNameRE is a regular expression matching valid metric
+	// names. Note that the IsValidMetricName function performs the same
+	// check but faster than a match with this regular expression.
+	MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`)
+)
+
+// A Metric is similar to a LabelSet, but the key difference is that a Metric is
+// a singleton and refers to one and only one stream of samples.
+type Metric LabelSet
+
+// Equal compares the metrics.
+func (m Metric) Equal(o Metric) bool {
+	return LabelSet(m).Equal(LabelSet(o))
+}
+
+// Before compares the metrics' underlying label sets.
+func (m Metric) Before(o Metric) bool {
+	return LabelSet(m).Before(LabelSet(o))
+}
+
+// Clone returns a copy of the Metric.
+func (m Metric) Clone() Metric {
+	clone := make(Metric, len(m))
+	for k, v := range m {
+		clone[k] = v
+	}
+	return clone
+}
+
+func (m Metric) String() string {
+	metricName, hasName := m[MetricNameLabel]
+	numLabels := len(m) - 1
+	if !hasName {
+		numLabels = len(m)
+	}
+	labelStrings := make([]string, 0, numLabels)
+	for label, value := range m {
+		if label != MetricNameLabel {
+			labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value))
+		}
+	}
+
+	switch numLabels {
+	case 0:
+		if hasName {
+			return string(metricName)
+		}
+		return "{}"
+	default:
+		sort.Strings(labelStrings)
+		return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", "))
+	}
+}
+
+// Fingerprint returns a Metric's Fingerprint.
+func (m Metric) Fingerprint() Fingerprint {
+	return LabelSet(m).Fingerprint()
+}
+
+// FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing
+// algorithm, which is, however, more susceptible to hash collisions.
+func (m Metric) FastFingerprint() Fingerprint {
+	return LabelSet(m).FastFingerprint()
+}
+
+// IsValidMetricName returns true iff name matches the pattern of MetricNameRE.
+// This function, however, does not use MetricNameRE for the check but a much
+// faster hardcoded implementation.
+func IsValidMetricName(n LabelValue) bool {
+	if len(n) == 0 {
+		return false
+	}
+	for i, b := range n {
+		if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) {
+			return false
+		}
+	}
+	return true
+}
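+
+// For example (illustrative):
+//
+//	IsValidMetricName("http_requests_total") // true
+//	IsValidMetricName("node:cpu:rate5m")     // true: colons are allowed
+//	IsValidMetricName("5xx_errors")          // false: leading digit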
diff --git a/vendor/github.com/prometheus/common/model/model.go b/vendor/github.com/prometheus/common/model/model.go
new file mode 100644
index 0000000..a7b9691
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/model.go
@@ -0,0 +1,16 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package model contains common data structures that are shared across
+// Prometheus components and libraries.
+package model
diff --git a/vendor/github.com/prometheus/common/model/signature.go b/vendor/github.com/prometheus/common/model/signature.go
new file mode 100644
index 0000000..8762b13
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/signature.go
@@ -0,0 +1,144 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"sort"
+)
+
+// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is
+// used to separate label names, label values, and other strings from each other
+// when calculating their combined hash value (aka signature aka fingerprint).
+const SeparatorByte byte = 255
+
+var (
+	// cache the signature of an empty label set.
+	emptyLabelSignature = hashNew()
+)
+
+// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a
+// given label set. (Collisions are possible but unlikely if the number of label
+// sets the function is applied to is small.)
+func LabelsToSignature(labels map[string]string) uint64 {
+	if len(labels) == 0 {
+		return emptyLabelSignature
+	}
+
+	labelNames := make([]string, 0, len(labels))
+	for labelName := range labels {
+		labelNames = append(labelNames, labelName)
+	}
+	sort.Strings(labelNames)
+
+	sum := hashNew()
+	for _, labelName := range labelNames {
+		sum = hashAdd(sum, labelName)
+		sum = hashAddByte(sum, SeparatorByte)
+		sum = hashAdd(sum, labels[labelName])
+		sum = hashAddByte(sum, SeparatorByte)
+	}
+	return sum
+}
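+
+// Usage sketch (illustrative): the result is independent of map iteration
+// order because the label names are sorted first.
+//
+//	s1 := LabelsToSignature(map[string]string{"a": "1", "b": "2"})
+//	s2 := LabelsToSignature(map[string]string{"b": "2", "a": "1"})
+//	// s1 == s2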
+
+// labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet as
+// parameter (rather than a label map) and returns a Fingerprint.
+func labelSetToFingerprint(ls LabelSet) Fingerprint {
+	if len(ls) == 0 {
+		return Fingerprint(emptyLabelSignature)
+	}
+
+	labelNames := make(LabelNames, 0, len(ls))
+	for labelName := range ls {
+		labelNames = append(labelNames, labelName)
+	}
+	sort.Sort(labelNames)
+
+	sum := hashNew()
+	for _, labelName := range labelNames {
+		sum = hashAdd(sum, string(labelName))
+		sum = hashAddByte(sum, SeparatorByte)
+		sum = hashAdd(sum, string(ls[labelName]))
+		sum = hashAddByte(sum, SeparatorByte)
+	}
+	return Fingerprint(sum)
+}
+
+// labelSetToFastFingerprint works similarly to labelSetToFingerprint but uses
+// a faster, less allocation-heavy hash function that is more prone to hash
+// collisions. Therefore, collision detection should be applied.
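+// Combining the per-label hashes with XOR makes the result independent of
+// map iteration order, which is why no sorting is needed here, at the cost
+// of a weaker hash than the ordered, separator-delimited stream above.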
+func labelSetToFastFingerprint(ls LabelSet) Fingerprint {
+	if len(ls) == 0 {
+		return Fingerprint(emptyLabelSignature)
+	}
+
+	var result uint64
+	for labelName, labelValue := range ls {
+		sum := hashNew()
+		sum = hashAdd(sum, string(labelName))
+		sum = hashAddByte(sum, SeparatorByte)
+		sum = hashAdd(sum, string(labelValue))
+		result ^= sum
+	}
+	return Fingerprint(result)
+}
+
+// SignatureForLabels works like LabelsToSignature but takes a Metric as
+// parameter (rather than a label map) and only includes the labels with the
+// specified LabelNames in the signature calculation. The labels passed in
+// will be sorted by this function.
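+// For example (illustrative):
+//
+//	m := Metric{"__name__": "up", "job": "api", "instance": "a"}
+//	sig := SignatureForLabels(m, "job") // hashes only the "job" label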
+func SignatureForLabels(m Metric, labels ...LabelName) uint64 {
+	if len(labels) == 0 {
+		return emptyLabelSignature
+	}
+
+	sort.Sort(LabelNames(labels))
+
+	sum := hashNew()
+	for _, label := range labels {
+		sum = hashAdd(sum, string(label))
+		sum = hashAddByte(sum, SeparatorByte)
+		sum = hashAdd(sum, string(m[label]))
+		sum = hashAddByte(sum, SeparatorByte)
+	}
+	return sum
+}
+
+// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as
+// parameter (rather than a label map) and excludes the labels with any of the
+// specified LabelNames from the signature calculation.
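+// For example (illustrative):
+//
+//	// Given a Metric m as above:
+//	exclude := map[LabelName]struct{}{"instance": {}}
+//	sig := SignatureWithoutLabels(m, exclude) // hashes all labels but "instance"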
+func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 {
+	if len(m) == 0 {
+		return emptyLabelSignature
+	}
+
+	labelNames := make(LabelNames, 0, len(m))
+	for labelName := range m {
+		if _, exclude := labels[labelName]; !exclude {
+			labelNames = append(labelNames, labelName)
+		}
+	}
+	if len(labelNames) == 0 {
+		return emptyLabelSignature
+	}
+	sort.Sort(labelNames)
+
+	sum := hashNew()
+	for _, labelName := range labelNames {
+		sum = hashAdd(sum, string(labelName))
+		sum = hashAddByte(sum, SeparatorByte)
+		sum = hashAdd(sum, string(m[labelName]))
+		sum = hashAddByte(sum, SeparatorByte)
+	}
+	return sum
+}
diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go
new file mode 100644
index 0000000..bb99889
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/silence.go
@@ -0,0 +1,106 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"encoding/json"
+	"fmt"
+	"regexp"
+	"time"
+)
+
+// Matcher describes how to match the value of a given label.
+type Matcher struct {
+	Name    LabelName `json:"name"`
+	Value   string    `json:"value"`
+	IsRegex bool      `json:"isRegex"`
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (m *Matcher) UnmarshalJSON(b []byte) error {
+	type plain Matcher
+	if err := json.Unmarshal(b, (*plain)(m)); err != nil {
+		return err
+	}
+
+	if len(m.Name) == 0 {
+		return fmt.Errorf("label name in matcher must not be empty")
+	}
+	if m.IsRegex {
+		if _, err := regexp.Compile(m.Value); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Validate returns nil iff all fields of the matcher have valid values.
+func (m *Matcher) Validate() error {
+	if !m.Name.IsValid() {
+		return fmt.Errorf("invalid name %q", m.Name)
+	}
+	if m.IsRegex {
+		if _, err := regexp.Compile(m.Value); err != nil {
+			return fmt.Errorf("invalid regular expression %q", m.Value)
+		}
+	} else if !LabelValue(m.Value).IsValid() || len(m.Value) == 0 {
+		return fmt.Errorf("invalid value %q", m.Value)
+	}
+	return nil
+}
+
+// Silence defines the representation of a silence in the Prometheus
+// ecosystem.
+type Silence struct {
+	ID uint64 `json:"id,omitempty"`
+
+	Matchers []*Matcher `json:"matchers"`
+
+	StartsAt time.Time `json:"startsAt"`
+	EndsAt   time.Time `json:"endsAt"`
+
+	CreatedAt time.Time `json:"createdAt,omitempty"`
+	CreatedBy string    `json:"createdBy"`
+	Comment   string    `json:"comment,omitempty"`
+}
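+
+// A minimal silence that passes Validate looks like (illustrative):
+//
+//	s := &Silence{
+//		Matchers:  []*Matcher{{Name: "job", Value: "api"}},
+//		StartsAt:  time.Now(),
+//		EndsAt:    time.Now().Add(time.Hour),
+//		CreatedAt: time.Now(),
+//		CreatedBy: "ops@example.org",
+//		Comment:   "maintenance window",
+//	}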
+
+// Validate returns nil iff all fields of the silence have valid values.
+func (s *Silence) Validate() error {
+	if len(s.Matchers) == 0 {
+		return fmt.Errorf("at least one matcher required")
+	}
+	for _, m := range s.Matchers {
+		if err := m.Validate(); err != nil {
+			return fmt.Errorf("invalid matcher: %s", err)
+		}
+	}
+	if s.StartsAt.IsZero() {
+		return fmt.Errorf("start time missing")
+	}
+	if s.EndsAt.IsZero() {
+		return fmt.Errorf("end time missing")
+	}
+	if s.EndsAt.Before(s.StartsAt) {
+		return fmt.Errorf("end time must not be before start time")
+	}
+	if s.CreatedBy == "" {
+		return fmt.Errorf("creator information missing")
+	}
+	if s.Comment == "" {
+		return fmt.Errorf("comment missing")
+	}
+	if s.CreatedAt.IsZero() {
+		return fmt.Errorf("creation timestamp missing")
+	}
+	return nil
+}
diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go
new file mode 100644
index 0000000..46259b1
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/time.go
@@ -0,0 +1,264 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"fmt"
+	"math"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+)
+
+const (
+	// minimumTick is the minimum supported time resolution. It must evenly
+	// divide time.Second for the conversions below to work.
+	minimumTick = time.Millisecond
+	// second is the number of minimum ticks in one second.
+	second = int64(time.Second / minimumTick)
+	// nanosPerTick is the number of nanoseconds per minimum tick.
+	nanosPerTick = int64(minimumTick / time.Nanosecond)
+
+	// Earliest is the earliest Time representable. Handy for
+	// initializing a high watermark.
+	Earliest = Time(math.MinInt64)
+	// Latest is the latest Time representable. Handy for initializing
+	// a low watermark.
+	Latest = Time(math.MaxInt64)
+)
+
+// Time is the number of milliseconds since the epoch
+// (1970-01-01 00:00 UTC) excluding leap seconds.
+type Time int64
+
+// Interval describes an interval between two timestamps.
+type Interval struct {
+	Start, End Time
+}
+
+// Now returns the current time as a Time.
+func Now() Time {
+	return TimeFromUnixNano(time.Now().UnixNano())
+}
+
+// TimeFromUnix returns the Time equivalent to the Unix Time t
+// provided in seconds.
+func TimeFromUnix(t int64) Time {
+	return Time(t * second)
+}
+
+// TimeFromUnixNano returns the Time equivalent to the Unix Time
+// t provided in nanoseconds.
+func TimeFromUnixNano(t int64) Time {
+	return Time(t / nanosPerTick)
+}
+
+// Equal reports whether two Times represent the same instant.
+func (t Time) Equal(o Time) bool {
+	return t == o
+}
+
+// Before reports whether the Time t is before o.
+func (t Time) Before(o Time) bool {
+	return t < o
+}
+
+// After reports whether the Time t is after o.
+func (t Time) After(o Time) bool {
+	return t > o
+}
+
+// Add returns the Time t + d.
+func (t Time) Add(d time.Duration) Time {
+	return t + Time(d/minimumTick)
+}
+
+// Sub returns the Duration t - o.
+func (t Time) Sub(o Time) time.Duration {
+	return time.Duration(t-o) * minimumTick
+}
+
+// Time returns the time.Time representation of t.
+func (t Time) Time() time.Time {
+	return time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick)
+}
+
+// Unix returns t as a Unix time, the number of seconds elapsed
+// since January 1, 1970 UTC.
+func (t Time) Unix() int64 {
+	return int64(t) / second
+}
+
+// UnixNano returns t as a Unix time, the number of nanoseconds elapsed
+// since January 1, 1970 UTC.
+func (t Time) UnixNano() int64 {
+	return int64(t) * nanosPerTick
+}
+
+// dotPrecision is the number of digits after the dot in the string
+// representation of a Time.
+var dotPrecision = int(math.Log10(float64(second)))
+
+// String returns a string representation of the Time.
+func (t Time) String() string {
+	return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64)
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (t Time) MarshalJSON() ([]byte, error) {
+	return []byte(t.String()), nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
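+// Both "1435781451" (whole seconds) and "1435781451.781" (fractional seconds)
+// are accepted; fractional digits beyond millisecond precision are truncated,
+// e.g. "1.2345" parses the same as "1.234".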
+func (t *Time) UnmarshalJSON(b []byte) error {
+	p := strings.Split(string(b), ".")
+	switch len(p) {
+	case 1:
+		v, err := strconv.ParseInt(p[0], 10, 64)
+		if err != nil {
+			return err
+		}
+		*t = Time(v * second)
+
+	case 2:
+		v, err := strconv.ParseInt(p[0], 10, 64)
+		if err != nil {
+			return err
+		}
+		v *= second
+
+		prec := dotPrecision - len(p[1])
+		if prec < 0 {
+			p[1] = p[1][:dotPrecision]
+		} else if prec > 0 {
+			p[1] = p[1] + strings.Repeat("0", prec)
+		}
+
+		va, err := strconv.ParseInt(p[1], 10, 32)
+		if err != nil {
+			return err
+		}
+
+		*t = Time(v + va)
+
+	default:
+		return fmt.Errorf("invalid time %q", string(b))
+	}
+	return nil
+}
+
+// Duration wraps time.Duration. It is used to parse the custom duration format
+// from YAML.
+// This type should not propagate beyond the scope of input/output processing.
+type Duration time.Duration
+
+// Set implements the flag.Value and pflag.Value interfaces.
+func (d *Duration) Set(s string) error {
+	var err error
+	*d, err = ParseDuration(s)
+	return err
+}
+
+// Type implements the pflag.Value interface.
+func (d *Duration) Type() string {
+	return "duration"
+}
+
+var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$")
+
+// ParseDuration parses a string into a Duration, assuming that a year
+// always has 365d, a week always has 7d, and a day always has 24h.
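+// Note that the format is a single integer followed by a single unit, so
+// compound strings such as "1h30m" are rejected. For example (illustrative):
+//
+//	d, err := ParseDuration("90m") // 90 minutes
+//	if err == nil {
+//		fmt.Println(time.Duration(d)) // 1h30m0s
+//	}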
+func ParseDuration(durationStr string) (Duration, error) {
+	matches := durationRE.FindStringSubmatch(durationStr)
+	if len(matches) != 3 {
+		return 0, fmt.Errorf("not a valid duration string: %q", durationStr)
+	}
+	var (
+		n, _ = strconv.Atoi(matches[1])
+		dur  = time.Duration(n) * time.Millisecond
+	)
+	switch unit := matches[2]; unit {
+	case "y":
+		dur *= 1000 * 60 * 60 * 24 * 365
+	case "w":
+		dur *= 1000 * 60 * 60 * 24 * 7
+	case "d":
+		dur *= 1000 * 60 * 60 * 24
+	case "h":
+		dur *= 1000 * 60 * 60
+	case "m":
+		dur *= 1000 * 60
+	case "s":
+		dur *= 1000
+	case "ms":
+		// Value already correct
+	default:
+		return 0, fmt.Errorf("invalid time unit in duration string: %q", unit)
+	}
+	return Duration(dur), nil
+}
+
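+// String renders the duration using the largest unit that divides it evenly,
+// so the output round-trips through ParseDuration. For example (illustrative):
+//
+//	Duration(90 * time.Minute).String() // "90m"
+//	Duration(2 * time.Hour).String()    // "2h"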
+func (d Duration) String() string {
+	var (
+		ms   = int64(time.Duration(d) / time.Millisecond)
+		unit = "ms"
+	)
+	if ms == 0 {
+		return "0s"
+	}
+	factors := map[string]int64{
+		"y":  1000 * 60 * 60 * 24 * 365,
+		"w":  1000 * 60 * 60 * 24 * 7,
+		"d":  1000 * 60 * 60 * 24,
+		"h":  1000 * 60 * 60,
+		"m":  1000 * 60,
+		"s":  1000,
+		"ms": 1,
+	}
+
+	switch int64(0) {
+	case ms % factors["y"]:
+		unit = "y"
+	case ms % factors["w"]:
+		unit = "w"
+	case ms % factors["d"]:
+		unit = "d"
+	case ms % factors["h"]:
+		unit = "h"
+	case ms % factors["m"]:
+		unit = "m"
+	case ms % factors["s"]:
+		unit = "s"
+	}
+	return fmt.Sprintf("%v%v", ms/factors[unit], unit)
+}
+
+// MarshalYAML implements the yaml.Marshaler interface.
+func (d Duration) MarshalYAML() (interface{}, error) {
+	return d.String(), nil
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	var s string
+	if err := unmarshal(&s); err != nil {
+		return err
+	}
+	dur, err := ParseDuration(s)
+	if err != nil {
+		return err
+	}
+	*d = dur
+	return nil
+}
diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go
new file mode 100644
index 0000000..c9d8fb1
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/value.go
@@ -0,0 +1,416 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"encoding/json"
+	"fmt"
+	"math"
+	"sort"
+	"strconv"
+	"strings"
+)
+
+var (
+	// ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
+	// non-existing sample pair. It is a SamplePair with timestamp Earliest and
+	// value 0.0. Note that the natural zero value of SamplePair has a timestamp
+	// of 0, which is possible to appear in a real SamplePair and thus not
+	// suitable to signal a non-existing SamplePair.
+	ZeroSamplePair = SamplePair{Timestamp: Earliest}
+
+	// ZeroSample is the pseudo zero-value of Sample used to signal a
+	// non-existing sample. It is a Sample with timestamp Earliest, value 0.0,
+	// and metric nil. Note that the natural zero value of Sample has a timestamp
+	// of 0, which is possible to appear in a real Sample and thus not suitable
+	// to signal a non-existing Sample.
+	ZeroSample = Sample{Timestamp: Earliest}
+)
+
+// A SampleValue is a representation of a value for a given sample at a given
+// time.
+type SampleValue float64
+
+// MarshalJSON implements json.Marshaler.
+func (v SampleValue) MarshalJSON() ([]byte, error) {
+	return json.Marshal(v.String())
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (v *SampleValue) UnmarshalJSON(b []byte) error {
+	if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
+		return fmt.Errorf("sample value must be a quoted string")
+	}
+	f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
+	if err != nil {
+		return err
+	}
+	*v = SampleValue(f)
+	return nil
+}
+
+// Equal returns true if the values of v and o are equal or if both are NaN.
+// Note that v==o is false if both are NaN. If you want the conventional float
+// behavior, use == to compare two SampleValues.
+func (v SampleValue) Equal(o SampleValue) bool {
+	if v == o {
+		return true
+	}
+	return math.IsNaN(float64(v)) && math.IsNaN(float64(o))
+}
+
+func (v SampleValue) String() string {
+	return strconv.FormatFloat(float64(v), 'f', -1, 64)
+}
+
+// SamplePair pairs a SampleValue with a Timestamp.
+type SamplePair struct {
+	Timestamp Time
+	Value     SampleValue
+}
+
+// MarshalJSON implements json.Marshaler.
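+// The wire format is a two-element JSON array holding the timestamp (float
+// seconds) and the value rendered as a quoted string, e.g. (illustrative):
+//
+//	[1435781451.781,"1"]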
+func (s SamplePair) MarshalJSON() ([]byte, error) {
+	t, err := json.Marshal(s.Timestamp)
+	if err != nil {
+		return nil, err
+	}
+	v, err := json.Marshal(s.Value)
+	if err != nil {
+		return nil, err
+	}
+	return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *SamplePair) UnmarshalJSON(b []byte) error {
+	v := [...]json.Unmarshaler{&s.Timestamp, &s.Value}
+	return json.Unmarshal(b, &v)
+}
+
+// Equal returns true if this SamplePair and o have equal Values and equal
+// Timestamps. The semantics of Value equality is defined by SampleValue.Equal.
+func (s *SamplePair) Equal(o *SamplePair) bool {
+	return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp))
+}
+
+func (s SamplePair) String() string {
+	return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp)
+}
+
+// Sample is a sample pair associated with a metric.
+type Sample struct {
+	Metric    Metric      `json:"metric"`
+	Value     SampleValue `json:"value"`
+	Timestamp Time        `json:"timestamp"`
+}
+
+// Equal compares first the metrics, then the timestamp, then the value. The
+// semantics of value equality is defined by SampleValue.Equal.
+func (s *Sample) Equal(o *Sample) bool {
+	if s == o {
+		return true
+	}
+
+	if !s.Metric.Equal(o.Metric) {
+		return false
+	}
+	if !s.Timestamp.Equal(o.Timestamp) {
+		return false
+	}
+
+	return s.Value.Equal(o.Value)
+}
+
+func (s Sample) String() string {
+	return fmt.Sprintf("%s => %s", s.Metric, SamplePair{
+		Timestamp: s.Timestamp,
+		Value:     s.Value,
+	})
+}
+
+// MarshalJSON implements json.Marshaler.
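+// On the wire, the metric is paired with a SamplePair, e.g. (illustrative):
+//
+//	{"metric":{"__name__":"up","job":"api"},"value":[1435781451.781,"1"]}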
+func (s Sample) MarshalJSON() ([]byte, error) {
+	v := struct {
+		Metric Metric     `json:"metric"`
+		Value  SamplePair `json:"value"`
+	}{
+		Metric: s.Metric,
+		Value: SamplePair{
+			Timestamp: s.Timestamp,
+			Value:     s.Value,
+		},
+	}
+
+	return json.Marshal(&v)
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *Sample) UnmarshalJSON(b []byte) error {
+	v := struct {
+		Metric Metric     `json:"metric"`
+		Value  SamplePair `json:"value"`
+	}{
+		Metric: s.Metric,
+		Value: SamplePair{
+			Timestamp: s.Timestamp,
+			Value:     s.Value,
+		},
+	}
+
+	if err := json.Unmarshal(b, &v); err != nil {
+		return err
+	}
+
+	s.Metric = v.Metric
+	s.Timestamp = v.Value.Timestamp
+	s.Value = v.Value.Value
+
+	return nil
+}
+
+// Samples is a sortable Sample slice. It implements sort.Interface.
+type Samples []*Sample
+
+func (s Samples) Len() int {
+	return len(s)
+}
+
+// Less compares first the metrics, then the timestamp.
+func (s Samples) Less(i, j int) bool {
+	switch {
+	case s[i].Metric.Before(s[j].Metric):
+		return true
+	case s[j].Metric.Before(s[i].Metric):
+		return false
+	case s[i].Timestamp.Before(s[j].Timestamp):
+		return true
+	default:
+		return false
+	}
+}
+
+func (s Samples) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+// Equal compares two sets of samples and returns true if they are equal.
+func (s Samples) Equal(o Samples) bool {
+	if len(s) != len(o) {
+		return false
+	}
+
+	for i, sample := range s {
+		if !sample.Equal(o[i]) {
+			return false
+		}
+	}
+	return true
+}
+
+// SampleStream is a stream of Values belonging to an attached Metric.
+type SampleStream struct {
+	Metric Metric       `json:"metric"`
+	Values []SamplePair `json:"values"`
+}
+
+func (ss SampleStream) String() string {
+	vals := make([]string, len(ss.Values))
+	for i, v := range ss.Values {
+		vals[i] = v.String()
+	}
+	return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n"))
+}
+
+// Value is a generic interface for values resulting from a query evaluation.
+type Value interface {
+	Type() ValueType
+	String() string
+}
+
+func (Matrix) Type() ValueType  { return ValMatrix }
+func (Vector) Type() ValueType  { return ValVector }
+func (*Scalar) Type() ValueType { return ValScalar }
+func (*String) Type() ValueType { return ValString }
+
+// ValueType indicates the kind of value produced by a query evaluation.
+type ValueType int
+
+const (
+	ValNone ValueType = iota
+	ValScalar
+	ValVector
+	ValMatrix
+	ValString
+)
+
+// MarshalJSON implements json.Marshaler.
+func (et ValueType) MarshalJSON() ([]byte, error) {
+	return json.Marshal(et.String())
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (et *ValueType) UnmarshalJSON(b []byte) error {
+	var s string
+	if err := json.Unmarshal(b, &s); err != nil {
+		return err
+	}
+	switch s {
+	case "<ValNone>":
+		*et = ValNone
+	case "scalar":
+		*et = ValScalar
+	case "vector":
+		*et = ValVector
+	case "matrix":
+		*et = ValMatrix
+	case "string":
+		*et = ValString
+	default:
+		return fmt.Errorf("unknown value type %q", s)
+	}
+	return nil
+}
+
+func (e ValueType) String() string {
+	switch e {
+	case ValNone:
+		return "<ValNone>"
+	case ValScalar:
+		return "scalar"
+	case ValVector:
+		return "vector"
+	case ValMatrix:
+		return "matrix"
+	case ValString:
+		return "string"
+	}
+	panic("ValueType.String: unhandled value type")
+}
+
+// Scalar is a scalar value evaluated at the set timestamp.
+type Scalar struct {
+	Value     SampleValue `json:"value"`
+	Timestamp Time        `json:"timestamp"`
+}
+
+func (s Scalar) String() string {
+	return fmt.Sprintf("scalar: %v @[%v]", s.Value, s.Timestamp)
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s Scalar) MarshalJSON() ([]byte, error) {
+	v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64)
+	return json.Marshal([...]interface{}{s.Timestamp, v})
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *Scalar) UnmarshalJSON(b []byte) error {
+	var f string
+	v := [...]interface{}{&s.Timestamp, &f}
+
+	if err := json.Unmarshal(b, &v); err != nil {
+		return err
+	}
+
+	value, err := strconv.ParseFloat(f, 64)
+	if err != nil {
+		return fmt.Errorf("error parsing sample value: %s", err)
+	}
+	s.Value = SampleValue(value)
+	return nil
+}
+
+// String is a string value evaluated at the set timestamp.
+type String struct {
+	Value     string `json:"value"`
+	Timestamp Time   `json:"timestamp"`
+}
+
+func (s *String) String() string {
+	return s.Value
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s String) MarshalJSON() ([]byte, error) {
+	return json.Marshal([]interface{}{s.Timestamp, s.Value})
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *String) UnmarshalJSON(b []byte) error {
+	v := [...]interface{}{&s.Timestamp, &s.Value}
+	return json.Unmarshal(b, &v)
+}
+
+// Vector is essentially an alias for Samples, with the added contract that
+// all Samples in a Vector share the same timestamp.
+type Vector []*Sample
+
+func (vec Vector) String() string {
+	entries := make([]string, len(vec))
+	for i, s := range vec {
+		entries[i] = s.String()
+	}
+	return strings.Join(entries, "\n")
+}
+
+func (vec Vector) Len() int      { return len(vec) }
+func (vec Vector) Swap(i, j int) { vec[i], vec[j] = vec[j], vec[i] }
+
+// Less compares first the metrics, then the timestamp.
+func (vec Vector) Less(i, j int) bool {
+	switch {
+	case vec[i].Metric.Before(vec[j].Metric):
+		return true
+	case vec[j].Metric.Before(vec[i].Metric):
+		return false
+	case vec[i].Timestamp.Before(vec[j].Timestamp):
+		return true
+	default:
+		return false
+	}
+}
+
+// Equal compares two sets of samples and returns true if they are equal.
+func (vec Vector) Equal(o Vector) bool {
+	if len(vec) != len(o) {
+		return false
+	}
+
+	for i, sample := range vec {
+		if !sample.Equal(o[i]) {
+			return false
+		}
+	}
+	return true
+}
+
+// Matrix is a list of time series.
+type Matrix []*SampleStream
+
+func (m Matrix) Len() int           { return len(m) }
+func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) }
+func (m Matrix) Swap(i, j int)      { m[i], m[j] = m[j], m[i] }
+
+func (mat Matrix) String() string {
+	matCp := make(Matrix, len(mat))
+	copy(matCp, mat)
+	sort.Sort(matCp)
+
+	strs := make([]string, len(matCp))
+
+	for i, ss := range matCp {
+		strs[i] = ss.String()
+	}
+
+	return strings.Join(strs, "\n")
+}
diff --git a/vendor/github.com/prometheus/procfs/.gitignore b/vendor/github.com/prometheus/procfs/.gitignore
new file mode 100644
index 0000000..25e3659
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/.gitignore
@@ -0,0 +1 @@
+/fixtures/
diff --git a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md
new file mode 100644
index 0000000..40503ed
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md
@@ -0,0 +1,18 @@
+# Contributing
+
+Prometheus uses GitHub to manage reviews of pull requests.
+
+* If you have a trivial fix or improvement, go ahead and create a pull request,
+  addressing (with `@...`) the maintainer of this repository (see
+  [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request.
+
+* If you plan to do something more involved, first discuss your ideas
+  on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
+  This will avoid unnecessary work and surely give you and us a good deal
+  of inspiration.
+
+* Relevant coding style guidelines are the [Go Code Review
+  Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments)
+  and the _Formatting and style_ section of Peter Bourgon's [Go: Best
+  Practices for Production
+  Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style).
diff --git a/vendor/github.com/prometheus/procfs/LICENSE b/vendor/github.com/prometheus/procfs/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/prometheus/procfs/MAINTAINERS.md b/vendor/github.com/prometheus/procfs/MAINTAINERS.md
new file mode 100644
index 0000000..35993c4
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/MAINTAINERS.md
@@ -0,0 +1 @@
+* Tobias Schmidt <tobidt@gmail.com>
diff --git a/vendor/github.com/prometheus/procfs/Makefile b/vendor/github.com/prometheus/procfs/Makefile
new file mode 100644
index 0000000..947d7d8
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/Makefile
@@ -0,0 +1,30 @@
+# Copyright 2018 The Prometheus Authors
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+include Makefile.common
+
+%/.unpacked: %.ttar
+	./ttar -C $(dir $*) -x -f $*.ttar
+	touch $@
+
+update_fixtures: fixtures.ttar sysfs/fixtures.ttar
+
+%fixtures.ttar: %/fixtures
+	rm -v $(dir $*)fixtures/.unpacked
+	./ttar -C $(dir $*) -c -f $*fixtures.ttar fixtures/
+
+.PHONY: build
+build:
+
+.PHONY: test
+test: fixtures/.unpacked sysfs/fixtures/.unpacked common-test
diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common
new file mode 100644
index 0000000..741579e
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/Makefile.common
@@ -0,0 +1,223 @@
+# Copyright 2018 The Prometheus Authors
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# A common Makefile that includes rules to be reused in different prometheus projects.
+# !!! Open PRs only against Makefile.common in the prometheus/prometheus repository!
+
+# Example usage:
+# Create the main Makefile in the root project directory.
+# include Makefile.common
+# customTarget:
+# 	@echo ">> Running customTarget"
+#
+
+# Ensure GOBIN is not set during build so that promu is installed to the correct path
+unexport GOBIN
+
+GO           ?= go
+GOFMT        ?= $(GO)fmt
+FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH)))
+GOOPTS       ?=
+
+GO_VERSION        ?= $(shell $(GO) version)
+GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION))
+PRE_GO_111        ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.')
+
+unexport GOVENDOR
+ifeq (, $(PRE_GO_111))
+	ifneq (,$(wildcard go.mod))
+		# Enforce Go modules support just in case the directory is inside GOPATH (and for Travis CI).
+		GO111MODULE := on
+
+		ifneq (,$(wildcard vendor))
+			# Always use the local vendor/ directory to satisfy the dependencies.
+			GOOPTS := $(GOOPTS) -mod=vendor
+		endif
+	endif
+else
+	ifneq (,$(wildcard go.mod))
+		ifneq (,$(wildcard vendor))
+$(warning This repository requires Go >= 1.11 because of Go modules)
+$(warning Some recipes may not work as expected as the current Go runtime is '$(GO_VERSION_NUMBER)')
+		endif
+	else
+		# This repository isn't using Go modules (yet).
+		GOVENDOR := $(FIRST_GOPATH)/bin/govendor
+	endif
+
+	unexport GO111MODULE
+endif
+PROMU        := $(FIRST_GOPATH)/bin/promu
+STATICCHECK  := $(FIRST_GOPATH)/bin/staticcheck
+pkgs          = ./...
+
+GO_VERSION        ?= $(shell $(GO) version)
+GO_BUILD_PLATFORM ?= $(subst /,-,$(lastword $(GO_VERSION)))
+
+PROMU_VERSION ?= 0.2.0
+PROMU_URL     := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
+
+PREFIX                  ?= $(shell pwd)
+BIN_DIR                 ?= $(shell pwd)
+DOCKER_IMAGE_TAG        ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD))
+DOCKER_REPO             ?= prom
+
+.PHONY: all
+all: precheck style staticcheck unused build test
+
+# This rule is used to forward a target like "build" to "common-build". It
+# allows a Makefile that includes this one to define its own "build" target,
+# overriding "common-build" without override warnings.
+%: common-% ;
+
+.PHONY: common-style
+common-style:
+	@echo ">> checking code style"
+	@fmtRes=$$($(GOFMT) -d $$(find . -path ./vendor -prune -o -name '*.go' -print)); \
+	if [ -n "$${fmtRes}" ]; then \
+		echo "gofmt checking failed!"; echo "$${fmtRes}"; echo; \
+		echo "Please ensure you are using $$($(GO) version) for formatting code."; \
+		exit 1; \
+	fi
+
+.PHONY: common-check_license
+common-check_license:
+	@echo ">> checking license header"
+	@licRes=$$(for file in $$(find . -type f -iname '*.go' ! -path './vendor/*') ; do \
+               awk 'NR<=3' $$file | grep -Eq "(Copyright|generated|GENERATED)" || echo $$file; \
+       done); \
+       if [ -n "$${licRes}" ]; then \
+               echo "license header checking failed:"; echo "$${licRes}"; \
+               exit 1; \
+       fi
+
+.PHONY: common-test-short
+common-test-short:
+	@echo ">> running short tests"
+	GO111MODULE=$(GO111MODULE) $(GO) test -short $(GOOPTS) $(pkgs)
+
+.PHONY: common-test
+common-test:
+	@echo ">> running all tests"
+	GO111MODULE=$(GO111MODULE) $(GO) test -race $(GOOPTS) $(pkgs)
+
+.PHONY: common-format
+common-format:
+	@echo ">> formatting code"
+	GO111MODULE=$(GO111MODULE) $(GO) fmt $(GOOPTS) $(pkgs)
+
+.PHONY: common-vet
+common-vet:
+	@echo ">> vetting code"
+	GO111MODULE=$(GO111MODULE) $(GO) vet $(GOOPTS) $(pkgs)
+
+.PHONY: common-staticcheck
+common-staticcheck: $(STATICCHECK)
+	@echo ">> running staticcheck"
+ifdef GO111MODULE
+	GO111MODULE=$(GO111MODULE) $(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" -checks "SA*" $(pkgs)
+else
+	$(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" $(pkgs)
+endif
+
+.PHONY: common-unused
+common-unused: $(GOVENDOR)
+ifdef GOVENDOR
+	@echo ">> running check for unused packages"
+	@$(GOVENDOR) list +unused | grep . && exit 1 || echo 'No unused packages'
+else
+ifdef GO111MODULE
+	@echo ">> running check for unused/missing packages in go.mod"
+	GO111MODULE=$(GO111MODULE) $(GO) mod tidy
+	@git diff --exit-code -- go.sum go.mod
+ifneq (,$(wildcard vendor))
+	@echo ">> running check for unused packages in vendor/"
+	GO111MODULE=$(GO111MODULE) $(GO) mod vendor
+	@git diff --exit-code -- go.sum go.mod vendor/
+endif
+endif
+endif
+
+.PHONY: common-build
+common-build: promu
+	@echo ">> building binaries"
+	GO111MODULE=$(GO111MODULE) $(PROMU) build --prefix $(PREFIX)
+
+.PHONY: common-tarball
+common-tarball: promu
+	@echo ">> building release tarball"
+	$(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR)
+
+.PHONY: common-docker
+common-docker:
+	docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" .
+
+.PHONY: common-docker-publish
+common-docker-publish:
+	docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)"
+
+.PHONY: common-docker-tag-latest
+common-docker-tag-latest:
+	docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):latest"
+
+.PHONY: promu
+promu: $(PROMU)
+
+$(PROMU):
+	curl -s -L $(PROMU_URL) | tar -xvz -C /tmp
+	mkdir -v -p $(FIRST_GOPATH)/bin
+	cp -v /tmp/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(PROMU)
+
+.PHONY: proto
+proto:
+	@echo ">> generating code from proto files"
+	@./scripts/genproto.sh
+
+.PHONY: $(STATICCHECK)
+$(STATICCHECK):
+ifdef GO111MODULE
+# Get staticcheck from a temporary directory to avoid modifying the local go.{mod,sum}.
+# See https://github.com/golang/go/issues/27643.
+# For now, we are using the next branch of staticcheck because master isn't compatible yet with Go modules.
+	tmpModule=$$(mktemp -d 2>&1) && \
+	mkdir -p $${tmpModule}/staticcheck && \
+	cd "$${tmpModule}"/staticcheck && \
+	GO111MODULE=on $(GO) mod init example.com/staticcheck && \
+	GO111MODULE=on GOOS= GOARCH= $(GO) get -u honnef.co/go/tools/cmd/staticcheck@next && \
+	rm -rf $${tmpModule};
+else
+	GOOS= GOARCH= GO111MODULE=off $(GO) get -u honnef.co/go/tools/cmd/staticcheck
+endif
+
+ifdef GOVENDOR
+.PHONY: $(GOVENDOR)
+$(GOVENDOR):
+	GOOS= GOARCH= $(GO) get -u github.com/kardianos/govendor
+endif
+
+.PHONY: precheck
+precheck::
+
+define PRECHECK_COMMAND_template =
+precheck:: $(1)_precheck
+
+
+PRECHECK_COMMAND_$(1) ?= $(1) $$(strip $$(PRECHECK_OPTIONS_$(1)))
+.PHONY: $(1)_precheck
+$(1)_precheck:
+	@if ! $$(PRECHECK_COMMAND_$(1)) 1>/dev/null 2>&1; then \
+		echo "Execution of '$$(PRECHECK_COMMAND_$(1))' command failed. Is $(1) installed?"; \
+		exit 1; \
+	fi
+endef
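+
+# Example (illustrative): a Makefile that includes this file can register a
+# precheck for a required tool like so:
+#
+#   $(eval $(call PRECHECK_COMMAND_template,promtool))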
diff --git a/vendor/github.com/prometheus/procfs/NOTICE b/vendor/github.com/prometheus/procfs/NOTICE
new file mode 100644
index 0000000..53c5e9a
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/NOTICE
@@ -0,0 +1,7 @@
+procfs provides functions to retrieve system, kernel and process
+metrics from the pseudo-filesystem proc.
+
+Copyright 2014-2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md
new file mode 100644
index 0000000..2095494
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/README.md
@@ -0,0 +1,11 @@
+# procfs
+
+This procfs package provides functions to retrieve system, kernel and process
+metrics from the pseudo-filesystem proc.
+
+*WARNING*: This package is a work in progress. Its API may still break in
+backwards-incompatible ways without warnings. Use it at your own risk.
+
+[![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs)
+[![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs)
+[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/procfs)](https://goreportcard.com/report/github.com/prometheus/procfs)
diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go
new file mode 100644
index 0000000..d3a8268
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/buddyinfo.go
@@ -0,0 +1,95 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// A BuddyInfo holds the details parsed from /proc/buddyinfo.
+// The data consists of an array of counts of free fragments of each size.
+// The sizes are 2^n*PAGE_SIZE, where n is the array index.
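+// A typical /proc/buddyinfo line looks like (illustrative):
+//
+//	Node 0, zone   Normal    216     55    189    101     84     38     37     27      5      3      1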
+type BuddyInfo struct {
+	Node  string
+	Zone  string
+	Sizes []float64
+}
+
+// NewBuddyInfo reads the buddyinfo statistics.
+func NewBuddyInfo() ([]BuddyInfo, error) {
+	fs, err := NewFS(DefaultMountPoint)
+	if err != nil {
+		return nil, err
+	}
+
+	return fs.NewBuddyInfo()
+}
+
+// NewBuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem.
+func (fs FS) NewBuddyInfo() ([]BuddyInfo, error) {
+	file, err := os.Open(fs.Path("buddyinfo"))
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+
+	return parseBuddyInfo(file)
+}
+
+func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
+	var (
+		buddyInfo   = []BuddyInfo{}
+		scanner     = bufio.NewScanner(r)
+		bucketCount = -1
+	)
+
+	for scanner.Scan() {
+		var err error
+		line := scanner.Text()
+		parts := strings.Fields(line)
+
+		if len(parts) < 4 {
+			return nil, fmt.Errorf("invalid number of fields when parsing buddyinfo")
+		}
+
+		node := strings.TrimRight(parts[1], ",")
+		zone := strings.TrimRight(parts[3], ",")
+		arraySize := len(parts[4:])
+
+		if bucketCount == -1 {
+			bucketCount = arraySize
+		} else if bucketCount != arraySize {
+			return nil, fmt.Errorf("mismatch in number of buddyinfo buckets, previous count %d, new count %d", bucketCount, arraySize)
+		}
+
+		sizes := make([]float64, arraySize)
+		for i := 0; i < arraySize; i++ {
+			sizes[i], err = strconv.ParseFloat(parts[i+4], 64)
+			if err != nil {
+				return nil, fmt.Errorf("invalid value in buddyinfo: %s", err)
+			}
+		}
+
+		buddyInfo = append(buddyInfo, BuddyInfo{node, zone, sizes})
+	}
+
+	return buddyInfo, scanner.Err()
+}
diff --git a/vendor/github.com/prometheus/procfs/doc.go b/vendor/github.com/prometheus/procfs/doc.go
new file mode 100644
index 0000000..e2acd6d
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/doc.go
@@ -0,0 +1,45 @@
+// Copyright 2014 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package procfs provides functions to retrieve system, kernel and process
+// metrics from the pseudo-filesystem proc.
+//
+// Example:
+//
+//    package main
+//
+//    import (
+//    	"fmt"
+//    	"log"
+//
+//    	"github.com/prometheus/procfs"
+//    )
+//
+//    func main() {
+//    	p, err := procfs.Self()
+//    	if err != nil {
+//    		log.Fatalf("could not get process: %s", err)
+//    	}
+//
+//    	stat, err := p.NewStat()
+//    	if err != nil {
+//    		log.Fatalf("could not get process stat: %s", err)
+//    	}
+//
+//    	fmt.Printf("command:  %s\n", stat.Comm)
+//    	fmt.Printf("cpu time: %fs\n", stat.CPUTime())
+//    	fmt.Printf("vsize:    %dB\n", stat.VirtualMemory())
+//    	fmt.Printf("rss:      %dB\n", stat.ResidentMemory())
+//    }
+//
+package procfs
diff --git a/vendor/github.com/prometheus/procfs/fixtures.ttar b/vendor/github.com/prometheus/procfs/fixtures.ttar
new file mode 100644
index 0000000..13c831e
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fixtures.ttar
@@ -0,0 +1,462 @@
+# Archive created by ttar -c -f fixtures.ttar fixtures/
+Directory: fixtures
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/26231
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26231/cmdline
+Lines: 1
+vimNULLBYTEtest.goNULLBYTE+10NULLBYTEEOF
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26231/comm
+Lines: 1
+vim
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26231/cwd
+SymlinkTo: /usr/bin
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26231/exe
+SymlinkTo: /usr/bin/vim
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/26231/fd
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26231/fd/0
+SymlinkTo: ../../symlinktargets/abc
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26231/fd/1
+SymlinkTo: ../../symlinktargets/def
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26231/fd/10
+SymlinkTo: ../../symlinktargets/xyz
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26231/fd/2
+SymlinkTo: ../../symlinktargets/ghi
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26231/fd/3
+SymlinkTo: ../../symlinktargets/uvw
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26231/io
+Lines: 7
+rchar: 750339
+wchar: 818609
+syscr: 7405
+syscw: 5245
+read_bytes: 1024
+write_bytes: 2048
+cancelled_write_bytes: -1024
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26231/limits
+Lines: 17
+Limit                     Soft Limit           Hard Limit           Units
+Max cpu time              unlimited            unlimited            seconds
+Max file size             unlimited            unlimited            bytes
+Max data size             unlimited            unlimited            bytes
+Max stack size            8388608              unlimited            bytes
+Max core file size        0                    unlimited            bytes
+Max resident set          unlimited            unlimited            bytes
+Max processes             62898                62898                processes
+Max open files            2048                 4096                 files
+Max locked memory         65536                65536                bytes
+Max address space         8589934592           unlimited            bytes
+Max file locks            unlimited            unlimited            locks
+Max pending signals       62898                62898                signals
+Max msgqueue size         819200               819200               bytes
+Max nice priority         0                    0
+Max realtime priority     0                    0
+Max realtime timeout      unlimited            unlimited            us
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26231/mountstats
+Lines: 19
+device rootfs mounted on / with fstype rootfs
+device sysfs mounted on /sys with fstype sysfs
+device proc mounted on /proc with fstype proc
+device /dev/sda1 mounted on / with fstype ext4
+device 192.168.1.1:/srv/test mounted on /mnt/nfs/test with fstype nfs4 statvers=1.1
+	opts:	rw,vers=4.0,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,clientaddr=192.168.1.5,local_lock=none
+	age:	13968
+	caps:	caps=0xfff7,wtmult=512,dtsize=32768,bsize=0,namlen=255
+	nfsv4:	bm0=0xfdffafff,bm1=0xf9be3e,bm2=0x0,acl=0x0,pnfs=not configured
+	sec:	flavor=1,pseudoflavor=1
+	events:	52 226 0 0 1 13 398 0 0 331 0 47 0 0 77 0 0 77 0 0 0 0 0 0 0 0 0
+	bytes:	1207640230 0 0 0 1210214218 0 295483 0
+	RPC iostats version: 1.0  p/v: 100003/4 (nfs)
+	xprt:	tcp 832 0 1 0 11 6428 6428 0 12154 0 24 26 5726
+	per-op statistics
+	        NULL: 0 0 0 0 0 0 0 0
+	        READ: 1298 1298 0 207680 1210292152 6 79386 79407
+	       WRITE: 0 0 0 0 0 0 0 0
+
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/26231/net
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26231/net/dev
+Lines: 4
+Inter-|   Receive                                                |  Transmit
+ face |bytes    packets errs drop fifo frame compressed multicast|bytes    packets errs drop fifo colls carrier compressed
+    lo:       0       0    0    0    0     0          0         0        0       0    0    0    0     0       0          0
+  eth0:     438       5    0    0    0     0          0         0      648       8    0    0    0     0       0          0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/26231/ns
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26231/ns/mnt
+SymlinkTo: mnt:[4026531840]
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26231/ns/net
+SymlinkTo: net:[4026531993]
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26231/root
+SymlinkTo: /
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26231/stat
+Lines: 1
+26231 (vim) R 5392 7446 5392 34835 7446 4218880 32533 309516 26 82 1677 44 158 99 20 0 1 0 82375 56274944 1981 18446744073709551615 4194304 6294284 140736914091744 140736914087944 139965136429984 0 0 12288 1870679807 0 0 0 17 0 0 0 31 0 0 8391624 8481048 16420864 140736914093252 140736914093279 140736914093279 140736914096107 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/26232
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26232/cmdline
+Lines: 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26232/comm
+Lines: 1
+ata_sff
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26232/cwd
+SymlinkTo: /does/not/exist
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/26232/fd
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26232/fd/0
+SymlinkTo: ../../symlinktargets/abc
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26232/fd/1
+SymlinkTo: ../../symlinktargets/def
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26232/fd/2
+SymlinkTo: ../../symlinktargets/ghi
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26232/fd/3
+SymlinkTo: ../../symlinktargets/uvw
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26232/fd/4
+SymlinkTo: ../../symlinktargets/xyz
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26232/limits
+Lines: 17
+Limit                     Soft Limit           Hard Limit           Units     
+Max cpu time              unlimited            unlimited            seconds   
+Max file size             unlimited            unlimited            bytes     
+Max data size             unlimited            unlimited            bytes     
+Max stack size            8388608              unlimited            bytes     
+Max core file size        0                    unlimited            bytes     
+Max resident set          unlimited            unlimited            bytes     
+Max processes             29436                29436                processes 
+Max open files            1024                 4096                 files     
+Max locked memory         65536                65536                bytes     
+Max address space         unlimited            unlimited            bytes     
+Max file locks            unlimited            unlimited            locks     
+Max pending signals       29436                29436                signals   
+Max msgqueue size         819200               819200               bytes     
+Max nice priority         0                    0                    
+Max realtime priority     0                    0                    
+Max realtime timeout      unlimited            unlimited            us        
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26232/root
+SymlinkTo: /does/not/exist
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26232/stat
+Lines: 1
+33 (ata_sff) S 2 0 0 0 -1 69238880 0 0 0 0 0 0 0 0 0 -20 1 0 5 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 1 0 0 0 0 0 0 0 0 0 0 0 0 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/26233
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26233/cmdline
+Lines: 1
+com.github.uiautomatorNULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTEEOF
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/584
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/584/stat
+Lines: 2
+1020 ((a b ) ( c d) ) R 28378 1020 28378 34842 1020 4218880 286 0 0 0 0 0 0 0 20 0 1 0 10839175 10395648 155 18446744073709551615 4194304 4238788 140736466511168 140736466511168 140609271124624 0 0 0 0 0 0 0 17 5 0 0 0 0 0 6336016 6337300 25579520 140736466515030 140736466515061 140736466515061 140736466518002 0
+#!/bin/cat /proc/self/stat
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/buddyinfo
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/buddyinfo/short
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/buddyinfo/short/buddyinfo
+Lines: 3
+Node 0, zone
+Node 0, zone
+Node 0, zone
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/buddyinfo/sizemismatch
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/buddyinfo/sizemismatch/buddyinfo
+Lines: 3
+Node 0, zone      DMA      1      0      1      0      2      1      1      0      1      1      3 
+Node 0, zone    DMA32    759    572    791    475    194     45     12      0      0      0      0      0
+Node 0, zone   Normal   4381   1093    185   1530    567    102      4      0      0      0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/buddyinfo/valid
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/buddyinfo/valid/buddyinfo
+Lines: 3
+Node 0, zone      DMA      1      0      1      0      2      1      1      0      1      1      3 
+Node 0, zone    DMA32    759    572    791    475    194     45     12      0      0      0      0 
+Node 0, zone   Normal   4381   1093    185   1530    567    102      4      0      0      0      0 
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/fs
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/fs/xfs
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/fs/xfs/stat
+Lines: 23
+extent_alloc 92447 97589 92448 93751
+abt 0 0 0 0
+blk_map 1767055 188820 184891 92447 92448 2140766 0
+bmbt 0 0 0 0
+dir 185039 92447 92444 136422
+trans 706 944304 0
+ig 185045 58807 0 126238 0 33637 22
+log 2883 113448 9 17360 739
+push_ail 945014 0 134260 15483 0 3940 464 159985 0 40
+xstrat 92447 0
+rw 107739 94045
+attr 4 0 0 0
+icluster 8677 7849 135802
+vnodes 92601 0 0 0 92444 92444 92444 0
+buf 2666287 7122 2659202 3599 2 7085 0 10297 7085
+abtb2 184941 1277345 13257 13278 0 0 0 0 0 0 0 0 0 0 2746147
+abtc2 345295 2416764 172637 172658 0 0 0 0 0 0 0 0 0 0 21406023
+bmbt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ibt2 343004 1358467 0 0 0 0 0 0 0 0 0 0 0 0 0
+fibt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+qm 0 0 0 0 0 0 0 0
+xpc 399724544 92823103 86219234
+debug 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/mdstat
+Lines: 26
+Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10]
+md3 : active raid6 sda1[8] sdh1[7] sdg1[6] sdf1[5] sde1[11] sdd1[3] sdc1[10] sdb1[9]
+      5853468288 blocks super 1.2 level 6, 64k chunk, algorithm 2 [8/8] [UUUUUUUU]
+      
+md127 : active raid1 sdi2[0] sdj2[1]
+      312319552 blocks [2/2] [UU]
+      
+md0 : active raid1 sdk[2](S) sdi1[0] sdj1[1]
+      248896 blocks [2/2] [UU]
+      
+md4 : inactive raid1 sda3[0] sdb3[1]
+      4883648 blocks [2/2] [UU]
+
+md6 : active raid1 sdb2[2] sda2[0]
+      195310144 blocks [2/1] [U_]
+      [=>...................]  recovery =  8.5% (16775552/195310144) finish=17.0min speed=259783K/sec
+
+md8 : active raid1 sdb1[1] sda1[0]
+      195310144 blocks [2/2] [UU]
+      [=>...................]  resync =  8.5% (16775552/195310144) finish=17.0min speed=259783K/sec
+
+md7 : active raid6 sdb1[0] sde1[3] sdd1[2] sdc1[1]
+      7813735424 blocks super 1.2 level 6, 512k chunk, algorithm 2 [4/3] [U_UU]
+      bitmap: 0/30 pages [0KB], 65536KB chunk
+
+unused devices: <none>
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/net
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/net/dev
+Lines: 6
+Inter-|   Receive                                                |  Transmit
+ face |bytes    packets errs drop fifo frame compressed multicast|bytes    packets errs drop fifo colls carrier compressed
+vethf345468:     648       8    0    0    0     0          0         0      438       5    0    0    0     0       0          0
+    lo: 1664039048 1566805    0    0    0     0          0         0 1664039048 1566805    0    0    0     0       0          0
+docker0:    2568      38    0    0    0     0          0         0      438       5    0    0    0     0       0          0
+  eth0: 874354587 1036395    0    0    0     0          0         0 563352563  732147    0    0    0     0       0          0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/net/ip_vs
+Lines: 21
+IP Virtual Server version 1.2.1 (size=4096)
+Prot LocalAddress:Port Scheduler Flags
+  -> RemoteAddress:Port Forward Weight ActiveConn InActConn
+TCP  C0A80016:0CEA wlc
+  -> C0A85216:0CEA      Tunnel  100    248        2
+  -> C0A85318:0CEA      Tunnel  100    248        2
+  -> C0A85315:0CEA      Tunnel  100    248        1
+TCP  C0A80039:0CEA wlc
+  -> C0A85416:0CEA      Tunnel  0      0          0
+  -> C0A85215:0CEA      Tunnel  100    1499       0
+  -> C0A83215:0CEA      Tunnel  100    1498       0
+TCP  C0A80037:0CEA wlc
+  -> C0A8321A:0CEA      Tunnel  0      0          0
+  -> C0A83120:0CEA      Tunnel  100    0          0
+TCP  [2620:0000:0000:0000:0000:0000:0000:0001]:0050 sh
+  -> [2620:0000:0000:0000:0000:0000:0000:0002]:0050      Route   1      0          0
+  -> [2620:0000:0000:0000:0000:0000:0000:0003]:0050      Route   1      0          0
+  -> [2620:0000:0000:0000:0000:0000:0000:0004]:0050      Route   1      1          1
+FWM  10001000 wlc
+  -> C0A8321A:0CEA      Route   0      0          1
+  -> C0A83215:0CEA      Route   0      0          2
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/net/ip_vs_stats
+Lines: 6
+   Total Incoming Outgoing         Incoming         Outgoing
+   Conns  Packets  Packets            Bytes            Bytes
+ 16AA370 E33656E5        0     51D8C8883AB3                0
+
+ Conns/s   Pkts/s   Pkts/s          Bytes/s          Bytes/s
+       4    1FB3C        0          1282A8F                0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/net/rpc
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/net/rpc/nfs
+Lines: 5
+net 18628 0 18628 6
+rpc 4329785 0 4338291
+proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2
+proc3 22 1 4084749 29200 94754 32580 186 47747 7981 8639 0 6356 0 6962 0 7958 0 0 241 4 4 2 39
+proc4 61 1 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/net/rpc/nfsd
+Lines: 11
+rc 0 6 18622
+fh 0 0 0 0 0
+io 157286400 0
+th 8 0 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000
+ra 32 0 0 0 0 0 0 0 0 0 0 0
+net 18628 0 18628 6
+rpc 18628 0 0 0 0
+proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2
+proc3 22 2 112 0 2719 111 0 0 0 0 0 0 0 0 0 0 0 27 216 0 2 1 0
+proc4 2 2 10853
+proc4ops 72 0 0 0 1098 2 0 0 0 0 8179 5896 0 0 0 0 5900 0 0 2 0 2 0 9609 0 2 150 1272 0 0 0 1236 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/net/xfrm_stat
+Lines: 28
+XfrmInError                     1
+XfrmInBufferError               2
+XfrmInHdrError                  4
+XfrmInNoStates                  3
+XfrmInStateProtoError           40
+XfrmInStateModeError            100
+XfrmInStateSeqError             6000
+XfrmInStateExpired              4
+XfrmInStateMismatch             23451
+XfrmInStateInvalid              55555
+XfrmInTmplMismatch              51
+XfrmInNoPols                    65432
+XfrmInPolBlock                  100
+XfrmInPolError                  10000
+XfrmOutError                    1000000
+XfrmOutBundleGenError           43321
+XfrmOutBundleCheckError         555
+XfrmOutNoStates                 869
+XfrmOutStateProtoError          4542
+XfrmOutStateModeError           4
+XfrmOutStateSeqError            543
+XfrmOutStateExpired             565
+XfrmOutPolBlock                 43456
+XfrmOutPolDead                  7656
+XfrmOutPolError                 1454
+XfrmFwdHdrError                 6654
+XfrmOutStateInvalid             28765
+XfrmAcquireError                24532
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/self
+SymlinkTo: 26231
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/stat
+Lines: 16
+cpu  301854 612 111922 8979004 3552 2 3944 0 0 0
+cpu0 44490 19 21045 1087069 220 1 3410 0 0 0
+cpu1 47869 23 16474 1110787 591 0 46 0 0 0
+cpu2 46504 36 15916 1112321 441 0 326 0 0 0
+cpu3 47054 102 15683 1113230 533 0 60 0 0 0
+cpu4 28413 25 10776 1140321 217 0 8 0 0 0
+cpu5 29271 101 11586 1136270 672 0 30 0 0 0
+cpu6 29152 36 10276 1139721 319 0 29 0 0 0
+cpu7 29098 268 10164 1139282 555 0 31 0 0 0
+intr 8885917 17 0 0 0 0 0 0 0 1 79281 0 0 0 0 0 0 0 231237 0 0 0 0 250586 103 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 223424 190745 13 906 1283803 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ctxt 38014093
+btime 1418183276
+processes 26442
+procs_running 2
+procs_blocked 1
+softirq 5057579 250191 1481983 1647 211099 186066 0 1783454 622196 12499 508444
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/symlinktargets
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/symlinktargets/README
+Lines: 2
+This directory contains some empty files that are the symlinks the files in the "fd" directory point to.
+They are otherwise ignored by the tests
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/symlinktargets/abc
+Lines: 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/symlinktargets/def
+Lines: 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/symlinktargets/ghi
+Lines: 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/symlinktargets/uvw
+Lines: 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/symlinktargets/xyz
+Lines: 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/.unpacked
+Lines: 0
+Mode: 664
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go
new file mode 100644
index 0000000..b6c6b2c
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fs.go
@@ -0,0 +1,82 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"fmt"
+	"os"
+	"path"
+
+	"github.com/prometheus/procfs/nfs"
+	"github.com/prometheus/procfs/xfs"
+)
+
+// FS represents the pseudo-filesystem proc, which provides an interface to
+// kernel data structures.
+type FS string
+
+// DefaultMountPoint is the common mount point of the proc filesystem.
+const DefaultMountPoint = "/proc"
+
+// NewFS returns a new FS mounted under the given mountPoint. It will error
+// if the mount point can't be read.
+func NewFS(mountPoint string) (FS, error) {
+	info, err := os.Stat(mountPoint)
+	if err != nil {
+		return "", fmt.Errorf("could not read %s: %s", mountPoint, err)
+	}
+	if !info.IsDir() {
+		return "", fmt.Errorf("mount point %s is not a directory", mountPoint)
+	}
+
+	return FS(mountPoint), nil
+}
+
+// Path joins the given path elements onto the procfs mount point and returns
+// the resulting path.
+func (fs FS) Path(p ...string) string {
+	return path.Join(append([]string{string(fs)}, p...)...)
+}
+
+// XFSStats retrieves XFS filesystem runtime statistics.
+func (fs FS) XFSStats() (*xfs.Stats, error) {
+	f, err := os.Open(fs.Path("fs/xfs/stat"))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	return xfs.ParseStats(f)
+}
+
+// NFSClientRPCStats retrieves NFS client RPC statistics.
+func (fs FS) NFSClientRPCStats() (*nfs.ClientRPCStats, error) {
+	f, err := os.Open(fs.Path("net/rpc/nfs"))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	return nfs.ParseClientRPCStats(f)
+}
+
+// NFSdServerRPCStats retrieves NFS daemon RPC statistics.
+func (fs FS) NFSdServerRPCStats() (*nfs.ServerRPCStats, error) {
+	f, err := os.Open(fs.Path("net/rpc/nfsd"))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	return nfs.ParseServerRPCStats(f)
+}
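+
+// A minimal usage sketch: construct an FS once and derive readers from it.
+// In tests, the fixture tree earlier in this change stands in for the real
+// /proc mount point.
+//
+//	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
+//	if err != nil {
+//		// /proc is missing or unreadable
+//	}
+//	xfsStats, err := fs.XFSStats() // parses <mountPoint>/fs/xfs/stat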
diff --git a/vendor/github.com/prometheus/procfs/go.mod b/vendor/github.com/prometheus/procfs/go.mod
new file mode 100644
index 0000000..e89ee6c
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/go.mod
@@ -0,0 +1 @@
+module github.com/prometheus/procfs
diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go
new file mode 100644
index 0000000..2ff228e
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/internal/util/parse.go
@@ -0,0 +1,59 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
+
+import (
+	"io/ioutil"
+	"strconv"
+	"strings"
+)
+
+// ParseUint32s parses a slice of strings into a slice of uint32s.
+func ParseUint32s(ss []string) ([]uint32, error) {
+	us := make([]uint32, 0, len(ss))
+	for _, s := range ss {
+		u, err := strconv.ParseUint(s, 10, 32)
+		if err != nil {
+			return nil, err
+		}
+
+		us = append(us, uint32(u))
+	}
+
+	return us, nil
+}
+
+// ParseUint64s parses a slice of strings into a slice of uint64s.
+func ParseUint64s(ss []string) ([]uint64, error) {
+	us := make([]uint64, 0, len(ss))
+	for _, s := range ss {
+		u, err := strconv.ParseUint(s, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+
+		us = append(us, u)
+	}
+
+	return us, nil
+}
+
+// ReadUintFromFile reads a file and attempts to parse a uint64 from it.
+func ReadUintFromFile(path string) (uint64, error) {
+	data, err := ioutil.ReadFile(path)
+	if err != nil {
+		return 0, err
+	}
+	return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
+}
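+
+// A usage sketch: split a whitespace-delimited stats line first, then hand
+// the fields to one of the parsers above.
+//
+//	fields := strings.Fields("92447 97589 92448 93751")
+//	vals, err := ParseUint64s(fields) // []uint64{92447, 97589, 92448, 93751}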
diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_linux.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_linux.go
new file mode 100644
index 0000000..df0d567
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_linux.go
@@ -0,0 +1,45 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !windows
+
+package util
+
+import (
+	"bytes"
+	"os"
+	"syscall"
+)
+
+// SysReadFile is a simplified ioutil.ReadFile that invokes syscall.Read directly.
+// https://github.com/prometheus/node_exporter/pull/728/files
+func SysReadFile(file string) (string, error) {
+	f, err := os.Open(file)
+	if err != nil {
+		return "", err
+	}
+	defer f.Close()
+
+	// On some machines, hwmon drivers are broken and return EAGAIN.  This causes
+	// Go's ioutil.ReadFile implementation to poll forever.
+	//
+	// Since we either want to read data or bail immediately, do the simplest
+	// possible read using syscall directly.
+	b := make([]byte, 128)
+	n, err := syscall.Read(int(f.Fd()), b)
+	if err != nil {
+		return "", err
+	}
+
+	return string(bytes.TrimSpace(b[:n])), nil
+}
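+
+// A usage sketch: SysReadFile trades generality for a single bounded read,
+// which is all a one-line sysfs/hwmon file needs. Note that the 128-byte
+// buffer caps how much data can be returned.
+//
+//	val, err := SysReadFile("/sys/class/hwmon/hwmon0/temp1_input")
+//	// val is the trimmed file contents, e.g. "42000"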
diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go
new file mode 100644
index 0000000..e36d4a3
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/ipvs.go
@@ -0,0 +1,259 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`.
+type IPVSStats struct {
+	// Total count of connections.
+	Connections uint64
+	// Total incoming packets processed.
+	IncomingPackets uint64
+	// Total outgoing packets processed.
+	OutgoingPackets uint64
+	// Total incoming traffic.
+	IncomingBytes uint64
+	// Total outgoing traffic.
+	OutgoingBytes uint64
+}
+
+// IPVSBackendStatus holds current metrics of one virtual / real address pair.
+type IPVSBackendStatus struct {
+	// The local (virtual) IP address.
+	LocalAddress net.IP
+	// The remote (real) IP address.
+	RemoteAddress net.IP
+	// The local (virtual) port.
+	LocalPort uint16
+	// The remote (real) port.
+	RemotePort uint16
+	// The local firewall mark
+	LocalMark string
+	// The transport protocol (TCP, UDP).
+	Proto string
+	// The current number of active connections for this virtual/real address pair.
+	ActiveConn uint64
+	// The current number of inactive connections for this virtual/real address pair.
+	InactConn uint64
+	// The current weight of this virtual/real address pair.
+	Weight uint64
+}
+
+// NewIPVSStats reads the IPVS statistics.
+func NewIPVSStats() (IPVSStats, error) {
+	fs, err := NewFS(DefaultMountPoint)
+	if err != nil {
+		return IPVSStats{}, err
+	}
+
+	return fs.NewIPVSStats()
+}
+
+// NewIPVSStats reads the IPVS statistics from the specified `proc` filesystem.
+func (fs FS) NewIPVSStats() (IPVSStats, error) {
+	file, err := os.Open(fs.Path("net/ip_vs_stats"))
+	if err != nil {
+		return IPVSStats{}, err
+	}
+	defer file.Close()
+
+	return parseIPVSStats(file)
+}
+
+// parseIPVSStats performs the actual parsing of `ip_vs_stats`.
+func parseIPVSStats(file io.Reader) (IPVSStats, error) {
+	var (
+		statContent []byte
+		statLines   []string
+		statFields  []string
+		stats       IPVSStats
+	)
+
+	statContent, err := ioutil.ReadAll(file)
+	if err != nil {
+		return IPVSStats{}, err
+	}
+
+	statLines = strings.SplitN(string(statContent), "\n", 4)
+	if len(statLines) != 4 {
+		return IPVSStats{}, errors.New("ip_vs_stats corrupt: too short")
+	}
+
+	statFields = strings.Fields(statLines[2])
+	if len(statFields) != 5 {
+		return IPVSStats{}, errors.New("ip_vs_stats corrupt: unexpected number of fields")
+	}
+
+	stats.Connections, err = strconv.ParseUint(statFields[0], 16, 64)
+	if err != nil {
+		return IPVSStats{}, err
+	}
+	stats.IncomingPackets, err = strconv.ParseUint(statFields[1], 16, 64)
+	if err != nil {
+		return IPVSStats{}, err
+	}
+	stats.OutgoingPackets, err = strconv.ParseUint(statFields[2], 16, 64)
+	if err != nil {
+		return IPVSStats{}, err
+	}
+	stats.IncomingBytes, err = strconv.ParseUint(statFields[3], 16, 64)
+	if err != nil {
+		return IPVSStats{}, err
+	}
+	stats.OutgoingBytes, err = strconv.ParseUint(statFields[4], 16, 64)
+	if err != nil {
+		return IPVSStats{}, err
+	}
+
+	return stats, nil
+}
+
+// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs.
+func NewIPVSBackendStatus() ([]IPVSBackendStatus, error) {
+	fs, err := NewFS(DefaultMountPoint)
+	if err != nil {
+		return []IPVSBackendStatus{}, err
+	}
+
+	return fs.NewIPVSBackendStatus()
+}
+
+// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem.
+func (fs FS) NewIPVSBackendStatus() ([]IPVSBackendStatus, error) {
+	file, err := os.Open(fs.Path("net/ip_vs"))
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+
+	return parseIPVSBackendStatus(file)
+}
+
+func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) {
+	var (
+		status       []IPVSBackendStatus
+		scanner      = bufio.NewScanner(file)
+		proto        string
+		localMark    string
+		localAddress net.IP
+		localPort    uint16
+		err          error
+	)
+
+	for scanner.Scan() {
+		fields := strings.Fields(scanner.Text())
+		if len(fields) == 0 {
+			continue
+		}
+		switch {
+		case fields[0] == "IP" || fields[0] == "Prot" || fields[1] == "RemoteAddress:Port":
+			continue
+		case fields[0] == "TCP" || fields[0] == "UDP":
+			if len(fields) < 2 {
+				continue
+			}
+			proto = fields[0]
+			localMark = ""
+			localAddress, localPort, err = parseIPPort(fields[1])
+			if err != nil {
+				return nil, err
+			}
+		case fields[0] == "FWM":
+			if len(fields) < 2 {
+				continue
+			}
+			proto = fields[0]
+			localMark = fields[1]
+			localAddress = nil
+			localPort = 0
+		case fields[0] == "->":
+			if len(fields) < 6 {
+				continue
+			}
+			remoteAddress, remotePort, err := parseIPPort(fields[1])
+			if err != nil {
+				return nil, err
+			}
+			weight, err := strconv.ParseUint(fields[3], 10, 64)
+			if err != nil {
+				return nil, err
+			}
+			activeConn, err := strconv.ParseUint(fields[4], 10, 64)
+			if err != nil {
+				return nil, err
+			}
+			inactConn, err := strconv.ParseUint(fields[5], 10, 64)
+			if err != nil {
+				return nil, err
+			}
+			status = append(status, IPVSBackendStatus{
+				LocalAddress:  localAddress,
+				LocalPort:     localPort,
+				LocalMark:     localMark,
+				RemoteAddress: remoteAddress,
+				RemotePort:    remotePort,
+				Proto:         proto,
+				Weight:        weight,
+				ActiveConn:    activeConn,
+				InactConn:     inactConn,
+			})
+		}
+	}
+	return status, nil
+}
+
+func parseIPPort(s string) (net.IP, uint16, error) {
+	var (
+		ip  net.IP
+		err error
+	)
+
+	switch len(s) {
+	case 13:
+		ip, err = hex.DecodeString(s[0:8])
+		if err != nil {
+			return nil, 0, err
+		}
+	case 46:
+		ip = net.ParseIP(s[1:40])
+		if ip == nil {
+			return nil, 0, fmt.Errorf("invalid IPv6 address: %s", s[1:40])
+		}
+	default:
+		return nil, 0, fmt.Errorf("unexpected IP:Port: %s", s)
+	}
+
+	portString := s[len(s)-4:]
+	if len(portString) != 4 {
+		return nil, 0, fmt.Errorf("unexpected port string format: %s", portString)
+	}
+	port, err := strconv.ParseUint(portString, 16, 16)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	return ip, uint16(port), nil
+}
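+
+// A worked example against the ip_vs fixture above: both the stats counters
+// and the address fields are hexadecimal, hence the base-16 ParseUint calls.
+//
+//	ip, port, err := parseIPPort("C0A80016:0CEA")
+//	// ip = 192.168.0.22, port = 3306 (0x0CEA)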
diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go
new file mode 100644
index 0000000..9dc1958
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/mdstat.go
@@ -0,0 +1,151 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"fmt"
+	"io/ioutil"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+var (
+	statuslineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`)
+	buildlineRE  = regexp.MustCompile(`\((\d+)/\d+\)`)
+)
+
+// MDStat holds info parsed from /proc/mdstat.
+type MDStat struct {
+	// Name of the device.
+	Name string
+	// Activity state of the device.
+	ActivityState string
+	// Number of active disks.
+	DisksActive int64
+	// Total number of disks the device consists of.
+	DisksTotal int64
+	// Number of blocks the device holds.
+	BlocksTotal int64
+	// Number of blocks on the device that are in sync.
+	BlocksSynced int64
+}
+
+// ParseMDStat parses an mdstat file and returns a slice of MDStat structs with the relevant info.
+func (fs FS) ParseMDStat() (mdstates []MDStat, err error) {
+	mdStatusFilePath := fs.Path("mdstat")
+	content, err := ioutil.ReadFile(mdStatusFilePath)
+	if err != nil {
+		return []MDStat{}, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
+	}
+
+	mdStates := []MDStat{}
+	lines := strings.Split(string(content), "\n")
+	for i, l := range lines {
+		if l == "" {
+			continue
+		}
+		if l[0] == ' ' {
+			continue
+		}
+		if strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") {
+			continue
+		}
+
+		mainLine := strings.Split(l, " ")
+		if len(mainLine) < 3 {
+			return mdStates, fmt.Errorf("error parsing mdline: %s", l)
+		}
+		mdName := mainLine[0]
+		activityState := mainLine[2]
+
+		if len(lines) <= i+3 {
+			return mdStates, fmt.Errorf(
+				"error parsing %s: too few lines for md device %s",
+				mdStatusFilePath,
+				mdName,
+			)
+		}
+
+		active, total, size, err := evalStatusline(lines[i+1])
+		if err != nil {
+			return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
+		}
+
+		// j is the line number of the syncing line.
+		j := i + 2
+		if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line
+			j = i + 3
+		}
+
+		// If the device is syncing at the moment, get the number of currently
+		// synced blocks; otherwise that number equals the size of the device.
+		syncedBlocks := size
+		if strings.Contains(lines[j], "recovery") || strings.Contains(lines[j], "resync") {
+			syncedBlocks, err = evalBuildline(lines[j])
+			if err != nil {
+				return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
+			}
+		}
+
+		mdStates = append(mdStates, MDStat{
+			Name:          mdName,
+			ActivityState: activityState,
+			DisksActive:   active,
+			DisksTotal:    total,
+			BlocksTotal:   size,
+			BlocksSynced:  syncedBlocks,
+		})
+	}
+
+	return mdStates, nil
+}
+
+func evalStatusline(statusline string) (active, total, size int64, err error) {
+	matches := statuslineRE.FindStringSubmatch(statusline)
+	if len(matches) != 4 {
+		return 0, 0, 0, fmt.Errorf("unexpected statusline: %s", statusline)
+	}
+
+	size, err = strconv.ParseInt(matches[1], 10, 64)
+	if err != nil {
+		return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
+	}
+
+	total, err = strconv.ParseInt(matches[2], 10, 64)
+	if err != nil {
+		return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
+	}
+
+	active, err = strconv.ParseInt(matches[3], 10, 64)
+	if err != nil {
+		return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
+	}
+
+	return active, total, size, nil
+}
+
+func evalBuildline(buildline string) (syncedBlocks int64, err error) {
+	matches := buildlineRE.FindStringSubmatch(buildline)
+	if len(matches) != 2 {
+		return 0, fmt.Errorf("unexpected buildline: %s", buildline)
+	}
+
+	syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64)
+	if err != nil {
+		return 0, fmt.Errorf("%s in buildline: %s", err, buildline)
+	}
+
+	return syncedBlocks, nil
+}
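+
+// A worked example of the two regexps against the mdstat fixture above:
+//
+//	statuslineRE on "5853468288 blocks super 1.2 level 6, 64k chunk, algorithm 2 [8/8] [UUUUUUUU]"
+//	  yields size=5853468288, total=8, active=8
+//	buildlineRE on "[=>...................]  recovery =  8.5% (16775552/195310144) finish=17.0min speed=259783K/sec"
+//	  yields syncedBlocks=16775552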
diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go
new file mode 100644
index 0000000..7a8a1e0
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/mountstats.go
@@ -0,0 +1,606 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+// While implementing parsing of /proc/[pid]/mountstats, this blog was used
+// heavily as a reference:
+//   https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex
+//
+// Special thanks to Chris Siebenmann for all of his posts explaining the
+// various statistics available for NFS.
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// Constants shared between multiple functions.
+const (
+	deviceEntryLen = 8
+
+	fieldBytesLen  = 8
+	fieldEventsLen = 27
+
+	statVersion10 = "1.0"
+	statVersion11 = "1.1"
+
+	fieldTransport10TCPLen = 10
+	fieldTransport10UDPLen = 7
+
+	fieldTransport11TCPLen = 13
+	fieldTransport11UDPLen = 10
+)
+
+// A Mount is a device mount parsed from /proc/[pid]/mountstats.
+type Mount struct {
+	// Name of the device.
+	Device string
+	// The mount point of the device.
+	Mount string
+	// The filesystem type used by the device.
+	Type string
+	// If available, additional statistics related to this Mount.
+	// Use a type assertion to determine if additional statistics are available.
+	Stats MountStats
+}
+
+// A MountStats is a type which contains detailed statistics for a specific
+// type of Mount.
+type MountStats interface {
+	mountStats()
+}
+
+// A MountStatsNFS is a MountStats implementation for NFSv3 and v4 mounts.
+type MountStatsNFS struct {
+	// The version of statistics provided.
+	StatVersion string
+	// The age of the NFS mount.
+	Age time.Duration
+	// Statistics related to byte counters for various operations.
+	Bytes NFSBytesStats
+	// Statistics related to various NFS event occurrences.
+	Events NFSEventsStats
+	// Statistics broken down by filesystem operation.
+	Operations []NFSOperationStats
+	// Statistics about the NFS RPC transport.
+	Transport NFSTransportStats
+}
+
+// mountStats implements MountStats.
+func (m MountStatsNFS) mountStats() {}
+
+// An NFSBytesStats contains statistics about the number of bytes read and written
+// by an NFS client to and from an NFS server.
+type NFSBytesStats struct {
+	// Number of bytes read using the read() syscall.
+	Read uint64
+	// Number of bytes written using the write() syscall.
+	Write uint64
+	// Number of bytes read using the read() syscall in O_DIRECT mode.
+	DirectRead uint64
+	// Number of bytes written using the write() syscall in O_DIRECT mode.
+	DirectWrite uint64
+	// Number of bytes read from the NFS server, in total.
+	ReadTotal uint64
+	// Number of bytes written to the NFS server, in total.
+	WriteTotal uint64
+	// Number of pages read directly via mmap()'d files.
+	ReadPages uint64
+	// Number of pages written directly via mmap()'d files.
+	WritePages uint64
+}
+
+// An NFSEventsStats contains statistics about NFS event occurrences.
+type NFSEventsStats struct {
+	// Number of times cached inode attributes are re-validated from the server.
+	InodeRevalidate uint64
+	// Number of times cached dentry nodes are re-validated from the server.
+	DnodeRevalidate uint64
+	// Number of times an inode cache is cleared.
+	DataInvalidate uint64
+	// Number of times cached inode attributes are invalidated.
+	AttributeInvalidate uint64
+	// Number of times files or directories have been open()'d.
+	VFSOpen uint64
+	// Number of times a directory lookup has occurred.
+	VFSLookup uint64
+	// Number of times permissions have been checked.
+	VFSAccess uint64
+	// Number of updates (and potential writes) to pages.
+	VFSUpdatePage uint64
+	// Number of pages read directly via mmap()'d files.
+	VFSReadPage uint64
+	// Number of times a group of pages have been read.
+	VFSReadPages uint64
+	// Number of pages written directly via mmap()'d files.
+	VFSWritePage uint64
+	// Number of times a group of pages have been written.
+	VFSWritePages uint64
+	// Number of times directory entries have been read with getdents().
+	VFSGetdents uint64
+	// Number of times attributes have been set on inodes.
+	VFSSetattr uint64
+	// Number of pending writes that have been forcefully flushed to the server.
+	VFSFlush uint64
+	// Number of times fsync() has been called on directories and files.
+	VFSFsync uint64
+	// Number of times locking has been attempted on a file.
+	VFSLock uint64
+	// Number of times files have been closed and released.
+	VFSFileRelease uint64
+	// Unknown.  Possibly unused.
+	CongestionWait uint64
+	// Number of times files have been truncated.
+	Truncation uint64
+	// Number of times a file has been grown due to writes beyond its existing end.
+	WriteExtension uint64
+	// Number of times a file was removed while still open by another process.
+	SillyRename uint64
+	// Number of times the NFS server gave less data than expected while reading.
+	ShortRead uint64
+	// Number of times the NFS server wrote less data than expected while writing.
+	ShortWrite uint64
+	// Number of times the NFS server indicated EJUKEBOX; retrieving data from
+	// offline storage.
+	JukeboxDelay uint64
+	// Number of NFS v4.1+ pNFS reads.
+	PNFSRead uint64
+	// Number of NFS v4.1+ pNFS writes.
+	PNFSWrite uint64
+}
+
+// An NFSOperationStats contains statistics for a single operation.
+type NFSOperationStats struct {
+	// The name of the operation.
+	Operation string
+	// Number of requests performed for this operation.
+	Requests uint64
+	// Number of times an actual RPC request has been transmitted for this operation.
+	Transmissions uint64
+	// Number of times a request has had a major timeout.
+	MajorTimeouts uint64
+	// Number of bytes sent for this operation, including RPC headers and payload.
+	BytesSent uint64
+	// Number of bytes received for this operation, including RPC headers and payload.
+	BytesReceived uint64
+	// Duration all requests spent queued for transmission before they were sent.
+	CumulativeQueueTime time.Duration
+	// Duration it took to get a reply back after the request was transmitted.
+	CumulativeTotalResponseTime time.Duration
+	// Duration from when a request was enqueued to when it was completely handled.
+	CumulativeTotalRequestTime time.Duration
+}
+
+// An NFSTransportStats contains statistics for the NFS mount RPC requests and
+// responses.
+type NFSTransportStats struct {
+	// The transport protocol used for the NFS mount.
+	Protocol string
+	// The local port used for the NFS mount.
+	Port uint64
+	// Number of times the client has had to establish a connection from scratch
+	// to the NFS server.
+	Bind uint64
+	// Number of times the client has made a TCP connection to the NFS server.
+	Connect uint64
+	// Duration (in jiffies, a kernel internal unit of time) the NFS mount has
+	// spent waiting for connections to the server to be established.
+	ConnectIdleTime uint64
+	// Duration since the NFS mount last saw any RPC traffic.
+	IdleTime time.Duration
+	// Number of RPC requests for this mount sent to the NFS server.
+	Sends uint64
+	// Number of RPC responses for this mount received from the NFS server.
+	Receives uint64
+	// Number of times the NFS server sent a response with a transaction ID
+	// unknown to this client.
+	BadTransactionIDs uint64
+	// A running counter, incremented on each request as the current difference
+	// between sends and receives.
+	CumulativeActiveRequests uint64
+	// A running counter, incremented on each request by the current backlog
+	// queue size.
+	CumulativeBacklog uint64
+
+	// Stats below only available with stat version 1.1.
+
+	// Maximum number of simultaneously active RPC requests ever used.
+	MaximumRPCSlotsUsed uint64
+	// A running counter, incremented on each request as the current size of the
+	// sending queue.
+	CumulativeSendingQueue uint64
+	// A running counter, incremented on each request as the current size of the
+	// pending queue.
+	CumulativePendingQueue uint64
+}
+
+// parseMountStats parses a /proc/[pid]/mountstats file and returns a slice
+// of Mount structures containing detailed information about each mount.
+// If available, statistics for each mount are parsed as well.
+func parseMountStats(r io.Reader) ([]*Mount, error) {
+	const (
+		device            = "device"
+		statVersionPrefix = "statvers="
+
+		nfs3Type = "nfs"
+		nfs4Type = "nfs4"
+	)
+
+	var mounts []*Mount
+
+	s := bufio.NewScanner(r)
+	for s.Scan() {
+		// Only look for device entries in this function
+		ss := strings.Fields(string(s.Bytes()))
+		if len(ss) == 0 || ss[0] != device {
+			continue
+		}
+
+		m, err := parseMount(ss)
+		if err != nil {
+			return nil, err
+		}
+
+		// Does this mount also possess statistics information?
+		if len(ss) > deviceEntryLen {
+			// Only NFSv3 and v4 are supported for parsing statistics
+			if m.Type != nfs3Type && m.Type != nfs4Type {
+				return nil, fmt.Errorf("cannot parse MountStats for fstype %q", m.Type)
+			}
+
+			statVersion := strings.TrimPrefix(ss[8], statVersionPrefix)
+
+			stats, err := parseMountStatsNFS(s, statVersion)
+			if err != nil {
+				return nil, err
+			}
+
+			m.Stats = stats
+		}
+
+		mounts = append(mounts, m)
+	}
+
+	return mounts, s.Err()
+}
+
+// parseMount parses an entry in /proc/[pid]/mountstats in the format:
+//   device [device] mounted on [mount] with fstype [type]
+func parseMount(ss []string) (*Mount, error) {
+	if len(ss) < deviceEntryLen {
+		return nil, fmt.Errorf("invalid device entry: %v", ss)
+	}
+
+	// Check for specific words appearing at specific indices to ensure
+	// the format is consistent with what we expect
+	format := []struct {
+		i int
+		s string
+	}{
+		{i: 0, s: "device"},
+		{i: 2, s: "mounted"},
+		{i: 3, s: "on"},
+		{i: 5, s: "with"},
+		{i: 6, s: "fstype"},
+	}
+
+	for _, f := range format {
+		if ss[f.i] != f.s {
+			return nil, fmt.Errorf("invalid device entry: %v", ss)
+		}
+	}
+
+	return &Mount{
+		Device: ss[1],
+		Mount:  ss[4],
+		Type:   ss[7],
+	}, nil
+}
+
+// parseMountStatsNFS parses a MountStatsNFS by scanning additional information
+// related to NFS statistics.
+func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) {
+	// Field indicators for parsing specific types of data
+	const (
+		fieldAge        = "age:"
+		fieldBytes      = "bytes:"
+		fieldEvents     = "events:"
+		fieldPerOpStats = "per-op"
+		fieldTransport  = "xprt:"
+	)
+
+	stats := &MountStatsNFS{
+		StatVersion: statVersion,
+	}
+
+	for s.Scan() {
+		ss := strings.Fields(string(s.Bytes()))
+		if len(ss) == 0 {
+			break
+		}
+		if len(ss) < 2 {
+			return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
+		}
+
+		switch ss[0] {
+		case fieldAge:
+			// Age integer is in seconds
+			d, err := time.ParseDuration(ss[1] + "s")
+			if err != nil {
+				return nil, err
+			}
+
+			stats.Age = d
+		case fieldBytes:
+			bstats, err := parseNFSBytesStats(ss[1:])
+			if err != nil {
+				return nil, err
+			}
+
+			stats.Bytes = *bstats
+		case fieldEvents:
+			estats, err := parseNFSEventsStats(ss[1:])
+			if err != nil {
+				return nil, err
+			}
+
+			stats.Events = *estats
+		case fieldTransport:
+			if len(ss) < 3 {
+				return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss)
+			}
+
+			tstats, err := parseNFSTransportStats(ss[1:], statVersion)
+			if err != nil {
+				return nil, err
+			}
+
+			stats.Transport = *tstats
+		}
+
+		// When encountering "per-operation statistics", we must break this
+		// loop and parse them separately to ensure we can terminate parsing
+		// before reaching another device entry; this is why the check is a
+		// plain 'if' statement rather than another switch case.
+		if ss[0] == fieldPerOpStats {
+			break
+		}
+	}
+
+	if err := s.Err(); err != nil {
+		return nil, err
+	}
+
+	// NFS per-operation stats appear last before the next device entry
+	perOpStats, err := parseNFSOperationStats(s)
+	if err != nil {
+		return nil, err
+	}
+
+	stats.Operations = perOpStats
+
+	return stats, nil
+}
+
+// parseNFSBytesStats parses an NFSBytesStats line using an input set of
+// integer fields.
+func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) {
+	if len(ss) != fieldBytesLen {
+		return nil, fmt.Errorf("invalid NFS bytes stats: %v", ss)
+	}
+
+	ns := make([]uint64, 0, fieldBytesLen)
+	for _, s := range ss {
+		n, err := strconv.ParseUint(s, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+
+		ns = append(ns, n)
+	}
+
+	return &NFSBytesStats{
+		Read:        ns[0],
+		Write:       ns[1],
+		DirectRead:  ns[2],
+		DirectWrite: ns[3],
+		ReadTotal:   ns[4],
+		WriteTotal:  ns[5],
+		ReadPages:   ns[6],
+		WritePages:  ns[7],
+	}, nil
+}
+
+// parseNFSEventsStats parses an NFSEventsStats line using an input set of
+// integer fields.
+func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) {
+	if len(ss) != fieldEventsLen {
+		return nil, fmt.Errorf("invalid NFS events stats: %v", ss)
+	}
+
+	ns := make([]uint64, 0, fieldEventsLen)
+	for _, s := range ss {
+		n, err := strconv.ParseUint(s, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+
+		ns = append(ns, n)
+	}
+
+	return &NFSEventsStats{
+		InodeRevalidate:     ns[0],
+		DnodeRevalidate:     ns[1],
+		DataInvalidate:      ns[2],
+		AttributeInvalidate: ns[3],
+		VFSOpen:             ns[4],
+		VFSLookup:           ns[5],
+		VFSAccess:           ns[6],
+		VFSUpdatePage:       ns[7],
+		VFSReadPage:         ns[8],
+		VFSReadPages:        ns[9],
+		VFSWritePage:        ns[10],
+		VFSWritePages:       ns[11],
+		VFSGetdents:         ns[12],
+		VFSSetattr:          ns[13],
+		VFSFlush:            ns[14],
+		VFSFsync:            ns[15],
+		VFSLock:             ns[16],
+		VFSFileRelease:      ns[17],
+		CongestionWait:      ns[18],
+		Truncation:          ns[19],
+		WriteExtension:      ns[20],
+		SillyRename:         ns[21],
+		ShortRead:           ns[22],
+		ShortWrite:          ns[23],
+		JukeboxDelay:        ns[24],
+		PNFSRead:            ns[25],
+		PNFSWrite:           ns[26],
+	}, nil
+}
+
+// parseNFSOperationStats parses a slice of NFSOperationStats by scanning
+// additional information about per-operation statistics until an empty
+// line is reached.
+func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
+	const (
+		// Number of expected fields in each per-operation statistics set
+		numFields = 9
+	)
+
+	var ops []NFSOperationStats
+
+	for s.Scan() {
+		ss := strings.Fields(string(s.Bytes()))
+		if len(ss) == 0 {
+			// Must break when reading a blank line after per-operation stats to
+			// enable top-level function to parse the next device entry
+			break
+		}
+
+		if len(ss) != numFields {
+			return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss)
+		}
+
+		// Skip string operation name for integers
+		ns := make([]uint64, 0, numFields-1)
+		for _, st := range ss[1:] {
+			n, err := strconv.ParseUint(st, 10, 64)
+			if err != nil {
+				return nil, err
+			}
+
+			ns = append(ns, n)
+		}
+
+		ops = append(ops, NFSOperationStats{
+			Operation:                   strings.TrimSuffix(ss[0], ":"),
+			Requests:                    ns[0],
+			Transmissions:               ns[1],
+			MajorTimeouts:               ns[2],
+			BytesSent:                   ns[3],
+			BytesReceived:               ns[4],
+			CumulativeQueueTime:         time.Duration(ns[5]) * time.Millisecond,
+			CumulativeTotalResponseTime: time.Duration(ns[6]) * time.Millisecond,
+			CumulativeTotalRequestTime:  time.Duration(ns[7]) * time.Millisecond,
+		})
+	}
+
+	return ops, s.Err()
+}
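+
+// For reference, the READ line from the mountstats fixture above parses as:
+//
+//	"READ: 1298 1298 0 207680 1210292152 6 79386 79407"
+//	  -> Requests=1298, Transmissions=1298, MajorTimeouts=0,
+//	     BytesSent=207680, BytesReceived=1210292152,
+//	     CumulativeQueueTime=6ms, CumulativeTotalResponseTime=79386ms,
+//	     CumulativeTotalRequestTime=79407ms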
+
+// parseNFSTransportStats parses an NFSTransportStats line using an input set of
+// integer fields matched to a specific stats version.
+func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) {
+	// Extract the protocol field. It is the only string value in the line.
+	protocol := ss[0]
+	ss = ss[1:]
+
+	switch statVersion {
+	case statVersion10:
+		var expectedLength int
+		if protocol == "tcp" {
+			expectedLength = fieldTransport10TCPLen
+		} else if protocol == "udp" {
+			expectedLength = fieldTransport10UDPLen
+		} else {
+			return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.0 statement: %v", protocol, ss)
+		}
+		if len(ss) != expectedLength {
+			return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss)
+		}
+	case statVersion11:
+		var expectedLength int
+		if protocol == "tcp" {
+			expectedLength = fieldTransport11TCPLen
+		} else if protocol == "udp" {
+			expectedLength = fieldTransport11UDPLen
+		} else {
+			return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.1 statement: %v", protocol, ss)
+		}
+		if len(ss) != expectedLength {
+			return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss)
+		}
+	default:
+		return nil, fmt.Errorf("unrecognized NFS transport stats version: %q", statVersion)
+	}
+
+	// Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay
+	// in a v1.0 response. Since the stat length is bigger for TCP stats, we use
+	// the TCP length here.
+	//
+	// Note: slice length must be set to length of v1.1 stats to avoid a panic when
+	// only v1.0 stats are present.
+	// See: https://github.com/prometheus/node_exporter/issues/571.
+	ns := make([]uint64, fieldTransport11TCPLen)
+	for i, s := range ss {
+		n, err := strconv.ParseUint(s, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+
+		ns[i] = n
+	}
+
+	// The fields differ depending on the transport protocol (TCP or UDP)
+	// From https://utcc.utoronto.ca/%7Ecks/space/blog/linux/NFSMountstatsXprt
+	//
+	// For the udp RPC transport there is no connection count, connect idle time,
+	// or idle time (fields #3, #4, and #5); all other fields are the same. So
+	// we set them to 0 here.
+	if protocol == "udp" {
+		ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...)
+	}
+
+	return &NFSTransportStats{
+		Protocol:                 protocol,
+		Port:                     ns[0],
+		Bind:                     ns[1],
+		Connect:                  ns[2],
+		ConnectIdleTime:          ns[3],
+		IdleTime:                 time.Duration(ns[4]) * time.Second,
+		Sends:                    ns[5],
+		Receives:                 ns[6],
+		BadTransactionIDs:        ns[7],
+		CumulativeActiveRequests: ns[8],
+		CumulativeBacklog:        ns[9],
+		MaximumRPCSlotsUsed:      ns[10],
+		CumulativeSendingQueue:   ns[11],
+		CumulativePendingQueue:   ns[12],
+	}, nil
+}
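+
+// A toy example of the UDP splice above: a udp line carries no Connect,
+// ConnectIdleTime, or IdleTime fields, so three zeros are inserted at index
+// 2 to line the slice up with the TCP layout used by the struct literal.
+// (Toy values shown; real slices have the full stat-version length.)
+//
+//	ns := []uint64{801, 1, 6428, 6428}
+//	ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...)
+//	// ns is now [801 1 0 0 0 6428 6428]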
diff --git a/vendor/github.com/prometheus/procfs/net_dev.go b/vendor/github.com/prometheus/procfs/net_dev.go
new file mode 100644
index 0000000..3f25233
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_dev.go
@@ -0,0 +1,216 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"errors"
+	"os"
+	"sort"
+	"strconv"
+	"strings"
+)
+
+// NetDevLine is a single line parsed from /proc/net/dev or /proc/[pid]/net/dev.
+type NetDevLine struct {
+	Name         string `json:"name"`          // The name of the interface.
+	RxBytes      uint64 `json:"rx_bytes"`      // Cumulative count of bytes received.
+	RxPackets    uint64 `json:"rx_packets"`    // Cumulative count of packets received.
+	RxErrors     uint64 `json:"rx_errors"`     // Cumulative count of receive errors encountered.
+	RxDropped    uint64 `json:"rx_dropped"`    // Cumulative count of packets dropped while receiving.
+	RxFIFO       uint64 `json:"rx_fifo"`       // Cumulative count of FIFO buffer errors.
+	RxFrame      uint64 `json:"rx_frame"`      // Cumulative count of packet framing errors.
+	RxCompressed uint64 `json:"rx_compressed"` // Cumulative count of compressed packets received by the device driver.
+	RxMulticast  uint64 `json:"rx_multicast"`  // Cumulative count of multicast frames received by the device driver.
+	TxBytes      uint64 `json:"tx_bytes"`      // Cumulative count of bytes transmitted.
+	TxPackets    uint64 `json:"tx_packets"`    // Cumulative count of packets transmitted.
+	TxErrors     uint64 `json:"tx_errors"`     // Cumulative count of transmit errors encountered.
+	TxDropped    uint64 `json:"tx_dropped"`    // Cumulative count of packets dropped while transmitting.
+	TxFIFO       uint64 `json:"tx_fifo"`       // Cumulative count of FIFO buffer errors.
+	TxCollisions uint64 `json:"tx_collisions"` // Cumulative count of collisions detected on the interface.
+	TxCarrier    uint64 `json:"tx_carrier"`    // Cumulative count of carrier losses detected by the device driver.
+	TxCompressed uint64 `json:"tx_compressed"` // Cumulative count of compressed packets transmitted by the device driver.
+}
+
+// NetDev is parsed from /proc/net/dev or /proc/[pid]/net/dev. The map keys
+// are interface names.
+type NetDev map[string]NetDevLine
+
+// NewNetDev returns kernel/system statistics read from /proc/net/dev.
+func NewNetDev() (NetDev, error) {
+	fs, err := NewFS(DefaultMountPoint)
+	if err != nil {
+		return nil, err
+	}
+
+	return fs.NewNetDev()
+}
+
+// NewNetDev returns kernel/system statistics read from /proc/net/dev.
+func (fs FS) NewNetDev() (NetDev, error) {
+	return newNetDev(fs.Path("net/dev"))
+}
+
+// NewNetDev returns kernel/system statistics read from /proc/[pid]/net/dev.
+func (p Proc) NewNetDev() (NetDev, error) {
+	return newNetDev(p.path("net/dev"))
+}
+
+// newNetDev creates a new NetDev from the contents of the given file.
+func newNetDev(file string) (NetDev, error) {
+	f, err := os.Open(file)
+	if err != nil {
+		return NetDev{}, err
+	}
+	defer f.Close()
+
+	nd := NetDev{}
+	s := bufio.NewScanner(f)
+	for n := 0; s.Scan(); n++ {
+		// Skip the 2 header lines.
+		if n < 2 {
+			continue
+		}
+
+		line, err := nd.parseLine(s.Text())
+		if err != nil {
+			return nd, err
+		}
+
+		nd[line.Name] = *line
+	}
+
+	return nd, s.Err()
+}
+
+// parseLine parses a single line from the /proc/net/dev file. Header lines
+// must be filtered prior to calling this method.
+func (nd NetDev) parseLine(rawLine string) (*NetDevLine, error) {
+	parts := strings.SplitN(rawLine, ":", 2)
+	if len(parts) != 2 {
+		return nil, errors.New("invalid net/dev line, missing colon")
+	}
+	fields := strings.Fields(strings.TrimSpace(parts[1]))
+
+	var err error
+	line := &NetDevLine{}
+
+	// Interface Name
+	line.Name = strings.TrimSpace(parts[0])
+	if line.Name == "" {
+		return nil, errors.New("invalid net/dev line, empty interface name")
+	}
+
+	// RX
+	line.RxBytes, err = strconv.ParseUint(fields[0], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.RxPackets, err = strconv.ParseUint(fields[1], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.RxErrors, err = strconv.ParseUint(fields[2], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.RxDropped, err = strconv.ParseUint(fields[3], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.RxFIFO, err = strconv.ParseUint(fields[4], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.RxFrame, err = strconv.ParseUint(fields[5], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.RxCompressed, err = strconv.ParseUint(fields[6], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.RxMulticast, err = strconv.ParseUint(fields[7], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+
+	// TX
+	line.TxBytes, err = strconv.ParseUint(fields[8], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.TxPackets, err = strconv.ParseUint(fields[9], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.TxErrors, err = strconv.ParseUint(fields[10], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.TxDropped, err = strconv.ParseUint(fields[11], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.TxFIFO, err = strconv.ParseUint(fields[12], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.TxCollisions, err = strconv.ParseUint(fields[13], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.TxCarrier, err = strconv.ParseUint(fields[14], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.TxCompressed, err = strconv.ParseUint(fields[15], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+
+	return line, nil
+}
+
+// Total aggregates the values across interfaces and returns a new NetDevLine.
+// The Name field will be a sorted comma separated list of interface names.
+func (nd NetDev) Total() NetDevLine {
+	total := NetDevLine{}
+
+	names := make([]string, 0, len(nd))
+	for _, ifc := range nd {
+		names = append(names, ifc.Name)
+		total.RxBytes += ifc.RxBytes
+		total.RxPackets += ifc.RxPackets
+		total.RxErrors += ifc.RxErrors
+		total.RxDropped += ifc.RxDropped
+		total.RxFIFO += ifc.RxFIFO
+		total.RxFrame += ifc.RxFrame
+		total.RxCompressed += ifc.RxCompressed
+		total.RxMulticast += ifc.RxMulticast
+		total.TxBytes += ifc.TxBytes
+		total.TxPackets += ifc.TxPackets
+		total.TxErrors += ifc.TxErrors
+		total.TxDropped += ifc.TxDropped
+		total.TxFIFO += ifc.TxFIFO
+		total.TxCollisions += ifc.TxCollisions
+		total.TxCarrier += ifc.TxCarrier
+		total.TxCompressed += ifc.TxCompressed
+	}
+	sort.Strings(names)
+	total.Name = strings.Join(names, ", ")
+
+	return total
+}
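
For reference, a minimal usage sketch of the NetDev API defined above. It assumes a Linux host with procfs mounted at /proc and the package imported from its canonical path:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// Read and parse /proc/net/dev.
	nd, err := procfs.NewNetDev()
	if err != nil {
		log.Fatal(err)
	}

	// Aggregate the counters across all interfaces.
	total := nd.Total()
	fmt.Printf("interfaces=%s rx=%d bytes tx=%d bytes\n",
		total.Name, total.RxBytes, total.TxBytes)
}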
diff --git a/vendor/github.com/prometheus/procfs/nfs/nfs.go b/vendor/github.com/prometheus/procfs/nfs/nfs.go
new file mode 100644
index 0000000..651bf68
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/nfs/nfs.go
@@ -0,0 +1,263 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package nfs implements parsing of /proc/net/rpc/nfsd and /proc/net/rpc/nfs.
+// Fields are documented in https://www.svennd.be/nfsd-stats-explained-procnetrpcnfsd/
+package nfs
+
+// ReplyCache models the "rc" line.
+type ReplyCache struct {
+	Hits    uint64
+	Misses  uint64
+	NoCache uint64
+}
+
+// FileHandles models the "fh" line.
+type FileHandles struct {
+	Stale        uint64
+	TotalLookups uint64
+	AnonLookups  uint64
+	DirNoCache   uint64
+	NoDirNoCache uint64
+}
+
+// InputOutput models the "io" line.
+type InputOutput struct {
+	Read  uint64
+	Write uint64
+}
+
+// Threads models the "th" line.
+type Threads struct {
+	Threads uint64
+	FullCnt uint64
+}
+
+// ReadAheadCache models the "ra" line.
+type ReadAheadCache struct {
+	CacheSize      uint64
+	CacheHistogram []uint64
+	NotFound       uint64
+}
+
+// Network models the "net" line.
+type Network struct {
+	NetCount   uint64
+	UDPCount   uint64
+	TCPCount   uint64
+	TCPConnect uint64
+}
+
+// ClientRPC models the nfs "rpc" line.
+type ClientRPC struct {
+	RPCCount        uint64
+	Retransmissions uint64
+	AuthRefreshes   uint64
+}
+
+// ServerRPC models the nfsd "rpc" line.
+type ServerRPC struct {
+	RPCCount uint64
+	BadCnt   uint64
+	BadFmt   uint64
+	BadAuth  uint64
+	BadcInt  uint64
+}
+
+// V2Stats models the "proc2" line.
+type V2Stats struct {
+	Null     uint64
+	GetAttr  uint64
+	SetAttr  uint64
+	Root     uint64
+	Lookup   uint64
+	ReadLink uint64
+	Read     uint64
+	WrCache  uint64
+	Write    uint64
+	Create   uint64
+	Remove   uint64
+	Rename   uint64
+	Link     uint64
+	SymLink  uint64
+	MkDir    uint64
+	RmDir    uint64
+	ReadDir  uint64
+	FsStat   uint64
+}
+
+// V3Stats models the "proc3" line.
+type V3Stats struct {
+	Null        uint64
+	GetAttr     uint64
+	SetAttr     uint64
+	Lookup      uint64
+	Access      uint64
+	ReadLink    uint64
+	Read        uint64
+	Write       uint64
+	Create      uint64
+	MkDir       uint64
+	SymLink     uint64
+	MkNod       uint64
+	Remove      uint64
+	RmDir       uint64
+	Rename      uint64
+	Link        uint64
+	ReadDir     uint64
+	ReadDirPlus uint64
+	FsStat      uint64
+	FsInfo      uint64
+	PathConf    uint64
+	Commit      uint64
+}
+
+// ClientV4Stats models the nfs "proc4" line.
+type ClientV4Stats struct {
+	Null               uint64
+	Read               uint64
+	Write              uint64
+	Commit             uint64
+	Open               uint64
+	OpenConfirm        uint64
+	OpenNoattr         uint64
+	OpenDowngrade      uint64
+	Close              uint64
+	Setattr            uint64
+	FsInfo             uint64
+	Renew              uint64
+	SetClientID        uint64
+	SetClientIDConfirm uint64
+	Lock               uint64
+	Lockt              uint64
+	Locku              uint64
+	Access             uint64
+	Getattr            uint64
+	Lookup             uint64
+	LookupRoot         uint64
+	Remove             uint64
+	Rename             uint64
+	Link               uint64
+	Symlink            uint64
+	Create             uint64
+	Pathconf           uint64
+	StatFs             uint64
+	ReadLink           uint64
+	ReadDir            uint64
+	ServerCaps         uint64
+	DelegReturn        uint64
+	GetACL             uint64
+	SetACL             uint64
+	FsLocations        uint64
+	ReleaseLockowner   uint64
+	Secinfo            uint64
+	FsidPresent        uint64
+	ExchangeID         uint64
+	CreateSession      uint64
+	DestroySession     uint64
+	Sequence           uint64
+	GetLeaseTime       uint64
+	ReclaimComplete    uint64
+	LayoutGet          uint64
+	GetDeviceInfo      uint64
+	LayoutCommit       uint64
+	LayoutReturn       uint64
+	SecinfoNoName      uint64
+	TestStateID        uint64
+	FreeStateID        uint64
+	GetDeviceList      uint64
+	BindConnToSession  uint64
+	DestroyClientID    uint64
+	Seek               uint64
+	Allocate           uint64
+	DeAllocate         uint64
+	LayoutStats        uint64
+	Clone              uint64
+}
+
+// ServerV4Stats models the nfsd "proc4" line.
+type ServerV4Stats struct {
+	Null     uint64
+	Compound uint64
+}
+
+// V4Ops models the "proc4ops" line: NFSv4 operations
+// Variable list, see:
+// v4.0 https://tools.ietf.org/html/rfc3010 (38 operations)
+// v4.1 https://tools.ietf.org/html/rfc5661 (58 operations)
+// v4.2 https://tools.ietf.org/html/draft-ietf-nfsv4-minorversion2-41 (71 operations)
+type V4Ops struct {
+	//Values       uint64 // Variable depending on v4.x sub-version. TODO: Will this always at least include the fields in this struct?
+	Op0Unused    uint64
+	Op1Unused    uint64
+	Op2Future    uint64
+	Access       uint64
+	Close        uint64
+	Commit       uint64
+	Create       uint64
+	DelegPurge   uint64
+	DelegReturn  uint64
+	GetAttr      uint64
+	GetFH        uint64
+	Link         uint64
+	Lock         uint64
+	Lockt        uint64
+	Locku        uint64
+	Lookup       uint64
+	LookupRoot   uint64
+	Nverify      uint64
+	Open         uint64
+	OpenAttr     uint64
+	OpenConfirm  uint64
+	OpenDgrd     uint64
+	PutFH        uint64
+	PutPubFH     uint64
+	PutRootFH    uint64
+	Read         uint64
+	ReadDir      uint64
+	ReadLink     uint64
+	Remove       uint64
+	Rename       uint64
+	Renew        uint64
+	RestoreFH    uint64
+	SaveFH       uint64
+	SecInfo      uint64
+	SetAttr      uint64
+	Verify       uint64
+	Write        uint64
+	RelLockOwner uint64
+}
+
+// ClientRPCStats models all stats from /proc/net/rpc/nfs.
+type ClientRPCStats struct {
+	Network       Network
+	ClientRPC     ClientRPC
+	V2Stats       V2Stats
+	V3Stats       V3Stats
+	ClientV4Stats ClientV4Stats
+}
+
+// ServerRPCStats models all stats from /proc/net/rpc/nfsd.
+type ServerRPCStats struct {
+	ReplyCache     ReplyCache
+	FileHandles    FileHandles
+	InputOutput    InputOutput
+	Threads        Threads
+	ReadAheadCache ReadAheadCache
+	Network        Network
+	ServerRPC      ServerRPC
+	V2Stats        V2Stats
+	V3Stats        V3Stats
+	ServerV4Stats  ServerV4Stats
+	V4Ops          V4Ops
+}
diff --git a/vendor/github.com/prometheus/procfs/nfs/parse.go b/vendor/github.com/prometheus/procfs/nfs/parse.go
new file mode 100644
index 0000000..95a83cc
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/nfs/parse.go
@@ -0,0 +1,317 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package nfs
+
+import (
+	"fmt"
+)
+
+func parseReplyCache(v []uint64) (ReplyCache, error) {
+	if len(v) != 3 {
+		return ReplyCache{}, fmt.Errorf("invalid ReplyCache line %q", v)
+	}
+
+	return ReplyCache{
+		Hits:    v[0],
+		Misses:  v[1],
+		NoCache: v[2],
+	}, nil
+}
+
+func parseFileHandles(v []uint64) (FileHandles, error) {
+	if len(v) != 5 {
+		return FileHandles{}, fmt.Errorf("invalid FileHandles line %q", v)
+	}
+
+	return FileHandles{
+		Stale:        v[0],
+		TotalLookups: v[1],
+		AnonLookups:  v[2],
+		DirNoCache:   v[3],
+		NoDirNoCache: v[4],
+	}, nil
+}
+
+func parseInputOutput(v []uint64) (InputOutput, error) {
+	if len(v) != 2 {
+		return InputOutput{}, fmt.Errorf("invalid InputOutput line %q", v)
+	}
+
+	return InputOutput{
+		Read:  v[0],
+		Write: v[1],
+	}, nil
+}
+
+func parseThreads(v []uint64) (Threads, error) {
+	if len(v) != 2 {
+		return Threads{}, fmt.Errorf("invalid Threads line %q", v)
+	}
+
+	return Threads{
+		Threads: v[0],
+		FullCnt: v[1],
+	}, nil
+}
+
+func parseReadAheadCache(v []uint64) (ReadAheadCache, error) {
+	if len(v) != 12 {
+		return ReadAheadCache{}, fmt.Errorf("invalid ReadAheadCache line %q", v)
+	}
+
+	return ReadAheadCache{
+		CacheSize:      v[0],
+		CacheHistogram: v[1:11],
+		NotFound:       v[11],
+	}, nil
+}
+
+func parseNetwork(v []uint64) (Network, error) {
+	if len(v) != 4 {
+		return Network{}, fmt.Errorf("invalid Network line %q", v)
+	}
+
+	return Network{
+		NetCount:   v[0],
+		UDPCount:   v[1],
+		TCPCount:   v[2],
+		TCPConnect: v[3],
+	}, nil
+}
+
+func parseServerRPC(v []uint64) (ServerRPC, error) {
+	if len(v) != 5 {
+		return ServerRPC{}, fmt.Errorf("invalid RPC line %q", v)
+	}
+
+	return ServerRPC{
+		RPCCount: v[0],
+		BadCnt:   v[1],
+		BadFmt:   v[2],
+		BadAuth:  v[3],
+		BadcInt:  v[4],
+	}, nil
+}
+
+func parseClientRPC(v []uint64) (ClientRPC, error) {
+	if len(v) != 3 {
+		return ClientRPC{}, fmt.Errorf("invalid RPC line %q", v)
+	}
+
+	return ClientRPC{
+		RPCCount:        v[0],
+		Retransmissions: v[1],
+		AuthRefreshes:   v[2],
+	}, nil
+}
+
+func parseV2Stats(v []uint64) (V2Stats, error) {
+	values := int(v[0])
+	if len(v[1:]) != values || values != 18 {
+		return V2Stats{}, fmt.Errorf("invalid V2Stats line %q", v)
+	}
+
+	return V2Stats{
+		Null:     v[1],
+		GetAttr:  v[2],
+		SetAttr:  v[3],
+		Root:     v[4],
+		Lookup:   v[5],
+		ReadLink: v[6],
+		Read:     v[7],
+		WrCache:  v[8],
+		Write:    v[9],
+		Create:   v[10],
+		Remove:   v[11],
+		Rename:   v[12],
+		Link:     v[13],
+		SymLink:  v[14],
+		MkDir:    v[15],
+		RmDir:    v[16],
+		ReadDir:  v[17],
+		FsStat:   v[18],
+	}, nil
+}
+
+func parseV3Stats(v []uint64) (V3Stats, error) {
+	values := int(v[0])
+	if len(v[1:]) != values || values != 22 {
+		return V3Stats{}, fmt.Errorf("invalid V3Stats line %q", v)
+	}
+
+	return V3Stats{
+		Null:        v[1],
+		GetAttr:     v[2],
+		SetAttr:     v[3],
+		Lookup:      v[4],
+		Access:      v[5],
+		ReadLink:    v[6],
+		Read:        v[7],
+		Write:       v[8],
+		Create:      v[9],
+		MkDir:       v[10],
+		SymLink:     v[11],
+		MkNod:       v[12],
+		Remove:      v[13],
+		RmDir:       v[14],
+		Rename:      v[15],
+		Link:        v[16],
+		ReadDir:     v[17],
+		ReadDirPlus: v[18],
+		FsStat:      v[19],
+		FsInfo:      v[20],
+		PathConf:    v[21],
+		Commit:      v[22],
+	}, nil
+}
+
+func parseClientV4Stats(v []uint64) (ClientV4Stats, error) {
+	values := int(v[0])
+	if len(v[1:]) != values {
+		return ClientV4Stats{}, fmt.Errorf("invalid ClientV4Stats line %q", v)
+	}
+
+	// This function currently supports mapping 59 NFS v4 client stats.  Older
+	// kernels may emit fewer stats, so we must detect this and pad out the
+	// values to match the expected slice size.
+	if values < 59 {
+		newValues := make([]uint64, 60)
+		copy(newValues, v)
+		v = newValues
+	}
+
+	return ClientV4Stats{
+		Null:               v[1],
+		Read:               v[2],
+		Write:              v[3],
+		Commit:             v[4],
+		Open:               v[5],
+		OpenConfirm:        v[6],
+		OpenNoattr:         v[7],
+		OpenDowngrade:      v[8],
+		Close:              v[9],
+		Setattr:            v[10],
+		FsInfo:             v[11],
+		Renew:              v[12],
+		SetClientID:        v[13],
+		SetClientIDConfirm: v[14],
+		Lock:               v[15],
+		Lockt:              v[16],
+		Locku:              v[17],
+		Access:             v[18],
+		Getattr:            v[19],
+		Lookup:             v[20],
+		LookupRoot:         v[21],
+		Remove:             v[22],
+		Rename:             v[23],
+		Link:               v[24],
+		Symlink:            v[25],
+		Create:             v[26],
+		Pathconf:           v[27],
+		StatFs:             v[28],
+		ReadLink:           v[29],
+		ReadDir:            v[30],
+		ServerCaps:         v[31],
+		DelegReturn:        v[32],
+		GetACL:             v[33],
+		SetACL:             v[34],
+		FsLocations:        v[35],
+		ReleaseLockowner:   v[36],
+		Secinfo:            v[37],
+		FsidPresent:        v[38],
+		ExchangeID:         v[39],
+		CreateSession:      v[40],
+		DestroySession:     v[41],
+		Sequence:           v[42],
+		GetLeaseTime:       v[43],
+		ReclaimComplete:    v[44],
+		LayoutGet:          v[45],
+		GetDeviceInfo:      v[46],
+		LayoutCommit:       v[47],
+		LayoutReturn:       v[48],
+		SecinfoNoName:      v[49],
+		TestStateID:        v[50],
+		FreeStateID:        v[51],
+		GetDeviceList:      v[52],
+		BindConnToSession:  v[53],
+		DestroyClientID:    v[54],
+		Seek:               v[55],
+		Allocate:           v[56],
+		DeAllocate:         v[57],
+		LayoutStats:        v[58],
+		Clone:              v[59],
+	}, nil
+}
+
+func parseServerV4Stats(v []uint64) (ServerV4Stats, error) {
+	values := int(v[0])
+	if len(v[1:]) != values || values != 2 {
+		return ServerV4Stats{}, fmt.Errorf("invalid ServerV4Stats line %q", v)
+	}
+
+	return ServerV4Stats{
+		Null:     v[1],
+		Compound: v[2],
+	}, nil
+}
+
+func parseV4Ops(v []uint64) (V4Ops, error) {
+	values := int(v[0])
+	if len(v[1:]) != values || values < 39 {
+		return V4Ops{}, fmt.Errorf("invalid V4Ops line %q", v)
+	}
+
+	stats := V4Ops{
+		Op0Unused:    v[1],
+		Op1Unused:    v[2],
+		Op2Future:    v[3],
+		Access:       v[4],
+		Close:        v[5],
+		Commit:       v[6],
+		Create:       v[7],
+		DelegPurge:   v[8],
+		DelegReturn:  v[9],
+		GetAttr:      v[10],
+		GetFH:        v[11],
+		Link:         v[12],
+		Lock:         v[13],
+		Lockt:        v[14],
+		Locku:        v[15],
+		Lookup:       v[16],
+		LookupRoot:   v[17],
+		Nverify:      v[18],
+		Open:         v[19],
+		OpenAttr:     v[20],
+		OpenConfirm:  v[21],
+		OpenDgrd:     v[22],
+		PutFH:        v[23],
+		PutPubFH:     v[24],
+		PutRootFH:    v[25],
+		Read:         v[26],
+		ReadDir:      v[27],
+		ReadLink:     v[28],
+		Remove:       v[29],
+		Rename:       v[30],
+		Renew:        v[31],
+		RestoreFH:    v[32],
+		SaveFH:       v[33],
+		SecInfo:      v[34],
+		SetAttr:      v[35],
+		Verify:       v[36],
+		Write:        v[37],
+		RelLockOwner: v[38],
+	}
+
+	return stats, nil
+}
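
Each proc*, proc4ops, and similar line begins with a count of the values that follow, and the helpers above validate that count before indexing into the slice. An illustrative standalone sketch of the same validation pattern (parseCounted is a hypothetical helper, not part of this package):

package main

import "fmt"

// parseCounted checks that v[0] announces exactly the number of values that
// follow and that the count matches the caller's expectation, mirroring the
// guard used by the parse* helpers above.
func parseCounted(v []uint64, want int) ([]uint64, error) {
	if len(v) == 0 || int(v[0]) != len(v[1:]) || int(v[0]) != want {
		return nil, fmt.Errorf("invalid counted line %q", fmt.Sprint(v))
	}
	return v[1:], nil
}

func main() {
	vals, err := parseCounted([]uint64{2, 7, 9}, 2)
	fmt.Println(vals, err) // [7 9] <nil>
}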
diff --git a/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go b/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go
new file mode 100644
index 0000000..c0d3a5a
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go
@@ -0,0 +1,67 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package nfs
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// ParseClientRPCStats returns stats read from /proc/net/rpc/nfs.
+func ParseClientRPCStats(r io.Reader) (*ClientRPCStats, error) {
+	stats := &ClientRPCStats{}
+
+	scanner := bufio.NewScanner(r)
+	for scanner.Scan() {
+		line := scanner.Text()
+		parts := strings.Fields(line)
+		// require at least <key> <value>
+		if len(parts) < 2 {
+			return nil, fmt.Errorf("invalid NFS metric line %q", line)
+		}
+
+		values, err := util.ParseUint64s(parts[1:])
+		if err != nil {
+			return nil, fmt.Errorf("error parsing NFS metric line: %s", err)
+		}
+
+		switch metricLine := parts[0]; metricLine {
+		case "net":
+			stats.Network, err = parseNetwork(values)
+		case "rpc":
+			stats.ClientRPC, err = parseClientRPC(values)
+		case "proc2":
+			stats.V2Stats, err = parseV2Stats(values)
+		case "proc3":
+			stats.V3Stats, err = parseV3Stats(values)
+		case "proc4":
+			stats.ClientV4Stats, err = parseClientV4Stats(values)
+		default:
+			return nil, fmt.Errorf("unknown NFS metric line %q", metricLine)
+		}
+		if err != nil {
+			return nil, fmt.Errorf("errors parsing NFS metric line: %s", err)
+		}
+	}
+
+	if err := scanner.Err(); err != nil {
+		return nil, fmt.Errorf("error scanning NFS file: %s", err)
+	}
+
+	return stats, nil
+}
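
A minimal usage sketch for the client-side parser, assuming a Linux host with an NFS client and procfs mounted at /proc:

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/prometheus/procfs/nfs"
)

func main() {
	f, err := os.Open("/proc/net/rpc/nfs")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	stats, err := nfs.ParseClientRPCStats(f)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("rpc calls=%d retransmissions=%d\n",
		stats.ClientRPC.RPCCount, stats.ClientRPC.Retransmissions)
}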
diff --git a/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go b/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go
new file mode 100644
index 0000000..57bb4a3
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go
@@ -0,0 +1,89 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package nfs
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// ParseServerRPCStats returns stats read from /proc/net/rpc/nfsd.
+func ParseServerRPCStats(r io.Reader) (*ServerRPCStats, error) {
+	stats := &ServerRPCStats{}
+
+	scanner := bufio.NewScanner(r)
+	for scanner.Scan() {
+		line := scanner.Text()
+		parts := strings.Fields(line)
+		// require at least <key> <value>
+		if len(parts) < 2 {
+			return nil, fmt.Errorf("invalid NFSd metric line %q", line)
+		}
+		label := parts[0]
+
+		var values []uint64
+		var err error
+		if label == "th" {
+			if len(parts) < 3 {
+				return nil, fmt.Errorf("invalid NFSd th metric line %q", line)
+			}
+			values, err = util.ParseUint64s(parts[1:3])
+		} else {
+			values, err = util.ParseUint64s(parts[1:])
+		}
+		if err != nil {
+			return nil, fmt.Errorf("error parsing NFSd metric line: %s", err)
+		}
+
+		switch metricLine := parts[0]; metricLine {
+		case "rc":
+			stats.ReplyCache, err = parseReplyCache(values)
+		case "fh":
+			stats.FileHandles, err = parseFileHandles(values)
+		case "io":
+			stats.InputOutput, err = parseInputOutput(values)
+		case "th":
+			stats.Threads, err = parseThreads(values)
+		case "ra":
+			stats.ReadAheadCache, err = parseReadAheadCache(values)
+		case "net":
+			stats.Network, err = parseNetwork(values)
+		case "rpc":
+			stats.ServerRPC, err = parseServerRPC(values)
+		case "proc2":
+			stats.V2Stats, err = parseV2Stats(values)
+		case "proc3":
+			stats.V3Stats, err = parseV3Stats(values)
+		case "proc4":
+			stats.ServerV4Stats, err = parseServerV4Stats(values)
+		case "proc4ops":
+			stats.V4Ops, err = parseV4Ops(values)
+		default:
+			return nil, fmt.Errorf("unknown NFSd metric line %q", metricLine)
+		}
+		if err != nil {
+			return nil, fmt.Errorf("errors parsing NFSd metric line: %s", err)
+		}
+	}
+
+	if err := scanner.Err(); err != nil {
+		return nil, fmt.Errorf("error scanning NFSd file: %s", err)
+	}
+
+	return stats, nil
+}
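
The server-side parser mirrors the client one; a short sketch, assuming the host runs an NFS server so /proc/net/rpc/nfsd exists:

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/prometheus/procfs/nfs"
)

func main() {
	f, err := os.Open("/proc/net/rpc/nfsd")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	stats, err := nfs.ParseServerRPCStats(f)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("threads=%d reply-cache hits=%d\n",
		stats.Threads.Threads, stats.ReplyCache.Hits)
}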
diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go
new file mode 100644
index 0000000..06bed0e
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc.go
@@ -0,0 +1,258 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// Proc provides information about a running process.
+type Proc struct {
+	// The process ID.
+	PID int
+
+	fs FS
+}
+
+// Procs represents a list of Proc structs.
+type Procs []Proc
+
+func (p Procs) Len() int           { return len(p) }
+func (p Procs) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID }
+
+// Self returns a process for the current process read via /proc/self.
+func Self() (Proc, error) {
+	fs, err := NewFS(DefaultMountPoint)
+	if err != nil {
+		return Proc{}, err
+	}
+	return fs.Self()
+}
+
+// NewProc returns a process for the given pid under /proc.
+func NewProc(pid int) (Proc, error) {
+	fs, err := NewFS(DefaultMountPoint)
+	if err != nil {
+		return Proc{}, err
+	}
+	return fs.NewProc(pid)
+}
+
+// AllProcs returns a list of all currently available processes under /proc.
+func AllProcs() (Procs, error) {
+	fs, err := NewFS(DefaultMountPoint)
+	if err != nil {
+		return Procs{}, err
+	}
+	return fs.AllProcs()
+}
+
+// Self returns a process for the current process.
+func (fs FS) Self() (Proc, error) {
+	p, err := os.Readlink(fs.Path("self"))
+	if err != nil {
+		return Proc{}, err
+	}
+	pid, err := strconv.Atoi(strings.Replace(p, string(fs), "", -1))
+	if err != nil {
+		return Proc{}, err
+	}
+	return fs.NewProc(pid)
+}
+
+// NewProc returns a process for the given pid.
+func (fs FS) NewProc(pid int) (Proc, error) {
+	if _, err := os.Stat(fs.Path(strconv.Itoa(pid))); err != nil {
+		return Proc{}, err
+	}
+	return Proc{PID: pid, fs: fs}, nil
+}
+
+// AllProcs returns a list of all currently available processes.
+func (fs FS) AllProcs() (Procs, error) {
+	d, err := os.Open(fs.Path())
+	if err != nil {
+		return Procs{}, err
+	}
+	defer d.Close()
+
+	names, err := d.Readdirnames(-1)
+	if err != nil {
+		return Procs{}, fmt.Errorf("could not read %s: %s", d.Name(), err)
+	}
+
+	p := Procs{}
+	for _, n := range names {
+		pid, err := strconv.ParseInt(n, 10, 64)
+		if err != nil {
+			continue
+		}
+		p = append(p, Proc{PID: int(pid), fs: fs})
+	}
+
+	return p, nil
+}
+
+// CmdLine returns the command line of a process.
+func (p Proc) CmdLine() ([]string, error) {
+	f, err := os.Open(p.path("cmdline"))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	data, err := ioutil.ReadAll(f)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(data) < 1 {
+		return []string{}, nil
+	}
+
+	return strings.Split(string(bytes.TrimRight(data, "\x00")), "\x00"), nil
+}
+
+// Comm returns the command name of a process.
+func (p Proc) Comm() (string, error) {
+	f, err := os.Open(p.path("comm"))
+	if err != nil {
+		return "", err
+	}
+	defer f.Close()
+
+	data, err := ioutil.ReadAll(f)
+	if err != nil {
+		return "", err
+	}
+
+	return strings.TrimSpace(string(data)), nil
+}
+
+// Executable returns the absolute path of the executable command of a process.
+func (p Proc) Executable() (string, error) {
+	exe, err := os.Readlink(p.path("exe"))
+	if os.IsNotExist(err) {
+		return "", nil
+	}
+
+	return exe, err
+}
+
+// Cwd returns the absolute path to the current working directory of the process.
+func (p Proc) Cwd() (string, error) {
+	wd, err := os.Readlink(p.path("cwd"))
+	if os.IsNotExist(err) {
+		return "", nil
+	}
+
+	return wd, err
+}
+
+// RootDir returns the absolute path to the process's root directory (as set by chroot).
+func (p Proc) RootDir() (string, error) {
+	rdir, err := os.Readlink(p.path("root"))
+	if os.IsNotExist(err) {
+		return "", nil
+	}
+
+	return rdir, err
+}
+
+// FileDescriptors returns the currently open file descriptors of a process.
+func (p Proc) FileDescriptors() ([]uintptr, error) {
+	names, err := p.fileDescriptors()
+	if err != nil {
+		return nil, err
+	}
+
+	fds := make([]uintptr, len(names))
+	for i, n := range names {
+		fd, err := strconv.ParseInt(n, 10, 32)
+		if err != nil {
+			return nil, fmt.Errorf("could not parse fd %s: %s", n, err)
+		}
+		fds[i] = uintptr(fd)
+	}
+
+	return fds, nil
+}
+
+// FileDescriptorTargets returns the targets of all file descriptors of a process.
+// If a file descriptor is not a symlink to a file (like a socket), that value will be the empty string.
+func (p Proc) FileDescriptorTargets() ([]string, error) {
+	names, err := p.fileDescriptors()
+	if err != nil {
+		return nil, err
+	}
+
+	targets := make([]string, len(names))
+
+	for i, name := range names {
+		target, err := os.Readlink(p.path("fd", name))
+		if err == nil {
+			targets[i] = target
+		}
+	}
+
+	return targets, nil
+}
+
+// FileDescriptorsLen returns the number of currently open file descriptors of
+// a process.
+func (p Proc) FileDescriptorsLen() (int, error) {
+	fds, err := p.fileDescriptors()
+	if err != nil {
+		return 0, err
+	}
+
+	return len(fds), nil
+}
+
+// MountStats retrieves statistics and configuration for mount points in a
+// process's namespace.
+func (p Proc) MountStats() ([]*Mount, error) {
+	f, err := os.Open(p.path("mountstats"))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	return parseMountStats(f)
+}
+
+func (p Proc) fileDescriptors() ([]string, error) {
+	d, err := os.Open(p.path("fd"))
+	if err != nil {
+		return nil, err
+	}
+	defer d.Close()
+
+	names, err := d.Readdirnames(-1)
+	if err != nil {
+		return nil, fmt.Errorf("could not read %s: %s", d.Name(), err)
+	}
+
+	return names, nil
+}
+
+func (p Proc) path(pa ...string) string {
+	return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
+}
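
A minimal sketch of the Proc API above, inspecting the current process via /proc/self; it assumes a Linux host with procfs mounted at the default location:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}

	comm, err := p.Comm()
	if err != nil {
		log.Fatal(err)
	}

	fds, err := p.FileDescriptorsLen()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("pid=%d comm=%s open fds=%d\n", p.PID, comm, fds)
}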
diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go
new file mode 100644
index 0000000..0251c83
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_io.go
@@ -0,0 +1,65 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+)
+
+// ProcIO models the content of /proc/<pid>/io.
+type ProcIO struct {
+	// Chars read.
+	RChar uint64
+	// Chars written.
+	WChar uint64
+	// Read syscalls.
+	SyscR uint64
+	// Write syscalls.
+	SyscW uint64
+	// Bytes read.
+	ReadBytes uint64
+	// Bytes written.
+	WriteBytes uint64
+	// Bytes written, but taking into account truncation. See
+	// Documentation/filesystems/proc.txt in the kernel sources for
+	// detailed explanation.
+	CancelledWriteBytes int64
+}
+
+// NewIO creates a new ProcIO instance from a given Proc instance.
+func (p Proc) NewIO() (ProcIO, error) {
+	pio := ProcIO{}
+
+	f, err := os.Open(p.path("io"))
+	if err != nil {
+		return pio, err
+	}
+	defer f.Close()
+
+	data, err := ioutil.ReadAll(f)
+	if err != nil {
+		return pio, err
+	}
+
+	ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" +
+		"read_bytes: %d\nwrite_bytes: %d\n" +
+		"cancelled_write_bytes: %d\n"
+
+	_, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR,
+		&pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes)
+
+	return pio, err
+}
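
A short sketch of reading a process's I/O counters with the API above, again via /proc/self:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}

	// Parse /proc/self/io.
	pio, err := p.NewIO()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("read=%d bytes written=%d bytes\n", pio.ReadBytes, pio.WriteBytes)
}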
diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go
new file mode 100644
index 0000000..f04ba6f
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_limits.go
@@ -0,0 +1,150 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"fmt"
+	"os"
+	"regexp"
+	"strconv"
+)
+
+// ProcLimits represents the soft limits for each of the process's resource
+// limits. For more information see getrlimit(2):
+// http://man7.org/linux/man-pages/man2/getrlimit.2.html.
+type ProcLimits struct {
+	// CPU time limit in seconds.
+	CPUTime int64
+	// Maximum size of files that the process may create.
+	FileSize int64
+	// Maximum size of the process's data segment (initialized data,
+	// uninitialized data, and heap).
+	DataSize int64
+	// Maximum size of the process stack in bytes.
+	StackSize int64
+	// Maximum size of a core file.
+	CoreFileSize int64
+	// Limit of the process's resident set in pages.
+	ResidentSet int64
+	// Maximum number of processes that can be created for the real user ID of
+	// the calling process.
+	Processes int64
+	// Value one greater than the maximum file descriptor number that can be
+	// opened by this process.
+	OpenFiles int64
+	// Maximum number of bytes of memory that may be locked into RAM.
+	LockedMemory int64
+	// Maximum size of the process's virtual memory address space in bytes.
+	AddressSpace int64
+	// Limit on the combined number of flock(2) locks and fcntl(2) leases that
+	// this process may establish.
+	FileLocks int64
+	// Limit of signals that may be queued for the real user ID of the calling
+	// process.
+	PendingSignals int64
+	// Limit on the number of bytes that can be allocated for POSIX message
+	// queues for the real user ID of the calling process.
+	MsqqueueSize int64
+	// Limit of the nice priority set using setpriority(2) or nice(2).
+	NicePriority int64
+	// Limit of the real-time priority set using sched_setscheduler(2) or
+	// sched_setparam(2).
+	RealtimePriority int64
+	// Limit (in microseconds) on the amount of CPU time that a process
+	// scheduled under a real-time scheduling policy may consume without making
+	// a blocking system call.
+	RealtimeTimeout int64
+}
+
+const (
+	limitsFields    = 3
+	limitsUnlimited = "unlimited"
+)
+
+var (
+	limitsDelimiter = regexp.MustCompile("  +")
+)
+
+// NewLimits returns the current soft limits of the process.
+func (p Proc) NewLimits() (ProcLimits, error) {
+	f, err := os.Open(p.path("limits"))
+	if err != nil {
+		return ProcLimits{}, err
+	}
+	defer f.Close()
+
+	var (
+		l = ProcLimits{}
+		s = bufio.NewScanner(f)
+	)
+	for s.Scan() {
+		fields := limitsDelimiter.Split(s.Text(), limitsFields)
+		if len(fields) != limitsFields {
+			return ProcLimits{}, fmt.Errorf(
+				"couldn't parse %s line %s", f.Name(), s.Text())
+		}
+
+		switch fields[0] {
+		case "Max cpu time":
+			l.CPUTime, err = parseInt(fields[1])
+		case "Max file size":
+			l.FileSize, err = parseInt(fields[1])
+		case "Max data size":
+			l.DataSize, err = parseInt(fields[1])
+		case "Max stack size":
+			l.StackSize, err = parseInt(fields[1])
+		case "Max core file size":
+			l.CoreFileSize, err = parseInt(fields[1])
+		case "Max resident set":
+			l.ResidentSet, err = parseInt(fields[1])
+		case "Max processes":
+			l.Processes, err = parseInt(fields[1])
+		case "Max open files":
+			l.OpenFiles, err = parseInt(fields[1])
+		case "Max locked memory":
+			l.LockedMemory, err = parseInt(fields[1])
+		case "Max address space":
+			l.AddressSpace, err = parseInt(fields[1])
+		case "Max file locks":
+			l.FileLocks, err = parseInt(fields[1])
+		case "Max pending signals":
+			l.PendingSignals, err = parseInt(fields[1])
+		case "Max msgqueue size":
+			l.MsqqueueSize, err = parseInt(fields[1])
+		case "Max nice priority":
+			l.NicePriority, err = parseInt(fields[1])
+		case "Max realtime priority":
+			l.RealtimePriority, err = parseInt(fields[1])
+		case "Max realtime timeout":
+			l.RealtimeTimeout, err = parseInt(fields[1])
+		}
+		if err != nil {
+			return ProcLimits{}, err
+		}
+	}
+
+	return l, s.Err()
+}
+
+func parseInt(s string) (int64, error) {
+	if s == limitsUnlimited {
+		return -1, nil
+	}
+	i, err := strconv.ParseInt(s, 10, 64)
+	if err != nil {
+		return 0, fmt.Errorf("couldn't parse value %s: %s", s, err)
+	}
+	return i, nil
+}
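
A usage sketch for the limits parser above; note that parseInt maps the string "unlimited" to -1, which callers must handle:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}

	l, err := p.NewLimits()
	if err != nil {
		log.Fatal(err)
	}

	// -1 is the sentinel for "unlimited".
	if l.OpenFiles == -1 {
		fmt.Println("open files: unlimited")
	} else {
		fmt.Printf("open files: %d\n", l.OpenFiles)
	}
}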
diff --git a/vendor/github.com/prometheus/procfs/proc_ns.go b/vendor/github.com/prometheus/procfs/proc_ns.go
new file mode 100644
index 0000000..d06c26e
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_ns.go
@@ -0,0 +1,68 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// Namespace represents a single namespace of a process.
+type Namespace struct {
+	Type  string // Namespace type.
+	Inode uint32 // Inode number of the namespace. If two processes are in the same namespace their inodes will match.
+}
+
+// Namespaces contains all of the namespaces that the process is contained in.
+type Namespaces map[string]Namespace
+
+// NewNamespaces reads from /proc/[pid]/ns/* to get the namespaces of which the
+// process is a member.
+func (p Proc) NewNamespaces() (Namespaces, error) {
+	d, err := os.Open(p.path("ns"))
+	if err != nil {
+		return nil, err
+	}
+	defer d.Close()
+
+	names, err := d.Readdirnames(-1)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read contents of ns dir: %v", err)
+	}
+
+	ns := make(Namespaces, len(names))
+	for _, name := range names {
+		target, err := os.Readlink(p.path("ns", name))
+		if err != nil {
+			return nil, err
+		}
+
+		fields := strings.SplitN(target, ":", 2)
+		if len(fields) != 2 {
+			return nil, fmt.Errorf("failed to parse namespace type and inode from '%v'", target)
+		}
+
+		typ := fields[0]
+		inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32)
+		if err != nil {
+			return nil, fmt.Errorf("failed to parse inode from '%v': %v", fields[1], err)
+		}
+
+		ns[name] = Namespace{typ, uint32(inode)}
+	}
+
+	return ns, nil
+}
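
A sketch of listing the namespaces of the current process with the API above; two processes sharing a namespace will report the same inode:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}

	ns, err := p.NewNamespaces()
	if err != nil {
		log.Fatal(err)
	}
	for name, n := range ns {
		fmt.Printf("%s: type=%s inode=%d\n", name, n.Type, n.Inode)
	}
}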
diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go
new file mode 100644
index 0000000..3cf2a9f
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_stat.go
@@ -0,0 +1,188 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"os"
+)
+
+// Originally, this USER_HZ value was dynamically retrieved via a sysconf call
+// which required cgo. However, that caused a lot of problems regarding
+// cross-compilation. Alternatives such as running a binary to determine the
+// value, or trying to derive it in some other way were all problematic.  After
+// much research it was determined that USER_HZ is actually hardcoded to 100 on
+// all Go-supported platforms as of the time of this writing. This is why we
+// decided to hardcode it here as well. It is not impossible that there could
+// be systems with exceptions, but they should be very exotic edge cases, and
+// in that case, the worst outcome will be two misreported metrics.
+//
+// See also the following discussions:
+//
+// - https://github.com/prometheus/node_exporter/issues/52
+// - https://github.com/prometheus/procfs/pull/2
+// - http://stackoverflow.com/questions/17410841/how-does-user-hz-solve-the-jiffy-scaling-issue
+const userHZ = 100
+
+// ProcStat provides status information about the process,
+// read from /proc/[pid]/stat.
+type ProcStat struct {
+	// The process ID.
+	PID int
+	// The filename of the executable.
+	Comm string
+	// The process state.
+	State string
+	// The PID of the parent of this process.
+	PPID int
+	// The process group ID of the process.
+	PGRP int
+	// The session ID of the process.
+	Session int
+	// The controlling terminal of the process.
+	TTY int
+	// The ID of the foreground process group of the controlling terminal of
+	// the process.
+	TPGID int
+	// The kernel flags word of the process.
+	Flags uint
+	// The number of minor faults the process has made which have not required
+	// loading a memory page from disk.
+	MinFlt uint
+	// The number of minor faults that the process's waited-for children have
+	// made.
+	CMinFlt uint
+	// The number of major faults the process has made which have required
+	// loading a memory page from disk.
+	MajFlt uint
+	// The number of major faults that the process's waited-for children have
+	// made.
+	CMajFlt uint
+	// Amount of time that this process has been scheduled in user mode,
+	// measured in clock ticks.
+	UTime uint
+	// Amount of time that this process has been scheduled in kernel mode,
+	// measured in clock ticks.
+	STime uint
+	// Amount of time that this process's waited-for children have been
+	// scheduled in user mode, measured in clock ticks.
+	CUTime uint
+	// Amount of time that this process's waited-for children have been
+	// scheduled in kernel mode, measured in clock ticks.
+	CSTime uint
+	// For processes running a real-time scheduling policy, this is the negated
+	// scheduling priority, minus one.
+	Priority int
+	// The nice value, a value in the range 19 (low priority) to -20 (high
+	// priority).
+	Nice int
+	// Number of threads in this process.
+	NumThreads int
+	// The time the process started after system boot, the value is expressed
+	// in clock ticks.
+	Starttime uint64
+	// Virtual memory size in bytes.
+	VSize int
+	// Resident set size in pages.
+	RSS int
+
+	fs FS
+}
+
+// NewStat returns the current status information of the process.
+func (p Proc) NewStat() (ProcStat, error) {
+	f, err := os.Open(p.path("stat"))
+	if err != nil {
+		return ProcStat{}, err
+	}
+	defer f.Close()
+
+	data, err := ioutil.ReadAll(f)
+	if err != nil {
+		return ProcStat{}, err
+	}
+
+	var (
+		ignore int
+
+		s = ProcStat{PID: p.PID, fs: p.fs}
+		l = bytes.Index(data, []byte("("))
+		r = bytes.LastIndex(data, []byte(")"))
+	)
+
+	if l < 0 || r < 0 {
+		return ProcStat{}, fmt.Errorf(
+			"unexpected format, couldn't extract comm: %s",
+			data,
+		)
+	}
+
+	s.Comm = string(data[l+1 : r])
+	_, err = fmt.Fscan(
+		bytes.NewBuffer(data[r+2:]),
+		&s.State,
+		&s.PPID,
+		&s.PGRP,
+		&s.Session,
+		&s.TTY,
+		&s.TPGID,
+		&s.Flags,
+		&s.MinFlt,
+		&s.CMinFlt,
+		&s.MajFlt,
+		&s.CMajFlt,
+		&s.UTime,
+		&s.STime,
+		&s.CUTime,
+		&s.CSTime,
+		&s.Priority,
+		&s.Nice,
+		&s.NumThreads,
+		&ignore,
+		&s.Starttime,
+		&s.VSize,
+		&s.RSS,
+	)
+	if err != nil {
+		return ProcStat{}, err
+	}
+
+	return s, nil
+}
+
+// VirtualMemory returns the virtual memory size in bytes.
+func (s ProcStat) VirtualMemory() int {
+	return s.VSize
+}
+
+// ResidentMemory returns the resident memory size in bytes.
+func (s ProcStat) ResidentMemory() int {
+	return s.RSS * os.Getpagesize()
+}
+
+// StartTime returns the unix timestamp of the process start time in seconds.
+func (s ProcStat) StartTime() (float64, error) {
+	stat, err := s.fs.NewStat()
+	if err != nil {
+		return 0, err
+	}
+	return float64(stat.BootTime) + (float64(s.Starttime) / userHZ), nil
+}
+
+// CPUTime returns the total CPU user and system time in seconds.
+func (s ProcStat) CPUTime() float64 {
+	return float64(s.UTime+s.STime) / userHZ
+}
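
A sketch of the per-process stat accessors above; ResidentMemory converts the page count from /proc/[pid]/stat into bytes, and CPUTime converts clock ticks into seconds using the hardcoded USER_HZ:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}

	s, err := p.NewStat()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("vsize=%d bytes rss=%d bytes cpu=%.2fs\n",
		s.VirtualMemory(), s.ResidentMemory(), s.CPUTime())
}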
diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go
new file mode 100644
index 0000000..61eb6b0
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/stat.go
@@ -0,0 +1,232 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// CPUStat shows how much time the CPU spent in various states.
+type CPUStat struct {
+	User      float64
+	Nice      float64
+	System    float64
+	Idle      float64
+	Iowait    float64
+	IRQ       float64
+	SoftIRQ   float64
+	Steal     float64
+	Guest     float64
+	GuestNice float64
+}
+
+// SoftIRQStat represents the softirq statistics as exported in the procfs stat file.
+// A nice introduction can be found at https://0xax.gitbooks.io/linux-insides/content/interrupts/interrupts-9.html
+// It is possible to get per-cpu stats by reading /proc/softirqs.
+type SoftIRQStat struct {
+	Hi          uint64
+	Timer       uint64
+	NetTx       uint64
+	NetRx       uint64
+	Block       uint64
+	BlockIoPoll uint64
+	Tasklet     uint64
+	Sched       uint64
+	Hrtimer     uint64
+	Rcu         uint64
+}
+
+// Stat represents kernel/system statistics.
+type Stat struct {
+	// Boot time in seconds since the Epoch.
+	BootTime uint64
+	// Summed up cpu statistics.
+	CPUTotal CPUStat
+	// Per-CPU statistics.
+	CPU []CPUStat
+	// Number of times interrupts were handled, including both numbered and unnumbered IRQs.
+	IRQTotal uint64
+	// Number of times a numbered IRQ was triggered.
+	IRQ []uint64
+	// Number of times a context switch happened.
+	ContextSwitches uint64
+	// Number of times a process was created.
+	ProcessCreated uint64
+	// Number of processes currently running.
+	ProcessesRunning uint64
+	// Number of processes currently blocked (waiting for IO).
+	ProcessesBlocked uint64
+	// Number of times a softirq was scheduled.
+	SoftIRQTotal uint64
+	// Detailed softirq statistics.
+	SoftIRQ SoftIRQStat
+}
+
+// NewStat returns kernel/system statistics read from /proc/stat.
+func NewStat() (Stat, error) {
+	fs, err := NewFS(DefaultMountPoint)
+	if err != nil {
+		return Stat{}, err
+	}
+
+	return fs.NewStat()
+}
+
+// parseCPUStat parses a cpu statistics line and returns the CPUStat struct plus the cpu id (or -1 for the overall sum).
+func parseCPUStat(line string) (CPUStat, int64, error) {
+	cpuStat := CPUStat{}
+	var cpu string
+
+	count, err := fmt.Sscanf(line, "%s %f %f %f %f %f %f %f %f %f %f",
+		&cpu,
+		&cpuStat.User, &cpuStat.Nice, &cpuStat.System, &cpuStat.Idle,
+		&cpuStat.Iowait, &cpuStat.IRQ, &cpuStat.SoftIRQ, &cpuStat.Steal,
+		&cpuStat.Guest, &cpuStat.GuestNice)
+
+	if err != nil && err != io.EOF {
+		return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): %s", line, err)
+	}
+	if count == 0 {
+		return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): 0 elements parsed", line)
+	}
+
+	cpuStat.User /= userHZ
+	cpuStat.Nice /= userHZ
+	cpuStat.System /= userHZ
+	cpuStat.Idle /= userHZ
+	cpuStat.Iowait /= userHZ
+	cpuStat.IRQ /= userHZ
+	cpuStat.SoftIRQ /= userHZ
+	cpuStat.Steal /= userHZ
+	cpuStat.Guest /= userHZ
+	cpuStat.GuestNice /= userHZ
+
+	if cpu == "cpu" {
+		return cpuStat, -1, nil
+	}
+
+	cpuID, err := strconv.ParseInt(cpu[3:], 10, 64)
+	if err != nil {
+		return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu/cpuid): %s", line, err)
+	}
+
+	return cpuStat, cpuID, nil
+}
+
+// parseSoftIRQStat parses a softirq line.
+func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) {
+	softIRQStat := SoftIRQStat{}
+	var total uint64
+	var prefix string
+
+	_, err := fmt.Sscanf(line, "%s %d %d %d %d %d %d %d %d %d %d %d",
+		&prefix, &total,
+		&softIRQStat.Hi, &softIRQStat.Timer, &softIRQStat.NetTx, &softIRQStat.NetRx,
+		&softIRQStat.Block, &softIRQStat.BlockIoPoll,
+		&softIRQStat.Tasklet, &softIRQStat.Sched,
+		&softIRQStat.Hrtimer, &softIRQStat.Rcu)
+
+	if err != nil {
+		return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %s (softirq): %s", line, err)
+	}
+
+	return softIRQStat, total, nil
+}
+
+// NewStat returns information about the current kernel/system statistics.
+func (fs FS) NewStat() (Stat, error) {
+	// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+
+	f, err := os.Open(fs.Path("stat"))
+	if err != nil {
+		return Stat{}, err
+	}
+	defer f.Close()
+
+	stat := Stat{}
+
+	scanner := bufio.NewScanner(f)
+	for scanner.Scan() {
+		line := scanner.Text()
+		parts := strings.Fields(line)
+		// require at least <key> <value>
+		if len(parts) < 2 {
+			continue
+		}
+		switch {
+		case parts[0] == "btime":
+			if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+				return Stat{}, fmt.Errorf("couldn't parse %s (btime): %s", parts[1], err)
+			}
+		case parts[0] == "intr":
+			if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+				return Stat{}, fmt.Errorf("couldn't parse %s (intr): %s", parts[1], err)
+			}
+			numberedIRQs := parts[2:]
+			stat.IRQ = make([]uint64, len(numberedIRQs))
+			for i, count := range numberedIRQs {
+				if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+					return Stat{}, fmt.Errorf("couldn't parse %s (intr%d): %s", count, i, err)
+				}
+			}
+		case parts[0] == "ctxt":
+			if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+				return Stat{}, fmt.Errorf("couldn't parse %s (ctxt): %s", parts[1], err)
+			}
+		case parts[0] == "processes":
+			if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+				return Stat{}, fmt.Errorf("couldn't parse %s (processes): %s", parts[1], err)
+			}
+		case parts[0] == "procs_running":
+			if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+				return Stat{}, fmt.Errorf("couldn't parse %s (procs_running): %s", parts[1], err)
+			}
+		case parts[0] == "procs_blocked":
+			if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+				return Stat{}, fmt.Errorf("couldn't parse %s (procs_blocked): %s", parts[1], err)
+			}
+		case parts[0] == "softirq":
+			softIRQStats, total, err := parseSoftIRQStat(line)
+			if err != nil {
+				return Stat{}, err
+			}
+			stat.SoftIRQTotal = total
+			stat.SoftIRQ = softIRQStats
+		case strings.HasPrefix(parts[0], "cpu"):
+			cpuStat, cpuID, err := parseCPUStat(line)
+			if err != nil {
+				return Stat{}, err
+			}
+			if cpuID == -1 {
+				stat.CPUTotal = cpuStat
+			} else {
+				for int64(len(stat.CPU)) <= cpuID {
+					stat.CPU = append(stat.CPU, CPUStat{})
+				}
+				stat.CPU[cpuID] = cpuStat
+			}
+		}
+	}
+
+	if err := scanner.Err(); err != nil {
+		return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err)
+	}
+
+	return stat, nil
+}
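
Finally, a sketch of the system-wide entry point above, which reads /proc/stat at the default mount point:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	stat, err := procfs.NewStat()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("boot=%d ctxt switches=%d cpus=%d softirq total=%d\n",
		stat.BootTime, stat.ContextSwitches, len(stat.CPU), stat.SoftIRQTotal)
}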
diff --git a/vendor/github.com/prometheus/procfs/ttar b/vendor/github.com/prometheus/procfs/ttar
new file mode 100755
index 0000000..b0171a1
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/ttar
@@ -0,0 +1,389 @@
+#!/usr/bin/env bash
+
+# Purpose: plain text tar format
+# Limitations: - only suitable for text files, directories, and symlinks
+#              - stores only filename, content, and mode
+#              - not designed for untrusted input
+#
+# Note: must work with bash version 3.2 (macOS)
+
+# Copyright 2017 Roger Luethi
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -o errexit -o nounset
+
+# Sanitize environment (for instance, standard sorting of glob matches)
+export LC_ALL=C
+
+path=""
+CMD=""
+ARG_STRING="$*"
+
+#------------------------------------------------------------------------------
+# Not all sed implementations can work on null bytes. In order to make ttar
+# work out of the box on macOS, use Python as a stream editor.
+
+USE_PYTHON=0
+
+PYTHON_CREATE_FILTER=$(cat << 'PCF'
+#!/usr/bin/env python
+
+import re
+import sys
+
+for line in sys.stdin:
+    line = re.sub(r'EOF', r'\EOF', line)
+    line = re.sub(r'NULLBYTE', r'\NULLBYTE', line)
+    line = re.sub('\x00', r'NULLBYTE', line)
+    sys.stdout.write(line)
+PCF
+)
+
+PYTHON_EXTRACT_FILTER=$(cat << 'PEF'
+#!/usr/bin/env python
+
+import re
+import sys
+
+for line in sys.stdin:
+    line = re.sub(r'(?<!\\)NULLBYTE', '\x00', line)
+    line = re.sub(r'\\NULLBYTE', 'NULLBYTE', line)
+    line = re.sub(r'([^\\])EOF', r'\1', line)
+    line = re.sub(r'\\EOF', 'EOF', line)
+    sys.stdout.write(line)
+PEF
+)
+
+function test_environment {
+    if [[ "$(echo "a" | sed 's/a/\x0/' | wc -c)" -ne 2 ]]; then
+        echo "WARNING sed unable to handle null bytes, using Python (slow)."
+        if ! which python >/dev/null; then
+            echo "ERROR Python not found. Aborting."
+            exit 2
+        fi
+        USE_PYTHON=1
+    fi
+}
+
+#------------------------------------------------------------------------------
+
+function usage {
+    bname=$(basename "$0")
+    cat << USAGE
+Usage:   $bname [-C <DIR>] -c -f <ARCHIVE> <FILE...> (create archive)
+         $bname            -t -f <ARCHIVE>           (list archive contents)
+         $bname [-C <DIR>] -x -f <ARCHIVE>           (extract archive)
+
+Options:
+         -C <DIR>                                    (change directory)
+         -v                                          (verbose)
+
+Example: Change to sysfs directory, create ttar file from fixtures directory
+         $bname -C sysfs -c -f sysfs/fixtures.ttar fixtures/
+USAGE
+exit "$1"
+}
+
+function vecho {
+    if [ "${VERBOSE:-}" == "yes" ]; then
+        echo >&7 "$@"
+    fi
+}
+
+function set_cmd {
+    if [ -n "$CMD" ]; then
+        echo "ERROR: more than one command given"
+        echo
+        usage 2
+    fi
+    CMD=$1
+}
+
+unset VERBOSE
+
+while getopts :cf:htxvC: opt; do
+    case $opt in
+        c)
+            set_cmd "create"
+            ;;
+        f)
+            ARCHIVE=$OPTARG
+            ;;
+        h)
+            usage 0
+            ;;
+        t)
+            set_cmd "list"
+            ;;
+        x)
+            set_cmd "extract"
+            ;;
+        v)
+            VERBOSE=yes
+            exec 7>&1
+            ;;
+        C)
+            CDIR=$OPTARG
+            ;;
+        *)
+            echo >&2 "ERROR: invalid option -$OPTARG"
+            echo
+            usage 1
+            ;;
+    esac
+done
+
+# Remove processed options from arguments
+shift $(( OPTIND - 1 ));
+
+if [ "${CMD:-}" == "" ]; then
+    echo >&2 "ERROR: no command given"
+    echo
+    usage 1
+elif [ "${ARCHIVE:-}" == "" ]; then
+    echo >&2 "ERROR: no archive name given"
+    echo
+    usage 1
+fi
+
+function list {
+    local path=""
+    local size=0
+    local line_no=0
+    local ttar_file=$1
+    if [ -n "${2:-}" ]; then
+        echo >&2 "ERROR: too many arguments."
+        echo
+        usage 1
+    fi
+    if [ ! -e "$ttar_file" ]; then
+        echo >&2 "ERROR: file not found ($ttar_file)"
+        echo
+        usage 1
+    fi
+    while read -r line; do
+        line_no=$(( line_no + 1 ))
+        if [ $size -gt 0 ]; then
+            size=$(( size - 1 ))
+            continue
+        fi
+        if [[ $line =~ ^Path:\ (.*)$ ]]; then
+            path=${BASH_REMATCH[1]}
+        elif [[ $line =~ ^Lines:\ (.*)$ ]]; then
+            size=${BASH_REMATCH[1]}
+            echo "$path"
+        elif [[ $line =~ ^Directory:\ (.*)$ ]]; then
+            path=${BASH_REMATCH[1]}
+            echo "$path/"
+        elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then
+            echo "$path -> ${BASH_REMATCH[1]}"
+        fi
+    done < "$ttar_file"
+}
+
+function extract {
+    local path=""
+    local size=0
+    local line_no=0
+    local ttar_file=$1
+    if [ -n "${2:-}" ]; then
+        echo >&2 "ERROR: too many arguments."
+        echo
+        usage 1
+    fi
+    if [ ! -e "$ttar_file" ]; then
+        echo >&2 "ERROR: file not found ($ttar_file)"
+        echo
+        usage 1
+    fi
+    while IFS= read -r line; do
+        line_no=$(( line_no + 1 ))
+        local eof_without_newline
+        if [ "$size" -gt 0 ]; then
+            if [[ "$line" =~ [^\\]EOF ]]; then
+                # An EOF not preceded by a backslash indicates that the line
+                # does not end with a newline
+                eof_without_newline=1
+            else
+                eof_without_newline=0
+            fi
+            # Replace NULLBYTE with null byte if at beginning of line
+            # Replace NULLBYTE with null byte unless preceded by backslash
+            # Remove one backslash in front of NULLBYTE (if any)
+            # Remove EOF unless preceded by backslash
+            # Remove one backslash in front of EOF
+            if [ $USE_PYTHON -eq 1 ]; then
+                echo -n "$line" | python -c "$PYTHON_EXTRACT_FILTER" >> "$path"
+            else
+                # The repeated pattern makes up for sed's lack of negative
+                # lookbehind assertions (for consecutive null bytes).
+                echo -n "$line" | \
+                    sed -e 's/^NULLBYTE/\x0/g;
+                            s/\([^\\]\)NULLBYTE/\1\x0/g;
+                            s/\([^\\]\)NULLBYTE/\1\x0/g;
+                            s/\\NULLBYTE/NULLBYTE/g;
+                            s/\([^\\]\)EOF/\1/g;
+                            s/\\EOF/EOF/g;
+                    ' >> "$path"
+            fi
+            if [[ "$eof_without_newline" -eq 0 ]]; then
+                echo >> "$path"
+            fi
+            size=$(( size - 1 ))
+            continue
+        fi
+        if [[ $line =~ ^Path:\ (.*)$ ]]; then
+            path=${BASH_REMATCH[1]}
+            if [ -e "$path" ] || [ -L "$path" ]; then
+                rm "$path"
+            fi
+        elif [[ $line =~ ^Lines:\ (.*)$ ]]; then
+            size=${BASH_REMATCH[1]}
+            # Create file even if it is zero-length.
+            touch "$path"
+            vecho "    $path"
+        elif [[ $line =~ ^Mode:\ (.*)$ ]]; then
+            mode=${BASH_REMATCH[1]}
+            chmod "$mode" "$path"
+            vecho "$mode"
+        elif [[ $line =~ ^Directory:\ (.*)$ ]]; then
+            path=${BASH_REMATCH[1]}
+            mkdir -p "$path"
+            vecho "    $path/"
+        elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then
+            ln -s "${BASH_REMATCH[1]}" "$path"
+            vecho "    $path -> ${BASH_REMATCH[1]}"
+        elif [[ $line =~ ^# ]]; then
+            # Ignore comments between files
+            continue
+        else
+            echo >&2 "ERROR: Unknown keyword on line $line_no: $line"
+            exit 1
+        fi
+    done < "$ttar_file"
+}
+
+function div {
+    echo "# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -" \
+         "- - - - - -"
+}
+
+function get_mode {
+    local mfile=$1
+    if [ -z "${STAT_OPTION:-}" ]; then
+        if stat -c '%a' "$mfile" >/dev/null 2>&1; then
+            # GNU stat
+            STAT_OPTION='-c'
+            STAT_FORMAT='%a'
+        else
+            # BSD stat
+            STAT_OPTION='-f'
+            # Octal output, user/group/other (omit file type, sticky bit)
+            STAT_FORMAT='%OLp'
+        fi
+    fi
+    stat "${STAT_OPTION}" "${STAT_FORMAT}" "$mfile"
+}
+
+function _create {
+    shopt -s nullglob
+    local mode
+    local eof_without_newline
+    while (( "$#" )); do
+        file=$1
+        if [ -L "$file" ]; then
+            echo "Path: $file"
+            symlinkTo=$(readlink "$file")
+            echo "SymlinkTo: $symlinkTo"
+            vecho "    $file -> $symlinkTo"
+            div
+        elif [ -d "$file" ]; then
+            # Strip trailing slash (if there is one)
+            file=${file%/}
+            echo "Directory: $file"
+            mode=$(get_mode "$file")
+            echo "Mode: $mode"
+            vecho "$mode $file/"
+            div
+            # Find all files and dirs, including hidden/dot files
+            for x in "$file/"{*,.[^.]*}; do
+                _create "$x"
+            done
+        elif [ -f "$file" ]; then
+            echo "Path: $file"
+            lines=$(wc -l "$file"|awk '{print $1}')
+            eof_without_newline=0
+            if [[ "$(wc -c "$file"|awk '{print $1}')" -gt 0 ]] && \
+                    [[ "$(tail -c 1 "$file" | wc -l)" -eq 0 ]]; then
+                eof_without_newline=1
+                lines=$((lines+1))
+            fi
+            echo "Lines: $lines"
+            # Add backslash in front of EOF
+            # Add backslash in front of NULLBYTE
+            # Replace null byte with NULLBYTE
+            if [ $USE_PYTHON -eq 1 ]; then
+                < "$file" python -c "$PYTHON_CREATE_FILTER"
+            else
+                < "$file" \
+                    sed 's/EOF/\\EOF/g;
+                         s/NULLBYTE/\\NULLBYTE/g;
+                         s/\x0/NULLBYTE/g;
+                    '
+            fi
+            if [[ "$eof_without_newline" -eq 1 ]]; then
+                # Finish line with EOF to indicate that the original line did
+                # not end with a linefeed
+                echo "EOF"
+            fi
+            mode=$(get_mode "$file")
+            echo "Mode: $mode"
+            vecho "$mode $file"
+            div
+        else
+            echo >&2 "ERROR: file not found ($file in $(pwd))"
+            exit 2
+        fi
+        shift
+    done
+}
+
+function create {
+    ttar_file=$1
+    shift
+    if [ -z "${1:-}" ]; then
+        echo >&2 "ERROR: missing arguments."
+        echo
+        usage 1
+    fi
+    if [ -e "$ttar_file" ]; then
+        rm "$ttar_file"
+    fi
+    exec > "$ttar_file"
+    echo "# Archive created by ttar $ARG_STRING"
+    _create "$@"
+}
+
+test_environment
+
+if [ -n "${CDIR:-}" ]; then
+    if [[ "$ARCHIVE" != /* ]]; then
+        # Relative path: preserve the archive's location before changing
+        # directory
+        ARCHIVE="$(pwd)/$ARCHIVE"
+    fi
+    cd "$CDIR"
+fi
+
+"$CMD" "$ARCHIVE" "$@"
diff --git a/vendor/github.com/prometheus/procfs/xfrm.go b/vendor/github.com/prometheus/procfs/xfrm.go
new file mode 100644
index 0000000..8f1508f
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/xfrm.go
@@ -0,0 +1,187 @@
+// Copyright 2017 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// XfrmStat models the contents of /proc/net/xfrm_stat.
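+//
+// A minimal usage sketch (NewXfrmStat is defined below; error handling
+// abbreviated):
+//
+//	stat, err := NewXfrmStat()
+//	if err == nil {
+//		// inspect counters such as stat.XfrmInError
+//	}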
+type XfrmStat struct {
+	// All errors which are not matched by others
+	XfrmInError int
+	// No buffer is left
+	XfrmInBufferError int
+	// Header Error
+	XfrmInHdrError int
+	// No state found
+	// i.e. either inbound SPI, address, or IPSEC protocol at SA is wrong
+	XfrmInNoStates int
+	// Transformation protocol specific error
+	// e.g. SA Key is wrong
+	XfrmInStateProtoError int
+	// Transformation mode specific error
+	XfrmInStateModeError int
+	// Sequence error
+	// e.g. sequence number is out of window
+	XfrmInStateSeqError int
+	// State is expired
+	XfrmInStateExpired int
+	// State has mismatch option
+	// e.g. UDP encapsulation type is mismatched
+	XfrmInStateMismatch int
+	// State is invalid
+	XfrmInStateInvalid int
+	// No matching template for states
+	// e.g. Inbound SAs are correct but SP rule is wrong
+	XfrmInTmplMismatch int
+	// No policy is found for states
+	// e.g. Inbound SAs are correct but no SP is found
+	XfrmInNoPols int
+	// Policy discards
+	XfrmInPolBlock int
+	// Policy error
+	XfrmInPolError int
+	// All errors which are not matched by others
+	XfrmOutError int
+	// Bundle generation error
+	XfrmOutBundleGenError int
+	// Bundle check error
+	XfrmOutBundleCheckError int
+	// No state was found
+	XfrmOutNoStates int
+	// Transformation protocol specific error
+	XfrmOutStateProtoError int
+	// Transformation mode specific error
+	XfrmOutStateModeError int
+	// Sequence error
+	// i.e. sequence number overflow
+	XfrmOutStateSeqError int
+	// State is expired
+	XfrmOutStateExpired int
+	// Policy discards
+	XfrmOutPolBlock int
+	// Policy is dead
+	XfrmOutPolDead int
+	// Policy Error
+	XfrmOutPolError     int
+	XfrmFwdHdrError     int
+	XfrmOutStateInvalid int
+	XfrmAcquireError    int
+}
+
+// NewXfrmStat reads the xfrm_stat statistics.
+func NewXfrmStat() (XfrmStat, error) {
+	fs, err := NewFS(DefaultMountPoint)
+	if err != nil {
+		return XfrmStat{}, err
+	}
+
+	return fs.NewXfrmStat()
+}
+
+// NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem.
+func (fs FS) NewXfrmStat() (XfrmStat, error) {
+	file, err := os.Open(fs.Path("net/xfrm_stat"))
+	if err != nil {
+		return XfrmStat{}, err
+	}
+	defer file.Close()
+
+	var (
+		x = XfrmStat{}
+		s = bufio.NewScanner(file)
+	)
+
+	for s.Scan() {
+		fields := strings.Fields(s.Text())
+
+		if len(fields) != 2 {
+			return XfrmStat{}, fmt.Errorf(
+				"couldn't parse %s line %s", file.Name(), s.Text())
+		}
+
+		name := fields[0]
+		value, err := strconv.Atoi(fields[1])
+		if err != nil {
+			return XfrmStat{}, err
+		}
+
+		switch name {
+		case "XfrmInError":
+			x.XfrmInError = value
+		case "XfrmInBufferError":
+			x.XfrmInBufferError = value
+		case "XfrmInHdrError":
+			x.XfrmInHdrError = value
+		case "XfrmInNoStates":
+			x.XfrmInNoStates = value
+		case "XfrmInStateProtoError":
+			x.XfrmInStateProtoError = value
+		case "XfrmInStateModeError":
+			x.XfrmInStateModeError = value
+		case "XfrmInStateSeqError":
+			x.XfrmInStateSeqError = value
+		case "XfrmInStateExpired":
+			x.XfrmInStateExpired = value
+		case "XfrmInStateInvalid":
+			x.XfrmInStateInvalid = value
+		case "XfrmInTmplMismatch":
+			x.XfrmInTmplMismatch = value
+		case "XfrmInNoPols":
+			x.XfrmInNoPols = value
+		case "XfrmInPolBlock":
+			x.XfrmInPolBlock = value
+		case "XfrmInPolError":
+			x.XfrmInPolError = value
+		case "XfrmOutError":
+			x.XfrmOutError = value
+		case "XfrmInStateMismatch":
+			x.XfrmInStateMismatch = value
+		case "XfrmOutBundleGenError":
+			x.XfrmOutBundleGenError = value
+		case "XfrmOutBundleCheckError":
+			x.XfrmOutBundleCheckError = value
+		case "XfrmOutNoStates":
+			x.XfrmOutNoStates = value
+		case "XfrmOutStateProtoError":
+			x.XfrmOutStateProtoError = value
+		case "XfrmOutStateModeError":
+			x.XfrmOutStateModeError = value
+		case "XfrmOutStateSeqError":
+			x.XfrmOutStateSeqError = value
+		case "XfrmOutStateExpired":
+			x.XfrmOutStateExpired = value
+		case "XfrmOutPolBlock":
+			x.XfrmOutPolBlock = value
+		case "XfrmOutPolDead":
+			x.XfrmOutPolDead = value
+		case "XfrmOutPolError":
+			x.XfrmOutPolError = value
+		case "XfrmFwdHdrError":
+			x.XfrmFwdHdrError = value
+		case "XfrmOutStateInvalid":
+			x.XfrmOutStateInvalid = value
+		case "XfrmAcquireError":
+			x.XfrmAcquireError = value
+		}
+
+	}
+
+	return x, s.Err()
+}
diff --git a/vendor/github.com/prometheus/procfs/xfs/parse.go b/vendor/github.com/prometheus/procfs/xfs/parse.go
new file mode 100644
index 0000000..b3d8634
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/xfs/parse.go
@@ -0,0 +1,330 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package xfs
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// ParseStats parses a Stats from an input io.Reader, using the format
+// found in /proc/fs/xfs/stat.
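+//
+// A sketch of the expected input, one label plus integer values per line
+// (the values here are made up):
+//
+//	extent_alloc 1 2 3 4
+//	abt 5 6 7 8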
+func ParseStats(r io.Reader) (*Stats, error) {
+	const (
+		// Fields parsed into stats structures.
+		fieldExtentAlloc = "extent_alloc"
+		fieldAbt         = "abt"
+		fieldBlkMap      = "blk_map"
+		fieldBmbt        = "bmbt"
+		fieldDir         = "dir"
+		fieldTrans       = "trans"
+		fieldIg          = "ig"
+		fieldLog         = "log"
+		fieldRw          = "rw"
+		fieldAttr        = "attr"
+		fieldIcluster    = "icluster"
+		fieldVnodes      = "vnodes"
+		fieldBuf         = "buf"
+		fieldXpc         = "xpc"
+
+		// Unimplemented at this time due to lack of documentation.
+		// fieldPushAil = "push_ail"
+		// fieldXstrat  = "xstrat"
+		// fieldAbtb2   = "abtb2"
+		// fieldAbtc2   = "abtc2"
+		// fieldBmbt2   = "bmbt2"
+		// fieldIbt2    = "ibt2"
+		// fieldFibt2   = "fibt2"
+		// fieldQm      = "qm"
+		// fieldDebug   = "debug"
+	)
+
+	var xfss Stats
+
+	s := bufio.NewScanner(r)
+	for s.Scan() {
+		// Expect at least a string label and a single integer value, ex:
+		//   - abt 0
+		//   - rw 1 2
+		ss := strings.Fields(string(s.Bytes()))
+		if len(ss) < 2 {
+			continue
+		}
+		label := ss[0]
+
+		// Extended precision counters are uint64 values.
+		if label == fieldXpc {
+			us, err := util.ParseUint64s(ss[1:])
+			if err != nil {
+				return nil, err
+			}
+
+			xfss.ExtendedPrecision, err = extendedPrecisionStats(us)
+			if err != nil {
+				return nil, err
+			}
+
+			continue
+		}
+
+		// All other counters are uint32 values.
+		us, err := util.ParseUint32s(ss[1:])
+		if err != nil {
+			return nil, err
+		}
+
+		switch label {
+		case fieldExtentAlloc:
+			xfss.ExtentAllocation, err = extentAllocationStats(us)
+		case fieldAbt:
+			xfss.AllocationBTree, err = btreeStats(us)
+		case fieldBlkMap:
+			xfss.BlockMapping, err = blockMappingStats(us)
+		case fieldBmbt:
+			xfss.BlockMapBTree, err = btreeStats(us)
+		case fieldDir:
+			xfss.DirectoryOperation, err = directoryOperationStats(us)
+		case fieldTrans:
+			xfss.Transaction, err = transactionStats(us)
+		case fieldIg:
+			xfss.InodeOperation, err = inodeOperationStats(us)
+		case fieldLog:
+			xfss.LogOperation, err = logOperationStats(us)
+		case fieldRw:
+			xfss.ReadWrite, err = readWriteStats(us)
+		case fieldAttr:
+			xfss.AttributeOperation, err = attributeOperationStats(us)
+		case fieldIcluster:
+			xfss.InodeClustering, err = inodeClusteringStats(us)
+		case fieldVnodes:
+			xfss.Vnode, err = vnodeStats(us)
+		case fieldBuf:
+			xfss.Buffer, err = bufferStats(us)
+		}
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return &xfss, s.Err()
+}
+
+// extentAllocationStats builds an ExtentAllocationStats from a slice of uint32s.
+func extentAllocationStats(us []uint32) (ExtentAllocationStats, error) {
+	if l := len(us); l != 4 {
+		return ExtentAllocationStats{}, fmt.Errorf("incorrect number of values for XFS extent allocation stats: %d", l)
+	}
+
+	return ExtentAllocationStats{
+		ExtentsAllocated: us[0],
+		BlocksAllocated:  us[1],
+		ExtentsFreed:     us[2],
+		BlocksFreed:      us[3],
+	}, nil
+}
+
+// btreeStats builds a BTreeStats from a slice of uint32s.
+func btreeStats(us []uint32) (BTreeStats, error) {
+	if l := len(us); l != 4 {
+		return BTreeStats{}, fmt.Errorf("incorrect number of values for XFS btree stats: %d", l)
+	}
+
+	return BTreeStats{
+		Lookups:         us[0],
+		Compares:        us[1],
+		RecordsInserted: us[2],
+		RecordsDeleted:  us[3],
+	}, nil
+}
+
+// blockMappingStats builds a BlockMappingStats from a slice of uint32s.
+func blockMappingStats(us []uint32) (BlockMappingStats, error) {
+	if l := len(us); l != 7 {
+		return BlockMappingStats{}, fmt.Errorf("incorrect number of values for XFS block mapping stats: %d", l)
+	}
+
+	return BlockMappingStats{
+		Reads:                us[0],
+		Writes:               us[1],
+		Unmaps:               us[2],
+		ExtentListInsertions: us[3],
+		ExtentListDeletions:  us[4],
+		ExtentListLookups:    us[5],
+		ExtentListCompares:   us[6],
+	}, nil
+}
+
+// directoryOperationStats builds a DirectoryOperationStats from a slice of uint32s.
+func directoryOperationStats(us []uint32) (DirectoryOperationStats, error) {
+	if l := len(us); l != 4 {
+		return DirectoryOperationStats{}, fmt.Errorf("incorrect number of values for XFS directory operation stats: %d", l)
+	}
+
+	return DirectoryOperationStats{
+		Lookups:  us[0],
+		Creates:  us[1],
+		Removes:  us[2],
+		Getdents: us[3],
+	}, nil
+}
+
+// transactionStats builds a TransactionStats from a slice of uint32s.
+func transactionStats(us []uint32) (TransactionStats, error) {
+	if l := len(us); l != 3 {
+		return TransactionStats{}, fmt.Errorf("incorrect number of values for XFS transaction stats: %d", l)
+	}
+
+	return TransactionStats{
+		Sync:  us[0],
+		Async: us[1],
+		Empty: us[2],
+	}, nil
+}
+
+// inodeOperationStats builds an InodeOperationStats from a slice of uint32s.
+func inodeOperationStats(us []uint32) (InodeOperationStats, error) {
+	if l := len(us); l != 7 {
+		return InodeOperationStats{}, fmt.Errorf("incorrect number of values for XFS inode operation stats: %d", l)
+	}
+
+	return InodeOperationStats{
+		Attempts:        us[0],
+		Found:           us[1],
+		Recycle:         us[2],
+		Missed:          us[3],
+		Duplicate:       us[4],
+		Reclaims:        us[5],
+		AttributeChange: us[6],
+	}, nil
+}
+
+// logOperationStats builds a LogOperationStats from a slice of uint32s.
+func logOperationStats(us []uint32) (LogOperationStats, error) {
+	if l := len(us); l != 5 {
+		return LogOperationStats{}, fmt.Errorf("incorrect number of values for XFS log operation stats: %d", l)
+	}
+
+	return LogOperationStats{
+		Writes:            us[0],
+		Blocks:            us[1],
+		NoInternalBuffers: us[2],
+		Force:             us[3],
+		ForceSleep:        us[4],
+	}, nil
+}
+
+// readWriteStats builds a ReadWriteStats from a slice of uint32s.
+func readWriteStats(us []uint32) (ReadWriteStats, error) {
+	if l := len(us); l != 2 {
+		return ReadWriteStats{}, fmt.Errorf("incorrect number of values for XFS read write stats: %d", l)
+	}
+
+	return ReadWriteStats{
+		Read:  us[0],
+		Write: us[1],
+	}, nil
+}
+
+// attributeOperationStats builds an AttributeOperationStats from a slice of uint32s.
+func attributeOperationStats(us []uint32) (AttributeOperationStats, error) {
+	if l := len(us); l != 4 {
+		return AttributeOperationStats{}, fmt.Errorf("incorrect number of values for XFS attribute operation stats: %d", l)
+	}
+
+	return AttributeOperationStats{
+		Get:    us[0],
+		Set:    us[1],
+		Remove: us[2],
+		List:   us[3],
+	}, nil
+}
+
+// inodeClusteringStats builds an InodeClusteringStats from a slice of uint32s.
+func inodeClusteringStats(us []uint32) (InodeClusteringStats, error) {
+	if l := len(us); l != 3 {
+		return InodeClusteringStats{}, fmt.Errorf("incorrect number of values for XFS inode clustering stats: %d", l)
+	}
+
+	return InodeClusteringStats{
+		Iflush:     us[0],
+		Flush:      us[1],
+		FlushInode: us[2],
+	}, nil
+}
+
+// vnodeStats builds a VnodeStats from a slice of uint32s.
+func vnodeStats(us []uint32) (VnodeStats, error) {
+	// The attribute "Free" appears not to be available on older XFS
+	// stats versions.  Therefore, 7 or 8 elements may appear in
+	// this slice.
+	l := len(us)
+	if l != 7 && l != 8 {
+		return VnodeStats{}, fmt.Errorf("incorrect number of values for XFS vnode stats: %d", l)
+	}
+
+	s := VnodeStats{
+		Active:   us[0],
+		Allocate: us[1],
+		Get:      us[2],
+		Hold:     us[3],
+		Release:  us[4],
+		Reclaim:  us[5],
+		Remove:   us[6],
+	}
+
+	// Skip adding free, unless it is present. The zero value will
+	// be used in place of an actual count.
+	if l == 7 {
+		return s, nil
+	}
+
+	s.Free = us[7]
+	return s, nil
+}
+
+// bufferStats builds a BufferStats from a slice of uint32s.
+func bufferStats(us []uint32) (BufferStats, error) {
+	if l := len(us); l != 9 {
+		return BufferStats{}, fmt.Errorf("incorrect number of values for XFS buffer stats: %d", l)
+	}
+
+	return BufferStats{
+		Get:             us[0],
+		Create:          us[1],
+		GetLocked:       us[2],
+		GetLockedWaited: us[3],
+		BusyLocked:      us[4],
+		MissLocked:      us[5],
+		PageRetries:     us[6],
+		PageFound:       us[7],
+		GetRead:         us[8],
+	}, nil
+}
+
+// extendedPrecisionStats builds an ExtendedPrecisionStats from a slice of uint64s.
+func extendedPrecisionStats(us []uint64) (ExtendedPrecisionStats, error) {
+	if l := len(us); l != 3 {
+		return ExtendedPrecisionStats{}, fmt.Errorf("incorrect number of values for XFS extended precision stats: %d", l)
+	}
+
+	return ExtendedPrecisionStats{
+		FlushBytes: us[0],
+		WriteBytes: us[1],
+		ReadBytes:  us[2],
+	}, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/xfs/xfs.go b/vendor/github.com/prometheus/procfs/xfs/xfs.go
new file mode 100644
index 0000000..d86794b
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/xfs/xfs.go
@@ -0,0 +1,163 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package xfs provides access to statistics exposed by the XFS filesystem.
+package xfs
+
+// Stats contains XFS filesystem runtime statistics, parsed from
+// /proc/fs/xfs/stat.
+//
+// The names and meanings of each statistic were taken from
+// http://xfs.org/index.php/Runtime_Stats and xfs_stats.h in the Linux
+// kernel source. Most counters are uint32s (same data types used in
+// xfs_stats.h), but some of the "extended precision stats" are uint64s.
+type Stats struct {
+	// The name of the filesystem used to source these statistics.
+	// If empty, this indicates aggregated statistics for all XFS
+	// filesystems on the host.
+	Name string
+
+	ExtentAllocation   ExtentAllocationStats
+	AllocationBTree    BTreeStats
+	BlockMapping       BlockMappingStats
+	BlockMapBTree      BTreeStats
+	DirectoryOperation DirectoryOperationStats
+	Transaction        TransactionStats
+	InodeOperation     InodeOperationStats
+	LogOperation       LogOperationStats
+	ReadWrite          ReadWriteStats
+	AttributeOperation AttributeOperationStats
+	InodeClustering    InodeClusteringStats
+	Vnode              VnodeStats
+	Buffer             BufferStats
+	ExtendedPrecision  ExtendedPrecisionStats
+}
+
+// ExtentAllocationStats contains statistics regarding XFS extent allocations.
+type ExtentAllocationStats struct {
+	ExtentsAllocated uint32
+	BlocksAllocated  uint32
+	ExtentsFreed     uint32
+	BlocksFreed      uint32
+}
+
+// BTreeStats contains statistics regarding an XFS internal B-tree.
+type BTreeStats struct {
+	Lookups         uint32
+	Compares        uint32
+	RecordsInserted uint32
+	RecordsDeleted  uint32
+}
+
+// BlockMappingStats contains statistics regarding XFS block maps.
+type BlockMappingStats struct {
+	Reads                uint32
+	Writes               uint32
+	Unmaps               uint32
+	ExtentListInsertions uint32
+	ExtentListDeletions  uint32
+	ExtentListLookups    uint32
+	ExtentListCompares   uint32
+}
+
+// DirectoryOperationStats contains statistics regarding XFS directory entries.
+type DirectoryOperationStats struct {
+	Lookups  uint32
+	Creates  uint32
+	Removes  uint32
+	Getdents uint32
+}
+
+// TransactionStats contains statistics regarding XFS metadata transactions.
+type TransactionStats struct {
+	Sync  uint32
+	Async uint32
+	Empty uint32
+}
+
+// InodeOperationStats contains statistics regarding XFS inode operations.
+type InodeOperationStats struct {
+	Attempts        uint32
+	Found           uint32
+	Recycle         uint32
+	Missed          uint32
+	Duplicate       uint32
+	Reclaims        uint32
+	AttributeChange uint32
+}
+
+// LogOperationStats contains statistics regarding the XFS log buffer.
+type LogOperationStats struct {
+	Writes            uint32
+	Blocks            uint32
+	NoInternalBuffers uint32
+	Force             uint32
+	ForceSleep        uint32
+}
+
+// ReadWriteStats contains statistics regarding the number of read and write
+// system calls for XFS filesystems.
+type ReadWriteStats struct {
+	Read  uint32
+	Write uint32
+}
+
+// AttributeOperationStats contains statistics regarding manipulation of
+// XFS extended file attributes.
+type AttributeOperationStats struct {
+	Get    uint32
+	Set    uint32
+	Remove uint32
+	List   uint32
+}
+
+// InodeClusteringStats contains statistics regarding XFS inode clustering
+// operations.
+type InodeClusteringStats struct {
+	Iflush     uint32
+	Flush      uint32
+	FlushInode uint32
+}
+
+// VnodeStats contains statistics regarding XFS vnode operations.
+type VnodeStats struct {
+	Active   uint32
+	Allocate uint32
+	Get      uint32
+	Hold     uint32
+	Release  uint32
+	Reclaim  uint32
+	Remove   uint32
+	Free     uint32
+}
+
+// BufferStats contains statistics regarding XFS read/write I/O buffers.
+type BufferStats struct {
+	Get             uint32
+	Create          uint32
+	GetLocked       uint32
+	GetLockedWaited uint32
+	BusyLocked      uint32
+	MissLocked      uint32
+	PageRetries     uint32
+	PageFound       uint32
+	GetRead         uint32
+}
+
+// ExtendedPrecisionStats contains high precision counters used to track the
+// total number of bytes read, written, or flushed, during XFS operations.
+type ExtendedPrecisionStats struct {
+	FlushBytes uint64
+	WriteBytes uint64
+	ReadBytes  uint64
+}
diff --git a/vendor/github.com/sirupsen/logrus/.gitignore b/vendor/github.com/sirupsen/logrus/.gitignore
new file mode 100644
index 0000000..6b7d7d1
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/.gitignore
@@ -0,0 +1,2 @@
+logrus
+vendor
diff --git a/vendor/github.com/sirupsen/logrus/.travis.yml b/vendor/github.com/sirupsen/logrus/.travis.yml
new file mode 100644
index 0000000..a8f1545
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/.travis.yml
@@ -0,0 +1,52 @@
+language: go
+go_import_path: github.com/sirupsen/logrus
+env:
+  - GOMAXPROCS=4 GORACE=halt_on_error=1
+matrix:
+  include:
+    - go: 1.10.x
+      install:
+        - go get github.com/stretchr/testify/assert
+        - go get golang.org/x/crypto/ssh/terminal
+        - go get golang.org/x/sys/unix
+        - go get golang.org/x/sys/windows
+      script:
+        - go test -race -v ./...
+    - go: 1.11.x
+      env: GO111MODULE=on
+      install:
+        - go mod download
+      script:
+        - go test -race -v ./...
+    - go: 1.11.x
+      env: GO111MODULE=off
+      install:
+        - go get github.com/stretchr/testify/assert
+        - go get golang.org/x/crypto/ssh/terminal
+        - go get golang.org/x/sys/unix
+        - go get golang.org/x/sys/windows
+      script:
+        - go test -race -v ./...
+    - go: 1.10.x
+      install:
+        - go get github.com/stretchr/testify/assert
+        - go get golang.org/x/crypto/ssh/terminal
+        - go get golang.org/x/sys/unix
+        - go get golang.org/x/sys/windows
+      script:
+        - go test -race -v -tags appengine ./...
+    - go: 1.11.x
+      env: GO111MODULE=on
+      install:
+        - go mod download
+      script:
+        - go test -race -v -tags appengine ./...
+    - go: 1.11.x
+      env: GO111MODULE=off
+      install:
+        - go get github.com/stretchr/testify/assert
+        - go get golang.org/x/crypto/ssh/terminal
+        - go get golang.org/x/sys/unix
+        - go get golang.org/x/sys/windows
+      script:
+        - go test -race -v -tags appengine ./...
diff --git a/vendor/github.com/sirupsen/logrus/CHANGELOG.md b/vendor/github.com/sirupsen/logrus/CHANGELOG.md
new file mode 100644
index 0000000..cb85d9f
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/CHANGELOG.md
@@ -0,0 +1,165 @@
+# 1.2.0
+This new release introduces:
+  * A new method `SetReportCaller` in the `Logger` to enable logging of the file, line and calling function from which the trace has been issued
+  * A new trace level named `Trace` whose level is below `Debug`
+  * A configurable exit function to be called upon a Fatal trace
+  * The `Level` object now implements the `encoding.TextUnmarshaler` interface
+
+# 1.1.1
+This is a bug fix release.
+  * fix the build break on Solaris
+  * don't drop a whole trace in JSONFormatter when a field param is a function pointer which cannot be serialized
+
+# 1.1.0
+This new release introduces:
+  * several fixes:
+    * a fix for a race condition on entry formatting
+    * proper cleanup of previously used entries before putting them back in the pool
+    * the extra newline at the end of the message in the text formatter has been removed
+  * a new global public API to check if a level is activated: IsLevelEnabled
+  * the following methods have been added to the Logger object
+    * IsLevelEnabled
+    * SetFormatter
+    * SetOutput
+    * ReplaceHooks
+  * introduction of go module
+  * an indent configuration for the json formatter
+  * output colour support for Windows
+  * the field sort function is now configurable for text formatter
+  * the CLICOLOR and CLICOLOR\_FORCE environment variable support in the text formatter
+
+# 1.0.6
+
+This new release introduces:
+  * a new API, WithTime, which allows easily forcing the time of the log entry;
+    this is mostly useful for logger wrappers
+  * a fix reverting the immutability of the entry given as parameter to the hooks
+  * a new configuration field of the JSON formatter in order to put all the fields
+    in a nested dictionary
+  * a new SetOutput method in the Logger
+  * a new configuration of the text formatter to configure the names of the default keys
+  * a new configuration of the text formatter to disable the level truncation
+
+# 1.0.5
+
+* Fix hooks race (#707)
+* Fix panic deadlock (#695)
+
+# 1.0.4
+
+* Fix race when adding hooks (#612)
+* Fix terminal check in AppEngine (#635)
+
+# 1.0.3
+
+* Replace example files with testable examples
+
+# 1.0.2
+
+* bug: quote non-string values in text formatter (#583)
+* Make (*Logger) SetLevel a public method
+
+# 1.0.1
+
+* bug: fix escaping in text formatter (#575)
+
+# 1.0.0
+
+* Officially changed name to lower-case
+* bug: colors on Windows 10 (#541)
+* bug: fix race in accessing level (#512)
+
+# 0.11.5
+
+* feature: add writer and writerlevel to entry (#372)
+
+# 0.11.4
+
+* bug: fix undefined variable on solaris (#493)
+
+# 0.11.3
+
+* formatter: configure quoting of empty values (#484)
+* formatter: configure quoting character (default is `"`) (#484)
+* bug: fix not importing io correctly in non-linux environments (#481)
+
+# 0.11.2
+
+* bug: fix windows terminal detection (#476)
+
+# 0.11.1
+
+* bug: fix tty detection with custom out (#471)
+
+# 0.11.0
+
+* performance: Use bufferpool to allocate (#370)
+* terminal: terminal detection for app-engine (#343)
+* feature: exit handler (#375)
+
+# 0.10.0
+
+* feature: Add a test hook (#180)
+* feature: `ParseLevel` is now case-insensitive (#326)
+* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308)
+* performance: avoid re-allocations on `WithFields` (#335)
+
+# 0.9.0
+
+* logrus/text_formatter: don't emit empty msg
+* logrus/hooks/airbrake: move out of main repository
+* logrus/hooks/sentry: move out of main repository
+* logrus/hooks/papertrail: move out of main repository
+* logrus/hooks/bugsnag: move out of main repository
+* logrus/core: run tests with `-race`
+* logrus/core: detect TTY based on `stderr`
+* logrus/core: support `WithError` on logger
+* logrus/core: Solaris support
+
+# 0.8.7
+
+* logrus/core: fix possible race (#216)
+* logrus/doc: small typo fixes and doc improvements
+
+
+# 0.8.6
+
+* hooks/raven: allow passing an initialized client
+
+# 0.8.5
+
+* logrus/core: revert #208
+
+# 0.8.4
+
+* formatter/text: fix data race (#218)
+
+# 0.8.3
+
+* logrus/core: fix entry log level (#208)
+* logrus/core: improve performance of text formatter by 40%
+* logrus/core: expose `LevelHooks` type
+* logrus/core: add support for DragonflyBSD and NetBSD
+* formatter/text: print structs more verbosely
+
+# 0.8.2
+
+* logrus: fix more Fatal family functions
+
+# 0.8.1
+
+* logrus: fix not exiting on `Fatalf` and `Fatalln`
+
+# 0.8.0
+
+* logrus: defaults to stderr instead of stdout
+* hooks/sentry: add special field for `*http.Request`
+* formatter/text: ignore Windows for colors
+
+# 0.7.3
+
+* formatter/\*: allow configuration of timestamp layout
+
+# 0.7.2
+
+* formatter/text: Add configuration option for time format (#158)
diff --git a/vendor/github.com/sirupsen/logrus/LICENSE b/vendor/github.com/sirupsen/logrus/LICENSE
new file mode 100644
index 0000000..f090cb4
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Simon Eskildsen
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md
new file mode 100644
index 0000000..3987310
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/README.md
@@ -0,0 +1,494 @@
+# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/>&nbsp;[![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus)&nbsp;[![GoDoc](https://godoc.org/github.com/sirupsen/logrus?status.svg)](https://godoc.org/github.com/sirupsen/logrus)
+
+Logrus is a structured logger for Go (golang), completely API compatible with
+the standard library logger.
+
+**Seeing weird case-sensitive problems?** In the past it has been possible to
+import Logrus as both upper- and lower-case. Due to the Go package environment,
+this caused issues in the community and we needed a standard. Some environments
+experienced problems with the upper-case variant, so the lower-case variant was
+chosen. Everything using `logrus` will need to use the lower-case import:
+`github.com/sirupsen/logrus`. Any package that isn't should be changed.
+
+To fix Glide, see [these
+comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437).
+For an in-depth explanation of the casing issue, see [this
+comment](https://github.com/sirupsen/logrus/issues/570#issuecomment-313933276).
+
+**Are you interested in assisting in maintaining Logrus?** Currently I have a
+lot of obligations, and I am unable to provide Logrus with the maintainership it
+needs. If you'd like to help, please reach out to me at `simon at author's
+username dot com`.
+
+Nicely color-coded in development (when a TTY is attached, otherwise just
+plain text):
+
+![Colored](http://i.imgur.com/PY7qMwd.png)
+
+With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash
+or Splunk:
+
+```json
+{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
+ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
+
+{"level":"warning","msg":"The group's number increased tremendously!",
+"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
+"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
+"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
+
+{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
+"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
+```
+
+With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not
+attached, the output is compatible with the
+[logfmt](http://godoc.org/github.com/kr/logfmt) format:
+
+```text
+time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
+time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
+time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
+time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
+time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
+time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
+```
+To ensure this behaviour even if a TTY is attached, set your formatter as follows:
+
+```go
+	log.SetFormatter(&log.TextFormatter{
+		DisableColors: true,
+		FullTimestamp: true,
+	})
+```
+
+#### Logging Method Name
+
+If you wish to add the calling method as a field, instruct the logger via:
+```go
+log.SetReportCaller(true)
+```
+This adds the caller as 'method' like so:
+
+```json
+{"animal":"penguin","level":"fatal","method":"github.com/sirupsen/arcticcreatures.migrate","msg":"a penguin swims by",
+"time":"2014-03-10 19:57:38.562543129 -0400 EDT"}
+```
+
+```text
+time="2015-03-26T01:27:38-04:00" level=fatal method=github.com/sirupsen/arcticcreatures.migrate msg="a penguin swims by" animal=penguin
+```
+Note that this does add measurable overhead: the cost will depend on the version of Go, but is
+between 20 and 40% in recent tests with 1.6 and 1.7. You can validate this in your
+environment via benchmarks:
+```
+go test -bench=.*CallerTracing
+```
+
+
+#### Case-sensitivity
+
+The organization's name was changed to lower-case, and this will not be changed
+back. If you are getting import conflicts due to case sensitivity, please use
+the lower-case import: `github.com/sirupsen/logrus`.
+
+#### Example
+
+The simplest way to use Logrus is the package-level exported logger:
+
+```go
+package main
+
+import (
+  log "github.com/sirupsen/logrus"
+)
+
+func main() {
+  log.WithFields(log.Fields{
+    "animal": "walrus",
+  }).Info("A walrus appears")
+}
+```
+
+Note that it's completely API-compatible with the stdlib logger, so you can
+replace your `log` imports everywhere with `log "github.com/sirupsen/logrus"`
+and you'll now have the flexibility of Logrus. You can customize it all you
+want:
+
+```go
+package main
+
+import (
+  "os"
+  log "github.com/sirupsen/logrus"
+)
+
+func init() {
+  // Log as JSON instead of the default ASCII formatter.
+  log.SetFormatter(&log.JSONFormatter{})
+
+  // Output to stdout instead of the default stderr
+  // Can be any io.Writer, see below for File example
+  log.SetOutput(os.Stdout)
+
+  // Only log the warning severity or above.
+  log.SetLevel(log.WarnLevel)
+}
+
+func main() {
+  log.WithFields(log.Fields{
+    "animal": "walrus",
+    "size":   10,
+  }).Info("A group of walrus emerges from the ocean")
+
+  log.WithFields(log.Fields{
+    "omg":    true,
+    "number": 122,
+  }).Warn("The group's number increased tremendously!")
+
+  log.WithFields(log.Fields{
+    "omg":    true,
+    "number": 100,
+  }).Fatal("The ice breaks!")
+
+  // A common pattern is to re-use fields between logging statements by re-using
+  // the logrus.Entry returned from WithFields()
+  contextLogger := log.WithFields(log.Fields{
+    "common": "this is a common field",
+    "other": "I also should be logged always",
+  })
+
+  contextLogger.Info("I'll be logged with common and other field")
+  contextLogger.Info("Me too")
+}
+```
+
+For more advanced usage such as logging to multiple locations from the same
+application, you can also create an instance of the `logrus` Logger:
+
+```go
+package main
+
+import (
+  "os"
+  "github.com/sirupsen/logrus"
+)
+
+// Create a new instance of the logger. You can have any number of instances.
+var log = logrus.New()
+
+func main() {
+  // The API for setting attributes is a little different from the
+  // package-level exported logger. See Godoc.
+  log.Out = os.Stdout
+
+  // You could set this to any `io.Writer` such as a file
+  // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY, 0666)
+  // if err == nil {
+  //  log.Out = file
+  // } else {
+  //  log.Info("Failed to log to file, using default stderr")
+  // }
+
+  log.WithFields(logrus.Fields{
+    "animal": "walrus",
+    "size":   10,
+  }).Info("A group of walrus emerges from the ocean")
+}
+```
+
+#### Fields
+
+Logrus encourages careful, structured logging through logging fields instead of
+long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
+to send event %s to topic %s with key %d")`, you should log the much more
+discoverable:
+
+```go
+log.WithFields(log.Fields{
+  "event": event,
+  "topic": topic,
+  "key": key,
+}).Fatal("Failed to send event")
+```
+
+We've found this API forces you to think about logging in a way that produces
+much more useful logging messages. We've been in countless situations where just
+a single added field to a log statement that was already there would've saved us
+hours. The `WithFields` call is optional.
+
+In general, with Logrus using any of the `printf`-family functions should be
+seen as a hint that you should add a field; however, you can still use the
+`printf`-family functions with Logrus.
+
+#### Default Fields
+
+Often it's helpful to have fields _always_ attached to log statements in an
+application or parts of one. For example, you may want to always log the
+`request_id` and `user_ip` in the context of a request. Instead of writing
+`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on
+every line, you can create a `logrus.Entry` to pass around instead:
+
+```go
+requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})
+requestLogger.Info("something happened on that request") // will log request_id and user_ip
+requestLogger.Warn("something not great happened")
+```
+
+#### Hooks
+
+You can add hooks for logging levels. For example, to send errors to an exception
+tracking service on `Error`, `Fatal` and `Panic`, send info to StatsD, or log to
+multiple places simultaneously, e.g. syslog.
+
+Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
+`init`:
+
+```go
+import (
+  log "github.com/sirupsen/logrus"
+  "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake"
+  logrus_syslog "github.com/sirupsen/logrus/hooks/syslog"
+  "log/syslog"
+)
+
+func init() {
+
+  // Use the Airbrake hook to report errors that have Error severity or above to
+  // an exception tracker. You can create custom hooks, see the Hooks section.
+  log.AddHook(airbrake.NewHook(123, "xyz", "production"))
+
+  hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
+  if err != nil {
+    log.Error("Unable to connect to local syslog daemon")
+  } else {
+    log.AddHook(hook)
+  }
+}
+```
+Note: the syslog hook also supports connecting to local syslog (e.g. "/dev/log", "/var/run/syslog", or "/var/run/log"). For details, please check the [syslog hook README](hooks/syslog/README.md).
+
+A list of currently known service hooks can be found in this wiki [page](https://github.com/sirupsen/logrus/wiki/Hooks)
+
+
+#### Level logging
+
+Logrus has seven logging levels: Trace, Debug, Info, Warning, Error, Fatal and Panic.
+
+```go
+log.Trace("Something very low level.")
+log.Debug("Useful debugging information.")
+log.Info("Something noteworthy happened!")
+log.Warn("You should probably take a look at this.")
+log.Error("Something failed but I'm not quitting.")
+// Calls os.Exit(1) after logging
+log.Fatal("Bye.")
+// Calls panic() after logging
+log.Panic("I'm bailing.")
+```
+
+You can set the logging level on a `Logger`; it will then only log entries with
+that severity or anything above it:
+
+```go
+// Will log anything that is info or above (warn, error, fatal, panic). Default.
+log.SetLevel(log.InfoLevel)
+```
+
+It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
+environment if your application has that.
+
+#### Entries
+
+Besides the fields added with `WithField` or `WithFields` some fields are
+automatically added to all logging events:
+
+1. `time`. The timestamp when the entry was created.
+2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
+   the `WithFields` call. E.g. `Failed to send event.`
+3. `level`. The logging level. E.g. `info`.
+
+#### Environments
+
+Logrus has no notion of environment.
+
+If you wish for hooks and formatters to only be used in specific environments,
+you should handle that yourself. For example, if your application has a global
+variable `Environment`, which is a string representation of the environment, you
+could do:
+
+```go
+import (
+  log "github.com/sirupsen/logrus"
+)
+
+func init() {
+  // do something here to set environment depending on an environment variable
+  // or command-line flag
+  if Environment == "production" {
+    log.SetFormatter(&log.JSONFormatter{})
+  } else {
+    // The TextFormatter is default, you don't actually have to do this.
+    log.SetFormatter(&log.TextFormatter{})
+  }
+}
+```
+
+This configuration is how `logrus` was intended to be used, but JSON in
+production is mostly only useful if you do log aggregation with tools like
+Splunk or Logstash.
+
+#### Formatters
+
+The built-in logging formatters are:
+
+* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
+  without colors.
+  * *Note:* to force colored output when there is no TTY, set the `ForceColors`
+    field to `true`. To force no colored output even if there is a TTY, set the
+    `DisableColors` field to `true`. For Windows, see
+    [github.com/mattn/go-colorable](https://github.com/mattn/go-colorable).
+  * When colors are enabled, levels are truncated to 4 characters by default. To disable
+    truncation set the `DisableLevelTruncation` field to `true`.
+  * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter).
+* `logrus.JSONFormatter`. Logs fields as JSON.
+  * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter).
+
+Third party logging formatters:
+
+* [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can be parsed by Kubernetes and Google Container Engine.
+* [`GELF`](https://github.com/fabienm/go-logrus-formatters). Formats entries so they comply with Graylog's [GELF 1.1 specification](http://docs.graylog.org/en/2.4/pages/gelf.html).
+* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
+* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
+* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
+
+You can define your formatter by implementing the `Formatter` interface,
+requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
+`Fields` type (`map[string]interface{}`) with all your fields as well as the
+default ones (see Entries section above):
+
+```go
+type MyJSONFormatter struct {
+}
+
+func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
+  // Note this doesn't include Time, Level and Message which are available on
+  // the Entry. Consult `godoc` on information about those fields or read the
+  // source of the official loggers.
+  serialized, err := json.Marshal(entry.Data)
+  if err != nil {
+    return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+  }
+  return append(serialized, '\n'), nil
+}
+
+// Register the custom formatter, e.g. in init or main:
+log.SetFormatter(new(MyJSONFormatter))
+```
+
+#### Logger as an `io.Writer`
+
+Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
+
+```go
+w := logger.Writer()
+defer w.Close()
+
+srv := http.Server{
+    // create a stdlib log.Logger that writes to
+    // logrus.Logger.
+    ErrorLog: log.New(w, "", 0),
+}
+```
+
+Each line written to that writer will be printed the usual way, using formatters
+and hooks. The level for those entries is `info`.
+
+This means that we can override the standard library logger easily:
+
+```go
+logger := logrus.New()
+logger.Formatter = &logrus.JSONFormatter{}
+
+// Use logrus for standard log output
+// Note that `log` here references stdlib's log
+// Not logrus imported under the name `log`.
+log.SetOutput(logger.Writer())
+```
+
+#### Rotation
+
+Log rotation is not provided with Logrus. Log rotation should be done by an
+external program (like `logrotate(8)`) that can compress and delete old log
+entries. It should not be a feature of the application-level logger.
+
+#### Tools
+
+| Tool | Description |
+| ---- | ----------- |
+|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus Mate is a tool for Logrus to manage loggers: you can initialize a logger's level, hook and formatter from a config file, and the logger will be generated with a different config for each environment.|
+|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|A helper around Logrus to wrap with spf13/Viper to load configuration with fangs! It simplifies Logrus configuration using some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) |
+
+#### Testing
+
+Logrus has a built in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides:
+
+* decorators for an existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook
+* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any):
+
+```go
+import (
+  "github.com/sirupsen/logrus"
+  "github.com/sirupsen/logrus/hooks/test"
+  "github.com/stretchr/testify/assert"
+  "testing"
+)
+
+func TestSomething(t *testing.T) {
+  logger, hook := test.NewNullLogger()
+  logger.Error("Helloerror")
+
+  assert.Equal(t, 1, len(hook.Entries))
+  assert.Equal(t, logrus.ErrorLevel, hook.LastEntry().Level)
+  assert.Equal(t, "Helloerror", hook.LastEntry().Message)
+
+  hook.Reset()
+  assert.Nil(t, hook.LastEntry())
+}
+```
+
+#### Fatal handlers
+
+Logrus can register one or more functions that will be called when any `fatal`
+level message is logged. The registered handlers will be executed before
+logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need
+to shut down gracefully. Unlike a `panic("Something went wrong...")` call, which can be intercepted with a deferred `recover`, a call to `os.Exit(1)` cannot be intercepted.
+
+```
+...
+handler := func() {
+  // gracefully shutdown something...
+}
+logrus.RegisterExitHandler(handler)
+...
+```
+
+#### Thread safety
+
+By default, Logger is protected by a mutex for concurrent writes. The mutex is held when calling hooks and writing logs.
+If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking.
+
+Situations when locking is not needed include:
+
+* You have no hooks registered, or hook calling is already thread-safe.
+
+* Writing to logger.Out is already thread-safe, for example:
+
+  1) logger.Out is protected by locks.
+
+  2) logger.Out is an os.File handle opened with the `O_APPEND` flag, and every write is smaller than 4k. (This allows multi-thread/multi-process writing.)
+
+     (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/)
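+
+A minimal sketch of disabling the lock (assuming one of the situations above holds):
+
+```go
+logger := logrus.New()
+logger.SetNoLock()
+```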
diff --git a/vendor/github.com/sirupsen/logrus/alt_exit.go b/vendor/github.com/sirupsen/logrus/alt_exit.go
new file mode 100644
index 0000000..8af9063
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/alt_exit.go
@@ -0,0 +1,64 @@
+package logrus
+
+// The following code was sourced and modified from the
+// https://github.com/tebeka/atexit package governed by the following license:
+//
+// Copyright (c) 2012 Miki Tebeka <miki.tebeka@gmail.com>.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+// the Software, and to permit persons to whom the Software is furnished to do so,
+// subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+import (
+	"fmt"
+	"os"
+)
+
+var handlers = []func(){}
+
+func runHandler(handler func()) {
+	defer func() {
+		if err := recover(); err != nil {
+			fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err)
+		}
+	}()
+
+	handler()
+}
+
+func runHandlers() {
+	for _, handler := range handlers {
+		runHandler(handler)
+	}
+}
+
+// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code)
+func Exit(code int) {
+	runHandlers()
+	os.Exit(code)
+}
+
+// RegisterExitHandler adds a Logrus Exit handler, call logrus.Exit to invoke
+// all handlers. The handlers will also be invoked when any Fatal log entry is
+// made.
+//
+// This method is useful when a caller wishes to use logrus to log a fatal
+// message but also needs to shut down gracefully. An example use case could be
+// closing database connections, or sending an alert that the application is
+// closing.
+func RegisterExitHandler(handler func()) {
+	handlers = append(handlers, handler)
+}
diff --git a/vendor/github.com/sirupsen/logrus/appveyor.yml b/vendor/github.com/sirupsen/logrus/appveyor.yml
new file mode 100644
index 0000000..96c2ce1
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/appveyor.yml
@@ -0,0 +1,14 @@
+version: "{build}"
+platform: x64
+clone_folder: c:\gopath\src\github.com\sirupsen\logrus
+environment:
+  GOPATH: c:\gopath
+branches:
+  only:
+    - master
+install:
+  - set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
+  - go version
+build_script:
+  - go get -t
+  - go test
diff --git a/vendor/github.com/sirupsen/logrus/doc.go b/vendor/github.com/sirupsen/logrus/doc.go
new file mode 100644
index 0000000..da67aba
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/doc.go
@@ -0,0 +1,26 @@
+/*
+Package logrus is a structured logger for Go, completely API compatible with the standard library logger.
+
+The simplest way to use Logrus is with the package-level exported logger:
+
+  package main
+
+  import (
+    log "github.com/sirupsen/logrus"
+  )
+
+  func main() {
+    log.WithFields(log.Fields{
+      "animal": "walrus",
+      "number": 1,
+      "size":   10,
+    }).Info("A walrus appears")
+  }
+
+Output:
+  time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10
+
+For a full guide visit https://github.com/sirupsen/logrus
+*/
+package logrus
diff --git a/vendor/github.com/sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go
new file mode 100644
index 0000000..df6d188
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/entry.go
@@ -0,0 +1,393 @@
+package logrus
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"reflect"
+	"runtime"
+	"strings"
+	"sync"
+	"time"
+)
+
+var (
+	bufferPool *sync.Pool
+
+	// qualified package name, cached at first use
+	logrusPackage string
+
+	// Positions in the call stack when tracing to report the calling method
+	minimumCallerDepth int
+
+	// Used for caller information initialisation
+	callerInitOnce sync.Once
+)
+
+const (
+	maximumCallerDepth int = 25
+	knownLogrusFrames  int = 4
+)
+
+func init() {
+	bufferPool = &sync.Pool{
+		New: func() interface{} {
+			return new(bytes.Buffer)
+		},
+	}
+
+	// start at the bottom of the stack before the package-name cache is primed
+	minimumCallerDepth = 1
+}
+
+// Defines the key when adding errors using WithError.
+var ErrorKey = "error"
+
+// Entry is the final or intermediate Logrus logging entry. It contains all
+// the fields passed with WithField{,s}. It's finally logged when Trace, Debug,
+// Info, Warn, Error, Fatal or Panic is called on it. These objects can be
+// reused and passed around as much as you wish to avoid field duplication.
+type Entry struct {
+	Logger *Logger
+
+	// Contains all the fields set by the user.
+	Data Fields
+
+	// Time at which the log entry was created
+	Time time.Time
+
+	// Level the log entry was logged at: Trace, Debug, Info, Warn, Error, Fatal or Panic
+	// This field will be set on entry firing and the value will be equal to the one in Logger struct field.
+	Level Level
+
+	// Calling method, with package name
+	Caller *runtime.Frame
+
+	// Message passed to Trace, Debug, Info, Warn, Error, Fatal or Panic
+	Message string
+
+	// When formatter is called in entry.log(), a Buffer may be set to entry
+	Buffer *bytes.Buffer
+
+	// err may contain a field formatting error
+	err string
+}
+
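+// NewEntry returns a new Entry attached to the given logger, with room
+// preallocated for a handful of fields.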
+func NewEntry(logger *Logger) *Entry {
+	return &Entry{
+		Logger: logger,
+		// Default is three fields, plus one optional.  Give a little extra room.
+		Data: make(Fields, 6),
+	}
+}
+
+// String returns the entry's string representation, as produced by the
+// logger's formatter.
+func (entry *Entry) String() (string, error) {
+	serialized, err := entry.Logger.Formatter.Format(entry)
+	if err != nil {
+		return "", err
+	}
+	str := string(serialized)
+	return str, nil
+}
+
+// Add an error as a single field (using the key defined in ErrorKey) to the Entry.
+func (entry *Entry) WithError(err error) *Entry {
+	return entry.WithField(ErrorKey, err)
+}
+
+// Add a single field to the Entry.
+func (entry *Entry) WithField(key string, value interface{}) *Entry {
+	return entry.WithFields(Fields{key: value})
+}
+
+// Add a map of fields to the Entry.
+func (entry *Entry) WithFields(fields Fields) *Entry {
+	data := make(Fields, len(entry.Data)+len(fields))
+	for k, v := range entry.Data {
+		data[k] = v
+	}
+	fieldErr := entry.err
+	for k, v := range fields {
+		isErrField := false
+		if t := reflect.TypeOf(v); t != nil {
+			switch t.Kind() {
+			case reflect.Func:
+				isErrField = true
+			case reflect.Ptr:
+				isErrField = t.Elem().Kind() == reflect.Func
+			}
+		}
+		if isErrField {
+			tmp := fmt.Sprintf("can not add field %q", k)
+			if fieldErr != "" {
+				fieldErr = entry.err + ", " + tmp
+			} else {
+				fieldErr = tmp
+			}
+		} else {
+			data[k] = v
+		}
+	}
+	return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, err: fieldErr}
+}
+
+// Overrides the time of the Entry.
+func (entry *Entry) WithTime(t time.Time) *Entry {
+	return &Entry{Logger: entry.Logger, Data: entry.Data, Time: t, err: entry.err}
+}
+
+// getPackageName reduces a fully qualified function name to the package name.
+// There really ought to be a better way...
+func getPackageName(f string) string {
+	for {
+		lastPeriod := strings.LastIndex(f, ".")
+		lastSlash := strings.LastIndex(f, "/")
+		if lastPeriod > lastSlash {
+			f = f[:lastPeriod]
+		} else {
+			break
+		}
+	}
+
+	return f
+}
+
+// getCaller retrieves the name of the first non-logrus calling function
+func getCaller() *runtime.Frame {
+	// Restrict the lookback frames to avoid runaway lookups
+	pcs := make([]uintptr, maximumCallerDepth)
+	depth := runtime.Callers(minimumCallerDepth, pcs)
+	frames := runtime.CallersFrames(pcs[:depth])
+
+	// cache this package's fully-qualified name
+	callerInitOnce.Do(func() {
+		logrusPackage = getPackageName(runtime.FuncForPC(pcs[0]).Name())
+
+		// now that we have the cache, we can skip a minimum count of known-logrus functions
+		// XXX this is dubious, the number of frames may vary
+		minimumCallerDepth = knownLogrusFrames
+	})
+
+	for f, again := frames.Next(); again; f, again = frames.Next() {
+		pkg := getPackageName(f.Function)
+
+		// If the caller isn't part of this package, we're done
+		if pkg != logrusPackage {
+			return &f
+		}
+	}
+
+	// if we got here, we failed to find the caller's context
+	return nil
+}
+
+func (entry Entry) HasCaller() (has bool) {
+	return entry.Logger != nil &&
+		entry.Logger.ReportCaller &&
+		entry.Caller != nil
+}
+
+// This function is not declared with a pointer value because otherwise
+// race conditions will occur when using multiple goroutines
+func (entry Entry) log(level Level, msg string) {
+	var buffer *bytes.Buffer
+
+	// Default to now, but allow users to override if they want.
+	//
+	// We don't have to worry about polluting future calls to Entry#log()
+	// with this assignment because this function is declared with a
+	// non-pointer receiver.
+	if entry.Time.IsZero() {
+		entry.Time = time.Now()
+	}
+
+	entry.Level = level
+	entry.Message = msg
+	if entry.Logger.ReportCaller {
+		entry.Caller = getCaller()
+	}
+
+	entry.fireHooks()
+
+	buffer = bufferPool.Get().(*bytes.Buffer)
+	buffer.Reset()
+	defer bufferPool.Put(buffer)
+	entry.Buffer = buffer
+
+	entry.write()
+
+	entry.Buffer = nil
+
+	// To avoid Entry#log() returning a value that only would make sense for
+	// panic() to use in Entry#Panic(), we avoid the allocation by checking
+	// directly here.
+	if level <= PanicLevel {
+		panic(&entry)
+	}
+}
+
+func (entry *Entry) fireHooks() {
+	entry.Logger.mu.Lock()
+	defer entry.Logger.mu.Unlock()
+	err := entry.Logger.Hooks.Fire(entry.Level, entry)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
+	}
+}
+
+func (entry *Entry) write() {
+	entry.Logger.mu.Lock()
+	defer entry.Logger.mu.Unlock()
+	serialized, err := entry.Logger.Formatter.Format(entry)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
+	} else {
+		_, err = entry.Logger.Out.Write(serialized)
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
+		}
+	}
+}
+
+func (entry *Entry) Log(level Level, args ...interface{}) {
+	if entry.Logger.IsLevelEnabled(level) {
+		entry.log(level, fmt.Sprint(args...))
+	}
+}
+
+func (entry *Entry) Trace(args ...interface{}) {
+	entry.Log(TraceLevel, args...)
+}
+
+func (entry *Entry) Debug(args ...interface{}) {
+	entry.Log(DebugLevel, args...)
+}
+
+func (entry *Entry) Print(args ...interface{}) {
+	entry.Info(args...)
+}
+
+func (entry *Entry) Info(args ...interface{}) {
+	entry.Log(InfoLevel, args...)
+}
+
+func (entry *Entry) Warn(args ...interface{}) {
+	entry.Log(WarnLevel, args...)
+}
+
+func (entry *Entry) Warning(args ...interface{}) {
+	entry.Warn(args...)
+}
+
+func (entry *Entry) Error(args ...interface{}) {
+	entry.Log(ErrorLevel, args...)
+}
+
+func (entry *Entry) Fatal(args ...interface{}) {
+	entry.Log(FatalLevel, args...)
+	entry.Logger.Exit(1)
+}
+
+func (entry *Entry) Panic(args ...interface{}) {
+	entry.Log(PanicLevel, args...)
+	panic(fmt.Sprint(args...))
+}
+
+// Entry Printf family functions
+
+func (entry *Entry) Logf(level Level, format string, args ...interface{}) {
+	entry.Log(level, fmt.Sprintf(format, args...))
+}
+
+func (entry *Entry) Tracef(format string, args ...interface{}) {
+	entry.Logf(TraceLevel, format, args...)
+}
+
+func (entry *Entry) Debugf(format string, args ...interface{}) {
+	entry.Logf(DebugLevel, format, args...)
+}
+
+func (entry *Entry) Infof(format string, args ...interface{}) {
+	entry.Logf(InfoLevel, format, args...)
+}
+
+func (entry *Entry) Printf(format string, args ...interface{}) {
+	entry.Infof(format, args...)
+}
+
+func (entry *Entry) Warnf(format string, args ...interface{}) {
+	entry.Logf(WarnLevel, format, args...)
+}
+
+func (entry *Entry) Warningf(format string, args ...interface{}) {
+	entry.Warnf(format, args...)
+}
+
+func (entry *Entry) Errorf(format string, args ...interface{}) {
+	entry.Logf(ErrorLevel, format, args...)
+}
+
+func (entry *Entry) Fatalf(format string, args ...interface{}) {
+	entry.Logf(FatalLevel, format, args...)
+	entry.Logger.Exit(1)
+}
+
+func (entry *Entry) Panicf(format string, args ...interface{}) {
+	entry.Logf(PanicLevel, format, args...)
+}
+
+// Entry Println family functions
+
+func (entry *Entry) Logln(level Level, args ...interface{}) {
+	if entry.Logger.IsLevelEnabled(level) {
+		entry.Log(level, entry.sprintlnn(args...))
+	}
+}
+
+func (entry *Entry) Traceln(args ...interface{}) {
+	entry.Logln(TraceLevel, args...)
+}
+
+func (entry *Entry) Debugln(args ...interface{}) {
+	entry.Logln(DebugLevel, args...)
+}
+
+func (entry *Entry) Infoln(args ...interface{}) {
+	entry.Logln(InfoLevel, args...)
+}
+
+func (entry *Entry) Println(args ...interface{}) {
+	entry.Infoln(args...)
+}
+
+func (entry *Entry) Warnln(args ...interface{}) {
+	entry.Logln(WarnLevel, args...)
+}
+
+func (entry *Entry) Warningln(args ...interface{}) {
+	entry.Warnln(args...)
+}
+
+func (entry *Entry) Errorln(args ...interface{}) {
+	entry.Logln(ErrorLevel, args...)
+}
+
+func (entry *Entry) Fatalln(args ...interface{}) {
+	entry.Logln(FatalLevel, args...)
+	entry.Logger.Exit(1)
+}
+
+func (entry *Entry) Panicln(args ...interface{}) {
+	entry.Logln(PanicLevel, args...)
+}
+
+// Sprintlnn => Sprint no newline. This is to get the behavior of
+// fmt.Sprintln, where spaces are always added between operands, regardless of
+// their type. Instead of vendoring the Sprintln implementation to spare a
+// string allocation, we do the simplest thing.
+func (entry *Entry) sprintlnn(args ...interface{}) string {
+	msg := fmt.Sprintln(args...)
+	return msg[:len(msg)-1]
+}
diff --git a/vendor/github.com/sirupsen/logrus/exported.go b/vendor/github.com/sirupsen/logrus/exported.go
new file mode 100644
index 0000000..7342613
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/exported.go
@@ -0,0 +1,219 @@
+package logrus
+
+import (
+	"io"
+	"time"
+)
+
+var (
+	// std is the default package-level logger, analogous to the standard logger in stdlib `log`
+	std = New()
+)
+
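+// StandardLogger returns the package-level logger used by the exported
+// helper functions.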
+func StandardLogger() *Logger {
+	return std
+}
+
+// SetOutput sets the standard logger output.
+func SetOutput(out io.Writer) {
+	std.SetOutput(out)
+}
+
+// SetFormatter sets the standard logger formatter.
+func SetFormatter(formatter Formatter) {
+	std.SetFormatter(formatter)
+}
+
+// SetReportCaller sets whether the standard logger will include the calling
+// method as a field.
+func SetReportCaller(include bool) {
+	std.SetReportCaller(include)
+}
+
+// SetLevel sets the standard logger level.
+func SetLevel(level Level) {
+	std.SetLevel(level)
+}
+
+// GetLevel returns the standard logger level.
+func GetLevel() Level {
+	return std.GetLevel()
+}
+
+// IsLevelEnabled checks if the given level is enabled on the standard logger (its level is greater than or equal to the level param)
+func IsLevelEnabled(level Level) bool {
+	return std.IsLevelEnabled(level)
+}
+
+// AddHook adds a hook to the standard logger hooks.
+func AddHook(hook Hook) {
+	std.AddHook(hook)
+}
+
+// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.
+func WithError(err error) *Entry {
+	return std.WithField(ErrorKey, err)
+}
+
+// WithField creates an entry from the standard logger and adds a field to
+// it. If you want multiple fields, use `WithFields`.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithField(key string, value interface{}) *Entry {
+	return std.WithField(key, value)
+}
+
+// WithFields creates an entry from the standard logger and adds multiple
+// fields to it. This is simply a helper for `WithField`, invoking it
+// once for each field.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithFields(fields Fields) *Entry {
+	return std.WithFields(fields)
+}
+
+// WithTime creates an entry from the standard logger and overrides the time of
+// logs generated with it.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithTime(t time.Time) *Entry {
+	return std.WithTime(t)
+}
+
+// Trace logs a message at level Trace on the standard logger.
+func Trace(args ...interface{}) {
+	std.Trace(args...)
+}
+
+// Debug logs a message at level Debug on the standard logger.
+func Debug(args ...interface{}) {
+	std.Debug(args...)
+}
+
+// Print logs a message at level Info on the standard logger.
+func Print(args ...interface{}) {
+	std.Print(args...)
+}
+
+// Info logs a message at level Info on the standard logger.
+func Info(args ...interface{}) {
+	std.Info(args...)
+}
+
+// Warn logs a message at level Warn on the standard logger.
+func Warn(args ...interface{}) {
+	std.Warn(args...)
+}
+
+// Warning logs a message at level Warn on the standard logger.
+func Warning(args ...interface{}) {
+	std.Warning(args...)
+}
+
+// Error logs a message at level Error on the standard logger.
+func Error(args ...interface{}) {
+	std.Error(args...)
+}
+
+// Panic logs a message at level Panic on the standard logger.
+func Panic(args ...interface{}) {
+	std.Panic(args...)
+}
+
+// Fatal logs a message at level Fatal on the standard logger then the process will exit with status set to 1.
+func Fatal(args ...interface{}) {
+	std.Fatal(args...)
+}
+
+// Tracef logs a message at level Trace on the standard logger.
+func Tracef(format string, args ...interface{}) {
+	std.Tracef(format, args...)
+}
+
+// Debugf logs a message at level Debug on the standard logger.
+func Debugf(format string, args ...interface{}) {
+	std.Debugf(format, args...)
+}
+
+// Printf logs a message at level Info on the standard logger.
+func Printf(format string, args ...interface{}) {
+	std.Printf(format, args...)
+}
+
+// Infof logs a message at level Info on the standard logger.
+func Infof(format string, args ...interface{}) {
+	std.Infof(format, args...)
+}
+
+// Warnf logs a message at level Warn on the standard logger.
+func Warnf(format string, args ...interface{}) {
+	std.Warnf(format, args...)
+}
+
+// Warningf logs a message at level Warn on the standard logger.
+func Warningf(format string, args ...interface{}) {
+	std.Warningf(format, args...)
+}
+
+// Errorf logs a message at level Error on the standard logger.
+func Errorf(format string, args ...interface{}) {
+	std.Errorf(format, args...)
+}
+
+// Panicf logs a message at level Panic on the standard logger.
+func Panicf(format string, args ...interface{}) {
+	std.Panicf(format, args...)
+}
+
+// Fatalf logs a message at level Fatal on the standard logger then the process will exit with status set to 1.
+func Fatalf(format string, args ...interface{}) {
+	std.Fatalf(format, args...)
+}
+
+// Traceln logs a message at level Trace on the standard logger.
+func Traceln(args ...interface{}) {
+	std.Traceln(args...)
+}
+
+// Debugln logs a message at level Debug on the standard logger.
+func Debugln(args ...interface{}) {
+	std.Debugln(args...)
+}
+
+// Println logs a message at level Info on the standard logger.
+func Println(args ...interface{}) {
+	std.Println(args...)
+}
+
+// Infoln logs a message at level Info on the standard logger.
+func Infoln(args ...interface{}) {
+	std.Infoln(args...)
+}
+
+// Warnln logs a message at level Warn on the standard logger.
+func Warnln(args ...interface{}) {
+	std.Warnln(args...)
+}
+
+// Warningln logs a message at level Warn on the standard logger.
+func Warningln(args ...interface{}) {
+	std.Warningln(args...)
+}
+
+// Errorln logs a message at level Error on the standard logger.
+func Errorln(args ...interface{}) {
+	std.Errorln(args...)
+}
+
+// Panicln logs a message at level Panic on the standard logger.
+func Panicln(args ...interface{}) {
+	std.Panicln(args...)
+}
+
+// Fatalln logs a message at level Fatal on the standard logger then the process will exit with status set to 1.
+func Fatalln(args ...interface{}) {
+	std.Fatalln(args...)
+}
diff --git a/vendor/github.com/sirupsen/logrus/formatter.go b/vendor/github.com/sirupsen/logrus/formatter.go
new file mode 100644
index 0000000..4088837
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/formatter.go
@@ -0,0 +1,78 @@
+package logrus
+
+import "time"
+
+// Default key names for the default fields
+const (
+	defaultTimestampFormat = time.RFC3339
+	FieldKeyMsg            = "msg"
+	FieldKeyLevel          = "level"
+	FieldKeyTime           = "time"
+	FieldKeyLogrusError    = "logrus_error"
+	FieldKeyFunc           = "func"
+	FieldKeyFile           = "file"
+)
+
+// The Formatter interface is used to implement a custom Formatter. It takes an
+// `Entry`. It exposes all the fields, including the default ones:
+//
+// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ..
+// * `entry.Data["time"]`. The timestamp.
+// * `entry.Data["level"]`. The level the entry was logged at.
+//
+// Any additional fields added with `WithField` or `WithFields` are also in
+// `entry.Data`. Format is expected to return an array of bytes which are then
+// logged to `logger.Out`.
+type Formatter interface {
+	Format(*Entry) ([]byte, error)
+}
+
+// This is to not silently overwrite `time`, `msg`, `func` and `level` fields when
+// dumping it. If this code wasn't there, doing:
+//
+//  logrus.WithField("level", 1).Info("hello")
+//
+// would just silently drop the user-provided level. Instead, with this code
+// it'll be logged as:
+//
+//  {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
+//
+// It's not exported because it's still using Data in an opinionated way. It's to
+// avoid code duplication between the two default formatters.
+func prefixFieldClashes(data Fields, fieldMap FieldMap, reportCaller bool) {
+	timeKey := fieldMap.resolve(FieldKeyTime)
+	if t, ok := data[timeKey]; ok {
+		data["fields."+timeKey] = t
+		delete(data, timeKey)
+	}
+
+	msgKey := fieldMap.resolve(FieldKeyMsg)
+	if m, ok := data[msgKey]; ok {
+		data["fields."+msgKey] = m
+		delete(data, msgKey)
+	}
+
+	levelKey := fieldMap.resolve(FieldKeyLevel)
+	if l, ok := data[levelKey]; ok {
+		data["fields."+levelKey] = l
+		delete(data, levelKey)
+	}
+
+	logrusErrKey := fieldMap.resolve(FieldKeyLogrusError)
+	if l, ok := data[logrusErrKey]; ok {
+		data["fields."+logrusErrKey] = l
+		delete(data, logrusErrKey)
+	}
+
+	// If reportCaller is not set, 'func' will not conflict.
+	if reportCaller {
+		funcKey := fieldMap.resolve(FieldKeyFunc)
+		if l, ok := data[funcKey]; ok {
+			data["fields."+funcKey] = l
+		}
+		fileKey := fieldMap.resolve(FieldKeyFile)
+		if l, ok := data[fileKey]; ok {
+			data["fields."+fileKey] = l
+		}
+	}
+}
diff --git a/vendor/github.com/sirupsen/logrus/go.mod b/vendor/github.com/sirupsen/logrus/go.mod
new file mode 100644
index 0000000..94574cc
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/go.mod
@@ -0,0 +1,11 @@
+module github.com/sirupsen/logrus
+
+require (
+	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/konsorten/go-windows-terminal-sequences v1.0.1
+	github.com/pmezard/go-difflib v1.0.0 // indirect
+	github.com/stretchr/objx v0.1.1 // indirect
+	github.com/stretchr/testify v1.2.2
+	golang.org/x/crypto v0.0.0-20180904163835-0709b304e793
+	golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33
+)
diff --git a/vendor/github.com/sirupsen/logrus/go.sum b/vendor/github.com/sirupsen/logrus/go.sum
new file mode 100644
index 0000000..133d34a
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/go.sum
@@ -0,0 +1,15 @@
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe h1:CHRGQ8V7OlCYtwaKPJi3iA7J+YdNKdo8j7nG5IgDhjs=
+github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793 h1:u+LnwYTOOW7Ukr/fppxEb1Nwz0AtPflrblfvUudpo+I=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33 h1:I6FyU15t786LL7oL/hn43zqTuEGr4PN7F4XJ1p4E3Y8=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
diff --git a/vendor/github.com/sirupsen/logrus/hooks.go b/vendor/github.com/sirupsen/logrus/hooks.go
new file mode 100644
index 0000000..3f151cd
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/hooks.go
@@ -0,0 +1,34 @@
+package logrus
+
+// A hook to be fired when logging on the logging levels returned from
+// `Levels()` on your implementation of the interface. Note that hooks are
+// not fired in a goroutine or via a channel with workers; if your hook
+// blocks and you don't want the logging calls for the levels returned from
+// `Levels()` to block, you should handle that concurrency yourself.
+type Hook interface {
+	Levels() []Level
+	Fire(*Entry) error
+}
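+
+// As an illustrative sketch (not part of the upstream API), a hook that only
+// fires on errors could look like:
+//
+//	type errorHook struct{}
+//
+//	func (errorHook) Levels() []Level { return []Level{ErrorLevel} }
+//
+//	func (errorHook) Fire(entry *Entry) error {
+//		// forward entry.Message to an error-tracking service here
+//		return nil
+//	}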
+
+// Internal type for storing the hooks on a logger instance.
+type LevelHooks map[Level][]Hook
+
+// Add a hook to an instance of logger. This is called with
+// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
+func (hooks LevelHooks) Add(hook Hook) {
+	for _, level := range hook.Levels() {
+		hooks[level] = append(hooks[level], hook)
+	}
+}
+
+// Fire all the hooks for the passed level. Used by `entry.log` to fire
+// appropriate hooks for a log entry.
+func (hooks LevelHooks) Fire(level Level, entry *Entry) error {
+	for _, hook := range hooks[level] {
+		if err := hook.Fire(entry); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/sirupsen/logrus/json_formatter.go b/vendor/github.com/sirupsen/logrus/json_formatter.go
new file mode 100644
index 0000000..2605753
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/json_formatter.go
@@ -0,0 +1,105 @@
+package logrus
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+)
+
+type fieldKey string
+
+// FieldMap allows customization of the key names for default fields.
+type FieldMap map[fieldKey]string
+
+func (f FieldMap) resolve(key fieldKey) string {
+	if k, ok := f[key]; ok {
+		return k
+	}
+
+	return string(key)
+}
+
+// JSONFormatter formats logs into parsable JSON
+type JSONFormatter struct {
+	// TimestampFormat sets the format used for marshaling timestamps.
+	TimestampFormat string
+
+	// DisableTimestamp allows disabling automatic timestamps in output
+	DisableTimestamp bool
+
+	// DataKey allows users to put all the log entry parameters into a nested dictionary at a given key.
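+	// For example, DataKey: "args" would nest all user fields under {"args": {...}}.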
+	DataKey string
+
+	// FieldMap allows users to customize the names of keys for default fields.
+	// As an example:
+	// formatter := &JSONFormatter{
+	//   	FieldMap: FieldMap{
+	// 		 FieldKeyTime:  "@timestamp",
+	// 		 FieldKeyLevel: "@level",
+	// 		 FieldKeyMsg:   "@message",
+	// 		 FieldKeyFunc:  "@caller",
+	//    },
+	// }
+	FieldMap FieldMap
+
+	// PrettyPrint will indent all json logs
+	PrettyPrint bool
+}
+
+// Format renders a single log entry
+func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
+	data := make(Fields, len(entry.Data)+4)
+	for k, v := range entry.Data {
+		switch v := v.(type) {
+		case error:
+			// Otherwise errors are ignored by `encoding/json`
+			// https://github.com/sirupsen/logrus/issues/137
+			data[k] = v.Error()
+		default:
+			data[k] = v
+		}
+	}
+
+	if f.DataKey != "" {
+		newData := make(Fields, 4)
+		newData[f.DataKey] = data
+		data = newData
+	}
+
+	prefixFieldClashes(data, f.FieldMap, entry.HasCaller())
+
+	timestampFormat := f.TimestampFormat
+	if timestampFormat == "" {
+		timestampFormat = defaultTimestampFormat
+	}
+
+	if entry.err != "" {
+		data[f.FieldMap.resolve(FieldKeyLogrusError)] = entry.err
+	}
+	if !f.DisableTimestamp {
+		data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat)
+	}
+	data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
+	data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()
+	if entry.HasCaller() {
+		data[f.FieldMap.resolve(FieldKeyFunc)] = entry.Caller.Function
+		data[f.FieldMap.resolve(FieldKeyFile)] = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
+	}
+
+	var b *bytes.Buffer
+	if entry.Buffer != nil {
+		b = entry.Buffer
+	} else {
+		b = &bytes.Buffer{}
+	}
+
+	encoder := json.NewEncoder(b)
+	if f.PrettyPrint {
+		encoder.SetIndent("", "  ")
+	}
+	if err := encoder.Encode(data); err != nil {
+		return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+	}
+
+	return b.Bytes(), nil
+}
diff --git a/vendor/github.com/sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go
new file mode 100644
index 0000000..9bf64e2
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/logger.go
@@ -0,0 +1,343 @@
+package logrus
+
+import (
+	"io"
+	"os"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+type Logger struct {
+	// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
+	// file, or leave it default which is `os.Stderr`. You can also set this to
+	// something more adventurous, such as logging to Kafka.
+	Out io.Writer
+	// Hooks for the logger instance. These allow firing events based on logging
+	// levels and log entries. For example, to send errors to an error tracking
+	// service, log to StatsD or dump the core on fatal errors.
+	Hooks LevelHooks
+	// All log entries pass through the formatter before logged to Out. The
+	// included formatters are `TextFormatter` and `JSONFormatter` for which
+	// TextFormatter is the default. In development (when a TTY is attached) it
+	// logs with colors, but to a file it wouldn't. You can easily implement your
+	// own that implements the `Formatter` interface, see the `README` or included
+	// formatters for examples.
+	Formatter Formatter
+
+	// Flag for whether to log caller info (off by default)
+	ReportCaller bool
+
+	// The logging level the logger should log at. This is typically (and defaults
+	// to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
+	// logged.
+	Level Level
+	// Used to sync writing to the log. Locking is enabled by default.
+	mu MutexWrap
+	// Reusable empty entry
+	entryPool sync.Pool
+	// Function to exit the application, defaults to `os.Exit()`
+	ExitFunc exitFunc
+}
+
+type exitFunc func(int)
+
+type MutexWrap struct {
+	lock     sync.Mutex
+	disabled bool
+}
+
+func (mw *MutexWrap) Lock() {
+	if !mw.disabled {
+		mw.lock.Lock()
+	}
+}
+
+func (mw *MutexWrap) Unlock() {
+	if !mw.disabled {
+		mw.lock.Unlock()
+	}
+}
+
+func (mw *MutexWrap) Disable() {
+	mw.disabled = true
+}
+
+// Creates a new logger. Configuration should be set by changing `Formatter`,
+// `Out` and `Hooks` directly on the default logger instance. You can also just
+// instantiate your own:
+//
+//    var log = &Logger{
+//      Out: os.Stderr,
+//      Formatter: new(JSONFormatter),
+//      Hooks: make(LevelHooks),
+//      Level: logrus.DebugLevel,
+//    }
+//
+// It's recommended to make this a global instance called `log`.
+func New() *Logger {
+	return &Logger{
+		Out:          os.Stderr,
+		Formatter:    new(TextFormatter),
+		Hooks:        make(LevelHooks),
+		Level:        InfoLevel,
+		ExitFunc:     os.Exit,
+		ReportCaller: false,
+	}
+}
+
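+// newEntry takes a reusable Entry from the pool, or allocates a fresh one;
+// releaseEntry (below) resets its field map and returns it to the pool.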
+func (logger *Logger) newEntry() *Entry {
+	entry, ok := logger.entryPool.Get().(*Entry)
+	if ok {
+		return entry
+	}
+	return NewEntry(logger)
+}
+
+func (logger *Logger) releaseEntry(entry *Entry) {
+	entry.Data = map[string]interface{}{}
+	logger.entryPool.Put(entry)
+}
+
+// Adds a field to the log entry. Note that it doesn't log until you call
+// Debug, Print, Info, Warn, Error, Fatal or Panic; it only creates a log entry.
+// If you want multiple fields, use `WithFields`.
+func (logger *Logger) WithField(key string, value interface{}) *Entry {
+	entry := logger.newEntry()
+	defer logger.releaseEntry(entry)
+	return entry.WithField(key, value)
+}
+
+// Adds a map of fields to the log entry. All it does is call `WithField` for
+// each field.
+func (logger *Logger) WithFields(fields Fields) *Entry {
+	entry := logger.newEntry()
+	defer logger.releaseEntry(entry)
+	return entry.WithFields(fields)
+}
+
+// Add an error as a single field to the log entry.  All it does is call
+// `WithError` for the given `error`.
+func (logger *Logger) WithError(err error) *Entry {
+	entry := logger.newEntry()
+	defer logger.releaseEntry(entry)
+	return entry.WithError(err)
+}
+
+// Overrides the time of the log entry.
+func (logger *Logger) WithTime(t time.Time) *Entry {
+	entry := logger.newEntry()
+	defer logger.releaseEntry(entry)
+	return entry.WithTime(t)
+}
+
+func (logger *Logger) Logf(level Level, format string, args ...interface{}) {
+	if logger.IsLevelEnabled(level) {
+		entry := logger.newEntry()
+		entry.Logf(level, format, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Tracef(format string, args ...interface{}) {
+	logger.Logf(TraceLevel, format, args...)
+}
+
+func (logger *Logger) Debugf(format string, args ...interface{}) {
+	logger.Logf(DebugLevel, format, args...)
+}
+
+func (logger *Logger) Infof(format string, args ...interface{}) {
+	logger.Logf(InfoLevel, format, args...)
+}
+
+func (logger *Logger) Printf(format string, args ...interface{}) {
+	entry := logger.newEntry()
+	entry.Printf(format, args...)
+	logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warnf(format string, args ...interface{}) {
+	logger.Logf(WarnLevel, format, args...)
+}
+
+func (logger *Logger) Warningf(format string, args ...interface{}) {
+	logger.Warnf(format, args...)
+}
+
+func (logger *Logger) Errorf(format string, args ...interface{}) {
+	logger.Logf(ErrorLevel, format, args...)
+}
+
+func (logger *Logger) Fatalf(format string, args ...interface{}) {
+	logger.Logf(FatalLevel, format, args...)
+	logger.Exit(1)
+}
+
+func (logger *Logger) Panicf(format string, args ...interface{}) {
+	logger.Logf(PanicLevel, format, args...)
+}
+
+func (logger *Logger) Log(level Level, args ...interface{}) {
+	if logger.IsLevelEnabled(level) {
+		entry := logger.newEntry()
+		entry.Log(level, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Trace(args ...interface{}) {
+	logger.Log(TraceLevel, args...)
+}
+
+func (logger *Logger) Debug(args ...interface{}) {
+	logger.Log(DebugLevel, args...)
+}
+
+func (logger *Logger) Info(args ...interface{}) {
+	logger.Log(InfoLevel, args...)
+}
+
+func (logger *Logger) Print(args ...interface{}) {
+	entry := logger.newEntry()
+	entry.Info(args...)
+	logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warn(args ...interface{}) {
+	logger.Log(WarnLevel, args...)
+}
+
+func (logger *Logger) Warning(args ...interface{}) {
+	logger.Warn(args...)
+}
+
+func (logger *Logger) Error(args ...interface{}) {
+	logger.Log(ErrorLevel, args...)
+}
+
+func (logger *Logger) Fatal(args ...interface{}) {
+	logger.Log(FatalLevel, args...)
+	logger.Exit(1)
+}
+
+func (logger *Logger) Panic(args ...interface{}) {
+	logger.Log(PanicLevel, args...)
+}
+
+func (logger *Logger) Logln(level Level, args ...interface{}) {
+	if logger.IsLevelEnabled(level) {
+		entry := logger.newEntry()
+		entry.Logln(level, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Traceln(args ...interface{}) {
+	logger.Logln(TraceLevel, args...)
+}
+
+func (logger *Logger) Debugln(args ...interface{}) {
+	logger.Logln(DebugLevel, args...)
+}
+
+func (logger *Logger) Infoln(args ...interface{}) {
+	logger.Logln(InfoLevel, args...)
+}
+
+func (logger *Logger) Println(args ...interface{}) {
+	entry := logger.newEntry()
+	entry.Println(args...)
+	logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warnln(args ...interface{}) {
+	logger.Logln(WarnLevel, args...)
+}
+
+func (logger *Logger) Warningln(args ...interface{}) {
+	logger.Warnln(args...)
+}
+
+func (logger *Logger) Errorln(args ...interface{}) {
+	logger.Logln(ErrorLevel, args...)
+}
+
+func (logger *Logger) Fatalln(args ...interface{}) {
+	logger.Logln(FatalLevel, args...)
+	logger.Exit(1)
+}
+
+func (logger *Logger) Panicln(args ...interface{}) {
+	logger.Logln(PanicLevel, args...)
+}
+
+func (logger *Logger) Exit(code int) {
+	runHandlers()
+	if logger.ExitFunc == nil {
+		logger.ExitFunc = os.Exit
+	}
+	logger.ExitFunc(code)
+}
+
+// SetNoLock disables locking. When the log file is opened in append mode,
+// it is safe to write to it concurrently (for messages under 4k on Linux),
+// so in these cases the user can choose to disable the lock.
+func (logger *Logger) SetNoLock() {
+	logger.mu.Disable()
+}
+
+func (logger *Logger) level() Level {
+	return Level(atomic.LoadUint32((*uint32)(&logger.Level)))
+}
+
+// SetLevel sets the logger level.
+func (logger *Logger) SetLevel(level Level) {
+	atomic.StoreUint32((*uint32)(&logger.Level), uint32(level))
+}
+
+// GetLevel returns the logger level.
+func (logger *Logger) GetLevel() Level {
+	return logger.level()
+}
+
+// AddHook adds a hook to the logger hooks.
+func (logger *Logger) AddHook(hook Hook) {
+	logger.mu.Lock()
+	defer logger.mu.Unlock()
+	logger.Hooks.Add(hook)
+}
+
+// IsLevelEnabled checks if the given level is enabled on the logger (its level is greater than or equal to the level param)
+func (logger *Logger) IsLevelEnabled(level Level) bool {
+	return logger.level() >= level
+}
+
+// SetFormatter sets the logger formatter.
+func (logger *Logger) SetFormatter(formatter Formatter) {
+	logger.mu.Lock()
+	defer logger.mu.Unlock()
+	logger.Formatter = formatter
+}
+
+// SetOutput sets the logger output.
+func (logger *Logger) SetOutput(output io.Writer) {
+	logger.mu.Lock()
+	defer logger.mu.Unlock()
+	logger.Out = output
+}
+
+func (logger *Logger) SetReportCaller(reportCaller bool) {
+	logger.mu.Lock()
+	defer logger.mu.Unlock()
+	logger.ReportCaller = reportCaller
+}
+
+// ReplaceHooks replaces the logger hooks and returns the old ones
+func (logger *Logger) ReplaceHooks(hooks LevelHooks) LevelHooks {
+	logger.mu.Lock()
+	oldHooks := logger.Hooks
+	logger.Hooks = hooks
+	logger.mu.Unlock()
+	return oldHooks
+}
diff --git a/vendor/github.com/sirupsen/logrus/logrus.go b/vendor/github.com/sirupsen/logrus/logrus.go
new file mode 100644
index 0000000..c1ca889
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/logrus.go
@@ -0,0 +1,186 @@
+package logrus
+
+import (
+	"fmt"
+	"log"
+	"strings"
+)
+
+// Fields type, used to pass to `WithFields`.
+type Fields map[string]interface{}
+
+// Level type
+type Level uint32
+
+// Convert the Level to a string. E.g. PanicLevel becomes "panic".
+func (level Level) String() string {
+	if b, err := level.MarshalText(); err == nil {
+		return string(b)
+	} else {
+		return "unknown"
+	}
+}
+
+// ParseLevel takes a string level and returns the Logrus log level constant.
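+// For example, ParseLevel("debug") returns DebugLevel; unrecognized names
+// yield an error.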
+func ParseLevel(lvl string) (Level, error) {
+	switch strings.ToLower(lvl) {
+	case "panic":
+		return PanicLevel, nil
+	case "fatal":
+		return FatalLevel, nil
+	case "error":
+		return ErrorLevel, nil
+	case "warn", "warning":
+		return WarnLevel, nil
+	case "info":
+		return InfoLevel, nil
+	case "debug":
+		return DebugLevel, nil
+	case "trace":
+		return TraceLevel, nil
+	}
+
+	var l Level
+	return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
+}
+
+// UnmarshalText implements encoding.TextUnmarshaler.
+func (level *Level) UnmarshalText(text []byte) error {
+	l, err := ParseLevel(string(text))
+	if err != nil {
+		return err
+	}
+
+	*level = Level(l)
+
+	return nil
+}
+
+func (level Level) MarshalText() ([]byte, error) {
+	switch level {
+	case TraceLevel:
+		return []byte("trace"), nil
+	case DebugLevel:
+		return []byte("debug"), nil
+	case InfoLevel:
+		return []byte("info"), nil
+	case WarnLevel:
+		return []byte("warning"), nil
+	case ErrorLevel:
+		return []byte("error"), nil
+	case FatalLevel:
+		return []byte("fatal"), nil
+	case PanicLevel:
+		return []byte("panic"), nil
+	}
+
+	return nil, fmt.Errorf("not a valid logrus level %q", level)
+}
+
+// A constant exposing all logging levels
+var AllLevels = []Level{
+	PanicLevel,
+	FatalLevel,
+	ErrorLevel,
+	WarnLevel,
+	InfoLevel,
+	DebugLevel,
+	TraceLevel,
+}
+
+// These are the different logging levels. You can set the logging level to log
+// on your instance of logger, obtained with `logrus.New()`.
+const (
+	// PanicLevel level, highest level of severity. Logs and then calls panic with the
+	// message passed to Debug, Info, ...
+	PanicLevel Level = iota
+	// FatalLevel level. Logs and then calls `logger.Exit(1)`. It will exit even if the
+	// logging level is set to Panic.
+	FatalLevel
+	// ErrorLevel level. Logs. Used for errors that should definitely be noted.
+	// Commonly used for hooks to send errors to an error tracking service.
+	ErrorLevel
+	// WarnLevel level. Non-critical entries that deserve eyes.
+	WarnLevel
+	// InfoLevel level. General operational entries about what's going on inside the
+	// application.
+	InfoLevel
+	// DebugLevel level. Usually only enabled when debugging. Very verbose logging.
+	DebugLevel
+	// TraceLevel level. Designates finer-grained informational events than Debug.
+	TraceLevel
+)
+
+// Won't compile if StdLogger can't be realized by a log.Logger
+var (
+	_ StdLogger = &log.Logger{}
+	_ StdLogger = &Entry{}
+	_ StdLogger = &Logger{}
+)
+
+// StdLogger is what your logrus-enabled library should take; that way it
+// accepts both a stdlib logger and a logrus logger. There's no standard
+// interface, this is the closest we get, unfortunately.
+type StdLogger interface {
+	Print(...interface{})
+	Printf(string, ...interface{})
+	Println(...interface{})
+
+	Fatal(...interface{})
+	Fatalf(string, ...interface{})
+	Fatalln(...interface{})
+
+	Panic(...interface{})
+	Panicf(string, ...interface{})
+	Panicln(...interface{})
+}
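+
+// As an illustrative sketch, a library function declared as
+//
+//	func NewWorker(logger StdLogger) { logger.Println("starting") }
+//
+// can be handed either log.New(os.Stderr, "", log.LstdFlags) from the stdlib
+// or a *logrus.Logger.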
+
+// The FieldLogger interface generalizes the Entry and Logger types
+type FieldLogger interface {
+	WithField(key string, value interface{}) *Entry
+	WithFields(fields Fields) *Entry
+	WithError(err error) *Entry
+
+	Debugf(format string, args ...interface{})
+	Infof(format string, args ...interface{})
+	Printf(format string, args ...interface{})
+	Warnf(format string, args ...interface{})
+	Warningf(format string, args ...interface{})
+	Errorf(format string, args ...interface{})
+	Fatalf(format string, args ...interface{})
+	Panicf(format string, args ...interface{})
+
+	Debug(args ...interface{})
+	Info(args ...interface{})
+	Print(args ...interface{})
+	Warn(args ...interface{})
+	Warning(args ...interface{})
+	Error(args ...interface{})
+	Fatal(args ...interface{})
+	Panic(args ...interface{})
+
+	Debugln(args ...interface{})
+	Infoln(args ...interface{})
+	Println(args ...interface{})
+	Warnln(args ...interface{})
+	Warningln(args ...interface{})
+	Errorln(args ...interface{})
+	Fatalln(args ...interface{})
+	Panicln(args ...interface{})
+
+	// IsDebugEnabled() bool
+	// IsInfoEnabled() bool
+	// IsWarnEnabled() bool
+	// IsErrorEnabled() bool
+	// IsFatalEnabled() bool
+	// IsPanicEnabled() bool
+}
+
+// Ext1FieldLogger (the first extension to FieldLogger) is superfluous; it is
+// here for consistency. Do not use. Use Logger or Entry instead.
+type Ext1FieldLogger interface {
+	FieldLogger
+	Tracef(format string, args ...interface{})
+	Trace(args ...interface{})
+	Traceln(args ...interface{})
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_aix.go b/vendor/github.com/sirupsen/logrus/terminal_check_aix.go
new file mode 100644
index 0000000..04fdb7b
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_aix.go
@@ -0,0 +1,9 @@
+// +build !appengine,!js,!windows,aix
+
+package logrus
+
+import "io"
+
+func checkIfTerminal(w io.Writer) bool {
+	return false
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go
new file mode 100644
index 0000000..2403de9
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go
@@ -0,0 +1,11 @@
+// +build appengine
+
+package logrus
+
+import (
+	"io"
+)
+
+func checkIfTerminal(w io.Writer) bool {
+	return true
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_js.go b/vendor/github.com/sirupsen/logrus/terminal_check_js.go
new file mode 100644
index 0000000..0c20975
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_js.go
@@ -0,0 +1,11 @@
+// +build js
+
+package logrus
+
+import (
+	"io"
+)
+
+func checkIfTerminal(w io.Writer) bool {
+	return false
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go
new file mode 100644
index 0000000..d465565
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go
@@ -0,0 +1,19 @@
+// +build !appengine,!js,!windows,!aix
+
+package logrus
+
+import (
+	"io"
+	"os"
+
+	"golang.org/x/crypto/ssh/terminal"
+)
+
+func checkIfTerminal(w io.Writer) bool {
+	switch v := w.(type) {
+	case *os.File:
+		return terminal.IsTerminal(int(v.Fd()))
+	default:
+		return false
+	}
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_windows.go b/vendor/github.com/sirupsen/logrus/terminal_check_windows.go
new file mode 100644
index 0000000..3b9d286
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_windows.go
@@ -0,0 +1,20 @@
+// +build !appengine,!js,windows
+
+package logrus
+
+import (
+	"io"
+	"os"
+	"syscall"
+)
+
+func checkIfTerminal(w io.Writer) bool {
+	switch v := w.(type) {
+	case *os.File:
+		var mode uint32
+		err := syscall.GetConsoleMode(syscall.Handle(v.Fd()), &mode)
+		return err == nil
+	default:
+		return false
+	}
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_notwindows.go b/vendor/github.com/sirupsen/logrus/terminal_notwindows.go
new file mode 100644
index 0000000..3dbd237
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_notwindows.go
@@ -0,0 +1,8 @@
+// +build !windows
+
+package logrus
+
+import "io"
+
+func initTerminal(w io.Writer) {
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_windows.go b/vendor/github.com/sirupsen/logrus/terminal_windows.go
new file mode 100644
index 0000000..b4ef528
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_windows.go
@@ -0,0 +1,18 @@
+// +build !appengine,!js,windows
+
+package logrus
+
+import (
+	"io"
+	"os"
+	"syscall"
+
+	sequences "github.com/konsorten/go-windows-terminal-sequences"
+)
+
+func initTerminal(w io.Writer) {
+	switch v := w.(type) {
+	case *os.File:
+		sequences.EnableVirtualTerminalProcessing(syscall.Handle(v.Fd()), true)
+	}
+}
diff --git a/vendor/github.com/sirupsen/logrus/text_formatter.go b/vendor/github.com/sirupsen/logrus/text_formatter.go
new file mode 100644
index 0000000..fb21649
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/text_formatter.go
@@ -0,0 +1,273 @@
+package logrus
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"runtime"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+)
+
+const (
+	nocolor = 0
+	red     = 31
+	green   = 32
+	yellow  = 33
+	blue    = 36
+	gray    = 37
+)
+
+var (
+	baseTimestamp time.Time
+	emptyFieldMap FieldMap
+)
+
+func init() {
+	baseTimestamp = time.Now()
+}
+
+// TextFormatter formats logs into text
+type TextFormatter struct {
+	// Set to true to bypass checking for a TTY before outputting colors.
+	ForceColors bool
+
+	// Force disabling colors.
+	DisableColors bool
+
+	// Override coloring based on CLICOLOR and CLICOLOR_FORCE. - https://bixense.com/clicolors/
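+	// For example, CLICOLOR_FORCE set to a value other than "0" forces colors
+	// even without a TTY, while CLICOLOR=0 disables them (see isColored below).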
+	EnvironmentOverrideColors bool
+
+	// Disable timestamp logging. Useful when output is redirected to a logging
+	// system that already adds timestamps.
+	DisableTimestamp bool
+
+	// Enable logging the full timestamp when a TTY is attached instead of just
+	// the time passed since beginning of execution.
+	FullTimestamp bool
+
+	// TimestampFormat to use for display when a full timestamp is printed
+	TimestampFormat string
+
+	// The fields are sorted by default for a consistent output. For applications
+	// that log extremely frequently and don't use the JSON formatter this may not
+	// be desired.
+	DisableSorting bool
+
+	// The keys sorting function; when nil, sort.Strings is used.
+	SortingFunc func([]string)
+
+	// Disables the truncation of the level text to 4 characters.
+	DisableLevelTruncation bool
+
+	// QuoteEmptyFields will wrap empty fields in quotes if true
+	QuoteEmptyFields bool
+
+	// Whether the logger's out is to a terminal
+	isTerminal bool
+
+	// FieldMap allows users to customize the names of keys for default fields.
+	// As an example:
+	// formatter := &TextFormatter{
+	//     FieldMap: FieldMap{
+	//         FieldKeyTime:  "@timestamp",
+	//         FieldKeyLevel: "@level",
+	//         FieldKeyMsg:   "@message"}}
+	FieldMap FieldMap
+
+	terminalInitOnce sync.Once
+}
+
+func (f *TextFormatter) init(entry *Entry) {
+	if entry.Logger != nil {
+		f.isTerminal = checkIfTerminal(entry.Logger.Out)
+
+		if f.isTerminal {
+			initTerminal(entry.Logger.Out)
+		}
+	}
+}
+
+func (f *TextFormatter) isColored() bool {
+	isColored := f.ForceColors || (f.isTerminal && (runtime.GOOS != "windows"))
+
+	if f.EnvironmentOverrideColors {
+		if force, ok := os.LookupEnv("CLICOLOR_FORCE"); ok && force != "0" {
+			isColored = true
+		} else if ok && force == "0" {
+			isColored = false
+		} else if os.Getenv("CLICOLOR") == "0" {
+			isColored = false
+		}
+	}
+
+	return isColored && !f.DisableColors
+}
+
+// Format renders a single log entry
+func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
+	data := make(Fields)
+	for k, v := range entry.Data {
+		data[k] = v
+	}
+	prefixFieldClashes(data, f.FieldMap, entry.HasCaller())
+	keys := make([]string, 0, len(data))
+	for k := range data {
+		keys = append(keys, k)
+	}
+
+	fixedKeys := make([]string, 0, 4+len(data))
+	if !f.DisableTimestamp {
+		fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyTime))
+	}
+	fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLevel))
+	if entry.Message != "" {
+		fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyMsg))
+	}
+	if entry.err != "" {
+		fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLogrusError))
+	}
+	if entry.HasCaller() {
+		fixedKeys = append(fixedKeys,
+			f.FieldMap.resolve(FieldKeyFunc), f.FieldMap.resolve(FieldKeyFile))
+	}
+
+	if !f.DisableSorting {
+		if f.SortingFunc == nil {
+			sort.Strings(keys)
+			fixedKeys = append(fixedKeys, keys...)
+		} else {
+			if !f.isColored() {
+				fixedKeys = append(fixedKeys, keys...)
+				f.SortingFunc(fixedKeys)
+			} else {
+				f.SortingFunc(keys)
+			}
+		}
+	} else {
+		fixedKeys = append(fixedKeys, keys...)
+	}
+
+	var b *bytes.Buffer
+	if entry.Buffer != nil {
+		b = entry.Buffer
+	} else {
+		b = &bytes.Buffer{}
+	}
+
+	f.terminalInitOnce.Do(func() { f.init(entry) })
+
+	timestampFormat := f.TimestampFormat
+	if timestampFormat == "" {
+		timestampFormat = defaultTimestampFormat
+	}
+	if f.isColored() {
+		f.printColored(b, entry, keys, data, timestampFormat)
+	} else {
+		for _, key := range fixedKeys {
+			var value interface{}
+			switch {
+			case key == f.FieldMap.resolve(FieldKeyTime):
+				value = entry.Time.Format(timestampFormat)
+			case key == f.FieldMap.resolve(FieldKeyLevel):
+				value = entry.Level.String()
+			case key == f.FieldMap.resolve(FieldKeyMsg):
+				value = entry.Message
+			case key == f.FieldMap.resolve(FieldKeyLogrusError):
+				value = entry.err
+			case key == f.FieldMap.resolve(FieldKeyFunc) && entry.HasCaller():
+				value = entry.Caller.Function
+			case key == f.FieldMap.resolve(FieldKeyFile) && entry.HasCaller():
+				value = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
+			default:
+				value = data[key]
+			}
+			f.appendKeyValue(b, key, value)
+		}
+	}
+
+	b.WriteByte('\n')
+	return b.Bytes(), nil
+}
+
+func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, data Fields, timestampFormat string) {
+	var levelColor int
+	switch entry.Level {
+	case DebugLevel, TraceLevel:
+		levelColor = gray
+	case WarnLevel:
+		levelColor = yellow
+	case ErrorLevel, FatalLevel, PanicLevel:
+		levelColor = red
+	default:
+		levelColor = blue
+	}
+
+	levelText := strings.ToUpper(entry.Level.String())
+	if !f.DisableLevelTruncation {
+		levelText = levelText[0:4]
+	}
+
+	// Remove a single newline if it already exists in the message to keep
+	// the behavior of logrus text_formatter the same as the stdlib log package
+	entry.Message = strings.TrimSuffix(entry.Message, "\n")
+
+	caller := ""
+
+	if entry.HasCaller() {
+		caller = fmt.Sprintf("%s:%d %s()",
+			entry.Caller.File, entry.Caller.Line, entry.Caller.Function)
+	}
+
+	if f.DisableTimestamp {
+		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m%s %-44s ", levelColor, levelText, caller, entry.Message)
+	} else if !f.FullTimestamp {
+		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d]%s %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), caller, entry.Message)
+	} else {
+		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s]%s %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), caller, entry.Message)
+	}
+	for _, k := range keys {
+		v := data[k]
+		fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k)
+		f.appendValue(b, v)
+	}
+}
+
+func (f *TextFormatter) needsQuoting(text string) bool {
+	if f.QuoteEmptyFields && len(text) == 0 {
+		return true
+	}
+	for _, ch := range text {
+		if !((ch >= 'a' && ch <= 'z') ||
+			(ch >= 'A' && ch <= 'Z') ||
+			(ch >= '0' && ch <= '9') ||
+			ch == '-' || ch == '.' || ch == '_' || ch == '/' || ch == '@' || ch == '^' || ch == '+') {
+			return true
+		}
+	}
+	return false
+}
+
+func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
+	if b.Len() > 0 {
+		b.WriteByte(' ')
+	}
+	b.WriteString(key)
+	b.WriteByte('=')
+	f.appendValue(b, value)
+}
+
+func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) {
+	stringVal, ok := value.(string)
+	if !ok {
+		stringVal = fmt.Sprint(value)
+	}
+
+	if !f.needsQuoting(stringVal) {
+		b.WriteString(stringVal)
+	} else {
+		b.WriteString(fmt.Sprintf("%q", stringVal))
+	}
+}
diff --git a/vendor/github.com/sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go
new file mode 100644
index 0000000..9e1f751
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/writer.go
@@ -0,0 +1,64 @@
+package logrus
+
+import (
+	"bufio"
+	"io"
+	"runtime"
+)
+
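+// Writer returns an io.PipeWriter; everything written to it is logged, line
+// by line, at InfoLevel. An illustrative use is capturing another
+// component's stdlib logger output:
+//
+//	w := logger.Writer()
+//	defer w.Close()
+//	srv := &http.Server{ErrorLog: log.New(w, "", 0)}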
+func (logger *Logger) Writer() *io.PipeWriter {
+	return logger.WriterLevel(InfoLevel)
+}
+
+func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
+	return NewEntry(logger).WriterLevel(level)
+}
+
+func (entry *Entry) Writer() *io.PipeWriter {
+	return entry.WriterLevel(InfoLevel)
+}
+
+func (entry *Entry) WriterLevel(level Level) *io.PipeWriter {
+	reader, writer := io.Pipe()
+
+	var printFunc func(args ...interface{})
+
+	switch level {
+	case TraceLevel:
+		printFunc = entry.Trace
+	case DebugLevel:
+		printFunc = entry.Debug
+	case InfoLevel:
+		printFunc = entry.Info
+	case WarnLevel:
+		printFunc = entry.Warn
+	case ErrorLevel:
+		printFunc = entry.Error
+	case FatalLevel:
+		printFunc = entry.Fatal
+	case PanicLevel:
+		printFunc = entry.Panic
+	default:
+		printFunc = entry.Print
+	}
+
+	go entry.writerScanner(reader, printFunc)
+	runtime.SetFinalizer(writer, writerFinalizer)
+
+	return writer
+}
+
+func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
+	scanner := bufio.NewScanner(reader)
+	for scanner.Scan() {
+		printFunc(scanner.Text())
+	}
+	if err := scanner.Err(); err != nil {
+		entry.Errorf("Error while reading from Writer: %s", err)
+	}
+	reader.Close()
+}
+
+func writerFinalizer(writer *io.PipeWriter) {
+	writer.Close()
+}
diff --git a/vendor/github.com/soheilhy/cmux/.gitignore b/vendor/github.com/soheilhy/cmux/.gitignore
new file mode 100644
index 0000000..daf913b
--- /dev/null
+++ b/vendor/github.com/soheilhy/cmux/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
diff --git a/vendor/github.com/soheilhy/cmux/.travis.yml b/vendor/github.com/soheilhy/cmux/.travis.yml
new file mode 100644
index 0000000..4bc48e0
--- /dev/null
+++ b/vendor/github.com/soheilhy/cmux/.travis.yml
@@ -0,0 +1,29 @@
+language: go
+
+go:
+  - 1.6
+  - 1.7
+  - 1.8
+  - tip
+
+matrix:
+  allow_failures:
+    - go: tip
+
+gobuild_args: -race
+
+before_install:
+  - if [[ $TRAVIS_GO_VERSION == 1.6* ]]; then go get -u github.com/kisielk/errcheck; fi
+  - if [[ $TRAVIS_GO_VERSION == 1.6* ]]; then go get -u github.com/golang/lint/golint; fi
+
+before_script:
+  - '! gofmt -s -l . | read'
+  - echo $TRAVIS_GO_VERSION
+  - if [[ $TRAVIS_GO_VERSION == 1.6* ]]; then golint ./...; fi
+  - if [[ $TRAVIS_GO_VERSION == 1.6* ]]; then errcheck ./...; fi
+  - if [[ $TRAVIS_GO_VERSION == 1.6* ]]; then go tool vet .; fi
+  - if [[ $TRAVIS_GO_VERSION == 1.6* ]]; then go tool vet --shadow .; fi
+
+script:
+  - go test -bench . -v ./...
+  - go test -race -bench . -v ./...
diff --git a/vendor/github.com/soheilhy/cmux/CONTRIBUTORS b/vendor/github.com/soheilhy/cmux/CONTRIBUTORS
new file mode 100644
index 0000000..49878f2
--- /dev/null
+++ b/vendor/github.com/soheilhy/cmux/CONTRIBUTORS
@@ -0,0 +1,12 @@
+# The list of people who have contributed code to the cmux repository.
+#
+# Auto-generated with:
+#		git log --oneline --pretty=format:'%an <%aE>' | sort -u
+#
+Andreas Jaekle <andreas@jaekle.net>
+Dmitri Shuralyov <shurcooL@gmail.com>
+Ethan Mosbaugh <emosbaugh@gmail.com>
+Soheil Hassas Yeganeh <soheil.h.y@gmail.com>
+Soheil Hassas Yeganeh <soheil@cs.toronto.edu>
+Tamir Duberstein <tamir@cockroachlabs.com>
+Tamir Duberstein <tamird@gmail.com>
diff --git a/vendor/github.com/soheilhy/cmux/LICENSE b/vendor/github.com/soheilhy/cmux/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/github.com/soheilhy/cmux/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/soheilhy/cmux/README.md b/vendor/github.com/soheilhy/cmux/README.md
new file mode 100644
index 0000000..70306e6
--- /dev/null
+++ b/vendor/github.com/soheilhy/cmux/README.md
@@ -0,0 +1,83 @@
+# cmux: Connection Mux ![Travis Build Status](https://api.travis-ci.org/soheilhy/cmux.svg?branch=master "Travis Build Status") [![GoDoc](https://godoc.org/github.com/soheilhy/cmux?status.svg)](http://godoc.org/github.com/soheilhy/cmux)
+
+cmux is a generic Go library to multiplex connections based on
+their payload. Using cmux, you can serve gRPC, SSH, HTTPS, HTTP,
+Go RPC, and pretty much any other protocol on the same TCP listener.
+
+## How-To
+Simply create your main listener, create a cmux for that listener,
+and then match connections:
+```go
+// Create the main listener.
+l, err := net.Listen("tcp", ":23456")
+if err != nil {
+	log.Fatal(err)
+}
+
+// Create a cmux.
+m := cmux.New(l)
+
+// Match connections in order:
+// First grpc, then HTTP, and otherwise Go RPC/TCP.
+grpcL := m.Match(cmux.HTTP2HeaderField("content-type", "application/grpc"))
+httpL := m.Match(cmux.HTTP1Fast())
+trpcL := m.Match(cmux.Any()) // Any means anything that is not yet matched.
+
+// Create your protocol servers.
+grpcS := grpc.NewServer()
+grpchello.RegisterGreeterServer(grpcS, &server{})
+
+httpS := &http.Server{
+	Handler: &helloHTTP1Handler{},
+}
+
+trpcS := rpc.NewServer()
+trpcS.Register(&ExampleRPCRcvr{})
+
+// Use the muxed listeners for your servers.
+go grpcS.Serve(grpcL)
+go httpS.Serve(httpL)
+go trpcS.Accept(trpcL)
+
+// Start serving!
+m.Serve()
+```
+
+Take a look at [other examples in the GoDoc](http://godoc.org/github.com/soheilhy/cmux/#pkg-examples).
+
+## Docs
+* [GoDocs](https://godoc.org/github.com/soheilhy/cmux)
+
+## Performance
+There is room for improvement but, since we are only matching
+the very first bytes of a connection, the performance overhead on
+long-lived connections (i.e., RPCs and pipelined HTTP streams)
+is negligible.
+
+*TODO(soheil)*: Add benchmarks.
+
+## Limitations
+* *TLS*: `net/http` uses a type assertion to identify TLS connections; since
+cmux's lookahead-implementing connection wraps the underlying TLS connection,
+this type assertion fails.
+Because of that, you can serve HTTPS using cmux, but `http.Request.TLS`
+would not be set in your handlers (see the sketch after this list).
+
+* *Different Protocols on The Same Connection*: `cmux` matches the connection
+when it's accepted. For example, one connection can be either gRPC or REST, but
+not both. That is, we assume that a client connection is either used for gRPC
+or REST.
+
+* *Java gRPC Clients*: The Java gRPC client blocks until it receives a SETTINGS
+frame from the server. If you are using the Java client to connect to a cmux'ed
+gRPC server, please match with writers:
+```go
+grpcl := m.MatchWithWriters(cmux.HTTP2MatchHeaderFieldSendSettings("content-type", "application/grpc"))
+```
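+
+As a minimal sketch of the TLS caveat above (the `tlsConfig` and
+`helloHTTP1Handler` values here are assumptions, not part of cmux), you can
+still split TLS from plaintext traffic by matching the handshake bytes and
+terminating TLS on the matched listener:
+```go
+l, err := net.Listen("tcp", ":8443")
+if err != nil {
+	log.Fatal(err)
+}
+m := cmux.New(l)
+
+// TLS() matches the leading bytes of a TLS handshake record.
+tlsL := m.Match(cmux.TLS())
+plainL := m.Match(cmux.Any())
+
+httpsS := &http.Server{Handler: &helloHTTP1Handler{}}
+plainS := &http.Server{Handler: &helloHTTP1Handler{}}
+
+// Terminate TLS on top of the matched listener.
+go httpsS.Serve(tls.NewListener(tlsL, tlsConfig))
+go plainS.Serve(plainL)
+
+m.Serve()
+```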
+
+# Copyright and License
+Copyright 2016 The CMux Authors. All rights reserved.
+
+See [CONTRIBUTORS](https://github.com/soheilhy/cmux/blob/master/CONTRIBUTORS)
+for the CMux Authors. Code is released under
+[the Apache 2 license](https://github.com/soheilhy/cmux/blob/master/LICENSE).
diff --git a/vendor/github.com/soheilhy/cmux/buffer.go b/vendor/github.com/soheilhy/cmux/buffer.go
new file mode 100644
index 0000000..f8cf30a
--- /dev/null
+++ b/vendor/github.com/soheilhy/cmux/buffer.go
@@ -0,0 +1,67 @@
+// Copyright 2016 The CMux Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+// implied. See the License for the specific language governing
+// permissions and limitations under the License.
+
+package cmux
+
+import (
+	"bytes"
+	"io"
+)
+
+// bufferedReader is an optimized implementation of io.Reader that behaves like
+// ```
+// io.MultiReader(bytes.NewReader(buffer.Bytes()), io.TeeReader(source, buffer))
+// ```
+// without allocating.
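+//
+// While sniffing is enabled, bytes read from source are also copied into
+// buffer; reset rewinds the read offset so those bytes are replayed to
+// the caller before any new reads reach the source.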
+type bufferedReader struct {
+	source     io.Reader
+	buffer     bytes.Buffer
+	bufferRead int
+	bufferSize int
+	sniffing   bool
+	lastErr    error
+}
+
+func (s *bufferedReader) Read(p []byte) (int, error) {
+	if s.bufferSize > s.bufferRead {
+		// If we have already read something from the buffer before, we return the
+		// same data and the last error if any. We need to return immediately,
+		// otherwise we may block forever if we try to be smart and call
+		// source.Read() seeking a little more data.
+		bn := copy(p, s.buffer.Bytes()[s.bufferRead:s.bufferSize])
+		s.bufferRead += bn
+		return bn, s.lastErr
+	} else if !s.sniffing && s.buffer.Cap() != 0 {
+		// We don't need the buffer anymore.
+		// Reset it to release the internal slice.
+		s.buffer = bytes.Buffer{}
+	}
+
+	// If there is nothing more to return in the sniffed buffer, read from the
+	// source.
+	sn, sErr := s.source.Read(p)
+	if sn > 0 && s.sniffing {
+		s.lastErr = sErr
+		if wn, wErr := s.buffer.Write(p[:sn]); wErr != nil {
+			return wn, wErr
+		}
+	}
+	return sn, sErr
+}
+
+func (s *bufferedReader) reset(snif bool) {
+	s.sniffing = snif
+	s.bufferRead = 0
+	s.bufferSize = s.buffer.Len()
+}
diff --git a/vendor/github.com/soheilhy/cmux/cmux.go b/vendor/github.com/soheilhy/cmux/cmux.go
new file mode 100644
index 0000000..8040342
--- /dev/null
+++ b/vendor/github.com/soheilhy/cmux/cmux.go
@@ -0,0 +1,270 @@
+// Copyright 2016 The CMux Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+// implied. See the License for the specific language governing
+// permissions and limitations under the License.
+
+package cmux
+
+import (
+	"fmt"
+	"io"
+	"net"
+	"sync"
+	"time"
+)
+
+// Matcher matches a connection based on its content.
+type Matcher func(io.Reader) bool
+
+// MatchWriter is a match that can also write response (say to do handshake).
+type MatchWriter func(io.Writer, io.Reader) bool
+
+// ErrorHandler handles an error and returns whether
+// the mux should continue serving the listener.
+type ErrorHandler func(error) bool
+
+var _ net.Error = ErrNotMatched{}
+
+// ErrNotMatched is returned whenever a connection is not matched by any of
+// the matchers registered in the multiplexer.
+type ErrNotMatched struct {
+	c net.Conn
+}
+
+func (e ErrNotMatched) Error() string {
+	return fmt.Sprintf("mux: connection %v not matched by an matcher",
+		e.c.RemoteAddr())
+}
+
+// Temporary implements the net.Error interface.
+func (e ErrNotMatched) Temporary() bool { return true }
+
+// Timeout implements the net.Error interface.
+func (e ErrNotMatched) Timeout() bool { return false }
+
+type errListenerClosed string
+
+func (e errListenerClosed) Error() string   { return string(e) }
+func (e errListenerClosed) Temporary() bool { return false }
+func (e errListenerClosed) Timeout() bool   { return false }
+
+// ErrListenerClosed is returned from muxListener.Accept when the underlying
+// listener is closed.
+var ErrListenerClosed = errListenerClosed("mux: listener closed")
+
+// for readability of readTimeout
+var noTimeout time.Duration
+
+// New instantiates a new connection multiplexer.
+func New(l net.Listener) CMux {
+	return &cMux{
+		root:        l,
+		bufLen:      1024,
+		errh:        func(_ error) bool { return true },
+		donec:       make(chan struct{}),
+		readTimeout: noTimeout,
+	}
+}
+
+// CMux is a multiplexer for network connections.
+type CMux interface {
+	// Match returns a net.Listener that sees (i.e., accepts) only
+	// the connections matched by at least one of the matchers.
+	//
+	// The order used to call Match determines the priority of matchers.
+	Match(...Matcher) net.Listener
+	// MatchWithWriters returns a net.Listener that accepts only the
+	// connections that are matched by at least one of the matcher writers.
+	//
+	// Prefer Matchers over MatchWriters, since the latter can write on the
+	// connection before the actual handler.
+	//
+	// The order used to call Match determines the priority of matchers.
+	MatchWithWriters(...MatchWriter) net.Listener
+	// Serve starts multiplexing the listener. Serve blocks, so it
+	// should typically be invoked concurrently within a goroutine.
+	Serve() error
+	// HandleError registers an error handler that handles listener errors.
+	HandleError(ErrorHandler)
+	// SetReadTimeout sets a timeout for the reads performed by matchers.
+	SetReadTimeout(time.Duration)
+}
+
+type matchersListener struct {
+	ss []MatchWriter
+	l  muxListener
+}
+
+type cMux struct {
+	root        net.Listener
+	bufLen      int
+	errh        ErrorHandler
+	donec       chan struct{}
+	sls         []matchersListener
+	readTimeout time.Duration
+}
+
+func matchersToMatchWriters(matchers []Matcher) []MatchWriter {
+	mws := make([]MatchWriter, 0, len(matchers))
+	for _, m := range matchers {
+		cm := m
+		mws = append(mws, func(w io.Writer, r io.Reader) bool {
+			return cm(r)
+		})
+	}
+	return mws
+}
+
+func (m *cMux) Match(matchers ...Matcher) net.Listener {
+	mws := matchersToMatchWriters(matchers)
+	return m.MatchWithWriters(mws...)
+}
+
+func (m *cMux) MatchWithWriters(matchers ...MatchWriter) net.Listener {
+	ml := muxListener{
+		Listener: m.root,
+		connc:    make(chan net.Conn, m.bufLen),
+	}
+	m.sls = append(m.sls, matchersListener{ss: matchers, l: ml})
+	return ml
+}
+
+func (m *cMux) SetReadTimeout(t time.Duration) {
+	m.readTimeout = t
+}
+
+func (m *cMux) Serve() error {
+	var wg sync.WaitGroup
+
+	defer func() {
+		close(m.donec)
+		wg.Wait()
+
+		for _, sl := range m.sls {
+			close(sl.l.connc)
+			// Drain the connections enqueued for the listener.
+			for c := range sl.l.connc {
+				_ = c.Close()
+			}
+		}
+	}()
+
+	for {
+		c, err := m.root.Accept()
+		if err != nil {
+			if !m.handleErr(err) {
+				return err
+			}
+			continue
+		}
+
+		wg.Add(1)
+		go m.serve(c, m.donec, &wg)
+	}
+}
+
+func (m *cMux) serve(c net.Conn, donec <-chan struct{}, wg *sync.WaitGroup) {
+	defer wg.Done()
+
+	muc := newMuxConn(c)
+	if m.readTimeout > noTimeout {
+		_ = c.SetReadDeadline(time.Now().Add(m.readTimeout))
+	}
+	for _, sl := range m.sls {
+		for _, s := range sl.ss {
+			matched := s(muc.Conn, muc.startSniffing())
+			if matched {
+				muc.doneSniffing()
+				if m.readTimeout > noTimeout {
+					_ = c.SetReadDeadline(time.Time{})
+				}
+				select {
+				case sl.l.connc <- muc:
+				case <-donec:
+					_ = c.Close()
+				}
+				return
+			}
+		}
+	}
+
+	_ = c.Close()
+	err := ErrNotMatched{c: c}
+	if !m.handleErr(err) {
+		_ = m.root.Close()
+	}
+}
+
+func (m *cMux) HandleError(h ErrorHandler) {
+	m.errh = h
+}
+
+func (m *cMux) handleErr(err error) bool {
+	if !m.errh(err) {
+		return false
+	}
+
+	if ne, ok := err.(net.Error); ok {
+		return ne.Temporary()
+	}
+
+	return false
+}
+
+type muxListener struct {
+	net.Listener
+	connc chan net.Conn
+}
+
+func (l muxListener) Accept() (net.Conn, error) {
+	c, ok := <-l.connc
+	if !ok {
+		return nil, ErrListenerClosed
+	}
+	return c, nil
+}
+
+// MuxConn wraps a net.Conn and provides transparent sniffing of connection data.
+type MuxConn struct {
+	net.Conn
+	buf bufferedReader
+}
+
+func newMuxConn(c net.Conn) *MuxConn {
+	return &MuxConn{
+		Conn: c,
+		buf:  bufferedReader{source: c},
+	}
+}
+
+// From the io.Reader documentation:
+//
+// When Read encounters an error or end-of-file condition after
+// successfully reading n > 0 bytes, it returns the number of
+// bytes read.  It may return the (non-nil) error from the same call
+// or return the error (and n == 0) from a subsequent call.
+// An instance of this general case is that a Reader returning
+// a non-zero number of bytes at the end of the input stream may
+// return either err == EOF or err == nil.  The next Read should
+// return 0, EOF.
+func (m *MuxConn) Read(p []byte) (int, error) {
+	return m.buf.Read(p)
+}
+
+func (m *MuxConn) startSniffing() io.Reader {
+	m.buf.reset(true)
+	return &m.buf
+}
+
+func (m *MuxConn) doneSniffing() {
+	m.buf.reset(false)
+}
diff --git a/vendor/github.com/soheilhy/cmux/doc.go b/vendor/github.com/soheilhy/cmux/doc.go
new file mode 100644
index 0000000..aaa8f31
--- /dev/null
+++ b/vendor/github.com/soheilhy/cmux/doc.go
@@ -0,0 +1,18 @@
+// Copyright 2016 The CMux Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+// implied. See the License for the specific language governing
+// permissions and limitations under the License.
+
+// Package cmux is a library to multiplex network connections based on
+// their payload. Using cmux, you can serve different protocols from the
+// same listener.
+package cmux
diff --git a/vendor/github.com/soheilhy/cmux/matchers.go b/vendor/github.com/soheilhy/cmux/matchers.go
new file mode 100644
index 0000000..878ae98
--- /dev/null
+++ b/vendor/github.com/soheilhy/cmux/matchers.go
@@ -0,0 +1,267 @@
+// Copyright 2016 The CMux Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+// implied. See the License for the specific language governing
+// permissions and limitations under the License.
+
+package cmux
+
+import (
+	"bufio"
+	"crypto/tls"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"strings"
+
+	"golang.org/x/net/http2"
+	"golang.org/x/net/http2/hpack"
+)
+
+// Any is a Matcher that matches any connection.
+func Any() Matcher {
+	return func(r io.Reader) bool { return true }
+}
+
+// PrefixMatcher returns a matcher that matches a connection if it
+// starts with any of the strings in strs.
+func PrefixMatcher(strs ...string) Matcher {
+	pt := newPatriciaTreeString(strs...)
+	return pt.matchPrefix
+}
+
+func prefixByteMatcher(list ...[]byte) Matcher {
+	pt := newPatriciaTree(list...)
+	return pt.matchPrefix
+}
+
+var defaultHTTPMethods = []string{
+	"OPTIONS",
+	"GET",
+	"HEAD",
+	"POST",
+	"PUT",
+	"DELETE",
+	"TRACE",
+	"CONNECT",
+}
+
+// HTTP1Fast only matches the methods in the HTTP request.
+//
+// This matcher is very optimistic: if it returns true, it does not mean that
+// the request is a valid HTTP request. If you want a correct but slower HTTP1
+// matcher, use HTTP1 instead.
+func HTTP1Fast(extMethods ...string) Matcher {
+	return PrefixMatcher(append(defaultHTTPMethods, extMethods...)...)
+}
+
+// TLS matches HTTPS requests.
+//
+// By default, any TLS handshake packet is matched. An optional whitelist
+// of versions can be passed in to restrict the matcher, for example:
+//  TLS(tls.VersionTLS11, tls.VersionTLS12)
+func TLS(versions ...int) Matcher {
+	if len(versions) == 0 {
+		versions = []int{
+			tls.VersionSSL30,
+			tls.VersionTLS10,
+			tls.VersionTLS11,
+			tls.VersionTLS12,
+		}
+	}
+	prefixes := [][]byte{}
+	for _, v := range versions {
+		prefixes = append(prefixes, []byte{22, byte(v >> 8 & 0xff), byte(v & 0xff)})
+	}
+	return prefixByteMatcher(prefixes...)
+}
+
+const maxHTTPRead = 4096
+
+// HTTP1 parses the first line or up to 4096 bytes of the request to see if
+// the connection contains an HTTP request.
+func HTTP1() Matcher {
+	return func(r io.Reader) bool {
+		br := bufio.NewReader(&io.LimitedReader{R: r, N: maxHTTPRead})
+		l, part, err := br.ReadLine()
+		if err != nil || part {
+			return false
+		}
+
+		_, _, proto, ok := parseRequestLine(string(l))
+		if !ok {
+			return false
+		}
+
+		v, _, ok := http.ParseHTTPVersion(proto)
+		return ok && v == 1
+	}
+}
+
+// grabbed from net/http.
+func parseRequestLine(line string) (method, uri, proto string, ok bool) {
+	s1 := strings.Index(line, " ")
+	s2 := strings.Index(line[s1+1:], " ")
+	if s1 < 0 || s2 < 0 {
+		return
+	}
+	s2 += s1 + 1
+	return line[:s1], line[s1+1 : s2], line[s2+1:], true
+}
+
+// HTTP2 parses the frame header of the first frame to detect whether the
+// connection is an HTTP2 connection.
+func HTTP2() Matcher {
+	return hasHTTP2Preface
+}
+
+// HTTP1HeaderField returns a matcher matching the header fields of the first
+// request of an HTTP 1 connection.
+func HTTP1HeaderField(name, value string) Matcher {
+	return func(r io.Reader) bool {
+		return matchHTTP1Field(r, name, func(gotValue string) bool {
+			return gotValue == value
+		})
+	}
+}
+
+// HTTP1HeaderFieldPrefix returns a matcher matching the header fields of the
+// first request of an HTTP 1 connection. If the header with key name has a
+// value prefixed with valuePrefix, this will match.
+func HTTP1HeaderFieldPrefix(name, valuePrefix string) Matcher {
+	return func(r io.Reader) bool {
+		return matchHTTP1Field(r, name, func(gotValue string) bool {
+			return strings.HasPrefix(gotValue, valuePrefix)
+		})
+	}
+}
+
+// HTTP2HeaderField returns a matcher matching the header fields of the first
+// headers frame.
+func HTTP2HeaderField(name, value string) Matcher {
+	return func(r io.Reader) bool {
+		return matchHTTP2Field(ioutil.Discard, r, name, func(gotValue string) bool {
+			return gotValue == value
+		})
+	}
+}
+
+// HTTP2HeaderFieldPrefix returns a matcher matching the header fields of the
+// first headers frame. If the header with key name has a value prefixed with
+// valuePrefix, this will match.
+func HTTP2HeaderFieldPrefix(name, valuePrefix string) Matcher {
+	return func(r io.Reader) bool {
+		return matchHTTP2Field(ioutil.Discard, r, name, func(gotValue string) bool {
+			return strings.HasPrefix(gotValue, valuePrefix)
+		})
+	}
+}
+
+// HTTP2MatchHeaderFieldSendSettings matches the header field and writes the
+// settings to the server. Prefer HTTP2HeaderField over this one if the client
+// does not block on receiving a SETTINGS frame.
+func HTTP2MatchHeaderFieldSendSettings(name, value string) MatchWriter {
+	return func(w io.Writer, r io.Reader) bool {
+		return matchHTTP2Field(w, r, name, func(gotValue string) bool {
+			return gotValue == value
+		})
+	}
+}
+
+// HTTP2MatchHeaderFieldPrefixSendSettings matches the header field prefix
+// and writes the settings to the server. Prefer HTTP2HeaderFieldPrefix over
+// this one if the client does not block on receiving a SETTINGS frame.
+func HTTP2MatchHeaderFieldPrefixSendSettings(name, valuePrefix string) MatchWriter {
+	return func(w io.Writer, r io.Reader) bool {
+		return matchHTTP2Field(w, r, name, func(gotValue string) bool {
+			return strings.HasPrefix(gotValue, valuePrefix)
+		})
+	}
+}
+
+func hasHTTP2Preface(r io.Reader) bool {
+	var b [len(http2.ClientPreface)]byte
+	last := 0
+
+	for {
+		n, err := r.Read(b[last:])
+		if err != nil {
+			return false
+		}
+
+		last += n
+		eq := string(b[:last]) == http2.ClientPreface[:last]
+		if last == len(http2.ClientPreface) {
+			return eq
+		}
+		if !eq {
+			return false
+		}
+	}
+}
+
+func matchHTTP1Field(r io.Reader, name string, matches func(string) bool) (matched bool) {
+	req, err := http.ReadRequest(bufio.NewReader(r))
+	if err != nil {
+		return false
+	}
+
+	return matches(req.Header.Get(name))
+}
+
+func matchHTTP2Field(w io.Writer, r io.Reader, name string, matches func(string) bool) (matched bool) {
+	if !hasHTTP2Preface(r) {
+		return false
+	}
+
+	done := false
+	framer := http2.NewFramer(w, r)
+	hdec := hpack.NewDecoder(uint32(4<<10), func(hf hpack.HeaderField) {
+		if hf.Name == name {
+			done = true
+			if matches(hf.Value) {
+				matched = true
+			}
+		}
+	})
+	for {
+		f, err := framer.ReadFrame()
+		if err != nil {
+			return false
+		}
+
+		switch f := f.(type) {
+		case *http2.SettingsFrame:
+			// Sender acknowledged the SETTINGS frame. No need to write
+			// SETTINGS again.
+			if f.IsAck() {
+				break
+			}
+			if err := framer.WriteSettings(); err != nil {
+				return false
+			}
+		case *http2.ContinuationFrame:
+			if _, err := hdec.Write(f.HeaderBlockFragment()); err != nil {
+				return false
+			}
+			done = done || f.FrameHeader.Flags&http2.FlagHeadersEndHeaders != 0
+		case *http2.HeadersFrame:
+			if _, err := hdec.Write(f.HeaderBlockFragment()); err != nil {
+				return false
+			}
+			done = done || f.FrameHeader.Flags&http2.FlagHeadersEndHeaders != 0
+		}
+
+		if done {
+			return matched
+		}
+	}
+}
diff --git a/vendor/github.com/soheilhy/cmux/patricia.go b/vendor/github.com/soheilhy/cmux/patricia.go
new file mode 100644
index 0000000..c3e3d85
--- /dev/null
+++ b/vendor/github.com/soheilhy/cmux/patricia.go
@@ -0,0 +1,179 @@
+// Copyright 2016 The CMux Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+// implied. See the License for the specific language governing
+// permissions and limitations under the License.
+
+package cmux
+
+import (
+	"bytes"
+	"io"
+)
+
+// patriciaTree is a simple patricia tree that handles []byte instead of string
+// and cannot be changed after instantiation.
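+//
+// For example, a tree built with newPatriciaTreeString("GET ", "POST")
+// answers matchPrefix with true for any reader whose stream begins with
+// either of those strings.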
+type patriciaTree struct {
+	root     *ptNode
+	maxDepth int // max depth of the tree.
+}
+
+func newPatriciaTree(bs ...[]byte) *patriciaTree {
+	max := 0
+	for _, b := range bs {
+		if max < len(b) {
+			max = len(b)
+		}
+	}
+	return &patriciaTree{
+		root:     newNode(bs),
+		maxDepth: max + 1,
+	}
+}
+
+func newPatriciaTreeString(strs ...string) *patriciaTree {
+	b := make([][]byte, len(strs))
+	for i, s := range strs {
+		b[i] = []byte(s)
+	}
+	return newPatriciaTree(b...)
+}
+
+func (t *patriciaTree) matchPrefix(r io.Reader) bool {
+	buf := make([]byte, t.maxDepth)
+	n, _ := io.ReadFull(r, buf)
+	return t.root.match(buf[:n], true)
+}
+
+func (t *patriciaTree) match(r io.Reader) bool {
+	buf := make([]byte, t.maxDepth)
+	n, _ := io.ReadFull(r, buf)
+	return t.root.match(buf[:n], false)
+}
+
+type ptNode struct {
+	prefix   []byte
+	next     map[byte]*ptNode
+	terminal bool
+}
+
+func newNode(strs [][]byte) *ptNode {
+	if len(strs) == 0 {
+		return &ptNode{
+			prefix:   []byte{},
+			terminal: true,
+		}
+	}
+
+	if len(strs) == 1 {
+		return &ptNode{
+			prefix:   strs[0],
+			terminal: true,
+		}
+	}
+
+	p, strs := splitPrefix(strs)
+	n := &ptNode{
+		prefix: p,
+	}
+
+	nexts := make(map[byte][][]byte)
+	for _, s := range strs {
+		if len(s) == 0 {
+			n.terminal = true
+			continue
+		}
+		nexts[s[0]] = append(nexts[s[0]], s[1:])
+	}
+
+	n.next = make(map[byte]*ptNode)
+	for first, rests := range nexts {
+		n.next[first] = newNode(rests)
+	}
+
+	return n
+}
+
+func splitPrefix(bss [][]byte) (prefix []byte, rest [][]byte) {
+	if len(bss) == 0 || len(bss[0]) == 0 {
+		return prefix, bss
+	}
+
+	if len(bss) == 1 {
+		return bss[0], [][]byte{{}}
+	}
+
+	for i := 0; ; i++ {
+		var cur byte
+		eq := true
+		for j, b := range bss {
+			if len(b) <= i {
+				eq = false
+				break
+			}
+
+			if j == 0 {
+				cur = b[i]
+				continue
+			}
+
+			if cur != b[i] {
+				eq = false
+				break
+			}
+		}
+
+		if !eq {
+			break
+		}
+
+		prefix = append(prefix, cur)
+	}
+
+	rest = make([][]byte, 0, len(bss))
+	for _, b := range bss {
+		rest = append(rest, b[len(prefix):])
+	}
+
+	return prefix, rest
+}
+
+func (n *ptNode) match(b []byte, prefix bool) bool {
+	l := len(n.prefix)
+	if l > 0 {
+		if l > len(b) {
+			l = len(b)
+		}
+		if !bytes.Equal(b[:l], n.prefix) {
+			return false
+		}
+	}
+
+	if n.terminal && (prefix || len(n.prefix) == len(b)) {
+		return true
+	}
+
+	if l >= len(b) {
+		return false
+	}
+
+	nextN, ok := n.next[b[l]]
+	if !ok {
+		return false
+	}
+
+	if l == len(b) {
+		b = b[l:l]
+	} else {
+		b = b[l+1:]
+	}
+	return nextN.match(b, prefix)
+}
diff --git a/vendor/github.com/spf13/cobra/.gitignore b/vendor/github.com/spf13/cobra/.gitignore
new file mode 100644
index 0000000..1b8c7c2
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/.gitignore
@@ -0,0 +1,36 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+# Vim files https://github.com/github/gitignore/blob/master/Global/Vim.gitignore
+# swap
+[._]*.s[a-w][a-z]
+[._]s[a-w][a-z]
+# session
+Session.vim
+# temporary
+.netrwhist
+*~
+# auto-generated tag files
+tags
+
+*.exe
+
+cobra.test
diff --git a/vendor/github.com/spf13/cobra/.mailmap b/vendor/github.com/spf13/cobra/.mailmap
new file mode 100644
index 0000000..94ec530
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/.mailmap
@@ -0,0 +1,3 @@
+Steve Francia <steve.francia@gmail.com>
+Bjørn Erik Pedersen <bjorn.erik.pedersen@gmail.com>
+Fabiano Franz <ffranz@redhat.com>                   <contact@fabianofranz.com>
diff --git a/vendor/github.com/spf13/cobra/.travis.yml b/vendor/github.com/spf13/cobra/.travis.yml
new file mode 100644
index 0000000..5afcb20
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/.travis.yml
@@ -0,0 +1,21 @@
+language: go
+
+matrix:
+  include:
+    - go: 1.9.4
+    - go: 1.10.0
+    - go: tip
+  allow_failures:
+    - go: tip
+
+before_install:
+  - mkdir -p bin
+  - curl -Lso bin/shellcheck https://github.com/caarlos0/shellcheck-docker/releases/download/v0.4.3/shellcheck
+  - chmod +x bin/shellcheck
+script:
+  - PATH=$PATH:$PWD/bin go test -v ./...
+  - go build
+  - diff -u <(echo -n) <(gofmt -d -s .)
+  - if [ -z $NOVET ]; then
+      diff -u <(echo -n) <(go tool vet . 2>&1 | grep -vE 'ExampleCommand|bash_completions.*Fprint');
+    fi
diff --git a/vendor/github.com/spf13/cobra/LICENSE.txt b/vendor/github.com/spf13/cobra/LICENSE.txt
new file mode 100644
index 0000000..298f0e2
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/LICENSE.txt
@@ -0,0 +1,174 @@
+                                Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md
new file mode 100644
index 0000000..851fcc0
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/README.md
@@ -0,0 +1,736 @@
+![cobra logo](https://cloud.githubusercontent.com/assets/173412/10886352/ad566232-814f-11e5-9cd0-aa101788c117.png)
+
+Cobra is both a library for creating powerful modern CLI applications and a program to generate applications and command files.
+
+Many of the most widely used Go projects are built using Cobra including:
+
+* [Kubernetes](http://kubernetes.io/)
+* [Hugo](http://gohugo.io)
+* [rkt](https://github.com/coreos/rkt)
+* [etcd](https://github.com/coreos/etcd)
+* [Moby (formerly Docker)](https://github.com/moby/moby)
+* [Docker (distribution)](https://github.com/docker/distribution)
+* [OpenShift](https://www.openshift.com/)
+* [Delve](https://github.com/derekparker/delve)
+* [GopherJS](http://www.gopherjs.org/)
+* [CockroachDB](http://www.cockroachlabs.com/)
+* [Bleve](http://www.blevesearch.com/)
+* [ProjectAtomic (enterprise)](http://www.projectatomic.io/)
+* [GiantSwarm's swarm](https://github.com/giantswarm/cli)
+* [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack)
+* [rclone](http://rclone.org/)
+* [nehm](https://github.com/bogem/nehm)
+* [Pouch](https://github.com/alibaba/pouch)
+
+[![Build Status](https://travis-ci.org/spf13/cobra.svg "Travis CI status")](https://travis-ci.org/spf13/cobra)
+[![CircleCI status](https://circleci.com/gh/spf13/cobra.png?circle-token=:circle-token "CircleCI status")](https://circleci.com/gh/spf13/cobra)
+[![GoDoc](https://godoc.org/github.com/spf13/cobra?status.svg)](https://godoc.org/github.com/spf13/cobra)
+
+# Table of Contents
+
+- [Overview](#overview)
+- [Concepts](#concepts)
+  * [Commands](#commands)
+  * [Flags](#flags)
+- [Installing](#installing)
+- [Getting Started](#getting-started)
+  * [Using the Cobra Generator](#using-the-cobra-generator)
+  * [Using the Cobra Library](#using-the-cobra-library)
+  * [Working with Flags](#working-with-flags)
+  * [Positional and Custom Arguments](#positional-and-custom-arguments)
+  * [Example](#example)
+  * [Help Command](#help-command)
+  * [Usage Message](#usage-message)
+  * [PreRun and PostRun Hooks](#prerun-and-postrun-hooks)
+  * [Suggestions when "unknown command" happens](#suggestions-when-unknown-command-happens)
+  * [Generating documentation for your command](#generating-documentation-for-your-command)
+  * [Generating bash completions](#generating-bash-completions)
+- [Contributing](#contributing)
+- [License](#license)
+
+# Overview
+
+Cobra is a library providing a simple interface to create powerful modern CLI
+interfaces similar to git & go tools.
+
+Cobra is also an application that will generate your application scaffolding to rapidly
+develop a Cobra-based application.
+
+Cobra provides:
+* Easy subcommand-based CLIs: `app server`, `app fetch`, etc.
+* Fully POSIX-compliant flags (including short & long versions)
+* Nested subcommands
+* Global, local and cascading flags
+* Easy generation of applications & commands with `cobra init appname` & `cobra add cmdname`
+* Intelligent suggestions (`app srver`... did you mean `app server`?)
+* Automatic help generation for commands and flags
+* Automatic help flag recognition of `-h`, `--help`, etc.
+* Automatically generated bash autocomplete for your application
+* Automatically generated man pages for your application
+* Command aliases so you can change things without breaking them
+* The flexibility to define your own help, usage, etc.
+* Optional tight integration with [viper](http://github.com/spf13/viper) for 12-factor apps
+
+# Concepts
+
+Cobra is built on a structure of commands, arguments & flags.
+
+**Commands** represent actions, **Args** are things and **Flags** are modifiers for those actions.
+
+The best applications will read like sentences when used. Users will know how
+to use the application because they will natively understand how to use it.
+
+The pattern to follow is
+`APPNAME VERB NOUN --ADJECTIVE.`
+    or
+`APPNAME COMMAND ARG --FLAG`
+
+A few good real world examples may better illustrate this point.
+
+In the following example, 'server' is a command, and 'port' is a flag:
+
+    hugo server --port=1313
+
+In this command we are telling Git to clone the URL bare.
+
+    git clone URL --bare
+
+## Commands
+
+Command is the central point of the application. Each interaction that
+the application supports will be contained in a Command. A command can
+have child commands and optionally run an action.
+
+In the example above, 'server' is the command.
+
+[More about cobra.Command](https://godoc.org/github.com/spf13/cobra#Command)
+
+## Flags
+
+A flag is a way to modify the behavior of a command. Cobra supports
+fully POSIX-compliant flags as well as the Go [flag package](https://golang.org/pkg/flag/).
+A Cobra command can define flags that persist through to child commands
+and flags that are only available to that command.
+
+In the example above, 'port' is the flag.
+
+Flag functionality is provided by the [pflag
+library](https://github.com/spf13/pflag), a fork of the flag standard library
+which maintains the same interface while adding POSIX compliance.
+
+# Installing
+Using Cobra is easy. First, use `go get` to install the latest version
+of the library. This command will install the `cobra` generator executable
+along with the library and its dependencies:
+
+    go get -u github.com/spf13/cobra/cobra
+
+Next, include Cobra in your application:
+
+```go
+import "github.com/spf13/cobra"
+```
+
+# Getting Started
+
+While you are welcome to provide your own organization, typically a Cobra-based
+application will follow this organizational structure:
+
+```
+  ▾ appName/
+    ▾ cmd/
+        add.go
+        your.go
+        commands.go
+        here.go
+      main.go
+```
+
+In a Cobra app, typically the main.go file is very bare. It serves one purpose: initializing Cobra.
+
+```go
+package main
+
+import (
+  "fmt"
+  "os"
+
+  "{pathToYourApp}/cmd"
+)
+
+func main() {
+  cmd.Execute()
+}
+```
+
+## Using the Cobra Generator
+
+Cobra provides its own program that will create your application and add any
+commands you want. It's the easiest way to incorporate Cobra into your application.
+
+[Here](https://github.com/spf13/cobra/blob/master/cobra/README.md) you can find more information about it.
+
+## Using the Cobra Library
+
+To manually implement Cobra you need to create a bare main.go file and a rootCmd file.
+You will optionally provide additional commands as you see fit.
+
+### Create rootCmd
+
+Cobra doesn't require any special constructors. Simply create your commands.
+
+Ideally you place this in app/cmd/root.go:
+
+```go
+var rootCmd = &cobra.Command{
+  Use:   "hugo",
+  Short: "Hugo is a very fast static site generator",
+  Long: `A Fast and Flexible Static Site Generator built with
+                love by spf13 and friends in Go.
+                Complete documentation is available at http://hugo.spf13.com`,
+  Run: func(cmd *cobra.Command, args []string) {
+    // Do Stuff Here
+  },
+}
+
+func Execute() {
+  if err := rootCmd.Execute(); err != nil {
+    fmt.Println(err)
+    os.Exit(1)
+  }
+}
+```
+
+You will additionally define flags and handle configuration in your init() function.
+
+For example cmd/root.go:
+
+```go
+import (
+  "fmt"
+  "os"
+
+  homedir "github.com/mitchellh/go-homedir"
+  "github.com/spf13/cobra"
+  "github.com/spf13/viper"
+)
+
+// These variables are read by the flag definitions below.
+var cfgFile, projectBase, userLicense string
+
+func init() {
+  cobra.OnInitialize(initConfig)
+  rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)")
+  rootCmd.PersistentFlags().StringVarP(&projectBase, "projectbase", "b", "", "base project directory eg. github.com/spf13/")
+  rootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "Author name for copyright attribution")
+  rootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "Name of license for the project (can provide `licensetext` in config)")
+  rootCmd.PersistentFlags().Bool("viper", true, "Use Viper for configuration")
+  viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author"))
+  viper.BindPFlag("projectbase", rootCmd.PersistentFlags().Lookup("projectbase"))
+  viper.BindPFlag("useViper", rootCmd.PersistentFlags().Lookup("viper"))
+  viper.SetDefault("author", "NAME HERE <EMAIL ADDRESS>")
+  viper.SetDefault("license", "apache")
+}
+
+func initConfig() {
+  // Don't forget to read config either from cfgFile or from home directory!
+  if cfgFile != "" {
+    // Use config file from the flag.
+    viper.SetConfigFile(cfgFile)
+  } else {
+    // Find home directory.
+    home, err := homedir.Dir()
+    if err != nil {
+      fmt.Println(err)
+      os.Exit(1)
+    }
+
+    // Search config in home directory with name ".cobra" (without extension).
+    viper.AddConfigPath(home)
+    viper.SetConfigName(".cobra")
+  }
+
+  if err := viper.ReadInConfig(); err != nil {
+    fmt.Println("Can't read config:", err)
+    os.Exit(1)
+  }
+}
+```
+
+### Create your main.go
+
+With the root command you need to have your main function execute it.
+Execute should be run on the root for clarity, though it can be called on any command.
+
+In a Cobra app, typically the main.go file is very bare. It serves one purpose: to initialize Cobra.
+
+```go
+package main
+
+import (
+  "fmt"
+  "os"
+
+  "{pathToYourApp}/cmd"
+)
+
+func main() {
+  cmd.Execute()
+}
+```
+
+### Create additional commands
+
+Additional commands can be defined, and each is typically given its own file
+inside the cmd/ directory.
+
+If you wanted to create a version command you would create cmd/version.go and
+populate it with the following:
+
+```go
+package cmd
+
+import (
+  "fmt"
+
+  "github.com/spf13/cobra"
+)
+
+func init() {
+  rootCmd.AddCommand(versionCmd)
+}
+
+var versionCmd = &cobra.Command{
+  Use:   "version",
+  Short: "Print the version number of Hugo",
+  Long:  `All software has versions. This is Hugo's`,
+  Run: func(cmd *cobra.Command, args []string) {
+    fmt.Println("Hugo Static Site Generator v0.9 -- HEAD")
+  },
+}
+```
+
+## Working with Flags
+
+Flags provide modifiers that control how a command operates.
+
+### Assign flags to a command
+
+Since flags are defined and used in different locations, we need to define
+a variable outside the command, with the appropriate scope, to hold the
+flag's value.
+
+```go
+var Verbose bool
+var Source string
+```
+
+There are two approaches to assigning a flag.
+
+### Persistent Flags
+
+A flag can be 'persistent', meaning it will be available to the
+command it's assigned to as well as every command under that command. For
+global flags, assign the flag as a persistent flag on the root command.
+
+```go
+rootCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose output")
+```
+
+### Local Flags
+
+A flag can also be assigned locally, in which case it applies only to that specific command.
+
+```go
+rootCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from")
+```
+
+### Local Flag on Parent Commands
+
+By default, Cobra only parses local flags on the target command; any local flags on
+parent commands are ignored. By enabling `Command.TraverseChildren`, Cobra will
+parse local flags on each command before executing the target command.
+
+```go
+command := cobra.Command{
+  Use: "print [OPTIONS] [COMMANDS]",
+  TraverseChildren: true,
+}
+```
+
+### Bind Flags with Config
+
+You can also bind your flags with [viper](https://github.com/spf13/viper):
+```go
+var author string
+
+func init() {
+  rootCmd.PersistentFlags().StringVar(&author, "author", "YOUR NAME", "Author name for copyright attribution")
+  viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author"))
+}
+```
+
+In this example, the persistent flag `author` is bound with `viper`.
+**Note:** the variable `author` will not be set to the value from the config
+file when the `--author` flag is not provided by the user.
+
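+Because of this, any value that may come from either the flag or the config
+file should be read through `viper` itself. A minimal sketch, assuming the
+binding above:
+
+```go
+// Returns the --author flag value when the flag is passed, otherwise the
+// value from the config file, otherwise the registered default.
+fmt.Println("Author:", viper.GetString("author"))
+```
+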
+More in [viper documentation](https://github.com/spf13/viper#working-with-flags).
+
+### Required flags
+
+Flags are optional by default. If instead you wish your command to report an error
+when a flag has not been set, mark it as required:
+```go
+rootCmd.Flags().StringVarP(&Region, "region", "r", "", "AWS region (required)")
+rootCmd.MarkFlagRequired("region")
+```
+
+## Positional and Custom Arguments
+
+Validation of positional arguments can be specified using the `Args` field
+of `Command`.
+
+The following validators are built in:
+
+- `NoArgs` - the command will report an error if there are any positional args.
+- `ArbitraryArgs` - the command will accept any args.
+- `OnlyValidArgs` - the command will report an error if there are any positional args that are not in the `ValidArgs` field of `Command`.
+- `MinimumNArgs(int)` - the command will report an error if there are not at least N positional args.
+- `MaximumNArgs(int)` - the command will report an error if there are more than N positional args.
+- `ExactArgs(int)` - the command will report an error if there are not exactly N positional args.
+- `RangeArgs(min, max)` - the command will report an error if the number of args is not between the minimum and maximum number of expected args.
+
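+A built-in validator is assigned directly to the `Args` field. A minimal
+sketch, using a hypothetical `greet` command:
+
+```go
+var greetCmd = &cobra.Command{
+  Use:  "greet [name]",
+  Args: cobra.ExactArgs(1),
+  Run: func(cmd *cobra.Command, args []string) {
+    fmt.Println("Hello, " + args[0])
+  },
+}
+```
+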
+An example of setting a custom validator:
+
+```go
+var cmd = &cobra.Command{
+  Short: "hello",
+  Args: func(cmd *cobra.Command, args []string) error {
+    if len(args) < 1 {
+      return errors.New("requires at least one arg")
+    }
+    if myapp.IsValidColor(args[0]) {
+      return nil
+    }
+    return fmt.Errorf("invalid color specified: %s", args[0])
+  },
+  Run: func(cmd *cobra.Command, args []string) {
+    fmt.Println("Hello, World!")
+  },
+}
+```
+
+## Example
+
+In the example below, we have defined three commands. Two are at the top level,
+and one (cmdTimes) is a child of one of the top-level commands. In this case the
+root command is not executable, meaning that a subcommand is required. This is
+accomplished by not providing a 'Run' for the 'rootCmd'.
+
+We have only defined one flag for a single command.
+
+More documentation about flags is available at https://github.com/spf13/pflag
+
+```go
+package main
+
+import (
+  "fmt"
+  "strings"
+
+  "github.com/spf13/cobra"
+)
+
+func main() {
+  var echoTimes int
+
+  var cmdPrint = &cobra.Command{
+    Use:   "print [string to print]",
+    Short: "Print anything to the screen",
+    Long: `print is for printing anything back to the screen.
+For many years people have printed back to the screen.`,
+    Args: cobra.MinimumNArgs(1),
+    Run: func(cmd *cobra.Command, args []string) {
+      fmt.Println("Print: " + strings.Join(args, " "))
+    },
+  }
+
+  var cmdEcho = &cobra.Command{
+    Use:   "echo [string to echo]",
+    Short: "Echo anything to the screen",
+    Long: `echo is for echoing anything back.
+Echo works a lot like print, except it has a child command.`,
+    Args: cobra.MinimumNArgs(1),
+    Run: func(cmd *cobra.Command, args []string) {
+      fmt.Println("Echo: " + strings.Join(args, " "))
+    },
+  }
+
+  var cmdTimes = &cobra.Command{
+    Use:   "times [# times] [string to echo]",
+    Short: "Echo anything to the screen more times",
+    Long: `echo things multiple times back to the user by providing
+a count and a string.`,
+    Args: cobra.MinimumNArgs(1),
+    Run: func(cmd *cobra.Command, args []string) {
+      for i := 0; i < echoTimes; i++ {
+        fmt.Println("Echo: " + strings.Join(args, " "))
+      }
+    },
+  }
+
+  cmdTimes.Flags().IntVarP(&echoTimes, "times", "t", 1, "times to echo the input")
+
+  var rootCmd = &cobra.Command{Use: "app"}
+  rootCmd.AddCommand(cmdPrint, cmdEcho)
+  cmdEcho.AddCommand(cmdTimes)
+  rootCmd.Execute()
+}
+```
+
+For a more complete example of a larger application, please check out [Hugo](http://gohugo.io/).
+
+## Help Command
+
+Cobra automatically adds a help command to your application when you have subcommands.
+This will be called when a user runs 'app help'. Additionally, help will also
+support all other commands as input. Say, for instance, you have a command called
+'create' without any additional configuration; Cobra will work when 'app help
+create' is called.  Every command will automatically have the '--help' flag added.
+
+### Example
+
+The following output is automatically generated by Cobra. Nothing beyond the
+command and flag definitions is needed.
+
+    $ cobra help
+
+    Cobra is a CLI library for Go that empowers applications.
+    This application is a tool to generate the needed files
+    to quickly create a Cobra application.
+
+    Usage:
+      cobra [command]
+
+    Available Commands:
+      add         Add a command to a Cobra Application
+      help        Help about any command
+      init        Initialize a Cobra Application
+
+    Flags:
+      -a, --author string    author name for copyright attribution (default "YOUR NAME")
+          --config string    config file (default is $HOME/.cobra.yaml)
+      -h, --help             help for cobra
+      -l, --license string   name of license for the project
+          --viper            use Viper for configuration (default true)
+
+    Use "cobra [command] --help" for more information about a command.
+
+
+Help is just a command like any other. There is no special logic or behavior
+around it. In fact, you can provide your own if you want.
+
+### Defining your own help
+
+You can provide your own Help command, or your own template for the default command to use,
+with the following functions:
+
+```go
+cmd.SetHelpCommand(cmd *Command)
+cmd.SetHelpFunc(f func(*Command, []string))
+cmd.SetHelpTemplate(s string)
+```
+
+The latter two will also apply to any child commands.
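+
+For example, a minimal sketch of a custom help template. Templates execute
+against the `Command`, so fields such as `.Long` and methods such as
+`.UsageString` are available:
+
+```go
+rootCmd.SetHelpTemplate(`{{with (or .Long .Short)}}{{.}}
+
+{{end}}{{.UsageString}}`)
+```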
+
+## Usage Message
+
+When the user provides an invalid flag or invalid command, Cobra responds by
+showing the user the 'usage'.
+
+### Example
+You may recognize this from the help above. That's because the default help
+embeds the usage as part of its output.
+
+    $ cobra --invalid
+    Error: unknown flag: --invalid
+    Usage:
+      cobra [command]
+
+    Available Commands:
+      add         Add a command to a Cobra Application
+      help        Help about any command
+      init        Initialize a Cobra Application
+
+    Flags:
+      -a, --author string    author name for copyright attribution (default "YOUR NAME")
+          --config string    config file (default is $HOME/.cobra.yaml)
+      -h, --help             help for cobra
+      -l, --license string   name of license for the project
+          --viper            use Viper for configuration (default true)
+
+    Use "cobra [command] --help" for more information about a command.
+
+### Defining your own usage
+You can provide your own usage function or template for Cobra to use.
+Like help, the function and template are overridable through public methods:
+
+```go
+cmd.SetUsageFunc(f func(*Command) error)
+cmd.SetUsageTemplate(s string)
+```
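+
+A sketch of a custom usage function that prints only the use line:
+
+```go
+rootCmd.SetUsageFunc(func(cmd *cobra.Command) error {
+  // Replace the full default usage template with a single line.
+  fmt.Fprintf(cmd.OutOrStderr(), "Usage: %s\n", cmd.UseLine())
+  return nil
+})
+```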
+
+## Version Flag
+
+Cobra adds a top-level '--version' flag if the Version field is set on the root command.
+Running an application with the '--version' flag will print the version to stdout using
+the version template. The template can be customized using the
+`cmd.SetVersionTemplate(s string)` function.
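+
+A minimal sketch:
+
+```go
+var rootCmd = &cobra.Command{
+  Use:     "app",
+  Version: "0.1.0", // setting Version enables the --version flag
+}
+
+// Optional: override the default version output.
+rootCmd.SetVersionTemplate("{{.Name}} version {{.Version}}\n")
+```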
+
+## PreRun and PostRun Hooks
+
+It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistentPostRun` and `PostRun` will be executed after `Run`.  The `Persistent*Run` functions will be inherited by children if they do not declare their own.  These functions are run in the following order:
+
+- `PersistentPreRun`
+- `PreRun`
+- `Run`
+- `PostRun`
+- `PersistentPostRun`
+
+An example of two commands which use all of these features is below.  When the subcommand is executed, it will run the root command's `PersistentPreRun` but not the root command's `PersistentPostRun`:
+
+```go
+package main
+
+import (
+  "fmt"
+
+  "github.com/spf13/cobra"
+)
+
+func main() {
+
+  var rootCmd = &cobra.Command{
+    Use:   "root [sub]",
+    Short: "My root command",
+    PersistentPreRun: func(cmd *cobra.Command, args []string) {
+      fmt.Printf("Inside rootCmd PersistentPreRun with args: %v\n", args)
+    },
+    PreRun: func(cmd *cobra.Command, args []string) {
+      fmt.Printf("Inside rootCmd PreRun with args: %v\n", args)
+    },
+    Run: func(cmd *cobra.Command, args []string) {
+      fmt.Printf("Inside rootCmd Run with args: %v\n", args)
+    },
+    PostRun: func(cmd *cobra.Command, args []string) {
+      fmt.Printf("Inside rootCmd PostRun with args: %v\n", args)
+    },
+    PersistentPostRun: func(cmd *cobra.Command, args []string) {
+      fmt.Printf("Inside rootCmd PersistentPostRun with args: %v\n", args)
+    },
+  }
+
+  var subCmd = &cobra.Command{
+    Use:   "sub [no options!]",
+    Short: "My subcommand",
+    PreRun: func(cmd *cobra.Command, args []string) {
+      fmt.Printf("Inside subCmd PreRun with args: %v\n", args)
+    },
+    Run: func(cmd *cobra.Command, args []string) {
+      fmt.Printf("Inside subCmd Run with args: %v\n", args)
+    },
+    PostRun: func(cmd *cobra.Command, args []string) {
+      fmt.Printf("Inside subCmd PostRun with args: %v\n", args)
+    },
+    PersistentPostRun: func(cmd *cobra.Command, args []string) {
+      fmt.Printf("Inside subCmd PersistentPostRun with args: %v\n", args)
+    },
+  }
+
+  rootCmd.AddCommand(subCmd)
+
+  rootCmd.SetArgs([]string{""})
+  rootCmd.Execute()
+  fmt.Println()
+  rootCmd.SetArgs([]string{"sub", "arg1", "arg2"})
+  rootCmd.Execute()
+}
+```
+
+Output:
+```
+Inside rootCmd PersistentPreRun with args: []
+Inside rootCmd PreRun with args: []
+Inside rootCmd Run with args: []
+Inside rootCmd PostRun with args: []
+Inside rootCmd PersistentPostRun with args: []
+
+Inside rootCmd PersistentPreRun with args: [arg1 arg2]
+Inside subCmd PreRun with args: [arg1 arg2]
+Inside subCmd Run with args: [arg1 arg2]
+Inside subCmd PostRun with args: [arg1 arg2]
+Inside subCmd PersistentPostRun with args: [arg1 arg2]
+```
+
+## Suggestions when "unknown command" happens
+
+Cobra will print automatic suggestions when "unknown command" errors happen. This allows Cobra to behave similarly to the `git` command when a typo is made. For example:
+
+```
+$ hugo srever
+Error: unknown command "srever" for "hugo"
+
+Did you mean this?
+        server
+
+Run 'hugo --help' for usage.
+```
+
+Suggestions are generated automatically, based on every registered subcommand, using an implementation of [Levenshtein distance](http://en.wikipedia.org/wiki/Levenshtein_distance). Every registered command within a minimum distance of 2 (ignoring case) will be displayed as a suggestion.
+
+If you need to disable suggestions or tweak the string distance in your command, use:
+
+```go
+command.DisableSuggestions = true
+```
+
+or
+
+```go
+command.SuggestionsMinimumDistance = 1
+```
+
+You can also explicitly set names for which a given command will be suggested using the `SuggestFor` attribute. This allows suggestions for strings that are not close in terms of string distance, but that make sense in your set of commands and for which you don't want aliases. Example:
+
+```
+$ kubectl remove
+Error: unknown command "remove" for "kubectl"
+
+Did you mean this?
+        delete
+
+Run 'kubectl help' for usage.
+```
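+
+A sketch of how such a command might declare those names:
+
+```go
+deleteCmd := &cobra.Command{
+  Use:   "delete",
+  Short: "Delete resources",
+  // "remove" is not close to "delete" by string distance, so list it
+  // explicitly to produce the suggestion above.
+  SuggestFor: []string{"remove", "rm"},
+}
+```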
+
+## Generating documentation for your command
+
+Cobra can generate documentation based on subcommands, flags, etc. in the following formats:
+
+- [Markdown](doc/md_docs.md)
+- [ReStructured Text](doc/rest_docs.md)
+- [Man Page](doc/man_docs.md)
+
+## Generating bash completions
+
+Cobra can generate a bash-completion file. If you add more information to your command, these completions can be amazingly powerful and flexible.  Read more about it in [Bash Completions](bash_completions.md).
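+
+A minimal sketch, writing the completion script for your root command:
+
+```go
+// Generates completions for rootCmd and all of its subcommands.
+if err := rootCmd.GenBashCompletionFile("app_completion.sh"); err != nil {
+  fmt.Println(err)
+  os.Exit(1)
+}
+```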
+
+# Contributing
+
+1. Fork it
+2. Download your fork to your PC (`git clone https://github.com/your_username/cobra && cd cobra`)
+3. Create your feature branch (`git checkout -b my-new-feature`)
+4. Make changes and add them (`git add .`)
+5. Commit your changes (`git commit -m 'Add some feature'`)
+6. Push to the branch (`git push origin my-new-feature`)
+7. Create a new pull request
+
+# License
+
+Cobra is released under the Apache 2.0 license. See [LICENSE.txt](https://github.com/spf13/cobra/blob/master/LICENSE.txt)
diff --git a/vendor/github.com/spf13/cobra/args.go b/vendor/github.com/spf13/cobra/args.go
new file mode 100644
index 0000000..a5d8a92
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/args.go
@@ -0,0 +1,89 @@
+package cobra
+
+import (
+	"fmt"
+)
+
+type PositionalArgs func(cmd *Command, args []string) error
+
+// Legacy arg validation has the following behaviour:
+// - root commands with no subcommands can take arbitrary arguments
+// - root commands with subcommands will do subcommand validity checking
+// - subcommands will always accept arbitrary arguments
+func legacyArgs(cmd *Command, args []string) error {
+	// no subcommand, always take args
+	if !cmd.HasSubCommands() {
+		return nil
+	}
+
+	// root command with subcommands, do subcommand checking.
+	if !cmd.HasParent() && len(args) > 0 {
+		return fmt.Errorf("unknown command %q for %q%s", args[0], cmd.CommandPath(), cmd.findSuggestions(args[0]))
+	}
+	return nil
+}
+
+// NoArgs returns an error if any args are included.
+func NoArgs(cmd *Command, args []string) error {
+	if len(args) > 0 {
+		return fmt.Errorf("unknown command %q for %q", args[0], cmd.CommandPath())
+	}
+	return nil
+}
+
+// OnlyValidArgs returns an error if any args are not in the list of ValidArgs.
+func OnlyValidArgs(cmd *Command, args []string) error {
+	if len(cmd.ValidArgs) > 0 {
+		for _, v := range args {
+			if !stringInSlice(v, cmd.ValidArgs) {
+				return fmt.Errorf("invalid argument %q for %q%s", v, cmd.CommandPath(), cmd.findSuggestions(args[0]))
+			}
+		}
+	}
+	return nil
+}
+
+// ArbitraryArgs never returns an error.
+func ArbitraryArgs(cmd *Command, args []string) error {
+	return nil
+}
+
+// MinimumNArgs returns an error if there are not at least N args.
+func MinimumNArgs(n int) PositionalArgs {
+	return func(cmd *Command, args []string) error {
+		if len(args) < n {
+			return fmt.Errorf("requires at least %d arg(s), only received %d", n, len(args))
+		}
+		return nil
+	}
+}
+
+// MaximumNArgs returns an error if there are more than N args.
+func MaximumNArgs(n int) PositionalArgs {
+	return func(cmd *Command, args []string) error {
+		if len(args) > n {
+			return fmt.Errorf("accepts at most %d arg(s), received %d", n, len(args))
+		}
+		return nil
+	}
+}
+
+// ExactArgs returns an error if there are not exactly N args.
+func ExactArgs(n int) PositionalArgs {
+	return func(cmd *Command, args []string) error {
+		if len(args) != n {
+			return fmt.Errorf("accepts %d arg(s), received %d", n, len(args))
+		}
+		return nil
+	}
+}
+
+// RangeArgs returns an error if the number of args is not within the expected range.
+func RangeArgs(min int, max int) PositionalArgs {
+	return func(cmd *Command, args []string) error {
+		if len(args) < min || len(args) > max {
+			return fmt.Errorf("accepts between %d and %d arg(s), received %d", min, max, len(args))
+		}
+		return nil
+	}
+}
diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go
new file mode 100644
index 0000000..8fa8f48
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/bash_completions.go
@@ -0,0 +1,584 @@
+package cobra
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+	"sort"
+	"strings"
+
+	"github.com/spf13/pflag"
+)
+
+// Annotations for Bash completion.
+const (
+	BashCompFilenameExt     = "cobra_annotation_bash_completion_filename_extensions"
+	BashCompCustom          = "cobra_annotation_bash_completion_custom"
+	BashCompOneRequiredFlag = "cobra_annotation_bash_completion_one_required_flag"
+	BashCompSubdirsInDir    = "cobra_annotation_bash_completion_subdirs_in_dir"
+)
+
+func writePreamble(buf *bytes.Buffer, name string) {
+	buf.WriteString(fmt.Sprintf("# bash completion for %-36s -*- shell-script -*-\n", name))
+	buf.WriteString(fmt.Sprintf(`
+__%[1]s_debug()
+{
+    if [[ -n ${BASH_COMP_DEBUG_FILE} ]]; then
+        echo "$*" >> "${BASH_COMP_DEBUG_FILE}"
+    fi
+}
+
+# Homebrew on Macs has version 1.3 of bash-completion which doesn't include
+# _init_completion. This is a very minimal version of that function.
+__%[1]s_init_completion()
+{
+    COMPREPLY=()
+    _get_comp_words_by_ref "$@" cur prev words cword
+}
+
+__%[1]s_index_of_word()
+{
+    local w word=$1
+    shift
+    index=0
+    for w in "$@"; do
+        [[ $w = "$word" ]] && return
+        index=$((index+1))
+    done
+    index=-1
+}
+
+__%[1]s_contains_word()
+{
+    local w word=$1; shift
+    for w in "$@"; do
+        [[ $w = "$word" ]] && return
+    done
+    return 1
+}
+
+__%[1]s_handle_reply()
+{
+    __%[1]s_debug "${FUNCNAME[0]}"
+    case $cur in
+        -*)
+            if [[ $(type -t compopt) = "builtin" ]]; then
+                compopt -o nospace
+            fi
+            local allflags
+            if [ ${#must_have_one_flag[@]} -ne 0 ]; then
+                allflags=("${must_have_one_flag[@]}")
+            else
+                allflags=("${flags[*]} ${two_word_flags[*]}")
+            fi
+            COMPREPLY=( $(compgen -W "${allflags[*]}" -- "$cur") )
+            if [[ $(type -t compopt) = "builtin" ]]; then
+                [[ "${COMPREPLY[0]}" == *= ]] || compopt +o nospace
+            fi
+
+            # complete after --flag=abc
+            if [[ $cur == *=* ]]; then
+                if [[ $(type -t compopt) = "builtin" ]]; then
+                    compopt +o nospace
+                fi
+
+                local index flag
+                flag="${cur%%=*}"
+                __%[1]s_index_of_word "${flag}" "${flags_with_completion[@]}"
+                COMPREPLY=()
+                if [[ ${index} -ge 0 ]]; then
+                    PREFIX=""
+                    cur="${cur#*=}"
+                    ${flags_completion[${index}]}
+                    if [ -n "${ZSH_VERSION}" ]; then
+                        # zsh completion needs --flag= prefix
+                        eval "COMPREPLY=( \"\${COMPREPLY[@]/#/${flag}=}\" )"
+                    fi
+                fi
+            fi
+            return 0;
+            ;;
+    esac
+
+    # check if we are handling a flag with a special completion handler
+    local index
+    __%[1]s_index_of_word "${prev}" "${flags_with_completion[@]}"
+    if [[ ${index} -ge 0 ]]; then
+        ${flags_completion[${index}]}
+        return
+    fi
+
+    # we are parsing a flag and don't have a special handler, no completion
+    if [[ ${cur} != "${words[cword]}" ]]; then
+        return
+    fi
+
+    local completions
+    completions=("${commands[@]}")
+    if [[ ${#must_have_one_noun[@]} -ne 0 ]]; then
+        completions=("${must_have_one_noun[@]}")
+    fi
+    if [[ ${#must_have_one_flag[@]} -ne 0 ]]; then
+        completions+=("${must_have_one_flag[@]}")
+    fi
+    COMPREPLY=( $(compgen -W "${completions[*]}" -- "$cur") )
+
+    if [[ ${#COMPREPLY[@]} -eq 0 && ${#noun_aliases[@]} -gt 0 && ${#must_have_one_noun[@]} -ne 0 ]]; then
+        COMPREPLY=( $(compgen -W "${noun_aliases[*]}" -- "$cur") )
+    fi
+
+    if [[ ${#COMPREPLY[@]} -eq 0 ]]; then
+        declare -F __custom_func >/dev/null && __custom_func
+    fi
+
+    # available in bash-completion >= 2, not always present on macOS
+    if declare -F __ltrim_colon_completions >/dev/null; then
+        __ltrim_colon_completions "$cur"
+    fi
+
+    # If there is only 1 completion and it is a flag with an = it will be completed
+    # but we don't want a space after the =
+    if [[ "${#COMPREPLY[@]}" -eq "1" ]] && [[ $(type -t compopt) = "builtin" ]] && [[ "${COMPREPLY[0]}" == --*= ]]; then
+       compopt -o nospace
+    fi
+}
+
+# The arguments should be in the form "ext1|ext2|extn"
+__%[1]s_handle_filename_extension_flag()
+{
+    local ext="$1"
+    _filedir "@(${ext})"
+}
+
+__%[1]s_handle_subdirs_in_dir_flag()
+{
+    local dir="$1"
+    pushd "${dir}" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1
+}
+
+__%[1]s_handle_flag()
+{
+    __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
+
+    # if a command required a flag, and we found it, unset must_have_one_flag()
+    local flagname=${words[c]}
+    local flagvalue
+    # if the word contained an =
+    if [[ ${words[c]} == *"="* ]]; then
+        flagvalue=${flagname#*=} # take in as flagvalue after the =
+        flagname=${flagname%%=*} # strip everything after the =
+        flagname="${flagname}=" # but put the = back
+    fi
+    __%[1]s_debug "${FUNCNAME[0]}: looking for ${flagname}"
+    if __%[1]s_contains_word "${flagname}" "${must_have_one_flag[@]}"; then
+        must_have_one_flag=()
+    fi
+
+    # if you set a flag which only applies to this command, don't show subcommands
+    if __%[1]s_contains_word "${flagname}" "${local_nonpersistent_flags[@]}"; then
+      commands=()
+    fi
+
+    # keep flag value with flagname as flaghash
+    # flaghash variable is an associative array which is only supported in bash > 3.
+    if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then
+        if [ -n "${flagvalue}" ] ; then
+            flaghash[${flagname}]=${flagvalue}
+        elif [ -n "${words[ $((c+1)) ]}" ] ; then
+            flaghash[${flagname}]=${words[ $((c+1)) ]}
+        else
+            flaghash[${flagname}]="true" # pad "true" for bool flag
+        fi
+    fi
+
+    # skip the argument to a two word flag
+    if __%[1]s_contains_word "${words[c]}" "${two_word_flags[@]}"; then
+        c=$((c+1))
+        # if we are looking for a flags value, don't show commands
+        if [[ $c -eq $cword ]]; then
+            commands=()
+        fi
+    fi
+
+    c=$((c+1))
+
+}
+
+__%[1]s_handle_noun()
+{
+    __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
+
+    if __%[1]s_contains_word "${words[c]}" "${must_have_one_noun[@]}"; then
+        must_have_one_noun=()
+    elif __%[1]s_contains_word "${words[c]}" "${noun_aliases[@]}"; then
+        must_have_one_noun=()
+    fi
+
+    nouns+=("${words[c]}")
+    c=$((c+1))
+}
+
+__%[1]s_handle_command()
+{
+    __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
+
+    local next_command
+    if [[ -n ${last_command} ]]; then
+        next_command="_${last_command}_${words[c]//:/__}"
+    else
+        if [[ $c -eq 0 ]]; then
+            next_command="_%[1]s_root_command"
+        else
+            next_command="_${words[c]//:/__}"
+        fi
+    fi
+    c=$((c+1))
+    __%[1]s_debug "${FUNCNAME[0]}: looking for ${next_command}"
+    declare -F "$next_command" >/dev/null && $next_command
+}
+
+__%[1]s_handle_word()
+{
+    if [[ $c -ge $cword ]]; then
+        __%[1]s_handle_reply
+        return
+    fi
+    __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
+    if [[ "${words[c]}" == -* ]]; then
+        __%[1]s_handle_flag
+    elif __%[1]s_contains_word "${words[c]}" "${commands[@]}"; then
+        __%[1]s_handle_command
+    elif [[ $c -eq 0 ]]; then
+        __%[1]s_handle_command
+    elif __%[1]s_contains_word "${words[c]}" "${command_aliases[@]}"; then
+        # aliashash variable is an associative array which is only supported in bash > 3.
+        if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then
+            words[c]=${aliashash[${words[c]}]}
+            __%[1]s_handle_command
+        else
+            __%[1]s_handle_noun
+        fi
+    else
+        __%[1]s_handle_noun
+    fi
+    __%[1]s_handle_word
+}
+
+`, name))
+}
+
+func writePostscript(buf *bytes.Buffer, name string) {
+	name = strings.Replace(name, ":", "__", -1)
+	buf.WriteString(fmt.Sprintf("__start_%s()\n", name))
+	buf.WriteString(fmt.Sprintf(`{
+    local cur prev words cword
+    declare -A flaghash 2>/dev/null || :
+    declare -A aliashash 2>/dev/null || :
+    if declare -F _init_completion >/dev/null 2>&1; then
+        _init_completion -s || return
+    else
+        __%[1]s_init_completion -n "=" || return
+    fi
+
+    local c=0
+    local flags=()
+    local two_word_flags=()
+    local local_nonpersistent_flags=()
+    local flags_with_completion=()
+    local flags_completion=()
+    local commands=("%[1]s")
+    local must_have_one_flag=()
+    local must_have_one_noun=()
+    local last_command
+    local nouns=()
+
+    __%[1]s_handle_word
+}
+
+`, name))
+	buf.WriteString(fmt.Sprintf(`if [[ $(type -t compopt) = "builtin" ]]; then
+    complete -o default -F __start_%s %s
+else
+    complete -o default -o nospace -F __start_%s %s
+fi
+
+`, name, name, name, name))
+	buf.WriteString("# ex: ts=4 sw=4 et filetype=sh\n")
+}
+
+func writeCommands(buf *bytes.Buffer, cmd *Command) {
+	buf.WriteString("    commands=()\n")
+	for _, c := range cmd.Commands() {
+		if !c.IsAvailableCommand() || c == cmd.helpCommand {
+			continue
+		}
+		buf.WriteString(fmt.Sprintf("    commands+=(%q)\n", c.Name()))
+		writeCmdAliases(buf, c)
+	}
+	buf.WriteString("\n")
+}
+
+func writeFlagHandler(buf *bytes.Buffer, name string, annotations map[string][]string, cmd *Command) {
+	for key, value := range annotations {
+		switch key {
+		case BashCompFilenameExt:
+			buf.WriteString(fmt.Sprintf("    flags_with_completion+=(%q)\n", name))
+
+			var ext string
+			if len(value) > 0 {
+				ext = fmt.Sprintf("__%s_handle_filename_extension_flag ", cmd.Root().Name()) + strings.Join(value, "|")
+			} else {
+				ext = "_filedir"
+			}
+			buf.WriteString(fmt.Sprintf("    flags_completion+=(%q)\n", ext))
+		case BashCompCustom:
+			buf.WriteString(fmt.Sprintf("    flags_with_completion+=(%q)\n", name))
+			if len(value) > 0 {
+				handlers := strings.Join(value, "; ")
+				buf.WriteString(fmt.Sprintf("    flags_completion+=(%q)\n", handlers))
+			} else {
+				buf.WriteString("    flags_completion+=(:)\n")
+			}
+		case BashCompSubdirsInDir:
+			buf.WriteString(fmt.Sprintf("    flags_with_completion+=(%q)\n", name))
+
+			var ext string
+			if len(value) == 1 {
+				ext = fmt.Sprintf("__%s_handle_subdirs_in_dir_flag ", cmd.Root().Name()) + value[0]
+			} else {
+				ext = "_filedir -d"
+			}
+			buf.WriteString(fmt.Sprintf("    flags_completion+=(%q)\n", ext))
+		}
+	}
+}
+
+func writeShortFlag(buf *bytes.Buffer, flag *pflag.Flag, cmd *Command) {
+	name := flag.Shorthand
+	format := "    "
+	if len(flag.NoOptDefVal) == 0 {
+		format += "two_word_"
+	}
+	format += "flags+=(\"-%s\")\n"
+	buf.WriteString(fmt.Sprintf(format, name))
+	writeFlagHandler(buf, "-"+name, flag.Annotations, cmd)
+}
+
+func writeFlag(buf *bytes.Buffer, flag *pflag.Flag, cmd *Command) {
+	name := flag.Name
+	format := "    flags+=(\"--%s"
+	if len(flag.NoOptDefVal) == 0 {
+		format += "="
+	}
+	format += "\")\n"
+	buf.WriteString(fmt.Sprintf(format, name))
+	writeFlagHandler(buf, "--"+name, flag.Annotations, cmd)
+}
+
+func writeLocalNonPersistentFlag(buf *bytes.Buffer, flag *pflag.Flag) {
+	name := flag.Name
+	format := "    local_nonpersistent_flags+=(\"--%s"
+	if len(flag.NoOptDefVal) == 0 {
+		format += "="
+	}
+	format += "\")\n"
+	buf.WriteString(fmt.Sprintf(format, name))
+}
+
+func writeFlags(buf *bytes.Buffer, cmd *Command) {
+	buf.WriteString(`    flags=()
+    two_word_flags=()
+    local_nonpersistent_flags=()
+    flags_with_completion=()
+    flags_completion=()
+
+`)
+	localNonPersistentFlags := cmd.LocalNonPersistentFlags()
+	cmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
+		if nonCompletableFlag(flag) {
+			return
+		}
+		writeFlag(buf, flag, cmd)
+		if len(flag.Shorthand) > 0 {
+			writeShortFlag(buf, flag, cmd)
+		}
+		if localNonPersistentFlags.Lookup(flag.Name) != nil {
+			writeLocalNonPersistentFlag(buf, flag)
+		}
+	})
+	cmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) {
+		if nonCompletableFlag(flag) {
+			return
+		}
+		writeFlag(buf, flag, cmd)
+		if len(flag.Shorthand) > 0 {
+			writeShortFlag(buf, flag, cmd)
+		}
+	})
+
+	buf.WriteString("\n")
+}
+
+func writeRequiredFlag(buf *bytes.Buffer, cmd *Command) {
+	buf.WriteString("    must_have_one_flag=()\n")
+	flags := cmd.NonInheritedFlags()
+	flags.VisitAll(func(flag *pflag.Flag) {
+		if nonCompletableFlag(flag) {
+			return
+		}
+		for key := range flag.Annotations {
+			switch key {
+			case BashCompOneRequiredFlag:
+				format := "    must_have_one_flag+=(\"--%s"
+				if flag.Value.Type() != "bool" {
+					format += "="
+				}
+				format += "\")\n"
+				buf.WriteString(fmt.Sprintf(format, flag.Name))
+
+				if len(flag.Shorthand) > 0 {
+					buf.WriteString(fmt.Sprintf("    must_have_one_flag+=(\"-%s\")\n", flag.Shorthand))
+				}
+			}
+		}
+	})
+}
+
+func writeRequiredNouns(buf *bytes.Buffer, cmd *Command) {
+	buf.WriteString("    must_have_one_noun=()\n")
+	sort.Sort(sort.StringSlice(cmd.ValidArgs))
+	for _, value := range cmd.ValidArgs {
+		buf.WriteString(fmt.Sprintf("    must_have_one_noun+=(%q)\n", value))
+	}
+}
+
+func writeCmdAliases(buf *bytes.Buffer, cmd *Command) {
+	if len(cmd.Aliases) == 0 {
+		return
+	}
+
+	sort.Sort(sort.StringSlice(cmd.Aliases))
+
+	buf.WriteString(fmt.Sprint(`    if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then`, "\n"))
+	for _, value := range cmd.Aliases {
+		buf.WriteString(fmt.Sprintf("        command_aliases+=(%q)\n", value))
+		buf.WriteString(fmt.Sprintf("        aliashash[%q]=%q\n", value, cmd.Name()))
+	}
+	buf.WriteString(`    fi`)
+	buf.WriteString("\n")
+}
+
+func writeArgAliases(buf *bytes.Buffer, cmd *Command) {
+	buf.WriteString("    noun_aliases=()\n")
+	sort.Sort(sort.StringSlice(cmd.ArgAliases))
+	for _, value := range cmd.ArgAliases {
+		buf.WriteString(fmt.Sprintf("    noun_aliases+=(%q)\n", value))
+	}
+}
+
+func gen(buf *bytes.Buffer, cmd *Command) {
+	for _, c := range cmd.Commands() {
+		if !c.IsAvailableCommand() || c == cmd.helpCommand {
+			continue
+		}
+		gen(buf, c)
+	}
+	commandName := cmd.CommandPath()
+	commandName = strings.Replace(commandName, " ", "_", -1)
+	commandName = strings.Replace(commandName, ":", "__", -1)
+
+	if cmd.Root() == cmd {
+		buf.WriteString(fmt.Sprintf("_%s_root_command()\n{\n", commandName))
+	} else {
+		buf.WriteString(fmt.Sprintf("_%s()\n{\n", commandName))
+	}
+
+	buf.WriteString(fmt.Sprintf("    last_command=%q\n", commandName))
+	buf.WriteString("\n")
+	buf.WriteString("    command_aliases=()\n")
+	buf.WriteString("\n")
+
+	writeCommands(buf, cmd)
+	writeFlags(buf, cmd)
+	writeRequiredFlag(buf, cmd)
+	writeRequiredNouns(buf, cmd)
+	writeArgAliases(buf, cmd)
+	buf.WriteString("}\n\n")
+}
+
+// GenBashCompletion generates a bash completion file and writes it to the passed writer.
+func (c *Command) GenBashCompletion(w io.Writer) error {
+	buf := new(bytes.Buffer)
+	writePreamble(buf, c.Name())
+	if len(c.BashCompletionFunction) > 0 {
+		buf.WriteString(c.BashCompletionFunction + "\n")
+	}
+	gen(buf, c)
+	writePostscript(buf, c.Name())
+
+	_, err := buf.WriteTo(w)
+	return err
+}
+
+func nonCompletableFlag(flag *pflag.Flag) bool {
+	return flag.Hidden || len(flag.Deprecated) > 0
+}
+
+// GenBashCompletionFile generates a bash completion file.
+func (c *Command) GenBashCompletionFile(filename string) error {
+	outFile, err := os.Create(filename)
+	if err != nil {
+		return err
+	}
+	defer outFile.Close()
+
+	return c.GenBashCompletion(outFile)
+}
+
+// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag if it exists,
+// and causes your command to report an error if invoked without the flag.
+func (c *Command) MarkFlagRequired(name string) error {
+	return MarkFlagRequired(c.Flags(), name)
+}
+
+// MarkPersistentFlagRequired adds the BashCompOneRequiredFlag annotation to the named persistent flag if it exists,
+// and causes your command to report an error if invoked without the flag.
+func (c *Command) MarkPersistentFlagRequired(name string) error {
+	return MarkFlagRequired(c.PersistentFlags(), name)
+}
+
+// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag if it exists,
+// and causes your command to report an error if invoked without the flag.
+func MarkFlagRequired(flags *pflag.FlagSet, name string) error {
+	return flags.SetAnnotation(name, BashCompOneRequiredFlag, []string{"true"})
+}
+
+// MarkFlagFilename adds the BashCompFilenameExt annotation to the named flag, if it exists.
+// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided.
+func (c *Command) MarkFlagFilename(name string, extensions ...string) error {
+	return MarkFlagFilename(c.Flags(), name, extensions...)
+}
+
+// MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists.
+// Generated bash autocompletion will call the bash function f for the flag.
+func (c *Command) MarkFlagCustom(name string, f string) error {
+	return MarkFlagCustom(c.Flags(), name, f)
+}
+
+// MarkPersistentFlagFilename adds the BashCompFilenameExt annotation to the named persistent flag, if it exists.
+// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided.
+func (c *Command) MarkPersistentFlagFilename(name string, extensions ...string) error {
+	return MarkFlagFilename(c.PersistentFlags(), name, extensions...)
+}
+
+// MarkFlagFilename adds the BashCompFilenameExt annotation to the named flag in the flag set, if it exists.
+// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided.
+func MarkFlagFilename(flags *pflag.FlagSet, name string, extensions ...string) error {
+	return flags.SetAnnotation(name, BashCompFilenameExt, extensions)
+}
+
+// MarkFlagCustom adds the BashCompCustom annotation to the named flag in the flag set, if it exists.
+// Generated bash autocompletion will call the bash function f for the flag.
+func MarkFlagCustom(flags *pflag.FlagSet, name string, f string) error {
+	return flags.SetAnnotation(name, BashCompCustom, []string{f})
+}
diff --git a/vendor/github.com/spf13/cobra/bash_completions.md b/vendor/github.com/spf13/cobra/bash_completions.md
new file mode 100644
index 0000000..e79d476
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/bash_completions.md
@@ -0,0 +1,221 @@
+# Generating Bash Completions For Your Own cobra.Command
+
+Generating bash completions from a cobra command is incredibly easy. An actual program which does so for the kubernetes kubectl binary is as follows:
+
+```go
+package main
+
+import (
+	"io/ioutil"
+	"os"
+
+	"k8s.io/kubernetes/pkg/kubectl/cmd"
+	"k8s.io/kubernetes/pkg/kubectl/cmd/util"
+)
+
+func main() {
+	kubectl := cmd.NewKubectlCommand(util.NewFactory(nil), os.Stdin, ioutil.Discard, ioutil.Discard)
+	kubectl.GenBashCompletionFile("out.sh")
+}
+```
+
+`out.sh` will get you completions of subcommands and flags. Copy it to `/etc/bash_completion.d/` as described [here](https://debian-administration.org/article/316/An_introduction_to_bash_completion_part_1) and reset your terminal to use autocompletion. If you make additional annotations to your code, you can get even more intelligent and flexible behavior.
+
+## Creating your own custom functions
+
+Some more actual code that works in kubernetes:
+
+```go
+const (
+        bash_completion_func = `__kubectl_parse_get()
+{
+    local kubectl_output out
+    if kubectl_output=$(kubectl get --no-headers "$1" 2>/dev/null); then
+        out=($(echo "${kubectl_output}" | awk '{print $1}'))
+        COMPREPLY=( $( compgen -W "${out[*]}" -- "$cur" ) )
+    fi
+}
+
+__kubectl_get_resource()
+{
+    if [[ ${#nouns[@]} -eq 0 ]]; then
+        return 1
+    fi
+    __kubectl_parse_get ${nouns[${#nouns[@]} -1]}
+    if [[ $? -eq 0 ]]; then
+        return 0
+    fi
+}
+
+__custom_func() {
+    case ${last_command} in
+        kubectl_get | kubectl_describe | kubectl_delete | kubectl_stop)
+            __kubectl_get_resource
+            return
+            ;;
+        *)
+            ;;
+    esac
+}
+`)
+```
+
+And then I set that in my command definition:
+
+```go
+cmds := &cobra.Command{
+	Use:   "kubectl",
+	Short: "kubectl controls the Kubernetes cluster manager",
+	Long: `kubectl controls the Kubernetes cluster manager.
+
+Find more information at https://github.com/GoogleCloudPlatform/kubernetes.`,
+	Run: runHelp,
+	BashCompletionFunction: bash_completion_func,
+}
+```
+
+The `BashCompletionFunction` option is really only valid/useful on the root command. Doing the above will cause `__custom_func()` to be called when the built-in processor is unable to find a solution. In the case of kubernetes a valid command might look something like `kubectl get pod [mypod]`. If you type `kubectl get pod [tab][tab]` then `__custom_func()` will run, because the cobra.Command only understood "kubectl" and "get". `__custom_func()` will see that the cobra.Command is "kubectl_get" and will thus call another helper `__kubectl_get_resource()`.  `__kubectl_get_resource` will look at the 'nouns' collected. In our example the only noun will be `pod`.  So it will call `__kubectl_parse_get pod`.  `__kubectl_parse_get` will actually call out to kubernetes and get any pods.  It will then set `COMPREPLY` to valid pods!
+
+## Have the completions code complete your 'nouns'
+
+In the above example "pod" was assumed to already be typed. But if you want `kubectl get [tab][tab]` to show a list of valid "nouns" you have to set them. Simplified code from `kubectl get` looks like:
+
+```go
+validArgs := []string{"pod", "node", "service", "replicationcontroller"}
+
+cmd := &cobra.Command{
+	Use:     "get [(-o|--output=)json|yaml|template|...] (RESOURCE [NAME] | RESOURCE/NAME ...)",
+	Short:   "Display one or many resources",
+	Long:    get_long,
+	Example: get_example,
+	Run: func(cmd *cobra.Command, args []string) {
+		err := RunGet(f, out, cmd, args)
+		util.CheckErr(err)
+	},
+	ValidArgs: validArgs,
+}
+```
+
+Notice we put the "ValidArgs" on the "get" subcommand. Doing so will give results like
+
+```bash
+# kubectl get [tab][tab]
+node                 pod                    replicationcontroller  service
+```
+
+## Plural form and shortcuts for nouns
+
+If your nouns have a number of aliases, you can define them alongside `ValidArgs` using `ArgAliases`:
+
+```go
+argAliases := []string{"pods", "nodes", "services", "svc", "replicationcontrollers", "rc"}
+
+cmd := &cobra.Command{
+    ...
+	ValidArgs:  validArgs,
+	ArgAliases: argAliases,
+}
+```
+
+The aliases are not shown to the user on tab completion, but they are accepted as valid nouns by
+the completion algorithm if entered manually, e.g. in:
+
+```bash
+# kubectl get rc [tab][tab]
+backend        frontend       database 
+```
+
+Note that without declaring `rc` as an alias, the completion algorithm would show the list of nouns
+in this example again instead of the replication controllers.
+
+## Mark flags as required
+
+Most of the time completions will only show subcommands. But if a flag is required to make a subcommand work, you probably want it to show up when the user types [tab][tab].  Marking a flag as 'Required' is incredibly easy.
+
+```go
+cmd.MarkFlagRequired("pod")
+cmd.MarkFlagRequired("container")
+```
+
+and you'll get something like
+
+```bash
+# kubectl exec [tab][tab][tab]
+-c            --container=  -p            --pod=  
+```
+
+# Specify valid filename extensions for flags that take a filename
+
+In this example we use --filename= and expect to get a JSON or YAML file as the argument. To make this easier, we annotate the --filename flag with the valid filename extensions.
+
+```go
+	annotations := []string{"json", "yaml", "yml"}
+	annotation := make(map[string][]string)
+	annotation[cobra.BashCompFilenameExt] = annotations
+
+	flag := &pflag.Flag{
+		Name:        "filename",
+		Shorthand:   "f",
+		Usage:       usage,
+		Value:       value,
+		DefValue:    value.String(),
+		Annotations: annotation,
+	}
+	cmd.Flags().AddFlag(flag)
+```
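+
+Equivalently, a sketch using the `MarkFlagFilename` helper on the command,
+assuming the flag has already been defined:
+
+```go
+cmd.Flags().StringP("filename", "f", "", usage)
+cmd.MarkFlagFilename("filename", "json", "yaml", "yml")
+```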
+
+Now when you run a command with this filename flag you'll get something like
+
+```bash
+# kubectl create -f 
+test/                         example/                      rpmbuild/
+hello.yml                     test.json
+```
+
+So while there are many other files in the current working directory, completion only shows subdirectories and files with valid extensions.
+
+# Specify custom flag completion
+
+Similar to the filename completion and filtering using cobra.BashCompFilenameExt, you can specify
+a custom flag completion function with cobra.BashCompCustom:
+
+```go
+	annotation := make(map[string][]string)
+	annotation[cobra.BashCompCustom] = []string{"__kubectl_get_namespaces"}
+
+	flag := &pflag.Flag{
+		Name:        "namespace",
+		Usage:       usage,
+		Annotations: annotation,
+	}
+	cmd.Flags().AddFlag(flag)
+```
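+
+Equivalently, a sketch using the `MarkFlagCustom` helper:
+
+```go
+cmd.Flags().String("namespace", "", usage)
+cmd.MarkFlagCustom("namespace", "__kubectl_get_namespaces")
+```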
+
+In addition, add the `__kubectl_get_namespaces` implementation in the `BashCompletionFunction`
+value, e.g.:
+
+```bash
+__kubectl_get_namespaces()
+{
+    local template
+    template="{{ range .items  }}{{ .metadata.name }} {{ end }}"
+    local kubectl_out
+    if kubectl_out=$(kubectl get -o template --template="${template}" namespace 2>/dev/null); then
+        COMPREPLY=( $( compgen -W "${kubectl_out[*]}" -- "$cur" ) )
+    fi
+}
+```
+
+# Using bash aliases for commands
+
+You can also configure bash aliases for your commands, and they will also support completions.
+
+```bash
+alias aliasname=origcommand
+complete -o default -F __start_origcommand aliasname
+
+# and now when you run `aliasname` completion will make
+# suggestions as it did for `origcommand`.
+
+$ aliasname <tab><tab>
+completion     firstcommand   secondcommand
+```
diff --git a/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go
new file mode 100644
index 0000000..7010fd1
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/cobra.go
@@ -0,0 +1,200 @@
+// Copyright © 2013 Steve Francia <spf@spf13.com>.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Commands similar to git, go tools and other modern CLI tools
+// inspired by go, go-Commander, gh and subcommand
+
+package cobra
+
+import (
+	"fmt"
+	"io"
+	"reflect"
+	"strconv"
+	"strings"
+	"text/template"
+	"unicode"
+)
+
+var templateFuncs = template.FuncMap{
+	"trim":                    strings.TrimSpace,
+	"trimRightSpace":          trimRightSpace,
+	"trimTrailingWhitespaces": trimRightSpace,
+	"appendIfNotPresent":      appendIfNotPresent,
+	"rpad":                    rpad,
+	"gt":                      Gt,
+	"eq":                      Eq,
+}
+
+var initializers []func()
+
+// EnablePrefixMatching allows setting automatic prefix matching. Automatic prefix
+// matching can be dangerous to enable in CLI tools, so it is disabled by default.
+// Set this to true to enable it.
+var EnablePrefixMatching = false
+
+// EnableCommandSorting controls sorting of the slice of commands, which is turned on by default.
+// To disable sorting, set it to false.
+var EnableCommandSorting = true
+
+// MousetrapHelpText enables an information splash screen on Windows
+// if the CLI is started from explorer.exe.
+// To disable the mousetrap, just set this variable to an empty string ("").
+// Works only on Microsoft Windows.
+var MousetrapHelpText string = `This is a command line tool.
+
+You need to open cmd.exe and run it from there.
+`
+
+// AddTemplateFunc adds a template function that's available to Usage and Help
+// template generation.
+func AddTemplateFunc(name string, tmplFunc interface{}) {
+	templateFuncs[name] = tmplFunc
+}
+
+// AddTemplateFuncs adds multiple template functions that are available to Usage and
+// Help template generation.
+func AddTemplateFuncs(tmplFuncs template.FuncMap) {
+	for k, v := range tmplFuncs {
+		templateFuncs[k] = v
+	}
+}
+
+// OnInitialize sets the passed functions to be run when each command's
+// Execute method is called.
+func OnInitialize(y ...func()) {
+	initializers = append(initializers, y...)
+}
+
+// FIXME Gt is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra.
+
+// Gt takes two types and checks whether the first type is greater than the second. In case of types Arrays, Chans,
+// Maps and Slices, Gt will compare their lengths. Ints are compared directly while strings are first parsed as
+// ints and then compared.
+func Gt(a interface{}, b interface{}) bool {
+	var left, right int64
+	av := reflect.ValueOf(a)
+
+	switch av.Kind() {
+	case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
+		left = int64(av.Len())
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		left = av.Int()
+	case reflect.String:
+		left, _ = strconv.ParseInt(av.String(), 10, 64)
+	}
+
+	bv := reflect.ValueOf(b)
+
+	switch bv.Kind() {
+	case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
+		right = int64(bv.Len())
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		right = bv.Int()
+	case reflect.String:
+		right, _ = strconv.ParseInt(bv.String(), 10, 64)
+	}
+
+	return left > right
+}
+
+// FIXME Eq is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra.
+
+// Eq takes two types and checks whether they are equal. Supported types are int and string. Unsupported types will panic.
+func Eq(a interface{}, b interface{}) bool {
+	av := reflect.ValueOf(a)
+	bv := reflect.ValueOf(b)
+
+	switch av.Kind() {
+	case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
+		panic("Eq called on unsupported type")
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return av.Int() == bv.Int()
+	case reflect.String:
+		return av.String() == bv.String()
+	}
+	return false
+}
+
+func trimRightSpace(s string) string {
+	return strings.TrimRightFunc(s, unicode.IsSpace)
+}
+
+// FIXME appendIfNotPresent is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra.
+
+// appendIfNotPresent will append stringToAppend to the end of s, but only if it's not yet present in s.
+func appendIfNotPresent(s, stringToAppend string) string {
+	if strings.Contains(s, stringToAppend) {
+		return s
+	}
+	return s + " " + stringToAppend
+}
+
+// rpad adds padding to the right of a string.
+func rpad(s string, padding int) string {
+	template := fmt.Sprintf("%%-%ds", padding)
+	return fmt.Sprintf(template, s)
+}
+
+// tmpl executes the given template text on data, writing the result to w.
+func tmpl(w io.Writer, text string, data interface{}) error {
+	t := template.New("top")
+	t.Funcs(templateFuncs)
+	template.Must(t.Parse(text))
+	return t.Execute(w, data)
+}
+
+// ld compares two strings and returns the Levenshtein distance between them.
+func ld(s, t string, ignoreCase bool) int {
+	if ignoreCase {
+		s = strings.ToLower(s)
+		t = strings.ToLower(t)
+	}
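+	// d[i][j] will hold the edit distance between s[:i] and t[:j].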
+	d := make([][]int, len(s)+1)
+	for i := range d {
+		d[i] = make([]int, len(t)+1)
+	}
+	for i := range d {
+		d[i][0] = i
+	}
+	for j := range d[0] {
+		d[0][j] = j
+	}
+	for j := 1; j <= len(t); j++ {
+		for i := 1; i <= len(s); i++ {
+			if s[i-1] == t[j-1] {
+				d[i][j] = d[i-1][j-1]
+			} else {
+				min := d[i-1][j]
+				if d[i][j-1] < min {
+					min = d[i][j-1]
+				}
+				if d[i-1][j-1] < min {
+					min = d[i-1][j-1]
+				}
+				d[i][j] = min + 1
+			}
+		}
+	}
+	return d[len(s)][len(t)]
+}
+
+func stringInSlice(a string, list []string) bool {
+	for _, b := range list {
+		if b == a {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/testdata/LICENSE.golden b/vendor/github.com/spf13/cobra/cobra/cmd/testdata/LICENSE.golden
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/cobra/cmd/testdata/LICENSE.golden
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go
new file mode 100644
index 0000000..34d1bf3
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/command.go
@@ -0,0 +1,1517 @@
+// Copyright © 2013 Steve Francia <spf@spf13.com>.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package cobra is a commander providing a simple interface for creating
+// powerful, modern CLI applications. In addition to the interface, Cobra
+// provides a controller to organize your application code.
+package cobra
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"sort"
+	"strings"
+
+	flag "github.com/spf13/pflag"
+)
+
+// FParseErrWhitelist configures which flag parse errors are to be ignored.
+type FParseErrWhitelist flag.ParseErrorsWhitelist
+
+// Command is just that, a command for your application.
+// E.g. in 'go run ...', 'run' is the command. Cobra requires
+// you to define the usage and description as part of your command
+// definition to ensure usability.
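+//
+// A minimal, illustrative sketch (not from the original source; the names
+// are hypothetical):
+//
+//	var rootCmd = &Command{
+//		Use:   "app",
+//		Short: "app is a sample CLI",
+//		Run: func(cmd *Command, args []string) {
+//			cmd.Println("hello from app")
+//		},
+//	}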
+type Command struct {
+	// Use is the one-line usage message.
+	Use string
+
+	// Aliases is an array of aliases that can be used instead of the first word in Use.
+	Aliases []string
+
+	// SuggestFor is an array of command names for which this command will be suggested -
+	// similar to aliases but only suggests.
+	SuggestFor []string
+
+	// Short is the short description shown in the 'help' output.
+	Short string
+
+	// Long is the long message shown in the 'help <this-command>' output.
+	Long string
+
+	// Example is examples of how to use the command.
+	Example string
+
+	// ValidArgs is a list of all valid non-flag arguments that are accepted in bash completions
+	ValidArgs []string
+
+	// Expected arguments
+	Args PositionalArgs
+
+	// ArgAliases is a list of aliases for ValidArgs.
+	// These are not suggested to the user in the bash completion,
+	// but accepted if entered manually.
+	ArgAliases []string
+
+	// BashCompletionFunction is a custom function used by the bash autocompletion generator.
+	BashCompletionFunction string
+
+	// Deprecated, if non-empty, marks this command as deprecated; this string is printed when the command is used.
+	Deprecated string
+
+	// Hidden defines whether this command is hidden and should NOT show up in the list of available commands.
+	Hidden bool
+
+	// Annotations are key/value pairs that can be used by applications to identify or
+	// group commands.
+	Annotations map[string]string
+
+	// Version defines the version for this command. If this value is non-empty and the command does not
+	// define a "version" flag, a "version" boolean flag will be added to the command and, if specified,
+	// will print the content of the "Version" variable.
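+	//
+	// For example, setting Version to "1.0.0" on a command named "app" makes
+	// 'app --version' print "app version 1.0.0" with the default template
+	// (an illustrative note, not from the original source).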
+	Version string
+
+	// The *Run functions are executed in the following order:
+	//   * PersistentPreRun()
+	//   * PreRun()
+	//   * Run()
+	//   * PostRun()
+	//   * PersistentPostRun()
+	// All functions get the same args, the arguments after the command name.
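+	//
+	// For example, a command combining several hooks might look like this
+	// (an illustrative sketch, not from the original source):
+	//
+	//	cmd := &Command{
+	//		Use:     "sync",
+	//		PreRun:  func(cmd *Command, args []string) { /* validate environment */ },
+	//		Run:     func(cmd *Command, args []string) { /* do the work */ },
+	//		PostRun: func(cmd *Command, args []string) { /* clean up */ },
+	//	}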
+	//
+	// PersistentPreRun: children of this command will inherit and execute.
+	PersistentPreRun func(cmd *Command, args []string)
+	// PersistentPreRunE: PersistentPreRun but returns an error.
+	PersistentPreRunE func(cmd *Command, args []string) error
+	// PreRun: children of this command will not inherit.
+	PreRun func(cmd *Command, args []string)
+	// PreRunE: PreRun but returns an error.
+	PreRunE func(cmd *Command, args []string) error
+	// Run: Typically the actual work function. Most commands will only implement this.
+	Run func(cmd *Command, args []string)
+	// RunE: Run but returns an error.
+	RunE func(cmd *Command, args []string) error
+	// PostRun: run after the Run command.
+	PostRun func(cmd *Command, args []string)
+	// PostRunE: PostRun but returns an error.
+	PostRunE func(cmd *Command, args []string) error
+	// PersistentPostRun: children of this command will inherit and execute after PostRun.
+	PersistentPostRun func(cmd *Command, args []string)
+	// PersistentPostRunE: PersistentPostRun but returns an error.
+	PersistentPostRunE func(cmd *Command, args []string) error
+
+	// SilenceErrors is an option to quiet errors downstream.
+	SilenceErrors bool
+
+	// SilenceUsage is an option to silence usage when an error occurs.
+	SilenceUsage bool
+
+	// DisableFlagParsing disables the flag parsing.
+	// If this is true, all flags will be passed to the command as arguments.
+	DisableFlagParsing bool
+
+	// DisableAutoGenTag defines whether the gen tag ("Auto generated by spf13/cobra...")
+	// will be printed when generating docs for this command.
+	DisableAutoGenTag bool
+
+	// DisableFlagsInUseLine will disable the addition of [flags] to the usage
+	// line of a command when printing help or generating docs.
+	DisableFlagsInUseLine bool
+
+	// DisableSuggestions disables the suggestions based on Levenshtein distance
+	// that go along with 'unknown command' messages.
+	DisableSuggestions bool
+	// SuggestionsMinimumDistance defines the minimum Levenshtein distance to display suggestions.
+	// Must be > 0.
+	SuggestionsMinimumDistance int
+
+	// TraverseChildren parses flags on all parents before executing the child command.
+	TraverseChildren bool
+
+	// FParseErrWhitelist lists flag parse errors to be ignored.
+	FParseErrWhitelist FParseErrWhitelist
+
+	// commands is the list of commands supported by this program.
+	commands []*Command
+	// parent is a parent command for this command.
+	parent *Command
+	// Maximum lengths of the commands' strings, used for padding.
+	commandsMaxUseLen         int
+	commandsMaxCommandPathLen int
+	commandsMaxNameLen        int
+	// commandsAreSorted defines whether the command slice is sorted.
+	commandsAreSorted bool
+	// commandCalledAs is the name or alias value used to call this command.
+	commandCalledAs struct {
+		name   string
+		called bool
+	}
+
+	// args is actual args parsed from flags.
+	args []string
+	// flagErrorBuf contains all error messages from pflag.
+	flagErrorBuf *bytes.Buffer
+	// flags is full set of flags.
+	flags *flag.FlagSet
+	// pflags contains persistent flags.
+	pflags *flag.FlagSet
+	// lflags contains local flags.
+	lflags *flag.FlagSet
+	// iflags contains inherited flags.
+	iflags *flag.FlagSet
+	// parentsPflags holds all persistent flags of cmd's parents.
+	parentsPflags *flag.FlagSet
+	// globNormFunc is the global normalization function
+	// that we can use on every pflag set and child command
+	globNormFunc func(f *flag.FlagSet, name string) flag.NormalizedName
+
+	// output is an output writer defined by user.
+	output io.Writer
+	// usageFunc is usage func defined by user.
+	usageFunc func(*Command) error
+	// usageTemplate is usage template defined by user.
+	usageTemplate string
+	// flagErrorFunc is a func defined by the user; it is called when the parsing of
+	// flags returns an error.
+	flagErrorFunc func(*Command, error) error
+	// helpTemplate is help template defined by user.
+	helpTemplate string
+	// helpFunc is help func defined by user.
+	helpFunc func(*Command, []string)
+	// helpCommand is the command with usage 'help'. If it is not defined by the user,
+	// cobra uses the default help command.
+	helpCommand *Command
+	// versionTemplate is the version template defined by user.
+	versionTemplate string
+}
+
+// SetArgs sets arguments for the command. By default, the arguments are
+// os.Args[1:]; overriding them is particularly useful when testing.
+func (c *Command) SetArgs(a []string) {
+	c.args = a
+}
+
+// SetOutput sets the destination for usage and error messages.
+// If output is nil, os.Stderr is used.
+func (c *Command) SetOutput(output io.Writer) {
+	c.output = output
+}
+
+// SetUsageFunc sets the usage function. Usage can be defined by the application.
+func (c *Command) SetUsageFunc(f func(*Command) error) {
+	c.usageFunc = f
+}
+
+// SetUsageTemplate sets the usage template. Can be defined by the application.
+func (c *Command) SetUsageTemplate(s string) {
+	c.usageTemplate = s
+}
+
+// SetFlagErrorFunc sets a function to generate an error when flag parsing
+// fails.
+func (c *Command) SetFlagErrorFunc(f func(*Command, error) error) {
+	c.flagErrorFunc = f
+}
+
+// SetHelpFunc sets the help function. Can be defined by the application.
+func (c *Command) SetHelpFunc(f func(*Command, []string)) {
+	c.helpFunc = f
+}
+
+// SetHelpCommand sets the help command.
+func (c *Command) SetHelpCommand(cmd *Command) {
+	c.helpCommand = cmd
+}
+
+// SetHelpTemplate sets the help template to be used. Applications can use it to set a custom template.
+func (c *Command) SetHelpTemplate(s string) {
+	c.helpTemplate = s
+}
+
+// SetVersionTemplate sets the version template to be used. Applications can use it to set a custom template.
+func (c *Command) SetVersionTemplate(s string) {
+	c.versionTemplate = s
+}
+
+// SetGlobalNormalizationFunc sets a normalization function on all flag sets and also on child commands.
+// The user should not have a cyclic dependency on commands.
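+//
+// For example, a normalization function that maps dashes to underscores might
+// look like this (an illustrative sketch, not from the original source):
+//
+//	norm := func(f *flag.FlagSet, name string) flag.NormalizedName {
+//		return flag.NormalizedName(strings.Replace(name, "-", "_", -1))
+//	}
+//	rootCmd.SetGlobalNormalizationFunc(norm)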
+func (c *Command) SetGlobalNormalizationFunc(n func(f *flag.FlagSet, name string) flag.NormalizedName) {
+	c.Flags().SetNormalizeFunc(n)
+	c.PersistentFlags().SetNormalizeFunc(n)
+	c.globNormFunc = n
+
+	for _, command := range c.commands {
+		command.SetGlobalNormalizationFunc(n)
+	}
+}
+
+// OutOrStdout returns the output writer, defaulting to os.Stdout.
+func (c *Command) OutOrStdout() io.Writer {
+	return c.getOut(os.Stdout)
+}
+
+// OutOrStderr returns the output writer, defaulting to os.Stderr.
+func (c *Command) OutOrStderr() io.Writer {
+	return c.getOut(os.Stderr)
+}
+
+func (c *Command) getOut(def io.Writer) io.Writer {
+	if c.output != nil {
+		return c.output
+	}
+	if c.HasParent() {
+		return c.parent.getOut(def)
+	}
+	return def
+}
+
+// UsageFunc returns either the function set by SetUsageFunc for this command
+// or a parent, or it returns a default usage function.
+func (c *Command) UsageFunc() (f func(*Command) error) {
+	if c.usageFunc != nil {
+		return c.usageFunc
+	}
+	if c.HasParent() {
+		return c.Parent().UsageFunc()
+	}
+	return func(c *Command) error {
+		c.mergePersistentFlags()
+		err := tmpl(c.OutOrStderr(), c.UsageTemplate(), c)
+		if err != nil {
+			c.Println(err)
+		}
+		return err
+	}
+}
+
+// Usage puts out the usage for the command.
+// Used when a user provides invalid input.
+// Can be defined by the user by overriding UsageFunc.
+func (c *Command) Usage() error {
+	return c.UsageFunc()(c)
+}
+
+// HelpFunc returns either the function set by SetHelpFunc for this command
+// or a parent, or it returns a function with default help behavior.
+func (c *Command) HelpFunc() func(*Command, []string) {
+	if c.helpFunc != nil {
+		return c.helpFunc
+	}
+	if c.HasParent() {
+		return c.Parent().HelpFunc()
+	}
+	return func(c *Command, a []string) {
+		c.mergePersistentFlags()
+		err := tmpl(c.OutOrStdout(), c.HelpTemplate(), c)
+		if err != nil {
+			c.Println(err)
+		}
+	}
+}
+
+// Help puts out the help for the command.
+// Used when a user calls help [command].
+// Can be defined by the user by overriding HelpFunc.
+func (c *Command) Help() error {
+	c.HelpFunc()(c, []string{})
+	return nil
+}
+
+// UsageString returns the usage string.
+func (c *Command) UsageString() string {
+	tmpOutput := c.output
+	bb := new(bytes.Buffer)
+	c.SetOutput(bb)
+	c.Usage()
+	c.output = tmpOutput
+	return bb.String()
+}
+
+// FlagErrorFunc returns either the function set by SetFlagErrorFunc for this
+// command or a parent, or it returns a function which returns the original
+// error.
+func (c *Command) FlagErrorFunc() (f func(*Command, error) error) {
+	if c.flagErrorFunc != nil {
+		return c.flagErrorFunc
+	}
+
+	if c.HasParent() {
+		return c.parent.FlagErrorFunc()
+	}
+	return func(c *Command, err error) error {
+		return err
+	}
+}
+
+var minUsagePadding = 25
+
+// UsagePadding returns padding for the usage.
+func (c *Command) UsagePadding() int {
+	if c.parent == nil || minUsagePadding > c.parent.commandsMaxUseLen {
+		return minUsagePadding
+	}
+	return c.parent.commandsMaxUseLen
+}
+
+var minCommandPathPadding = 11
+
+// CommandPathPadding returns padding for the command path.
+func (c *Command) CommandPathPadding() int {
+	if c.parent == nil || minCommandPathPadding > c.parent.commandsMaxCommandPathLen {
+		return minCommandPathPadding
+	}
+	return c.parent.commandsMaxCommandPathLen
+}
+
+var minNamePadding = 11
+
+// NamePadding returns padding for the name.
+func (c *Command) NamePadding() int {
+	if c.parent == nil || minNamePadding > c.parent.commandsMaxNameLen {
+		return minNamePadding
+	}
+	return c.parent.commandsMaxNameLen
+}
+
+// UsageTemplate returns the usage template for the command.
+func (c *Command) UsageTemplate() string {
+	if c.usageTemplate != "" {
+		return c.usageTemplate
+	}
+
+	if c.HasParent() {
+		return c.parent.UsageTemplate()
+	}
+	return `Usage:{{if .Runnable}}
+  {{.UseLine}}{{end}}{{if .HasAvailableSubCommands}}
+  {{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}}
+
+Aliases:
+  {{.NameAndAliases}}{{end}}{{if .HasExample}}
+
+Examples:
+{{.Example}}{{end}}{{if .HasAvailableSubCommands}}
+
+Available Commands:{{range .Commands}}{{if (or .IsAvailableCommand (eq .Name "help"))}}
+  {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}}
+
+Flags:
+{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}}
+
+Global Flags:
+{{.InheritedFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasHelpSubCommands}}
+
+Additional help topics:{{range .Commands}}{{if .IsAdditionalHelpTopicCommand}}
+  {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableSubCommands}}
+
+Use "{{.CommandPath}} [command] --help" for more information about a command.{{end}}
+`
+}
+
+// HelpTemplate returns the help template for the command.
+func (c *Command) HelpTemplate() string {
+	if c.helpTemplate != "" {
+		return c.helpTemplate
+	}
+
+	if c.HasParent() {
+		return c.parent.HelpTemplate()
+	}
+	return `{{with (or .Long .Short)}}{{. | trimTrailingWhitespaces}}
+
+{{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}`
+}
+
+// VersionTemplate returns the version template for the command.
+func (c *Command) VersionTemplate() string {
+	if c.versionTemplate != "" {
+		return c.versionTemplate
+	}
+
+	if c.HasParent() {
+		return c.parent.VersionTemplate()
+	}
+	return `{{with .Name}}{{printf "%s " .}}{{end}}{{printf "version %s" .Version}}
+`
+}
+
+func hasNoOptDefVal(name string, fs *flag.FlagSet) bool {
+	flag := fs.Lookup(name)
+	if flag == nil {
+		return false
+	}
+	return flag.NoOptDefVal != ""
+}
+
+func shortHasNoOptDefVal(name string, fs *flag.FlagSet) bool {
+	if len(name) == 0 {
+		return false
+	}
+
+	flag := fs.ShorthandLookup(name[:1])
+	if flag == nil {
+		return false
+	}
+	return flag.NoOptDefVal != ""
+}
+
+func stripFlags(args []string, c *Command) []string {
+	if len(args) == 0 {
+		return args
+	}
+	c.mergePersistentFlags()
+
+	commands := []string{}
+	flags := c.Flags()
+
+Loop:
+	for len(args) > 0 {
+		s := args[0]
+		args = args[1:]
+		switch {
+		case s == "--":
+			// "--" terminates the flags
+			break Loop
+		case strings.HasPrefix(s, "--") && !strings.Contains(s, "=") && !hasNoOptDefVal(s[2:], flags):
+			// If '--flag arg' then
+			// delete arg from args.
+			fallthrough // (do the same as below)
+		case strings.HasPrefix(s, "-") && !strings.Contains(s, "=") && len(s) == 2 && !shortHasNoOptDefVal(s[1:], flags):
+			// If '-f arg' then
+			// delete 'arg' from args or break the loop if len(args) <= 1.
+			if len(args) <= 1 {
+				break Loop
+			} else {
+				args = args[1:]
+				continue
+			}
+		case s != "" && !strings.HasPrefix(s, "-"):
+			commands = append(commands, s)
+		}
+	}
+
+	return commands
+}
+
+// argsMinusFirstX removes only the first occurrence of x from args. Otherwise, commands that look like
+// 'openshift admin policy add-role-to-user admin my-user' would lose the second 'admin' argument (arg[4]).
+func argsMinusFirstX(args []string, x string) []string {
+	for i, y := range args {
+		if x == y {
+			ret := []string{}
+			ret = append(ret, args[:i]...)
+			ret = append(ret, args[i+1:]...)
+			return ret
+		}
+	}
+	return args
+}
+
+func isFlagArg(arg string) bool {
+	return ((len(arg) >= 3 && arg[1] == '-') ||
+		(len(arg) >= 2 && arg[0] == '-' && arg[1] != '-'))
+}
+
+// Find finds the target command given the args and command tree.
+// It is meant to be run on the highest node and only searches down.
+func (c *Command) Find(args []string) (*Command, []string, error) {
+	var innerfind func(*Command, []string) (*Command, []string)
+
+	innerfind = func(c *Command, innerArgs []string) (*Command, []string) {
+		argsWOflags := stripFlags(innerArgs, c)
+		if len(argsWOflags) == 0 {
+			return c, innerArgs
+		}
+		nextSubCmd := argsWOflags[0]
+
+		cmd := c.findNext(nextSubCmd)
+		if cmd != nil {
+			return innerfind(cmd, argsMinusFirstX(innerArgs, nextSubCmd))
+		}
+		return c, innerArgs
+	}
+
+	commandFound, a := innerfind(c, args)
+	if commandFound.Args == nil {
+		return commandFound, a, legacyArgs(commandFound, stripFlags(a, commandFound))
+	}
+	return commandFound, a, nil
+}
+
+func (c *Command) findSuggestions(arg string) string {
+	if c.DisableSuggestions {
+		return ""
+	}
+	if c.SuggestionsMinimumDistance <= 0 {
+		c.SuggestionsMinimumDistance = 2
+	}
+	suggestionsString := ""
+	if suggestions := c.SuggestionsFor(arg); len(suggestions) > 0 {
+		suggestionsString += "\n\nDid you mean this?\n"
+		for _, s := range suggestions {
+			suggestionsString += fmt.Sprintf("\t%v\n", s)
+		}
+	}
+	return suggestionsString
+}
+
+func (c *Command) findNext(next string) *Command {
+	matches := make([]*Command, 0)
+	for _, cmd := range c.commands {
+		if cmd.Name() == next || cmd.HasAlias(next) {
+			cmd.commandCalledAs.name = next
+			return cmd
+		}
+		if EnablePrefixMatching && cmd.hasNameOrAliasPrefix(next) {
+			matches = append(matches, cmd)
+		}
+	}
+
+	if len(matches) == 1 {
+		return matches[0]
+	}
+
+	return nil
+}
+
+// Traverse traverses the command tree to find the command, parsing args for
+// each parent along the way.
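+//
+// With TraverseChildren enabled on the root command, an invocation such as
+// the following parses --verbose on the root before dispatching to the
+// subcommand (an illustrative sketch, not from the original source):
+//
+//	root --verbose sub --name=x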
+func (c *Command) Traverse(args []string) (*Command, []string, error) {
+	flags := []string{}
+	inFlag := false
+
+	for i, arg := range args {
+		switch {
+		// A long flag with a space separated value
+		case strings.HasPrefix(arg, "--") && !strings.Contains(arg, "="):
+			// TODO: this isn't quite right, we should really check ahead for 'true' or 'false'
+			inFlag = !hasNoOptDefVal(arg[2:], c.Flags())
+			flags = append(flags, arg)
+			continue
+		// A short flag with a space separated value
+		case strings.HasPrefix(arg, "-") && !strings.Contains(arg, "=") && len(arg) == 2 && !shortHasNoOptDefVal(arg[1:], c.Flags()):
+			inFlag = true
+			flags = append(flags, arg)
+			continue
+		// The value for a flag
+		case inFlag:
+			inFlag = false
+			flags = append(flags, arg)
+			continue
+		// A flag without a value, or with an `=` separated value
+		case isFlagArg(arg):
+			flags = append(flags, arg)
+			continue
+		}
+
+		cmd := c.findNext(arg)
+		if cmd == nil {
+			return c, args, nil
+		}
+
+		if err := c.ParseFlags(flags); err != nil {
+			return nil, args, err
+		}
+		return cmd.Traverse(args[i+1:])
+	}
+	return c, args, nil
+}
+
+// SuggestionsFor provides suggestions for the typedName.
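+// For example, with available subcommands "status" and "stash", a typed name
+// of "stat" may produce both as suggestions (an illustrative note, not from
+// the original source).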
+func (c *Command) SuggestionsFor(typedName string) []string {
+	suggestions := []string{}
+	for _, cmd := range c.commands {
+		if cmd.IsAvailableCommand() {
+			levenshteinDistance := ld(typedName, cmd.Name(), true)
+			suggestByLevenshtein := levenshteinDistance <= c.SuggestionsMinimumDistance
+			suggestByPrefix := strings.HasPrefix(strings.ToLower(cmd.Name()), strings.ToLower(typedName))
+			if suggestByLevenshtein || suggestByPrefix {
+				suggestions = append(suggestions, cmd.Name())
+			}
+			for _, explicitSuggestion := range cmd.SuggestFor {
+				if strings.EqualFold(typedName, explicitSuggestion) {
+					suggestions = append(suggestions, cmd.Name())
+				}
+			}
+		}
+	}
+	return suggestions
+}
+
+// VisitParents visits all parents of the command and invokes fn on each parent.
+func (c *Command) VisitParents(fn func(*Command)) {
+	if c.HasParent() {
+		fn(c.Parent())
+		c.Parent().VisitParents(fn)
+	}
+}
+
+// Root finds root command.
+func (c *Command) Root() *Command {
+	if c.HasParent() {
+		return c.Parent().Root()
+	}
+	return c
+}
+
+// ArgsLenAtDash returns the length of c.Flags().Args at the moment
+// when a -- was found during args parsing.
+func (c *Command) ArgsLenAtDash() int {
+	return c.Flags().ArgsLenAtDash()
+}
+
+func (c *Command) execute(a []string) (err error) {
+	if c == nil {
+		return fmt.Errorf("Called Execute() on a nil Command")
+	}
+
+	if len(c.Deprecated) > 0 {
+		c.Printf("Command %q is deprecated, %s\n", c.Name(), c.Deprecated)
+	}
+
+	// initialize the help and version flags at the last possible point to
+	// allow for user overriding
+	c.InitDefaultHelpFlag()
+	c.InitDefaultVersionFlag()
+
+	err = c.ParseFlags(a)
+	if err != nil {
+		return c.FlagErrorFunc()(c, err)
+	}
+
+	// If help was requested, return that we want help regardless of other flags.
+	// Also request help if the command isn't runnable.
+	helpVal, err := c.Flags().GetBool("help")
+	if err != nil {
+		// should be impossible to get here as we always declare a help
+		// flag in InitDefaultHelpFlag()
+		c.Println("\"help\" flag declared as non-bool. Please correct your code")
+		return err
+	}
+
+	if helpVal {
+		return flag.ErrHelp
+	}
+
+	// for back-compat, only add version flag behavior if version is defined
+	if c.Version != "" {
+		versionVal, err := c.Flags().GetBool("version")
+		if err != nil {
+			c.Println("\"version\" flag declared as non-bool. Please correct your code")
+			return err
+		}
+		if versionVal {
+			err := tmpl(c.OutOrStdout(), c.VersionTemplate(), c)
+			if err != nil {
+				c.Println(err)
+			}
+			return err
+		}
+	}
+
+	if !c.Runnable() {
+		return flag.ErrHelp
+	}
+
+	c.preRun()
+
+	argWoFlags := c.Flags().Args()
+	if c.DisableFlagParsing {
+		argWoFlags = a
+	}
+
+	if err := c.ValidateArgs(argWoFlags); err != nil {
+		return err
+	}
+
+	for p := c; p != nil; p = p.Parent() {
+		if p.PersistentPreRunE != nil {
+			if err := p.PersistentPreRunE(c, argWoFlags); err != nil {
+				return err
+			}
+			break
+		} else if p.PersistentPreRun != nil {
+			p.PersistentPreRun(c, argWoFlags)
+			break
+		}
+	}
+	if c.PreRunE != nil {
+		if err := c.PreRunE(c, argWoFlags); err != nil {
+			return err
+		}
+	} else if c.PreRun != nil {
+		c.PreRun(c, argWoFlags)
+	}
+
+	if err := c.validateRequiredFlags(); err != nil {
+		return err
+	}
+	if c.RunE != nil {
+		if err := c.RunE(c, argWoFlags); err != nil {
+			return err
+		}
+	} else {
+		c.Run(c, argWoFlags)
+	}
+	if c.PostRunE != nil {
+		if err := c.PostRunE(c, argWoFlags); err != nil {
+			return err
+		}
+	} else if c.PostRun != nil {
+		c.PostRun(c, argWoFlags)
+	}
+	for p := c; p != nil; p = p.Parent() {
+		if p.PersistentPostRunE != nil {
+			if err := p.PersistentPostRunE(c, argWoFlags); err != nil {
+				return err
+			}
+			break
+		} else if p.PersistentPostRun != nil {
+			p.PersistentPostRun(c, argWoFlags)
+			break
+		}
+	}
+
+	return nil
+}
+
+func (c *Command) preRun() {
+	for _, x := range initializers {
+		x()
+	}
+}
+
+// Execute uses the args (os.Args[1:] by default)
+// and runs through the command tree, finding appropriate matches
+// for commands and then corresponding flags.
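+//
+// Typically Execute is called once from main on the root command
+// (an illustrative sketch, not from the original source):
+//
+//	if err := rootCmd.Execute(); err != nil {
+//		os.Exit(1)
+//	}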
+func (c *Command) Execute() error {
+	_, err := c.ExecuteC()
+	return err
+}
+
+// ExecuteC executes the command.
+func (c *Command) ExecuteC() (cmd *Command, err error) {
+	// Regardless of what command execute is called on, run on Root only
+	if c.HasParent() {
+		return c.Root().ExecuteC()
+	}
+
+	// windows hook
+	if preExecHookFn != nil {
+		preExecHookFn(c)
+	}
+
+	// initialize help at the last possible point to allow for user
+	// overriding
+	c.InitDefaultHelpCmd()
+
+	var args []string
+
+	// Workaround FAIL with "go test -v" or "cobra.test -test.v", see #155
+	if c.args == nil && filepath.Base(os.Args[0]) != "cobra.test" {
+		args = os.Args[1:]
+	} else {
+		args = c.args
+	}
+
+	var flags []string
+	if c.TraverseChildren {
+		cmd, flags, err = c.Traverse(args)
+	} else {
+		cmd, flags, err = c.Find(args)
+	}
+	if err != nil {
+		// If we parsed down to a subcommand and then failed, report the error against the subcommand
+		if cmd != nil {
+			c = cmd
+		}
+		if !c.SilenceErrors {
+			c.Println("Error:", err.Error())
+			c.Printf("Run '%v --help' for usage.\n", c.CommandPath())
+		}
+		return c, err
+	}
+
+	cmd.commandCalledAs.called = true
+	if cmd.commandCalledAs.name == "" {
+		cmd.commandCalledAs.name = cmd.Name()
+	}
+
+	err = cmd.execute(flags)
+	if err != nil {
+		// Always show help if requested, even if SilenceErrors is in
+		// effect
+		if err == flag.ErrHelp {
+			cmd.HelpFunc()(cmd, args)
+			return cmd, nil
+		}
+
+		// If the root command has SilenceErrors flagged,
+		// all subcommands should respect it
+		if !cmd.SilenceErrors && !c.SilenceErrors {
+			c.Println("Error:", err.Error())
+		}
+
+		// If the root command has SilenceUsage flagged,
+		// all subcommands should respect it
+		if !cmd.SilenceUsage && !c.SilenceUsage {
+			c.Println(cmd.UsageString())
+		}
+	}
+	return cmd, err
+}
+
+// ValidateArgs validates the positional arguments using c.Args, if it is set.
+func (c *Command) ValidateArgs(args []string) error {
+	if c.Args == nil {
+		return nil
+	}
+	return c.Args(c, args)
+}
+
+// validateRequiredFlags returns an error naming any flags marked as required
+// that were not set on the command line.
+func (c *Command) validateRequiredFlags() error {
+	flags := c.Flags()
+	missingFlagNames := []string{}
+	flags.VisitAll(func(pflag *flag.Flag) {
+		requiredAnnotation, found := pflag.Annotations[BashCompOneRequiredFlag]
+		if !found {
+			return
+		}
+		if (requiredAnnotation[0] == "true") && !pflag.Changed {
+			missingFlagNames = append(missingFlagNames, pflag.Name)
+		}
+	})
+
+	if len(missingFlagNames) > 0 {
+		return fmt.Errorf(`required flag(s) "%s" not set`, strings.Join(missingFlagNames, `", "`))
+	}
+	return nil
+}
+
+// InitDefaultHelpFlag adds a default help flag to c.
+// It is called automatically when c executes or when help and usage are called.
+// If c already has a help flag, it does nothing.
+func (c *Command) InitDefaultHelpFlag() {
+	c.mergePersistentFlags()
+	if c.Flags().Lookup("help") == nil {
+		usage := "help for "
+		if c.Name() == "" {
+			usage += "this command"
+		} else {
+			usage += c.Name()
+		}
+		c.Flags().BoolP("help", "h", false, usage)
+	}
+}
+
+// InitDefaultVersionFlag adds a default version flag to c.
+// It is called automatically when c executes.
+// If c already has a version flag, or if c.Version is empty, it does nothing.
+func (c *Command) InitDefaultVersionFlag() {
+	if c.Version == "" {
+		return
+	}
+
+	c.mergePersistentFlags()
+	if c.Flags().Lookup("version") == nil {
+		usage := "version for "
+		if c.Name() == "" {
+			usage += "this command"
+		} else {
+			usage += c.Name()
+		}
+		c.Flags().Bool("version", false, usage)
+	}
+}
+
+// InitDefaultHelpCmd adds a default help command to c.
+// It is called automatically when c executes or when help and usage are called.
+// If c already has a help command, or if c has no subcommands, it does nothing.
+func (c *Command) InitDefaultHelpCmd() {
+	if !c.HasSubCommands() {
+		return
+	}
+
+	if c.helpCommand == nil {
+		c.helpCommand = &Command{
+			Use:   "help [command]",
+			Short: "Help about any command",
+			Long: `Help provides help for any command in the application.
+Simply type ` + c.Name() + ` help [path to command] for full details.`,
+
+			Run: func(c *Command, args []string) {
+				cmd, _, e := c.Root().Find(args)
+				if cmd == nil || e != nil {
+					c.Printf("Unknown help topic %#q\n", args)
+					c.Root().Usage()
+				} else {
+					cmd.InitDefaultHelpFlag() // make possible 'help' flag to be shown
+					cmd.Help()
+				}
+			},
+		}
+	}
+	c.RemoveCommand(c.helpCommand)
+	c.AddCommand(c.helpCommand)
+}
+
+// ResetCommands deletes the parent, subcommands, and help command from c.
+func (c *Command) ResetCommands() {
+	c.parent = nil
+	c.commands = nil
+	c.helpCommand = nil
+	c.parentsPflags = nil
+}
+
+// commandSorterByName sorts commands by their names.
+type commandSorterByName []*Command
+
+func (c commandSorterByName) Len() int           { return len(c) }
+func (c commandSorterByName) Swap(i, j int)      { c[i], c[j] = c[j], c[i] }
+func (c commandSorterByName) Less(i, j int) bool { return c[i].Name() < c[j].Name() }
+
+// Commands returns a sorted slice of child commands.
+func (c *Command) Commands() []*Command {
+	// do not sort commands if they are already sorted or sorting is disabled
+	if EnableCommandSorting && !c.commandsAreSorted {
+		sort.Sort(commandSorterByName(c.commands))
+		c.commandsAreSorted = true
+	}
+	return c.commands
+}
+
+// AddCommand adds one or more commands to this parent command.
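+//
+// For example (an illustrative sketch; the command variables are hypothetical):
+//
+//	rootCmd.AddCommand(versionCmd, serveCmd)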
+func (c *Command) AddCommand(cmds ...*Command) {
+	for i, x := range cmds {
+		if cmds[i] == c {
+			panic("Command can't be a child of itself")
+		}
+		cmds[i].parent = c
+		// update max lengths
+		usageLen := len(x.Use)
+		if usageLen > c.commandsMaxUseLen {
+			c.commandsMaxUseLen = usageLen
+		}
+		commandPathLen := len(x.CommandPath())
+		if commandPathLen > c.commandsMaxCommandPathLen {
+			c.commandsMaxCommandPathLen = commandPathLen
+		}
+		nameLen := len(x.Name())
+		if nameLen > c.commandsMaxNameLen {
+			c.commandsMaxNameLen = nameLen
+		}
+		// If global normalization function exists, update all children
+		if c.globNormFunc != nil {
+			x.SetGlobalNormalizationFunc(c.globNormFunc)
+		}
+		c.commands = append(c.commands, x)
+		c.commandsAreSorted = false
+	}
+}
+
+// RemoveCommand removes one or more commands from a parent command.
+func (c *Command) RemoveCommand(cmds ...*Command) {
+	commands := []*Command{}
+main:
+	for _, command := range c.commands {
+		for _, cmd := range cmds {
+			if command == cmd {
+				command.parent = nil
+				continue main
+			}
+		}
+		commands = append(commands, command)
+	}
+	c.commands = commands
+	// recompute all lengths
+	c.commandsMaxUseLen = 0
+	c.commandsMaxCommandPathLen = 0
+	c.commandsMaxNameLen = 0
+	for _, command := range c.commands {
+		usageLen := len(command.Use)
+		if usageLen > c.commandsMaxUseLen {
+			c.commandsMaxUseLen = usageLen
+		}
+		commandPathLen := len(command.CommandPath())
+		if commandPathLen > c.commandsMaxCommandPathLen {
+			c.commandsMaxCommandPathLen = commandPathLen
+		}
+		nameLen := len(command.Name())
+		if nameLen > c.commandsMaxNameLen {
+			c.commandsMaxNameLen = nameLen
+		}
+	}
+}
+
+// Print is a convenience method to Print to the defined output, falling back to Stderr if not set.
+func (c *Command) Print(i ...interface{}) {
+	fmt.Fprint(c.OutOrStderr(), i...)
+}
+
+// Println is a convenience method to Println to the defined output, falling back to Stderr if not set.
+func (c *Command) Println(i ...interface{}) {
+	c.Print(fmt.Sprintln(i...))
+}
+
+// Printf is a convenience method to Printf to the defined output, falling back to Stderr if not set.
+func (c *Command) Printf(format string, i ...interface{}) {
+	c.Print(fmt.Sprintf(format, i...))
+}
+
+// CommandPath returns the full path to this command.
+func (c *Command) CommandPath() string {
+	if c.HasParent() {
+		return c.Parent().CommandPath() + " " + c.Name()
+	}
+	return c.Name()
+}
+
+// UseLine puts out the full usage for a given command (including parents).
+func (c *Command) UseLine() string {
+	var useline string
+	if c.HasParent() {
+		useline = c.parent.CommandPath() + " " + c.Use
+	} else {
+		useline = c.Use
+	}
+	if c.DisableFlagsInUseLine {
+		return useline
+	}
+	if c.HasAvailableFlags() && !strings.Contains(useline, "[flags]") {
+		useline += " [flags]"
+	}
+	return useline
+}
+
+// DebugFlags is used to determine which flags have been assigned to which commands
+// and which persist.
+func (c *Command) DebugFlags() {
+	c.Println("DebugFlags called on", c.Name())
+	var debugflags func(*Command)
+
+	debugflags = func(x *Command) {
+		if x.HasFlags() || x.HasPersistentFlags() {
+			c.Println(x.Name())
+		}
+		if x.HasFlags() {
+			x.flags.VisitAll(func(f *flag.Flag) {
+				if x.HasPersistentFlags() && x.persistentFlag(f.Name) != nil {
+					c.Println("  -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, "  [LP]")
+				} else {
+					c.Println("  -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, "  [L]")
+				}
+			})
+		}
+		if x.HasPersistentFlags() {
+			x.pflags.VisitAll(func(f *flag.Flag) {
+				if x.HasFlags() {
+					if x.flags.Lookup(f.Name) == nil {
+						c.Println("  -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, "  [P]")
+					}
+				} else {
+					c.Println("  -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, "  [P]")
+				}
+			})
+		}
+		c.Println(x.flagErrorBuf)
+		if x.HasSubCommands() {
+			for _, y := range x.commands {
+				debugflags(y)
+			}
+		}
+	}
+
+	debugflags(c)
+}
+
+// Name returns the command's name: the first word in the use line.
+func (c *Command) Name() string {
+	name := c.Use
+	i := strings.Index(name, " ")
+	if i >= 0 {
+		name = name[:i]
+	}
+	return name
+}
+
+// HasAlias determines if a given string is an alias of the command.
+func (c *Command) HasAlias(s string) bool {
+	for _, a := range c.Aliases {
+		if a == s {
+			return true
+		}
+	}
+	return false
+}
+
+// CalledAs returns the command name or alias that was used to invoke
+// this command or an empty string if the command has not been called.
+func (c *Command) CalledAs() string {
+	if c.commandCalledAs.called {
+		return c.commandCalledAs.name
+	}
+	return ""
+}
+
+// hasNameOrAliasPrefix returns true if the command's Name or any of its
+// aliases starts with prefix.
+func (c *Command) hasNameOrAliasPrefix(prefix string) bool {
+	if strings.HasPrefix(c.Name(), prefix) {
+		c.commandCalledAs.name = c.Name()
+		return true
+	}
+	for _, alias := range c.Aliases {
+		if strings.HasPrefix(alias, prefix) {
+			c.commandCalledAs.name = alias
+			return true
+		}
+	}
+	return false
+}
+
+// NameAndAliases returns a list of the command name and all aliases.
+func (c *Command) NameAndAliases() string {
+	return strings.Join(append([]string{c.Name()}, c.Aliases...), ", ")
+}
+
+// HasExample determines if the command has an example.
+func (c *Command) HasExample() bool {
+	return len(c.Example) > 0
+}
+
+// Runnable determines if the command is itself runnable.
+func (c *Command) Runnable() bool {
+	return c.Run != nil || c.RunE != nil
+}
+
+// HasSubCommands determines if the command has child commands.
+func (c *Command) HasSubCommands() bool {
+	return len(c.commands) > 0
+}
+
+// IsAvailableCommand determines if a command is available as a non-help command
+// (this includes all non-deprecated, non-hidden commands).
+func (c *Command) IsAvailableCommand() bool {
+	if len(c.Deprecated) != 0 || c.Hidden {
+		return false
+	}
+
+	if c.HasParent() && c.Parent().helpCommand == c {
+		return false
+	}
+
+	if c.Runnable() || c.HasAvailableSubCommands() {
+		return true
+	}
+
+	return false
+}
+
+// IsAdditionalHelpTopicCommand determines if a command is an additional
+// help topic command; additional help topic command is determined by the
+// fact that it is NOT runnable/hidden/deprecated, and has no sub commands that
+// are runnable/hidden/deprecated.
+// Concrete example: https://github.com/spf13/cobra/issues/393#issuecomment-282741924.
+func (c *Command) IsAdditionalHelpTopicCommand() bool {
+	// if a command is runnable, deprecated, or hidden it is not a 'help' command
+	if c.Runnable() || len(c.Deprecated) != 0 || c.Hidden {
+		return false
+	}
+
+	// if any non-help sub commands are found, the command is not a 'help' command
+	for _, sub := range c.commands {
+		if !sub.IsAdditionalHelpTopicCommand() {
+			return false
+		}
+	}
+
+	// the command either has no sub commands, or no non-help sub commands
+	return true
+}
+
+// HasHelpSubCommands determines if a command has any available 'help' sub commands
+// that need to be shown in the usage/help default template under 'additional help
+// topics'.
+func (c *Command) HasHelpSubCommands() bool {
+	// return true on the first found available 'help' sub command
+	for _, sub := range c.commands {
+		if sub.IsAdditionalHelpTopicCommand() {
+			return true
+		}
+	}
+
+	// the command either has no sub commands, or no available 'help' sub commands
+	return false
+}
+
+// HasAvailableSubCommands determines if a command has available sub commands that
+// need to be shown in the usage/help default template under 'available commands'.
+func (c *Command) HasAvailableSubCommands() bool {
+	// return true on the first found available (non deprecated/help/hidden)
+	// sub command
+	for _, sub := range c.commands {
+		if sub.IsAvailableCommand() {
+			return true
+		}
+	}
+
+	// the command either has no sub commands, or no available (non deprecated/help/hidden)
+	// sub commands
+	return false
+}
+
+// HasParent determines if the command is a child command.
+func (c *Command) HasParent() bool {
+	return c.parent != nil
+}
+
+// GlobalNormalizationFunc returns the global normalization function or nil if it doesn't exist.
+func (c *Command) GlobalNormalizationFunc() func(f *flag.FlagSet, name string) flag.NormalizedName {
+	return c.globNormFunc
+}
+
+// Flags returns the complete FlagSet that applies
+// to this command (local and persistent declared here and by all parents).
+func (c *Command) Flags() *flag.FlagSet {
+	if c.flags == nil {
+		c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+		if c.flagErrorBuf == nil {
+			c.flagErrorBuf = new(bytes.Buffer)
+		}
+		c.flags.SetOutput(c.flagErrorBuf)
+	}
+
+	return c.flags
+}
+
+// LocalNonPersistentFlags are flags specific to this command which will NOT persist to subcommands.
+func (c *Command) LocalNonPersistentFlags() *flag.FlagSet {
+	persistentFlags := c.PersistentFlags()
+
+	out := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+	c.LocalFlags().VisitAll(func(f *flag.Flag) {
+		if persistentFlags.Lookup(f.Name) == nil {
+			out.AddFlag(f)
+		}
+	})
+	return out
+}
+
+// LocalFlags returns the local FlagSet specifically set in the current command.
+func (c *Command) LocalFlags() *flag.FlagSet {
+	c.mergePersistentFlags()
+
+	if c.lflags == nil {
+		c.lflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+		if c.flagErrorBuf == nil {
+			c.flagErrorBuf = new(bytes.Buffer)
+		}
+		c.lflags.SetOutput(c.flagErrorBuf)
+	}
+	c.lflags.SortFlags = c.Flags().SortFlags
+	if c.globNormFunc != nil {
+		c.lflags.SetNormalizeFunc(c.globNormFunc)
+	}
+
+	addToLocal := func(f *flag.Flag) {
+		if c.lflags.Lookup(f.Name) == nil && c.parentsPflags.Lookup(f.Name) == nil {
+			c.lflags.AddFlag(f)
+		}
+	}
+	c.Flags().VisitAll(addToLocal)
+	c.PersistentFlags().VisitAll(addToLocal)
+	return c.lflags
+}
+
+// InheritedFlags returns all flags which were inherited from parent commands.
+func (c *Command) InheritedFlags() *flag.FlagSet {
+	c.mergePersistentFlags()
+
+	if c.iflags == nil {
+		c.iflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+		if c.flagErrorBuf == nil {
+			c.flagErrorBuf = new(bytes.Buffer)
+		}
+		c.iflags.SetOutput(c.flagErrorBuf)
+	}
+
+	local := c.LocalFlags()
+	if c.globNormFunc != nil {
+		c.iflags.SetNormalizeFunc(c.globNormFunc)
+	}
+
+	c.parentsPflags.VisitAll(func(f *flag.Flag) {
+		if c.iflags.Lookup(f.Name) == nil && local.Lookup(f.Name) == nil {
+			c.iflags.AddFlag(f)
+		}
+	})
+	return c.iflags
+}
+
+// NonInheritedFlags returns all flags which were not inherited from parent commands.
+func (c *Command) NonInheritedFlags() *flag.FlagSet {
+	return c.LocalFlags()
+}
+
+// PersistentFlags returns the persistent FlagSet specifically set in the current command.
+func (c *Command) PersistentFlags() *flag.FlagSet {
+	if c.pflags == nil {
+		c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+		if c.flagErrorBuf == nil {
+			c.flagErrorBuf = new(bytes.Buffer)
+		}
+		c.pflags.SetOutput(c.flagErrorBuf)
+	}
+	return c.pflags
+}
+
+// ResetFlags deletes all flags from the command.
+func (c *Command) ResetFlags() {
+	c.flagErrorBuf = new(bytes.Buffer)
+	c.flagErrorBuf.Reset()
+	c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+	c.flags.SetOutput(c.flagErrorBuf)
+	c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+	c.pflags.SetOutput(c.flagErrorBuf)
+
+	c.lflags = nil
+	c.iflags = nil
+	c.parentsPflags = nil
+}
+
+// HasFlags checks if the command contains any flags (local plus persistent from the entire structure).
+func (c *Command) HasFlags() bool {
+	return c.Flags().HasFlags()
+}
+
+// HasPersistentFlags checks if the command contains persistent flags.
+func (c *Command) HasPersistentFlags() bool {
+	return c.PersistentFlags().HasFlags()
+}
+
+// HasLocalFlags checks if the command has flags specifically declared locally.
+func (c *Command) HasLocalFlags() bool {
+	return c.LocalFlags().HasFlags()
+}
+
+// HasInheritedFlags checks if the command has flags inherited from its parent command.
+func (c *Command) HasInheritedFlags() bool {
+	return c.InheritedFlags().HasFlags()
+}
+
+// HasAvailableFlags checks if the command contains any flags (local plus persistent from the entire
+// structure) which are not hidden or deprecated.
+func (c *Command) HasAvailableFlags() bool {
+	return c.Flags().HasAvailableFlags()
+}
+
+// HasAvailablePersistentFlags checks if the command contains persistent flags which are not hidden or deprecated.
+func (c *Command) HasAvailablePersistentFlags() bool {
+	return c.PersistentFlags().HasAvailableFlags()
+}
+
+// HasAvailableLocalFlags checks if the command has flags specifically declared locally which are not hidden
+// or deprecated.
+func (c *Command) HasAvailableLocalFlags() bool {
+	return c.LocalFlags().HasAvailableFlags()
+}
+
+// HasAvailableInheritedFlags checks if the command has flags inherited from its parent command which are
+// not hidden or deprecated.
+func (c *Command) HasAvailableInheritedFlags() bool {
+	return c.InheritedFlags().HasAvailableFlags()
+}
+
+// Flag climbs up the command tree looking for a matching flag.
+func (c *Command) Flag(name string) (flag *flag.Flag) {
+	flag = c.Flags().Lookup(name)
+
+	if flag == nil {
+		flag = c.persistentFlag(name)
+	}
+
+	return
+}
+
+// persistentFlag recursively searches for a matching persistent flag.
+func (c *Command) persistentFlag(name string) (flag *flag.Flag) {
+	if c.HasPersistentFlags() {
+		flag = c.PersistentFlags().Lookup(name)
+	}
+
+	if flag == nil {
+		c.updateParentsPflags()
+		flag = c.parentsPflags.Lookup(name)
+	}
+	return
+}
+
+// ParseFlags parses the persistent flag tree and local flags.
+func (c *Command) ParseFlags(args []string) error {
+	if c.DisableFlagParsing {
+		return nil
+	}
+
+	if c.flagErrorBuf == nil {
+		c.flagErrorBuf = new(bytes.Buffer)
+	}
+	beforeErrorBufLen := c.flagErrorBuf.Len()
+	c.mergePersistentFlags()
+
+	// do this here, after merging all flags and just before parsing
+	c.Flags().ParseErrorsWhitelist = flag.ParseErrorsWhitelist(c.FParseErrWhitelist)
+
+	err := c.Flags().Parse(args)
+	// Print warnings if they occurred (e.g. deprecated flag messages).
+	if c.flagErrorBuf.Len()-beforeErrorBufLen > 0 && err == nil {
+		c.Print(c.flagErrorBuf.String())
+	}
+
+	return err
+}
+
+// Parent returns the command's parent command.
+func (c *Command) Parent() *Command {
+	return c.parent
+}
+
+// mergePersistentFlags merges c.PersistentFlags() into c.Flags()
+// and adds missing persistent flags of all parents.
+func (c *Command) mergePersistentFlags() {
+	c.updateParentsPflags()
+	c.Flags().AddFlagSet(c.PersistentFlags())
+	c.Flags().AddFlagSet(c.parentsPflags)
+}
+
+// updateParentsPflags updates c.parentsPflags by adding
+// new persistent flags of all parents.
+// If c.parentsPflags == nil, it creates a new FlagSet.
+func (c *Command) updateParentsPflags() {
+	if c.parentsPflags == nil {
+		c.parentsPflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+		c.parentsPflags.SetOutput(c.flagErrorBuf)
+		c.parentsPflags.SortFlags = false
+	}
+
+	if c.globNormFunc != nil {
+		c.parentsPflags.SetNormalizeFunc(c.globNormFunc)
+	}
+
+	c.Root().PersistentFlags().AddFlagSet(flag.CommandLine)
+
+	c.VisitParents(func(parent *Command) {
+		c.parentsPflags.AddFlagSet(parent.PersistentFlags())
+	})
+}
diff --git a/vendor/github.com/spf13/cobra/command_notwin.go b/vendor/github.com/spf13/cobra/command_notwin.go
new file mode 100644
index 0000000..6159c1c
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/command_notwin.go
@@ -0,0 +1,5 @@
+// +build !windows
+
+package cobra
+
+var preExecHookFn func(*Command)
diff --git a/vendor/github.com/spf13/cobra/command_win.go b/vendor/github.com/spf13/cobra/command_win.go
new file mode 100644
index 0000000..edec728
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/command_win.go
@@ -0,0 +1,20 @@
+// +build windows
+
+package cobra
+
+import (
+	"os"
+	"time"
+
+	"github.com/inconshreveable/mousetrap"
+)
+
+var preExecHookFn = preExecHook
+
+func preExecHook(c *Command) {
+	if MousetrapHelpText != "" && mousetrap.StartedByExplorer() {
+		c.Print(MousetrapHelpText)
+		time.Sleep(5 * time.Second)
+		os.Exit(1)
+	}
+}
diff --git a/vendor/github.com/spf13/cobra/zsh_completions.go b/vendor/github.com/spf13/cobra/zsh_completions.go
new file mode 100644
index 0000000..889c22e
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/zsh_completions.go
@@ -0,0 +1,126 @@
+package cobra
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+	"strings"
+)
+
+// GenZshCompletionFile generates a zsh completion file.
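+//
+// For example (an illustrative sketch; the output filename is hypothetical):
+//
+//	if err := rootCmd.GenZshCompletionFile("_app"); err != nil {
+//		// handle the error
+//	}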
+func (c *Command) GenZshCompletionFile(filename string) error {
+	outFile, err := os.Create(filename)
+	if err != nil {
+		return err
+	}
+	defer outFile.Close()
+
+	return c.GenZshCompletion(outFile)
+}
+
+// GenZshCompletion generates a zsh completion file and writes to the passed writer.
+func (c *Command) GenZshCompletion(w io.Writer) error {
+	buf := new(bytes.Buffer)
+
+	writeHeader(buf, c)
+	maxDepth := maxDepth(c)
+	writeLevelMapping(buf, maxDepth)
+	writeLevelCases(buf, maxDepth, c)
+
+	_, err := buf.WriteTo(w)
+	return err
+}
+
+func writeHeader(w io.Writer, cmd *Command) {
+	fmt.Fprintf(w, "#compdef %s\n\n", cmd.Name())
+}
+
+// maxDepth returns the number of levels of subcommands below c.
+func maxDepth(c *Command) int {
+	if len(c.Commands()) == 0 {
+		return 0
+	}
+	maxDepthSub := 0
+	for _, s := range c.Commands() {
+		subDepth := maxDepth(s)
+		if subDepth > maxDepthSub {
+			maxDepthSub = subDepth
+		}
+	}
+	return 1 + maxDepthSub
+}
+
+func writeLevelMapping(w io.Writer, numLevels int) {
+	fmt.Fprintln(w, `_arguments \`)
+	for i := 1; i <= numLevels; i++ {
+		fmt.Fprintf(w, `  '%d: :->level%d' \`, i, i)
+		fmt.Fprintln(w)
+	}
+	fmt.Fprintf(w, `  '%d: :%s'`, numLevels+1, "_files")
+	fmt.Fprintln(w)
+}
+
+func writeLevelCases(w io.Writer, maxDepth int, root *Command) {
+	fmt.Fprintln(w, "case $state in")
+	defer fmt.Fprintln(w, "esac")
+
+	for i := 1; i <= maxDepth; i++ {
+		fmt.Fprintf(w, "  level%d)\n", i)
+		writeLevel(w, root, i)
+		fmt.Fprintln(w, "  ;;")
+	}
+	fmt.Fprintln(w, "  *)")
+	fmt.Fprintln(w, "    _arguments '*: :_files'")
+	fmt.Fprintln(w, "  ;;")
+}
+
+func writeLevel(w io.Writer, root *Command, i int) {
+	fmt.Fprintf(w, "    case $words[%d] in\n", i)
+	defer fmt.Fprintln(w, "    esac")
+
+	commands := filterByLevel(root, i)
+	byParent := groupByParent(commands)
+
+	for p, c := range byParent {
+		names := names(c)
+		fmt.Fprintf(w, "      %s)\n", p)
+		fmt.Fprintf(w, "        _arguments '%d: :(%s)'\n", i, strings.Join(names, " "))
+		fmt.Fprintln(w, "      ;;")
+	}
+	fmt.Fprintln(w, "      *)")
+	fmt.Fprintln(w, "        _arguments '*: :_files'")
+	fmt.Fprintln(w, "      ;;")
+
+}
+
+func filterByLevel(c *Command, l int) []*Command {
+	cs := make([]*Command, 0)
+	if l == 0 {
+		cs = append(cs, c)
+		return cs
+	}
+	for _, s := range c.Commands() {
+		cs = append(cs, filterByLevel(s, l-1)...)
+	}
+	return cs
+}
+
+func groupByParent(commands []*Command) map[string][]*Command {
+	m := make(map[string][]*Command)
+	for _, c := range commands {
+		parent := c.Parent()
+		if parent == nil {
+			continue
+		}
+		m[parent.Name()] = append(m[parent.Name()], c)
+	}
+	return m
+}
+
+func names(commands []*Command) []string {
+	ns := make([]string, len(commands))
+	for i, c := range commands {
+		ns[i] = c.Name()
+	}
+	return ns
+}
diff --git a/vendor/github.com/spf13/pflag/.gitignore b/vendor/github.com/spf13/pflag/.gitignore
new file mode 100644
index 0000000..c3da290
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/.gitignore
@@ -0,0 +1,2 @@
+.idea/*
+
diff --git a/vendor/github.com/spf13/pflag/.travis.yml b/vendor/github.com/spf13/pflag/.travis.yml
new file mode 100644
index 0000000..f8a63b3
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/.travis.yml
@@ -0,0 +1,21 @@
+sudo: false
+
+language: go
+
+go:
+  - 1.7.3
+  - 1.8.1
+  - tip
+
+matrix:
+  allow_failures:
+    - go: tip
+
+install:
+  - go get github.com/golang/lint/golint
+  - export PATH=$GOPATH/bin:$PATH
+  - go install ./...
+
+script:
+  - verify/all.sh -v
+  - go test ./...
diff --git a/vendor/github.com/spf13/pflag/LICENSE b/vendor/github.com/spf13/pflag/LICENSE
new file mode 100644
index 0000000..63ed1cf
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2012 Alex Ogier. All rights reserved.
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/spf13/pflag/README.md b/vendor/github.com/spf13/pflag/README.md
new file mode 100644
index 0000000..b052414
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/README.md
@@ -0,0 +1,296 @@
+[![Build Status](https://travis-ci.org/spf13/pflag.svg?branch=master)](https://travis-ci.org/spf13/pflag)
+[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/pflag)](https://goreportcard.com/report/github.com/spf13/pflag)
+[![GoDoc](https://godoc.org/github.com/spf13/pflag?status.svg)](https://godoc.org/github.com/spf13/pflag)
+
+## Description
+
+pflag is a drop-in replacement for Go's flag package, implementing
+POSIX/GNU-style --flags.
+
+pflag is compatible with the [GNU extensions to the POSIX recommendations
+for command-line options][1]. For a more precise description, see the
+"Command-line flag syntax" section below.
+
+[1]: http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html
+
+pflag is available under the same style of BSD license as the Go language,
+which can be found in the LICENSE file.
+
+## Installation
+
+pflag is available using the standard `go get` command.
+
+Install by running:
+
+    go get github.com/spf13/pflag
+
+Run tests by running:
+
+    go test github.com/spf13/pflag
+
+## Usage
+
+pflag is a drop-in replacement for Go's native flag package. If you import
+pflag under the name "flag" then all code should continue to function
+with no changes.
+
+``` go
+import flag "github.com/spf13/pflag"
+```
+
+There is one exception to this: if you directly instantiate the Flag struct
+there is one more field "Shorthand" that you will need to set.
+Most code never instantiates this struct directly, and instead uses
+functions such as String(), BoolVar(), and Var(), and is therefore
+unaffected.
+
+Define flags using flag.String(), Bool(), Int(), etc.
+
+This declares an integer flag, -flagname, stored in the pointer ip, with type *int.
+
+``` go
+var ip *int = flag.Int("flagname", 1234, "help message for flagname")
+```
+
+If you like, you can bind the flag to a variable using the Var() functions.
+
+``` go
+var flagvar int
+func init() {
+    flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname")
+}
+```
+
+Or you can create custom flags that satisfy the Value interface (with
+pointer receivers) and couple them to flag parsing by
+
+``` go
+flag.Var(&flagVal, "name", "help message for flagname")
+```
+
+For such flags, the default value is just the initial value of the variable.
+
+After all flags are defined, call
+
+``` go
+flag.Parse()
+```
+
+to parse the command line into the defined flags.
+
+Flags may then be used directly. If you're using the flags themselves,
+they are all pointers; if you bind to variables, they're values.
+
+``` go
+fmt.Println("ip has value ", *ip)
+fmt.Println("flagvar has value ", flagvar)
+```
+
+There are helper functions to get values later if you have the FlagSet but
+found it difficult to keep up with all of the flag pointers in your code.
+If you have a pflag.FlagSet with a flag called 'flagname' of type int you
+can use GetInt() to get the int value. But notice that 'flagname' must exist
+and it must be an int. GetString("flagname") will fail.
+
+``` go
+i, err := flagset.GetInt("flagname")
+```
+
+After parsing, the arguments remaining after the flags are available as the
+slice flag.Args() or individually as flag.Arg(i).
+The arguments are indexed from 0 through flag.NArg()-1.
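+
+As a minimal sketch (assuming the flags above are defined and the program is
+invoked with trailing positional arguments):
+
+``` go
+flag.Parse()
+for i := 0; i < flag.NArg(); i++ {
+	fmt.Printf("arg %d: %s\n", i, flag.Arg(i))
+}
+```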
+
+The pflag package also defines some new functions that are not in flag,
+that give one-letter shorthands for flags. You can use these by appending
+'P' to the name of any function that defines a flag.
+
+``` go
+var ip = flag.IntP("flagname", "f", 1234, "help message")
+var flagvar bool
+func init() {
+	flag.BoolVarP(&flagvar, "boolname", "b", true, "help message")
+}
+flag.VarP(&flagVal, "varname", "v", "help message")
+```
+
+Shorthand letters can be used with single dashes on the command line.
+Boolean shorthand flags can be combined with other shorthand flags.
+
+The default set of command-line flags is controlled by
+top-level functions.  The FlagSet type allows one to define
+independent sets of flags, such as to implement subcommands
+in a command-line interface. The methods of FlagSet are
+analogous to the top-level functions for the command-line
+flag set.
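+
+As a sketch, a hypothetical "clone" subcommand could get its own flag set:
+
+``` go
+cloneFlags := flag.NewFlagSet("clone", flag.ContinueOnError)
+depth := cloneFlags.IntP("depth", "d", 0, "create a shallow clone")
+if err := cloneFlags.Parse(os.Args[2:]); err == nil {
+	fmt.Println("clone depth:", *depth)
+}
+```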
+
+## Setting no option default values for flags
+
+After you create a flag it is possible to set the pflag.NoOptDefVal for
+the given flag. Doing this changes the meaning of the flag slightly. If
+a flag has a NoOptDefVal and the flag is set on the command line without
+an option the flag will be set to the NoOptDefVal. For example given:
+
+``` go
+var ip = flag.IntP("flagname", "f", 1234, "help message")
+flag.Lookup("flagname").NoOptDefVal = "4321"
+```
+
+Would result in something like
+
+| Parsed Arguments | Resulting Value |
+| -------------    | -------------   |
+| --flagname=1357  | ip=1357         |
+| --flagname       | ip=4321         |
+| [nothing]        | ip=1234         |
+
+## Command line flag syntax
+
+```
+--flag    // boolean flags, or flags with no option default values
+--flag x  // only on flags without a default value
+--flag=x
+```
+
+Unlike the flag package, a single dash before an option means something
+different than a double dash. Single dashes signify a series of shorthand
+letters for flags. All but the last shorthand letter must be boolean flags
+or flags with a 'no option default value'.
+
+```
+// boolean or flags where the 'no option default value' is set
+-f
+-f=true
+-abc
+but
+-b true is INVALID
+
+// non-boolean and flags without a 'no option default value'
+-n 1234
+-n=1234
+-n1234
+
+// mixed
+-abcs "hello"
+-absd="hello"
+-abcs1234
+```
+
+Flag parsing stops after the terminator "--". Unlike the flag package,
+flags can be interspersed with arguments anywhere on the command line
+before this terminator.
+
+Integer flags accept 1234, 0664, 0x1234 and may be negative.
+Boolean flags (in their long form) accept 1, 0, t, f, true, false,
+TRUE, FALSE, True, False.
+Duration flags accept any input valid for time.ParseDuration.
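+
+For example, given hypothetical flags named count, verbose, and timeout, all
+of the following parse:
+
+```
+--count=0x1F      // integer flags also accept octal (0664) and hex
+--verbose=t       // long-form booleans accept t/f, 1/0, and friends
+--timeout=1h30m   // any syntax accepted by time.ParseDuration
+```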
+
+## Mutating or "Normalizing" Flag names
+
+It is possible to set a custom flag name 'normalization function'. It normalizes flag names both when they are created in code and when they are used on the command line; the 'normalized' form is what is used for comparison. Two examples of using a custom normalization function follow.
+
+**Example #1**: You want -, _, and . in flags to compare the same. aka --my-flag == --my_flag == --my.flag
+
+``` go
+func wordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName {
+	from := []string{"-", "_"}
+	to := "."
+	for _, sep := range from {
+		name = strings.Replace(name, sep, to, -1)
+	}
+	return pflag.NormalizedName(name)
+}
+
+myFlagSet.SetNormalizeFunc(wordSepNormalizeFunc)
+```
+
+**Example #2**: You want to alias two flags. aka --old-flag-name == --new-flag-name
+
+``` go
+func aliasNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName {
+	switch name {
+	case "old-flag-name":
+		name = "new-flag-name"
+	}
+	return pflag.NormalizedName(name)
+}
+
+myFlagSet.SetNormalizeFunc(aliasNormalizeFunc)
+```
+
+## Deprecating a flag or its shorthand
+It is possible to deprecate a flag, or just its shorthand. Deprecating a flag/shorthand hides it from help text and prints a usage message when the deprecated flag/shorthand is used.
+
+**Example #1**: You want to deprecate a flag named "badflag" as well as inform the users what flag they should use instead.
+```go
+// deprecate a flag by specifying its name and a usage message
+flags.MarkDeprecated("badflag", "please use --good-flag instead")
+```
+This hides "badflag" from help text, and prints `Flag --badflag has been deprecated, please use --good-flag instead` when "badflag" is used.
+
+**Example #2**: You want to keep a flag name "noshorthandflag" but deprecate its shorthand "n".
+```go
+// deprecate a flag shorthand by specifying its flag name and a usage message
+flags.MarkShorthandDeprecated("noshorthandflag", "please use --noshorthandflag only")
+```
+This hides the shorthand "n" from help text, and prints `Flag shorthand -n has been deprecated, please use --noshorthandflag only` when the shorthand "n" is used.
+
+Note that the usage message is essential here; it must not be empty.
+
+## Hidden flags
+It is possible to mark a flag as hidden, meaning it will still function as normal, but will not show up in usage/help text.
+
+**Example**: You have a flag named "secretFlag" that you need for internal use only and don't want it showing up in help text, or for its usage text to be available.
+```go
+// hide a flag by specifying its name
+flags.MarkHidden("secretFlag")
+```
+
+## Disable sorting of flags
+`pflag` allows you to disable sorting of flags for help and usage messages.
+
+**Example**:
+```go
+flags.BoolP("verbose", "v", false, "verbose output")
+flags.String("coolflag", "yeaah", "it's a really cool flag")
+flags.Int("usefulflag", 777, "sometimes it's very useful")
+flags.SortFlags = false
+flags.PrintDefaults()
+```
+**Output**:
+```
+  -v, --verbose           verbose output
+      --coolflag string   it's a really cool flag (default "yeaah")
+      --usefulflag int    sometimes it's very useful (default 777)
+```
+
+
+## Supporting Go flags when using pflag
+In order to support flags defined using Go's `flag` package, they must be added to the `pflag` flagset. This is usually necessary
+to support flags defined by third-party dependencies (e.g. `golang/glog`).
+
+**Example**: You want to add the Go flags to the `CommandLine` flagset
+```go
+import (
+	goflag "flag"
+	flag "github.com/spf13/pflag"
+)
+
+var ip *int = flag.Int("flagname", 1234, "help message for flagname")
+
+func main() {
+	flag.CommandLine.AddGoFlagSet(goflag.CommandLine)
+	flag.Parse()
+}
+```
+
+## More info
+
+You can see the full reference documentation of the pflag package
+[at godoc.org][3], or through Go's standard documentation system by
+running `godoc -http=:6060` and browsing to
+[http://localhost:6060/pkg/github.com/spf13/pflag][2] after
+installation.
+
+[2]: http://localhost:6060/pkg/github.com/spf13/pflag
+[3]: http://godoc.org/github.com/spf13/pflag
diff --git a/vendor/github.com/spf13/pflag/bool.go b/vendor/github.com/spf13/pflag/bool.go
new file mode 100644
index 0000000..c4c5c0b
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/bool.go
@@ -0,0 +1,94 @@
+package pflag
+
+import "strconv"
+
+// optional interface to indicate boolean flags that can be
+// supplied without "=value" text
+type boolFlag interface {
+	Value
+	IsBoolFlag() bool
+}
+
+// -- bool Value
+type boolValue bool
+
+func newBoolValue(val bool, p *bool) *boolValue {
+	*p = val
+	return (*boolValue)(p)
+}
+
+func (b *boolValue) Set(s string) error {
+	v, err := strconv.ParseBool(s)
+	*b = boolValue(v)
+	return err
+}
+
+func (b *boolValue) Type() string {
+	return "bool"
+}
+
+func (b *boolValue) String() string { return strconv.FormatBool(bool(*b)) }
+
+func (b *boolValue) IsBoolFlag() bool { return true }
+
+func boolConv(sval string) (interface{}, error) {
+	return strconv.ParseBool(sval)
+}
+
+// GetBool returns the bool value of a flag with the given name
+func (f *FlagSet) GetBool(name string) (bool, error) {
+	val, err := f.getFlagType(name, "bool", boolConv)
+	if err != nil {
+		return false, err
+	}
+	return val.(bool), nil
+}
+
+// BoolVar defines a bool flag with specified name, default value, and usage string.
+// The argument p points to a bool variable in which to store the value of the flag.
+func (f *FlagSet) BoolVar(p *bool, name string, value bool, usage string) {
+	f.BoolVarP(p, name, "", value, usage)
+}
+
+// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BoolVarP(p *bool, name, shorthand string, value bool, usage string) {
+	flag := f.VarPF(newBoolValue(value, p), name, shorthand, usage)
+	flag.NoOptDefVal = "true"
+}
+
+// BoolVar defines a bool flag with specified name, default value, and usage string.
+// The argument p points to a bool variable in which to store the value of the flag.
+func BoolVar(p *bool, name string, value bool, usage string) {
+	BoolVarP(p, name, "", value, usage)
+}
+
+// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash.
+func BoolVarP(p *bool, name, shorthand string, value bool, usage string) {
+	flag := CommandLine.VarPF(newBoolValue(value, p), name, shorthand, usage)
+	flag.NoOptDefVal = "true"
+}
+
+// Bool defines a bool flag with specified name, default value, and usage string.
+// The return value is the address of a bool variable that stores the value of the flag.
+func (f *FlagSet) Bool(name string, value bool, usage string) *bool {
+	return f.BoolP(name, "", value, usage)
+}
+
+// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BoolP(name, shorthand string, value bool, usage string) *bool {
+	p := new(bool)
+	f.BoolVarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// Bool defines a bool flag with specified name, default value, and usage string.
+// The return value is the address of a bool variable that stores the value of the flag.
+func Bool(name string, value bool, usage string) *bool {
+	return BoolP(name, "", value, usage)
+}
+
+// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash.
+func BoolP(name, shorthand string, value bool, usage string) *bool {
+	b := CommandLine.BoolP(name, shorthand, value, usage)
+	return b
+}
diff --git a/vendor/github.com/spf13/pflag/bool_slice.go b/vendor/github.com/spf13/pflag/bool_slice.go
new file mode 100644
index 0000000..5af02f1
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/bool_slice.go
@@ -0,0 +1,147 @@
+package pflag
+
+import (
+	"io"
+	"strconv"
+	"strings"
+)
+
+// -- boolSlice Value
+type boolSliceValue struct {
+	value   *[]bool
+	changed bool
+}
+
+func newBoolSliceValue(val []bool, p *[]bool) *boolSliceValue {
+	bsv := new(boolSliceValue)
+	bsv.value = p
+	*bsv.value = val
+	return bsv
+}
+
+// Set converts the comma-separated string of booleans and assigns it as the []bool value of this flag.
+// If Set is called on a flag that already has a []bool assigned, the newly converted values will be appended.
+func (s *boolSliceValue) Set(val string) error {
+
+	// remove all quote characters
+	rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "")
+
+	// read flag arguments with CSV parser
+	boolStrSlice, err := readAsCSV(rmQuote.Replace(val))
+	if err != nil && err != io.EOF {
+		return err
+	}
+
+	// parse boolean values into slice
+	out := make([]bool, 0, len(boolStrSlice))
+	for _, boolStr := range boolStrSlice {
+		b, err := strconv.ParseBool(strings.TrimSpace(boolStr))
+		if err != nil {
+			return err
+		}
+		out = append(out, b)
+	}
+
+	if !s.changed {
+		*s.value = out
+	} else {
+		*s.value = append(*s.value, out...)
+	}
+
+	s.changed = true
+
+	return nil
+}
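+
+// For example (sketch): --bools=true,false followed by --bools=true leaves
+// the flag's value as [true,false,true], because once the flag has changed,
+// later Set calls append rather than replace.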
+
+// Type returns a string that uniquely represents this flag's type.
+func (s *boolSliceValue) Type() string {
+	return "boolSlice"
+}
+
+// String defines a "native" format for this boolean slice flag value.
+func (s *boolSliceValue) String() string {
+
+	boolStrSlice := make([]string, len(*s.value))
+	for i, b := range *s.value {
+		boolStrSlice[i] = strconv.FormatBool(b)
+	}
+
+	out, _ := writeAsCSV(boolStrSlice)
+
+	return "[" + out + "]"
+}
+
+func boolSliceConv(val string) (interface{}, error) {
+	val = strings.Trim(val, "[]")
+	// Empty string would cause a slice with one (empty) entry
+	if len(val) == 0 {
+		return []bool{}, nil
+	}
+	ss := strings.Split(val, ",")
+	out := make([]bool, len(ss))
+	for i, t := range ss {
+		var err error
+		out[i], err = strconv.ParseBool(t)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return out, nil
+}
+
+// GetBoolSlice returns the []bool value of a flag with the given name.
+func (f *FlagSet) GetBoolSlice(name string) ([]bool, error) {
+	val, err := f.getFlagType(name, "boolSlice", boolSliceConv)
+	if err != nil {
+		return []bool{}, err
+	}
+	return val.([]bool), nil
+}
+
+// BoolSliceVar defines a boolSlice flag with specified name, default value, and usage string.
+// The argument p points to a []bool variable in which to store the value of the flag.
+func (f *FlagSet) BoolSliceVar(p *[]bool, name string, value []bool, usage string) {
+	f.VarP(newBoolSliceValue(value, p), name, "", usage)
+}
+
+// BoolSliceVarP is like BoolSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BoolSliceVarP(p *[]bool, name, shorthand string, value []bool, usage string) {
+	f.VarP(newBoolSliceValue(value, p), name, shorthand, usage)
+}
+
+// BoolSliceVar defines a []bool flag with specified name, default value, and usage string.
+// The argument p points to a []bool variable in which to store the value of the flag.
+func BoolSliceVar(p *[]bool, name string, value []bool, usage string) {
+	CommandLine.VarP(newBoolSliceValue(value, p), name, "", usage)
+}
+
+// BoolSliceVarP is like BoolSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func BoolSliceVarP(p *[]bool, name, shorthand string, value []bool, usage string) {
+	CommandLine.VarP(newBoolSliceValue(value, p), name, shorthand, usage)
+}
+
+// BoolSlice defines a []bool flag with specified name, default value, and usage string.
+// The return value is the address of a []bool variable that stores the value of the flag.
+func (f *FlagSet) BoolSlice(name string, value []bool, usage string) *[]bool {
+	p := []bool{}
+	f.BoolSliceVarP(&p, name, "", value, usage)
+	return &p
+}
+
+// BoolSliceP is like BoolSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BoolSliceP(name, shorthand string, value []bool, usage string) *[]bool {
+	p := []bool{}
+	f.BoolSliceVarP(&p, name, shorthand, value, usage)
+	return &p
+}
+
+// BoolSlice defines a []bool flag with specified name, default value, and usage string.
+// The return value is the address of a []bool variable that stores the value of the flag.
+func BoolSlice(name string, value []bool, usage string) *[]bool {
+	return CommandLine.BoolSliceP(name, "", value, usage)
+}
+
+// BoolSliceP is like BoolSlice, but accepts a shorthand letter that can be used after a single dash.
+func BoolSliceP(name, shorthand string, value []bool, usage string) *[]bool {
+	return CommandLine.BoolSliceP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/bytes.go b/vendor/github.com/spf13/pflag/bytes.go
new file mode 100644
index 0000000..67d5304
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/bytes.go
@@ -0,0 +1,209 @@
+package pflag
+
+import (
+	"encoding/base64"
+	"encoding/hex"
+	"fmt"
+	"strings"
+)
+
+// bytesHexValue adapts []byte for use as a flag. The flag's value is hex encoded.
+type bytesHexValue []byte
+
+// String implements pflag.Value.String.
+func (bytesHex bytesHexValue) String() string {
+	return fmt.Sprintf("%X", []byte(bytesHex))
+}
+
+// Set implements pflag.Value.Set.
+func (bytesHex *bytesHexValue) Set(value string) error {
+	bin, err := hex.DecodeString(strings.TrimSpace(value))
+
+	if err != nil {
+		return err
+	}
+
+	*bytesHex = bin
+
+	return nil
+}
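+
+// For example (sketch): a hypothetical --data flag given the argument
+// "0A0B0C" decodes, via hex.DecodeString, to []byte{0x0a, 0x0b, 0x0c};
+// surrounding whitespace is trimmed first.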
+
+// Type implements pflag.Value.Type.
+func (*bytesHexValue) Type() string {
+	return "bytesHex"
+}
+
+func newBytesHexValue(val []byte, p *[]byte) *bytesHexValue {
+	*p = val
+	return (*bytesHexValue)(p)
+}
+
+func bytesHexConv(sval string) (interface{}, error) {
+
+	bin, err := hex.DecodeString(sval)
+
+	if err == nil {
+		return bin, nil
+	}
+
+	return nil, fmt.Errorf("invalid string being converted to Bytes: %s %s", sval, err)
+}
+
+// GetBytesHex returns the []byte value of a flag with the given name
+func (f *FlagSet) GetBytesHex(name string) ([]byte, error) {
+	val, err := f.getFlagType(name, "bytesHex", bytesHexConv)
+
+	if err != nil {
+		return []byte{}, err
+	}
+
+	return val.([]byte), nil
+}
+
+// BytesHexVar defines a []byte flag with specified name, default value, and usage string.
+// The argument p points to a []byte variable in which to store the value of the flag.
+func (f *FlagSet) BytesHexVar(p *[]byte, name string, value []byte, usage string) {
+	f.VarP(newBytesHexValue(value, p), name, "", usage)
+}
+
+// BytesHexVarP is like BytesHexVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BytesHexVarP(p *[]byte, name, shorthand string, value []byte, usage string) {
+	f.VarP(newBytesHexValue(value, p), name, shorthand, usage)
+}
+
+// BytesHexVar defines a []byte flag with specified name, default value, and usage string.
+// The argument p points to a []byte variable in which to store the value of the flag.
+func BytesHexVar(p *[]byte, name string, value []byte, usage string) {
+	CommandLine.VarP(newBytesHexValue(value, p), name, "", usage)
+}
+
+// BytesHexVarP is like BytesHexVar, but accepts a shorthand letter that can be used after a single dash.
+func BytesHexVarP(p *[]byte, name, shorthand string, value []byte, usage string) {
+	CommandLine.VarP(newBytesHexValue(value, p), name, shorthand, usage)
+}
+
+// BytesHex defines a []byte flag with specified name, default value, and usage string.
+// The return value is the address of a []byte variable that stores the value of the flag.
+func (f *FlagSet) BytesHex(name string, value []byte, usage string) *[]byte {
+	p := new([]byte)
+	f.BytesHexVarP(p, name, "", value, usage)
+	return p
+}
+
+// BytesHexP is like BytesHex, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BytesHexP(name, shorthand string, value []byte, usage string) *[]byte {
+	p := new([]byte)
+	f.BytesHexVarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// BytesHex defines a []byte flag with specified name, default value, and usage string.
+// The return value is the address of a []byte variable that stores the value of the flag.
+func BytesHex(name string, value []byte, usage string) *[]byte {
+	return CommandLine.BytesHexP(name, "", value, usage)
+}
+
+// BytesHexP is like BytesHex, but accepts a shorthand letter that can be used after a single dash.
+func BytesHexP(name, shorthand string, value []byte, usage string) *[]byte {
+	return CommandLine.BytesHexP(name, shorthand, value, usage)
+}
+
+// bytesBase64Value adapts []byte for use as a flag. The flag's value is base64 encoded.
+type bytesBase64Value []byte
+
+// String implements pflag.Value.String.
+func (bytesBase64 bytesBase64Value) String() string {
+	return base64.StdEncoding.EncodeToString([]byte(bytesBase64))
+}
+
+// Set implements pflag.Value.Set.
+func (bytesBase64 *bytesBase64Value) Set(value string) error {
+	bin, err := base64.StdEncoding.DecodeString(strings.TrimSpace(value))
+
+	if err != nil {
+		return err
+	}
+
+	*bytesBase64 = bin
+
+	return nil
+}
+
+// Type implements pflag.Value.Type.
+func (*bytesBase64Value) Type() string {
+	return "bytesBase64"
+}
+
+func newBytesBase64Value(val []byte, p *[]byte) *bytesBase64Value {
+	*p = val
+	return (*bytesBase64Value)(p)
+}
+
+func bytesBase64ValueConv(sval string) (interface{}, error) {
+
+	bin, err := base64.StdEncoding.DecodeString(sval)
+	if err == nil {
+		return bin, nil
+	}
+
+	return nil, fmt.Errorf("invalid string being converted to Bytes: %s %s", sval, err)
+}
+
+// GetBytesBase64 returns the []byte value of a flag with the given name
+func (f *FlagSet) GetBytesBase64(name string) ([]byte, error) {
+	val, err := f.getFlagType(name, "bytesBase64", bytesBase64ValueConv)
+
+	if err != nil {
+		return []byte{}, err
+	}
+
+	return val.([]byte), nil
+}
+
+// BytesBase64Var defines a []byte flag with specified name, default value, and usage string.
+// The argument p points to a []byte variable in which to store the value of the flag.
+func (f *FlagSet) BytesBase64Var(p *[]byte, name string, value []byte, usage string) {
+	f.VarP(newBytesBase64Value(value, p), name, "", usage)
+}
+
+// BytesBase64VarP is like BytesBase64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BytesBase64VarP(p *[]byte, name, shorthand string, value []byte, usage string) {
+	f.VarP(newBytesBase64Value(value, p), name, shorthand, usage)
+}
+
+// BytesBase64Var defines a []byte flag with specified name, default value, and usage string.
+// The argument p points to a []byte variable in which to store the value of the flag.
+func BytesBase64Var(p *[]byte, name string, value []byte, usage string) {
+	CommandLine.VarP(newBytesBase64Value(value, p), name, "", usage)
+}
+
+// BytesBase64VarP is like BytesBase64Var, but accepts a shorthand letter that can be used after a single dash.
+func BytesBase64VarP(p *[]byte, name, shorthand string, value []byte, usage string) {
+	CommandLine.VarP(newBytesBase64Value(value, p), name, shorthand, usage)
+}
+
+// BytesBase64 defines a []byte flag with specified name, default value, and usage string.
+// The return value is the address of a []byte variable that stores the value of the flag.
+func (f *FlagSet) BytesBase64(name string, value []byte, usage string) *[]byte {
+	p := new([]byte)
+	f.BytesBase64VarP(p, name, "", value, usage)
+	return p
+}
+
+// BytesBase64P is like BytesBase64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BytesBase64P(name, shorthand string, value []byte, usage string) *[]byte {
+	p := new([]byte)
+	f.BytesBase64VarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// BytesBase64 defines a []byte flag with specified name, default value, and usage string.
+// The return value is the address of a []byte variable that stores the value of the flag.
+func BytesBase64(name string, value []byte, usage string) *[]byte {
+	return CommandLine.BytesBase64P(name, "", value, usage)
+}
+
+// BytesBase64P is like BytesBase64, but accepts a shorthand letter that can be used after a single dash.
+func BytesBase64P(name, shorthand string, value []byte, usage string) *[]byte {
+	return CommandLine.BytesBase64P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/count.go b/vendor/github.com/spf13/pflag/count.go
new file mode 100644
index 0000000..aa126e4
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/count.go
@@ -0,0 +1,96 @@
+package pflag
+
+import "strconv"
+
+// -- count Value
+type countValue int
+
+func newCountValue(val int, p *int) *countValue {
+	*p = val
+	return (*countValue)(p)
+}
+
+func (i *countValue) Set(s string) error {
+	// "+1" means that no specific value was passed, so increment
+	if s == "+1" {
+		*i = countValue(*i + 1)
+		return nil
+	}
+	v, err := strconv.ParseInt(s, 0, 0)
+	*i = countValue(v)
+	return err
+}
+
+func (i *countValue) Type() string {
+	return "count"
+}
+
+func (i *countValue) String() string { return strconv.Itoa(int(*i)) }
+
+func countConv(sval string) (interface{}, error) {
+	i, err := strconv.Atoi(sval)
+	if err != nil {
+		return nil, err
+	}
+	return i, nil
+}
+
+// GetCount return the int value of a flag with the given name
+func (f *FlagSet) GetCount(name string) (int, error) {
+	val, err := f.getFlagType(name, "count", countConv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(int), nil
+}
+
+// CountVar defines a count flag with specified name, default value, and usage string.
+// The argument p points to an int variable in which to store the value of the flag.
+// A count flag will add 1 to its value every time it is found on the command line
+func (f *FlagSet) CountVar(p *int, name string, usage string) {
+	f.CountVarP(p, name, "", usage)
+}
+
+// CountVarP is like CountVar, but takes a shorthand for the flag name.
+func (f *FlagSet) CountVarP(p *int, name, shorthand string, usage string) {
+	flag := f.VarPF(newCountValue(0, p), name, shorthand, usage)
+	flag.NoOptDefVal = "+1"
+}
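+
+// Usage sketch (hypothetical): with v := flags.CountP("verbose", "v", "verbosity"),
+// passing -v -v -v on the command line leaves *v == 3, because each bare
+// occurrence is parsed as the NoOptDefVal "+1" and increments the value.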
+
+// CountVar is like the FlagSet CountVar, but the flag is placed on the CommandLine instead of a given flag set
+func CountVar(p *int, name string, usage string) {
+	CommandLine.CountVar(p, name, usage)
+}
+
+// CountVarP is like CountVar, but takes a shorthand for the flag name.
+func CountVarP(p *int, name, shorthand string, usage string) {
+	CommandLine.CountVarP(p, name, shorthand, usage)
+}
+
+// Count defines a count flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+// A count flag will add 1 to its value every time it is found on the command line
+func (f *FlagSet) Count(name string, usage string) *int {
+	p := new(int)
+	f.CountVarP(p, name, "", usage)
+	return p
+}
+
+// CountP is like Count only takes a shorthand for the flag name.
+func (f *FlagSet) CountP(name, shorthand string, usage string) *int {
+	p := new(int)
+	f.CountVarP(p, name, shorthand, usage)
+	return p
+}
+
+// Count defines a count flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+// A count flag will add 1 to its value every time it is found on the command line
+func Count(name string, usage string) *int {
+	return CommandLine.CountP(name, "", usage)
+}
+
+// CountP is like Count only takes a shorthand for the flag name.
+func CountP(name, shorthand string, usage string) *int {
+	return CommandLine.CountP(name, shorthand, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/duration.go b/vendor/github.com/spf13/pflag/duration.go
new file mode 100644
index 0000000..e9debef
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/duration.go
@@ -0,0 +1,86 @@
+package pflag
+
+import (
+	"time"
+)
+
+// -- time.Duration Value
+type durationValue time.Duration
+
+func newDurationValue(val time.Duration, p *time.Duration) *durationValue {
+	*p = val
+	return (*durationValue)(p)
+}
+
+func (d *durationValue) Set(s string) error {
+	v, err := time.ParseDuration(s)
+	*d = durationValue(v)
+	return err
+}
+
+func (d *durationValue) Type() string {
+	return "duration"
+}
+
+func (d *durationValue) String() string { return (*time.Duration)(d).String() }
+
+func durationConv(sval string) (interface{}, error) {
+	return time.ParseDuration(sval)
+}
+
+// GetDuration return the duration value of a flag with the given name
+func (f *FlagSet) GetDuration(name string) (time.Duration, error) {
+	val, err := f.getFlagType(name, "duration", durationConv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(time.Duration), nil
+}
+
+// DurationVar defines a time.Duration flag with specified name, default value, and usage string.
+// The argument p points to a time.Duration variable in which to store the value of the flag.
+func (f *FlagSet) DurationVar(p *time.Duration, name string, value time.Duration, usage string) {
+	f.VarP(newDurationValue(value, p), name, "", usage)
+}
+
+// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) {
+	f.VarP(newDurationValue(value, p), name, shorthand, usage)
+}
+
+// DurationVar defines a time.Duration flag with specified name, default value, and usage string.
+// The argument p points to a time.Duration variable in which to store the value of the flag.
+func DurationVar(p *time.Duration, name string, value time.Duration, usage string) {
+	CommandLine.VarP(newDurationValue(value, p), name, "", usage)
+}
+
+// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash.
+func DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) {
+	CommandLine.VarP(newDurationValue(value, p), name, shorthand, usage)
+}
+
+// Duration defines a time.Duration flag with specified name, default value, and usage string.
+// The return value is the address of a time.Duration variable that stores the value of the flag.
+func (f *FlagSet) Duration(name string, value time.Duration, usage string) *time.Duration {
+	p := new(time.Duration)
+	f.DurationVarP(p, name, "", value, usage)
+	return p
+}
+
+// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration {
+	p := new(time.Duration)
+	f.DurationVarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// Duration defines a time.Duration flag with specified name, default value, and usage string.
+// The return value is the address of a time.Duration variable that stores the value of the flag.
+func Duration(name string, value time.Duration, usage string) *time.Duration {
+	return CommandLine.DurationP(name, "", value, usage)
+}
+
+// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash.
+func DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration {
+	return CommandLine.DurationP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/duration_slice.go b/vendor/github.com/spf13/pflag/duration_slice.go
new file mode 100644
index 0000000..52c6b6d
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/duration_slice.go
@@ -0,0 +1,128 @@
+package pflag
+
+import (
+	"fmt"
+	"strings"
+	"time"
+)
+
+// -- durationSlice Value
+type durationSliceValue struct {
+	value   *[]time.Duration
+	changed bool
+}
+
+func newDurationSliceValue(val []time.Duration, p *[]time.Duration) *durationSliceValue {
+	dsv := new(durationSliceValue)
+	dsv.value = p
+	*dsv.value = val
+	return dsv
+}
+
+func (s *durationSliceValue) Set(val string) error {
+	ss := strings.Split(val, ",")
+	out := make([]time.Duration, len(ss))
+	for i, d := range ss {
+		var err error
+		out[i], err = time.ParseDuration(d)
+		if err != nil {
+			return err
+		}
+
+	}
+	if !s.changed {
+		*s.value = out
+	} else {
+		*s.value = append(*s.value, out...)
+	}
+	s.changed = true
+	return nil
+}
+
+func (s *durationSliceValue) Type() string {
+	return "durationSlice"
+}
+
+func (s *durationSliceValue) String() string {
+	out := make([]string, len(*s.value))
+	for i, d := range *s.value {
+		out[i] = fmt.Sprintf("%s", d)
+	}
+	return "[" + strings.Join(out, ",") + "]"
+}
+
+func durationSliceConv(val string) (interface{}, error) {
+	val = strings.Trim(val, "[]")
+	// Empty string would cause a slice with one (empty) entry
+	if len(val) == 0 {
+		return []time.Duration{}, nil
+	}
+	ss := strings.Split(val, ",")
+	out := make([]time.Duration, len(ss))
+	for i, d := range ss {
+		var err error
+		out[i], err = time.ParseDuration(d)
+		if err != nil {
+			return nil, err
+		}
+
+	}
+	return out, nil
+}
+
+// GetDurationSlice returns the []time.Duration value of a flag with the given name
+func (f *FlagSet) GetDurationSlice(name string) ([]time.Duration, error) {
+	val, err := f.getFlagType(name, "durationSlice", durationSliceConv)
+	if err != nil {
+		return []time.Duration{}, err
+	}
+	return val.([]time.Duration), nil
+}
+
+// DurationSliceVar defines a durationSlice flag with specified name, default value, and usage string.
+// The argument p points to a []time.Duration variable in which to store the value of the flag.
+func (f *FlagSet) DurationSliceVar(p *[]time.Duration, name string, value []time.Duration, usage string) {
+	f.VarP(newDurationSliceValue(value, p), name, "", usage)
+}
+
+// DurationSliceVarP is like DurationSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) DurationSliceVarP(p *[]time.Duration, name, shorthand string, value []time.Duration, usage string) {
+	f.VarP(newDurationSliceValue(value, p), name, shorthand, usage)
+}
+
+// DurationSliceVar defines a []time.Duration flag with specified name, default value, and usage string.
+// The argument p points to a []time.Duration variable in which to store the value of the flag.
+func DurationSliceVar(p *[]time.Duration, name string, value []time.Duration, usage string) {
+	CommandLine.VarP(newDurationSliceValue(value, p), name, "", usage)
+}
+
+// DurationSliceVarP is like DurationSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func DurationSliceVarP(p *[]time.Duration, name, shorthand string, value []time.Duration, usage string) {
+	CommandLine.VarP(newDurationSliceValue(value, p), name, shorthand, usage)
+}
+
+// DurationSlice defines a []time.Duration flag with specified name, default value, and usage string.
+// The return value is the address of a []time.Duration variable that stores the value of the flag.
+func (f *FlagSet) DurationSlice(name string, value []time.Duration, usage string) *[]time.Duration {
+	p := []time.Duration{}
+	f.DurationSliceVarP(&p, name, "", value, usage)
+	return &p
+}
+
+// DurationSliceP is like DurationSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) DurationSliceP(name, shorthand string, value []time.Duration, usage string) *[]time.Duration {
+	p := []time.Duration{}
+	f.DurationSliceVarP(&p, name, shorthand, value, usage)
+	return &p
+}
+
+// DurationSlice defines a []time.Duration flag with specified name, default value, and usage string.
+// The return value is the address of a []time.Duration variable that stores the value of the flag.
+func DurationSlice(name string, value []time.Duration, usage string) *[]time.Duration {
+	return CommandLine.DurationSliceP(name, "", value, usage)
+}
+
+// DurationSliceP is like DurationSlice, but accepts a shorthand letter that can be used after a single dash.
+func DurationSliceP(name, shorthand string, value []time.Duration, usage string) *[]time.Duration {
+	return CommandLine.DurationSliceP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go
new file mode 100644
index 0000000..9beeda8
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/flag.go
@@ -0,0 +1,1227 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package pflag is a drop-in replacement for Go's flag package, implementing
+POSIX/GNU-style --flags.
+
+pflag is compatible with the GNU extensions to the POSIX recommendations
+for command-line options. See
+http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html
+
+Usage:
+
+pflag is a drop-in replacement for Go's native flag package. If you import
+pflag under the name "flag" then all code should continue to function
+with no changes.
+
+	import flag "github.com/spf13/pflag"
+
+There is one exception to this: if you directly instantiate the Flag struct
+there is one more field "Shorthand" that you will need to set.
+Most code never instantiates this struct directly, and instead uses
+functions such as String(), BoolVar(), and Var(), and is therefore
+unaffected.
+
+Define flags using flag.String(), Bool(), Int(), etc.
+
+This declares an integer flag, -flagname, stored in the pointer ip, with type *int.
+	var ip = flag.Int("flagname", 1234, "help message for flagname")
+If you like, you can bind the flag to a variable using the Var() functions.
+	var flagvar int
+	func init() {
+		flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname")
+	}
+Or you can create custom flags that satisfy the Value interface (with
+pointer receivers) and couple them to flag parsing by
+	flag.Var(&flagVal, "name", "help message for flagname")
+For such flags, the default value is just the initial value of the variable.
+
+After all flags are defined, call
+	flag.Parse()
+to parse the command line into the defined flags.
+
+Flags may then be used directly. If you're using the flags themselves,
+they are all pointers; if you bind to variables, they're values.
+	fmt.Println("ip has value ", *ip)
+	fmt.Println("flagvar has value ", flagvar)
+
+After parsing, the arguments after the flag are available as the
+slice flag.Args() or individually as flag.Arg(i).
+The arguments are indexed from 0 through flag.NArg()-1.
+
+The pflag package also defines some new functions that are not in flag,
+that give one-letter shorthands for flags. You can use these by appending
+'P' to the name of any function that defines a flag.
+	var ip = flag.IntP("flagname", "f", 1234, "help message")
+	var flagvar bool
+	func init() {
+		flag.BoolVarP(&flagvar, "boolname", "b", true, "help message")
+	}
+	flag.VarP(&flagVal, "varname", "v", "help message")
+Shorthand letters can be used with single dashes on the command line.
+Boolean shorthand flags can be combined with other shorthand flags.
+
+Command line flag syntax:
+	--flag    // boolean flags, or flags with no option default values
+	--flag x  // only on flags without a default value
+	--flag=x
+
+Unlike the flag package, a single dash before an option means something
+different than a double dash. Single dashes signify a series of shorthand
+letters for flags. All but the last shorthand letter must be boolean flags
+or flags with a 'no option default value'.
+	// boolean flags
+	-f
+	-abc
+	// non-boolean flags
+	-n 1234
+	-Ifile
+	// mixed
+	-abcs "hello"
+	-abcn1234
+
+Flag parsing stops after the terminator "--". Unlike the flag package,
+flags can be interspersed with arguments anywhere on the command line
+before this terminator.
+
+Integer flags accept 1234, 0664, 0x1234 and may be negative.
+Boolean flags (in their long form) accept 1, 0, t, f, true, false,
+TRUE, FALSE, True, False.
+Duration flags accept any input valid for time.ParseDuration.
+
+The default set of command-line flags is controlled by
+top-level functions.  The FlagSet type allows one to define
+independent sets of flags, such as to implement subcommands
+in a command-line interface. The methods of FlagSet are
+analogous to the top-level functions for the command-line
+flag set.
+*/
+package pflag
+
+import (
+	"bytes"
+	"errors"
+	goflag "flag"
+	"fmt"
+	"io"
+	"os"
+	"sort"
+	"strings"
+)
+
+// ErrHelp is the error returned if the flag -help is invoked but no such flag is defined.
+var ErrHelp = errors.New("pflag: help requested")
+
+// ErrorHandling defines how to handle flag parsing errors.
+type ErrorHandling int
+
+const (
+	// ContinueOnError will return an err from Parse() if an error is found
+	ContinueOnError ErrorHandling = iota
+	// ExitOnError will call os.Exit(2) if an error is found when parsing
+	ExitOnError
+	// PanicOnError will panic() if an error is found when parsing flags
+	PanicOnError
+)
+
+// ParseErrorsWhitelist defines the parsing errors that can be ignored
+type ParseErrorsWhitelist struct {
+	// UnknownFlags will ignore unknown flag errors and continue parsing the rest of the flags
+	UnknownFlags bool
+}
+
+// NormalizedName is a flag name that has been normalized according to rules
+// for the FlagSet (e.g. making '-' and '_' equivalent).
+type NormalizedName string
+
+// A FlagSet represents a set of defined flags.
+type FlagSet struct {
+	// Usage is the function called when an error occurs while parsing flags.
+	// The field is a function (not a method) that may be changed to point to
+	// a custom error handler.
+	Usage func()
+
+	// SortFlags indicates whether flags should be sorted in help/usage messages.
+	SortFlags bool
+
+	// ParseErrorsWhitelist is used to configure a whitelist of errors
+	ParseErrorsWhitelist ParseErrorsWhitelist
+
+	name              string
+	parsed            bool
+	actual            map[NormalizedName]*Flag
+	orderedActual     []*Flag
+	sortedActual      []*Flag
+	formal            map[NormalizedName]*Flag
+	orderedFormal     []*Flag
+	sortedFormal      []*Flag
+	shorthands        map[byte]*Flag
+	args              []string // arguments after flags
+	argsLenAtDash     int      // len(args) when a '--' was located when parsing, or -1 if no --
+	errorHandling     ErrorHandling
+	output            io.Writer // nil means stderr; use out() accessor
+	interspersed      bool      // allow interspersed option/non-option args
+	normalizeNameFunc func(f *FlagSet, name string) NormalizedName
+
+	addedGoFlagSets []*goflag.FlagSet
+}
+
+// A Flag represents the state of a flag.
+type Flag struct {
+	Name                string              // name as it appears on command line
+	Shorthand           string              // one-letter abbreviated flag
+	Usage               string              // help message
+	Value               Value               // value as set
+	DefValue            string              // default value (as text); for usage message
+	Changed             bool                // True if the user set the value (rather than leaving the default)
+	NoOptDefVal         string              // default value (as text); if the flag is on the command line without any options
+	Deprecated          string              // If this flag is deprecated, this string is the message suggesting what to use instead
+	Hidden              bool                // used by cobra.Command to allow flags to be hidden from help/usage text
+	ShorthandDeprecated string              // If the shorthand of this flag is deprecated, this string is the message suggesting what to use instead
+	Annotations         map[string][]string // used by cobra.Command bash autocomplete code
+}
+
+// Value is the interface to the dynamic value stored in a flag.
+// (The default value is represented as a string.)
+type Value interface {
+	String() string
+	Set(string) error
+	Type() string
+}
+
+// sortFlags returns the flags as a slice in lexicographical sorted order.
+func sortFlags(flags map[NormalizedName]*Flag) []*Flag {
+	list := make(sort.StringSlice, len(flags))
+	i := 0
+	for k := range flags {
+		list[i] = string(k)
+		i++
+	}
+	list.Sort()
+	result := make([]*Flag, len(list))
+	for i, name := range list {
+		result[i] = flags[NormalizedName(name)]
+	}
+	return result
+}
+
+// SetNormalizeFunc allows you to add a function which can translate flag names.
+// Flags added to the FlagSet will be translated, and any later lookup of a
+// flag will be translated as well. So it would be possible to create
+// a flag named "getURL" and have it translated to "geturl".  A user could then pass
+// "--getUrl" which may also be translated to "geturl" and everything will work.
+func (f *FlagSet) SetNormalizeFunc(n func(f *FlagSet, name string) NormalizedName) {
+	f.normalizeNameFunc = n
+	f.sortedFormal = f.sortedFormal[:0]
+	for fname, flag := range f.formal {
+		nname := f.normalizeFlagName(flag.Name)
+		if fname == nname {
+			continue
+		}
+		flag.Name = string(nname)
+		delete(f.formal, fname)
+		f.formal[nname] = flag
+		if _, set := f.actual[fname]; set {
+			delete(f.actual, fname)
+			f.actual[nname] = flag
+		}
+	}
+}
+
+// GetNormalizeFunc returns the previously set NormalizeFunc, or a function
+// which does no translation if none was set.
+func (f *FlagSet) GetNormalizeFunc() func(f *FlagSet, name string) NormalizedName {
+	if f.normalizeNameFunc != nil {
+		return f.normalizeNameFunc
+	}
+	return func(f *FlagSet, name string) NormalizedName { return NormalizedName(name) }
+}
+
+func (f *FlagSet) normalizeFlagName(name string) NormalizedName {
+	n := f.GetNormalizeFunc()
+	return n(f, name)
+}
+
+func (f *FlagSet) out() io.Writer {
+	if f.output == nil {
+		return os.Stderr
+	}
+	return f.output
+}
+
+// SetOutput sets the destination for usage and error messages.
+// If output is nil, os.Stderr is used.
+func (f *FlagSet) SetOutput(output io.Writer) {
+	f.output = output
+}
+
+// VisitAll visits the flags in lexicographical order or
+// in primordial order if f.SortFlags is false, calling fn for each.
+// It visits all flags, even those not set.
+func (f *FlagSet) VisitAll(fn func(*Flag)) {
+	if len(f.formal) == 0 {
+		return
+	}
+
+	var flags []*Flag
+	if f.SortFlags {
+		if len(f.formal) != len(f.sortedFormal) {
+			f.sortedFormal = sortFlags(f.formal)
+		}
+		flags = f.sortedFormal
+	} else {
+		flags = f.orderedFormal
+	}
+
+	for _, flag := range flags {
+		fn(flag)
+	}
+}
+
+// HasFlags returns a bool to indicate if the FlagSet has any flags defined.
+func (f *FlagSet) HasFlags() bool {
+	return len(f.formal) > 0
+}
+
+// HasAvailableFlags returns a bool to indicate if the FlagSet has any flags
+// that are not hidden.
+func (f *FlagSet) HasAvailableFlags() bool {
+	for _, flag := range f.formal {
+		if !flag.Hidden {
+			return true
+		}
+	}
+	return false
+}
+
+// VisitAll visits the command-line flags in lexicographical order or
+// in primordial order if f.SortFlags is false, calling fn for each.
+// It visits all flags, even those not set.
+func VisitAll(fn func(*Flag)) {
+	CommandLine.VisitAll(fn)
+}
+
+// Visit visits the flags in lexicographical order or
+// in primordial order if f.SortFlags is false, calling fn for each.
+// It visits only those flags that have been set.
+func (f *FlagSet) Visit(fn func(*Flag)) {
+	if len(f.actual) == 0 {
+		return
+	}
+
+	var flags []*Flag
+	if f.SortFlags {
+		if len(f.actual) != len(f.sortedActual) {
+			f.sortedActual = sortFlags(f.actual)
+		}
+		flags = f.sortedActual
+	} else {
+		flags = f.orderedActual
+	}
+
+	for _, flag := range flags {
+		fn(flag)
+	}
+}
+
+// Visit visits the command-line flags in lexicographical order or
+// in primordial order if f.SortFlags is false, calling fn for each.
+// It visits only those flags that have been set.
+func Visit(fn func(*Flag)) {
+	CommandLine.Visit(fn)
+}
+
+// Lookup returns the Flag structure of the named flag, returning nil if none exists.
+func (f *FlagSet) Lookup(name string) *Flag {
+	return f.lookup(f.normalizeFlagName(name))
+}
+
+// ShorthandLookup returns the Flag structure of the short handed flag,
+// returning nil if none exists.
+// It panics if len(name) > 1.
+func (f *FlagSet) ShorthandLookup(name string) *Flag {
+	if name == "" {
+		return nil
+	}
+	if len(name) > 1 {
+		msg := fmt.Sprintf("cannot look up shorthand which is more than one ASCII character: %q", name)
+		fmt.Fprint(f.out(), msg)
+		panic(msg)
+	}
+	c := name[0]
+	return f.shorthands[c]
+}
+
+// lookup returns the Flag structure of the named flag, returning nil if none exists.
+func (f *FlagSet) lookup(name NormalizedName) *Flag {
+	return f.formal[name]
+}
+
+// func to return a given type for a given flag name
+func (f *FlagSet) getFlagType(name string, ftype string, convFunc func(sval string) (interface{}, error)) (interface{}, error) {
+	flag := f.Lookup(name)
+	if flag == nil {
+		err := fmt.Errorf("flag accessed but not defined: %s", name)
+		return nil, err
+	}
+
+	if flag.Value.Type() != ftype {
+		err := fmt.Errorf("trying to get %s value of flag of type %s", ftype, flag.Value.Type())
+		return nil, err
+	}
+
+	sval := flag.Value.String()
+	result, err := convFunc(sval)
+	if err != nil {
+		return nil, err
+	}
+	return result, nil
+}
+
+// ArgsLenAtDash will return the length of f.Args at the moment when a -- was
+// found during arg parsing. This allows your program to know which args were
+// before the -- and which came after.
+func (f *FlagSet) ArgsLenAtDash() int {
+	return f.argsLenAtDash
+}
+
+// MarkDeprecated indicates that a flag is deprecated in your program. It will
+// continue to function but will not show up in help or usage messages. Using
+// this flag will also print the given usageMessage.
+func (f *FlagSet) MarkDeprecated(name string, usageMessage string) error {
+	flag := f.Lookup(name)
+	if flag == nil {
+		return fmt.Errorf("flag %q does not exist", name)
+	}
+	if usageMessage == "" {
+		return fmt.Errorf("deprecated message for flag %q must be set", name)
+	}
+	flag.Deprecated = usageMessage
+	flag.Hidden = true
+	return nil
+}
+
+// MarkShorthandDeprecated will mark the shorthand of a flag deprecated in your
+// program. It will continue to function but will not show up in help or usage
+// messages. Using this flag will also print the given usageMessage.
+func (f *FlagSet) MarkShorthandDeprecated(name string, usageMessage string) error {
+	flag := f.Lookup(name)
+	if flag == nil {
+		return fmt.Errorf("flag %q does not exist", name)
+	}
+	if usageMessage == "" {
+		return fmt.Errorf("deprecated message for flag %q must be set", name)
+	}
+	flag.ShorthandDeprecated = usageMessage
+	return nil
+}
+
+// MarkHidden sets a flag to 'hidden' in your program. It will continue to
+// function but will not show up in help or usage messages.
+func (f *FlagSet) MarkHidden(name string) error {
+	flag := f.Lookup(name)
+	if flag == nil {
+		return fmt.Errorf("flag %q does not exist", name)
+	}
+	flag.Hidden = true
+	return nil
+}
+
+// Lookup returns the Flag structure of the named command-line flag,
+// returning nil if none exists.
+func Lookup(name string) *Flag {
+	return CommandLine.Lookup(name)
+}
+
+// ShorthandLookup returns the Flag structure of the short handed flag,
+// returning nil if none exists.
+func ShorthandLookup(name string) *Flag {
+	return CommandLine.ShorthandLookup(name)
+}
+
+// Set sets the value of the named flag.
+func (f *FlagSet) Set(name, value string) error {
+	normalName := f.normalizeFlagName(name)
+	flag, ok := f.formal[normalName]
+	if !ok {
+		return fmt.Errorf("no such flag -%v", name)
+	}
+
+	err := flag.Value.Set(value)
+	if err != nil {
+		var flagName string
+		if flag.Shorthand != "" && flag.ShorthandDeprecated == "" {
+			flagName = fmt.Sprintf("-%s, --%s", flag.Shorthand, flag.Name)
+		} else {
+			flagName = fmt.Sprintf("--%s", flag.Name)
+		}
+		return fmt.Errorf("invalid argument %q for %q flag: %v", value, flagName, err)
+	}
+
+	if !flag.Changed {
+		if f.actual == nil {
+			f.actual = make(map[NormalizedName]*Flag)
+		}
+		f.actual[normalName] = flag
+		f.orderedActual = append(f.orderedActual, flag)
+
+		flag.Changed = true
+	}
+
+	if flag.Deprecated != "" {
+		fmt.Fprintf(f.out(), "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated)
+	}
+	return nil
+}
+
+// SetAnnotation allows one to set arbitrary annotations on a flag in the FlagSet.
+// This is sometimes used by spf13/cobra programs which want to generate additional
+// bash completion information.
+func (f *FlagSet) SetAnnotation(name, key string, values []string) error {
+	normalName := f.normalizeFlagName(name)
+	flag, ok := f.formal[normalName]
+	if !ok {
+		return fmt.Errorf("no such flag -%v", name)
+	}
+	if flag.Annotations == nil {
+		flag.Annotations = map[string][]string{}
+	}
+	flag.Annotations[key] = values
+	return nil
+}
+
+// Changed returns true if the flag was explicitly set during Parse() and false
+// otherwise
+func (f *FlagSet) Changed(name string) bool {
+	flag := f.Lookup(name)
+	// If a flag doesn't exist, it wasn't changed....
+	if flag == nil {
+		return false
+	}
+	return flag.Changed
+}
+
+// Set sets the value of the named command-line flag.
+func Set(name, value string) error {
+	return CommandLine.Set(name, value)
+}
+
+// PrintDefaults prints, to standard error unless configured
+// otherwise, the default values of all defined flags in the set.
+func (f *FlagSet) PrintDefaults() {
+	usages := f.FlagUsages()
+	fmt.Fprint(f.out(), usages)
+}
+
+// defaultIsZeroValue returns true if the default value for this flag represents
+// a zero value.
+func (f *Flag) defaultIsZeroValue() bool {
+	switch f.Value.(type) {
+	case boolFlag:
+		return f.DefValue == "false"
+	case *durationValue:
+		// Beginning in Go 1.7, duration zero values are "0s"
+		return f.DefValue == "0" || f.DefValue == "0s"
+	case *intValue, *int8Value, *int32Value, *int64Value, *uintValue, *uint8Value, *uint16Value, *uint32Value, *uint64Value, *countValue, *float32Value, *float64Value:
+		return f.DefValue == "0"
+	case *stringValue:
+		return f.DefValue == ""
+	case *ipValue, *ipMaskValue, *ipNetValue:
+		return f.DefValue == "<nil>"
+	case *intSliceValue, *stringSliceValue, *stringArrayValue:
+		return f.DefValue == "[]"
+	default:
+		switch f.Value.String() {
+		case "false":
+			return true
+		case "<nil>":
+			return true
+		case "":
+			return true
+		case "0":
+			return true
+		}
+		return false
+	}
+}
+
+// UnquoteUsage extracts a back-quoted name from the usage
+// string for a flag and returns it and the un-quoted usage.
+// Given "a `name` to show" it returns ("name", "a name to show").
+// If there are no back quotes, the name is an educated guess of the
+// type of the flag's value, or the empty string if the flag is boolean.
+func UnquoteUsage(flag *Flag) (name string, usage string) {
+	// Look for a back-quoted name, but avoid the strings package.
+	usage = flag.Usage
+	for i := 0; i < len(usage); i++ {
+		if usage[i] == '`' {
+			for j := i + 1; j < len(usage); j++ {
+				if usage[j] == '`' {
+					name = usage[i+1 : j]
+					usage = usage[:i] + name + usage[j+1:]
+					return name, usage
+				}
+			}
+			break // Only one back quote; use type name.
+		}
+	}
+
+	name = flag.Value.Type()
+	switch name {
+	case "bool":
+		name = ""
+	case "float64":
+		name = "float"
+	case "int64":
+		name = "int"
+	case "uint64":
+		name = "uint"
+	case "stringSlice":
+		name = "strings"
+	case "intSlice":
+		name = "ints"
+	case "uintSlice":
+		name = "uints"
+	case "boolSlice":
+		name = "bools"
+	}
+
+	return
+}
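+
+// For example, given a flag defined as
+//
+//	fs.String("output", "", "write results to `file`")
+//
+// UnquoteUsage returns ("file", "write results to file"), so help output can
+// show "--output file" rather than the type-derived "--output string".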
+
+// Splits the string `s` on whitespace into an initial substring up to
+// `i` runes in length and the remainder. Will go `slop` over `i` if
+// that encompasses the entire string (which allows the caller to
+// avoid short orphan words on the final line).
+func wrapN(i, slop int, s string) (string, string) {
+	if i+slop > len(s) {
+		return s, ""
+	}
+
+	w := strings.LastIndexAny(s[:i], " \t\n")
+	if w <= 0 {
+		return s, ""
+	}
+	nlPos := strings.LastIndex(s[:i], "\n")
+	if nlPos > 0 && nlPos < w {
+		return s[:nlPos], s[nlPos+1:]
+	}
+	return s[:w], s[w+1:]
+}
+
+// Wraps the string `s` to a maximum width `w` with leading indent
+// `i`. The first line is not indented (this is assumed to be done by
+// the caller). Pass `w` == 0 to disable wrapping.
+func wrap(i, w int, s string) string {
+	if w == 0 {
+		return strings.Replace(s, "\n", "\n"+strings.Repeat(" ", i), -1)
+	}
+
+	// space between indent i and end of line width w into which
+	// we should wrap the text.
+	wrap := w - i
+
+	var r, l string
+
+	// Not enough space for sensible wrapping. Wrap as a block on
+	// the next line instead.
+	if wrap < 24 {
+		i = 16
+		wrap = w - i
+		r += "\n" + strings.Repeat(" ", i)
+	}
+	// If still not enough space then don't even try to wrap.
+	if wrap < 24 {
+		return strings.Replace(s, "\n", r, -1)
+	}
+
+	// Try to avoid short orphan words on the final line, by
+	// allowing wrapN to go a bit over if that would fit in the
+	// remainder of the line.
+	slop := 5
+	wrap = wrap - slop
+
+	// Handle first line, which is indented by the caller (or the
+	// special case above)
+	l, s = wrapN(wrap, slop, s)
+	r = r + strings.Replace(l, "\n", "\n"+strings.Repeat(" ", i), -1)
+
+	// Now wrap the rest
+	for s != "" {
+		var t string
+
+		t, s = wrapN(wrap, slop, s)
+		r = r + "\n" + strings.Repeat(" ", i) + strings.Replace(t, "\n", "\n"+strings.Repeat(" ", i), -1)
+	}
+
+	return r
+}
+
+// FlagUsagesWrapped returns a string containing the usage information
+// for all flags in the FlagSet, wrapped to `cols` columns (0 for no
+// wrapping).
+func (f *FlagSet) FlagUsagesWrapped(cols int) string {
+	buf := new(bytes.Buffer)
+
+	lines := make([]string, 0, len(f.formal))
+
+	maxlen := 0
+	f.VisitAll(func(flag *Flag) {
+		if flag.Hidden {
+			return
+		}
+
+		line := ""
+		if flag.Shorthand != "" && flag.ShorthandDeprecated == "" {
+			line = fmt.Sprintf("  -%s, --%s", flag.Shorthand, flag.Name)
+		} else {
+			line = fmt.Sprintf("      --%s", flag.Name)
+		}
+
+		varname, usage := UnquoteUsage(flag)
+		if varname != "" {
+			line += " " + varname
+		}
+		if flag.NoOptDefVal != "" {
+			switch flag.Value.Type() {
+			case "string":
+				line += fmt.Sprintf("[=\"%s\"]", flag.NoOptDefVal)
+			case "bool":
+				if flag.NoOptDefVal != "true" {
+					line += fmt.Sprintf("[=%s]", flag.NoOptDefVal)
+				}
+			case "count":
+				if flag.NoOptDefVal != "+1" {
+					line += fmt.Sprintf("[=%s]", flag.NoOptDefVal)
+				}
+			default:
+				line += fmt.Sprintf("[=%s]", flag.NoOptDefVal)
+			}
+		}
+
+		// This special character will be replaced with spacing once the
+		// correct alignment is calculated
+		line += "\x00"
+		if len(line) > maxlen {
+			maxlen = len(line)
+		}
+
+		line += usage
+		if !flag.defaultIsZeroValue() {
+			if flag.Value.Type() == "string" {
+				line += fmt.Sprintf(" (default %q)", flag.DefValue)
+			} else {
+				line += fmt.Sprintf(" (default %s)", flag.DefValue)
+			}
+		}
+		if len(flag.Deprecated) != 0 {
+			line += fmt.Sprintf(" (DEPRECATED: %s)", flag.Deprecated)
+		}
+
+		lines = append(lines, line)
+	})
+
+	for _, line := range lines {
+		sidx := strings.Index(line, "\x00")
+		spacing := strings.Repeat(" ", maxlen-sidx)
+		// maxlen + 2 comes from + 1 for the \x00 and + 1 for the (deliberate) off-by-one in maxlen-sidx
+		fmt.Fprintln(buf, line[:sidx], spacing, wrap(maxlen+2, cols, line[sidx+1:]))
+	}
+
+	return buf.String()
+}
+
+// FlagUsages returns a string containing the usage information for all flags in
+// the FlagSet.
+func (f *FlagSet) FlagUsages() string {
+	return f.FlagUsagesWrapped(0)
+}
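+
+// A minimal sketch: render the usage block wrapped to an 80-column terminal
+// instead of the unwrapped default used by FlagUsages.
+//
+//	fmt.Fprint(os.Stderr, fs.FlagUsagesWrapped(80))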
+
+// PrintDefaults prints to standard error the default values of all defined command-line flags.
+func PrintDefaults() {
+	CommandLine.PrintDefaults()
+}
+
+// defaultUsage is the default function to print a usage message.
+func defaultUsage(f *FlagSet) {
+	fmt.Fprintf(f.out(), "Usage of %s:\n", f.name)
+	f.PrintDefaults()
+}
+
+// NOTE: Usage is not just defaultUsage(CommandLine)
+// because it serves (via godoc flag Usage) as the example
+// for how to write your own usage function.
+
+// Usage prints to standard error a usage message documenting all defined command-line flags.
+// The function is a variable that may be changed to point to a custom function.
+// By default it prints a simple header and calls PrintDefaults; for details about the
+// format of the output and how to control it, see the documentation for PrintDefaults.
+var Usage = func() {
+	fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
+	PrintDefaults()
+}
+
+// NFlag returns the number of flags that have been set.
+func (f *FlagSet) NFlag() int { return len(f.actual) }
+
+// NFlag returns the number of command-line flags that have been set.
+func NFlag() int { return len(CommandLine.actual) }
+
+// Arg returns the i'th argument.  Arg(0) is the first remaining argument
+// after flags have been processed.
+func (f *FlagSet) Arg(i int) string {
+	if i < 0 || i >= len(f.args) {
+		return ""
+	}
+	return f.args[i]
+}
+
+// Arg returns the i'th command-line argument.  Arg(0) is the first remaining argument
+// after flags have been processed.
+func Arg(i int) string {
+	return CommandLine.Arg(i)
+}
+
+// NArg is the number of arguments remaining after flags have been processed.
+func (f *FlagSet) NArg() int { return len(f.args) }
+
+// NArg is the number of arguments remaining after flags have been processed.
+func NArg() int { return len(CommandLine.args) }
+
+// Args returns the non-flag arguments.
+func (f *FlagSet) Args() []string { return f.args }
+
+// Args returns the non-flag command-line arguments.
+func Args() []string { return CommandLine.args }
+
+// Var defines a flag with the specified name and usage string. The type and
+// value of the flag are represented by the first argument, of type Value, which
+// typically holds a user-defined implementation of Value. For instance, the
+// caller could create a flag that turns a comma-separated string into a slice
+// of strings by giving the slice the methods of Value; in particular, Set would
+// decompose the comma-separated string into the slice.
+func (f *FlagSet) Var(value Value, name string, usage string) {
+	f.VarP(value, name, "", usage)
+}
+
+// VarPF is like VarP, but returns the flag created
+func (f *FlagSet) VarPF(value Value, name, shorthand, usage string) *Flag {
+	// Remember the default value as a string; it won't change.
+	flag := &Flag{
+		Name:      name,
+		Shorthand: shorthand,
+		Usage:     usage,
+		Value:     value,
+		DefValue:  value.String(),
+	}
+	f.AddFlag(flag)
+	return flag
+}
+
+// VarP is like Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) VarP(value Value, name, shorthand, usage string) {
+	f.VarPF(value, name, shorthand, usage)
+}
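+
+// A minimal sketch of the comma-separated Value described above (csvValue is
+// hypothetical, not part of this package; it assumes "strings" is imported):
+//
+//	type csvValue []string
+//
+//	func (c *csvValue) String() string { return strings.Join(*c, ",") }
+//	func (c *csvValue) Set(s string) error { *c = strings.Split(s, ","); return nil }
+//	func (c *csvValue) Type() string   { return "csv" }
+//
+//	var fields csvValue
+//	fs.Var(&fields, "fields", "comma-separated list of fields")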
+
+// AddFlag will add the flag to the FlagSet
+func (f *FlagSet) AddFlag(flag *Flag) {
+	normalizedFlagName := f.normalizeFlagName(flag.Name)
+
+	_, alreadyThere := f.formal[normalizedFlagName]
+	if alreadyThere {
+		msg := fmt.Sprintf("%s flag redefined: %s", f.name, flag.Name)
+		fmt.Fprintln(f.out(), msg)
+		panic(msg) // Happens only if flags are declared with identical names
+	}
+	if f.formal == nil {
+		f.formal = make(map[NormalizedName]*Flag)
+	}
+
+	flag.Name = string(normalizedFlagName)
+	f.formal[normalizedFlagName] = flag
+	f.orderedFormal = append(f.orderedFormal, flag)
+
+	if flag.Shorthand == "" {
+		return
+	}
+	if len(flag.Shorthand) > 1 {
+		msg := fmt.Sprintf("%q shorthand is more than one ASCII character", flag.Shorthand)
+		fmt.Fprintln(f.out(), msg)
+		panic(msg)
+	}
+	if f.shorthands == nil {
+		f.shorthands = make(map[byte]*Flag)
+	}
+	c := flag.Shorthand[0]
+	used, alreadyThere := f.shorthands[c]
+	if alreadyThere {
+		msg := fmt.Sprintf("unable to redefine %q shorthand in %q flagset: it's already used for %q flag", c, f.name, used.Name)
+		fmt.Fprintln(f.out(), msg)
+		panic(msg)
+	}
+	f.shorthands[c] = flag
+}
+
+// AddFlagSet adds one FlagSet to another. If a flag is already present in f
+// the flag from newSet will be ignored.
+func (f *FlagSet) AddFlagSet(newSet *FlagSet) {
+	if newSet == nil {
+		return
+	}
+	newSet.VisitAll(func(flag *Flag) {
+		if f.Lookup(flag.Name) == nil {
+			f.AddFlag(flag)
+		}
+	})
+}
+
+// Var defines a flag with the specified name and usage string. The type and
+// value of the flag are represented by the first argument, of type Value, which
+// typically holds a user-defined implementation of Value. For instance, the
+// caller could create a flag that turns a comma-separated string into a slice
+// of strings by giving the slice the methods of Value; in particular, Set would
+// decompose the comma-separated string into the slice.
+func Var(value Value, name string, usage string) {
+	CommandLine.VarP(value, name, "", usage)
+}
+
+// VarP is like Var, but accepts a shorthand letter that can be used after a single dash.
+func VarP(value Value, name, shorthand, usage string) {
+	CommandLine.VarP(value, name, shorthand, usage)
+}
+
+// failf prints to standard error a formatted error and the usage message
+// (unless errors are handled with ContinueOnError) and returns the error.
+func (f *FlagSet) failf(format string, a ...interface{}) error {
+	err := fmt.Errorf(format, a...)
+	if f.errorHandling != ContinueOnError {
+		fmt.Fprintln(f.out(), err)
+		f.usage()
+	}
+	return err
+}
+
+// usage calls the Usage method for the flag set, or the usage function if
+// the flag set is CommandLine.
+func (f *FlagSet) usage() {
+	if f == CommandLine {
+		Usage()
+	} else if f.Usage == nil {
+		defaultUsage(f)
+	} else {
+		f.Usage()
+	}
+}
+
+// stripUnknownFlagValue strips the value, if any, of an unknown flag from
+// args, handling three cases:
+//	--unknown (args will be empty)
+//	--unknown --next-flag ... (args will be --next-flag ...)
+//	--unknown arg ... (args will be arg ...)
+func stripUnknownFlagValue(args []string) []string {
+	if len(args) == 0 {
+		//--unknown
+		return args
+	}
+
+	first := args[0]
+	if len(first) > 0 && first[0] == '-' {
+		//--unknown --next-flag ...
+		return args
+	}
+
+	//--unknown arg ... (args will be arg ...)
+	if len(args) > 1 {
+		return args[1:]
+	}
+	return nil
+}
+
+func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []string, err error) {
+	a = args
+	name := s[2:]
+	if len(name) == 0 || name[0] == '-' || name[0] == '=' {
+		err = f.failf("bad flag syntax: %s", s)
+		return
+	}
+
+	split := strings.SplitN(name, "=", 2)
+	name = split[0]
+	flag, exists := f.formal[f.normalizeFlagName(name)]
+
+	if !exists {
+		switch {
+		case name == "help":
+			f.usage()
+			return a, ErrHelp
+		case f.ParseErrorsWhitelist.UnknownFlags:
+			// --unknown=unknownval arg ...
+			// we do not want to lose arg in this case
+			if len(split) >= 2 {
+				return a, nil
+			}
+
+			return stripUnknownFlagValue(a), nil
+		default:
+			err = f.failf("unknown flag: --%s", name)
+			return
+		}
+	}
+
+	var value string
+	if len(split) == 2 {
+		// '--flag=arg'
+		value = split[1]
+	} else if flag.NoOptDefVal != "" {
+		// '--flag' (arg was optional)
+		value = flag.NoOptDefVal
+	} else if len(a) > 0 {
+		// '--flag arg'
+		value = a[0]
+		a = a[1:]
+	} else {
+		// '--flag' (arg was required)
+		err = f.failf("flag needs an argument: %s", s)
+		return
+	}
+
+	err = fn(flag, value)
+	if err != nil {
+		f.failf("%v", err)
+	}
+	return
+}
+
+func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parseFunc) (outShorts string, outArgs []string, err error) {
+	outArgs = args
+
+	if strings.HasPrefix(shorthands, "test.") {
+		return
+	}
+
+	outShorts = shorthands[1:]
+	c := shorthands[0]
+
+	flag, exists := f.shorthands[c]
+	if !exists {
+		switch {
+		case c == 'h':
+			f.usage()
+			err = ErrHelp
+			return
+		case f.ParseErrorsWhitelist.UnknownFlags:
+			// '-f=arg arg ...'
+			// we do not want to lose arg in this case
+			if len(shorthands) > 2 && shorthands[1] == '=' {
+				outShorts = ""
+				return
+			}
+
+			outArgs = stripUnknownFlagValue(outArgs)
+			return
+		default:
+			err = f.failf("unknown shorthand flag: %q in -%s", c, shorthands)
+			return
+		}
+	}
+
+	var value string
+	if len(shorthands) > 2 && shorthands[1] == '=' {
+		// '-f=arg'
+		value = shorthands[2:]
+		outShorts = ""
+	} else if flag.NoOptDefVal != "" {
+		// '-f' (arg was optional)
+		value = flag.NoOptDefVal
+	} else if len(shorthands) > 1 {
+		// '-farg'
+		value = shorthands[1:]
+		outShorts = ""
+	} else if len(args) > 0 {
+		// '-f arg'
+		value = args[0]
+		outArgs = args[1:]
+	} else {
+		// '-f' (arg was required)
+		err = f.failf("flag needs an argument: %q in -%s", c, shorthands)
+		return
+	}
+
+	if flag.ShorthandDeprecated != "" {
+		fmt.Fprintf(f.out(), "Flag shorthand -%s has been deprecated, %s\n", flag.Shorthand, flag.ShorthandDeprecated)
+	}
+
+	err = fn(flag, value)
+	if err != nil {
+		f.failf("%v", err)
+	}
+	return
+}
+
+func (f *FlagSet) parseShortArg(s string, args []string, fn parseFunc) (a []string, err error) {
+	a = args
+	shorthands := s[1:]
+
+	// "shorthands" can be a series of shorthand letters of flags (e.g. "-vvv").
+	for len(shorthands) > 0 {
+		shorthands, a, err = f.parseSingleShortArg(shorthands, args, fn)
+		if err != nil {
+			return
+		}
+	}
+
+	return
+}
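+
+// For reference, the shorthand spellings accepted by the two functions above,
+// assuming a hypothetical -n/--count flag and a boolean -v:
+//
+//	-n5   // '-farg': value runs into the shorthand
+//	-n=5  // '-f=arg'
+//	-n 5  // '-f arg': value taken from the next argument
+//	-vvv  // combined shorthands, consumed one letter at a time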
+
+func (f *FlagSet) parseArgs(args []string, fn parseFunc) (err error) {
+	for len(args) > 0 {
+		s := args[0]
+		args = args[1:]
+		if len(s) == 0 || s[0] != '-' || len(s) == 1 {
+			if !f.interspersed {
+				f.args = append(f.args, s)
+				f.args = append(f.args, args...)
+				return nil
+			}
+			f.args = append(f.args, s)
+			continue
+		}
+
+		if s[1] == '-' {
+			if len(s) == 2 { // "--" terminates the flags
+				f.argsLenAtDash = len(f.args)
+				f.args = append(f.args, args...)
+				break
+			}
+			args, err = f.parseLongArg(s, args, fn)
+		} else {
+			args, err = f.parseShortArg(s, args, fn)
+		}
+		if err != nil {
+			return
+		}
+	}
+	return
+}
+
+// Parse parses flag definitions from the argument list, which should not
+// include the command name.  Must be called after all flags in the FlagSet
+// are defined and before flags are accessed by the program.
+// The return value will be ErrHelp if -help was set but not defined.
+func (f *FlagSet) Parse(arguments []string) error {
+	if f.addedGoFlagSets != nil {
+		for _, goFlagSet := range f.addedGoFlagSets {
+			goFlagSet.Parse(nil)
+		}
+	}
+	f.parsed = true
+
+	if len(arguments) == 0 {
+		return nil
+	}
+
+	f.args = make([]string, 0, len(arguments))
+
+	set := func(flag *Flag, value string) error {
+		return f.Set(flag.Name, value)
+	}
+
+	err := f.parseArgs(arguments, set)
+	if err != nil {
+		switch f.errorHandling {
+		case ContinueOnError:
+			return err
+		case ExitOnError:
+			fmt.Println(err)
+			os.Exit(2)
+		case PanicOnError:
+			panic(err)
+		}
+	}
+	return nil
+}
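+
+// A minimal sketch of the usual define/parse/read cycle on a standalone
+// FlagSet (names are hypothetical):
+//
+//	fs := NewFlagSet("app", ContinueOnError)
+//	verbose := fs.BoolP("verbose", "v", false, "enable verbose output")
+//	if err := fs.Parse(os.Args[1:]); err != nil {
+//		fmt.Fprintln(os.Stderr, err) // with ContinueOnError the error is returned
+//	}
+//	if *verbose {
+//		// ...
+//	}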
+
+type parseFunc func(flag *Flag, value string) error
+
+// ParseAll parses flag definitions from the argument list, which should not
+// include the command name. The arguments for fn are flag and value. Must be
+// called after all flags in the FlagSet are defined and before flags are
+// accessed by the program. The return value will be ErrHelp if -help was set
+// but not defined.
+func (f *FlagSet) ParseAll(arguments []string, fn func(flag *Flag, value string) error) error {
+	f.parsed = true
+	f.args = make([]string, 0, len(arguments))
+
+	err := f.parseArgs(arguments, fn)
+	if err != nil {
+		switch f.errorHandling {
+		case ContinueOnError:
+			return err
+		case ExitOnError:
+			os.Exit(2)
+		case PanicOnError:
+			panic(err)
+		}
+	}
+	return nil
+}
+
+// Parsed reports whether f.Parse has been called.
+func (f *FlagSet) Parsed() bool {
+	return f.parsed
+}
+
+// Parse parses the command-line flags from os.Args[1:].  Must be called
+// after all flags are defined and before flags are accessed by the program.
+func Parse() {
+	// Ignore errors; CommandLine is set for ExitOnError.
+	CommandLine.Parse(os.Args[1:])
+}
+
+// ParseAll parses the command-line flags from os.Args[1:] and calls fn for each.
+// The arguments for fn are flag and value. Must be called after all flags are
+// defined and before flags are accessed by the program.
+func ParseAll(fn func(flag *Flag, value string) error) {
+	// Ignore errors; CommandLine is set for ExitOnError.
+	CommandLine.ParseAll(os.Args[1:], fn)
+}
+
+// SetInterspersed sets whether to support interspersed option/non-option arguments.
+func SetInterspersed(interspersed bool) {
+	CommandLine.SetInterspersed(interspersed)
+}
+
+// Parsed returns true if the command-line flags have been parsed.
+func Parsed() bool {
+	return CommandLine.Parsed()
+}
+
+// CommandLine is the default set of command-line flags, parsed from os.Args.
+var CommandLine = NewFlagSet(os.Args[0], ExitOnError)
+
+// NewFlagSet returns a new, empty flag set with the specified name,
+// error handling property and SortFlags set to true.
+func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet {
+	f := &FlagSet{
+		name:          name,
+		errorHandling: errorHandling,
+		argsLenAtDash: -1,
+		interspersed:  true,
+		SortFlags:     true,
+	}
+	return f
+}
+
+// SetInterspersed sets whether to support interspersed option/non-option arguments.
+func (f *FlagSet) SetInterspersed(interspersed bool) {
+	f.interspersed = interspersed
+}
+
+// Init sets the name and error handling property for a flag set.
+// By default, the zero FlagSet uses an empty name and the
+// ContinueOnError error handling policy.
+func (f *FlagSet) Init(name string, errorHandling ErrorHandling) {
+	f.name = name
+	f.errorHandling = errorHandling
+	f.argsLenAtDash = -1
+}
diff --git a/vendor/github.com/spf13/pflag/float32.go b/vendor/github.com/spf13/pflag/float32.go
new file mode 100644
index 0000000..a243f81
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/float32.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- float32 Value
+type float32Value float32
+
+func newFloat32Value(val float32, p *float32) *float32Value {
+	*p = val
+	return (*float32Value)(p)
+}
+
+func (f *float32Value) Set(s string) error {
+	v, err := strconv.ParseFloat(s, 32)
+	*f = float32Value(v)
+	return err
+}
+
+func (f *float32Value) Type() string {
+	return "float32"
+}
+
+func (f *float32Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 32) }
+
+func float32Conv(sval string) (interface{}, error) {
+	v, err := strconv.ParseFloat(sval, 32)
+	if err != nil {
+		return 0, err
+	}
+	return float32(v), nil
+}
+
+// GetFloat32 returns the float32 value of a flag with the given name.
+func (f *FlagSet) GetFloat32(name string) (float32, error) {
+	val, err := f.getFlagType(name, "float32", float32Conv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(float32), nil
+}
+
+// Float32Var defines a float32 flag with specified name, default value, and usage string.
+// The argument p points to a float32 variable in which to store the value of the flag.
+func (f *FlagSet) Float32Var(p *float32, name string, value float32, usage string) {
+	f.VarP(newFloat32Value(value, p), name, "", usage)
+}
+
+// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float32VarP(p *float32, name, shorthand string, value float32, usage string) {
+	f.VarP(newFloat32Value(value, p), name, shorthand, usage)
+}
+
+// Float32Var defines a float32 flag with specified name, default value, and usage string.
+// The argument p points to a float32 variable in which to store the value of the flag.
+func Float32Var(p *float32, name string, value float32, usage string) {
+	CommandLine.VarP(newFloat32Value(value, p), name, "", usage)
+}
+
+// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash.
+func Float32VarP(p *float32, name, shorthand string, value float32, usage string) {
+	CommandLine.VarP(newFloat32Value(value, p), name, shorthand, usage)
+}
+
+// Float32 defines a float32 flag with specified name, default value, and usage string.
+// The return value is the address of a float32 variable that stores the value of the flag.
+func (f *FlagSet) Float32(name string, value float32, usage string) *float32 {
+	p := new(float32)
+	f.Float32VarP(p, name, "", value, usage)
+	return p
+}
+
+// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float32P(name, shorthand string, value float32, usage string) *float32 {
+	p := new(float32)
+	f.Float32VarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// Float32 defines a float32 flag with specified name, default value, and usage string.
+// The return value is the address of a float32 variable that stores the value of the flag.
+func Float32(name string, value float32, usage string) *float32 {
+	return CommandLine.Float32P(name, "", value, usage)
+}
+
+// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash.
+func Float32P(name, shorthand string, value float32, usage string) *float32 {
+	return CommandLine.Float32P(name, shorthand, value, usage)
+}
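+
+// A minimal sketch (the same pattern applies to the float64 and integer
+// variants that follow; the flag name is hypothetical):
+//
+//	ratio := fs.Float32("ratio", 0.5, "sampling ratio")
+//	_ = fs.Parse([]string{"--ratio=0.75"})
+//	fmt.Println(*ratio)            // 0.75
+//	v, _ := fs.GetFloat32("ratio") // typed lookup by flag name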
diff --git a/vendor/github.com/spf13/pflag/float64.go b/vendor/github.com/spf13/pflag/float64.go
new file mode 100644
index 0000000..04b5492
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/float64.go
@@ -0,0 +1,84 @@
+package pflag
+
+import "strconv"
+
+// -- float64 Value
+type float64Value float64
+
+func newFloat64Value(val float64, p *float64) *float64Value {
+	*p = val
+	return (*float64Value)(p)
+}
+
+func (f *float64Value) Set(s string) error {
+	v, err := strconv.ParseFloat(s, 64)
+	*f = float64Value(v)
+	return err
+}
+
+func (f *float64Value) Type() string {
+	return "float64"
+}
+
+func (f *float64Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 64) }
+
+func float64Conv(sval string) (interface{}, error) {
+	return strconv.ParseFloat(sval, 64)
+}
+
+// GetFloat64 returns the float64 value of a flag with the given name.
+func (f *FlagSet) GetFloat64(name string) (float64, error) {
+	val, err := f.getFlagType(name, "float64", float64Conv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(float64), nil
+}
+
+// Float64Var defines a float64 flag with specified name, default value, and usage string.
+// The argument p points to a float64 variable in which to store the value of the flag.
+func (f *FlagSet) Float64Var(p *float64, name string, value float64, usage string) {
+	f.VarP(newFloat64Value(value, p), name, "", usage)
+}
+
+// Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float64VarP(p *float64, name, shorthand string, value float64, usage string) {
+	f.VarP(newFloat64Value(value, p), name, shorthand, usage)
+}
+
+// Float64Var defines a float64 flag with specified name, default value, and usage string.
+// The argument p points to a float64 variable in which to store the value of the flag.
+func Float64Var(p *float64, name string, value float64, usage string) {
+	CommandLine.VarP(newFloat64Value(value, p), name, "", usage)
+}
+
+// Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash.
+func Float64VarP(p *float64, name, shorthand string, value float64, usage string) {
+	CommandLine.VarP(newFloat64Value(value, p), name, shorthand, usage)
+}
+
+// Float64 defines a float64 flag with specified name, default value, and usage string.
+// The return value is the address of a float64 variable that stores the value of the flag.
+func (f *FlagSet) Float64(name string, value float64, usage string) *float64 {
+	p := new(float64)
+	f.Float64VarP(p, name, "", value, usage)
+	return p
+}
+
+// Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float64P(name, shorthand string, value float64, usage string) *float64 {
+	p := new(float64)
+	f.Float64VarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// Float64 defines a float64 flag with specified name, default value, and usage string.
+// The return value is the address of a float64 variable that stores the value of the flag.
+func Float64(name string, value float64, usage string) *float64 {
+	return CommandLine.Float64P(name, "", value, usage)
+}
+
+// Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash.
+func Float64P(name, shorthand string, value float64, usage string) *float64 {
+	return CommandLine.Float64P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/golangflag.go b/vendor/github.com/spf13/pflag/golangflag.go
new file mode 100644
index 0000000..d3dd72b
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/golangflag.go
@@ -0,0 +1,105 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pflag
+
+import (
+	goflag "flag"
+	"reflect"
+	"strings"
+)
+
+// flagValueWrapper implements pflag.Value around a flag.Value.  The main
+// difference here is the addition of the Type method that returns a string
+// name of the type.  As this is generally unknown, we approximate that with
+// reflection.
+type flagValueWrapper struct {
+	inner    goflag.Value
+	flagType string
+}
+
+// We are just copying the boolFlag interface out of goflag as that is what
+// they use to decide if a flag should get "true" when no arg is given.
+type goBoolFlag interface {
+	goflag.Value
+	IsBoolFlag() bool
+}
+
+func wrapFlagValue(v goflag.Value) Value {
+	// If the flag.Value happens to also be a pflag.Value, just use it directly.
+	if pv, ok := v.(Value); ok {
+		return pv
+	}
+
+	pv := &flagValueWrapper{
+		inner: v,
+	}
+
+	t := reflect.TypeOf(v)
+	if t.Kind() == reflect.Interface || t.Kind() == reflect.Ptr {
+		t = t.Elem()
+	}
+
+	pv.flagType = strings.TrimSuffix(t.Name(), "Value")
+	return pv
+}
+
+func (v *flagValueWrapper) String() string {
+	return v.inner.String()
+}
+
+func (v *flagValueWrapper) Set(s string) error {
+	return v.inner.Set(s)
+}
+
+func (v *flagValueWrapper) Type() string {
+	return v.flagType
+}
+
+// PFlagFromGoFlag will return a *pflag.Flag given a *flag.Flag.
+// If the *flag.Flag.Name was a single character (ex: `v`) it will be accessible
+// with both `-v` and `--v` in flags. If the golang flag was more than a single
+// character (ex: `verbose`) it will only be accessible via `--verbose`.
+func PFlagFromGoFlag(goflag *goflag.Flag) *Flag {
+	// Remember the default value as a string; it won't change.
+	flag := &Flag{
+		Name:  goflag.Name,
+		Usage: goflag.Usage,
+		Value: wrapFlagValue(goflag.Value),
+		// Looks like golang flags don't set DefValue correctly  :-(
+		//DefValue: goflag.DefValue,
+		DefValue: goflag.Value.String(),
+	}
+	// Ex: if the golang flag was -v, allow both -v and --v to work
+	if len(flag.Name) == 1 {
+		flag.Shorthand = flag.Name
+	}
+	if fv, ok := goflag.Value.(goBoolFlag); ok && fv.IsBoolFlag() {
+		flag.NoOptDefVal = "true"
+	}
+	return flag
+}
+
+// AddGoFlag will add the given *flag.Flag to the pflag.FlagSet
+func (f *FlagSet) AddGoFlag(goflag *goflag.Flag) {
+	if f.Lookup(goflag.Name) != nil {
+		return
+	}
+	newflag := PFlagFromGoFlag(goflag)
+	f.AddFlag(newflag)
+}
+
+// AddGoFlagSet will add the given *flag.FlagSet to the pflag.FlagSet
+func (f *FlagSet) AddGoFlagSet(newSet *goflag.FlagSet) {
+	if newSet == nil {
+		return
+	}
+	newSet.VisitAll(func(goflag *goflag.Flag) {
+		f.AddGoFlag(goflag)
+	})
+	if f.addedGoFlagSets == nil {
+		f.addedGoFlagSets = make([]*goflag.FlagSet, 0)
+	}
+	f.addedGoFlagSets = append(f.addedGoFlagSets, newSet)
+}
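+
+// A minimal sketch: adopt flags registered against the standard library's
+// flag package (e.g. by glog or the testing package) so they parse alongside
+// pflag-style flags.
+//
+//	import goflag "flag"
+//
+//	fs := NewFlagSet("app", ContinueOnError)
+//	fs.AddGoFlagSet(goflag.CommandLine)
+//	_ = fs.Parse(os.Args[1:])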
diff --git a/vendor/github.com/spf13/pflag/int.go b/vendor/github.com/spf13/pflag/int.go
new file mode 100644
index 0000000..1474b89
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int.go
@@ -0,0 +1,84 @@
+package pflag
+
+import "strconv"
+
+// -- int Value
+type intValue int
+
+func newIntValue(val int, p *int) *intValue {
+	*p = val
+	return (*intValue)(p)
+}
+
+func (i *intValue) Set(s string) error {
+	v, err := strconv.ParseInt(s, 0, 64)
+	*i = intValue(v)
+	return err
+}
+
+func (i *intValue) Type() string {
+	return "int"
+}
+
+func (i *intValue) String() string { return strconv.Itoa(int(*i)) }
+
+func intConv(sval string) (interface{}, error) {
+	return strconv.Atoi(sval)
+}
+
+// GetInt returns the int value of a flag with the given name.
+func (f *FlagSet) GetInt(name string) (int, error) {
+	val, err := f.getFlagType(name, "int", intConv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(int), nil
+}
+
+// IntVar defines an int flag with specified name, default value, and usage string.
+// The argument p points to an int variable in which to store the value of the flag.
+func (f *FlagSet) IntVar(p *int, name string, value int, usage string) {
+	f.VarP(newIntValue(value, p), name, "", usage)
+}
+
+// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IntVarP(p *int, name, shorthand string, value int, usage string) {
+	f.VarP(newIntValue(value, p), name, shorthand, usage)
+}
+
+// IntVar defines an int flag with specified name, default value, and usage string.
+// The argument p points to an int variable in which to store the value of the flag.
+func IntVar(p *int, name string, value int, usage string) {
+	CommandLine.VarP(newIntValue(value, p), name, "", usage)
+}
+
+// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash.
+func IntVarP(p *int, name, shorthand string, value int, usage string) {
+	CommandLine.VarP(newIntValue(value, p), name, shorthand, usage)
+}
+
+// Int defines an int flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+func (f *FlagSet) Int(name string, value int, usage string) *int {
+	p := new(int)
+	f.IntVarP(p, name, "", value, usage)
+	return p
+}
+
+// IntP is like Int, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IntP(name, shorthand string, value int, usage string) *int {
+	p := new(int)
+	f.IntVarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// Int defines an int flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+func Int(name string, value int, usage string) *int {
+	return CommandLine.IntP(name, "", value, usage)
+}
+
+// IntP is like Int, but accepts a shorthand letter that can be used after a single dash.
+func IntP(name, shorthand string, value int, usage string) *int {
+	return CommandLine.IntP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/int16.go b/vendor/github.com/spf13/pflag/int16.go
new file mode 100644
index 0000000..f1a01d0
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int16.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- int16 Value
+type int16Value int16
+
+func newInt16Value(val int16, p *int16) *int16Value {
+	*p = val
+	return (*int16Value)(p)
+}
+
+func (i *int16Value) Set(s string) error {
+	v, err := strconv.ParseInt(s, 0, 16)
+	*i = int16Value(v)
+	return err
+}
+
+func (i *int16Value) Type() string {
+	return "int16"
+}
+
+func (i *int16Value) String() string { return strconv.FormatInt(int64(*i), 10) }
+
+func int16Conv(sval string) (interface{}, error) {
+	v, err := strconv.ParseInt(sval, 0, 16)
+	if err != nil {
+		return 0, err
+	}
+	return int16(v), nil
+}
+
+// GetInt16 returns the int16 value of a flag with the given name.
+func (f *FlagSet) GetInt16(name string) (int16, error) {
+	val, err := f.getFlagType(name, "int16", int16Conv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(int16), nil
+}
+
+// Int16Var defines an int16 flag with specified name, default value, and usage string.
+// The argument p points to an int16 variable in which to store the value of the flag.
+func (f *FlagSet) Int16Var(p *int16, name string, value int16, usage string) {
+	f.VarP(newInt16Value(value, p), name, "", usage)
+}
+
+// Int16VarP is like Int16Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int16VarP(p *int16, name, shorthand string, value int16, usage string) {
+	f.VarP(newInt16Value(value, p), name, shorthand, usage)
+}
+
+// Int16Var defines an int16 flag with specified name, default value, and usage string.
+// The argument p points to an int16 variable in which to store the value of the flag.
+func Int16Var(p *int16, name string, value int16, usage string) {
+	CommandLine.VarP(newInt16Value(value, p), name, "", usage)
+}
+
+// Int16VarP is like Int16Var, but accepts a shorthand letter that can be used after a single dash.
+func Int16VarP(p *int16, name, shorthand string, value int16, usage string) {
+	CommandLine.VarP(newInt16Value(value, p), name, shorthand, usage)
+}
+
+// Int16 defines an int16 flag with specified name, default value, and usage string.
+// The return value is the address of an int16 variable that stores the value of the flag.
+func (f *FlagSet) Int16(name string, value int16, usage string) *int16 {
+	p := new(int16)
+	f.Int16VarP(p, name, "", value, usage)
+	return p
+}
+
+// Int16P is like Int16, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int16P(name, shorthand string, value int16, usage string) *int16 {
+	p := new(int16)
+	f.Int16VarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// Int16 defines an int16 flag with specified name, default value, and usage string.
+// The return value is the address of an int16 variable that stores the value of the flag.
+func Int16(name string, value int16, usage string) *int16 {
+	return CommandLine.Int16P(name, "", value, usage)
+}
+
+// Int16P is like Int16, but accepts a shorthand letter that can be used after a single dash.
+func Int16P(name, shorthand string, value int16, usage string) *int16 {
+	return CommandLine.Int16P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/int32.go b/vendor/github.com/spf13/pflag/int32.go
new file mode 100644
index 0000000..9b95944
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int32.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- int32 Value
+type int32Value int32
+
+func newInt32Value(val int32, p *int32) *int32Value {
+	*p = val
+	return (*int32Value)(p)
+}
+
+func (i *int32Value) Set(s string) error {
+	v, err := strconv.ParseInt(s, 0, 32)
+	*i = int32Value(v)
+	return err
+}
+
+func (i *int32Value) Type() string {
+	return "int32"
+}
+
+func (i *int32Value) String() string { return strconv.FormatInt(int64(*i), 10) }
+
+func int32Conv(sval string) (interface{}, error) {
+	v, err := strconv.ParseInt(sval, 0, 32)
+	if err != nil {
+		return 0, err
+	}
+	return int32(v), nil
+}
+
+// GetInt32 returns the int32 value of a flag with the given name.
+func (f *FlagSet) GetInt32(name string) (int32, error) {
+	val, err := f.getFlagType(name, "int32", int32Conv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(int32), nil
+}
+
+// Int32Var defines an int32 flag with specified name, default value, and usage string.
+// The argument p points to an int32 variable in which to store the value of the flag.
+func (f *FlagSet) Int32Var(p *int32, name string, value int32, usage string) {
+	f.VarP(newInt32Value(value, p), name, "", usage)
+}
+
+// Int32VarP is like Int32Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int32VarP(p *int32, name, shorthand string, value int32, usage string) {
+	f.VarP(newInt32Value(value, p), name, shorthand, usage)
+}
+
+// Int32Var defines an int32 flag with specified name, default value, and usage string.
+// The argument p points to an int32 variable in which to store the value of the flag.
+func Int32Var(p *int32, name string, value int32, usage string) {
+	CommandLine.VarP(newInt32Value(value, p), name, "", usage)
+}
+
+// Int32VarP is like Int32Var, but accepts a shorthand letter that can be used after a single dash.
+func Int32VarP(p *int32, name, shorthand string, value int32, usage string) {
+	CommandLine.VarP(newInt32Value(value, p), name, shorthand, usage)
+}
+
+// Int32 defines an int32 flag with specified name, default value, and usage string.
+// The return value is the address of an int32 variable that stores the value of the flag.
+func (f *FlagSet) Int32(name string, value int32, usage string) *int32 {
+	p := new(int32)
+	f.Int32VarP(p, name, "", value, usage)
+	return p
+}
+
+// Int32P is like Int32, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int32P(name, shorthand string, value int32, usage string) *int32 {
+	p := new(int32)
+	f.Int32VarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// Int32 defines an int32 flag with specified name, default value, and usage string.
+// The return value is the address of an int32 variable that stores the value of the flag.
+func Int32(name string, value int32, usage string) *int32 {
+	return CommandLine.Int32P(name, "", value, usage)
+}
+
+// Int32P is like Int32, but accepts a shorthand letter that can be used after a single dash.
+func Int32P(name, shorthand string, value int32, usage string) *int32 {
+	return CommandLine.Int32P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/int64.go b/vendor/github.com/spf13/pflag/int64.go
new file mode 100644
index 0000000..0026d78
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int64.go
@@ -0,0 +1,84 @@
+package pflag
+
+import "strconv"
+
+// -- int64 Value
+type int64Value int64
+
+func newInt64Value(val int64, p *int64) *int64Value {
+	*p = val
+	return (*int64Value)(p)
+}
+
+func (i *int64Value) Set(s string) error {
+	v, err := strconv.ParseInt(s, 0, 64)
+	*i = int64Value(v)
+	return err
+}
+
+func (i *int64Value) Type() string {
+	return "int64"
+}
+
+func (i *int64Value) String() string { return strconv.FormatInt(int64(*i), 10) }
+
+func int64Conv(sval string) (interface{}, error) {
+	return strconv.ParseInt(sval, 0, 64)
+}
+
+// GetInt64 returns the int64 value of a flag with the given name.
+func (f *FlagSet) GetInt64(name string) (int64, error) {
+	val, err := f.getFlagType(name, "int64", int64Conv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(int64), nil
+}
+
+// Int64Var defines an int64 flag with specified name, default value, and usage string.
+// The argument p points to an int64 variable in which to store the value of the flag.
+func (f *FlagSet) Int64Var(p *int64, name string, value int64, usage string) {
+	f.VarP(newInt64Value(value, p), name, "", usage)
+}
+
+// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int64VarP(p *int64, name, shorthand string, value int64, usage string) {
+	f.VarP(newInt64Value(value, p), name, shorthand, usage)
+}
+
+// Int64Var defines an int64 flag with specified name, default value, and usage string.
+// The argument p points to an int64 variable in which to store the value of the flag.
+func Int64Var(p *int64, name string, value int64, usage string) {
+	CommandLine.VarP(newInt64Value(value, p), name, "", usage)
+}
+
+// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash.
+func Int64VarP(p *int64, name, shorthand string, value int64, usage string) {
+	CommandLine.VarP(newInt64Value(value, p), name, shorthand, usage)
+}
+
+// Int64 defines an int64 flag with specified name, default value, and usage string.
+// The return value is the address of an int64 variable that stores the value of the flag.
+func (f *FlagSet) Int64(name string, value int64, usage string) *int64 {
+	p := new(int64)
+	f.Int64VarP(p, name, "", value, usage)
+	return p
+}
+
+// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int64P(name, shorthand string, value int64, usage string) *int64 {
+	p := new(int64)
+	f.Int64VarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// Int64 defines an int64 flag with specified name, default value, and usage string.
+// The return value is the address of an int64 variable that stores the value of the flag.
+func Int64(name string, value int64, usage string) *int64 {
+	return CommandLine.Int64P(name, "", value, usage)
+}
+
+// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash.
+func Int64P(name, shorthand string, value int64, usage string) *int64 {
+	return CommandLine.Int64P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/int8.go b/vendor/github.com/spf13/pflag/int8.go
new file mode 100644
index 0000000..4da9222
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int8.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- int8 Value
+type int8Value int8
+
+func newInt8Value(val int8, p *int8) *int8Value {
+	*p = val
+	return (*int8Value)(p)
+}
+
+func (i *int8Value) Set(s string) error {
+	v, err := strconv.ParseInt(s, 0, 8)
+	*i = int8Value(v)
+	return err
+}
+
+func (i *int8Value) Type() string {
+	return "int8"
+}
+
+func (i *int8Value) String() string { return strconv.FormatInt(int64(*i), 10) }
+
+func int8Conv(sval string) (interface{}, error) {
+	v, err := strconv.ParseInt(sval, 0, 8)
+	if err != nil {
+		return 0, err
+	}
+	return int8(v), nil
+}
+
+// GetInt8 returns the int8 value of a flag with the given name.
+func (f *FlagSet) GetInt8(name string) (int8, error) {
+	val, err := f.getFlagType(name, "int8", int8Conv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(int8), nil
+}
+
+// Int8Var defines an int8 flag with specified name, default value, and usage string.
+// The argument p points to an int8 variable in which to store the value of the flag.
+func (f *FlagSet) Int8Var(p *int8, name string, value int8, usage string) {
+	f.VarP(newInt8Value(value, p), name, "", usage)
+}
+
+// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int8VarP(p *int8, name, shorthand string, value int8, usage string) {
+	f.VarP(newInt8Value(value, p), name, shorthand, usage)
+}
+
+// Int8Var defines an int8 flag with specified name, default value, and usage string.
+// The argument p points to an int8 variable in which to store the value of the flag.
+func Int8Var(p *int8, name string, value int8, usage string) {
+	CommandLine.VarP(newInt8Value(value, p), name, "", usage)
+}
+
+// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash.
+func Int8VarP(p *int8, name, shorthand string, value int8, usage string) {
+	CommandLine.VarP(newInt8Value(value, p), name, shorthand, usage)
+}
+
+// Int8 defines an int8 flag with specified name, default value, and usage string.
+// The return value is the address of an int8 variable that stores the value of the flag.
+func (f *FlagSet) Int8(name string, value int8, usage string) *int8 {
+	p := new(int8)
+	f.Int8VarP(p, name, "", value, usage)
+	return p
+}
+
+// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int8P(name, shorthand string, value int8, usage string) *int8 {
+	p := new(int8)
+	f.Int8VarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// Int8 defines an int8 flag with specified name, default value, and usage string.
+// The return value is the address of an int8 variable that stores the value of the flag.
+func Int8(name string, value int8, usage string) *int8 {
+	return CommandLine.Int8P(name, "", value, usage)
+}
+
+// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash.
+func Int8P(name, shorthand string, value int8, usage string) *int8 {
+	return CommandLine.Int8P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/int_slice.go b/vendor/github.com/spf13/pflag/int_slice.go
new file mode 100644
index 0000000..1e7c9ed
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int_slice.go
@@ -0,0 +1,128 @@
+package pflag
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// -- intSlice Value
+type intSliceValue struct {
+	value   *[]int
+	changed bool
+}
+
+func newIntSliceValue(val []int, p *[]int) *intSliceValue {
+	isv := new(intSliceValue)
+	isv.value = p
+	*isv.value = val
+	return isv
+}
+
+func (s *intSliceValue) Set(val string) error {
+	ss := strings.Split(val, ",")
+	out := make([]int, len(ss))
+	for i, d := range ss {
+		var err error
+		out[i], err = strconv.Atoi(d)
+		if err != nil {
+			return err
+		}
+	}
+	if !s.changed {
+		*s.value = out
+	} else {
+		*s.value = append(*s.value, out...)
+	}
+	s.changed = true
+	return nil
+}
+
+func (s *intSliceValue) Type() string {
+	return "intSlice"
+}
+
+func (s *intSliceValue) String() string {
+	out := make([]string, len(*s.value))
+	for i, d := range *s.value {
+		out[i] = fmt.Sprintf("%d", d)
+	}
+	return "[" + strings.Join(out, ",") + "]"
+}
+
+func intSliceConv(val string) (interface{}, error) {
+	val = strings.Trim(val, "[]")
+	// Empty string would cause a slice with one (empty) entry
+	if len(val) == 0 {
+		return []int{}, nil
+	}
+	ss := strings.Split(val, ",")
+	out := make([]int, len(ss))
+	for i, d := range ss {
+		var err error
+		out[i], err = strconv.Atoi(d)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return out, nil
+}
+
+// GetIntSlice returns the []int value of a flag with the given name.
+func (f *FlagSet) GetIntSlice(name string) ([]int, error) {
+	val, err := f.getFlagType(name, "intSlice", intSliceConv)
+	if err != nil {
+		return []int{}, err
+	}
+	return val.([]int), nil
+}
+
+// IntSliceVar defines an intSlice flag with specified name, default value, and usage string.
+// The argument p points to a []int variable in which to store the value of the flag.
+func (f *FlagSet) IntSliceVar(p *[]int, name string, value []int, usage string) {
+	f.VarP(newIntSliceValue(value, p), name, "", usage)
+}
+
+// IntSliceVarP is like IntSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) {
+	f.VarP(newIntSliceValue(value, p), name, shorthand, usage)
+}
+
+// IntSliceVar defines a []int flag with specified name, default value, and usage string.
+// The argument p points to a []int variable in which to store the value of the flag.
+func IntSliceVar(p *[]int, name string, value []int, usage string) {
+	CommandLine.VarP(newIntSliceValue(value, p), name, "", usage)
+}
+
+// IntSliceVarP is like IntSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) {
+	CommandLine.VarP(newIntSliceValue(value, p), name, shorthand, usage)
+}
+
+// IntSlice defines a []int flag with specified name, default value, and usage string.
+// The return value is the address of a []int variable that stores the value of the flag.
+func (f *FlagSet) IntSlice(name string, value []int, usage string) *[]int {
+	p := []int{}
+	f.IntSliceVarP(&p, name, "", value, usage)
+	return &p
+}
+
+// IntSliceP is like IntSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IntSliceP(name, shorthand string, value []int, usage string) *[]int {
+	p := []int{}
+	f.IntSliceVarP(&p, name, shorthand, value, usage)
+	return &p
+}
+
+// IntSlice defines a []int flag with specified name, default value, and usage string.
+// The return value is the address of a []int variable that stores the value of the flag.
+func IntSlice(name string, value []int, usage string) *[]int {
+	return CommandLine.IntSliceP(name, "", value, usage)
+}
+
+// IntSliceP is like IntSlice, but accepts a shorthand letter that can be used after a single dash.
+func IntSliceP(name, shorthand string, value []int, usage string) *[]int {
+	return CommandLine.IntSliceP(name, shorthand, value, usage)
+}
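+
+// A minimal sketch: per Set above, the first use of the flag replaces the
+// default and later uses append (the flag name is hypothetical).
+//
+//	ports := fs.IntSlice("ports", []int{80}, "ports to listen on")
+//	_ = fs.Parse([]string{"--ports=8080,8443", "--ports=9090"})
+//	// *ports is now [8080 8443 9090]; the default [80] was discarded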
diff --git a/vendor/github.com/spf13/pflag/ip.go b/vendor/github.com/spf13/pflag/ip.go
new file mode 100644
index 0000000..3d414ba
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/ip.go
@@ -0,0 +1,94 @@
+package pflag
+
+import (
+	"fmt"
+	"net"
+	"strings"
+)
+
+// -- net.IP value
+type ipValue net.IP
+
+func newIPValue(val net.IP, p *net.IP) *ipValue {
+	*p = val
+	return (*ipValue)(p)
+}
+
+func (i *ipValue) String() string { return net.IP(*i).String() }
+func (i *ipValue) Set(s string) error {
+	ip := net.ParseIP(strings.TrimSpace(s))
+	if ip == nil {
+		return fmt.Errorf("failed to parse IP: %q", s)
+	}
+	*i = ipValue(ip)
+	return nil
+}
+
+func (i *ipValue) Type() string {
+	return "ip"
+}
+
+func ipConv(sval string) (interface{}, error) {
+	ip := net.ParseIP(sval)
+	if ip != nil {
+		return ip, nil
+	}
+	return nil, fmt.Errorf("invalid string being converted to IP address: %s", sval)
+}
+
+// GetIP returns the net.IP value of a flag with the given name.
+func (f *FlagSet) GetIP(name string) (net.IP, error) {
+	val, err := f.getFlagType(name, "ip", ipConv)
+	if err != nil {
+		return nil, err
+	}
+	return val.(net.IP), nil
+}
+
+// IPVar defines a net.IP flag with specified name, default value, and usage string.
+// The argument p points to a net.IP variable in which to store the value of the flag.
+func (f *FlagSet) IPVar(p *net.IP, name string, value net.IP, usage string) {
+	f.VarP(newIPValue(value, p), name, "", usage)
+}
+
+// IPVarP is like IPVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) {
+	f.VarP(newIPValue(value, p), name, shorthand, usage)
+}
+
+// IPVar defines a net.IP flag with specified name, default value, and usage string.
+// The argument p points to a net.IP variable in which to store the value of the flag.
+func IPVar(p *net.IP, name string, value net.IP, usage string) {
+	CommandLine.VarP(newIPValue(value, p), name, "", usage)
+}
+
+// IPVarP is like IPVar, but accepts a shorthand letter that can be used after a single dash.
+func IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) {
+	CommandLine.VarP(newIPValue(value, p), name, shorthand, usage)
+}
+
+// IP defines a net.IP flag with specified name, default value, and usage string.
+// The return value is the address of a net.IP variable that stores the value of the flag.
+func (f *FlagSet) IP(name string, value net.IP, usage string) *net.IP {
+	p := new(net.IP)
+	f.IPVarP(p, name, "", value, usage)
+	return p
+}
+
+// IPP is like IP, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPP(name, shorthand string, value net.IP, usage string) *net.IP {
+	p := new(net.IP)
+	f.IPVarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// IP defines a net.IP flag with specified name, default value, and usage string.
+// The return value is the address of a net.IP variable that stores the value of the flag.
+func IP(name string, value net.IP, usage string) *net.IP {
+	return CommandLine.IPP(name, "", value, usage)
+}
+
+// IPP is like IP, but accepts a shorthand letter that can be used after a single dash.
+func IPP(name, shorthand string, value net.IP, usage string) *net.IP {
+	return CommandLine.IPP(name, shorthand, value, usage)
+}
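+
+// A minimal sketch (the flag name and addresses are hypothetical):
+//
+//	bind := fs.IP("bind", net.ParseIP("127.0.0.1"), "address to bind")
+//	_ = fs.Parse([]string{"--bind=10.0.0.7"})
+//	fmt.Println(bind.String()) // 10.0.0.7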
diff --git a/vendor/github.com/spf13/pflag/ip_slice.go b/vendor/github.com/spf13/pflag/ip_slice.go
new file mode 100644
index 0000000..7dd196f
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/ip_slice.go
@@ -0,0 +1,148 @@
+package pflag
+
+import (
+	"fmt"
+	"io"
+	"net"
+	"strings"
+)
+
+// -- ipSlice Value
+type ipSliceValue struct {
+	value   *[]net.IP
+	changed bool
+}
+
+func newIPSliceValue(val []net.IP, p *[]net.IP) *ipSliceValue {
+	ipsv := new(ipSliceValue)
+	ipsv.value = p
+	*ipsv.value = val
+	return ipsv
+}
+
+// Set converts the comma-separated IP argument string and assigns it as the
+// []net.IP value of this flag.
+// If Set is called on a flag that already has a []net.IP assigned, the newly converted values will be appended.
+func (s *ipSliceValue) Set(val string) error {
+
+	// remove all quote characters
+	rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "")
+
+	// read flag arguments with CSV parser
+	ipStrSlice, err := readAsCSV(rmQuote.Replace(val))
+	if err != nil && err != io.EOF {
+		return err
+	}
+
+	// parse ip values into slice
+	out := make([]net.IP, 0, len(ipStrSlice))
+	for _, ipStr := range ipStrSlice {
+		ip := net.ParseIP(strings.TrimSpace(ipStr))
+		if ip == nil {
+			return fmt.Errorf("invalid string being converted to IP address: %s", ipStr)
+		}
+		out = append(out, ip)
+	}
+
+	if !s.changed {
+		*s.value = out
+	} else {
+		*s.value = append(*s.value, out...)
+	}
+
+	s.changed = true
+
+	return nil
+}
+
+// Type returns a string that uniquely represents this flag's type.
+func (s *ipSliceValue) Type() string {
+	return "ipSlice"
+}
+
+// String defines a "native" format for this net.IP slice flag value.
+func (s *ipSliceValue) String() string {
+
+	ipStrSlice := make([]string, len(*s.value))
+	for i, ip := range *s.value {
+		ipStrSlice[i] = ip.String()
+	}
+
+	out, _ := writeAsCSV(ipStrSlice)
+
+	return "[" + out + "]"
+}
+
+func ipSliceConv(val string) (interface{}, error) {
+	val = strings.Trim(val, "[]")
+	// Empty string would cause a slice with one (empty) entry
+	if len(val) == 0 {
+		return []net.IP{}, nil
+	}
+	ss := strings.Split(val, ",")
+	out := make([]net.IP, len(ss))
+	for i, sval := range ss {
+		ip := net.ParseIP(strings.TrimSpace(sval))
+		if ip == nil {
+			return nil, fmt.Errorf("invalid string being converted to IP address: %s", sval)
+		}
+		out[i] = ip
+	}
+	return out, nil
+}
+
+// GetIPSlice returns the []net.IP value of a flag with the given name.
+func (f *FlagSet) GetIPSlice(name string) ([]net.IP, error) {
+	val, err := f.getFlagType(name, "ipSlice", ipSliceConv)
+	if err != nil {
+		return []net.IP{}, err
+	}
+	return val.([]net.IP), nil
+}
+
+// IPSliceVar defines an ipSlice flag with specified name, default value, and usage string.
+// The argument p points to a []net.IP variable in which to store the value of the flag.
+func (f *FlagSet) IPSliceVar(p *[]net.IP, name string, value []net.IP, usage string) {
+	f.VarP(newIPSliceValue(value, p), name, "", usage)
+}
+
+// IPSliceVarP is like IPSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPSliceVarP(p *[]net.IP, name, shorthand string, value []net.IP, usage string) {
+	f.VarP(newIPSliceValue(value, p), name, shorthand, usage)
+}
+
+// IPSliceVar defines a []net.IP flag with specified name, default value, and usage string.
+// The argument p points to a []net.IP variable in which to store the value of the flag.
+func IPSliceVar(p *[]net.IP, name string, value []net.IP, usage string) {
+	CommandLine.VarP(newIPSliceValue(value, p), name, "", usage)
+}
+
+// IPSliceVarP is like IPSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func IPSliceVarP(p *[]net.IP, name, shorthand string, value []net.IP, usage string) {
+	CommandLine.VarP(newIPSliceValue(value, p), name, shorthand, usage)
+}
+
+// IPSlice defines a []net.IP flag with specified name, default value, and usage string.
+// The return value is the address of a []net.IP variable that stores the value of that flag.
+func (f *FlagSet) IPSlice(name string, value []net.IP, usage string) *[]net.IP {
+	p := []net.IP{}
+	f.IPSliceVarP(&p, name, "", value, usage)
+	return &p
+}
+
+// IPSliceP is like IPSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPSliceP(name, shorthand string, value []net.IP, usage string) *[]net.IP {
+	p := []net.IP{}
+	f.IPSliceVarP(&p, name, shorthand, value, usage)
+	return &p
+}
+
+// IPSlice defines a []net.IP flag with specified name, default value, and usage string.
+// The return value is the address of a []net.IP variable that stores the value of the flag.
+func IPSlice(name string, value []net.IP, usage string) *[]net.IP {
+	return CommandLine.IPSliceP(name, "", value, usage)
+}
+
+// IPSliceP is like IPSlice, but accepts a shorthand letter that can be used after a single dash.
+func IPSliceP(name, shorthand string, value []net.IP, usage string) *[]net.IP {
+	return CommandLine.IPSliceP(name, shorthand, value, usage)
+}
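
For orientation only (not part of the vendored file): a minimal sketch of how the IPSlice flag above behaves; the FlagSet name, flag name, and addresses are illustrative.

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
	ips := fs.IPSlice("ips", nil, "peer addresses")

	// The first use replaces the default; later uses append, and each
	// value may itself be a comma-separated list.
	_ = fs.Parse([]string{"--ips=10.0.0.1,10.0.0.2", "--ips=192.168.1.1"})
	fmt.Println(*ips) // [10.0.0.1 10.0.0.2 192.168.1.1]
}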
diff --git a/vendor/github.com/spf13/pflag/ipmask.go b/vendor/github.com/spf13/pflag/ipmask.go
new file mode 100644
index 0000000..5bd44bd
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/ipmask.go
@@ -0,0 +1,122 @@
+package pflag
+
+import (
+	"fmt"
+	"net"
+	"strconv"
+)
+
+// -- net.IPMask value
+type ipMaskValue net.IPMask
+
+func newIPMaskValue(val net.IPMask, p *net.IPMask) *ipMaskValue {
+	*p = val
+	return (*ipMaskValue)(p)
+}
+
+func (i *ipMaskValue) String() string { return net.IPMask(*i).String() }
+func (i *ipMaskValue) Set(s string) error {
+	ip := ParseIPv4Mask(s)
+	if ip == nil {
+		return fmt.Errorf("failed to parse IP mask: %q", s)
+	}
+	*i = ipMaskValue(ip)
+	return nil
+}
+
+func (i *ipMaskValue) Type() string {
+	return "ipMask"
+}
+
+// ParseIPv4Mask parses s as an IPv4 netmask, written either in dotted-decimal form (e.g. 255.255.255.0) or in hex form (e.g. ffffff00).
+// This function should really belong to the net package.
+func ParseIPv4Mask(s string) net.IPMask {
+	mask := net.ParseIP(s)
+	if mask == nil {
+		if len(s) != 8 {
+			return nil
+		}
+		// net.IPMask.String() actually outputs things like ffffff00
+		// so write a horrible parser for that as well  :-(
+		m := []int{}
+		for i := 0; i < 4; i++ {
+			b := "0x" + s[2*i:2*i+2]
+			d, err := strconv.ParseInt(b, 0, 0)
+			if err != nil {
+				return nil
+			}
+			m = append(m, int(d))
+		}
+		s := fmt.Sprintf("%d.%d.%d.%d", m[0], m[1], m[2], m[3])
+		mask = net.ParseIP(s)
+		if mask == nil {
+			return nil
+		}
+	}
+	return net.IPv4Mask(mask[12], mask[13], mask[14], mask[15])
+}
+
+func parseIPv4Mask(sval string) (interface{}, error) {
+	mask := ParseIPv4Mask(sval)
+	if mask == nil {
+		return nil, fmt.Errorf("unable to parse %s as net.IPMask", sval)
+	}
+	return mask, nil
+}
+
+// GetIPv4Mask returns the net.IPMask value of a flag with the given name
+func (f *FlagSet) GetIPv4Mask(name string) (net.IPMask, error) {
+	val, err := f.getFlagType(name, "ipMask", parseIPv4Mask)
+	if err != nil {
+		return nil, err
+	}
+	return val.(net.IPMask), nil
+}
+
+// IPMaskVar defines a net.IPMask flag with specified name, default value, and usage string.
+// The argument p points to a net.IPMask variable in which to store the value of the flag.
+func (f *FlagSet) IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) {
+	f.VarP(newIPMaskValue(value, p), name, "", usage)
+}
+
+// IPMaskVarP is like IPMaskVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) {
+	f.VarP(newIPMaskValue(value, p), name, shorthand, usage)
+}
+
+// IPMaskVar defines a net.IPMask flag with specified name, default value, and usage string.
+// The argument p points to a net.IPMask variable in which to store the value of the flag.
+func IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) {
+	CommandLine.VarP(newIPMaskValue(value, p), name, "", usage)
+}
+
+// IPMaskVarP is like IPMaskVar, but accepts a shorthand letter that can be used after a single dash.
+func IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) {
+	CommandLine.VarP(newIPMaskValue(value, p), name, shorthand, usage)
+}
+
+// IPMask defines a net.IPMask flag with specified name, default value, and usage string.
+// The return value is the address of a net.IPMask variable that stores the value of the flag.
+func (f *FlagSet) IPMask(name string, value net.IPMask, usage string) *net.IPMask {
+	p := new(net.IPMask)
+	f.IPMaskVarP(p, name, "", value, usage)
+	return p
+}
+
+// IPMaskP is like IPMask, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask {
+	p := new(net.IPMask)
+	f.IPMaskVarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// IPMask defines a net.IPMask flag with specified name, default value, and usage string.
+// The return value is the address of a net.IPMask variable that stores the value of the flag.
+func IPMask(name string, value net.IPMask, usage string) *net.IPMask {
+	return CommandLine.IPMaskP(name, "", value, usage)
+}
+
+// IPMaskP is like IPMask, but accepts a shorthand letter that can be used after a single dash.
+func IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask {
+	return CommandLine.IPMaskP(name, shorthand, value, usage)
+}
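
ParseIPv4Mask accepts both spellings handled by the parser above. A small sketch (illustrative, not part of the diff), assuming only the vendored pflag package:

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	// Dotted-decimal and hex spellings yield the same 4-byte mask.
	fmt.Println(pflag.ParseIPv4Mask("255.255.255.0")) // ffffff00
	fmt.Println(pflag.ParseIPv4Mask("ffffff00"))      // ffffff00
	fmt.Println(pflag.ParseIPv4Mask("bogus"))         // <nil>
}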
diff --git a/vendor/github.com/spf13/pflag/ipnet.go b/vendor/github.com/spf13/pflag/ipnet.go
new file mode 100644
index 0000000..e2c1b8b
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/ipnet.go
@@ -0,0 +1,98 @@
+package pflag
+
+import (
+	"fmt"
+	"net"
+	"strings"
+)
+
+// IPNet adapts net.IPNet for use as a flag.
+type ipNetValue net.IPNet
+
+func (ipnet ipNetValue) String() string {
+	n := net.IPNet(ipnet)
+	return n.String()
+}
+
+func (ipnet *ipNetValue) Set(value string) error {
+	_, n, err := net.ParseCIDR(strings.TrimSpace(value))
+	if err != nil {
+		return err
+	}
+	*ipnet = ipNetValue(*n)
+	return nil
+}
+
+func (*ipNetValue) Type() string {
+	return "ipNet"
+}
+
+func newIPNetValue(val net.IPNet, p *net.IPNet) *ipNetValue {
+	*p = val
+	return (*ipNetValue)(p)
+}
+
+func ipNetConv(sval string) (interface{}, error) {
+	_, n, err := net.ParseCIDR(strings.TrimSpace(sval))
+	if err == nil {
+		return *n, nil
+	}
+	return nil, fmt.Errorf("invalid string being converted to IPNet: %s", sval)
+}
+
+// GetIPNet returns the net.IPNet value of a flag with the given name
+func (f *FlagSet) GetIPNet(name string) (net.IPNet, error) {
+	val, err := f.getFlagType(name, "ipNet", ipNetConv)
+	if err != nil {
+		return net.IPNet{}, err
+	}
+	return val.(net.IPNet), nil
+}
+
+// IPNetVar defines a net.IPNet flag with specified name, default value, and usage string.
+// The argument p points to a net.IPNet variable in which to store the value of the flag.
+func (f *FlagSet) IPNetVar(p *net.IPNet, name string, value net.IPNet, usage string) {
+	f.VarP(newIPNetValue(value, p), name, "", usage)
+}
+
+// IPNetVarP is like IPNetVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPNetVarP(p *net.IPNet, name, shorthand string, value net.IPNet, usage string) {
+	f.VarP(newIPNetValue(value, p), name, shorthand, usage)
+}
+
+// IPNetVar defines a net.IPNet flag with specified name, default value, and usage string.
+// The argument p points to a net.IPNet variable in which to store the value of the flag.
+func IPNetVar(p *net.IPNet, name string, value net.IPNet, usage string) {
+	CommandLine.VarP(newIPNetValue(value, p), name, "", usage)
+}
+
+// IPNetVarP is like IPNetVar, but accepts a shorthand letter that can be used after a single dash.
+func IPNetVarP(p *net.IPNet, name, shorthand string, value net.IPNet, usage string) {
+	CommandLine.VarP(newIPNetValue(value, p), name, shorthand, usage)
+}
+
+// IPNet defines a net.IPNet flag with specified name, default value, and usage string.
+// The return value is the address of a net.IPNet variable that stores the value of the flag.
+func (f *FlagSet) IPNet(name string, value net.IPNet, usage string) *net.IPNet {
+	p := new(net.IPNet)
+	f.IPNetVarP(p, name, "", value, usage)
+	return p
+}
+
+// IPNetP is like IPNet, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPNetP(name, shorthand string, value net.IPNet, usage string) *net.IPNet {
+	p := new(net.IPNet)
+	f.IPNetVarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// IPNet defines a net.IPNet flag with specified name, default value, and usage string.
+// The return value is the address of a net.IPNet variable that stores the value of the flag.
+func IPNet(name string, value net.IPNet, usage string) *net.IPNet {
+	return CommandLine.IPNetP(name, "", value, usage)
+}
+
+// IPNetP is like IPNet, but accepts a shorthand letter that can be used after a single dash.
+func IPNetP(name, shorthand string, value net.IPNet, usage string) *net.IPNet {
+	return CommandLine.IPNetP(name, shorthand, value, usage)
+}
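
Because Set goes through net.ParseCIDR, host bits are masked off before the value is stored. A minimal sketch; the flag name "subnet" is illustrative:

package main

import (
	"fmt"
	"net"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
	subnet := fs.IPNet("subnet", net.IPNet{}, "subnet to serve")

	// net.ParseCIDR masks host bits, so 10.1.2.3/16 is stored as 10.1.0.0/16.
	_ = fs.Parse([]string{"--subnet=10.1.2.3/16"})
	fmt.Println(subnet) // 10.1.0.0/16
}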
diff --git a/vendor/github.com/spf13/pflag/string.go b/vendor/github.com/spf13/pflag/string.go
new file mode 100644
index 0000000..04e0a26
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string.go
@@ -0,0 +1,80 @@
+package pflag
+
+// -- string Value
+type stringValue string
+
+func newStringValue(val string, p *string) *stringValue {
+	*p = val
+	return (*stringValue)(p)
+}
+
+func (s *stringValue) Set(val string) error {
+	*s = stringValue(val)
+	return nil
+}
+func (s *stringValue) Type() string {
+	return "string"
+}
+
+func (s *stringValue) String() string { return string(*s) }
+
+func stringConv(sval string) (interface{}, error) {
+	return sval, nil
+}
+
+// GetString returns the string value of a flag with the given name
+func (f *FlagSet) GetString(name string) (string, error) {
+	val, err := f.getFlagType(name, "string", stringConv)
+	if err != nil {
+		return "", err
+	}
+	return val.(string), nil
+}
+
+// StringVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a string variable in which to store the value of the flag.
+func (f *FlagSet) StringVar(p *string, name string, value string, usage string) {
+	f.VarP(newStringValue(value, p), name, "", usage)
+}
+
+// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringVarP(p *string, name, shorthand string, value string, usage string) {
+	f.VarP(newStringValue(value, p), name, shorthand, usage)
+}
+
+// StringVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a string variable in which to store the value of the flag.
+func StringVar(p *string, name string, value string, usage string) {
+	CommandLine.VarP(newStringValue(value, p), name, "", usage)
+}
+
+// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash.
+func StringVarP(p *string, name, shorthand string, value string, usage string) {
+	CommandLine.VarP(newStringValue(value, p), name, shorthand, usage)
+}
+
+// String defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a string variable that stores the value of the flag.
+func (f *FlagSet) String(name string, value string, usage string) *string {
+	p := new(string)
+	f.StringVarP(p, name, "", value, usage)
+	return p
+}
+
+// StringP is like String, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringP(name, shorthand string, value string, usage string) *string {
+	p := new(string)
+	f.StringVarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// String defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a string variable that stores the value of the flag.
+func String(name string, value string, usage string) *string {
+	return CommandLine.StringP(name, "", value, usage)
+}
+
+// StringP is like String, but accepts a shorthand letter that can be used after a single dash.
+func StringP(name, shorthand string, value string, usage string) *string {
+	return CommandLine.StringP(name, shorthand, value, usage)
+}
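
The Var/VarP/String/StringP quartet above is the registration pattern every pflag type repeats. A minimal sketch of the shorthand variant; all names are illustrative:

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)

	var name string
	fs.StringVarP(&name, "name", "n", "world", "name to greet")

	// The one-letter shorthand works after a single dash.
	_ = fs.Parse([]string{"-n", "gopher"})
	fmt.Println("hello", name) // hello gopher
}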
diff --git a/vendor/github.com/spf13/pflag/string_array.go b/vendor/github.com/spf13/pflag/string_array.go
new file mode 100644
index 0000000..fa7bc60
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string_array.go
@@ -0,0 +1,103 @@
+package pflag
+
+// -- stringArray Value
+type stringArrayValue struct {
+	value   *[]string
+	changed bool
+}
+
+func newStringArrayValue(val []string, p *[]string) *stringArrayValue {
+	ssv := new(stringArrayValue)
+	ssv.value = p
+	*ssv.value = val
+	return ssv
+}
+
+func (s *stringArrayValue) Set(val string) error {
+	if !s.changed {
+		*s.value = []string{val}
+		s.changed = true
+	} else {
+		*s.value = append(*s.value, val)
+	}
+	return nil
+}
+
+func (s *stringArrayValue) Type() string {
+	return "stringArray"
+}
+
+func (s *stringArrayValue) String() string {
+	str, _ := writeAsCSV(*s.value)
+	return "[" + str + "]"
+}
+
+func stringArrayConv(sval string) (interface{}, error) {
+	sval = sval[1 : len(sval)-1]
+	// An empty string would cause an array with one (empty) string
+	if len(sval) == 0 {
+		return []string{}, nil
+	}
+	return readAsCSV(sval)
+}
+
+// GetStringArray returns the []string value of a flag with the given name
+func (f *FlagSet) GetStringArray(name string) ([]string, error) {
+	val, err := f.getFlagType(name, "stringArray", stringArrayConv)
+	if err != nil {
+		return []string{}, err
+	}
+	return val.([]string), nil
+}
+
+// StringArrayVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a []string variable in which to store the values of the flag.
+// The value of each argument is not split on commas. Use a StringSlice for that.
+func (f *FlagSet) StringArrayVar(p *[]string, name string, value []string, usage string) {
+	f.VarP(newStringArrayValue(value, p), name, "", usage)
+}
+
+// StringArrayVarP is like StringArrayVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringArrayVarP(p *[]string, name, shorthand string, value []string, usage string) {
+	f.VarP(newStringArrayValue(value, p), name, shorthand, usage)
+}
+
+// StringArrayVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a []string variable in which to store the value of the flag.
+// The value of each argument is not split on commas. Use a StringSlice for that.
+func StringArrayVar(p *[]string, name string, value []string, usage string) {
+	CommandLine.VarP(newStringArrayValue(value, p), name, "", usage)
+}
+
+// StringArrayVarP is like StringArrayVar, but accepts a shorthand letter that can be used after a single dash.
+func StringArrayVarP(p *[]string, name, shorthand string, value []string, usage string) {
+	CommandLine.VarP(newStringArrayValue(value, p), name, shorthand, usage)
+}
+
+// StringArray defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a []string variable that stores the value of the flag.
+// The value of each argument is not split on commas. Use a StringSlice for that.
+func (f *FlagSet) StringArray(name string, value []string, usage string) *[]string {
+	p := []string{}
+	f.StringArrayVarP(&p, name, "", value, usage)
+	return &p
+}
+
+// StringArrayP is like StringArray, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringArrayP(name, shorthand string, value []string, usage string) *[]string {
+	p := []string{}
+	f.StringArrayVarP(&p, name, shorthand, value, usage)
+	return &p
+}
+
+// StringArray defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a []string variable that stores the value of the flag.
+// The value of each argument is not split on commas. Use a StringSlice for that.
+func StringArray(name string, value []string, usage string) *[]string {
+	return CommandLine.StringArrayP(name, "", value, usage)
+}
+
+// StringArrayP is like StringArray, but accepts a shorthand letter that can be used after a single dash.
+func StringArrayP(name, shorthand string, value []string, usage string) *[]string {
+	return CommandLine.StringArrayP(name, shorthand, value, usage)
+}
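
The distinction the comments draw between StringArray and StringSlice is easiest to see side by side. A sketch with illustrative flag names:

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
	arr := fs.StringArray("header", nil, "verbatim values")
	sl := fs.StringSlice("tag", nil, "comma-split values")

	_ = fs.Parse([]string{"--header=a,b", "--tag=a,b"})
	fmt.Println(*arr) // [a,b]  one element, comma kept
	fmt.Println(*sl)  // [a b]  two elements
}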
diff --git a/vendor/github.com/spf13/pflag/string_slice.go b/vendor/github.com/spf13/pflag/string_slice.go
new file mode 100644
index 0000000..0cd3ccc
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string_slice.go
@@ -0,0 +1,149 @@
+package pflag
+
+import (
+	"bytes"
+	"encoding/csv"
+	"strings"
+)
+
+// -- stringSlice Value
+type stringSliceValue struct {
+	value   *[]string
+	changed bool
+}
+
+func newStringSliceValue(val []string, p *[]string) *stringSliceValue {
+	ssv := new(stringSliceValue)
+	ssv.value = p
+	*ssv.value = val
+	return ssv
+}
+
+func readAsCSV(val string) ([]string, error) {
+	if val == "" {
+		return []string{}, nil
+	}
+	stringReader := strings.NewReader(val)
+	csvReader := csv.NewReader(stringReader)
+	return csvReader.Read()
+}
+
+func writeAsCSV(vals []string) (string, error) {
+	b := &bytes.Buffer{}
+	w := csv.NewWriter(b)
+	err := w.Write(vals)
+	if err != nil {
+		return "", err
+	}
+	w.Flush()
+	return strings.TrimSuffix(b.String(), "\n"), nil
+}
+
+func (s *stringSliceValue) Set(val string) error {
+	v, err := readAsCSV(val)
+	if err != nil {
+		return err
+	}
+	if !s.changed {
+		*s.value = v
+	} else {
+		*s.value = append(*s.value, v...)
+	}
+	s.changed = true
+	return nil
+}
+
+func (s *stringSliceValue) Type() string {
+	return "stringSlice"
+}
+
+func (s *stringSliceValue) String() string {
+	str, _ := writeAsCSV(*s.value)
+	return "[" + str + "]"
+}
+
+func stringSliceConv(sval string) (interface{}, error) {
+	sval = sval[1 : len(sval)-1]
+	// An empty string would cause a slice with one (empty) string
+	if len(sval) == 0 {
+		return []string{}, nil
+	}
+	return readAsCSV(sval)
+}
+
+// GetStringSlice returns the []string value of a flag with the given name
+func (f *FlagSet) GetStringSlice(name string) ([]string, error) {
+	val, err := f.getFlagType(name, "stringSlice", stringSliceConv)
+	if err != nil {
+		return []string{}, err
+	}
+	return val.([]string), nil
+}
+
+// StringSliceVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a []string variable in which to store the value of the flag.
+// Compared to StringArray flags, StringSlice flags take comma-separated values as arguments and split them accordingly.
+// For example:
+//   --ss="v1,v2" --ss="v3"
+// will result in
+//   []string{"v1", "v2", "v3"}
+func (f *FlagSet) StringSliceVar(p *[]string, name string, value []string, usage string) {
+	f.VarP(newStringSliceValue(value, p), name, "", usage)
+}
+
+// StringSliceVarP is like StringSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringSliceVarP(p *[]string, name, shorthand string, value []string, usage string) {
+	f.VarP(newStringSliceValue(value, p), name, shorthand, usage)
+}
+
+// StringSliceVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a []string variable in which to store the value of the flag.
+// Compared to StringArray flags, StringSlice flags take comma-separated values as arguments and split them accordingly.
+// For example:
+//   --ss="v1,v2" --ss="v3"
+// will result in
+//   []string{"v1", "v2", "v3"}
+func StringSliceVar(p *[]string, name string, value []string, usage string) {
+	CommandLine.VarP(newStringSliceValue(value, p), name, "", usage)
+}
+
+// StringSliceVarP is like StringSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func StringSliceVarP(p *[]string, name, shorthand string, value []string, usage string) {
+	CommandLine.VarP(newStringSliceValue(value, p), name, shorthand, usage)
+}
+
+// StringSlice defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a []string variable that stores the value of the flag.
+// Compared to StringArray flags, StringSlice flags take comma-separated values as arguments and split them accordingly.
+// For example:
+//   --ss="v1,v2" --ss="v3"
+// will result in
+//   []string{"v1", "v2", "v3"}
+func (f *FlagSet) StringSlice(name string, value []string, usage string) *[]string {
+	p := []string{}
+	f.StringSliceVarP(&p, name, "", value, usage)
+	return &p
+}
+
+// StringSliceP is like StringSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringSliceP(name, shorthand string, value []string, usage string) *[]string {
+	p := []string{}
+	f.StringSliceVarP(&p, name, shorthand, value, usage)
+	return &p
+}
+
+// StringSlice defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a []string variable that stores the value of the flag.
+// Compared to StringArray flags, StringSlice flags take comma-separated values as arguments and split them accordingly.
+// For example:
+//   --ss="v1,v2" --ss="v3"
+// will result in
+//   []string{"v1", "v2", "v3"}
+func StringSlice(name string, value []string, usage string) *[]string {
+	return CommandLine.StringSliceP(name, "", value, usage)
+}
+
+// StringSliceP is like StringSlice, but accepts a shorthand letter that can be used after a single dash.
+func StringSliceP(name, shorthand string, value []string, usage string) *[]string {
+	return CommandLine.StringSliceP(name, shorthand, value, usage)
+}
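
Since Set routes values through encoding/csv (readAsCSV above), CSV quoting keeps a comma inside a single element. A sketch with an illustrative flag name:

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
	ss := fs.StringSlice("ss", nil, "CSV-split values")

	// The quoted field survives as one element containing a comma.
	_ = fs.Parse([]string{`--ss=one,"two,three"`})
	fmt.Printf("%q\n", *ss) // ["one" "two,three"]
}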
diff --git a/vendor/github.com/spf13/pflag/string_to_int.go b/vendor/github.com/spf13/pflag/string_to_int.go
new file mode 100644
index 0000000..5ceda39
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string_to_int.go
@@ -0,0 +1,149 @@
+package pflag
+
+import (
+	"bytes"
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// -- stringToInt Value
+type stringToIntValue struct {
+	value   *map[string]int
+	changed bool
+}
+
+func newStringToIntValue(val map[string]int, p *map[string]int) *stringToIntValue {
+	ssv := new(stringToIntValue)
+	ssv.value = p
+	*ssv.value = val
+	return ssv
+}
+
+// Format: a=1,b=2
+func (s *stringToIntValue) Set(val string) error {
+	ss := strings.Split(val, ",")
+	out := make(map[string]int, len(ss))
+	for _, pair := range ss {
+		kv := strings.SplitN(pair, "=", 2)
+		if len(kv) != 2 {
+			return fmt.Errorf("%s must be formatted as key=value", pair)
+		}
+		var err error
+		out[kv[0]], err = strconv.Atoi(kv[1])
+		if err != nil {
+			return err
+		}
+	}
+	if !s.changed {
+		*s.value = out
+	} else {
+		for k, v := range out {
+			(*s.value)[k] = v
+		}
+	}
+	s.changed = true
+	return nil
+}
+
+func (s *stringToIntValue) Type() string {
+	return "stringToInt"
+}
+
+func (s *stringToIntValue) String() string {
+	var buf bytes.Buffer
+	i := 0
+	for k, v := range *s.value {
+		if i > 0 {
+			buf.WriteRune(',')
+		}
+		buf.WriteString(k)
+		buf.WriteRune('=')
+		buf.WriteString(strconv.Itoa(v))
+		i++
+	}
+	return "[" + buf.String() + "]"
+}
+
+func stringToIntConv(val string) (interface{}, error) {
+	val = strings.Trim(val, "[]")
+	// An empty string would cause an empty map
+	if len(val) == 0 {
+		return map[string]int{}, nil
+	}
+	ss := strings.Split(val, ",")
+	out := make(map[string]int, len(ss))
+	for _, pair := range ss {
+		kv := strings.SplitN(pair, "=", 2)
+		if len(kv) != 2 {
+			return nil, fmt.Errorf("%s must be formatted as key=value", pair)
+		}
+		var err error
+		out[kv[0]], err = strconv.Atoi(kv[1])
+		if err != nil {
+			return nil, err
+		}
+	}
+	return out, nil
+}
+
+// GetStringToInt returns the map[string]int value of a flag with the given name
+func (f *FlagSet) GetStringToInt(name string) (map[string]int, error) {
+	val, err := f.getFlagType(name, "stringToInt", stringToIntConv)
+	if err != nil {
+		return map[string]int{}, err
+	}
+	return val.(map[string]int), nil
+}
+
+// StringToIntVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a map[string]int variable in which to store the value of the flag.
+// The value is given as comma-separated key=value pairs; repeated uses of the flag merge into the map.
+func (f *FlagSet) StringToIntVar(p *map[string]int, name string, value map[string]int, usage string) {
+	f.VarP(newStringToIntValue(value, p), name, "", usage)
+}
+
+// StringToIntVarP is like StringToIntVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringToIntVarP(p *map[string]int, name, shorthand string, value map[string]int, usage string) {
+	f.VarP(newStringToIntValue(value, p), name, shorthand, usage)
+}
+
+// StringToIntVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a map[string]int variable in which to store the value of the flag.
+// The value is given as comma-separated key=value pairs; repeated uses of the flag merge into the map.
+func StringToIntVar(p *map[string]int, name string, value map[string]int, usage string) {
+	CommandLine.VarP(newStringToIntValue(value, p), name, "", usage)
+}
+
+// StringToIntVarP is like StringToIntVar, but accepts a shorthand letter that can be used after a single dash.
+func StringToIntVarP(p *map[string]int, name, shorthand string, value map[string]int, usage string) {
+	CommandLine.VarP(newStringToIntValue(value, p), name, shorthand, usage)
+}
+
+// StringToInt defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a map[string]int variable that stores the value of the flag.
+// The value is given as comma-separated key=value pairs; repeated uses of the flag merge into the map.
+func (f *FlagSet) StringToInt(name string, value map[string]int, usage string) *map[string]int {
+	p := map[string]int{}
+	f.StringToIntVarP(&p, name, "", value, usage)
+	return &p
+}
+
+// StringToIntP is like StringToInt, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringToIntP(name, shorthand string, value map[string]int, usage string) *map[string]int {
+	p := map[string]int{}
+	f.StringToIntVarP(&p, name, shorthand, value, usage)
+	return &p
+}
+
+// StringToInt defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a map[string]int variable that stores the value of the flag.
+// The value is given as comma-separated key=value pairs; repeated uses of the flag merge into the map.
+func StringToInt(name string, value map[string]int, usage string) *map[string]int {
+	return CommandLine.StringToIntP(name, "", value, usage)
+}
+
+// StringToIntP is like StringToInt, but accepts a shorthand letter that can be used after a single dash.
+func StringToIntP(name, shorthand string, value map[string]int, usage string) *map[string]int {
+	return CommandLine.StringToIntP(name, shorthand, value, usage)
+}
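
A sketch of the map-merging behavior implemented by Set above; the flag name and keys are illustrative:

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
	limits := fs.StringToInt("limit", nil, "per-key limits as key=value pairs")

	// The first use replaces the default map; later uses merge into it,
	// overwriting duplicate keys.
	_ = fs.Parse([]string{"--limit=a=1,b=2", "--limit=b=3"})
	fmt.Println((*limits)["a"], (*limits)["b"]) // 1 3
}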
diff --git a/vendor/github.com/spf13/pflag/string_to_string.go b/vendor/github.com/spf13/pflag/string_to_string.go
new file mode 100644
index 0000000..890a01a
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string_to_string.go
@@ -0,0 +1,160 @@
+package pflag
+
+import (
+	"bytes"
+	"encoding/csv"
+	"fmt"
+	"strings"
+)
+
+// -- stringToString Value
+type stringToStringValue struct {
+	value   *map[string]string
+	changed bool
+}
+
+func newStringToStringValue(val map[string]string, p *map[string]string) *stringToStringValue {
+	ssv := new(stringToStringValue)
+	ssv.value = p
+	*ssv.value = val
+	return ssv
+}
+
+// Format: a=1,b=2
+func (s *stringToStringValue) Set(val string) error {
+	var ss []string
+	n := strings.Count(val, "=")
+	switch n {
+	case 0:
+		return fmt.Errorf("%s must be formatted as key=value", val)
+	case 1:
+		ss = append(ss, strings.Trim(val, `"`))
+	default:
+		r := csv.NewReader(strings.NewReader(val))
+		var err error
+		ss, err = r.Read()
+		if err != nil {
+			return err
+		}
+	}
+
+	out := make(map[string]string, len(ss))
+	for _, pair := range ss {
+		kv := strings.SplitN(pair, "=", 2)
+		if len(kv) != 2 {
+			return fmt.Errorf("%s must be formatted as key=value", pair)
+		}
+		out[kv[0]] = kv[1]
+	}
+	if !s.changed {
+		*s.value = out
+	} else {
+		for k, v := range out {
+			(*s.value)[k] = v
+		}
+	}
+	s.changed = true
+	return nil
+}
+
+func (s *stringToStringValue) Type() string {
+	return "stringToString"
+}
+
+func (s *stringToStringValue) String() string {
+	records := make([]string, 0, len(*s.value)>>1)
+	for k, v := range *s.value {
+		records = append(records, k+"="+v)
+	}
+
+	var buf bytes.Buffer
+	w := csv.NewWriter(&buf)
+	if err := w.Write(records); err != nil {
+		panic(err)
+	}
+	w.Flush()
+	return "[" + strings.TrimSpace(buf.String()) + "]"
+}
+
+func stringToStringConv(val string) (interface{}, error) {
+	val = strings.Trim(val, "[]")
+	// An empty string would cause an empty map
+	if len(val) == 0 {
+		return map[string]string{}, nil
+	}
+	r := csv.NewReader(strings.NewReader(val))
+	ss, err := r.Read()
+	if err != nil {
+		return nil, err
+	}
+	out := make(map[string]string, len(ss))
+	for _, pair := range ss {
+		kv := strings.SplitN(pair, "=", 2)
+		if len(kv) != 2 {
+			return nil, fmt.Errorf("%s must be formatted as key=value", pair)
+		}
+		out[kv[0]] = kv[1]
+	}
+	return out, nil
+}
+
+// GetStringToString returns the map[string]string value of a flag with the given name
+func (f *FlagSet) GetStringToString(name string) (map[string]string, error) {
+	val, err := f.getFlagType(name, "stringToString", stringToStringConv)
+	if err != nil {
+		return map[string]string{}, err
+	}
+	return val.(map[string]string), nil
+}
+
+// StringToStringVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a map[string]string variable in which to store the value of the flag.
+// The value is given as comma-separated key=value pairs; repeated uses of the flag merge into the map.
+func (f *FlagSet) StringToStringVar(p *map[string]string, name string, value map[string]string, usage string) {
+	f.VarP(newStringToStringValue(value, p), name, "", usage)
+}
+
+// StringToStringVarP is like StringToStringVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringToStringVarP(p *map[string]string, name, shorthand string, value map[string]string, usage string) {
+	f.VarP(newStringToStringValue(value, p), name, shorthand, usage)
+}
+
+// StringToStringVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a map[string]string variable in which to store the value of the flag.
+// The value is given as comma-separated key=value pairs; repeated uses of the flag merge into the map.
+func StringToStringVar(p *map[string]string, name string, value map[string]string, usage string) {
+	CommandLine.VarP(newStringToStringValue(value, p), name, "", usage)
+}
+
+// StringToStringVarP is like StringToStringVar, but accepts a shorthand letter that can be used after a single dash.
+func StringToStringVarP(p *map[string]string, name, shorthand string, value map[string]string, usage string) {
+	CommandLine.VarP(newStringToStringValue(value, p), name, shorthand, usage)
+}
+
+// StringToString defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a map[string]string variable that stores the value of the flag.
+// The value is given as comma-separated key=value pairs; repeated uses of the flag merge into the map.
+func (f *FlagSet) StringToString(name string, value map[string]string, usage string) *map[string]string {
+	p := map[string]string{}
+	f.StringToStringVarP(&p, name, "", value, usage)
+	return &p
+}
+
+// StringToStringP is like StringToString, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringToStringP(name, shorthand string, value map[string]string, usage string) *map[string]string {
+	p := map[string]string{}
+	f.StringToStringVarP(&p, name, shorthand, value, usage)
+	return &p
+}
+
+// StringToString defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a map[string]string variable that stores the value of the flag.
+// The value is given as comma-separated key=value pairs; repeated uses of the flag merge into the map.
+func StringToString(name string, value map[string]string, usage string) *map[string]string {
+	return CommandLine.StringToStringP(name, "", value, usage)
+}
+
+// StringToStringP is like StringToString, but accepts a shorthand letter that can be used after a single dash.
+func StringToStringP(name, shorthand string, value map[string]string, usage string) *map[string]string {
+	return CommandLine.StringToStringP(name, shorthand, value, usage)
+}
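
Set above special-cases a single key=value pair (taken verbatim) versus several pairs (parsed as CSV). A sketch with illustrative names:

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
	labels := fs.StringToString("label", nil, "labels as key=value pairs")

	// Two '=' signs, so the value is parsed as CSV into two pairs.
	_ = fs.Parse([]string{"--label=env=prod,team=net"})
	fmt.Println((*labels)["env"], (*labels)["team"]) // prod net
}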
diff --git a/vendor/github.com/spf13/pflag/uint.go b/vendor/github.com/spf13/pflag/uint.go
new file mode 100644
index 0000000..dcbc2b7
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint Value
+type uintValue uint
+
+func newUintValue(val uint, p *uint) *uintValue {
+	*p = val
+	return (*uintValue)(p)
+}
+
+func (i *uintValue) Set(s string) error {
+	v, err := strconv.ParseUint(s, 0, 64)
+	*i = uintValue(v)
+	return err
+}
+
+func (i *uintValue) Type() string {
+	return "uint"
+}
+
+func (i *uintValue) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uintConv(sval string) (interface{}, error) {
+	v, err := strconv.ParseUint(sval, 0, 0)
+	if err != nil {
+		return 0, err
+	}
+	return uint(v), nil
+}
+
+// GetUint returns the uint value of a flag with the given name
+func (f *FlagSet) GetUint(name string) (uint, error) {
+	val, err := f.getFlagType(name, "uint", uintConv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(uint), nil
+}
+
+// UintVar defines a uint flag with specified name, default value, and usage string.
+// The argument p points to a uint variable in which to store the value of the flag.
+func (f *FlagSet) UintVar(p *uint, name string, value uint, usage string) {
+	f.VarP(newUintValue(value, p), name, "", usage)
+}
+
+// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) UintVarP(p *uint, name, shorthand string, value uint, usage string) {
+	f.VarP(newUintValue(value, p), name, shorthand, usage)
+}
+
+// UintVar defines a uint flag with specified name, default value, and usage string.
+// The argument p points to a uint variable in which to store the value of the flag.
+func UintVar(p *uint, name string, value uint, usage string) {
+	CommandLine.VarP(newUintValue(value, p), name, "", usage)
+}
+
+// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash.
+func UintVarP(p *uint, name, shorthand string, value uint, usage string) {
+	CommandLine.VarP(newUintValue(value, p), name, shorthand, usage)
+}
+
+// Uint defines a uint flag with specified name, default value, and usage string.
+// The return value is the address of a uint variable that stores the value of the flag.
+func (f *FlagSet) Uint(name string, value uint, usage string) *uint {
+	p := new(uint)
+	f.UintVarP(p, name, "", value, usage)
+	return p
+}
+
+// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) UintP(name, shorthand string, value uint, usage string) *uint {
+	p := new(uint)
+	f.UintVarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// Uint defines a uint flag with specified name, default value, and usage string.
+// The return value is the address of a uint variable that stores the value of the flag.
+func Uint(name string, value uint, usage string) *uint {
+	return CommandLine.UintP(name, "", value, usage)
+}
+
+// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash.
+func UintP(name, shorthand string, value uint, usage string) *uint {
+	return CommandLine.UintP(name, shorthand, value, usage)
+}
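
Because Set parses with strconv.ParseUint and base 0, numeric prefixes are honored; the sized variants in the following files (uint8/16/32/64) repeat the identical pattern. A sketch with an illustrative flag name:

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
	workers := fs.Uint("workers", 4, "worker count")

	// Base 0 means hex (0x..) and octal (0..) literals are accepted too.
	_ = fs.Parse([]string{"--workers=0x10"})
	fmt.Println(*workers) // 16
}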
diff --git a/vendor/github.com/spf13/pflag/uint16.go b/vendor/github.com/spf13/pflag/uint16.go
new file mode 100644
index 0000000..7e9914e
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint16.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint16 value
+type uint16Value uint16
+
+func newUint16Value(val uint16, p *uint16) *uint16Value {
+	*p = val
+	return (*uint16Value)(p)
+}
+
+func (i *uint16Value) Set(s string) error {
+	v, err := strconv.ParseUint(s, 0, 16)
+	*i = uint16Value(v)
+	return err
+}
+
+func (i *uint16Value) Type() string {
+	return "uint16"
+}
+
+func (i *uint16Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uint16Conv(sval string) (interface{}, error) {
+	v, err := strconv.ParseUint(sval, 0, 16)
+	if err != nil {
+		return 0, err
+	}
+	return uint16(v), nil
+}
+
+// GetUint16 returns the uint16 value of a flag with the given name
+func (f *FlagSet) GetUint16(name string) (uint16, error) {
+	val, err := f.getFlagType(name, "uint16", uint16Conv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(uint16), nil
+}
+
+// Uint16Var defines a uint16 flag with specified name, default value, and usage string.
+// The argument p points to a uint16 variable in which to store the value of the flag.
+func (f *FlagSet) Uint16Var(p *uint16, name string, value uint16, usage string) {
+	f.VarP(newUint16Value(value, p), name, "", usage)
+}
+
+// Uint16VarP is like Uint16Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) {
+	f.VarP(newUint16Value(value, p), name, shorthand, usage)
+}
+
+// Uint16Var defines a uint16 flag with specified name, default value, and usage string.
+// The argument p points to a uint16 variable in which to store the value of the flag.
+func Uint16Var(p *uint16, name string, value uint16, usage string) {
+	CommandLine.VarP(newUint16Value(value, p), name, "", usage)
+}
+
+// Uint16VarP is like Uint16Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) {
+	CommandLine.VarP(newUint16Value(value, p), name, shorthand, usage)
+}
+
+// Uint16 defines a uint16 flag with specified name, default value, and usage string.
+// The return value is the address of a uint16 variable that stores the value of the flag.
+func (f *FlagSet) Uint16(name string, value uint16, usage string) *uint16 {
+	p := new(uint16)
+	f.Uint16VarP(p, name, "", value, usage)
+	return p
+}
+
+// Uint16P is like Uint16, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint16P(name, shorthand string, value uint16, usage string) *uint16 {
+	p := new(uint16)
+	f.Uint16VarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// Uint16 defines a uint16 flag with specified name, default value, and usage string.
+// The return value is the address of a uint16 variable that stores the value of the flag.
+func Uint16(name string, value uint16, usage string) *uint16 {
+	return CommandLine.Uint16P(name, "", value, usage)
+}
+
+// Uint16P is like Uint16, but accepts a shorthand letter that can be used after a single dash.
+func Uint16P(name, shorthand string, value uint16, usage string) *uint16 {
+	return CommandLine.Uint16P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint32.go b/vendor/github.com/spf13/pflag/uint32.go
new file mode 100644
index 0000000..d802453
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint32.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint32 value
+type uint32Value uint32
+
+func newUint32Value(val uint32, p *uint32) *uint32Value {
+	*p = val
+	return (*uint32Value)(p)
+}
+
+func (i *uint32Value) Set(s string) error {
+	v, err := strconv.ParseUint(s, 0, 32)
+	*i = uint32Value(v)
+	return err
+}
+
+func (i *uint32Value) Type() string {
+	return "uint32"
+}
+
+func (i *uint32Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uint32Conv(sval string) (interface{}, error) {
+	v, err := strconv.ParseUint(sval, 0, 32)
+	if err != nil {
+		return 0, err
+	}
+	return uint32(v), nil
+}
+
+// GetUint32 returns the uint32 value of a flag with the given name
+func (f *FlagSet) GetUint32(name string) (uint32, error) {
+	val, err := f.getFlagType(name, "uint32", uint32Conv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(uint32), nil
+}
+
+// Uint32Var defines a uint32 flag with specified name, default value, and usage string.
+// The argument p points to a uint32 variable in which to store the value of the flag.
+func (f *FlagSet) Uint32Var(p *uint32, name string, value uint32, usage string) {
+	f.VarP(newUint32Value(value, p), name, "", usage)
+}
+
+// Uint32VarP is like Uint32Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) {
+	f.VarP(newUint32Value(value, p), name, shorthand, usage)
+}
+
+// Uint32Var defines a uint32 flag with specified name, default value, and usage string.
+// The argument p points to a uint32 variable in which to store the value of the flag.
+func Uint32Var(p *uint32, name string, value uint32, usage string) {
+	CommandLine.VarP(newUint32Value(value, p), name, "", usage)
+}
+
+// Uint32VarP is like Uint32Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) {
+	CommandLine.VarP(newUint32Value(value, p), name, shorthand, usage)
+}
+
+// Uint32 defines a uint32 flag with specified name, default value, and usage string.
+// The return value is the address of a uint32 variable that stores the value of the flag.
+func (f *FlagSet) Uint32(name string, value uint32, usage string) *uint32 {
+	p := new(uint32)
+	f.Uint32VarP(p, name, "", value, usage)
+	return p
+}
+
+// Uint32P is like Uint32, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint32P(name, shorthand string, value uint32, usage string) *uint32 {
+	p := new(uint32)
+	f.Uint32VarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// Uint32 defines a uint32 flag with specified name, default value, and usage string.
+// The return value is the address of a uint32 variable that stores the value of the flag.
+func Uint32(name string, value uint32, usage string) *uint32 {
+	return CommandLine.Uint32P(name, "", value, usage)
+}
+
+// Uint32P is like Uint32, but accepts a shorthand letter that can be used after a single dash.
+func Uint32P(name, shorthand string, value uint32, usage string) *uint32 {
+	return CommandLine.Uint32P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint64.go b/vendor/github.com/spf13/pflag/uint64.go
new file mode 100644
index 0000000..f62240f
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint64.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint64 Value
+type uint64Value uint64
+
+func newUint64Value(val uint64, p *uint64) *uint64Value {
+	*p = val
+	return (*uint64Value)(p)
+}
+
+func (i *uint64Value) Set(s string) error {
+	v, err := strconv.ParseUint(s, 0, 64)
+	*i = uint64Value(v)
+	return err
+}
+
+func (i *uint64Value) Type() string {
+	return "uint64"
+}
+
+func (i *uint64Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uint64Conv(sval string) (interface{}, error) {
+	v, err := strconv.ParseUint(sval, 0, 64)
+	if err != nil {
+		return 0, err
+	}
+	return uint64(v), nil
+}
+
+// GetUint64 returns the uint64 value of a flag with the given name
+func (f *FlagSet) GetUint64(name string) (uint64, error) {
+	val, err := f.getFlagType(name, "uint64", uint64Conv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(uint64), nil
+}
+
+// Uint64Var defines a uint64 flag with specified name, default value, and usage string.
+// The argument p points to a uint64 variable in which to store the value of the flag.
+func (f *FlagSet) Uint64Var(p *uint64, name string, value uint64, usage string) {
+	f.VarP(newUint64Value(value, p), name, "", usage)
+}
+
+// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) {
+	f.VarP(newUint64Value(value, p), name, shorthand, usage)
+}
+
+// Uint64Var defines a uint64 flag with specified name, default value, and usage string.
+// The argument p points to a uint64 variable in which to store the value of the flag.
+func Uint64Var(p *uint64, name string, value uint64, usage string) {
+	CommandLine.VarP(newUint64Value(value, p), name, "", usage)
+}
+
+// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) {
+	CommandLine.VarP(newUint64Value(value, p), name, shorthand, usage)
+}
+
+// Uint64 defines a uint64 flag with specified name, default value, and usage string.
+// The return value is the address of a uint64 variable that stores the value of the flag.
+func (f *FlagSet) Uint64(name string, value uint64, usage string) *uint64 {
+	p := new(uint64)
+	f.Uint64VarP(p, name, "", value, usage)
+	return p
+}
+
+// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint64P(name, shorthand string, value uint64, usage string) *uint64 {
+	p := new(uint64)
+	f.Uint64VarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// Uint64 defines a uint64 flag with specified name, default value, and usage string.
+// The return value is the address of a uint64 variable that stores the value of the flag.
+func Uint64(name string, value uint64, usage string) *uint64 {
+	return CommandLine.Uint64P(name, "", value, usage)
+}
+
+// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash.
+func Uint64P(name, shorthand string, value uint64, usage string) *uint64 {
+	return CommandLine.Uint64P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint8.go b/vendor/github.com/spf13/pflag/uint8.go
new file mode 100644
index 0000000..bb0e83c
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint8.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint8 Value
+type uint8Value uint8
+
+func newUint8Value(val uint8, p *uint8) *uint8Value {
+	*p = val
+	return (*uint8Value)(p)
+}
+
+func (i *uint8Value) Set(s string) error {
+	v, err := strconv.ParseUint(s, 0, 8)
+	*i = uint8Value(v)
+	return err
+}
+
+func (i *uint8Value) Type() string {
+	return "uint8"
+}
+
+func (i *uint8Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uint8Conv(sval string) (interface{}, error) {
+	v, err := strconv.ParseUint(sval, 0, 8)
+	if err != nil {
+		return 0, err
+	}
+	return uint8(v), nil
+}
+
+// GetUint8 returns the uint8 value of a flag with the given name
+func (f *FlagSet) GetUint8(name string) (uint8, error) {
+	val, err := f.getFlagType(name, "uint8", uint8Conv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(uint8), nil
+}
+
+// Uint8Var defines a uint8 flag with specified name, default value, and usage string.
+// The argument p points to a uint8 variable in which to store the value of the flag.
+func (f *FlagSet) Uint8Var(p *uint8, name string, value uint8, usage string) {
+	f.VarP(newUint8Value(value, p), name, "", usage)
+}
+
+// Uint8VarP is like Uint8Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) {
+	f.VarP(newUint8Value(value, p), name, shorthand, usage)
+}
+
+// Uint8Var defines a uint8 flag with specified name, default value, and usage string.
+// The argument p points to a uint8 variable in which to store the value of the flag.
+func Uint8Var(p *uint8, name string, value uint8, usage string) {
+	CommandLine.VarP(newUint8Value(value, p), name, "", usage)
+}
+
+// Uint8VarP is like Uint8Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) {
+	CommandLine.VarP(newUint8Value(value, p), name, shorthand, usage)
+}
+
+// Uint8 defines a uint8 flag with specified name, default value, and usage string.
+// The return value is the address of a uint8 variable that stores the value of the flag.
+func (f *FlagSet) Uint8(name string, value uint8, usage string) *uint8 {
+	p := new(uint8)
+	f.Uint8VarP(p, name, "", value, usage)
+	return p
+}
+
+// Uint8P is like Uint8, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint8P(name, shorthand string, value uint8, usage string) *uint8 {
+	p := new(uint8)
+	f.Uint8VarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// Uint8 defines a uint8 flag with specified name, default value, and usage string.
+// The return value is the address of a uint8 variable that stores the value of the flag.
+func Uint8(name string, value uint8, usage string) *uint8 {
+	return CommandLine.Uint8P(name, "", value, usage)
+}
+
+// Uint8P is like Uint8, but accepts a shorthand letter that can be used after a single dash.
+func Uint8P(name, shorthand string, value uint8, usage string) *uint8 {
+	return CommandLine.Uint8P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint_slice.go b/vendor/github.com/spf13/pflag/uint_slice.go
new file mode 100644
index 0000000..edd94c6
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint_slice.go
@@ -0,0 +1,126 @@
+package pflag
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// -- uintSlice Value
+type uintSliceValue struct {
+	value   *[]uint
+	changed bool
+}
+
+func newUintSliceValue(val []uint, p *[]uint) *uintSliceValue {
+	uisv := new(uintSliceValue)
+	uisv.value = p
+	*uisv.value = val
+	return uisv
+}
+
+func (s *uintSliceValue) Set(val string) error {
+	ss := strings.Split(val, ",")
+	out := make([]uint, len(ss))
+	for i, d := range ss {
+		u, err := strconv.ParseUint(d, 10, 0)
+		if err != nil {
+			return err
+		}
+		out[i] = uint(u)
+	}
+	if !s.changed {
+		*s.value = out
+	} else {
+		*s.value = append(*s.value, out...)
+	}
+	s.changed = true
+	return nil
+}
+
+func (s *uintSliceValue) Type() string {
+	return "uintSlice"
+}
+
+func (s *uintSliceValue) String() string {
+	out := make([]string, len(*s.value))
+	for i, d := range *s.value {
+		out[i] = fmt.Sprintf("%d", d)
+	}
+	return "[" + strings.Join(out, ",") + "]"
+}
+
+func uintSliceConv(val string) (interface{}, error) {
+	val = strings.Trim(val, "[]")
+	// An empty string would cause a slice with one (empty) entry
+	if len(val) == 0 {
+		return []uint{}, nil
+	}
+	ss := strings.Split(val, ",")
+	out := make([]uint, len(ss))
+	for i, d := range ss {
+		u, err := strconv.ParseUint(d, 10, 0)
+		if err != nil {
+			return nil, err
+		}
+		out[i] = uint(u)
+	}
+	return out, nil
+}
+
+// GetUintSlice returns the []uint value of a flag with the given name.
+func (f *FlagSet) GetUintSlice(name string) ([]uint, error) {
+	val, err := f.getFlagType(name, "uintSlice", uintSliceConv)
+	if err != nil {
+		return []uint{}, err
+	}
+	return val.([]uint), nil
+}
+
+// UintSliceVar defines a uintSlice flag with specified name, default value, and usage string.
+// The argument p points to a []uint variable in which to store the value of the flag.
+func (f *FlagSet) UintSliceVar(p *[]uint, name string, value []uint, usage string) {
+	f.VarP(newUintSliceValue(value, p), name, "", usage)
+}
+
+// UintSliceVarP is like UintSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) UintSliceVarP(p *[]uint, name, shorthand string, value []uint, usage string) {
+	f.VarP(newUintSliceValue(value, p), name, shorthand, usage)
+}
+
+// UintSliceVar defines a []uint flag with specified name, default value, and usage string.
+// The argument p points to a []uint variable in which to store the value of the flag.
+func UintSliceVar(p *[]uint, name string, value []uint, usage string) {
+	CommandLine.VarP(newUintSliceValue(value, p), name, "", usage)
+}
+
+// UintSliceVarP is like UintSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func UintSliceVarP(p *[]uint, name, shorthand string, value []uint, usage string) {
+	CommandLine.VarP(newUintSliceValue(value, p), name, shorthand, usage)
+}
+
+// UintSlice defines a []uint flag with specified name, default value, and usage string.
+// The return value is the address of a []uint variable that stores the value of the flag.
+func (f *FlagSet) UintSlice(name string, value []uint, usage string) *[]uint {
+	p := []uint{}
+	f.UintSliceVarP(&p, name, "", value, usage)
+	return &p
+}
+
+// UintSliceP is like UintSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) UintSliceP(name, shorthand string, value []uint, usage string) *[]uint {
+	p := []uint{}
+	f.UintSliceVarP(&p, name, shorthand, value, usage)
+	return &p
+}
+
+// UintSlice defines a []uint flag with specified name, default value, and usage string.
+// The return value is the address of a []uint variable that stores the value of the flag.
+func UintSlice(name string, value []uint, usage string) *[]uint {
+	return CommandLine.UintSliceP(name, "", value, usage)
+}
+
+// UintSliceP is like UintSlice, but accepts a shorthand letter that can be used after a single dash.
+func UintSliceP(name, shorthand string, value []uint, usage string) *[]uint {
+	return CommandLine.UintSliceP(name, shorthand, value, usage)
+}
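
A sketch of the replace-then-append behavior shared by the slice types; the default and values are illustrative:

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
	ports := fs.UintSlice("ports", []uint{80}, "ports to open")

	// The first use replaces the default []uint{80}; the second appends.
	_ = fs.Parse([]string{"--ports=8080,8443", "--ports=9090"})
	fmt.Println(*ports) // [8080 8443 9090]
}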
diff --git a/vendor/github.com/tmc/grpc-websocket-proxy/LICENSE b/vendor/github.com/tmc/grpc-websocket-proxy/LICENSE
new file mode 100644
index 0000000..95d0bc8
--- /dev/null
+++ b/vendor/github.com/tmc/grpc-websocket-proxy/LICENSE
@@ -0,0 +1,7 @@
+Copyright (C) 2016 Travis Cline
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/tmc/grpc-websocket-proxy/wsproxy/doc.go b/vendor/github.com/tmc/grpc-websocket-proxy/wsproxy/doc.go
new file mode 100644
index 0000000..baf9545
--- /dev/null
+++ b/vendor/github.com/tmc/grpc-websocket-proxy/wsproxy/doc.go
@@ -0,0 +1,2 @@
+// Package wsproxy implements a websocket proxy for grpc-gateway backed services
+package wsproxy
diff --git a/vendor/github.com/tmc/grpc-websocket-proxy/wsproxy/websocket_proxy.go b/vendor/github.com/tmc/grpc-websocket-proxy/wsproxy/websocket_proxy.go
new file mode 100644
index 0000000..ada4c4e
--- /dev/null
+++ b/vendor/github.com/tmc/grpc-websocket-proxy/wsproxy/websocket_proxy.go
@@ -0,0 +1,291 @@
+package wsproxy
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"net/http"
+	"strings"
+
+	"github.com/gorilla/websocket"
+	"github.com/sirupsen/logrus"
+	"golang.org/x/net/context"
+)
+
+// MethodOverrideParam defines the special URL parameter that is translated into the subsequent proxied streaming http request's method.
+//
+// Deprecated: it is preferable to use the Options parameters to WebsocketProxy to supply parameters.
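+//
+// For example (illustrative): dialing ws://host/v1/stream?method=POST causes
+// the proxied streaming request to use the POST method.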
+var MethodOverrideParam = "method"
+
+// TokenCookieName defines the cookie name that is translated to an 'Authorization: Bearer' header in the streaming http request's headers.
+//
+// Deprecated: it is preferable to use the Options parameters to WebsocketProxy to supply parameters.
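+//
+// For example (illustrative): a request cookie "token=abc123" produces the
+// upstream header "Authorization: Bearer abc123".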
+var TokenCookieName = "token"
+
+// RequestMutatorFunc can supply an alternate outgoing request.
+type RequestMutatorFunc func(incoming *http.Request, outgoing *http.Request) *http.Request
+
+// Proxy provides websocket transport upgrade to compatible endpoints.
+type Proxy struct {
+	h                   http.Handler
+	logger              Logger
+	methodOverrideParam string
+	tokenCookieName     string
+	requestMutator      RequestMutatorFunc
+	headerForwarder     func(header string) bool
+}
+
+// Logger collects log messages.
+type Logger interface {
+	Warnln(...interface{})
+	Debugln(...interface{})
+}
+
+func (p *Proxy) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	if !websocket.IsWebSocketUpgrade(r) {
+		p.h.ServeHTTP(w, r)
+		return
+	}
+	p.proxy(w, r)
+}
+
+// Option allows customization of the proxy.
+type Option func(*Proxy)
+
+// WithMethodParamOverride allows specification of the special http parameter that is used in the proxied streaming request.
+func WithMethodParamOverride(param string) Option {
+	return func(p *Proxy) {
+		p.methodOverrideParam = param
+	}
+}
+
+// WithTokenCookieName allows specification of the cookie that is supplied as an upstream 'Authorization: Bearer' http header.
+func WithTokenCookieName(param string) Option {
+	return func(p *Proxy) {
+		p.tokenCookieName = param
+	}
+}
+
+// WithRequestMutator allows a custom RequestMutatorFunc to be supplied.
+func WithRequestMutator(fn RequestMutatorFunc) Option {
+	return func(p *Proxy) {
+		p.requestMutator = fn
+	}
+}
+
+// WithForwardedHeaders allows controlling which headers are forwarded.
+func WithForwardedHeaders(fn func(header string) bool) Option {
+	return func(p *Proxy) {
+		p.headerForwarder = fn
+	}
+}
+
+// WithLogger allows a custom Logger to be supplied.
+func WithLogger(logger Logger) Option {
+	return func(p *Proxy) {
+		p.logger = logger
+	}
+}
+
+var defaultHeadersToForward = map[string]bool{
+	"Origin":  true,
+	"origin":  true,
+	"Referer": true,
+	"referer": true,
+}
+
+func defaultHeaderForwarder(header string) bool {
+	return defaultHeadersToForward[header]
+}
+
+// WebsocketProxy attempts to expose the underlying handler as a bidi websocket stream with newline-delimited
+// JSON as the content encoding.
+//
+// The HTTP Authorization header is either populated from the Sec-Websocket-Protocol field or by a cookie.
+// The cookie name is specified by the TokenCookieName value.
+//
+// example:
+//   Sec-Websocket-Protocol: Bearer, foobar
+// is converted to:
+//   Authorization: Bearer foobar
+//
+// The method can be overridden with the MethodOverrideParam GET parameter in the requested URL.
+func WebsocketProxy(h http.Handler, opts ...Option) http.Handler {
+	p := &Proxy{
+		h:                   h,
+		logger:              logrus.New(),
+		methodOverrideParam: MethodOverrideParam,
+		tokenCookieName:     TokenCookieName,
+		headerForwarder:     defaultHeaderForwarder,
+	}
+	for _, o := range opts {
+		o(p)
+	}
+	return p
+}
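+
+// A usage sketch (illustrative; mux stands in for any http.Handler, such as a
+// grpc-gateway ServeMux):
+//
+//	mux := http.NewServeMux()
+//	http.ListenAndServe(":8080", WebsocketProxy(mux,
+//		WithTokenCookieName("token")))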
+
+// TODO(tmc): allow modification of upgrader settings?
+var upgrader = websocket.Upgrader{
+	ReadBufferSize:  1024,
+	WriteBufferSize: 1024,
+	CheckOrigin:     func(r *http.Request) bool { return true },
+}
+
+func isClosedConnError(err error) bool {
+	str := err.Error()
+	if strings.Contains(str, "use of closed network connection") {
+		return true
+	}
+	return websocket.IsCloseError(err, websocket.CloseNormalClosure, websocket.CloseGoingAway)
+}
+
+func (p *Proxy) proxy(w http.ResponseWriter, r *http.Request) {
+	var responseHeader http.Header
+	// If Sec-WebSocket-Protocol starts with "Bearer", respond in kind.
+	// TODO(tmc): consider customizability/extension point here.
+	if strings.HasPrefix(r.Header.Get("Sec-WebSocket-Protocol"), "Bearer") {
+		responseHeader = http.Header{
+			"Sec-WebSocket-Protocol": []string{"Bearer"},
+		}
+	}
+	conn, err := upgrader.Upgrade(w, r, responseHeader)
+	if err != nil {
+		p.logger.Warnln("error upgrading websocket:", err)
+		return
+	}
+	defer conn.Close()
+
+	ctx, cancelFn := context.WithCancel(context.Background())
+	defer cancelFn()
+
+	requestBodyR, requestBodyW := io.Pipe()
+	request, err := http.NewRequest(r.Method, r.URL.String(), requestBodyR)
+	if err != nil {
+		p.logger.Warnln("error preparing request:", err)
+		return
+	}
+	if swsp := r.Header.Get("Sec-WebSocket-Protocol"); swsp != "" {
+		request.Header.Set("Authorization", transformSubProtocolHeader(swsp))
+	}
+	for header := range r.Header {
+		if p.headerForwarder(header) {
+			request.Header.Set(header, r.Header.Get(header))
+		}
+	}
+	// If token cookie is present, populate Authorization header from the cookie instead.
+	if cookie, err := r.Cookie(p.tokenCookieName); err == nil {
+		request.Header.Set("Authorization", "Bearer "+cookie.Value)
+	}
+	if m := r.URL.Query().Get(p.methodOverrideParam); m != "" {
+		request.Method = m
+	}
+
+	if p.requestMutator != nil {
+		request = p.requestMutator(r, request)
+	}
+
+	responseBodyR, responseBodyW := io.Pipe()
+	response := newInMemoryResponseWriter(responseBodyW)
+	go func() {
+		<-ctx.Done()
+		p.logger.Debugln("closing pipes")
+		requestBodyW.CloseWithError(io.EOF)
+		responseBodyW.CloseWithError(io.EOF)
+		response.closed <- true
+	}()
+
+	go func() {
+		defer cancelFn()
+		p.h.ServeHTTP(response, request)
+	}()
+
+	// read loop -- take messages from websocket and write to http request
+	go func() {
+		defer func() {
+			cancelFn()
+		}()
+		for {
+			select {
+			case <-ctx.Done():
+				p.logger.Debugln("read loop done")
+				return
+			default:
+			}
+			p.logger.Debugln("[read] reading from socket.")
+			_, payload, err := conn.ReadMessage()
+			if err != nil {
+				if isClosedConnError(err) {
+					p.logger.Debugln("[read] websocket closed:", err)
+					return
+				}
+				p.logger.Warnln("error reading websocket message:", err)
+				return
+			}
+			p.logger.Debugln("[read] read payload:", string(payload))
+			p.logger.Debugln("[read] writing to requestBody:")
+			n, err := requestBodyW.Write(payload)
+			requestBodyW.Write([]byte("\n"))
+			p.logger.Debugln("[read] wrote to requestBody", n)
+			if err != nil {
+				p.logger.Warnln("[read] error writing message to upstream http server:", err)
+				return
+			}
+		}
+	}()
+	// write loop -- take messages from response and write to websocket
+	scanner := bufio.NewScanner(responseBodyR)
+	for scanner.Scan() {
+		if len(scanner.Bytes()) == 0 {
+			p.logger.Warnln("[write] empty scan", scanner.Err())
+			continue
+		}
+		p.logger.Debugln("[write] scanned", scanner.Text())
+		if err = conn.WriteMessage(websocket.TextMessage, scanner.Bytes()); err != nil {
+			p.logger.Warnln("[write] error writing websocket message:", err)
+			return
+		}
+	}
+	if err := scanner.Err(); err != nil {
+		p.logger.Warnln("scanner err:", err)
+	}
+}
+
+type inMemoryResponseWriter struct {
+	io.Writer
+	header http.Header
+	code   int
+	closed chan bool
+}
+
+func newInMemoryResponseWriter(w io.Writer) *inMemoryResponseWriter {
+	return &inMemoryResponseWriter{
+		Writer: w,
+		header: http.Header{},
+		closed: make(chan bool, 1),
+	}
+}
+
+// IE and Edge do not delimit Sec-WebSocket-Protocol strings with spaces
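+// For example (illustrative): "Bearer, foobar" and "Bearer,foobar" both yield
+// "Bearer foobar", while an input that does not contain "Bearer," yields "".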
+func transformSubProtocolHeader(header string) string {
+	tokens := strings.SplitN(header, "Bearer,", 2)
+
+	if len(tokens) < 2 {
+		return ""
+	}
+
+	return fmt.Sprintf("Bearer %v", strings.Trim(tokens[1], " "))
+}
+
+func (w *inMemoryResponseWriter) Write(b []byte) (int, error) {
+	return w.Writer.Write(b)
+}
+func (w *inMemoryResponseWriter) Header() http.Header {
+	return w.header
+}
+func (w *inMemoryResponseWriter) WriteHeader(code int) {
+	w.code = code
+}
+func (w *inMemoryResponseWriter) CloseNotify() <-chan bool {
+	return w.closed
+}
+func (w *inMemoryResponseWriter) Flush() {}
diff --git a/vendor/github.com/ugorji/go/LICENSE b/vendor/github.com/ugorji/go/LICENSE
new file mode 100644
index 0000000..95a0f05
--- /dev/null
+++ b/vendor/github.com/ugorji/go/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2012-2015 Ugorji Nwoke.
+All rights reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/ugorji/go/codec/0doc.go b/vendor/github.com/ugorji/go/codec/0doc.go
new file mode 100644
index 0000000..b61a818
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/0doc.go
@@ -0,0 +1,264 @@
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+/*
+Package codec provides a High Performance, Feature-Rich Idiomatic
+Go 1.4+ codec/encoding library for binc, msgpack, cbor, json.
+
+Supported Serialization formats are:
+
+  - msgpack: https://github.com/msgpack/msgpack
+  - binc:    http://github.com/ugorji/binc
+  - cbor:    http://cbor.io http://tools.ietf.org/html/rfc7049
+  - json:    http://json.org http://tools.ietf.org/html/rfc7159
+  - simple:
+
+To install:
+
+    go get github.com/ugorji/go/codec
+
+This package will carefully use 'unsafe' for performance reasons in specific places.
+You can build without any use of unsafe by passing the safe or appengine tag,
+i.e. 'go install -tags=safe ...'. Note that unsafe is only supported for the last 3
+Go SDK versions; e.g. if the current Go release is go 1.9, then unsafe use is only
+supported from go 1.7 onwards. This is because supporting unsafe requires knowledge
+of implementation details.
+
+For detailed usage information, read the primer at http://ugorji.net/blog/go-codec-primer .
+
+The idiomatic Go support is as seen in other encoding packages in
+the standard library (ie json, xml, gob, etc).
+
+Rich Feature Set includes:
+
+  - Simple but extremely powerful and feature-rich API
+  - Support for go1.4 and above, while selectively using newer APIs for later releases
+  - Excellent code coverage ( > 90% )
+  - Very High Performance.
+    Our extensive benchmarks show us outperforming Gob, Json, Bson, etc by 2-4X.
+  - Careful selected use of 'unsafe' for targeted performance gains.
+    100% mode exists where 'unsafe' is not used at all.
+  - Lock-free (sans mutex) concurrency for scaling to 100's of cores
+  - Coerce types where appropriate
+    e.g. decode an int in the stream into a float, decode numbers from formatted strings, etc
+  - Corner Cases:
+    Overflows, nil maps/slices, nil values in streams are handled correctly
+  - Standard field renaming via tags
+  - Support for omitting empty fields during an encoding
+  - Encoding from any value and decoding into pointer to any value
+    (struct, slice, map, primitives, pointers, interface{}, etc)
+  - Extensions to support efficient encoding/decoding of any named types
+  - Support encoding.(Binary|Text)(M|Unm)arshaler interfaces
+  - Support IsZero() bool to determine if a value is a zero value.
+    Analogous to time.Time.IsZero() bool.
+  - Decoding without a schema (into an interface{}).
+    Includes Options to configure what specific map or slice type to use
+    when decoding an encoded list or map into a nil interface{}
+  - Mapping a non-interface type to an interface, so we can decode appropriately
+    into any interface type with a correctly configured non-interface value.
+  - Encode a struct as an array, and decode struct from an array in the data stream
+  - Option to encode struct keys as numbers (instead of strings)
+    (to support structured streams with fields encoded as numeric codes)
+  - Comprehensive support for anonymous fields
+  - Fast (no-reflection) encoding/decoding of common maps and slices
+  - Code-generation for faster performance.
+  - Support binary (e.g. messagepack, cbor) and text (e.g. json) formats
+  - Support indefinite-length formats to enable true streaming
+    (for formats which support it e.g. json, cbor)
+  - Support canonical encoding, where a value is ALWAYS encoded as same sequence of bytes.
+    This mostly applies to maps, where iteration order is non-deterministic.
+  - NIL in data stream decoded as zero value
+  - Never silently skip data when decoding.
+    User decides whether to return an error or silently skip data when keys or indexes
+    in the data stream do not map to fields in the struct.
+  - Detect and error when encoding a cyclic reference (instead of stack overflow shutdown)
+  - Encode/Decode from/to chan types (for iterative streaming support)
+  - Drop-in replacement for encoding/json. `json:` key in struct tag supported.
+  - Provides a RPC Server and Client Codec for net/rpc communication protocol.
+  - Handle unique idiosyncrasies of codecs e.g.
+    - For messagepack, configure how ambiguities in handling raw bytes are resolved
+    - For messagepack, provide rpc server/client codec to support
+      msgpack-rpc protocol defined at:
+      https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md
+
+Extension Support
+
+Users can register a function to handle the encoding or decoding of
+their custom types.
+
+There are no restrictions on what the custom type can be. Some examples:
+
+    type BitSet   []int
+    type BitSet64 uint64
+    type UUID     string
+    type MyStructWithUnexportedFields struct { a int; b bool; c []int; }
+    type GifImage struct { ... }
+
+As an illustration, MyStructWithUnexportedFields would normally be
+encoded as an empty map because it has no exported fields, while UUID
+would be encoded as a string. However, with extension support, you can
+encode any of these however you like.
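+
+For example (an illustrative sketch; uuidExt is an assumption standing in for
+a user-supplied BytesExt implementation, and tag 78 is arbitrary):
+
+    var mh codec.MsgpackHandle
+    mh.SetBytesExt(reflect.TypeOf(UUID("")), 78, uuidExt)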
+
+Custom Encoding and Decoding
+
+This package maintains symmetry in the encoding and decoding halves.
+We determine how to encode or decode by walking this decision tree:
+
+  - is type a codec.Selfer?
+  - is there an extension registered for the type?
+  - is format binary, and is type an encoding.BinaryMarshaler and BinaryUnmarshaler?
+  - is format specifically json, and is type an encoding/json.Marshaler and Unmarshaler?
+  - is format text-based, and type an encoding.TextMarshaler?
+  - else we use a pair of functions based on the "kind" of the type e.g. map, slice, int64, etc
+
+This symmetry is important to reduce chances of issues happening because the
+encoding and decoding sides are out of sync e.g. decoded via very specific
+encoding.TextUnmarshaler but encoded via kind-specific generalized mode.
+
+Consequently, if a type only defines one-half of the symmetry
+(e.g. it implements UnmarshalJSON() but not MarshalJSON()),
+then that type doesn't satisfy the check and we will continue walking down the
+decision tree.
+
+RPC
+
+RPC Client and Server Codecs are implemented, so the codecs can be used
+with the standard net/rpc package.
+
+Usage
+
+The Handle is SAFE for concurrent READ, but NOT SAFE for concurrent modification.
+
+The Encoder and Decoder are NOT safe for concurrent use.
+
+Consequently, the usage model is basically:
+
+    - Create and initialize the Handle before any use.
+      Once created, DO NOT modify it.
+    - Multiple Encoders or Decoders can now use the Handle concurrently.
+      They only read information off the Handle (never write).
+    - However, each Encoder or Decoder MUST not be used concurrently
+    - To re-use an Encoder/Decoder, call Reset(...) on it first.
+      This allows you to reuse state maintained on the Encoder/Decoder.
+
+Sample usage model:
+
+    // create and configure Handle
+    var (
+      bh codec.BincHandle
+      mh codec.MsgpackHandle
+      ch codec.CborHandle
+    )
+
+    mh.MapType = reflect.TypeOf(map[string]interface{}(nil))
+
+    // configure extensions
+    // e.g. for msgpack, define functions and enable Time support for tag 1
+    // mh.SetExt(reflect.TypeOf(time.Time{}), 1, myExt)
+
+    // create and use decoder/encoder
+    var (
+      r io.Reader
+      w io.Writer
+      b []byte
+      h = &bh // or mh to use msgpack
+    )
+
+    dec = codec.NewDecoder(r, h)
+    dec = codec.NewDecoderBytes(b, h)
+    err = dec.Decode(&v)
+
+    enc = codec.NewEncoder(w, h)
+    enc = codec.NewEncoderBytes(&b, h)
+    err = enc.Encode(v)
+
+    //RPC Server
+    go func() {
+        for {
+            conn, err := listener.Accept()
+            rpcCodec := codec.GoRpc.ServerCodec(conn, h)
+            //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h)
+            rpc.ServeCodec(rpcCodec)
+        }
+    }()
+
+    //RPC Communication (client side)
+    conn, err = net.Dial("tcp", "localhost:5555")
+    rpcCodec := codec.GoRpc.ClientCodec(conn, h)
+    //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h)
+    client := rpc.NewClientWithCodec(rpcCodec)
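+
+    // re-use an Encoder/Decoder by resetting it first (illustrative sketch)
+    dec.Reset(r)
+    enc.Reset(w)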
+
+Running Tests
+
+To run tests, use the following:
+
+    go test
+
+To run the full suite of tests, use the following:
+
+    go test -tags alltests -run Suite
+
+You can use the tag 'safe' to run tests or build in safe mode, e.g.
+
+    go test -tags safe -run Json
+    go test -tags "alltests safe" -run Suite
+
+Running Benchmarks
+
+Please see http://github.com/ugorji/go-codec-bench .
+
+Caveats
+
+Struct fields matching the following are ignored during encoding and decoding
+    - struct tag value set to -
+    - func, complex numbers, unsafe pointers
+    - unexported and not embedded
+    - unexported and embedded and not struct kind
+    - unexported and embedded pointers (from go1.10)
+
+Every other field in a struct will be encoded/decoded.
+
+Embedded fields are encoded as if they exist in the top-level struct,
+with some caveats. See Encode documentation.
+
+*/
+package codec
+
+// TODO:
+//   - For Go 1.11, when mid-stack inlining is enabled,
+//     we should use committed functions for writeXXX and readXXX calls.
+//     This involves uncommenting the methods for decReaderSwitch and encWriterSwitch
+//     and using those (decReaderSwitch and encWriterSwitch) in all handles
+//     instead of encWriter and decReader.
+//     The benefit is that, for the (En|De)coder over []byte, the encWriter/decReader
+//     will be inlined, giving a performance bump for that typical case.
+//     However, it will only be inlined if mid-stack inlining is enabled,
+//     as we call panic to raise errors, and panic currently prevents inlining.
+//
+// PUNTED:
+//   - To make Handle comparable, make extHandle in BasicHandle a non-embedded pointer,
+//     and use overlay methods on *BasicHandle to call through to extHandle after initializing
+//     the "xh *extHandle" to point to a real slice.
+//
+// BEFORE EACH RELEASE:
+//   - Look through and fix padding for each type, to eliminate false sharing
+//     - critical shared objects that are read many times
+//       TypeInfos
+//     - pooled objects:
+//       decNaked, decNakedContainers, codecFner, typeInfoLoadArray
+//     - small objects allocated independently, that we read/use much across threads:
+//       codecFn, typeInfo
+//     - Objects allocated independently and used a lot
+//       Decoder, Encoder,
+//       xxxHandle, xxxEncDriver, xxxDecDriver (xxx = json, msgpack, cbor, binc, simple)
+//     - In all above, arrange values modified together to be close to each other.
+//
+//     For all of these, either ensure that they occupy full cache lines,
+//     or ensure that the things just past the cache line boundary are hardly read/written
+//     e.g. JsonHandle.RawBytesExt - which is copied into json(En|De)cDriver at init
+//
+//     Occupying full cache lines means they occupy 8*N words (where N is an integer).
+//     Check this out by running: ./run.sh -z
+//     - look at those tagged ****, meaning they are not occupying full cache lines
+//     - look at those tagged <<<<, meaning they are larger than 32 words (something to watch)
+//   - Run "golint -min_confidence 0.81"
diff --git a/vendor/github.com/ugorji/go/codec/README.md b/vendor/github.com/ugorji/go/codec/README.md
new file mode 100644
index 0000000..50d65e5
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/README.md
@@ -0,0 +1,206 @@
+# Codec
+
+High Performance, Feature-Rich Idiomatic Go codec/encoding library for
+binc, msgpack, cbor, json.
+
+Supported Serialization formats are:
+
+  - msgpack: https://github.com/msgpack/msgpack
+  - binc:    http://github.com/ugorji/binc
+  - cbor:    http://cbor.io http://tools.ietf.org/html/rfc7049
+  - json:    http://json.org http://tools.ietf.org/html/rfc7159
+  - simple: 
+
+To install:
+
+    go get github.com/ugorji/go/codec
+
+This package will carefully use 'unsafe' for performance reasons in specific places.
+You can build without any use of unsafe by passing the safe or appengine tag,
+i.e. 'go install -tags=safe ...'. Note that unsafe is only supported for the last 3
+Go SDK versions; e.g. if the current Go release is go 1.9, then unsafe use is only
+supported from go 1.7 onwards. This is because supporting unsafe requires knowledge
+of implementation details.
+
+Online documentation: http://godoc.org/github.com/ugorji/go/codec  
+Detailed Usage/How-to Primer: http://ugorji.net/blog/go-codec-primer
+
+The idiomatic Go support is as seen in other encoding packages in
+the standard library (ie json, xml, gob, etc).
+
+Rich Feature Set includes:
+
+  - Simple but extremely powerful and feature-rich API
+  - Support for go1.4 and above, while selectively using newer APIs for later releases
+  - Excellent code coverage ( > 90% )
+  - Very High Performance.
+    Our extensive benchmarks show us outperforming Gob, Json, Bson, etc by 2-4X.
+  - Careful selected use of 'unsafe' for targeted performance gains.
+    100% mode exists where 'unsafe' is not used at all.
+  - Lock-free (sans mutex) concurrency for scaling to 100's of cores
+  - Coerce types where appropriate
+    e.g. decode an int in the stream into a float, decode numbers from formatted strings, etc
+  - Corner Cases: 
+    Overflows, nil maps/slices, nil values in streams are handled correctly
+  - Standard field renaming via tags
+  - Support for omitting empty fields during an encoding
+  - Encoding from any value and decoding into pointer to any value
+    (struct, slice, map, primitives, pointers, interface{}, etc)
+  - Extensions to support efficient encoding/decoding of any named types
+  - Support encoding.(Binary|Text)(M|Unm)arshaler interfaces
+  - Support IsZero() bool to determine if a value is a zero value.
+    Analogous to time.Time.IsZero() bool.
+  - Decoding without a schema (into an interface{}).
+    Includes Options to configure what specific map or slice type to use
+    when decoding an encoded list or map into a nil interface{}
+  - Mapping a non-interface type to an interface, so we can decode appropriately
+    into any interface type with a correctly configured non-interface value.
+  - Encode a struct as an array, and decode struct from an array in the data stream
+  - Option to encode struct keys as numbers (instead of strings)
+    (to support structured streams with fields encoded as numeric codes)
+  - Comprehensive support for anonymous fields
+  - Fast (no-reflection) encoding/decoding of common maps and slices
+  - Code-generation for faster performance.
+  - Support binary (e.g. messagepack, cbor) and text (e.g. json) formats
+  - Support indefinite-length formats to enable true streaming 
+    (for formats which support it e.g. json, cbor)
+  - Support canonical encoding, where a value is ALWAYS encoded as same sequence of bytes.
+    This mostly applies to maps, where iteration order is non-deterministic.
+  - NIL in data stream decoded as zero value
+  - Never silently skip data when decoding.
+    User decides whether to return an error or silently skip data when keys or indexes
+    in the data stream do not map to fields in the struct.
+  - Encode/Decode from/to chan types (for iterative streaming support)
+  - Drop-in replacement for encoding/json. `json:` key in struct tag supported.
+  - Provides a RPC Server and Client Codec for net/rpc communication protocol.
+  - Handle unique idiosyncrasies of codecs e.g. 
+    - For messagepack, configure how ambiguities in handling raw bytes are resolved 
+    - For messagepack, provide rpc server/client codec to support
+      msgpack-rpc protocol defined at:
+      https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md
+
+## Extension Support
+
+Users can register a function to handle the encoding or decoding of
+their custom types.
+
+There are no restrictions on what the custom type can be. Some examples:
+
+    type BitSet   []int
+    type BitSet64 uint64
+    type UUID     string
+    type MyStructWithUnexportedFields struct { a int; b bool; c []int; }
+    type GifImage struct { ... }
+
+As an illustration, MyStructWithUnexportedFields would normally be
+encoded as an empty map because it has no exported fields, while UUID
+would be encoded as a string. However, with extension support, you can
+encode any of these however you like.
+
+## Custom Encoding and Decoding
+
+This package maintains symmetry in the encoding and decoding halves.
+We determine how to encode or decode by walking this decision tree:
+
+  - is type a codec.Selfer?
+  - is there an extension registered for the type?
+  - is format binary, and is type an encoding.BinaryMarshaler and BinaryUnmarshaler?
+  - is format specifically json, and is type an encoding/json.Marshaler and Unmarshaler?
+  - is format text-based, and type an encoding.TextMarshaler?
+  - else we use a pair of functions based on the "kind" of the type e.g. map, slice, int64, etc
+
+This symmetry is important to reduce chances of issues happening because the
+encoding and decoding sides are out of sync e.g. decoded via very specific
+encoding.TextUnmarshaler but encoded via kind-specific generalized mode.
+
+Consequently, if a type only defines one-half of the symmetry
+(e.g. it implements UnmarshalJSON() but not MarshalJSON()),
+then that type doesn't satisfy the check and we will continue walking down the
+decision tree.
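+
+For example (an illustrative sketch; MyType and its field A are assumptions),
+a type can take full control of its wire format by implementing both halves
+of codec.Selfer:
+
+    func (x *MyType) CodecEncodeSelf(e *codec.Encoder) { e.MustEncode(x.A) }
+    func (x *MyType) CodecDecodeSelf(d *codec.Decoder) { d.MustDecode(&x.A) }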
+
+## RPC
+
+RPC Client and Server Codecs are implemented, so the codecs can be used
+with the standard net/rpc package.
+
+## Usage
+
+Typical usage model:
+
+    // create and configure Handle
+    var (
+      bh codec.BincHandle
+      mh codec.MsgpackHandle
+      ch codec.CborHandle
+    )
+
+    mh.MapType = reflect.TypeOf(map[string]interface{}(nil))
+
+    // configure extensions
+    // e.g. for msgpack, define functions and enable Time support for tag 1
+    // mh.SetExt(reflect.TypeOf(time.Time{}), 1, myExt)
+
+    // create and use decoder/encoder
+    var (
+      r io.Reader
+      w io.Writer
+      b []byte
+      h = &bh // or mh to use msgpack
+    )
+
+    dec = codec.NewDecoder(r, h)
+    dec = codec.NewDecoderBytes(b, h)
+    err = dec.Decode(&v)
+
+    enc = codec.NewEncoder(w, h)
+    enc = codec.NewEncoderBytes(&b, h)
+    err = enc.Encode(v)
+
+    //RPC Server
+    go func() {
+        for {
+            conn, err := listener.Accept()
+            rpcCodec := codec.GoRpc.ServerCodec(conn, h)
+            //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h)
+            rpc.ServeCodec(rpcCodec)
+        }
+    }()
+
+    //RPC Communication (client side)
+    conn, err = net.Dial("tcp", "localhost:5555")
+    rpcCodec := codec.GoRpc.ClientCodec(conn, h)
+    //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h)
+    client := rpc.NewClientWithCodec(rpcCodec)
+
+## Running Tests
+
+To run tests, use the following:
+
+    go test
+
+To run the full suite of tests, use the following:
+
+    go test -tags alltests -run Suite
+
+You can use the tag 'safe' to run tests or build in safe mode, e.g.
+
+    go test -tags safe -run Json
+    go test -tags "alltests safe" -run Suite
+
+## Running Benchmarks
+
+Please see http://github.com/ugorji/go-codec-bench .
+
+## Caveats
+
+Struct fields matching the following are ignored during encoding and decoding
+
+  - struct tag value set to -
+  - func, complex numbers, unsafe pointers
+  - unexported and not embedded
+  - unexported and embedded and not struct kind
+  - unexported and embedded pointers (from go1.10)
+
+Every other field in a struct will be encoded/decoded.
+
+Embedded fields are encoded as if they exist in the top-level struct,
+with some caveats. See Encode documentation.
diff --git a/vendor/github.com/ugorji/go/codec/binc.go b/vendor/github.com/ugorji/go/codec/binc.go
new file mode 100644
index 0000000..a3c96fe
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/binc.go
@@ -0,0 +1,1168 @@
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+	"math"
+	"reflect"
+	"time"
+)
+
+const bincDoPrune = true // No longer needed; retained from when the C library did not support pruning.
+
+// vd as low 4 bits (there are 16 slots)
+const (
+	bincVdSpecial byte = iota
+	bincVdPosInt
+	bincVdNegInt
+	bincVdFloat
+
+	bincVdString
+	bincVdByteArray
+	bincVdArray
+	bincVdMap
+
+	bincVdTimestamp
+	bincVdSmallInt
+	bincVdUnicodeOther
+	bincVdSymbol
+
+	bincVdDecimal
+	_               // open slot
+	_               // open slot
+	bincVdCustomExt = 0x0f
+)
+
+const (
+	bincSpNil byte = iota
+	bincSpFalse
+	bincSpTrue
+	bincSpNan
+	bincSpPosInf
+	bincSpNegInf
+	bincSpZeroFloat
+	bincSpZero
+	bincSpNegOne
+)
+
+const (
+	bincFlBin16 byte = iota
+	bincFlBin32
+	_ // bincFlBin32e
+	bincFlBin64
+	_ // bincFlBin64e
+	// others not currently supported
+)
+
+func bincdesc(vd, vs byte) string {
+	switch vd {
+	case bincVdSpecial:
+		switch vs {
+		case bincSpNil:
+			return "nil"
+		case bincSpFalse:
+			return "false"
+		case bincSpTrue:
+			return "true"
+		case bincSpNan, bincSpPosInf, bincSpNegInf, bincSpZeroFloat:
+			return "float"
+		case bincSpZero:
+			return "uint"
+		case bincSpNegOne:
+			return "int"
+		default:
+			return "unknown"
+		}
+	case bincVdSmallInt, bincVdPosInt:
+		return "uint"
+	case bincVdNegInt:
+		return "int"
+	case bincVdFloat:
+		return "float"
+	case bincVdSymbol:
+		return "string"
+	case bincVdString:
+		return "string"
+	case bincVdByteArray:
+		return "bytes"
+	case bincVdTimestamp:
+		return "time"
+	case bincVdCustomExt:
+		return "ext"
+	case bincVdArray:
+		return "array"
+	case bincVdMap:
+		return "map"
+	default:
+		return "unknown"
+	}
+}
+
+type bincEncDriver struct {
+	e *Encoder
+	h *BincHandle
+	w encWriter
+	m map[string]uint16 // symbols
+	b [16]byte          // scratch, used for encoding numbers - bigendian style
+	s uint16            // symbols sequencer
+	// c containerState
+	encDriverTrackContainerWriter
+	noBuiltInTypes
+	// encNoSeparator
+}
+
+func (e *bincEncDriver) EncodeNil() {
+	e.w.writen1(bincVdSpecial<<4 | bincSpNil)
+}
+
+func (e *bincEncDriver) EncodeTime(t time.Time) {
+	if t.IsZero() {
+		e.EncodeNil()
+	} else {
+		bs := bincEncodeTime(t)
+		e.w.writen1(bincVdTimestamp<<4 | uint8(len(bs)))
+		e.w.writeb(bs)
+	}
+}
+
+func (e *bincEncDriver) EncodeBool(b bool) {
+	if b {
+		e.w.writen1(bincVdSpecial<<4 | bincSpTrue)
+	} else {
+		e.w.writen1(bincVdSpecial<<4 | bincSpFalse)
+	}
+}
+
+func (e *bincEncDriver) EncodeFloat32(f float32) {
+	if f == 0 {
+		e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat)
+		return
+	}
+	e.w.writen1(bincVdFloat<<4 | bincFlBin32)
+	bigenHelper{e.b[:4], e.w}.writeUint32(math.Float32bits(f))
+}
+
+func (e *bincEncDriver) EncodeFloat64(f float64) {
+	if f == 0 {
+		e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat)
+		return
+	}
+	bigen.PutUint64(e.b[:8], math.Float64bits(f))
+	if bincDoPrune {
+		i := 7
+		for ; i >= 0 && (e.b[i] == 0); i-- {
+		}
+		i++
+		if i <= 6 {
+			e.w.writen1(bincVdFloat<<4 | 0x8 | bincFlBin64)
+			e.w.writen1(byte(i))
+			e.w.writeb(e.b[:i])
+			return
+		}
+	}
+	e.w.writen1(bincVdFloat<<4 | bincFlBin64)
+	e.w.writeb(e.b[:8])
+}
+
+func (e *bincEncDriver) encIntegerPrune(bd byte, pos bool, v uint64, lim uint8) {
+	if lim == 4 {
+		bigen.PutUint32(e.b[:lim], uint32(v))
+	} else {
+		bigen.PutUint64(e.b[:lim], v)
+	}
+	if bincDoPrune {
+		i := pruneSignExt(e.b[:lim], pos)
+		e.w.writen1(bd | lim - 1 - byte(i))
+		e.w.writeb(e.b[i:lim])
+	} else {
+		e.w.writen1(bd | lim - 1)
+		e.w.writeb(e.b[:lim])
+	}
+}
+
+func (e *bincEncDriver) EncodeInt(v int64) {
+	const nbd byte = bincVdNegInt << 4
+	if v >= 0 {
+		e.encUint(bincVdPosInt<<4, true, uint64(v))
+	} else if v == -1 {
+		e.w.writen1(bincVdSpecial<<4 | bincSpNegOne)
+	} else {
+		e.encUint(bincVdNegInt<<4, false, uint64(-v))
+	}
+}
+
+func (e *bincEncDriver) EncodeUint(v uint64) {
+	e.encUint(bincVdPosInt<<4, true, v)
+}
+
+func (e *bincEncDriver) encUint(bd byte, pos bool, v uint64) {
+	if v == 0 {
+		e.w.writen1(bincVdSpecial<<4 | bincSpZero)
+	} else if pos && v >= 1 && v <= 16 {
+		e.w.writen1(bincVdSmallInt<<4 | byte(v-1))
+	} else if v <= math.MaxUint8 {
+		e.w.writen2(bd|0x0, byte(v))
+	} else if v <= math.MaxUint16 {
+		e.w.writen1(bd | 0x01)
+		bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v))
+	} else if v <= math.MaxUint32 {
+		e.encIntegerPrune(bd, pos, v, 4)
+	} else {
+		e.encIntegerPrune(bd, pos, v, 8)
+	}
+}
+
+func (e *bincEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, _ *Encoder) {
+	bs := ext.WriteExt(rv)
+	if bs == nil {
+		e.EncodeNil()
+		return
+	}
+	e.encodeExtPreamble(uint8(xtag), len(bs))
+	e.w.writeb(bs)
+}
+
+func (e *bincEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) {
+	e.encodeExtPreamble(uint8(re.Tag), len(re.Data))
+	e.w.writeb(re.Data)
+}
+
+func (e *bincEncDriver) encodeExtPreamble(xtag byte, length int) {
+	e.encLen(bincVdCustomExt<<4, uint64(length))
+	e.w.writen1(xtag)
+}
+
+func (e *bincEncDriver) WriteArrayStart(length int) {
+	e.encLen(bincVdArray<<4, uint64(length))
+	e.c = containerArrayStart
+}
+
+func (e *bincEncDriver) WriteMapStart(length int) {
+	e.encLen(bincVdMap<<4, uint64(length))
+	e.c = containerMapStart
+}
+
+func (e *bincEncDriver) EncodeString(c charEncoding, v string) {
+	if e.c == containerMapKey && c == cUTF8 && (e.h.AsSymbols == 0 || e.h.AsSymbols == 1) {
+		e.EncodeSymbol(v)
+		return
+	}
+	l := uint64(len(v))
+	e.encBytesLen(c, l)
+	if l > 0 {
+		e.w.writestr(v)
+	}
+}
+
+func (e *bincEncDriver) EncodeSymbol(v string) {
+	// if WriteSymbolsNoRefs {
+	// 	e.encodeString(cUTF8, v)
+	// 	return
+	// }
+
+	// Symbols only offer a benefit when the string length is > 1.
+	// This is because strings of length 1 take only 2 bytes to store
+	// (bd with embedded length, and a single byte for the string value).
+
+	l := len(v)
+	if l == 0 {
+		e.encBytesLen(cUTF8, 0)
+		return
+	} else if l == 1 {
+		e.encBytesLen(cUTF8, 1)
+		e.w.writen1(v[0])
+		return
+	}
+	if e.m == nil {
+		e.m = make(map[string]uint16, 16)
+	}
+	ui, ok := e.m[v]
+	if ok {
+		if ui <= math.MaxUint8 {
+			e.w.writen2(bincVdSymbol<<4, byte(ui))
+		} else {
+			e.w.writen1(bincVdSymbol<<4 | 0x8)
+			bigenHelper{e.b[:2], e.w}.writeUint16(ui)
+		}
+	} else {
+		e.s++
+		ui = e.s
+		//ui = uint16(atomic.AddUint32(&e.s, 1))
+		e.m[v] = ui
+		var lenprec uint8
+		if l <= math.MaxUint8 {
+			// lenprec = 0
+		} else if l <= math.MaxUint16 {
+			lenprec = 1
+		} else if int64(l) <= math.MaxUint32 {
+			lenprec = 2
+		} else {
+			lenprec = 3
+		}
+		if ui <= math.MaxUint8 {
+			e.w.writen2(bincVdSymbol<<4|0x0|0x4|lenprec, byte(ui))
+		} else {
+			e.w.writen1(bincVdSymbol<<4 | 0x8 | 0x4 | lenprec)
+			bigenHelper{e.b[:2], e.w}.writeUint16(ui)
+		}
+		if lenprec == 0 {
+			e.w.writen1(byte(l))
+		} else if lenprec == 1 {
+			bigenHelper{e.b[:2], e.w}.writeUint16(uint16(l))
+		} else if lenprec == 2 {
+			bigenHelper{e.b[:4], e.w}.writeUint32(uint32(l))
+		} else {
+			bigenHelper{e.b[:8], e.w}.writeUint64(uint64(l))
+		}
+		e.w.writestr(v)
+	}
+}
+
+func (e *bincEncDriver) EncodeStringBytes(c charEncoding, v []byte) {
+	if v == nil {
+		e.EncodeNil()
+		return
+	}
+	l := uint64(len(v))
+	e.encBytesLen(c, l)
+	if l > 0 {
+		e.w.writeb(v)
+	}
+}
+
+func (e *bincEncDriver) encBytesLen(c charEncoding, length uint64) {
+	//TODO: support bincUnicodeOther (for now, just use string or bytearray)
+	if c == cRAW {
+		e.encLen(bincVdByteArray<<4, length)
+	} else {
+		e.encLen(bincVdString<<4, length)
+	}
+}
+
+func (e *bincEncDriver) encLen(bd byte, l uint64) {
+	if l < 12 {
+		e.w.writen1(bd | uint8(l+4))
+	} else {
+		e.encLenNumber(bd, l)
+	}
+}
+
+func (e *bincEncDriver) encLenNumber(bd byte, v uint64) {
+	if v <= math.MaxUint8 {
+		e.w.writen2(bd, byte(v))
+	} else if v <= math.MaxUint16 {
+		e.w.writen1(bd | 0x01)
+		bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v))
+	} else if v <= math.MaxUint32 {
+		e.w.writen1(bd | 0x02)
+		bigenHelper{e.b[:4], e.w}.writeUint32(uint32(v))
+	} else {
+		e.w.writen1(bd | 0x03)
+		bigenHelper{e.b[:8], e.w}.writeUint64(uint64(v))
+	}
+}
+
+//------------------------------------
+
+type bincDecSymbol struct {
+	s string
+	b []byte
+	i uint16
+}
+
+type bincDecDriver struct {
+	decDriverNoopContainerReader
+	noBuiltInTypes
+
+	d      *Decoder
+	h      *BincHandle
+	r      decReader
+	br     bool // bytes reader
+	bdRead bool
+	bd     byte
+	vd     byte
+	vs     byte
+	_      [3]byte // padding
+	// linear searching on this slice is ok,
+	// because we typically expect < 32 symbols in each stream.
+	s []bincDecSymbol
+
+	// noStreamingCodec
+	// decNoSeparator
+
+	b [8 * 8]byte // scratch
+}
+
+func (d *bincDecDriver) readNextBd() {
+	d.bd = d.r.readn1()
+	d.vd = d.bd >> 4
+	d.vs = d.bd & 0x0f
+	d.bdRead = true
+}
+
+func (d *bincDecDriver) uncacheRead() {
+	if d.bdRead {
+		d.r.unreadn1()
+		d.bdRead = false
+	}
+}
+
+func (d *bincDecDriver) ContainerType() (vt valueType) {
+	if !d.bdRead {
+		d.readNextBd()
+	}
+	if d.vd == bincVdSpecial && d.vs == bincSpNil {
+		return valueTypeNil
+	} else if d.vd == bincVdByteArray {
+		return valueTypeBytes
+	} else if d.vd == bincVdString {
+		return valueTypeString
+	} else if d.vd == bincVdArray {
+		return valueTypeArray
+	} else if d.vd == bincVdMap {
+		return valueTypeMap
+	}
+	// else {
+	// d.d.errorf("isContainerType: unsupported parameter: %v", vt)
+	// }
+	return valueTypeUnset
+}
+
+func (d *bincDecDriver) TryDecodeAsNil() bool {
+	if !d.bdRead {
+		d.readNextBd()
+	}
+	if d.bd == bincVdSpecial<<4|bincSpNil {
+		d.bdRead = false
+		return true
+	}
+	return false
+}
+
+func (d *bincDecDriver) DecodeTime() (t time.Time) {
+	if !d.bdRead {
+		d.readNextBd()
+	}
+	if d.bd == bincVdSpecial<<4|bincSpNil {
+		d.bdRead = false
+		return
+	}
+	if d.vd != bincVdTimestamp {
+		d.d.errorf("cannot decode time - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
+		return
+	}
+	t, err := bincDecodeTime(d.r.readx(int(d.vs)))
+	if err != nil {
+		panic(err)
+	}
+	d.bdRead = false
+	return
+}
+
+func (d *bincDecDriver) decFloatPre(vs, defaultLen byte) {
+	if vs&0x8 == 0 {
+		d.r.readb(d.b[0:defaultLen])
+	} else {
+		l := d.r.readn1()
+		if l > 8 {
+			d.d.errorf("cannot read float - at most 8 bytes used to represent float - received %v bytes", l)
+			return
+		}
+		for i := l; i < 8; i++ {
+			d.b[i] = 0
+		}
+		d.r.readb(d.b[0:l])
+	}
+}
+
+func (d *bincDecDriver) decFloat() (f float64) {
+	//if true { f = math.Float64frombits(bigen.Uint64(d.r.readx(8))); break; }
+	if x := d.vs & 0x7; x == bincFlBin32 {
+		d.decFloatPre(d.vs, 4)
+		f = float64(math.Float32frombits(bigen.Uint32(d.b[0:4])))
+	} else if x == bincFlBin64 {
+		d.decFloatPre(d.vs, 8)
+		f = math.Float64frombits(bigen.Uint64(d.b[0:8]))
+	} else {
+		d.d.errorf("read float - only float32 and float64 are supported - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
+		return
+	}
+	return
+}
+
+func (d *bincDecDriver) decUint() (v uint64) {
+	// need to inline the code (interface conversion and type assertion expensive)
+	switch d.vs {
+	case 0:
+		v = uint64(d.r.readn1())
+	case 1:
+		d.r.readb(d.b[6:8])
+		v = uint64(bigen.Uint16(d.b[6:8]))
+	case 2:
+		d.b[4] = 0
+		d.r.readb(d.b[5:8])
+		v = uint64(bigen.Uint32(d.b[4:8]))
+	case 3:
+		d.r.readb(d.b[4:8])
+		v = uint64(bigen.Uint32(d.b[4:8]))
+	case 4, 5, 6:
+		lim := int(7 - d.vs)
+		d.r.readb(d.b[lim:8])
+		for i := 0; i < lim; i++ {
+			d.b[i] = 0
+		}
+		v = uint64(bigen.Uint64(d.b[:8]))
+	case 7:
+		d.r.readb(d.b[:8])
+		v = uint64(bigen.Uint64(d.b[:8]))
+	default:
+		d.d.errorf("unsigned integers with greater than 64 bits of precision not supported")
+		return
+	}
+	return
+}
+
+func (d *bincDecDriver) decCheckInteger() (ui uint64, neg bool) {
+	if !d.bdRead {
+		d.readNextBd()
+	}
+	vd, vs := d.vd, d.vs
+	if vd == bincVdPosInt {
+		ui = d.decUint()
+	} else if vd == bincVdNegInt {
+		ui = d.decUint()
+		neg = true
+	} else if vd == bincVdSmallInt {
+		ui = uint64(d.vs) + 1
+	} else if vd == bincVdSpecial {
+		if vs == bincSpZero {
+			//i = 0
+		} else if vs == bincSpNegOne {
+			neg = true
+			ui = 1
+		} else {
+			d.d.errorf("integer decode fails - invalid special value from descriptor %x-%x/%s",
+				d.vd, d.vs, bincdesc(d.vd, d.vs))
+			return
+		}
+	} else {
+		d.d.errorf("integer can only be decoded from int/uint. d.bd: 0x%x, d.vd: 0x%x", d.bd, d.vd)
+		return
+	}
+	return
+}
+
+func (d *bincDecDriver) DecodeInt64() (i int64) {
+	ui, neg := d.decCheckInteger()
+	i = chkOvf.SignedIntV(ui)
+	if neg {
+		i = -i
+	}
+	d.bdRead = false
+	return
+}
+
+func (d *bincDecDriver) DecodeUint64() (ui uint64) {
+	ui, neg := d.decCheckInteger()
+	if neg {
+		d.d.errorf("assigning negative signed value to unsigned integer type")
+		return
+	}
+	d.bdRead = false
+	return
+}
+
+func (d *bincDecDriver) DecodeFloat64() (f float64) {
+	if !d.bdRead {
+		d.readNextBd()
+	}
+	vd, vs := d.vd, d.vs
+	if vd == bincVdSpecial {
+		d.bdRead = false
+		if vs == bincSpNan {
+			return math.NaN()
+		} else if vs == bincSpPosInf {
+			return math.Inf(1)
+		} else if vs == bincSpZeroFloat || vs == bincSpZero {
+			return
+		} else if vs == bincSpNegInf {
+			return math.Inf(-1)
+		} else {
+			d.d.errorf("float - invalid special value from descriptor %x-%x/%s",
+				d.vd, d.vs, bincdesc(d.vd, d.vs))
+			return
+		}
+	} else if vd == bincVdFloat {
+		f = d.decFloat()
+	} else {
+		f = float64(d.DecodeInt64())
+	}
+	d.bdRead = false
+	return
+}
+
+// bool can be decoded from bool only (single byte).
+func (d *bincDecDriver) DecodeBool() (b bool) {
+	if !d.bdRead {
+		d.readNextBd()
+	}
+	if bd := d.bd; bd == (bincVdSpecial | bincSpFalse) {
+		// b = false
+	} else if bd == (bincVdSpecial | bincSpTrue) {
+		b = true
+	} else {
+		d.d.errorf("bool - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
+		return
+	}
+	d.bdRead = false
+	return
+}
+
+func (d *bincDecDriver) ReadMapStart() (length int) {
+	if !d.bdRead {
+		d.readNextBd()
+	}
+	if d.vd != bincVdMap {
+		d.d.errorf("map - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
+		return
+	}
+	length = d.decLen()
+	d.bdRead = false
+	return
+}
+
+func (d *bincDecDriver) ReadArrayStart() (length int) {
+	if !d.bdRead {
+		d.readNextBd()
+	}
+	if d.vd != bincVdArray {
+		d.d.errorf("array - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
+		return
+	}
+	length = d.decLen()
+	d.bdRead = false
+	return
+}
+
+func (d *bincDecDriver) decLen() int {
+	if d.vs > 3 {
+		return int(d.vs - 4)
+	}
+	return int(d.decLenNumber())
+}
+
+func (d *bincDecDriver) decLenNumber() (v uint64) {
+	if x := d.vs; x == 0 {
+		v = uint64(d.r.readn1())
+	} else if x == 1 {
+		d.r.readb(d.b[6:8])
+		v = uint64(bigen.Uint16(d.b[6:8]))
+	} else if x == 2 {
+		d.r.readb(d.b[4:8])
+		v = uint64(bigen.Uint32(d.b[4:8]))
+	} else {
+		d.r.readb(d.b[:8])
+		v = bigen.Uint64(d.b[:8])
+	}
+	return
+}
+
+func (d *bincDecDriver) decStringAndBytes(bs []byte, withString, zerocopy bool) (
+	bs2 []byte, s string) {
+	if !d.bdRead {
+		d.readNextBd()
+	}
+	if d.bd == bincVdSpecial<<4|bincSpNil {
+		d.bdRead = false
+		return
+	}
+	var slen = -1
+	// var ok bool
+	switch d.vd {
+	case bincVdString, bincVdByteArray:
+		slen = d.decLen()
+		if zerocopy {
+			if d.br {
+				bs2 = d.r.readx(slen)
+			} else if len(bs) == 0 {
+				bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, d.b[:])
+			} else {
+				bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, bs)
+			}
+		} else {
+			bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, bs)
+		}
+		if withString {
+			s = string(bs2)
+		}
+	case bincVdSymbol:
+		// zerocopy doesn't apply for symbols,
+		// as the values must be stored in a table for later use.
+		//
+		//from vs: extract numSymbolBytes, containsStringVal, strLenPrecision,
+		//extract symbol
+		//if containsStringVal, read it and put in map
+		//else look in map for string value
+		var symbol uint16
+		vs := d.vs
+		if vs&0x8 == 0 {
+			symbol = uint16(d.r.readn1())
+		} else {
+			symbol = uint16(bigen.Uint16(d.r.readx(2)))
+		}
+		if d.s == nil {
+			d.s = make([]bincDecSymbol, 0, 16)
+		}
+
+		if vs&0x4 == 0 {
+			for i := range d.s {
+				j := &d.s[i]
+				if j.i == symbol {
+					bs2 = j.b
+					if withString {
+						if j.s == "" && bs2 != nil {
+							j.s = string(bs2)
+						}
+						s = j.s
+					}
+					break
+				}
+			}
+		} else {
+			switch vs & 0x3 {
+			case 0:
+				slen = int(d.r.readn1())
+			case 1:
+				slen = int(bigen.Uint16(d.r.readx(2)))
+			case 2:
+				slen = int(bigen.Uint32(d.r.readx(4)))
+			case 3:
+				slen = int(bigen.Uint64(d.r.readx(8)))
+			}
+			// since using symbols, do not store any part of
+			// the parameter bs in the map, as it might be a shared buffer.
+			// bs2 = decByteSlice(d.r, slen, bs)
+			bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, nil)
+			if withString {
+				s = string(bs2)
+			}
+			d.s = append(d.s, bincDecSymbol{i: symbol, s: s, b: bs2})
+		}
+	default:
+		d.d.errorf("string/bytes - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
+		return
+	}
+	d.bdRead = false
+	return
+}
+
+func (d *bincDecDriver) DecodeString() (s string) {
+	// DecodeBytes does not accommodate symbols, whose impl stores string version in map.
+	// Use decStringAndBytes directly.
+	// return string(d.DecodeBytes(d.b[:], true, true))
+	_, s = d.decStringAndBytes(d.b[:], true, true)
+	return
+}
+
+func (d *bincDecDriver) DecodeStringAsBytes() (s []byte) {
+	s, _ = d.decStringAndBytes(d.b[:], false, true)
+	return
+}
+
+func (d *bincDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) {
+	if !d.bdRead {
+		d.readNextBd()
+	}
+	if d.bd == bincVdSpecial<<4|bincSpNil {
+		d.bdRead = false
+		return nil
+	}
+	// check if an "array" of uint8's (see ContainerType for how to infer if an array)
+	if d.vd == bincVdArray {
+		bsOut, _ = fastpathTV.DecSliceUint8V(bs, true, d.d)
+		return
+	}
+	var clen int
+	if d.vd == bincVdString || d.vd == bincVdByteArray {
+		clen = d.decLen()
+	} else {
+		d.d.errorf("bytes - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
+		return
+	}
+	d.bdRead = false
+	if zerocopy {
+		if d.br {
+			return d.r.readx(clen)
+		} else if len(bs) == 0 {
+			bs = d.b[:]
+		}
+	}
+	return decByteSlice(d.r, clen, d.d.h.MaxInitLen, bs)
+}
+
+func (d *bincDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
+	if xtag > 0xff {
+		d.d.errorf("ext: tag must be <= 0xff; got: %v", xtag)
+		return
+	}
+	realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag))
+	realxtag = uint64(realxtag1)
+	if ext == nil {
+		re := rv.(*RawExt)
+		re.Tag = realxtag
+		re.Data = detachZeroCopyBytes(d.br, re.Data, xbs)
+	} else {
+		ext.ReadExt(rv, xbs)
+	}
+	return
+}
+
+func (d *bincDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) {
+	if !d.bdRead {
+		d.readNextBd()
+	}
+	if d.vd == bincVdCustomExt {
+		l := d.decLen()
+		xtag = d.r.readn1()
+		if verifyTag && xtag != tag {
+			d.d.errorf("wrong extension tag - got %b, expecting: %v", xtag, tag)
+			return
+		}
+		xbs = d.r.readx(l)
+	} else if d.vd == bincVdByteArray {
+		xbs = d.DecodeBytes(nil, true)
+	} else {
+		d.d.errorf("ext - expecting extensions or byte array - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
+		return
+	}
+	d.bdRead = false
+	return
+}
+
+func (d *bincDecDriver) DecodeNaked() {
+	if !d.bdRead {
+		d.readNextBd()
+	}
+
+	n := d.d.n
+	var decodeFurther bool
+
+	switch d.vd {
+	case bincVdSpecial:
+		switch d.vs {
+		case bincSpNil:
+			n.v = valueTypeNil
+		case bincSpFalse:
+			n.v = valueTypeBool
+			n.b = false
+		case bincSpTrue:
+			n.v = valueTypeBool
+			n.b = true
+		case bincSpNan:
+			n.v = valueTypeFloat
+			n.f = math.NaN()
+		case bincSpPosInf:
+			n.v = valueTypeFloat
+			n.f = math.Inf(1)
+		case bincSpNegInf:
+			n.v = valueTypeFloat
+			n.f = math.Inf(-1)
+		case bincSpZeroFloat:
+			n.v = valueTypeFloat
+			n.f = float64(0)
+		case bincSpZero:
+			n.v = valueTypeUint
+			n.u = uint64(0) // int8(0)
+		case bincSpNegOne:
+			n.v = valueTypeInt
+			n.i = int64(-1) // int8(-1)
+		default:
+			d.d.errorf("cannot infer value - unrecognized special value from descriptor %x-%x/%s", d.vd, d.vs, bincdesc(d.vd, d.vs))
+		}
+	case bincVdSmallInt:
+		n.v = valueTypeUint
+		n.u = uint64(int8(d.vs)) + 1 // int8(d.vs) + 1
+	case bincVdPosInt:
+		n.v = valueTypeUint
+		n.u = d.decUint()
+	case bincVdNegInt:
+		n.v = valueTypeInt
+		n.i = -(int64(d.decUint()))
+	case bincVdFloat:
+		n.v = valueTypeFloat
+		n.f = d.decFloat()
+	case bincVdSymbol:
+		n.v = valueTypeSymbol
+		n.s = d.DecodeString()
+	case bincVdString:
+		n.v = valueTypeString
+		n.s = d.DecodeString()
+	case bincVdByteArray:
+		n.v = valueTypeBytes
+		n.l = d.DecodeBytes(nil, false)
+	case bincVdTimestamp:
+		n.v = valueTypeTime
+		tt, err := bincDecodeTime(d.r.readx(int(d.vs)))
+		if err != nil {
+			panic(err)
+		}
+		n.t = tt
+	case bincVdCustomExt:
+		n.v = valueTypeExt
+		l := d.decLen()
+		n.u = uint64(d.r.readn1())
+		n.l = d.r.readx(l)
+	case bincVdArray:
+		n.v = valueTypeArray
+		decodeFurther = true
+	case bincVdMap:
+		n.v = valueTypeMap
+		decodeFurther = true
+	default:
+		d.d.errorf("cannot infer value - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
+	}
+
+	if !decodeFurther {
+		d.bdRead = false
+	}
+	if n.v == valueTypeUint && d.h.SignedInteger {
+		n.v = valueTypeInt
+		n.i = int64(n.u)
+	}
+	return
+}
+
+//------------------------------------
+
+// BincHandle is a Handle for the Binc Schema-Free Encoding Format
+// defined at https://github.com/ugorji/binc .
+//
+// BincHandle currently supports all Binc features with the following EXCEPTIONS:
+//   - Only integers up to 64 bits of precision are supported.
+//     Big integers are unsupported.
+//   - Only IEEE 754 binary32 and binary64 floats are supported (i.e. Go float32 and float64 types).
+//     Extended precision and decimal IEEE 754 floats are unsupported.
+//   - Only UTF-8 strings are supported.
+//     Unicode_Other Binc types (UTF16, UTF32) are currently unsupported.
+//
+// Note that these EXCEPTIONS are temporary; full support is possible and may happen soon.
+type BincHandle struct {
+	BasicHandle
+	binaryEncodingType
+	noElemSeparators
+
+	// AsSymbols defines what should be encoded as symbols.
+	//
+	// Encoding as symbols can reduce the encoded size significantly.
+	//
+	// However, during decoding, each string to be encoded as a symbol must
+	// be checked to see if it has been seen before. Consequently, encoding time
+	// will increase if using symbols, because string comparisons has a clear cost.
+	//
+	// Values:
+	// - 0: default: library uses best judgement
+	// - 1: use symbols
+	// - 2: do not use symbols
+	AsSymbols uint8
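+
+	// For example (illustrative): setting AsSymbols = 2 on the handle before
+	// first use disables symbol encoding for map keys.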
+
+	// AsSymbols: may later on introduce more options ...
+	// - m: map keys
+	// - s: struct fields
+	// - n: none
+	// - a: all: same as m, s, ...
+
+	// _ [1]uint64 // padding
+}
+
+// Name returns the name of the handle: binc
+func (h *BincHandle) Name() string { return "binc" }
+
+// SetBytesExt sets an extension
+func (h *BincHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) {
+	return h.SetExt(rt, tag, &extWrapper{ext, interfaceExtFailer{}})
+}
+
+func (h *BincHandle) newEncDriver(e *Encoder) encDriver {
+	return &bincEncDriver{e: e, h: h, w: e.w}
+}
+
+func (h *BincHandle) newDecDriver(d *Decoder) decDriver {
+	return &bincDecDriver{d: d, h: h, r: d.r, br: d.bytes}
+}
+
+func (e *bincEncDriver) reset() {
+	e.w = e.e.w
+	e.s = 0
+	e.c = 0
+	e.m = nil
+}
+
+func (d *bincDecDriver) reset() {
+	d.r, d.br = d.d.r, d.d.bytes
+	d.s = nil
+	d.bd, d.bdRead, d.vd, d.vs = 0, false, 0, 0
+}
+
+// var timeDigits = [...]byte{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}
+
+// EncodeTime encodes a time.Time as a []byte, including
+// information on the instant in time and UTC offset.
+//
+// Format Description
+//
+//   A timestamp is composed of 3 components:
+//
+//   - secs: signed integer representing seconds since unix epoch
+//   - nsecs: unsigned integer representing fractional seconds as a
+//     nanosecond offset within secs, in the range 0 <= nsecs < 1e9
+//   - tz: signed integer representing timezone offset in minutes east of UTC,
+//     and a dst (daylight savings time) flag
+//
+//   When encoding a timestamp, the first byte is the descriptor, which
+//   defines which components are encoded and how many bytes are used to
+//   encode secs and nsecs components. *If secs/nsecs is 0 or tz is UTC, it
+//   is not encoded in the byte array explicitly*.
+//
+//       Descriptor 8 bits are of the form `A B C DDD EE`:
+//           A:   Is secs component encoded? 1 = true
+//           B:   Is nsecs component encoded? 1 = true
+//           C:   Is tz component encoded? 1 = true
+//           DDD: Number of extra bytes for secs (range 0-7).
+//                If A = 1, secs encoded in DDD+1 bytes.
+//                    If A = 0, secs is not encoded, and is assumed to be 0.
+//                    If A = 1, then we need at least 1 byte to encode secs.
+//                    DDD says the number of extra bytes beyond that 1.
+//                    E.g. if DDD=0, then secs is represented in 1 byte.
+//                         if DDD=2, then secs is represented in 3 bytes.
+//           EE:  Number of extra bytes for nsecs (range 0-3).
+//                If B = 1, nsecs encoded in EE+1 bytes (similar to secs/DDD above)
+//
+//   Following the descriptor bytes, subsequent bytes are:
+//
+//       secs component encoded in `DDD + 1` bytes (if A == 1)
+//       nsecs component encoded in `EE + 1` bytes (if B == 1)
+//       tz component encoded in 2 bytes (if C == 1)
+//
+//   secs and nsecs components are integers encoded in a big-endian
+//   two's-complement encoding format.
+//
+//   tz component is encoded as 2 bytes (16 bits). Most significant bit 15 to
+//   Least significant bit 0 are described below:
+//
+//       Timezone offset has a range of -12:00 to +14:00 (ie -720 to +840 minutes).
+//       Bit 15 = have_dst: set to 1 if we set the dst flag.
+//       Bit 14 = dst_on: set to 1 if dst is in effect at the time, or 0 if not.
+//       Bits 13..0 = timezone offset in minutes. It is a signed integer in Big Endian format.
+//
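+//   Worked example (our own illustration, derived from the rules above):
+//   encoding 2018-01-01T00:00:00 UTC gives secs = 1514764800 = 0x5A497A00,
+//   nsecs = 0, and no tz. So A=1, B=0, C=0; secs fits in 4 bytes, so DDD=3
+//   (DDD+1 = 4 bytes), and the descriptor byte is 0x80 | 3<<2 = 0x8C. The
+//   full encoding is the 5 bytes 0x8C 0x5A 0x49 0x7A 0x00.
+//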
+func bincEncodeTime(t time.Time) []byte {
+	//t := rv.Interface().(time.Time)
+	tsecs, tnsecs := t.Unix(), t.Nanosecond()
+	var (
+		bd   byte
+		btmp [8]byte
+		bs   [16]byte
+		i    int = 1
+	)
+	l := t.Location()
+	if l == time.UTC {
+		l = nil
+	}
+	if tsecs != 0 {
+		bd = bd | 0x80
+		bigen.PutUint64(btmp[:], uint64(tsecs))
+		f := pruneSignExt(btmp[:], tsecs >= 0)
+		bd = bd | (byte(7-f) << 2)
+		copy(bs[i:], btmp[f:])
+		i = i + (8 - f)
+	}
+	if tnsecs != 0 {
+		bd = bd | 0x40
+		bigen.PutUint32(btmp[:4], uint32(tnsecs))
+		f := pruneSignExt(btmp[:4], true)
+		bd = bd | byte(3-f)
+		copy(bs[i:], btmp[f:4])
+		i = i + (4 - f)
+	}
+	if l != nil {
+		bd = bd | 0x20
+		// Note that Go Libs do not give access to dst flag.
+		_, zoneOffset := t.Zone()
+		//zoneName, zoneOffset := t.Zone()
+		zoneOffset /= 60
+		z := uint16(zoneOffset)
+		bigen.PutUint16(btmp[:2], z)
+		// clear dst flags
+		bs[i] = btmp[0] & 0x3f
+		bs[i+1] = btmp[1]
+		i = i + 2
+	}
+	bs[0] = bd
+	return bs[0:i]
+}
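+
+// An illustrative walkthrough of the format above (not part of the
+// upstream API): encoding time.Unix(1, 0) in UTC sets only the secs
+// component. secs=1 prunes to a single byte, so DDD=0, and the
+// descriptor is 0b1_0_0_000_00 = 0x80 (A=1, B=0, C=0), giving the
+// 2-byte encoding [0x80, 0x01].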
+
+// bincDecodeTime decodes a []byte into a time.Time.
+func bincDecodeTime(bs []byte) (tt time.Time, err error) {
+	bd := bs[0]
+	var (
+		tsec  int64
+		tnsec uint32
+		tz    uint16
+		i     byte = 1
+		i2    byte
+		n     byte
+	)
+	if bd&(1<<7) != 0 {
+		var btmp [8]byte
+		n = ((bd >> 2) & 0x7) + 1
+		i2 = i + n
+		copy(btmp[8-n:], bs[i:i2])
+		//if first bit of bs[i] is set, then fill btmp[0..8-n] with 0xff (ie sign extend it)
+		if bs[i]&(1<<7) != 0 {
+			copy(btmp[0:8-n], bsAll0xff)
+			//for j,k := byte(0), 8-n; j < k; j++ {	btmp[j] = 0xff }
+		}
+		i = i2
+		tsec = int64(bigen.Uint64(btmp[:]))
+	}
+	if bd&(1<<6) != 0 {
+		var btmp [4]byte
+		n = (bd & 0x3) + 1
+		i2 = i + n
+		copy(btmp[4-n:], bs[i:i2])
+		i = i2
+		tnsec = bigen.Uint32(btmp[:])
+	}
+	if bd&(1<<5) == 0 {
+		tt = time.Unix(tsec, int64(tnsec)).UTC()
+		return
+	}
+	// In stdlib time.Parse, when a date is parsed without a zone name, it uses "" as zone name.
+	// However, we need name here, so it can be shown when time is printed.
+	// Zone name is in form: UTC-08:00.
+	// Note that Go Libs do not give access to dst flag, so we ignore dst bits
+
+	i2 = i + 2
+	tz = bigen.Uint16(bs[i:i2])
+	// i = i2
+	// sign extend sign bit into top 2 MSB (which were dst bits):
+	if tz&(1<<13) == 0 { // positive
+		tz = tz & 0x3fff //clear 2 MSBs: dst bits
+	} else { // negative
+		tz = tz | 0xc000 //set 2 MSBs: dst bits
+	}
+	tzint := int16(tz)
+	if tzint == 0 {
+		tt = time.Unix(tsec, int64(tnsec)).UTC()
+	} else {
+		// For Go Time, do not use a descriptive timezone.
+		// It's unnecessary, and makes it harder to do a reflect.DeepEqual.
+		// The Offset already tells what the offset should be, if not on UTC and unknown zone name.
+		// var zoneName = timeLocUTCName(tzint)
+		tt = time.Unix(tsec, int64(tnsec)).In(time.FixedZone("", int(tzint)*60))
+	}
+	return
+}
+
+var _ decDriver = (*bincDecDriver)(nil)
+var _ encDriver = (*bincEncDriver)(nil)
diff --git a/vendor/github.com/ugorji/go/codec/cbor.go b/vendor/github.com/ugorji/go/codec/cbor.go
new file mode 100644
index 0000000..7633c04
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/cbor.go
@@ -0,0 +1,756 @@
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+	"math"
+	"reflect"
+	"time"
+)
+
+const (
+	cborMajorUint byte = iota
+	cborMajorNegInt
+	cborMajorBytes
+	cborMajorText
+	cborMajorArray
+	cborMajorMap
+	cborMajorTag
+	cborMajorOther
+)
+
+const (
+	cborBdFalse byte = 0xf4 + iota
+	cborBdTrue
+	cborBdNil
+	cborBdUndefined
+	cborBdExt
+	cborBdFloat16
+	cborBdFloat32
+	cborBdFloat64
+)
+
+const (
+	cborBdIndefiniteBytes  byte = 0x5f
+	cborBdIndefiniteString      = 0x7f
+	cborBdIndefiniteArray       = 0x9f
+	cborBdIndefiniteMap         = 0xbf
+	cborBdBreak                 = 0xff
+)
+
+// These define some in-stream descriptors for
+// manual encoding e.g. when doing explicit indefinite-length
+const (
+	CborStreamBytes  byte = 0x5f
+	CborStreamString      = 0x7f
+	CborStreamArray       = 0x9f
+	CborStreamMap         = 0xbf
+	CborStreamBreak       = 0xff
+)
+
+const (
+	cborBaseUint   byte = 0x00
+	cborBaseNegInt      = 0x20
+	cborBaseBytes       = 0x40
+	cborBaseString      = 0x60
+	cborBaseArray       = 0x80
+	cborBaseMap         = 0xa0
+	cborBaseTag         = 0xc0
+	cborBaseSimple      = 0xe0
+)
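+
+// In CBOR, every data item begins with an initial byte: the high 3 bits
+// carry the major type and the low 5 bits carry additional information
+// (either a small literal value, or a code saying how many bytes of
+// value/length follow). An illustrative example, per RFC 7049: unsigned
+// 700 encodes as 0x19 0x02 0xbc - major 0 with additional info 0x19
+// (2-byte uint follows), then 700 in big-endian.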
+
+func cbordesc(bd byte) string {
+	switch bd {
+	case cborBdNil:
+		return "nil"
+	case cborBdFalse:
+		return "false"
+	case cborBdTrue:
+		return "true"
+	case cborBdFloat16, cborBdFloat32, cborBdFloat64:
+		return "float"
+	case cborBdIndefiniteBytes:
+		return "bytes*"
+	case cborBdIndefiniteString:
+		return "string*"
+	case cborBdIndefiniteArray:
+		return "array*"
+	case cborBdIndefiniteMap:
+		return "map*"
+	default:
+		switch {
+		case bd >= cborBaseUint && bd < cborBaseNegInt:
+			return "(u)int"
+		case bd >= cborBaseNegInt && bd < cborBaseBytes:
+			return "int"
+		case bd >= cborBaseBytes && bd < cborBaseString:
+			return "bytes"
+		case bd >= cborBaseString && bd < cborBaseArray:
+			return "string"
+		case bd >= cborBaseArray && bd < cborBaseMap:
+			return "array"
+		case bd >= cborBaseMap && bd < cborBaseTag:
+			return "map"
+		case bd >= cborBaseTag && bd < cborBaseSimple:
+			return "ext"
+		default:
+			return "unknown"
+		}
+	}
+}
+
+// -------------------
+
+type cborEncDriver struct {
+	noBuiltInTypes
+	encDriverNoopContainerWriter
+	// encNoSeparator
+	e *Encoder
+	w encWriter
+	h *CborHandle
+	x [8]byte
+	_ [3]uint64 // padding
+}
+
+func (e *cborEncDriver) EncodeNil() {
+	e.w.writen1(cborBdNil)
+}
+
+func (e *cborEncDriver) EncodeBool(b bool) {
+	if b {
+		e.w.writen1(cborBdTrue)
+	} else {
+		e.w.writen1(cborBdFalse)
+	}
+}
+
+func (e *cborEncDriver) EncodeFloat32(f float32) {
+	e.w.writen1(cborBdFloat32)
+	bigenHelper{e.x[:4], e.w}.writeUint32(math.Float32bits(f))
+}
+
+func (e *cborEncDriver) EncodeFloat64(f float64) {
+	e.w.writen1(cborBdFloat64)
+	bigenHelper{e.x[:8], e.w}.writeUint64(math.Float64bits(f))
+}
+
+func (e *cborEncDriver) encUint(v uint64, bd byte) {
+	if v <= 0x17 {
+		e.w.writen1(byte(v) + bd)
+	} else if v <= math.MaxUint8 {
+		e.w.writen2(bd+0x18, uint8(v))
+	} else if v <= math.MaxUint16 {
+		e.w.writen1(bd + 0x19)
+		bigenHelper{e.x[:2], e.w}.writeUint16(uint16(v))
+	} else if v <= math.MaxUint32 {
+		e.w.writen1(bd + 0x1a)
+		bigenHelper{e.x[:4], e.w}.writeUint32(uint32(v))
+	} else { // if v <= math.MaxUint64 {
+		e.w.writen1(bd + 0x1b)
+		bigenHelper{e.x[:8], e.w}.writeUint64(v)
+	}
+}
+
+func (e *cborEncDriver) EncodeInt(v int64) {
+	if v < 0 {
+		e.encUint(uint64(-1-v), cborBaseNegInt)
+	} else {
+		e.encUint(uint64(v), cborBaseUint)
+	}
+}
+
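+// A worked example (value per RFC 7049, shown for illustration only):
+// EncodeInt(-500) maps the value to the unsigned 499 (= -1 - (-500))
+// under the negative-integer major type, emitting bytes 0x39 0x01 0xf3.
+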
+func (e *cborEncDriver) EncodeUint(v uint64) {
+	e.encUint(v, cborBaseUint)
+}
+
+func (e *cborEncDriver) encLen(bd byte, length int) {
+	e.encUint(uint64(length), bd)
+}
+
+func (e *cborEncDriver) EncodeTime(t time.Time) {
+	if t.IsZero() {
+		e.EncodeNil()
+	} else if e.h.TimeRFC3339 {
+		e.encUint(0, cborBaseTag)
+		e.EncodeString(cUTF8, t.Format(time.RFC3339Nano))
+	} else {
+		e.encUint(1, cborBaseTag)
+		t = t.UTC().Round(time.Microsecond)
+		sec, nsec := t.Unix(), uint64(t.Nanosecond())
+		if nsec == 0 {
+			e.EncodeInt(sec)
+		} else {
+			e.EncodeFloat64(float64(sec) + float64(nsec)/1e9)
+		}
+	}
+}
+
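+// A worked example (matching RFC 7049 Appendix A, for illustration
+// only): with TimeRFC3339 unset, EncodeTime(time.Unix(1363896240, 0))
+// writes tag 1 followed by the integer seconds:
+// 0xc1 0x1a 0x51 0x4b 0x67 0xb0.
+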
+func (e *cborEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, en *Encoder) {
+	e.encUint(uint64(xtag), cborBaseTag)
+	if v := ext.ConvertExt(rv); v == nil {
+		e.EncodeNil()
+	} else {
+		en.encode(v)
+	}
+}
+
+func (e *cborEncDriver) EncodeRawExt(re *RawExt, en *Encoder) {
+	e.encUint(uint64(re.Tag), cborBaseTag)
+	if false && re.Data != nil {
+		en.encode(re.Data)
+	} else if re.Value != nil {
+		en.encode(re.Value)
+	} else {
+		e.EncodeNil()
+	}
+}
+
+func (e *cborEncDriver) WriteArrayStart(length int) {
+	if e.h.IndefiniteLength {
+		e.w.writen1(cborBdIndefiniteArray)
+	} else {
+		e.encLen(cborBaseArray, length)
+	}
+}
+
+func (e *cborEncDriver) WriteMapStart(length int) {
+	if e.h.IndefiniteLength {
+		e.w.writen1(cborBdIndefiniteMap)
+	} else {
+		e.encLen(cborBaseMap, length)
+	}
+}
+
+func (e *cborEncDriver) WriteMapEnd() {
+	if e.h.IndefiniteLength {
+		e.w.writen1(cborBdBreak)
+	}
+}
+
+func (e *cborEncDriver) WriteArrayEnd() {
+	if e.h.IndefiniteLength {
+		e.w.writen1(cborBdBreak)
+	}
+}
+
+func (e *cborEncDriver) EncodeString(c charEncoding, v string) {
+	e.encStringBytesS(cborBaseString, v)
+}
+
+func (e *cborEncDriver) EncodeStringBytes(c charEncoding, v []byte) {
+	if v == nil {
+		e.EncodeNil()
+	} else if c == cRAW {
+		e.encStringBytesS(cborBaseBytes, stringView(v))
+	} else {
+		e.encStringBytesS(cborBaseString, stringView(v))
+	}
+}
+
+func (e *cborEncDriver) encStringBytesS(bb byte, v string) {
+	if e.h.IndefiniteLength {
+		if bb == cborBaseBytes {
+			e.w.writen1(cborBdIndefiniteBytes)
+		} else {
+			e.w.writen1(cborBdIndefiniteString)
+		}
+		blen := len(v) / 4
+		if blen == 0 {
+			blen = 64
+		} else if blen > 1024 {
+			blen = 1024
+		}
+		for i := 0; i < len(v); {
+			var v2 string
+			i2 := i + blen
+			if i2 < len(v) {
+				v2 = v[i:i2]
+			} else {
+				v2 = v[i:]
+			}
+			e.encLen(bb, len(v2))
+			e.w.writestr(v2)
+			i = i2
+		}
+		e.w.writen1(cborBdBreak)
+	} else {
+		e.encLen(bb, len(v))
+		e.w.writestr(v)
+	}
+}
+
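+// A note on the indefinite-length path in encStringBytesS above: the
+// chunk size is len(v)/4, raised to 64 when that quotient is 0 and
+// capped at 1024. E.g. a 3-byte string goes out as a single 3-byte
+// chunk, while a 1 MB string is written in 1024-byte chunks, between
+// the indefinite-length start byte and the break byte.
+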
+// ----------------------
+
+type cborDecDriver struct {
+	d *Decoder
+	h *CborHandle
+	r decReader
+	// b      [scratchByteArrayLen]byte
+	br     bool // bytes reader
+	bdRead bool
+	bd     byte
+	noBuiltInTypes
+	// decNoSeparator
+	decDriverNoopContainerReader
+	_ [3]uint64 // padding
+}
+
+func (d *cborDecDriver) readNextBd() {
+	d.bd = d.r.readn1()
+	d.bdRead = true
+}
+
+func (d *cborDecDriver) uncacheRead() {
+	if d.bdRead {
+		d.r.unreadn1()
+		d.bdRead = false
+	}
+}
+
+func (d *cborDecDriver) ContainerType() (vt valueType) {
+	if !d.bdRead {
+		d.readNextBd()
+	}
+	if d.bd == cborBdNil {
+		return valueTypeNil
+	} else if d.bd == cborBdIndefiniteBytes || (d.bd >= cborBaseBytes && d.bd < cborBaseString) {
+		return valueTypeBytes
+	} else if d.bd == cborBdIndefiniteString || (d.bd >= cborBaseString && d.bd < cborBaseArray) {
+		return valueTypeString
+	} else if d.bd == cborBdIndefiniteArray || (d.bd >= cborBaseArray && d.bd < cborBaseMap) {
+		return valueTypeArray
+	} else if d.bd == cborBdIndefiniteMap || (d.bd >= cborBaseMap && d.bd < cborBaseTag) {
+		return valueTypeMap
+	}
+	// else {
+	// d.d.errorf("isContainerType: unsupported parameter: %v", vt)
+	// }
+	return valueTypeUnset
+}
+
+func (d *cborDecDriver) TryDecodeAsNil() bool {
+	if !d.bdRead {
+		d.readNextBd()
+	}
+	// treat Nil and Undefined as nil values
+	if d.bd == cborBdNil || d.bd == cborBdUndefined {
+		d.bdRead = false
+		return true
+	}
+	return false
+}
+
+func (d *cborDecDriver) CheckBreak() bool {
+	if !d.bdRead {
+		d.readNextBd()
+	}
+	if d.bd == cborBdBreak {
+		d.bdRead = false
+		return true
+	}
+	return false
+}
+
+func (d *cborDecDriver) decUint() (ui uint64) {
+	v := d.bd & 0x1f
+	if v <= 0x17 {
+		ui = uint64(v)
+	} else {
+		if v == 0x18 {
+			ui = uint64(d.r.readn1())
+		} else if v == 0x19 {
+			ui = uint64(bigen.Uint16(d.r.readx(2)))
+		} else if v == 0x1a {
+			ui = uint64(bigen.Uint32(d.r.readx(4)))
+		} else if v == 0x1b {
+			ui = uint64(bigen.Uint64(d.r.readx(8)))
+		} else {
+			d.d.errorf("invalid descriptor decoding uint: %x/%s", d.bd, cbordesc(d.bd))
+			return
+		}
+	}
+	return
+}
+
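+// An illustrative decode, per RFC 7049: the bytes 0x18 0x64 carry
+// additional info 0x18 (1-byte uint follows), so decUint reads one more
+// byte and returns 100 (0x64).
+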
+func (d *cborDecDriver) decCheckInteger() (neg bool) {
+	if !d.bdRead {
+		d.readNextBd()
+	}
+	major := d.bd >> 5
+	if major == cborMajorUint {
+	} else if major == cborMajorNegInt {
+		neg = true
+	} else {
+		d.d.errorf("not an integer - invalid major %v from descriptor %x/%s", major, d.bd, cbordesc(d.bd))
+		return
+	}
+	return
+}
+
+func (d *cborDecDriver) DecodeInt64() (i int64) {
+	neg := d.decCheckInteger()
+	ui := d.decUint()
+	// check if this number can be converted to an int without overflow
+	if neg {
+		i = -(chkOvf.SignedIntV(ui + 1))
+	} else {
+		i = chkOvf.SignedIntV(ui)
+	}
+	d.bdRead = false
+	return
+}
+
+func (d *cborDecDriver) DecodeUint64() (ui uint64) {
+	if d.decCheckInteger() {
+		d.d.errorf("assigning negative signed value to unsigned type")
+		return
+	}
+	ui = d.decUint()
+	d.bdRead = false
+	return
+}
+
+func (d *cborDecDriver) DecodeFloat64() (f float64) {
+	if !d.bdRead {
+		d.readNextBd()
+	}
+	if bd := d.bd; bd == cborBdFloat16 {
+		f = float64(math.Float32frombits(halfFloatToFloatBits(bigen.Uint16(d.r.readx(2)))))
+	} else if bd == cborBdFloat32 {
+		f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4))))
+	} else if bd == cborBdFloat64 {
+		f = math.Float64frombits(bigen.Uint64(d.r.readx(8)))
+	} else if bd >= cborBaseUint && bd < cborBaseBytes {
+		f = float64(d.DecodeInt64())
+	} else {
+		d.d.errorf("float only valid from float16/32/64 - invalid descriptor %x/%s", bd, cbordesc(bd))
+		return
+	}
+	d.bdRead = false
+	return
+}
+
+// bool can be decoded from bool only (single byte).
+func (d *cborDecDriver) DecodeBool() (b bool) {
+	if !d.bdRead {
+		d.readNextBd()
+	}
+	if bd := d.bd; bd == cborBdTrue {
+		b = true
+	} else if bd == cborBdFalse {
+	} else {
+		d.d.errorf("not bool - %s %x/%s", msgBadDesc, d.bd, cbordesc(d.bd))
+		return
+	}
+	d.bdRead = false
+	return
+}
+
+func (d *cborDecDriver) ReadMapStart() (length int) {
+	if !d.bdRead {
+		d.readNextBd()
+	}
+	d.bdRead = false
+	if d.bd == cborBdIndefiniteMap {
+		return -1
+	}
+	return d.decLen()
+}
+
+func (d *cborDecDriver) ReadArrayStart() (length int) {
+	if !d.bdRead {
+		d.readNextBd()
+	}
+	d.bdRead = false
+	if d.bd == cborBdIndefiniteArray {
+		return -1
+	}
+	return d.decLen()
+}
+
+func (d *cborDecDriver) decLen() int {
+	return int(d.decUint())
+}
+
+func (d *cborDecDriver) decAppendIndefiniteBytes(bs []byte) []byte {
+	d.bdRead = false
+	for {
+		if d.CheckBreak() {
+			break
+		}
+		if major := d.bd >> 5; major != cborMajorBytes && major != cborMajorText {
+			d.d.errorf("expect bytes/string major type in indefinite string/bytes;"+
+				" got major %v from descriptor %x/%s", major, d.bd, cbordesc(d.bd))
+			return nil
+		}
+		n := d.decLen()
+		oldLen := len(bs)
+		newLen := oldLen + n
+		if newLen > cap(bs) {
+			bs2 := make([]byte, newLen, 2*cap(bs)+n)
+			copy(bs2, bs)
+			bs = bs2
+		} else {
+			bs = bs[:newLen]
+		}
+		d.r.readb(bs[oldLen:newLen])
+		// bs = append(bs, d.r.readn()...)
+		d.bdRead = false
+	}
+	d.bdRead = false
+	return bs
+}
+
+func (d *cborDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) {
+	if !d.bdRead {
+		d.readNextBd()
+	}
+	if d.bd == cborBdNil || d.bd == cborBdUndefined {
+		d.bdRead = false
+		return nil
+	}
+	if d.bd == cborBdIndefiniteBytes || d.bd == cborBdIndefiniteString {
+		d.bdRead = false
+		if bs == nil {
+			if zerocopy {
+				return d.decAppendIndefiniteBytes(d.d.b[:0])
+			}
+			return d.decAppendIndefiniteBytes(zeroByteSlice)
+		}
+		return d.decAppendIndefiniteBytes(bs[:0])
+	}
+	// check if an "array" of uint8's (see ContainerType for how to infer if an array)
+	if d.bd == cborBdIndefiniteArray || (d.bd >= cborBaseArray && d.bd < cborBaseMap) {
+		bsOut, _ = fastpathTV.DecSliceUint8V(bs, true, d.d)
+		return
+	}
+	clen := d.decLen()
+	d.bdRead = false
+	if zerocopy {
+		if d.br {
+			return d.r.readx(clen)
+		} else if len(bs) == 0 {
+			bs = d.d.b[:]
+		}
+	}
+	return decByteSlice(d.r, clen, d.h.MaxInitLen, bs)
+}
+
+func (d *cborDecDriver) DecodeString() (s string) {
+	return string(d.DecodeBytes(d.d.b[:], true))
+}
+
+func (d *cborDecDriver) DecodeStringAsBytes() (s []byte) {
+	return d.DecodeBytes(d.d.b[:], true)
+}
+
+func (d *cborDecDriver) DecodeTime() (t time.Time) {
+	if !d.bdRead {
+		d.readNextBd()
+	}
+	if d.bd == cborBdNil || d.bd == cborBdUndefined {
+		d.bdRead = false
+		return
+	}
+	xtag := d.decUint()
+	d.bdRead = false
+	return d.decodeTime(xtag)
+}
+
+func (d *cborDecDriver) decodeTime(xtag uint64) (t time.Time) {
+	if !d.bdRead {
+		d.readNextBd()
+	}
+	switch xtag {
+	case 0:
+		var err error
+		if t, err = time.Parse(time.RFC3339, stringView(d.DecodeStringAsBytes())); err != nil {
+			d.d.errorv(err)
+		}
+	case 1:
+		// decode an int64 or a float, and infer time.Time from there.
+		// for floats, round to microseconds, as that is what is guaranteed to fit well.
+		switch {
+		case d.bd == cborBdFloat16, d.bd == cborBdFloat32:
+			f1, f2 := math.Modf(d.DecodeFloat64())
+			t = time.Unix(int64(f1), int64(f2*1e9))
+		case d.bd == cborBdFloat64:
+			f1, f2 := math.Modf(d.DecodeFloat64())
+			t = time.Unix(int64(f1), int64(f2*1e9))
+		case d.bd >= cborBaseUint && d.bd < cborBaseNegInt,
+			d.bd >= cborBaseNegInt && d.bd < cborBaseBytes:
+			t = time.Unix(d.DecodeInt64(), 0)
+		default:
+			d.d.errorf("time.Time can only be decoded from a number (or RFC3339 string)")
+		}
+	default:
+		d.d.errorf("invalid tag for time.Time - expecting 0 or 1, got 0x%x", xtag)
+	}
+	t = t.UTC().Round(time.Microsecond)
+	return
+}
+
+func (d *cborDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
+	if !d.bdRead {
+		d.readNextBd()
+	}
+	u := d.decUint()
+	d.bdRead = false
+	realxtag = u
+	if ext == nil {
+		re := rv.(*RawExt)
+		re.Tag = realxtag
+		d.d.decode(&re.Value)
+	} else if xtag != realxtag {
+		d.d.errorf("wrong extension tag - got %v, expecting %v", realxtag, xtag)
+		return
+	} else {
+		var v interface{}
+		d.d.decode(&v)
+		ext.UpdateExt(rv, v)
+	}
+	d.bdRead = false
+	return
+}
+
+func (d *cborDecDriver) DecodeNaked() {
+	if !d.bdRead {
+		d.readNextBd()
+	}
+
+	n := d.d.n
+	var decodeFurther bool
+
+	switch d.bd {
+	case cborBdNil:
+		n.v = valueTypeNil
+	case cborBdFalse:
+		n.v = valueTypeBool
+		n.b = false
+	case cborBdTrue:
+		n.v = valueTypeBool
+		n.b = true
+	case cborBdFloat16, cborBdFloat32, cborBdFloat64:
+		n.v = valueTypeFloat
+		n.f = d.DecodeFloat64()
+	case cborBdIndefiniteBytes:
+		n.v = valueTypeBytes
+		n.l = d.DecodeBytes(nil, false)
+	case cborBdIndefiniteString:
+		n.v = valueTypeString
+		n.s = d.DecodeString()
+	case cborBdIndefiniteArray:
+		n.v = valueTypeArray
+		decodeFurther = true
+	case cborBdIndefiniteMap:
+		n.v = valueTypeMap
+		decodeFurther = true
+	default:
+		switch {
+		case d.bd >= cborBaseUint && d.bd < cborBaseNegInt:
+			if d.h.SignedInteger {
+				n.v = valueTypeInt
+				n.i = d.DecodeInt64()
+			} else {
+				n.v = valueTypeUint
+				n.u = d.DecodeUint64()
+			}
+		case d.bd >= cborBaseNegInt && d.bd < cborBaseBytes:
+			n.v = valueTypeInt
+			n.i = d.DecodeInt64()
+		case d.bd >= cborBaseBytes && d.bd < cborBaseString:
+			n.v = valueTypeBytes
+			n.l = d.DecodeBytes(nil, false)
+		case d.bd >= cborBaseString && d.bd < cborBaseArray:
+			n.v = valueTypeString
+			n.s = d.DecodeString()
+		case d.bd >= cborBaseArray && d.bd < cborBaseMap:
+			n.v = valueTypeArray
+			decodeFurther = true
+		case d.bd >= cborBaseMap && d.bd < cborBaseTag:
+			n.v = valueTypeMap
+			decodeFurther = true
+		case d.bd >= cborBaseTag && d.bd < cborBaseSimple:
+			n.v = valueTypeExt
+			n.u = d.decUint()
+			n.l = nil
+			if n.u == 0 || n.u == 1 {
+				d.bdRead = false
+				n.v = valueTypeTime
+				n.t = d.decodeTime(n.u)
+			}
+			// d.bdRead = false
+			// d.d.decode(&re.Value) // handled by decode itself.
+			// decodeFurther = true
+		default:
+			d.d.errorf("decodeNaked: Unrecognized d.bd: 0x%x", d.bd)
+			return
+		}
+	}
+
+	if !decodeFurther {
+		d.bdRead = false
+	}
+	return
+}
+
+// -------------------------
+
+// CborHandle is a Handle for the CBOR encoding format,
+// defined at http://tools.ietf.org/html/rfc7049 and documented further at http://cbor.io .
+//
+// CBOR is comprehensively supported, including support for:
+//   - indefinite-length arrays/maps/bytes/strings
+//   - (extension) tags in range 0..0xffff (0 .. 65535)
+//   - half, single and double-precision floats
+//   - all numbers (1, 2, 4 and 8-byte signed and unsigned integers)
+//   - nil, true, false, ...
+//   - arrays and maps, bytes and text strings
+//
+// None of the optional extensions (with tags) defined in the spec are supported out-of-the-box.
+// Users can implement them as needed (using SetExt), including spec-documented ones:
+//   - timestamp, BigNum, BigFloat, Decimals,
+//   - Encoded Text (e.g. URL, regexp, base64, MIME Message), etc.
+type CborHandle struct {
+	binaryEncodingType
+	noElemSeparators
+	BasicHandle
+
+	// IndefiniteLength=true means that we encode using indefinite lengths
+	IndefiniteLength bool
+
+	// TimeRFC3339 says to encode time.Time using RFC3339 format.
+	// If unset, we encode time.Time using seconds past epoch.
+	TimeRFC3339 bool
+
+	// _ [1]uint64 // padding
+}
+
+// Name returns the name of the handle: cbor
+func (h *CborHandle) Name() string { return "cbor" }
+
+// SetInterfaceExt sets an extension
+func (h *CborHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) {
+	return h.SetExt(rt, tag, &extWrapper{bytesExtFailer{}, ext})
+}
+
+func (h *CborHandle) newEncDriver(e *Encoder) encDriver {
+	return &cborEncDriver{e: e, w: e.w, h: h}
+}
+
+func (h *CborHandle) newDecDriver(d *Decoder) decDriver {
+	return &cborDecDriver{d: d, h: h, r: d.r, br: d.bytes}
+}
+
+func (e *cborEncDriver) reset() {
+	e.w = e.e.w
+}
+
+func (d *cborDecDriver) reset() {
+	d.r, d.br = d.d.r, d.d.bytes
+	d.bd, d.bdRead = 0, false
+}
+
+var _ decDriver = (*cborDecDriver)(nil)
+var _ encDriver = (*cborEncDriver)(nil)
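+
+// Usage sketch (illustrative; uses only this package's public
+// NewEncoderBytes / NewDecoderBytes constructors):
+//
+//	var h CborHandle
+//	h.TimeRFC3339 = true // encode time.Time via tag 0 (RFC3339 string)
+//	var buf []byte
+//	if err := NewEncoderBytes(&buf, &h).Encode(map[string]int{"a": 1}); err != nil {
+//		// handle error
+//	}
+//	var out map[string]int
+//	if err := NewDecoderBytes(buf, &h).Decode(&out); err != nil {
+//		// handle error
+//	}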
diff --git a/vendor/github.com/ugorji/go/codec/decode.go b/vendor/github.com/ugorji/go/codec/decode.go
new file mode 100644
index 0000000..1c0817a
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/decode.go
@@ -0,0 +1,2552 @@
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+	"encoding"
+	"errors"
+	"fmt"
+	"io"
+	"reflect"
+	"strconv"
+	"sync"
+	"time"
+)
+
+// Some tagging information for error messages.
+const (
+	msgBadDesc            = "unrecognized descriptor byte"
+	msgDecCannotExpandArr = "cannot expand go array from %v to stream length: %v"
+)
+
+const decDefSliceCap = 8
+const decDefChanCap = 64 // should be large, as cap cannot be expanded
+const decScratchByteArrayLen = cacheLineSize - 8
+
+var (
+	errstrOnlyMapOrArrayCanDecodeIntoStruct = "only encoded map or array can be decoded into a struct"
+	errstrCannotDecodeIntoNil               = "cannot decode into nil"
+
+	errmsgExpandSliceOverflow     = "expand slice: slice overflow"
+	errmsgExpandSliceCannotChange = "expand slice: cannot change"
+
+	errDecoderNotInitialized = errors.New("Decoder not initialized")
+
+	errDecUnreadByteNothingToRead   = errors.New("cannot unread - nothing has been read")
+	errDecUnreadByteLastByteNotRead = errors.New("cannot unread - last byte has not been read")
+	errDecUnreadByteUnknown         = errors.New("cannot unread - reason unknown")
+)
+
+// decReader abstracts the reading source, allowing implementations that can
+// read from an io.Reader or directly off a byte slice with zero-copying.
+type decReader interface {
+	unreadn1()
+
+	// readx will use the implementation scratch buffer if possible i.e. n < len(scratchbuf), OR
+	// just return a view of the []byte being decoded from.
+	// Ensure you call detachZeroCopyBytes later if this needs to be sent outside codec control.
+	readx(n int) []byte
+	readb([]byte)
+	readn1() uint8
+	numread() int // number of bytes read
+	track()
+	stopTrack() []byte
+
+	// skip will skip any byte that matches, and return the first non-matching byte
+	skip(accept *bitset256) (token byte)
+	// readTo will read any byte that matches, stopping once no-longer matching.
+	readTo(in []byte, accept *bitset256) (out []byte)
+	// readUntil will read, only stopping once it matches the 'stop' byte.
+	readUntil(in []byte, stop byte) (out []byte)
+}
+
+type decDriver interface {
+	// this will check if the next token is a break.
+	CheckBreak() bool
+	// Note: TryDecodeAsNil should be careful not to share any temporary []byte with
+	// the rest of the decDriver. This is because sometimes, we optimize by holding onto
+	// a transient []byte, and ensuring the only other call we make to the decDriver
+	// during that time is maybe a TryDecodeAsNil() call.
+	TryDecodeAsNil() bool
+	// vt is one of: Bytes, String, Nil, Slice or Map. Return unSet if not known.
+	ContainerType() (vt valueType)
+	// IsBuiltinType(rt uintptr) bool
+
+	// DecodeNaked will decode primitives (number, bool, string, []byte) and RawExt.
+	// For maps and arrays, it will not do the decoding in-band, but will signal
+	// the decoder, so that is done later, by setting the decNaked.valueType field.
+	//
+	// Note: Numbers are decoded as int64, uint64, float64 only (no smaller sized number types).
+	// for extensions, DecodeNaked must read the tag and the []byte if it exists.
+	// if the []byte is not read, then kInterfaceNaked will treat it as a Handle
+	// that stores the subsequent value in-band, and complete reading the RawExt.
+	//
+	// extensions should also use readx to decode them, for efficiency.
+	// kInterface will extract the detached byte slice if it has to pass it outside its realm.
+	DecodeNaked()
+
+	// Deprecated: use DecodeInt64 and DecodeUint64 instead
+	// DecodeInt(bitsize uint8) (i int64)
+	// DecodeUint(bitsize uint8) (ui uint64)
+
+	DecodeInt64() (i int64)
+	DecodeUint64() (ui uint64)
+
+	DecodeFloat64() (f float64)
+	DecodeBool() (b bool)
+	// DecodeString can also decode symbols.
+	// It looks redundant as DecodeBytes is available.
+	// However, some codecs (e.g. binc) support symbols and can
+	// return a pre-stored string value, meaning that it can bypass
+	// the cost of []byte->string conversion.
+	DecodeString() (s string)
+	DecodeStringAsBytes() (v []byte)
+
+	// DecodeBytes may be called directly, without going through reflection.
+	// Consequently, it must be designed to handle possible nil.
+	DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte)
+	// DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte)
+
+	// decodeExt will decode into a *RawExt or into an extension.
+	DecodeExt(v interface{}, xtag uint64, ext Ext) (realxtag uint64)
+	// decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte)
+
+	DecodeTime() (t time.Time)
+
+	ReadArrayStart() int
+	ReadArrayElem()
+	ReadArrayEnd()
+	ReadMapStart() int
+	ReadMapElemKey()
+	ReadMapElemValue()
+	ReadMapEnd()
+
+	reset()
+	uncacheRead()
+}
+
+type decDriverNoopContainerReader struct{}
+
+func (x decDriverNoopContainerReader) ReadArrayStart() (v int) { return }
+func (x decDriverNoopContainerReader) ReadArrayElem()          {}
+func (x decDriverNoopContainerReader) ReadArrayEnd()           {}
+func (x decDriverNoopContainerReader) ReadMapStart() (v int)   { return }
+func (x decDriverNoopContainerReader) ReadMapElemKey()         {}
+func (x decDriverNoopContainerReader) ReadMapElemValue()       {}
+func (x decDriverNoopContainerReader) ReadMapEnd()             {}
+func (x decDriverNoopContainerReader) CheckBreak() (v bool)    { return }
+
+// func (x decNoSeparator) uncacheRead() {}
+
+// DecodeOptions captures configuration options during decode.
+type DecodeOptions struct {
+	// MapType specifies type to use during schema-less decoding of a map in the stream.
+	// If nil (unset), we default to map[string]interface{} iff json handle and MapStringAsKey=true,
+	// else map[interface{}]interface{}.
+	MapType reflect.Type
+
+	// SliceType specifies type to use during schema-less decoding of an array in the stream.
+	// If nil (unset), we default to []interface{} for all formats.
+	SliceType reflect.Type
+
+	// MaxInitLen defines the maximum initial length that we "make" a collection
+	// (string, slice, map, chan). If 0 or negative, we default to a sensible value
+	// based on the size of an element in the collection.
+	//
+	// For example, when decoding, a stream may say that it has 2^64 elements.
+	// We should not automatically provision a slice of that size, to prevent an Out-Of-Memory crash.
+	// Instead, we provision up to MaxInitLen, fill that up, and start appending after that.
+	MaxInitLen int
+
+	// ReaderBufferSize is the size of the buffer used when reading.
+	//
+	// if > 0, we use a smart buffer internally for performance purposes.
+	ReaderBufferSize int
+
+	// If ErrorIfNoField, return an error when decoding a map
+	// from a codec stream into a struct, and no matching struct field is found.
+	ErrorIfNoField bool
+
+	// If ErrorIfNoArrayExpand, return an error when decoding a slice/array that cannot be expanded.
+	// For example, the stream contains an array of 8 items, but you are decoding into a [4]T array,
+	// or you are decoding into a slice of length 4 which is non-addressable (and so cannot be set).
+	ErrorIfNoArrayExpand bool
+
+	// If SignedInteger, use int64 (not uint64) during schema-less decoding of unsigned values.
+	SignedInteger bool
+
+	// MapValueReset controls how we decode into a map value.
+	//
+	// By default, we MAY retrieve the mapping for a key, and then decode into that.
+	// However, especially with big maps, that retrieval may be expensive and unnecessary
+	// if the stream already contains all that is necessary to recreate the value.
+	//
+	// If true, we will never retrieve the previous mapping,
+	// but rather decode into a new value and set that in the map.
+	//
+	// If false, we will retrieve the previous mapping if necessary e.g.
+	// the previous mapping is a pointer, or is a struct or array with pre-set state,
+	// or is an interface.
+	MapValueReset bool
+
+	// SliceElementReset: on decoding a slice, reset the element to a zero value first.
+	//
+	// concern: if the slice already contained some garbage, we will decode into that garbage.
+	SliceElementReset bool
+
+	// InterfaceReset controls how we decode into an interface.
+	//
+	// By default, when we see a field that is an interface{...},
+	// or a map with interface{...} value, we will attempt decoding into the
+	// "contained" value.
+	//
+	// However, this prevents us from reading a string into an interface{}
+	// that formerly contained a number.
+	//
+	// If true, we will decode into a new "blank" value, and set that in the interface.
+	// If false, we will decode into whatever is contained in the interface.
+	InterfaceReset bool
+
+	// InternString controls interning of strings during decoding.
+	//
+	// Some handles, e.g. json, typically will read map keys as strings.
+	// If the set of keys is finite, it may help reduce allocation by
+	// looking them up from a map (rather than allocating them afresh).
+	//
+	// Note: Handles will be smart when using the intern functionality.
+	// Not every string should be interned.
+	// An excellent use-case for interning is struct field names,
+	// or map keys where key type is string.
+	InternString bool
+
+	// PreferArrayOverSlice controls whether to decode to an array or a slice.
+	//
+	// This only impacts decoding into a nil interface{}.
+	// Consequently, it has no effect on codecgen.
+	//
+	// *Note*: This only applies if using go1.5 and above,
+	// as it requires reflect.ArrayOf support which was absent before go1.5.
+	PreferArrayOverSlice bool
+
+	// DeleteOnNilMapValue controls how to decode a nil value in the stream.
+	//
+	// If true, we will delete the mapping of the key.
+	// Else, just set the mapping to the zero value of the type.
+	DeleteOnNilMapValue bool
+}
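+
+// DecodeOptions is embedded in BasicHandle, which concrete handles embed
+// in turn, so options are set directly on a handle. A small sketch
+// (the field choices here are illustrative):
+//
+//	var h CborHandle
+//	h.MaxInitLen = 1024     // cap initial collection provisioning
+//	h.ErrorIfNoField = true // error on unknown struct fields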
+
+// ------------------------------------
+
+type bufioDecReader struct {
+	buf []byte
+	r   io.Reader
+
+	c   int // cursor
+	n   int // num read
+	err error
+
+	tr  []byte
+	trb bool
+	b   [4]byte
+}
+
+func (z *bufioDecReader) reset(r io.Reader) {
+	z.r, z.c, z.n, z.err, z.trb = r, 0, 0, nil, false
+	if z.tr != nil {
+		z.tr = z.tr[:0]
+	}
+}
+
+func (z *bufioDecReader) Read(p []byte) (n int, err error) {
+	if z.err != nil {
+		return 0, z.err
+	}
+	p0 := p
+	n = copy(p, z.buf[z.c:])
+	z.c += n
+	if z.c == len(z.buf) {
+		z.c = 0
+	}
+	z.n += n
+	if len(p) == n {
+		if z.c == 0 {
+			z.buf = z.buf[:1]
+			z.buf[0] = p[len(p)-1]
+			z.c = 1
+		}
+		if z.trb {
+			z.tr = append(z.tr, p0[:n]...)
+		}
+		return
+	}
+	p = p[n:]
+	var n2 int
+	// if we are here, then z.buf is all read
+	if len(p) > len(z.buf) {
+		n2, err = decReadFull(z.r, p)
+		n += n2
+		z.n += n2
+		z.err = err
+		// don't return EOF if some bytes were read. keep for next time.
+		if n > 0 && err == io.EOF {
+			err = nil
+		}
+		// always keep last byte in z.buf
+		z.buf = z.buf[:1]
+		z.buf[0] = p[len(p)-1]
+		z.c = 1
+		if z.trb {
+			z.tr = append(z.tr, p0[:n]...)
+		}
+		return
+	}
+	// z.c is now 0, and len(p) <= len(z.buf)
+	for len(p) > 0 && z.err == nil {
+		// println("len(p) loop starting ... ")
+		z.c = 0
+		z.buf = z.buf[0:cap(z.buf)]
+		n2, err = z.r.Read(z.buf)
+		if n2 > 0 {
+			if err == io.EOF {
+				err = nil
+			}
+			z.buf = z.buf[:n2]
+			n2 = copy(p, z.buf)
+			z.c = n2
+			n += n2
+			z.n += n2
+			p = p[n2:]
+		}
+		z.err = err
+		// println("... len(p) loop done")
+	}
+	if z.c == 0 {
+		z.buf = z.buf[:1]
+		z.buf[0] = p[len(p)-1]
+		z.c = 1
+	}
+	if z.trb {
+		z.tr = append(z.tr, p0[:n]...)
+	}
+	return
+}
+
+func (z *bufioDecReader) ReadByte() (b byte, err error) {
+	z.b[0] = 0
+	_, err = z.Read(z.b[:1])
+	b = z.b[0]
+	return
+}
+
+func (z *bufioDecReader) UnreadByte() (err error) {
+	if z.err != nil {
+		return z.err
+	}
+	if z.c > 0 {
+		z.c--
+		z.n--
+		if z.trb {
+			z.tr = z.tr[:len(z.tr)-1]
+		}
+		return
+	}
+	return errDecUnreadByteNothingToRead
+}
+
+func (z *bufioDecReader) numread() int {
+	return z.n
+}
+
+func (z *bufioDecReader) readx(n int) (bs []byte) {
+	if n <= 0 || z.err != nil {
+		return
+	}
+	if z.c+n <= len(z.buf) {
+		bs = z.buf[z.c : z.c+n]
+		z.n += n
+		z.c += n
+		if z.trb {
+			z.tr = append(z.tr, bs...)
+		}
+		return
+	}
+	bs = make([]byte, n)
+	_, err := z.Read(bs)
+	if err != nil {
+		panic(err)
+	}
+	return
+}
+
+func (z *bufioDecReader) readb(bs []byte) {
+	_, err := z.Read(bs)
+	if err != nil {
+		panic(err)
+	}
+}
+
+// func (z *bufioDecReader) readn1eof() (b uint8, eof bool) {
+// 	b, err := z.ReadByte()
+// 	if err != nil {
+// 		if err == io.EOF {
+// 			eof = true
+// 		} else {
+// 			panic(err)
+// 		}
+// 	}
+// 	return
+// }
+
+func (z *bufioDecReader) readn1() (b uint8) {
+	b, err := z.ReadByte()
+	if err != nil {
+		panic(err)
+	}
+	return
+}
+
+func (z *bufioDecReader) search(in []byte, accept *bitset256, stop, flag uint8) (token byte, out []byte) {
+	// flag: 1 (skip), 2 (readTo), 4 (readUntil)
+	if flag == 4 {
+		for i := z.c; i < len(z.buf); i++ {
+			if z.buf[i] == stop {
+				token = z.buf[i]
+				z.n = z.n + (i - z.c) - 1
+				i++
+				out = z.buf[z.c:i]
+				if z.trb {
+					z.tr = append(z.tr, z.buf[z.c:i]...)
+				}
+				z.c = i
+				return
+			}
+		}
+	} else {
+		for i := z.c; i < len(z.buf); i++ {
+			if !accept.isset(z.buf[i]) {
+				token = z.buf[i]
+				z.n = z.n + (i - z.c) - 1
+				if flag == 1 {
+					i++
+				} else {
+					out = z.buf[z.c:i]
+				}
+				if z.trb {
+					z.tr = append(z.tr, z.buf[z.c:i]...)
+				}
+				z.c = i
+				return
+			}
+		}
+	}
+	z.n += len(z.buf) - z.c
+	if flag != 1 {
+		out = append(in, z.buf[z.c:]...)
+	}
+	if z.trb {
+		z.tr = append(z.tr, z.buf[z.c:]...)
+	}
+	var n2 int
+	if z.err != nil {
+		return
+	}
+	for {
+		z.c = 0
+		z.buf = z.buf[0:cap(z.buf)]
+		n2, z.err = z.r.Read(z.buf)
+		if n2 > 0 && z.err != nil {
+			z.err = nil
+		}
+		z.buf = z.buf[:n2]
+		if flag == 4 {
+			for i := 0; i < n2; i++ {
+				if z.buf[i] == stop {
+					token = z.buf[i]
+					z.n += i - 1
+					i++
+					out = append(out, z.buf[z.c:i]...)
+					if z.trb {
+						z.tr = append(z.tr, z.buf[z.c:i]...)
+					}
+					z.c = i
+					return
+				}
+			}
+		} else {
+			for i := 0; i < n2; i++ {
+				if !accept.isset(z.buf[i]) {
+					token = z.buf[i]
+					z.n += i - 1
+					if flag == 1 {
+						i++
+					}
+					if flag != 1 {
+						out = append(out, z.buf[z.c:i]...)
+					}
+					if z.trb {
+						z.tr = append(z.tr, z.buf[z.c:i]...)
+					}
+					z.c = i
+					return
+				}
+			}
+		}
+		if flag != 1 {
+			out = append(out, z.buf[:n2]...)
+		}
+		z.n += n2
+		if z.err != nil {
+			return
+		}
+		if z.trb {
+			z.tr = append(z.tr, z.buf[:n2]...)
+		}
+	}
+}
+
+func (z *bufioDecReader) skip(accept *bitset256) (token byte) {
+	token, _ = z.search(nil, accept, 0, 1)
+	return
+}
+
+func (z *bufioDecReader) readTo(in []byte, accept *bitset256) (out []byte) {
+	_, out = z.search(in, accept, 0, 2)
+	return
+}
+
+func (z *bufioDecReader) readUntil(in []byte, stop byte) (out []byte) {
+	_, out = z.search(in, nil, stop, 4)
+	return
+}
+
+func (z *bufioDecReader) unreadn1() {
+	err := z.UnreadByte()
+	if err != nil {
+		panic(err)
+	}
+}
+
+func (z *bufioDecReader) track() {
+	if z.tr != nil {
+		z.tr = z.tr[:0]
+	}
+	z.trb = true
+}
+
+func (z *bufioDecReader) stopTrack() (bs []byte) {
+	z.trb = false
+	return z.tr
+}
+
+// ioDecReader is a decReader that reads off an io.Reader.
+//
+// It also has a fallback implementation of ByteScanner if needed.
+type ioDecReader struct {
+	r io.Reader // the reader passed in
+
+	rr io.Reader
+	br io.ByteScanner
+
+	l   byte // last byte
+	ls  byte // last byte status. 0: init-canDoNothing, 1: canRead, 2: canUnread
+	trb bool // tracking bytes turned on
+	_   bool
+	b   [4]byte // tiny buffer for reading single bytes
+
+	x  [scratchByteArrayLen]byte // for: get struct field name, swallow valueTypeBytes, etc
+	n  int                       // num read
+	tr []byte                    // tracking bytes read
+}
+
+func (z *ioDecReader) reset(r io.Reader) {
+	z.r = r
+	z.rr = r
+	z.l, z.ls, z.n, z.trb = 0, 0, 0, false
+	if z.tr != nil {
+		z.tr = z.tr[:0]
+	}
+	var ok bool
+	if z.br, ok = r.(io.ByteScanner); !ok {
+		z.br = z
+		z.rr = z
+	}
+}
+
+func (z *ioDecReader) Read(p []byte) (n int, err error) {
+	if len(p) == 0 {
+		return
+	}
+	var firstByte bool
+	if z.ls == 1 {
+		z.ls = 2
+		p[0] = z.l
+		if len(p) == 1 {
+			n = 1
+			return
+		}
+		firstByte = true
+		p = p[1:]
+	}
+	n, err = z.r.Read(p)
+	if n > 0 {
+		if err == io.EOF && n == len(p) {
+			err = nil // read was successful, so postpone EOF (till next time)
+		}
+		z.l = p[n-1]
+		z.ls = 2
+	}
+	if firstByte {
+		n++
+	}
+	return
+}
+
+func (z *ioDecReader) ReadByte() (c byte, err error) {
+	n, err := z.Read(z.b[:1])
+	if n == 1 {
+		c = z.b[0]
+		if err == io.EOF {
+			err = nil // read was successful, so postpone EOF (till next time)
+		}
+	}
+	return
+}
+
+func (z *ioDecReader) UnreadByte() (err error) {
+	switch z.ls {
+	case 2:
+		z.ls = 1
+	case 0:
+		err = errDecUnreadByteNothingToRead
+	case 1:
+		err = errDecUnreadByteLastByteNotRead
+	default:
+		err = errDecUnreadByteUnknown
+	}
+	return
+}
+
+func (z *ioDecReader) numread() int {
+	return z.n
+}
+
+func (z *ioDecReader) readx(n int) (bs []byte) {
+	if n <= 0 {
+		return
+	}
+	if n < len(z.x) {
+		bs = z.x[:n]
+	} else {
+		bs = make([]byte, n)
+	}
+	if _, err := decReadFull(z.rr, bs); err != nil {
+		panic(err)
+	}
+	z.n += len(bs)
+	if z.trb {
+		z.tr = append(z.tr, bs...)
+	}
+	return
+}
+
+func (z *ioDecReader) readb(bs []byte) {
+	// if len(bs) == 0 {
+	// 	return
+	// }
+	if _, err := decReadFull(z.rr, bs); err != nil {
+		panic(err)
+	}
+	z.n += len(bs)
+	if z.trb {
+		z.tr = append(z.tr, bs...)
+	}
+}
+
+func (z *ioDecReader) readn1eof() (b uint8, eof bool) {
+	b, err := z.br.ReadByte()
+	if err == nil {
+		z.n++
+		if z.trb {
+			z.tr = append(z.tr, b)
+		}
+	} else if err == io.EOF {
+		eof = true
+	} else {
+		panic(err)
+	}
+	return
+}
+
+func (z *ioDecReader) readn1() (b uint8) {
+	var err error
+	if b, err = z.br.ReadByte(); err == nil {
+		z.n++
+		if z.trb {
+			z.tr = append(z.tr, b)
+		}
+		return
+	}
+	panic(err)
+}
+
+func (z *ioDecReader) skip(accept *bitset256) (token byte) {
+	for {
+		var eof bool
+		token, eof = z.readn1eof()
+		if eof {
+			return
+		}
+		if accept.isset(token) {
+			continue
+		}
+		return
+	}
+}
+
+func (z *ioDecReader) readTo(in []byte, accept *bitset256) (out []byte) {
+	out = in
+	for {
+		token, eof := z.readn1eof()
+		if eof {
+			return
+		}
+		if accept.isset(token) {
+			out = append(out, token)
+		} else {
+			z.unreadn1()
+			return
+		}
+	}
+}
+
+func (z *ioDecReader) readUntil(in []byte, stop byte) (out []byte) {
+	out = in
+	for {
+		token, eof := z.readn1eof()
+		if eof {
+			panic(io.EOF)
+		}
+		out = append(out, token)
+		if token == stop {
+			return
+		}
+	}
+}
+
+func (z *ioDecReader) unreadn1() {
+	err := z.br.UnreadByte()
+	if err != nil {
+		panic(err)
+	}
+	z.n--
+	if z.trb {
+		if l := len(z.tr) - 1; l >= 0 {
+			z.tr = z.tr[:l]
+		}
+	}
+}
+
+func (z *ioDecReader) track() {
+	if z.tr != nil {
+		z.tr = z.tr[:0]
+	}
+	z.trb = true
+}
+
+func (z *ioDecReader) stopTrack() (bs []byte) {
+	z.trb = false
+	return z.tr
+}
+
+// ------------------------------------
+
+var errBytesDecReaderCannotUnread = errors.New("cannot unread last byte read")
+
+// bytesDecReader is a decReader that reads off a byte slice with zero copying
+type bytesDecReader struct {
+	b []byte // data
+	c int    // cursor
+	a int    // available
+	t int    // track start
+}
+
+func (z *bytesDecReader) reset(in []byte) {
+	z.b = in
+	z.a = len(in)
+	z.c = 0
+	z.t = 0
+}
+
+func (z *bytesDecReader) numread() int {
+	return z.c
+}
+
+func (z *bytesDecReader) unreadn1() {
+	if z.c == 0 || len(z.b) == 0 {
+		panic(errBytesDecReaderCannotUnread)
+	}
+	z.c--
+	z.a++
+	return
+}
+
+func (z *bytesDecReader) readx(n int) (bs []byte) {
+	// slicing from a non-constant start position is more expensive,
+	// as more computation is required to decipher the pointer start position.
+	// However, we do it only once, and it's better than reslicing both z.b and return value.
+
+	if n <= 0 {
+	} else if z.a == 0 {
+		panic(io.EOF)
+	} else if n > z.a {
+		panic(io.ErrUnexpectedEOF)
+	} else {
+		c0 := z.c
+		z.c = c0 + n
+		z.a = z.a - n
+		bs = z.b[c0:z.c]
+	}
+	return
+}
+
+func (z *bytesDecReader) readb(bs []byte) {
+	copy(bs, z.readx(len(bs)))
+}
+
+func (z *bytesDecReader) readn1() (v uint8) {
+	if z.a == 0 {
+		panic(io.EOF)
+	}
+	v = z.b[z.c]
+	z.c++
+	z.a--
+	return
+}
+
+// func (z *bytesDecReader) readn1eof() (v uint8, eof bool) {
+// 	if z.a == 0 {
+// 		eof = true
+// 		return
+// 	}
+// 	v = z.b[z.c]
+// 	z.c++
+// 	z.a--
+// 	return
+// }
+
+func (z *bytesDecReader) skip(accept *bitset256) (token byte) {
+	if z.a == 0 {
+		return
+	}
+	blen := len(z.b)
+	for i := z.c; i < blen; i++ {
+		if !accept.isset(z.b[i]) {
+			token = z.b[i]
+			i++
+			z.a -= (i - z.c)
+			z.c = i
+			return
+		}
+	}
+	z.a, z.c = 0, blen
+	return
+}
+
+func (z *bytesDecReader) readTo(_ []byte, accept *bitset256) (out []byte) {
+	if z.a == 0 {
+		return
+	}
+	blen := len(z.b)
+	for i := z.c; i < blen; i++ {
+		if !accept.isset(z.b[i]) {
+			out = z.b[z.c:i]
+			z.a -= (i - z.c)
+			z.c = i
+			return
+		}
+	}
+	out = z.b[z.c:]
+	z.a, z.c = 0, blen
+	return
+}
+
+func (z *bytesDecReader) readUntil(_ []byte, stop byte) (out []byte) {
+	if z.a == 0 {
+		panic(io.EOF)
+	}
+	blen := len(z.b)
+	for i := z.c; i < blen; i++ {
+		if z.b[i] == stop {
+			i++
+			out = z.b[z.c:i]
+			z.a -= (i - z.c)
+			z.c = i
+			return
+		}
+	}
+	z.a, z.c = 0, blen
+	panic(io.EOF)
+}
+
+func (z *bytesDecReader) track() {
+	z.t = z.c
+}
+
+func (z *bytesDecReader) stopTrack() (bs []byte) {
+	return z.b[z.t:z.c]
+}
+
+// ----------------------------------------
+
+// func (d *Decoder) builtin(f *codecFnInfo, rv reflect.Value) {
+// 	d.d.DecodeBuiltin(f.ti.rtid, rv2i(rv))
+// }
+
+func (d *Decoder) rawExt(f *codecFnInfo, rv reflect.Value) {
+	d.d.DecodeExt(rv2i(rv), 0, nil)
+}
+
+func (d *Decoder) ext(f *codecFnInfo, rv reflect.Value) {
+	d.d.DecodeExt(rv2i(rv), f.xfTag, f.xfFn)
+}
+
+func (d *Decoder) selferUnmarshal(f *codecFnInfo, rv reflect.Value) {
+	rv2i(rv).(Selfer).CodecDecodeSelf(d)
+}
+
+func (d *Decoder) binaryUnmarshal(f *codecFnInfo, rv reflect.Value) {
+	bm := rv2i(rv).(encoding.BinaryUnmarshaler)
+	xbs := d.d.DecodeBytes(nil, true)
+	if fnerr := bm.UnmarshalBinary(xbs); fnerr != nil {
+		panic(fnerr)
+	}
+}
+
+func (d *Decoder) textUnmarshal(f *codecFnInfo, rv reflect.Value) {
+	tm := rv2i(rv).(encoding.TextUnmarshaler)
+	fnerr := tm.UnmarshalText(d.d.DecodeStringAsBytes())
+	if fnerr != nil {
+		panic(fnerr)
+	}
+}
+
+func (d *Decoder) jsonUnmarshal(f *codecFnInfo, rv reflect.Value) {
+	tm := rv2i(rv).(jsonUnmarshaler)
+	// bs := d.d.DecodeBytes(d.b[:], true, true)
+	// grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself.
+	fnerr := tm.UnmarshalJSON(d.nextValueBytes())
+	if fnerr != nil {
+		panic(fnerr)
+	}
+}
+
+func (d *Decoder) kErr(f *codecFnInfo, rv reflect.Value) {
+	d.errorf("no decoding function defined for kind %v", rv.Kind())
+}
+
+// var kIntfCtr uint64
+
+func (d *Decoder) kInterfaceNaked(f *codecFnInfo) (rvn reflect.Value) {
+	// nil interface:
+	// use some heuristics to decode it appropriately
+	// based on the detected next value in the stream.
+	n := d.naked()
+	d.d.DecodeNaked()
+	if n.v == valueTypeNil {
+		return
+	}
+	// We cannot decode non-nil stream value into nil interface with methods (e.g. io.Reader).
+	if f.ti.numMeth > 0 {
+		d.errorf("cannot decode non-nil codec value into nil %v (%v methods)", f.ti.rt, f.ti.numMeth)
+		return
+	}
+	// var useRvn bool
+	switch n.v {
+	case valueTypeMap:
+		// if json, default to a map type with string keys
+		mtid := d.mtid
+		if mtid == 0 {
+			if d.jsms {
+				mtid = mapStrIntfTypId
+			} else {
+				mtid = mapIntfIntfTypId
+			}
+		}
+		if mtid == mapIntfIntfTypId {
+			n.initContainers()
+			if n.lm < arrayCacheLen {
+				n.ma[n.lm] = nil
+				rvn = n.rma[n.lm]
+				n.lm++
+				d.decode(&n.ma[n.lm-1])
+				n.lm--
+			} else {
+				var v2 map[interface{}]interface{}
+				d.decode(&v2)
+				rvn = reflect.ValueOf(&v2).Elem()
+			}
+		} else if mtid == mapStrIntfTypId { // for json performance
+			n.initContainers()
+			if n.ln < arrayCacheLen {
+				n.na[n.ln] = nil
+				rvn = n.rna[n.ln]
+				n.ln++
+				d.decode(&n.na[n.ln-1])
+				n.ln--
+			} else {
+				var v2 map[string]interface{}
+				d.decode(&v2)
+				rvn = reflect.ValueOf(&v2).Elem()
+			}
+		} else {
+			if d.mtr {
+				rvn = reflect.New(d.h.MapType)
+				d.decode(rv2i(rvn))
+				rvn = rvn.Elem()
+			} else {
+				rvn = reflect.New(d.h.MapType).Elem()
+				d.decodeValue(rvn, nil, true)
+			}
+		}
+	case valueTypeArray:
+		if d.stid == 0 || d.stid == intfSliceTypId {
+			n.initContainers()
+			if n.ls < arrayCacheLen {
+				n.sa[n.ls] = nil
+				rvn = n.rsa[n.ls]
+				n.ls++
+				d.decode(&n.sa[n.ls-1])
+				n.ls--
+			} else {
+				var v2 []interface{}
+				d.decode(&v2)
+				rvn = reflect.ValueOf(&v2).Elem()
+			}
+			if reflectArrayOfSupported && d.stid == 0 && d.h.PreferArrayOverSlice {
+				rvn2 := reflect.New(reflectArrayOf(rvn.Len(), intfTyp)).Elem()
+				reflect.Copy(rvn2, rvn)
+				rvn = rvn2
+			}
+		} else {
+			if d.str {
+				rvn = reflect.New(d.h.SliceType)
+				d.decode(rv2i(rvn))
+				rvn = rvn.Elem()
+			} else {
+				rvn = reflect.New(d.h.SliceType).Elem()
+				d.decodeValue(rvn, nil, true)
+			}
+		}
+	case valueTypeExt:
+		var v interface{}
+		tag, bytes := n.u, n.l // calling decode below might taint the values
+		if bytes == nil {
+			n.initContainers()
+			if n.li < arrayCacheLen {
+				n.ia[n.li] = nil
+				n.li++
+				d.decode(&n.ia[n.li-1])
+				// v = *(&n.ia[l])
+				n.li--
+				v = n.ia[n.li]
+				n.ia[n.li] = nil
+			} else {
+				d.decode(&v)
+			}
+		}
+		bfn := d.h.getExtForTag(tag)
+		if bfn == nil {
+			var re RawExt
+			re.Tag = tag
+			re.Data = detachZeroCopyBytes(d.bytes, nil, bytes)
+			re.Value = v
+			rvn = reflect.ValueOf(&re).Elem()
+		} else {
+			rvnA := reflect.New(bfn.rt)
+			if bytes != nil {
+				bfn.ext.ReadExt(rv2i(rvnA), bytes)
+			} else {
+				bfn.ext.UpdateExt(rv2i(rvnA), v)
+			}
+			rvn = rvnA.Elem()
+		}
+	case valueTypeNil:
+		// no-op
+	case valueTypeInt:
+		rvn = n.ri
+	case valueTypeUint:
+		rvn = n.ru
+	case valueTypeFloat:
+		rvn = n.rf
+	case valueTypeBool:
+		rvn = n.rb
+	case valueTypeString, valueTypeSymbol:
+		rvn = n.rs
+	case valueTypeBytes:
+		rvn = n.rl
+	case valueTypeTime:
+		rvn = n.rt
+	default:
+		panicv.errorf("kInterfaceNaked: unexpected valueType: %d", n.v)
+	}
+	return
+}
+
+func (d *Decoder) kInterface(f *codecFnInfo, rv reflect.Value) {
+	// Note:
+	// A consequence of how kInterface works, is that
+	// if an interface already contains something, we try
+	// to decode into what was there before.
+	// We do not replace with a generic value (as got from decodeNaked).
+
+	// every interface passed here MUST be settable.
+	var rvn reflect.Value
+	if rv.IsNil() || d.h.InterfaceReset {
+		// check if mapping to a type: if so, initialize it and move on
+		rvn = d.h.intf2impl(f.ti.rtid)
+		if rvn.IsValid() {
+			rv.Set(rvn)
+		} else {
+			rvn = d.kInterfaceNaked(f)
+			if rvn.IsValid() {
+				rv.Set(rvn)
+			} else if d.h.InterfaceReset {
+				// reset to zero value based on current type in there.
+				rv.Set(reflect.Zero(rv.Elem().Type()))
+			}
+			return
+		}
+	} else {
+		// now we have a non-nil interface value, meaning it contains a type
+		rvn = rv.Elem()
+	}
+	if d.d.TryDecodeAsNil() {
+		rv.Set(reflect.Zero(rvn.Type()))
+		return
+	}
+
+	// Note: interface{} is settable, but underlying type may not be.
+	// Consequently, we MAY have to create a decodable value out of the underlying value,
+	// decode into it, and reset the interface itself.
+	// fmt.Printf(">>>> kInterface: rvn type: %v, rv type: %v\n", rvn.Type(), rv.Type())
+
+	rvn2, canDecode := isDecodeable(rvn)
+	if canDecode {
+		d.decodeValue(rvn2, nil, true)
+		return
+	}
+
+	rvn2 = reflect.New(rvn.Type()).Elem()
+	rvn2.Set(rvn)
+	d.decodeValue(rvn2, nil, true)
+	rv.Set(rvn2)
+}
+
+func decStructFieldKey(dd decDriver, keyType valueType, b *[decScratchByteArrayLen]byte) (rvkencname []byte) {
+	// use if-else-if, not switch (which compiles to binary-search)
+	// since keyType is typically valueTypeString, branch prediction is pretty good.
+
+	if keyType == valueTypeString {
+		rvkencname = dd.DecodeStringAsBytes()
+	} else if keyType == valueTypeInt {
+		rvkencname = strconv.AppendInt(b[:0], dd.DecodeInt64(), 10)
+	} else if keyType == valueTypeUint {
+		rvkencname = strconv.AppendUint(b[:0], dd.DecodeUint64(), 10)
+	} else if keyType == valueTypeFloat {
+		rvkencname = strconv.AppendFloat(b[:0], dd.DecodeFloat64(), 'f', -1, 64)
+	} else {
+		rvkencname = dd.DecodeStringAsBytes()
+	}
+	return rvkencname
+}
+
+func (d *Decoder) kStruct(f *codecFnInfo, rv reflect.Value) {
+	fti := f.ti
+	dd := d.d
+	elemsep := d.esep
+	sfn := structFieldNode{v: rv, update: true}
+	ctyp := dd.ContainerType()
+	if ctyp == valueTypeMap {
+		containerLen := dd.ReadMapStart()
+		if containerLen == 0 {
+			dd.ReadMapEnd()
+			return
+		}
+		tisfi := fti.sfiSort
+		hasLen := containerLen >= 0
+
+		var rvkencname []byte
+		for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+			if elemsep {
+				dd.ReadMapElemKey()
+			}
+			rvkencname = decStructFieldKey(dd, fti.keyType, &d.b)
+			if elemsep {
+				dd.ReadMapElemValue()
+			}
+			if k := fti.indexForEncName(rvkencname); k > -1 {
+				si := tisfi[k]
+				if dd.TryDecodeAsNil() {
+					si.setToZeroValue(rv)
+				} else {
+					d.decodeValue(sfn.field(si), nil, true)
+				}
+			} else {
+				d.structFieldNotFound(-1, stringView(rvkencname))
+			}
+			// keepAlive4StringView(rvkencnameB) // not needed, as reference is outside loop
+		}
+		dd.ReadMapEnd()
+	} else if ctyp == valueTypeArray {
+		containerLen := dd.ReadArrayStart()
+		if containerLen == 0 {
+			dd.ReadArrayEnd()
+			return
+		}
+		// Not much gain from doing it two ways for array.
+		// Arrays are not used as much for structs.
+		hasLen := containerLen >= 0
+		for j, si := range fti.sfiSrc {
+			if (hasLen && j == containerLen) || (!hasLen && dd.CheckBreak()) {
+				break
+			}
+			if elemsep {
+				dd.ReadArrayElem()
+			}
+			if dd.TryDecodeAsNil() {
+				si.setToZeroValue(rv)
+			} else {
+				d.decodeValue(sfn.field(si), nil, true)
+			}
+		}
+		if containerLen > len(fti.sfiSrc) {
+			// read remaining values and throw away
+			for j := len(fti.sfiSrc); j < containerLen; j++ {
+				if elemsep {
+					dd.ReadArrayElem()
+				}
+				d.structFieldNotFound(j, "")
+			}
+		}
+		dd.ReadArrayEnd()
+	} else {
+		d.errorstr(errstrOnlyMapOrArrayCanDecodeIntoStruct)
+		return
+	}
+}
+
+func (d *Decoder) kSlice(f *codecFnInfo, rv reflect.Value) {
+	// A slice can be set from a map or array in stream.
+	// This way, the order can be kept (as order is lost with map).
+	ti := f.ti
+	if f.seq == seqTypeChan && ti.chandir&uint8(reflect.SendDir) == 0 {
+		d.errorf("receive-only channel cannot be decoded")
+	}
+	dd := d.d
+	rtelem0 := ti.elem
+	ctyp := dd.ContainerType()
+	if ctyp == valueTypeBytes || ctyp == valueTypeString {
+		// you can only decode bytes or string in the stream into a slice or array of bytes
+		if !(ti.rtid == uint8SliceTypId || rtelem0.Kind() == reflect.Uint8) {
+			d.errorf("bytes/string in stream must decode into slice/array of bytes, not %v", ti.rt)
+		}
+		if f.seq == seqTypeChan {
+			bs2 := dd.DecodeBytes(nil, true)
+			irv := rv2i(rv)
+			ch, ok := irv.(chan<- byte)
+			if !ok {
+				ch = irv.(chan byte)
+			}
+			for _, b := range bs2 {
+				ch <- b
+			}
+		} else {
+			rvbs := rv.Bytes()
+			bs2 := dd.DecodeBytes(rvbs, false)
+			// if rvbs == nil && bs2 != nil || rvbs != nil && bs2 == nil || len(bs2) != len(rvbs) {
+			if !(len(bs2) > 0 && len(bs2) == len(rvbs) && &bs2[0] == &rvbs[0]) {
+				if rv.CanSet() {
+					rv.SetBytes(bs2)
+				} else if len(rvbs) > 0 && len(bs2) > 0 {
+					copy(rvbs, bs2)
+				}
+			}
+		}
+		return
+	}
+
+	// array := f.seq == seqTypeChan
+
+	slh, containerLenS := d.decSliceHelperStart() // only expects valueType(Array|Map)
+
+	// an array can never return a nil slice. so no need to check f.array here.
+	if containerLenS == 0 {
+		if rv.CanSet() {
+			if f.seq == seqTypeSlice {
+				if rv.IsNil() {
+					rv.Set(reflect.MakeSlice(ti.rt, 0, 0))
+				} else {
+					rv.SetLen(0)
+				}
+			} else if f.seq == seqTypeChan {
+				if rv.IsNil() {
+					rv.Set(reflect.MakeChan(ti.rt, 0))
+				}
+			}
+		}
+		slh.End()
+		return
+	}
+
+	rtelem0Size := int(rtelem0.Size())
+	rtElem0Kind := rtelem0.Kind()
+	rtelem0Mut := !isImmutableKind(rtElem0Kind)
+	rtelem := rtelem0
+	rtelemkind := rtelem.Kind()
+	for rtelemkind == reflect.Ptr {
+		rtelem = rtelem.Elem()
+		rtelemkind = rtelem.Kind()
+	}
+
+	var fn *codecFn
+
+	var rvCanset = rv.CanSet()
+	var rvChanged bool
+	var rv0 = rv
+	var rv9 reflect.Value
+
+	rvlen := rv.Len()
+	rvcap := rv.Cap()
+	hasLen := containerLenS > 0
+	if hasLen && f.seq == seqTypeSlice {
+		if containerLenS > rvcap {
+			oldRvlenGtZero := rvlen > 0
+			rvlen = decInferLen(containerLenS, d.h.MaxInitLen, int(rtelem0.Size()))
+			if rvlen <= rvcap {
+				if rvCanset {
+					rv.SetLen(rvlen)
+				}
+			} else if rvCanset {
+				rv = reflect.MakeSlice(ti.rt, rvlen, rvlen)
+				rvcap = rvlen
+				rvChanged = true
+			} else {
+				d.errorf("cannot decode into non-settable slice")
+			}
+			if rvChanged && oldRvlenGtZero && !isImmutableKind(rtelem0.Kind()) {
+				reflect.Copy(rv, rv0) // only copy up to length NOT cap i.e. rv0.Slice(0, rvcap)
+			}
+		} else if containerLenS != rvlen {
+			rvlen = containerLenS
+			if rvCanset {
+				rv.SetLen(rvlen)
+			}
+			// else {
+			// rv = rv.Slice(0, rvlen)
+			// rvChanged = true
+			// d.errorf("cannot decode into non-settable slice")
+			// }
+		}
+	}
+
+	// consider creating new element once, and just decoding into it.
+	var rtelem0Zero reflect.Value
+	var rtelem0ZeroValid bool
+	var decodeAsNil bool
+	var j int
+	d.cfer()
+	for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
+		if j == 0 && (f.seq == seqTypeSlice || f.seq == seqTypeChan) && rv.IsNil() {
+			if hasLen {
+				rvlen = decInferLen(containerLenS, d.h.MaxInitLen, rtelem0Size)
+			} else if f.seq == seqTypeSlice {
+				rvlen = decDefSliceCap
+			} else {
+				rvlen = decDefChanCap
+			}
+			if rvCanset {
+				if f.seq == seqTypeSlice {
+					rv = reflect.MakeSlice(ti.rt, rvlen, rvlen)
+					rvChanged = true
+				} else { // chan
+					// xdebugf(">>>>>> haslen = %v, make chan of type '%v' with length: %v", hasLen, ti.rt, rvlen)
+					rv = reflect.MakeChan(ti.rt, rvlen)
+					rvChanged = true
+				}
+			} else {
+				d.errorf("cannot decode into non-settable slice")
+			}
+		}
+		slh.ElemContainerState(j)
+		decodeAsNil = dd.TryDecodeAsNil()
+		if f.seq == seqTypeChan {
+			if decodeAsNil {
+				rv.Send(reflect.Zero(rtelem0))
+				continue
+			}
+			if rtelem0Mut || !rv9.IsValid() { // || (rtElem0Kind == reflect.Ptr && rv9.IsNil()) {
+				rv9 = reflect.New(rtelem0).Elem()
+			}
+			if fn == nil {
+				fn = d.cf.get(rtelem, true, true)
+			}
+			d.decodeValue(rv9, fn, true)
+			// xdebugf(">>>> rv9 sent on %v during decode: %v, with len=%v, cap=%v", rv.Type(), rv9, rv.Len(), rv.Cap())
+			rv.Send(rv9)
+		} else {
+			// if indefinite, etc, then expand the slice if necessary
+			var decodeIntoBlank bool
+			if j >= rvlen {
+				if f.seq == seqTypeArray {
+					d.arrayCannotExpand(rvlen, j+1)
+					decodeIntoBlank = true
+				} else { // if f.seq == seqTypeSlice
+					// rv = reflect.Append(rv, reflect.Zero(rtelem0)) // append logic + varargs
+					var rvcap2 int
+					var rvErrmsg2 string
+					rv9, rvcap2, rvChanged, rvErrmsg2 =
+						expandSliceRV(rv, ti.rt, rvCanset, rtelem0Size, 1, rvlen, rvcap)
+					if rvErrmsg2 != "" {
+						d.errorf(rvErrmsg2)
+					}
+					rvlen++
+					if rvChanged {
+						rv = rv9
+						rvcap = rvcap2
+					}
+				}
+			}
+			if decodeIntoBlank {
+				if !decodeAsNil {
+					d.swallow()
+				}
+			} else {
+				rv9 = rv.Index(j)
+				if d.h.SliceElementReset || decodeAsNil {
+					if !rtelem0ZeroValid {
+						rtelem0ZeroValid = true
+						rtelem0Zero = reflect.Zero(rtelem0)
+					}
+					rv9.Set(rtelem0Zero)
+				}
+				if decodeAsNil {
+					continue
+				}
+
+				if fn == nil {
+					fn = d.cf.get(rtelem, true, true)
+				}
+				d.decodeValue(rv9, fn, true)
+			}
+		}
+	}
+	if f.seq == seqTypeSlice {
+		if j < rvlen {
+			if rv.CanSet() {
+				rv.SetLen(j)
+			} else if rvCanset {
+				rv = rv.Slice(0, j)
+				rvChanged = true
+			} // else { d.errorf("kSlice: cannot change non-settable slice") }
+			rvlen = j
+		} else if j == 0 && rv.IsNil() {
+			if rvCanset {
+				rv = reflect.MakeSlice(ti.rt, 0, 0)
+				rvChanged = true
+			} // else { d.errorf("kSlice: cannot change non-settable slice") }
+		}
+	}
+	slh.End()
+
+	if rvChanged { // infers rvCanset=true, so it can be reset
+		rv0.Set(rv)
+	}
+}
+
+// func (d *Decoder) kArray(f *codecFnInfo, rv reflect.Value) {
+// 	// d.decodeValueFn(rv.Slice(0, rv.Len()))
+// 	f.kSlice(rv.Slice(0, rv.Len()))
+// }
+
+func (d *Decoder) kMap(f *codecFnInfo, rv reflect.Value) {
+	dd := d.d
+	containerLen := dd.ReadMapStart()
+	elemsep := d.esep
+	ti := f.ti
+	if rv.IsNil() {
+		rv.Set(makeMapReflect(ti.rt, containerLen))
+	}
+
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return
+	}
+
+	ktype, vtype := ti.key, ti.elem
+	ktypeId := rt2id(ktype)
+	vtypeKind := vtype.Kind()
+
+	var keyFn, valFn *codecFn
+	var ktypeLo, vtypeLo reflect.Type
+
+	for ktypeLo = ktype; ktypeLo.Kind() == reflect.Ptr; ktypeLo = ktypeLo.Elem() {
+	}
+
+	for vtypeLo = vtype; vtypeLo.Kind() == reflect.Ptr; vtypeLo = vtypeLo.Elem() {
+	}
+
+	var mapGet, mapSet bool
+	rvvImmut := isImmutableKind(vtypeKind)
+	if !d.h.MapValueReset {
+		// if pointer, mapGet = true
+		// if interface, mapGet = true if !DecodeNakedAlways (else false)
+		// if builtin, mapGet = false
+		// else mapGet = true
+		if vtypeKind == reflect.Ptr {
+			mapGet = true
+		} else if vtypeKind == reflect.Interface {
+			if !d.h.InterfaceReset {
+				mapGet = true
+			}
+		} else if !rvvImmut {
+			mapGet = true
+		}
+	}
+
+	var rvk, rvkp, rvv, rvz reflect.Value
+	rvkMut := !isImmutableKind(ktype.Kind()) // if ktype is immutable, then re-use the same rvk.
+	ktypeIsString := ktypeId == stringTypId
+	ktypeIsIntf := ktypeId == intfTypId
+	hasLen := containerLen > 0
+	var kstrbs []byte
+	d.cfer()
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if rvkMut || !rvkp.IsValid() {
+			rvkp = reflect.New(ktype)
+			rvk = rvkp.Elem()
+		}
+		if elemsep {
+			dd.ReadMapElemKey()
+		}
+		if false && dd.TryDecodeAsNil() { // nil cannot be a map key, so disregard this block
+			// Previously, if a nil key, we just ignored the mapped value and continued.
+			// However, that makes the result of encoding and then decoding map[intf]intf{nil:nil}
+			// to be an empty map.
+			// Instead, we treat a nil key as the zero value of the type.
+			rvk.Set(reflect.Zero(ktype))
+		} else if ktypeIsString {
+			kstrbs = dd.DecodeStringAsBytes()
+			rvk.SetString(stringView(kstrbs))
+			// NOTE: if doing an insert, you MUST use a real string (not stringview)
+		} else {
+			if keyFn == nil {
+				keyFn = d.cf.get(ktypeLo, true, true)
+			}
+			d.decodeValue(rvk, keyFn, true)
+		}
+		// special case if a byte array.
+		if ktypeIsIntf {
+			if rvk2 := rvk.Elem(); rvk2.IsValid() {
+				if rvk2.Type() == uint8SliceTyp {
+					rvk = reflect.ValueOf(d.string(rvk2.Bytes()))
+				} else {
+					rvk = rvk2
+				}
+			}
+		}
+
+		if elemsep {
+			dd.ReadMapElemValue()
+		}
+
+		// Brittle, but OK per TryDecodeAsNil() contract.
+		// i.e. TryDecodeAsNil never shares slices with other decDriver procedures
+		if dd.TryDecodeAsNil() {
+			if ktypeIsString {
+				rvk.SetString(d.string(kstrbs))
+			}
+			if d.h.DeleteOnNilMapValue {
+				rv.SetMapIndex(rvk, reflect.Value{})
+			} else {
+				rv.SetMapIndex(rvk, reflect.Zero(vtype))
+			}
+			continue
+		}
+
+		mapSet = true // set to false if you do a get, and it's a non-nil pointer
+		if mapGet {
+			// mapGet true only in case where kind=Ptr|Interface or kind is otherwise mutable.
+			rvv = rv.MapIndex(rvk)
+			if !rvv.IsValid() {
+				rvv = reflect.New(vtype).Elem()
+			} else if vtypeKind == reflect.Ptr {
+				if rvv.IsNil() {
+					rvv = reflect.New(vtype).Elem()
+				} else {
+					mapSet = false
+				}
+			} else if vtypeKind == reflect.Interface {
+				// not addressable, and thus not settable.
+				// we MUST create a settable/addressable variant
+				rvv2 := reflect.New(rvv.Type()).Elem()
+				if !rvv.IsNil() {
+					rvv2.Set(rvv)
+				}
+				rvv = rvv2
+			}
+			// else it is ~mutable, and we can just decode into it directly
+		} else if rvvImmut {
+			if !rvz.IsValid() {
+				rvz = reflect.New(vtype).Elem()
+			}
+			rvv = rvz
+		} else {
+			rvv = reflect.New(vtype).Elem()
+		}
+
+		// We MUST be done with the stringview of the key, before decoding the value
+		// so that we don't bastardize the reused byte array.
+		if mapSet && ktypeIsString {
+			rvk.SetString(d.string(kstrbs))
+		}
+		if valFn == nil {
+			valFn = d.cf.get(vtypeLo, true, true)
+		}
+		d.decodeValue(rvv, valFn, true)
+		// d.decodeValueFn(rvv, valFn)
+		if mapSet {
+			rv.SetMapIndex(rvk, rvv)
+		}
+		// if ktypeIsString {
+		// 	// keepAlive4StringView(kstrbs) // not needed, as reference is outside loop
+		// }
+	}
+
+	dd.ReadMapEnd()
+}
+
+// decNaked is used to keep track of the primitives decoded.
+// Without it, we would have to decode each primitive and wrap it
+// in an interface{}, causing an allocation.
+// In this model, the primitives are decoded in a "pseudo-atomic" fashion,
+// so we can rest assured that no other decoding happens while these
+// primitives are being decoded.
+//
+// maps and arrays are not handled by this mechanism.
+// However, RawExt is, and we accommodate extensions that decode
+// RawExt from DecodeNaked, but need to decode the value subsequently.
+// kInterfaceNaked and swallow, which call DecodeNaked, handle this caveat.
+//
+// However, decNaked also keeps some arrays of default maps and slices
+// used in DecodeNaked. This way, we can get a pointer to it
+// without causing a new heap allocation.
+//
+// kInterfaceNaked will ensure that there is no allocation for the common
+// uses.
+
+type decNakedContainers struct {
+	// array/stacks for reducing allocation
+	// keep arrays at the bottom? Chances are that they are not used much.
+	ia [arrayCacheLen]interface{}
+	ma [arrayCacheLen]map[interface{}]interface{}
+	na [arrayCacheLen]map[string]interface{}
+	sa [arrayCacheLen][]interface{}
+
+	// ria [arrayCacheLen]reflect.Value // not needed, as we decode directly into &ia[n]
+	rma, rna, rsa [arrayCacheLen]reflect.Value // reflect.Value mapping to above
+}
+
+func (n *decNakedContainers) init() {
+	for i := 0; i < arrayCacheLen; i++ {
+		// n.ria[i] = reflect.ValueOf(&(n.ia[i])).Elem()
+		n.rma[i] = reflect.ValueOf(&(n.ma[i])).Elem()
+		n.rna[i] = reflect.ValueOf(&(n.na[i])).Elem()
+		n.rsa[i] = reflect.ValueOf(&(n.sa[i])).Elem()
+	}
+}
+
+type decNaked struct {
+	// r RawExt // used for RawExt, uint, []byte.
+
+	// primitives below
+	u uint64
+	i int64
+	f float64
+	l []byte
+	s string
+
+	// ---- cpu cache line boundary?
+	t time.Time
+	b bool
+
+	// state
+	v              valueType
+	li, lm, ln, ls int8
+	inited         bool
+
+	*decNakedContainers
+
+	ru, ri, rf, rl, rs, rb, rt reflect.Value // mapping to the primitives above
+
+	// _ [6]uint64 // padding // no padding - rt goes into next cache line
+}
+
+func (n *decNaked) init() {
+	if n.inited {
+		return
+	}
+	n.ru = reflect.ValueOf(&n.u).Elem()
+	n.ri = reflect.ValueOf(&n.i).Elem()
+	n.rf = reflect.ValueOf(&n.f).Elem()
+	n.rl = reflect.ValueOf(&n.l).Elem()
+	n.rs = reflect.ValueOf(&n.s).Elem()
+	n.rt = reflect.ValueOf(&n.t).Elem()
+	n.rb = reflect.ValueOf(&n.b).Elem()
+
+	n.inited = true
+	// n.rr[] = reflect.ValueOf(&n.)
+}
+
+func (n *decNaked) initContainers() {
+	if n.decNakedContainers == nil {
+		n.decNakedContainers = new(decNakedContainers)
+		n.decNakedContainers.init()
+	}
+}
+
+func (n *decNaked) reset() {
+	if n == nil {
+		return
+	}
+	n.li, n.lm, n.ln, n.ls = 0, 0, 0, 0
+}
+
+type rtid2rv struct {
+	rtid uintptr
+	rv   reflect.Value
+}
+
+// --------------
+
+type decReaderSwitch struct {
+	rb bytesDecReader
+	// ---- cpu cache line boundary?
+	ri       *ioDecReader
+	mtr, str bool // whether maptype or slicetype are known types
+
+	be    bool // is binary encoding
+	bytes bool // is bytes reader
+	js    bool // is json handle
+	jsms  bool // is json handle, and MapKeyAsString
+	esep  bool // has elem separators
+}
+
+// TODO: Uncomment after mid-stack inlining enabled in go 1.11
+//
+// func (z *decReaderSwitch) unreadn1() {
+// 	if z.bytes {
+// 		z.rb.unreadn1()
+// 	} else {
+// 		z.ri.unreadn1()
+// 	}
+// }
+// func (z *decReaderSwitch) readx(n int) []byte {
+// 	if z.bytes {
+// 		return z.rb.readx(n)
+// 	}
+// 	return z.ri.readx(n)
+// }
+// func (z *decReaderSwitch) readb(s []byte) {
+// 	if z.bytes {
+// 		z.rb.readb(s)
+// 	} else {
+// 		z.ri.readb(s)
+// 	}
+// }
+// func (z *decReaderSwitch) readn1() uint8 {
+// 	if z.bytes {
+// 		return z.rb.readn1()
+// 	}
+// 	return z.ri.readn1()
+// }
+// func (z *decReaderSwitch) numread() int {
+// 	if z.bytes {
+// 		return z.rb.numread()
+// 	}
+// 	return z.ri.numread()
+// }
+// func (z *decReaderSwitch) track() {
+// 	if z.bytes {
+// 		z.rb.track()
+// 	} else {
+// 		z.ri.track()
+// 	}
+// }
+// func (z *decReaderSwitch) stopTrack() []byte {
+// 	if z.bytes {
+// 		return z.rb.stopTrack()
+// 	}
+// 	return z.ri.stopTrack()
+// }
+// func (z *decReaderSwitch) skip(accept *bitset256) (token byte) {
+// 	if z.bytes {
+// 		return z.rb.skip(accept)
+// 	}
+// 	return z.ri.skip(accept)
+// }
+// func (z *decReaderSwitch) readTo(in []byte, accept *bitset256) (out []byte) {
+// 	if z.bytes {
+// 		return z.rb.readTo(in, accept)
+// 	}
+// 	return z.ri.readTo(in, accept)
+// }
+// func (z *decReaderSwitch) readUntil(in []byte, stop byte) (out []byte) {
+// 	if z.bytes {
+// 		return z.rb.readUntil(in, stop)
+// 	}
+// 	return z.ri.readUntil(in, stop)
+// }
+
+// A Decoder reads and decodes an object from an input stream in the codec format.
+type Decoder struct {
+	panicHdl
+	// hopefully, reduce dereferencing cost by laying the decReader inside the Decoder.
+	// Try to put things that go together to fit within a cache line (8 words).
+
+	d decDriver
+	// NOTE: Decoder shouldn't call its read methods,
+	// as the handler MAY need to do some coordination.
+	r  decReader
+	h  *BasicHandle
+	bi *bufioDecReader
+	// cache the mapTypeId and sliceTypeId for faster comparisons
+	mtid uintptr
+	stid uintptr
+
+	// ---- cpu cache line boundary?
+	decReaderSwitch
+
+	// ---- cpu cache line boundary?
+	codecFnPooler
+	// cr containerStateRecv
+	n   *decNaked
+	nsp *sync.Pool
+	err error
+
+	// ---- cpu cache line boundary?
+	b  [decScratchByteArrayLen]byte // scratch buffer, used by Decoder and xxxDecDrivers
+	is map[string]string            // used for interning strings
+
+	// padding - false sharing help // modify 232 if Decoder struct changes.
+	// _ [cacheLineSize - 232%cacheLineSize]byte
+}
+
+// NewDecoder returns a Decoder for decoding a stream of bytes from an io.Reader.
+//
+// For efficiency, users are encouraged to pass in a memory buffered reader
+// (eg bufio.Reader, bytes.Buffer).
+func NewDecoder(r io.Reader, h Handle) *Decoder {
+	d := newDecoder(h)
+	d.Reset(r)
+	return d
+}
+
+// NewDecoderBytes returns a Decoder which efficiently decodes directly
+// from a byte slice with zero copying.
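+//
+// Sample usage, with handle being any configured Handle:
+//   var v map[string]interface{}
+//   err := codec.NewDecoderBytes(data, handle).Decode(&v)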
+func NewDecoderBytes(in []byte, h Handle) *Decoder {
+	d := newDecoder(h)
+	d.ResetBytes(in)
+	return d
+}
+
+var defaultDecNaked decNaked
+
+func newDecoder(h Handle) *Decoder {
+	d := &Decoder{h: h.getBasicHandle(), err: errDecoderNotInitialized}
+	d.hh = h
+	d.be = h.isBinary()
+	// NOTE: do not initialize d.n here. It is lazily initialized in d.naked()
+	var jh *JsonHandle
+	jh, d.js = h.(*JsonHandle)
+	if d.js {
+		d.jsms = jh.MapKeyAsString
+	}
+	d.esep = d.hh.hasElemSeparators()
+	if d.h.InternString {
+		d.is = make(map[string]string, 32)
+	}
+	d.d = h.newDecDriver(d)
+	// d.cr, _ = d.d.(containerStateRecv)
+	return d
+}
+
+func (d *Decoder) resetCommon() {
+	d.n.reset()
+	d.d.reset()
+	d.err = nil
+	// reset all things which were cached from the Handle, but could change
+	d.mtid, d.stid = 0, 0
+	d.mtr, d.str = false, false
+	if d.h.MapType != nil {
+		d.mtid = rt2id(d.h.MapType)
+		d.mtr = fastpathAV.index(d.mtid) != -1
+	}
+	if d.h.SliceType != nil {
+		d.stid = rt2id(d.h.SliceType)
+		d.str = fastpathAV.index(d.stid) != -1
+	}
+}
+
+// Reset the Decoder with a new Reader to decode from,
+// clearing all state from last run(s).
+func (d *Decoder) Reset(r io.Reader) {
+	if r == nil {
+		return
+	}
+	if d.bi == nil {
+		d.bi = new(bufioDecReader)
+	}
+	d.bytes = false
+	if d.h.ReaderBufferSize > 0 {
+		d.bi.buf = make([]byte, 0, d.h.ReaderBufferSize)
+		d.bi.reset(r)
+		d.r = d.bi
+	} else {
+		// d.ri.x = &d.b
+		// d.s = d.sa[:0]
+		if d.ri == nil {
+			d.ri = new(ioDecReader)
+		}
+		d.ri.reset(r)
+		d.r = d.ri
+	}
+	d.resetCommon()
+}
+
+// ResetBytes resets the Decoder with a new []byte to decode from,
+// clearing all state from last run(s).
+func (d *Decoder) ResetBytes(in []byte) {
+	if in == nil {
+		return
+	}
+	d.bytes = true
+	d.rb.reset(in)
+	d.r = &d.rb
+	d.resetCommon()
+}
+
+// naked must be called before each call to .DecodeNaked,
+// as each such call will use it.
+func (d *Decoder) naked() *decNaked {
+	if d.n == nil {
+		// consider one of:
+		//   - get from sync.Pool  (if GC is frequent, there's no value here)
+		//   - new alloc           (safest. only init'ed if a naked decode will be done)
+		//   - field in Decoder    (makes the Decoder struct very big)
+		// To support using a decoder where a DecodeNaked is not needed,
+		// we prefer #1 or #2.
+		// d.n = new(decNaked) // &d.nv // new(decNaked) // grab from a sync.Pool
+		// d.n.init()
+		var v interface{}
+		d.nsp, v = pool.decNaked()
+		d.n = v.(*decNaked)
+	}
+	return d.n
+}
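+
+// The matching release happens in alwaysAtEnd: the leased *decNaked is
+// returned to d.nsp and both d.n and d.nsp are cleared, so each top-level
+// decode borrows the scratch value for at most one run.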
+
+// Decode decodes the stream from reader and stores the result in the
+// value pointed to by v. v cannot be a nil pointer. v can also be
+// a reflect.Value of a pointer.
+//
+// Note that a pointer to a nil interface is not a nil pointer.
+// If you do not know what type of stream it is, pass in a pointer to a nil interface.
+// We will decode and store a value in that nil interface.
+//
+// Sample usages:
+//   // Decoding into a non-nil typed value
+//   var f float32
+//   err = codec.NewDecoder(r, handle).Decode(&f)
+//
+//   // Decoding into nil interface
+//   var v interface{}
+//   dec := codec.NewDecoder(r, handle)
+//   err = dec.Decode(&v)
+//
+// When decoding into a nil interface{}, we will decode into an appropriate value based
+// on the contents of the stream:
+//   - Numbers are decoded as float64, int64 or uint64.
+//   - Other values are decoded appropriately depending on the type:
+//     bool, string, []byte, time.Time, etc
+//   - Extensions are decoded as RawExt (if no ext function registered for the tag)
+// Configurations exist on the Handle to override defaults
+// (e.g. for MapType, SliceType and how to decode raw bytes).
+//
+// When decoding into a non-nil interface{} value, the mode of encoding is based on the
+// type of the value. When a value is seen:
+//   - If an extension is registered for it, call that extension function
+//   - If it implements BinaryUnmarshaler, call its UnmarshalBinary(data []byte) error
+//   - Else decode it based on its reflect.Kind
+//
+// There are some special rules when decoding into containers (slice/array/map/struct).
+// Decode will typically use the stream contents to UPDATE the container.
+//   - A map can be decoded from a stream map, by updating matching keys.
+//   - A slice can be decoded from a stream array,
+//     by updating the first n elements, where n is length of the stream.
+//   - A slice can be decoded from a stream map, by decoding as if
+//     it contains a sequence of key-value pairs.
+//   - A struct can be decoded from a stream map, by updating matching fields.
+//   - A struct can be decoded from a stream array,
+//     by updating fields as they occur in the struct (by index).
+//
+// When decoding a stream map or array with length of 0 into a nil map or slice,
+// we reset the destination map or slice to a zero-length value.
+//
+// However, when decoding a stream nil, we reset the destination container
+// to its "zero" value (e.g. nil for slice/map, etc).
+//
+// Note: we allow nil values in the stream anywhere except for map keys.
+// A nil value in the encoded stream where a map key is expected is treated as an error.
+func (d *Decoder) Decode(v interface{}) (err error) {
+	defer d.deferred(&err)
+	d.MustDecode(v)
+	return
+}
+
+// MustDecode is like Decode, but panics if unable to Decode.
+// This provides insight to the code location that triggered the error.
+func (d *Decoder) MustDecode(v interface{}) {
+	// TODO: Top-level: ensure that v is a pointer and not nil.
+	if d.err != nil {
+		panic(d.err)
+	}
+	if d.d.TryDecodeAsNil() {
+		setZero(v)
+	} else {
+		d.decode(v)
+	}
+	d.alwaysAtEnd()
+	// xprintf(">>>>>>>> >>>>>>>> num decFns: %v\n", d.cf.sn)
+}
+
+func (d *Decoder) deferred(err1 *error) {
+	d.alwaysAtEnd()
+	if recoverPanicToErr {
+		if x := recover(); x != nil {
+			panicValToErr(d, x, err1)
+			panicValToErr(d, x, &d.err)
+		}
+	}
+}
+
+func (d *Decoder) alwaysAtEnd() {
+	if d.n != nil {
+		// if n != nil, then nsp != nil (they are always set together)
+		d.nsp.Put(d.n)
+		d.n, d.nsp = nil, nil
+	}
+	d.codecFnPooler.alwaysAtEnd()
+}
+
+// // this is not a smart swallow, as it allocates objects and does unnecessary work.
+// func (d *Decoder) swallowViaHammer() {
+// 	var blank interface{}
+// 	d.decodeValueNoFn(reflect.ValueOf(&blank).Elem())
+// }
+
+func (d *Decoder) swallow() {
+	// smarter decode that just swallows the content
+	dd := d.d
+	if dd.TryDecodeAsNil() {
+		return
+	}
+	elemsep := d.esep
+	switch dd.ContainerType() {
+	case valueTypeMap:
+		containerLen := dd.ReadMapStart()
+		hasLen := containerLen >= 0
+		for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+			// if clenGtEqualZero {if j >= containerLen {break} } else if dd.CheckBreak() {break}
+			if elemsep {
+				dd.ReadMapElemKey()
+			}
+			d.swallow()
+			if elemsep {
+				dd.ReadMapElemValue()
+			}
+			d.swallow()
+		}
+		dd.ReadMapEnd()
+	case valueTypeArray:
+		containerLen := dd.ReadArrayStart()
+		hasLen := containerLen >= 0
+		for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+			if elemsep {
+				dd.ReadArrayElem()
+			}
+			d.swallow()
+		}
+		dd.ReadArrayEnd()
+	case valueTypeBytes:
+		dd.DecodeBytes(d.b[:], true)
+	case valueTypeString:
+		dd.DecodeStringAsBytes()
+	default:
+		// these are all primitives, which we can get from decodeNaked
+		// if RawExt using Value, complete the processing.
+		n := d.naked()
+		dd.DecodeNaked()
+		if n.v == valueTypeExt && n.l == nil {
+			n.initContainers()
+			if n.li < arrayCacheLen {
+				n.ia[n.li] = nil
+				n.li++
+				d.decode(&n.ia[n.li-1])
+				n.ia[n.li-1] = nil
+				n.li--
+			} else {
+				var v2 interface{}
+				d.decode(&v2)
+			}
+		}
+	}
+}
+
+func setZero(iv interface{}) {
+	if iv == nil || definitelyNil(iv) {
+		return
+	}
+	var canDecode bool
+	switch v := iv.(type) {
+	case *string:
+		*v = ""
+	case *bool:
+		*v = false
+	case *int:
+		*v = 0
+	case *int8:
+		*v = 0
+	case *int16:
+		*v = 0
+	case *int32:
+		*v = 0
+	case *int64:
+		*v = 0
+	case *uint:
+		*v = 0
+	case *uint8:
+		*v = 0
+	case *uint16:
+		*v = 0
+	case *uint32:
+		*v = 0
+	case *uint64:
+		*v = 0
+	case *float32:
+		*v = 0
+	case *float64:
+		*v = 0
+	case *[]uint8:
+		*v = nil
+	case *Raw:
+		*v = nil
+	case *time.Time:
+		*v = time.Time{}
+	case reflect.Value:
+		if v, canDecode = isDecodeable(v); canDecode && v.CanSet() {
+			v.Set(reflect.Zero(v.Type()))
+		} // TODO: else drain if chan, clear if map, set all to nil if slice???
+	default:
+		if !fastpathDecodeSetZeroTypeSwitch(iv) {
+			v := reflect.ValueOf(iv)
+			if v, canDecode = isDecodeable(v); canDecode && v.CanSet() {
+				v.Set(reflect.Zero(v.Type()))
+			} // TODO: else drain if chan, clear if map, set all to nil if slice???
+		}
+	}
+}
+
+func (d *Decoder) decode(iv interface{}) {
+	// check nil and interfaces explicitly,
+	// so that type switches just have a run of constant non-interface types.
+	if iv == nil {
+		d.errorstr(errstrCannotDecodeIntoNil)
+		return
+	}
+	if v, ok := iv.(Selfer); ok {
+		v.CodecDecodeSelf(d)
+		return
+	}
+
+	switch v := iv.(type) {
+	// case nil:
+	// case Selfer:
+
+	case reflect.Value:
+		v = d.ensureDecodeable(v)
+		d.decodeValue(v, nil, true)
+
+	case *string:
+		*v = d.d.DecodeString()
+	case *bool:
+		*v = d.d.DecodeBool()
+	case *int:
+		*v = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize))
+	case *int8:
+		*v = int8(chkOvf.IntV(d.d.DecodeInt64(), 8))
+	case *int16:
+		*v = int16(chkOvf.IntV(d.d.DecodeInt64(), 16))
+	case *int32:
+		*v = int32(chkOvf.IntV(d.d.DecodeInt64(), 32))
+	case *int64:
+		*v = d.d.DecodeInt64()
+	case *uint:
+		*v = uint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize))
+	case *uint8:
+		*v = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8))
+	case *uint16:
+		*v = uint16(chkOvf.UintV(d.d.DecodeUint64(), 16))
+	case *uint32:
+		*v = uint32(chkOvf.UintV(d.d.DecodeUint64(), 32))
+	case *uint64:
+		*v = d.d.DecodeUint64()
+	case *float32:
+		f64 := d.d.DecodeFloat64()
+		if chkOvf.Float32(f64) {
+			d.errorf("float32 overflow: %v", f64)
+		}
+		*v = float32(f64)
+	case *float64:
+		*v = d.d.DecodeFloat64()
+	case *[]uint8:
+		*v = d.d.DecodeBytes(*v, false)
+	case []uint8:
+		b := d.d.DecodeBytes(v, false)
+		if !(len(b) > 0 && len(b) == len(v) && &b[0] == &v[0]) {
+			copy(v, b)
+		}
+	case *time.Time:
+		*v = d.d.DecodeTime()
+	case *Raw:
+		*v = d.rawBytes()
+
+	case *interface{}:
+		d.decodeValue(reflect.ValueOf(iv).Elem(), nil, true)
+		// d.decodeValueNotNil(reflect.ValueOf(iv).Elem())
+
+	default:
+		if !fastpathDecodeTypeSwitch(iv, d) {
+			v := reflect.ValueOf(iv)
+			v = d.ensureDecodeable(v)
+			d.decodeValue(v, nil, false)
+			// d.decodeValueFallback(v)
+		}
+	}
+}
+
+func (d *Decoder) decodeValue(rv reflect.Value, fn *codecFn, chkAll bool) {
+	// If stream is not containing a nil value, then we can deref to the base
+	// non-pointer value, and decode into that.
+	var rvp reflect.Value
+	var rvpValid bool
+	if rv.Kind() == reflect.Ptr {
+		rvpValid = true
+		for {
+			if rv.IsNil() {
+				rv.Set(reflect.New(rv.Type().Elem()))
+			}
+			rvp = rv
+			rv = rv.Elem()
+			if rv.Kind() != reflect.Ptr {
+				break
+			}
+		}
+	}
+
+	if fn == nil {
+		// always pass checkCodecSelfer=true, in case T or ****T is passed, where *T is a Selfer
+		fn = d.cfer().get(rv.Type(), chkAll, true) // chkAll, chkAll)
+	}
+	if fn.i.addrD {
+		if rvpValid {
+			fn.fd(d, &fn.i, rvp)
+		} else if rv.CanAddr() {
+			fn.fd(d, &fn.i, rv.Addr())
+		} else if !fn.i.addrF {
+			fn.fd(d, &fn.i, rv)
+		} else {
+			d.errorf("cannot decode into a non-pointer value")
+		}
+	} else {
+		fn.fd(d, &fn.i, rv)
+	}
+	// return rv
+}
+
+func (d *Decoder) structFieldNotFound(index int, rvkencname string) {
+	// NOTE: rvkencname may be a stringView, so don't pass it to another function.
+	if d.h.ErrorIfNoField {
+		if index >= 0 {
+			d.errorf("no matching struct field found when decoding stream array at index %v", index)
+			return
+		} else if rvkencname != "" {
+			d.errorf("no matching struct field found when decoding stream map with key " + rvkencname)
+			return
+		}
+	}
+	d.swallow()
+}
+
+func (d *Decoder) arrayCannotExpand(sliceLen, streamLen int) {
+	if d.h.ErrorIfNoArrayExpand {
+		d.errorf("cannot expand array len during decode from %v to %v", sliceLen, streamLen)
+	}
+}
+
+func isDecodeable(rv reflect.Value) (rv2 reflect.Value, canDecode bool) {
+	switch rv.Kind() {
+	case reflect.Array:
+		return rv, true
+	case reflect.Ptr:
+		if !rv.IsNil() {
+			return rv.Elem(), true
+		}
+	case reflect.Slice, reflect.Chan, reflect.Map:
+		if !rv.IsNil() {
+			return rv, true
+		}
+	}
+	return
+}
+
+func (d *Decoder) ensureDecodeable(rv reflect.Value) (rv2 reflect.Value) {
+	// decode can take any reflect.Value that is inherently addressable, i.e.
+	//   - array
+	//   - non-nil chan    (we will SEND to it)
+	//   - non-nil slice   (we will set its elements)
+	//   - non-nil map     (we will put into it)
+	//   - non-nil pointer (we can "update" it)
+	rv2, canDecode := isDecodeable(rv)
+	if canDecode {
+		return
+	}
+	if !rv.IsValid() {
+		d.errorstr(errstrCannotDecodeIntoNil)
+		return
+	}
+	if !rv.CanInterface() {
+		d.errorf("cannot decode into a value without an interface: %v", rv)
+		return
+	}
+	rvi := rv2i(rv)
+	rvk := rv.Kind()
+	d.errorf("cannot decode into value of kind: %v, type: %T, %v", rvk, rvi, rvi)
+	return
+}
+
+// Possibly get an interned version of a string
+//
+// This should mostly be used for map keys, where the key type is string.
+// This is because keys of a map/struct are typically reused across many objects.
+func (d *Decoder) string(v []byte) (s string) {
+	if d.is == nil {
+		return string(v) // don't return stringView, as we need a real string here.
+	}
+	s, ok := d.is[string(v)] // no allocation here, per go implementation
+	if !ok {
+		s = string(v) // new allocation here
+		d.is[s] = s
+	}
+	return s
+}
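+
+// For example: with InternString enabled, decoding many map entries that
+// share the key "name" allocates the backing string once; subsequent
+// occurrences hit d.is and return the interned copy without allocating.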
+
+// nextValueBytes returns the next value in the stream as a set of bytes.
+func (d *Decoder) nextValueBytes() (bs []byte) {
+	d.d.uncacheRead()
+	d.r.track()
+	d.swallow()
+	bs = d.r.stopTrack()
+	return
+}
+
+func (d *Decoder) rawBytes() []byte {
+	// ensure that this is not a view into the bytes
+	// i.e. make new copy always.
+	bs := d.nextValueBytes()
+	bs2 := make([]byte, len(bs))
+	copy(bs2, bs)
+	return bs2
+}
+
+func (d *Decoder) wrapErrstr(v interface{}, err *error) {
+	*err = fmt.Errorf("%s decode error [pos %d]: %v", d.hh.Name(), d.r.numread(), v)
+}
+
+// --------------------------------------------------
+
+// decSliceHelper assists when decoding into a slice, from a map or an array in the stream.
+// A slice can be set from a map or array in the stream. This supports the MapBySlice interface.
+type decSliceHelper struct {
+	d *Decoder
+	// ct valueType
+	array bool
+}
+
+func (d *Decoder) decSliceHelperStart() (x decSliceHelper, clen int) {
+	dd := d.d
+	ctyp := dd.ContainerType()
+	switch ctyp {
+	case valueTypeArray:
+		x.array = true
+		clen = dd.ReadArrayStart()
+	case valueTypeMap:
+		clen = dd.ReadMapStart() * 2
+	default:
+		d.errorf("only encoded map or array can be decoded into a slice (%d)", ctyp)
+	}
+	// x.ct = ctyp
+	x.d = d
+	return
+}
+
+func (x decSliceHelper) End() {
+	if x.array {
+		x.d.d.ReadArrayEnd()
+	} else {
+		x.d.d.ReadMapEnd()
+	}
+}
+
+func (x decSliceHelper) ElemContainerState(index int) {
+	if x.array {
+		x.d.d.ReadArrayElem()
+	} else if index%2 == 0 {
+		x.d.d.ReadMapElemKey()
+	} else {
+		x.d.d.ReadMapElemValue()
+	}
+}
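+
+// Illustrative trace: decoding the stream map {k1: v1} into a slice yields
+// clen == 2 from decSliceHelperStart, after which ElemContainerState(0)
+// signals a map key and ElemContainerState(1) the corresponding map value.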
+
+func decByteSlice(r decReader, clen, maxInitLen int, bs []byte) (bsOut []byte) {
+	if clen == 0 {
+		return zeroByteSlice
+	}
+	if len(bs) == clen {
+		bsOut = bs
+		r.readb(bsOut)
+	} else if cap(bs) >= clen {
+		bsOut = bs[:clen]
+		r.readb(bsOut)
+	} else {
+		// bsOut = make([]byte, clen)
+		len2 := decInferLen(clen, maxInitLen, 1)
+		bsOut = make([]byte, len2)
+		r.readb(bsOut)
+		for len2 < clen {
+			len3 := decInferLen(clen-len2, maxInitLen, 1)
+			bs3 := bsOut
+			bsOut = make([]byte, len2+len3)
+			copy(bsOut, bs3)
+			r.readb(bsOut[len2:])
+			len2 += len3
+		}
+	}
+	return
+}
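+
+// Note that when clen exceeds what decInferLen permits in one step, the
+// buffer above is grown in decInferLen-sized chunks, reading directly into
+// the tail of each reallocation until all clen bytes have been consumed.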
+
+func detachZeroCopyBytes(isBytesReader bool, dest []byte, in []byte) (out []byte) {
+	if xlen := len(in); xlen > 0 {
+		if isBytesReader || xlen <= scratchByteArrayLen {
+			if cap(dest) >= xlen {
+				out = dest[:xlen]
+			} else {
+				out = make([]byte, xlen)
+			}
+			copy(out, in)
+			return
+		}
+	}
+	return in
+}
+
+// decInferLen will infer a sensible length, given the following:
+//    - clen: length wanted.
+//    - maxlen: max length to be returned.
+//      if <= 0, it is unset, and we infer it based on the unit size
+//    - unit: number of bytes for each element of the collection
+func decInferLen(clen, maxlen, unit int) (rvlen int) {
+	// handle when maxlen is not set i.e. <= 0
+	if clen <= 0 {
+		return
+	}
+	if unit == 0 {
+		return clen
+	}
+	if maxlen <= 0 {
+		// no maxlen defined. Use maximum of 256K memory, with a floor of 4K items.
+		// maxlen = 256 * 1024 / unit
+		// if maxlen < (4 * 1024) {
+		// 	maxlen = 4 * 1024
+		// }
+		if unit < (256 / 4) {
+			maxlen = 256 * 1024 / unit
+		} else {
+			maxlen = 4 * 1024
+		}
+	}
+	if clen > maxlen {
+		rvlen = maxlen
+	} else {
+		rvlen = clen
+	}
+	return
+}
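+
+// For example: with unit == 8 (say, a slice of int64) and maxlen unset,
+// decInferLen caps a claimed length of 1<<20 at 256*1024/8 == 32768,
+// while decInferLen(100, 0, 8) returns 100 unchanged.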
+
+func expandSliceRV(s reflect.Value, st reflect.Type, canChange bool, stElemSize, num, slen, scap int) (
+	s2 reflect.Value, scap2 int, changed bool, err string) {
+	l1 := slen + num // new slice length
+	if l1 < slen {
+		err = errmsgExpandSliceOverflow
+		return
+	}
+	if l1 <= scap {
+		if s.CanSet() {
+			s.SetLen(l1)
+		} else if canChange {
+			s2 = s.Slice(0, l1)
+			scap2 = scap
+			changed = true
+		} else {
+			err = errmsgExpandSliceCannotChange
+			return
+		}
+		return
+	}
+	if !canChange {
+		err = errmsgExpandSliceCannotChange
+		return
+	}
+	scap2 = growCap(scap, stElemSize, num)
+	s2 = reflect.MakeSlice(st, l1, scap2)
+	changed = true
+	reflect.Copy(s2, s)
+	return
+}
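+
+// For example: expanding a slice with len == cap == 4 by num == 1 (with
+// canChange true) allocates a new slice of length 5 and capacity
+// growCap(4, stElemSize, 1), copies the old contents over, and reports
+// changed == true.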
+
+func decReadFull(r io.Reader, bs []byte) (n int, err error) {
+	var nn int
+	for n < len(bs) && err == nil {
+		nn, err = r.Read(bs[n:])
+		if nn > 0 {
+			if err == io.EOF {
+				// leave EOF for next time
+				err = nil
+			}
+			n += nn
+		}
+	}
+
+	// do not do this - it serves no purpose
+	// if n != len(bs) && err == io.EOF { err = io.ErrUnexpectedEOF }
+	return
+}
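+
+// decReadFull thus mirrors io.ReadFull, except that a short read is reported
+// with err == io.EOF rather than io.ErrUnexpectedEOF, leaving the caller to
+// decide how to treat it.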
diff --git a/vendor/github.com/ugorji/go/codec/encode.go b/vendor/github.com/ugorji/go/codec/encode.go
new file mode 100644
index 0000000..ef46529
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/encode.go
@@ -0,0 +1,1375 @@
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+	"bufio"
+	"encoding"
+	"errors"
+	"fmt"
+	"io"
+	"reflect"
+	"sort"
+	"strconv"
+	"sync"
+	"time"
+)
+
+const defEncByteBufSize = 1 << 6 // 4:16, 6:64, 8:256, 10:1024
+
+var errEncoderNotInitialized = errors.New("Encoder not initialized")
+
+// encWriter abstracts writing to a byte array or to an io.Writer.
+type encWriter interface {
+	writeb([]byte)
+	writestr(string)
+	writen1(byte)
+	writen2(byte, byte)
+	atEndOfEncode()
+}
+
+// encDriver abstracts the actual codec (binc vs msgpack, etc)
+type encDriver interface {
+	EncodeNil()
+	EncodeInt(i int64)
+	EncodeUint(i uint64)
+	EncodeBool(b bool)
+	EncodeFloat32(f float32)
+	EncodeFloat64(f float64)
+	// encodeExtPreamble(xtag byte, length int)
+	EncodeRawExt(re *RawExt, e *Encoder)
+	EncodeExt(v interface{}, xtag uint64, ext Ext, e *Encoder)
+	EncodeString(c charEncoding, v string)
+	// EncodeSymbol(v string)
+	EncodeStringBytes(c charEncoding, v []byte)
+	EncodeTime(time.Time)
+	//encBignum(f *big.Int)
+	//encStringRunes(c charEncoding, v []rune)
+	WriteArrayStart(length int)
+	WriteArrayElem()
+	WriteArrayEnd()
+	WriteMapStart(length int)
+	WriteMapElemKey()
+	WriteMapElemValue()
+	WriteMapEnd()
+
+	reset()
+	atEndOfEncode()
+}
+
+type ioEncStringWriter interface {
+	WriteString(s string) (n int, err error)
+}
+
+type encDriverAsis interface {
+	EncodeAsis(v []byte)
+}
+
+type encDriverNoopContainerWriter struct{}
+
+func (encDriverNoopContainerWriter) WriteArrayStart(length int) {}
+func (encDriverNoopContainerWriter) WriteArrayElem()            {}
+func (encDriverNoopContainerWriter) WriteArrayEnd()             {}
+func (encDriverNoopContainerWriter) WriteMapStart(length int)   {}
+func (encDriverNoopContainerWriter) WriteMapElemKey()           {}
+func (encDriverNoopContainerWriter) WriteMapElemValue()         {}
+func (encDriverNoopContainerWriter) WriteMapEnd()               {}
+func (encDriverNoopContainerWriter) atEndOfEncode()             {}
+
+type encDriverTrackContainerWriter struct {
+	c containerState
+}
+
+func (e *encDriverTrackContainerWriter) WriteArrayStart(length int) { e.c = containerArrayStart }
+func (e *encDriverTrackContainerWriter) WriteArrayElem()            { e.c = containerArrayElem }
+func (e *encDriverTrackContainerWriter) WriteArrayEnd()             { e.c = containerArrayEnd }
+func (e *encDriverTrackContainerWriter) WriteMapStart(length int)   { e.c = containerMapStart }
+func (e *encDriverTrackContainerWriter) WriteMapElemKey()           { e.c = containerMapKey }
+func (e *encDriverTrackContainerWriter) WriteMapElemValue()         { e.c = containerMapValue }
+func (e *encDriverTrackContainerWriter) WriteMapEnd()               { e.c = containerMapEnd }
+func (e *encDriverTrackContainerWriter) atEndOfEncode()             {}
+
+// type ioEncWriterWriter interface {
+// 	WriteByte(c byte) error
+// 	WriteString(s string) (n int, err error)
+// 	Write(p []byte) (n int, err error)
+// }
+
+// EncodeOptions captures configuration options during encode.
+type EncodeOptions struct {
+	// WriterBufferSize is the size of the buffer used when writing.
+	//
+	// If > 0, we use a smart buffer internally for performance purposes.
+	WriterBufferSize int
+
+	// ChanRecvTimeout is the timeout used when selecting from a chan.
+	//
+	// Configuring this controls how we receive from a chan during the encoding
+	// process (see the usage sketch after this struct):
+	//   - If ==0, we only consume the elements currently available in the chan.
+	//   - If  <0, we consume until the chan is closed.
+	//   - If  >0, we consume until this timeout.
+	ChanRecvTimeout time.Duration
+
+	// StructToArray specifies to encode a struct as an array, and not as a map
+	StructToArray bool
+
+	// Canonical representation means that encoding a value will always result in the same
+	// sequence of bytes.
+	//
+	// This only affects maps, as the iteration order for maps is random.
+	//
+	// The implementation MAY use the natural sort order for the map keys if possible:
+	//
+	//     - If there is a natural sort order (ie for number, bool, string or []byte keys),
+	//       then the map keys are first sorted in natural order and then written
+	//       with corresponding map values to the stream.
+	//     - If there is no natural sort order, then the map keys will first be
+	//       encoded into []byte, and then sorted,
+	//       before writing the sorted keys and the corresponding map values to the stream.
+	//
+	Canonical bool
+
+	// CheckCircularRef controls whether we check for circular references
+	// and error fast during an encode.
+	//
+	// If enabled, an error is received if a pointer to a struct
+	// references itself either directly or through one of its fields (iteratively).
+	//
+	// This is opt-in, as there may be a performance hit to checking circular references.
+	CheckCircularRef bool
+
+	// RecursiveEmptyCheck controls whether we descend into interfaces, structs and pointers
+	// when checking if a value is empty.
+	//
+	// Note that this may make OmitEmpty more expensive, as it incurs a lot more reflect calls.
+	RecursiveEmptyCheck bool
+
+	// Raw controls whether we encode Raw values.
+	// This is a "dangerous" option and must be explicitly set.
+	// If set, we blindly encode Raw values as-is, without checking
+	// if they are a correct representation of a value in that format.
+	// If unset, we error out.
+	Raw bool
+
+	// // AsSymbols defines what should be encoded as symbols.
+	// //
+	// // Encoding as symbols can reduce the encoded size significantly.
+	// //
+	// // However, during decoding, each string to be encoded as a symbol must
+	// // be checked to see if it has been seen before. Consequently, encoding time
+	// // will increase if using symbols, because string comparisons have a clear cost.
+	// //
+	// // Sample values:
+	// //   AsSymbolNone
+	// //   AsSymbolAll
+	// //   AsSymbolMapStringKeys
+	// //   AsSymbolMapStringKeysFlag | AsSymbolStructFieldNameFlag
+	// AsSymbols AsSymbolFlag
+}
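+
+// As a usage sketch: ChanRecvTimeout == -1 makes encoding a channel drain it
+// until it is closed, ChanRecvTimeout == 0 only consumes elements already
+// buffered at encode time, and a positive value stops consuming once the
+// timeout fires (see kSlice below).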
+
+// ---------------------------------------------
+
+// ioEncWriter implements encWriter and can write to an io.Writer implementation
+type ioEncWriter struct {
+	w  io.Writer
+	ww io.Writer
+	bw io.ByteWriter
+	sw ioEncStringWriter
+	fw ioFlusher
+	b  [8]byte
+}
+
+func (z *ioEncWriter) WriteByte(b byte) (err error) {
+	z.b[0] = b
+	_, err = z.w.Write(z.b[:1])
+	return
+}
+
+func (z *ioEncWriter) WriteString(s string) (n int, err error) {
+	return z.w.Write(bytesView(s))
+}
+
+func (z *ioEncWriter) writeb(bs []byte) {
+	if _, err := z.ww.Write(bs); err != nil {
+		panic(err)
+	}
+}
+
+func (z *ioEncWriter) writestr(s string) {
+	if _, err := z.sw.WriteString(s); err != nil {
+		panic(err)
+	}
+}
+
+func (z *ioEncWriter) writen1(b byte) {
+	if err := z.bw.WriteByte(b); err != nil {
+		panic(err)
+	}
+}
+
+func (z *ioEncWriter) writen2(b1, b2 byte) {
+	var err error
+	if err = z.bw.WriteByte(b1); err == nil {
+		if err = z.bw.WriteByte(b2); err == nil {
+			return
+		}
+	}
+	panic(err)
+}
+
+// func (z *ioEncWriter) writen5(b1, b2, b3, b4, b5 byte) {
+// 	z.b[0], z.b[1], z.b[2], z.b[3], z.b[4] = b1, b2, b3, b4, b5
+// 	if _, err := z.ww.Write(z.b[:5]); err != nil {
+// 		panic(err)
+// 	}
+// }
+
+func (z *ioEncWriter) atEndOfEncode() {
+	if z.fw != nil {
+		if err := z.fw.Flush(); err != nil {
+			panic(err)
+		}
+	}
+}
+
+// ---------------------------------------------
+
+// bytesEncAppender implements encWriter and can write to a byte slice.
+type bytesEncAppender struct {
+	b   []byte
+	out *[]byte
+}
+
+func (z *bytesEncAppender) writeb(s []byte) {
+	z.b = append(z.b, s...)
+}
+func (z *bytesEncAppender) writestr(s string) {
+	z.b = append(z.b, s...)
+}
+func (z *bytesEncAppender) writen1(b1 byte) {
+	z.b = append(z.b, b1)
+}
+func (z *bytesEncAppender) writen2(b1, b2 byte) {
+	z.b = append(z.b, b1, b2)
+}
+func (z *bytesEncAppender) atEndOfEncode() {
+	*(z.out) = z.b
+}
+func (z *bytesEncAppender) reset(in []byte, out *[]byte) {
+	z.b = in[:0]
+	z.out = out
+}
+
+// ---------------------------------------------
+
+func (e *Encoder) rawExt(f *codecFnInfo, rv reflect.Value) {
+	e.e.EncodeRawExt(rv2i(rv).(*RawExt), e)
+}
+
+func (e *Encoder) ext(f *codecFnInfo, rv reflect.Value) {
+	e.e.EncodeExt(rv2i(rv), f.xfTag, f.xfFn, e)
+}
+
+func (e *Encoder) selferMarshal(f *codecFnInfo, rv reflect.Value) {
+	rv2i(rv).(Selfer).CodecEncodeSelf(e)
+}
+
+func (e *Encoder) binaryMarshal(f *codecFnInfo, rv reflect.Value) {
+	bs, fnerr := rv2i(rv).(encoding.BinaryMarshaler).MarshalBinary()
+	e.marshal(bs, fnerr, false, cRAW)
+}
+
+func (e *Encoder) textMarshal(f *codecFnInfo, rv reflect.Value) {
+	bs, fnerr := rv2i(rv).(encoding.TextMarshaler).MarshalText()
+	e.marshal(bs, fnerr, false, cUTF8)
+}
+
+func (e *Encoder) jsonMarshal(f *codecFnInfo, rv reflect.Value) {
+	bs, fnerr := rv2i(rv).(jsonMarshaler).MarshalJSON()
+	e.marshal(bs, fnerr, true, cUTF8)
+}
+
+func (e *Encoder) raw(f *codecFnInfo, rv reflect.Value) {
+	e.rawBytes(rv2i(rv).(Raw))
+}
+
+func (e *Encoder) kInvalid(f *codecFnInfo, rv reflect.Value) {
+	e.e.EncodeNil()
+}
+
+func (e *Encoder) kErr(f *codecFnInfo, rv reflect.Value) {
+	e.errorf("unsupported kind %s, for %#v", rv.Kind(), rv)
+}
+
+func (e *Encoder) kSlice(f *codecFnInfo, rv reflect.Value) {
+	ti := f.ti
+	ee := e.e
+	// array may be non-addressable, so we have to manage with care
+	//   (don't call rv.Bytes, rv.Slice, etc).
+	// E.g. type struct S{B [2]byte};
+	//   Encode(S{}) will bomb on "panic: slice of unaddressable array".
+	if f.seq != seqTypeArray {
+		if rv.IsNil() {
+			ee.EncodeNil()
+			return
+		}
+		// If in this method, then there was no extension function defined.
+		// So it's okay to treat as []byte.
+		if ti.rtid == uint8SliceTypId {
+			ee.EncodeStringBytes(cRAW, rv.Bytes())
+			return
+		}
+	}
+	if f.seq == seqTypeChan && ti.chandir&uint8(reflect.RecvDir) == 0 {
+		e.errorf("send-only channel cannot be encoded")
+	}
+	elemsep := e.esep
+	rtelem := ti.elem
+	rtelemIsByte := uint8TypId == rt2id(rtelem) // NOT rtelem.Kind() == reflect.Uint8
+	var l int
+	// if a slice, array or chan of bytes, treat specially
+	if rtelemIsByte {
+		switch f.seq {
+		case seqTypeSlice:
+			ee.EncodeStringBytes(cRAW, rv.Bytes())
+		case seqTypeArray:
+			l = rv.Len()
+			if rv.CanAddr() {
+				ee.EncodeStringBytes(cRAW, rv.Slice(0, l).Bytes())
+			} else {
+				var bs []byte
+				if l <= cap(e.b) {
+					bs = e.b[:l]
+				} else {
+					bs = make([]byte, l)
+				}
+				reflect.Copy(reflect.ValueOf(bs), rv)
+				ee.EncodeStringBytes(cRAW, bs)
+			}
+		case seqTypeChan:
+			// do not use range, so that the number of elements encoded
+			// does not change, and encoding does not hang waiting on someone to close chan.
+			// for b := range rv2i(rv).(<-chan byte) { bs = append(bs, b) }
+			// ch := rv2i(rv).(<-chan byte) // fix error - that this is a chan byte, not a <-chan byte.
+
+			if rv.IsNil() {
+				ee.EncodeNil()
+				break
+			}
+			bs := e.b[:0]
+			irv := rv2i(rv)
+			ch, ok := irv.(<-chan byte)
+			if !ok {
+				ch = irv.(chan byte)
+			}
+
+		L1:
+			switch timeout := e.h.ChanRecvTimeout; {
+			case timeout == 0: // only consume available
+				for {
+					select {
+					case b := <-ch:
+						bs = append(bs, b)
+					default:
+						break L1
+					}
+				}
+			case timeout > 0: // consume until timeout
+				tt := time.NewTimer(timeout)
+				for {
+					select {
+					case b := <-ch:
+						bs = append(bs, b)
+					case <-tt.C:
+						// close(tt.C)
+						break L1
+					}
+				}
+			default: // consume until close
+				for b := range ch {
+					bs = append(bs, b)
+				}
+			}
+
+			ee.EncodeStringBytes(cRAW, bs)
+		}
+		return
+	}
+
+	// if chan, consume chan into a slice, and work off that slice.
+	var rvcs reflect.Value
+	if f.seq == seqTypeChan {
+		rvcs = reflect.Zero(reflect.SliceOf(rtelem))
+		timeout := e.h.ChanRecvTimeout
+		if timeout < 0 { // consume until close
+			for {
+				recv, recvOk := rv.Recv()
+				if !recvOk {
+					break
+				}
+				rvcs = reflect.Append(rvcs, recv)
+			}
+		} else {
+			cases := make([]reflect.SelectCase, 2)
+			cases[0] = reflect.SelectCase{Dir: reflect.SelectRecv, Chan: rv}
+			if timeout == 0 {
+				cases[1] = reflect.SelectCase{Dir: reflect.SelectDefault}
+			} else {
+				tt := time.NewTimer(timeout)
+				cases[1] = reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(tt.C)}
+			}
+			for {
+				chosen, recv, recvOk := reflect.Select(cases)
+				if chosen == 1 || !recvOk {
+					break
+				}
+				rvcs = reflect.Append(rvcs, recv)
+			}
+		}
+		rv = rvcs // TODO: ensure this doesn't mess up anywhere that rv of kind chan is expected
+	}
+
+	l = rv.Len()
+	if ti.mbs {
+		if l%2 == 1 {
+			e.errorf("mapBySlice requires even slice length, but got %v", l)
+			return
+		}
+		ee.WriteMapStart(l / 2)
+	} else {
+		ee.WriteArrayStart(l)
+	}
+
+	if l > 0 {
+		var fn *codecFn
+		for rtelem.Kind() == reflect.Ptr {
+			rtelem = rtelem.Elem()
+		}
+		// if kind is reflect.Interface, do not pre-determine the
+		// encoding type, because preEncodeValue may break it down to
+		// a concrete type and kInterface will bomb.
+		if rtelem.Kind() != reflect.Interface {
+			fn = e.cfer().get(rtelem, true, true)
+		}
+		for j := 0; j < l; j++ {
+			if elemsep {
+				if ti.mbs {
+					if j%2 == 0 {
+						ee.WriteMapElemKey()
+					} else {
+						ee.WriteMapElemValue()
+					}
+				} else {
+					ee.WriteArrayElem()
+				}
+			}
+			e.encodeValue(rv.Index(j), fn, true)
+		}
+	}
+
+	if ti.mbs {
+		ee.WriteMapEnd()
+	} else {
+		ee.WriteArrayEnd()
+	}
+}
+
+func (e *Encoder) kStructNoOmitempty(f *codecFnInfo, rv reflect.Value) {
+	fti := f.ti
+	elemsep := e.esep
+	tisfi := fti.sfiSrc
+	toMap := !(fti.toArray || e.h.StructToArray)
+	if toMap {
+		tisfi = fti.sfiSort
+	}
+	ee := e.e
+
+	sfn := structFieldNode{v: rv, update: false}
+	if toMap {
+		ee.WriteMapStart(len(tisfi))
+		if elemsep {
+			for _, si := range tisfi {
+				ee.WriteMapElemKey()
+				// ee.EncodeString(cUTF8, si.encName)
+				encStructFieldKey(ee, fti.keyType, si.encName)
+				ee.WriteMapElemValue()
+				e.encodeValue(sfn.field(si), nil, true)
+			}
+		} else {
+			for _, si := range tisfi {
+				// ee.EncodeString(cUTF8, si.encName)
+				encStructFieldKey(ee, fti.keyType, si.encName)
+				e.encodeValue(sfn.field(si), nil, true)
+			}
+		}
+		ee.WriteMapEnd()
+	} else {
+		ee.WriteArrayStart(len(tisfi))
+		if elemsep {
+			for _, si := range tisfi {
+				ee.WriteArrayElem()
+				e.encodeValue(sfn.field(si), nil, true)
+			}
+		} else {
+			for _, si := range tisfi {
+				e.encodeValue(sfn.field(si), nil, true)
+			}
+		}
+		ee.WriteArrayEnd()
+	}
+}
+
+func encStructFieldKey(ee encDriver, keyType valueType, s string) {
+	var m must
+
+	// use if-else-if, not switch (which compiles to binary-search)
+	// since keyType is typically valueTypeString, branch prediction is pretty good.
+
+	if keyType == valueTypeString {
+		ee.EncodeString(cUTF8, s)
+	} else if keyType == valueTypeInt {
+		ee.EncodeInt(m.Int(strconv.ParseInt(s, 10, 64)))
+	} else if keyType == valueTypeUint {
+		ee.EncodeUint(m.Uint(strconv.ParseUint(s, 10, 64)))
+	} else if keyType == valueTypeFloat {
+		ee.EncodeFloat64(m.Float(strconv.ParseFloat(s, 64)))
+	} else {
+		ee.EncodeString(cUTF8, s)
+	}
+}
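+
+// For example: with keyType == valueTypeInt, a struct field whose encoded
+// name is "3" is written to the stream as the integer 3 (via
+// strconv.ParseInt), rather than as the string "3".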
+
+func (e *Encoder) kStruct(f *codecFnInfo, rv reflect.Value) {
+	fti := f.ti
+	elemsep := e.esep
+	tisfi := fti.sfiSrc
+	toMap := !(fti.toArray || e.h.StructToArray)
+	// if toMap, use the sorted array. If toArray, use unsorted array (to match sequence in struct)
+	if toMap {
+		tisfi = fti.sfiSort
+	}
+	newlen := len(fti.sfiSort)
+	ee := e.e
+
+	// Use sync.Pool to reduce allocating slices unnecessarily.
+	// The cost of sync.Pool is less than the cost of new allocation.
+	//
+	// Each element of the array pools one of encStructPool(8|16|32|64|128).
+	// It allows the re-use of slices up to 128 in length.
+	// A performance cost of encoding structs was collecting
+	// which values were empty and should be omitted.
+	// We needed slices of reflect.Value and string to collect them.
+	// This shared pool reduces the amount of unnecessary creation we do.
+	// The cost is that of locking sometimes, but sync.Pool is efficient
+	// enough to reduce thread contention.
+
+	var spool *sync.Pool
+	var poolv interface{}
+	var fkvs []stringRv
+	// fmt.Printf(">>>>>>>>>>>>>> encode.kStruct: newlen: %d\n", newlen)
+	if newlen <= 8 {
+		spool, poolv = pool.stringRv8()
+		fkvs = poolv.(*[8]stringRv)[:newlen]
+	} else if newlen <= 16 {
+		spool, poolv = pool.stringRv16()
+		fkvs = poolv.(*[16]stringRv)[:newlen]
+	} else if newlen <= 32 {
+		spool, poolv = pool.stringRv32()
+		fkvs = poolv.(*[32]stringRv)[:newlen]
+	} else if newlen <= 64 {
+		spool, poolv = pool.stringRv64()
+		fkvs = poolv.(*[64]stringRv)[:newlen]
+	} else if newlen <= 128 {
+		spool, poolv = pool.stringRv128()
+		fkvs = poolv.(*[128]stringRv)[:newlen]
+	} else {
+		fkvs = make([]stringRv, newlen)
+	}
+
+	newlen = 0
+	var kv stringRv
+	recur := e.h.RecursiveEmptyCheck
+	sfn := structFieldNode{v: rv, update: false}
+	for _, si := range tisfi {
+		// kv.r = si.field(rv, false)
+		kv.r = sfn.field(si)
+		if toMap {
+			if si.omitEmpty() && isEmptyValue(kv.r, e.h.TypeInfos, recur, recur) {
+				continue
+			}
+			kv.v = si.encName
+		} else {
+			// use the zero value.
+			// if a reference or struct, set to nil (so you do not output too much)
+			if si.omitEmpty() && isEmptyValue(kv.r, e.h.TypeInfos, recur, recur) {
+				switch kv.r.Kind() {
+				case reflect.Struct, reflect.Interface, reflect.Ptr, reflect.Array, reflect.Map, reflect.Slice:
+					kv.r = reflect.Value{} //encode as nil
+				}
+			}
+		}
+		fkvs[newlen] = kv
+		newlen++
+	}
+
+	if toMap {
+		ee.WriteMapStart(newlen)
+		if elemsep {
+			for j := 0; j < newlen; j++ {
+				kv = fkvs[j]
+				ee.WriteMapElemKey()
+				// ee.EncodeString(cUTF8, kv.v)
+				encStructFieldKey(ee, fti.keyType, kv.v)
+				ee.WriteMapElemValue()
+				e.encodeValue(kv.r, nil, true)
+			}
+		} else {
+			for j := 0; j < newlen; j++ {
+				kv = fkvs[j]
+				// ee.EncodeString(cUTF8, kv.v)
+				encStructFieldKey(ee, fti.keyType, kv.v)
+				e.encodeValue(kv.r, nil, true)
+			}
+		}
+		ee.WriteMapEnd()
+	} else {
+		ee.WriteArrayStart(newlen)
+		if elemsep {
+			for j := 0; j < newlen; j++ {
+				ee.WriteArrayElem()
+				e.encodeValue(fkvs[j].r, nil, true)
+			}
+		} else {
+			for j := 0; j < newlen; j++ {
+				e.encodeValue(fkvs[j].r, nil, true)
+			}
+		}
+		ee.WriteArrayEnd()
+	}
+
+	// do not use defer. Instead, use explicit pool return at end of function.
+	// defer has a cost we are trying to avoid.
+	// If there is a panic and these slices are not returned, it is ok.
+	if spool != nil {
+		spool.Put(poolv)
+	}
+}
+
+func (e *Encoder) kMap(f *codecFnInfo, rv reflect.Value) {
+	ee := e.e
+	if rv.IsNil() {
+		ee.EncodeNil()
+		return
+	}
+
+	l := rv.Len()
+	ee.WriteMapStart(l)
+	elemsep := e.esep
+	if l == 0 {
+		ee.WriteMapEnd()
+		return
+	}
+	// var asSymbols bool
+	// determine the underlying key and val encFn's for the map.
+	// This eliminates some work which is done for each loop iteration i.e.
+	// rv.Type(), ref.ValueOf(rt).Pointer(), then check map/list for fn.
+	//
+	// However, if kind is reflect.Interface, do not pre-determine the
+	// encoding type, because preEncodeValue may break it down to
+	// a concrete type and kInterface will bomb.
+	var keyFn, valFn *codecFn
+	ti := f.ti
+	rtkey0 := ti.key
+	rtkey := rtkey0
+	rtval0 := ti.elem
+	rtval := rtval0
+	// rtkeyid := rt2id(rtkey0)
+	for rtval.Kind() == reflect.Ptr {
+		rtval = rtval.Elem()
+	}
+	if rtval.Kind() != reflect.Interface {
+		valFn = e.cfer().get(rtval, true, true)
+	}
+	mks := rv.MapKeys()
+
+	if e.h.Canonical {
+		e.kMapCanonical(rtkey, rv, mks, valFn)
+		ee.WriteMapEnd()
+		return
+	}
+
+	var keyTypeIsString = stringTypId == rt2id(rtkey0) // rtkeyid
+	if !keyTypeIsString {
+		for rtkey.Kind() == reflect.Ptr {
+			rtkey = rtkey.Elem()
+		}
+		if rtkey.Kind() != reflect.Interface {
+			// rtkeyid = rt2id(rtkey)
+			keyFn = e.cfer().get(rtkey, true, true)
+		}
+	}
+
+	// for j, lmks := 0, len(mks); j < lmks; j++ {
+	for j := range mks {
+		if elemsep {
+			ee.WriteMapElemKey()
+		}
+		if keyTypeIsString {
+			ee.EncodeString(cUTF8, mks[j].String())
+		} else {
+			e.encodeValue(mks[j], keyFn, true)
+		}
+		if elemsep {
+			ee.WriteMapElemValue()
+		}
+		e.encodeValue(rv.MapIndex(mks[j]), valFn, true)
+
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) kMapCanonical(rtkey reflect.Type, rv reflect.Value, mks []reflect.Value, valFn *codecFn) {
+	ee := e.e
+	elemsep := e.esep
+	// we previously did out-of-band if an extension was registered.
+	// This is not necessary, as the natural kind is sufficient for ordering.
+
+	switch rtkey.Kind() {
+	case reflect.Bool:
+		mksv := make([]boolRv, len(mks))
+		for i, k := range mks {
+			v := &mksv[i]
+			v.r = k
+			v.v = k.Bool()
+		}
+		sort.Sort(boolRvSlice(mksv))
+		for i := range mksv {
+			if elemsep {
+				ee.WriteMapElemKey()
+			}
+			ee.EncodeBool(mksv[i].v)
+			if elemsep {
+				ee.WriteMapElemValue()
+			}
+			e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true)
+		}
+	case reflect.String:
+		mksv := make([]stringRv, len(mks))
+		for i, k := range mks {
+			v := &mksv[i]
+			v.r = k
+			v.v = k.String()
+		}
+		sort.Sort(stringRvSlice(mksv))
+		for i := range mksv {
+			if elemsep {
+				ee.WriteMapElemKey()
+			}
+			ee.EncodeString(cUTF8, mksv[i].v)
+			if elemsep {
+				ee.WriteMapElemValue()
+			}
+			e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true)
+		}
+	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint, reflect.Uintptr:
+		mksv := make([]uintRv, len(mks))
+		for i, k := range mks {
+			v := &mksv[i]
+			v.r = k
+			v.v = k.Uint()
+		}
+		sort.Sort(uintRvSlice(mksv))
+		for i := range mksv {
+			if elemsep {
+				ee.WriteMapElemKey()
+			}
+			ee.EncodeUint(mksv[i].v)
+			if elemsep {
+				ee.WriteMapElemValue()
+			}
+			e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true)
+		}
+	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+		mksv := make([]intRv, len(mks))
+		for i, k := range mks {
+			v := &mksv[i]
+			v.r = k
+			v.v = k.Int()
+		}
+		sort.Sort(intRvSlice(mksv))
+		for i := range mksv {
+			if elemsep {
+				ee.WriteMapElemKey()
+			}
+			ee.EncodeInt(mksv[i].v)
+			if elemsep {
+				ee.WriteMapElemValue()
+			}
+			e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true)
+		}
+	case reflect.Float32:
+		mksv := make([]floatRv, len(mks))
+		for i, k := range mks {
+			v := &mksv[i]
+			v.r = k
+			v.v = k.Float()
+		}
+		sort.Sort(floatRvSlice(mksv))
+		for i := range mksv {
+			if elemsep {
+				ee.WriteMapElemKey()
+			}
+			ee.EncodeFloat32(float32(mksv[i].v))
+			if elemsep {
+				ee.WriteMapElemValue()
+			}
+			e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true)
+		}
+	case reflect.Float64:
+		mksv := make([]floatRv, len(mks))
+		for i, k := range mks {
+			v := &mksv[i]
+			v.r = k
+			v.v = k.Float()
+		}
+		sort.Sort(floatRvSlice(mksv))
+		for i := range mksv {
+			if elemsep {
+				ee.WriteMapElemKey()
+			}
+			ee.EncodeFloat64(mksv[i].v)
+			if elemsep {
+				ee.WriteMapElemValue()
+			}
+			e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true)
+		}
+	case reflect.Struct:
+		if rv.Type() == timeTyp {
+			mksv := make([]timeRv, len(mks))
+			for i, k := range mks {
+				v := &mksv[i]
+				v.r = k
+				v.v = rv2i(k).(time.Time)
+			}
+			sort.Sort(timeRvSlice(mksv))
+			for i := range mksv {
+				if elemsep {
+					ee.WriteMapElemKey()
+				}
+				ee.EncodeTime(mksv[i].v)
+				if elemsep {
+					ee.WriteMapElemValue()
+				}
+				e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true)
+			}
+			break
+		}
+		fallthrough
+	default:
+		// out-of-band
+		// first encode each key to a []byte first, then sort them, then record
+		var mksv []byte = make([]byte, 0, len(mks)*16) // temporary byte slice for the encoding
+		e2 := NewEncoderBytes(&mksv, e.hh)
+		mksbv := make([]bytesRv, len(mks))
+		for i, k := range mks {
+			v := &mksbv[i]
+			l := len(mksv)
+			e2.MustEncode(k)
+			v.r = k
+			v.v = mksv[l:]
+		}
+		sort.Sort(bytesRvSlice(mksbv))
+		for j := range mksbv {
+			if elemsep {
+				ee.WriteMapElemKey()
+			}
+			e.asis(mksbv[j].v)
+			if elemsep {
+				ee.WriteMapElemValue()
+			}
+			e.encodeValue(rv.MapIndex(mksbv[j].r), valFn, true)
+		}
+	}
+}
+
+// // --------------------------------------------------
+
+type encWriterSwitch struct {
+	wi *ioEncWriter
+	// wb bytesEncWriter
+	wb   bytesEncAppender
+	wx   bool // if bytes, wx=true
+	esep bool // whether it has elem separators
+	isas bool // whether e.as != nil
+}
+
+// // TODO: Uncomment after mid-stack inlining enabled in go 1.11
+
+// func (z *encWriterSwitch) writeb(s []byte) {
+// 	if z.wx {
+// 		z.wb.writeb(s)
+// 	} else {
+// 		z.wi.writeb(s)
+// 	}
+// }
+// func (z *encWriterSwitch) writestr(s string) {
+// 	if z.wx {
+// 		z.wb.writestr(s)
+// 	} else {
+// 		z.wi.writestr(s)
+// 	}
+// }
+// func (z *encWriterSwitch) writen1(b1 byte) {
+// 	if z.wx {
+// 		z.wb.writen1(b1)
+// 	} else {
+// 		z.wi.writen1(b1)
+// 	}
+// }
+// func (z *encWriterSwitch) writen2(b1, b2 byte) {
+// 	if z.wx {
+// 		z.wb.writen2(b1, b2)
+// 	} else {
+// 		z.wi.writen2(b1, b2)
+// 	}
+// }
+
+// An Encoder writes an object to an output stream in the codec format.
+type Encoder struct {
+	panicHdl
+	// hopefully, reduce dereferencing cost by laying the encWriter inside the Encoder
+	e encDriver
+	// NOTE: Encoder shouldn't call its write methods,
+	// as the handler MAY need to do some coordination.
+	w encWriter
+
+	h  *BasicHandle
+	bw *bufio.Writer
+	as encDriverAsis
+
+	// ---- cpu cache line boundary?
+
+	// ---- cpu cache line boundary?
+	encWriterSwitch
+	err error
+
+	// ---- cpu cache line boundary?
+	codecFnPooler
+	ci set
+	js bool    // here, so that no need to piggy back on *codecFner for this
+	be bool    // here, so that no need to piggy back on *codecFner for this
+	_  [6]byte // padding
+
+	// ---- writable fields during execution --- *try* to keep in sep cache line
+
+	// ---- cpu cache line boundary?
+	// b [scratchByteArrayLen]byte
+	// _ [cacheLineSize - scratchByteArrayLen]byte // padding
+	b [cacheLineSize - 0]byte // used for encoding a chan or (non-addressable) array of bytes
+}
+
+// NewEncoder returns an Encoder for encoding into an io.Writer.
+//
+// For efficiency, users are encouraged to pass in a memory buffered writer
+// (eg bufio.Writer, bytes.Buffer).
+func NewEncoder(w io.Writer, h Handle) *Encoder {
+	e := newEncoder(h)
+	e.Reset(w)
+	return e
+}
+
+// NewEncoderBytes returns an encoder for encoding directly and efficiently
+// into a byte slice, using zero-copy writes into temporary slices.
+//
+// It will potentially replace the output byte slice pointed to.
+// After encoding, the out parameter contains the encoded contents.
+func NewEncoderBytes(out *[]byte, h Handle) *Encoder {
+	e := newEncoder(h)
+	e.ResetBytes(out)
+	return e
+}
+
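+// Usage sketch (hedged; JsonHandle stands in for any concrete Handle):
+//
+//	var buf bytes.Buffer
+//	enc := NewEncoder(&buf, new(JsonHandle))
+//	err := enc.Encode(v)
+//
+//	var out []byte
+//	NewEncoderBytes(&out, new(JsonHandle)).MustEncode(v)
+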
+func newEncoder(h Handle) *Encoder {
+	e := &Encoder{h: h.getBasicHandle(), err: errEncoderNotInitialized}
+	e.hh = h
+	e.esep = h.hasElemSeparators()
+	return e
+}
+
+func (e *Encoder) resetCommon() {
+	if e.e == nil || e.hh.recreateEncDriver(e.e) {
+		e.e = e.hh.newEncDriver(e)
+		e.as, e.isas = e.e.(encDriverAsis)
+		// e.cr, _ = e.e.(containerStateRecv)
+	}
+	e.be = e.hh.isBinary()
+	_, e.js = e.hh.(*JsonHandle)
+	e.e.reset()
+	e.err = nil
+}
+
+// Reset resets the Encoder with a new output stream.
+//
+// This accommodates reusing the state of the Encoder,
+// which has "cached" information about sub-engines.
+func (e *Encoder) Reset(w io.Writer) {
+	if w == nil {
+		return
+	}
+	if e.wi == nil {
+		e.wi = new(ioEncWriter)
+	}
+	var ok bool
+	e.wx = false
+	e.wi.w = w
+	if e.h.WriterBufferSize > 0 {
+		e.bw = bufio.NewWriterSize(w, e.h.WriterBufferSize)
+		e.wi.bw = e.bw
+		e.wi.sw = e.bw
+		e.wi.fw = e.bw
+		e.wi.ww = e.bw
+	} else {
+		if e.wi.bw, ok = w.(io.ByteWriter); !ok {
+			e.wi.bw = e.wi
+		}
+		if e.wi.sw, ok = w.(ioEncStringWriter); !ok {
+			e.wi.sw = e.wi
+		}
+		e.wi.fw, _ = w.(ioFlusher)
+		e.wi.ww = w
+	}
+	e.w = e.wi
+	e.resetCommon()
+}
+
+// ResetBytes resets the Encoder with a new destination output []byte.
+func (e *Encoder) ResetBytes(out *[]byte) {
+	if out == nil {
+		return
+	}
+	in := *out // out is known to be non-nil here
+	if in == nil {
+		in = make([]byte, defEncByteBufSize)
+	}
+	e.wx = true
+	e.wb.reset(in, out)
+	e.w = &e.wb
+	e.resetCommon()
+}
+
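+// Reuse sketch (hedged): Reset/ResetBytes let one Encoder amortize its setup
+// cost across many encodes, e.g.
+//
+//	enc := NewEncoderBytes(&out, h)
+//	for _, v := range values {
+//		out = out[:0]
+//		enc.ResetBytes(&out)
+//		enc.MustEncode(v)
+//	}
+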
+// Encode writes an object into a stream.
+//
+// Encoding can be configured via the struct tag for the fields.
+// The key (in the struct tags) that we look at is configurable.
+//
+// By default, we look up the "codec" key in the struct field's tags,
+// and fall back to the "json" key if "codec" is absent.
+// That key in struct field's tag value is the key name,
+// followed by an optional comma and options.
+//
+// To set an option on all fields (e.g. omitempty on all fields), you
+// can create a field called _struct, and set flags on it. The options
+// which can be set on _struct are:
+//    - omitempty: so all fields are omitted if empty
+//    - toarray: so struct is encoded as an array
+//    - int: so struct key names are encoded as signed integers (instead of strings)
+//    - uint: so struct key names are encoded as unsigned integers (instead of strings)
+//    - float: so struct key names are encoded as floats (instead of strings)
+// More details on these below.
+//
+// Struct values "usually" encode as maps. Each exported struct field is encoded unless:
+//    - the field's tag is "-", OR
+//    - the field is empty (see the empty values below) and its tag specifies the "omitempty" option.
+//
+// When encoding as a map, the first string in the tag (before the comma)
+// is the map key string to use when encoding.
+// ...
+// This key is typically encoded as a string.
+// However, there are instances where the encoded stream has mapping keys encoded as numbers.
+// For example, some cbor streams have keys as integer codes in the stream, but they should map
+// to fields in a structured object. Consequently, a struct is the natural representation in code.
+// For these, configure the struct to encode/decode the keys as numbers (instead of strings).
+// This is done with the int,uint or float option on the _struct field (see above).
+//
+// However, struct values may encode as arrays. This happens when:
+//    - StructToArray Encode option is set, OR
+//    - the tag on the _struct field sets the "toarray" option
+// Note that omitempty is ignored when encoding struct values as arrays,
+// as an entry must be encoded for each field, to maintain its position.
+//
+// Values with types that implement MapBySlice are encoded as stream maps.
+//
+// The empty values (for omitempty option) are false, 0, any nil pointer
+// or interface value, and any array, slice, map, or string of length zero.
+//
+// Anonymous fields are encoded inline except:
+//    - the struct tag specifies a replacement name (first value)
+//    - the field is of an interface type
+//
+// Examples:
+//
+//      // NOTE: 'json:' can be used as struct tag key, in place of 'codec:' below.
+//      type MyStruct struct {
+//          _struct bool    `codec:",omitempty"`   //set omitempty for every field
+//          Field1 string   `codec:"-"`            //skip this field
+//          Field2 int      `codec:"myName"`       //use key "myName" in encode stream
+//          Field3 int32    `codec:",omitempty"`   //use key "Field3". Omit if empty.
+//          Field4 bool     `codec:"f4,omitempty"` //use key "f4". Omit if empty.
+//          io.Reader                              //use key "Reader".
+//          MyStruct        `codec:"my1"`          //use key "my1".
+//          MyStruct                               //inline it
+//          ...
+//      }
+//
+//      type MyStruct struct {
+//          _struct bool    `codec:",toarray"`     //encode struct as an array
+//      }
+//
+//      type MyStruct struct {
+//          _struct bool    `codec:",uint"`        //encode struct with "unsigned integer" keys
+//          Field1 string   `codec:"1"`            //encode Field1 key using: EncodeInt(1)
+//          Field2 string   `codec:"2"`            //encode Field2 key using: EncodeInt(2)
+//      }
+//
+// The mode of encoding is based on the type of the value. When a value is seen:
+//   - If a Selfer, call its CodecEncodeSelf method
+//   - If an extension is registered for it, call that extension function
+//   - If it implements encoding.(Binary|Text|JSON)Marshaler, call its Marshal(Binary|Text|JSON) method
+//   - Else encode it based on its reflect.Kind
+//
+// Note that struct field names and keys in map[string]XXX will be treated as symbols.
+// Some formats support symbols (e.g. binc) and will properly encode the string
+// only once in the stream, and use a tag to refer to it thereafter.
+func (e *Encoder) Encode(v interface{}) (err error) {
+	defer e.deferred(&err)
+	e.MustEncode(v)
+	return
+}
+
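+// MapBySlice sketch (hedged): a slice type that implements MapBySlice encodes
+// as a map of alternating key/value entries, per the doc above, e.g.
+//
+//	type pairs []interface{}
+//	func (pairs) MapBySlice() {}
+//
+//	// pairs{"k1", 1, "k2", 2} encodes like map[string]interface{}{"k1": 1, "k2": 2}
+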
+// MustEncode is like Encode, but panics if unable to Encode.
+// This provides insight to the code location that triggered the error.
+func (e *Encoder) MustEncode(v interface{}) {
+	if e.err != nil {
+		panic(e.err)
+	}
+	e.encode(v)
+	e.e.atEndOfEncode()
+	e.w.atEndOfEncode()
+	e.alwaysAtEnd()
+}
+
+func (e *Encoder) deferred(err1 *error) {
+	e.alwaysAtEnd()
+	if recoverPanicToErr {
+		if x := recover(); x != nil {
+			panicValToErr(e, x, err1)
+			panicValToErr(e, x, &e.err)
+		}
+	}
+}
+
+// func (e *Encoder) alwaysAtEnd() {
+// 	e.codecFnPooler.alwaysAtEnd()
+// }
+
+func (e *Encoder) encode(iv interface{}) {
+	if iv == nil || definitelyNil(iv) {
+		e.e.EncodeNil()
+		return
+	}
+	if v, ok := iv.(Selfer); ok {
+		v.CodecEncodeSelf(e)
+		return
+	}
+
+	// a switch with only concrete types can be optimized.
+	// consequently, we deal with nil and interfaces outside.
+
+	switch v := iv.(type) {
+	case Raw:
+		e.rawBytes(v)
+	case reflect.Value:
+		e.encodeValue(v, nil, true)
+
+	case string:
+		e.e.EncodeString(cUTF8, v)
+	case bool:
+		e.e.EncodeBool(v)
+	case int:
+		e.e.EncodeInt(int64(v))
+	case int8:
+		e.e.EncodeInt(int64(v))
+	case int16:
+		e.e.EncodeInt(int64(v))
+	case int32:
+		e.e.EncodeInt(int64(v))
+	case int64:
+		e.e.EncodeInt(v)
+	case uint:
+		e.e.EncodeUint(uint64(v))
+	case uint8:
+		e.e.EncodeUint(uint64(v))
+	case uint16:
+		e.e.EncodeUint(uint64(v))
+	case uint32:
+		e.e.EncodeUint(uint64(v))
+	case uint64:
+		e.e.EncodeUint(v)
+	case uintptr:
+		e.e.EncodeUint(uint64(v))
+	case float32:
+		e.e.EncodeFloat32(v)
+	case float64:
+		e.e.EncodeFloat64(v)
+	case time.Time:
+		e.e.EncodeTime(v)
+	case []uint8:
+		e.e.EncodeStringBytes(cRAW, v)
+
+	case *Raw:
+		e.rawBytes(*v)
+
+	case *string:
+		e.e.EncodeString(cUTF8, *v)
+	case *bool:
+		e.e.EncodeBool(*v)
+	case *int:
+		e.e.EncodeInt(int64(*v))
+	case *int8:
+		e.e.EncodeInt(int64(*v))
+	case *int16:
+		e.e.EncodeInt(int64(*v))
+	case *int32:
+		e.e.EncodeInt(int64(*v))
+	case *int64:
+		e.e.EncodeInt(*v)
+	case *uint:
+		e.e.EncodeUint(uint64(*v))
+	case *uint8:
+		e.e.EncodeUint(uint64(*v))
+	case *uint16:
+		e.e.EncodeUint(uint64(*v))
+	case *uint32:
+		e.e.EncodeUint(uint64(*v))
+	case *uint64:
+		e.e.EncodeUint(*v)
+	case *uintptr:
+		e.e.EncodeUint(uint64(*v))
+	case *float32:
+		e.e.EncodeFloat32(*v)
+	case *float64:
+		e.e.EncodeFloat64(*v)
+	case *time.Time:
+		e.e.EncodeTime(*v)
+
+	case *[]uint8:
+		e.e.EncodeStringBytes(cRAW, *v)
+
+	default:
+		if !fastpathEncodeTypeSwitch(iv, e) {
+			// checkfastpath=true (not false), as underlying slice/map type may be fast-path
+			e.encodeValue(reflect.ValueOf(iv), nil, true)
+		}
+	}
+}
+
+func (e *Encoder) encodeValue(rv reflect.Value, fn *codecFn, checkFastpath bool) {
+	// if a valid fn is passed, it MUST BE for the dereferenced type of rv
+	var sptr uintptr
+	var rvp reflect.Value
+	var rvpValid bool
+TOP:
+	switch rv.Kind() {
+	case reflect.Ptr:
+		if rv.IsNil() {
+			e.e.EncodeNil()
+			return
+		}
+		rvpValid = true
+		rvp = rv
+		rv = rv.Elem()
+		if e.h.CheckCircularRef && rv.Kind() == reflect.Struct {
+			// TODO: Movable pointers will be an issue here. Future problem.
+			sptr = rv.UnsafeAddr()
+			break TOP
+		}
+		goto TOP
+	case reflect.Interface:
+		if rv.IsNil() {
+			e.e.EncodeNil()
+			return
+		}
+		rv = rv.Elem()
+		goto TOP
+	case reflect.Slice, reflect.Map:
+		if rv.IsNil() {
+			e.e.EncodeNil()
+			return
+		}
+	case reflect.Invalid, reflect.Func:
+		e.e.EncodeNil()
+		return
+	}
+
+	if sptr != 0 && (&e.ci).add(sptr) {
+		e.errorf("circular reference found: # %d", sptr)
+	}
+
+	if fn == nil {
+		rt := rv.Type()
+		// always pass checkCodecSelfer=true, in case T or ****T is passed, where *T is a Selfer
+		fn = e.cfer().get(rt, checkFastpath, true)
+	}
+	if fn.i.addrE {
+		if rvpValid {
+			fn.fe(e, &fn.i, rvp)
+		} else if rv.CanAddr() {
+			fn.fe(e, &fn.i, rv.Addr())
+		} else {
+			rv2 := reflect.New(rv.Type())
+			rv2.Elem().Set(rv)
+			fn.fe(e, &fn.i, rv2)
+		}
+	} else {
+		fn.fe(e, &fn.i, rv)
+	}
+	if sptr != 0 {
+		(&e.ci).remove(sptr)
+	}
+}
+
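+// CheckCircularRef sketch (hedged): with the handle's CheckCircularRef option
+// set, encodeValue errors on self-referential structs instead of recursing, e.g.
+//
+//	type node struct{ Next *node }
+//	n := &node{}
+//	n.Next = n
+//	var h CborHandle
+//	h.CheckCircularRef = true
+//	err := NewEncoderBytes(&out, &h).Encode(n) // reports a circular reference
+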
+func (e *Encoder) marshal(bs []byte, fnerr error, asis bool, c charEncoding) {
+	if fnerr != nil {
+		panic(fnerr)
+	}
+	if bs == nil {
+		e.e.EncodeNil()
+	} else if asis {
+		e.asis(bs)
+	} else {
+		e.e.EncodeStringBytes(c, bs)
+	}
+}
+
+func (e *Encoder) asis(v []byte) {
+	if e.isas {
+		e.as.EncodeAsis(v)
+	} else {
+		e.w.writeb(v)
+	}
+}
+
+func (e *Encoder) rawBytes(vv Raw) {
+	v := []byte(vv)
+	if !e.h.Raw {
+		e.errorf("Raw values cannot be encoded: %v", v)
+	}
+	e.asis(v)
+}
+
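+// Raw sketch (hedged): pre-encoded bytes can be spliced into the stream, but
+// only when the handle opts in via its Raw option, e.g.
+//
+//	var h JsonHandle
+//	h.Raw = true
+//	var out []byte
+//	NewEncoderBytes(&out, &h).MustEncode(Raw(`{"pre":"encoded"}`))
+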
+func (e *Encoder) wrapErrstr(v interface{}, err *error) {
+	*err = fmt.Errorf("%s encode error: %v", e.hh.Name(), v)
+}
diff --git a/vendor/github.com/ugorji/go/codec/fast-path.generated.go b/vendor/github.com/ugorji/go/codec/fast-path.generated.go
new file mode 100644
index 0000000..87f2562
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/fast-path.generated.go
@@ -0,0 +1,34522 @@
+// +build !notfastpath
+
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// Code generated from fast-path.go.tmpl - DO NOT EDIT.
+
+package codec
+
+// Fast path functions try to create a fast path encode or decode implementation
+// for common maps and slices.
+//
+// We define the functions and register them in this single file
+// so as not to pollute encode.go and decode.go, or create a dependency there.
+// This file can be omitted without causing a build failure.
+//
+// The advantage of fast paths is:
+//	  - Many calls bypass reflection altogether
+//
+// Currently supported:
+//	  - slices of all builtin types,
+//	  - maps of all builtin types to string or interface values,
+//	  - symmetrical maps of all builtin types (e.g. str-str, uint8-uint8).
+// This should provide adequate "typical" implementations.
+//
+// Note that fast path decode functions must handle values for which an address cannot be obtained.
+// For example:
+//	 m2 := map[string]int{}
+//	 p2 := []interface{}{m2}
+//	 // decoding into p2 will bomb if fast path functions do not treat it as unaddressable.
+//
+
+import (
+	"reflect"
+	"sort"
+)
+
+const fastpathEnabled = true
+
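+// Opting out (hedged): since this generated file is guarded by the build tag
+// above and is safe to omit, a build can drop it entirely, e.g.
+//
+//	go build -tags notfastpath ./...
+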
+type fastpathT struct{}
+
+var fastpathTV fastpathT
+
+type fastpathE struct {
+	rtid  uintptr
+	rt    reflect.Type
+	encfn func(*Encoder, *codecFnInfo, reflect.Value)
+	decfn func(*Decoder, *codecFnInfo, reflect.Value)
+}
+
+type fastpathA [271]fastpathE
+
+func (x *fastpathA) index(rtid uintptr) int {
+	// use binary search to grab the index (adapted from sort/search.go)
+	h, i, j := 0, 0, 271 // len(x)
+	for i < j {
+		h = i + (j-i)/2
+		if x[h].rtid < rtid {
+			i = h + 1
+		} else {
+			j = h
+		}
+	}
+	if i < 271 && x[i].rtid == rtid {
+		return i
+	}
+	return -1
+}
+
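+// Lookup sketch (hedged; fnInfo is a placeholder *codecFnInfo): a type's
+// fast-path entry is found by type id with the binary search above, e.g.
+//
+//	if idx := fastpathAV.index(rt2id(reflect.TypeOf(v))); idx != -1 {
+//		fastpathAV[idx].encfn(e, fnInfo, reflect.ValueOf(v))
+//	}
+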
+type fastpathAslice []fastpathE
+
+func (x fastpathAslice) Len() int           { return len(x) }
+func (x fastpathAslice) Less(i, j int) bool { return x[i].rtid < x[j].rtid }
+func (x fastpathAslice) Swap(i, j int)      { x[i], x[j] = x[j], x[i] }
+
+var fastpathAV fastpathA
+
+// To avoid a possible initialization loop error, build the fastpath table in an init()
+func init() {
+	i := 0
+	fn := func(v interface{},
+		fe func(*Encoder, *codecFnInfo, reflect.Value),
+		fd func(*Decoder, *codecFnInfo, reflect.Value)) (f fastpathE) {
+		xrt := reflect.TypeOf(v)
+		xptr := rt2id(xrt)
+		fastpathAV[i] = fastpathE{xptr, xrt, fe, fd}
+		i++
+		return
+	}
+
+	fn([]interface{}(nil), (*Encoder).fastpathEncSliceIntfR, (*Decoder).fastpathDecSliceIntfR)
+	fn([]string(nil), (*Encoder).fastpathEncSliceStringR, (*Decoder).fastpathDecSliceStringR)
+	fn([]float32(nil), (*Encoder).fastpathEncSliceFloat32R, (*Decoder).fastpathDecSliceFloat32R)
+	fn([]float64(nil), (*Encoder).fastpathEncSliceFloat64R, (*Decoder).fastpathDecSliceFloat64R)
+	fn([]uint(nil), (*Encoder).fastpathEncSliceUintR, (*Decoder).fastpathDecSliceUintR)
+	fn([]uint16(nil), (*Encoder).fastpathEncSliceUint16R, (*Decoder).fastpathDecSliceUint16R)
+	fn([]uint32(nil), (*Encoder).fastpathEncSliceUint32R, (*Decoder).fastpathDecSliceUint32R)
+	fn([]uint64(nil), (*Encoder).fastpathEncSliceUint64R, (*Decoder).fastpathDecSliceUint64R)
+	fn([]uintptr(nil), (*Encoder).fastpathEncSliceUintptrR, (*Decoder).fastpathDecSliceUintptrR)
+	fn([]int(nil), (*Encoder).fastpathEncSliceIntR, (*Decoder).fastpathDecSliceIntR)
+	fn([]int8(nil), (*Encoder).fastpathEncSliceInt8R, (*Decoder).fastpathDecSliceInt8R)
+	fn([]int16(nil), (*Encoder).fastpathEncSliceInt16R, (*Decoder).fastpathDecSliceInt16R)
+	fn([]int32(nil), (*Encoder).fastpathEncSliceInt32R, (*Decoder).fastpathDecSliceInt32R)
+	fn([]int64(nil), (*Encoder).fastpathEncSliceInt64R, (*Decoder).fastpathDecSliceInt64R)
+	fn([]bool(nil), (*Encoder).fastpathEncSliceBoolR, (*Decoder).fastpathDecSliceBoolR)
+
+	fn(map[interface{}]interface{}(nil), (*Encoder).fastpathEncMapIntfIntfR, (*Decoder).fastpathDecMapIntfIntfR)
+	fn(map[interface{}]string(nil), (*Encoder).fastpathEncMapIntfStringR, (*Decoder).fastpathDecMapIntfStringR)
+	fn(map[interface{}]uint(nil), (*Encoder).fastpathEncMapIntfUintR, (*Decoder).fastpathDecMapIntfUintR)
+	fn(map[interface{}]uint8(nil), (*Encoder).fastpathEncMapIntfUint8R, (*Decoder).fastpathDecMapIntfUint8R)
+	fn(map[interface{}]uint16(nil), (*Encoder).fastpathEncMapIntfUint16R, (*Decoder).fastpathDecMapIntfUint16R)
+	fn(map[interface{}]uint32(nil), (*Encoder).fastpathEncMapIntfUint32R, (*Decoder).fastpathDecMapIntfUint32R)
+	fn(map[interface{}]uint64(nil), (*Encoder).fastpathEncMapIntfUint64R, (*Decoder).fastpathDecMapIntfUint64R)
+	fn(map[interface{}]uintptr(nil), (*Encoder).fastpathEncMapIntfUintptrR, (*Decoder).fastpathDecMapIntfUintptrR)
+	fn(map[interface{}]int(nil), (*Encoder).fastpathEncMapIntfIntR, (*Decoder).fastpathDecMapIntfIntR)
+	fn(map[interface{}]int8(nil), (*Encoder).fastpathEncMapIntfInt8R, (*Decoder).fastpathDecMapIntfInt8R)
+	fn(map[interface{}]int16(nil), (*Encoder).fastpathEncMapIntfInt16R, (*Decoder).fastpathDecMapIntfInt16R)
+	fn(map[interface{}]int32(nil), (*Encoder).fastpathEncMapIntfInt32R, (*Decoder).fastpathDecMapIntfInt32R)
+	fn(map[interface{}]int64(nil), (*Encoder).fastpathEncMapIntfInt64R, (*Decoder).fastpathDecMapIntfInt64R)
+	fn(map[interface{}]float32(nil), (*Encoder).fastpathEncMapIntfFloat32R, (*Decoder).fastpathDecMapIntfFloat32R)
+	fn(map[interface{}]float64(nil), (*Encoder).fastpathEncMapIntfFloat64R, (*Decoder).fastpathDecMapIntfFloat64R)
+	fn(map[interface{}]bool(nil), (*Encoder).fastpathEncMapIntfBoolR, (*Decoder).fastpathDecMapIntfBoolR)
+	fn(map[string]interface{}(nil), (*Encoder).fastpathEncMapStringIntfR, (*Decoder).fastpathDecMapStringIntfR)
+	fn(map[string]string(nil), (*Encoder).fastpathEncMapStringStringR, (*Decoder).fastpathDecMapStringStringR)
+	fn(map[string]uint(nil), (*Encoder).fastpathEncMapStringUintR, (*Decoder).fastpathDecMapStringUintR)
+	fn(map[string]uint8(nil), (*Encoder).fastpathEncMapStringUint8R, (*Decoder).fastpathDecMapStringUint8R)
+	fn(map[string]uint16(nil), (*Encoder).fastpathEncMapStringUint16R, (*Decoder).fastpathDecMapStringUint16R)
+	fn(map[string]uint32(nil), (*Encoder).fastpathEncMapStringUint32R, (*Decoder).fastpathDecMapStringUint32R)
+	fn(map[string]uint64(nil), (*Encoder).fastpathEncMapStringUint64R, (*Decoder).fastpathDecMapStringUint64R)
+	fn(map[string]uintptr(nil), (*Encoder).fastpathEncMapStringUintptrR, (*Decoder).fastpathDecMapStringUintptrR)
+	fn(map[string]int(nil), (*Encoder).fastpathEncMapStringIntR, (*Decoder).fastpathDecMapStringIntR)
+	fn(map[string]int8(nil), (*Encoder).fastpathEncMapStringInt8R, (*Decoder).fastpathDecMapStringInt8R)
+	fn(map[string]int16(nil), (*Encoder).fastpathEncMapStringInt16R, (*Decoder).fastpathDecMapStringInt16R)
+	fn(map[string]int32(nil), (*Encoder).fastpathEncMapStringInt32R, (*Decoder).fastpathDecMapStringInt32R)
+	fn(map[string]int64(nil), (*Encoder).fastpathEncMapStringInt64R, (*Decoder).fastpathDecMapStringInt64R)
+	fn(map[string]float32(nil), (*Encoder).fastpathEncMapStringFloat32R, (*Decoder).fastpathDecMapStringFloat32R)
+	fn(map[string]float64(nil), (*Encoder).fastpathEncMapStringFloat64R, (*Decoder).fastpathDecMapStringFloat64R)
+	fn(map[string]bool(nil), (*Encoder).fastpathEncMapStringBoolR, (*Decoder).fastpathDecMapStringBoolR)
+	fn(map[float32]interface{}(nil), (*Encoder).fastpathEncMapFloat32IntfR, (*Decoder).fastpathDecMapFloat32IntfR)
+	fn(map[float32]string(nil), (*Encoder).fastpathEncMapFloat32StringR, (*Decoder).fastpathDecMapFloat32StringR)
+	fn(map[float32]uint(nil), (*Encoder).fastpathEncMapFloat32UintR, (*Decoder).fastpathDecMapFloat32UintR)
+	fn(map[float32]uint8(nil), (*Encoder).fastpathEncMapFloat32Uint8R, (*Decoder).fastpathDecMapFloat32Uint8R)
+	fn(map[float32]uint16(nil), (*Encoder).fastpathEncMapFloat32Uint16R, (*Decoder).fastpathDecMapFloat32Uint16R)
+	fn(map[float32]uint32(nil), (*Encoder).fastpathEncMapFloat32Uint32R, (*Decoder).fastpathDecMapFloat32Uint32R)
+	fn(map[float32]uint64(nil), (*Encoder).fastpathEncMapFloat32Uint64R, (*Decoder).fastpathDecMapFloat32Uint64R)
+	fn(map[float32]uintptr(nil), (*Encoder).fastpathEncMapFloat32UintptrR, (*Decoder).fastpathDecMapFloat32UintptrR)
+	fn(map[float32]int(nil), (*Encoder).fastpathEncMapFloat32IntR, (*Decoder).fastpathDecMapFloat32IntR)
+	fn(map[float32]int8(nil), (*Encoder).fastpathEncMapFloat32Int8R, (*Decoder).fastpathDecMapFloat32Int8R)
+	fn(map[float32]int16(nil), (*Encoder).fastpathEncMapFloat32Int16R, (*Decoder).fastpathDecMapFloat32Int16R)
+	fn(map[float32]int32(nil), (*Encoder).fastpathEncMapFloat32Int32R, (*Decoder).fastpathDecMapFloat32Int32R)
+	fn(map[float32]int64(nil), (*Encoder).fastpathEncMapFloat32Int64R, (*Decoder).fastpathDecMapFloat32Int64R)
+	fn(map[float32]float32(nil), (*Encoder).fastpathEncMapFloat32Float32R, (*Decoder).fastpathDecMapFloat32Float32R)
+	fn(map[float32]float64(nil), (*Encoder).fastpathEncMapFloat32Float64R, (*Decoder).fastpathDecMapFloat32Float64R)
+	fn(map[float32]bool(nil), (*Encoder).fastpathEncMapFloat32BoolR, (*Decoder).fastpathDecMapFloat32BoolR)
+	fn(map[float64]interface{}(nil), (*Encoder).fastpathEncMapFloat64IntfR, (*Decoder).fastpathDecMapFloat64IntfR)
+	fn(map[float64]string(nil), (*Encoder).fastpathEncMapFloat64StringR, (*Decoder).fastpathDecMapFloat64StringR)
+	fn(map[float64]uint(nil), (*Encoder).fastpathEncMapFloat64UintR, (*Decoder).fastpathDecMapFloat64UintR)
+	fn(map[float64]uint8(nil), (*Encoder).fastpathEncMapFloat64Uint8R, (*Decoder).fastpathDecMapFloat64Uint8R)
+	fn(map[float64]uint16(nil), (*Encoder).fastpathEncMapFloat64Uint16R, (*Decoder).fastpathDecMapFloat64Uint16R)
+	fn(map[float64]uint32(nil), (*Encoder).fastpathEncMapFloat64Uint32R, (*Decoder).fastpathDecMapFloat64Uint32R)
+	fn(map[float64]uint64(nil), (*Encoder).fastpathEncMapFloat64Uint64R, (*Decoder).fastpathDecMapFloat64Uint64R)
+	fn(map[float64]uintptr(nil), (*Encoder).fastpathEncMapFloat64UintptrR, (*Decoder).fastpathDecMapFloat64UintptrR)
+	fn(map[float64]int(nil), (*Encoder).fastpathEncMapFloat64IntR, (*Decoder).fastpathDecMapFloat64IntR)
+	fn(map[float64]int8(nil), (*Encoder).fastpathEncMapFloat64Int8R, (*Decoder).fastpathDecMapFloat64Int8R)
+	fn(map[float64]int16(nil), (*Encoder).fastpathEncMapFloat64Int16R, (*Decoder).fastpathDecMapFloat64Int16R)
+	fn(map[float64]int32(nil), (*Encoder).fastpathEncMapFloat64Int32R, (*Decoder).fastpathDecMapFloat64Int32R)
+	fn(map[float64]int64(nil), (*Encoder).fastpathEncMapFloat64Int64R, (*Decoder).fastpathDecMapFloat64Int64R)
+	fn(map[float64]float32(nil), (*Encoder).fastpathEncMapFloat64Float32R, (*Decoder).fastpathDecMapFloat64Float32R)
+	fn(map[float64]float64(nil), (*Encoder).fastpathEncMapFloat64Float64R, (*Decoder).fastpathDecMapFloat64Float64R)
+	fn(map[float64]bool(nil), (*Encoder).fastpathEncMapFloat64BoolR, (*Decoder).fastpathDecMapFloat64BoolR)
+	fn(map[uint]interface{}(nil), (*Encoder).fastpathEncMapUintIntfR, (*Decoder).fastpathDecMapUintIntfR)
+	fn(map[uint]string(nil), (*Encoder).fastpathEncMapUintStringR, (*Decoder).fastpathDecMapUintStringR)
+	fn(map[uint]uint(nil), (*Encoder).fastpathEncMapUintUintR, (*Decoder).fastpathDecMapUintUintR)
+	fn(map[uint]uint8(nil), (*Encoder).fastpathEncMapUintUint8R, (*Decoder).fastpathDecMapUintUint8R)
+	fn(map[uint]uint16(nil), (*Encoder).fastpathEncMapUintUint16R, (*Decoder).fastpathDecMapUintUint16R)
+	fn(map[uint]uint32(nil), (*Encoder).fastpathEncMapUintUint32R, (*Decoder).fastpathDecMapUintUint32R)
+	fn(map[uint]uint64(nil), (*Encoder).fastpathEncMapUintUint64R, (*Decoder).fastpathDecMapUintUint64R)
+	fn(map[uint]uintptr(nil), (*Encoder).fastpathEncMapUintUintptrR, (*Decoder).fastpathDecMapUintUintptrR)
+	fn(map[uint]int(nil), (*Encoder).fastpathEncMapUintIntR, (*Decoder).fastpathDecMapUintIntR)
+	fn(map[uint]int8(nil), (*Encoder).fastpathEncMapUintInt8R, (*Decoder).fastpathDecMapUintInt8R)
+	fn(map[uint]int16(nil), (*Encoder).fastpathEncMapUintInt16R, (*Decoder).fastpathDecMapUintInt16R)
+	fn(map[uint]int32(nil), (*Encoder).fastpathEncMapUintInt32R, (*Decoder).fastpathDecMapUintInt32R)
+	fn(map[uint]int64(nil), (*Encoder).fastpathEncMapUintInt64R, (*Decoder).fastpathDecMapUintInt64R)
+	fn(map[uint]float32(nil), (*Encoder).fastpathEncMapUintFloat32R, (*Decoder).fastpathDecMapUintFloat32R)
+	fn(map[uint]float64(nil), (*Encoder).fastpathEncMapUintFloat64R, (*Decoder).fastpathDecMapUintFloat64R)
+	fn(map[uint]bool(nil), (*Encoder).fastpathEncMapUintBoolR, (*Decoder).fastpathDecMapUintBoolR)
+	fn(map[uint8]interface{}(nil), (*Encoder).fastpathEncMapUint8IntfR, (*Decoder).fastpathDecMapUint8IntfR)
+	fn(map[uint8]string(nil), (*Encoder).fastpathEncMapUint8StringR, (*Decoder).fastpathDecMapUint8StringR)
+	fn(map[uint8]uint(nil), (*Encoder).fastpathEncMapUint8UintR, (*Decoder).fastpathDecMapUint8UintR)
+	fn(map[uint8]uint8(nil), (*Encoder).fastpathEncMapUint8Uint8R, (*Decoder).fastpathDecMapUint8Uint8R)
+	fn(map[uint8]uint16(nil), (*Encoder).fastpathEncMapUint8Uint16R, (*Decoder).fastpathDecMapUint8Uint16R)
+	fn(map[uint8]uint32(nil), (*Encoder).fastpathEncMapUint8Uint32R, (*Decoder).fastpathDecMapUint8Uint32R)
+	fn(map[uint8]uint64(nil), (*Encoder).fastpathEncMapUint8Uint64R, (*Decoder).fastpathDecMapUint8Uint64R)
+	fn(map[uint8]uintptr(nil), (*Encoder).fastpathEncMapUint8UintptrR, (*Decoder).fastpathDecMapUint8UintptrR)
+	fn(map[uint8]int(nil), (*Encoder).fastpathEncMapUint8IntR, (*Decoder).fastpathDecMapUint8IntR)
+	fn(map[uint8]int8(nil), (*Encoder).fastpathEncMapUint8Int8R, (*Decoder).fastpathDecMapUint8Int8R)
+	fn(map[uint8]int16(nil), (*Encoder).fastpathEncMapUint8Int16R, (*Decoder).fastpathDecMapUint8Int16R)
+	fn(map[uint8]int32(nil), (*Encoder).fastpathEncMapUint8Int32R, (*Decoder).fastpathDecMapUint8Int32R)
+	fn(map[uint8]int64(nil), (*Encoder).fastpathEncMapUint8Int64R, (*Decoder).fastpathDecMapUint8Int64R)
+	fn(map[uint8]float32(nil), (*Encoder).fastpathEncMapUint8Float32R, (*Decoder).fastpathDecMapUint8Float32R)
+	fn(map[uint8]float64(nil), (*Encoder).fastpathEncMapUint8Float64R, (*Decoder).fastpathDecMapUint8Float64R)
+	fn(map[uint8]bool(nil), (*Encoder).fastpathEncMapUint8BoolR, (*Decoder).fastpathDecMapUint8BoolR)
+	fn(map[uint16]interface{}(nil), (*Encoder).fastpathEncMapUint16IntfR, (*Decoder).fastpathDecMapUint16IntfR)
+	fn(map[uint16]string(nil), (*Encoder).fastpathEncMapUint16StringR, (*Decoder).fastpathDecMapUint16StringR)
+	fn(map[uint16]uint(nil), (*Encoder).fastpathEncMapUint16UintR, (*Decoder).fastpathDecMapUint16UintR)
+	fn(map[uint16]uint8(nil), (*Encoder).fastpathEncMapUint16Uint8R, (*Decoder).fastpathDecMapUint16Uint8R)
+	fn(map[uint16]uint16(nil), (*Encoder).fastpathEncMapUint16Uint16R, (*Decoder).fastpathDecMapUint16Uint16R)
+	fn(map[uint16]uint32(nil), (*Encoder).fastpathEncMapUint16Uint32R, (*Decoder).fastpathDecMapUint16Uint32R)
+	fn(map[uint16]uint64(nil), (*Encoder).fastpathEncMapUint16Uint64R, (*Decoder).fastpathDecMapUint16Uint64R)
+	fn(map[uint16]uintptr(nil), (*Encoder).fastpathEncMapUint16UintptrR, (*Decoder).fastpathDecMapUint16UintptrR)
+	fn(map[uint16]int(nil), (*Encoder).fastpathEncMapUint16IntR, (*Decoder).fastpathDecMapUint16IntR)
+	fn(map[uint16]int8(nil), (*Encoder).fastpathEncMapUint16Int8R, (*Decoder).fastpathDecMapUint16Int8R)
+	fn(map[uint16]int16(nil), (*Encoder).fastpathEncMapUint16Int16R, (*Decoder).fastpathDecMapUint16Int16R)
+	fn(map[uint16]int32(nil), (*Encoder).fastpathEncMapUint16Int32R, (*Decoder).fastpathDecMapUint16Int32R)
+	fn(map[uint16]int64(nil), (*Encoder).fastpathEncMapUint16Int64R, (*Decoder).fastpathDecMapUint16Int64R)
+	fn(map[uint16]float32(nil), (*Encoder).fastpathEncMapUint16Float32R, (*Decoder).fastpathDecMapUint16Float32R)
+	fn(map[uint16]float64(nil), (*Encoder).fastpathEncMapUint16Float64R, (*Decoder).fastpathDecMapUint16Float64R)
+	fn(map[uint16]bool(nil), (*Encoder).fastpathEncMapUint16BoolR, (*Decoder).fastpathDecMapUint16BoolR)
+	fn(map[uint32]interface{}(nil), (*Encoder).fastpathEncMapUint32IntfR, (*Decoder).fastpathDecMapUint32IntfR)
+	fn(map[uint32]string(nil), (*Encoder).fastpathEncMapUint32StringR, (*Decoder).fastpathDecMapUint32StringR)
+	fn(map[uint32]uint(nil), (*Encoder).fastpathEncMapUint32UintR, (*Decoder).fastpathDecMapUint32UintR)
+	fn(map[uint32]uint8(nil), (*Encoder).fastpathEncMapUint32Uint8R, (*Decoder).fastpathDecMapUint32Uint8R)
+	fn(map[uint32]uint16(nil), (*Encoder).fastpathEncMapUint32Uint16R, (*Decoder).fastpathDecMapUint32Uint16R)
+	fn(map[uint32]uint32(nil), (*Encoder).fastpathEncMapUint32Uint32R, (*Decoder).fastpathDecMapUint32Uint32R)
+	fn(map[uint32]uint64(nil), (*Encoder).fastpathEncMapUint32Uint64R, (*Decoder).fastpathDecMapUint32Uint64R)
+	fn(map[uint32]uintptr(nil), (*Encoder).fastpathEncMapUint32UintptrR, (*Decoder).fastpathDecMapUint32UintptrR)
+	fn(map[uint32]int(nil), (*Encoder).fastpathEncMapUint32IntR, (*Decoder).fastpathDecMapUint32IntR)
+	fn(map[uint32]int8(nil), (*Encoder).fastpathEncMapUint32Int8R, (*Decoder).fastpathDecMapUint32Int8R)
+	fn(map[uint32]int16(nil), (*Encoder).fastpathEncMapUint32Int16R, (*Decoder).fastpathDecMapUint32Int16R)
+	fn(map[uint32]int32(nil), (*Encoder).fastpathEncMapUint32Int32R, (*Decoder).fastpathDecMapUint32Int32R)
+	fn(map[uint32]int64(nil), (*Encoder).fastpathEncMapUint32Int64R, (*Decoder).fastpathDecMapUint32Int64R)
+	fn(map[uint32]float32(nil), (*Encoder).fastpathEncMapUint32Float32R, (*Decoder).fastpathDecMapUint32Float32R)
+	fn(map[uint32]float64(nil), (*Encoder).fastpathEncMapUint32Float64R, (*Decoder).fastpathDecMapUint32Float64R)
+	fn(map[uint32]bool(nil), (*Encoder).fastpathEncMapUint32BoolR, (*Decoder).fastpathDecMapUint32BoolR)
+	fn(map[uint64]interface{}(nil), (*Encoder).fastpathEncMapUint64IntfR, (*Decoder).fastpathDecMapUint64IntfR)
+	fn(map[uint64]string(nil), (*Encoder).fastpathEncMapUint64StringR, (*Decoder).fastpathDecMapUint64StringR)
+	fn(map[uint64]uint(nil), (*Encoder).fastpathEncMapUint64UintR, (*Decoder).fastpathDecMapUint64UintR)
+	fn(map[uint64]uint8(nil), (*Encoder).fastpathEncMapUint64Uint8R, (*Decoder).fastpathDecMapUint64Uint8R)
+	fn(map[uint64]uint16(nil), (*Encoder).fastpathEncMapUint64Uint16R, (*Decoder).fastpathDecMapUint64Uint16R)
+	fn(map[uint64]uint32(nil), (*Encoder).fastpathEncMapUint64Uint32R, (*Decoder).fastpathDecMapUint64Uint32R)
+	fn(map[uint64]uint64(nil), (*Encoder).fastpathEncMapUint64Uint64R, (*Decoder).fastpathDecMapUint64Uint64R)
+	fn(map[uint64]uintptr(nil), (*Encoder).fastpathEncMapUint64UintptrR, (*Decoder).fastpathDecMapUint64UintptrR)
+	fn(map[uint64]int(nil), (*Encoder).fastpathEncMapUint64IntR, (*Decoder).fastpathDecMapUint64IntR)
+	fn(map[uint64]int8(nil), (*Encoder).fastpathEncMapUint64Int8R, (*Decoder).fastpathDecMapUint64Int8R)
+	fn(map[uint64]int16(nil), (*Encoder).fastpathEncMapUint64Int16R, (*Decoder).fastpathDecMapUint64Int16R)
+	fn(map[uint64]int32(nil), (*Encoder).fastpathEncMapUint64Int32R, (*Decoder).fastpathDecMapUint64Int32R)
+	fn(map[uint64]int64(nil), (*Encoder).fastpathEncMapUint64Int64R, (*Decoder).fastpathDecMapUint64Int64R)
+	fn(map[uint64]float32(nil), (*Encoder).fastpathEncMapUint64Float32R, (*Decoder).fastpathDecMapUint64Float32R)
+	fn(map[uint64]float64(nil), (*Encoder).fastpathEncMapUint64Float64R, (*Decoder).fastpathDecMapUint64Float64R)
+	fn(map[uint64]bool(nil), (*Encoder).fastpathEncMapUint64BoolR, (*Decoder).fastpathDecMapUint64BoolR)
+	fn(map[uintptr]interface{}(nil), (*Encoder).fastpathEncMapUintptrIntfR, (*Decoder).fastpathDecMapUintptrIntfR)
+	fn(map[uintptr]string(nil), (*Encoder).fastpathEncMapUintptrStringR, (*Decoder).fastpathDecMapUintptrStringR)
+	fn(map[uintptr]uint(nil), (*Encoder).fastpathEncMapUintptrUintR, (*Decoder).fastpathDecMapUintptrUintR)
+	fn(map[uintptr]uint8(nil), (*Encoder).fastpathEncMapUintptrUint8R, (*Decoder).fastpathDecMapUintptrUint8R)
+	fn(map[uintptr]uint16(nil), (*Encoder).fastpathEncMapUintptrUint16R, (*Decoder).fastpathDecMapUintptrUint16R)
+	fn(map[uintptr]uint32(nil), (*Encoder).fastpathEncMapUintptrUint32R, (*Decoder).fastpathDecMapUintptrUint32R)
+	fn(map[uintptr]uint64(nil), (*Encoder).fastpathEncMapUintptrUint64R, (*Decoder).fastpathDecMapUintptrUint64R)
+	fn(map[uintptr]uintptr(nil), (*Encoder).fastpathEncMapUintptrUintptrR, (*Decoder).fastpathDecMapUintptrUintptrR)
+	fn(map[uintptr]int(nil), (*Encoder).fastpathEncMapUintptrIntR, (*Decoder).fastpathDecMapUintptrIntR)
+	fn(map[uintptr]int8(nil), (*Encoder).fastpathEncMapUintptrInt8R, (*Decoder).fastpathDecMapUintptrInt8R)
+	fn(map[uintptr]int16(nil), (*Encoder).fastpathEncMapUintptrInt16R, (*Decoder).fastpathDecMapUintptrInt16R)
+	fn(map[uintptr]int32(nil), (*Encoder).fastpathEncMapUintptrInt32R, (*Decoder).fastpathDecMapUintptrInt32R)
+	fn(map[uintptr]int64(nil), (*Encoder).fastpathEncMapUintptrInt64R, (*Decoder).fastpathDecMapUintptrInt64R)
+	fn(map[uintptr]float32(nil), (*Encoder).fastpathEncMapUintptrFloat32R, (*Decoder).fastpathDecMapUintptrFloat32R)
+	fn(map[uintptr]float64(nil), (*Encoder).fastpathEncMapUintptrFloat64R, (*Decoder).fastpathDecMapUintptrFloat64R)
+	fn(map[uintptr]bool(nil), (*Encoder).fastpathEncMapUintptrBoolR, (*Decoder).fastpathDecMapUintptrBoolR)
+	fn(map[int]interface{}(nil), (*Encoder).fastpathEncMapIntIntfR, (*Decoder).fastpathDecMapIntIntfR)
+	fn(map[int]string(nil), (*Encoder).fastpathEncMapIntStringR, (*Decoder).fastpathDecMapIntStringR)
+	fn(map[int]uint(nil), (*Encoder).fastpathEncMapIntUintR, (*Decoder).fastpathDecMapIntUintR)
+	fn(map[int]uint8(nil), (*Encoder).fastpathEncMapIntUint8R, (*Decoder).fastpathDecMapIntUint8R)
+	fn(map[int]uint16(nil), (*Encoder).fastpathEncMapIntUint16R, (*Decoder).fastpathDecMapIntUint16R)
+	fn(map[int]uint32(nil), (*Encoder).fastpathEncMapIntUint32R, (*Decoder).fastpathDecMapIntUint32R)
+	fn(map[int]uint64(nil), (*Encoder).fastpathEncMapIntUint64R, (*Decoder).fastpathDecMapIntUint64R)
+	fn(map[int]uintptr(nil), (*Encoder).fastpathEncMapIntUintptrR, (*Decoder).fastpathDecMapIntUintptrR)
+	fn(map[int]int(nil), (*Encoder).fastpathEncMapIntIntR, (*Decoder).fastpathDecMapIntIntR)
+	fn(map[int]int8(nil), (*Encoder).fastpathEncMapIntInt8R, (*Decoder).fastpathDecMapIntInt8R)
+	fn(map[int]int16(nil), (*Encoder).fastpathEncMapIntInt16R, (*Decoder).fastpathDecMapIntInt16R)
+	fn(map[int]int32(nil), (*Encoder).fastpathEncMapIntInt32R, (*Decoder).fastpathDecMapIntInt32R)
+	fn(map[int]int64(nil), (*Encoder).fastpathEncMapIntInt64R, (*Decoder).fastpathDecMapIntInt64R)
+	fn(map[int]float32(nil), (*Encoder).fastpathEncMapIntFloat32R, (*Decoder).fastpathDecMapIntFloat32R)
+	fn(map[int]float64(nil), (*Encoder).fastpathEncMapIntFloat64R, (*Decoder).fastpathDecMapIntFloat64R)
+	fn(map[int]bool(nil), (*Encoder).fastpathEncMapIntBoolR, (*Decoder).fastpathDecMapIntBoolR)
+	fn(map[int8]interface{}(nil), (*Encoder).fastpathEncMapInt8IntfR, (*Decoder).fastpathDecMapInt8IntfR)
+	fn(map[int8]string(nil), (*Encoder).fastpathEncMapInt8StringR, (*Decoder).fastpathDecMapInt8StringR)
+	fn(map[int8]uint(nil), (*Encoder).fastpathEncMapInt8UintR, (*Decoder).fastpathDecMapInt8UintR)
+	fn(map[int8]uint8(nil), (*Encoder).fastpathEncMapInt8Uint8R, (*Decoder).fastpathDecMapInt8Uint8R)
+	fn(map[int8]uint16(nil), (*Encoder).fastpathEncMapInt8Uint16R, (*Decoder).fastpathDecMapInt8Uint16R)
+	fn(map[int8]uint32(nil), (*Encoder).fastpathEncMapInt8Uint32R, (*Decoder).fastpathDecMapInt8Uint32R)
+	fn(map[int8]uint64(nil), (*Encoder).fastpathEncMapInt8Uint64R, (*Decoder).fastpathDecMapInt8Uint64R)
+	fn(map[int8]uintptr(nil), (*Encoder).fastpathEncMapInt8UintptrR, (*Decoder).fastpathDecMapInt8UintptrR)
+	fn(map[int8]int(nil), (*Encoder).fastpathEncMapInt8IntR, (*Decoder).fastpathDecMapInt8IntR)
+	fn(map[int8]int8(nil), (*Encoder).fastpathEncMapInt8Int8R, (*Decoder).fastpathDecMapInt8Int8R)
+	fn(map[int8]int16(nil), (*Encoder).fastpathEncMapInt8Int16R, (*Decoder).fastpathDecMapInt8Int16R)
+	fn(map[int8]int32(nil), (*Encoder).fastpathEncMapInt8Int32R, (*Decoder).fastpathDecMapInt8Int32R)
+	fn(map[int8]int64(nil), (*Encoder).fastpathEncMapInt8Int64R, (*Decoder).fastpathDecMapInt8Int64R)
+	fn(map[int8]float32(nil), (*Encoder).fastpathEncMapInt8Float32R, (*Decoder).fastpathDecMapInt8Float32R)
+	fn(map[int8]float64(nil), (*Encoder).fastpathEncMapInt8Float64R, (*Decoder).fastpathDecMapInt8Float64R)
+	fn(map[int8]bool(nil), (*Encoder).fastpathEncMapInt8BoolR, (*Decoder).fastpathDecMapInt8BoolR)
+	fn(map[int16]interface{}(nil), (*Encoder).fastpathEncMapInt16IntfR, (*Decoder).fastpathDecMapInt16IntfR)
+	fn(map[int16]string(nil), (*Encoder).fastpathEncMapInt16StringR, (*Decoder).fastpathDecMapInt16StringR)
+	fn(map[int16]uint(nil), (*Encoder).fastpathEncMapInt16UintR, (*Decoder).fastpathDecMapInt16UintR)
+	fn(map[int16]uint8(nil), (*Encoder).fastpathEncMapInt16Uint8R, (*Decoder).fastpathDecMapInt16Uint8R)
+	fn(map[int16]uint16(nil), (*Encoder).fastpathEncMapInt16Uint16R, (*Decoder).fastpathDecMapInt16Uint16R)
+	fn(map[int16]uint32(nil), (*Encoder).fastpathEncMapInt16Uint32R, (*Decoder).fastpathDecMapInt16Uint32R)
+	fn(map[int16]uint64(nil), (*Encoder).fastpathEncMapInt16Uint64R, (*Decoder).fastpathDecMapInt16Uint64R)
+	fn(map[int16]uintptr(nil), (*Encoder).fastpathEncMapInt16UintptrR, (*Decoder).fastpathDecMapInt16UintptrR)
+	fn(map[int16]int(nil), (*Encoder).fastpathEncMapInt16IntR, (*Decoder).fastpathDecMapInt16IntR)
+	fn(map[int16]int8(nil), (*Encoder).fastpathEncMapInt16Int8R, (*Decoder).fastpathDecMapInt16Int8R)
+	fn(map[int16]int16(nil), (*Encoder).fastpathEncMapInt16Int16R, (*Decoder).fastpathDecMapInt16Int16R)
+	fn(map[int16]int32(nil), (*Encoder).fastpathEncMapInt16Int32R, (*Decoder).fastpathDecMapInt16Int32R)
+	fn(map[int16]int64(nil), (*Encoder).fastpathEncMapInt16Int64R, (*Decoder).fastpathDecMapInt16Int64R)
+	fn(map[int16]float32(nil), (*Encoder).fastpathEncMapInt16Float32R, (*Decoder).fastpathDecMapInt16Float32R)
+	fn(map[int16]float64(nil), (*Encoder).fastpathEncMapInt16Float64R, (*Decoder).fastpathDecMapInt16Float64R)
+	fn(map[int16]bool(nil), (*Encoder).fastpathEncMapInt16BoolR, (*Decoder).fastpathDecMapInt16BoolR)
+	fn(map[int32]interface{}(nil), (*Encoder).fastpathEncMapInt32IntfR, (*Decoder).fastpathDecMapInt32IntfR)
+	fn(map[int32]string(nil), (*Encoder).fastpathEncMapInt32StringR, (*Decoder).fastpathDecMapInt32StringR)
+	fn(map[int32]uint(nil), (*Encoder).fastpathEncMapInt32UintR, (*Decoder).fastpathDecMapInt32UintR)
+	fn(map[int32]uint8(nil), (*Encoder).fastpathEncMapInt32Uint8R, (*Decoder).fastpathDecMapInt32Uint8R)
+	fn(map[int32]uint16(nil), (*Encoder).fastpathEncMapInt32Uint16R, (*Decoder).fastpathDecMapInt32Uint16R)
+	fn(map[int32]uint32(nil), (*Encoder).fastpathEncMapInt32Uint32R, (*Decoder).fastpathDecMapInt32Uint32R)
+	fn(map[int32]uint64(nil), (*Encoder).fastpathEncMapInt32Uint64R, (*Decoder).fastpathDecMapInt32Uint64R)
+	fn(map[int32]uintptr(nil), (*Encoder).fastpathEncMapInt32UintptrR, (*Decoder).fastpathDecMapInt32UintptrR)
+	fn(map[int32]int(nil), (*Encoder).fastpathEncMapInt32IntR, (*Decoder).fastpathDecMapInt32IntR)
+	fn(map[int32]int8(nil), (*Encoder).fastpathEncMapInt32Int8R, (*Decoder).fastpathDecMapInt32Int8R)
+	fn(map[int32]int16(nil), (*Encoder).fastpathEncMapInt32Int16R, (*Decoder).fastpathDecMapInt32Int16R)
+	fn(map[int32]int32(nil), (*Encoder).fastpathEncMapInt32Int32R, (*Decoder).fastpathDecMapInt32Int32R)
+	fn(map[int32]int64(nil), (*Encoder).fastpathEncMapInt32Int64R, (*Decoder).fastpathDecMapInt32Int64R)
+	fn(map[int32]float32(nil), (*Encoder).fastpathEncMapInt32Float32R, (*Decoder).fastpathDecMapInt32Float32R)
+	fn(map[int32]float64(nil), (*Encoder).fastpathEncMapInt32Float64R, (*Decoder).fastpathDecMapInt32Float64R)
+	fn(map[int32]bool(nil), (*Encoder).fastpathEncMapInt32BoolR, (*Decoder).fastpathDecMapInt32BoolR)
+	fn(map[int64]interface{}(nil), (*Encoder).fastpathEncMapInt64IntfR, (*Decoder).fastpathDecMapInt64IntfR)
+	fn(map[int64]string(nil), (*Encoder).fastpathEncMapInt64StringR, (*Decoder).fastpathDecMapInt64StringR)
+	fn(map[int64]uint(nil), (*Encoder).fastpathEncMapInt64UintR, (*Decoder).fastpathDecMapInt64UintR)
+	fn(map[int64]uint8(nil), (*Encoder).fastpathEncMapInt64Uint8R, (*Decoder).fastpathDecMapInt64Uint8R)
+	fn(map[int64]uint16(nil), (*Encoder).fastpathEncMapInt64Uint16R, (*Decoder).fastpathDecMapInt64Uint16R)
+	fn(map[int64]uint32(nil), (*Encoder).fastpathEncMapInt64Uint32R, (*Decoder).fastpathDecMapInt64Uint32R)
+	fn(map[int64]uint64(nil), (*Encoder).fastpathEncMapInt64Uint64R, (*Decoder).fastpathDecMapInt64Uint64R)
+	fn(map[int64]uintptr(nil), (*Encoder).fastpathEncMapInt64UintptrR, (*Decoder).fastpathDecMapInt64UintptrR)
+	fn(map[int64]int(nil), (*Encoder).fastpathEncMapInt64IntR, (*Decoder).fastpathDecMapInt64IntR)
+	fn(map[int64]int8(nil), (*Encoder).fastpathEncMapInt64Int8R, (*Decoder).fastpathDecMapInt64Int8R)
+	fn(map[int64]int16(nil), (*Encoder).fastpathEncMapInt64Int16R, (*Decoder).fastpathDecMapInt64Int16R)
+	fn(map[int64]int32(nil), (*Encoder).fastpathEncMapInt64Int32R, (*Decoder).fastpathDecMapInt64Int32R)
+	fn(map[int64]int64(nil), (*Encoder).fastpathEncMapInt64Int64R, (*Decoder).fastpathDecMapInt64Int64R)
+	fn(map[int64]float32(nil), (*Encoder).fastpathEncMapInt64Float32R, (*Decoder).fastpathDecMapInt64Float32R)
+	fn(map[int64]float64(nil), (*Encoder).fastpathEncMapInt64Float64R, (*Decoder).fastpathDecMapInt64Float64R)
+	fn(map[int64]bool(nil), (*Encoder).fastpathEncMapInt64BoolR, (*Decoder).fastpathDecMapInt64BoolR)
+	fn(map[bool]interface{}(nil), (*Encoder).fastpathEncMapBoolIntfR, (*Decoder).fastpathDecMapBoolIntfR)
+	fn(map[bool]string(nil), (*Encoder).fastpathEncMapBoolStringR, (*Decoder).fastpathDecMapBoolStringR)
+	fn(map[bool]uint(nil), (*Encoder).fastpathEncMapBoolUintR, (*Decoder).fastpathDecMapBoolUintR)
+	fn(map[bool]uint8(nil), (*Encoder).fastpathEncMapBoolUint8R, (*Decoder).fastpathDecMapBoolUint8R)
+	fn(map[bool]uint16(nil), (*Encoder).fastpathEncMapBoolUint16R, (*Decoder).fastpathDecMapBoolUint16R)
+	fn(map[bool]uint32(nil), (*Encoder).fastpathEncMapBoolUint32R, (*Decoder).fastpathDecMapBoolUint32R)
+	fn(map[bool]uint64(nil), (*Encoder).fastpathEncMapBoolUint64R, (*Decoder).fastpathDecMapBoolUint64R)
+	fn(map[bool]uintptr(nil), (*Encoder).fastpathEncMapBoolUintptrR, (*Decoder).fastpathDecMapBoolUintptrR)
+	fn(map[bool]int(nil), (*Encoder).fastpathEncMapBoolIntR, (*Decoder).fastpathDecMapBoolIntR)
+	fn(map[bool]int8(nil), (*Encoder).fastpathEncMapBoolInt8R, (*Decoder).fastpathDecMapBoolInt8R)
+	fn(map[bool]int16(nil), (*Encoder).fastpathEncMapBoolInt16R, (*Decoder).fastpathDecMapBoolInt16R)
+	fn(map[bool]int32(nil), (*Encoder).fastpathEncMapBoolInt32R, (*Decoder).fastpathDecMapBoolInt32R)
+	fn(map[bool]int64(nil), (*Encoder).fastpathEncMapBoolInt64R, (*Decoder).fastpathDecMapBoolInt64R)
+	fn(map[bool]float32(nil), (*Encoder).fastpathEncMapBoolFloat32R, (*Decoder).fastpathDecMapBoolFloat32R)
+	fn(map[bool]float64(nil), (*Encoder).fastpathEncMapBoolFloat64R, (*Decoder).fastpathDecMapBoolFloat64R)
+	fn(map[bool]bool(nil), (*Encoder).fastpathEncMapBoolBoolR, (*Decoder).fastpathDecMapBoolBoolR)
+
+	sort.Sort(fastpathAslice(fastpathAV[:]))
+}
+
+// -- encode
+
+// -- -- fast path type switch
+func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool {
+	switch v := iv.(type) {
+
+	case []interface{}:
+		fastpathTV.EncSliceIntfV(v, e)
+	case *[]interface{}:
+		fastpathTV.EncSliceIntfV(*v, e)
+	case []string:
+		fastpathTV.EncSliceStringV(v, e)
+	case *[]string:
+		fastpathTV.EncSliceStringV(*v, e)
+	case []float32:
+		fastpathTV.EncSliceFloat32V(v, e)
+	case *[]float32:
+		fastpathTV.EncSliceFloat32V(*v, e)
+	case []float64:
+		fastpathTV.EncSliceFloat64V(v, e)
+	case *[]float64:
+		fastpathTV.EncSliceFloat64V(*v, e)
+	case []uint:
+		fastpathTV.EncSliceUintV(v, e)
+	case *[]uint:
+		fastpathTV.EncSliceUintV(*v, e)
+	case []uint16:
+		fastpathTV.EncSliceUint16V(v, e)
+	case *[]uint16:
+		fastpathTV.EncSliceUint16V(*v, e)
+	case []uint32:
+		fastpathTV.EncSliceUint32V(v, e)
+	case *[]uint32:
+		fastpathTV.EncSliceUint32V(*v, e)
+	case []uint64:
+		fastpathTV.EncSliceUint64V(v, e)
+	case *[]uint64:
+		fastpathTV.EncSliceUint64V(*v, e)
+	case []uintptr:
+		fastpathTV.EncSliceUintptrV(v, e)
+	case *[]uintptr:
+		fastpathTV.EncSliceUintptrV(*v, e)
+	case []int:
+		fastpathTV.EncSliceIntV(v, e)
+	case *[]int:
+		fastpathTV.EncSliceIntV(*v, e)
+	case []int8:
+		fastpathTV.EncSliceInt8V(v, e)
+	case *[]int8:
+		fastpathTV.EncSliceInt8V(*v, e)
+	case []int16:
+		fastpathTV.EncSliceInt16V(v, e)
+	case *[]int16:
+		fastpathTV.EncSliceInt16V(*v, e)
+	case []int32:
+		fastpathTV.EncSliceInt32V(v, e)
+	case *[]int32:
+		fastpathTV.EncSliceInt32V(*v, e)
+	case []int64:
+		fastpathTV.EncSliceInt64V(v, e)
+	case *[]int64:
+		fastpathTV.EncSliceInt64V(*v, e)
+	case []bool:
+		fastpathTV.EncSliceBoolV(v, e)
+	case *[]bool:
+		fastpathTV.EncSliceBoolV(*v, e)
+
+	case map[interface{}]interface{}:
+		fastpathTV.EncMapIntfIntfV(v, e)
+	case *map[interface{}]interface{}:
+		fastpathTV.EncMapIntfIntfV(*v, e)
+	case map[interface{}]string:
+		fastpathTV.EncMapIntfStringV(v, e)
+	case *map[interface{}]string:
+		fastpathTV.EncMapIntfStringV(*v, e)
+	case map[interface{}]uint:
+		fastpathTV.EncMapIntfUintV(v, e)
+	case *map[interface{}]uint:
+		fastpathTV.EncMapIntfUintV(*v, e)
+	case map[interface{}]uint8:
+		fastpathTV.EncMapIntfUint8V(v, e)
+	case *map[interface{}]uint8:
+		fastpathTV.EncMapIntfUint8V(*v, e)
+	case map[interface{}]uint16:
+		fastpathTV.EncMapIntfUint16V(v, e)
+	case *map[interface{}]uint16:
+		fastpathTV.EncMapIntfUint16V(*v, e)
+	case map[interface{}]uint32:
+		fastpathTV.EncMapIntfUint32V(v, e)
+	case *map[interface{}]uint32:
+		fastpathTV.EncMapIntfUint32V(*v, e)
+	case map[interface{}]uint64:
+		fastpathTV.EncMapIntfUint64V(v, e)
+	case *map[interface{}]uint64:
+		fastpathTV.EncMapIntfUint64V(*v, e)
+	case map[interface{}]uintptr:
+		fastpathTV.EncMapIntfUintptrV(v, e)
+	case *map[interface{}]uintptr:
+		fastpathTV.EncMapIntfUintptrV(*v, e)
+	case map[interface{}]int:
+		fastpathTV.EncMapIntfIntV(v, e)
+	case *map[interface{}]int:
+		fastpathTV.EncMapIntfIntV(*v, e)
+	case map[interface{}]int8:
+		fastpathTV.EncMapIntfInt8V(v, e)
+	case *map[interface{}]int8:
+		fastpathTV.EncMapIntfInt8V(*v, e)
+	case map[interface{}]int16:
+		fastpathTV.EncMapIntfInt16V(v, e)
+	case *map[interface{}]int16:
+		fastpathTV.EncMapIntfInt16V(*v, e)
+	case map[interface{}]int32:
+		fastpathTV.EncMapIntfInt32V(v, e)
+	case *map[interface{}]int32:
+		fastpathTV.EncMapIntfInt32V(*v, e)
+	case map[interface{}]int64:
+		fastpathTV.EncMapIntfInt64V(v, e)
+	case *map[interface{}]int64:
+		fastpathTV.EncMapIntfInt64V(*v, e)
+	case map[interface{}]float32:
+		fastpathTV.EncMapIntfFloat32V(v, e)
+	case *map[interface{}]float32:
+		fastpathTV.EncMapIntfFloat32V(*v, e)
+	case map[interface{}]float64:
+		fastpathTV.EncMapIntfFloat64V(v, e)
+	case *map[interface{}]float64:
+		fastpathTV.EncMapIntfFloat64V(*v, e)
+	case map[interface{}]bool:
+		fastpathTV.EncMapIntfBoolV(v, e)
+	case *map[interface{}]bool:
+		fastpathTV.EncMapIntfBoolV(*v, e)
+	case map[string]interface{}:
+		fastpathTV.EncMapStringIntfV(v, e)
+	case *map[string]interface{}:
+		fastpathTV.EncMapStringIntfV(*v, e)
+	case map[string]string:
+		fastpathTV.EncMapStringStringV(v, e)
+	case *map[string]string:
+		fastpathTV.EncMapStringStringV(*v, e)
+	case map[string]uint:
+		fastpathTV.EncMapStringUintV(v, e)
+	case *map[string]uint:
+		fastpathTV.EncMapStringUintV(*v, e)
+	case map[string]uint8:
+		fastpathTV.EncMapStringUint8V(v, e)
+	case *map[string]uint8:
+		fastpathTV.EncMapStringUint8V(*v, e)
+	case map[string]uint16:
+		fastpathTV.EncMapStringUint16V(v, e)
+	case *map[string]uint16:
+		fastpathTV.EncMapStringUint16V(*v, e)
+	case map[string]uint32:
+		fastpathTV.EncMapStringUint32V(v, e)
+	case *map[string]uint32:
+		fastpathTV.EncMapStringUint32V(*v, e)
+	case map[string]uint64:
+		fastpathTV.EncMapStringUint64V(v, e)
+	case *map[string]uint64:
+		fastpathTV.EncMapStringUint64V(*v, e)
+	case map[string]uintptr:
+		fastpathTV.EncMapStringUintptrV(v, e)
+	case *map[string]uintptr:
+		fastpathTV.EncMapStringUintptrV(*v, e)
+	case map[string]int:
+		fastpathTV.EncMapStringIntV(v, e)
+	case *map[string]int:
+		fastpathTV.EncMapStringIntV(*v, e)
+	case map[string]int8:
+		fastpathTV.EncMapStringInt8V(v, e)
+	case *map[string]int8:
+		fastpathTV.EncMapStringInt8V(*v, e)
+	case map[string]int16:
+		fastpathTV.EncMapStringInt16V(v, e)
+	case *map[string]int16:
+		fastpathTV.EncMapStringInt16V(*v, e)
+	case map[string]int32:
+		fastpathTV.EncMapStringInt32V(v, e)
+	case *map[string]int32:
+		fastpathTV.EncMapStringInt32V(*v, e)
+	case map[string]int64:
+		fastpathTV.EncMapStringInt64V(v, e)
+	case *map[string]int64:
+		fastpathTV.EncMapStringInt64V(*v, e)
+	case map[string]float32:
+		fastpathTV.EncMapStringFloat32V(v, e)
+	case *map[string]float32:
+		fastpathTV.EncMapStringFloat32V(*v, e)
+	case map[string]float64:
+		fastpathTV.EncMapStringFloat64V(v, e)
+	case *map[string]float64:
+		fastpathTV.EncMapStringFloat64V(*v, e)
+	case map[string]bool:
+		fastpathTV.EncMapStringBoolV(v, e)
+	case *map[string]bool:
+		fastpathTV.EncMapStringBoolV(*v, e)
+	case map[float32]interface{}:
+		fastpathTV.EncMapFloat32IntfV(v, e)
+	case *map[float32]interface{}:
+		fastpathTV.EncMapFloat32IntfV(*v, e)
+	case map[float32]string:
+		fastpathTV.EncMapFloat32StringV(v, e)
+	case *map[float32]string:
+		fastpathTV.EncMapFloat32StringV(*v, e)
+	case map[float32]uint:
+		fastpathTV.EncMapFloat32UintV(v, e)
+	case *map[float32]uint:
+		fastpathTV.EncMapFloat32UintV(*v, e)
+	case map[float32]uint8:
+		fastpathTV.EncMapFloat32Uint8V(v, e)
+	case *map[float32]uint8:
+		fastpathTV.EncMapFloat32Uint8V(*v, e)
+	case map[float32]uint16:
+		fastpathTV.EncMapFloat32Uint16V(v, e)
+	case *map[float32]uint16:
+		fastpathTV.EncMapFloat32Uint16V(*v, e)
+	case map[float32]uint32:
+		fastpathTV.EncMapFloat32Uint32V(v, e)
+	case *map[float32]uint32:
+		fastpathTV.EncMapFloat32Uint32V(*v, e)
+	case map[float32]uint64:
+		fastpathTV.EncMapFloat32Uint64V(v, e)
+	case *map[float32]uint64:
+		fastpathTV.EncMapFloat32Uint64V(*v, e)
+	case map[float32]uintptr:
+		fastpathTV.EncMapFloat32UintptrV(v, e)
+	case *map[float32]uintptr:
+		fastpathTV.EncMapFloat32UintptrV(*v, e)
+	case map[float32]int:
+		fastpathTV.EncMapFloat32IntV(v, e)
+	case *map[float32]int:
+		fastpathTV.EncMapFloat32IntV(*v, e)
+	case map[float32]int8:
+		fastpathTV.EncMapFloat32Int8V(v, e)
+	case *map[float32]int8:
+		fastpathTV.EncMapFloat32Int8V(*v, e)
+	case map[float32]int16:
+		fastpathTV.EncMapFloat32Int16V(v, e)
+	case *map[float32]int16:
+		fastpathTV.EncMapFloat32Int16V(*v, e)
+	case map[float32]int32:
+		fastpathTV.EncMapFloat32Int32V(v, e)
+	case *map[float32]int32:
+		fastpathTV.EncMapFloat32Int32V(*v, e)
+	case map[float32]int64:
+		fastpathTV.EncMapFloat32Int64V(v, e)
+	case *map[float32]int64:
+		fastpathTV.EncMapFloat32Int64V(*v, e)
+	case map[float32]float32:
+		fastpathTV.EncMapFloat32Float32V(v, e)
+	case *map[float32]float32:
+		fastpathTV.EncMapFloat32Float32V(*v, e)
+	case map[float32]float64:
+		fastpathTV.EncMapFloat32Float64V(v, e)
+	case *map[float32]float64:
+		fastpathTV.EncMapFloat32Float64V(*v, e)
+	case map[float32]bool:
+		fastpathTV.EncMapFloat32BoolV(v, e)
+	case *map[float32]bool:
+		fastpathTV.EncMapFloat32BoolV(*v, e)
+	case map[float64]interface{}:
+		fastpathTV.EncMapFloat64IntfV(v, e)
+	case *map[float64]interface{}:
+		fastpathTV.EncMapFloat64IntfV(*v, e)
+	case map[float64]string:
+		fastpathTV.EncMapFloat64StringV(v, e)
+	case *map[float64]string:
+		fastpathTV.EncMapFloat64StringV(*v, e)
+	case map[float64]uint:
+		fastpathTV.EncMapFloat64UintV(v, e)
+	case *map[float64]uint:
+		fastpathTV.EncMapFloat64UintV(*v, e)
+	case map[float64]uint8:
+		fastpathTV.EncMapFloat64Uint8V(v, e)
+	case *map[float64]uint8:
+		fastpathTV.EncMapFloat64Uint8V(*v, e)
+	case map[float64]uint16:
+		fastpathTV.EncMapFloat64Uint16V(v, e)
+	case *map[float64]uint16:
+		fastpathTV.EncMapFloat64Uint16V(*v, e)
+	case map[float64]uint32:
+		fastpathTV.EncMapFloat64Uint32V(v, e)
+	case *map[float64]uint32:
+		fastpathTV.EncMapFloat64Uint32V(*v, e)
+	case map[float64]uint64:
+		fastpathTV.EncMapFloat64Uint64V(v, e)
+	case *map[float64]uint64:
+		fastpathTV.EncMapFloat64Uint64V(*v, e)
+	case map[float64]uintptr:
+		fastpathTV.EncMapFloat64UintptrV(v, e)
+	case *map[float64]uintptr:
+		fastpathTV.EncMapFloat64UintptrV(*v, e)
+	case map[float64]int:
+		fastpathTV.EncMapFloat64IntV(v, e)
+	case *map[float64]int:
+		fastpathTV.EncMapFloat64IntV(*v, e)
+	case map[float64]int8:
+		fastpathTV.EncMapFloat64Int8V(v, e)
+	case *map[float64]int8:
+		fastpathTV.EncMapFloat64Int8V(*v, e)
+	case map[float64]int16:
+		fastpathTV.EncMapFloat64Int16V(v, e)
+	case *map[float64]int16:
+		fastpathTV.EncMapFloat64Int16V(*v, e)
+	case map[float64]int32:
+		fastpathTV.EncMapFloat64Int32V(v, e)
+	case *map[float64]int32:
+		fastpathTV.EncMapFloat64Int32V(*v, e)
+	case map[float64]int64:
+		fastpathTV.EncMapFloat64Int64V(v, e)
+	case *map[float64]int64:
+		fastpathTV.EncMapFloat64Int64V(*v, e)
+	case map[float64]float32:
+		fastpathTV.EncMapFloat64Float32V(v, e)
+	case *map[float64]float32:
+		fastpathTV.EncMapFloat64Float32V(*v, e)
+	case map[float64]float64:
+		fastpathTV.EncMapFloat64Float64V(v, e)
+	case *map[float64]float64:
+		fastpathTV.EncMapFloat64Float64V(*v, e)
+	case map[float64]bool:
+		fastpathTV.EncMapFloat64BoolV(v, e)
+	case *map[float64]bool:
+		fastpathTV.EncMapFloat64BoolV(*v, e)
+	case map[uint]interface{}:
+		fastpathTV.EncMapUintIntfV(v, e)
+	case *map[uint]interface{}:
+		fastpathTV.EncMapUintIntfV(*v, e)
+	case map[uint]string:
+		fastpathTV.EncMapUintStringV(v, e)
+	case *map[uint]string:
+		fastpathTV.EncMapUintStringV(*v, e)
+	case map[uint]uint:
+		fastpathTV.EncMapUintUintV(v, e)
+	case *map[uint]uint:
+		fastpathTV.EncMapUintUintV(*v, e)
+	case map[uint]uint8:
+		fastpathTV.EncMapUintUint8V(v, e)
+	case *map[uint]uint8:
+		fastpathTV.EncMapUintUint8V(*v, e)
+	case map[uint]uint16:
+		fastpathTV.EncMapUintUint16V(v, e)
+	case *map[uint]uint16:
+		fastpathTV.EncMapUintUint16V(*v, e)
+	case map[uint]uint32:
+		fastpathTV.EncMapUintUint32V(v, e)
+	case *map[uint]uint32:
+		fastpathTV.EncMapUintUint32V(*v, e)
+	case map[uint]uint64:
+		fastpathTV.EncMapUintUint64V(v, e)
+	case *map[uint]uint64:
+		fastpathTV.EncMapUintUint64V(*v, e)
+	case map[uint]uintptr:
+		fastpathTV.EncMapUintUintptrV(v, e)
+	case *map[uint]uintptr:
+		fastpathTV.EncMapUintUintptrV(*v, e)
+	case map[uint]int:
+		fastpathTV.EncMapUintIntV(v, e)
+	case *map[uint]int:
+		fastpathTV.EncMapUintIntV(*v, e)
+	case map[uint]int8:
+		fastpathTV.EncMapUintInt8V(v, e)
+	case *map[uint]int8:
+		fastpathTV.EncMapUintInt8V(*v, e)
+	case map[uint]int16:
+		fastpathTV.EncMapUintInt16V(v, e)
+	case *map[uint]int16:
+		fastpathTV.EncMapUintInt16V(*v, e)
+	case map[uint]int32:
+		fastpathTV.EncMapUintInt32V(v, e)
+	case *map[uint]int32:
+		fastpathTV.EncMapUintInt32V(*v, e)
+	case map[uint]int64:
+		fastpathTV.EncMapUintInt64V(v, e)
+	case *map[uint]int64:
+		fastpathTV.EncMapUintInt64V(*v, e)
+	case map[uint]float32:
+		fastpathTV.EncMapUintFloat32V(v, e)
+	case *map[uint]float32:
+		fastpathTV.EncMapUintFloat32V(*v, e)
+	case map[uint]float64:
+		fastpathTV.EncMapUintFloat64V(v, e)
+	case *map[uint]float64:
+		fastpathTV.EncMapUintFloat64V(*v, e)
+	case map[uint]bool:
+		fastpathTV.EncMapUintBoolV(v, e)
+	case *map[uint]bool:
+		fastpathTV.EncMapUintBoolV(*v, e)
+	case map[uint8]interface{}:
+		fastpathTV.EncMapUint8IntfV(v, e)
+	case *map[uint8]interface{}:
+		fastpathTV.EncMapUint8IntfV(*v, e)
+	case map[uint8]string:
+		fastpathTV.EncMapUint8StringV(v, e)
+	case *map[uint8]string:
+		fastpathTV.EncMapUint8StringV(*v, e)
+	case map[uint8]uint:
+		fastpathTV.EncMapUint8UintV(v, e)
+	case *map[uint8]uint:
+		fastpathTV.EncMapUint8UintV(*v, e)
+	case map[uint8]uint8:
+		fastpathTV.EncMapUint8Uint8V(v, e)
+	case *map[uint8]uint8:
+		fastpathTV.EncMapUint8Uint8V(*v, e)
+	case map[uint8]uint16:
+		fastpathTV.EncMapUint8Uint16V(v, e)
+	case *map[uint8]uint16:
+		fastpathTV.EncMapUint8Uint16V(*v, e)
+	case map[uint8]uint32:
+		fastpathTV.EncMapUint8Uint32V(v, e)
+	case *map[uint8]uint32:
+		fastpathTV.EncMapUint8Uint32V(*v, e)
+	case map[uint8]uint64:
+		fastpathTV.EncMapUint8Uint64V(v, e)
+	case *map[uint8]uint64:
+		fastpathTV.EncMapUint8Uint64V(*v, e)
+	case map[uint8]uintptr:
+		fastpathTV.EncMapUint8UintptrV(v, e)
+	case *map[uint8]uintptr:
+		fastpathTV.EncMapUint8UintptrV(*v, e)
+	case map[uint8]int:
+		fastpathTV.EncMapUint8IntV(v, e)
+	case *map[uint8]int:
+		fastpathTV.EncMapUint8IntV(*v, e)
+	case map[uint8]int8:
+		fastpathTV.EncMapUint8Int8V(v, e)
+	case *map[uint8]int8:
+		fastpathTV.EncMapUint8Int8V(*v, e)
+	case map[uint8]int16:
+		fastpathTV.EncMapUint8Int16V(v, e)
+	case *map[uint8]int16:
+		fastpathTV.EncMapUint8Int16V(*v, e)
+	case map[uint8]int32:
+		fastpathTV.EncMapUint8Int32V(v, e)
+	case *map[uint8]int32:
+		fastpathTV.EncMapUint8Int32V(*v, e)
+	case map[uint8]int64:
+		fastpathTV.EncMapUint8Int64V(v, e)
+	case *map[uint8]int64:
+		fastpathTV.EncMapUint8Int64V(*v, e)
+	case map[uint8]float32:
+		fastpathTV.EncMapUint8Float32V(v, e)
+	case *map[uint8]float32:
+		fastpathTV.EncMapUint8Float32V(*v, e)
+	case map[uint8]float64:
+		fastpathTV.EncMapUint8Float64V(v, e)
+	case *map[uint8]float64:
+		fastpathTV.EncMapUint8Float64V(*v, e)
+	case map[uint8]bool:
+		fastpathTV.EncMapUint8BoolV(v, e)
+	case *map[uint8]bool:
+		fastpathTV.EncMapUint8BoolV(*v, e)
+	case map[uint16]interface{}:
+		fastpathTV.EncMapUint16IntfV(v, e)
+	case *map[uint16]interface{}:
+		fastpathTV.EncMapUint16IntfV(*v, e)
+	case map[uint16]string:
+		fastpathTV.EncMapUint16StringV(v, e)
+	case *map[uint16]string:
+		fastpathTV.EncMapUint16StringV(*v, e)
+	case map[uint16]uint:
+		fastpathTV.EncMapUint16UintV(v, e)
+	case *map[uint16]uint:
+		fastpathTV.EncMapUint16UintV(*v, e)
+	case map[uint16]uint8:
+		fastpathTV.EncMapUint16Uint8V(v, e)
+	case *map[uint16]uint8:
+		fastpathTV.EncMapUint16Uint8V(*v, e)
+	case map[uint16]uint16:
+		fastpathTV.EncMapUint16Uint16V(v, e)
+	case *map[uint16]uint16:
+		fastpathTV.EncMapUint16Uint16V(*v, e)
+	case map[uint16]uint32:
+		fastpathTV.EncMapUint16Uint32V(v, e)
+	case *map[uint16]uint32:
+		fastpathTV.EncMapUint16Uint32V(*v, e)
+	case map[uint16]uint64:
+		fastpathTV.EncMapUint16Uint64V(v, e)
+	case *map[uint16]uint64:
+		fastpathTV.EncMapUint16Uint64V(*v, e)
+	case map[uint16]uintptr:
+		fastpathTV.EncMapUint16UintptrV(v, e)
+	case *map[uint16]uintptr:
+		fastpathTV.EncMapUint16UintptrV(*v, e)
+	case map[uint16]int:
+		fastpathTV.EncMapUint16IntV(v, e)
+	case *map[uint16]int:
+		fastpathTV.EncMapUint16IntV(*v, e)
+	case map[uint16]int8:
+		fastpathTV.EncMapUint16Int8V(v, e)
+	case *map[uint16]int8:
+		fastpathTV.EncMapUint16Int8V(*v, e)
+	case map[uint16]int16:
+		fastpathTV.EncMapUint16Int16V(v, e)
+	case *map[uint16]int16:
+		fastpathTV.EncMapUint16Int16V(*v, e)
+	case map[uint16]int32:
+		fastpathTV.EncMapUint16Int32V(v, e)
+	case *map[uint16]int32:
+		fastpathTV.EncMapUint16Int32V(*v, e)
+	case map[uint16]int64:
+		fastpathTV.EncMapUint16Int64V(v, e)
+	case *map[uint16]int64:
+		fastpathTV.EncMapUint16Int64V(*v, e)
+	case map[uint16]float32:
+		fastpathTV.EncMapUint16Float32V(v, e)
+	case *map[uint16]float32:
+		fastpathTV.EncMapUint16Float32V(*v, e)
+	case map[uint16]float64:
+		fastpathTV.EncMapUint16Float64V(v, e)
+	case *map[uint16]float64:
+		fastpathTV.EncMapUint16Float64V(*v, e)
+	case map[uint16]bool:
+		fastpathTV.EncMapUint16BoolV(v, e)
+	case *map[uint16]bool:
+		fastpathTV.EncMapUint16BoolV(*v, e)
+	case map[uint32]interface{}:
+		fastpathTV.EncMapUint32IntfV(v, e)
+	case *map[uint32]interface{}:
+		fastpathTV.EncMapUint32IntfV(*v, e)
+	case map[uint32]string:
+		fastpathTV.EncMapUint32StringV(v, e)
+	case *map[uint32]string:
+		fastpathTV.EncMapUint32StringV(*v, e)
+	case map[uint32]uint:
+		fastpathTV.EncMapUint32UintV(v, e)
+	case *map[uint32]uint:
+		fastpathTV.EncMapUint32UintV(*v, e)
+	case map[uint32]uint8:
+		fastpathTV.EncMapUint32Uint8V(v, e)
+	case *map[uint32]uint8:
+		fastpathTV.EncMapUint32Uint8V(*v, e)
+	case map[uint32]uint16:
+		fastpathTV.EncMapUint32Uint16V(v, e)
+	case *map[uint32]uint16:
+		fastpathTV.EncMapUint32Uint16V(*v, e)
+	case map[uint32]uint32:
+		fastpathTV.EncMapUint32Uint32V(v, e)
+	case *map[uint32]uint32:
+		fastpathTV.EncMapUint32Uint32V(*v, e)
+	case map[uint32]uint64:
+		fastpathTV.EncMapUint32Uint64V(v, e)
+	case *map[uint32]uint64:
+		fastpathTV.EncMapUint32Uint64V(*v, e)
+	case map[uint32]uintptr:
+		fastpathTV.EncMapUint32UintptrV(v, e)
+	case *map[uint32]uintptr:
+		fastpathTV.EncMapUint32UintptrV(*v, e)
+	case map[uint32]int:
+		fastpathTV.EncMapUint32IntV(v, e)
+	case *map[uint32]int:
+		fastpathTV.EncMapUint32IntV(*v, e)
+	case map[uint32]int8:
+		fastpathTV.EncMapUint32Int8V(v, e)
+	case *map[uint32]int8:
+		fastpathTV.EncMapUint32Int8V(*v, e)
+	case map[uint32]int16:
+		fastpathTV.EncMapUint32Int16V(v, e)
+	case *map[uint32]int16:
+		fastpathTV.EncMapUint32Int16V(*v, e)
+	case map[uint32]int32:
+		fastpathTV.EncMapUint32Int32V(v, e)
+	case *map[uint32]int32:
+		fastpathTV.EncMapUint32Int32V(*v, e)
+	case map[uint32]int64:
+		fastpathTV.EncMapUint32Int64V(v, e)
+	case *map[uint32]int64:
+		fastpathTV.EncMapUint32Int64V(*v, e)
+	case map[uint32]float32:
+		fastpathTV.EncMapUint32Float32V(v, e)
+	case *map[uint32]float32:
+		fastpathTV.EncMapUint32Float32V(*v, e)
+	case map[uint32]float64:
+		fastpathTV.EncMapUint32Float64V(v, e)
+	case *map[uint32]float64:
+		fastpathTV.EncMapUint32Float64V(*v, e)
+	case map[uint32]bool:
+		fastpathTV.EncMapUint32BoolV(v, e)
+	case *map[uint32]bool:
+		fastpathTV.EncMapUint32BoolV(*v, e)
+	case map[uint64]interface{}:
+		fastpathTV.EncMapUint64IntfV(v, e)
+	case *map[uint64]interface{}:
+		fastpathTV.EncMapUint64IntfV(*v, e)
+	case map[uint64]string:
+		fastpathTV.EncMapUint64StringV(v, e)
+	case *map[uint64]string:
+		fastpathTV.EncMapUint64StringV(*v, e)
+	case map[uint64]uint:
+		fastpathTV.EncMapUint64UintV(v, e)
+	case *map[uint64]uint:
+		fastpathTV.EncMapUint64UintV(*v, e)
+	case map[uint64]uint8:
+		fastpathTV.EncMapUint64Uint8V(v, e)
+	case *map[uint64]uint8:
+		fastpathTV.EncMapUint64Uint8V(*v, e)
+	case map[uint64]uint16:
+		fastpathTV.EncMapUint64Uint16V(v, e)
+	case *map[uint64]uint16:
+		fastpathTV.EncMapUint64Uint16V(*v, e)
+	case map[uint64]uint32:
+		fastpathTV.EncMapUint64Uint32V(v, e)
+	case *map[uint64]uint32:
+		fastpathTV.EncMapUint64Uint32V(*v, e)
+	case map[uint64]uint64:
+		fastpathTV.EncMapUint64Uint64V(v, e)
+	case *map[uint64]uint64:
+		fastpathTV.EncMapUint64Uint64V(*v, e)
+	case map[uint64]uintptr:
+		fastpathTV.EncMapUint64UintptrV(v, e)
+	case *map[uint64]uintptr:
+		fastpathTV.EncMapUint64UintptrV(*v, e)
+	case map[uint64]int:
+		fastpathTV.EncMapUint64IntV(v, e)
+	case *map[uint64]int:
+		fastpathTV.EncMapUint64IntV(*v, e)
+	case map[uint64]int8:
+		fastpathTV.EncMapUint64Int8V(v, e)
+	case *map[uint64]int8:
+		fastpathTV.EncMapUint64Int8V(*v, e)
+	case map[uint64]int16:
+		fastpathTV.EncMapUint64Int16V(v, e)
+	case *map[uint64]int16:
+		fastpathTV.EncMapUint64Int16V(*v, e)
+	case map[uint64]int32:
+		fastpathTV.EncMapUint64Int32V(v, e)
+	case *map[uint64]int32:
+		fastpathTV.EncMapUint64Int32V(*v, e)
+	case map[uint64]int64:
+		fastpathTV.EncMapUint64Int64V(v, e)
+	case *map[uint64]int64:
+		fastpathTV.EncMapUint64Int64V(*v, e)
+	case map[uint64]float32:
+		fastpathTV.EncMapUint64Float32V(v, e)
+	case *map[uint64]float32:
+		fastpathTV.EncMapUint64Float32V(*v, e)
+	case map[uint64]float64:
+		fastpathTV.EncMapUint64Float64V(v, e)
+	case *map[uint64]float64:
+		fastpathTV.EncMapUint64Float64V(*v, e)
+	case map[uint64]bool:
+		fastpathTV.EncMapUint64BoolV(v, e)
+	case *map[uint64]bool:
+		fastpathTV.EncMapUint64BoolV(*v, e)
+	case map[uintptr]interface{}:
+		fastpathTV.EncMapUintptrIntfV(v, e)
+	case *map[uintptr]interface{}:
+		fastpathTV.EncMapUintptrIntfV(*v, e)
+	case map[uintptr]string:
+		fastpathTV.EncMapUintptrStringV(v, e)
+	case *map[uintptr]string:
+		fastpathTV.EncMapUintptrStringV(*v, e)
+	case map[uintptr]uint:
+		fastpathTV.EncMapUintptrUintV(v, e)
+	case *map[uintptr]uint:
+		fastpathTV.EncMapUintptrUintV(*v, e)
+	case map[uintptr]uint8:
+		fastpathTV.EncMapUintptrUint8V(v, e)
+	case *map[uintptr]uint8:
+		fastpathTV.EncMapUintptrUint8V(*v, e)
+	case map[uintptr]uint16:
+		fastpathTV.EncMapUintptrUint16V(v, e)
+	case *map[uintptr]uint16:
+		fastpathTV.EncMapUintptrUint16V(*v, e)
+	case map[uintptr]uint32:
+		fastpathTV.EncMapUintptrUint32V(v, e)
+	case *map[uintptr]uint32:
+		fastpathTV.EncMapUintptrUint32V(*v, e)
+	case map[uintptr]uint64:
+		fastpathTV.EncMapUintptrUint64V(v, e)
+	case *map[uintptr]uint64:
+		fastpathTV.EncMapUintptrUint64V(*v, e)
+	case map[uintptr]uintptr:
+		fastpathTV.EncMapUintptrUintptrV(v, e)
+	case *map[uintptr]uintptr:
+		fastpathTV.EncMapUintptrUintptrV(*v, e)
+	case map[uintptr]int:
+		fastpathTV.EncMapUintptrIntV(v, e)
+	case *map[uintptr]int:
+		fastpathTV.EncMapUintptrIntV(*v, e)
+	case map[uintptr]int8:
+		fastpathTV.EncMapUintptrInt8V(v, e)
+	case *map[uintptr]int8:
+		fastpathTV.EncMapUintptrInt8V(*v, e)
+	case map[uintptr]int16:
+		fastpathTV.EncMapUintptrInt16V(v, e)
+	case *map[uintptr]int16:
+		fastpathTV.EncMapUintptrInt16V(*v, e)
+	case map[uintptr]int32:
+		fastpathTV.EncMapUintptrInt32V(v, e)
+	case *map[uintptr]int32:
+		fastpathTV.EncMapUintptrInt32V(*v, e)
+	case map[uintptr]int64:
+		fastpathTV.EncMapUintptrInt64V(v, e)
+	case *map[uintptr]int64:
+		fastpathTV.EncMapUintptrInt64V(*v, e)
+	case map[uintptr]float32:
+		fastpathTV.EncMapUintptrFloat32V(v, e)
+	case *map[uintptr]float32:
+		fastpathTV.EncMapUintptrFloat32V(*v, e)
+	case map[uintptr]float64:
+		fastpathTV.EncMapUintptrFloat64V(v, e)
+	case *map[uintptr]float64:
+		fastpathTV.EncMapUintptrFloat64V(*v, e)
+	case map[uintptr]bool:
+		fastpathTV.EncMapUintptrBoolV(v, e)
+	case *map[uintptr]bool:
+		fastpathTV.EncMapUintptrBoolV(*v, e)
+	case map[int]interface{}:
+		fastpathTV.EncMapIntIntfV(v, e)
+	case *map[int]interface{}:
+		fastpathTV.EncMapIntIntfV(*v, e)
+	case map[int]string:
+		fastpathTV.EncMapIntStringV(v, e)
+	case *map[int]string:
+		fastpathTV.EncMapIntStringV(*v, e)
+	case map[int]uint:
+		fastpathTV.EncMapIntUintV(v, e)
+	case *map[int]uint:
+		fastpathTV.EncMapIntUintV(*v, e)
+	case map[int]uint8:
+		fastpathTV.EncMapIntUint8V(v, e)
+	case *map[int]uint8:
+		fastpathTV.EncMapIntUint8V(*v, e)
+	case map[int]uint16:
+		fastpathTV.EncMapIntUint16V(v, e)
+	case *map[int]uint16:
+		fastpathTV.EncMapIntUint16V(*v, e)
+	case map[int]uint32:
+		fastpathTV.EncMapIntUint32V(v, e)
+	case *map[int]uint32:
+		fastpathTV.EncMapIntUint32V(*v, e)
+	case map[int]uint64:
+		fastpathTV.EncMapIntUint64V(v, e)
+	case *map[int]uint64:
+		fastpathTV.EncMapIntUint64V(*v, e)
+	case map[int]uintptr:
+		fastpathTV.EncMapIntUintptrV(v, e)
+	case *map[int]uintptr:
+		fastpathTV.EncMapIntUintptrV(*v, e)
+	case map[int]int:
+		fastpathTV.EncMapIntIntV(v, e)
+	case *map[int]int:
+		fastpathTV.EncMapIntIntV(*v, e)
+	case map[int]int8:
+		fastpathTV.EncMapIntInt8V(v, e)
+	case *map[int]int8:
+		fastpathTV.EncMapIntInt8V(*v, e)
+	case map[int]int16:
+		fastpathTV.EncMapIntInt16V(v, e)
+	case *map[int]int16:
+		fastpathTV.EncMapIntInt16V(*v, e)
+	case map[int]int32:
+		fastpathTV.EncMapIntInt32V(v, e)
+	case *map[int]int32:
+		fastpathTV.EncMapIntInt32V(*v, e)
+	case map[int]int64:
+		fastpathTV.EncMapIntInt64V(v, e)
+	case *map[int]int64:
+		fastpathTV.EncMapIntInt64V(*v, e)
+	case map[int]float32:
+		fastpathTV.EncMapIntFloat32V(v, e)
+	case *map[int]float32:
+		fastpathTV.EncMapIntFloat32V(*v, e)
+	case map[int]float64:
+		fastpathTV.EncMapIntFloat64V(v, e)
+	case *map[int]float64:
+		fastpathTV.EncMapIntFloat64V(*v, e)
+	case map[int]bool:
+		fastpathTV.EncMapIntBoolV(v, e)
+	case *map[int]bool:
+		fastpathTV.EncMapIntBoolV(*v, e)
+	case map[int8]interface{}:
+		fastpathTV.EncMapInt8IntfV(v, e)
+	case *map[int8]interface{}:
+		fastpathTV.EncMapInt8IntfV(*v, e)
+	case map[int8]string:
+		fastpathTV.EncMapInt8StringV(v, e)
+	case *map[int8]string:
+		fastpathTV.EncMapInt8StringV(*v, e)
+	case map[int8]uint:
+		fastpathTV.EncMapInt8UintV(v, e)
+	case *map[int8]uint:
+		fastpathTV.EncMapInt8UintV(*v, e)
+	case map[int8]uint8:
+		fastpathTV.EncMapInt8Uint8V(v, e)
+	case *map[int8]uint8:
+		fastpathTV.EncMapInt8Uint8V(*v, e)
+	case map[int8]uint16:
+		fastpathTV.EncMapInt8Uint16V(v, e)
+	case *map[int8]uint16:
+		fastpathTV.EncMapInt8Uint16V(*v, e)
+	case map[int8]uint32:
+		fastpathTV.EncMapInt8Uint32V(v, e)
+	case *map[int8]uint32:
+		fastpathTV.EncMapInt8Uint32V(*v, e)
+	case map[int8]uint64:
+		fastpathTV.EncMapInt8Uint64V(v, e)
+	case *map[int8]uint64:
+		fastpathTV.EncMapInt8Uint64V(*v, e)
+	case map[int8]uintptr:
+		fastpathTV.EncMapInt8UintptrV(v, e)
+	case *map[int8]uintptr:
+		fastpathTV.EncMapInt8UintptrV(*v, e)
+	case map[int8]int:
+		fastpathTV.EncMapInt8IntV(v, e)
+	case *map[int8]int:
+		fastpathTV.EncMapInt8IntV(*v, e)
+	case map[int8]int8:
+		fastpathTV.EncMapInt8Int8V(v, e)
+	case *map[int8]int8:
+		fastpathTV.EncMapInt8Int8V(*v, e)
+	case map[int8]int16:
+		fastpathTV.EncMapInt8Int16V(v, e)
+	case *map[int8]int16:
+		fastpathTV.EncMapInt8Int16V(*v, e)
+	case map[int8]int32:
+		fastpathTV.EncMapInt8Int32V(v, e)
+	case *map[int8]int32:
+		fastpathTV.EncMapInt8Int32V(*v, e)
+	case map[int8]int64:
+		fastpathTV.EncMapInt8Int64V(v, e)
+	case *map[int8]int64:
+		fastpathTV.EncMapInt8Int64V(*v, e)
+	case map[int8]float32:
+		fastpathTV.EncMapInt8Float32V(v, e)
+	case *map[int8]float32:
+		fastpathTV.EncMapInt8Float32V(*v, e)
+	case map[int8]float64:
+		fastpathTV.EncMapInt8Float64V(v, e)
+	case *map[int8]float64:
+		fastpathTV.EncMapInt8Float64V(*v, e)
+	case map[int8]bool:
+		fastpathTV.EncMapInt8BoolV(v, e)
+	case *map[int8]bool:
+		fastpathTV.EncMapInt8BoolV(*v, e)
+	case map[int16]interface{}:
+		fastpathTV.EncMapInt16IntfV(v, e)
+	case *map[int16]interface{}:
+		fastpathTV.EncMapInt16IntfV(*v, e)
+	case map[int16]string:
+		fastpathTV.EncMapInt16StringV(v, e)
+	case *map[int16]string:
+		fastpathTV.EncMapInt16StringV(*v, e)
+	case map[int16]uint:
+		fastpathTV.EncMapInt16UintV(v, e)
+	case *map[int16]uint:
+		fastpathTV.EncMapInt16UintV(*v, e)
+	case map[int16]uint8:
+		fastpathTV.EncMapInt16Uint8V(v, e)
+	case *map[int16]uint8:
+		fastpathTV.EncMapInt16Uint8V(*v, e)
+	case map[int16]uint16:
+		fastpathTV.EncMapInt16Uint16V(v, e)
+	case *map[int16]uint16:
+		fastpathTV.EncMapInt16Uint16V(*v, e)
+	case map[int16]uint32:
+		fastpathTV.EncMapInt16Uint32V(v, e)
+	case *map[int16]uint32:
+		fastpathTV.EncMapInt16Uint32V(*v, e)
+	case map[int16]uint64:
+		fastpathTV.EncMapInt16Uint64V(v, e)
+	case *map[int16]uint64:
+		fastpathTV.EncMapInt16Uint64V(*v, e)
+	case map[int16]uintptr:
+		fastpathTV.EncMapInt16UintptrV(v, e)
+	case *map[int16]uintptr:
+		fastpathTV.EncMapInt16UintptrV(*v, e)
+	case map[int16]int:
+		fastpathTV.EncMapInt16IntV(v, e)
+	case *map[int16]int:
+		fastpathTV.EncMapInt16IntV(*v, e)
+	case map[int16]int8:
+		fastpathTV.EncMapInt16Int8V(v, e)
+	case *map[int16]int8:
+		fastpathTV.EncMapInt16Int8V(*v, e)
+	case map[int16]int16:
+		fastpathTV.EncMapInt16Int16V(v, e)
+	case *map[int16]int16:
+		fastpathTV.EncMapInt16Int16V(*v, e)
+	case map[int16]int32:
+		fastpathTV.EncMapInt16Int32V(v, e)
+	case *map[int16]int32:
+		fastpathTV.EncMapInt16Int32V(*v, e)
+	case map[int16]int64:
+		fastpathTV.EncMapInt16Int64V(v, e)
+	case *map[int16]int64:
+		fastpathTV.EncMapInt16Int64V(*v, e)
+	case map[int16]float32:
+		fastpathTV.EncMapInt16Float32V(v, e)
+	case *map[int16]float32:
+		fastpathTV.EncMapInt16Float32V(*v, e)
+	case map[int16]float64:
+		fastpathTV.EncMapInt16Float64V(v, e)
+	case *map[int16]float64:
+		fastpathTV.EncMapInt16Float64V(*v, e)
+	case map[int16]bool:
+		fastpathTV.EncMapInt16BoolV(v, e)
+	case *map[int16]bool:
+		fastpathTV.EncMapInt16BoolV(*v, e)
+	case map[int32]interface{}:
+		fastpathTV.EncMapInt32IntfV(v, e)
+	case *map[int32]interface{}:
+		fastpathTV.EncMapInt32IntfV(*v, e)
+	case map[int32]string:
+		fastpathTV.EncMapInt32StringV(v, e)
+	case *map[int32]string:
+		fastpathTV.EncMapInt32StringV(*v, e)
+	case map[int32]uint:
+		fastpathTV.EncMapInt32UintV(v, e)
+	case *map[int32]uint:
+		fastpathTV.EncMapInt32UintV(*v, e)
+	case map[int32]uint8:
+		fastpathTV.EncMapInt32Uint8V(v, e)
+	case *map[int32]uint8:
+		fastpathTV.EncMapInt32Uint8V(*v, e)
+	case map[int32]uint16:
+		fastpathTV.EncMapInt32Uint16V(v, e)
+	case *map[int32]uint16:
+		fastpathTV.EncMapInt32Uint16V(*v, e)
+	case map[int32]uint32:
+		fastpathTV.EncMapInt32Uint32V(v, e)
+	case *map[int32]uint32:
+		fastpathTV.EncMapInt32Uint32V(*v, e)
+	case map[int32]uint64:
+		fastpathTV.EncMapInt32Uint64V(v, e)
+	case *map[int32]uint64:
+		fastpathTV.EncMapInt32Uint64V(*v, e)
+	case map[int32]uintptr:
+		fastpathTV.EncMapInt32UintptrV(v, e)
+	case *map[int32]uintptr:
+		fastpathTV.EncMapInt32UintptrV(*v, e)
+	case map[int32]int:
+		fastpathTV.EncMapInt32IntV(v, e)
+	case *map[int32]int:
+		fastpathTV.EncMapInt32IntV(*v, e)
+	case map[int32]int8:
+		fastpathTV.EncMapInt32Int8V(v, e)
+	case *map[int32]int8:
+		fastpathTV.EncMapInt32Int8V(*v, e)
+	case map[int32]int16:
+		fastpathTV.EncMapInt32Int16V(v, e)
+	case *map[int32]int16:
+		fastpathTV.EncMapInt32Int16V(*v, e)
+	case map[int32]int32:
+		fastpathTV.EncMapInt32Int32V(v, e)
+	case *map[int32]int32:
+		fastpathTV.EncMapInt32Int32V(*v, e)
+	case map[int32]int64:
+		fastpathTV.EncMapInt32Int64V(v, e)
+	case *map[int32]int64:
+		fastpathTV.EncMapInt32Int64V(*v, e)
+	case map[int32]float32:
+		fastpathTV.EncMapInt32Float32V(v, e)
+	case *map[int32]float32:
+		fastpathTV.EncMapInt32Float32V(*v, e)
+	case map[int32]float64:
+		fastpathTV.EncMapInt32Float64V(v, e)
+	case *map[int32]float64:
+		fastpathTV.EncMapInt32Float64V(*v, e)
+	case map[int32]bool:
+		fastpathTV.EncMapInt32BoolV(v, e)
+	case *map[int32]bool:
+		fastpathTV.EncMapInt32BoolV(*v, e)
+	case map[int64]interface{}:
+		fastpathTV.EncMapInt64IntfV(v, e)
+	case *map[int64]interface{}:
+		fastpathTV.EncMapInt64IntfV(*v, e)
+	case map[int64]string:
+		fastpathTV.EncMapInt64StringV(v, e)
+	case *map[int64]string:
+		fastpathTV.EncMapInt64StringV(*v, e)
+	case map[int64]uint:
+		fastpathTV.EncMapInt64UintV(v, e)
+	case *map[int64]uint:
+		fastpathTV.EncMapInt64UintV(*v, e)
+	case map[int64]uint8:
+		fastpathTV.EncMapInt64Uint8V(v, e)
+	case *map[int64]uint8:
+		fastpathTV.EncMapInt64Uint8V(*v, e)
+	case map[int64]uint16:
+		fastpathTV.EncMapInt64Uint16V(v, e)
+	case *map[int64]uint16:
+		fastpathTV.EncMapInt64Uint16V(*v, e)
+	case map[int64]uint32:
+		fastpathTV.EncMapInt64Uint32V(v, e)
+	case *map[int64]uint32:
+		fastpathTV.EncMapInt64Uint32V(*v, e)
+	case map[int64]uint64:
+		fastpathTV.EncMapInt64Uint64V(v, e)
+	case *map[int64]uint64:
+		fastpathTV.EncMapInt64Uint64V(*v, e)
+	case map[int64]uintptr:
+		fastpathTV.EncMapInt64UintptrV(v, e)
+	case *map[int64]uintptr:
+		fastpathTV.EncMapInt64UintptrV(*v, e)
+	case map[int64]int:
+		fastpathTV.EncMapInt64IntV(v, e)
+	case *map[int64]int:
+		fastpathTV.EncMapInt64IntV(*v, e)
+	case map[int64]int8:
+		fastpathTV.EncMapInt64Int8V(v, e)
+	case *map[int64]int8:
+		fastpathTV.EncMapInt64Int8V(*v, e)
+	case map[int64]int16:
+		fastpathTV.EncMapInt64Int16V(v, e)
+	case *map[int64]int16:
+		fastpathTV.EncMapInt64Int16V(*v, e)
+	case map[int64]int32:
+		fastpathTV.EncMapInt64Int32V(v, e)
+	case *map[int64]int32:
+		fastpathTV.EncMapInt64Int32V(*v, e)
+	case map[int64]int64:
+		fastpathTV.EncMapInt64Int64V(v, e)
+	case *map[int64]int64:
+		fastpathTV.EncMapInt64Int64V(*v, e)
+	case map[int64]float32:
+		fastpathTV.EncMapInt64Float32V(v, e)
+	case *map[int64]float32:
+		fastpathTV.EncMapInt64Float32V(*v, e)
+	case map[int64]float64:
+		fastpathTV.EncMapInt64Float64V(v, e)
+	case *map[int64]float64:
+		fastpathTV.EncMapInt64Float64V(*v, e)
+	case map[int64]bool:
+		fastpathTV.EncMapInt64BoolV(v, e)
+	case *map[int64]bool:
+		fastpathTV.EncMapInt64BoolV(*v, e)
+	case map[bool]interface{}:
+		fastpathTV.EncMapBoolIntfV(v, e)
+	case *map[bool]interface{}:
+		fastpathTV.EncMapBoolIntfV(*v, e)
+	case map[bool]string:
+		fastpathTV.EncMapBoolStringV(v, e)
+	case *map[bool]string:
+		fastpathTV.EncMapBoolStringV(*v, e)
+	case map[bool]uint:
+		fastpathTV.EncMapBoolUintV(v, e)
+	case *map[bool]uint:
+		fastpathTV.EncMapBoolUintV(*v, e)
+	case map[bool]uint8:
+		fastpathTV.EncMapBoolUint8V(v, e)
+	case *map[bool]uint8:
+		fastpathTV.EncMapBoolUint8V(*v, e)
+	case map[bool]uint16:
+		fastpathTV.EncMapBoolUint16V(v, e)
+	case *map[bool]uint16:
+		fastpathTV.EncMapBoolUint16V(*v, e)
+	case map[bool]uint32:
+		fastpathTV.EncMapBoolUint32V(v, e)
+	case *map[bool]uint32:
+		fastpathTV.EncMapBoolUint32V(*v, e)
+	case map[bool]uint64:
+		fastpathTV.EncMapBoolUint64V(v, e)
+	case *map[bool]uint64:
+		fastpathTV.EncMapBoolUint64V(*v, e)
+	case map[bool]uintptr:
+		fastpathTV.EncMapBoolUintptrV(v, e)
+	case *map[bool]uintptr:
+		fastpathTV.EncMapBoolUintptrV(*v, e)
+	case map[bool]int:
+		fastpathTV.EncMapBoolIntV(v, e)
+	case *map[bool]int:
+		fastpathTV.EncMapBoolIntV(*v, e)
+	case map[bool]int8:
+		fastpathTV.EncMapBoolInt8V(v, e)
+	case *map[bool]int8:
+		fastpathTV.EncMapBoolInt8V(*v, e)
+	case map[bool]int16:
+		fastpathTV.EncMapBoolInt16V(v, e)
+	case *map[bool]int16:
+		fastpathTV.EncMapBoolInt16V(*v, e)
+	case map[bool]int32:
+		fastpathTV.EncMapBoolInt32V(v, e)
+	case *map[bool]int32:
+		fastpathTV.EncMapBoolInt32V(*v, e)
+	case map[bool]int64:
+		fastpathTV.EncMapBoolInt64V(v, e)
+	case *map[bool]int64:
+		fastpathTV.EncMapBoolInt64V(*v, e)
+	case map[bool]float32:
+		fastpathTV.EncMapBoolFloat32V(v, e)
+	case *map[bool]float32:
+		fastpathTV.EncMapBoolFloat32V(*v, e)
+	case map[bool]float64:
+		fastpathTV.EncMapBoolFloat64V(v, e)
+	case *map[bool]float64:
+		fastpathTV.EncMapBoolFloat64V(*v, e)
+	case map[bool]bool:
+		fastpathTV.EncMapBoolBoolV(v, e)
+	case *map[bool]bool:
+		fastpathTV.EncMapBoolBoolV(*v, e)
+
+	default:
+		_ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4
+		return false
+	}
+	return true
+}
+
+// -- -- fast path functions
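+//
+// Each fastpathEncXxxR method below extracts the concrete value from the
+// reflect.Value via rv2i and delegates to the corresponding strongly-typed
+// EncXxxV method on fastpathTV, avoiding further reflection in the encode
+// loop. For slice types, f.ti.mbs selects the EncAsMapSliceXxxV variant,
+// which encodes the slice as a map of alternating key/value pairs
+// (mapBySlice).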
+
+func (e *Encoder) fastpathEncSliceIntfR(f *codecFnInfo, rv reflect.Value) {
+	if f.ti.mbs {
+		fastpathTV.EncAsMapSliceIntfV(rv2i(rv).([]interface{}), e)
+	} else {
+		fastpathTV.EncSliceIntfV(rv2i(rv).([]interface{}), e)
+	}
+}
+func (_ fastpathT) EncSliceIntfV(v []interface{}, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteArrayStart(len(v))
+	if esep {
+		for _, v2 := range v {
+			ee.WriteArrayElem()
+			e.encode(v2)
+		}
+	} else {
+		for _, v2 := range v {
+			e.encode(v2)
+		}
+	}
+	ee.WriteArrayEnd()
+}
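+
+// EncAsMapSliceIntfV encodes an even-length slice as a map: elements at
+// even indexes are written as keys and elements at odd indexes as values,
+// so a slice of length 2n produces a map with n entries.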
+func (_ fastpathT) EncAsMapSliceIntfV(v []interface{}, e *Encoder) {
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	if len(v)%2 == 1 {
+		e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+		return
+	}
+	ee.WriteMapStart(len(v) / 2)
+	if esep {
+		for j, v2 := range v {
+			if j%2 == 0 {
+				ee.WriteMapElemKey()
+			} else {
+				ee.WriteMapElemValue()
+			}
+			e.encode(v2)
+		}
+	} else {
+		for _, v2 := range v {
+			e.encode(v2)
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncSliceStringR(f *codecFnInfo, rv reflect.Value) {
+	if f.ti.mbs {
+		fastpathTV.EncAsMapSliceStringV(rv2i(rv).([]string), e)
+	} else {
+		fastpathTV.EncSliceStringV(rv2i(rv).([]string), e)
+	}
+}
+func (_ fastpathT) EncSliceStringV(v []string, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteArrayStart(len(v))
+	if esep {
+		for _, v2 := range v {
+			ee.WriteArrayElem()
+			ee.EncodeString(cUTF8, v2)
+		}
+	} else {
+		for _, v2 := range v {
+			ee.EncodeString(cUTF8, v2)
+		}
+	}
+	ee.WriteArrayEnd()
+}
+func (_ fastpathT) EncAsMapSliceStringV(v []string, e *Encoder) {
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	if len(v)%2 == 1 {
+		e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+		return
+	}
+	ee.WriteMapStart(len(v) / 2)
+	if esep {
+		for j, v2 := range v {
+			if j%2 == 0 {
+				ee.WriteMapElemKey()
+			} else {
+				ee.WriteMapElemValue()
+			}
+			ee.EncodeString(cUTF8, v2)
+		}
+	} else {
+		for _, v2 := range v {
+			ee.EncodeString(cUTF8, v2)
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncSliceFloat32R(f *codecFnInfo, rv reflect.Value) {
+	if f.ti.mbs {
+		fastpathTV.EncAsMapSliceFloat32V(rv2i(rv).([]float32), e)
+	} else {
+		fastpathTV.EncSliceFloat32V(rv2i(rv).([]float32), e)
+	}
+}
+func (_ fastpathT) EncSliceFloat32V(v []float32, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteArrayStart(len(v))
+	if esep {
+		for _, v2 := range v {
+			ee.WriteArrayElem()
+			ee.EncodeFloat32(v2)
+		}
+	} else {
+		for _, v2 := range v {
+			ee.EncodeFloat32(v2)
+		}
+	}
+	ee.WriteArrayEnd()
+}
+func (_ fastpathT) EncAsMapSliceFloat32V(v []float32, e *Encoder) {
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	if len(v)%2 == 1 {
+		e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+		return
+	}
+	ee.WriteMapStart(len(v) / 2)
+	if esep {
+		for j, v2 := range v {
+			if j%2 == 0 {
+				ee.WriteMapElemKey()
+			} else {
+				ee.WriteMapElemValue()
+			}
+			ee.EncodeFloat32(v2)
+		}
+	} else {
+		for _, v2 := range v {
+			ee.EncodeFloat32(v2)
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncSliceFloat64R(f *codecFnInfo, rv reflect.Value) {
+	if f.ti.mbs {
+		fastpathTV.EncAsMapSliceFloat64V(rv2i(rv).([]float64), e)
+	} else {
+		fastpathTV.EncSliceFloat64V(rv2i(rv).([]float64), e)
+	}
+}
+func (_ fastpathT) EncSliceFloat64V(v []float64, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteArrayStart(len(v))
+	if esep {
+		for _, v2 := range v {
+			ee.WriteArrayElem()
+			ee.EncodeFloat64(v2)
+		}
+	} else {
+		for _, v2 := range v {
+			ee.EncodeFloat64(v2)
+		}
+	}
+	ee.WriteArrayEnd()
+}
+func (_ fastpathT) EncAsMapSliceFloat64V(v []float64, e *Encoder) {
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	if len(v)%2 == 1 {
+		e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+		return
+	}
+	ee.WriteMapStart(len(v) / 2)
+	if esep {
+		for j, v2 := range v {
+			if j%2 == 0 {
+				ee.WriteMapElemKey()
+			} else {
+				ee.WriteMapElemValue()
+			}
+			ee.EncodeFloat64(v2)
+		}
+	} else {
+		for _, v2 := range v {
+			ee.EncodeFloat64(v2)
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncSliceUintR(f *codecFnInfo, rv reflect.Value) {
+	if f.ti.mbs {
+		fastpathTV.EncAsMapSliceUintV(rv2i(rv).([]uint), e)
+	} else {
+		fastpathTV.EncSliceUintV(rv2i(rv).([]uint), e)
+	}
+}
+func (_ fastpathT) EncSliceUintV(v []uint, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteArrayStart(len(v))
+	if esep {
+		for _, v2 := range v {
+			ee.WriteArrayElem()
+			ee.EncodeUint(uint64(v2))
+		}
+	} else {
+		for _, v2 := range v {
+			ee.EncodeUint(uint64(v2))
+		}
+	}
+	ee.WriteArrayEnd()
+}
+func (_ fastpathT) EncAsMapSliceUintV(v []uint, e *Encoder) {
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	if len(v)%2 == 1 {
+		e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+		return
+	}
+	ee.WriteMapStart(len(v) / 2)
+	if esep {
+		for j, v2 := range v {
+			if j%2 == 0 {
+				ee.WriteMapElemKey()
+			} else {
+				ee.WriteMapElemValue()
+			}
+			ee.EncodeUint(uint64(v2))
+		}
+	} else {
+		for _, v2 := range v {
+			ee.EncodeUint(uint64(v2))
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncSliceUint8R(f *codecFnInfo, rv reflect.Value) {
+	if f.ti.mbs {
+		fastpathTV.EncAsMapSliceUint8V(rv2i(rv).([]uint8), e)
+	} else {
+		fastpathTV.EncSliceUint8V(rv2i(rv).([]uint8), e)
+	}
+}
+func (_ fastpathT) EncSliceUint8V(v []uint8, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteArrayStart(len(v))
+	if esep {
+		for _, v2 := range v {
+			ee.WriteArrayElem()
+			ee.EncodeUint(uint64(v2))
+		}
+	} else {
+		for _, v2 := range v {
+			ee.EncodeUint(uint64(v2))
+		}
+	}
+	ee.WriteArrayEnd()
+}
+func (_ fastpathT) EncAsMapSliceUint8V(v []uint8, e *Encoder) {
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	if len(v)%2 == 1 {
+		e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+		return
+	}
+	ee.WriteMapStart(len(v) / 2)
+	if esep {
+		for j, v2 := range v {
+			if j%2 == 0 {
+				ee.WriteMapElemKey()
+			} else {
+				ee.WriteMapElemValue()
+			}
+			ee.EncodeUint(uint64(v2))
+		}
+	} else {
+		for _, v2 := range v {
+			ee.EncodeUint(uint64(v2))
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncSliceUint16R(f *codecFnInfo, rv reflect.Value) {
+	if f.ti.mbs {
+		fastpathTV.EncAsMapSliceUint16V(rv2i(rv).([]uint16), e)
+	} else {
+		fastpathTV.EncSliceUint16V(rv2i(rv).([]uint16), e)
+	}
+}
+func (_ fastpathT) EncSliceUint16V(v []uint16, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteArrayStart(len(v))
+	if esep {
+		for _, v2 := range v {
+			ee.WriteArrayElem()
+			ee.EncodeUint(uint64(v2))
+		}
+	} else {
+		for _, v2 := range v {
+			ee.EncodeUint(uint64(v2))
+		}
+	}
+	ee.WriteArrayEnd()
+}
+func (_ fastpathT) EncAsMapSliceUint16V(v []uint16, e *Encoder) {
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	if len(v)%2 == 1 {
+		e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+		return
+	}
+	ee.WriteMapStart(len(v) / 2)
+	if esep {
+		for j, v2 := range v {
+			if j%2 == 0 {
+				ee.WriteMapElemKey()
+			} else {
+				ee.WriteMapElemValue()
+			}
+			ee.EncodeUint(uint64(v2))
+		}
+	} else {
+		for _, v2 := range v {
+			ee.EncodeUint(uint64(v2))
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncSliceUint32R(f *codecFnInfo, rv reflect.Value) {
+	if f.ti.mbs {
+		fastpathTV.EncAsMapSliceUint32V(rv2i(rv).([]uint32), e)
+	} else {
+		fastpathTV.EncSliceUint32V(rv2i(rv).([]uint32), e)
+	}
+}
+func (_ fastpathT) EncSliceUint32V(v []uint32, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteArrayStart(len(v))
+	if esep {
+		for _, v2 := range v {
+			ee.WriteArrayElem()
+			ee.EncodeUint(uint64(v2))
+		}
+	} else {
+		for _, v2 := range v {
+			ee.EncodeUint(uint64(v2))
+		}
+	}
+	ee.WriteArrayEnd()
+}
+func (_ fastpathT) EncAsMapSliceUint32V(v []uint32, e *Encoder) {
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	if len(v)%2 == 1 {
+		e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+		return
+	}
+	ee.WriteMapStart(len(v) / 2)
+	if esep {
+		for j, v2 := range v {
+			if j%2 == 0 {
+				ee.WriteMapElemKey()
+			} else {
+				ee.WriteMapElemValue()
+			}
+			ee.EncodeUint(uint64(v2))
+		}
+	} else {
+		for _, v2 := range v {
+			ee.EncodeUint(uint64(v2))
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncSliceUint64R(f *codecFnInfo, rv reflect.Value) {
+	if f.ti.mbs {
+		fastpathTV.EncAsMapSliceUint64V(rv2i(rv).([]uint64), e)
+	} else {
+		fastpathTV.EncSliceUint64V(rv2i(rv).([]uint64), e)
+	}
+}
+func (_ fastpathT) EncSliceUint64V(v []uint64, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteArrayStart(len(v))
+	if esep {
+		for _, v2 := range v {
+			ee.WriteArrayElem()
+			ee.EncodeUint(uint64(v2))
+		}
+	} else {
+		for _, v2 := range v {
+			ee.EncodeUint(uint64(v2))
+		}
+	}
+	ee.WriteArrayEnd()
+}
+func (_ fastpathT) EncAsMapSliceUint64V(v []uint64, e *Encoder) {
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	if len(v)%2 == 1 {
+		e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+		return
+	}
+	ee.WriteMapStart(len(v) / 2)
+	if esep {
+		for j, v2 := range v {
+			if j%2 == 0 {
+				ee.WriteMapElemKey()
+			} else {
+				ee.WriteMapElemValue()
+			}
+			ee.EncodeUint(uint64(v2))
+		}
+	} else {
+		for _, v2 := range v {
+			ee.EncodeUint(uint64(v2))
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncSliceUintptrR(f *codecFnInfo, rv reflect.Value) {
+	if f.ti.mbs {
+		fastpathTV.EncAsMapSliceUintptrV(rv2i(rv).([]uintptr), e)
+	} else {
+		fastpathTV.EncSliceUintptrV(rv2i(rv).([]uintptr), e)
+	}
+}
+func (_ fastpathT) EncSliceUintptrV(v []uintptr, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteArrayStart(len(v))
+	if esep {
+		for _, v2 := range v {
+			ee.WriteArrayElem()
+			e.encode(v2)
+		}
+	} else {
+		for _, v2 := range v {
+			e.encode(v2)
+		}
+	}
+	ee.WriteArrayEnd()
+}
+func (_ fastpathT) EncAsMapSliceUintptrV(v []uintptr, e *Encoder) {
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	if len(v)%2 == 1 {
+		e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+		return
+	}
+	ee.WriteMapStart(len(v) / 2)
+	if esep {
+		for j, v2 := range v {
+			if j%2 == 0 {
+				ee.WriteMapElemKey()
+			} else {
+				ee.WriteMapElemValue()
+			}
+			e.encode(v2)
+		}
+	} else {
+		for _, v2 := range v {
+			e.encode(v2)
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncSliceIntR(f *codecFnInfo, rv reflect.Value) {
+	if f.ti.mbs {
+		fastpathTV.EncAsMapSliceIntV(rv2i(rv).([]int), e)
+	} else {
+		fastpathTV.EncSliceIntV(rv2i(rv).([]int), e)
+	}
+}
+func (_ fastpathT) EncSliceIntV(v []int, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteArrayStart(len(v))
+	if esep {
+		for _, v2 := range v {
+			ee.WriteArrayElem()
+			ee.EncodeInt(int64(v2))
+		}
+	} else {
+		for _, v2 := range v {
+			ee.EncodeInt(int64(v2))
+		}
+	}
+	ee.WriteArrayEnd()
+}
+func (_ fastpathT) EncAsMapSliceIntV(v []int, e *Encoder) {
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	if len(v)%2 == 1 {
+		e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+		return
+	}
+	ee.WriteMapStart(len(v) / 2)
+	if esep {
+		for j, v2 := range v {
+			if j%2 == 0 {
+				ee.WriteMapElemKey()
+			} else {
+				ee.WriteMapElemValue()
+			}
+			ee.EncodeInt(int64(v2))
+		}
+	} else {
+		for _, v2 := range v {
+			ee.EncodeInt(int64(v2))
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncSliceInt8R(f *codecFnInfo, rv reflect.Value) {
+	if f.ti.mbs {
+		fastpathTV.EncAsMapSliceInt8V(rv2i(rv).([]int8), e)
+	} else {
+		fastpathTV.EncSliceInt8V(rv2i(rv).([]int8), e)
+	}
+}
+func (_ fastpathT) EncSliceInt8V(v []int8, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteArrayStart(len(v))
+	if esep {
+		for _, v2 := range v {
+			ee.WriteArrayElem()
+			ee.EncodeInt(int64(v2))
+		}
+	} else {
+		for _, v2 := range v {
+			ee.EncodeInt(int64(v2))
+		}
+	}
+	ee.WriteArrayEnd()
+}
+func (_ fastpathT) EncAsMapSliceInt8V(v []int8, e *Encoder) {
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	if len(v)%2 == 1 {
+		e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+		return
+	}
+	ee.WriteMapStart(len(v) / 2)
+	if esep {
+		for j, v2 := range v {
+			if j%2 == 0 {
+				ee.WriteMapElemKey()
+			} else {
+				ee.WriteMapElemValue()
+			}
+			ee.EncodeInt(int64(v2))
+		}
+	} else {
+		for _, v2 := range v {
+			ee.EncodeInt(int64(v2))
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncSliceInt16R(f *codecFnInfo, rv reflect.Value) {
+	if f.ti.mbs {
+		fastpathTV.EncAsMapSliceInt16V(rv2i(rv).([]int16), e)
+	} else {
+		fastpathTV.EncSliceInt16V(rv2i(rv).([]int16), e)
+	}
+}
+func (_ fastpathT) EncSliceInt16V(v []int16, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteArrayStart(len(v))
+	if esep {
+		for _, v2 := range v {
+			ee.WriteArrayElem()
+			ee.EncodeInt(int64(v2))
+		}
+	} else {
+		for _, v2 := range v {
+			ee.EncodeInt(int64(v2))
+		}
+	}
+	ee.WriteArrayEnd()
+}
+func (_ fastpathT) EncAsMapSliceInt16V(v []int16, e *Encoder) {
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	if len(v)%2 == 1 {
+		e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+		return
+	}
+	ee.WriteMapStart(len(v) / 2)
+	if esep {
+		for j, v2 := range v {
+			if j%2 == 0 {
+				ee.WriteMapElemKey()
+			} else {
+				ee.WriteMapElemValue()
+			}
+			ee.EncodeInt(int64(v2))
+		}
+	} else {
+		for _, v2 := range v {
+			ee.EncodeInt(int64(v2))
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncSliceInt32R(f *codecFnInfo, rv reflect.Value) {
+	if f.ti.mbs {
+		fastpathTV.EncAsMapSliceInt32V(rv2i(rv).([]int32), e)
+	} else {
+		fastpathTV.EncSliceInt32V(rv2i(rv).([]int32), e)
+	}
+}
+func (_ fastpathT) EncSliceInt32V(v []int32, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteArrayStart(len(v))
+	if esep {
+		for _, v2 := range v {
+			ee.WriteArrayElem()
+			ee.EncodeInt(int64(v2))
+		}
+	} else {
+		for _, v2 := range v {
+			ee.EncodeInt(int64(v2))
+		}
+	}
+	ee.WriteArrayEnd()
+}
+func (_ fastpathT) EncAsMapSliceInt32V(v []int32, e *Encoder) {
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	if len(v)%2 == 1 {
+		e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+		return
+	}
+	ee.WriteMapStart(len(v) / 2)
+	if esep {
+		for j, v2 := range v {
+			if j%2 == 0 {
+				ee.WriteMapElemKey()
+			} else {
+				ee.WriteMapElemValue()
+			}
+			ee.EncodeInt(int64(v2))
+		}
+	} else {
+		for _, v2 := range v {
+			ee.EncodeInt(int64(v2))
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncSliceInt64R(f *codecFnInfo, rv reflect.Value) {
+	if f.ti.mbs {
+		fastpathTV.EncAsMapSliceInt64V(rv2i(rv).([]int64), e)
+	} else {
+		fastpathTV.EncSliceInt64V(rv2i(rv).([]int64), e)
+	}
+}
+func (_ fastpathT) EncSliceInt64V(v []int64, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteArrayStart(len(v))
+	if esep {
+		for _, v2 := range v {
+			ee.WriteArrayElem()
+			ee.EncodeInt(int64(v2))
+		}
+	} else {
+		for _, v2 := range v {
+			ee.EncodeInt(int64(v2))
+		}
+	}
+	ee.WriteArrayEnd()
+}
+func (_ fastpathT) EncAsMapSliceInt64V(v []int64, e *Encoder) {
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	if len(v)%2 == 1 {
+		e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+		return
+	}
+	ee.WriteMapStart(len(v) / 2)
+	if esep {
+		for j, v2 := range v {
+			if j%2 == 0 {
+				ee.WriteMapElemKey()
+			} else {
+				ee.WriteMapElemValue()
+			}
+			ee.EncodeInt(int64(v2))
+		}
+	} else {
+		for _, v2 := range v {
+			ee.EncodeInt(int64(v2))
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncSliceBoolR(f *codecFnInfo, rv reflect.Value) {
+	if f.ti.mbs {
+		fastpathTV.EncAsMapSliceBoolV(rv2i(rv).([]bool), e)
+	} else {
+		fastpathTV.EncSliceBoolV(rv2i(rv).([]bool), e)
+	}
+}
+func (_ fastpathT) EncSliceBoolV(v []bool, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteArrayStart(len(v))
+	if esep {
+		for _, v2 := range v {
+			ee.WriteArrayElem()
+			ee.EncodeBool(v2)
+		}
+	} else {
+		for _, v2 := range v {
+			ee.EncodeBool(v2)
+		}
+	}
+	ee.WriteArrayEnd()
+}
+func (_ fastpathT) EncAsMapSliceBoolV(v []bool, e *Encoder) {
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	if len(v)%2 == 1 {
+		e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+		return
+	}
+	ee.WriteMapStart(len(v) / 2)
+	if esep {
+		for j, v2 := range v {
+			if j%2 == 0 {
+				ee.WriteMapElemKey()
+			} else {
+				ee.WriteMapElemValue()
+			}
+			ee.EncodeBool(v2)
+		}
+	} else {
+		for _, v2 := range v {
+			ee.EncodeBool(v2)
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntfIntfR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapIntfIntfV(rv2i(rv).(map[interface{}]interface{}), e)
+}
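+
+// EncMapIntfIntfV encodes a map with interface{} keys. When Canonical is
+// set, each key is first encoded into a scratch buffer (mksv), the encoded
+// forms are sorted via bytesISlice, and entries are emitted in that order so
+// the output is deterministic across runs; otherwise entries are written in
+// Go's map iteration order.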
+func (_ fastpathT) EncMapIntfIntfV(v map[interface{}]interface{}, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+		e2 := NewEncoderBytes(&mksv, e.hh)
+		v2 := make([]bytesI, len(v))
+		var i, l int
+		var vp *bytesI
+		for k2, _ := range v {
+			l = len(mksv)
+			e2.MustEncode(k2)
+			vp = &v2[i]
+			vp.v = mksv[l:]
+			vp.i = k2
+			i++
+		}
+		sort.Sort(bytesISlice(v2))
+		if esep {
+			for j := range v2 {
+				ee.WriteMapElemKey()
+				e.asis(v2[j].v)
+				ee.WriteMapElemValue()
+				e.encode(v[v2[j].i])
+			}
+		} else {
+			for j := range v2 {
+				e.asis(v2[j].v)
+				e.encode(v[v2[j].i])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				e.encode(k2)
+				ee.WriteMapElemValue()
+				e.encode(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				e.encode(k2)
+				e.encode(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntfStringR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapIntfStringV(rv2i(rv).(map[interface{}]string), e)
+}
+func (_ fastpathT) EncMapIntfStringV(v map[interface{}]string, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+		e2 := NewEncoderBytes(&mksv, e.hh)
+		v2 := make([]bytesI, len(v))
+		var i, l int
+		var vp *bytesI
+		for k2, _ := range v {
+			l = len(mksv)
+			e2.MustEncode(k2)
+			vp = &v2[i]
+			vp.v = mksv[l:]
+			vp.i = k2
+			i++
+		}
+		sort.Sort(bytesISlice(v2))
+		if esep {
+			for j := range v2 {
+				ee.WriteMapElemKey()
+				e.asis(v2[j].v)
+				ee.WriteMapElemValue()
+				e.encode(v[v2[j].i])
+			}
+		} else {
+			for j := range v2 {
+				e.asis(v2[j].v)
+				e.encode(v[v2[j].i])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				e.encode(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeString(cUTF8, v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				e.encode(k2)
+				ee.EncodeString(cUTF8, v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntfUintR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapIntfUintV(rv2i(rv).(map[interface{}]uint), e)
+}
+func (_ fastpathT) EncMapIntfUintV(v map[interface{}]uint, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+		e2 := NewEncoderBytes(&mksv, e.hh)
+		v2 := make([]bytesI, len(v))
+		var i, l int
+		var vp *bytesI
+		for k2, _ := range v {
+			l = len(mksv)
+			e2.MustEncode(k2)
+			vp = &v2[i]
+			vp.v = mksv[l:]
+			vp.i = k2
+			i++
+		}
+		sort.Sort(bytesISlice(v2))
+		if esep {
+			for j := range v2 {
+				ee.WriteMapElemKey()
+				e.asis(v2[j].v)
+				ee.WriteMapElemValue()
+				e.encode(v[v2[j].i])
+			}
+		} else {
+			for j := range v2 {
+				e.asis(v2[j].v)
+				e.encode(v[v2[j].i])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				e.encode(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				e.encode(k2)
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntfUint8R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapIntfUint8V(rv2i(rv).(map[interface{}]uint8), e)
+}
+func (_ fastpathT) EncMapIntfUint8V(v map[interface{}]uint8, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+		e2 := NewEncoderBytes(&mksv, e.hh)
+		v2 := make([]bytesI, len(v))
+		var i, l int
+		var vp *bytesI
+		for k2, _ := range v {
+			l = len(mksv)
+			e2.MustEncode(k2)
+			vp = &v2[i]
+			vp.v = mksv[l:]
+			vp.i = k2
+			i++
+		}
+		sort.Sort(bytesISlice(v2))
+		if esep {
+			for j := range v2 {
+				ee.WriteMapElemKey()
+				e.asis(v2[j].v)
+				ee.WriteMapElemValue()
+				e.encode(v[v2[j].i])
+			}
+		} else {
+			for j := range v2 {
+				e.asis(v2[j].v)
+				e.encode(v[v2[j].i])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				e.encode(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				e.encode(k2)
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntfUint16R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapIntfUint16V(rv2i(rv).(map[interface{}]uint16), e)
+}
+func (_ fastpathT) EncMapIntfUint16V(v map[interface{}]uint16, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+		e2 := NewEncoderBytes(&mksv, e.hh)
+		v2 := make([]bytesI, len(v))
+		var i, l int
+		var vp *bytesI
+		for k2, _ := range v {
+			l = len(mksv)
+			e2.MustEncode(k2)
+			vp = &v2[i]
+			vp.v = mksv[l:]
+			vp.i = k2
+			i++
+		}
+		sort.Sort(bytesISlice(v2))
+		if esep {
+			for j := range v2 {
+				ee.WriteMapElemKey()
+				e.asis(v2[j].v)
+				ee.WriteMapElemValue()
+				e.encode(v[v2[j].i])
+			}
+		} else {
+			for j := range v2 {
+				e.asis(v2[j].v)
+				e.encode(v[v2[j].i])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				e.encode(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				e.encode(k2)
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntfUint32R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapIntfUint32V(rv2i(rv).(map[interface{}]uint32), e)
+}
+func (_ fastpathT) EncMapIntfUint32V(v map[interface{}]uint32, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+		e2 := NewEncoderBytes(&mksv, e.hh)
+		v2 := make([]bytesI, len(v))
+		var i, l int
+		var vp *bytesI
+		for k2, _ := range v {
+			l = len(mksv)
+			e2.MustEncode(k2)
+			vp = &v2[i]
+			vp.v = mksv[l:]
+			vp.i = k2
+			i++
+		}
+		sort.Sort(bytesISlice(v2))
+		if esep {
+			for j := range v2 {
+				ee.WriteMapElemKey()
+				e.asis(v2[j].v)
+				ee.WriteMapElemValue()
+				e.encode(v[v2[j].i])
+			}
+		} else {
+			for j := range v2 {
+				e.asis(v2[j].v)
+				e.encode(v[v2[j].i])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				e.encode(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				e.encode(k2)
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntfUint64R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapIntfUint64V(rv2i(rv).(map[interface{}]uint64), e)
+}
+func (_ fastpathT) EncMapIntfUint64V(v map[interface{}]uint64, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+		e2 := NewEncoderBytes(&mksv, e.hh)
+		v2 := make([]bytesI, len(v))
+		var i, l int
+		var vp *bytesI
+		for k2, _ := range v {
+			l = len(mksv)
+			e2.MustEncode(k2)
+			vp = &v2[i]
+			vp.v = mksv[l:]
+			vp.i = k2
+			i++
+		}
+		sort.Sort(bytesISlice(v2))
+		if esep {
+			for j := range v2 {
+				ee.WriteMapElemKey()
+				e.asis(v2[j].v)
+				ee.WriteMapElemValue()
+				e.encode(v[v2[j].i])
+			}
+		} else {
+			for j := range v2 {
+				e.asis(v2[j].v)
+				e.encode(v[v2[j].i])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				e.encode(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				e.encode(k2)
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntfUintptrR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapIntfUintptrV(rv2i(rv).(map[interface{}]uintptr), e)
+}
+func (_ fastpathT) EncMapIntfUintptrV(v map[interface{}]uintptr, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+		e2 := NewEncoderBytes(&mksv, e.hh)
+		v2 := make([]bytesI, len(v))
+		var i, l int
+		var vp *bytesI
+		for k2, _ := range v {
+			l = len(mksv)
+			e2.MustEncode(k2)
+			vp = &v2[i]
+			vp.v = mksv[l:]
+			vp.i = k2
+			i++
+		}
+		sort.Sort(bytesISlice(v2))
+		if esep {
+			for j := range v2 {
+				ee.WriteMapElemKey()
+				e.asis(v2[j].v)
+				ee.WriteMapElemValue()
+				e.encode(v[v2[j].i])
+			}
+		} else {
+			for j := range v2 {
+				e.asis(v2[j].v)
+				e.encode(v[v2[j].i])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				e.encode(k2)
+				ee.WriteMapElemValue()
+				e.encode(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				e.encode(k2)
+				e.encode(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntfIntR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapIntfIntV(rv2i(rv).(map[interface{}]int), e)
+}
+func (_ fastpathT) EncMapIntfIntV(v map[interface{}]int, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+		e2 := NewEncoderBytes(&mksv, e.hh)
+		v2 := make([]bytesI, len(v))
+		var i, l int
+		var vp *bytesI
+		for k2, _ := range v {
+			l = len(mksv)
+			e2.MustEncode(k2)
+			vp = &v2[i]
+			vp.v = mksv[l:]
+			vp.i = k2
+			i++
+		}
+		sort.Sort(bytesISlice(v2))
+		if esep {
+			for j := range v2 {
+				ee.WriteMapElemKey()
+				e.asis(v2[j].v)
+				ee.WriteMapElemValue()
+				e.encode(v[v2[j].i])
+			}
+		} else {
+			for j := range v2 {
+				e.asis(v2[j].v)
+				e.encode(v[v2[j].i])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				e.encode(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				e.encode(k2)
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntfInt8R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapIntfInt8V(rv2i(rv).(map[interface{}]int8), e)
+}
+func (_ fastpathT) EncMapIntfInt8V(v map[interface{}]int8, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+		e2 := NewEncoderBytes(&mksv, e.hh)
+		v2 := make([]bytesI, len(v))
+		var i, l int
+		var vp *bytesI
+		for k2, _ := range v {
+			l = len(mksv)
+			e2.MustEncode(k2)
+			vp = &v2[i]
+			vp.v = mksv[l:]
+			vp.i = k2
+			i++
+		}
+		sort.Sort(bytesISlice(v2))
+		if esep {
+			for j := range v2 {
+				ee.WriteMapElemKey()
+				e.asis(v2[j].v)
+				ee.WriteMapElemValue()
+				e.encode(v[v2[j].i])
+			}
+		} else {
+			for j := range v2 {
+				e.asis(v2[j].v)
+				e.encode(v[v2[j].i])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				e.encode(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				e.encode(k2)
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntfInt16R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapIntfInt16V(rv2i(rv).(map[interface{}]int16), e)
+}
+func (_ fastpathT) EncMapIntfInt16V(v map[interface{}]int16, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+		e2 := NewEncoderBytes(&mksv, e.hh)
+		v2 := make([]bytesI, len(v))
+		var i, l int
+		var vp *bytesI
+		for k2, _ := range v {
+			l = len(mksv)
+			e2.MustEncode(k2)
+			vp = &v2[i]
+			vp.v = mksv[l:]
+			vp.i = k2
+			i++
+		}
+		sort.Sort(bytesISlice(v2))
+		if esep {
+			for j := range v2 {
+				ee.WriteMapElemKey()
+				e.asis(v2[j].v)
+				ee.WriteMapElemValue()
+				e.encode(v[v2[j].i])
+			}
+		} else {
+			for j := range v2 {
+				e.asis(v2[j].v)
+				e.encode(v[v2[j].i])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				e.encode(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				e.encode(k2)
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntfInt32R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapIntfInt32V(rv2i(rv).(map[interface{}]int32), e)
+}
+func (_ fastpathT) EncMapIntfInt32V(v map[interface{}]int32, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+		e2 := NewEncoderBytes(&mksv, e.hh)
+		v2 := make([]bytesI, len(v))
+		var i, l int
+		var vp *bytesI
+		for k2, _ := range v {
+			l = len(mksv)
+			e2.MustEncode(k2)
+			vp = &v2[i]
+			vp.v = mksv[l:]
+			vp.i = k2
+			i++
+		}
+		sort.Sort(bytesISlice(v2))
+		if esep {
+			for j := range v2 {
+				ee.WriteMapElemKey()
+				e.asis(v2[j].v)
+				ee.WriteMapElemValue()
+				e.encode(v[v2[j].i])
+			}
+		} else {
+			for j := range v2 {
+				e.asis(v2[j].v)
+				e.encode(v[v2[j].i])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				e.encode(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				e.encode(k2)
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntfInt64R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapIntfInt64V(rv2i(rv).(map[interface{}]int64), e)
+}
+func (_ fastpathT) EncMapIntfInt64V(v map[interface{}]int64, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+		e2 := NewEncoderBytes(&mksv, e.hh)
+		v2 := make([]bytesI, len(v))
+		var i, l int
+		var vp *bytesI
+		for k2, _ := range v {
+			l = len(mksv)
+			e2.MustEncode(k2)
+			vp = &v2[i]
+			vp.v = mksv[l:]
+			vp.i = k2
+			i++
+		}
+		sort.Sort(bytesISlice(v2))
+		if esep {
+			for j := range v2 {
+				ee.WriteMapElemKey()
+				e.asis(v2[j].v)
+				ee.WriteMapElemValue()
+				e.encode(v[v2[j].i])
+			}
+		} else {
+			for j := range v2 {
+				e.asis(v2[j].v)
+				e.encode(v[v2[j].i])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				e.encode(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				e.encode(k2)
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntfFloat32R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapIntfFloat32V(rv2i(rv).(map[interface{}]float32), e)
+}
+func (_ fastpathT) EncMapIntfFloat32V(v map[interface{}]float32, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+		e2 := NewEncoderBytes(&mksv, e.hh)
+		v2 := make([]bytesI, len(v))
+		var i, l int
+		var vp *bytesI
+		for k2, _ := range v {
+			l = len(mksv)
+			e2.MustEncode(k2)
+			vp = &v2[i]
+			vp.v = mksv[l:]
+			vp.i = k2
+			i++
+		}
+		sort.Sort(bytesISlice(v2))
+		if esep {
+			for j := range v2 {
+				ee.WriteMapElemKey()
+				e.asis(v2[j].v)
+				ee.WriteMapElemValue()
+				e.encode(v[v2[j].i])
+			}
+		} else {
+			for j := range v2 {
+				e.asis(v2[j].v)
+				e.encode(v[v2[j].i])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				e.encode(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeFloat32(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				e.encode(k2)
+				ee.EncodeFloat32(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntfFloat64R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapIntfFloat64V(rv2i(rv).(map[interface{}]float64), e)
+}
+func (_ fastpathT) EncMapIntfFloat64V(v map[interface{}]float64, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+		e2 := NewEncoderBytes(&mksv, e.hh)
+		v2 := make([]bytesI, len(v))
+		var i, l int
+		var vp *bytesI
+		for k2, _ := range v {
+			l = len(mksv)
+			e2.MustEncode(k2)
+			vp = &v2[i]
+			vp.v = mksv[l:]
+			vp.i = k2
+			i++
+		}
+		sort.Sort(bytesISlice(v2))
+		if esep {
+			for j := range v2 {
+				ee.WriteMapElemKey()
+				e.asis(v2[j].v)
+				ee.WriteMapElemValue()
+				e.encode(v[v2[j].i])
+			}
+		} else {
+			for j := range v2 {
+				e.asis(v2[j].v)
+				e.encode(v[v2[j].i])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				e.encode(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeFloat64(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				e.encode(k2)
+				ee.EncodeFloat64(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntfBoolR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapIntfBoolV(rv2i(rv).(map[interface{}]bool), e)
+}
+func (_ fastpathT) EncMapIntfBoolV(v map[interface{}]bool, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+		e2 := NewEncoderBytes(&mksv, e.hh)
+		v2 := make([]bytesI, len(v))
+		var i, l int
+		var vp *bytesI
+		for k2, _ := range v {
+			l = len(mksv)
+			e2.MustEncode(k2)
+			vp = &v2[i]
+			vp.v = mksv[l:]
+			vp.i = k2
+			i++
+		}
+		sort.Sort(bytesISlice(v2))
+		if esep {
+			for j := range v2 {
+				ee.WriteMapElemKey()
+				e.asis(v2[j].v)
+				ee.WriteMapElemValue()
+				e.encode(v[v2[j].i])
+			}
+		} else {
+			for j := range v2 {
+				e.asis(v2[j].v)
+				e.encode(v[v2[j].i])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				e.encode(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeBool(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				e.encode(k2)
+				ee.EncodeBool(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapStringIntfR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapStringIntfV(rv2i(rv).(map[string]interface{}), e)
+}
+func (_ fastpathT) EncMapStringIntfV(v map[string]interface{}, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
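+		// Canonical mode: string keys sort directly, so they are copied into
+		// a slice and sorted; no pre-encoding pass is needed as with
+		// interface{} keys.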
+		v2 := make([]string, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = string(k)
+			i++
+		}
+		sort.Sort(stringSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeString(cUTF8, k2)
+				ee.WriteMapElemValue()
+				e.encode(v[string(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeString(cUTF8, k2)
+				e.encode(v[string(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeString(cUTF8, k2)
+				ee.WriteMapElemValue()
+				e.encode(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeString(cUTF8, k2)
+				e.encode(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapStringStringR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapStringStringV(rv2i(rv).(map[string]string), e)
+}
+func (_ fastpathT) EncMapStringStringV(v map[string]string, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]string, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = string(k)
+			i++
+		}
+		sort.Sort(stringSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeString(cUTF8, k2)
+				ee.WriteMapElemValue()
+				ee.EncodeString(cUTF8, v[string(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeString(cUTF8, k2)
+				ee.EncodeString(cUTF8, v[string(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeString(cUTF8, k2)
+				ee.WriteMapElemValue()
+				ee.EncodeString(cUTF8, v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeString(cUTF8, k2)
+				ee.EncodeString(cUTF8, v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapStringUintR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapStringUintV(rv2i(rv).(map[string]uint), e)
+}
+func (_ fastpathT) EncMapStringUintV(v map[string]uint, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]string, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = string(k)
+			i++
+		}
+		sort.Sort(stringSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeString(cUTF8, k2)
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[string(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeString(cUTF8, k2)
+				ee.EncodeUint(uint64(v[string(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeString(cUTF8, k2)
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeString(cUTF8, k2)
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapStringUint8R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapStringUint8V(rv2i(rv).(map[string]uint8), e)
+}
+func (_ fastpathT) EncMapStringUint8V(v map[string]uint8, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]string, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = string(k)
+			i++
+		}
+		sort.Sort(stringSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeString(cUTF8, k2)
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[string(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeString(cUTF8, k2)
+				ee.EncodeUint(uint64(v[string(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeString(cUTF8, k2)
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeString(cUTF8, k2)
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapStringUint16R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapStringUint16V(rv2i(rv).(map[string]uint16), e)
+}
+func (_ fastpathT) EncMapStringUint16V(v map[string]uint16, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]string, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = string(k)
+			i++
+		}
+		sort.Sort(stringSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeString(cUTF8, k2)
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[string(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeString(cUTF8, k2)
+				ee.EncodeUint(uint64(v[string(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeString(cUTF8, k2)
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeString(cUTF8, k2)
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapStringUint32R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapStringUint32V(rv2i(rv).(map[string]uint32), e)
+}
+func (_ fastpathT) EncMapStringUint32V(v map[string]uint32, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]string, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = string(k)
+			i++
+		}
+		sort.Sort(stringSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeString(cUTF8, k2)
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[string(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeString(cUTF8, k2)
+				ee.EncodeUint(uint64(v[string(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeString(cUTF8, k2)
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeString(cUTF8, k2)
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapStringUint64R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapStringUint64V(rv2i(rv).(map[string]uint64), e)
+}
+func (_ fastpathT) EncMapStringUint64V(v map[string]uint64, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]string, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = string(k)
+			i++
+		}
+		sort.Sort(stringSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeString(cUTF8, k2)
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[string(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeString(cUTF8, k2)
+				ee.EncodeUint(uint64(v[string(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeString(cUTF8, k2)
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeString(cUTF8, k2)
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapStringUintptrR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapStringUintptrV(rv2i(rv).(map[string]uintptr), e)
+}
+func (_ fastpathT) EncMapStringUintptrV(v map[string]uintptr, e *Encoder) {
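+	// uintptr has no dedicated encodeDriver method, so values below take
+	// the generic e.encode path rather than a direct ee.Encode* call.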
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]string, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = string(k)
+			i++
+		}
+		sort.Sort(stringSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeString(cUTF8, k2)
+				ee.WriteMapElemValue()
+				e.encode(v[string(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeString(cUTF8, k2)
+				e.encode(v[string(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeString(cUTF8, k2)
+				ee.WriteMapElemValue()
+				e.encode(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeString(cUTF8, k2)
+				e.encode(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapStringIntR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapStringIntV(rv2i(rv).(map[string]int), e)
+}
+func (_ fastpathT) EncMapStringIntV(v map[string]int, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]string, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = string(k)
+			i++
+		}
+		sort.Sort(stringSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeString(cUTF8, k2)
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[string(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeString(cUTF8, k2)
+				ee.EncodeInt(int64(v[string(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeString(cUTF8, k2)
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeString(cUTF8, k2)
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapStringInt8R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapStringInt8V(rv2i(rv).(map[string]int8), e)
+}
+func (_ fastpathT) EncMapStringInt8V(v map[string]int8, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]string, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = string(k)
+			i++
+		}
+		sort.Sort(stringSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeString(cUTF8, k2)
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[string(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeString(cUTF8, k2)
+				ee.EncodeInt(int64(v[string(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeString(cUTF8, k2)
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeString(cUTF8, k2)
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapStringInt16R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapStringInt16V(rv2i(rv).(map[string]int16), e)
+}
+func (_ fastpathT) EncMapStringInt16V(v map[string]int16, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]string, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = string(k)
+			i++
+		}
+		sort.Sort(stringSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeString(cUTF8, k2)
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[string(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeString(cUTF8, k2)
+				ee.EncodeInt(int64(v[string(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeString(cUTF8, k2)
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeString(cUTF8, k2)
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapStringInt32R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapStringInt32V(rv2i(rv).(map[string]int32), e)
+}
+func (_ fastpathT) EncMapStringInt32V(v map[string]int32, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]string, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = string(k)
+			i++
+		}
+		sort.Sort(stringSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeString(cUTF8, k2)
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[string(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeString(cUTF8, k2)
+				ee.EncodeInt(int64(v[string(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeString(cUTF8, k2)
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeString(cUTF8, k2)
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapStringInt64R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapStringInt64V(rv2i(rv).(map[string]int64), e)
+}
+func (_ fastpathT) EncMapStringInt64V(v map[string]int64, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]string, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = string(k)
+			i++
+		}
+		sort.Sort(stringSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeString(cUTF8, k2)
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[string(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeString(cUTF8, k2)
+				ee.EncodeInt(int64(v[string(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeString(cUTF8, k2)
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeString(cUTF8, k2)
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapStringFloat32R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapStringFloat32V(rv2i(rv).(map[string]float32), e)
+}
+func (_ fastpathT) EncMapStringFloat32V(v map[string]float32, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]string, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = string(k)
+			i++
+		}
+		sort.Sort(stringSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeString(cUTF8, k2)
+				ee.WriteMapElemValue()
+				ee.EncodeFloat32(v[string(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeString(cUTF8, k2)
+				ee.EncodeFloat32(v[string(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeString(cUTF8, k2)
+				ee.WriteMapElemValue()
+				ee.EncodeFloat32(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeString(cUTF8, k2)
+				ee.EncodeFloat32(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapStringFloat64R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapStringFloat64V(rv2i(rv).(map[string]float64), e)
+}
+func (_ fastpathT) EncMapStringFloat64V(v map[string]float64, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]string, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = string(k)
+			i++
+		}
+		sort.Sort(stringSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeString(cUTF8, k2)
+				ee.WriteMapElemValue()
+				ee.EncodeFloat64(v[string(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeString(cUTF8, k2)
+				ee.EncodeFloat64(v[string(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeString(cUTF8, k2)
+				ee.WriteMapElemValue()
+				ee.EncodeFloat64(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeString(cUTF8, k2)
+				ee.EncodeFloat64(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapStringBoolR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapStringBoolV(rv2i(rv).(map[string]bool), e)
+}
+func (_ fastpathT) EncMapStringBoolV(v map[string]bool, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]string, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = string(k)
+			i++
+		}
+		sort.Sort(stringSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeString(cUTF8, k2)
+				ee.WriteMapElemValue()
+				ee.EncodeBool(v[string(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeString(cUTF8, k2)
+				ee.EncodeBool(v[string(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeString(cUTF8, k2)
+				ee.WriteMapElemValue()
+				ee.EncodeBool(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeString(cUTF8, k2)
+				ee.EncodeBool(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat32IntfR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapFloat32IntfV(rv2i(rv).(map[float32]interface{}), e)
+}
+func (_ fastpathT) EncMapFloat32IntfV(v map[float32]interface{}, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
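+		// Canonical mode: float32 keys are widened to float64 so a single
+		// floatSlice sort covers both float key widths; each sorted key is
+		// narrowed back to float32 for encoding and for the map lookup.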
+		v2 := make([]float64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = float64(k)
+			i++
+		}
+		sort.Sort(floatSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat32(float32(k2))
+				ee.WriteMapElemValue()
+				e.encode(v[float32(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeFloat32(float32(k2))
+				e.encode(v[float32(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat32(k2)
+				ee.WriteMapElemValue()
+				e.encode(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeFloat32(k2)
+				e.encode(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat32StringR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapFloat32StringV(rv2i(rv).(map[float32]string), e)
+}
+func (_ fastpathT) EncMapFloat32StringV(v map[float32]string, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]float64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = float64(k)
+			i++
+		}
+		sort.Sort(floatSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat32(float32(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeString(cUTF8, v[float32(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeFloat32(float32(k2))
+				ee.EncodeString(cUTF8, v[float32(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat32(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeString(cUTF8, v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeFloat32(k2)
+				ee.EncodeString(cUTF8, v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat32UintR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapFloat32UintV(rv2i(rv).(map[float32]uint), e)
+}
+func (_ fastpathT) EncMapFloat32UintV(v map[float32]uint, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]float64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = float64(k)
+			i++
+		}
+		sort.Sort(floatSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat32(float32(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[float32(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeFloat32(float32(k2))
+				ee.EncodeUint(uint64(v[float32(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat32(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeFloat32(k2)
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat32Uint8R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapFloat32Uint8V(rv2i(rv).(map[float32]uint8), e)
+}
+func (_ fastpathT) EncMapFloat32Uint8V(v map[float32]uint8, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]float64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = float64(k)
+			i++
+		}
+		sort.Sort(floatSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat32(float32(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[float32(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeFloat32(float32(k2))
+				ee.EncodeUint(uint64(v[float32(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat32(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeFloat32(k2)
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat32Uint16R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapFloat32Uint16V(rv2i(rv).(map[float32]uint16), e)
+}
+func (_ fastpathT) EncMapFloat32Uint16V(v map[float32]uint16, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]float64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = float64(k)
+			i++
+		}
+		sort.Sort(floatSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat32(float32(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[float32(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeFloat32(float32(k2))
+				ee.EncodeUint(uint64(v[float32(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat32(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeFloat32(k2)
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat32Uint32R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapFloat32Uint32V(rv2i(rv).(map[float32]uint32), e)
+}
+func (_ fastpathT) EncMapFloat32Uint32V(v map[float32]uint32, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]float64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = float64(k)
+			i++
+		}
+		sort.Sort(floatSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat32(float32(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[float32(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeFloat32(float32(k2))
+				ee.EncodeUint(uint64(v[float32(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat32(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeFloat32(k2)
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat32Uint64R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapFloat32Uint64V(rv2i(rv).(map[float32]uint64), e)
+}
+func (_ fastpathT) EncMapFloat32Uint64V(v map[float32]uint64, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]float64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = float64(k)
+			i++
+		}
+		sort.Sort(floatSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat32(float32(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[float32(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeFloat32(float32(k2))
+				ee.EncodeUint(uint64(v[float32(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat32(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeFloat32(k2)
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat32UintptrR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapFloat32UintptrV(rv2i(rv).(map[float32]uintptr), e)
+}
+func (_ fastpathT) EncMapFloat32UintptrV(v map[float32]uintptr, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]float64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = float64(k)
+			i++
+		}
+		sort.Sort(floatSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat32(float32(k2))
+				ee.WriteMapElemValue()
+				e.encode(v[float32(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeFloat32(float32(k2))
+				e.encode(v[float32(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat32(k2)
+				ee.WriteMapElemValue()
+				e.encode(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeFloat32(k2)
+				e.encode(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat32IntR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapFloat32IntV(rv2i(rv).(map[float32]int), e)
+}
+func (_ fastpathT) EncMapFloat32IntV(v map[float32]int, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]float64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = float64(k)
+			i++
+		}
+		sort.Sort(floatSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat32(float32(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[float32(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeFloat32(float32(k2))
+				ee.EncodeInt(int64(v[float32(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat32(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeFloat32(k2)
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat32Int8R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapFloat32Int8V(rv2i(rv).(map[float32]int8), e)
+}
+func (_ fastpathT) EncMapFloat32Int8V(v map[float32]int8, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]float64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = float64(k)
+			i++
+		}
+		sort.Sort(floatSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat32(float32(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[float32(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeFloat32(float32(k2))
+				ee.EncodeInt(int64(v[float32(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat32(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeFloat32(k2)
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat32Int16R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapFloat32Int16V(rv2i(rv).(map[float32]int16), e)
+}
+func (_ fastpathT) EncMapFloat32Int16V(v map[float32]int16, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]float64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = float64(k)
+			i++
+		}
+		sort.Sort(floatSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat32(float32(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[float32(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeFloat32(float32(k2))
+				ee.EncodeInt(int64(v[float32(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat32(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeFloat32(k2)
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat32Int32R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapFloat32Int32V(rv2i(rv).(map[float32]int32), e)
+}
+func (_ fastpathT) EncMapFloat32Int32V(v map[float32]int32, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]float64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = float64(k)
+			i++
+		}
+		sort.Sort(floatSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat32(float32(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[float32(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeFloat32(float32(k2))
+				ee.EncodeInt(int64(v[float32(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat32(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeFloat32(k2)
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat32Int64R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapFloat32Int64V(rv2i(rv).(map[float32]int64), e)
+}
+func (_ fastpathT) EncMapFloat32Int64V(v map[float32]int64, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]float64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = float64(k)
+			i++
+		}
+		sort.Sort(floatSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat32(float32(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[float32(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeFloat32(float32(k2))
+				ee.EncodeInt(int64(v[float32(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat32(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeFloat32(k2)
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat32Float32R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapFloat32Float32V(rv2i(rv).(map[float32]float32), e)
+}
+func (_ fastpathT) EncMapFloat32Float32V(v map[float32]float32, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]float64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = float64(k)
+			i++
+		}
+		sort.Sort(floatSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat32(float32(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeFloat32(v[float32(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeFloat32(float32(k2))
+				ee.EncodeFloat32(v[float32(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat32(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeFloat32(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeFloat32(k2)
+				ee.EncodeFloat32(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat32Float64R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapFloat32Float64V(rv2i(rv).(map[float32]float64), e)
+}
+func (_ fastpathT) EncMapFloat32Float64V(v map[float32]float64, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]float64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = float64(k)
+			i++
+		}
+		sort.Sort(floatSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat32(float32(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeFloat64(v[float32(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeFloat32(float32(k2))
+				ee.EncodeFloat64(v[float32(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat32(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeFloat64(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeFloat32(k2)
+				ee.EncodeFloat64(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat32BoolR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapFloat32BoolV(rv2i(rv).(map[float32]bool), e)
+}
+func (_ fastpathT) EncMapFloat32BoolV(v map[float32]bool, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]float64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = float64(k)
+			i++
+		}
+		sort.Sort(floatSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat32(float32(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeBool(v[float32(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeFloat32(float32(k2))
+				ee.EncodeBool(v[float32(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat32(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeBool(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeFloat32(k2)
+				ee.EncodeBool(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat64IntfR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapFloat64IntfV(rv2i(rv).(map[float64]interface{}), e)
+}
+func (_ fastpathT) EncMapFloat64IntfV(v map[float64]interface{}, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
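+		// float64 keys need no widening; the float64(k2) conversions below
+		// are no-ops retained from the fastpath generator's shared template.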
+		v2 := make([]float64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = float64(k)
+			i++
+		}
+		sort.Sort(floatSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat64(float64(k2))
+				ee.WriteMapElemValue()
+				e.encode(v[float64(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeFloat64(float64(k2))
+				e.encode(v[float64(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat64(k2)
+				ee.WriteMapElemValue()
+				e.encode(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeFloat64(k2)
+				e.encode(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat64StringR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapFloat64StringV(rv2i(rv).(map[float64]string), e)
+}
+func (_ fastpathT) EncMapFloat64StringV(v map[float64]string, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]float64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = float64(k)
+			i++
+		}
+		sort.Sort(floatSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat64(float64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeString(cUTF8, v[float64(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeFloat64(float64(k2))
+				ee.EncodeString(cUTF8, v[float64(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat64(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeString(cUTF8, v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeFloat64(k2)
+				ee.EncodeString(cUTF8, v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat64UintR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapFloat64UintV(rv2i(rv).(map[float64]uint), e)
+}
+func (_ fastpathT) EncMapFloat64UintV(v map[float64]uint, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]float64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = float64(k)
+			i++
+		}
+		sort.Sort(floatSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat64(float64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[float64(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeFloat64(float64(k2))
+				ee.EncodeUint(uint64(v[float64(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat64(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeFloat64(k2)
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat64Uint8R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapFloat64Uint8V(rv2i(rv).(map[float64]uint8), e)
+}
+func (_ fastpathT) EncMapFloat64Uint8V(v map[float64]uint8, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]float64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = float64(k)
+			i++
+		}
+		sort.Sort(floatSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat64(float64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[float64(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeFloat64(float64(k2))
+				ee.EncodeUint(uint64(v[float64(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat64(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeFloat64(k2)
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat64Uint16R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapFloat64Uint16V(rv2i(rv).(map[float64]uint16), e)
+}
+func (_ fastpathT) EncMapFloat64Uint16V(v map[float64]uint16, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]float64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = float64(k)
+			i++
+		}
+		sort.Sort(floatSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat64(float64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[float64(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeFloat64(float64(k2))
+				ee.EncodeUint(uint64(v[float64(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat64(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeFloat64(k2)
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat64Uint32R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapFloat64Uint32V(rv2i(rv).(map[float64]uint32), e)
+}
+func (_ fastpathT) EncMapFloat64Uint32V(v map[float64]uint32, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]float64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = float64(k)
+			i++
+		}
+		sort.Sort(floatSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat64(float64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[float64(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeFloat64(float64(k2))
+				ee.EncodeUint(uint64(v[float64(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat64(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeFloat64(k2)
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat64Uint64R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapFloat64Uint64V(rv2i(rv).(map[float64]uint64), e)
+}
+func (_ fastpathT) EncMapFloat64Uint64V(v map[float64]uint64, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]float64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = float64(k)
+			i++
+		}
+		sort.Sort(floatSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat64(float64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[float64(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeFloat64(float64(k2))
+				ee.EncodeUint(uint64(v[float64(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat64(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeFloat64(k2)
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat64UintptrR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapFloat64UintptrV(rv2i(rv).(map[float64]uintptr), e)
+}
+func (_ fastpathT) EncMapFloat64UintptrV(v map[float64]uintptr, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]float64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = float64(k)
+			i++
+		}
+		sort.Sort(floatSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat64(float64(k2))
+				ee.WriteMapElemValue()
+				e.encode(v[float64(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeFloat64(float64(k2))
+				e.encode(v[float64(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat64(k2)
+				ee.WriteMapElemValue()
+				e.encode(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeFloat64(k2)
+				e.encode(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat64IntR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapFloat64IntV(rv2i(rv).(map[float64]int), e)
+}
+func (_ fastpathT) EncMapFloat64IntV(v map[float64]int, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]float64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = float64(k)
+			i++
+		}
+		sort.Sort(floatSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat64(float64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[float64(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeFloat64(float64(k2))
+				ee.EncodeInt(int64(v[float64(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat64(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeFloat64(k2)
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat64Int8R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapFloat64Int8V(rv2i(rv).(map[float64]int8), e)
+}
+func (_ fastpathT) EncMapFloat64Int8V(v map[float64]int8, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]float64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = float64(k)
+			i++
+		}
+		sort.Sort(floatSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat64(float64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[float64(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeFloat64(float64(k2))
+				ee.EncodeInt(int64(v[float64(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat64(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeFloat64(k2)
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat64Int16R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapFloat64Int16V(rv2i(rv).(map[float64]int16), e)
+}
+func (_ fastpathT) EncMapFloat64Int16V(v map[float64]int16, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]float64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = float64(k)
+			i++
+		}
+		sort.Sort(floatSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat64(float64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[float64(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeFloat64(float64(k2))
+				ee.EncodeInt(int64(v[float64(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat64(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeFloat64(k2)
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat64Int32R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapFloat64Int32V(rv2i(rv).(map[float64]int32), e)
+}
+func (_ fastpathT) EncMapFloat64Int32V(v map[float64]int32, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]float64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = float64(k)
+			i++
+		}
+		sort.Sort(floatSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat64(float64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[float64(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeFloat64(float64(k2))
+				ee.EncodeInt(int64(v[float64(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat64(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeFloat64(k2)
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat64Int64R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapFloat64Int64V(rv2i(rv).(map[float64]int64), e)
+}
+func (_ fastpathT) EncMapFloat64Int64V(v map[float64]int64, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]float64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = float64(k)
+			i++
+		}
+		sort.Sort(floatSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat64(float64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[float64(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeFloat64(float64(k2))
+				ee.EncodeInt(int64(v[float64(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat64(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeFloat64(k2)
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat64Float32R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapFloat64Float32V(rv2i(rv).(map[float64]float32), e)
+}
+func (_ fastpathT) EncMapFloat64Float32V(v map[float64]float32, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]float64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = float64(k)
+			i++
+		}
+		sort.Sort(floatSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat64(float64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeFloat32(v[float64(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeFloat64(float64(k2))
+				ee.EncodeFloat32(v[float64(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat64(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeFloat32(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeFloat64(k2)
+				ee.EncodeFloat32(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat64Float64R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapFloat64Float64V(rv2i(rv).(map[float64]float64), e)
+}
+func (_ fastpathT) EncMapFloat64Float64V(v map[float64]float64, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]float64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = float64(k)
+			i++
+		}
+		sort.Sort(floatSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat64(float64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeFloat64(v[float64(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeFloat64(float64(k2))
+				ee.EncodeFloat64(v[float64(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat64(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeFloat64(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeFloat64(k2)
+				ee.EncodeFloat64(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat64BoolR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapFloat64BoolV(rv2i(rv).(map[float64]bool), e)
+}
+func (_ fastpathT) EncMapFloat64BoolV(v map[float64]bool, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]float64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = float64(k)
+			i++
+		}
+		sort.Sort(floatSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat64(float64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeBool(v[float64(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeFloat64(float64(k2))
+				ee.EncodeBool(v[float64(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeFloat64(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeBool(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeFloat64(k2)
+				ee.EncodeBool(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintIntfR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUintIntfV(rv2i(rv).(map[uint]interface{}), e)
+}
+func (_ fastpathT) EncMapUintIntfV(v map[uint]interface{}, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
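+		// Canonical mode: unsigned keys are widened to uint64 and sorted via
+		// uintSlice; each sorted key is converted back to uint for encoding
+		// and lookup.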
+		v2 := make([]uint64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint(k2)))
+				ee.WriteMapElemValue()
+				e.encode(v[uint(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint(k2)))
+				e.encode(v[uint(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				e.encode(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				e.encode(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintStringR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUintStringV(rv2i(rv).(map[uint]string), e)
+}
+func (_ fastpathT) EncMapUintStringV(v map[uint]string, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeString(cUTF8, v[uint(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint(k2)))
+				ee.EncodeString(cUTF8, v[uint(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeString(cUTF8, v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeString(cUTF8, v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintUintR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUintUintV(rv2i(rv).(map[uint]uint), e)
+}
+func (_ fastpathT) EncMapUintUintV(v map[uint]uint, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[uint(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint(k2)))
+				ee.EncodeUint(uint64(v[uint(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintUint8R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUintUint8V(rv2i(rv).(map[uint]uint8), e)
+}
+func (_ fastpathT) EncMapUintUint8V(v map[uint]uint8, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[uint(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint(k2)))
+				ee.EncodeUint(uint64(v[uint(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintUint16R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUintUint16V(rv2i(rv).(map[uint]uint16), e)
+}
+func (_ fastpathT) EncMapUintUint16V(v map[uint]uint16, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[uint(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint(k2)))
+				ee.EncodeUint(uint64(v[uint(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintUint32R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUintUint32V(rv2i(rv).(map[uint]uint32), e)
+}
+func (_ fastpathT) EncMapUintUint32V(v map[uint]uint32, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[uint(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint(k2)))
+				ee.EncodeUint(uint64(v[uint(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintUint64R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUintUint64V(rv2i(rv).(map[uint]uint64), e)
+}
+func (_ fastpathT) EncMapUintUint64V(v map[uint]uint64, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[uint(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint(k2)))
+				ee.EncodeUint(uint64(v[uint(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintUintptrR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUintUintptrV(rv2i(rv).(map[uint]uintptr), e)
+}
+func (_ fastpathT) EncMapUintUintptrV(v map[uint]uintptr, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint(k2)))
+				ee.WriteMapElemValue()
+				e.encode(v[uint(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint(k2)))
+				e.encode(v[uint(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				e.encode(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				e.encode(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintIntR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUintIntV(rv2i(rv).(map[uint]int), e)
+}
+func (_ fastpathT) EncMapUintIntV(v map[uint]int, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[uint(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint(k2)))
+				ee.EncodeInt(int64(v[uint(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintInt8R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUintInt8V(rv2i(rv).(map[uint]int8), e)
+}
+func (_ fastpathT) EncMapUintInt8V(v map[uint]int8, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[uint(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint(k2)))
+				ee.EncodeInt(int64(v[uint(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintInt16R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUintInt16V(rv2i(rv).(map[uint]int16), e)
+}
+func (_ fastpathT) EncMapUintInt16V(v map[uint]int16, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[uint(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint(k2)))
+				ee.EncodeInt(int64(v[uint(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintInt32R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUintInt32V(rv2i(rv).(map[uint]int32), e)
+}
+func (_ fastpathT) EncMapUintInt32V(v map[uint]int32, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[uint(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint(k2)))
+				ee.EncodeInt(int64(v[uint(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintInt64R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUintInt64V(rv2i(rv).(map[uint]int64), e)
+}
+func (_ fastpathT) EncMapUintInt64V(v map[uint]int64, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[uint(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint(k2)))
+				ee.EncodeInt(int64(v[uint(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintFloat32R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUintFloat32V(rv2i(rv).(map[uint]float32), e)
+}
+func (_ fastpathT) EncMapUintFloat32V(v map[uint]float32, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeFloat32(v[uint(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint(k2)))
+				ee.EncodeFloat32(v[uint(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeFloat32(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeFloat32(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintFloat64R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUintFloat64V(rv2i(rv).(map[uint]float64), e)
+}
+func (_ fastpathT) EncMapUintFloat64V(v map[uint]float64, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeFloat64(v[uint(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint(k2)))
+				ee.EncodeFloat64(v[uint(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeFloat64(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeFloat64(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintBoolR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUintBoolV(rv2i(rv).(map[uint]bool), e)
+}
+func (_ fastpathT) EncMapUintBoolV(v map[uint]bool, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeBool(v[uint(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint(k2)))
+				ee.EncodeBool(v[uint(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeBool(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeBool(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
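+// A hypothetical illustration of the Canonical branch (not part of the
+// generated file): encoding map[uint]string{3: "c", 1: "a"} twice with
+// Canonical set on the handle yields byte-identical output, because the
+// keys pass through sort.Sort(uintSlice(...)) before being written; with
+// Canonical unset, entries are emitted in Go's randomized map-iteration
+// order, so repeated encodings may differ.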
+func (e *Encoder) fastpathEncMapUint8IntfR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint8IntfV(rv2i(rv).(map[uint8]interface{}), e)
+}
+func (_ fastpathT) EncMapUint8IntfV(v map[uint8]interface{}, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint8(k2)))
+				ee.WriteMapElemValue()
+				e.encode(v[uint8(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint8(k2)))
+				e.encode(v[uint8(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				e.encode(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				e.encode(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint8StringR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint8StringV(rv2i(rv).(map[uint8]string), e)
+}
+func (_ fastpathT) EncMapUint8StringV(v map[uint8]string, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint8(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeString(cUTF8, v[uint8(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint8(k2)))
+				ee.EncodeString(cUTF8, v[uint8(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeString(cUTF8, v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeString(cUTF8, v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint8UintR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint8UintV(rv2i(rv).(map[uint8]uint), e)
+}
+func (_ fastpathT) EncMapUint8UintV(v map[uint8]uint, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint8(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[uint8(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint8(k2)))
+				ee.EncodeUint(uint64(v[uint8(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint8Uint8R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint8Uint8V(rv2i(rv).(map[uint8]uint8), e)
+}
+func (_ fastpathT) EncMapUint8Uint8V(v map[uint8]uint8, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint8(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[uint8(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint8(k2)))
+				ee.EncodeUint(uint64(v[uint8(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint8Uint16R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint8Uint16V(rv2i(rv).(map[uint8]uint16), e)
+}
+func (_ fastpathT) EncMapUint8Uint16V(v map[uint8]uint16, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint8(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[uint8(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint8(k2)))
+				ee.EncodeUint(uint64(v[uint8(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint8Uint32R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint8Uint32V(rv2i(rv).(map[uint8]uint32), e)
+}
+func (_ fastpathT) EncMapUint8Uint32V(v map[uint8]uint32, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint8(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[uint8(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint8(k2)))
+				ee.EncodeUint(uint64(v[uint8(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint8Uint64R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint8Uint64V(rv2i(rv).(map[uint8]uint64), e)
+}
+func (_ fastpathT) EncMapUint8Uint64V(v map[uint8]uint64, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint8(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[uint8(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint8(k2)))
+				ee.EncodeUint(uint64(v[uint8(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint8UintptrR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint8UintptrV(rv2i(rv).(map[uint8]uintptr), e)
+}
+func (_ fastpathT) EncMapUint8UintptrV(v map[uint8]uintptr, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint8(k2)))
+				ee.WriteMapElemValue()
+				e.encode(v[uint8(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint8(k2)))
+				e.encode(v[uint8(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				e.encode(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				e.encode(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint8IntR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint8IntV(rv2i(rv).(map[uint8]int), e)
+}
+func (_ fastpathT) EncMapUint8IntV(v map[uint8]int, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint8(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[uint8(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint8(k2)))
+				ee.EncodeInt(int64(v[uint8(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint8Int8R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint8Int8V(rv2i(rv).(map[uint8]int8), e)
+}
+func (_ fastpathT) EncMapUint8Int8V(v map[uint8]int8, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint8(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[uint8(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint8(k2)))
+				ee.EncodeInt(int64(v[uint8(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint8Int16R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint8Int16V(rv2i(rv).(map[uint8]int16), e)
+}
+func (_ fastpathT) EncMapUint8Int16V(v map[uint8]int16, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint8(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[uint8(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint8(k2)))
+				ee.EncodeInt(int64(v[uint8(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint8Int32R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint8Int32V(rv2i(rv).(map[uint8]int32), e)
+}
+func (_ fastpathT) EncMapUint8Int32V(v map[uint8]int32, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint8(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[uint8(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint8(k2)))
+				ee.EncodeInt(int64(v[uint8(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint8Int64R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint8Int64V(rv2i(rv).(map[uint8]int64), e)
+}
+func (_ fastpathT) EncMapUint8Int64V(v map[uint8]int64, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint8(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[uint8(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint8(k2)))
+				ee.EncodeInt(int64(v[uint8(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint8Float32R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint8Float32V(rv2i(rv).(map[uint8]float32), e)
+}
+func (_ fastpathT) EncMapUint8Float32V(v map[uint8]float32, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint8(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeFloat32(v[uint8(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint8(k2)))
+				ee.EncodeFloat32(v[uint8(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeFloat32(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeFloat32(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint8Float64R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint8Float64V(rv2i(rv).(map[uint8]float64), e)
+}
+func (_ fastpathT) EncMapUint8Float64V(v map[uint8]float64, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint8(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeFloat64(v[uint8(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint8(k2)))
+				ee.EncodeFloat64(v[uint8(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeFloat64(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeFloat64(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint8BoolR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint8BoolV(rv2i(rv).(map[uint8]bool), e)
+}
+func (_ fastpathT) EncMapUint8BoolV(v map[uint8]bool, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint8(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeBool(v[uint8(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint8(k2)))
+				ee.EncodeBool(v[uint8(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeBool(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeBool(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint16IntfR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint16IntfV(rv2i(rv).(map[uint16]interface{}), e)
+}
+func (_ fastpathT) EncMapUint16IntfV(v map[uint16]interface{}, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint16(k2)))
+				ee.WriteMapElemValue()
+				e.encode(v[uint16(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint16(k2)))
+				e.encode(v[uint16(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				e.encode(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				e.encode(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint16StringR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint16StringV(rv2i(rv).(map[uint16]string), e)
+}
+func (_ fastpathT) EncMapUint16StringV(v map[uint16]string, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint16(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeString(cUTF8, v[uint16(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint16(k2)))
+				ee.EncodeString(cUTF8, v[uint16(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeString(cUTF8, v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeString(cUTF8, v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint16UintR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint16UintV(rv2i(rv).(map[uint16]uint), e)
+}
+func (_ fastpathT) EncMapUint16UintV(v map[uint16]uint, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint16(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[uint16(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint16(k2)))
+				ee.EncodeUint(uint64(v[uint16(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint16Uint8R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint16Uint8V(rv2i(rv).(map[uint16]uint8), e)
+}
+func (_ fastpathT) EncMapUint16Uint8V(v map[uint16]uint8, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint16(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[uint16(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint16(k2)))
+				ee.EncodeUint(uint64(v[uint16(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint16Uint16R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint16Uint16V(rv2i(rv).(map[uint16]uint16), e)
+}
+func (_ fastpathT) EncMapUint16Uint16V(v map[uint16]uint16, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint16(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[uint16(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint16(k2)))
+				ee.EncodeUint(uint64(v[uint16(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint16Uint32R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint16Uint32V(rv2i(rv).(map[uint16]uint32), e)
+}
+func (_ fastpathT) EncMapUint16Uint32V(v map[uint16]uint32, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint16(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[uint16(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint16(k2)))
+				ee.EncodeUint(uint64(v[uint16(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint16Uint64R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint16Uint64V(rv2i(rv).(map[uint16]uint64), e)
+}
+func (_ fastpathT) EncMapUint16Uint64V(v map[uint16]uint64, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint16(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[uint16(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint16(k2)))
+				ee.EncodeUint(uint64(v[uint16(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint16UintptrR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint16UintptrV(rv2i(rv).(map[uint16]uintptr), e)
+}
+func (_ fastpathT) EncMapUint16UintptrV(v map[uint16]uintptr, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint16(k2)))
+				ee.WriteMapElemValue()
+				e.encode(v[uint16(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint16(k2)))
+				e.encode(v[uint16(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				e.encode(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				e.encode(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint16IntR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint16IntV(rv2i(rv).(map[uint16]int), e)
+}
+func (_ fastpathT) EncMapUint16IntV(v map[uint16]int, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint16(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[uint16(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint16(k2)))
+				ee.EncodeInt(int64(v[uint16(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint16Int8R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint16Int8V(rv2i(rv).(map[uint16]int8), e)
+}
+func (_ fastpathT) EncMapUint16Int8V(v map[uint16]int8, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint16(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[uint16(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint16(k2)))
+				ee.EncodeInt(int64(v[uint16(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint16Int16R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint16Int16V(rv2i(rv).(map[uint16]int16), e)
+}
+func (_ fastpathT) EncMapUint16Int16V(v map[uint16]int16, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint16(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[uint16(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint16(k2)))
+				ee.EncodeInt(int64(v[uint16(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint16Int32R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint16Int32V(rv2i(rv).(map[uint16]int32), e)
+}
+func (_ fastpathT) EncMapUint16Int32V(v map[uint16]int32, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint16(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[uint16(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint16(k2)))
+				ee.EncodeInt(int64(v[uint16(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint16Int64R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint16Int64V(rv2i(rv).(map[uint16]int64), e)
+}
+func (_ fastpathT) EncMapUint16Int64V(v map[uint16]int64, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint16(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[uint16(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint16(k2)))
+				ee.EncodeInt(int64(v[uint16(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint16Float32R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint16Float32V(rv2i(rv).(map[uint16]float32), e)
+}
+func (_ fastpathT) EncMapUint16Float32V(v map[uint16]float32, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint16(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeFloat32(v[uint16(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint16(k2)))
+				ee.EncodeFloat32(v[uint16(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeFloat32(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeFloat32(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint16Float64R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint16Float64V(rv2i(rv).(map[uint16]float64), e)
+}
+func (_ fastpathT) EncMapUint16Float64V(v map[uint16]float64, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint16(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeFloat64(v[uint16(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint16(k2)))
+				ee.EncodeFloat64(v[uint16(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeFloat64(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeFloat64(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint16BoolR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint16BoolV(rv2i(rv).(map[uint16]bool), e)
+}
+func (_ fastpathT) EncMapUint16BoolV(v map[uint16]bool, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint16(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeBool(v[uint16(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint16(k2)))
+				ee.EncodeBool(v[uint16(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeBool(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeBool(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
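+// Note: for each fixed-width unsigned key type, the Canonical branch widens
+// the keys into a single []uint64 for sorting, then narrows each sorted key
+// back (e.g. uint32(k2) below) to index the original map. The round-trip is
+// lossless because every key originated in the narrower type.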
+func (e *Encoder) fastpathEncMapUint32IntfR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint32IntfV(rv2i(rv).(map[uint32]interface{}), e)
+}
+func (_ fastpathT) EncMapUint32IntfV(v map[uint32]interface{}, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint32(k2)))
+				ee.WriteMapElemValue()
+				e.encode(v[uint32(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint32(k2)))
+				e.encode(v[uint32(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				e.encode(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				e.encode(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint32StringR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint32StringV(rv2i(rv).(map[uint32]string), e)
+}
+func (_ fastpathT) EncMapUint32StringV(v map[uint32]string, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint32(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeString(cUTF8, v[uint32(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint32(k2)))
+				ee.EncodeString(cUTF8, v[uint32(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeString(cUTF8, v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeString(cUTF8, v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint32UintR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint32UintV(rv2i(rv).(map[uint32]uint), e)
+}
+func (_ fastpathT) EncMapUint32UintV(v map[uint32]uint, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint32(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[uint32(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint32(k2)))
+				ee.EncodeUint(uint64(v[uint32(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint32Uint8R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint32Uint8V(rv2i(rv).(map[uint32]uint8), e)
+}
+func (_ fastpathT) EncMapUint32Uint8V(v map[uint32]uint8, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint32(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[uint32(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint32(k2)))
+				ee.EncodeUint(uint64(v[uint32(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint32Uint16R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint32Uint16V(rv2i(rv).(map[uint32]uint16), e)
+}
+func (_ fastpathT) EncMapUint32Uint16V(v map[uint32]uint16, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint32(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[uint32(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint32(k2)))
+				ee.EncodeUint(uint64(v[uint32(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint32Uint32R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint32Uint32V(rv2i(rv).(map[uint32]uint32), e)
+}
+func (_ fastpathT) EncMapUint32Uint32V(v map[uint32]uint32, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint32(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[uint32(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint32(k2)))
+				ee.EncodeUint(uint64(v[uint32(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint32Uint64R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint32Uint64V(rv2i(rv).(map[uint32]uint64), e)
+}
+func (_ fastpathT) EncMapUint32Uint64V(v map[uint32]uint64, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint32(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[uint32(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint32(k2)))
+				ee.EncodeUint(uint64(v[uint32(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint32UintptrR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint32UintptrV(rv2i(rv).(map[uint32]uintptr), e)
+}
+func (_ fastpathT) EncMapUint32UintptrV(v map[uint32]uintptr, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint32(k2)))
+				ee.WriteMapElemValue()
+				e.encode(v[uint32(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint32(k2)))
+				e.encode(v[uint32(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				e.encode(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				e.encode(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint32IntR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint32IntV(rv2i(rv).(map[uint32]int), e)
+}
+func (_ fastpathT) EncMapUint32IntV(v map[uint32]int, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint32(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[uint32(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint32(k2)))
+				ee.EncodeInt(int64(v[uint32(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint32Int8R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint32Int8V(rv2i(rv).(map[uint32]int8), e)
+}
+func (_ fastpathT) EncMapUint32Int8V(v map[uint32]int8, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint32(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[uint32(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint32(k2)))
+				ee.EncodeInt(int64(v[uint32(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint32Int16R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint32Int16V(rv2i(rv).(map[uint32]int16), e)
+}
+func (_ fastpathT) EncMapUint32Int16V(v map[uint32]int16, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint32(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[uint32(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint32(k2)))
+				ee.EncodeInt(int64(v[uint32(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint32Int32R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint32Int32V(rv2i(rv).(map[uint32]int32), e)
+}
+func (_ fastpathT) EncMapUint32Int32V(v map[uint32]int32, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint32(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[uint32(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint32(k2)))
+				ee.EncodeInt(int64(v[uint32(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint32Int64R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint32Int64V(rv2i(rv).(map[uint32]int64), e)
+}
+func (_ fastpathT) EncMapUint32Int64V(v map[uint32]int64, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint32(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[uint32(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint32(k2)))
+				ee.EncodeInt(int64(v[uint32(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint32Float32R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint32Float32V(rv2i(rv).(map[uint32]float32), e)
+}
+func (_ fastpathT) EncMapUint32Float32V(v map[uint32]float32, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint32(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeFloat32(v[uint32(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint32(k2)))
+				ee.EncodeFloat32(v[uint32(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeFloat32(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeFloat32(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint32Float64R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint32Float64V(rv2i(rv).(map[uint32]float64), e)
+}
+func (_ fastpathT) EncMapUint32Float64V(v map[uint32]float64, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint32(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeFloat64(v[uint32(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint32(k2)))
+				ee.EncodeFloat64(v[uint32(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeFloat64(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeFloat64(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint32BoolR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint32BoolV(rv2i(rv).(map[uint32]bool), e)
+}
+func (_ fastpathT) EncMapUint32BoolV(v map[uint32]bool, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint32(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeBool(v[uint32(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint32(k2)))
+				ee.EncodeBool(v[uint32(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeBool(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeBool(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
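+// Encoders for maps keyed by uint64. The sorted keys are already uint64, so
+// the uint64(uint64(k2)) conversions below are redundant but harmless: the
+// generator instantiates the same uint64(keyType(k2)) template for every
+// unsigned key type.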
+func (e *Encoder) fastpathEncMapUint64IntfR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint64IntfV(rv2i(rv).(map[uint64]interface{}), e)
+}
+func (_ fastpathT) EncMapUint64IntfV(v map[uint64]interface{}, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint64(k2)))
+				ee.WriteMapElemValue()
+				e.encode(v[uint64(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint64(k2)))
+				e.encode(v[uint64(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				e.encode(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				e.encode(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint64StringR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint64StringV(rv2i(rv).(map[uint64]string), e)
+}
+func (_ fastpathT) EncMapUint64StringV(v map[uint64]string, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint64(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeString(cUTF8, v[uint64(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint64(k2)))
+				ee.EncodeString(cUTF8, v[uint64(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeString(cUTF8, v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeString(cUTF8, v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint64UintR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint64UintV(rv2i(rv).(map[uint64]uint), e)
+}
+func (_ fastpathT) EncMapUint64UintV(v map[uint64]uint, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint64(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[uint64(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint64(k2)))
+				ee.EncodeUint(uint64(v[uint64(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint64Uint8R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint64Uint8V(rv2i(rv).(map[uint64]uint8), e)
+}
+func (_ fastpathT) EncMapUint64Uint8V(v map[uint64]uint8, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint64(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[uint64(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint64(k2)))
+				ee.EncodeUint(uint64(v[uint64(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint64Uint16R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint64Uint16V(rv2i(rv).(map[uint64]uint16), e)
+}
+func (_ fastpathT) EncMapUint64Uint16V(v map[uint64]uint16, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint64(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[uint64(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint64(k2)))
+				ee.EncodeUint(uint64(v[uint64(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint64Uint32R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint64Uint32V(rv2i(rv).(map[uint64]uint32), e)
+}
+func (_ fastpathT) EncMapUint64Uint32V(v map[uint64]uint32, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint64(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[uint64(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint64(k2)))
+				ee.EncodeUint(uint64(v[uint64(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint64Uint64R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint64Uint64V(rv2i(rv).(map[uint64]uint64), e)
+}
+func (_ fastpathT) EncMapUint64Uint64V(v map[uint64]uint64, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint64(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[uint64(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint64(k2)))
+				ee.EncodeUint(uint64(v[uint64(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint64UintptrR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint64UintptrV(rv2i(rv).(map[uint64]uintptr), e)
+}
+func (_ fastpathT) EncMapUint64UintptrV(v map[uint64]uintptr, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint64(k2)))
+				ee.WriteMapElemValue()
+				e.encode(v[uint64(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint64(k2)))
+				e.encode(v[uint64(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				e.encode(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				e.encode(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint64IntR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint64IntV(rv2i(rv).(map[uint64]int), e)
+}
+func (_ fastpathT) EncMapUint64IntV(v map[uint64]int, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint64(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[uint64(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint64(k2)))
+				ee.EncodeInt(int64(v[uint64(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint64Int8R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint64Int8V(rv2i(rv).(map[uint64]int8), e)
+}
+func (_ fastpathT) EncMapUint64Int8V(v map[uint64]int8, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint64(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[uint64(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint64(k2)))
+				ee.EncodeInt(int64(v[uint64(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint64Int16R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint64Int16V(rv2i(rv).(map[uint64]int16), e)
+}
+func (_ fastpathT) EncMapUint64Int16V(v map[uint64]int16, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint64(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[uint64(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint64(k2)))
+				ee.EncodeInt(int64(v[uint64(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint64Int32R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint64Int32V(rv2i(rv).(map[uint64]int32), e)
+}
+func (_ fastpathT) EncMapUint64Int32V(v map[uint64]int32, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint64(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[uint64(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint64(k2)))
+				ee.EncodeInt(int64(v[uint64(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint64Int64R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint64Int64V(rv2i(rv).(map[uint64]int64), e)
+}
+func (_ fastpathT) EncMapUint64Int64V(v map[uint64]int64, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint64(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[uint64(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint64(k2)))
+				ee.EncodeInt(int64(v[uint64(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint64Float32R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint64Float32V(rv2i(rv).(map[uint64]float32), e)
+}
+func (_ fastpathT) EncMapUint64Float32V(v map[uint64]float32, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint64(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeFloat32(v[uint64(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint64(k2)))
+				ee.EncodeFloat32(v[uint64(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeFloat32(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeFloat32(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint64Float64R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint64Float64V(rv2i(rv).(map[uint64]float64), e)
+}
+func (_ fastpathT) EncMapUint64Float64V(v map[uint64]float64, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint64(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeFloat64(v[uint64(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint64(k2)))
+				ee.EncodeFloat64(v[uint64(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeFloat64(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeFloat64(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint64BoolR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUint64BoolV(rv2i(rv).(map[uint64]bool), e)
+}
+func (_ fastpathT) EncMapUint64BoolV(v map[uint64]bool, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(uint64(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeBool(v[uint64(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeUint(uint64(uint64(k2)))
+				ee.EncodeBool(v[uint64(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeUint(uint64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeBool(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeUint(uint64(k2))
+				ee.EncodeBool(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
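+// Encoders for maps keyed by uintptr. Unlike the fixed-width unsigned key
+// types above, uintptr keys are not passed to EncodeUint directly; they are
+// routed through e.encode, which dispatches on the concrete type.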
+func (e *Encoder) fastpathEncMapUintptrIntfR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUintptrIntfV(rv2i(rv).(map[uintptr]interface{}), e)
+}
+func (_ fastpathT) EncMapUintptrIntfV(v map[uintptr]interface{}, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				e.encode(uintptr(k2))
+				ee.WriteMapElemValue()
+				e.encode(v[uintptr(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				e.encode(uintptr(k2))
+				e.encode(v[uintptr(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				e.encode(k2)
+				ee.WriteMapElemValue()
+				e.encode(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				e.encode(k2)
+				e.encode(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintptrStringR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUintptrStringV(rv2i(rv).(map[uintptr]string), e)
+}
+func (_ fastpathT) EncMapUintptrStringV(v map[uintptr]string, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				e.encode(uintptr(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeString(cUTF8, v[uintptr(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				e.encode(uintptr(k2))
+				ee.EncodeString(cUTF8, v[uintptr(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				e.encode(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeString(cUTF8, v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				e.encode(k2)
+				ee.EncodeString(cUTF8, v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintptrUintR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUintptrUintV(rv2i(rv).(map[uintptr]uint), e)
+}
+func (_ fastpathT) EncMapUintptrUintV(v map[uintptr]uint, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				e.encode(uintptr(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[uintptr(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				e.encode(uintptr(k2))
+				ee.EncodeUint(uint64(v[uintptr(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				e.encode(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				e.encode(k2)
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintptrUint8R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUintptrUint8V(rv2i(rv).(map[uintptr]uint8), e)
+}
+func (_ fastpathT) EncMapUintptrUint8V(v map[uintptr]uint8, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				e.encode(uintptr(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[uintptr(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				e.encode(uintptr(k2))
+				ee.EncodeUint(uint64(v[uintptr(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				e.encode(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				e.encode(k2)
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintptrUint16R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUintptrUint16V(rv2i(rv).(map[uintptr]uint16), e)
+}
+func (_ fastpathT) EncMapUintptrUint16V(v map[uintptr]uint16, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				e.encode(uintptr(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[uintptr(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				e.encode(uintptr(k2))
+				ee.EncodeUint(uint64(v[uintptr(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				e.encode(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				e.encode(k2)
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintptrUint32R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUintptrUint32V(rv2i(rv).(map[uintptr]uint32), e)
+}
+func (_ fastpathT) EncMapUintptrUint32V(v map[uintptr]uint32, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				e.encode(uintptr(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[uintptr(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				e.encode(uintptr(k2))
+				ee.EncodeUint(uint64(v[uintptr(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				e.encode(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				e.encode(k2)
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintptrUint64R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUintptrUint64V(rv2i(rv).(map[uintptr]uint64), e)
+}
+func (_ fastpathT) EncMapUintptrUint64V(v map[uintptr]uint64, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				e.encode(uintptr(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[uintptr(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				e.encode(uintptr(k2))
+				ee.EncodeUint(uint64(v[uintptr(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				e.encode(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				e.encode(k2)
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintptrUintptrR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUintptrUintptrV(rv2i(rv).(map[uintptr]uintptr), e)
+}
+func (_ fastpathT) EncMapUintptrUintptrV(v map[uintptr]uintptr, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				e.encode(uintptr(k2))
+				ee.WriteMapElemValue()
+				e.encode(v[uintptr(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				e.encode(uintptr(k2))
+				e.encode(v[uintptr(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				e.encode(k2)
+				ee.WriteMapElemValue()
+				e.encode(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				e.encode(k2)
+				e.encode(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintptrIntR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUintptrIntV(rv2i(rv).(map[uintptr]int), e)
+}
+func (_ fastpathT) EncMapUintptrIntV(v map[uintptr]int, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				e.encode(uintptr(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[uintptr(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				e.encode(uintptr(k2))
+				ee.EncodeInt(int64(v[uintptr(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				e.encode(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				e.encode(k2)
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintptrInt8R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUintptrInt8V(rv2i(rv).(map[uintptr]int8), e)
+}
+func (_ fastpathT) EncMapUintptrInt8V(v map[uintptr]int8, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				e.encode(uintptr(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[uintptr(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				e.encode(uintptr(k2))
+				ee.EncodeInt(int64(v[uintptr(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				e.encode(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				e.encode(k2)
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintptrInt16R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUintptrInt16V(rv2i(rv).(map[uintptr]int16), e)
+}
+func (_ fastpathT) EncMapUintptrInt16V(v map[uintptr]int16, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				e.encode(uintptr(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[uintptr(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				e.encode(uintptr(k2))
+				ee.EncodeInt(int64(v[uintptr(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				e.encode(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				e.encode(k2)
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintptrInt32R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUintptrInt32V(rv2i(rv).(map[uintptr]int32), e)
+}
+func (_ fastpathT) EncMapUintptrInt32V(v map[uintptr]int32, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				e.encode(uintptr(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[uintptr(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				e.encode(uintptr(k2))
+				ee.EncodeInt(int64(v[uintptr(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				e.encode(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				e.encode(k2)
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintptrInt64R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUintptrInt64V(rv2i(rv).(map[uintptr]int64), e)
+}
+func (_ fastpathT) EncMapUintptrInt64V(v map[uintptr]int64, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				e.encode(uintptr(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[uintptr(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				e.encode(uintptr(k2))
+				ee.EncodeInt(int64(v[uintptr(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				e.encode(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				e.encode(k2)
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintptrFloat32R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUintptrFloat32V(rv2i(rv).(map[uintptr]float32), e)
+}
+func (_ fastpathT) EncMapUintptrFloat32V(v map[uintptr]float32, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				e.encode(uintptr(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeFloat32(v[uintptr(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				e.encode(uintptr(k2))
+				ee.EncodeFloat32(v[uintptr(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				e.encode(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeFloat32(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				e.encode(k2)
+				ee.EncodeFloat32(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintptrFloat64R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUintptrFloat64V(rv2i(rv).(map[uintptr]float64), e)
+}
+func (_ fastpathT) EncMapUintptrFloat64V(v map[uintptr]float64, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				e.encode(uintptr(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeFloat64(v[uintptr(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				e.encode(uintptr(k2))
+				ee.EncodeFloat64(v[uintptr(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				e.encode(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeFloat64(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				e.encode(k2)
+				ee.EncodeFloat64(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintptrBoolR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapUintptrBoolV(rv2i(rv).(map[uintptr]bool), e)
+}
+func (_ fastpathT) EncMapUintptrBoolV(v map[uintptr]bool, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]uint64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = uint64(k)
+			i++
+		}
+		sort.Sort(uintSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				e.encode(uintptr(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeBool(v[uintptr(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				e.encode(uintptr(k2))
+				ee.EncodeBool(v[uintptr(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				e.encode(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeBool(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				e.encode(k2)
+				ee.EncodeBool(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
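+// Encoders for maps keyed by int. Canonical ordering uses a signed []int64
+// scratch slice sorted via intSlice, mirroring the uintSlice path used for
+// the unsigned key types above.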
+func (e *Encoder) fastpathEncMapIntIntfR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapIntIntfV(rv2i(rv).(map[int]interface{}), e)
+}
+func (_ fastpathT) EncMapIntIntfV(v map[int]interface{}, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int(k2)))
+				ee.WriteMapElemValue()
+				e.encode(v[int(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int(k2)))
+				e.encode(v[int(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				e.encode(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				e.encode(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntStringR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapIntStringV(rv2i(rv).(map[int]string), e)
+}
+func (_ fastpathT) EncMapIntStringV(v map[int]string, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeString(cUTF8, v[int(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int(k2)))
+				ee.EncodeString(cUTF8, v[int(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeString(cUTF8, v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeString(cUTF8, v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntUintR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapIntUintV(rv2i(rv).(map[int]uint), e)
+}
+func (_ fastpathT) EncMapIntUintV(v map[int]uint, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[int(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int(k2)))
+				ee.EncodeUint(uint64(v[int(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntUint8R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapIntUint8V(rv2i(rv).(map[int]uint8), e)
+}
+func (_ fastpathT) EncMapIntUint8V(v map[int]uint8, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[int(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int(k2)))
+				ee.EncodeUint(uint64(v[int(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntUint16R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapIntUint16V(rv2i(rv).(map[int]uint16), e)
+}
+func (_ fastpathT) EncMapIntUint16V(v map[int]uint16, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[int(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int(k2)))
+				ee.EncodeUint(uint64(v[int(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntUint32R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapIntUint32V(rv2i(rv).(map[int]uint32), e)
+}
+func (_ fastpathT) EncMapIntUint32V(v map[int]uint32, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[int(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int(k2)))
+				ee.EncodeUint(uint64(v[int(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntUint64R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapIntUint64V(rv2i(rv).(map[int]uint64), e)
+}
+func (_ fastpathT) EncMapIntUint64V(v map[int]uint64, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[int(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int(k2)))
+				ee.EncodeUint(uint64(v[int(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntUintptrR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapIntUintptrV(rv2i(rv).(map[int]uintptr), e)
+}
+func (_ fastpathT) EncMapIntUintptrV(v map[int]uintptr, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int(k2)))
+				ee.WriteMapElemValue()
+				e.encode(v[int(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int(k2)))
+				e.encode(v[int(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				e.encode(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				e.encode(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntIntR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapIntIntV(rv2i(rv).(map[int]int), e)
+}
+func (_ fastpathT) EncMapIntIntV(v map[int]int, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[int(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int(k2)))
+				ee.EncodeInt(int64(v[int(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntInt8R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapIntInt8V(rv2i(rv).(map[int]int8), e)
+}
+func (_ fastpathT) EncMapIntInt8V(v map[int]int8, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[int(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int(k2)))
+				ee.EncodeInt(int64(v[int(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntInt16R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapIntInt16V(rv2i(rv).(map[int]int16), e)
+}
+func (_ fastpathT) EncMapIntInt16V(v map[int]int16, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[int(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int(k2)))
+				ee.EncodeInt(int64(v[int(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntInt32R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapIntInt32V(rv2i(rv).(map[int]int32), e)
+}
+func (_ fastpathT) EncMapIntInt32V(v map[int]int32, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[int(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int(k2)))
+				ee.EncodeInt(int64(v[int(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntInt64R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapIntInt64V(rv2i(rv).(map[int]int64), e)
+}
+func (_ fastpathT) EncMapIntInt64V(v map[int]int64, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[int(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int(k2)))
+				ee.EncodeInt(int64(v[int(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntFloat32R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapIntFloat32V(rv2i(rv).(map[int]float32), e)
+}
+func (_ fastpathT) EncMapIntFloat32V(v map[int]float32, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeFloat32(v[int(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int(k2)))
+				ee.EncodeFloat32(v[int(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeFloat32(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeFloat32(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntFloat64R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapIntFloat64V(rv2i(rv).(map[int]float64), e)
+}
+func (_ fastpathT) EncMapIntFloat64V(v map[int]float64, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeFloat64(v[int(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int(k2)))
+				ee.EncodeFloat64(v[int(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeFloat64(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeFloat64(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntBoolR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapIntBoolV(rv2i(rv).(map[int]bool), e)
+}
+func (_ fastpathT) EncMapIntBoolV(v map[int]bool, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeBool(v[int(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int(k2)))
+				ee.EncodeBool(v[int(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeBool(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeBool(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
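Every function here branches on `e.h.Canonical`: when set, the keys are copied out, sorted, and emitted in ascending order, making the encoding deterministic across runs. Assuming this vendored file is the `github.com/ugorji/go/codec` package, as the `fastpathTV`/`Encoder` symbols suggest, the flag is reachable through the public handle API; a short usage sketch:

package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	var h codec.JsonHandle
	h.Canonical = true // take the sorted-key branch in the fastpaths above

	m := map[int]bool{3: true, 1: false, 2: true}

	var out []byte
	if err := codec.NewEncoderBytes(&out, &h).Encode(m); err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // keys emitted in ascending order on every run
}

Without `Canonical`, the `else` branch ranges over the map directly, so the key order varies from run to run as Go randomizes map iteration.
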
+func (e *Encoder) fastpathEncMapInt8IntfR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt8IntfV(rv2i(rv).(map[int8]interface{}), e)
+}
+func (_ fastpathT) EncMapInt8IntfV(v map[int8]interface{}, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int8(k2)))
+				ee.WriteMapElemValue()
+				e.encode(v[int8(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int8(k2)))
+				e.encode(v[int8(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				e.encode(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				e.encode(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt8StringR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt8StringV(rv2i(rv).(map[int8]string), e)
+}
+func (_ fastpathT) EncMapInt8StringV(v map[int8]string, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int8(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeString(cUTF8, v[int8(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int8(k2)))
+				ee.EncodeString(cUTF8, v[int8(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeString(cUTF8, v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeString(cUTF8, v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt8UintR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt8UintV(rv2i(rv).(map[int8]uint), e)
+}
+func (_ fastpathT) EncMapInt8UintV(v map[int8]uint, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int8(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[int8(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int8(k2)))
+				ee.EncodeUint(uint64(v[int8(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt8Uint8R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt8Uint8V(rv2i(rv).(map[int8]uint8), e)
+}
+func (_ fastpathT) EncMapInt8Uint8V(v map[int8]uint8, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int8(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[int8(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int8(k2)))
+				ee.EncodeUint(uint64(v[int8(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt8Uint16R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt8Uint16V(rv2i(rv).(map[int8]uint16), e)
+}
+func (_ fastpathT) EncMapInt8Uint16V(v map[int8]uint16, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int8(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[int8(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int8(k2)))
+				ee.EncodeUint(uint64(v[int8(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt8Uint32R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt8Uint32V(rv2i(rv).(map[int8]uint32), e)
+}
+func (_ fastpathT) EncMapInt8Uint32V(v map[int8]uint32, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int8(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[int8(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int8(k2)))
+				ee.EncodeUint(uint64(v[int8(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt8Uint64R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt8Uint64V(rv2i(rv).(map[int8]uint64), e)
+}
+func (_ fastpathT) EncMapInt8Uint64V(v map[int8]uint64, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int8(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[int8(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int8(k2)))
+				ee.EncodeUint(uint64(v[int8(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt8UintptrR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt8UintptrV(rv2i(rv).(map[int8]uintptr), e)
+}
+func (_ fastpathT) EncMapInt8UintptrV(v map[int8]uintptr, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int8(k2)))
+				ee.WriteMapElemValue()
+				e.encode(v[int8(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int8(k2)))
+				e.encode(v[int8(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				e.encode(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				e.encode(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt8IntR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt8IntV(rv2i(rv).(map[int8]int), e)
+}
+func (_ fastpathT) EncMapInt8IntV(v map[int8]int, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int8(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[int8(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int8(k2)))
+				ee.EncodeInt(int64(v[int8(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt8Int8R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt8Int8V(rv2i(rv).(map[int8]int8), e)
+}
+func (_ fastpathT) EncMapInt8Int8V(v map[int8]int8, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int8(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[int8(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int8(k2)))
+				ee.EncodeInt(int64(v[int8(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt8Int16R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt8Int16V(rv2i(rv).(map[int8]int16), e)
+}
+func (_ fastpathT) EncMapInt8Int16V(v map[int8]int16, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int8(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[int8(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int8(k2)))
+				ee.EncodeInt(int64(v[int8(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt8Int32R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt8Int32V(rv2i(rv).(map[int8]int32), e)
+}
+func (_ fastpathT) EncMapInt8Int32V(v map[int8]int32, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int8(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[int8(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int8(k2)))
+				ee.EncodeInt(int64(v[int8(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt8Int64R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt8Int64V(rv2i(rv).(map[int8]int64), e)
+}
+func (_ fastpathT) EncMapInt8Int64V(v map[int8]int64, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int8(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[int8(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int8(k2)))
+				ee.EncodeInt(int64(v[int8(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt8Float32R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt8Float32V(rv2i(rv).(map[int8]float32), e)
+}
+func (_ fastpathT) EncMapInt8Float32V(v map[int8]float32, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int8(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeFloat32(v[int8(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int8(k2)))
+				ee.EncodeFloat32(v[int8(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeFloat32(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeFloat32(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt8Float64R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt8Float64V(rv2i(rv).(map[int8]float64), e)
+}
+func (_ fastpathT) EncMapInt8Float64V(v map[int8]float64, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int8(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeFloat64(v[int8(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int8(k2)))
+				ee.EncodeFloat64(v[int8(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeFloat64(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeFloat64(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt8BoolR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt8BoolV(rv2i(rv).(map[int8]bool), e)
+}
+func (_ fastpathT) EncMapInt8BoolV(v map[int8]bool, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int8(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeBool(v[int8(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int8(k2)))
+				ee.EncodeBool(v[int8(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeBool(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeBool(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
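The Canonical branch for the `int8`-keyed maps above widens every key to `int64`, sorts a single `[]int64` (`intSlice` is the package's internal `sort.Interface` wrapper over that slice), and narrows each key back with `int8(k2)` when indexing the map; the round trip is lossless because every value started life as an `int8`. A standalone sketch of the same shape, using `sort.Slice` for self-containment (`sortedInt8Keys` is a hypothetical helper, not part of the package):

package main

import (
	"fmt"
	"sort"
)

// sortedInt8Keys mirrors the Canonical key handling above: widen to
// int64, sort once, narrow back losslessly.
func sortedInt8Keys(m map[int8]bool) []int8 {
	ks := make([]int64, 0, len(m))
	for k := range m {
		ks = append(ks, int64(k)) // widening int8 -> int64 is always exact
	}
	sort.Slice(ks, func(i, j int) bool { return ks[i] < ks[j] })
	out := make([]int8, len(ks))
	for i, k := range ks {
		out[i] = int8(k) // safe narrowing: every value originated as int8
	}
	return out
}

func main() {
	m := map[int8]bool{5: true, -3: false, 0: true}
	for _, k := range sortedInt8Keys(m) {
		fmt.Println(k, m[k])
	}
}

Funneling all signed key widths through `[]int64` lets one sort implementation serve every generated variant instead of one sorter per key type.
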
+func (e *Encoder) fastpathEncMapInt16IntfR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt16IntfV(rv2i(rv).(map[int16]interface{}), e)
+}
+func (_ fastpathT) EncMapInt16IntfV(v map[int16]interface{}, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int16(k2)))
+				ee.WriteMapElemValue()
+				e.encode(v[int16(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int16(k2)))
+				e.encode(v[int16(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				e.encode(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				e.encode(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt16StringR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt16StringV(rv2i(rv).(map[int16]string), e)
+}
+func (_ fastpathT) EncMapInt16StringV(v map[int16]string, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int16(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeString(cUTF8, v[int16(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int16(k2)))
+				ee.EncodeString(cUTF8, v[int16(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeString(cUTF8, v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeString(cUTF8, v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt16UintR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt16UintV(rv2i(rv).(map[int16]uint), e)
+}
+func (_ fastpathT) EncMapInt16UintV(v map[int16]uint, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int16(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[int16(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int16(k2)))
+				ee.EncodeUint(uint64(v[int16(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt16Uint8R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt16Uint8V(rv2i(rv).(map[int16]uint8), e)
+}
+func (_ fastpathT) EncMapInt16Uint8V(v map[int16]uint8, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int16(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[int16(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int16(k2)))
+				ee.EncodeUint(uint64(v[int16(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt16Uint16R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt16Uint16V(rv2i(rv).(map[int16]uint16), e)
+}
+func (_ fastpathT) EncMapInt16Uint16V(v map[int16]uint16, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int16(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[int16(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int16(k2)))
+				ee.EncodeUint(uint64(v[int16(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt16Uint32R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt16Uint32V(rv2i(rv).(map[int16]uint32), e)
+}
+func (_ fastpathT) EncMapInt16Uint32V(v map[int16]uint32, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int16(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[int16(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int16(k2)))
+				ee.EncodeUint(uint64(v[int16(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt16Uint64R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt16Uint64V(rv2i(rv).(map[int16]uint64), e)
+}
+func (_ fastpathT) EncMapInt16Uint64V(v map[int16]uint64, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int16(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[int16(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int16(k2)))
+				ee.EncodeUint(uint64(v[int16(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt16UintptrR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt16UintptrV(rv2i(rv).(map[int16]uintptr), e)
+}
+func (_ fastpathT) EncMapInt16UintptrV(v map[int16]uintptr, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int16(k2)))
+				ee.WriteMapElemValue()
+				e.encode(v[int16(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int16(k2)))
+				e.encode(v[int16(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				e.encode(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				e.encode(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
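Each `EncMap...V` begins with the same guard: a nil map short-circuits to `EncodeNil` before `WriteMapStart` is ever reached, so nil and empty maps serialize differently. A small demonstration through the public API (again assuming the `github.com/ugorji/go/codec` import path):

package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	var h codec.JsonHandle

	show := func(m map[int]string) {
		var out []byte
		if err := codec.NewEncoderBytes(&out, &h).Encode(m); err != nil {
			panic(err)
		}
		fmt.Println(string(out))
	}

	var nilMap map[int]string
	show(nilMap)           // null: the EncodeNil short-circuit
	show(map[int]string{}) // {}: WriteMapStart(0) then WriteMapEnd
}
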
+func (e *Encoder) fastpathEncMapInt16IntR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt16IntV(rv2i(rv).(map[int16]int), e)
+}
+func (_ fastpathT) EncMapInt16IntV(v map[int16]int, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int16(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[int16(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int16(k2)))
+				ee.EncodeInt(int64(v[int16(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt16Int8R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt16Int8V(rv2i(rv).(map[int16]int8), e)
+}
+func (_ fastpathT) EncMapInt16Int8V(v map[int16]int8, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int16(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[int16(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int16(k2)))
+				ee.EncodeInt(int64(v[int16(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt16Int16R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt16Int16V(rv2i(rv).(map[int16]int16), e)
+}
+func (_ fastpathT) EncMapInt16Int16V(v map[int16]int16, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int16(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[int16(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int16(k2)))
+				ee.EncodeInt(int64(v[int16(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt16Int32R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt16Int32V(rv2i(rv).(map[int16]int32), e)
+}
+func (_ fastpathT) EncMapInt16Int32V(v map[int16]int32, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int16(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[int16(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int16(k2)))
+				ee.EncodeInt(int64(v[int16(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt16Int64R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt16Int64V(rv2i(rv).(map[int16]int64), e)
+}
+func (_ fastpathT) EncMapInt16Int64V(v map[int16]int64, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int16(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[int16(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int16(k2)))
+				ee.EncodeInt(int64(v[int16(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt16Float32R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt16Float32V(rv2i(rv).(map[int16]float32), e)
+}
+func (_ fastpathT) EncMapInt16Float32V(v map[int16]float32, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int16(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeFloat32(v[int16(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int16(k2)))
+				ee.EncodeFloat32(v[int16(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeFloat32(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeFloat32(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt16Float64R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt16Float64V(rv2i(rv).(map[int16]float64), e)
+}
+func (_ fastpathT) EncMapInt16Float64V(v map[int16]float64, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int16(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeFloat64(v[int16(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int16(k2)))
+				ee.EncodeFloat64(v[int16(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeFloat64(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeFloat64(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt16BoolR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt16BoolV(rv2i(rv).(map[int16]bool), e)
+}
+func (_ fastpathT) EncMapInt16BoolV(v map[int16]bool, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int16(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeBool(v[int16(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int16(k2)))
+				ee.EncodeBool(v[int16(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeBool(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeBool(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
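`esep` (from `hasElemSeparators()`) records once whether the target format needs explicit element markers around keys and values (`WriteMapElemKey`/`WriteMapElemValue`, as a JSON-style format does) or none (as most binary formats do). Duplicating the loop for each case hoists that branch out of the per-element hot path. The same shape in miniature, with a hypothetical `emit` helper:

package main

import "fmt"

// emit writes key/value pairs; sep plays the role of esep: the branch is
// decided once, and one of two specialized loops runs.
func emit(pairs [][2]int, sep bool) {
	if sep { // formats needing element separators
		for _, p := range pairs {
			fmt.Printf("<k>%d<v>%d", p[0], p[1])
		}
	} else { // compact formats: just the raw stream
		for _, p := range pairs {
			fmt.Printf("%d %d ", p[0], p[1])
		}
	}
	fmt.Println()
}

func main() {
	emit([][2]int{{1, 2}, {3, 4}}, true)
	emit([][2]int{{1, 2}, {3, 4}}, false)
}
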
+func (e *Encoder) fastpathEncMapInt32IntfR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt32IntfV(rv2i(rv).(map[int32]interface{}), e)
+}
+func (_ fastpathT) EncMapInt32IntfV(v map[int32]interface{}, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int32(k2)))
+				ee.WriteMapElemValue()
+				e.encode(v[int32(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int32(k2)))
+				e.encode(v[int32(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				e.encode(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				e.encode(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt32StringR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt32StringV(rv2i(rv).(map[int32]string), e)
+}
+func (_ fastpathT) EncMapInt32StringV(v map[int32]string, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int32(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeString(cUTF8, v[int32(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int32(k2)))
+				ee.EncodeString(cUTF8, v[int32(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeString(cUTF8, v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeString(cUTF8, v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt32UintR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt32UintV(rv2i(rv).(map[int32]uint), e)
+}
+func (_ fastpathT) EncMapInt32UintV(v map[int32]uint, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int32(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[int32(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int32(k2)))
+				ee.EncodeUint(uint64(v[int32(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt32Uint8R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt32Uint8V(rv2i(rv).(map[int32]uint8), e)
+}
+func (_ fastpathT) EncMapInt32Uint8V(v map[int32]uint8, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int32(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[int32(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int32(k2)))
+				ee.EncodeUint(uint64(v[int32(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt32Uint16R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt32Uint16V(rv2i(rv).(map[int32]uint16), e)
+}
+func (_ fastpathT) EncMapInt32Uint16V(v map[int32]uint16, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int32(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[int32(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int32(k2)))
+				ee.EncodeUint(uint64(v[int32(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt32Uint32R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt32Uint32V(rv2i(rv).(map[int32]uint32), e)
+}
+func (_ fastpathT) EncMapInt32Uint32V(v map[int32]uint32, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int32(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[int32(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int32(k2)))
+				ee.EncodeUint(uint64(v[int32(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt32Uint64R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt32Uint64V(rv2i(rv).(map[int32]uint64), e)
+}
+func (_ fastpathT) EncMapInt32Uint64V(v map[int32]uint64, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int32(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[int32(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int32(k2)))
+				ee.EncodeUint(uint64(v[int32(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt32UintptrR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt32UintptrV(rv2i(rv).(map[int32]uintptr), e)
+}
+func (_ fastpathT) EncMapInt32UintptrV(v map[int32]uintptr, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int32(k2)))
+				ee.WriteMapElemValue()
+				e.encode(v[int32(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int32(k2)))
+				e.encode(v[int32(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				e.encode(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				e.encode(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt32IntR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt32IntV(rv2i(rv).(map[int32]int), e)
+}
+func (_ fastpathT) EncMapInt32IntV(v map[int32]int, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int32(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[int32(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int32(k2)))
+				ee.EncodeInt(int64(v[int32(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt32Int8R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt32Int8V(rv2i(rv).(map[int32]int8), e)
+}
+func (_ fastpathT) EncMapInt32Int8V(v map[int32]int8, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int32(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[int32(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int32(k2)))
+				ee.EncodeInt(int64(v[int32(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt32Int16R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt32Int16V(rv2i(rv).(map[int32]int16), e)
+}
+func (_ fastpathT) EncMapInt32Int16V(v map[int32]int16, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int32(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[int32(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int32(k2)))
+				ee.EncodeInt(int64(v[int32(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt32Int32R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt32Int32V(rv2i(rv).(map[int32]int32), e)
+}
+func (_ fastpathT) EncMapInt32Int32V(v map[int32]int32, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int32(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[int32(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int32(k2)))
+				ee.EncodeInt(int64(v[int32(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt32Int64R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt32Int64V(rv2i(rv).(map[int32]int64), e)
+}
+func (_ fastpathT) EncMapInt32Int64V(v map[int32]int64, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int32(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[int32(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int32(k2)))
+				ee.EncodeInt(int64(v[int32(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt32Float32R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt32Float32V(rv2i(rv).(map[int32]float32), e)
+}
+func (_ fastpathT) EncMapInt32Float32V(v map[int32]float32, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int32(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeFloat32(v[int32(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int32(k2)))
+				ee.EncodeFloat32(v[int32(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeFloat32(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeFloat32(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt32Float64R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt32Float64V(rv2i(rv).(map[int32]float64), e)
+}
+func (_ fastpathT) EncMapInt32Float64V(v map[int32]float64, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int32(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeFloat64(v[int32(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int32(k2)))
+				ee.EncodeFloat64(v[int32(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeFloat64(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeFloat64(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt32BoolR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt32BoolV(rv2i(rv).(map[int32]bool), e)
+}
+func (_ fastpathT) EncMapInt32BoolV(v map[int32]bool, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int32(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeBool(v[int32(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int32(k2)))
+				ee.EncodeBool(v[int32(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeBool(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeBool(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt64IntfR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt64IntfV(rv2i(rv).(map[int64]interface{}), e)
+}
+func (_ fastpathT) EncMapInt64IntfV(v map[int64]interface{}, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int64(k2)))
+				ee.WriteMapElemValue()
+				e.encode(v[int64(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int64(k2)))
+				e.encode(v[int64(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				e.encode(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				e.encode(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt64StringR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt64StringV(rv2i(rv).(map[int64]string), e)
+}
+func (_ fastpathT) EncMapInt64StringV(v map[int64]string, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int64(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeString(cUTF8, v[int64(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int64(k2)))
+				ee.EncodeString(cUTF8, v[int64(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeString(cUTF8, v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeString(cUTF8, v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt64UintR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt64UintV(rv2i(rv).(map[int64]uint), e)
+}
+func (_ fastpathT) EncMapInt64UintV(v map[int64]uint, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int64(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[int64(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int64(k2)))
+				ee.EncodeUint(uint64(v[int64(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt64Uint8R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt64Uint8V(rv2i(rv).(map[int64]uint8), e)
+}
+func (_ fastpathT) EncMapInt64Uint8V(v map[int64]uint8, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int64(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[int64(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int64(k2)))
+				ee.EncodeUint(uint64(v[int64(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt64Uint16R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt64Uint16V(rv2i(rv).(map[int64]uint16), e)
+}
+func (_ fastpathT) EncMapInt64Uint16V(v map[int64]uint16, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int64(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[int64(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int64(k2)))
+				ee.EncodeUint(uint64(v[int64(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt64Uint32R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt64Uint32V(rv2i(rv).(map[int64]uint32), e)
+}
+func (_ fastpathT) EncMapInt64Uint32V(v map[int64]uint32, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int64(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[int64(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int64(k2)))
+				ee.EncodeUint(uint64(v[int64(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt64Uint64R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt64Uint64V(rv2i(rv).(map[int64]uint64), e)
+}
+func (_ fastpathT) EncMapInt64Uint64V(v map[int64]uint64, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int64(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[int64(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int64(k2)))
+				ee.EncodeUint(uint64(v[int64(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt64UintptrR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt64UintptrV(rv2i(rv).(map[int64]uintptr), e)
+}
+func (_ fastpathT) EncMapInt64UintptrV(v map[int64]uintptr, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int64(k2)))
+				ee.WriteMapElemValue()
+				e.encode(v[int64(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int64(k2)))
+				e.encode(v[int64(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				e.encode(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				e.encode(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt64IntR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt64IntV(rv2i(rv).(map[int64]int), e)
+}
+func (_ fastpathT) EncMapInt64IntV(v map[int64]int, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int64(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[int64(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int64(k2)))
+				ee.EncodeInt(int64(v[int64(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt64Int8R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt64Int8V(rv2i(rv).(map[int64]int8), e)
+}
+func (_ fastpathT) EncMapInt64Int8V(v map[int64]int8, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int64(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[int64(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int64(k2)))
+				ee.EncodeInt(int64(v[int64(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt64Int16R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt64Int16V(rv2i(rv).(map[int64]int16), e)
+}
+func (_ fastpathT) EncMapInt64Int16V(v map[int64]int16, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int64(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[int64(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int64(k2)))
+				ee.EncodeInt(int64(v[int64(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt64Int32R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt64Int32V(rv2i(rv).(map[int64]int32), e)
+}
+func (_ fastpathT) EncMapInt64Int32V(v map[int64]int32, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int64(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[int64(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int64(k2)))
+				ee.EncodeInt(int64(v[int64(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt64Int64R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt64Int64V(rv2i(rv).(map[int64]int64), e)
+}
+func (_ fastpathT) EncMapInt64Int64V(v map[int64]int64, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int64(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[int64(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int64(k2)))
+				ee.EncodeInt(int64(v[int64(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt64Float32R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt64Float32V(rv2i(rv).(map[int64]float32), e)
+}
+func (_ fastpathT) EncMapInt64Float32V(v map[int64]float32, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int64(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeFloat32(v[int64(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int64(k2)))
+				ee.EncodeFloat32(v[int64(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeFloat32(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeFloat32(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt64Float64R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt64Float64V(rv2i(rv).(map[int64]float64), e)
+}
+func (_ fastpathT) EncMapInt64Float64V(v map[int64]float64, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int64(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeFloat64(v[int64(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int64(k2)))
+				ee.EncodeFloat64(v[int64(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeFloat64(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeFloat64(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt64BoolR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapInt64BoolV(rv2i(rv).(map[int64]bool), e)
+}
+func (_ fastpathT) EncMapInt64BoolV(v map[int64]bool, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]int64, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = int64(k)
+			i++
+		}
+		sort.Sort(intSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(int64(k2)))
+				ee.WriteMapElemValue()
+				ee.EncodeBool(v[int64(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeInt(int64(int64(k2)))
+				ee.EncodeBool(v[int64(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeInt(int64(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeBool(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeInt(int64(k2))
+				ee.EncodeBool(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapBoolIntfR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapBoolIntfV(rv2i(rv).(map[bool]interface{}), e)
+}
+func (_ fastpathT) EncMapBoolIntfV(v map[bool]interface{}, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]bool, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = bool(k)
+			i++
+		}
+		sort.Sort(boolSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeBool(bool(k2))
+				ee.WriteMapElemValue()
+				e.encode(v[bool(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeBool(bool(k2))
+				e.encode(v[bool(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeBool(k2)
+				ee.WriteMapElemValue()
+				e.encode(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeBool(k2)
+				e.encode(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapBoolStringR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapBoolStringV(rv2i(rv).(map[bool]string), e)
+}
+func (_ fastpathT) EncMapBoolStringV(v map[bool]string, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]bool, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = bool(k)
+			i++
+		}
+		sort.Sort(boolSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeBool(bool(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeString(cUTF8, v[bool(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeBool(bool(k2))
+				ee.EncodeString(cUTF8, v[bool(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeBool(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeString(cUTF8, v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeBool(k2)
+				ee.EncodeString(cUTF8, v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapBoolUintR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapBoolUintV(rv2i(rv).(map[bool]uint), e)
+}
+func (_ fastpathT) EncMapBoolUintV(v map[bool]uint, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]bool, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = bool(k)
+			i++
+		}
+		sort.Sort(boolSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeBool(bool(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[bool(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeBool(bool(k2))
+				ee.EncodeUint(uint64(v[bool(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeBool(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeBool(k2)
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapBoolUint8R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapBoolUint8V(rv2i(rv).(map[bool]uint8), e)
+}
+func (_ fastpathT) EncMapBoolUint8V(v map[bool]uint8, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]bool, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = bool(k)
+			i++
+		}
+		sort.Sort(boolSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeBool(bool(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[bool(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeBool(bool(k2))
+				ee.EncodeUint(uint64(v[bool(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeBool(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeBool(k2)
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapBoolUint16R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapBoolUint16V(rv2i(rv).(map[bool]uint16), e)
+}
+func (_ fastpathT) EncMapBoolUint16V(v map[bool]uint16, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]bool, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = bool(k)
+			i++
+		}
+		sort.Sort(boolSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeBool(bool(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[bool(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeBool(bool(k2))
+				ee.EncodeUint(uint64(v[bool(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeBool(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeBool(k2)
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapBoolUint32R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapBoolUint32V(rv2i(rv).(map[bool]uint32), e)
+}
+func (_ fastpathT) EncMapBoolUint32V(v map[bool]uint32, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]bool, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = bool(k)
+			i++
+		}
+		sort.Sort(boolSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeBool(bool(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[bool(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeBool(bool(k2))
+				ee.EncodeUint(uint64(v[bool(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeBool(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeBool(k2)
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapBoolUint64R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapBoolUint64V(rv2i(rv).(map[bool]uint64), e)
+}
+func (_ fastpathT) EncMapBoolUint64V(v map[bool]uint64, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]bool, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = bool(k)
+			i++
+		}
+		sort.Sort(boolSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeBool(bool(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v[bool(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeBool(bool(k2))
+				ee.EncodeUint(uint64(v[bool(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeBool(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeUint(uint64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeBool(k2)
+				ee.EncodeUint(uint64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapBoolUintptrR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapBoolUintptrV(rv2i(rv).(map[bool]uintptr), e)
+}
+func (_ fastpathT) EncMapBoolUintptrV(v map[bool]uintptr, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]bool, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = bool(k)
+			i++
+		}
+		sort.Sort(boolSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeBool(bool(k2))
+				ee.WriteMapElemValue()
+				e.encode(v[bool(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeBool(bool(k2))
+				e.encode(v[bool(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeBool(k2)
+				ee.WriteMapElemValue()
+				e.encode(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeBool(k2)
+				e.encode(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapBoolIntR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapBoolIntV(rv2i(rv).(map[bool]int), e)
+}
+func (_ fastpathT) EncMapBoolIntV(v map[bool]int, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]bool, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = bool(k)
+			i++
+		}
+		sort.Sort(boolSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeBool(bool(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[bool(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeBool(bool(k2))
+				ee.EncodeInt(int64(v[bool(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeBool(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeBool(k2)
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapBoolInt8R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapBoolInt8V(rv2i(rv).(map[bool]int8), e)
+}
+func (_ fastpathT) EncMapBoolInt8V(v map[bool]int8, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]bool, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = bool(k)
+			i++
+		}
+		sort.Sort(boolSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeBool(bool(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[bool(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeBool(bool(k2))
+				ee.EncodeInt(int64(v[bool(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeBool(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeBool(k2)
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapBoolInt16R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapBoolInt16V(rv2i(rv).(map[bool]int16), e)
+}
+func (_ fastpathT) EncMapBoolInt16V(v map[bool]int16, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]bool, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = bool(k)
+			i++
+		}
+		sort.Sort(boolSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeBool(bool(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[bool(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeBool(bool(k2))
+				ee.EncodeInt(int64(v[bool(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeBool(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeBool(k2)
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapBoolInt32R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapBoolInt32V(rv2i(rv).(map[bool]int32), e)
+}
+func (_ fastpathT) EncMapBoolInt32V(v map[bool]int32, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]bool, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = bool(k)
+			i++
+		}
+		sort.Sort(boolSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeBool(bool(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[bool(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeBool(bool(k2))
+				ee.EncodeInt(int64(v[bool(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeBool(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeBool(k2)
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapBoolInt64R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapBoolInt64V(rv2i(rv).(map[bool]int64), e)
+}
+func (_ fastpathT) EncMapBoolInt64V(v map[bool]int64, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]bool, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = bool(k)
+			i++
+		}
+		sort.Sort(boolSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeBool(bool(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v[bool(k2)]))
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeBool(bool(k2))
+				ee.EncodeInt(int64(v[bool(k2)]))
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeBool(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeInt(int64(v2))
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeBool(k2)
+				ee.EncodeInt(int64(v2))
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapBoolFloat32R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapBoolFloat32V(rv2i(rv).(map[bool]float32), e)
+}
+func (_ fastpathT) EncMapBoolFloat32V(v map[bool]float32, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]bool, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = bool(k)
+			i++
+		}
+		sort.Sort(boolSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeBool(bool(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeFloat32(v[bool(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeBool(bool(k2))
+				ee.EncodeFloat32(v[bool(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeBool(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeFloat32(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeBool(k2)
+				ee.EncodeFloat32(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapBoolFloat64R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapBoolFloat64V(rv2i(rv).(map[bool]float64), e)
+}
+func (_ fastpathT) EncMapBoolFloat64V(v map[bool]float64, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]bool, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = bool(k)
+			i++
+		}
+		sort.Sort(boolSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeBool(bool(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeFloat64(v[bool(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeBool(bool(k2))
+				ee.EncodeFloat64(v[bool(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeBool(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeFloat64(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeBool(k2)
+				ee.EncodeFloat64(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapBoolBoolR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapBoolBoolV(rv2i(rv).(map[bool]bool), e)
+}
+func (_ fastpathT) EncMapBoolBoolV(v map[bool]bool, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]bool, len(v))
+		var i int
+		for k, _ := range v {
+			v2[i] = bool(k)
+			i++
+		}
+		sort.Sort(boolSlice(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				ee.EncodeBool(bool(k2))
+				ee.WriteMapElemValue()
+				ee.EncodeBool(v[bool(k2)])
+			}
+		} else {
+			for _, k2 := range v2 {
+				ee.EncodeBool(bool(k2))
+				ee.EncodeBool(v[bool(k2)])
+			}
+		}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				ee.EncodeBool(k2)
+				ee.WriteMapElemValue()
+				ee.EncodeBool(v2)
+			}
+		} else {
+			for k2, v2 := range v {
+				ee.EncodeBool(k2)
+				ee.EncodeBool(v2)
+			}
+		}
+	}
+	ee.WriteMapEnd()
+}
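+
+// A minimal encode sketch, assuming only the public codec API (CborHandle,
+// the embedded BasicHandle.Canonical field, NewEncoderBytes and Encode are
+// from github.com/ugorji/go/codec). With Canonical set on the handle, the
+// sorted branches in the map encoders above collect the keys, sort them,
+// and emit entries in key order:
+//
+//	var h codec.CborHandle
+//	h.Canonical = true
+//	var out []byte
+//	enc := codec.NewEncoderBytes(&out, &h)
+//	err := enc.Encode(map[int64]string{3: "c", 1: "a", 2: "b"})
+//	// on success, keys 1, 2, 3 are written in ascending order by
+//	// EncMapInt64StringV above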
+
+// -- decode
+
+// -- -- fast path type switch
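+// fastpathDecodeTypeSwitch reports whether iv matched one of the concrete
+// slice or map types handled below. Each type gets a value case and a
+// pointer case: a non-pointer map is decoded in place (maps are reference
+// types, so the changed result can be ignored); a non-pointer slice only
+// copies decoded elements back when the helper had to switch to a different
+// backing array; a pointer is reassigned whenever the helper reports a
+// change, e.g. after growing a slice or allocating a nil map.
+//
+// A minimal decode sketch, assuming the public codec API (CborHandle,
+// NewDecoderBytes and Decode are from github.com/ugorji/go/codec):
+//
+//	var h codec.CborHandle
+//	var m map[string]int
+//	dec := codec.NewDecoderBytes(data, &h)
+//	err := dec.Decode(&m) // *map[string]int dispatches through this switch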
+func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool {
+	var changed bool
+	switch v := iv.(type) {
+
+	case []interface{}:
+		var v2 []interface{}
+		v2, changed = fastpathTV.DecSliceIntfV(v, false, d)
+		if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+			copy(v, v2)
+		}
+	case *[]interface{}:
+		var v2 []interface{}
+		v2, changed = fastpathTV.DecSliceIntfV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case []string:
+		var v2 []string
+		v2, changed = fastpathTV.DecSliceStringV(v, false, d)
+		if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+			copy(v, v2)
+		}
+	case *[]string:
+		var v2 []string
+		v2, changed = fastpathTV.DecSliceStringV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case []float32:
+		var v2 []float32
+		v2, changed = fastpathTV.DecSliceFloat32V(v, false, d)
+		if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+			copy(v, v2)
+		}
+	case *[]float32:
+		var v2 []float32
+		v2, changed = fastpathTV.DecSliceFloat32V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case []float64:
+		var v2 []float64
+		v2, changed = fastpathTV.DecSliceFloat64V(v, false, d)
+		if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+			copy(v, v2)
+		}
+	case *[]float64:
+		var v2 []float64
+		v2, changed = fastpathTV.DecSliceFloat64V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case []uint:
+		var v2 []uint
+		v2, changed = fastpathTV.DecSliceUintV(v, false, d)
+		if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+			copy(v, v2)
+		}
+	case *[]uint:
+		var v2 []uint
+		v2, changed = fastpathTV.DecSliceUintV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case []uint16:
+		var v2 []uint16
+		v2, changed = fastpathTV.DecSliceUint16V(v, false, d)
+		if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+			copy(v, v2)
+		}
+	case *[]uint16:
+		var v2 []uint16
+		v2, changed = fastpathTV.DecSliceUint16V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case []uint32:
+		var v2 []uint32
+		v2, changed = fastpathTV.DecSliceUint32V(v, false, d)
+		if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+			copy(v, v2)
+		}
+	case *[]uint32:
+		var v2 []uint32
+		v2, changed = fastpathTV.DecSliceUint32V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case []uint64:
+		var v2 []uint64
+		v2, changed = fastpathTV.DecSliceUint64V(v, false, d)
+		if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+			copy(v, v2)
+		}
+	case *[]uint64:
+		var v2 []uint64
+		v2, changed = fastpathTV.DecSliceUint64V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case []uintptr:
+		var v2 []uintptr
+		v2, changed = fastpathTV.DecSliceUintptrV(v, false, d)
+		if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+			copy(v, v2)
+		}
+	case *[]uintptr:
+		var v2 []uintptr
+		v2, changed = fastpathTV.DecSliceUintptrV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case []int:
+		var v2 []int
+		v2, changed = fastpathTV.DecSliceIntV(v, false, d)
+		if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+			copy(v, v2)
+		}
+	case *[]int:
+		var v2 []int
+		v2, changed = fastpathTV.DecSliceIntV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case []int8:
+		var v2 []int8
+		v2, changed = fastpathTV.DecSliceInt8V(v, false, d)
+		if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+			copy(v, v2)
+		}
+	case *[]int8:
+		var v2 []int8
+		v2, changed = fastpathTV.DecSliceInt8V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case []int16:
+		var v2 []int16
+		v2, changed = fastpathTV.DecSliceInt16V(v, false, d)
+		if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+			copy(v, v2)
+		}
+	case *[]int16:
+		var v2 []int16
+		v2, changed = fastpathTV.DecSliceInt16V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case []int32:
+		var v2 []int32
+		v2, changed = fastpathTV.DecSliceInt32V(v, false, d)
+		if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+			copy(v, v2)
+		}
+	case *[]int32:
+		var v2 []int32
+		v2, changed = fastpathTV.DecSliceInt32V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case []int64:
+		var v2 []int64
+		v2, changed = fastpathTV.DecSliceInt64V(v, false, d)
+		if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+			copy(v, v2)
+		}
+	case *[]int64:
+		var v2 []int64
+		v2, changed = fastpathTV.DecSliceInt64V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case []bool:
+		var v2 []bool
+		v2, changed = fastpathTV.DecSliceBoolV(v, false, d)
+		if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+			copy(v, v2)
+		}
+	case *[]bool:
+		var v2 []bool
+		v2, changed = fastpathTV.DecSliceBoolV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+
+	case map[interface{}]interface{}:
+		fastpathTV.DecMapIntfIntfV(v, false, d)
+	case *map[interface{}]interface{}:
+		var v2 map[interface{}]interface{}
+		v2, changed = fastpathTV.DecMapIntfIntfV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[interface{}]string:
+		fastpathTV.DecMapIntfStringV(v, false, d)
+	case *map[interface{}]string:
+		var v2 map[interface{}]string
+		v2, changed = fastpathTV.DecMapIntfStringV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[interface{}]uint:
+		fastpathTV.DecMapIntfUintV(v, false, d)
+	case *map[interface{}]uint:
+		var v2 map[interface{}]uint
+		v2, changed = fastpathTV.DecMapIntfUintV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[interface{}]uint8:
+		fastpathTV.DecMapIntfUint8V(v, false, d)
+	case *map[interface{}]uint8:
+		var v2 map[interface{}]uint8
+		v2, changed = fastpathTV.DecMapIntfUint8V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[interface{}]uint16:
+		fastpathTV.DecMapIntfUint16V(v, false, d)
+	case *map[interface{}]uint16:
+		var v2 map[interface{}]uint16
+		v2, changed = fastpathTV.DecMapIntfUint16V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[interface{}]uint32:
+		fastpathTV.DecMapIntfUint32V(v, false, d)
+	case *map[interface{}]uint32:
+		var v2 map[interface{}]uint32
+		v2, changed = fastpathTV.DecMapIntfUint32V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[interface{}]uint64:
+		fastpathTV.DecMapIntfUint64V(v, false, d)
+	case *map[interface{}]uint64:
+		var v2 map[interface{}]uint64
+		v2, changed = fastpathTV.DecMapIntfUint64V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[interface{}]uintptr:
+		fastpathTV.DecMapIntfUintptrV(v, false, d)
+	case *map[interface{}]uintptr:
+		var v2 map[interface{}]uintptr
+		v2, changed = fastpathTV.DecMapIntfUintptrV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[interface{}]int:
+		fastpathTV.DecMapIntfIntV(v, false, d)
+	case *map[interface{}]int:
+		var v2 map[interface{}]int
+		v2, changed = fastpathTV.DecMapIntfIntV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[interface{}]int8:
+		fastpathTV.DecMapIntfInt8V(v, false, d)
+	case *map[interface{}]int8:
+		var v2 map[interface{}]int8
+		v2, changed = fastpathTV.DecMapIntfInt8V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[interface{}]int16:
+		fastpathTV.DecMapIntfInt16V(v, false, d)
+	case *map[interface{}]int16:
+		var v2 map[interface{}]int16
+		v2, changed = fastpathTV.DecMapIntfInt16V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[interface{}]int32:
+		fastpathTV.DecMapIntfInt32V(v, false, d)
+	case *map[interface{}]int32:
+		var v2 map[interface{}]int32
+		v2, changed = fastpathTV.DecMapIntfInt32V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[interface{}]int64:
+		fastpathTV.DecMapIntfInt64V(v, false, d)
+	case *map[interface{}]int64:
+		var v2 map[interface{}]int64
+		v2, changed = fastpathTV.DecMapIntfInt64V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[interface{}]float32:
+		fastpathTV.DecMapIntfFloat32V(v, false, d)
+	case *map[interface{}]float32:
+		var v2 map[interface{}]float32
+		v2, changed = fastpathTV.DecMapIntfFloat32V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[interface{}]float64:
+		fastpathTV.DecMapIntfFloat64V(v, false, d)
+	case *map[interface{}]float64:
+		var v2 map[interface{}]float64
+		v2, changed = fastpathTV.DecMapIntfFloat64V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[interface{}]bool:
+		fastpathTV.DecMapIntfBoolV(v, false, d)
+	case *map[interface{}]bool:
+		var v2 map[interface{}]bool
+		v2, changed = fastpathTV.DecMapIntfBoolV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[string]interface{}:
+		fastpathTV.DecMapStringIntfV(v, false, d)
+	case *map[string]interface{}:
+		var v2 map[string]interface{}
+		v2, changed = fastpathTV.DecMapStringIntfV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[string]string:
+		fastpathTV.DecMapStringStringV(v, false, d)
+	case *map[string]string:
+		var v2 map[string]string
+		v2, changed = fastpathTV.DecMapStringStringV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[string]uint:
+		fastpathTV.DecMapStringUintV(v, false, d)
+	case *map[string]uint:
+		var v2 map[string]uint
+		v2, changed = fastpathTV.DecMapStringUintV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[string]uint8:
+		fastpathTV.DecMapStringUint8V(v, false, d)
+	case *map[string]uint8:
+		var v2 map[string]uint8
+		v2, changed = fastpathTV.DecMapStringUint8V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[string]uint16:
+		fastpathTV.DecMapStringUint16V(v, false, d)
+	case *map[string]uint16:
+		var v2 map[string]uint16
+		v2, changed = fastpathTV.DecMapStringUint16V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[string]uint32:
+		fastpathTV.DecMapStringUint32V(v, false, d)
+	case *map[string]uint32:
+		var v2 map[string]uint32
+		v2, changed = fastpathTV.DecMapStringUint32V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[string]uint64:
+		fastpathTV.DecMapStringUint64V(v, false, d)
+	case *map[string]uint64:
+		var v2 map[string]uint64
+		v2, changed = fastpathTV.DecMapStringUint64V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[string]uintptr:
+		fastpathTV.DecMapStringUintptrV(v, false, d)
+	case *map[string]uintptr:
+		var v2 map[string]uintptr
+		v2, changed = fastpathTV.DecMapStringUintptrV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[string]int:
+		fastpathTV.DecMapStringIntV(v, false, d)
+	case *map[string]int:
+		var v2 map[string]int
+		v2, changed = fastpathTV.DecMapStringIntV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[string]int8:
+		fastpathTV.DecMapStringInt8V(v, false, d)
+	case *map[string]int8:
+		var v2 map[string]int8
+		v2, changed = fastpathTV.DecMapStringInt8V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[string]int16:
+		fastpathTV.DecMapStringInt16V(v, false, d)
+	case *map[string]int16:
+		var v2 map[string]int16
+		v2, changed = fastpathTV.DecMapStringInt16V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[string]int32:
+		fastpathTV.DecMapStringInt32V(v, false, d)
+	case *map[string]int32:
+		var v2 map[string]int32
+		v2, changed = fastpathTV.DecMapStringInt32V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[string]int64:
+		fastpathTV.DecMapStringInt64V(v, false, d)
+	case *map[string]int64:
+		var v2 map[string]int64
+		v2, changed = fastpathTV.DecMapStringInt64V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[string]float32:
+		fastpathTV.DecMapStringFloat32V(v, false, d)
+	case *map[string]float32:
+		var v2 map[string]float32
+		v2, changed = fastpathTV.DecMapStringFloat32V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[string]float64:
+		fastpathTV.DecMapStringFloat64V(v, false, d)
+	case *map[string]float64:
+		var v2 map[string]float64
+		v2, changed = fastpathTV.DecMapStringFloat64V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[string]bool:
+		fastpathTV.DecMapStringBoolV(v, false, d)
+	case *map[string]bool:
+		var v2 map[string]bool
+		v2, changed = fastpathTV.DecMapStringBoolV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[float32]interface{}:
+		fastpathTV.DecMapFloat32IntfV(v, false, d)
+	case *map[float32]interface{}:
+		var v2 map[float32]interface{}
+		v2, changed = fastpathTV.DecMapFloat32IntfV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[float32]string:
+		fastpathTV.DecMapFloat32StringV(v, false, d)
+	case *map[float32]string:
+		var v2 map[float32]string
+		v2, changed = fastpathTV.DecMapFloat32StringV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[float32]uint:
+		fastpathTV.DecMapFloat32UintV(v, false, d)
+	case *map[float32]uint:
+		var v2 map[float32]uint
+		v2, changed = fastpathTV.DecMapFloat32UintV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[float32]uint8:
+		fastpathTV.DecMapFloat32Uint8V(v, false, d)
+	case *map[float32]uint8:
+		var v2 map[float32]uint8
+		v2, changed = fastpathTV.DecMapFloat32Uint8V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[float32]uint16:
+		fastpathTV.DecMapFloat32Uint16V(v, false, d)
+	case *map[float32]uint16:
+		var v2 map[float32]uint16
+		v2, changed = fastpathTV.DecMapFloat32Uint16V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[float32]uint32:
+		fastpathTV.DecMapFloat32Uint32V(v, false, d)
+	case *map[float32]uint32:
+		var v2 map[float32]uint32
+		v2, changed = fastpathTV.DecMapFloat32Uint32V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[float32]uint64:
+		fastpathTV.DecMapFloat32Uint64V(v, false, d)
+	case *map[float32]uint64:
+		var v2 map[float32]uint64
+		v2, changed = fastpathTV.DecMapFloat32Uint64V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[float32]uintptr:
+		fastpathTV.DecMapFloat32UintptrV(v, false, d)
+	case *map[float32]uintptr:
+		var v2 map[float32]uintptr
+		v2, changed = fastpathTV.DecMapFloat32UintptrV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[float32]int:
+		fastpathTV.DecMapFloat32IntV(v, false, d)
+	case *map[float32]int:
+		var v2 map[float32]int
+		v2, changed = fastpathTV.DecMapFloat32IntV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[float32]int8:
+		fastpathTV.DecMapFloat32Int8V(v, false, d)
+	case *map[float32]int8:
+		var v2 map[float32]int8
+		v2, changed = fastpathTV.DecMapFloat32Int8V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[float32]int16:
+		fastpathTV.DecMapFloat32Int16V(v, false, d)
+	case *map[float32]int16:
+		var v2 map[float32]int16
+		v2, changed = fastpathTV.DecMapFloat32Int16V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[float32]int32:
+		fastpathTV.DecMapFloat32Int32V(v, false, d)
+	case *map[float32]int32:
+		var v2 map[float32]int32
+		v2, changed = fastpathTV.DecMapFloat32Int32V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[float32]int64:
+		fastpathTV.DecMapFloat32Int64V(v, false, d)
+	case *map[float32]int64:
+		var v2 map[float32]int64
+		v2, changed = fastpathTV.DecMapFloat32Int64V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[float32]float32:
+		fastpathTV.DecMapFloat32Float32V(v, false, d)
+	case *map[float32]float32:
+		var v2 map[float32]float32
+		v2, changed = fastpathTV.DecMapFloat32Float32V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[float32]float64:
+		fastpathTV.DecMapFloat32Float64V(v, false, d)
+	case *map[float32]float64:
+		var v2 map[float32]float64
+		v2, changed = fastpathTV.DecMapFloat32Float64V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[float32]bool:
+		fastpathTV.DecMapFloat32BoolV(v, false, d)
+	case *map[float32]bool:
+		var v2 map[float32]bool
+		v2, changed = fastpathTV.DecMapFloat32BoolV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[float64]interface{}:
+		fastpathTV.DecMapFloat64IntfV(v, false, d)
+	case *map[float64]interface{}:
+		var v2 map[float64]interface{}
+		v2, changed = fastpathTV.DecMapFloat64IntfV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[float64]string:
+		fastpathTV.DecMapFloat64StringV(v, false, d)
+	case *map[float64]string:
+		var v2 map[float64]string
+		v2, changed = fastpathTV.DecMapFloat64StringV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[float64]uint:
+		fastpathTV.DecMapFloat64UintV(v, false, d)
+	case *map[float64]uint:
+		var v2 map[float64]uint
+		v2, changed = fastpathTV.DecMapFloat64UintV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[float64]uint8:
+		fastpathTV.DecMapFloat64Uint8V(v, false, d)
+	case *map[float64]uint8:
+		var v2 map[float64]uint8
+		v2, changed = fastpathTV.DecMapFloat64Uint8V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[float64]uint16:
+		fastpathTV.DecMapFloat64Uint16V(v, false, d)
+	case *map[float64]uint16:
+		var v2 map[float64]uint16
+		v2, changed = fastpathTV.DecMapFloat64Uint16V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[float64]uint32:
+		fastpathTV.DecMapFloat64Uint32V(v, false, d)
+	case *map[float64]uint32:
+		var v2 map[float64]uint32
+		v2, changed = fastpathTV.DecMapFloat64Uint32V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[float64]uint64:
+		fastpathTV.DecMapFloat64Uint64V(v, false, d)
+	case *map[float64]uint64:
+		var v2 map[float64]uint64
+		v2, changed = fastpathTV.DecMapFloat64Uint64V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[float64]uintptr:
+		fastpathTV.DecMapFloat64UintptrV(v, false, d)
+	case *map[float64]uintptr:
+		var v2 map[float64]uintptr
+		v2, changed = fastpathTV.DecMapFloat64UintptrV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[float64]int:
+		fastpathTV.DecMapFloat64IntV(v, false, d)
+	case *map[float64]int:
+		var v2 map[float64]int
+		v2, changed = fastpathTV.DecMapFloat64IntV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[float64]int8:
+		fastpathTV.DecMapFloat64Int8V(v, false, d)
+	case *map[float64]int8:
+		var v2 map[float64]int8
+		v2, changed = fastpathTV.DecMapFloat64Int8V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[float64]int16:
+		fastpathTV.DecMapFloat64Int16V(v, false, d)
+	case *map[float64]int16:
+		var v2 map[float64]int16
+		v2, changed = fastpathTV.DecMapFloat64Int16V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[float64]int32:
+		fastpathTV.DecMapFloat64Int32V(v, false, d)
+	case *map[float64]int32:
+		var v2 map[float64]int32
+		v2, changed = fastpathTV.DecMapFloat64Int32V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[float64]int64:
+		fastpathTV.DecMapFloat64Int64V(v, false, d)
+	case *map[float64]int64:
+		var v2 map[float64]int64
+		v2, changed = fastpathTV.DecMapFloat64Int64V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[float64]float32:
+		fastpathTV.DecMapFloat64Float32V(v, false, d)
+	case *map[float64]float32:
+		var v2 map[float64]float32
+		v2, changed = fastpathTV.DecMapFloat64Float32V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[float64]float64:
+		fastpathTV.DecMapFloat64Float64V(v, false, d)
+	case *map[float64]float64:
+		var v2 map[float64]float64
+		v2, changed = fastpathTV.DecMapFloat64Float64V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[float64]bool:
+		fastpathTV.DecMapFloat64BoolV(v, false, d)
+	case *map[float64]bool:
+		var v2 map[float64]bool
+		v2, changed = fastpathTV.DecMapFloat64BoolV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint]interface{}:
+		fastpathTV.DecMapUintIntfV(v, false, d)
+	case *map[uint]interface{}:
+		var v2 map[uint]interface{}
+		v2, changed = fastpathTV.DecMapUintIntfV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint]string:
+		fastpathTV.DecMapUintStringV(v, false, d)
+	case *map[uint]string:
+		var v2 map[uint]string
+		v2, changed = fastpathTV.DecMapUintStringV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint]uint:
+		fastpathTV.DecMapUintUintV(v, false, d)
+	case *map[uint]uint:
+		var v2 map[uint]uint
+		v2, changed = fastpathTV.DecMapUintUintV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint]uint8:
+		fastpathTV.DecMapUintUint8V(v, false, d)
+	case *map[uint]uint8:
+		var v2 map[uint]uint8
+		v2, changed = fastpathTV.DecMapUintUint8V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint]uint16:
+		fastpathTV.DecMapUintUint16V(v, false, d)
+	case *map[uint]uint16:
+		var v2 map[uint]uint16
+		v2, changed = fastpathTV.DecMapUintUint16V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint]uint32:
+		fastpathTV.DecMapUintUint32V(v, false, d)
+	case *map[uint]uint32:
+		var v2 map[uint]uint32
+		v2, changed = fastpathTV.DecMapUintUint32V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint]uint64:
+		fastpathTV.DecMapUintUint64V(v, false, d)
+	case *map[uint]uint64:
+		var v2 map[uint]uint64
+		v2, changed = fastpathTV.DecMapUintUint64V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint]uintptr:
+		fastpathTV.DecMapUintUintptrV(v, false, d)
+	case *map[uint]uintptr:
+		var v2 map[uint]uintptr
+		v2, changed = fastpathTV.DecMapUintUintptrV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint]int:
+		fastpathTV.DecMapUintIntV(v, false, d)
+	case *map[uint]int:
+		var v2 map[uint]int
+		v2, changed = fastpathTV.DecMapUintIntV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint]int8:
+		fastpathTV.DecMapUintInt8V(v, false, d)
+	case *map[uint]int8:
+		var v2 map[uint]int8
+		v2, changed = fastpathTV.DecMapUintInt8V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint]int16:
+		fastpathTV.DecMapUintInt16V(v, false, d)
+	case *map[uint]int16:
+		var v2 map[uint]int16
+		v2, changed = fastpathTV.DecMapUintInt16V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint]int32:
+		fastpathTV.DecMapUintInt32V(v, false, d)
+	case *map[uint]int32:
+		var v2 map[uint]int32
+		v2, changed = fastpathTV.DecMapUintInt32V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint]int64:
+		fastpathTV.DecMapUintInt64V(v, false, d)
+	case *map[uint]int64:
+		var v2 map[uint]int64
+		v2, changed = fastpathTV.DecMapUintInt64V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint]float32:
+		fastpathTV.DecMapUintFloat32V(v, false, d)
+	case *map[uint]float32:
+		var v2 map[uint]float32
+		v2, changed = fastpathTV.DecMapUintFloat32V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint]float64:
+		fastpathTV.DecMapUintFloat64V(v, false, d)
+	case *map[uint]float64:
+		var v2 map[uint]float64
+		v2, changed = fastpathTV.DecMapUintFloat64V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint]bool:
+		fastpathTV.DecMapUintBoolV(v, false, d)
+	case *map[uint]bool:
+		var v2 map[uint]bool
+		v2, changed = fastpathTV.DecMapUintBoolV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint8]interface{}:
+		fastpathTV.DecMapUint8IntfV(v, false, d)
+	case *map[uint8]interface{}:
+		var v2 map[uint8]interface{}
+		v2, changed = fastpathTV.DecMapUint8IntfV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint8]string:
+		fastpathTV.DecMapUint8StringV(v, false, d)
+	case *map[uint8]string:
+		var v2 map[uint8]string
+		v2, changed = fastpathTV.DecMapUint8StringV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint8]uint:
+		fastpathTV.DecMapUint8UintV(v, false, d)
+	case *map[uint8]uint:
+		var v2 map[uint8]uint
+		v2, changed = fastpathTV.DecMapUint8UintV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint8]uint8:
+		fastpathTV.DecMapUint8Uint8V(v, false, d)
+	case *map[uint8]uint8:
+		var v2 map[uint8]uint8
+		v2, changed = fastpathTV.DecMapUint8Uint8V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint8]uint16:
+		fastpathTV.DecMapUint8Uint16V(v, false, d)
+	case *map[uint8]uint16:
+		var v2 map[uint8]uint16
+		v2, changed = fastpathTV.DecMapUint8Uint16V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint8]uint32:
+		fastpathTV.DecMapUint8Uint32V(v, false, d)
+	case *map[uint8]uint32:
+		var v2 map[uint8]uint32
+		v2, changed = fastpathTV.DecMapUint8Uint32V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint8]uint64:
+		fastpathTV.DecMapUint8Uint64V(v, false, d)
+	case *map[uint8]uint64:
+		var v2 map[uint8]uint64
+		v2, changed = fastpathTV.DecMapUint8Uint64V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint8]uintptr:
+		fastpathTV.DecMapUint8UintptrV(v, false, d)
+	case *map[uint8]uintptr:
+		var v2 map[uint8]uintptr
+		v2, changed = fastpathTV.DecMapUint8UintptrV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint8]int:
+		fastpathTV.DecMapUint8IntV(v, false, d)
+	case *map[uint8]int:
+		var v2 map[uint8]int
+		v2, changed = fastpathTV.DecMapUint8IntV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint8]int8:
+		fastpathTV.DecMapUint8Int8V(v, false, d)
+	case *map[uint8]int8:
+		var v2 map[uint8]int8
+		v2, changed = fastpathTV.DecMapUint8Int8V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint8]int16:
+		fastpathTV.DecMapUint8Int16V(v, false, d)
+	case *map[uint8]int16:
+		var v2 map[uint8]int16
+		v2, changed = fastpathTV.DecMapUint8Int16V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint8]int32:
+		fastpathTV.DecMapUint8Int32V(v, false, d)
+	case *map[uint8]int32:
+		var v2 map[uint8]int32
+		v2, changed = fastpathTV.DecMapUint8Int32V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint8]int64:
+		fastpathTV.DecMapUint8Int64V(v, false, d)
+	case *map[uint8]int64:
+		var v2 map[uint8]int64
+		v2, changed = fastpathTV.DecMapUint8Int64V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint8]float32:
+		fastpathTV.DecMapUint8Float32V(v, false, d)
+	case *map[uint8]float32:
+		var v2 map[uint8]float32
+		v2, changed = fastpathTV.DecMapUint8Float32V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint8]float64:
+		fastpathTV.DecMapUint8Float64V(v, false, d)
+	case *map[uint8]float64:
+		var v2 map[uint8]float64
+		v2, changed = fastpathTV.DecMapUint8Float64V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint8]bool:
+		fastpathTV.DecMapUint8BoolV(v, false, d)
+	case *map[uint8]bool:
+		var v2 map[uint8]bool
+		v2, changed = fastpathTV.DecMapUint8BoolV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint16]interface{}:
+		fastpathTV.DecMapUint16IntfV(v, false, d)
+	case *map[uint16]interface{}:
+		var v2 map[uint16]interface{}
+		v2, changed = fastpathTV.DecMapUint16IntfV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint16]string:
+		fastpathTV.DecMapUint16StringV(v, false, d)
+	case *map[uint16]string:
+		var v2 map[uint16]string
+		v2, changed = fastpathTV.DecMapUint16StringV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint16]uint:
+		fastpathTV.DecMapUint16UintV(v, false, d)
+	case *map[uint16]uint:
+		var v2 map[uint16]uint
+		v2, changed = fastpathTV.DecMapUint16UintV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint16]uint8:
+		fastpathTV.DecMapUint16Uint8V(v, false, d)
+	case *map[uint16]uint8:
+		var v2 map[uint16]uint8
+		v2, changed = fastpathTV.DecMapUint16Uint8V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint16]uint16:
+		fastpathTV.DecMapUint16Uint16V(v, false, d)
+	case *map[uint16]uint16:
+		var v2 map[uint16]uint16
+		v2, changed = fastpathTV.DecMapUint16Uint16V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint16]uint32:
+		fastpathTV.DecMapUint16Uint32V(v, false, d)
+	case *map[uint16]uint32:
+		var v2 map[uint16]uint32
+		v2, changed = fastpathTV.DecMapUint16Uint32V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint16]uint64:
+		fastpathTV.DecMapUint16Uint64V(v, false, d)
+	case *map[uint16]uint64:
+		var v2 map[uint16]uint64
+		v2, changed = fastpathTV.DecMapUint16Uint64V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint16]uintptr:
+		fastpathTV.DecMapUint16UintptrV(v, false, d)
+	case *map[uint16]uintptr:
+		var v2 map[uint16]uintptr
+		v2, changed = fastpathTV.DecMapUint16UintptrV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint16]int:
+		fastpathTV.DecMapUint16IntV(v, false, d)
+	case *map[uint16]int:
+		var v2 map[uint16]int
+		v2, changed = fastpathTV.DecMapUint16IntV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint16]int8:
+		fastpathTV.DecMapUint16Int8V(v, false, d)
+	case *map[uint16]int8:
+		var v2 map[uint16]int8
+		v2, changed = fastpathTV.DecMapUint16Int8V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint16]int16:
+		fastpathTV.DecMapUint16Int16V(v, false, d)
+	case *map[uint16]int16:
+		var v2 map[uint16]int16
+		v2, changed = fastpathTV.DecMapUint16Int16V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint16]int32:
+		fastpathTV.DecMapUint16Int32V(v, false, d)
+	case *map[uint16]int32:
+		var v2 map[uint16]int32
+		v2, changed = fastpathTV.DecMapUint16Int32V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint16]int64:
+		fastpathTV.DecMapUint16Int64V(v, false, d)
+	case *map[uint16]int64:
+		var v2 map[uint16]int64
+		v2, changed = fastpathTV.DecMapUint16Int64V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint16]float32:
+		fastpathTV.DecMapUint16Float32V(v, false, d)
+	case *map[uint16]float32:
+		var v2 map[uint16]float32
+		v2, changed = fastpathTV.DecMapUint16Float32V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint16]float64:
+		fastpathTV.DecMapUint16Float64V(v, false, d)
+	case *map[uint16]float64:
+		var v2 map[uint16]float64
+		v2, changed = fastpathTV.DecMapUint16Float64V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint16]bool:
+		fastpathTV.DecMapUint16BoolV(v, false, d)
+	case *map[uint16]bool:
+		var v2 map[uint16]bool
+		v2, changed = fastpathTV.DecMapUint16BoolV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint32]interface{}:
+		fastpathTV.DecMapUint32IntfV(v, false, d)
+	case *map[uint32]interface{}:
+		var v2 map[uint32]interface{}
+		v2, changed = fastpathTV.DecMapUint32IntfV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint32]string:
+		fastpathTV.DecMapUint32StringV(v, false, d)
+	case *map[uint32]string:
+		var v2 map[uint32]string
+		v2, changed = fastpathTV.DecMapUint32StringV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint32]uint:
+		fastpathTV.DecMapUint32UintV(v, false, d)
+	case *map[uint32]uint:
+		var v2 map[uint32]uint
+		v2, changed = fastpathTV.DecMapUint32UintV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint32]uint8:
+		fastpathTV.DecMapUint32Uint8V(v, false, d)
+	case *map[uint32]uint8:
+		var v2 map[uint32]uint8
+		v2, changed = fastpathTV.DecMapUint32Uint8V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint32]uint16:
+		fastpathTV.DecMapUint32Uint16V(v, false, d)
+	case *map[uint32]uint16:
+		var v2 map[uint32]uint16
+		v2, changed = fastpathTV.DecMapUint32Uint16V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint32]uint32:
+		fastpathTV.DecMapUint32Uint32V(v, false, d)
+	case *map[uint32]uint32:
+		var v2 map[uint32]uint32
+		v2, changed = fastpathTV.DecMapUint32Uint32V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint32]uint64:
+		fastpathTV.DecMapUint32Uint64V(v, false, d)
+	case *map[uint32]uint64:
+		var v2 map[uint32]uint64
+		v2, changed = fastpathTV.DecMapUint32Uint64V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint32]uintptr:
+		fastpathTV.DecMapUint32UintptrV(v, false, d)
+	case *map[uint32]uintptr:
+		var v2 map[uint32]uintptr
+		v2, changed = fastpathTV.DecMapUint32UintptrV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint32]int:
+		fastpathTV.DecMapUint32IntV(v, false, d)
+	case *map[uint32]int:
+		var v2 map[uint32]int
+		v2, changed = fastpathTV.DecMapUint32IntV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint32]int8:
+		fastpathTV.DecMapUint32Int8V(v, false, d)
+	case *map[uint32]int8:
+		var v2 map[uint32]int8
+		v2, changed = fastpathTV.DecMapUint32Int8V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint32]int16:
+		fastpathTV.DecMapUint32Int16V(v, false, d)
+	case *map[uint32]int16:
+		var v2 map[uint32]int16
+		v2, changed = fastpathTV.DecMapUint32Int16V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint32]int32:
+		fastpathTV.DecMapUint32Int32V(v, false, d)
+	case *map[uint32]int32:
+		var v2 map[uint32]int32
+		v2, changed = fastpathTV.DecMapUint32Int32V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint32]int64:
+		fastpathTV.DecMapUint32Int64V(v, false, d)
+	case *map[uint32]int64:
+		var v2 map[uint32]int64
+		v2, changed = fastpathTV.DecMapUint32Int64V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint32]float32:
+		fastpathTV.DecMapUint32Float32V(v, false, d)
+	case *map[uint32]float32:
+		var v2 map[uint32]float32
+		v2, changed = fastpathTV.DecMapUint32Float32V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint32]float64:
+		fastpathTV.DecMapUint32Float64V(v, false, d)
+	case *map[uint32]float64:
+		var v2 map[uint32]float64
+		v2, changed = fastpathTV.DecMapUint32Float64V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint32]bool:
+		fastpathTV.DecMapUint32BoolV(v, false, d)
+	case *map[uint32]bool:
+		var v2 map[uint32]bool
+		v2, changed = fastpathTV.DecMapUint32BoolV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint64]interface{}:
+		fastpathTV.DecMapUint64IntfV(v, false, d)
+	case *map[uint64]interface{}:
+		var v2 map[uint64]interface{}
+		v2, changed = fastpathTV.DecMapUint64IntfV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint64]string:
+		fastpathTV.DecMapUint64StringV(v, false, d)
+	case *map[uint64]string:
+		var v2 map[uint64]string
+		v2, changed = fastpathTV.DecMapUint64StringV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint64]uint:
+		fastpathTV.DecMapUint64UintV(v, false, d)
+	case *map[uint64]uint:
+		var v2 map[uint64]uint
+		v2, changed = fastpathTV.DecMapUint64UintV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint64]uint8:
+		fastpathTV.DecMapUint64Uint8V(v, false, d)
+	case *map[uint64]uint8:
+		var v2 map[uint64]uint8
+		v2, changed = fastpathTV.DecMapUint64Uint8V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint64]uint16:
+		fastpathTV.DecMapUint64Uint16V(v, false, d)
+	case *map[uint64]uint16:
+		var v2 map[uint64]uint16
+		v2, changed = fastpathTV.DecMapUint64Uint16V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint64]uint32:
+		fastpathTV.DecMapUint64Uint32V(v, false, d)
+	case *map[uint64]uint32:
+		var v2 map[uint64]uint32
+		v2, changed = fastpathTV.DecMapUint64Uint32V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint64]uint64:
+		fastpathTV.DecMapUint64Uint64V(v, false, d)
+	case *map[uint64]uint64:
+		var v2 map[uint64]uint64
+		v2, changed = fastpathTV.DecMapUint64Uint64V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint64]uintptr:
+		fastpathTV.DecMapUint64UintptrV(v, false, d)
+	case *map[uint64]uintptr:
+		var v2 map[uint64]uintptr
+		v2, changed = fastpathTV.DecMapUint64UintptrV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint64]int:
+		fastpathTV.DecMapUint64IntV(v, false, d)
+	case *map[uint64]int:
+		var v2 map[uint64]int
+		v2, changed = fastpathTV.DecMapUint64IntV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint64]int8:
+		fastpathTV.DecMapUint64Int8V(v, false, d)
+	case *map[uint64]int8:
+		var v2 map[uint64]int8
+		v2, changed = fastpathTV.DecMapUint64Int8V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint64]int16:
+		fastpathTV.DecMapUint64Int16V(v, false, d)
+	case *map[uint64]int16:
+		var v2 map[uint64]int16
+		v2, changed = fastpathTV.DecMapUint64Int16V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint64]int32:
+		fastpathTV.DecMapUint64Int32V(v, false, d)
+	case *map[uint64]int32:
+		var v2 map[uint64]int32
+		v2, changed = fastpathTV.DecMapUint64Int32V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint64]int64:
+		fastpathTV.DecMapUint64Int64V(v, false, d)
+	case *map[uint64]int64:
+		var v2 map[uint64]int64
+		v2, changed = fastpathTV.DecMapUint64Int64V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint64]float32:
+		fastpathTV.DecMapUint64Float32V(v, false, d)
+	case *map[uint64]float32:
+		var v2 map[uint64]float32
+		v2, changed = fastpathTV.DecMapUint64Float32V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint64]float64:
+		fastpathTV.DecMapUint64Float64V(v, false, d)
+	case *map[uint64]float64:
+		var v2 map[uint64]float64
+		v2, changed = fastpathTV.DecMapUint64Float64V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uint64]bool:
+		fastpathTV.DecMapUint64BoolV(v, false, d)
+	case *map[uint64]bool:
+		var v2 map[uint64]bool
+		v2, changed = fastpathTV.DecMapUint64BoolV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uintptr]interface{}:
+		fastpathTV.DecMapUintptrIntfV(v, false, d)
+	case *map[uintptr]interface{}:
+		var v2 map[uintptr]interface{}
+		v2, changed = fastpathTV.DecMapUintptrIntfV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uintptr]string:
+		fastpathTV.DecMapUintptrStringV(v, false, d)
+	case *map[uintptr]string:
+		var v2 map[uintptr]string
+		v2, changed = fastpathTV.DecMapUintptrStringV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uintptr]uint:
+		fastpathTV.DecMapUintptrUintV(v, false, d)
+	case *map[uintptr]uint:
+		var v2 map[uintptr]uint
+		v2, changed = fastpathTV.DecMapUintptrUintV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uintptr]uint8:
+		fastpathTV.DecMapUintptrUint8V(v, false, d)
+	case *map[uintptr]uint8:
+		var v2 map[uintptr]uint8
+		v2, changed = fastpathTV.DecMapUintptrUint8V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uintptr]uint16:
+		fastpathTV.DecMapUintptrUint16V(v, false, d)
+	case *map[uintptr]uint16:
+		var v2 map[uintptr]uint16
+		v2, changed = fastpathTV.DecMapUintptrUint16V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uintptr]uint32:
+		fastpathTV.DecMapUintptrUint32V(v, false, d)
+	case *map[uintptr]uint32:
+		var v2 map[uintptr]uint32
+		v2, changed = fastpathTV.DecMapUintptrUint32V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uintptr]uint64:
+		fastpathTV.DecMapUintptrUint64V(v, false, d)
+	case *map[uintptr]uint64:
+		var v2 map[uintptr]uint64
+		v2, changed = fastpathTV.DecMapUintptrUint64V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uintptr]uintptr:
+		fastpathTV.DecMapUintptrUintptrV(v, false, d)
+	case *map[uintptr]uintptr:
+		var v2 map[uintptr]uintptr
+		v2, changed = fastpathTV.DecMapUintptrUintptrV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uintptr]int:
+		fastpathTV.DecMapUintptrIntV(v, false, d)
+	case *map[uintptr]int:
+		var v2 map[uintptr]int
+		v2, changed = fastpathTV.DecMapUintptrIntV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uintptr]int8:
+		fastpathTV.DecMapUintptrInt8V(v, false, d)
+	case *map[uintptr]int8:
+		var v2 map[uintptr]int8
+		v2, changed = fastpathTV.DecMapUintptrInt8V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uintptr]int16:
+		fastpathTV.DecMapUintptrInt16V(v, false, d)
+	case *map[uintptr]int16:
+		var v2 map[uintptr]int16
+		v2, changed = fastpathTV.DecMapUintptrInt16V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uintptr]int32:
+		fastpathTV.DecMapUintptrInt32V(v, false, d)
+	case *map[uintptr]int32:
+		var v2 map[uintptr]int32
+		v2, changed = fastpathTV.DecMapUintptrInt32V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uintptr]int64:
+		fastpathTV.DecMapUintptrInt64V(v, false, d)
+	case *map[uintptr]int64:
+		var v2 map[uintptr]int64
+		v2, changed = fastpathTV.DecMapUintptrInt64V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uintptr]float32:
+		fastpathTV.DecMapUintptrFloat32V(v, false, d)
+	case *map[uintptr]float32:
+		var v2 map[uintptr]float32
+		v2, changed = fastpathTV.DecMapUintptrFloat32V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uintptr]float64:
+		fastpathTV.DecMapUintptrFloat64V(v, false, d)
+	case *map[uintptr]float64:
+		var v2 map[uintptr]float64
+		v2, changed = fastpathTV.DecMapUintptrFloat64V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[uintptr]bool:
+		fastpathTV.DecMapUintptrBoolV(v, false, d)
+	case *map[uintptr]bool:
+		var v2 map[uintptr]bool
+		v2, changed = fastpathTV.DecMapUintptrBoolV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int]interface{}:
+		fastpathTV.DecMapIntIntfV(v, false, d)
+	case *map[int]interface{}:
+		var v2 map[int]interface{}
+		v2, changed = fastpathTV.DecMapIntIntfV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int]string:
+		fastpathTV.DecMapIntStringV(v, false, d)
+	case *map[int]string:
+		var v2 map[int]string
+		v2, changed = fastpathTV.DecMapIntStringV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int]uint:
+		fastpathTV.DecMapIntUintV(v, false, d)
+	case *map[int]uint:
+		var v2 map[int]uint
+		v2, changed = fastpathTV.DecMapIntUintV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int]uint8:
+		fastpathTV.DecMapIntUint8V(v, false, d)
+	case *map[int]uint8:
+		var v2 map[int]uint8
+		v2, changed = fastpathTV.DecMapIntUint8V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int]uint16:
+		fastpathTV.DecMapIntUint16V(v, false, d)
+	case *map[int]uint16:
+		var v2 map[int]uint16
+		v2, changed = fastpathTV.DecMapIntUint16V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int]uint32:
+		fastpathTV.DecMapIntUint32V(v, false, d)
+	case *map[int]uint32:
+		var v2 map[int]uint32
+		v2, changed = fastpathTV.DecMapIntUint32V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int]uint64:
+		fastpathTV.DecMapIntUint64V(v, false, d)
+	case *map[int]uint64:
+		var v2 map[int]uint64
+		v2, changed = fastpathTV.DecMapIntUint64V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int]uintptr:
+		fastpathTV.DecMapIntUintptrV(v, false, d)
+	case *map[int]uintptr:
+		var v2 map[int]uintptr
+		v2, changed = fastpathTV.DecMapIntUintptrV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int]int:
+		fastpathTV.DecMapIntIntV(v, false, d)
+	case *map[int]int:
+		var v2 map[int]int
+		v2, changed = fastpathTV.DecMapIntIntV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int]int8:
+		fastpathTV.DecMapIntInt8V(v, false, d)
+	case *map[int]int8:
+		var v2 map[int]int8
+		v2, changed = fastpathTV.DecMapIntInt8V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int]int16:
+		fastpathTV.DecMapIntInt16V(v, false, d)
+	case *map[int]int16:
+		var v2 map[int]int16
+		v2, changed = fastpathTV.DecMapIntInt16V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int]int32:
+		fastpathTV.DecMapIntInt32V(v, false, d)
+	case *map[int]int32:
+		var v2 map[int]int32
+		v2, changed = fastpathTV.DecMapIntInt32V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int]int64:
+		fastpathTV.DecMapIntInt64V(v, false, d)
+	case *map[int]int64:
+		var v2 map[int]int64
+		v2, changed = fastpathTV.DecMapIntInt64V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int]float32:
+		fastpathTV.DecMapIntFloat32V(v, false, d)
+	case *map[int]float32:
+		var v2 map[int]float32
+		v2, changed = fastpathTV.DecMapIntFloat32V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int]float64:
+		fastpathTV.DecMapIntFloat64V(v, false, d)
+	case *map[int]float64:
+		var v2 map[int]float64
+		v2, changed = fastpathTV.DecMapIntFloat64V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int]bool:
+		fastpathTV.DecMapIntBoolV(v, false, d)
+	case *map[int]bool:
+		var v2 map[int]bool
+		v2, changed = fastpathTV.DecMapIntBoolV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int8]interface{}:
+		fastpathTV.DecMapInt8IntfV(v, false, d)
+	case *map[int8]interface{}:
+		var v2 map[int8]interface{}
+		v2, changed = fastpathTV.DecMapInt8IntfV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int8]string:
+		fastpathTV.DecMapInt8StringV(v, false, d)
+	case *map[int8]string:
+		var v2 map[int8]string
+		v2, changed = fastpathTV.DecMapInt8StringV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int8]uint:
+		fastpathTV.DecMapInt8UintV(v, false, d)
+	case *map[int8]uint:
+		var v2 map[int8]uint
+		v2, changed = fastpathTV.DecMapInt8UintV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int8]uint8:
+		fastpathTV.DecMapInt8Uint8V(v, false, d)
+	case *map[int8]uint8:
+		var v2 map[int8]uint8
+		v2, changed = fastpathTV.DecMapInt8Uint8V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int8]uint16:
+		fastpathTV.DecMapInt8Uint16V(v, false, d)
+	case *map[int8]uint16:
+		var v2 map[int8]uint16
+		v2, changed = fastpathTV.DecMapInt8Uint16V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int8]uint32:
+		fastpathTV.DecMapInt8Uint32V(v, false, d)
+	case *map[int8]uint32:
+		var v2 map[int8]uint32
+		v2, changed = fastpathTV.DecMapInt8Uint32V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int8]uint64:
+		fastpathTV.DecMapInt8Uint64V(v, false, d)
+	case *map[int8]uint64:
+		var v2 map[int8]uint64
+		v2, changed = fastpathTV.DecMapInt8Uint64V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int8]uintptr:
+		fastpathTV.DecMapInt8UintptrV(v, false, d)
+	case *map[int8]uintptr:
+		var v2 map[int8]uintptr
+		v2, changed = fastpathTV.DecMapInt8UintptrV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int8]int:
+		fastpathTV.DecMapInt8IntV(v, false, d)
+	case *map[int8]int:
+		var v2 map[int8]int
+		v2, changed = fastpathTV.DecMapInt8IntV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int8]int8:
+		fastpathTV.DecMapInt8Int8V(v, false, d)
+	case *map[int8]int8:
+		var v2 map[int8]int8
+		v2, changed = fastpathTV.DecMapInt8Int8V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int8]int16:
+		fastpathTV.DecMapInt8Int16V(v, false, d)
+	case *map[int8]int16:
+		var v2 map[int8]int16
+		v2, changed = fastpathTV.DecMapInt8Int16V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int8]int32:
+		fastpathTV.DecMapInt8Int32V(v, false, d)
+	case *map[int8]int32:
+		var v2 map[int8]int32
+		v2, changed = fastpathTV.DecMapInt8Int32V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int8]int64:
+		fastpathTV.DecMapInt8Int64V(v, false, d)
+	case *map[int8]int64:
+		var v2 map[int8]int64
+		v2, changed = fastpathTV.DecMapInt8Int64V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int8]float32:
+		fastpathTV.DecMapInt8Float32V(v, false, d)
+	case *map[int8]float32:
+		var v2 map[int8]float32
+		v2, changed = fastpathTV.DecMapInt8Float32V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int8]float64:
+		fastpathTV.DecMapInt8Float64V(v, false, d)
+	case *map[int8]float64:
+		var v2 map[int8]float64
+		v2, changed = fastpathTV.DecMapInt8Float64V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int8]bool:
+		fastpathTV.DecMapInt8BoolV(v, false, d)
+	case *map[int8]bool:
+		var v2 map[int8]bool
+		v2, changed = fastpathTV.DecMapInt8BoolV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int16]interface{}:
+		fastpathTV.DecMapInt16IntfV(v, false, d)
+	case *map[int16]interface{}:
+		var v2 map[int16]interface{}
+		v2, changed = fastpathTV.DecMapInt16IntfV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int16]string:
+		fastpathTV.DecMapInt16StringV(v, false, d)
+	case *map[int16]string:
+		var v2 map[int16]string
+		v2, changed = fastpathTV.DecMapInt16StringV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int16]uint:
+		fastpathTV.DecMapInt16UintV(v, false, d)
+	case *map[int16]uint:
+		var v2 map[int16]uint
+		v2, changed = fastpathTV.DecMapInt16UintV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int16]uint8:
+		fastpathTV.DecMapInt16Uint8V(v, false, d)
+	case *map[int16]uint8:
+		var v2 map[int16]uint8
+		v2, changed = fastpathTV.DecMapInt16Uint8V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int16]uint16:
+		fastpathTV.DecMapInt16Uint16V(v, false, d)
+	case *map[int16]uint16:
+		var v2 map[int16]uint16
+		v2, changed = fastpathTV.DecMapInt16Uint16V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int16]uint32:
+		fastpathTV.DecMapInt16Uint32V(v, false, d)
+	case *map[int16]uint32:
+		var v2 map[int16]uint32
+		v2, changed = fastpathTV.DecMapInt16Uint32V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int16]uint64:
+		fastpathTV.DecMapInt16Uint64V(v, false, d)
+	case *map[int16]uint64:
+		var v2 map[int16]uint64
+		v2, changed = fastpathTV.DecMapInt16Uint64V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int16]uintptr:
+		fastpathTV.DecMapInt16UintptrV(v, false, d)
+	case *map[int16]uintptr:
+		var v2 map[int16]uintptr
+		v2, changed = fastpathTV.DecMapInt16UintptrV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int16]int:
+		fastpathTV.DecMapInt16IntV(v, false, d)
+	case *map[int16]int:
+		var v2 map[int16]int
+		v2, changed = fastpathTV.DecMapInt16IntV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int16]int8:
+		fastpathTV.DecMapInt16Int8V(v, false, d)
+	case *map[int16]int8:
+		var v2 map[int16]int8
+		v2, changed = fastpathTV.DecMapInt16Int8V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int16]int16:
+		fastpathTV.DecMapInt16Int16V(v, false, d)
+	case *map[int16]int16:
+		var v2 map[int16]int16
+		v2, changed = fastpathTV.DecMapInt16Int16V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int16]int32:
+		fastpathTV.DecMapInt16Int32V(v, false, d)
+	case *map[int16]int32:
+		var v2 map[int16]int32
+		v2, changed = fastpathTV.DecMapInt16Int32V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int16]int64:
+		fastpathTV.DecMapInt16Int64V(v, false, d)
+	case *map[int16]int64:
+		var v2 map[int16]int64
+		v2, changed = fastpathTV.DecMapInt16Int64V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int16]float32:
+		fastpathTV.DecMapInt16Float32V(v, false, d)
+	case *map[int16]float32:
+		var v2 map[int16]float32
+		v2, changed = fastpathTV.DecMapInt16Float32V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int16]float64:
+		fastpathTV.DecMapInt16Float64V(v, false, d)
+	case *map[int16]float64:
+		var v2 map[int16]float64
+		v2, changed = fastpathTV.DecMapInt16Float64V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int16]bool:
+		fastpathTV.DecMapInt16BoolV(v, false, d)
+	case *map[int16]bool:
+		var v2 map[int16]bool
+		v2, changed = fastpathTV.DecMapInt16BoolV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int32]interface{}:
+		fastpathTV.DecMapInt32IntfV(v, false, d)
+	case *map[int32]interface{}:
+		var v2 map[int32]interface{}
+		v2, changed = fastpathTV.DecMapInt32IntfV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int32]string:
+		fastpathTV.DecMapInt32StringV(v, false, d)
+	case *map[int32]string:
+		var v2 map[int32]string
+		v2, changed = fastpathTV.DecMapInt32StringV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int32]uint:
+		fastpathTV.DecMapInt32UintV(v, false, d)
+	case *map[int32]uint:
+		var v2 map[int32]uint
+		v2, changed = fastpathTV.DecMapInt32UintV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int32]uint8:
+		fastpathTV.DecMapInt32Uint8V(v, false, d)
+	case *map[int32]uint8:
+		var v2 map[int32]uint8
+		v2, changed = fastpathTV.DecMapInt32Uint8V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int32]uint16:
+		fastpathTV.DecMapInt32Uint16V(v, false, d)
+	case *map[int32]uint16:
+		var v2 map[int32]uint16
+		v2, changed = fastpathTV.DecMapInt32Uint16V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int32]uint32:
+		fastpathTV.DecMapInt32Uint32V(v, false, d)
+	case *map[int32]uint32:
+		var v2 map[int32]uint32
+		v2, changed = fastpathTV.DecMapInt32Uint32V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int32]uint64:
+		fastpathTV.DecMapInt32Uint64V(v, false, d)
+	case *map[int32]uint64:
+		var v2 map[int32]uint64
+		v2, changed = fastpathTV.DecMapInt32Uint64V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int32]uintptr:
+		fastpathTV.DecMapInt32UintptrV(v, false, d)
+	case *map[int32]uintptr:
+		var v2 map[int32]uintptr
+		v2, changed = fastpathTV.DecMapInt32UintptrV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int32]int:
+		fastpathTV.DecMapInt32IntV(v, false, d)
+	case *map[int32]int:
+		var v2 map[int32]int
+		v2, changed = fastpathTV.DecMapInt32IntV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int32]int8:
+		fastpathTV.DecMapInt32Int8V(v, false, d)
+	case *map[int32]int8:
+		var v2 map[int32]int8
+		v2, changed = fastpathTV.DecMapInt32Int8V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int32]int16:
+		fastpathTV.DecMapInt32Int16V(v, false, d)
+	case *map[int32]int16:
+		var v2 map[int32]int16
+		v2, changed = fastpathTV.DecMapInt32Int16V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int32]int32:
+		fastpathTV.DecMapInt32Int32V(v, false, d)
+	case *map[int32]int32:
+		var v2 map[int32]int32
+		v2, changed = fastpathTV.DecMapInt32Int32V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int32]int64:
+		fastpathTV.DecMapInt32Int64V(v, false, d)
+	case *map[int32]int64:
+		var v2 map[int32]int64
+		v2, changed = fastpathTV.DecMapInt32Int64V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int32]float32:
+		fastpathTV.DecMapInt32Float32V(v, false, d)
+	case *map[int32]float32:
+		var v2 map[int32]float32
+		v2, changed = fastpathTV.DecMapInt32Float32V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int32]float64:
+		fastpathTV.DecMapInt32Float64V(v, false, d)
+	case *map[int32]float64:
+		var v2 map[int32]float64
+		v2, changed = fastpathTV.DecMapInt32Float64V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int32]bool:
+		fastpathTV.DecMapInt32BoolV(v, false, d)
+	case *map[int32]bool:
+		var v2 map[int32]bool
+		v2, changed = fastpathTV.DecMapInt32BoolV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int64]interface{}:
+		fastpathTV.DecMapInt64IntfV(v, false, d)
+	case *map[int64]interface{}:
+		var v2 map[int64]interface{}
+		v2, changed = fastpathTV.DecMapInt64IntfV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int64]string:
+		fastpathTV.DecMapInt64StringV(v, false, d)
+	case *map[int64]string:
+		var v2 map[int64]string
+		v2, changed = fastpathTV.DecMapInt64StringV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int64]uint:
+		fastpathTV.DecMapInt64UintV(v, false, d)
+	case *map[int64]uint:
+		var v2 map[int64]uint
+		v2, changed = fastpathTV.DecMapInt64UintV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int64]uint8:
+		fastpathTV.DecMapInt64Uint8V(v, false, d)
+	case *map[int64]uint8:
+		var v2 map[int64]uint8
+		v2, changed = fastpathTV.DecMapInt64Uint8V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int64]uint16:
+		fastpathTV.DecMapInt64Uint16V(v, false, d)
+	case *map[int64]uint16:
+		var v2 map[int64]uint16
+		v2, changed = fastpathTV.DecMapInt64Uint16V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int64]uint32:
+		fastpathTV.DecMapInt64Uint32V(v, false, d)
+	case *map[int64]uint32:
+		var v2 map[int64]uint32
+		v2, changed = fastpathTV.DecMapInt64Uint32V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int64]uint64:
+		fastpathTV.DecMapInt64Uint64V(v, false, d)
+	case *map[int64]uint64:
+		var v2 map[int64]uint64
+		v2, changed = fastpathTV.DecMapInt64Uint64V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int64]uintptr:
+		fastpathTV.DecMapInt64UintptrV(v, false, d)
+	case *map[int64]uintptr:
+		var v2 map[int64]uintptr
+		v2, changed = fastpathTV.DecMapInt64UintptrV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int64]int:
+		fastpathTV.DecMapInt64IntV(v, false, d)
+	case *map[int64]int:
+		var v2 map[int64]int
+		v2, changed = fastpathTV.DecMapInt64IntV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int64]int8:
+		fastpathTV.DecMapInt64Int8V(v, false, d)
+	case *map[int64]int8:
+		var v2 map[int64]int8
+		v2, changed = fastpathTV.DecMapInt64Int8V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int64]int16:
+		fastpathTV.DecMapInt64Int16V(v, false, d)
+	case *map[int64]int16:
+		var v2 map[int64]int16
+		v2, changed = fastpathTV.DecMapInt64Int16V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int64]int32:
+		fastpathTV.DecMapInt64Int32V(v, false, d)
+	case *map[int64]int32:
+		var v2 map[int64]int32
+		v2, changed = fastpathTV.DecMapInt64Int32V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int64]int64:
+		fastpathTV.DecMapInt64Int64V(v, false, d)
+	case *map[int64]int64:
+		var v2 map[int64]int64
+		v2, changed = fastpathTV.DecMapInt64Int64V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int64]float32:
+		fastpathTV.DecMapInt64Float32V(v, false, d)
+	case *map[int64]float32:
+		var v2 map[int64]float32
+		v2, changed = fastpathTV.DecMapInt64Float32V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int64]float64:
+		fastpathTV.DecMapInt64Float64V(v, false, d)
+	case *map[int64]float64:
+		var v2 map[int64]float64
+		v2, changed = fastpathTV.DecMapInt64Float64V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[int64]bool:
+		fastpathTV.DecMapInt64BoolV(v, false, d)
+	case *map[int64]bool:
+		var v2 map[int64]bool
+		v2, changed = fastpathTV.DecMapInt64BoolV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[bool]interface{}:
+		fastpathTV.DecMapBoolIntfV(v, false, d)
+	case *map[bool]interface{}:
+		var v2 map[bool]interface{}
+		v2, changed = fastpathTV.DecMapBoolIntfV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[bool]string:
+		fastpathTV.DecMapBoolStringV(v, false, d)
+	case *map[bool]string:
+		var v2 map[bool]string
+		v2, changed = fastpathTV.DecMapBoolStringV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[bool]uint:
+		fastpathTV.DecMapBoolUintV(v, false, d)
+	case *map[bool]uint:
+		var v2 map[bool]uint
+		v2, changed = fastpathTV.DecMapBoolUintV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[bool]uint8:
+		fastpathTV.DecMapBoolUint8V(v, false, d)
+	case *map[bool]uint8:
+		var v2 map[bool]uint8
+		v2, changed = fastpathTV.DecMapBoolUint8V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[bool]uint16:
+		fastpathTV.DecMapBoolUint16V(v, false, d)
+	case *map[bool]uint16:
+		var v2 map[bool]uint16
+		v2, changed = fastpathTV.DecMapBoolUint16V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[bool]uint32:
+		fastpathTV.DecMapBoolUint32V(v, false, d)
+	case *map[bool]uint32:
+		var v2 map[bool]uint32
+		v2, changed = fastpathTV.DecMapBoolUint32V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[bool]uint64:
+		fastpathTV.DecMapBoolUint64V(v, false, d)
+	case *map[bool]uint64:
+		var v2 map[bool]uint64
+		v2, changed = fastpathTV.DecMapBoolUint64V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[bool]uintptr:
+		fastpathTV.DecMapBoolUintptrV(v, false, d)
+	case *map[bool]uintptr:
+		var v2 map[bool]uintptr
+		v2, changed = fastpathTV.DecMapBoolUintptrV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[bool]int:
+		fastpathTV.DecMapBoolIntV(v, false, d)
+	case *map[bool]int:
+		var v2 map[bool]int
+		v2, changed = fastpathTV.DecMapBoolIntV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[bool]int8:
+		fastpathTV.DecMapBoolInt8V(v, false, d)
+	case *map[bool]int8:
+		var v2 map[bool]int8
+		v2, changed = fastpathTV.DecMapBoolInt8V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[bool]int16:
+		fastpathTV.DecMapBoolInt16V(v, false, d)
+	case *map[bool]int16:
+		var v2 map[bool]int16
+		v2, changed = fastpathTV.DecMapBoolInt16V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[bool]int32:
+		fastpathTV.DecMapBoolInt32V(v, false, d)
+	case *map[bool]int32:
+		var v2 map[bool]int32
+		v2, changed = fastpathTV.DecMapBoolInt32V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[bool]int64:
+		fastpathTV.DecMapBoolInt64V(v, false, d)
+	case *map[bool]int64:
+		var v2 map[bool]int64
+		v2, changed = fastpathTV.DecMapBoolInt64V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[bool]float32:
+		fastpathTV.DecMapBoolFloat32V(v, false, d)
+	case *map[bool]float32:
+		var v2 map[bool]float32
+		v2, changed = fastpathTV.DecMapBoolFloat32V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[bool]float64:
+		fastpathTV.DecMapBoolFloat64V(v, false, d)
+	case *map[bool]float64:
+		var v2 map[bool]float64
+		v2, changed = fastpathTV.DecMapBoolFloat64V(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	case map[bool]bool:
+		fastpathTV.DecMapBoolBoolV(v, false, d)
+	case *map[bool]bool:
+		var v2 map[bool]bool
+		v2, changed = fastpathTV.DecMapBoolBoolV(*v, true, d)
+		if changed {
+			*v = v2
+		}
+	default:
+		_ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4
+		return false
+	}
+	return true
+}
+
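+// fastpathDecodeSetZeroTypeSwitch is the zeroing counterpart of
+// fastpathDecodeTypeSwitch: when iv is a pointer to one of the generated
+// fast-path slice or map types, it resets the pointed-to value to nil and
+// returns true. It returns false for unhandled types, letting the caller
+// fall back to the reflection-based path.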
+func fastpathDecodeSetZeroTypeSwitch(iv interface{}) bool {
+	switch v := iv.(type) {
+
+	case *[]interface{}:
+		*v = nil
+	case *[]string:
+		*v = nil
+	case *[]float32:
+		*v = nil
+	case *[]float64:
+		*v = nil
+	case *[]uint:
+		*v = nil
+	case *[]uint8:
+		*v = nil
+	case *[]uint16:
+		*v = nil
+	case *[]uint32:
+		*v = nil
+	case *[]uint64:
+		*v = nil
+	case *[]uintptr:
+		*v = nil
+	case *[]int:
+		*v = nil
+	case *[]int8:
+		*v = nil
+	case *[]int16:
+		*v = nil
+	case *[]int32:
+		*v = nil
+	case *[]int64:
+		*v = nil
+	case *[]bool:
+		*v = nil
+
+	case *map[interface{}]interface{}:
+		*v = nil
+	case *map[interface{}]string:
+		*v = nil
+	case *map[interface{}]uint:
+		*v = nil
+	case *map[interface{}]uint8:
+		*v = nil
+	case *map[interface{}]uint16:
+		*v = nil
+	case *map[interface{}]uint32:
+		*v = nil
+	case *map[interface{}]uint64:
+		*v = nil
+	case *map[interface{}]uintptr:
+		*v = nil
+	case *map[interface{}]int:
+		*v = nil
+	case *map[interface{}]int8:
+		*v = nil
+	case *map[interface{}]int16:
+		*v = nil
+	case *map[interface{}]int32:
+		*v = nil
+	case *map[interface{}]int64:
+		*v = nil
+	case *map[interface{}]float32:
+		*v = nil
+	case *map[interface{}]float64:
+		*v = nil
+	case *map[interface{}]bool:
+		*v = nil
+	case *map[string]interface{}:
+		*v = nil
+	case *map[string]string:
+		*v = nil
+	case *map[string]uint:
+		*v = nil
+	case *map[string]uint8:
+		*v = nil
+	case *map[string]uint16:
+		*v = nil
+	case *map[string]uint32:
+		*v = nil
+	case *map[string]uint64:
+		*v = nil
+	case *map[string]uintptr:
+		*v = nil
+	case *map[string]int:
+		*v = nil
+	case *map[string]int8:
+		*v = nil
+	case *map[string]int16:
+		*v = nil
+	case *map[string]int32:
+		*v = nil
+	case *map[string]int64:
+		*v = nil
+	case *map[string]float32:
+		*v = nil
+	case *map[string]float64:
+		*v = nil
+	case *map[string]bool:
+		*v = nil
+	case *map[float32]interface{}:
+		*v = nil
+	case *map[float32]string:
+		*v = nil
+	case *map[float32]uint:
+		*v = nil
+	case *map[float32]uint8:
+		*v = nil
+	case *map[float32]uint16:
+		*v = nil
+	case *map[float32]uint32:
+		*v = nil
+	case *map[float32]uint64:
+		*v = nil
+	case *map[float32]uintptr:
+		*v = nil
+	case *map[float32]int:
+		*v = nil
+	case *map[float32]int8:
+		*v = nil
+	case *map[float32]int16:
+		*v = nil
+	case *map[float32]int32:
+		*v = nil
+	case *map[float32]int64:
+		*v = nil
+	case *map[float32]float32:
+		*v = nil
+	case *map[float32]float64:
+		*v = nil
+	case *map[float32]bool:
+		*v = nil
+	case *map[float64]interface{}:
+		*v = nil
+	case *map[float64]string:
+		*v = nil
+	case *map[float64]uint:
+		*v = nil
+	case *map[float64]uint8:
+		*v = nil
+	case *map[float64]uint16:
+		*v = nil
+	case *map[float64]uint32:
+		*v = nil
+	case *map[float64]uint64:
+		*v = nil
+	case *map[float64]uintptr:
+		*v = nil
+	case *map[float64]int:
+		*v = nil
+	case *map[float64]int8:
+		*v = nil
+	case *map[float64]int16:
+		*v = nil
+	case *map[float64]int32:
+		*v = nil
+	case *map[float64]int64:
+		*v = nil
+	case *map[float64]float32:
+		*v = nil
+	case *map[float64]float64:
+		*v = nil
+	case *map[float64]bool:
+		*v = nil
+	case *map[uint]interface{}:
+		*v = nil
+	case *map[uint]string:
+		*v = nil
+	case *map[uint]uint:
+		*v = nil
+	case *map[uint]uint8:
+		*v = nil
+	case *map[uint]uint16:
+		*v = nil
+	case *map[uint]uint32:
+		*v = nil
+	case *map[uint]uint64:
+		*v = nil
+	case *map[uint]uintptr:
+		*v = nil
+	case *map[uint]int:
+		*v = nil
+	case *map[uint]int8:
+		*v = nil
+	case *map[uint]int16:
+		*v = nil
+	case *map[uint]int32:
+		*v = nil
+	case *map[uint]int64:
+		*v = nil
+	case *map[uint]float32:
+		*v = nil
+	case *map[uint]float64:
+		*v = nil
+	case *map[uint]bool:
+		*v = nil
+	case *map[uint8]interface{}:
+		*v = nil
+	case *map[uint8]string:
+		*v = nil
+	case *map[uint8]uint:
+		*v = nil
+	case *map[uint8]uint8:
+		*v = nil
+	case *map[uint8]uint16:
+		*v = nil
+	case *map[uint8]uint32:
+		*v = nil
+	case *map[uint8]uint64:
+		*v = nil
+	case *map[uint8]uintptr:
+		*v = nil
+	case *map[uint8]int:
+		*v = nil
+	case *map[uint8]int8:
+		*v = nil
+	case *map[uint8]int16:
+		*v = nil
+	case *map[uint8]int32:
+		*v = nil
+	case *map[uint8]int64:
+		*v = nil
+	case *map[uint8]float32:
+		*v = nil
+	case *map[uint8]float64:
+		*v = nil
+	case *map[uint8]bool:
+		*v = nil
+	case *map[uint16]interface{}:
+		*v = nil
+	case *map[uint16]string:
+		*v = nil
+	case *map[uint16]uint:
+		*v = nil
+	case *map[uint16]uint8:
+		*v = nil
+	case *map[uint16]uint16:
+		*v = nil
+	case *map[uint16]uint32:
+		*v = nil
+	case *map[uint16]uint64:
+		*v = nil
+	case *map[uint16]uintptr:
+		*v = nil
+	case *map[uint16]int:
+		*v = nil
+	case *map[uint16]int8:
+		*v = nil
+	case *map[uint16]int16:
+		*v = nil
+	case *map[uint16]int32:
+		*v = nil
+	case *map[uint16]int64:
+		*v = nil
+	case *map[uint16]float32:
+		*v = nil
+	case *map[uint16]float64:
+		*v = nil
+	case *map[uint16]bool:
+		*v = nil
+	case *map[uint32]interface{}:
+		*v = nil
+	case *map[uint32]string:
+		*v = nil
+	case *map[uint32]uint:
+		*v = nil
+	case *map[uint32]uint8:
+		*v = nil
+	case *map[uint32]uint16:
+		*v = nil
+	case *map[uint32]uint32:
+		*v = nil
+	case *map[uint32]uint64:
+		*v = nil
+	case *map[uint32]uintptr:
+		*v = nil
+	case *map[uint32]int:
+		*v = nil
+	case *map[uint32]int8:
+		*v = nil
+	case *map[uint32]int16:
+		*v = nil
+	case *map[uint32]int32:
+		*v = nil
+	case *map[uint32]int64:
+		*v = nil
+	case *map[uint32]float32:
+		*v = nil
+	case *map[uint32]float64:
+		*v = nil
+	case *map[uint32]bool:
+		*v = nil
+	case *map[uint64]interface{}:
+		*v = nil
+	case *map[uint64]string:
+		*v = nil
+	case *map[uint64]uint:
+		*v = nil
+	case *map[uint64]uint8:
+		*v = nil
+	case *map[uint64]uint16:
+		*v = nil
+	case *map[uint64]uint32:
+		*v = nil
+	case *map[uint64]uint64:
+		*v = nil
+	case *map[uint64]uintptr:
+		*v = nil
+	case *map[uint64]int:
+		*v = nil
+	case *map[uint64]int8:
+		*v = nil
+	case *map[uint64]int16:
+		*v = nil
+	case *map[uint64]int32:
+		*v = nil
+	case *map[uint64]int64:
+		*v = nil
+	case *map[uint64]float32:
+		*v = nil
+	case *map[uint64]float64:
+		*v = nil
+	case *map[uint64]bool:
+		*v = nil
+	case *map[uintptr]interface{}:
+		*v = nil
+	case *map[uintptr]string:
+		*v = nil
+	case *map[uintptr]uint:
+		*v = nil
+	case *map[uintptr]uint8:
+		*v = nil
+	case *map[uintptr]uint16:
+		*v = nil
+	case *map[uintptr]uint32:
+		*v = nil
+	case *map[uintptr]uint64:
+		*v = nil
+	case *map[uintptr]uintptr:
+		*v = nil
+	case *map[uintptr]int:
+		*v = nil
+	case *map[uintptr]int8:
+		*v = nil
+	case *map[uintptr]int16:
+		*v = nil
+	case *map[uintptr]int32:
+		*v = nil
+	case *map[uintptr]int64:
+		*v = nil
+	case *map[uintptr]float32:
+		*v = nil
+	case *map[uintptr]float64:
+		*v = nil
+	case *map[uintptr]bool:
+		*v = nil
+	case *map[int]interface{}:
+		*v = nil
+	case *map[int]string:
+		*v = nil
+	case *map[int]uint:
+		*v = nil
+	case *map[int]uint8:
+		*v = nil
+	case *map[int]uint16:
+		*v = nil
+	case *map[int]uint32:
+		*v = nil
+	case *map[int]uint64:
+		*v = nil
+	case *map[int]uintptr:
+		*v = nil
+	case *map[int]int:
+		*v = nil
+	case *map[int]int8:
+		*v = nil
+	case *map[int]int16:
+		*v = nil
+	case *map[int]int32:
+		*v = nil
+	case *map[int]int64:
+		*v = nil
+	case *map[int]float32:
+		*v = nil
+	case *map[int]float64:
+		*v = nil
+	case *map[int]bool:
+		*v = nil
+	case *map[int8]interface{}:
+		*v = nil
+	case *map[int8]string:
+		*v = nil
+	case *map[int8]uint:
+		*v = nil
+	case *map[int8]uint8:
+		*v = nil
+	case *map[int8]uint16:
+		*v = nil
+	case *map[int8]uint32:
+		*v = nil
+	case *map[int8]uint64:
+		*v = nil
+	case *map[int8]uintptr:
+		*v = nil
+	case *map[int8]int:
+		*v = nil
+	case *map[int8]int8:
+		*v = nil
+	case *map[int8]int16:
+		*v = nil
+	case *map[int8]int32:
+		*v = nil
+	case *map[int8]int64:
+		*v = nil
+	case *map[int8]float32:
+		*v = nil
+	case *map[int8]float64:
+		*v = nil
+	case *map[int8]bool:
+		*v = nil
+	case *map[int16]interface{}:
+		*v = nil
+	case *map[int16]string:
+		*v = nil
+	case *map[int16]uint:
+		*v = nil
+	case *map[int16]uint8:
+		*v = nil
+	case *map[int16]uint16:
+		*v = nil
+	case *map[int16]uint32:
+		*v = nil
+	case *map[int16]uint64:
+		*v = nil
+	case *map[int16]uintptr:
+		*v = nil
+	case *map[int16]int:
+		*v = nil
+	case *map[int16]int8:
+		*v = nil
+	case *map[int16]int16:
+		*v = nil
+	case *map[int16]int32:
+		*v = nil
+	case *map[int16]int64:
+		*v = nil
+	case *map[int16]float32:
+		*v = nil
+	case *map[int16]float64:
+		*v = nil
+	case *map[int16]bool:
+		*v = nil
+	case *map[int32]interface{}:
+		*v = nil
+	case *map[int32]string:
+		*v = nil
+	case *map[int32]uint:
+		*v = nil
+	case *map[int32]uint8:
+		*v = nil
+	case *map[int32]uint16:
+		*v = nil
+	case *map[int32]uint32:
+		*v = nil
+	case *map[int32]uint64:
+		*v = nil
+	case *map[int32]uintptr:
+		*v = nil
+	case *map[int32]int:
+		*v = nil
+	case *map[int32]int8:
+		*v = nil
+	case *map[int32]int16:
+		*v = nil
+	case *map[int32]int32:
+		*v = nil
+	case *map[int32]int64:
+		*v = nil
+	case *map[int32]float32:
+		*v = nil
+	case *map[int32]float64:
+		*v = nil
+	case *map[int32]bool:
+		*v = nil
+	case *map[int64]interface{}:
+		*v = nil
+	case *map[int64]string:
+		*v = nil
+	case *map[int64]uint:
+		*v = nil
+	case *map[int64]uint8:
+		*v = nil
+	case *map[int64]uint16:
+		*v = nil
+	case *map[int64]uint32:
+		*v = nil
+	case *map[int64]uint64:
+		*v = nil
+	case *map[int64]uintptr:
+		*v = nil
+	case *map[int64]int:
+		*v = nil
+	case *map[int64]int8:
+		*v = nil
+	case *map[int64]int16:
+		*v = nil
+	case *map[int64]int32:
+		*v = nil
+	case *map[int64]int64:
+		*v = nil
+	case *map[int64]float32:
+		*v = nil
+	case *map[int64]float64:
+		*v = nil
+	case *map[int64]bool:
+		*v = nil
+	case *map[bool]interface{}:
+		*v = nil
+	case *map[bool]string:
+		*v = nil
+	case *map[bool]uint:
+		*v = nil
+	case *map[bool]uint8:
+		*v = nil
+	case *map[bool]uint16:
+		*v = nil
+	case *map[bool]uint32:
+		*v = nil
+	case *map[bool]uint64:
+		*v = nil
+	case *map[bool]uintptr:
+		*v = nil
+	case *map[bool]int:
+		*v = nil
+	case *map[bool]int8:
+		*v = nil
+	case *map[bool]int16:
+		*v = nil
+	case *map[bool]int32:
+		*v = nil
+	case *map[bool]int64:
+		*v = nil
+	case *map[bool]float32:
+		*v = nil
+	case *map[bool]float64:
+		*v = nil
+	case *map[bool]bool:
+		*v = nil
+	default:
+		_ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4
+		return false
+	}
+	return true
+}
+
+// -- -- fast path functions
+
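+// Each fast-path element type gets three generated decode helpers:
+//   - the ...R method is the reflect.Value entry point wired into the
+//     codecFn table; it passes canChange = !array, since a fixed-size array
+//     (seqTypeArray) cannot be reallocated, and either writes the result
+//     back through the pointer or copies it over the existing slice;
+//   - the ...X helper decodes through a pointer, storing the result only if
+//     it changed;
+//   - the ...V function does the actual decoding, returning the (possibly
+//     reallocated) value and a changed flag.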
+func (d *Decoder) fastpathDecSliceIntfR(f *codecFnInfo, rv reflect.Value) {
+	if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*[]interface{})
+		v, changed := fastpathTV.DecSliceIntfV(*vp, !array, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		v := rv2i(rv).([]interface{})
+		v2, changed := fastpathTV.DecSliceIntfV(v, !array, d)
+		if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+			copy(v, v2)
+		}
+	}
+}
+func (f fastpathT) DecSliceIntfX(vp *[]interface{}, d *Decoder) {
+	v, changed := f.DecSliceIntfV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecSliceIntfV(v []interface{}, canChange bool, d *Decoder) (_ []interface{}, changed bool) {
+	dd := d.d
+	slh, containerLenS := d.decSliceHelperStart()
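+	// containerLenS is the declared container length: 0 means an empty
+	// container, while a negative value marks an indefinite-length stream
+	// that is terminated by a break marker (dd.CheckBreak below).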
+	if containerLenS == 0 {
+		if canChange {
+			if v == nil {
+				v = []interface{}{}
+			} else if len(v) != 0 {
+				v = v[:0]
+			}
+			changed = true
+		}
+		slh.End()
+		return v, changed
+	}
+	hasLen := containerLenS > 0
+	var xlen int
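+	// Pre-size the slice. decInferLen caps the initial allocation using
+	// d.h.MaxInitLen and a per-element size hint (16 bytes for interface{}),
+	// so an attacker-controlled declared length cannot force a huge make().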
+	if hasLen && canChange {
+		if containerLenS > cap(v) {
+			xlen = decInferLen(containerLenS, d.h.MaxInitLen, 16)
+			if xlen <= cap(v) {
+				v = v[:xlen]
+			} else {
+				v = make([]interface{}, xlen)
+			}
+			changed = true
+		} else if containerLenS != len(v) {
+			v = v[:containerLenS]
+			changed = true
+		}
+	}
+	j := 0
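+	// Consume elements while within the declared length, or, for
+	// indefinite-length streams, until the break marker is read.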
+	for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
+		if j == 0 && len(v) == 0 && canChange {
+			if hasLen {
+				xlen = decInferLen(containerLenS, d.h.MaxInitLen, 16)
+			} else {
+				xlen = 8
+			}
+			v = make([]interface{}, xlen)
+			changed = true
+		}
+		// for indefinite-length streams (or inputs longer than the current slice), expand the slice if necessary
+		var decodeIntoBlank bool
+		if j >= len(v) {
+			if canChange {
+				v = append(v, nil)
+				changed = true
+			} else {
+				d.arrayCannotExpand(len(v), j+1)
+				decodeIntoBlank = true
+			}
+		}
+		slh.ElemContainerState(j)
+		if decodeIntoBlank {
+			d.swallow()
+		} else if dd.TryDecodeAsNil() {
+			v[j] = nil
+		} else {
+			d.decode(&v[j])
+		}
+	}
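+	// Trim elements left over from a longer input slice, and normalize a
+	// nil slice that decoded zero elements into a non-nil empty slice.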
+	if canChange {
+		if j < len(v) {
+			v = v[:j]
+			changed = true
+		} else if j == 0 && v == nil {
+			v = make([]interface{}, 0)
+			changed = true
+		}
+	}
+	slh.End()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecSliceStringR(f *codecFnInfo, rv reflect.Value) {
+	if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*[]string)
+		v, changed := fastpathTV.DecSliceStringV(*vp, !array, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		v := rv2i(rv).([]string)
+		v2, changed := fastpathTV.DecSliceStringV(v, !array, d)
+		if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+			copy(v, v2)
+		}
+	}
+}
+func (f fastpathT) DecSliceStringX(vp *[]string, d *Decoder) {
+	v, changed := f.DecSliceStringV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecSliceStringV(v []string, canChange bool, d *Decoder) (_ []string, changed bool) {
+	dd := d.d
+	slh, containerLenS := d.decSliceHelperStart()
+	if containerLenS == 0 {
+		if canChange {
+			if v == nil {
+				v = []string{}
+			} else if len(v) != 0 {
+				v = v[:0]
+			}
+			changed = true
+		}
+		slh.End()
+		return v, changed
+	}
+	hasLen := containerLenS > 0
+	var xlen int
+	if hasLen && canChange {
+		if containerLenS > cap(v) {
+			xlen = decInferLen(containerLenS, d.h.MaxInitLen, 16)
+			if xlen <= cap(v) {
+				v = v[:xlen]
+			} else {
+				v = make([]string, xlen)
+			}
+			changed = true
+		} else if containerLenS != len(v) {
+			v = v[:containerLenS]
+			changed = true
+		}
+	}
+	j := 0
+	for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
+		if j == 0 && len(v) == 0 && canChange {
+			if hasLen {
+				xlen = decInferLen(containerLenS, d.h.MaxInitLen, 16)
+			} else {
+				xlen = 8
+			}
+			v = make([]string, xlen)
+			changed = true
+		}
+		// for indefinite-length streams (or inputs longer than the current slice), expand the slice if necessary
+		var decodeIntoBlank bool
+		if j >= len(v) {
+			if canChange {
+				v = append(v, "")
+				changed = true
+			} else {
+				d.arrayCannotExpand(len(v), j+1)
+				decodeIntoBlank = true
+			}
+		}
+		slh.ElemContainerState(j)
+		if decodeIntoBlank {
+			d.swallow()
+		} else if dd.TryDecodeAsNil() {
+			v[j] = ""
+		} else {
+			v[j] = dd.DecodeString()
+		}
+	}
+	if canChange {
+		if j < len(v) {
+			v = v[:j]
+			changed = true
+		} else if j == 0 && v == nil {
+			v = make([]string, 0)
+			changed = true
+		}
+	}
+	slh.End()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecSliceFloat32R(f *codecFnInfo, rv reflect.Value) {
+	if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*[]float32)
+		v, changed := fastpathTV.DecSliceFloat32V(*vp, !array, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		v := rv2i(rv).([]float32)
+		v2, changed := fastpathTV.DecSliceFloat32V(v, !array, d)
+		if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+			copy(v, v2)
+		}
+	}
+}
+func (f fastpathT) DecSliceFloat32X(vp *[]float32, d *Decoder) {
+	v, changed := f.DecSliceFloat32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecSliceFloat32V(v []float32, canChange bool, d *Decoder) (_ []float32, changed bool) {
+	dd := d.d
+	slh, containerLenS := d.decSliceHelperStart()
+	if containerLenS == 0 {
+		if canChange {
+			if v == nil {
+				v = []float32{}
+			} else if len(v) != 0 {
+				v = v[:0]
+			}
+			changed = true
+		}
+		slh.End()
+		return v, changed
+	}
+	hasLen := containerLenS > 0
+	var xlen int
+	if hasLen && canChange {
+		if containerLenS > cap(v) {
+			xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4)
+			if xlen <= cap(v) {
+				v = v[:xlen]
+			} else {
+				v = make([]float32, xlen)
+			}
+			changed = true
+		} else if containerLenS != len(v) {
+			v = v[:containerLenS]
+			changed = true
+		}
+	}
+	j := 0
+	for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
+		if j == 0 && len(v) == 0 && canChange {
+			if hasLen {
+				xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4)
+			} else {
+				xlen = 8
+			}
+			v = make([]float32, xlen)
+			changed = true
+		}
+		// for indefinite-length input, or if the slice is still too short, expand it as necessary
+		var decodeIntoBlank bool
+		if j >= len(v) {
+			if canChange {
+				v = append(v, 0)
+				changed = true
+			} else {
+				d.arrayCannotExpand(len(v), j+1)
+				decodeIntoBlank = true
+			}
+		}
+		slh.ElemContainerState(j)
+		if decodeIntoBlank {
+			d.swallow()
+		} else if dd.TryDecodeAsNil() {
+			v[j] = 0
+		} else {
+			v[j] = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+		}
+	}
+	if canChange {
+		if j < len(v) {
+			v = v[:j]
+			changed = true
+		} else if j == 0 && v == nil {
+			v = make([]float32, 0)
+			changed = true
+		}
+	}
+	slh.End()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecSliceFloat64R(f *codecFnInfo, rv reflect.Value) {
+	if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*[]float64)
+		v, changed := fastpathTV.DecSliceFloat64V(*vp, !array, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		v := rv2i(rv).([]float64)
+		v2, changed := fastpathTV.DecSliceFloat64V(v, !array, d)
+		if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+			copy(v, v2)
+		}
+	}
+}
+func (f fastpathT) DecSliceFloat64X(vp *[]float64, d *Decoder) {
+	v, changed := f.DecSliceFloat64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecSliceFloat64V(v []float64, canChange bool, d *Decoder) (_ []float64, changed bool) {
+	dd := d.d
+	slh, containerLenS := d.decSliceHelperStart()
+	if containerLenS == 0 {
+		if canChange {
+			if v == nil {
+				v = []float64{}
+			} else if len(v) != 0 {
+				v = v[:0]
+			}
+			changed = true
+		}
+		slh.End()
+		return v, changed
+	}
+	hasLen := containerLenS > 0
+	var xlen int
+	if hasLen && canChange {
+		if containerLenS > cap(v) {
+			xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+			if xlen <= cap(v) {
+				v = v[:xlen]
+			} else {
+				v = make([]float64, xlen)
+			}
+			changed = true
+		} else if containerLenS != len(v) {
+			v = v[:containerLenS]
+			changed = true
+		}
+	}
+	j := 0
+	for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
+		if j == 0 && len(v) == 0 && canChange {
+			if hasLen {
+				xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+			} else {
+				xlen = 8
+			}
+			v = make([]float64, xlen)
+			changed = true
+		}
+		// for indefinite-length input, or if the slice is still too short, expand it as necessary
+		var decodeIntoBlank bool
+		if j >= len(v) {
+			if canChange {
+				v = append(v, 0)
+				changed = true
+			} else {
+				d.arrayCannotExpand(len(v), j+1)
+				decodeIntoBlank = true
+			}
+		}
+		slh.ElemContainerState(j)
+		if decodeIntoBlank {
+			d.swallow()
+		} else if dd.TryDecodeAsNil() {
+			v[j] = 0
+		} else {
+			v[j] = dd.DecodeFloat64()
+		}
+	}
+	if canChange {
+		if j < len(v) {
+			v = v[:j]
+			changed = true
+		} else if j == 0 && v == nil {
+			v = make([]float64, 0)
+			changed = true
+		}
+	}
+	slh.End()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecSliceUintR(f *codecFnInfo, rv reflect.Value) {
+	if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*[]uint)
+		v, changed := fastpathTV.DecSliceUintV(*vp, !array, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		v := rv2i(rv).([]uint)
+		v2, changed := fastpathTV.DecSliceUintV(v, !array, d)
+		if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+			copy(v, v2)
+		}
+	}
+}
+func (f fastpathT) DecSliceUintX(vp *[]uint, d *Decoder) {
+	v, changed := f.DecSliceUintV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecSliceUintV(v []uint, canChange bool, d *Decoder) (_ []uint, changed bool) {
+	dd := d.d
+	slh, containerLenS := d.decSliceHelperStart()
+	if containerLenS == 0 {
+		if canChange {
+			if v == nil {
+				v = []uint{}
+			} else if len(v) != 0 {
+				v = v[:0]
+			}
+			changed = true
+		}
+		slh.End()
+		return v, changed
+	}
+	hasLen := containerLenS > 0
+	var xlen int
+	if hasLen && canChange {
+		if containerLenS > cap(v) {
+			xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+			if xlen <= cap(v) {
+				v = v[:xlen]
+			} else {
+				v = make([]uint, xlen)
+			}
+			changed = true
+		} else if containerLenS != len(v) {
+			v = v[:containerLenS]
+			changed = true
+		}
+	}
+	j := 0
+	for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
+		if j == 0 && len(v) == 0 && canChange {
+			if hasLen {
+				xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+			} else {
+				xlen = 8
+			}
+			v = make([]uint, xlen)
+			changed = true
+		}
+		// for indefinite-length input, or if the slice is still too short, expand it as necessary
+		var decodeIntoBlank bool
+		if j >= len(v) {
+			if canChange {
+				v = append(v, 0)
+				changed = true
+			} else {
+				d.arrayCannotExpand(len(v), j+1)
+				decodeIntoBlank = true
+			}
+		}
+		slh.ElemContainerState(j)
+		if decodeIntoBlank {
+			d.swallow()
+		} else if dd.TryDecodeAsNil() {
+			v[j] = 0
+		} else {
+			v[j] = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		}
+	}
+	if canChange {
+		if j < len(v) {
+			v = v[:j]
+			changed = true
+		} else if j == 0 && v == nil {
+			v = make([]uint, 0)
+			changed = true
+		}
+	}
+	slh.End()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecSliceUint8R(f *codecFnInfo, rv reflect.Value) {
+	if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*[]uint8)
+		v, changed := fastpathTV.DecSliceUint8V(*vp, !array, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		v := rv2i(rv).([]uint8)
+		v2, changed := fastpathTV.DecSliceUint8V(v, !array, d)
+		if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+			copy(v, v2)
+		}
+	}
+}
+func (f fastpathT) DecSliceUint8X(vp *[]uint8, d *Decoder) {
+	v, changed := f.DecSliceUint8V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecSliceUint8V(v []uint8, canChange bool, d *Decoder) (_ []uint8, changed bool) {
+	dd := d.d
+	slh, containerLenS := d.decSliceHelperStart()
+	if containerLenS == 0 {
+		if canChange {
+			if v == nil {
+				v = []uint8{}
+			} else if len(v) != 0 {
+				v = v[:0]
+			}
+			changed = true
+		}
+		slh.End()
+		return v, changed
+	}
+	hasLen := containerLenS > 0
+	var xlen int
+	if hasLen && canChange {
+		if containerLenS > cap(v) {
+			xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1)
+			if xlen <= cap(v) {
+				v = v[:xlen]
+			} else {
+				v = make([]uint8, xlen)
+			}
+			changed = true
+		} else if containerLenS != len(v) {
+			v = v[:containerLenS]
+			changed = true
+		}
+	}
+	j := 0
+	for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
+		if j == 0 && len(v) == 0 && canChange {
+			if hasLen {
+				xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1)
+			} else {
+				xlen = 8
+			}
+			v = make([]uint8, xlen)
+			changed = true
+		}
+		// for indefinite-length input, or if the slice is still too short, expand it as necessary
+		var decodeIntoBlank bool
+		if j >= len(v) {
+			if canChange {
+				v = append(v, 0)
+				changed = true
+			} else {
+				d.arrayCannotExpand(len(v), j+1)
+				decodeIntoBlank = true
+			}
+		}
+		slh.ElemContainerState(j)
+		if decodeIntoBlank {
+			d.swallow()
+		} else if dd.TryDecodeAsNil() {
+			v[j] = 0
+		} else {
+			v[j] = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		}
+	}
+	if canChange {
+		if j < len(v) {
+			v = v[:j]
+			changed = true
+		} else if j == 0 && v == nil {
+			v = make([]uint8, 0)
+			changed = true
+		}
+	}
+	slh.End()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecSliceUint16R(f *codecFnInfo, rv reflect.Value) {
+	if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*[]uint16)
+		v, changed := fastpathTV.DecSliceUint16V(*vp, !array, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		v := rv2i(rv).([]uint16)
+		v2, changed := fastpathTV.DecSliceUint16V(v, !array, d)
+		if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+			copy(v, v2)
+		}
+	}
+}
+func (f fastpathT) DecSliceUint16X(vp *[]uint16, d *Decoder) {
+	v, changed := f.DecSliceUint16V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecSliceUint16V(v []uint16, canChange bool, d *Decoder) (_ []uint16, changed bool) {
+	dd := d.d
+	slh, containerLenS := d.decSliceHelperStart()
+	if containerLenS == 0 {
+		if canChange {
+			if v == nil {
+				v = []uint16{}
+			} else if len(v) != 0 {
+				v = v[:0]
+			}
+			changed = true
+		}
+		slh.End()
+		return v, changed
+	}
+	hasLen := containerLenS > 0
+	var xlen int
+	if hasLen && canChange {
+		if containerLenS > cap(v) {
+			xlen = decInferLen(containerLenS, d.h.MaxInitLen, 2)
+			if xlen <= cap(v) {
+				v = v[:xlen]
+			} else {
+				v = make([]uint16, xlen)
+			}
+			changed = true
+		} else if containerLenS != len(v) {
+			v = v[:containerLenS]
+			changed = true
+		}
+	}
+	j := 0
+	for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
+		if j == 0 && len(v) == 0 && canChange {
+			if hasLen {
+				xlen = decInferLen(containerLenS, d.h.MaxInitLen, 2)
+			} else {
+				xlen = 8
+			}
+			v = make([]uint16, xlen)
+			changed = true
+		}
+		// for indefinite-length input, or if the slice is still too short, expand it as necessary
+		var decodeIntoBlank bool
+		if j >= len(v) {
+			if canChange {
+				v = append(v, 0)
+				changed = true
+			} else {
+				d.arrayCannotExpand(len(v), j+1)
+				decodeIntoBlank = true
+			}
+		}
+		slh.ElemContainerState(j)
+		if decodeIntoBlank {
+			d.swallow()
+		} else if dd.TryDecodeAsNil() {
+			v[j] = 0
+		} else {
+			v[j] = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+		}
+	}
+	if canChange {
+		if j < len(v) {
+			v = v[:j]
+			changed = true
+		} else if j == 0 && v == nil {
+			v = make([]uint16, 0)
+			changed = true
+		}
+	}
+	slh.End()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecSliceUint32R(f *codecFnInfo, rv reflect.Value) {
+	if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*[]uint32)
+		v, changed := fastpathTV.DecSliceUint32V(*vp, !array, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		v := rv2i(rv).([]uint32)
+		v2, changed := fastpathTV.DecSliceUint32V(v, !array, d)
+		if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+			copy(v, v2)
+		}
+	}
+}
+func (f fastpathT) DecSliceUint32X(vp *[]uint32, d *Decoder) {
+	v, changed := f.DecSliceUint32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecSliceUint32V(v []uint32, canChange bool, d *Decoder) (_ []uint32, changed bool) {
+	dd := d.d
+	slh, containerLenS := d.decSliceHelperStart()
+	if containerLenS == 0 {
+		if canChange {
+			if v == nil {
+				v = []uint32{}
+			} else if len(v) != 0 {
+				v = v[:0]
+			}
+			changed = true
+		}
+		slh.End()
+		return v, changed
+	}
+	hasLen := containerLenS > 0
+	var xlen int
+	if hasLen && canChange {
+		if containerLenS > cap(v) {
+			xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4)
+			if xlen <= cap(v) {
+				v = v[:xlen]
+			} else {
+				v = make([]uint32, xlen)
+			}
+			changed = true
+		} else if containerLenS != len(v) {
+			v = v[:containerLenS]
+			changed = true
+		}
+	}
+	j := 0
+	for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
+		if j == 0 && len(v) == 0 && canChange {
+			if hasLen {
+				xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4)
+			} else {
+				xlen = 8
+			}
+			v = make([]uint32, xlen)
+			changed = true
+		}
+		// for indefinite-length input, or if the slice is still too short, expand it as necessary
+		var decodeIntoBlank bool
+		if j >= len(v) {
+			if canChange {
+				v = append(v, 0)
+				changed = true
+			} else {
+				d.arrayCannotExpand(len(v), j+1)
+				decodeIntoBlank = true
+			}
+		}
+		slh.ElemContainerState(j)
+		if decodeIntoBlank {
+			d.swallow()
+		} else if dd.TryDecodeAsNil() {
+			v[j] = 0
+		} else {
+			v[j] = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+		}
+	}
+	if canChange {
+		if j < len(v) {
+			v = v[:j]
+			changed = true
+		} else if j == 0 && v == nil {
+			v = make([]uint32, 0)
+			changed = true
+		}
+	}
+	slh.End()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecSliceUint64R(f *codecFnInfo, rv reflect.Value) {
+	if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*[]uint64)
+		v, changed := fastpathTV.DecSliceUint64V(*vp, !array, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		v := rv2i(rv).([]uint64)
+		v2, changed := fastpathTV.DecSliceUint64V(v, !array, d)
+		if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+			copy(v, v2)
+		}
+	}
+}
+func (f fastpathT) DecSliceUint64X(vp *[]uint64, d *Decoder) {
+	v, changed := f.DecSliceUint64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecSliceUint64V(v []uint64, canChange bool, d *Decoder) (_ []uint64, changed bool) {
+	dd := d.d
+	slh, containerLenS := d.decSliceHelperStart()
+	if containerLenS == 0 {
+		if canChange {
+			if v == nil {
+				v = []uint64{}
+			} else if len(v) != 0 {
+				v = v[:0]
+			}
+			changed = true
+		}
+		slh.End()
+		return v, changed
+	}
+	hasLen := containerLenS > 0
+	var xlen int
+	if hasLen && canChange {
+		if containerLenS > cap(v) {
+			xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+			if xlen <= cap(v) {
+				v = v[:xlen]
+			} else {
+				v = make([]uint64, xlen)
+			}
+			changed = true
+		} else if containerLenS != len(v) {
+			v = v[:containerLenS]
+			changed = true
+		}
+	}
+	j := 0
+	for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
+		if j == 0 && len(v) == 0 && canChange {
+			if hasLen {
+				xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+			} else {
+				xlen = 8
+			}
+			v = make([]uint64, xlen)
+			changed = true
+		}
+		// for indefinite-length input, or if the slice is still too short, expand it as necessary
+		var decodeIntoBlank bool
+		if j >= len(v) {
+			if canChange {
+				v = append(v, 0)
+				changed = true
+			} else {
+				d.arrayCannotExpand(len(v), j+1)
+				decodeIntoBlank = true
+			}
+		}
+		slh.ElemContainerState(j)
+		if decodeIntoBlank {
+			d.swallow()
+		} else if dd.TryDecodeAsNil() {
+			v[j] = 0
+		} else {
+			v[j] = dd.DecodeUint64()
+		}
+	}
+	if canChange {
+		if j < len(v) {
+			v = v[:j]
+			changed = true
+		} else if j == 0 && v == nil {
+			v = make([]uint64, 0)
+			changed = true
+		}
+	}
+	slh.End()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecSliceUintptrR(f *codecFnInfo, rv reflect.Value) {
+	if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*[]uintptr)
+		v, changed := fastpathTV.DecSliceUintptrV(*vp, !array, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		v := rv2i(rv).([]uintptr)
+		v2, changed := fastpathTV.DecSliceUintptrV(v, !array, d)
+		if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+			copy(v, v2)
+		}
+	}
+}
+func (f fastpathT) DecSliceUintptrX(vp *[]uintptr, d *Decoder) {
+	v, changed := f.DecSliceUintptrV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecSliceUintptrV(v []uintptr, canChange bool, d *Decoder) (_ []uintptr, changed bool) {
+	dd := d.d
+	slh, containerLenS := d.decSliceHelperStart()
+	if containerLenS == 0 {
+		if canChange {
+			if v == nil {
+				v = []uintptr{}
+			} else if len(v) != 0 {
+				v = v[:0]
+			}
+			changed = true
+		}
+		slh.End()
+		return v, changed
+	}
+	hasLen := containerLenS > 0
+	var xlen int
+	if hasLen && canChange {
+		if containerLenS > cap(v) {
+			xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+			if xlen <= cap(v) {
+				v = v[:xlen]
+			} else {
+				v = make([]uintptr, xlen)
+			}
+			changed = true
+		} else if containerLenS != len(v) {
+			v = v[:containerLenS]
+			changed = true
+		}
+	}
+	j := 0
+	for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
+		if j == 0 && len(v) == 0 && canChange {
+			if hasLen {
+				xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+			} else {
+				xlen = 8
+			}
+			v = make([]uintptr, xlen)
+			changed = true
+		}
+		// for indefinite-length input, or if the slice is still too short, expand it as necessary
+		var decodeIntoBlank bool
+		if j >= len(v) {
+			if canChange {
+				v = append(v, 0)
+				changed = true
+			} else {
+				d.arrayCannotExpand(len(v), j+1)
+				decodeIntoBlank = true
+			}
+		}
+		slh.ElemContainerState(j)
+		if decodeIntoBlank {
+			d.swallow()
+		} else if dd.TryDecodeAsNil() {
+			v[j] = 0
+		} else {
+			v[j] = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		}
+	}
+	if canChange {
+		if j < len(v) {
+			v = v[:j]
+			changed = true
+		} else if j == 0 && v == nil {
+			v = make([]uintptr, 0)
+			changed = true
+		}
+	}
+	slh.End()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecSliceIntR(f *codecFnInfo, rv reflect.Value) {
+	if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*[]int)
+		v, changed := fastpathTV.DecSliceIntV(*vp, !array, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		v := rv2i(rv).([]int)
+		v2, changed := fastpathTV.DecSliceIntV(v, !array, d)
+		if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+			copy(v, v2)
+		}
+	}
+}
+func (f fastpathT) DecSliceIntX(vp *[]int, d *Decoder) {
+	v, changed := f.DecSliceIntV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecSliceIntV(v []int, canChange bool, d *Decoder) (_ []int, changed bool) {
+	dd := d.d
+	slh, containerLenS := d.decSliceHelperStart()
+	if containerLenS == 0 {
+		if canChange {
+			if v == nil {
+				v = []int{}
+			} else if len(v) != 0 {
+				v = v[:0]
+			}
+			changed = true
+		}
+		slh.End()
+		return v, changed
+	}
+	hasLen := containerLenS > 0
+	var xlen int
+	if hasLen && canChange {
+		if containerLenS > cap(v) {
+			xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+			if xlen <= cap(v) {
+				v = v[:xlen]
+			} else {
+				v = make([]int, xlen)
+			}
+			changed = true
+		} else if containerLenS != len(v) {
+			v = v[:containerLenS]
+			changed = true
+		}
+	}
+	j := 0
+	for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
+		if j == 0 && len(v) == 0 && canChange {
+			if hasLen {
+				xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+			} else {
+				xlen = 8
+			}
+			v = make([]int, xlen)
+			changed = true
+		}
+		// for indefinite-length input, or if the slice is still too short, expand it as necessary
+		var decodeIntoBlank bool
+		if j >= len(v) {
+			if canChange {
+				v = append(v, 0)
+				changed = true
+			} else {
+				d.arrayCannotExpand(len(v), j+1)
+				decodeIntoBlank = true
+			}
+		}
+		slh.ElemContainerState(j)
+		if decodeIntoBlank {
+			d.swallow()
+		} else if dd.TryDecodeAsNil() {
+			v[j] = 0
+		} else {
+			v[j] = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		}
+	}
+	if canChange {
+		if j < len(v) {
+			v = v[:j]
+			changed = true
+		} else if j == 0 && v == nil {
+			v = make([]int, 0)
+			changed = true
+		}
+	}
+	slh.End()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecSliceInt8R(f *codecFnInfo, rv reflect.Value) {
+	if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*[]int8)
+		v, changed := fastpathTV.DecSliceInt8V(*vp, !array, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		v := rv2i(rv).([]int8)
+		v2, changed := fastpathTV.DecSliceInt8V(v, !array, d)
+		if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+			copy(v, v2)
+		}
+	}
+}
+func (f fastpathT) DecSliceInt8X(vp *[]int8, d *Decoder) {
+	v, changed := f.DecSliceInt8V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecSliceInt8V(v []int8, canChange bool, d *Decoder) (_ []int8, changed bool) {
+	dd := d.d
+	slh, containerLenS := d.decSliceHelperStart()
+	if containerLenS == 0 {
+		if canChange {
+			if v == nil {
+				v = []int8{}
+			} else if len(v) != 0 {
+				v = v[:0]
+			}
+			changed = true
+		}
+		slh.End()
+		return v, changed
+	}
+	hasLen := containerLenS > 0
+	var xlen int
+	if hasLen && canChange {
+		if containerLenS > cap(v) {
+			xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1)
+			if xlen <= cap(v) {
+				v = v[:xlen]
+			} else {
+				v = make([]int8, xlen)
+			}
+			changed = true
+		} else if containerLenS != len(v) {
+			v = v[:containerLenS]
+			changed = true
+		}
+	}
+	j := 0
+	for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
+		if j == 0 && len(v) == 0 && canChange {
+			if hasLen {
+				xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1)
+			} else {
+				xlen = 8
+			}
+			v = make([]int8, xlen)
+			changed = true
+		}
+		// for indefinite-length input, or if the slice is still too short, expand it as necessary
+		var decodeIntoBlank bool
+		if j >= len(v) {
+			if canChange {
+				v = append(v, 0)
+				changed = true
+			} else {
+				d.arrayCannotExpand(len(v), j+1)
+				decodeIntoBlank = true
+			}
+		}
+		slh.ElemContainerState(j)
+		if decodeIntoBlank {
+			d.swallow()
+		} else if dd.TryDecodeAsNil() {
+			v[j] = 0
+		} else {
+			v[j] = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+		}
+	}
+	if canChange {
+		if j < len(v) {
+			v = v[:j]
+			changed = true
+		} else if j == 0 && v == nil {
+			v = make([]int8, 0)
+			changed = true
+		}
+	}
+	slh.End()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecSliceInt16R(f *codecFnInfo, rv reflect.Value) {
+	if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*[]int16)
+		v, changed := fastpathTV.DecSliceInt16V(*vp, !array, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		v := rv2i(rv).([]int16)
+		v2, changed := fastpathTV.DecSliceInt16V(v, !array, d)
+		if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+			copy(v, v2)
+		}
+	}
+}
+func (f fastpathT) DecSliceInt16X(vp *[]int16, d *Decoder) {
+	v, changed := f.DecSliceInt16V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecSliceInt16V(v []int16, canChange bool, d *Decoder) (_ []int16, changed bool) {
+	dd := d.d
+	slh, containerLenS := d.decSliceHelperStart()
+	if containerLenS == 0 {
+		if canChange {
+			if v == nil {
+				v = []int16{}
+			} else if len(v) != 0 {
+				v = v[:0]
+			}
+			changed = true
+		}
+		slh.End()
+		return v, changed
+	}
+	hasLen := containerLenS > 0
+	var xlen int
+	if hasLen && canChange {
+		if containerLenS > cap(v) {
+			xlen = decInferLen(containerLenS, d.h.MaxInitLen, 2)
+			if xlen <= cap(v) {
+				v = v[:xlen]
+			} else {
+				v = make([]int16, xlen)
+			}
+			changed = true
+		} else if containerLenS != len(v) {
+			v = v[:containerLenS]
+			changed = true
+		}
+	}
+	j := 0
+	for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
+		if j == 0 && len(v) == 0 && canChange {
+			if hasLen {
+				xlen = decInferLen(containerLenS, d.h.MaxInitLen, 2)
+			} else {
+				xlen = 8
+			}
+			v = make([]int16, xlen)
+			changed = true
+		}
+		// for indefinite-length input, or if the slice is still too short, expand it as necessary
+		var decodeIntoBlank bool
+		if j >= len(v) {
+			if canChange {
+				v = append(v, 0)
+				changed = true
+			} else {
+				d.arrayCannotExpand(len(v), j+1)
+				decodeIntoBlank = true
+			}
+		}
+		slh.ElemContainerState(j)
+		if decodeIntoBlank {
+			d.swallow()
+		} else if dd.TryDecodeAsNil() {
+			v[j] = 0
+		} else {
+			v[j] = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+		}
+	}
+	if canChange {
+		if j < len(v) {
+			v = v[:j]
+			changed = true
+		} else if j == 0 && v == nil {
+			v = make([]int16, 0)
+			changed = true
+		}
+	}
+	slh.End()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecSliceInt32R(f *codecFnInfo, rv reflect.Value) {
+	if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*[]int32)
+		v, changed := fastpathTV.DecSliceInt32V(*vp, !array, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		v := rv2i(rv).([]int32)
+		v2, changed := fastpathTV.DecSliceInt32V(v, !array, d)
+		if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+			copy(v, v2)
+		}
+	}
+}
+func (f fastpathT) DecSliceInt32X(vp *[]int32, d *Decoder) {
+	v, changed := f.DecSliceInt32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecSliceInt32V(v []int32, canChange bool, d *Decoder) (_ []int32, changed bool) {
+	dd := d.d
+	slh, containerLenS := d.decSliceHelperStart()
+	if containerLenS == 0 {
+		if canChange {
+			if v == nil {
+				v = []int32{}
+			} else if len(v) != 0 {
+				v = v[:0]
+			}
+			changed = true
+		}
+		slh.End()
+		return v, changed
+	}
+	hasLen := containerLenS > 0
+	var xlen int
+	if hasLen && canChange {
+		if containerLenS > cap(v) {
+			xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4)
+			if xlen <= cap(v) {
+				v = v[:xlen]
+			} else {
+				v = make([]int32, xlen)
+			}
+			changed = true
+		} else if containerLenS != len(v) {
+			v = v[:containerLenS]
+			changed = true
+		}
+	}
+	j := 0
+	for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
+		if j == 0 && len(v) == 0 && canChange {
+			if hasLen {
+				xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4)
+			} else {
+				xlen = 8
+			}
+			v = make([]int32, xlen)
+			changed = true
+		}
+		// for indefinite-length input, or if the slice is still too short, expand it as necessary
+		var decodeIntoBlank bool
+		if j >= len(v) {
+			if canChange {
+				v = append(v, 0)
+				changed = true
+			} else {
+				d.arrayCannotExpand(len(v), j+1)
+				decodeIntoBlank = true
+			}
+		}
+		slh.ElemContainerState(j)
+		if decodeIntoBlank {
+			d.swallow()
+		} else if dd.TryDecodeAsNil() {
+			v[j] = 0
+		} else {
+			v[j] = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+		}
+	}
+	if canChange {
+		if j < len(v) {
+			v = v[:j]
+			changed = true
+		} else if j == 0 && v == nil {
+			v = make([]int32, 0)
+			changed = true
+		}
+	}
+	slh.End()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecSliceInt64R(f *codecFnInfo, rv reflect.Value) {
+	if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*[]int64)
+		v, changed := fastpathTV.DecSliceInt64V(*vp, !array, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		v := rv2i(rv).([]int64)
+		v2, changed := fastpathTV.DecSliceInt64V(v, !array, d)
+		if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+			copy(v, v2)
+		}
+	}
+}
+func (f fastpathT) DecSliceInt64X(vp *[]int64, d *Decoder) {
+	v, changed := f.DecSliceInt64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecSliceInt64V(v []int64, canChange bool, d *Decoder) (_ []int64, changed bool) {
+	dd := d.d
+	slh, containerLenS := d.decSliceHelperStart()
+	if containerLenS == 0 {
+		if canChange {
+			if v == nil {
+				v = []int64{}
+			} else if len(v) != 0 {
+				v = v[:0]
+			}
+			changed = true
+		}
+		slh.End()
+		return v, changed
+	}
+	hasLen := containerLenS > 0
+	var xlen int
+	if hasLen && canChange {
+		if containerLenS > cap(v) {
+			xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+			if xlen <= cap(v) {
+				v = v[:xlen]
+			} else {
+				v = make([]int64, xlen)
+			}
+			changed = true
+		} else if containerLenS != len(v) {
+			v = v[:containerLenS]
+			changed = true
+		}
+	}
+	j := 0
+	for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
+		if j == 0 && len(v) == 0 && canChange {
+			if hasLen {
+				xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+			} else {
+				xlen = 8
+			}
+			v = make([]int64, xlen)
+			changed = true
+		}
+		// for indefinite-length input, or if the slice is still too short, expand it as necessary
+		var decodeIntoBlank bool
+		if j >= len(v) {
+			if canChange {
+				v = append(v, 0)
+				changed = true
+			} else {
+				d.arrayCannotExpand(len(v), j+1)
+				decodeIntoBlank = true
+			}
+		}
+		slh.ElemContainerState(j)
+		if decodeIntoBlank {
+			d.swallow()
+		} else if dd.TryDecodeAsNil() {
+			v[j] = 0
+		} else {
+			v[j] = dd.DecodeInt64()
+		}
+	}
+	if canChange {
+		if j < len(v) {
+			v = v[:j]
+			changed = true
+		} else if j == 0 && v == nil {
+			v = make([]int64, 0)
+			changed = true
+		}
+	}
+	slh.End()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecSliceBoolR(f *codecFnInfo, rv reflect.Value) {
+	if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*[]bool)
+		v, changed := fastpathTV.DecSliceBoolV(*vp, !array, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		v := rv2i(rv).([]bool)
+		v2, changed := fastpathTV.DecSliceBoolV(v, !array, d)
+		if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+			copy(v, v2)
+		}
+	}
+}
+func (f fastpathT) DecSliceBoolX(vp *[]bool, d *Decoder) {
+	v, changed := f.DecSliceBoolV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecSliceBoolV(v []bool, canChange bool, d *Decoder) (_ []bool, changed bool) {
+	dd := d.d
+	slh, containerLenS := d.decSliceHelperStart()
+	if containerLenS == 0 {
+		if canChange {
+			if v == nil {
+				v = []bool{}
+			} else if len(v) != 0 {
+				v = v[:0]
+			}
+			changed = true
+		}
+		slh.End()
+		return v, changed
+	}
+	hasLen := containerLenS > 0
+	var xlen int
+	if hasLen && canChange {
+		if containerLenS > cap(v) {
+			xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1)
+			if xlen <= cap(v) {
+				v = v[:xlen]
+			} else {
+				v = make([]bool, xlen)
+			}
+			changed = true
+		} else if containerLenS != len(v) {
+			v = v[:containerLenS]
+			changed = true
+		}
+	}
+	j := 0
+	for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
+		if j == 0 && len(v) == 0 && canChange {
+			if hasLen {
+				xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1)
+			} else {
+				xlen = 8
+			}
+			v = make([]bool, xlen)
+			changed = true
+		}
+		// for indefinite-length input, or if the slice is still too short, expand it as necessary
+		var decodeIntoBlank bool
+		if j >= len(v) {
+			if canChange {
+				v = append(v, false)
+				changed = true
+			} else {
+				d.arrayCannotExpand(len(v), j+1)
+				decodeIntoBlank = true
+			}
+		}
+		slh.ElemContainerState(j)
+		if decodeIntoBlank {
+			d.swallow()
+		} else if dd.TryDecodeAsNil() {
+			v[j] = false
+		} else {
+			v[j] = dd.DecodeBool()
+		}
+	}
+	if canChange {
+		if j < len(v) {
+			v = v[:j]
+			changed = true
+		} else if j == 0 && v == nil {
+			v = make([]bool, 0)
+			changed = true
+		}
+	}
+	slh.End()
+	return v, changed
+}
+
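+// The map fast paths mirror the slice trio above (an R adapter, an X pointer
+// helper, and a V worker). The V helper allocates a nil map up front when
+// permitted, and for a nil encoded value either deletes the key
+// (DeleteOnNilMapValue) or stores the zero value for the value type.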
+func (d *Decoder) fastpathDecMapIntfIntfR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[interface{}]interface{})
+		v, changed := fastpathTV.DecMapIntfIntfV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntfIntfV(rv2i(rv).(map[interface{}]interface{}), false, d)
+	}
+}
+func (f fastpathT) DecMapIntfIntfX(vp *map[interface{}]interface{}, d *Decoder) {
+	v, changed := f.DecMapIntfIntfV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntfIntfV(v map[interface{}]interface{}, canChange bool,
+	d *Decoder) (_ map[interface{}]interface{}, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 32)
+		v = make(map[interface{}]interface{}, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
+	var mk interface{}
+	var mv interface{}
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = nil
+		d.decode(&mk)
+		if bv, bok := mk.([]byte); bok {
+			mk = d.string(bv)
+		}
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
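+			// a nil encoded value: with a nil map there is nothing to store;
+			// otherwise either delete the key or record the zero value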
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = nil
+			}
+			continue
+		}
+		if mapGet {
+			mv = v[mk]
+		} else {
+			mv = nil
+		}
+		d.decode(&mv)
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntfStringR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[interface{}]string)
+		v, changed := fastpathTV.DecMapIntfStringV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntfStringV(rv2i(rv).(map[interface{}]string), false, d)
+	}
+}
+func (f fastpathT) DecMapIntfStringX(vp *map[interface{}]string, d *Decoder) {
+	v, changed := f.DecMapIntfStringV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntfStringV(v map[interface{}]string, canChange bool,
+	d *Decoder) (_ map[interface{}]string, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 32)
+		v = make(map[interface{}]string, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk interface{}
+	var mv string
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = nil
+		d.decode(&mk)
+		if bv, bok := mk.([]byte); bok {
+			mk = d.string(bv)
+		}
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = ""
+			}
+			continue
+		}
+		mv = dd.DecodeString()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntfUintR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[interface{}]uint)
+		v, changed := fastpathTV.DecMapIntfUintV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntfUintV(rv2i(rv).(map[interface{}]uint), false, d)
+	}
+}
+func (f fastpathT) DecMapIntfUintX(vp *map[interface{}]uint, d *Decoder) {
+	v, changed := f.DecMapIntfUintV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntfUintV(v map[interface{}]uint, canChange bool,
+	d *Decoder) (_ map[interface{}]uint, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+		v = make(map[interface{}]uint, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk interface{}
+	var mv uint
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = nil
+		d.decode(&mk)
+		if bv, bok := mk.([]byte); bok {
+			mk = d.string(bv)
+		}
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntfUint8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[interface{}]uint8)
+		v, changed := fastpathTV.DecMapIntfUint8V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntfUint8V(rv2i(rv).(map[interface{}]uint8), false, d)
+	}
+}
+func (f fastpathT) DecMapIntfUint8X(vp *map[interface{}]uint8, d *Decoder) {
+	v, changed := f.DecMapIntfUint8V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntfUint8V(v map[interface{}]uint8, canChange bool,
+	d *Decoder) (_ map[interface{}]uint8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 17)
+		v = make(map[interface{}]uint8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk interface{}
+	var mv uint8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = nil
+		d.decode(&mk)
+		if bv, bok := mk.([]byte); bok {
+			mk = d.string(bv)
+		}
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntfUint16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[interface{}]uint16)
+		v, changed := fastpathTV.DecMapIntfUint16V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntfUint16V(rv2i(rv).(map[interface{}]uint16), false, d)
+	}
+}
+func (f fastpathT) DecMapIntfUint16X(vp *map[interface{}]uint16, d *Decoder) {
+	v, changed := f.DecMapIntfUint16V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntfUint16V(v map[interface{}]uint16, canChange bool,
+	d *Decoder) (_ map[interface{}]uint16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 18)
+		v = make(map[interface{}]uint16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk interface{}
+	var mv uint16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = nil
+		d.decode(&mk)
+		if bv, bok := mk.([]byte); bok {
+			mk = d.string(bv)
+		}
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntfUint32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[interface{}]uint32)
+		v, changed := fastpathTV.DecMapIntfUint32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntfUint32V(rv2i(rv).(map[interface{}]uint32), false, d)
+	}
+}
+func (f fastpathT) DecMapIntfUint32X(vp *map[interface{}]uint32, d *Decoder) {
+	v, changed := f.DecMapIntfUint32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntfUint32V(v map[interface{}]uint32, canChange bool,
+	d *Decoder) (_ map[interface{}]uint32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 20)
+		v = make(map[interface{}]uint32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk interface{}
+	var mv uint32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = nil
+		d.decode(&mk)
+		if bv, bok := mk.([]byte); bok {
+			mk = d.string(bv)
+		}
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntfUint64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[interface{}]uint64)
+		v, changed := fastpathTV.DecMapIntfUint64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntfUint64V(rv2i(rv).(map[interface{}]uint64), false, d)
+	}
+}
+func (f fastpathT) DecMapIntfUint64X(vp *map[interface{}]uint64, d *Decoder) {
+	v, changed := f.DecMapIntfUint64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntfUint64V(v map[interface{}]uint64, canChange bool,
+	d *Decoder) (_ map[interface{}]uint64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+		v = make(map[interface{}]uint64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk interface{}
+	var mv uint64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = nil
+		d.decode(&mk)
+		if bv, bok := mk.([]byte); bok {
+			mk = d.string(bv)
+		}
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeUint64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntfUintptrR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[interface{}]uintptr)
+		v, changed := fastpathTV.DecMapIntfUintptrV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntfUintptrV(rv2i(rv).(map[interface{}]uintptr), false, d)
+	}
+}
+func (f fastpathT) DecMapIntfUintptrX(vp *map[interface{}]uintptr, d *Decoder) {
+	v, changed := f.DecMapIntfUintptrV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntfUintptrV(v map[interface{}]uintptr, canChange bool,
+	d *Decoder) (_ map[interface{}]uintptr, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+		v = make(map[interface{}]uintptr, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk interface{}
+	var mv uintptr
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = nil
+		d.decode(&mk)
+		if bv, bok := mk.([]byte); bok {
+			mk = d.string(bv)
+		}
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntfIntR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[interface{}]int)
+		v, changed := fastpathTV.DecMapIntfIntV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntfIntV(rv2i(rv).(map[interface{}]int), false, d)
+	}
+}
+func (f fastpathT) DecMapIntfIntX(vp *map[interface{}]int, d *Decoder) {
+	v, changed := f.DecMapIntfIntV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntfIntV(v map[interface{}]int, canChange bool,
+	d *Decoder) (_ map[interface{}]int, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+		v = make(map[interface{}]int, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk interface{}
+	var mv int
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = nil
+		d.decode(&mk)
+		if bv, bok := mk.([]byte); bok {
+			mk = d.string(bv)
+		}
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntfInt8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[interface{}]int8)
+		v, changed := fastpathTV.DecMapIntfInt8V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntfInt8V(rv2i(rv).(map[interface{}]int8), false, d)
+	}
+}
+func (f fastpathT) DecMapIntfInt8X(vp *map[interface{}]int8, d *Decoder) {
+	v, changed := f.DecMapIntfInt8V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntfInt8V(v map[interface{}]int8, canChange bool,
+	d *Decoder) (_ map[interface{}]int8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 17)
+		v = make(map[interface{}]int8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk interface{}
+	var mv int8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = nil
+		d.decode(&mk)
+		if bv, bok := mk.([]byte); bok {
+			mk = d.string(bv)
+		}
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntfInt16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[interface{}]int16)
+		v, changed := fastpathTV.DecMapIntfInt16V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntfInt16V(rv2i(rv).(map[interface{}]int16), false, d)
+	}
+}
+func (f fastpathT) DecMapIntfInt16X(vp *map[interface{}]int16, d *Decoder) {
+	v, changed := f.DecMapIntfInt16V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntfInt16V(v map[interface{}]int16, canChange bool,
+	d *Decoder) (_ map[interface{}]int16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 18)
+		v = make(map[interface{}]int16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk interface{}
+	var mv int16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = nil
+		d.decode(&mk)
+		if bv, bok := mk.([]byte); bok {
+			mk = d.string(bv)
+		}
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntfInt32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[interface{}]int32)
+		v, changed := fastpathTV.DecMapIntfInt32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntfInt32V(rv2i(rv).(map[interface{}]int32), false, d)
+	}
+}
+func (f fastpathT) DecMapIntfInt32X(vp *map[interface{}]int32, d *Decoder) {
+	v, changed := f.DecMapIntfInt32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntfInt32V(v map[interface{}]int32, canChange bool,
+	d *Decoder) (_ map[interface{}]int32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 20)
+		v = make(map[interface{}]int32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk interface{}
+	var mv int32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = nil
+		d.decode(&mk)
+		if bv, bok := mk.([]byte); bok {
+			mk = d.string(bv)
+		}
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntfInt64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[interface{}]int64)
+		v, changed := fastpathTV.DecMapIntfInt64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntfInt64V(rv2i(rv).(map[interface{}]int64), false, d)
+	}
+}
+func (f fastpathT) DecMapIntfInt64X(vp *map[interface{}]int64, d *Decoder) {
+	v, changed := f.DecMapIntfInt64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntfInt64V(v map[interface{}]int64, canChange bool,
+	d *Decoder) (_ map[interface{}]int64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+		v = make(map[interface{}]int64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk interface{}
+	var mv int64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = nil
+		d.decode(&mk)
+		if bv, bok := mk.([]byte); bok {
+			mk = d.string(bv)
+		}
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeInt64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntfFloat32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[interface{}]float32)
+		v, changed := fastpathTV.DecMapIntfFloat32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntfFloat32V(rv2i(rv).(map[interface{}]float32), false, d)
+	}
+}
+func (f fastpathT) DecMapIntfFloat32X(vp *map[interface{}]float32, d *Decoder) {
+	v, changed := f.DecMapIntfFloat32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntfFloat32V(v map[interface{}]float32, canChange bool,
+	d *Decoder) (_ map[interface{}]float32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 20)
+		v = make(map[interface{}]float32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk interface{}
+	var mv float32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = nil
+		d.decode(&mk)
+		if bv, bok := mk.([]byte); bok {
+			mk = d.string(bv)
+		}
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntfFloat64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[interface{}]float64)
+		v, changed := fastpathTV.DecMapIntfFloat64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntfFloat64V(rv2i(rv).(map[interface{}]float64), false, d)
+	}
+}
+func (f fastpathT) DecMapIntfFloat64X(vp *map[interface{}]float64, d *Decoder) {
+	v, changed := f.DecMapIntfFloat64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntfFloat64V(v map[interface{}]float64, canChange bool,
+	d *Decoder) (_ map[interface{}]float64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+		v = make(map[interface{}]float64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk interface{}
+	var mv float64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = nil
+		d.decode(&mk)
+		if bv, bok := mk.([]byte); bok {
+			mk = d.string(bv)
+		}
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeFloat64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntfBoolR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[interface{}]bool)
+		v, changed := fastpathTV.DecMapIntfBoolV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntfBoolV(rv2i(rv).(map[interface{}]bool), false, d)
+	}
+}
+func (f fastpathT) DecMapIntfBoolX(vp *map[interface{}]bool, d *Decoder) {
+	v, changed := f.DecMapIntfBoolV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntfBoolV(v map[interface{}]bool, canChange bool,
+	d *Decoder) (_ map[interface{}]bool, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 17)
+		v = make(map[interface{}]bool, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk interface{}
+	var mv bool
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = nil
+		d.decode(&mk)
+		if bv, bok := mk.([]byte); bok {
+			mk = d.string(bv)
+		}
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = false
+			}
+			continue
+		}
+		mv = dd.DecodeBool()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
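+// String-keyed maps read the key directly via DecodeString, avoiding the
+// generic d.decode path (and the []byte-to-string interning) that the
+// interface{}-keyed variants above require.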
+func (d *Decoder) fastpathDecMapStringIntfR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[string]interface{})
+		v, changed := fastpathTV.DecMapStringIntfV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapStringIntfV(rv2i(rv).(map[string]interface{}), false, d)
+	}
+}
+func (f fastpathT) DecMapStringIntfX(vp *map[string]interface{}, d *Decoder) {
+	v, changed := f.DecMapStringIntfV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapStringIntfV(v map[string]interface{}, canChange bool,
+	d *Decoder) (_ map[string]interface{}, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 32)
+		v = make(map[string]interface{}, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
+	var mk string
+	var mv interface{}
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeString()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = nil
+			}
+			continue
+		}
+		if mapGet {
+			mv = v[mk]
+		} else {
+			mv = nil
+		}
+		d.decode(&mv)
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapStringStringR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[string]string)
+		v, changed := fastpathTV.DecMapStringStringV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapStringStringV(rv2i(rv).(map[string]string), false, d)
+	}
+}
+func (f fastpathT) DecMapStringStringX(vp *map[string]string, d *Decoder) {
+	v, changed := f.DecMapStringStringV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapStringStringV(v map[string]string, canChange bool,
+	d *Decoder) (_ map[string]string, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 32)
+		v = make(map[string]string, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk string
+	var mv string
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeString()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = ""
+			}
+			continue
+		}
+		mv = dd.DecodeString()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapStringUintR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[string]uint)
+		v, changed := fastpathTV.DecMapStringUintV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapStringUintV(rv2i(rv).(map[string]uint), false, d)
+	}
+}
+func (f fastpathT) DecMapStringUintX(vp *map[string]uint, d *Decoder) {
+	v, changed := f.DecMapStringUintV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapStringUintV(v map[string]uint, canChange bool,
+	d *Decoder) (_ map[string]uint, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+		v = make(map[string]uint, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk string
+	var mv uint
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeString()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapStringUint8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[string]uint8)
+		v, changed := fastpathTV.DecMapStringUint8V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapStringUint8V(rv2i(rv).(map[string]uint8), false, d)
+	}
+}
+func (f fastpathT) DecMapStringUint8X(vp *map[string]uint8, d *Decoder) {
+	v, changed := f.DecMapStringUint8V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapStringUint8V(v map[string]uint8, canChange bool,
+	d *Decoder) (_ map[string]uint8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 17)
+		v = make(map[string]uint8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk string
+	var mv uint8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeString()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapStringUint16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[string]uint16)
+		v, changed := fastpathTV.DecMapStringUint16V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapStringUint16V(rv2i(rv).(map[string]uint16), false, d)
+	}
+}
+func (f fastpathT) DecMapStringUint16X(vp *map[string]uint16, d *Decoder) {
+	v, changed := f.DecMapStringUint16V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapStringUint16V(v map[string]uint16, canChange bool,
+	d *Decoder) (_ map[string]uint16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 18)
+		v = make(map[string]uint16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk string
+	var mv uint16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeString()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapStringUint32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[string]uint32)
+		v, changed := fastpathTV.DecMapStringUint32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapStringUint32V(rv2i(rv).(map[string]uint32), false, d)
+	}
+}
+func (f fastpathT) DecMapStringUint32X(vp *map[string]uint32, d *Decoder) {
+	v, changed := f.DecMapStringUint32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapStringUint32V(v map[string]uint32, canChange bool,
+	d *Decoder) (_ map[string]uint32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 20)
+		v = make(map[string]uint32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk string
+	var mv uint32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeString()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapStringUint64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[string]uint64)
+		v, changed := fastpathTV.DecMapStringUint64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapStringUint64V(rv2i(rv).(map[string]uint64), false, d)
+	}
+}
+func (f fastpathT) DecMapStringUint64X(vp *map[string]uint64, d *Decoder) {
+	v, changed := f.DecMapStringUint64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapStringUint64V(v map[string]uint64, canChange bool,
+	d *Decoder) (_ map[string]uint64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+		v = make(map[string]uint64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk string
+	var mv uint64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeString()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeUint64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapStringUintptrR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[string]uintptr)
+		v, changed := fastpathTV.DecMapStringUintptrV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapStringUintptrV(rv2i(rv).(map[string]uintptr), false, d)
+	}
+}
+func (f fastpathT) DecMapStringUintptrX(vp *map[string]uintptr, d *Decoder) {
+	v, changed := f.DecMapStringUintptrV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapStringUintptrV(v map[string]uintptr, canChange bool,
+	d *Decoder) (_ map[string]uintptr, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+		v = make(map[string]uintptr, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk string
+	var mv uintptr
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeString()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapStringIntR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[string]int)
+		v, changed := fastpathTV.DecMapStringIntV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapStringIntV(rv2i(rv).(map[string]int), false, d)
+	}
+}
+func (f fastpathT) DecMapStringIntX(vp *map[string]int, d *Decoder) {
+	v, changed := f.DecMapStringIntV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapStringIntV(v map[string]int, canChange bool,
+	d *Decoder) (_ map[string]int, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+		v = make(map[string]int, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk string
+	var mv int
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeString()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapStringInt8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[string]int8)
+		v, changed := fastpathTV.DecMapStringInt8V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapStringInt8V(rv2i(rv).(map[string]int8), false, d)
+	}
+}
+func (f fastpathT) DecMapStringInt8X(vp *map[string]int8, d *Decoder) {
+	v, changed := f.DecMapStringInt8V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapStringInt8V(v map[string]int8, canChange bool,
+	d *Decoder) (_ map[string]int8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 17)
+		v = make(map[string]int8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk string
+	var mv int8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeString()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapStringInt16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[string]int16)
+		v, changed := fastpathTV.DecMapStringInt16V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapStringInt16V(rv2i(rv).(map[string]int16), false, d)
+	}
+}
+func (f fastpathT) DecMapStringInt16X(vp *map[string]int16, d *Decoder) {
+	v, changed := f.DecMapStringInt16V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapStringInt16V(v map[string]int16, canChange bool,
+	d *Decoder) (_ map[string]int16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 18)
+		v = make(map[string]int16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk string
+	var mv int16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeString()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapStringInt32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[string]int32)
+		v, changed := fastpathTV.DecMapStringInt32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapStringInt32V(rv2i(rv).(map[string]int32), false, d)
+	}
+}
+func (f fastpathT) DecMapStringInt32X(vp *map[string]int32, d *Decoder) {
+	v, changed := f.DecMapStringInt32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapStringInt32V(v map[string]int32, canChange bool,
+	d *Decoder) (_ map[string]int32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 20)
+		v = make(map[string]int32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk string
+	var mv int32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeString()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapStringInt64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[string]int64)
+		v, changed := fastpathTV.DecMapStringInt64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapStringInt64V(rv2i(rv).(map[string]int64), false, d)
+	}
+}
+func (f fastpathT) DecMapStringInt64X(vp *map[string]int64, d *Decoder) {
+	v, changed := f.DecMapStringInt64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapStringInt64V(v map[string]int64, canChange bool,
+	d *Decoder) (_ map[string]int64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+		v = make(map[string]int64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk string
+	var mv int64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeString()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeInt64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapStringFloat32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[string]float32)
+		v, changed := fastpathTV.DecMapStringFloat32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapStringFloat32V(rv2i(rv).(map[string]float32), false, d)
+	}
+}
+func (f fastpathT) DecMapStringFloat32X(vp *map[string]float32, d *Decoder) {
+	v, changed := f.DecMapStringFloat32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapStringFloat32V(v map[string]float32, canChange bool,
+	d *Decoder) (_ map[string]float32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 20)
+		v = make(map[string]float32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk string
+	var mv float32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeString()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapStringFloat64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[string]float64)
+		v, changed := fastpathTV.DecMapStringFloat64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapStringFloat64V(rv2i(rv).(map[string]float64), false, d)
+	}
+}
+func (f fastpathT) DecMapStringFloat64X(vp *map[string]float64, d *Decoder) {
+	v, changed := f.DecMapStringFloat64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapStringFloat64V(v map[string]float64, canChange bool,
+	d *Decoder) (_ map[string]float64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+		v = make(map[string]float64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk string
+	var mv float64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeString()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeFloat64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapStringBoolR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[string]bool)
+		v, changed := fastpathTV.DecMapStringBoolV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapStringBoolV(rv2i(rv).(map[string]bool), false, d)
+	}
+}
+func (f fastpathT) DecMapStringBoolX(vp *map[string]bool, d *Decoder) {
+	v, changed := f.DecMapStringBoolV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapStringBoolV(v map[string]bool, canChange bool,
+	d *Decoder) (_ map[string]bool, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 17)
+		v = make(map[string]bool, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk string
+	var mv bool
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeString()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = false
+			}
+			continue
+		}
+		mv = dd.DecodeBool()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat32IntfR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[float32]interface{})
+		v, changed := fastpathTV.DecMapFloat32IntfV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapFloat32IntfV(rv2i(rv).(map[float32]interface{}), false, d)
+	}
+}
+func (f fastpathT) DecMapFloat32IntfX(vp *map[float32]interface{}, d *Decoder) {
+	v, changed := f.DecMapFloat32IntfV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapFloat32IntfV(v map[float32]interface{}, canChange bool,
+	d *Decoder) (_ map[float32]interface{}, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 20)
+		v = make(map[float32]interface{}, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
+	var mk float32
+	var mv interface{}
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = nil
+			}
+			continue
+		}
+		if mapGet {
+			mv = v[mk]
+		} else {
+			mv = nil
+		}
+		d.decode(&mv)
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat32StringR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[float32]string)
+		v, changed := fastpathTV.DecMapFloat32StringV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapFloat32StringV(rv2i(rv).(map[float32]string), false, d)
+	}
+}
+func (f fastpathT) DecMapFloat32StringX(vp *map[float32]string, d *Decoder) {
+	v, changed := f.DecMapFloat32StringV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapFloat32StringV(v map[float32]string, canChange bool,
+	d *Decoder) (_ map[float32]string, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 20)
+		v = make(map[float32]string, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk float32
+	var mv string
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = ""
+			}
+			continue
+		}
+		mv = dd.DecodeString()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat32UintR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[float32]uint)
+		v, changed := fastpathTV.DecMapFloat32UintV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapFloat32UintV(rv2i(rv).(map[float32]uint), false, d)
+	}
+}
+func (f fastpathT) DecMapFloat32UintX(vp *map[float32]uint, d *Decoder) {
+	v, changed := f.DecMapFloat32UintV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapFloat32UintV(v map[float32]uint, canChange bool,
+	d *Decoder) (_ map[float32]uint, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[float32]uint, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk float32
+	var mv uint
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat32Uint8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[float32]uint8)
+		v, changed := fastpathTV.DecMapFloat32Uint8V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapFloat32Uint8V(rv2i(rv).(map[float32]uint8), false, d)
+	}
+}
+func (f fastpathT) DecMapFloat32Uint8X(vp *map[float32]uint8, d *Decoder) {
+	v, changed := f.DecMapFloat32Uint8V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapFloat32Uint8V(v map[float32]uint8, canChange bool,
+	d *Decoder) (_ map[float32]uint8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
+		v = make(map[float32]uint8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk float32
+	var mv uint8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat32Uint16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[float32]uint16)
+		v, changed := fastpathTV.DecMapFloat32Uint16V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapFloat32Uint16V(rv2i(rv).(map[float32]uint16), false, d)
+	}
+}
+func (f fastpathT) DecMapFloat32Uint16X(vp *map[float32]uint16, d *Decoder) {
+	v, changed := f.DecMapFloat32Uint16V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapFloat32Uint16V(v map[float32]uint16, canChange bool,
+	d *Decoder) (_ map[float32]uint16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 6)
+		v = make(map[float32]uint16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk float32
+	var mv uint16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat32Uint32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[float32]uint32)
+		v, changed := fastpathTV.DecMapFloat32Uint32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapFloat32Uint32V(rv2i(rv).(map[float32]uint32), false, d)
+	}
+}
+func (f fastpathT) DecMapFloat32Uint32X(vp *map[float32]uint32, d *Decoder) {
+	v, changed := f.DecMapFloat32Uint32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapFloat32Uint32V(v map[float32]uint32, canChange bool,
+	d *Decoder) (_ map[float32]uint32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 8)
+		v = make(map[float32]uint32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk float32
+	var mv uint32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat32Uint64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[float32]uint64)
+		v, changed := fastpathTV.DecMapFloat32Uint64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapFloat32Uint64V(rv2i(rv).(map[float32]uint64), false, d)
+	}
+}
+func (f fastpathT) DecMapFloat32Uint64X(vp *map[float32]uint64, d *Decoder) {
+	v, changed := f.DecMapFloat32Uint64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapFloat32Uint64V(v map[float32]uint64, canChange bool,
+	d *Decoder) (_ map[float32]uint64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[float32]uint64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk float32
+	var mv uint64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeUint64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat32UintptrR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[float32]uintptr)
+		v, changed := fastpathTV.DecMapFloat32UintptrV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapFloat32UintptrV(rv2i(rv).(map[float32]uintptr), false, d)
+	}
+}
+func (f fastpathT) DecMapFloat32UintptrX(vp *map[float32]uintptr, d *Decoder) {
+	v, changed := f.DecMapFloat32UintptrV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapFloat32UintptrV(v map[float32]uintptr, canChange bool,
+	d *Decoder) (_ map[float32]uintptr, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[float32]uintptr, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk float32
+	var mv uintptr
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat32IntR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[float32]int)
+		v, changed := fastpathTV.DecMapFloat32IntV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapFloat32IntV(rv2i(rv).(map[float32]int), false, d)
+	}
+}
+func (f fastpathT) DecMapFloat32IntX(vp *map[float32]int, d *Decoder) {
+	v, changed := f.DecMapFloat32IntV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapFloat32IntV(v map[float32]int, canChange bool,
+	d *Decoder) (_ map[float32]int, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[float32]int, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk float32
+	var mv int
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat32Int8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[float32]int8)
+		v, changed := fastpathTV.DecMapFloat32Int8V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapFloat32Int8V(rv2i(rv).(map[float32]int8), false, d)
+	}
+}
+func (f fastpathT) DecMapFloat32Int8X(vp *map[float32]int8, d *Decoder) {
+	v, changed := f.DecMapFloat32Int8V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapFloat32Int8V(v map[float32]int8, canChange bool,
+	d *Decoder) (_ map[float32]int8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
+		v = make(map[float32]int8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk float32
+	var mv int8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat32Int16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[float32]int16)
+		v, changed := fastpathTV.DecMapFloat32Int16V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapFloat32Int16V(rv2i(rv).(map[float32]int16), false, d)
+	}
+}
+func (f fastpathT) DecMapFloat32Int16X(vp *map[float32]int16, d *Decoder) {
+	v, changed := f.DecMapFloat32Int16V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapFloat32Int16V(v map[float32]int16, canChange bool,
+	d *Decoder) (_ map[float32]int16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 6)
+		v = make(map[float32]int16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk float32
+	var mv int16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat32Int32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[float32]int32)
+		v, changed := fastpathTV.DecMapFloat32Int32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapFloat32Int32V(rv2i(rv).(map[float32]int32), false, d)
+	}
+}
+func (f fastpathT) DecMapFloat32Int32X(vp *map[float32]int32, d *Decoder) {
+	v, changed := f.DecMapFloat32Int32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapFloat32Int32V(v map[float32]int32, canChange bool,
+	d *Decoder) (_ map[float32]int32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 8)
+		v = make(map[float32]int32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk float32
+	var mv int32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat32Int64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[float32]int64)
+		v, changed := fastpathTV.DecMapFloat32Int64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapFloat32Int64V(rv2i(rv).(map[float32]int64), false, d)
+	}
+}
+func (f fastpathT) DecMapFloat32Int64X(vp *map[float32]int64, d *Decoder) {
+	v, changed := f.DecMapFloat32Int64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapFloat32Int64V(v map[float32]int64, canChange bool,
+	d *Decoder) (_ map[float32]int64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[float32]int64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk float32
+	var mv int64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeInt64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat32Float32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[float32]float32)
+		v, changed := fastpathTV.DecMapFloat32Float32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapFloat32Float32V(rv2i(rv).(map[float32]float32), false, d)
+	}
+}
+func (f fastpathT) DecMapFloat32Float32X(vp *map[float32]float32, d *Decoder) {
+	v, changed := f.DecMapFloat32Float32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapFloat32Float32V(v map[float32]float32, canChange bool,
+	d *Decoder) (_ map[float32]float32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 8)
+		v = make(map[float32]float32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk float32
+	var mv float32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat32Float64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[float32]float64)
+		v, changed := fastpathTV.DecMapFloat32Float64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapFloat32Float64V(rv2i(rv).(map[float32]float64), false, d)
+	}
+}
+func (f fastpathT) DecMapFloat32Float64X(vp *map[float32]float64, d *Decoder) {
+	v, changed := f.DecMapFloat32Float64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapFloat32Float64V(v map[float32]float64, canChange bool,
+	d *Decoder) (_ map[float32]float64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[float32]float64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk float32
+	var mv float64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeFloat64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat32BoolR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[float32]bool)
+		v, changed := fastpathTV.DecMapFloat32BoolV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapFloat32BoolV(rv2i(rv).(map[float32]bool), false, d)
+	}
+}
+func (f fastpathT) DecMapFloat32BoolX(vp *map[float32]bool, d *Decoder) {
+	v, changed := f.DecMapFloat32BoolV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapFloat32BoolV(v map[float32]bool, canChange bool,
+	d *Decoder) (_ map[float32]bool, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
+		v = make(map[float32]bool, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk float32
+	var mv bool
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = false
+			}
+			continue
+		}
+		mv = dd.DecodeBool()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat64IntfR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[float64]interface{})
+		v, changed := fastpathTV.DecMapFloat64IntfV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapFloat64IntfV(rv2i(rv).(map[float64]interface{}), false, d)
+	}
+}
+func (f fastpathT) DecMapFloat64IntfX(vp *map[float64]interface{}, d *Decoder) {
+	v, changed := f.DecMapFloat64IntfV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapFloat64IntfV(v map[float64]interface{}, canChange bool,
+	d *Decoder) (_ map[float64]interface{}, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+		v = make(map[float64]interface{}, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
+	var mk float64
+	var mv interface{}
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeFloat64()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = nil
+			}
+			continue
+		}
+		if mapGet {
+			mv = v[mk]
+		} else {
+			mv = nil
+		}
+		d.decode(&mv)
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat64StringR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[float64]string)
+		v, changed := fastpathTV.DecMapFloat64StringV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapFloat64StringV(rv2i(rv).(map[float64]string), false, d)
+	}
+}
+func (f fastpathT) DecMapFloat64StringX(vp *map[float64]string, d *Decoder) {
+	v, changed := f.DecMapFloat64StringV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapFloat64StringV(v map[float64]string, canChange bool,
+	d *Decoder) (_ map[float64]string, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+		v = make(map[float64]string, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk float64
+	var mv string
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeFloat64()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = ""
+			}
+			continue
+		}
+		mv = dd.DecodeString()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat64UintR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[float64]uint)
+		v, changed := fastpathTV.DecMapFloat64UintV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapFloat64UintV(rv2i(rv).(map[float64]uint), false, d)
+	}
+}
+func (f fastpathT) DecMapFloat64UintX(vp *map[float64]uint, d *Decoder) {
+	v, changed := f.DecMapFloat64UintV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapFloat64UintV(v map[float64]uint, canChange bool,
+	d *Decoder) (_ map[float64]uint, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[float64]uint, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk float64
+	var mv uint
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeFloat64()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat64Uint8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[float64]uint8)
+		v, changed := fastpathTV.DecMapFloat64Uint8V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapFloat64Uint8V(rv2i(rv).(map[float64]uint8), false, d)
+	}
+}
+func (f fastpathT) DecMapFloat64Uint8X(vp *map[float64]uint8, d *Decoder) {
+	v, changed := f.DecMapFloat64Uint8V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapFloat64Uint8V(v map[float64]uint8, canChange bool,
+	d *Decoder) (_ map[float64]uint8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[float64]uint8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk float64
+	var mv uint8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeFloat64()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat64Uint16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[float64]uint16)
+		v, changed := fastpathTV.DecMapFloat64Uint16V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapFloat64Uint16V(rv2i(rv).(map[float64]uint16), false, d)
+	}
+}
+func (f fastpathT) DecMapFloat64Uint16X(vp *map[float64]uint16, d *Decoder) {
+	v, changed := f.DecMapFloat64Uint16V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapFloat64Uint16V(v map[float64]uint16, canChange bool,
+	d *Decoder) (_ map[float64]uint16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+		v = make(map[float64]uint16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk float64
+	var mv uint16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeFloat64()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat64Uint32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[float64]uint32)
+		v, changed := fastpathTV.DecMapFloat64Uint32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapFloat64Uint32V(rv2i(rv).(map[float64]uint32), false, d)
+	}
+}
+func (f fastpathT) DecMapFloat64Uint32X(vp *map[float64]uint32, d *Decoder) {
+	v, changed := f.DecMapFloat64Uint32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapFloat64Uint32V(v map[float64]uint32, canChange bool,
+	d *Decoder) (_ map[float64]uint32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[float64]uint32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk float64
+	var mv uint32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeFloat64()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat64Uint64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[float64]uint64)
+		v, changed := fastpathTV.DecMapFloat64Uint64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapFloat64Uint64V(rv2i(rv).(map[float64]uint64), false, d)
+	}
+}
+func (f fastpathT) DecMapFloat64Uint64X(vp *map[float64]uint64, d *Decoder) {
+	v, changed := f.DecMapFloat64Uint64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapFloat64Uint64V(v map[float64]uint64, canChange bool,
+	d *Decoder) (_ map[float64]uint64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[float64]uint64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk float64
+	var mv uint64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeFloat64()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeUint64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat64UintptrR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[float64]uintptr)
+		v, changed := fastpathTV.DecMapFloat64UintptrV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapFloat64UintptrV(rv2i(rv).(map[float64]uintptr), false, d)
+	}
+}
+func (f fastpathT) DecMapFloat64UintptrX(vp *map[float64]uintptr, d *Decoder) {
+	v, changed := f.DecMapFloat64UintptrV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapFloat64UintptrV(v map[float64]uintptr, canChange bool,
+	d *Decoder) (_ map[float64]uintptr, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[float64]uintptr, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk float64
+	var mv uintptr
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeFloat64()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
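+// A nil value in the stream is consumed by TryDecodeAsNil and never
+// decoded into the entry: the key is either deleted (when
+// d.h.DeleteOnNilMapValue is set) or reset to the value type's zero
+// value, and a nil destination map skips the write altogether.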
+func (d *Decoder) fastpathDecMapFloat64IntR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[float64]int)
+		v, changed := fastpathTV.DecMapFloat64IntV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapFloat64IntV(rv2i(rv).(map[float64]int), false, d)
+	}
+}
+func (f fastpathT) DecMapFloat64IntX(vp *map[float64]int, d *Decoder) {
+	v, changed := f.DecMapFloat64IntV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapFloat64IntV(v map[float64]int, canChange bool,
+	d *Decoder) (_ map[float64]int, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[float64]int, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk float64
+	var mv int
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeFloat64()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
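+// Integers arrive from the format as 64-bit values, so narrower value
+// types are funneled through chkOvf (e.g. chkOvf.IntV(x, 8) below),
+// which presumably rejects values that overflow the target bit size
+// before the generated code truncates them.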
+func (d *Decoder) fastpathDecMapFloat64Int8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[float64]int8)
+		v, changed := fastpathTV.DecMapFloat64Int8V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapFloat64Int8V(rv2i(rv).(map[float64]int8), false, d)
+	}
+}
+func (f fastpathT) DecMapFloat64Int8X(vp *map[float64]int8, d *Decoder) {
+	v, changed := f.DecMapFloat64Int8V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapFloat64Int8V(v map[float64]int8, canChange bool,
+	d *Decoder) (_ map[float64]int8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[float64]int8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk float64
+	var mv int8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeFloat64()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat64Int16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[float64]int16)
+		v, changed := fastpathTV.DecMapFloat64Int16V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapFloat64Int16V(rv2i(rv).(map[float64]int16), false, d)
+	}
+}
+func (f fastpathT) DecMapFloat64Int16X(vp *map[float64]int16, d *Decoder) {
+	v, changed := f.DecMapFloat64Int16V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapFloat64Int16V(v map[float64]int16, canChange bool,
+	d *Decoder) (_ map[float64]int16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+		v = make(map[float64]int16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk float64
+	var mv int16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeFloat64()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat64Int32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[float64]int32)
+		v, changed := fastpathTV.DecMapFloat64Int32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapFloat64Int32V(rv2i(rv).(map[float64]int32), false, d)
+	}
+}
+func (f fastpathT) DecMapFloat64Int32X(vp *map[float64]int32, d *Decoder) {
+	v, changed := f.DecMapFloat64Int32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapFloat64Int32V(v map[float64]int32, canChange bool,
+	d *Decoder) (_ map[float64]int32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[float64]int32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk float64
+	var mv int32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeFloat64()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat64Int64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[float64]int64)
+		v, changed := fastpathTV.DecMapFloat64Int64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapFloat64Int64V(rv2i(rv).(map[float64]int64), false, d)
+	}
+}
+func (f fastpathT) DecMapFloat64Int64X(vp *map[float64]int64, d *Decoder) {
+	v, changed := f.DecMapFloat64Int64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapFloat64Int64V(v map[float64]int64, canChange bool,
+	d *Decoder) (_ map[float64]int64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[float64]int64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk float64
+	var mv int64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeFloat64()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeInt64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat64Float32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[float64]float32)
+		v, changed := fastpathTV.DecMapFloat64Float32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapFloat64Float32V(rv2i(rv).(map[float64]float32), false, d)
+	}
+}
+func (f fastpathT) DecMapFloat64Float32X(vp *map[float64]float32, d *Decoder) {
+	v, changed := f.DecMapFloat64Float32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapFloat64Float32V(v map[float64]float32, canChange bool,
+	d *Decoder) (_ map[float64]float32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[float64]float32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk float64
+	var mv float32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeFloat64()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat64Float64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[float64]float64)
+		v, changed := fastpathTV.DecMapFloat64Float64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapFloat64Float64V(rv2i(rv).(map[float64]float64), false, d)
+	}
+}
+func (f fastpathT) DecMapFloat64Float64X(vp *map[float64]float64, d *Decoder) {
+	v, changed := f.DecMapFloat64Float64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapFloat64Float64V(v map[float64]float64, canChange bool,
+	d *Decoder) (_ map[float64]float64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[float64]float64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk float64
+	var mv float64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeFloat64()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeFloat64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat64BoolR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[float64]bool)
+		v, changed := fastpathTV.DecMapFloat64BoolV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapFloat64BoolV(rv2i(rv).(map[float64]bool), false, d)
+	}
+}
+func (f fastpathT) DecMapFloat64BoolX(vp *map[float64]bool, d *Decoder) {
+	v, changed := f.DecMapFloat64BoolV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapFloat64BoolV(v map[float64]bool, canChange bool,
+	d *Decoder) (_ map[float64]bool, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[float64]bool, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk float64
+	var mv bool
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeFloat64()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = false
+			}
+			continue
+		}
+		mv = dd.DecodeBool()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
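+// For interface{} values, mapGet decides whether an existing entry is
+// passed to d.decode so it can be decoded into in place. It is false
+// when the map is nil or when the handle asks for fresh values via
+// MapValueReset/InterfaceReset, in which case decoding starts from nil.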
+func (d *Decoder) fastpathDecMapUintIntfR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint]interface{})
+		v, changed := fastpathTV.DecMapUintIntfV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintIntfV(rv2i(rv).(map[uint]interface{}), false, d)
+	}
+}
+func (f fastpathT) DecMapUintIntfX(vp *map[uint]interface{}, d *Decoder) {
+	v, changed := f.DecMapUintIntfV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintIntfV(v map[uint]interface{}, canChange bool,
+	d *Decoder) (_ map[uint]interface{}, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+		v = make(map[uint]interface{}, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
+	var mk uint
+	var mv interface{}
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = nil
+			}
+			continue
+		}
+		if mapGet {
+			mv = v[mk]
+		} else {
+			mv = nil
+		}
+		d.decode(&mv)
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintStringR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint]string)
+		v, changed := fastpathTV.DecMapUintStringV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintStringV(rv2i(rv).(map[uint]string), false, d)
+	}
+}
+func (f fastpathT) DecMapUintStringX(vp *map[uint]string, d *Decoder) {
+	v, changed := f.DecMapUintStringV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintStringV(v map[uint]string, canChange bool,
+	d *Decoder) (_ map[uint]string, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+		v = make(map[uint]string, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint
+	var mv string
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = ""
+			}
+			continue
+		}
+		mv = dd.DecodeString()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintUintR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint]uint)
+		v, changed := fastpathTV.DecMapUintUintV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintUintV(rv2i(rv).(map[uint]uint), false, d)
+	}
+}
+func (f fastpathT) DecMapUintUintX(vp *map[uint]uint, d *Decoder) {
+	v, changed := f.DecMapUintUintV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintUintV(v map[uint]uint, canChange bool,
+	d *Decoder) (_ map[uint]uint, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[uint]uint, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint
+	var mv uint
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintUint8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint]uint8)
+		v, changed := fastpathTV.DecMapUintUint8V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintUint8V(rv2i(rv).(map[uint]uint8), false, d)
+	}
+}
+func (f fastpathT) DecMapUintUint8X(vp *map[uint]uint8, d *Decoder) {
+	v, changed := f.DecMapUintUint8V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintUint8V(v map[uint]uint8, canChange bool,
+	d *Decoder) (_ map[uint]uint8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[uint]uint8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint
+	var mv uint8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintUint16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint]uint16)
+		v, changed := fastpathTV.DecMapUintUint16V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintUint16V(rv2i(rv).(map[uint]uint16), false, d)
+	}
+}
+func (f fastpathT) DecMapUintUint16X(vp *map[uint]uint16, d *Decoder) {
+	v, changed := f.DecMapUintUint16V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintUint16V(v map[uint]uint16, canChange bool,
+	d *Decoder) (_ map[uint]uint16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+		v = make(map[uint]uint16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint
+	var mv uint16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintUint32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint]uint32)
+		v, changed := fastpathTV.DecMapUintUint32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintUint32V(rv2i(rv).(map[uint]uint32), false, d)
+	}
+}
+func (f fastpathT) DecMapUintUint32X(vp *map[uint]uint32, d *Decoder) {
+	v, changed := f.DecMapUintUint32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintUint32V(v map[uint]uint32, canChange bool,
+	d *Decoder) (_ map[uint]uint32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[uint]uint32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint
+	var mv uint32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintUint64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint]uint64)
+		v, changed := fastpathTV.DecMapUintUint64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintUint64V(rv2i(rv).(map[uint]uint64), false, d)
+	}
+}
+func (f fastpathT) DecMapUintUint64X(vp *map[uint]uint64, d *Decoder) {
+	v, changed := f.DecMapUintUint64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintUint64V(v map[uint]uint64, canChange bool,
+	d *Decoder) (_ map[uint]uint64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[uint]uint64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint
+	var mv uint64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeUint64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintUintptrR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint]uintptr)
+		v, changed := fastpathTV.DecMapUintUintptrV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintUintptrV(rv2i(rv).(map[uint]uintptr), false, d)
+	}
+}
+func (f fastpathT) DecMapUintUintptrX(vp *map[uint]uintptr, d *Decoder) {
+	v, changed := f.DecMapUintUintptrV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintUintptrV(v map[uint]uintptr, canChange bool,
+	d *Decoder) (_ map[uint]uintptr, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[uint]uintptr, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint
+	var mv uintptr
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintIntR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint]int)
+		v, changed := fastpathTV.DecMapUintIntV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintIntV(rv2i(rv).(map[uint]int), false, d)
+	}
+}
+func (f fastpathT) DecMapUintIntX(vp *map[uint]int, d *Decoder) {
+	v, changed := f.DecMapUintIntV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintIntV(v map[uint]int, canChange bool,
+	d *Decoder) (_ map[uint]int, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[uint]int, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint
+	var mv int
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintInt8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint]int8)
+		v, changed := fastpathTV.DecMapUintInt8V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintInt8V(rv2i(rv).(map[uint]int8), false, d)
+	}
+}
+func (f fastpathT) DecMapUintInt8X(vp *map[uint]int8, d *Decoder) {
+	v, changed := f.DecMapUintInt8V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintInt8V(v map[uint]int8, canChange bool,
+	d *Decoder) (_ map[uint]int8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[uint]int8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint
+	var mv int8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintInt16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint]int16)
+		v, changed := fastpathTV.DecMapUintInt16V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintInt16V(rv2i(rv).(map[uint]int16), false, d)
+	}
+}
+func (f fastpathT) DecMapUintInt16X(vp *map[uint]int16, d *Decoder) {
+	v, changed := f.DecMapUintInt16V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintInt16V(v map[uint]int16, canChange bool,
+	d *Decoder) (_ map[uint]int16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+		v = make(map[uint]int16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint
+	var mv int16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintInt32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint]int32)
+		v, changed := fastpathTV.DecMapUintInt32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintInt32V(rv2i(rv).(map[uint]int32), false, d)
+	}
+}
+func (f fastpathT) DecMapUintInt32X(vp *map[uint]int32, d *Decoder) {
+	v, changed := f.DecMapUintInt32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintInt32V(v map[uint]int32, canChange bool,
+	d *Decoder) (_ map[uint]int32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[uint]int32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint
+	var mv int32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintInt64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint]int64)
+		v, changed := fastpathTV.DecMapUintInt64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintInt64V(rv2i(rv).(map[uint]int64), false, d)
+	}
+}
+func (f fastpathT) DecMapUintInt64X(vp *map[uint]int64, d *Decoder) {
+	v, changed := f.DecMapUintInt64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintInt64V(v map[uint]int64, canChange bool,
+	d *Decoder) (_ map[uint]int64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[uint]int64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint
+	var mv int64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeInt64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintFloat32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint]float32)
+		v, changed := fastpathTV.DecMapUintFloat32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintFloat32V(rv2i(rv).(map[uint]float32), false, d)
+	}
+}
+func (f fastpathT) DecMapUintFloat32X(vp *map[uint]float32, d *Decoder) {
+	v, changed := f.DecMapUintFloat32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintFloat32V(v map[uint]float32, canChange bool,
+	d *Decoder) (_ map[uint]float32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[uint]float32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint
+	var mv float32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintFloat64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint]float64)
+		v, changed := fastpathTV.DecMapUintFloat64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintFloat64V(rv2i(rv).(map[uint]float64), false, d)
+	}
+}
+func (f fastpathT) DecMapUintFloat64X(vp *map[uint]float64, d *Decoder) {
+	v, changed := f.DecMapUintFloat64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintFloat64V(v map[uint]float64, canChange bool,
+	d *Decoder) (_ map[uint]float64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[uint]float64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint
+	var mv float64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeFloat64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintBoolR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint]bool)
+		v, changed := fastpathTV.DecMapUintBoolV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintBoolV(rv2i(rv).(map[uint]bool), false, d)
+	}
+}
+func (f fastpathT) DecMapUintBoolX(vp *map[uint]bool, d *Decoder) {
+	v, changed := f.DecMapUintBoolV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintBoolV(v map[uint]bool, canChange bool,
+	d *Decoder) (_ map[uint]bool, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[uint]bool, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint
+	var mv bool
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = false
+			}
+			continue
+		}
+		mv = dd.DecodeBool()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint8IntfR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint8]interface{})
+		v, changed := fastpathTV.DecMapUint8IntfV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint8IntfV(rv2i(rv).(map[uint8]interface{}), false, d)
+	}
+}
+func (f fastpathT) DecMapUint8IntfX(vp *map[uint8]interface{}, d *Decoder) {
+	v, changed := f.DecMapUint8IntfV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint8IntfV(v map[uint8]interface{}, canChange bool,
+	d *Decoder) (_ map[uint8]interface{}, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 17)
+		v = make(map[uint8]interface{}, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
+	var mk uint8
+	var mv interface{}
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = nil
+			}
+			continue
+		}
+		if mapGet {
+			mv = v[mk]
+		} else {
+			mv = nil
+		}
+		d.decode(&mv)
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint8StringR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint8]string)
+		v, changed := fastpathTV.DecMapUint8StringV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint8StringV(rv2i(rv).(map[uint8]string), false, d)
+	}
+}
+func (f fastpathT) DecMapUint8StringX(vp *map[uint8]string, d *Decoder) {
+	v, changed := f.DecMapUint8StringV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint8StringV(v map[uint8]string, canChange bool,
+	d *Decoder) (_ map[uint8]string, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 17)
+		v = make(map[uint8]string, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint8
+	var mv string
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = ""
+			}
+			continue
+		}
+		mv = dd.DecodeString()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint8UintR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint8]uint)
+		v, changed := fastpathTV.DecMapUint8UintV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint8UintV(rv2i(rv).(map[uint8]uint), false, d)
+	}
+}
+func (f fastpathT) DecMapUint8UintX(vp *map[uint8]uint, d *Decoder) {
+	v, changed := f.DecMapUint8UintV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint8UintV(v map[uint8]uint, canChange bool,
+	d *Decoder) (_ map[uint8]uint, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[uint8]uint, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint8
+	var mv uint
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint8Uint8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint8]uint8)
+		v, changed := fastpathTV.DecMapUint8Uint8V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint8Uint8V(rv2i(rv).(map[uint8]uint8), false, d)
+	}
+}
+func (f fastpathT) DecMapUint8Uint8X(vp *map[uint8]uint8, d *Decoder) {
+	v, changed := f.DecMapUint8Uint8V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint8Uint8V(v map[uint8]uint8, canChange bool,
+	d *Decoder) (_ map[uint8]uint8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 2)
+		v = make(map[uint8]uint8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint8
+	var mv uint8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint8Uint16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint8]uint16)
+		v, changed := fastpathTV.DecMapUint8Uint16V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint8Uint16V(rv2i(rv).(map[uint8]uint16), false, d)
+	}
+}
+func (f fastpathT) DecMapUint8Uint16X(vp *map[uint8]uint16, d *Decoder) {
+	v, changed := f.DecMapUint8Uint16V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint8Uint16V(v map[uint8]uint16, canChange bool,
+	d *Decoder) (_ map[uint8]uint16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 3)
+		v = make(map[uint8]uint16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint8
+	var mv uint16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint8Uint32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint8]uint32)
+		v, changed := fastpathTV.DecMapUint8Uint32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint8Uint32V(rv2i(rv).(map[uint8]uint32), false, d)
+	}
+}
+func (f fastpathT) DecMapUint8Uint32X(vp *map[uint8]uint32, d *Decoder) {
+	v, changed := f.DecMapUint8Uint32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint8Uint32V(v map[uint8]uint32, canChange bool,
+	d *Decoder) (_ map[uint8]uint32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
+		v = make(map[uint8]uint32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint8
+	var mv uint32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint8Uint64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint8]uint64)
+		v, changed := fastpathTV.DecMapUint8Uint64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint8Uint64V(rv2i(rv).(map[uint8]uint64), false, d)
+	}
+}
+func (f fastpathT) DecMapUint8Uint64X(vp *map[uint8]uint64, d *Decoder) {
+	v, changed := f.DecMapUint8Uint64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint8Uint64V(v map[uint8]uint64, canChange bool,
+	d *Decoder) (_ map[uint8]uint64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[uint8]uint64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint8
+	var mv uint64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeUint64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint8UintptrR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint8]uintptr)
+		v, changed := fastpathTV.DecMapUint8UintptrV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint8UintptrV(rv2i(rv).(map[uint8]uintptr), false, d)
+	}
+}
+func (f fastpathT) DecMapUint8UintptrX(vp *map[uint8]uintptr, d *Decoder) {
+	v, changed := f.DecMapUint8UintptrV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint8UintptrV(v map[uint8]uintptr, canChange bool,
+	d *Decoder) (_ map[uint8]uintptr, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[uint8]uintptr, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint8
+	var mv uintptr
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint8IntR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint8]int)
+		v, changed := fastpathTV.DecMapUint8IntV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint8IntV(rv2i(rv).(map[uint8]int), false, d)
+	}
+}
+func (f fastpathT) DecMapUint8IntX(vp *map[uint8]int, d *Decoder) {
+	v, changed := f.DecMapUint8IntV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint8IntV(v map[uint8]int, canChange bool,
+	d *Decoder) (_ map[uint8]int, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[uint8]int, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint8
+	var mv int
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint8Int8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint8]int8)
+		v, changed := fastpathTV.DecMapUint8Int8V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint8Int8V(rv2i(rv).(map[uint8]int8), false, d)
+	}
+}
+func (f fastpathT) DecMapUint8Int8X(vp *map[uint8]int8, d *Decoder) {
+	v, changed := f.DecMapUint8Int8V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint8Int8V(v map[uint8]int8, canChange bool,
+	d *Decoder) (_ map[uint8]int8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 2)
+		v = make(map[uint8]int8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint8
+	var mv int8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint8Int16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint8]int16)
+		v, changed := fastpathTV.DecMapUint8Int16V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint8Int16V(rv2i(rv).(map[uint8]int16), false, d)
+	}
+}
+func (f fastpathT) DecMapUint8Int16X(vp *map[uint8]int16, d *Decoder) {
+	v, changed := f.DecMapUint8Int16V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint8Int16V(v map[uint8]int16, canChange bool,
+	d *Decoder) (_ map[uint8]int16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 3)
+		v = make(map[uint8]int16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint8
+	var mv int16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint8Int32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint8]int32)
+		v, changed := fastpathTV.DecMapUint8Int32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint8Int32V(rv2i(rv).(map[uint8]int32), false, d)
+	}
+}
+func (f fastpathT) DecMapUint8Int32X(vp *map[uint8]int32, d *Decoder) {
+	v, changed := f.DecMapUint8Int32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint8Int32V(v map[uint8]int32, canChange bool,
+	d *Decoder) (_ map[uint8]int32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
+		v = make(map[uint8]int32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint8
+	var mv int32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint8Int64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint8]int64)
+		v, changed := fastpathTV.DecMapUint8Int64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint8Int64V(rv2i(rv).(map[uint8]int64), false, d)
+	}
+}
+func (f fastpathT) DecMapUint8Int64X(vp *map[uint8]int64, d *Decoder) {
+	v, changed := f.DecMapUint8Int64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint8Int64V(v map[uint8]int64, canChange bool,
+	d *Decoder) (_ map[uint8]int64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[uint8]int64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint8
+	var mv int64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeInt64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint8Float32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint8]float32)
+		v, changed := fastpathTV.DecMapUint8Float32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint8Float32V(rv2i(rv).(map[uint8]float32), false, d)
+	}
+}
+func (f fastpathT) DecMapUint8Float32X(vp *map[uint8]float32, d *Decoder) {
+	v, changed := f.DecMapUint8Float32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint8Float32V(v map[uint8]float32, canChange bool,
+	d *Decoder) (_ map[uint8]float32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
+		v = make(map[uint8]float32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint8
+	var mv float32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint8Float64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint8]float64)
+		v, changed := fastpathTV.DecMapUint8Float64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint8Float64V(rv2i(rv).(map[uint8]float64), false, d)
+	}
+}
+func (f fastpathT) DecMapUint8Float64X(vp *map[uint8]float64, d *Decoder) {
+	v, changed := f.DecMapUint8Float64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint8Float64V(v map[uint8]float64, canChange bool,
+	d *Decoder) (_ map[uint8]float64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[uint8]float64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint8
+	var mv float64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeFloat64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint8BoolR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint8]bool)
+		v, changed := fastpathTV.DecMapUint8BoolV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint8BoolV(rv2i(rv).(map[uint8]bool), false, d)
+	}
+}
+func (f fastpathT) DecMapUint8BoolX(vp *map[uint8]bool, d *Decoder) {
+	v, changed := f.DecMapUint8BoolV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint8BoolV(v map[uint8]bool, canChange bool,
+	d *Decoder) (_ map[uint8]bool, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 2)
+		v = make(map[uint8]bool, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint8
+	var mv bool
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = false
+			}
+			continue
+		}
+		mv = dd.DecodeBool()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16IntfR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint16]interface{})
+		v, changed := fastpathTV.DecMapUint16IntfV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint16IntfV(rv2i(rv).(map[uint16]interface{}), false, d)
+	}
+}
+func (f fastpathT) DecMapUint16IntfX(vp *map[uint16]interface{}, d *Decoder) {
+	v, changed := f.DecMapUint16IntfV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint16IntfV(v map[uint16]interface{}, canChange bool,
+	d *Decoder) (_ map[uint16]interface{}, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
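+		// The last argument to decInferLen is the estimated encoded size of
+		// one entry (key + value bytes); together with MaxInitLen it caps
+		// the initial allocation against hostile length prefixes.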
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 18)
+		v = make(map[uint16]interface{}, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
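+	// For interface{} values, decode into the existing entry when
+	// MapValueReset/InterfaceReset allow it, so already-allocated values
+	// are reused rather than replaced.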
+	mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
+	var mk uint16
+	var mv interface{}
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = nil
+			}
+			continue
+		}
+		if mapGet {
+			mv = v[mk]
+		} else {
+			mv = nil
+		}
+		d.decode(&mv)
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16StringR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint16]string)
+		v, changed := fastpathTV.DecMapUint16StringV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint16StringV(rv2i(rv).(map[uint16]string), false, d)
+	}
+}
+func (f fastpathT) DecMapUint16StringX(vp *map[uint16]string, d *Decoder) {
+	v, changed := f.DecMapUint16StringV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint16StringV(v map[uint16]string, canChange bool,
+	d *Decoder) (_ map[uint16]string, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 18)
+		v = make(map[uint16]string, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint16
+	var mv string
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
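+		// Keys are decoded as uint64 and checked by chkOvf against the
+		// 16-bit range before narrowing, so an out-of-range key triggers a
+		// decode error instead of silently truncating.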
+		mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = ""
+			}
+			continue
+		}
+		mv = dd.DecodeString()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16UintR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint16]uint)
+		v, changed := fastpathTV.DecMapUint16UintV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint16UintV(rv2i(rv).(map[uint16]uint), false, d)
+	}
+}
+func (f fastpathT) DecMapUint16UintX(vp *map[uint16]uint, d *Decoder) {
+	v, changed := f.DecMapUint16UintV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint16UintV(v map[uint16]uint, canChange bool,
+	d *Decoder) (_ map[uint16]uint, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+		v = make(map[uint16]uint, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint16
+	var mv uint
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16Uint8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint16]uint8)
+		v, changed := fastpathTV.DecMapUint16Uint8V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint16Uint8V(rv2i(rv).(map[uint16]uint8), false, d)
+	}
+}
+func (f fastpathT) DecMapUint16Uint8X(vp *map[uint16]uint8, d *Decoder) {
+	v, changed := f.DecMapUint16Uint8V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint16Uint8V(v map[uint16]uint8, canChange bool,
+	d *Decoder) (_ map[uint16]uint8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 3)
+		v = make(map[uint16]uint8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint16
+	var mv uint8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16Uint16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint16]uint16)
+		v, changed := fastpathTV.DecMapUint16Uint16V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint16Uint16V(rv2i(rv).(map[uint16]uint16), false, d)
+	}
+}
+func (f fastpathT) DecMapUint16Uint16X(vp *map[uint16]uint16, d *Decoder) {
+	v, changed := f.DecMapUint16Uint16V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint16Uint16V(v map[uint16]uint16, canChange bool,
+	d *Decoder) (_ map[uint16]uint16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 4)
+		v = make(map[uint16]uint16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint16
+	var mv uint16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16Uint32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint16]uint32)
+		v, changed := fastpathTV.DecMapUint16Uint32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint16Uint32V(rv2i(rv).(map[uint16]uint32), false, d)
+	}
+}
+func (f fastpathT) DecMapUint16Uint32X(vp *map[uint16]uint32, d *Decoder) {
+	v, changed := f.DecMapUint16Uint32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint16Uint32V(v map[uint16]uint32, canChange bool,
+	d *Decoder) (_ map[uint16]uint32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 6)
+		v = make(map[uint16]uint32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint16
+	var mv uint32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16Uint64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint16]uint64)
+		v, changed := fastpathTV.DecMapUint16Uint64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint16Uint64V(rv2i(rv).(map[uint16]uint64), false, d)
+	}
+}
+func (f fastpathT) DecMapUint16Uint64X(vp *map[uint16]uint64, d *Decoder) {
+	v, changed := f.DecMapUint16Uint64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint16Uint64V(v map[uint16]uint64, canChange bool,
+	d *Decoder) (_ map[uint16]uint64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+		v = make(map[uint16]uint64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint16
+	var mv uint64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeUint64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16UintptrR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint16]uintptr)
+		v, changed := fastpathTV.DecMapUint16UintptrV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint16UintptrV(rv2i(rv).(map[uint16]uintptr), false, d)
+	}
+}
+func (f fastpathT) DecMapUint16UintptrX(vp *map[uint16]uintptr, d *Decoder) {
+	v, changed := f.DecMapUint16UintptrV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint16UintptrV(v map[uint16]uintptr, canChange bool,
+	d *Decoder) (_ map[uint16]uintptr, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+		v = make(map[uint16]uintptr, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint16
+	var mv uintptr
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16IntR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint16]int)
+		v, changed := fastpathTV.DecMapUint16IntV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint16IntV(rv2i(rv).(map[uint16]int), false, d)
+	}
+}
+func (f fastpathT) DecMapUint16IntX(vp *map[uint16]int, d *Decoder) {
+	v, changed := f.DecMapUint16IntV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint16IntV(v map[uint16]int, canChange bool,
+	d *Decoder) (_ map[uint16]int, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+		v = make(map[uint16]int, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint16
+	var mv int
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16Int8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint16]int8)
+		v, changed := fastpathTV.DecMapUint16Int8V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint16Int8V(rv2i(rv).(map[uint16]int8), false, d)
+	}
+}
+func (f fastpathT) DecMapUint16Int8X(vp *map[uint16]int8, d *Decoder) {
+	v, changed := f.DecMapUint16Int8V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint16Int8V(v map[uint16]int8, canChange bool,
+	d *Decoder) (_ map[uint16]int8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 3)
+		v = make(map[uint16]int8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint16
+	var mv int8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16Int16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint16]int16)
+		v, changed := fastpathTV.DecMapUint16Int16V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint16Int16V(rv2i(rv).(map[uint16]int16), false, d)
+	}
+}
+func (f fastpathT) DecMapUint16Int16X(vp *map[uint16]int16, d *Decoder) {
+	v, changed := f.DecMapUint16Int16V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint16Int16V(v map[uint16]int16, canChange bool,
+	d *Decoder) (_ map[uint16]int16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 4)
+		v = make(map[uint16]int16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint16
+	var mv int16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16Int32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint16]int32)
+		v, changed := fastpathTV.DecMapUint16Int32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint16Int32V(rv2i(rv).(map[uint16]int32), false, d)
+	}
+}
+func (f fastpathT) DecMapUint16Int32X(vp *map[uint16]int32, d *Decoder) {
+	v, changed := f.DecMapUint16Int32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint16Int32V(v map[uint16]int32, canChange bool,
+	d *Decoder) (_ map[uint16]int32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 6)
+		v = make(map[uint16]int32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint16
+	var mv int32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16Int64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint16]int64)
+		v, changed := fastpathTV.DecMapUint16Int64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint16Int64V(rv2i(rv).(map[uint16]int64), false, d)
+	}
+}
+func (f fastpathT) DecMapUint16Int64X(vp *map[uint16]int64, d *Decoder) {
+	v, changed := f.DecMapUint16Int64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint16Int64V(v map[uint16]int64, canChange bool,
+	d *Decoder) (_ map[uint16]int64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+		v = make(map[uint16]int64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint16
+	var mv int64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeInt64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16Float32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint16]float32)
+		v, changed := fastpathTV.DecMapUint16Float32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint16Float32V(rv2i(rv).(map[uint16]float32), false, d)
+	}
+}
+func (f fastpathT) DecMapUint16Float32X(vp *map[uint16]float32, d *Decoder) {
+	v, changed := f.DecMapUint16Float32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint16Float32V(v map[uint16]float32, canChange bool,
+	d *Decoder) (_ map[uint16]float32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 6)
+		v = make(map[uint16]float32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint16
+	var mv float32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16Float64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint16]float64)
+		v, changed := fastpathTV.DecMapUint16Float64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint16Float64V(rv2i(rv).(map[uint16]float64), false, d)
+	}
+}
+func (f fastpathT) DecMapUint16Float64X(vp *map[uint16]float64, d *Decoder) {
+	v, changed := f.DecMapUint16Float64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint16Float64V(v map[uint16]float64, canChange bool,
+	d *Decoder) (_ map[uint16]float64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+		v = make(map[uint16]float64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint16
+	var mv float64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeFloat64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16BoolR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint16]bool)
+		v, changed := fastpathTV.DecMapUint16BoolV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint16BoolV(rv2i(rv).(map[uint16]bool), false, d)
+	}
+}
+func (f fastpathT) DecMapUint16BoolX(vp *map[uint16]bool, d *Decoder) {
+	v, changed := f.DecMapUint16BoolV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint16BoolV(v map[uint16]bool, canChange bool,
+	d *Decoder) (_ map[uint16]bool, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 3)
+		v = make(map[uint16]bool, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint16
+	var mv bool
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = false
+			}
+			continue
+		}
+		mv = dd.DecodeBool()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint32IntfR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint32]interface{})
+		v, changed := fastpathTV.DecMapUint32IntfV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint32IntfV(rv2i(rv).(map[uint32]interface{}), false, d)
+	}
+}
+func (f fastpathT) DecMapUint32IntfX(vp *map[uint32]interface{}, d *Decoder) {
+	v, changed := f.DecMapUint32IntfV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint32IntfV(v map[uint32]interface{}, canChange bool,
+	d *Decoder) (_ map[uint32]interface{}, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 20)
+		v = make(map[uint32]interface{}, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
+	var mk uint32
+	var mv interface{}
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = nil
+			}
+			continue
+		}
+		if mapGet {
+			mv = v[mk]
+		} else {
+			mv = nil
+		}
+		d.decode(&mv)
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint32StringR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint32]string)
+		v, changed := fastpathTV.DecMapUint32StringV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint32StringV(rv2i(rv).(map[uint32]string), false, d)
+	}
+}
+func (f fastpathT) DecMapUint32StringX(vp *map[uint32]string, d *Decoder) {
+	v, changed := f.DecMapUint32StringV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint32StringV(v map[uint32]string, canChange bool,
+	d *Decoder) (_ map[uint32]string, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 20)
+		v = make(map[uint32]string, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint32
+	var mv string
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = ""
+			}
+			continue
+		}
+		mv = dd.DecodeString()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint32UintR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint32]uint)
+		v, changed := fastpathTV.DecMapUint32UintV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint32UintV(rv2i(rv).(map[uint32]uint), false, d)
+	}
+}
+func (f fastpathT) DecMapUint32UintX(vp *map[uint32]uint, d *Decoder) {
+	v, changed := f.DecMapUint32UintV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint32UintV(v map[uint32]uint, canChange bool,
+	d *Decoder) (_ map[uint32]uint, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[uint32]uint, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint32
+	var mv uint
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint32Uint8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint32]uint8)
+		v, changed := fastpathTV.DecMapUint32Uint8V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint32Uint8V(rv2i(rv).(map[uint32]uint8), false, d)
+	}
+}
+func (f fastpathT) DecMapUint32Uint8X(vp *map[uint32]uint8, d *Decoder) {
+	v, changed := f.DecMapUint32Uint8V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint32Uint8V(v map[uint32]uint8, canChange bool,
+	d *Decoder) (_ map[uint32]uint8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
+		v = make(map[uint32]uint8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint32
+	var mv uint8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint32Uint16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint32]uint16)
+		v, changed := fastpathTV.DecMapUint32Uint16V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint32Uint16V(rv2i(rv).(map[uint32]uint16), false, d)
+	}
+}
+func (f fastpathT) DecMapUint32Uint16X(vp *map[uint32]uint16, d *Decoder) {
+	v, changed := f.DecMapUint32Uint16V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint32Uint16V(v map[uint32]uint16, canChange bool,
+	d *Decoder) (_ map[uint32]uint16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 6)
+		v = make(map[uint32]uint16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint32
+	var mv uint16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint32Uint32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint32]uint32)
+		v, changed := fastpathTV.DecMapUint32Uint32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint32Uint32V(rv2i(rv).(map[uint32]uint32), false, d)
+	}
+}
+func (f fastpathT) DecMapUint32Uint32X(vp *map[uint32]uint32, d *Decoder) {
+	v, changed := f.DecMapUint32Uint32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint32Uint32V(v map[uint32]uint32, canChange bool,
+	d *Decoder) (_ map[uint32]uint32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 8)
+		v = make(map[uint32]uint32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint32
+	var mv uint32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint32Uint64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint32]uint64)
+		v, changed := fastpathTV.DecMapUint32Uint64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint32Uint64V(rv2i(rv).(map[uint32]uint64), false, d)
+	}
+}
+func (f fastpathT) DecMapUint32Uint64X(vp *map[uint32]uint64, d *Decoder) {
+	v, changed := f.DecMapUint32Uint64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint32Uint64V(v map[uint32]uint64, canChange bool,
+	d *Decoder) (_ map[uint32]uint64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[uint32]uint64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint32
+	var mv uint64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeUint64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint32UintptrR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint32]uintptr)
+		v, changed := fastpathTV.DecMapUint32UintptrV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint32UintptrV(rv2i(rv).(map[uint32]uintptr), false, d)
+	}
+}
+func (f fastpathT) DecMapUint32UintptrX(vp *map[uint32]uintptr, d *Decoder) {
+	v, changed := f.DecMapUint32UintptrV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint32UintptrV(v map[uint32]uintptr, canChange bool,
+	d *Decoder) (_ map[uint32]uintptr, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[uint32]uintptr, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint32
+	var mv uintptr
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint32IntR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint32]int)
+		v, changed := fastpathTV.DecMapUint32IntV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint32IntV(rv2i(rv).(map[uint32]int), false, d)
+	}
+}
+func (f fastpathT) DecMapUint32IntX(vp *map[uint32]int, d *Decoder) {
+	v, changed := f.DecMapUint32IntV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint32IntV(v map[uint32]int, canChange bool,
+	d *Decoder) (_ map[uint32]int, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[uint32]int, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint32
+	var mv int
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint32Int8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint32]int8)
+		v, changed := fastpathTV.DecMapUint32Int8V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint32Int8V(rv2i(rv).(map[uint32]int8), false, d)
+	}
+}
+func (f fastpathT) DecMapUint32Int8X(vp *map[uint32]int8, d *Decoder) {
+	v, changed := f.DecMapUint32Int8V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint32Int8V(v map[uint32]int8, canChange bool,
+	d *Decoder) (_ map[uint32]int8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
+		v = make(map[uint32]int8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint32
+	var mv int8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint32Int16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint32]int16)
+		v, changed := fastpathTV.DecMapUint32Int16V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint32Int16V(rv2i(rv).(map[uint32]int16), false, d)
+	}
+}
+func (f fastpathT) DecMapUint32Int16X(vp *map[uint32]int16, d *Decoder) {
+	v, changed := f.DecMapUint32Int16V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint32Int16V(v map[uint32]int16, canChange bool,
+	d *Decoder) (_ map[uint32]int16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 6)
+		v = make(map[uint32]int16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint32
+	var mv int16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint32Int32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint32]int32)
+		v, changed := fastpathTV.DecMapUint32Int32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint32Int32V(rv2i(rv).(map[uint32]int32), false, d)
+	}
+}
+func (f fastpathT) DecMapUint32Int32X(vp *map[uint32]int32, d *Decoder) {
+	v, changed := f.DecMapUint32Int32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint32Int32V(v map[uint32]int32, canChange bool,
+	d *Decoder) (_ map[uint32]int32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 8)
+		v = make(map[uint32]int32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint32
+	var mv int32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint32Int64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint32]int64)
+		v, changed := fastpathTV.DecMapUint32Int64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint32Int64V(rv2i(rv).(map[uint32]int64), false, d)
+	}
+}
+func (f fastpathT) DecMapUint32Int64X(vp *map[uint32]int64, d *Decoder) {
+	v, changed := f.DecMapUint32Int64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint32Int64V(v map[uint32]int64, canChange bool,
+	d *Decoder) (_ map[uint32]int64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[uint32]int64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint32
+	var mv int64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeInt64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint32Float32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint32]float32)
+		v, changed := fastpathTV.DecMapUint32Float32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint32Float32V(rv2i(rv).(map[uint32]float32), false, d)
+	}
+}
+func (f fastpathT) DecMapUint32Float32X(vp *map[uint32]float32, d *Decoder) {
+	v, changed := f.DecMapUint32Float32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint32Float32V(v map[uint32]float32, canChange bool,
+	d *Decoder) (_ map[uint32]float32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 8)
+		v = make(map[uint32]float32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint32
+	var mv float32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint32Float64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint32]float64)
+		v, changed := fastpathTV.DecMapUint32Float64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint32Float64V(rv2i(rv).(map[uint32]float64), false, d)
+	}
+}
+func (f fastpathT) DecMapUint32Float64X(vp *map[uint32]float64, d *Decoder) {
+	v, changed := f.DecMapUint32Float64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint32Float64V(v map[uint32]float64, canChange bool,
+	d *Decoder) (_ map[uint32]float64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[uint32]float64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint32
+	var mv float64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeFloat64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint32BoolR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint32]bool)
+		v, changed := fastpathTV.DecMapUint32BoolV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint32BoolV(rv2i(rv).(map[uint32]bool), false, d)
+	}
+}
+func (f fastpathT) DecMapUint32BoolX(vp *map[uint32]bool, d *Decoder) {
+	v, changed := f.DecMapUint32BoolV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint32BoolV(v map[uint32]bool, canChange bool,
+	d *Decoder) (_ map[uint32]bool, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
+		v = make(map[uint32]bool, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint32
+	var mv bool
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = false
+			}
+			continue
+		}
+		mv = dd.DecodeBool()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint64IntfR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint64]interface{})
+		v, changed := fastpathTV.DecMapUint64IntfV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint64IntfV(rv2i(rv).(map[uint64]interface{}), false, d)
+	}
+}
+func (f fastpathT) DecMapUint64IntfX(vp *map[uint64]interface{}, d *Decoder) {
+	v, changed := f.DecMapUint64IntfV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint64IntfV(v map[uint64]interface{}, canChange bool,
+	d *Decoder) (_ map[uint64]interface{}, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+		v = make(map[uint64]interface{}, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
+	var mk uint64
+	var mv interface{}
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
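+		// uint64 keys need no overflow check or narrowing conversion.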
+		mk = dd.DecodeUint64()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = nil
+			}
+			continue
+		}
+		if mapGet {
+			mv = v[mk]
+		} else {
+			mv = nil
+		}
+		d.decode(&mv)
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint64StringR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint64]string)
+		v, changed := fastpathTV.DecMapUint64StringV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint64StringV(rv2i(rv).(map[uint64]string), false, d)
+	}
+}
+func (f fastpathT) DecMapUint64StringX(vp *map[uint64]string, d *Decoder) {
+	v, changed := f.DecMapUint64StringV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint64StringV(v map[uint64]string, canChange bool,
+	d *Decoder) (_ map[uint64]string, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+		v = make(map[uint64]string, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint64
+	var mv string
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeUint64()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = ""
+			}
+			continue
+		}
+		mv = dd.DecodeString()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint64UintR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint64]uint)
+		v, changed := fastpathTV.DecMapUint64UintV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint64UintV(rv2i(rv).(map[uint64]uint), false, d)
+	}
+}
+func (f fastpathT) DecMapUint64UintX(vp *map[uint64]uint, d *Decoder) {
+	v, changed := f.DecMapUint64UintV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint64UintV(v map[uint64]uint, canChange bool,
+	d *Decoder) (_ map[uint64]uint, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[uint64]uint, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint64
+	var mv uint
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeUint64()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint64Uint8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint64]uint8)
+		v, changed := fastpathTV.DecMapUint64Uint8V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint64Uint8V(rv2i(rv).(map[uint64]uint8), false, d)
+	}
+}
+func (f fastpathT) DecMapUint64Uint8X(vp *map[uint64]uint8, d *Decoder) {
+	v, changed := f.DecMapUint64Uint8V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint64Uint8V(v map[uint64]uint8, canChange bool,
+	d *Decoder) (_ map[uint64]uint8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[uint64]uint8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint64
+	var mv uint8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeUint64()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint64Uint16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint64]uint16)
+		v, changed := fastpathTV.DecMapUint64Uint16V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint64Uint16V(rv2i(rv).(map[uint64]uint16), false, d)
+	}
+}
+func (f fastpathT) DecMapUint64Uint16X(vp *map[uint64]uint16, d *Decoder) {
+	v, changed := f.DecMapUint64Uint16V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint64Uint16V(v map[uint64]uint16, canChange bool,
+	d *Decoder) (_ map[uint64]uint16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+		v = make(map[uint64]uint16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint64
+	var mv uint16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeUint64()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint64Uint32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint64]uint32)
+		v, changed := fastpathTV.DecMapUint64Uint32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint64Uint32V(rv2i(rv).(map[uint64]uint32), false, d)
+	}
+}
+func (f fastpathT) DecMapUint64Uint32X(vp *map[uint64]uint32, d *Decoder) {
+	v, changed := f.DecMapUint64Uint32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint64Uint32V(v map[uint64]uint32, canChange bool,
+	d *Decoder) (_ map[uint64]uint32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[uint64]uint32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint64
+	var mv uint32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeUint64()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint64Uint64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint64]uint64)
+		v, changed := fastpathTV.DecMapUint64Uint64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint64Uint64V(rv2i(rv).(map[uint64]uint64), false, d)
+	}
+}
+func (f fastpathT) DecMapUint64Uint64X(vp *map[uint64]uint64, d *Decoder) {
+	v, changed := f.DecMapUint64Uint64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint64Uint64V(v map[uint64]uint64, canChange bool,
+	d *Decoder) (_ map[uint64]uint64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[uint64]uint64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint64
+	var mv uint64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeUint64()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeUint64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint64UintptrR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint64]uintptr)
+		v, changed := fastpathTV.DecMapUint64UintptrV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint64UintptrV(rv2i(rv).(map[uint64]uintptr), false, d)
+	}
+}
+func (f fastpathT) DecMapUint64UintptrX(vp *map[uint64]uintptr, d *Decoder) {
+	v, changed := f.DecMapUint64UintptrV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint64UintptrV(v map[uint64]uintptr, canChange bool,
+	d *Decoder) (_ map[uint64]uintptr, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[uint64]uintptr, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint64
+	var mv uintptr
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeUint64()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint64IntR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint64]int)
+		v, changed := fastpathTV.DecMapUint64IntV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint64IntV(rv2i(rv).(map[uint64]int), false, d)
+	}
+}
+func (f fastpathT) DecMapUint64IntX(vp *map[uint64]int, d *Decoder) {
+	v, changed := f.DecMapUint64IntV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint64IntV(v map[uint64]int, canChange bool,
+	d *Decoder) (_ map[uint64]int, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[uint64]int, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint64
+	var mv int
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeUint64()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint64Int8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint64]int8)
+		v, changed := fastpathTV.DecMapUint64Int8V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint64Int8V(rv2i(rv).(map[uint64]int8), false, d)
+	}
+}
+func (f fastpathT) DecMapUint64Int8X(vp *map[uint64]int8, d *Decoder) {
+	v, changed := f.DecMapUint64Int8V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint64Int8V(v map[uint64]int8, canChange bool,
+	d *Decoder) (_ map[uint64]int8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[uint64]int8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint64
+	var mv int8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeUint64()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint64Int16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint64]int16)
+		v, changed := fastpathTV.DecMapUint64Int16V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint64Int16V(rv2i(rv).(map[uint64]int16), false, d)
+	}
+}
+func (f fastpathT) DecMapUint64Int16X(vp *map[uint64]int16, d *Decoder) {
+	v, changed := f.DecMapUint64Int16V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint64Int16V(v map[uint64]int16, canChange bool,
+	d *Decoder) (_ map[uint64]int16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+		v = make(map[uint64]int16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint64
+	var mv int16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeUint64()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint64Int32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint64]int32)
+		v, changed := fastpathTV.DecMapUint64Int32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint64Int32V(rv2i(rv).(map[uint64]int32), false, d)
+	}
+}
+func (f fastpathT) DecMapUint64Int32X(vp *map[uint64]int32, d *Decoder) {
+	v, changed := f.DecMapUint64Int32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint64Int32V(v map[uint64]int32, canChange bool,
+	d *Decoder) (_ map[uint64]int32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[uint64]int32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint64
+	var mv int32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeUint64()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint64Int64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint64]int64)
+		v, changed := fastpathTV.DecMapUint64Int64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint64Int64V(rv2i(rv).(map[uint64]int64), false, d)
+	}
+}
+func (f fastpathT) DecMapUint64Int64X(vp *map[uint64]int64, d *Decoder) {
+	v, changed := f.DecMapUint64Int64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint64Int64V(v map[uint64]int64, canChange bool,
+	d *Decoder) (_ map[uint64]int64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[uint64]int64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint64
+	var mv int64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeUint64()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeInt64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint64Float32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint64]float32)
+		v, changed := fastpathTV.DecMapUint64Float32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint64Float32V(rv2i(rv).(map[uint64]float32), false, d)
+	}
+}
+func (f fastpathT) DecMapUint64Float32X(vp *map[uint64]float32, d *Decoder) {
+	v, changed := f.DecMapUint64Float32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint64Float32V(v map[uint64]float32, canChange bool,
+	d *Decoder) (_ map[uint64]float32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[uint64]float32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint64
+	var mv float32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeUint64()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint64Float64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint64]float64)
+		v, changed := fastpathTV.DecMapUint64Float64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint64Float64V(rv2i(rv).(map[uint64]float64), false, d)
+	}
+}
+func (f fastpathT) DecMapUint64Float64X(vp *map[uint64]float64, d *Decoder) {
+	v, changed := f.DecMapUint64Float64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint64Float64V(v map[uint64]float64, canChange bool,
+	d *Decoder) (_ map[uint64]float64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[uint64]float64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint64
+	var mv float64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeUint64()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeFloat64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint64BoolR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint64]bool)
+		v, changed := fastpathTV.DecMapUint64BoolV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint64BoolV(rv2i(rv).(map[uint64]bool), false, d)
+	}
+}
+func (f fastpathT) DecMapUint64BoolX(vp *map[uint64]bool, d *Decoder) {
+	v, changed := f.DecMapUint64BoolV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint64BoolV(v map[uint64]bool, canChange bool,
+	d *Decoder) (_ map[uint64]bool, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[uint64]bool, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uint64
+	var mv bool
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeUint64()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = false
+			}
+			continue
+		}
+		mv = dd.DecodeBool()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrIntfR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uintptr]interface{})
+		v, changed := fastpathTV.DecMapUintptrIntfV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintptrIntfV(rv2i(rv).(map[uintptr]interface{}), false, d)
+	}
+}
+func (f fastpathT) DecMapUintptrIntfX(vp *map[uintptr]interface{}, d *Decoder) {
+	v, changed := f.DecMapUintptrIntfV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintptrIntfV(v map[uintptr]interface{}, canChange bool,
+	d *Decoder) (_ map[uintptr]interface{}, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+		v = make(map[uintptr]interface{}, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
+	var mk uintptr
+	var mv interface{}
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = nil
+			}
+			continue
+		}
+		if mapGet {
+			mv = v[mk]
+		} else {
+			mv = nil
+		}
+		d.decode(&mv)
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrStringR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uintptr]string)
+		v, changed := fastpathTV.DecMapUintptrStringV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintptrStringV(rv2i(rv).(map[uintptr]string), false, d)
+	}
+}
+func (f fastpathT) DecMapUintptrStringX(vp *map[uintptr]string, d *Decoder) {
+	v, changed := f.DecMapUintptrStringV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintptrStringV(v map[uintptr]string, canChange bool,
+	d *Decoder) (_ map[uintptr]string, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+		v = make(map[uintptr]string, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uintptr
+	var mv string
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = ""
+			}
+			continue
+		}
+		mv = dd.DecodeString()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrUintR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uintptr]uint)
+		v, changed := fastpathTV.DecMapUintptrUintV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintptrUintV(rv2i(rv).(map[uintptr]uint), false, d)
+	}
+}
+func (f fastpathT) DecMapUintptrUintX(vp *map[uintptr]uint, d *Decoder) {
+	v, changed := f.DecMapUintptrUintV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintptrUintV(v map[uintptr]uint, canChange bool,
+	d *Decoder) (_ map[uintptr]uint, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[uintptr]uint, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uintptr
+	var mv uint
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrUint8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uintptr]uint8)
+		v, changed := fastpathTV.DecMapUintptrUint8V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintptrUint8V(rv2i(rv).(map[uintptr]uint8), false, d)
+	}
+}
+func (f fastpathT) DecMapUintptrUint8X(vp *map[uintptr]uint8, d *Decoder) {
+	v, changed := f.DecMapUintptrUint8V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintptrUint8V(v map[uintptr]uint8, canChange bool,
+	d *Decoder) (_ map[uintptr]uint8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[uintptr]uint8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uintptr
+	var mv uint8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrUint16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uintptr]uint16)
+		v, changed := fastpathTV.DecMapUintptrUint16V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintptrUint16V(rv2i(rv).(map[uintptr]uint16), false, d)
+	}
+}
+func (f fastpathT) DecMapUintptrUint16X(vp *map[uintptr]uint16, d *Decoder) {
+	v, changed := f.DecMapUintptrUint16V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintptrUint16V(v map[uintptr]uint16, canChange bool,
+	d *Decoder) (_ map[uintptr]uint16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+		v = make(map[uintptr]uint16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uintptr
+	var mv uint16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrUint32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uintptr]uint32)
+		v, changed := fastpathTV.DecMapUintptrUint32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintptrUint32V(rv2i(rv).(map[uintptr]uint32), false, d)
+	}
+}
+func (f fastpathT) DecMapUintptrUint32X(vp *map[uintptr]uint32, d *Decoder) {
+	v, changed := f.DecMapUintptrUint32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintptrUint32V(v map[uintptr]uint32, canChange bool,
+	d *Decoder) (_ map[uintptr]uint32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[uintptr]uint32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uintptr
+	var mv uint32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrUint64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uintptr]uint64)
+		v, changed := fastpathTV.DecMapUintptrUint64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintptrUint64V(rv2i(rv).(map[uintptr]uint64), false, d)
+	}
+}
+func (f fastpathT) DecMapUintptrUint64X(vp *map[uintptr]uint64, d *Decoder) {
+	v, changed := f.DecMapUintptrUint64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintptrUint64V(v map[uintptr]uint64, canChange bool,
+	d *Decoder) (_ map[uintptr]uint64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[uintptr]uint64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uintptr
+	var mv uint64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeUint64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrUintptrR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uintptr]uintptr)
+		v, changed := fastpathTV.DecMapUintptrUintptrV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintptrUintptrV(rv2i(rv).(map[uintptr]uintptr), false, d)
+	}
+}
+func (f fastpathT) DecMapUintptrUintptrX(vp *map[uintptr]uintptr, d *Decoder) {
+	v, changed := f.DecMapUintptrUintptrV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintptrUintptrV(v map[uintptr]uintptr, canChange bool,
+	d *Decoder) (_ map[uintptr]uintptr, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[uintptr]uintptr, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uintptr
+	var mv uintptr
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrIntR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uintptr]int)
+		v, changed := fastpathTV.DecMapUintptrIntV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintptrIntV(rv2i(rv).(map[uintptr]int), false, d)
+	}
+}
+func (f fastpathT) DecMapUintptrIntX(vp *map[uintptr]int, d *Decoder) {
+	v, changed := f.DecMapUintptrIntV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintptrIntV(v map[uintptr]int, canChange bool,
+	d *Decoder) (_ map[uintptr]int, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[uintptr]int, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uintptr
+	var mv int
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrInt8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uintptr]int8)
+		v, changed := fastpathTV.DecMapUintptrInt8V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintptrInt8V(rv2i(rv).(map[uintptr]int8), false, d)
+	}
+}
+func (f fastpathT) DecMapUintptrInt8X(vp *map[uintptr]int8, d *Decoder) {
+	v, changed := f.DecMapUintptrInt8V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintptrInt8V(v map[uintptr]int8, canChange bool,
+	d *Decoder) (_ map[uintptr]int8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[uintptr]int8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uintptr
+	var mv int8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrInt16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uintptr]int16)
+		v, changed := fastpathTV.DecMapUintptrInt16V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintptrInt16V(rv2i(rv).(map[uintptr]int16), false, d)
+	}
+}
+func (f fastpathT) DecMapUintptrInt16X(vp *map[uintptr]int16, d *Decoder) {
+	v, changed := f.DecMapUintptrInt16V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintptrInt16V(v map[uintptr]int16, canChange bool,
+	d *Decoder) (_ map[uintptr]int16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+		v = make(map[uintptr]int16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uintptr
+	var mv int16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrInt32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uintptr]int32)
+		v, changed := fastpathTV.DecMapUintptrInt32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintptrInt32V(rv2i(rv).(map[uintptr]int32), false, d)
+	}
+}
+func (f fastpathT) DecMapUintptrInt32X(vp *map[uintptr]int32, d *Decoder) {
+	v, changed := f.DecMapUintptrInt32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintptrInt32V(v map[uintptr]int32, canChange bool,
+	d *Decoder) (_ map[uintptr]int32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[uintptr]int32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uintptr
+	var mv int32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrInt64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uintptr]int64)
+		v, changed := fastpathTV.DecMapUintptrInt64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintptrInt64V(rv2i(rv).(map[uintptr]int64), false, d)
+	}
+}
+func (f fastpathT) DecMapUintptrInt64X(vp *map[uintptr]int64, d *Decoder) {
+	v, changed := f.DecMapUintptrInt64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintptrInt64V(v map[uintptr]int64, canChange bool,
+	d *Decoder) (_ map[uintptr]int64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[uintptr]int64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uintptr
+	var mv int64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeInt64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrFloat32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uintptr]float32)
+		v, changed := fastpathTV.DecMapUintptrFloat32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintptrFloat32V(rv2i(rv).(map[uintptr]float32), false, d)
+	}
+}
+func (f fastpathT) DecMapUintptrFloat32X(vp *map[uintptr]float32, d *Decoder) {
+	v, changed := f.DecMapUintptrFloat32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintptrFloat32V(v map[uintptr]float32, canChange bool,
+	d *Decoder) (_ map[uintptr]float32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[uintptr]float32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uintptr
+	var mv float32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrFloat64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uintptr]float64)
+		v, changed := fastpathTV.DecMapUintptrFloat64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintptrFloat64V(rv2i(rv).(map[uintptr]float64), false, d)
+	}
+}
+func (f fastpathT) DecMapUintptrFloat64X(vp *map[uintptr]float64, d *Decoder) {
+	v, changed := f.DecMapUintptrFloat64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintptrFloat64V(v map[uintptr]float64, canChange bool,
+	d *Decoder) (_ map[uintptr]float64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[uintptr]float64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uintptr
+	var mv float64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeFloat64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrBoolR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uintptr]bool)
+		v, changed := fastpathTV.DecMapUintptrBoolV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintptrBoolV(rv2i(rv).(map[uintptr]bool), false, d)
+	}
+}
+func (f fastpathT) DecMapUintptrBoolX(vp *map[uintptr]bool, d *Decoder) {
+	v, changed := f.DecMapUintptrBoolV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintptrBoolV(v map[uintptr]bool, canChange bool,
+	d *Decoder) (_ map[uintptr]bool, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[uintptr]bool, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk uintptr
+	var mv bool
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = false
+			}
+			continue
+		}
+		mv = dd.DecodeBool()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntIntfR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int]interface{})
+		v, changed := fastpathTV.DecMapIntIntfV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntIntfV(rv2i(rv).(map[int]interface{}), false, d)
+	}
+}
+func (f fastpathT) DecMapIntIntfX(vp *map[int]interface{}, d *Decoder) {
+	v, changed := f.DecMapIntIntfV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntIntfV(v map[int]interface{}, canChange bool,
+	d *Decoder) (_ map[int]interface{}, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+		v = make(map[int]interface{}, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
+	var mk int
+	var mv interface{}
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = nil
+			}
+			continue
+		}
+		if mapGet {
+			mv = v[mk]
+		} else {
+			mv = nil
+		}
+		d.decode(&mv)
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntStringR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int]string)
+		v, changed := fastpathTV.DecMapIntStringV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntStringV(rv2i(rv).(map[int]string), false, d)
+	}
+}
+func (f fastpathT) DecMapIntStringX(vp *map[int]string, d *Decoder) {
+	v, changed := f.DecMapIntStringV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntStringV(v map[int]string, canChange bool,
+	d *Decoder) (_ map[int]string, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+		v = make(map[int]string, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int
+	var mv string
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = ""
+			}
+			continue
+		}
+		mv = dd.DecodeString()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntUintR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int]uint)
+		v, changed := fastpathTV.DecMapIntUintV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntUintV(rv2i(rv).(map[int]uint), false, d)
+	}
+}
+func (f fastpathT) DecMapIntUintX(vp *map[int]uint, d *Decoder) {
+	v, changed := f.DecMapIntUintV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntUintV(v map[int]uint, canChange bool,
+	d *Decoder) (_ map[int]uint, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[int]uint, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int
+	var mv uint
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntUint8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int]uint8)
+		v, changed := fastpathTV.DecMapIntUint8V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntUint8V(rv2i(rv).(map[int]uint8), false, d)
+	}
+}
+func (f fastpathT) DecMapIntUint8X(vp *map[int]uint8, d *Decoder) {
+	v, changed := f.DecMapIntUint8V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntUint8V(v map[int]uint8, canChange bool,
+	d *Decoder) (_ map[int]uint8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[int]uint8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int
+	var mv uint8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntUint16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int]uint16)
+		v, changed := fastpathTV.DecMapIntUint16V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntUint16V(rv2i(rv).(map[int]uint16), false, d)
+	}
+}
+func (f fastpathT) DecMapIntUint16X(vp *map[int]uint16, d *Decoder) {
+	v, changed := f.DecMapIntUint16V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntUint16V(v map[int]uint16, canChange bool,
+	d *Decoder) (_ map[int]uint16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+		v = make(map[int]uint16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int
+	var mv uint16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntUint32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int]uint32)
+		v, changed := fastpathTV.DecMapIntUint32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntUint32V(rv2i(rv).(map[int]uint32), false, d)
+	}
+}
+func (f fastpathT) DecMapIntUint32X(vp *map[int]uint32, d *Decoder) {
+	v, changed := f.DecMapIntUint32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntUint32V(v map[int]uint32, canChange bool,
+	d *Decoder) (_ map[int]uint32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[int]uint32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int
+	var mv uint32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntUint64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int]uint64)
+		v, changed := fastpathTV.DecMapIntUint64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntUint64V(rv2i(rv).(map[int]uint64), false, d)
+	}
+}
+func (f fastpathT) DecMapIntUint64X(vp *map[int]uint64, d *Decoder) {
+	v, changed := f.DecMapIntUint64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntUint64V(v map[int]uint64, canChange bool,
+	d *Decoder) (_ map[int]uint64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[int]uint64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int
+	var mv uint64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeUint64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntUintptrR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int]uintptr)
+		v, changed := fastpathTV.DecMapIntUintptrV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntUintptrV(rv2i(rv).(map[int]uintptr), false, d)
+	}
+}
+func (f fastpathT) DecMapIntUintptrX(vp *map[int]uintptr, d *Decoder) {
+	v, changed := f.DecMapIntUintptrV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntUintptrV(v map[int]uintptr, canChange bool,
+	d *Decoder) (_ map[int]uintptr, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[int]uintptr, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int
+	var mv uintptr
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntIntR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int]int)
+		v, changed := fastpathTV.DecMapIntIntV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntIntV(rv2i(rv).(map[int]int), false, d)
+	}
+}
+func (f fastpathT) DecMapIntIntX(vp *map[int]int, d *Decoder) {
+	v, changed := f.DecMapIntIntV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntIntV(v map[int]int, canChange bool,
+	d *Decoder) (_ map[int]int, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[int]int, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int
+	var mv int
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntInt8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int]int8)
+		v, changed := fastpathTV.DecMapIntInt8V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntInt8V(rv2i(rv).(map[int]int8), false, d)
+	}
+}
+func (f fastpathT) DecMapIntInt8X(vp *map[int]int8, d *Decoder) {
+	v, changed := f.DecMapIntInt8V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntInt8V(v map[int]int8, canChange bool,
+	d *Decoder) (_ map[int]int8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[int]int8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int
+	var mv int8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntInt16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int]int16)
+		v, changed := fastpathTV.DecMapIntInt16V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntInt16V(rv2i(rv).(map[int]int16), false, d)
+	}
+}
+func (f fastpathT) DecMapIntInt16X(vp *map[int]int16, d *Decoder) {
+	v, changed := f.DecMapIntInt16V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntInt16V(v map[int]int16, canChange bool,
+	d *Decoder) (_ map[int]int16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+		v = make(map[int]int16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int
+	var mv int16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntInt32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int]int32)
+		v, changed := fastpathTV.DecMapIntInt32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntInt32V(rv2i(rv).(map[int]int32), false, d)
+	}
+}
+func (f fastpathT) DecMapIntInt32X(vp *map[int]int32, d *Decoder) {
+	v, changed := f.DecMapIntInt32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntInt32V(v map[int]int32, canChange bool,
+	d *Decoder) (_ map[int]int32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[int]int32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int
+	var mv int32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntInt64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int]int64)
+		v, changed := fastpathTV.DecMapIntInt64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntInt64V(rv2i(rv).(map[int]int64), false, d)
+	}
+}
+func (f fastpathT) DecMapIntInt64X(vp *map[int]int64, d *Decoder) {
+	v, changed := f.DecMapIntInt64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntInt64V(v map[int]int64, canChange bool,
+	d *Decoder) (_ map[int]int64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[int]int64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int
+	var mv int64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeInt64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntFloat32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int]float32)
+		v, changed := fastpathTV.DecMapIntFloat32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntFloat32V(rv2i(rv).(map[int]float32), false, d)
+	}
+}
+func (f fastpathT) DecMapIntFloat32X(vp *map[int]float32, d *Decoder) {
+	v, changed := f.DecMapIntFloat32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntFloat32V(v map[int]float32, canChange bool,
+	d *Decoder) (_ map[int]float32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[int]float32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int
+	var mv float32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntFloat64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int]float64)
+		v, changed := fastpathTV.DecMapIntFloat64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntFloat64V(rv2i(rv).(map[int]float64), false, d)
+	}
+}
+func (f fastpathT) DecMapIntFloat64X(vp *map[int]float64, d *Decoder) {
+	v, changed := f.DecMapIntFloat64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntFloat64V(v map[int]float64, canChange bool,
+	d *Decoder) (_ map[int]float64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[int]float64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int
+	var mv float64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeFloat64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntBoolR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int]bool)
+		v, changed := fastpathTV.DecMapIntBoolV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntBoolV(rv2i(rv).(map[int]bool), false, d)
+	}
+}
+func (f fastpathT) DecMapIntBoolX(vp *map[int]bool, d *Decoder) {
+	v, changed := f.DecMapIntBoolV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntBoolV(v map[int]bool, canChange bool,
+	d *Decoder) (_ map[int]bool, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[int]bool, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int
+	var mv bool
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = false
+			}
+			continue
+		}
+		mv = dd.DecodeBool()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt8IntfR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int8]interface{})
+		v, changed := fastpathTV.DecMapInt8IntfV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt8IntfV(rv2i(rv).(map[int8]interface{}), false, d)
+	}
+}
+func (f fastpathT) DecMapInt8IntfX(vp *map[int8]interface{}, d *Decoder) {
+	v, changed := f.DecMapInt8IntfV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt8IntfV(v map[int8]interface{}, canChange bool,
+	d *Decoder) (_ map[int8]interface{}, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 17)
+		v = make(map[int8]interface{}, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
+	var mk int8
+	var mv interface{}
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = nil
+			}
+			continue
+		}
+		if mapGet {
+			mv = v[mk]
+		} else {
+			mv = nil
+		}
+		d.decode(&mv)
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt8StringR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int8]string)
+		v, changed := fastpathTV.DecMapInt8StringV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt8StringV(rv2i(rv).(map[int8]string), false, d)
+	}
+}
+func (f fastpathT) DecMapInt8StringX(vp *map[int8]string, d *Decoder) {
+	v, changed := f.DecMapInt8StringV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt8StringV(v map[int8]string, canChange bool,
+	d *Decoder) (_ map[int8]string, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 17)
+		v = make(map[int8]string, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int8
+	var mv string
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = ""
+			}
+			continue
+		}
+		mv = dd.DecodeString()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt8UintR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int8]uint)
+		v, changed := fastpathTV.DecMapInt8UintV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt8UintV(rv2i(rv).(map[int8]uint), false, d)
+	}
+}
+func (f fastpathT) DecMapInt8UintX(vp *map[int8]uint, d *Decoder) {
+	v, changed := f.DecMapInt8UintV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt8UintV(v map[int8]uint, canChange bool,
+	d *Decoder) (_ map[int8]uint, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[int8]uint, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int8
+	var mv uint
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt8Uint8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int8]uint8)
+		v, changed := fastpathTV.DecMapInt8Uint8V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt8Uint8V(rv2i(rv).(map[int8]uint8), false, d)
+	}
+}
+func (f fastpathT) DecMapInt8Uint8X(vp *map[int8]uint8, d *Decoder) {
+	v, changed := f.DecMapInt8Uint8V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt8Uint8V(v map[int8]uint8, canChange bool,
+	d *Decoder) (_ map[int8]uint8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 2)
+		v = make(map[int8]uint8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int8
+	var mv uint8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt8Uint16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int8]uint16)
+		v, changed := fastpathTV.DecMapInt8Uint16V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt8Uint16V(rv2i(rv).(map[int8]uint16), false, d)
+	}
+}
+func (f fastpathT) DecMapInt8Uint16X(vp *map[int8]uint16, d *Decoder) {
+	v, changed := f.DecMapInt8Uint16V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt8Uint16V(v map[int8]uint16, canChange bool,
+	d *Decoder) (_ map[int8]uint16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 3)
+		v = make(map[int8]uint16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int8
+	var mv uint16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt8Uint32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int8]uint32)
+		v, changed := fastpathTV.DecMapInt8Uint32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt8Uint32V(rv2i(rv).(map[int8]uint32), false, d)
+	}
+}
+func (f fastpathT) DecMapInt8Uint32X(vp *map[int8]uint32, d *Decoder) {
+	v, changed := f.DecMapInt8Uint32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt8Uint32V(v map[int8]uint32, canChange bool,
+	d *Decoder) (_ map[int8]uint32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
+		v = make(map[int8]uint32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int8
+	var mv uint32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt8Uint64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int8]uint64)
+		v, changed := fastpathTV.DecMapInt8Uint64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt8Uint64V(rv2i(rv).(map[int8]uint64), false, d)
+	}
+}
+func (f fastpathT) DecMapInt8Uint64X(vp *map[int8]uint64, d *Decoder) {
+	v, changed := f.DecMapInt8Uint64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt8Uint64V(v map[int8]uint64, canChange bool,
+	d *Decoder) (_ map[int8]uint64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[int8]uint64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int8
+	var mv uint64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeUint64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt8UintptrR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int8]uintptr)
+		v, changed := fastpathTV.DecMapInt8UintptrV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt8UintptrV(rv2i(rv).(map[int8]uintptr), false, d)
+	}
+}
+func (f fastpathT) DecMapInt8UintptrX(vp *map[int8]uintptr, d *Decoder) {
+	v, changed := f.DecMapInt8UintptrV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt8UintptrV(v map[int8]uintptr, canChange bool,
+	d *Decoder) (_ map[int8]uintptr, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[int8]uintptr, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int8
+	var mv uintptr
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt8IntR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int8]int)
+		v, changed := fastpathTV.DecMapInt8IntV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt8IntV(rv2i(rv).(map[int8]int), false, d)
+	}
+}
+func (f fastpathT) DecMapInt8IntX(vp *map[int8]int, d *Decoder) {
+	v, changed := f.DecMapInt8IntV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt8IntV(v map[int8]int, canChange bool,
+	d *Decoder) (_ map[int8]int, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[int8]int, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int8
+	var mv int
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt8Int8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int8]int8)
+		v, changed := fastpathTV.DecMapInt8Int8V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt8Int8V(rv2i(rv).(map[int8]int8), false, d)
+	}
+}
+func (f fastpathT) DecMapInt8Int8X(vp *map[int8]int8, d *Decoder) {
+	v, changed := f.DecMapInt8Int8V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt8Int8V(v map[int8]int8, canChange bool,
+	d *Decoder) (_ map[int8]int8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 2)
+		v = make(map[int8]int8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int8
+	var mv int8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt8Int16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int8]int16)
+		v, changed := fastpathTV.DecMapInt8Int16V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt8Int16V(rv2i(rv).(map[int8]int16), false, d)
+	}
+}
+func (f fastpathT) DecMapInt8Int16X(vp *map[int8]int16, d *Decoder) {
+	v, changed := f.DecMapInt8Int16V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt8Int16V(v map[int8]int16, canChange bool,
+	d *Decoder) (_ map[int8]int16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 3)
+		v = make(map[int8]int16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int8
+	var mv int16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt8Int32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int8]int32)
+		v, changed := fastpathTV.DecMapInt8Int32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt8Int32V(rv2i(rv).(map[int8]int32), false, d)
+	}
+}
+func (f fastpathT) DecMapInt8Int32X(vp *map[int8]int32, d *Decoder) {
+	v, changed := f.DecMapInt8Int32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt8Int32V(v map[int8]int32, canChange bool,
+	d *Decoder) (_ map[int8]int32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
+		v = make(map[int8]int32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int8
+	var mv int32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt8Int64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int8]int64)
+		v, changed := fastpathTV.DecMapInt8Int64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt8Int64V(rv2i(rv).(map[int8]int64), false, d)
+	}
+}
+func (f fastpathT) DecMapInt8Int64X(vp *map[int8]int64, d *Decoder) {
+	v, changed := f.DecMapInt8Int64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt8Int64V(v map[int8]int64, canChange bool,
+	d *Decoder) (_ map[int8]int64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[int8]int64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int8
+	var mv int64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeInt64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt8Float32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int8]float32)
+		v, changed := fastpathTV.DecMapInt8Float32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt8Float32V(rv2i(rv).(map[int8]float32), false, d)
+	}
+}
+func (f fastpathT) DecMapInt8Float32X(vp *map[int8]float32, d *Decoder) {
+	v, changed := f.DecMapInt8Float32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt8Float32V(v map[int8]float32, canChange bool,
+	d *Decoder) (_ map[int8]float32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
+		v = make(map[int8]float32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int8
+	var mv float32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt8Float64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int8]float64)
+		v, changed := fastpathTV.DecMapInt8Float64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt8Float64V(rv2i(rv).(map[int8]float64), false, d)
+	}
+}
+func (f fastpathT) DecMapInt8Float64X(vp *map[int8]float64, d *Decoder) {
+	v, changed := f.DecMapInt8Float64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt8Float64V(v map[int8]float64, canChange bool,
+	d *Decoder) (_ map[int8]float64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[int8]float64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int8
+	var mv float64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeFloat64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt8BoolR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int8]bool)
+		v, changed := fastpathTV.DecMapInt8BoolV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt8BoolV(rv2i(rv).(map[int8]bool), false, d)
+	}
+}
+func (f fastpathT) DecMapInt8BoolX(vp *map[int8]bool, d *Decoder) {
+	v, changed := f.DecMapInt8BoolV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt8BoolV(v map[int8]bool, canChange bool,
+	d *Decoder) (_ map[int8]bool, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 2)
+		v = make(map[int8]bool, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int8
+	var mv bool
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = false
+			}
+			continue
+		}
+		mv = dd.DecodeBool()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt16IntfR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int16]interface{})
+		v, changed := fastpathTV.DecMapInt16IntfV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt16IntfV(rv2i(rv).(map[int16]interface{}), false, d)
+	}
+}
+func (f fastpathT) DecMapInt16IntfX(vp *map[int16]interface{}, d *Decoder) {
+	v, changed := f.DecMapInt16IntfV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt16IntfV(v map[int16]interface{}, canChange bool,
+	d *Decoder) (_ map[int16]interface{}, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 18)
+		v = make(map[int16]interface{}, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
+	var mk int16
+	var mv interface{}
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = nil
+			}
+			continue
+		}
+		if mapGet {
+			mv = v[mk]
+		} else {
+			mv = nil
+		}
+		d.decode(&mv)
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt16StringR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int16]string)
+		v, changed := fastpathTV.DecMapInt16StringV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt16StringV(rv2i(rv).(map[int16]string), false, d)
+	}
+}
+func (f fastpathT) DecMapInt16StringX(vp *map[int16]string, d *Decoder) {
+	v, changed := f.DecMapInt16StringV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt16StringV(v map[int16]string, canChange bool,
+	d *Decoder) (_ map[int16]string, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 18)
+		v = make(map[int16]string, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int16
+	var mv string
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = ""
+			}
+			continue
+		}
+		mv = dd.DecodeString()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt16UintR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int16]uint)
+		v, changed := fastpathTV.DecMapInt16UintV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt16UintV(rv2i(rv).(map[int16]uint), false, d)
+	}
+}
+func (f fastpathT) DecMapInt16UintX(vp *map[int16]uint, d *Decoder) {
+	v, changed := f.DecMapInt16UintV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt16UintV(v map[int16]uint, canChange bool,
+	d *Decoder) (_ map[int16]uint, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+		v = make(map[int16]uint, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int16
+	var mv uint
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt16Uint8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int16]uint8)
+		v, changed := fastpathTV.DecMapInt16Uint8V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt16Uint8V(rv2i(rv).(map[int16]uint8), false, d)
+	}
+}
+func (f fastpathT) DecMapInt16Uint8X(vp *map[int16]uint8, d *Decoder) {
+	v, changed := f.DecMapInt16Uint8V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt16Uint8V(v map[int16]uint8, canChange bool,
+	d *Decoder) (_ map[int16]uint8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 3)
+		v = make(map[int16]uint8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int16
+	var mv uint8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt16Uint16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int16]uint16)
+		v, changed := fastpathTV.DecMapInt16Uint16V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt16Uint16V(rv2i(rv).(map[int16]uint16), false, d)
+	}
+}
+func (f fastpathT) DecMapInt16Uint16X(vp *map[int16]uint16, d *Decoder) {
+	v, changed := f.DecMapInt16Uint16V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt16Uint16V(v map[int16]uint16, canChange bool,
+	d *Decoder) (_ map[int16]uint16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 4)
+		v = make(map[int16]uint16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int16
+	var mv uint16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt16Uint32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int16]uint32)
+		v, changed := fastpathTV.DecMapInt16Uint32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt16Uint32V(rv2i(rv).(map[int16]uint32), false, d)
+	}
+}
+func (f fastpathT) DecMapInt16Uint32X(vp *map[int16]uint32, d *Decoder) {
+	v, changed := f.DecMapInt16Uint32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt16Uint32V(v map[int16]uint32, canChange bool,
+	d *Decoder) (_ map[int16]uint32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 6)
+		v = make(map[int16]uint32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int16
+	var mv uint32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt16Uint64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int16]uint64)
+		v, changed := fastpathTV.DecMapInt16Uint64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt16Uint64V(rv2i(rv).(map[int16]uint64), false, d)
+	}
+}
+func (f fastpathT) DecMapInt16Uint64X(vp *map[int16]uint64, d *Decoder) {
+	v, changed := f.DecMapInt16Uint64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt16Uint64V(v map[int16]uint64, canChange bool,
+	d *Decoder) (_ map[int16]uint64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+		v = make(map[int16]uint64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int16
+	var mv uint64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeUint64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt16UintptrR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int16]uintptr)
+		v, changed := fastpathTV.DecMapInt16UintptrV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt16UintptrV(rv2i(rv).(map[int16]uintptr), false, d)
+	}
+}
+func (f fastpathT) DecMapInt16UintptrX(vp *map[int16]uintptr, d *Decoder) {
+	v, changed := f.DecMapInt16UintptrV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt16UintptrV(v map[int16]uintptr, canChange bool,
+	d *Decoder) (_ map[int16]uintptr, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+		v = make(map[int16]uintptr, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int16
+	var mv uintptr
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt16IntR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int16]int)
+		v, changed := fastpathTV.DecMapInt16IntV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt16IntV(rv2i(rv).(map[int16]int), false, d)
+	}
+}
+func (f fastpathT) DecMapInt16IntX(vp *map[int16]int, d *Decoder) {
+	v, changed := f.DecMapInt16IntV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt16IntV(v map[int16]int, canChange bool,
+	d *Decoder) (_ map[int16]int, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+		v = make(map[int16]int, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int16
+	var mv int
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt16Int8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int16]int8)
+		v, changed := fastpathTV.DecMapInt16Int8V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt16Int8V(rv2i(rv).(map[int16]int8), false, d)
+	}
+}
+func (f fastpathT) DecMapInt16Int8X(vp *map[int16]int8, d *Decoder) {
+	v, changed := f.DecMapInt16Int8V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt16Int8V(v map[int16]int8, canChange bool,
+	d *Decoder) (_ map[int16]int8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 3)
+		v = make(map[int16]int8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int16
+	var mv int8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt16Int16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int16]int16)
+		v, changed := fastpathTV.DecMapInt16Int16V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt16Int16V(rv2i(rv).(map[int16]int16), false, d)
+	}
+}
+func (f fastpathT) DecMapInt16Int16X(vp *map[int16]int16, d *Decoder) {
+	v, changed := f.DecMapInt16Int16V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt16Int16V(v map[int16]int16, canChange bool,
+	d *Decoder) (_ map[int16]int16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 4)
+		v = make(map[int16]int16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int16
+	var mv int16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt16Int32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int16]int32)
+		v, changed := fastpathTV.DecMapInt16Int32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt16Int32V(rv2i(rv).(map[int16]int32), false, d)
+	}
+}
+func (f fastpathT) DecMapInt16Int32X(vp *map[int16]int32, d *Decoder) {
+	v, changed := f.DecMapInt16Int32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt16Int32V(v map[int16]int32, canChange bool,
+	d *Decoder) (_ map[int16]int32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 6)
+		v = make(map[int16]int32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int16
+	var mv int32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt16Int64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int16]int64)
+		v, changed := fastpathTV.DecMapInt16Int64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt16Int64V(rv2i(rv).(map[int16]int64), false, d)
+	}
+}
+func (f fastpathT) DecMapInt16Int64X(vp *map[int16]int64, d *Decoder) {
+	v, changed := f.DecMapInt16Int64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt16Int64V(v map[int16]int64, canChange bool,
+	d *Decoder) (_ map[int16]int64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+		v = make(map[int16]int64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int16
+	var mv int64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeInt64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt16Float32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int16]float32)
+		v, changed := fastpathTV.DecMapInt16Float32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt16Float32V(rv2i(rv).(map[int16]float32), false, d)
+	}
+}
+func (f fastpathT) DecMapInt16Float32X(vp *map[int16]float32, d *Decoder) {
+	v, changed := f.DecMapInt16Float32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt16Float32V(v map[int16]float32, canChange bool,
+	d *Decoder) (_ map[int16]float32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 6)
+		v = make(map[int16]float32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int16
+	var mv float32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt16Float64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int16]float64)
+		v, changed := fastpathTV.DecMapInt16Float64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt16Float64V(rv2i(rv).(map[int16]float64), false, d)
+	}
+}
+func (f fastpathT) DecMapInt16Float64X(vp *map[int16]float64, d *Decoder) {
+	v, changed := f.DecMapInt16Float64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt16Float64V(v map[int16]float64, canChange bool,
+	d *Decoder) (_ map[int16]float64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+		v = make(map[int16]float64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int16
+	var mv float64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeFloat64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt16BoolR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int16]bool)
+		v, changed := fastpathTV.DecMapInt16BoolV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt16BoolV(rv2i(rv).(map[int16]bool), false, d)
+	}
+}
+func (f fastpathT) DecMapInt16BoolX(vp *map[int16]bool, d *Decoder) {
+	v, changed := f.DecMapInt16BoolV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt16BoolV(v map[int16]bool, canChange bool,
+	d *Decoder) (_ map[int16]bool, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 3)
+		v = make(map[int16]bool, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int16
+	var mv bool
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = false
+			}
+			continue
+		}
+		mv = dd.DecodeBool()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt32IntfR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int32]interface{})
+		v, changed := fastpathTV.DecMapInt32IntfV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt32IntfV(rv2i(rv).(map[int32]interface{}), false, d)
+	}
+}
+func (f fastpathT) DecMapInt32IntfX(vp *map[int32]interface{}, d *Decoder) {
+	v, changed := f.DecMapInt32IntfV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt32IntfV(v map[int32]interface{}, canChange bool,
+	d *Decoder) (_ map[int32]interface{}, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 20)
+		v = make(map[int32]interface{}, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
+	var mk int32
+	var mv interface{}
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = nil
+			}
+			continue
+		}
+		if mapGet {
+			mv = v[mk]
+		} else {
+			mv = nil
+		}
+		d.decode(&mv)
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt32StringR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int32]string)
+		v, changed := fastpathTV.DecMapInt32StringV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt32StringV(rv2i(rv).(map[int32]string), false, d)
+	}
+}
+func (f fastpathT) DecMapInt32StringX(vp *map[int32]string, d *Decoder) {
+	v, changed := f.DecMapInt32StringV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt32StringV(v map[int32]string, canChange bool,
+	d *Decoder) (_ map[int32]string, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 20)
+		v = make(map[int32]string, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int32
+	var mv string
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = ""
+			}
+			continue
+		}
+		mv = dd.DecodeString()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt32UintR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int32]uint)
+		v, changed := fastpathTV.DecMapInt32UintV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt32UintV(rv2i(rv).(map[int32]uint), false, d)
+	}
+}
+func (f fastpathT) DecMapInt32UintX(vp *map[int32]uint, d *Decoder) {
+	v, changed := f.DecMapInt32UintV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt32UintV(v map[int32]uint, canChange bool,
+	d *Decoder) (_ map[int32]uint, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[int32]uint, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int32
+	var mv uint
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
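+		// Range-check the decoded int64 before narrowing it to the int32
+		// key type (an out-of-range value is assumed to surface as a
+		// decode error from chkOvf).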
+		mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt32Uint8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int32]uint8)
+		v, changed := fastpathTV.DecMapInt32Uint8V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt32Uint8V(rv2i(rv).(map[int32]uint8), false, d)
+	}
+}
+func (f fastpathT) DecMapInt32Uint8X(vp *map[int32]uint8, d *Decoder) {
+	v, changed := f.DecMapInt32Uint8V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt32Uint8V(v map[int32]uint8, canChange bool,
+	d *Decoder) (_ map[int32]uint8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
+		v = make(map[int32]uint8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int32
+	var mv uint8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt32Uint16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int32]uint16)
+		v, changed := fastpathTV.DecMapInt32Uint16V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt32Uint16V(rv2i(rv).(map[int32]uint16), false, d)
+	}
+}
+func (f fastpathT) DecMapInt32Uint16X(vp *map[int32]uint16, d *Decoder) {
+	v, changed := f.DecMapInt32Uint16V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt32Uint16V(v map[int32]uint16, canChange bool,
+	d *Decoder) (_ map[int32]uint16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 6)
+		v = make(map[int32]uint16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int32
+	var mv uint16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt32Uint32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int32]uint32)
+		v, changed := fastpathTV.DecMapInt32Uint32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt32Uint32V(rv2i(rv).(map[int32]uint32), false, d)
+	}
+}
+func (f fastpathT) DecMapInt32Uint32X(vp *map[int32]uint32, d *Decoder) {
+	v, changed := f.DecMapInt32Uint32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt32Uint32V(v map[int32]uint32, canChange bool,
+	d *Decoder) (_ map[int32]uint32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 8)
+		v = make(map[int32]uint32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int32
+	var mv uint32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt32Uint64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int32]uint64)
+		v, changed := fastpathTV.DecMapInt32Uint64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt32Uint64V(rv2i(rv).(map[int32]uint64), false, d)
+	}
+}
+func (f fastpathT) DecMapInt32Uint64X(vp *map[int32]uint64, d *Decoder) {
+	v, changed := f.DecMapInt32Uint64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt32Uint64V(v map[int32]uint64, canChange bool,
+	d *Decoder) (_ map[int32]uint64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[int32]uint64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int32
+	var mv uint64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeUint64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt32UintptrR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int32]uintptr)
+		v, changed := fastpathTV.DecMapInt32UintptrV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt32UintptrV(rv2i(rv).(map[int32]uintptr), false, d)
+	}
+}
+func (f fastpathT) DecMapInt32UintptrX(vp *map[int32]uintptr, d *Decoder) {
+	v, changed := f.DecMapInt32UintptrV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt32UintptrV(v map[int32]uintptr, canChange bool,
+	d *Decoder) (_ map[int32]uintptr, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[int32]uintptr, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int32
+	var mv uintptr
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt32IntR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int32]int)
+		v, changed := fastpathTV.DecMapInt32IntV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt32IntV(rv2i(rv).(map[int32]int), false, d)
+	}
+}
+func (f fastpathT) DecMapInt32IntX(vp *map[int32]int, d *Decoder) {
+	v, changed := f.DecMapInt32IntV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt32IntV(v map[int32]int, canChange bool,
+	d *Decoder) (_ map[int32]int, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[int32]int, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int32
+	var mv int
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt32Int8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int32]int8)
+		v, changed := fastpathTV.DecMapInt32Int8V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt32Int8V(rv2i(rv).(map[int32]int8), false, d)
+	}
+}
+func (f fastpathT) DecMapInt32Int8X(vp *map[int32]int8, d *Decoder) {
+	v, changed := f.DecMapInt32Int8V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt32Int8V(v map[int32]int8, canChange bool,
+	d *Decoder) (_ map[int32]int8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
+		v = make(map[int32]int8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int32
+	var mv int8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt32Int16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int32]int16)
+		v, changed := fastpathTV.DecMapInt32Int16V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt32Int16V(rv2i(rv).(map[int32]int16), false, d)
+	}
+}
+func (f fastpathT) DecMapInt32Int16X(vp *map[int32]int16, d *Decoder) {
+	v, changed := f.DecMapInt32Int16V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt32Int16V(v map[int32]int16, canChange bool,
+	d *Decoder) (_ map[int32]int16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 6)
+		v = make(map[int32]int16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int32
+	var mv int16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt32Int32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int32]int32)
+		v, changed := fastpathTV.DecMapInt32Int32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt32Int32V(rv2i(rv).(map[int32]int32), false, d)
+	}
+}
+func (f fastpathT) DecMapInt32Int32X(vp *map[int32]int32, d *Decoder) {
+	v, changed := f.DecMapInt32Int32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt32Int32V(v map[int32]int32, canChange bool,
+	d *Decoder) (_ map[int32]int32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 8)
+		v = make(map[int32]int32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int32
+	var mv int32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt32Int64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int32]int64)
+		v, changed := fastpathTV.DecMapInt32Int64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt32Int64V(rv2i(rv).(map[int32]int64), false, d)
+	}
+}
+func (f fastpathT) DecMapInt32Int64X(vp *map[int32]int64, d *Decoder) {
+	v, changed := f.DecMapInt32Int64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt32Int64V(v map[int32]int64, canChange bool,
+	d *Decoder) (_ map[int32]int64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[int32]int64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int32
+	var mv int64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeInt64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt32Float32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int32]float32)
+		v, changed := fastpathTV.DecMapInt32Float32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt32Float32V(rv2i(rv).(map[int32]float32), false, d)
+	}
+}
+func (f fastpathT) DecMapInt32Float32X(vp *map[int32]float32, d *Decoder) {
+	v, changed := f.DecMapInt32Float32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt32Float32V(v map[int32]float32, canChange bool,
+	d *Decoder) (_ map[int32]float32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 8)
+		v = make(map[int32]float32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int32
+	var mv float32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt32Float64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int32]float64)
+		v, changed := fastpathTV.DecMapInt32Float64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt32Float64V(rv2i(rv).(map[int32]float64), false, d)
+	}
+}
+func (f fastpathT) DecMapInt32Float64X(vp *map[int32]float64, d *Decoder) {
+	v, changed := f.DecMapInt32Float64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt32Float64V(v map[int32]float64, canChange bool,
+	d *Decoder) (_ map[int32]float64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[int32]float64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int32
+	var mv float64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeFloat64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt32BoolR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int32]bool)
+		v, changed := fastpathTV.DecMapInt32BoolV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt32BoolV(rv2i(rv).(map[int32]bool), false, d)
+	}
+}
+func (f fastpathT) DecMapInt32BoolX(vp *map[int32]bool, d *Decoder) {
+	v, changed := f.DecMapInt32BoolV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt32BoolV(v map[int32]bool, canChange bool,
+	d *Decoder) (_ map[int32]bool, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
+		v = make(map[int32]bool, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int32
+	var mv bool
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = false
+			}
+			continue
+		}
+		mv = dd.DecodeBool()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
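+// In the int64-keyed variants below, the result of dd.DecodeInt64 is used
+// directly; the narrower key types above range-check the decoded int64
+// via chkOvf first.
+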
+func (d *Decoder) fastpathDecMapInt64IntfR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int64]interface{})
+		v, changed := fastpathTV.DecMapInt64IntfV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt64IntfV(rv2i(rv).(map[int64]interface{}), false, d)
+	}
+}
+func (f fastpathT) DecMapInt64IntfX(vp *map[int64]interface{}, d *Decoder) {
+	v, changed := f.DecMapInt64IntfV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt64IntfV(v map[int64]interface{}, canChange bool,
+	d *Decoder) (_ map[int64]interface{}, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+		v = make(map[int64]interface{}, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
+	var mk int64
+	var mv interface{}
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeInt64()
+		if esep {
+			dd.ReadMapElemValue()
+		}
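+		// An encoded nil either deletes the key or stores the value type's
+		// zero value, depending on the DeleteOnNilMapValue option.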
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = nil
+			}
+			continue
+		}
+		if mapGet {
+			mv = v[mk]
+		} else {
+			mv = nil
+		}
+		d.decode(&mv)
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt64StringR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int64]string)
+		v, changed := fastpathTV.DecMapInt64StringV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt64StringV(rv2i(rv).(map[int64]string), false, d)
+	}
+}
+func (f fastpathT) DecMapInt64StringX(vp *map[int64]string, d *Decoder) {
+	v, changed := f.DecMapInt64StringV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt64StringV(v map[int64]string, canChange bool,
+	d *Decoder) (_ map[int64]string, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+		v = make(map[int64]string, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int64
+	var mv string
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeInt64()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = ""
+			}
+			continue
+		}
+		mv = dd.DecodeString()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt64UintR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int64]uint)
+		v, changed := fastpathTV.DecMapInt64UintV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt64UintV(rv2i(rv).(map[int64]uint), false, d)
+	}
+}
+func (f fastpathT) DecMapInt64UintX(vp *map[int64]uint, d *Decoder) {
+	v, changed := f.DecMapInt64UintV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt64UintV(v map[int64]uint, canChange bool,
+	d *Decoder) (_ map[int64]uint, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[int64]uint, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int64
+	var mv uint
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeInt64()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt64Uint8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int64]uint8)
+		v, changed := fastpathTV.DecMapInt64Uint8V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt64Uint8V(rv2i(rv).(map[int64]uint8), false, d)
+	}
+}
+func (f fastpathT) DecMapInt64Uint8X(vp *map[int64]uint8, d *Decoder) {
+	v, changed := f.DecMapInt64Uint8V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt64Uint8V(v map[int64]uint8, canChange bool,
+	d *Decoder) (_ map[int64]uint8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[int64]uint8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int64
+	var mv uint8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeInt64()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt64Uint16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int64]uint16)
+		v, changed := fastpathTV.DecMapInt64Uint16V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt64Uint16V(rv2i(rv).(map[int64]uint16), false, d)
+	}
+}
+func (f fastpathT) DecMapInt64Uint16X(vp *map[int64]uint16, d *Decoder) {
+	v, changed := f.DecMapInt64Uint16V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt64Uint16V(v map[int64]uint16, canChange bool,
+	d *Decoder) (_ map[int64]uint16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+		v = make(map[int64]uint16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int64
+	var mv uint16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeInt64()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt64Uint32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int64]uint32)
+		v, changed := fastpathTV.DecMapInt64Uint32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt64Uint32V(rv2i(rv).(map[int64]uint32), false, d)
+	}
+}
+func (f fastpathT) DecMapInt64Uint32X(vp *map[int64]uint32, d *Decoder) {
+	v, changed := f.DecMapInt64Uint32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt64Uint32V(v map[int64]uint32, canChange bool,
+	d *Decoder) (_ map[int64]uint32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[int64]uint32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int64
+	var mv uint32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeInt64()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt64Uint64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int64]uint64)
+		v, changed := fastpathTV.DecMapInt64Uint64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt64Uint64V(rv2i(rv).(map[int64]uint64), false, d)
+	}
+}
+func (f fastpathT) DecMapInt64Uint64X(vp *map[int64]uint64, d *Decoder) {
+	v, changed := f.DecMapInt64Uint64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt64Uint64V(v map[int64]uint64, canChange bool,
+	d *Decoder) (_ map[int64]uint64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[int64]uint64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int64
+	var mv uint64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeInt64()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeUint64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt64UintptrR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int64]uintptr)
+		v, changed := fastpathTV.DecMapInt64UintptrV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt64UintptrV(rv2i(rv).(map[int64]uintptr), false, d)
+	}
+}
+func (f fastpathT) DecMapInt64UintptrX(vp *map[int64]uintptr, d *Decoder) {
+	v, changed := f.DecMapInt64UintptrV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt64UintptrV(v map[int64]uintptr, canChange bool,
+	d *Decoder) (_ map[int64]uintptr, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[int64]uintptr, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int64
+	var mv uintptr
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeInt64()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt64IntR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int64]int)
+		v, changed := fastpathTV.DecMapInt64IntV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt64IntV(rv2i(rv).(map[int64]int), false, d)
+	}
+}
+func (f fastpathT) DecMapInt64IntX(vp *map[int64]int, d *Decoder) {
+	v, changed := f.DecMapInt64IntV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt64IntV(v map[int64]int, canChange bool,
+	d *Decoder) (_ map[int64]int, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[int64]int, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int64
+	var mv int
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeInt64()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt64Int8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int64]int8)
+		v, changed := fastpathTV.DecMapInt64Int8V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt64Int8V(rv2i(rv).(map[int64]int8), false, d)
+	}
+}
+func (f fastpathT) DecMapInt64Int8X(vp *map[int64]int8, d *Decoder) {
+	v, changed := f.DecMapInt64Int8V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt64Int8V(v map[int64]int8, canChange bool,
+	d *Decoder) (_ map[int64]int8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[int64]int8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int64
+	var mv int8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeInt64()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt64Int16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int64]int16)
+		v, changed := fastpathTV.DecMapInt64Int16V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt64Int16V(rv2i(rv).(map[int64]int16), false, d)
+	}
+}
+func (f fastpathT) DecMapInt64Int16X(vp *map[int64]int16, d *Decoder) {
+	v, changed := f.DecMapInt64Int16V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt64Int16V(v map[int64]int16, canChange bool,
+	d *Decoder) (_ map[int64]int16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+		v = make(map[int64]int16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int64
+	var mv int16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeInt64()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt64Int32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int64]int32)
+		v, changed := fastpathTV.DecMapInt64Int32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt64Int32V(rv2i(rv).(map[int64]int32), false, d)
+	}
+}
+func (f fastpathT) DecMapInt64Int32X(vp *map[int64]int32, d *Decoder) {
+	v, changed := f.DecMapInt64Int32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt64Int32V(v map[int64]int32, canChange bool,
+	d *Decoder) (_ map[int64]int32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[int64]int32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int64
+	var mv int32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeInt64()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt64Int64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int64]int64)
+		v, changed := fastpathTV.DecMapInt64Int64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt64Int64V(rv2i(rv).(map[int64]int64), false, d)
+	}
+}
+func (f fastpathT) DecMapInt64Int64X(vp *map[int64]int64, d *Decoder) {
+	v, changed := f.DecMapInt64Int64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt64Int64V(v map[int64]int64, canChange bool,
+	d *Decoder) (_ map[int64]int64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[int64]int64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int64
+	var mv int64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeInt64()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeInt64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt64Float32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int64]float32)
+		v, changed := fastpathTV.DecMapInt64Float32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt64Float32V(rv2i(rv).(map[int64]float32), false, d)
+	}
+}
+func (f fastpathT) DecMapInt64Float32X(vp *map[int64]float32, d *Decoder) {
+	v, changed := f.DecMapInt64Float32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt64Float32V(v map[int64]float32, canChange bool,
+	d *Decoder) (_ map[int64]float32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[int64]float32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int64
+	var mv float32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeInt64()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt64Float64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int64]float64)
+		v, changed := fastpathTV.DecMapInt64Float64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt64Float64V(rv2i(rv).(map[int64]float64), false, d)
+	}
+}
+func (f fastpathT) DecMapInt64Float64X(vp *map[int64]float64, d *Decoder) {
+	v, changed := f.DecMapInt64Float64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt64Float64V(v map[int64]float64, canChange bool,
+	d *Decoder) (_ map[int64]float64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[int64]float64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int64
+	var mv float64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeInt64()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeFloat64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt64BoolR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int64]bool)
+		v, changed := fastpathTV.DecMapInt64BoolV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt64BoolV(rv2i(rv).(map[int64]bool), false, d)
+	}
+}
+func (f fastpathT) DecMapInt64BoolX(vp *map[int64]bool, d *Decoder) {
+	v, changed := f.DecMapInt64BoolV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt64BoolV(v map[int64]bool, canChange bool,
+	d *Decoder) (_ map[int64]bool, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[int64]bool, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk int64
+	var mv bool
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeInt64()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = false
+			}
+			continue
+		}
+		mv = dd.DecodeBool()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapBoolIntfR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[bool]interface{})
+		v, changed := fastpathTV.DecMapBoolIntfV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapBoolIntfV(rv2i(rv).(map[bool]interface{}), false, d)
+	}
+}
+func (f fastpathT) DecMapBoolIntfX(vp *map[bool]interface{}, d *Decoder) {
+	v, changed := f.DecMapBoolIntfV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapBoolIntfV(v map[bool]interface{}, canChange bool,
+	d *Decoder) (_ map[bool]interface{}, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 17)
+		v = make(map[bool]interface{}, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
+	var mk bool
+	var mv interface{}
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeBool()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = nil
+			}
+			continue
+		}
+		if mapGet {
+			mv = v[mk]
+		} else {
+			mv = nil
+		}
+		d.decode(&mv)
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapBoolStringR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[bool]string)
+		v, changed := fastpathTV.DecMapBoolStringV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapBoolStringV(rv2i(rv).(map[bool]string), false, d)
+	}
+}
+func (f fastpathT) DecMapBoolStringX(vp *map[bool]string, d *Decoder) {
+	v, changed := f.DecMapBoolStringV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapBoolStringV(v map[bool]string, canChange bool,
+	d *Decoder) (_ map[bool]string, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 17)
+		v = make(map[bool]string, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk bool
+	var mv string
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeBool()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = ""
+			}
+			continue
+		}
+		mv = dd.DecodeString()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapBoolUintR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[bool]uint)
+		v, changed := fastpathTV.DecMapBoolUintV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapBoolUintV(rv2i(rv).(map[bool]uint), false, d)
+	}
+}
+func (f fastpathT) DecMapBoolUintX(vp *map[bool]uint, d *Decoder) {
+	v, changed := f.DecMapBoolUintV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapBoolUintV(v map[bool]uint, canChange bool,
+	d *Decoder) (_ map[bool]uint, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[bool]uint, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk bool
+	var mv uint
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeBool()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapBoolUint8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[bool]uint8)
+		v, changed := fastpathTV.DecMapBoolUint8V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapBoolUint8V(rv2i(rv).(map[bool]uint8), false, d)
+	}
+}
+func (f fastpathT) DecMapBoolUint8X(vp *map[bool]uint8, d *Decoder) {
+	v, changed := f.DecMapBoolUint8V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapBoolUint8V(v map[bool]uint8, canChange bool,
+	d *Decoder) (_ map[bool]uint8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
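+		// decInferLen derives an initial capacity from containerLen, capped
+		// via MaxInitLen and a per-entry size hint (here 2: a 1-byte bool
+		// key plus a 1-byte uint8 value).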
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 2)
+		v = make(map[bool]uint8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk bool
+	var mv uint8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeBool()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapBoolUint16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[bool]uint16)
+		v, changed := fastpathTV.DecMapBoolUint16V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapBoolUint16V(rv2i(rv).(map[bool]uint16), false, d)
+	}
+}
+func (f fastpathT) DecMapBoolUint16X(vp *map[bool]uint16, d *Decoder) {
+	v, changed := f.DecMapBoolUint16V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapBoolUint16V(v map[bool]uint16, canChange bool,
+	d *Decoder) (_ map[bool]uint16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 3)
+		v = make(map[bool]uint16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk bool
+	var mv uint16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeBool()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapBoolUint32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[bool]uint32)
+		v, changed := fastpathTV.DecMapBoolUint32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapBoolUint32V(rv2i(rv).(map[bool]uint32), false, d)
+	}
+}
+func (f fastpathT) DecMapBoolUint32X(vp *map[bool]uint32, d *Decoder) {
+	v, changed := f.DecMapBoolUint32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapBoolUint32V(v map[bool]uint32, canChange bool,
+	d *Decoder) (_ map[bool]uint32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
+		v = make(map[bool]uint32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk bool
+	var mv uint32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeBool()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapBoolUint64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[bool]uint64)
+		v, changed := fastpathTV.DecMapBoolUint64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapBoolUint64V(rv2i(rv).(map[bool]uint64), false, d)
+	}
+}
+func (f fastpathT) DecMapBoolUint64X(vp *map[bool]uint64, d *Decoder) {
+	v, changed := f.DecMapBoolUint64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapBoolUint64V(v map[bool]uint64, canChange bool,
+	d *Decoder) (_ map[bool]uint64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[bool]uint64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk bool
+	var mv uint64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeBool()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeUint64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapBoolUintptrR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[bool]uintptr)
+		v, changed := fastpathTV.DecMapBoolUintptrV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapBoolUintptrV(rv2i(rv).(map[bool]uintptr), false, d)
+	}
+}
+func (f fastpathT) DecMapBoolUintptrX(vp *map[bool]uintptr, d *Decoder) {
+	v, changed := f.DecMapBoolUintptrV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapBoolUintptrV(v map[bool]uintptr, canChange bool,
+	d *Decoder) (_ map[bool]uintptr, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[bool]uintptr, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk bool
+	var mv uintptr
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeBool()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapBoolIntR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[bool]int)
+		v, changed := fastpathTV.DecMapBoolIntV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapBoolIntV(rv2i(rv).(map[bool]int), false, d)
+	}
+}
+func (f fastpathT) DecMapBoolIntX(vp *map[bool]int, d *Decoder) {
+	v, changed := f.DecMapBoolIntV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapBoolIntV(v map[bool]int, canChange bool,
+	d *Decoder) (_ map[bool]int, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[bool]int, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk bool
+	var mv int
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeBool()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapBoolInt8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[bool]int8)
+		v, changed := fastpathTV.DecMapBoolInt8V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapBoolInt8V(rv2i(rv).(map[bool]int8), false, d)
+	}
+}
+func (f fastpathT) DecMapBoolInt8X(vp *map[bool]int8, d *Decoder) {
+	v, changed := f.DecMapBoolInt8V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapBoolInt8V(v map[bool]int8, canChange bool,
+	d *Decoder) (_ map[bool]int8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 2)
+		v = make(map[bool]int8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk bool
+	var mv int8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeBool()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapBoolInt16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[bool]int16)
+		v, changed := fastpathTV.DecMapBoolInt16V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapBoolInt16V(rv2i(rv).(map[bool]int16), false, d)
+	}
+}
+func (f fastpathT) DecMapBoolInt16X(vp *map[bool]int16, d *Decoder) {
+	v, changed := f.DecMapBoolInt16V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapBoolInt16V(v map[bool]int16, canChange bool,
+	d *Decoder) (_ map[bool]int16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 3)
+		v = make(map[bool]int16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk bool
+	var mv int16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeBool()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapBoolInt32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[bool]int32)
+		v, changed := fastpathTV.DecMapBoolInt32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapBoolInt32V(rv2i(rv).(map[bool]int32), false, d)
+	}
+}
+func (f fastpathT) DecMapBoolInt32X(vp *map[bool]int32, d *Decoder) {
+	v, changed := f.DecMapBoolInt32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapBoolInt32V(v map[bool]int32, canChange bool,
+	d *Decoder) (_ map[bool]int32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
+		v = make(map[bool]int32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk bool
+	var mv int32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeBool()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapBoolInt64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[bool]int64)
+		v, changed := fastpathTV.DecMapBoolInt64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapBoolInt64V(rv2i(rv).(map[bool]int64), false, d)
+	}
+}
+func (f fastpathT) DecMapBoolInt64X(vp *map[bool]int64, d *Decoder) {
+	v, changed := f.DecMapBoolInt64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapBoolInt64V(v map[bool]int64, canChange bool,
+	d *Decoder) (_ map[bool]int64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[bool]int64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk bool
+	var mv int64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeBool()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeInt64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapBoolFloat32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[bool]float32)
+		v, changed := fastpathTV.DecMapBoolFloat32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapBoolFloat32V(rv2i(rv).(map[bool]float32), false, d)
+	}
+}
+func (f fastpathT) DecMapBoolFloat32X(vp *map[bool]float32, d *Decoder) {
+	v, changed := f.DecMapBoolFloat32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapBoolFloat32V(v map[bool]float32, canChange bool,
+	d *Decoder) (_ map[bool]float32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
+		v = make(map[bool]float32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk bool
+	var mv float32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeBool()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapBoolFloat64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[bool]float64)
+		v, changed := fastpathTV.DecMapBoolFloat64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapBoolFloat64V(rv2i(rv).(map[bool]float64), false, d)
+	}
+}
+func (f fastpathT) DecMapBoolFloat64X(vp *map[bool]float64, d *Decoder) {
+	v, changed := f.DecMapBoolFloat64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapBoolFloat64V(v map[bool]float64, canChange bool,
+	d *Decoder) (_ map[bool]float64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[bool]float64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk bool
+	var mv float64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeBool()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeFloat64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapBoolBoolR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[bool]bool)
+		v, changed := fastpathTV.DecMapBoolBoolV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapBoolBoolV(rv2i(rv).(map[bool]bool), false, d)
+	}
+}
+func (f fastpathT) DecMapBoolBoolX(vp *map[bool]bool, d *Decoder) {
+	v, changed := f.DecMapBoolBoolV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapBoolBoolV(v map[bool]bool, canChange bool,
+	d *Decoder) (_ map[bool]bool, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 2)
+		v = make(map[bool]bool, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	var mk bool
+	var mv bool
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeBool()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = false
+			}
+			continue
+		}
+		mv = dd.DecodeBool()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
diff --git a/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl b/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl
new file mode 100644
index 0000000..2023e05
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl
@@ -0,0 +1,544 @@
+// +build !notfastpath
+
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// Code generated from fast-path.go.tmpl - DO NOT EDIT.
+
+package codec
+
+// Fast path functions try to create a fast path encode or decode implementation
+// for common maps and slices.
+//
+// We define the functions and register them in this single file
+// so as not to pollute encode.go and decode.go, or create a dependency in there.
+// This file can be omitted without causing a build failure.
+//
+// The advantage of fast paths is:
+//	  - Many calls bypass reflection altogether
+// 
+// Currently supported:
+//	  - slice of all builtin types,
+//	  - map of all builtin types to string or interface value
+//	  - symmetrical maps of all builtin types (e.g. str-str, uint8-uint8)
+// This should provide adequate "typical" implementations.
+// 
+// Note that fast-path decode functions must handle values for which an address cannot be obtained.
+// For example:
+//	 m2 := map[string]int{}
+//	 p2 := []interface{}{m2}
+//	 // decoding into p2 will bomb if fast-path functions do not treat it as unaddressable.
+// 
+
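+// As an illustrative sketch only (not part of this file; JsonHandle is just one
+// Handle that exercises it), the fast path is reached transparently through the
+// normal Encoder/Decoder entry points:
+//
+//	 m := map[string]int{"a": 1}  // map[string]int has a generated fast path
+//	 var b []byte
+//	 _ = NewEncoderBytes(&b, new(JsonHandle)).Encode(m)  // bypasses reflection
+//	 _ = NewDecoderBytes(b, new(JsonHandle)).Decode(&m)
+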
+import (
+	"reflect"
+	"sort"
+)
+
+const fastpathEnabled = true
+
+type fastpathT struct {}
+
+var fastpathTV fastpathT
+
+type fastpathE struct {
+	rtid uintptr
+	rt reflect.Type 
+	encfn func(*Encoder, *codecFnInfo, reflect.Value)
+	decfn func(*Decoder, *codecFnInfo, reflect.Value)
+}
+
+type fastpathA [{{ .FastpathLen }}]fastpathE
+
+func (x *fastpathA) index(rtid uintptr) int {
+	// use binary search to grab the index (adapted from sort/search.go)
+	h, i, j := 0, 0, {{ .FastpathLen }} // len(x)
+	for i < j {
+		h = i + (j-i)/2
+		if x[h].rtid < rtid {
+			i = h + 1
+		} else {
+			j = h
+		}
+	}
+	if i < {{ .FastpathLen }} && x[i].rtid == rtid {
+		return i
+	}
+	return -1
+}
+
+type fastpathAslice []fastpathE
+
+func (x fastpathAslice) Len() int { return len(x) }
+func (x fastpathAslice) Less(i, j int) bool { return x[i].rtid < x[j].rtid }
+func (x fastpathAslice) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+var fastpathAV fastpathA
+
+// due to a possible initialization loop error, we build the fastpath table in an init()
+func init() {
+	i := 0
+	fn := func(v interface{},
+		fe func(*Encoder, *codecFnInfo, reflect.Value),
+		fd func(*Decoder, *codecFnInfo, reflect.Value)) (f fastpathE) {
+		xrt := reflect.TypeOf(v)
+		xptr := rt2id(xrt)
+		fastpathAV[i] = fastpathE{xptr, xrt, fe, fd}
+		i++
+		return
+	}
+	{{/* do not register []uint8 in fast-path */}}
+	{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}{{if ne .Elem "uint8"}}
+	fn([]{{ .Elem }}(nil), (*Encoder).{{ .MethodNamePfx "fastpathEnc" false }}R, (*Decoder).{{ .MethodNamePfx "fastpathDec" false }}R){{end}}{{end}}{{end}}{{end}}
+	
+	{{range .Values}}{{if not .Primitive}}{{if .MapKey }}
+	fn(map[{{ .MapKey }}]{{ .Elem }}(nil), (*Encoder).{{ .MethodNamePfx "fastpathEnc" false }}R, (*Decoder).{{ .MethodNamePfx "fastpathDec" false }}R){{end}}{{end}}{{end}}
+	
+	sort.Sort(fastpathAslice(fastpathAV[:]))
+}
+
+// -- encode
+
+// -- -- fast path type switch
+func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool {
+	switch v := iv.(type) {
+
+{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}{{if ne .Elem "uint8"}}
+	case []{{ .Elem }}:
+		fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, e)
+	case *[]{{ .Elem }}:
+		fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, e){{/*
+*/}}{{end}}{{end}}{{end}}{{end}}
+
+{{range .Values}}{{if not .Primitive}}{{if .MapKey }}
+	case map[{{ .MapKey }}]{{ .Elem }}:
+		fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, e)
+	case *map[{{ .MapKey }}]{{ .Elem }}:
+		fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, e){{/*
+*/}}{{end}}{{end}}{{end}}
+
+	default:
+		_ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4
+		return false
+	}
+	return true
+}
+
+{{/*
+**** removing this block, as they are never called directly ****
+
+
+
+**** removing this block, as they are never called directly ****
+
+
+
+func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool {
+	switch v := iv.(type) {
+{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
+	case []{{ .Elem }}:
+		fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, e)
+	case *[]{{ .Elem }}:
+		fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, e)
+{{end}}{{end}}{{end}}
+	default:
+        _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4
+		return false
+	}
+	return true
+}
+
+func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool {
+	switch v := iv.(type) {
+{{range .Values}}{{if not .Primitive}}{{if .MapKey }}
+	case map[{{ .MapKey }}]{{ .Elem }}:
+		fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, e)
+	case *map[{{ .MapKey }}]{{ .Elem }}:
+		fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, e)
+{{end}}{{end}}{{end}}
+	default:
+        _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4
+		return false
+	}
+	return true
+}
+
+
+
+**** removing this block, as they are never called directly ****
+
+
+
+**** removing this block, as they are never called directly ****
+*/}}
+
+// -- -- fast path functions
+{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} 
+func (e *Encoder) {{ .MethodNamePfx "fastpathEnc" false }}R(f *codecFnInfo, rv reflect.Value) {
+	if f.ti.mbs {
+		fastpathTV.{{ .MethodNamePfx "EncAsMap" false }}V(rv2i(rv).([]{{ .Elem }}), e)
+	} else {
+		fastpathTV.{{ .MethodNamePfx "Enc" false }}V(rv2i(rv).([]{{ .Elem }}), e)
+	}
+}
+func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v []{{ .Elem }}, e *Encoder) {
+	if v == nil { e.e.EncodeNil(); return }
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteArrayStart(len(v))
+	if esep {
+		for _, v2 := range v {
+			ee.WriteArrayElem()
+			{{ encmd .Elem "v2"}}
+		}
+	} else {
+		for _, v2 := range v {
+			{{ encmd .Elem "v2"}}
+		}
+	} {{/*
+	for _, v2 := range v {
+		if esep { ee.WriteArrayElem() }
+		{{ encmd .Elem "v2"}}
+	} */}}
+	ee.WriteArrayEnd()
+}
+func (_ fastpathT) {{ .MethodNamePfx "EncAsMap" false }}V(v []{{ .Elem }}, e *Encoder) {
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	if len(v)%2 == 1 {
+		e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+		return
+	}
+	ee.WriteMapStart(len(v) / 2)
+	if esep {
+		for j, v2 := range v {
+			if j%2 == 0 {
+				ee.WriteMapElemKey()
+			} else {
+				ee.WriteMapElemValue()
+			}
+			{{ encmd .Elem "v2"}}
+		}
+	} else {
+		for _, v2 := range v {
+			{{ encmd .Elem "v2"}}
+		}
+	} {{/*
+	for j, v2 := range v {
+		if esep {
+			if j%2 == 0 {
+				ee.WriteMapElemKey()
+			} else {
+				ee.WriteMapElemValue()
+			}
+		}
+		{{ encmd .Elem "v2"}}
+	} */}}
+	ee.WriteMapEnd()
+}
+{{end}}{{end}}{{end}}
+
+{{range .Values}}{{if not .Primitive}}{{if .MapKey }}
+func (e *Encoder) {{ .MethodNamePfx "fastpathEnc" false }}R(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.{{ .MethodNamePfx "Enc" false }}V(rv2i(rv).(map[{{ .MapKey }}]{{ .Elem }}), e)
+}
+func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, e *Encoder) {
+	if v == nil { e.e.EncodeNil(); return }
+	ee, esep := e.e, e.hh.hasElemSeparators() 
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		{{if eq .MapKey "interface{}"}}{{/* out of band 
+		*/}}var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+		e2 := NewEncoderBytes(&mksv, e.hh)
+		v2 := make([]bytesI, len(v))
+		var i, l int
+		var vp *bytesI {{/* put loop variables outside. seems currently needed for better perf */}}
+		for k2 := range v {
+			l = len(mksv)
+			e2.MustEncode(k2)
+			vp = &v2[i]
+			vp.v = mksv[l:]
+			vp.i = k2 
+			i++
+		}
+		sort.Sort(bytesISlice(v2))
+		if esep {
+			for j := range v2 {
+				ee.WriteMapElemKey()
+				e.asis(v2[j].v)
+				ee.WriteMapElemValue()
+				e.encode(v[v2[j].i])
+			}
+		} else {
+			for j := range v2 {
+				e.asis(v2[j].v)
+				e.encode(v[v2[j].i])
+			}
+		} {{/*
+		for j := range v2 {
+			if esep { ee.WriteMapElemKey() }
+			e.asis(v2[j].v)
+			if esep { ee.WriteMapElemValue() }
+			e.encode(v[v2[j].i])
+		} */}} {{else}}{{ $x := sorttype .MapKey true}}v2 := make([]{{ $x }}, len(v))
+		var i int 
+		for k := range v {
+			v2[i] = {{ $x }}(k)
+			i++
+		}
+		sort.Sort({{ sorttype .MapKey false}}(v2))
+		if esep {
+			for _, k2 := range v2 {
+				ee.WriteMapElemKey()
+				{{if eq .MapKey "string"}}ee.EncodeString(cUTF8, k2){{else}}{{ $y := printf "%s(k2)" .MapKey }}{{ encmd .MapKey $y }}{{end}}
+				ee.WriteMapElemValue()
+				{{ $y := printf "v[%s(k2)]" .MapKey }}{{ encmd .Elem $y }}
+			} 
+		} else {
+			for _, k2 := range v2 {
+				{{if eq .MapKey "string"}}ee.EncodeString(cUTF8, k2){{else}}{{ $y := printf "%s(k2)" .MapKey }}{{ encmd .MapKey $y }}{{end}}
+				{{ $y := printf "v[%s(k2)]" .MapKey }}{{ encmd .Elem $y }}
+			} 
+		} {{/*
+		for _, k2 := range v2 {
+			if esep { ee.WriteMapElemKey() }
+			{{if eq .MapKey "string"}}ee.EncodeString(cUTF8, k2){{else}}{{ $y := printf "%s(k2)" .MapKey }}{{ encmd .MapKey $y }}{{end}}
+			if esep { ee.WriteMapElemValue() }
+			{{ $y := printf "v[%s(k2)]" .MapKey }}{{ encmd .Elem $y }}
+		} */}} {{end}}
+	} else {
+		if esep {
+			for k2, v2 := range v {
+				ee.WriteMapElemKey()
+				{{if eq .MapKey "string"}}ee.EncodeString(cUTF8, k2){{else}}{{ encmd .MapKey "k2"}}{{end}}
+				ee.WriteMapElemValue()
+				{{ encmd .Elem "v2"}}
+			}
+		} else {
+			for k2, v2 := range v {
+				{{if eq .MapKey "string"}}ee.EncodeString(cUTF8, k2){{else}}{{ encmd .MapKey "k2"}}{{end}}
+				{{ encmd .Elem "v2"}}
+			}
+		} {{/*
+		for k2, v2 := range v {
+			if esep { ee.WriteMapElemKey() }
+			{{if eq .MapKey "string"}}ee.EncodeString(cUTF8, k2){{else}}{{ encmd .MapKey "k2"}}{{end}}
+			if esep { ee.WriteMapElemValue() }
+			{{ encmd .Elem "v2"}}
+		} */}}
+	}
+	ee.WriteMapEnd()
+}
+{{end}}{{end}}{{end}}
+
+// -- decode
+
+// -- -- fast path type switch
+func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool {
+	var changed bool
+	switch v := iv.(type) {
+{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}{{if ne .Elem "uint8"}}
+	case []{{ .Elem }}:
+		var v2 []{{ .Elem }}
+		v2, changed = fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, false, d)
+		if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+			copy(v, v2)
+		}
+	case *[]{{ .Elem }}:
+		var v2 []{{ .Elem }}
+		v2, changed = fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*v, true, d)
+		if changed {
+			*v = v2
+		}{{/*
+*/}}{{end}}{{end}}{{end}}{{end}}
+{{range .Values}}{{if not .Primitive}}{{if .MapKey }}{{/*
+// maps only change if nil, and in that case, there's no point copying
+*/}}
+	case map[{{ .MapKey }}]{{ .Elem }}:
+		fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, false, d)
+	case *map[{{ .MapKey }}]{{ .Elem }}:
+		var v2 map[{{ .MapKey }}]{{ .Elem }}
+		v2, changed = fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*v, true, d)
+		if changed {
+			*v = v2
+		}{{/*
+*/}}{{end}}{{end}}{{end}}
+	default:
+		_ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4
+		return false
+	}
+	return true
+}
+
+func fastpathDecodeSetZeroTypeSwitch(iv interface{}) bool {
+	switch v := iv.(type) {
+{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
+	case *[]{{ .Elem }}: 
+		*v = nil {{/*
+*/}}{{end}}{{end}}{{end}}
+{{range .Values}}{{if not .Primitive}}{{if .MapKey }}
+	case *map[{{ .MapKey }}]{{ .Elem }}: 
+		*v = nil {{/*
+*/}}{{end}}{{end}}{{end}}
+	default:
+		_ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4
+		return false
+	}
+	return true
+}
+
+// -- -- fast path functions
+{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
+{{/*
+Slices can change if they 
+- did not come from an array
+- are addressable (from a ptr)
+- are settable (e.g. contained in an interface{})
+*/}}
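+{{/*
+A hedged illustration (variable names are ours, not the template's): a slice
+reached through a pointer is addressable and may be reallocated, while an array
+can only be filled in place:
+	 var s []int
+	 d.MustDecode(&s)   // slice: the fast path may grow or replace s
+	 var a [4]int
+	 d.MustDecode(&a)   // array: cannot expand; extra stream elements are skipped
+*/}}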
+func (d *Decoder) {{ .MethodNamePfx "fastpathDec" false }}R(f *codecFnInfo, rv reflect.Value) {
+	if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*[]{{ .Elem }})
+		v, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*vp, !array, d)
+		if changed { *vp = v }
+	} else {
+		v := rv2i(rv).([]{{ .Elem }})
+		v2, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, !array, d)
+		if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+			copy(v, v2)
+		}
+	}
+}
+func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *[]{{ .Elem }}, d *Decoder) {
+	v, changed := f.{{ .MethodNamePfx "Dec" false }}V(*vp, true, d)
+	if changed { *vp = v }
+}
+func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v []{{ .Elem }}, canChange bool, d *Decoder) (_ []{{ .Elem }}, changed bool) {
+	dd := d.d{{/*
+	    // if dd.isContainerType(valueTypeNil) { dd.TryDecodeAsNil()
+    */}}
+	slh, containerLenS := d.decSliceHelperStart()
+	if containerLenS == 0 {
+		if canChange {
+			if v == nil { v = []{{ .Elem }}{} } else if len(v) != 0 { v = v[:0] }
+			changed = true
+		}
+		slh.End()
+		return v, changed
+	}
+	hasLen := containerLenS > 0
+	var xlen int 
+	if hasLen && canChange {
+		if containerLenS > cap(v) {
+			xlen = decInferLen(containerLenS, d.h.MaxInitLen, {{ .Size }})
+			if xlen <= cap(v) {
+				v = v[:xlen]
+			} else {
+				v = make([]{{ .Elem }}, xlen)
+			}
+			changed = true 
+		} else if containerLenS != len(v) {
+			v = v[:containerLenS]
+			changed = true
+		}
+	}
+	j := 0
+	for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
+		if j == 0 && len(v) == 0 && canChange {
+			if hasLen {
+				xlen = decInferLen(containerLenS, d.h.MaxInitLen, {{ .Size }})
+			} else {
+				xlen = 8
+			}
+			v = make([]{{ .Elem }}, xlen)
+			changed = true 
+		}
+		// if indefinite, etc, then expand the slice if necessary
+		var decodeIntoBlank bool
+		if j >= len(v) {
+			if canChange {
+				v = append(v, {{ zerocmd .Elem }})
+				changed = true
+			} else {
+				d.arrayCannotExpand(len(v), j+1)
+				decodeIntoBlank = true
+			}
+		} 
+		slh.ElemContainerState(j)
+		if decodeIntoBlank {
+			d.swallow()
+		} else if dd.TryDecodeAsNil() {
+			v[j] = {{ zerocmd .Elem }}
+		} else {
+			{{ if eq .Elem "interface{}" }}d.decode(&v[j]){{ else }}v[j] = {{ decmd .Elem }}{{ end }}
+		}
+	}
+	if canChange {
+		if j < len(v) {
+			v = v[:j]
+			changed = true
+		} else if j == 0 && v == nil {
+			v = make([]{{ .Elem }}, 0)
+			changed = true
+		}
+	}
+	slh.End()
+	return v, changed 
+}
+{{end}}{{end}}{{end}}
+
+{{range .Values}}{{if not .Primitive}}{{if .MapKey }}
+{{/*
+Maps can change if they are
+- addressable (from a ptr)
+- settable (e.g. contained in an interface{})
+*/}}
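+{{/*
+Hedged illustration (names are ours): a nil map reached through a pointer is
+allocated by the fast path and written back; a non-nil map is mutated in place:
+	 var m map[string]int
+	 d.MustDecode(&m)   // m is allocated here; changed == true
+*/}}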
+func (d *Decoder) {{ .MethodNamePfx "fastpathDec" false }}R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[{{ .MapKey }}]{{ .Elem }})
+		v, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*vp, true, d)
+		if changed { *vp = v }
+	} else {
+		fastpathTV.{{ .MethodNamePfx "Dec" false }}V(rv2i(rv).(map[{{ .MapKey }}]{{ .Elem }}), false, d)
+	}
+}
+func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *map[{{ .MapKey }}]{{ .Elem }}, d *Decoder) {
+	v, changed := f.{{ .MethodNamePfx "Dec" false }}V(*vp, true, d)
+	if changed { *vp = v }
+}
+func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, canChange bool, 
+	d *Decoder) (_ map[{{ .MapKey }}]{{ .Elem }}, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators(){{/*
+	    // if dd.isContainerType(valueTypeNil) {dd.TryDecodeAsNil()
+    */}}
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, {{ .Size }})
+		v = make(map[{{ .MapKey }}]{{ .Elem }}, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	{{ if eq .Elem "interface{}" }}mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
+    {{end}}var mk {{ .MapKey }}
+	var mv {{ .Elem }}
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep { dd.ReadMapElemKey() }
+		{{ if eq .MapKey "interface{}" }}mk = nil 
+		d.decode(&mk)
+		if bv, bok := mk.([]byte); bok {
+			mk = d.string(bv) {{/* // maps cannot have []byte as key. switch to string. */}}
+		}{{ else }}mk = {{ decmd .MapKey }}{{ end }}
+		if esep { dd.ReadMapElemValue() }
+		if dd.TryDecodeAsNil() {
+			if v == nil {} else if d.h.DeleteOnNilMapValue { delete(v, mk) } else { v[mk] = {{ zerocmd .Elem }} }
+			continue 
+		}
+		{{ if eq .Elem "interface{}" }}if mapGet { mv = v[mk] } else { mv = nil }
+		d.decode(&mv){{ else }}mv = {{ decmd .Elem }}{{ end }}
+		if v != nil { v[mk] = mv }
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+{{end}}{{end}}{{end}}
diff --git a/vendor/github.com/ugorji/go/codec/fast-path.not.go b/vendor/github.com/ugorji/go/codec/fast-path.not.go
new file mode 100644
index 0000000..f11b467
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/fast-path.not.go
@@ -0,0 +1,47 @@
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// +build notfastpath
+
+package codec
+
+import "reflect"
+
+const fastpathEnabled = false
+
+// The generated fast-path code is very large, and adds a few seconds to the build time.
+// This slows down test execution, runs of small tools that use codec,
+// and similar short-lived programs.
+//
+// To mitigate, we now support the notfastpath tag.
+// This tag disables fastpath during build, allowing for faster build, test execution,
+// short-program runs, etc.
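+//
+// For example, with the standard go toolchain (shown only as a usage sketch):
+//
+//	 go build -tags notfastpath ./...
+//	 go test -tags notfastpath ./...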
+
+func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool      { return false }
+func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool      { return false }
+func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { return false }
+func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool   { return false }
+func fastpathDecodeSetZeroTypeSwitch(iv interface{}) bool           { return false }
+
+type fastpathT struct{}
+type fastpathE struct {
+	rtid  uintptr
+	rt    reflect.Type
+	encfn func(*Encoder, *codecFnInfo, reflect.Value)
+	decfn func(*Decoder, *codecFnInfo, reflect.Value)
+}
+type fastpathA [0]fastpathE
+
+func (x fastpathA) index(rtid uintptr) int { return -1 }
+
+func (_ fastpathT) DecSliceUint8V(v []uint8, canChange bool, d *Decoder) (_ []uint8, changed bool) {
+	fn := d.cfer().get(uint8SliceTyp, true, true)
+	d.kSlice(&fn.i, reflect.ValueOf(&v).Elem())
+	return v, true
+}
+
+var fastpathAV fastpathA
+var fastpathTV fastpathT
+
+// ----
+type TestMammoth2Wrapper struct{} // to allow testMammoth to work in notfastpath mode
diff --git a/vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl b/vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl
new file mode 100644
index 0000000..59c5983
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl
@@ -0,0 +1,78 @@
+{{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }}
+{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}}{{if not isArray}}
+var {{var "c"}} bool {{/* // changed */}}
+_ = {{var "c"}}{{end}}
+if {{var "l"}} == 0 {
+	{{if isSlice }}if {{var "v"}} == nil {
+		{{var "v"}} = []{{ .Typ }}{}
+		{{var "c"}} = true
+	} else if len({{var "v"}}) != 0 {
+		{{var "v"}} = {{var "v"}}[:0]
+		{{var "c"}} = true
+	} {{else if isChan }}if {{var "v"}} == nil {
+		{{var "v"}} = make({{ .CTyp }}, 0)
+		{{var "c"}} = true
+	} {{end}}
+} else {
+	{{var "hl"}} := {{var "l"}} > 0
+	var {{var "rl"}} int
+	_ = {{var "rl"}}
+	{{if isSlice }} if {{var "hl"}} {
+	if {{var "l"}} > cap({{var "v"}}) {
+		{{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
+		if {{var "rl"}} <= cap({{var "v"}}) {
+			{{var "v"}} = {{var "v"}}[:{{var "rl"}}]
+		} else {
+			{{var "v"}} = make([]{{ .Typ }}, {{var "rl"}})
+		}
+		{{var "c"}} = true
+	} else if {{var "l"}} != len({{var "v"}}) {
+		{{var "v"}} = {{var "v"}}[:{{var "l"}}]
+		{{var "c"}} = true
+	}
+	} {{end}}
+	var {{var "j"}} int 
+    // var {{var "dn"}} bool 
+	for ; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ {
+		{{if not isArray}} if {{var "j"}} == 0 && {{var "v"}} == nil {
+			if {{var "hl"}} {
+				{{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
+			} else {
+				{{var "rl"}} = {{if isSlice}}8{{else if isChan}}64{{end}}
+			}
+			{{var "v"}} = make({{if isSlice}}[]{{ .Typ }}{{else if isChan}}{{.CTyp}}{{end}}, {{var "rl"}})
+			{{var "c"}} = true 
+		}{{end}}
+		{{var "h"}}.ElemContainerState({{var "j"}})
+        {{/* {{var "dn"}} = r.TryDecodeAsNil() */}}{{/* commented out, as decLineVar handles this already each time */}}
+        {{if isChan}}{{ $x := printf "%[1]vvcx%[2]v" .TempVar .Rand }}var {{$x}} {{ .Typ }}
+		{{ decLineVar $x }}
+		{{var "v"}} <- {{ $x }}
+        // println(">>>> sending ", {{ $x }}, " into ", {{var "v"}}) // TODO: remove this
+        {{else}}{{/* // if indefinite, etc, then expand the slice if necessary */}}
+		var {{var "db"}} bool
+		if {{var "j"}} >= len({{var "v"}}) {
+			{{if isSlice }} {{var "v"}} = append({{var "v"}}, {{ zero }})
+			{{var "c"}} = true
+			{{else}} z.DecArrayCannotExpand(len({{var "v"}}), {{var "j"}}+1); {{var "db"}} = true
+			{{end}}
+		}
+		if {{var "db"}} {
+			z.DecSwallow()
+		} else {
+			{{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }}
+		}
+        {{end}}
+	}
+	{{if isSlice}} if {{var "j"}} < len({{var "v"}}) {
+		{{var "v"}} = {{var "v"}}[:{{var "j"}}]
+		{{var "c"}} = true
+	} else if {{var "j"}} == 0 && {{var "v"}} == nil {
+		{{var "v"}} = make([]{{ .Typ }}, 0)
+		{{var "c"}} = true
+	} {{end}}
+}
+{{var "h"}}.End()
+{{if not isArray }}if {{var "c"}} { 
+	*{{ .Varname }} = {{var "v"}}
+}{{end}}
diff --git a/vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl b/vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl
new file mode 100644
index 0000000..8323b54
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl
@@ -0,0 +1,42 @@
+{{var "v"}} := *{{ .Varname }}
+{{var "l"}} := r.ReadMapStart()
+{{var "bh"}} := z.DecBasicHandle()
+if {{var "v"}} == nil {
+	{{var "rl"}} := z.DecInferLen({{var "l"}}, {{var "bh"}}.MaxInitLen, {{ .Size }})
+	{{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "rl"}})
+	*{{ .Varname }} = {{var "v"}}
+}
+var {{var "mk"}} {{ .KTyp }}
+var {{var "mv"}} {{ .Typ }}
+var {{var "mg"}}, {{var "mdn"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool
+if {{var "bh"}}.MapValueReset {
+	{{if decElemKindPtr}}{{var "mg"}} = true
+	{{else if decElemKindIntf}}if !{{var "bh"}}.InterfaceReset { {{var "mg"}} = true }
+	{{else if not decElemKindImmutable}}{{var "mg"}} = true
+	{{end}} }
+if {{var "l"}} != 0 {
+{{var "hl"}} := {{var "l"}} > 0 
+	for {{var "j"}} := 0; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ {
+	r.ReadMapElemKey() {{/* z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) */}}
+	{{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }}
+{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} {
+		{{var "mk"}} = string({{var "bv"}})
+	}{{ end }}{{if decElemKindPtr}}
+	{{var "ms"}} = true{{end}}
+	if {{var "mg"}} {
+		{{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}] 
+		if {{var "mok"}} {
+			{{var "ms"}} = false
+		} {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}}
+	} {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}}
+	r.ReadMapElemValue() {{/* z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) */}}
+	{{var "mdn"}} = false
+	{{ $x := printf "%vmv%v" .TempVar .Rand }}{{ $y := printf "%vmdn%v" .TempVar .Rand }}{{ decLineVar $x $y }}
+	if {{var "mdn"}} {
+		if {{ var "bh" }}.DeleteOnNilMapValue { delete({{var "v"}}, {{var "mk"}}) } else { {{var "v"}}[{{var "mk"}}] = {{decElemZero}} }
+	} else if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil {
+		{{var "v"}}[{{var "mk"}}] = {{var "mv"}}
+	}
+}
+} // else len==0: TODO: Should we clear map entries?
+r.ReadMapEnd() {{/* z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }}) */}}
diff --git a/vendor/github.com/ugorji/go/codec/gen-enc-chan.go.tmpl b/vendor/github.com/ugorji/go/codec/gen-enc-chan.go.tmpl
new file mode 100644
index 0000000..4249588
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/gen-enc-chan.go.tmpl
@@ -0,0 +1,27 @@
+{{.Label}}:
+switch timeout{{.Sfx}} := z.EncBasicHandle().ChanRecvTimeout; {
+case timeout{{.Sfx}} == 0: // only consume available
+	for {
+		select {
+		case b{{.Sfx}} := <-{{.Chan}}:
+			{{ .Slice }} = append({{.Slice}}, b{{.Sfx}})
+		default:
+			break {{.Label}}
+		}
+	}
+case timeout{{.Sfx}} > 0: // consume until timeout
+	tt{{.Sfx}} := time.NewTimer(timeout{{.Sfx}})
+	for {
+		select {
+		case b{{.Sfx}} := <-{{.Chan}}:
+			{{.Slice}} = append({{.Slice}}, b{{.Sfx}})
+		case <-tt{{.Sfx}}.C:
+			// close(tt.C)
+			break {{.Label}}
+		}
+	}
+default: // consume until close
+	for b{{.Sfx}} := range {{.Chan}} {
+		{{.Slice}} = append({{.Slice}}, b{{.Sfx}})
+	}
+}
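+{{/*
+Hedged summary of the three cases above (our wording): a zero ChanRecvTimeout
+drains only values already buffered; a positive one drains until the timer
+fires; a negative one consumes until the channel is closed, e.g.:
+	 var h CborHandle
+	 h.ChanRecvTimeout = -1 // encode a chan by draining it until close
+*/}}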
diff --git a/vendor/github.com/ugorji/go/codec/gen-helper.generated.go b/vendor/github.com/ugorji/go/codec/gen-helper.generated.go
new file mode 100644
index 0000000..917d282
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/gen-helper.generated.go
@@ -0,0 +1,335 @@
+/* // +build ignore */
+
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// Code generated from gen-helper.go.tmpl - DO NOT EDIT.
+
+package codec
+
+import (
+	"encoding"
+	"reflect"
+)
+
+// GenVersion is the current version of codecgen.
+const GenVersion = 8
+
+// This file is used to generate helper code for codecgen.
+// The values here, i.e. genHelper(En|De)coder, are not to be used directly by
+// library users. They WILL change continuously and without notice.
+//
+// To help enforce this, we create an unexported type with exported members.
+// The only way to get the type is via the one exported type that we control (somewhat).
+//
+// When static codecs are created for types, they will use this value
+// to perform encoding or decoding of primitives or known slice or map types.
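+//
+// As a rough sketch of the call shape (assumed here, not verbatim codecgen
+// output), a generated method obtains the helpers via the exported functions:
+//
+//	 func (x *T) CodecEncodeSelf(e *Encoder) {
+//	 	z, r := GenHelperEncoder(e)
+//	 	_, _ = z, r // z: helper methods; r: driver methods
+//	 }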
+
+// GenHelperEncoder is exported so that it can be used externally by codecgen.
+//
+// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE.
+func GenHelperEncoder(e *Encoder) (ge genHelperEncoder, ee genHelperEncDriver) {
+	ge = genHelperEncoder{e: e}
+	ee = genHelperEncDriver{encDriver: e.e}
+	return
+}
+
+// GenHelperDecoder is exported so that it can be used externally by codecgen.
+//
+// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE.
+func GenHelperDecoder(d *Decoder) (gd genHelperDecoder, dd genHelperDecDriver) {
+	gd = genHelperDecoder{d: d}
+	dd = genHelperDecDriver{decDriver: d.d}
+	return
+}
+
+type genHelperEncDriver struct {
+	encDriver
+}
+
+func (x genHelperEncDriver) EncodeBuiltin(rt uintptr, v interface{}) {}
+func (x genHelperEncDriver) EncStructFieldKey(keyType valueType, s string) {
+	encStructFieldKey(x.encDriver, keyType, s)
+}
+func (x genHelperEncDriver) EncodeSymbol(s string) {
+	x.encDriver.EncodeString(cUTF8, s)
+}
+
+type genHelperDecDriver struct {
+	decDriver
+	C checkOverflow
+}
+
+func (x genHelperDecDriver) DecodeBuiltin(rt uintptr, v interface{}) {}
+func (x genHelperDecDriver) DecStructFieldKey(keyType valueType, buf *[decScratchByteArrayLen]byte) []byte {
+	return decStructFieldKey(x.decDriver, keyType, buf)
+}
+func (x genHelperDecDriver) DecodeInt(bitsize uint8) (i int64) {
+	return x.C.IntV(x.decDriver.DecodeInt64(), bitsize)
+}
+func (x genHelperDecDriver) DecodeUint(bitsize uint8) (ui uint64) {
+	return x.C.UintV(x.decDriver.DecodeUint64(), bitsize)
+}
+func (x genHelperDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) {
+	f = x.DecodeFloat64()
+	if chkOverflow32 && chkOvf.Float32(f) {
+		panicv.errorf("float32 overflow: %v", f)
+	}
+	return
+}
+func (x genHelperDecDriver) DecodeFloat32As64() (f float64) {
+	f = x.DecodeFloat64()
+	if chkOvf.Float32(f) {
+		panicv.errorf("float32 overflow: %v", f)
+	}
+	return
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+type genHelperEncoder struct {
+	M must
+	e *Encoder
+	F fastpathT
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+type genHelperDecoder struct {
+	C checkOverflow
+	d *Decoder
+	F fastpathT
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncBasicHandle() *BasicHandle {
+	return f.e.h
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncBinary() bool {
+	return f.e.be // f.e.hh.isBinaryEncoding()
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) IsJSONHandle() bool {
+	return f.e.js
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncFallback(iv interface{}) {
+	// println(">>>>>>>>> EncFallback")
+	// f.e.encodeI(iv, false, false)
+	f.e.encodeValue(reflect.ValueOf(iv), nil, false)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) {
+	bs, fnerr := iv.MarshalText()
+	f.e.marshal(bs, fnerr, false, cUTF8)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) {
+	bs, fnerr := iv.MarshalJSON()
+	f.e.marshal(bs, fnerr, true, cUTF8)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) {
+	bs, fnerr := iv.MarshalBinary()
+	f.e.marshal(bs, fnerr, false, cRAW)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncRaw(iv Raw) { f.e.rawBytes(iv) }
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+//
+// Deprecated: builtin no longer supported - so we make this method a no-op,
+// but leave in-place so that old generated files continue to work without regeneration.
+func (f genHelperEncoder) TimeRtidIfBinc() (v uintptr) { return }
+
+// func (f genHelperEncoder) TimeRtidIfBinc() uintptr {
+// 	if _, ok := f.e.hh.(*BincHandle); ok {
+// 		return timeTypId
+// 	}
+// }
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) I2Rtid(v interface{}) uintptr {
+	return i2rtid(v)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) Extension(rtid uintptr) (xfn *extTypeTagFn) {
+	return f.e.h.getExt(rtid)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncExtension(v interface{}, xfFn *extTypeTagFn) {
+	f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+//
+// Deprecated: No longer used,
+// but leave in-place so that old generated files continue to work without regeneration.
+func (f genHelperEncoder) HasExtensions() bool {
+	return len(f.e.h.extHandle) != 0
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+//
+// Deprecated: No longer used,
+// but leave in-place so that old generated files continue to work without regeneration.
+func (f genHelperEncoder) EncExt(v interface{}) (r bool) {
+	if xfFn := f.e.h.getExt(i2rtid(v)); xfFn != nil {
+		f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e)
+		return true
+	}
+	return false
+}
+
+// ---------------- DECODER FOLLOWS -----------------
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecBasicHandle() *BasicHandle {
+	return f.d.h
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecBinary() bool {
+	return f.d.be // f.d.hh.isBinaryEncoding()
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecSwallow() { f.d.swallow() }
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecScratchBuffer() []byte {
+	return f.d.b[:]
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecScratchArrayBuffer() *[decScratchByteArrayLen]byte {
+	return &f.d.b
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) {
+	// println(">>>>>>>>> DecFallback")
+	rv := reflect.ValueOf(iv)
+	if chkPtr {
+		rv = f.d.ensureDecodeable(rv)
+	}
+	f.d.decodeValue(rv, nil, false)
+	// f.d.decodeValueFallback(rv)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecSliceHelperStart() (decSliceHelper, int) {
+	return f.d.decSliceHelperStart()
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecStructFieldNotFound(index int, name string) {
+	f.d.structFieldNotFound(index, name)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) {
+	f.d.arrayCannotExpand(sliceLen, streamLen)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) {
+	fnerr := tm.UnmarshalText(f.d.d.DecodeStringAsBytes())
+	if fnerr != nil {
+		panic(fnerr)
+	}
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) {
+	// bs := f.dd.DecodeStringAsBytes()
+	// grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself.
+	fnerr := tm.UnmarshalJSON(f.d.nextValueBytes())
+	if fnerr != nil {
+		panic(fnerr)
+	}
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) {
+	fnerr := bm.UnmarshalBinary(f.d.d.DecodeBytes(nil, true))
+	if fnerr != nil {
+		panic(fnerr)
+	}
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecRaw() []byte { return f.d.rawBytes() }
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+//
+// Deprecated: builtin no longer supported - so we make this method a no-op,
+// but leave in-place so that old generated files continue to work without regeneration.
+func (f genHelperDecoder) TimeRtidIfBinc() (v uintptr) { return }
+
+// func (f genHelperDecoder) TimeRtidIfBinc() uintptr {
+// 	// Note: builtin is no longer supported - so make this a no-op
+// 	if _, ok := f.d.hh.(*BincHandle); ok {
+// 		return timeTypId
+// 	}
+// 	return 0
+// }
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) IsJSONHandle() bool {
+	return f.d.js
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) I2Rtid(v interface{}) uintptr {
+	return i2rtid(v)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) Extension(rtid uintptr) (xfn *extTypeTagFn) {
+	return f.d.h.getExt(rtid)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecExtension(v interface{}, xfFn *extTypeTagFn) {
+	f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+//
+// Deprecated: No longer used,
+// but leave in-place so that old generated files continue to work without regeneration.
+func (f genHelperDecoder) HasExtensions() bool {
+	return len(f.d.h.extHandle) != 0
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+//
+// Deprecated: No longer used,
+// but leave in-place so that old generated files continue to work without regeneration.
+func (f genHelperDecoder) DecExt(v interface{}) (r bool) {
+	if xfFn := f.d.h.getExt(i2rtid(v)); xfFn != nil {
+		f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext)
+		return true
+	}
+	return false
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int) {
+	return decInferLen(clen, maxlen, unit)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+//
+// Deprecated: no longer used,
+// but leave in-place so that old generated files continue to work without regeneration.
+func (f genHelperDecoder) StringView(v []byte) string { return stringView(v) }
diff --git a/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl b/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl
new file mode 100644
index 0000000..6aeb856
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl
@@ -0,0 +1,302 @@
+/* // +build ignore */
+
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// Code generated from gen-helper.go.tmpl - DO NOT EDIT.
+
+package codec
+
+import (
+	"encoding"
+	"reflect"
+)
+
+// GenVersion is the current version of codecgen.
+const GenVersion = {{ .Version }} 
+
+// This file is used to generate helper code for codecgen. 
+// The values here, i.e. genHelper(En|De)coder, are not to be used directly by
+// library users. They WILL change continuously and without notice.
+//
+// To help enforce this, we create an unexported type with exported members.
+// The only way to get the type is via the one exported type that we control (somewhat).
+//
+// When static codecs are created for types, they will use this value
+// to perform encoding or decoding of primitives or known slice or map types.
+
+// GenHelperEncoder is exported so that it can be used externally by codecgen.
+//
+// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE.
+func GenHelperEncoder(e *Encoder) (ge genHelperEncoder, ee genHelperEncDriver) {
+	ge = genHelperEncoder{e: e}
+	ee = genHelperEncDriver{encDriver: e.e}
+	return 
+}
+
+// GenHelperDecoder is exported so that it can be used externally by codecgen.
+//
+// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE.
+func GenHelperDecoder(d *Decoder) (gd genHelperDecoder, dd genHelperDecDriver) {
+	gd = genHelperDecoder{d: d}
+	dd = genHelperDecDriver{decDriver: d.d}
+	return
+}
+
+type genHelperEncDriver struct {
+	encDriver
+}
+
+func (x genHelperEncDriver) EncodeBuiltin(rt uintptr, v interface{}) {}
+func (x genHelperEncDriver) EncStructFieldKey(keyType valueType, s string) {
+	encStructFieldKey(x.encDriver, keyType, s)
+}
+func (x genHelperEncDriver) EncodeSymbol(s string) {
+	x.encDriver.EncodeString(cUTF8, s)
+}
+
+type genHelperDecDriver struct {
+	decDriver
+	C checkOverflow
+}
+
+func (x genHelperDecDriver) DecodeBuiltin(rt uintptr, v interface{}) {}
+func (x genHelperDecDriver) DecStructFieldKey(keyType valueType, buf *[decScratchByteArrayLen]byte) []byte {
+	return decStructFieldKey(x.decDriver, keyType, buf)
+}
+func (x genHelperDecDriver) DecodeInt(bitsize uint8) (i int64) {
+	return x.C.IntV(x.decDriver.DecodeInt64(), bitsize)
+}
+func (x genHelperDecDriver) DecodeUint(bitsize uint8) (ui uint64) {
+	return x.C.UintV(x.decDriver.DecodeUint64(), bitsize)
+}
+func (x genHelperDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) {
+	f = x.DecodeFloat64()
+	if chkOverflow32 && chkOvf.Float32(f) {
+		panicv.errorf("float32 overflow: %v", f)
+	}
+	return
+}
+func (x genHelperDecDriver) DecodeFloat32As64() (f float64) {
+	f = x.DecodeFloat64()
+	if chkOvf.Float32(f) {
+		panicv.errorf("float32 overflow: %v", f)
+	}
+	return
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+type genHelperEncoder struct {
+	M must
+	e *Encoder
+	F fastpathT 
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+type genHelperDecoder struct {
+	C checkOverflow
+	d *Decoder
+	F fastpathT 
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncBasicHandle() *BasicHandle {
+	return f.e.h
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncBinary() bool {
+	return f.e.be // f.e.hh.isBinaryEncoding()
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) IsJSONHandle() bool {
+	return f.e.js
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncFallback(iv interface{}) {
+	// println(">>>>>>>>> EncFallback")
+	// f.e.encodeI(iv, false, false)
+	f.e.encodeValue(reflect.ValueOf(iv), nil, false)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) {
+	bs, fnerr := iv.MarshalText()
+	f.e.marshal(bs, fnerr, false, cUTF8)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) {
+	bs, fnerr := iv.MarshalJSON()
+	f.e.marshal(bs, fnerr, true, cUTF8)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) {
+	bs, fnerr := iv.MarshalBinary()
+	f.e.marshal(bs, fnerr, false, cRAW)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncRaw(iv Raw) { f.e.rawBytes(iv) }
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+//
+// Deprecated: builtin no longer supported - so we make this method a no-op, 
+// but leave in-place so that old generated files continue to work without regeneration.
+func (f genHelperEncoder) TimeRtidIfBinc() (v uintptr) { return }
+// func (f genHelperEncoder) TimeRtidIfBinc() uintptr {
+// 	if _, ok := f.e.hh.(*BincHandle); ok {
+// 		return timeTypId
+// 	}
+// }
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) I2Rtid(v interface{}) uintptr {
+	return i2rtid(v)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) Extension(rtid uintptr) (xfn *extTypeTagFn) {
+	return f.e.h.getExt(rtid)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncExtension(v interface{}, xfFn *extTypeTagFn) {
+	f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+//
+// Deprecated: No longer used,
+// but leave in-place so that old generated files continue to work without regeneration.
+func (f genHelperEncoder) HasExtensions() bool {
+	return len(f.e.h.extHandle) != 0
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+//
+// Deprecated: No longer used,
+// but leave in-place so that old generated files continue to work without regeneration.
+func (f genHelperEncoder) EncExt(v interface{}) (r bool) {
+	if xfFn := f.e.h.getExt(i2rtid(v)); xfFn != nil {
+		f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e)
+		return true
+	}
+	return false 
+}
+
+// ---------------- DECODER FOLLOWS -----------------
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecBasicHandle() *BasicHandle {
+	return f.d.h
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecBinary() bool {
+	return f.d.be // f.d.hh.isBinaryEncoding()
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecSwallow() { f.d.swallow() }
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecScratchBuffer() []byte {
+	return f.d.b[:]
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecScratchArrayBuffer() *[decScratchByteArrayLen]byte {
+	return &f.d.b
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) {
+	// println(">>>>>>>>> DecFallback")
+	rv := reflect.ValueOf(iv)
+	if chkPtr {
+		rv = f.d.ensureDecodeable(rv)
+	}
+	f.d.decodeValue(rv, nil, false)
+	// f.d.decodeValueFallback(rv)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecSliceHelperStart() (decSliceHelper, int) {
+	return f.d.decSliceHelperStart()
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecStructFieldNotFound(index int, name string) {
+	f.d.structFieldNotFound(index, name)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) {
+	f.d.arrayCannotExpand(sliceLen, streamLen)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) {
+	fnerr := tm.UnmarshalText(f.d.d.DecodeStringAsBytes())
+	if fnerr != nil {
+		panic(fnerr)
+	}
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) {
+	// bs := f.dd.DecodeStringAsBytes()
+	// grab the bytes to be read, as UnmarshalJSON needs the full JSON value to unmarshal it itself.
+	fnerr := tm.UnmarshalJSON(f.d.nextValueBytes())
+	if fnerr != nil {
+		panic(fnerr)
+	}
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) {
+	fnerr := bm.UnmarshalBinary(f.d.d.DecodeBytes(nil, true))
+	if fnerr != nil {
+		panic(fnerr)
+	}
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecRaw() []byte { return f.d.rawBytes() }
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+//
+// Deprecated: builtin is no longer supported - so we make this method a no-op,
+// but leave in-place so that old generated files continue to work without regeneration.
+func (f genHelperDecoder) TimeRtidIfBinc() (v uintptr) { return }
+// func (f genHelperDecoder) TimeRtidIfBinc() uintptr {
+// 	// Note: builtin is no longer supported - so make this a no-op
+// 	if _, ok := f.d.hh.(*BincHandle); ok {
+// 		return timeTypId
+// 	}
+// 	return 0
+// }
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) IsJSONHandle() bool {
+	return f.d.js 
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) I2Rtid(v interface{}) uintptr {
+	return i2rtid(v)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) Extension(rtid uintptr) (xfn *extTypeTagFn) {
+	return f.d.h.getExt(rtid)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecExtension(v interface{}, xfFn *extTypeTagFn) {
+	f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+//
+// Deprecated: No longer used,
+// but leave in-place so that old generated files continue to work without regeneration.
+func (f genHelperDecoder) HasExtensions() bool {
+	return len(f.d.h.extHandle) != 0
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+//
+// Deprecated: No longer used,
+// but leave in-place so that old generated files continue to work without regeneration.
+func (f genHelperDecoder) DecExt(v interface{}) (r bool) {
+	if xfFn := f.d.h.getExt(i2rtid(v)); xfFn != nil {
+		f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext)
+		return true
+	}
+	return false 
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int) {
+	return decInferLen(clen, maxlen, unit)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+//
+// Deprecated: No longer used,
+// but leave in-place so that old generated files continue to work without regeneration.
+func (f genHelperDecoder) StringView(v []byte) string { return stringView(v) }
+
diff --git a/vendor/github.com/ugorji/go/codec/gen.generated.go b/vendor/github.com/ugorji/go/codec/gen.generated.go
new file mode 100644
index 0000000..240ba9f
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/gen.generated.go
@@ -0,0 +1,164 @@
+// +build codecgen.exec
+
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+// DO NOT EDIT. THIS FILE IS AUTO-GENERATED FROM gen-dec-(map|array).go.tmpl
+
+const genDecMapTmpl = `
+{{var "v"}} := *{{ .Varname }}
+{{var "l"}} := r.ReadMapStart()
+{{var "bh"}} := z.DecBasicHandle()
+if {{var "v"}} == nil {
+	{{var "rl"}} := z.DecInferLen({{var "l"}}, {{var "bh"}}.MaxInitLen, {{ .Size }})
+	{{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "rl"}})
+	*{{ .Varname }} = {{var "v"}}
+}
+var {{var "mk"}} {{ .KTyp }}
+var {{var "mv"}} {{ .Typ }}
+var {{var "mg"}}, {{var "mdn"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool
+if {{var "bh"}}.MapValueReset {
+	{{if decElemKindPtr}}{{var "mg"}} = true
+	{{else if decElemKindIntf}}if !{{var "bh"}}.InterfaceReset { {{var "mg"}} = true }
+	{{else if not decElemKindImmutable}}{{var "mg"}} = true
+	{{end}} }
+if {{var "l"}} != 0 {
+{{var "hl"}} := {{var "l"}} > 0 
+	for {{var "j"}} := 0; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ {
+	r.ReadMapElemKey() {{/* z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) */}}
+	{{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }}
+{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} {
+		{{var "mk"}} = string({{var "bv"}})
+	}{{ end }}{{if decElemKindPtr}}
+	{{var "ms"}} = true{{end}}
+	if {{var "mg"}} {
+		{{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}] 
+		if {{var "mok"}} {
+			{{var "ms"}} = false
+		} {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}}
+	} {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}}
+	r.ReadMapElemValue() {{/* z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) */}}
+	{{var "mdn"}} = false
+	{{ $x := printf "%vmv%v" .TempVar .Rand }}{{ $y := printf "%vmdn%v" .TempVar .Rand }}{{ decLineVar $x $y }}
+	if {{var "mdn"}} {
+		if {{ var "bh" }}.DeleteOnNilMapValue { delete({{var "v"}}, {{var "mk"}}) } else { {{var "v"}}[{{var "mk"}}] = {{decElemZero}} }
+	} else if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil {
+		{{var "v"}}[{{var "mk"}}] = {{var "mv"}}
+	}
+}
+} // else len==0: TODO: Should we clear map entries?
+r.ReadMapEnd() {{/* z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }}) */}}
+`
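+
+// For illustration: for a hypothetical field x of type *map[string]bool, the map
+// template above expands (with the "yy" temp-var prefix and a numeric suffix)
+// into generated code of roughly this shape:
+//
+//	yyv1 := *x
+//	yyl1 := r.ReadMapStart()
+//	yybh1 := z.DecBasicHandle()
+//	if yyv1 == nil {
+//		yyrl1 := z.DecInferLen(yyl1, yybh1.MaxInitLen, 17)
+//		yyv1 = make(map[string]bool, yyrl1)
+//		*x = yyv1
+//	}
+//	// ... per-entry loop: r.ReadMapElemKey(), decode key, r.ReadMapElemValue(), decode value ...
+//	r.ReadMapEnd()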
+
+const genDecListTmpl = `
+{{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }}
+{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}}{{if not isArray}}
+var {{var "c"}} bool {{/* // changed */}}
+_ = {{var "c"}}{{end}}
+if {{var "l"}} == 0 {
+	{{if isSlice }}if {{var "v"}} == nil {
+		{{var "v"}} = []{{ .Typ }}{}
+		{{var "c"}} = true
+	} else if len({{var "v"}}) != 0 {
+		{{var "v"}} = {{var "v"}}[:0]
+		{{var "c"}} = true
+	} {{else if isChan }}if {{var "v"}} == nil {
+		{{var "v"}} = make({{ .CTyp }}, 0)
+		{{var "c"}} = true
+	} {{end}}
+} else {
+	{{var "hl"}} := {{var "l"}} > 0
+	var {{var "rl"}} int
+	_ = {{var "rl"}}
+	{{if isSlice }} if {{var "hl"}} {
+	if {{var "l"}} > cap({{var "v"}}) {
+		{{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
+		if {{var "rl"}} <= cap({{var "v"}}) {
+			{{var "v"}} = {{var "v"}}[:{{var "rl"}}]
+		} else {
+			{{var "v"}} = make([]{{ .Typ }}, {{var "rl"}})
+		}
+		{{var "c"}} = true
+	} else if {{var "l"}} != len({{var "v"}}) {
+		{{var "v"}} = {{var "v"}}[:{{var "l"}}]
+		{{var "c"}} = true
+	}
+	} {{end}}
+	var {{var "j"}} int 
+    // var {{var "dn"}} bool 
+	for ; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ {
+		{{if not isArray}} if {{var "j"}} == 0 && {{var "v"}} == nil {
+			if {{var "hl"}} {
+				{{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
+			} else {
+				{{var "rl"}} = {{if isSlice}}8{{else if isChan}}64{{end}}
+			}
+			{{var "v"}} = make({{if isSlice}}[]{{ .Typ }}{{else if isChan}}{{.CTyp}}{{end}}, {{var "rl"}})
+			{{var "c"}} = true 
+		}{{end}}
+		{{var "h"}}.ElemContainerState({{var "j"}})
+        {{/* {{var "dn"}} = r.TryDecodeAsNil() */}}{{/* commented out, as decLineVar handles this already each time */}}
+        {{if isChan}}{{ $x := printf "%[1]vvcx%[2]v" .TempVar .Rand }}var {{$x}} {{ .Typ }}
+		{{ decLineVar $x }}
+		{{var "v"}} <- {{ $x }}
+        // println(">>>> sending ", {{ $x }}, " into ", {{var "v"}}) // TODO: remove this
+        {{else}}{{/* // if indefinite, etc, then expand the slice if necessary */}}
+		var {{var "db"}} bool
+		if {{var "j"}} >= len({{var "v"}}) {
+			{{if isSlice }} {{var "v"}} = append({{var "v"}}, {{ zero }})
+			{{var "c"}} = true
+			{{else}} z.DecArrayCannotExpand(len({{var "v"}}), {{var "j"}}+1); {{var "db"}} = true
+			{{end}}
+		}
+		if {{var "db"}} {
+			z.DecSwallow()
+		} else {
+			{{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }}
+		}
+        {{end}}
+	}
+	{{if isSlice}} if {{var "j"}} < len({{var "v"}}) {
+		{{var "v"}} = {{var "v"}}[:{{var "j"}}]
+		{{var "c"}} = true
+	} else if {{var "j"}} == 0 && {{var "v"}} == nil {
+		{{var "v"}} = make([]{{ .Typ }}, 0)
+		{{var "c"}} = true
+	} {{end}}
+}
+{{var "h"}}.End()
+{{if not isArray }}if {{var "c"}} { 
+	*{{ .Varname }} = {{var "v"}}
+}{{end}}
+`
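+
+// For illustration: for a []string field, the list template above either reuses
+// the existing backing storage (resizing via DecInferLen) or allocates afresh,
+// decodes each element in the per-index loop, and writes back through the
+// pointer only if the local copy changed (the generated "c" flag).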
+
+const genEncChanTmpl = `
+{{.Label}}:
+switch timeout{{.Sfx}} :=  z.EncBasicHandle().ChanRecvTimeout; {
+case timeout{{.Sfx}} == 0: // only consume available
+	for {
+		select {
+		case b{{.Sfx}} := <-{{.Chan}}:
+			{{ .Slice }} = append({{.Slice}}, b{{.Sfx}})
+		default:
+			break {{.Label}}
+		}
+	}
+case timeout{{.Sfx}} > 0: // consume until timeout
+	tt{{.Sfx}} := time.NewTimer(timeout{{.Sfx}})
+	for {
+		select {
+		case b{{.Sfx}} := <-{{.Chan}}:
+			{{.Slice}} = append({{.Slice}}, b{{.Sfx}})
+		case <-tt{{.Sfx}}.C:
+			// close(tt.C)
+			break {{.Label}}
+		}
+	}
+default: // consume until close
+	for b{{.Sfx}} := range {{.Chan}} {
+		{{.Slice}} = append({{.Slice}}, b{{.Sfx}})
+	}
+}
+`
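+
+// For illustration: which branch runs above is chosen by the handle's
+// ChanRecvTimeout, set on the caller side, e.g. (hypothetical setup):
+//
+//	var h codec.JsonHandle
+//	h.ChanRecvTimeout = time.Second // drain the channel for up to 1s while encoding
+//
+// 0 consumes only what is immediately available; a negative value drains until close.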
diff --git a/vendor/github.com/ugorji/go/codec/gen.go b/vendor/github.com/ugorji/go/codec/gen.go
new file mode 100644
index 0000000..b4c4031
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/gen.go
@@ -0,0 +1,2139 @@
+// +build codecgen.exec
+
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+	"bytes"
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"go/format"
+	"io"
+	"io/ioutil"
+	"math/rand"
+	"reflect"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"text/template"
+	"time"
+	"unicode"
+	"unicode/utf8"
+)
+
+// ---------------------------------------------------
+// codecgen supports the full cycle of reflection-based codec:
+//    - RawExt
+//    - Raw
+//    - Extensions
+//    - (Binary|Text|JSON)(Unm|M)arshal
+//    - generic by-kind
+//
+// This means that, for dynamic things, we MUST use reflection to at least get the reflect.Type.
+// In those areas, we try to only do reflection or interface-conversion when NECESSARY:
+//    - Extensions, only if Extensions are configured.
+//
+// However, codecgen doesn't support the following:
+//   - Canonical option. (codecgen IGNORES it currently)
+//     This is just because it has not been implemented.
+//
+// During encode/decode, Selfer takes precedence.
+// A type implementing Selfer will know how to encode/decode itself statically.
+//
+// The following field types are supported:
+//     array: [n]T
+//     slice: []T
+//     map: map[K]V
+//     primitive: [u]int[n], float(32|64), bool, string
+//     struct
+//
+// ---------------------------------------------------
+// Note that a Selfer cannot call (e|d).(En|De)code on itself,
+// as this will cause a circular reference, as (En|De)code will call Selfer methods.
+// Any type that implements Selfer must implement it completely and not fall back to (En|De)code.
+//
+// In addition, code in this file manages the generation of fast-path implementations of
+// encode/decode of slices/maps of primitive keys/values.
+//
+// Users MUST re-generate their implementations whenever the code shape changes.
+// The generated code will panic if it was generated with a version older than the supporting library.
+// ---------------------------------------------------
+//
+// codec framework is very feature rich.
+// When encoding or decoding into an interface, it depends on the runtime type of the interface.
+// The type of the interface may be a named type, an extension, etc.
+// Consequently, we fall back to the runtime codec for encoding/decoding interfaces.
+// In addition, we fall back for any value which cannot be guaranteed at runtime.
+// This allows us to support ANY value, including any named types, specifically those which
+// do not implement our interfaces (e.g. Selfer).
+//
+// This explains some slowness compared to other code generation codecs (e.g. msgp).
+// This reduction in speed is only seen when your code refers to interfaces,
+// e.g. type T struct { A interface{}; B []interface{}; C map[string]interface{} }
+//
+// codecgen will panic if the file was generated with an old version of the library in use.
+//
+// Note:
+//   It was a conscious decision to have gen.go always explicitly call EncodeNil or TryDecodeAsNil.
+//   This way, there isn't a function call overhead just to see that we should not enter a block of code.
+//
+// Note:
+//   codecgen-generated code depends on the variables defined by fast-path.generated.go.
+//   consequently, you cannot run with tags "codecgen notfastpath".
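+//
+// For illustration: the Selfer methods that codecgen emits for a type have
+// roughly this shape (sketch only; MyType and the codec1978 alias are examples):
+//
+//	func (x *MyType) CodecEncodeSelf(e *codec1978.Encoder) { ... }
+//	func (x *MyType) CodecDecodeSelf(d *codec1978.Decoder) { ... }
+//
+// The runtime encoder/decoder dispatches to these before trying any reflection path.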
+
+// GenVersion is the current version of codecgen.
+//
+// NOTE: Increment this value each time codecgen changes fundamentally.
+// Fundamental changes are:
+//   - helper methods change (signature change, new ones added, some removed, etc)
+//   - codecgen command line changes
+//
+// v1: Initial Version
+// v2:
+// v3: Changes for Kubernetes:
+//     changes in signature of some unpublished helper methods and codecgen cmdline arguments.
+// v4: Removed separator support from (en|de)cDriver, and refactored codec(gen)
+// v5: changes to support faster json decoding. Let encoder/decoder maintain state of collections.
+// v6: removed unsafe from gen, and now uses codecgen.exec tag
+// v7:
+// v8: current - we now maintain compatibility with old generated code.
+const genVersion = 8
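+
+// For illustration: every generated file embeds an init() guard of roughly this
+// shape (sketch; codec1978 is the generated import alias), so that a version
+// mismatch fails fast at program startup rather than misbehaving later:
+//
+//	func init() {
+//		if codec1978.GenVersion != 8 {
+//			_, file, _, _ := runtime.Caller(0)
+//			panic("codecgen version mismatch: current: 8, need ... Re-generate file: " + file)
+//		}
+//	}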
+
+const (
+	genCodecPkg        = "codec1978"
+	genTempVarPfx      = "yy"
+	genTopLevelVarName = "x"
+
+	// ignore canBeNil parameter, and always set to true.
+	// This is because nil can appear anywhere, so we should always check.
+	genAnythingCanBeNil = true
+
+	// if genUseOneFunctionForDecStructMap, make a single codecDecodeSelferFromMap function;
+	// else make codecDecodeSelferFromMap{LenPrefix,CheckBreak} so that conditionals
+	// are not executed a lot.
+	//
+	// From testing, it didn't make much difference in runtime, so keep as true (one function only)
+	genUseOneFunctionForDecStructMap = true
+)
+
+type genStructMapStyle uint8
+
+const (
+	genStructMapStyleConsolidated genStructMapStyle = iota
+	genStructMapStyleLenPrefix
+	genStructMapStyleCheckBreak
+)
+
+var (
+	errGenAllTypesSamePkg  = errors.New("All types must be in the same package")
+	errGenExpectArrayOrMap = errors.New("unexpected type. Expecting array/map/slice")
+
+	genBase64enc  = base64.NewEncoding("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789__")
+	genQNameRegex = regexp.MustCompile(`[A-Za-z_.]+`)
+)
+
+type genBuf struct {
+	buf []byte
+}
+
+func (x *genBuf) s(s string) *genBuf              { x.buf = append(x.buf, s...); return x }
+func (x *genBuf) b(s []byte) *genBuf              { x.buf = append(x.buf, s...); return x }
+func (x *genBuf) v() string                       { return string(x.buf) }
+func (x *genBuf) f(s string, args ...interface{}) { x.s(fmt.Sprintf(s, args...)) }
+func (x *genBuf) reset() {
+	if x.buf != nil {
+		x.buf = x.buf[:0]
+	}
+}
+
+// genRunner holds some state used during a Gen run.
+type genRunner struct {
+	w io.Writer      // output
+	c uint64         // counter used for generating varsfx
+	t []reflect.Type // list of types to run selfer on
+
+	tc reflect.Type     // currently running selfer on this type
+	te map[uintptr]bool // types for which the encoder has been created
+	td map[uintptr]bool // types for which the decoder has been created
+	cp string           // codec import path
+
+	im  map[string]reflect.Type // imports to add
+	imn map[string]string       // package names of imports to add
+	imc uint64                  // counter for import numbers
+
+	is map[reflect.Type]struct{} // types seen during import search
+	bp string                    // base PkgPath, for which we are generating for
+
+	cpfx string // codec package prefix
+
+	tm map[reflect.Type]struct{} // types for which enc/dec must be generated
+	ts []reflect.Type            // types for which enc/dec must be generated
+
+	xs string // top level variable/constant suffix
+	hn string // fn helper type name
+
+	ti *TypeInfos
+	// rr *rand.Rand // random generator for file-specific types
+
+	nx bool // no extensions
+}
+
+// Gen will write a complete go file containing Selfer implementations for each
+// type passed. All the types must be in the same package.
+//
+// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE.
+func Gen(w io.Writer, buildTags, pkgName, uid string, noExtensions bool,
+	ti *TypeInfos, typ ...reflect.Type) {
+	// All types passed to this method do not have a codec.Selfer method implemented directly.
+	// codecgen already checks the AST and skips any types that define the codec.Selfer methods.
+	// Consequently, there's no need to check and trim them if they implement codec.Selfer
+
+	if len(typ) == 0 {
+		return
+	}
+	x := genRunner{
+		w:   w,
+		t:   typ,
+		te:  make(map[uintptr]bool),
+		td:  make(map[uintptr]bool),
+		im:  make(map[string]reflect.Type),
+		imn: make(map[string]string),
+		is:  make(map[reflect.Type]struct{}),
+		tm:  make(map[reflect.Type]struct{}),
+		ts:  []reflect.Type{},
+		bp:  genImportPath(typ[0]),
+		xs:  uid,
+		ti:  ti,
+		nx:  noExtensions,
+	}
+	if x.ti == nil {
+		x.ti = defTypeInfos
+	}
+	if x.xs == "" {
+		rr := rand.New(rand.NewSource(time.Now().UnixNano()))
+		x.xs = strconv.FormatInt(rr.Int63n(9999), 10)
+	}
+
+	// gather imports first:
+	x.cp = genImportPath(reflect.TypeOf(x))
+	x.imn[x.cp] = genCodecPkg
+	for _, t := range typ {
+		// fmt.Printf("###########: PkgPath: '%v', Name: '%s'\n", genImportPath(t), t.Name())
+		if genImportPath(t) != x.bp {
+			panic(errGenAllTypesSamePkg)
+		}
+		x.genRefPkgs(t)
+	}
+	if buildTags != "" {
+		x.line("// +build " + buildTags)
+		x.line("")
+	}
+	x.line(`
+
+// Code generated by codecgen - DO NOT EDIT.
+
+`)
+	x.line("package " + pkgName)
+	x.line("")
+	x.line("import (")
+	if x.cp != x.bp {
+		x.cpfx = genCodecPkg + "."
+		x.linef("%s \"%s\"", genCodecPkg, x.cp)
+	}
+	// use a sorted set of im keys, so that we can get consistent output
+	imKeys := make([]string, 0, len(x.im))
+	for k := range x.im {
+		imKeys = append(imKeys, k)
+	}
+	sort.Strings(imKeys)
+	for _, k := range imKeys { // for k, _ := range x.im {
+		if k == x.imn[k] {
+			x.linef("\"%s\"", k)
+		} else {
+			x.linef("%s \"%s\"", x.imn[k], k)
+		}
+	}
+	// add required packages
+	for _, k := range [...]string{"runtime", "errors", "strconv"} { // "reflect", "fmt"
+		if _, ok := x.im[k]; !ok {
+			x.line("\"" + k + "\"")
+		}
+	}
+	x.line(")")
+	x.line("")
+
+	x.line("const (")
+	x.linef("// ----- content types ----")
+	x.linef("codecSelferCcUTF8%s = %v", x.xs, int64(cUTF8))
+	x.linef("codecSelferCcRAW%s = %v", x.xs, int64(cRAW))
+	x.linef("// ----- value types used ----")
+	for _, vt := range [...]valueType{
+		valueTypeArray, valueTypeMap, valueTypeString,
+		valueTypeInt, valueTypeUint, valueTypeFloat} {
+		x.linef("codecSelferValueType%s%s = %v", vt.String(), x.xs, int64(vt))
+	}
+
+	x.linef("codecSelferBitsize%s = uint8(32 << (^uint(0) >> 63))", x.xs)
+	x.line(")")
+	x.line("var (")
+	x.line("errCodecSelferOnlyMapOrArrayEncodeToStruct" + x.xs + " = errors.New(`only encoded map or array can be decoded into a struct`)")
+	x.line(")")
+	x.line("")
+
+	x.hn = "codecSelfer" + x.xs
+	x.line("type " + x.hn + " struct{}")
+	x.line("")
+
+	x.varsfxreset()
+	x.line("func init() {")
+	x.linef("if %sGenVersion != %v {", x.cpfx, genVersion)
+	x.line("_, file, _, _ := runtime.Caller(0)")
+	x.outf(`panic("codecgen version mismatch: current: %v, need " + strconv.FormatInt(int64(%sGenVersion), 10) + ". Re-generate file: " + file)`, genVersion, x.cpfx)
+	// x.out(`panic(fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", `)
+	// x.linef(`%v, %sGenVersion, file))`, genVersion, x.cpfx)
+	x.linef("}")
+	x.line("if false { // reference the types, but skip this branch at build/run time")
+	// x.line("_ = strconv.ParseInt")
+	var n int
+	// for k, t := range x.im {
+	for _, k := range imKeys {
+		t := x.im[k]
+		x.linef("var v%v %s.%s", n, x.imn[k], t.Name())
+		n++
+	}
+	if n > 0 {
+		x.out("_")
+		for i := 1; i < n; i++ {
+			x.out(", _")
+		}
+		x.out(" = v0")
+		for i := 1; i < n; i++ {
+			x.outf(", v%v", i)
+		}
+	}
+	x.line("} ") // close if false
+	x.line("}")  // close init
+	x.line("")
+
+	// generate rest of type info
+	for _, t := range typ {
+		x.tc = t
+		x.selfer(true)
+		x.selfer(false)
+	}
+
+	for _, t := range x.ts {
+		rtid := rt2id(t)
+		// generate enc functions for all these slice/map types.
+		x.varsfxreset()
+		x.linef("func (x %s) enc%s(v %s%s, e *%sEncoder) {", x.hn, x.genMethodNameT(t), x.arr2str(t, "*"), x.genTypeName(t), x.cpfx)
+		x.genRequiredMethodVars(true)
+		switch t.Kind() {
+		case reflect.Array, reflect.Slice, reflect.Chan:
+			x.encListFallback("v", t)
+		case reflect.Map:
+			x.encMapFallback("v", t)
+		default:
+			panic(errGenExpectArrayOrMap)
+		}
+		x.line("}")
+		x.line("")
+
+		// generate dec functions for all these slice/map types.
+		x.varsfxreset()
+		x.linef("func (x %s) dec%s(v *%s, d *%sDecoder) {", x.hn, x.genMethodNameT(t), x.genTypeName(t), x.cpfx)
+		x.genRequiredMethodVars(false)
+		switch t.Kind() {
+		case reflect.Array, reflect.Slice, reflect.Chan:
+			x.decListFallback("v", rtid, t)
+		case reflect.Map:
+			x.decMapFallback("v", rtid, t)
+		default:
+			panic(errGenExpectArrayOrMap)
+		}
+		x.line("}")
+		x.line("")
+	}
+
+	x.line("")
+}
+
+func (x *genRunner) checkForSelfer(t reflect.Type, varname string) bool {
+	// return varname != genTopLevelVarName && t != x.tc
+	// the only time we checkForSelfer is if we are not at the TOP of the generated code.
+	return varname != genTopLevelVarName
+}
+
+func (x *genRunner) arr2str(t reflect.Type, s string) string {
+	if t.Kind() == reflect.Array {
+		return s
+	}
+	return ""
+}
+
+func (x *genRunner) genRequiredMethodVars(encode bool) {
+	x.line("var h " + x.hn)
+	if encode {
+		x.line("z, r := " + x.cpfx + "GenHelperEncoder(e)")
+	} else {
+		x.line("z, r := " + x.cpfx + "GenHelperDecoder(d)")
+	}
+	x.line("_, _, _ = h, z, r")
+}
+
+func (x *genRunner) genRefPkgs(t reflect.Type) {
+	if _, ok := x.is[t]; ok {
+		return
+	}
+	x.is[t] = struct{}{}
+	tpkg, tname := genImportPath(t), t.Name()
+	if tpkg != "" && tpkg != x.bp && tpkg != x.cp && tname != "" && tname[0] >= 'A' && tname[0] <= 'Z' {
+		if _, ok := x.im[tpkg]; !ok {
+			x.im[tpkg] = t
+			if idx := strings.LastIndex(tpkg, "/"); idx < 0 {
+				x.imn[tpkg] = tpkg
+			} else {
+				x.imc++
+				x.imn[tpkg] = "pkg" + strconv.FormatUint(x.imc, 10) + "_" + genGoIdentifier(tpkg[idx+1:], false)
+			}
+		}
+	}
+	switch t.Kind() {
+	case reflect.Array, reflect.Slice, reflect.Ptr, reflect.Chan:
+		x.genRefPkgs(t.Elem())
+	case reflect.Map:
+		x.genRefPkgs(t.Elem())
+		x.genRefPkgs(t.Key())
+	case reflect.Struct:
+		for i := 0; i < t.NumField(); i++ {
+			if fname := t.Field(i).Name; fname != "" && fname[0] >= 'A' && fname[0] <= 'Z' {
+				x.genRefPkgs(t.Field(i).Type)
+			}
+		}
+	}
+}
+
+func (x *genRunner) varsfx() string {
+	x.c++
+	return strconv.FormatUint(x.c, 10)
+}
+
+func (x *genRunner) varsfxreset() {
+	x.c = 0
+}
+
+func (x *genRunner) out(s string) {
+	_, err := io.WriteString(x.w, s)
+	if err != nil {
+		panic(err)
+	}
+}
+
+func (x *genRunner) outf(s string, params ...interface{}) {
+	_, err := fmt.Fprintf(x.w, s, params...)
+	if err != nil {
+		panic(err)
+	}
+}
+
+func (x *genRunner) line(s string) {
+	x.out(s)
+	if len(s) == 0 || s[len(s)-1] != '\n' {
+		x.out("\n")
+	}
+}
+
+func (x *genRunner) linef(s string, params ...interface{}) {
+	x.outf(s, params...)
+	if len(s) == 0 || s[len(s)-1] != '\n' {
+		x.out("\n")
+	}
+}
+
+func (x *genRunner) genTypeName(t reflect.Type) (n string) {
+	// defer func() { fmt.Printf(">>>> ####: genTypeName: t: %v, name: '%s'\n", t, n) }()
+
+	// if the type has a PkgPath which doesn't match the current package,
+	// then include it.
+	// We cannot depend on t.String() because it includes the current package,
+	// or on t.PkgPath because it includes the full import path.
+	//
+	var ptrPfx string
+	for t.Kind() == reflect.Ptr {
+		ptrPfx += "*"
+		t = t.Elem()
+	}
+	if tn := t.Name(); tn != "" {
+		return ptrPfx + x.genTypeNamePrim(t)
+	}
+	switch t.Kind() {
+	case reflect.Map:
+		return ptrPfx + "map[" + x.genTypeName(t.Key()) + "]" + x.genTypeName(t.Elem())
+	case reflect.Slice:
+		return ptrPfx + "[]" + x.genTypeName(t.Elem())
+	case reflect.Array:
+		return ptrPfx + "[" + strconv.FormatInt(int64(t.Len()), 10) + "]" + x.genTypeName(t.Elem())
+	case reflect.Chan:
+		return ptrPfx + t.ChanDir().String() + " " + x.genTypeName(t.Elem())
+	default:
+		if t == intfTyp {
+			return ptrPfx + "interface{}"
+		} else {
+			return ptrPfx + x.genTypeNamePrim(t)
+		}
+	}
+}
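+
+// For illustration (hypothetical types): for a value of type map[string]*other.Foo,
+// where package "other" was registered under the generated alias pkg1_other,
+// genTypeName returns "map[string]*pkg1_other.Foo"; for a named type Bar in the
+// package being generated for, it returns just "Bar".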
+
+func (x *genRunner) genTypeNamePrim(t reflect.Type) (n string) {
+	if t.Name() == "" {
+		return t.String()
+	} else if genImportPath(t) == "" || genImportPath(t) == genImportPath(x.tc) {
+		return t.Name()
+	} else {
+		return x.imn[genImportPath(t)] + "." + t.Name()
+		// return t.String() // best way to get the package name inclusive
+	}
+}
+
+func (x *genRunner) genZeroValueR(t reflect.Type) string {
+	// return the go literal representing the zero value of type t
+	switch t.Kind() {
+	case reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Func,
+		reflect.Slice, reflect.Map, reflect.Invalid:
+		return "nil"
+	case reflect.Bool:
+		return "false"
+	case reflect.String:
+		return `""`
+	case reflect.Struct, reflect.Array:
+		return x.genTypeName(t) + "{}"
+	default: // all numbers
+		return "0"
+	}
+}
+
+func (x *genRunner) genMethodNameT(t reflect.Type) (s string) {
+	return genMethodNameT(t, x.tc)
+}
+
+func (x *genRunner) selfer(encode bool) {
+	t := x.tc
+	t0 := t
+	// always make decode use a pointer receiver,
+	// and structs/arrays always use a ptr receiver (encode|decode)
+	isptr := !encode || t.Kind() == reflect.Array || (t.Kind() == reflect.Struct && t != timeTyp)
+	x.varsfxreset()
+
+	fnSigPfx := "func (" + genTopLevelVarName + " "
+	if isptr {
+		fnSigPfx += "*"
+	}
+	fnSigPfx += x.genTypeName(t)
+	x.out(fnSigPfx)
+
+	if isptr {
+		t = reflect.PtrTo(t)
+	}
+	if encode {
+		x.line(") CodecEncodeSelf(e *" + x.cpfx + "Encoder) {")
+		x.genRequiredMethodVars(true)
+		x.encVar(genTopLevelVarName, t)
+	} else {
+		x.line(") CodecDecodeSelf(d *" + x.cpfx + "Decoder) {")
+		x.genRequiredMethodVars(false)
+		// do not use decVar, as there is no need to check TryDecodeAsNil
+		// or way to elegantly handle that, and also setting it to a
+		// non-nil value doesn't affect the pointer passed.
+		// x.decVar(genTopLevelVarName, t, false)
+		x.dec(genTopLevelVarName, t0, true)
+	}
+	x.line("}")
+	x.line("")
+
+	if encode || t0.Kind() != reflect.Struct {
+		return
+	}
+
+	// write is containerMap
+	if genUseOneFunctionForDecStructMap {
+		x.out(fnSigPfx)
+		x.line(") codecDecodeSelfFromMap(l int, d *" + x.cpfx + "Decoder) {")
+		x.genRequiredMethodVars(false)
+		x.decStructMap(genTopLevelVarName, "l", rt2id(t0), t0, genStructMapStyleConsolidated)
+		x.line("}")
+		x.line("")
+	} else {
+		x.out(fnSigPfx)
+		x.line(") codecDecodeSelfFromMapLenPrefix(l int, d *" + x.cpfx + "Decoder) {")
+		x.genRequiredMethodVars(false)
+		x.decStructMap(genTopLevelVarName, "l", rt2id(t0), t0, genStructMapStyleLenPrefix)
+		x.line("}")
+		x.line("")
+
+		x.out(fnSigPfx)
+		x.line(") codecDecodeSelfFromMapCheckBreak(l int, d *" + x.cpfx + "Decoder) {")
+		x.genRequiredMethodVars(false)
+		x.decStructMap(genTopLevelVarName, "l", rt2id(t0), t0, genStructMapStyleCheckBreak)
+		x.line("}")
+		x.line("")
+	}
+
+	// write containerArray
+	x.out(fnSigPfx)
+	x.line(") codecDecodeSelfFromArray(l int, d *" + x.cpfx + "Decoder) {")
+	x.genRequiredMethodVars(false)
+	x.decStructArray(genTopLevelVarName, "l", "return", rt2id(t0), t0)
+	x.line("}")
+	x.line("")
+
+}
+
+// used for chan, array, slice, map
+func (x *genRunner) xtraSM(varname string, t reflect.Type, encode, isptr bool) {
+	var ptrPfx, addrPfx string
+	if isptr {
+		ptrPfx = "*"
+	} else {
+		addrPfx = "&"
+	}
+	if encode {
+		x.linef("h.enc%s((%s%s)(%s), e)", x.genMethodNameT(t), ptrPfx, x.genTypeName(t), varname)
+	} else {
+		x.linef("h.dec%s((*%s)(%s%s), d)", x.genMethodNameT(t), x.genTypeName(t), addrPfx, varname)
+	}
+	x.registerXtraT(t)
+}
+
+func (x *genRunner) registerXtraT(t reflect.Type) {
+	// recursively register the types
+	if _, ok := x.tm[t]; ok {
+		return
+	}
+	var tkey reflect.Type
+	switch t.Kind() {
+	case reflect.Chan, reflect.Slice, reflect.Array:
+	case reflect.Map:
+		tkey = t.Key()
+	default:
+		return
+	}
+	x.tm[t] = struct{}{}
+	x.ts = append(x.ts, t)
+	// check if this refers to any xtra types eg. a slice of array: add the array
+	x.registerXtraT(t.Elem())
+	if tkey != nil {
+		x.registerXtraT(tkey)
+	}
+}
+
+// encVar will encode a variable.
+// The parameter, t, is the reflect.Type of the variable itself
+func (x *genRunner) encVar(varname string, t reflect.Type) {
+	// fmt.Printf(">>>>>> varname: %s, t: %v\n", varname, t)
+	var checkNil bool
+	switch t.Kind() {
+	case reflect.Ptr, reflect.Interface, reflect.Slice, reflect.Map, reflect.Chan:
+		checkNil = true
+	}
+	if checkNil {
+		x.linef("if %s == nil { r.EncodeNil() } else { ", varname)
+	}
+
+	switch t.Kind() {
+	case reflect.Ptr:
+		telem := t.Elem()
+		tek := telem.Kind()
+		if tek == reflect.Array || (tek == reflect.Struct && telem != timeTyp) {
+			x.enc(varname, genNonPtr(t))
+			break
+		}
+		i := x.varsfx()
+		x.line(genTempVarPfx + i + " := *" + varname)
+		x.enc(genTempVarPfx+i, genNonPtr(t))
+	case reflect.Struct, reflect.Array:
+		if t == timeTyp {
+			x.enc(varname, t)
+			break
+		}
+		i := x.varsfx()
+		x.line(genTempVarPfx + i + " := &" + varname)
+		x.enc(genTempVarPfx+i, t)
+	default:
+		x.enc(varname, t)
+	}
+
+	if checkNil {
+		x.line("}")
+	}
+
+}
+
+// enc will encode a variable (varname) of type t, where t represents T.
+// if t is !time.Time and t is of kind reflect.Struct or reflect.Array, varname is of type *T
+// (to prevent copying),
+// else t is of type T
+func (x *genRunner) enc(varname string, t reflect.Type) {
+	rtid := rt2id(t)
+	ti2 := x.ti.get(rtid, t)
+	// We call CodecEncodeSelf if one of the following are honored:
+	//   - the type already implements Selfer, call that
+	//   - the type has a Selfer implementation just created, use that
+	//   - the type is in the list of the ones we will generate for, but it is not currently being generated
+
+	mi := x.varsfx()
+	// tptr := reflect.PtrTo(t)
+	tk := t.Kind()
+	if x.checkForSelfer(t, varname) {
+		if tk == reflect.Array || (tk == reflect.Struct && rtid != timeTypId) { // varname is of type *T
+			// if tptr.Implements(selferTyp) || t.Implements(selferTyp) {
+			if ti2.isFlag(typeInfoFlagIsZeroerPtr) || ti2.isFlag(typeInfoFlagIsZeroer) {
+				x.line(varname + ".CodecEncodeSelf(e)")
+				return
+			}
+		} else { // varname is of type T
+			if ti2.cs { // t.Implements(selferTyp) {
+				x.line(varname + ".CodecEncodeSelf(e)")
+				return
+			} else if ti2.csp { // tptr.Implements(selferTyp) {
+				x.linef("%ssf%s := &%s", genTempVarPfx, mi, varname)
+				x.linef("%ssf%s.CodecEncodeSelf(e)", genTempVarPfx, mi)
+				return
+			}
+		}
+
+		if _, ok := x.te[rtid]; ok {
+			x.line(varname + ".CodecEncodeSelf(e)")
+			return
+		}
+	}
+
+	inlist := false
+	for _, t0 := range x.t {
+		if t == t0 {
+			inlist = true
+			if x.checkForSelfer(t, varname) {
+				x.line(varname + ".CodecEncodeSelf(e)")
+				return
+			}
+			break
+		}
+	}
+
+	var rtidAdded bool
+	if t == x.tc {
+		x.te[rtid] = true
+		rtidAdded = true
+	}
+
+	// check if
+	//   - type is time.Time, RawExt, Raw
+	//   - the type implements (Text|JSON|Binary)(Unm|M)arshal
+
+	x.line("if false {")           //start if block
+	defer func() { x.line("}") }() //end if block
+
+	if t == timeTyp {
+		x.linef("} else { r.EncodeTime(%s)", varname)
+		return
+	}
+	if t == rawTyp {
+		x.linef("} else { z.EncRaw(%s)", varname)
+		return
+	}
+	if t == rawExtTyp {
+		x.linef("} else { r.EncodeRawExt(%s, e)", varname)
+		return
+	}
+	// only check for extensions if the type is named, and has a packagePath.
+	var arrayOrStruct = tk == reflect.Array || tk == reflect.Struct // meaning varname is of type *T
+	if !x.nx && genImportPath(t) != "" && t.Name() != "" {
+		yy := fmt.Sprintf("%sxt%s", genTempVarPfx, mi)
+		x.linef("} else if %s := z.Extension(z.I2Rtid(%s)); %s != nil { z.EncExtension(%s, %s) ", yy, varname, yy, varname, yy)
+	}
+	if arrayOrStruct { // varname is of type *T
+		if ti2.bm || ti2.bmp { // t.Implements(binaryMarshalerTyp) || tptr.Implements(binaryMarshalerTyp) {
+			x.linef("} else if z.EncBinary() { z.EncBinaryMarshal(%v) ", varname)
+		}
+		if ti2.jm || ti2.jmp { // t.Implements(jsonMarshalerTyp) || tptr.Implements(jsonMarshalerTyp) {
+			x.linef("} else if !z.EncBinary() && z.IsJSONHandle() { z.EncJSONMarshal(%v) ", varname)
+		} else if ti2.tm || ti2.tmp { // t.Implements(textMarshalerTyp) || tptr.Implements(textMarshalerTyp) {
+			x.linef("} else if !z.EncBinary() { z.EncTextMarshal(%v) ", varname)
+		}
+	} else { // varname is of type T
+		if ti2.bm { // t.Implements(binaryMarshalerTyp) {
+			x.linef("} else if z.EncBinary() { z.EncBinaryMarshal(%v) ", varname)
+		} else if ti2.bmp { // tptr.Implements(binaryMarshalerTyp) {
+			x.linef("} else if z.EncBinary() { z.EncBinaryMarshal(&%v) ", varname)
+		}
+		if ti2.jm { // t.Implements(jsonMarshalerTyp) {
+			x.linef("} else if !z.EncBinary() && z.IsJSONHandle() { z.EncJSONMarshal(%v) ", varname)
+		} else if ti2.jmp { // tptr.Implements(jsonMarshalerTyp) {
+			x.linef("} else if !z.EncBinary() && z.IsJSONHandle() { z.EncJSONMarshal(&%v) ", varname)
+		} else if ti2.tm { // t.Implements(textMarshalerTyp) {
+			x.linef("} else if !z.EncBinary() { z.EncTextMarshal(%v) ", varname)
+		} else if ti2.tmp { // tptr.Implements(textMarshalerTyp) {
+			x.linef("} else if !z.EncBinary() { z.EncTextMarshal(&%v) ", varname)
+		}
+	}
+	x.line("} else {")
+
+	switch t.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		x.line("r.EncodeInt(int64(" + varname + "))")
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		x.line("r.EncodeUint(uint64(" + varname + "))")
+	case reflect.Float32:
+		x.line("r.EncodeFloat32(float32(" + varname + "))")
+	case reflect.Float64:
+		x.line("r.EncodeFloat64(float64(" + varname + "))")
+	case reflect.Bool:
+		x.line("r.EncodeBool(bool(" + varname + "))")
+	case reflect.String:
+		x.line("r.EncodeString(codecSelferCcUTF8" + x.xs + ", string(" + varname + "))")
+	case reflect.Chan:
+		x.xtraSM(varname, t, true, false)
+		// x.encListFallback(varname, rtid, t)
+	case reflect.Array:
+		x.xtraSM(varname, t, true, true)
+	case reflect.Slice:
+		// if nil, call dedicated function
+		// if a []uint8, call dedicated function
+		// if a known fastpath slice, call dedicated function
+		// else write encode function in-line.
+		// - if elements are primitives or Selfers, call dedicated function on each member.
+		// - else call Encoder.encode(XXX) on it.
+		if rtid == uint8SliceTypId {
+			x.line("r.EncodeStringBytes(codecSelferCcRAW" + x.xs + ", []byte(" + varname + "))")
+		} else if fastpathAV.index(rtid) != -1 {
+			g := x.newGenV(t)
+			x.line("z.F." + g.MethodNamePfx("Enc", false) + "V(" + varname + ", e)")
+		} else {
+			x.xtraSM(varname, t, true, false)
+			// x.encListFallback(varname, rtid, t)
+		}
+	case reflect.Map:
+		// if nil, call dedicated function
+		// if a known fastpath map, call dedicated function
+		// else write encode function in-line.
+		// - if elements are primitives or Selfers, call dedicated function on each member.
+		// - else call Encoder.encode(XXX) on it.
+		// x.line("if " + varname + " == nil { \nr.EncodeNil()\n } else { ")
+		if fastpathAV.index(rtid) != -1 {
+			g := x.newGenV(t)
+			x.line("z.F." + g.MethodNamePfx("Enc", false) + "V(" + varname + ", e)")
+		} else {
+			x.xtraSM(varname, t, true, false)
+			// x.encMapFallback(varname, rtid, t)
+		}
+	case reflect.Struct:
+		if !inlist {
+			delete(x.te, rtid)
+			x.line("z.EncFallback(" + varname + ")")
+			break
+		}
+		x.encStruct(varname, rtid, t)
+	default:
+		if rtidAdded {
+			delete(x.te, rtid)
+		}
+		x.line("z.EncFallback(" + varname + ")")
+	}
+}
+
+func (x *genRunner) encZero(t reflect.Type) {
+	switch t.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		x.line("r.EncodeInt(0)")
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		x.line("r.EncodeUint(0)")
+	case reflect.Float32:
+		x.line("r.EncodeFloat32(0)")
+	case reflect.Float64:
+		x.line("r.EncodeFloat64(0)")
+	case reflect.Bool:
+		x.line("r.EncodeBool(false)")
+	case reflect.String:
+		x.line("r.EncodeString(codecSelferCcUTF8" + x.xs + `, "")`)
+	default:
+		x.line("r.EncodeNil()")
+	}
+}
+
+func (x *genRunner) encOmitEmptyLine(t2 reflect.StructField, varname string, buf *genBuf) {
+	// smartly check omitEmpty on a struct type, as it may contain uncomparable map/slice/etc.
+	// also, for maps/slices/arrays, check if len != 0 (not if == zero value)
+	varname2 := varname + "." + t2.Name
+	switch t2.Type.Kind() {
+	case reflect.Struct:
+		rtid2 := rt2id(t2.Type)
+		ti2 := x.ti.get(rtid2, t2.Type)
+		// fmt.Printf(">>>> structfield: omitempty: type: %s, field: %s\n", t2.Type.Name(), t2.Name)
+		if ti2.rtid == timeTypId {
+			buf.s("!(").s(varname2).s(".IsZero())")
+			break
+		}
+		if ti2.isFlag(typeInfoFlagIsZeroerPtr) || ti2.isFlag(typeInfoFlagIsZeroer) {
+			buf.s("!(").s(varname2).s(".IsZero())")
+			break
+		}
+		if ti2.isFlag(typeInfoFlagComparable) {
+			buf.s(varname2).s(" != ").s(x.genZeroValueR(t2.Type))
+			break
+		}
+		// buf.s("(")
+		buf.s("false")
+		for i, n := 0, t2.Type.NumField(); i < n; i++ {
+			f := t2.Type.Field(i)
+			if f.PkgPath != "" { // unexported
+				continue
+			}
+			buf.s(" || ")
+			x.encOmitEmptyLine(f, varname2, buf)
+		}
+		//buf.s(")")
+	case reflect.Bool:
+		buf.s(varname2)
+	case reflect.Map, reflect.Slice, reflect.Array, reflect.Chan:
+		buf.s("len(").s(varname2).s(") != 0")
+	default:
+		buf.s(varname2).s(" != ").s(x.genZeroValueR(t2.Type))
+	}
+}
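+
+// For illustration: for a hypothetical struct field S []int tagged omitempty,
+// encOmitEmptyLine emits the guard `len(x.S) != 0`; for a time.Time field T it
+// emits `!(x.T.IsZero())`. encStruct below collects these guards into a [N]bool
+// that decides which fields get written.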
+
+func (x *genRunner) encStruct(varname string, rtid uintptr, t reflect.Type) {
+	// Use knowledge from structfieldinfo (mbs, encodable fields; ignore omitempty)
+	// replicate code in kStruct i.e. for each field, deref type to non-pointer, and call x.enc on it
+
+	// if t === type currently running selfer on, do for all
+	ti := x.ti.get(rtid, t)
+	i := x.varsfx()
+	sepVarname := genTempVarPfx + "sep" + i
+	numfieldsvar := genTempVarPfx + "q" + i
+	ti2arrayvar := genTempVarPfx + "r" + i
+	struct2arrvar := genTempVarPfx + "2arr" + i
+
+	x.line(sepVarname + " := !z.EncBinary()")
+	x.linef("%s := z.EncBasicHandle().StructToArray", struct2arrvar)
+	x.linef("_, _ = %s, %s", sepVarname, struct2arrvar)
+	x.linef("const %s bool = %v // struct tag has 'toArray'", ti2arrayvar, ti.toArray)
+
+	tisfi := ti.sfiSrc // always use sequence from file. decStruct expects same thing.
+
+	// var nn int
+	// due to omitEmpty, we need to calculate the
+	// number of non-empty things we write out first.
+	// This is required as we need to pre-determine the size of the container,
+	// to support length-prefixing.
+	if ti.anyOmitEmpty {
+		x.linef("var %s = [%v]bool{ // should field at this index be written?", numfieldsvar, len(tisfi))
+
+		for j, si := range tisfi {
+			_ = j
+			if !si.omitEmpty() {
+				// x.linef("%s[%v] = true // %s", numfieldsvar, j, si.fieldName)
+				x.linef("true, // %s", si.fieldName)
+				// nn++
+				continue
+			}
+			var t2 reflect.StructField
+			var omitline genBuf
+			{
+				t2typ := t
+				varname3 := varname
+				// go through the loop, record the t2 field explicitly,
+				// and gather the omit line if embedded in pointers.
+				for ij, ix := range si.is {
+					if uint8(ij) == si.nis {
+						break
+					}
+					for t2typ.Kind() == reflect.Ptr {
+						t2typ = t2typ.Elem()
+					}
+					t2 = t2typ.Field(int(ix))
+					t2typ = t2.Type
+					varname3 = varname3 + "." + t2.Name
+					// do not include actual field in the omit line.
+					// that is done subsequently (right after - below).
+					if uint8(ij+1) < si.nis && t2typ.Kind() == reflect.Ptr {
+						omitline.s(varname3).s(" != nil && ")
+					}
+				}
+			}
+			x.encOmitEmptyLine(t2, varname, &omitline)
+			x.linef("%s, // %s", omitline.v(), si.fieldName)
+		}
+		x.line("}")
+		x.linef("_ = %s", numfieldsvar)
+	}
+	// x.linef("var %snn%s int", genTempVarPfx, i)
+	x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray {
+	x.linef("r.WriteArrayStart(%d)", len(tisfi))
+	x.linef("} else {") // if not ti.toArray
+	if ti.anyOmitEmpty {
+		// nn = 0
+		// x.linef("var %snn%s = %v", genTempVarPfx, i, nn)
+		x.linef("var %snn%s int", genTempVarPfx, i)
+		x.linef("for _, b := range %s { if b { %snn%s++ } }", numfieldsvar, genTempVarPfx, i)
+		x.linef("r.WriteMapStart(%snn%s)", genTempVarPfx, i)
+		x.linef("%snn%s = %v", genTempVarPfx, i, 0)
+	} else {
+		x.linef("r.WriteMapStart(%d)", len(tisfi))
+	}
+	x.line("}") // close if not StructToArray
+
+	for j, si := range tisfi {
+		i := x.varsfx()
+		isNilVarName := genTempVarPfx + "n" + i
+		var labelUsed bool
+		var t2 reflect.StructField
+		{
+			t2typ := t
+			varname3 := varname
+			for ij, ix := range si.is {
+				if uint8(ij) == si.nis {
+					break
+				}
+				for t2typ.Kind() == reflect.Ptr {
+					t2typ = t2typ.Elem()
+				}
+				t2 = t2typ.Field(int(ix))
+				t2typ = t2.Type
+				varname3 = varname3 + "." + t2.Name
+				if t2typ.Kind() == reflect.Ptr {
+					if !labelUsed {
+						x.line("var " + isNilVarName + " bool")
+					}
+					x.line("if " + varname3 + " == nil { " + isNilVarName + " = true ")
+					x.line("goto LABEL" + i)
+					x.line("}")
+					labelUsed = true
+					// "varname3 = new(" + x.genTypeName(t3.Elem()) + ") }")
+				}
+			}
+			// t2 = t.FieldByIndex(si.is)
+		}
+		if labelUsed {
+			x.line("LABEL" + i + ":")
+		}
+		// if the type of the field is a Selfer, or one of the ones
+
+		x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray
+		if labelUsed {
+			x.linef("if %s { r.WriteArrayElem(); r.EncodeNil() } else { ", isNilVarName)
+		}
+		x.line("r.WriteArrayElem()")
+		if si.omitEmpty() {
+			x.linef("if %s[%v] {", numfieldsvar, j)
+		}
+		x.encVar(varname+"."+t2.Name, t2.Type)
+		if si.omitEmpty() {
+			x.linef("} else {")
+			x.encZero(t2.Type)
+			x.linef("}")
+		}
+		if labelUsed {
+			x.line("}")
+		}
+
+		x.linef("} else {") // if not ti.toArray
+
+		if si.omitEmpty() {
+			x.linef("if %s[%v] {", numfieldsvar, j)
+		}
+		x.line("r.WriteMapElemKey()")
+
+		// x.line("r.EncodeString(codecSelferCcUTF8" + x.xs + ", `" + si.encName + "`)")
+		// emulate EncStructFieldKey
+		switch ti.keyType {
+		case valueTypeInt:
+			x.linef("r.EncodeInt(z.M.Int(strconv.ParseInt(`%s`, 10, 64)))", si.encName)
+		case valueTypeUint:
+			x.linef("r.EncodeUint(z.M.Uint(strconv.ParseUint(`%s`, 10, 64)))", si.encName)
+		case valueTypeFloat:
+			x.linef("r.EncodeFloat64(z.M.Float(strconv.ParseFloat(`%s`, 64)))", si.encName)
+		default: // string
+			x.linef("r.EncodeString(codecSelferCcUTF8%s, `%s`)", x.xs, si.encName)
+		}
+		// x.linef("r.EncStructFieldKey(codecSelferValueType%s%s, `%s`)", ti.keyType.String(), x.xs, si.encName)
+		x.line("r.WriteMapElemValue()")
+		if labelUsed {
+			x.line("if " + isNilVarName + " { r.EncodeNil() } else { ")
+			x.encVar(varname+"."+t2.Name, t2.Type)
+			x.line("}")
+		} else {
+			x.encVar(varname+"."+t2.Name, t2.Type)
+		}
+		if si.omitEmpty() {
+			x.line("}")
+		}
+		x.linef("} ") // end if/else ti.toArray
+	}
+	x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray {
+	x.line("r.WriteArrayEnd()")
+	x.line("} else {")
+	x.line("r.WriteMapEnd()")
+	x.line("}")
+
+}
+
+func (x *genRunner) encListFallback(varname string, t reflect.Type) {
+	elemBytes := t.Elem().Kind() == reflect.Uint8
+	if t.AssignableTo(uint8SliceTyp) {
+		x.linef("r.EncodeStringBytes(codecSelferCcRAW%s, []byte(%s))", x.xs, varname)
+		return
+	}
+	if t.Kind() == reflect.Array && elemBytes {
+		x.linef("r.EncodeStringBytes(codecSelferCcRAW%s, ((*[%d]byte)(%s))[:])", x.xs, t.Len(), varname)
+		return
+	}
+	i := x.varsfx()
+	if t.Kind() == reflect.Chan {
+		type ts struct {
+			Label, Chan, Slice, Sfx string
+		}
+		tm, err := template.New("").Parse(genEncChanTmpl)
+		if err != nil {
+			panic(err)
+		}
+		x.linef("if %s == nil { r.EncodeNil() } else { ", varname)
+		x.linef("var sch%s []%s", i, x.genTypeName(t.Elem()))
+		err = tm.Execute(x.w, &ts{"Lsch" + i, varname, "sch" + i, i})
+		if err != nil {
+			panic(err)
+		}
+		// x.linef("%s = sch%s", varname, i)
+		if elemBytes {
+			x.linef("r.EncodeStringBytes(codecSelferCcRAW%s, []byte(%s))", x.xs, "sch"+i)
+			x.line("}")
+			return
+		}
+		varname = "sch" + i
+	}
+
+	x.line("r.WriteArrayStart(len(" + varname + "))")
+	x.linef("for _, %sv%s := range %s {", genTempVarPfx, i, varname)
+	x.line("r.WriteArrayElem()")
+
+	x.encVar(genTempVarPfx+"v"+i, t.Elem())
+	x.line("}")
+	x.line("r.WriteArrayEnd()")
+	if t.Kind() == reflect.Chan {
+		x.line("}")
+	}
+}
+
+func (x *genRunner) encMapFallback(varname string, t reflect.Type) {
+	// TODO: expand this to handle canonical.
+	i := x.varsfx()
+	x.line("r.WriteMapStart(len(" + varname + "))")
+	x.linef("for %sk%s, %sv%s := range %s {", genTempVarPfx, i, genTempVarPfx, i, varname)
+	x.line("r.WriteMapElemKey()")
+	x.encVar(genTempVarPfx+"k"+i, t.Key())
+	x.line("r.WriteMapElemValue()")
+	x.encVar(genTempVarPfx+"v"+i, t.Elem())
+	x.line("}")
+	x.line("r.WriteMapEnd()")
+}
+
+func (x *genRunner) decVarInitPtr(varname, nilvar string, t reflect.Type, si *structFieldInfo,
+	newbuf, nilbuf *genBuf) (t2 reflect.StructField) {
+	// we must accommodate anonymous fields, where the embedded field is a nil pointer in the value.
+	// t2 = t.FieldByIndex(si.is)
+	t2typ := t
+	varname3 := varname
+	t2kind := t2typ.Kind()
+	var nilbufed bool
+	if si != nil {
+		for ij, ix := range si.is {
+			if uint8(ij) == si.nis {
+				break
+			}
+			for t2typ.Kind() == reflect.Ptr {
+				t2typ = t2typ.Elem()
+			}
+			t2 = t2typ.Field(int(ix))
+			t2typ = t2.Type
+			varname3 = varname3 + "." + t2.Name
+			t2kind = t2typ.Kind()
+			if t2kind != reflect.Ptr {
+				continue
+			}
+			if newbuf != nil {
+				newbuf.f("if %s == nil { %s = new(%s) }\n", varname3, varname3, x.genTypeName(t2typ.Elem()))
+			}
+			if nilbuf != nil {
+				if !nilbufed {
+					nilbuf.s("if true")
+					nilbufed = true
+				}
+				nilbuf.s(" && ").s(varname3).s(" != nil")
+			}
+		}
+	}
+	// if t2typ.Kind() == reflect.Ptr {
+	// 	varname3 = varname3 + t2.Name
+	// }
+	if nilbuf != nil {
+		if nilbufed {
+			nilbuf.s(" { ")
+		}
+		if nilvar != "" {
+			nilbuf.s(nilvar).s(" = true")
+		} else if tk := t2typ.Kind(); tk == reflect.Ptr {
+			if strings.IndexByte(varname3, '.') != -1 || strings.IndexByte(varname3, '[') != -1 {
+				nilbuf.s(varname3).s(" = nil")
+			} else {
+				nilbuf.s("*").s(varname3).s(" = ").s(x.genZeroValueR(t2typ.Elem()))
+			}
+		} else {
+			nilbuf.s(varname3).s(" = ").s(x.genZeroValueR(t2typ))
+		}
+		if nilbufed {
+			nilbuf.s("}")
+		}
+	}
+	return t2
+}
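+
+// For illustration: given a hypothetical path x.A.B where A is an embedded *T
+// field and B is a string, decVarInitPtr writes into newbuf:
+//
+//	if x.A == nil { x.A = new(T) }
+//
+// and into nilbuf a guarded reset of roughly this shape:
+//
+//	if true && x.A != nil { x.A.B = "" }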
+
+// decVar takes a variable called varname, of type t
+func (x *genRunner) decVarMain(varname, rand string, t reflect.Type, checkNotNil bool) {
+	// We only encode as nil if the value is nillable.
+	// This removes some of the wasted checks for TryDecodeAsNil.
+	// We need to think about this more, to see what happens if omitempty, etc
+	// cause a nil value to be stored when something is expected.
+	// This could happen when decoding from a struct encoded as an array.
+	// For that, decVar should be called with canNil=true, to force true as its value.
+	var varname2 string
+	if t.Kind() != reflect.Ptr {
+		if t.PkgPath() != "" || !x.decTryAssignPrimitive(varname, t, false) {
+			x.dec(varname, t, false)
+		}
+	} else {
+		if checkNotNil {
+			x.linef("if %s == nil { %s = new(%s) }", varname, varname, x.genTypeName(t.Elem()))
+		}
+		// Ensure we set underlying ptr to a non-nil value (so we can deref to it later).
+		// There's a chance of a **T in here which is nil.
+		var ptrPfx string
+		for t = t.Elem(); t.Kind() == reflect.Ptr; t = t.Elem() {
+			ptrPfx += "*"
+			if checkNotNil {
+				x.linef("if %s%s == nil { %s%s = new(%s)}",
+					ptrPfx, varname, ptrPfx, varname, x.genTypeName(t))
+			}
+		}
+		// Should we create temp var if a slice/map indexing? No. dec(...) can now handle it.
+
+		if ptrPfx == "" {
+			x.dec(varname, t, true)
+		} else {
+			varname2 = genTempVarPfx + "z" + rand
+			x.line(varname2 + " := " + ptrPfx + varname)
+			x.dec(varname2, t, true)
+		}
+	}
+}
+
+// decVar takes a variable called varname, of type t
+func (x *genRunner) decVar(varname, nilvar string, t reflect.Type, canBeNil, checkNotNil bool) {
+	i := x.varsfx()
+
+	// We only encode as nil if the value is nillable.
+	// This removes some of the wasted checks for TryDecodeAsNil.
+	// We need to think about this more, to see what happens if omitempty, etc
+	// cause a nil value to be stored when something is expected.
+	// This could happen when decoding from a struct encoded as an array.
+	// For that, decVar should be called with canNil=true, to force true as its value.
+
+	if !canBeNil {
+		canBeNil = genAnythingCanBeNil || !genIsImmutable(t)
+	}
+
+	if canBeNil {
+		var buf genBuf
+		x.decVarInitPtr(varname, nilvar, t, nil, nil, &buf)
+		x.linef("if r.TryDecodeAsNil() { %s } else {", buf.buf)
+	} else {
+		x.line("// cannot be nil")
+	}
+
+	x.decVarMain(varname, i, t, checkNotNil)
+
+	if canBeNil {
+		x.line("} ")
+	}
+}
+
+// dec will decode a variable (varname) of type t or ptrTo(t) if isptr==true.
+// t is always a basetype (i.e. not of kind reflect.Ptr).
+func (x *genRunner) dec(varname string, t reflect.Type, isptr bool) {
+	// assumptions:
+	//   - the varname is to a pointer already. No need to take address of it
+	//   - t is always a baseType T (not a *T, etc).
+	rtid := rt2id(t)
+	ti2 := x.ti.get(rtid, t)
+	// tptr := reflect.PtrTo(t)
+	if x.checkForSelfer(t, varname) {
+		if ti2.cs || ti2.csp { // t.Implements(selferTyp) || tptr.Implements(selferTyp) {
+			x.line(varname + ".CodecDecodeSelf(d)")
+			return
+		}
+		if _, ok := x.td[rtid]; ok {
+			x.line(varname + ".CodecDecodeSelf(d)")
+			return
+		}
+	}
+
+	inlist := false
+	for _, t0 := range x.t {
+		if t == t0 {
+			inlist = true
+			if x.checkForSelfer(t, varname) {
+				x.line(varname + ".CodecDecodeSelf(d)")
+				return
+			}
+			break
+		}
+	}
+
+	var rtidAdded bool
+	if t == x.tc {
+		x.td[rtid] = true
+		rtidAdded = true
+	}
+
+	// check if
+	//   - type is time.Time, Raw, RawExt
+	//   - the type implements (Text|JSON|Binary)(Unm|M)arshal
+
+	mi := x.varsfx()
+	// x.linef("%sm%s := z.DecBinary()", genTempVarPfx, mi)
+	// x.linef("_ = %sm%s", genTempVarPfx, mi)
+	x.line("if false {")           //start if block
+	defer func() { x.line("}") }() //end if block
+
+	var ptrPfx, addrPfx string
+	if isptr {
+		ptrPfx = "*"
+	} else {
+		addrPfx = "&"
+	}
+	if t == timeTyp {
+		x.linef("} else { %s%v = r.DecodeTime()", ptrPfx, varname)
+		return
+	}
+	if t == rawTyp {
+		x.linef("} else { %s%v = z.DecRaw()", ptrPfx, varname)
+		return
+	}
+
+	if t == rawExtTyp {
+		x.linef("} else { r.DecodeExt(%s%v, 0, nil)", addrPfx, varname)
+		return
+	}
+
+	// only check for extensions if the type is named, and has a packagePath.
+	if !x.nx && genImportPath(t) != "" && t.Name() != "" {
+		// first check if extensions are configured, before doing the interface conversion
+		// x.linef("} else if z.HasExtensions() && z.DecExt(%s) {", varname)
+		yy := fmt.Sprintf("%sxt%s", genTempVarPfx, mi)
+		x.linef("} else if %s := z.Extension(z.I2Rtid(%s)); %s != nil { z.DecExtension(%s, %s) ", yy, varname, yy, varname, yy)
+	}
+
+	if ti2.bu || ti2.bup { // t.Implements(binaryUnmarshalerTyp) || tptr.Implements(binaryUnmarshalerTyp) {
+		x.linef("} else if z.DecBinary() { z.DecBinaryUnmarshal(%s%v) ", addrPfx, varname)
+	}
+	if ti2.ju || ti2.jup { // t.Implements(jsonUnmarshalerTyp) || tptr.Implements(jsonUnmarshalerTyp) {
+		x.linef("} else if !z.DecBinary() && z.IsJSONHandle() { z.DecJSONUnmarshal(%s%v)", addrPfx, varname)
+	} else if ti2.tu || ti2.tup { // t.Implements(textUnmarshalerTyp) || tptr.Implements(textUnmarshalerTyp) {
+		x.linef("} else if !z.DecBinary() { z.DecTextUnmarshal(%s%v)", addrPfx, varname)
+	}
+
+	x.line("} else {")
+
+	if x.decTryAssignPrimitive(varname, t, isptr) {
+		return
+	}
+
+	switch t.Kind() {
+	case reflect.Array, reflect.Chan:
+		x.xtraSM(varname, t, false, isptr)
+	case reflect.Slice:
+		// if a []uint8, call dedicated function
+		// if a known fastpath slice, call dedicated function
+		// else write encode function in-line.
+		// - if elements are primitives or Selfers, call dedicated function on each member.
+		// - else call Encoder.encode(XXX) on it.
+		if rtid == uint8SliceTypId {
+			x.linef("%s%s = r.DecodeBytes(%s(%s[]byte)(%s), false)",
+				ptrPfx, varname, ptrPfx, ptrPfx, varname)
+		} else if fastpathAV.index(rtid) != -1 {
+			g := x.newGenV(t)
+			x.linef("z.F.%sX(%s%s, d)", g.MethodNamePfx("Dec", false), addrPfx, varname)
+		} else {
+			x.xtraSM(varname, t, false, isptr)
+			// x.decListFallback(varname, rtid, false, t)
+		}
+	case reflect.Map:
+		// if a known fastpath map, call dedicated function
+		// else write encode function in-line.
+		// - if elements are primitives or Selfers, call dedicated function on each member.
+		// - else call Encoder.encode(XXX) on it.
+		if fastpathAV.index(rtid) != -1 {
+			g := x.newGenV(t)
+			x.linef("z.F.%sX(%s%s, d)", g.MethodNamePfx("Dec", false), addrPfx, varname)
+		} else {
+			x.xtraSM(varname, t, false, isptr)
+			// x.decMapFallback(varname, rtid, t)
+		}
+	case reflect.Struct:
+		if inlist {
+			// no need to create temp variable if isptr, or x.F or x[F]
+			if isptr || strings.IndexByte(varname, '.') != -1 || strings.IndexByte(varname, '[') != -1 {
+				x.decStruct(varname, rtid, t)
+			} else {
+				varname2 := genTempVarPfx + "j" + mi
+				x.line(varname2 + " := &" + varname)
+				x.decStruct(varname2, rtid, t)
+			}
+		} else {
+			// delete(x.td, rtid)
+			x.line("z.DecFallback(" + addrPfx + varname + ", false)")
+		}
+	default:
+		if rtidAdded {
+			delete(x.te, rtid)
+		}
+		x.line("z.DecFallback(" + addrPfx + varname + ", true)")
+	}
+}
+
+func (x *genRunner) decTryAssignPrimitive(varname string, t reflect.Type, isptr bool) (done bool) {
+	// This should only be used for exact primitives (i.e. un-named types).
+	// Named types may be implementations of Selfer, Unmarshaler, etc.
+	// They should be handled by dec(...)
+
+	var ptr string
+	if isptr {
+		ptr = "*"
+	}
+	switch t.Kind() {
+	case reflect.Int:
+		x.linef("%s%s = (%s)(z.C.IntV(r.DecodeInt64(), codecSelferBitsize%s))", ptr, varname, x.genTypeName(t), x.xs)
+	case reflect.Int8:
+		x.linef("%s%s = (%s)(z.C.IntV(r.DecodeInt64(), 8))", ptr, varname, x.genTypeName(t))
+	case reflect.Int16:
+		x.linef("%s%s = (%s)(z.C.IntV(r.DecodeInt64(), 16))", ptr, varname, x.genTypeName(t))
+	case reflect.Int32:
+		x.linef("%s%s = (%s)(z.C.IntV(r.DecodeInt64(), 32))", ptr, varname, x.genTypeName(t))
+	case reflect.Int64:
+		x.linef("%s%s = (%s)(r.DecodeInt64())", ptr, varname, x.genTypeName(t))
+
+	case reflect.Uint:
+		x.linef("%s%s = (%s)(z.C.UintV(r.DecodeUint64(), codecSelferBitsize%s))", ptr, varname, x.genTypeName(t), x.xs)
+	case reflect.Uint8:
+		x.linef("%s%s = (%s)(z.C.UintV(r.DecodeUint64(), 8))", ptr, varname, x.genTypeName(t))
+	case reflect.Uint16:
+		x.linef("%s%s = (%s)(z.C.UintV(r.DecodeUint64(), 16))", ptr, varname, x.genTypeName(t))
+	case reflect.Uint32:
+		x.linef("%s%s = (%s)(z.C.UintV(r.DecodeUint64(), 32))", ptr, varname, x.genTypeName(t))
+	case reflect.Uint64:
+		x.linef("%s%s = (%s)(r.DecodeUint64())", ptr, varname, x.genTypeName(t))
+	case reflect.Uintptr:
+		x.linef("%s%s = (%s)(z.C.UintV(r.DecodeUint64(), codecSelferBitsize%s))", ptr, varname, x.genTypeName(t), x.xs)
+
+	case reflect.Float32:
+		x.linef("%s%s = (%s)(r.DecodeFloat32As64())", ptr, varname, x.genTypeName(t))
+	case reflect.Float64:
+		x.linef("%s%s = (%s)(r.DecodeFloat64())", ptr, varname, x.genTypeName(t))
+
+	case reflect.Bool:
+		x.linef("%s%s = (%s)(r.DecodeBool())", ptr, varname, x.genTypeName(t))
+	case reflect.String:
+		x.linef("%s%s = (%s)(r.DecodeString())", ptr, varname, x.genTypeName(t))
+	default:
+		return false
+	}
+	return true
+}
+
+func (x *genRunner) decListFallback(varname string, rtid uintptr, t reflect.Type) {
+	if t.AssignableTo(uint8SliceTyp) {
+		x.line("*" + varname + " = r.DecodeBytes(*((*[]byte)(" + varname + ")), false)")
+		return
+	}
+	if t.Kind() == reflect.Array && t.Elem().Kind() == reflect.Uint8 {
+		x.linef("r.DecodeBytes( ((*[%d]byte)(%s))[:], true)", t.Len(), varname)
+		return
+	}
+	type tstruc struct {
+		TempVar   string
+		Rand      string
+		Varname   string
+		CTyp      string
+		Typ       string
+		Immutable bool
+		Size      int
+	}
+	telem := t.Elem()
+	ts := tstruc{genTempVarPfx, x.varsfx(), varname, x.genTypeName(t), x.genTypeName(telem), genIsImmutable(telem), int(telem.Size())}
+
+	funcs := make(template.FuncMap)
+
+	funcs["decLineVar"] = func(varname string) string {
+		x.decVar(varname, "", telem, false, true)
+		return ""
+	}
+	funcs["var"] = func(s string) string {
+		return ts.TempVar + s + ts.Rand
+	}
+	funcs["zero"] = func() string {
+		return x.genZeroValueR(telem)
+	}
+	funcs["isArray"] = func() bool {
+		return t.Kind() == reflect.Array
+	}
+	funcs["isSlice"] = func() bool {
+		return t.Kind() == reflect.Slice
+	}
+	funcs["isChan"] = func() bool {
+		return t.Kind() == reflect.Chan
+	}
+	tm, err := template.New("").Funcs(funcs).Parse(genDecListTmpl)
+	if err != nil {
+		panic(err)
+	}
+	if err = tm.Execute(x.w, &ts); err != nil {
+		panic(err)
+	}
+}
+
+func (x *genRunner) decMapFallback(varname string, rtid uintptr, t reflect.Type) {
+	type tstruc struct {
+		TempVar string
+		Sfx     string
+		Rand    string
+		Varname string
+		KTyp    string
+		Typ     string
+		Size    int
+	}
+	telem := t.Elem()
+	tkey := t.Key()
+	ts := tstruc{
+		genTempVarPfx, x.xs, x.varsfx(), varname, x.genTypeName(tkey),
+		x.genTypeName(telem), int(telem.Size() + tkey.Size()),
+	}
+
+	funcs := make(template.FuncMap)
+	funcs["decElemZero"] = func() string {
+		return x.genZeroValueR(telem)
+	}
+	funcs["decElemKindImmutable"] = func() bool {
+		return genIsImmutable(telem)
+	}
+	funcs["decElemKindPtr"] = func() bool {
+		return telem.Kind() == reflect.Ptr
+	}
+	funcs["decElemKindIntf"] = func() bool {
+		return telem.Kind() == reflect.Interface
+	}
+	funcs["decLineVarK"] = func(varname string) string {
+		x.decVar(varname, "", tkey, false, true)
+		return ""
+	}
+	funcs["decLineVar"] = func(varname, decodedNilVarname string) string {
+		x.decVar(varname, decodedNilVarname, telem, false, true)
+		return ""
+	}
+	funcs["var"] = func(s string) string {
+		return ts.TempVar + s + ts.Rand
+	}
+
+	tm, err := template.New("").Funcs(funcs).Parse(genDecMapTmpl)
+	if err != nil {
+		panic(err)
+	}
+	if err = tm.Execute(x.w, &ts); err != nil {
+		panic(err)
+	}
+}
+
+func (x *genRunner) decStructMapSwitch(kName string, varname string, rtid uintptr, t reflect.Type) {
+	ti := x.ti.get(rtid, t)
+	tisfi := ti.sfiSrc // always use sequence from file. decStruct expects same thing.
+	x.line("switch (" + kName + ") {")
+	var newbuf, nilbuf genBuf
+	for _, si := range tisfi {
+		x.line("case \"" + si.encName + "\":")
+		newbuf.reset()
+		nilbuf.reset()
+		t2 := x.decVarInitPtr(varname, "", t, si, &newbuf, &nilbuf)
+		x.linef("if r.TryDecodeAsNil() { %s } else { %s", nilbuf.buf, newbuf.buf)
+		x.decVarMain(varname+"."+t2.Name, x.varsfx(), t2.Type, false)
+		x.line("}")
+	}
+	x.line("default:")
+	// pass the slice here, so that the string will not escape, and maybe save allocation
+	x.line("z.DecStructFieldNotFound(-1, " + kName + ")")
+	x.line("} // end switch " + kName)
+}
+
+func (x *genRunner) decStructMap(varname, lenvarname string, rtid uintptr, t reflect.Type, style genStructMapStyle) {
+	tpfx := genTempVarPfx
+	ti := x.ti.get(rtid, t)
+	i := x.varsfx()
+	kName := tpfx + "s" + i
+
+	switch style {
+	case genStructMapStyleLenPrefix:
+		x.linef("for %sj%s := 0; %sj%s < %s; %sj%s++ {", tpfx, i, tpfx, i, lenvarname, tpfx, i)
+	case genStructMapStyleCheckBreak:
+		x.linef("for %sj%s := 0; !r.CheckBreak(); %sj%s++ {", tpfx, i, tpfx, i)
+	default: // 0, otherwise.
+		x.linef("var %shl%s bool = %s >= 0", tpfx, i, lenvarname) // has length
+		x.linef("for %sj%s := 0; ; %sj%s++ {", tpfx, i, tpfx, i)
+		x.linef("if %shl%s { if %sj%s >= %s { break }", tpfx, i, tpfx, i, lenvarname)
+		x.line("} else { if r.CheckBreak() { break }; }")
+	}
+	x.line("r.ReadMapElemKey()")
+
+	// emulate decstructfieldkey
+	switch ti.keyType {
+	case valueTypeInt:
+		x.linef("%s := z.StringView(strconv.AppendInt(z.DecScratchArrayBuffer()[:0], r.DecodeInt64(), 10))", kName)
+	case valueTypeUint:
+		x.linef("%s := z.StringView(strconv.AppendUint(z.DecScratchArrayBuffer()[:0], r.DecodeUint64(), 10))", kName)
+	case valueTypeFloat:
+		x.linef("%s := z.StringView(strconv.AppendFloat(z.DecScratchArrayBuffer()[:0], r.DecodeFloat64(), 'f', -1, 64))", kName)
+	default: // string
+		x.linef("%s := z.StringView(r.DecodeStringAsBytes())", kName)
+	}
+	// x.linef("%s := z.StringView(r.DecStructFieldKey(codecSelferValueType%s%s, z.DecScratchArrayBuffer()))", kName, ti.keyType.String(), x.xs)
+
+	x.line("r.ReadMapElemValue()")
+	x.decStructMapSwitch(kName, varname, rtid, t)
+
+	x.line("} // end for " + tpfx + "j" + i)
+	x.line("r.ReadMapEnd()")
+}
+
+func (x *genRunner) decStructArray(varname, lenvarname, breakString string, rtid uintptr, t reflect.Type) {
+	tpfx := genTempVarPfx
+	i := x.varsfx()
+	ti := x.ti.get(rtid, t)
+	tisfi := ti.sfiSrc // always use sequence from file. decStruct expects same thing.
+	x.linef("var %sj%s int", tpfx, i)
+	x.linef("var %sb%s bool", tpfx, i)                        // break
+	x.linef("var %shl%s bool = %s >= 0", tpfx, i, lenvarname) // has length
+	var newbuf, nilbuf genBuf
+	for _, si := range tisfi {
+		x.linef("%sj%s++; if %shl%s { %sb%s = %sj%s > %s } else { %sb%s = r.CheckBreak() }",
+			tpfx, i, tpfx, i, tpfx, i,
+			tpfx, i, lenvarname, tpfx, i)
+		x.linef("if %sb%s { r.ReadArrayEnd(); %s }", tpfx, i, breakString)
+		x.line("r.ReadArrayElem()")
+		newbuf.reset()
+		nilbuf.reset()
+		t2 := x.decVarInitPtr(varname, "", t, si, &newbuf, &nilbuf)
+		x.linef("if r.TryDecodeAsNil() { %s } else { %s", nilbuf.buf, newbuf.buf)
+		x.decVarMain(varname+"."+t2.Name, x.varsfx(), t2.Type, false)
+		x.line("}")
+	}
+	// read remaining values and throw away.
+	x.line("for {")
+	x.linef("%sj%s++; if %shl%s { %sb%s = %sj%s > %s } else { %sb%s = r.CheckBreak() }",
+		tpfx, i, tpfx, i, tpfx, i,
+		tpfx, i, lenvarname, tpfx, i)
+	x.linef("if %sb%s { break }", tpfx, i)
+	x.line("r.ReadArrayElem()")
+	x.linef(`z.DecStructFieldNotFound(%sj%s - 1, "")`, tpfx, i)
+	x.line("}")
+	x.line("r.ReadArrayEnd()")
+}
+
+func (x *genRunner) decStruct(varname string, rtid uintptr, t reflect.Type) {
+	// varname MUST be a ptr, or a struct field or a slice element.
+	i := x.varsfx()
+	x.linef("%sct%s := r.ContainerType()", genTempVarPfx, i)
+	x.linef("if %sct%s == codecSelferValueTypeMap%s {", genTempVarPfx, i, x.xs)
+	x.line(genTempVarPfx + "l" + i + " := r.ReadMapStart()")
+	x.linef("if %sl%s == 0 {", genTempVarPfx, i)
+	x.line("r.ReadMapEnd()")
+	if genUseOneFunctionForDecStructMap {
+		x.line("} else { ")
+		x.linef("%s.codecDecodeSelfFromMap(%sl%s, d)", varname, genTempVarPfx, i)
+	} else {
+		x.line("} else if " + genTempVarPfx + "l" + i + " > 0 { ")
+		x.line(varname + ".codecDecodeSelfFromMapLenPrefix(" + genTempVarPfx + "l" + i + ", d)")
+		x.line("} else {")
+		x.line(varname + ".codecDecodeSelfFromMapCheckBreak(" + genTempVarPfx + "l" + i + ", d)")
+	}
+	x.line("}")
+
+	// else if container is array
+	x.linef("} else if %sct%s == codecSelferValueTypeArray%s {", genTempVarPfx, i, x.xs)
+	x.line(genTempVarPfx + "l" + i + " := r.ReadArrayStart()")
+	x.linef("if %sl%s == 0 {", genTempVarPfx, i)
+	x.line("r.ReadArrayEnd()")
+	x.line("} else { ")
+	x.linef("%s.codecDecodeSelfFromArray(%sl%s, d)", varname, genTempVarPfx, i)
+	x.line("}")
+	// else panic
+	x.line("} else { ")
+	x.line("panic(errCodecSelferOnlyMapOrArrayEncodeToStruct" + x.xs + ")")
+	x.line("} ")
+}
+
+// --------
+
+type genV struct {
+	// genV is either a primitive (Primitive != "") or a map (MapKey != "") or a slice
+	MapKey    string
+	Elem      string
+	Primitive string
+	Size      int
+}
+
+func (x *genRunner) newGenV(t reflect.Type) (v genV) {
+	switch t.Kind() {
+	case reflect.Slice, reflect.Array:
+		te := t.Elem()
+		v.Elem = x.genTypeName(te)
+		v.Size = int(te.Size())
+	case reflect.Map:
+		te, tk := t.Elem(), t.Key()
+		v.Elem = x.genTypeName(te)
+		v.MapKey = x.genTypeName(tk)
+		v.Size = int(te.Size() + tk.Size())
+	default:
+		panic("unexpected type for newGenV. Requires map or slice type")
+	}
+	return
+}
+
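+// MethodNamePfx builds the method-name fragment used by the generated fastpath
+// helpers. For example (illustrative):
+//    genV{Elem: "uint64"}.MethodNamePfx("Dec", false)                // "DecSliceUint64"
+//    genV{MapKey: "string", Elem: "int"}.MethodNamePfx("Enc", false) // "EncMapStringInt"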
+func (x *genV) MethodNamePfx(prefix string, prim bool) string {
+	var name []byte
+	if prefix != "" {
+		name = append(name, prefix...)
+	}
+	if prim {
+		name = append(name, genTitleCaseName(x.Primitive)...)
+	} else {
+		if x.MapKey == "" {
+			name = append(name, "Slice"...)
+		} else {
+			name = append(name, "Map"...)
+			name = append(name, genTitleCaseName(x.MapKey)...)
+		}
+		name = append(name, genTitleCaseName(x.Elem)...)
+	}
+	return string(name)
+
+}
+
+// genImportPath returns the import path of a non-predeclared named type, or an empty string otherwise.
+//
+// This handles the misbehaviour that occurs when 1.5-style vendoring is enabled,
+// where PkgPath returns the full path, including the vendoring prefix that should have been stripped.
+// We strip it here.
+func genImportPath(t reflect.Type) (s string) {
+	s = t.PkgPath()
+	if genCheckVendor {
+		// HACK: always handle vendoring. It is typically on in go 1.6 and 1.7.
+		s = genStripVendor(s)
+	}
+	return
+}
+
+// A go identifier is (letter|_)[letter|number|_]*
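+// Invalid characters are replaced with '_'. For example (illustrative):
+//    genGoIdentifier("my-pkg.v2", false) // "my_pkg_v2"
+//    genGoIdentifier("2fast", true)      // "_2fast"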
+func genGoIdentifier(s string, checkFirstChar bool) string {
+	b := make([]byte, 0, len(s))
+	t := make([]byte, 4)
+	var n int
+	for i, r := range s {
+		if checkFirstChar && i == 0 && !unicode.IsLetter(r) {
+			b = append(b, '_')
+		}
+		// r must be unicode_letter, unicode_digit or _
+		if unicode.IsLetter(r) || unicode.IsDigit(r) {
+			n = utf8.EncodeRune(t, r)
+			b = append(b, t[:n]...)
+		} else {
+			b = append(b, '_')
+		}
+	}
+	return string(b)
+}
+
+func genNonPtr(t reflect.Type) reflect.Type {
+	for t.Kind() == reflect.Ptr {
+		t = t.Elem()
+	}
+	return t
+}
+
+func genTitleCaseName(s string) string {
+	switch s {
+	case "interface{}", "interface {}":
+		return "Intf"
+	default:
+		return strings.ToUpper(s[0:1]) + s[1:]
+	}
+}
+
+func genMethodNameT(t reflect.Type, tRef reflect.Type) (n string) {
+	var ptrPfx string
+	for t.Kind() == reflect.Ptr {
+		ptrPfx += "Ptrto"
+		t = t.Elem()
+	}
+	tstr := t.String()
+	if tn := t.Name(); tn != "" {
+		if tRef != nil && genImportPath(t) == genImportPath(tRef) {
+			return ptrPfx + tn
+		} else {
+			if genQNameRegex.MatchString(tstr) {
+				return ptrPfx + strings.Replace(tstr, ".", "_", 1000)
+			} else {
+				return ptrPfx + genCustomTypeName(tstr)
+			}
+		}
+	}
+	switch t.Kind() {
+	case reflect.Map:
+		return ptrPfx + "Map" + genMethodNameT(t.Key(), tRef) + genMethodNameT(t.Elem(), tRef)
+	case reflect.Slice:
+		return ptrPfx + "Slice" + genMethodNameT(t.Elem(), tRef)
+	case reflect.Array:
+		return ptrPfx + "Array" + strconv.FormatInt(int64(t.Len()), 10) + genMethodNameT(t.Elem(), tRef)
+	case reflect.Chan:
+		var cx string
+		switch t.ChanDir() {
+		case reflect.SendDir:
+			cx = "ChanSend"
+		case reflect.RecvDir:
+			cx = "ChanRecv"
+		default:
+			cx = "Chan"
+		}
+		return ptrPfx + cx + genMethodNameT(t.Elem(), tRef)
+	default:
+		if t == intfTyp {
+			return ptrPfx + "Interface"
+		} else {
+			if tRef != nil && genImportPath(t) == genImportPath(tRef) {
+				if t.Name() != "" {
+					return ptrPfx + t.Name()
+				} else {
+					return ptrPfx + genCustomTypeName(tstr)
+				}
+			} else {
+				// best way to get the package name inclusive
+				// return ptrPfx + strings.Replace(tstr, ".", "_", 1000)
+				// return ptrPfx + genBase64enc.EncodeToString([]byte(tstr))
+				if t.Name() != "" && genQNameRegex.MatchString(tstr) {
+					return ptrPfx + strings.Replace(tstr, ".", "_", 1000)
+				} else {
+					return ptrPfx + genCustomTypeName(tstr)
+				}
+			}
+		}
+	}
+}
+
+// genCustomTypeName base64-encodes the t.String() value in such a way
+// that it can be used within a function name.
+func genCustomTypeName(tstr string) string {
+	len2 := genBase64enc.EncodedLen(len(tstr))
+	bufx := make([]byte, len2)
+	genBase64enc.Encode(bufx, []byte(tstr))
+	for i := len2 - 1; i >= 0; i-- {
+		if bufx[i] == '=' {
+			len2--
+		} else {
+			break
+		}
+	}
+	return string(bufx[:len2])
+}
+
+func genIsImmutable(t reflect.Type) (v bool) {
+	return isImmutableKind(t.Kind())
+}
+
+type genInternal struct {
+	Version int
+	Values  []genV
+}
+
+func (x genInternal) FastpathLen() (l int) {
+	for _, v := range x.Values {
+		if v.Primitive == "" && !(v.MapKey == "" && v.Elem == "uint8") {
+			l++
+		}
+	}
+	return
+}
+
+func genInternalZeroValue(s string) string {
+	switch s {
+	case "interface{}", "interface {}":
+		return "nil"
+	case "bool":
+		return "false"
+	case "string":
+		return `""`
+	default:
+		return "0"
+	}
+}
+
+var genInternalNonZeroValueIdx [5]uint64
+var genInternalNonZeroValueStrs = [2][5]string{
+	{`"string-is-an-interface"`, "true", `"some-string"`, "11.1", "33"},
+	{`"string-is-an-interface-2"`, "true", `"some-string-2"`, "22.2", "44"},
+}
+
+func genInternalNonZeroValue(s string) string {
+	switch s {
+	case "interface{}", "interface {}":
+		genInternalNonZeroValueIdx[0]++
+		return genInternalNonZeroValueStrs[genInternalNonZeroValueIdx[0]%2][0] // return string, to remove ambiguity
+	case "bool":
+		genInternalNonZeroValueIdx[1]++
+		return genInternalNonZeroValueStrs[genInternalNonZeroValueIdx[1]%2][1]
+	case "string":
+		genInternalNonZeroValueIdx[2]++
+		return genInternalNonZeroValueStrs[genInternalNonZeroValueIdx[2]%2][2]
+	case "float32", "float64", "float", "double":
+		genInternalNonZeroValueIdx[3]++
+		return genInternalNonZeroValueStrs[genInternalNonZeroValueIdx[3]%2][3]
+	default:
+		genInternalNonZeroValueIdx[4]++
+		return genInternalNonZeroValueStrs[genInternalNonZeroValueIdx[4]%2][4]
+	}
+}
+
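+// genInternalEncCommandAsString maps a primitive type name to the encode call
+// emitted in generated fastpath code. For example (illustrative):
+//    genInternalEncCommandAsString("int8", "v") // "ee.EncodeInt(int64(v))"
+//    genInternalEncCommandAsString("bool", "v") // "ee.EncodeBool(v)"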
+func genInternalEncCommandAsString(s string, vname string) string {
+	switch s {
+	case "uint", "uint8", "uint16", "uint32", "uint64":
+		return "ee.EncodeUint(uint64(" + vname + "))"
+	case "int", "int8", "int16", "int32", "int64":
+		return "ee.EncodeInt(int64(" + vname + "))"
+	case "string":
+		return "ee.EncodeString(cUTF8, " + vname + ")"
+	case "float32":
+		return "ee.EncodeFloat32(" + vname + ")"
+	case "float64":
+		return "ee.EncodeFloat64(" + vname + ")"
+	case "bool":
+		return "ee.EncodeBool(" + vname + ")"
+	// case "symbol":
+	// 	return "ee.EncodeSymbol(" + vname + ")"
+	default:
+		return "e.encode(" + vname + ")"
+	}
+}
+
+func genInternalDecCommandAsString(s string) string {
+	switch s {
+	case "uint":
+		return "uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))"
+	case "uint8":
+		return "uint8(chkOvf.UintV(dd.DecodeUint64(), 8))"
+	case "uint16":
+		return "uint16(chkOvf.UintV(dd.DecodeUint64(), 16))"
+	case "uint32":
+		return "uint32(chkOvf.UintV(dd.DecodeUint64(), 32))"
+	case "uint64":
+		return "dd.DecodeUint64()"
+	case "uintptr":
+		return "uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))"
+	case "int":
+		return "int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))"
+	case "int8":
+		return "int8(chkOvf.IntV(dd.DecodeInt64(), 8))"
+	case "int16":
+		return "int16(chkOvf.IntV(dd.DecodeInt64(), 16))"
+	case "int32":
+		return "int32(chkOvf.IntV(dd.DecodeInt64(), 32))"
+	case "int64":
+		return "dd.DecodeInt64()"
+
+	case "string":
+		return "dd.DecodeString()"
+	case "float32":
+		return "float32(chkOvf.Float32V(dd.DecodeFloat64()))"
+	case "float64":
+		return "dd.DecodeFloat64()"
+	case "bool":
+		return "dd.DecodeBool()"
+	default:
+		panic(errors.New("gen internal: unknown type for decode: " + s))
+	}
+}
+
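+// genInternalSortType maps a primitive type name to the sort helper type used by
+// the generated fastpath code. For example (illustrative):
+//    genInternalSortType("int32", false) // "intSlice"
+//    genInternalSortType("int32", true)  // "int64"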
+func genInternalSortType(s string, elem bool) string {
+	for _, v := range [...]string{"int", "uint", "float", "bool", "string"} {
+		if strings.HasPrefix(s, v) {
+			if elem {
+				if v == "int" || v == "uint" || v == "float" {
+					return v + "64"
+				} else {
+					return v
+				}
+			}
+			return v + "Slice"
+		}
+	}
+	panic("sorttype: unexpected type: " + s)
+}
+
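+// For example (illustrative):
+//    genStripVendor("myapp/vendor/github.com/ugorji/go/codec") // "github.com/ugorji/go/codec"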
+func genStripVendor(s string) string {
+	// HACK: Misbehaviour occurs in go 1.5. May have to re-visit this later.
+	// if s contains /vendor/ OR startsWith vendor/, then return everything after it.
+	const vendorStart = "vendor/"
+	const vendorInline = "/vendor/"
+	if i := strings.LastIndex(s, vendorInline); i >= 0 {
+		s = s[i+len(vendorInline):]
+	} else if strings.HasPrefix(s, vendorStart) {
+		s = s[len(vendorStart):]
+	}
+	return s
+}
+
+// var genInternalMu sync.Mutex
+var genInternalV = genInternal{Version: genVersion}
+var genInternalTmplFuncs template.FuncMap
+var genInternalOnce sync.Once
+
+func genInternalInit() {
+	types := [...]string{
+		"interface{}",
+		"string",
+		"float32",
+		"float64",
+		"uint",
+		"uint8",
+		"uint16",
+		"uint32",
+		"uint64",
+		"uintptr",
+		"int",
+		"int8",
+		"int16",
+		"int32",
+		"int64",
+		"bool",
+	}
+	// keep as slice, so it is in specific iteration order.
+	// Initial order was uint64, string, interface{}, int, int64
+	mapvaltypes := [...]string{
+		"interface{}",
+		"string",
+		"uint",
+		"uint8",
+		"uint16",
+		"uint32",
+		"uint64",
+		"uintptr",
+		"int",
+		"int8",
+		"int16",
+		"int32",
+		"int64",
+		"float32",
+		"float64",
+		"bool",
+	}
+	wordSizeBytes := int(intBitsize) / 8
+
+	mapvaltypes2 := map[string]int{
+		"interface{}": 2 * wordSizeBytes,
+		"string":      2 * wordSizeBytes,
+		"uint":        1 * wordSizeBytes,
+		"uint8":       1,
+		"uint16":      2,
+		"uint32":      4,
+		"uint64":      8,
+		"uintptr":     1 * wordSizeBytes,
+		"int":         1 * wordSizeBytes,
+		"int8":        1,
+		"int16":       2,
+		"int32":       4,
+		"int64":       8,
+		"float32":     4,
+		"float64":     8,
+		"bool":        1,
+	}
+	var gt = genInternal{Version: genVersion}
+
+	// For each slice or map type, there must be a (symmetrical) Encode and Decode fast-path function
+	for _, s := range types {
+		gt.Values = append(gt.Values, genV{Primitive: s, Size: mapvaltypes2[s]})
+		// if s != "uint8" { // do not generate fast path for slice of bytes. Treat specially already.
+		// 	gt.Values = append(gt.Values, genV{Elem: s, Size: mapvaltypes2[s]})
+		// }
+		gt.Values = append(gt.Values, genV{Elem: s, Size: mapvaltypes2[s]})
+		if _, ok := mapvaltypes2[s]; !ok {
+			gt.Values = append(gt.Values, genV{MapKey: s, Elem: s, Size: 2 * mapvaltypes2[s]})
+		}
+		for _, ms := range mapvaltypes {
+			gt.Values = append(gt.Values, genV{MapKey: s, Elem: ms, Size: mapvaltypes2[s] + mapvaltypes2[ms]})
+		}
+	}
+
+	funcs := make(template.FuncMap)
+	// funcs["haspfx"] = strings.HasPrefix
+	funcs["encmd"] = genInternalEncCommandAsString
+	funcs["decmd"] = genInternalDecCommandAsString
+	funcs["zerocmd"] = genInternalZeroValue
+	funcs["nonzerocmd"] = genInternalNonZeroValue
+	funcs["hasprefix"] = strings.HasPrefix
+	funcs["sorttype"] = genInternalSortType
+
+	genInternalV = gt
+	genInternalTmplFuncs = funcs
+}
+
+// genInternalGoFile is used to generate source files from templates.
+// It is run by the program author alone.
+// Unfortunately, it has to be exported so that it can be called from a command line tool.
+// *** DO NOT USE ***
+func genInternalGoFile(r io.Reader, w io.Writer) (err error) {
+	genInternalOnce.Do(genInternalInit)
+
+	gt := genInternalV
+
+	t := template.New("").Funcs(genInternalTmplFuncs)
+
+	tmplstr, err := ioutil.ReadAll(r)
+	if err != nil {
+		return
+	}
+
+	if t, err = t.Parse(string(tmplstr)); err != nil {
+		return
+	}
+
+	var out bytes.Buffer
+	err = t.Execute(&out, gt)
+	if err != nil {
+		return
+	}
+
+	bout, err := format.Source(out.Bytes())
+	if err != nil {
+		w.Write(out.Bytes()) // write out if error, so we can still see.
+		// w.Write(bout) // write out if error, as much as possible, so we can still see.
+		return
+	}
+	w.Write(bout)
+	return
+}
diff --git a/vendor/github.com/ugorji/go/codec/goversion_arrayof_gte_go15.go b/vendor/github.com/ugorji/go/codec/goversion_arrayof_gte_go15.go
new file mode 100644
index 0000000..9ddbe20
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/goversion_arrayof_gte_go15.go
@@ -0,0 +1,14 @@
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// +build go1.5
+
+package codec
+
+import "reflect"
+
+const reflectArrayOfSupported = true
+
+func reflectArrayOf(count int, elem reflect.Type) reflect.Type {
+	return reflect.ArrayOf(count, elem)
+}
diff --git a/vendor/github.com/ugorji/go/codec/goversion_arrayof_lt_go15.go b/vendor/github.com/ugorji/go/codec/goversion_arrayof_lt_go15.go
new file mode 100644
index 0000000..c5fcd66
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/goversion_arrayof_lt_go15.go
@@ -0,0 +1,14 @@
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// +build !go1.5
+
+package codec
+
+import "reflect"
+
+const reflectArrayOfSupported = false
+
+func reflectArrayOf(count int, elem reflect.Type) reflect.Type {
+	panic("codec: reflect.ArrayOf unsupported in this go version")
+}
diff --git a/vendor/github.com/ugorji/go/codec/goversion_makemap_gte_go19.go b/vendor/github.com/ugorji/go/codec/goversion_makemap_gte_go19.go
new file mode 100644
index 0000000..bc39d6b
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/goversion_makemap_gte_go19.go
@@ -0,0 +1,15 @@
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// +build go1.9
+
+package codec
+
+import "reflect"
+
+func makeMapReflect(t reflect.Type, size int) reflect.Value {
+	if size < 0 {
+		return reflect.MakeMapWithSize(t, 4)
+	}
+	return reflect.MakeMapWithSize(t, size)
+}
diff --git a/vendor/github.com/ugorji/go/codec/goversion_makemap_lt_go19.go b/vendor/github.com/ugorji/go/codec/goversion_makemap_lt_go19.go
new file mode 100644
index 0000000..cde4cd3
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/goversion_makemap_lt_go19.go
@@ -0,0 +1,12 @@
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// +build !go1.9
+
+package codec
+
+import "reflect"
+
+func makeMapReflect(t reflect.Type, size int) reflect.Value {
+	return reflect.MakeMap(t)
+}
diff --git a/vendor/github.com/ugorji/go/codec/goversion_unexportedembeddedptr_gte_go110.go b/vendor/github.com/ugorji/go/codec/goversion_unexportedembeddedptr_gte_go110.go
new file mode 100644
index 0000000..794133a
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/goversion_unexportedembeddedptr_gte_go110.go
@@ -0,0 +1,8 @@
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// +build go1.10
+
+package codec
+
+const allowSetUnexportedEmbeddedPtr = false
diff --git a/vendor/github.com/ugorji/go/codec/goversion_unexportedembeddedptr_lt_go110.go b/vendor/github.com/ugorji/go/codec/goversion_unexportedembeddedptr_lt_go110.go
new file mode 100644
index 0000000..fd92ede
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/goversion_unexportedembeddedptr_lt_go110.go
@@ -0,0 +1,8 @@
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// +build !go1.10
+
+package codec
+
+const allowSetUnexportedEmbeddedPtr = true
diff --git a/vendor/github.com/ugorji/go/codec/goversion_unsupported_lt_go14.go b/vendor/github.com/ugorji/go/codec/goversion_unsupported_lt_go14.go
new file mode 100644
index 0000000..8debfa6
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/goversion_unsupported_lt_go14.go
@@ -0,0 +1,17 @@
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// +build !go1.4
+
+package codec
+
+// This codec package will only work for go1.4 and above.
+// This is for the following reasons:
+//   - go 1.4 was released in 2014
+//   - go runtime is written fully in go
+//   - interface only holds pointers
+//   - reflect.Value is stabilized as 3 words
+
+func init() {
+	panic("codec: go 1.3 and below are not supported")
+}
diff --git a/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go15.go b/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go15.go
new file mode 100644
index 0000000..0f1bb01
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go15.go
@@ -0,0 +1,10 @@
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// +build go1.5,!go1.6
+
+package codec
+
+import "os"
+
+var genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") == "1"
diff --git a/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go16.go b/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go16.go
new file mode 100644
index 0000000..2fb4b05
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go16.go
@@ -0,0 +1,10 @@
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// +build go1.6,!go1.7
+
+package codec
+
+import "os"
+
+var genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") != "0"
diff --git a/vendor/github.com/ugorji/go/codec/goversion_vendor_gte_go17.go b/vendor/github.com/ugorji/go/codec/goversion_vendor_gte_go17.go
new file mode 100644
index 0000000..c5b8155
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/goversion_vendor_gte_go17.go
@@ -0,0 +1,8 @@
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// +build go1.7
+
+package codec
+
+const genCheckVendor = true
diff --git a/vendor/github.com/ugorji/go/codec/goversion_vendor_lt_go15.go b/vendor/github.com/ugorji/go/codec/goversion_vendor_lt_go15.go
new file mode 100644
index 0000000..837cf24
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/goversion_vendor_lt_go15.go
@@ -0,0 +1,8 @@
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// +build !go1.5
+
+package codec
+
+var genCheckVendor = false
diff --git a/vendor/github.com/ugorji/go/codec/helper.go b/vendor/github.com/ugorji/go/codec/helper.go
new file mode 100644
index 0000000..bd29895
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/helper.go
@@ -0,0 +1,2414 @@
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+// Contains code shared by both encode and decode.
+
+// Some shared ideas around encoding/decoding
+// ------------------------------------------
+//
+// If an interface{} is passed, we first do a type assertion to see if it is
+// a primitive type or a map/slice of primitive types, and use a fastpath to handle it.
+//
+// If we start with a reflect.Value, we are already in reflect.Value land and
+// will try to grab the function for the underlying Type and directly call that function.
+// This is more performant than calling reflect.Value.Interface().
+//
+// This still helps us bypass many layers of reflection, and gives the best performance.
+//
+// Containers
+// ------------
+// Containers in the stream are either associative arrays (key-value pairs) or
+// regular arrays (indexed by incrementing integers).
+//
+// Some streams support indefinite-length containers, and use a breaking
+// byte-sequence to denote that the container has come to an end.
+//
+// Some streams also are text-based, and use explicit separators to denote the
+// end/beginning of different values.
+//
+// During encode, we use a high-level condition to determine how to iterate through
+// the container. That decision is based on whether the container is text-based (with
+// separators) or binary (without separators). If binary, we do not even call the
+// encoding of separators.
+//
+// During decode, we use a different high-level condition to determine how to iterate
+// through the containers. That decision is based on whether the stream contained
+// a length prefix, or if it used explicit breaks. If length-prefixed, we assume that
+// it has to be binary, and we do not even try to read separators.
+//
+// Philosophy
+// ------------
+// On decode, this codec will update containers appropriately:
+//    - If struct, update fields from stream into fields of struct.
+//      If field in stream not found in struct, handle appropriately (based on option).
+//      If a struct field has no corresponding value in the stream, leave it AS IS.
+//      If nil in stream, set value to nil/zero value.
+//    - If map, update map from stream.
+//      If the stream value is NIL, set the map to nil.
+//    - If slice, try to update up to the length of the array in the stream.
+//      If the container len is less than the stream array length,
+//      and the container cannot be expanded, it is handled based on an option.
+//      This means you can decode a 4-element stream array into a 1-element array.
+//
+// ------------------------------------
+// On encode, the user can specify omitEmpty. This means that the value will be omitted
+// if it is the zero value. The problem may occur during decode, where omitted values do not affect
+// the value being decoded into. This means that if decoding into a struct with an
+// int field with current value=5, and the field is omitted in the stream, then after
+// decoding, the value will still be 5 (not 0).
+// omitEmpty only works if you guarantee that you always decode into zero-values.
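+//
+// For example (illustrative):
+//    type T struct { A int `codec:"a,omitempty"` }
+//    v := T{A: 5}
+//    // encoding T{} omits "a"; decoding that stream into &v leaves v.A == 5, not 0.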
+//
+// ------------------------------------
+// We could have truncated a map to remove keys not available in the stream,
+// or set values in the struct which are not in the stream to their zero values.
+// We decided against it because there is no efficient way to do it.
+// We may introduce it as an option later.
+// However, that will require enabling it for both runtime and code generation modes.
+//
+// To support truncate, we need to do 2 passes over the container:
+//   map
+//   - first collect all keys (e.g. in k1)
+//   - for each key in stream, mark k1 that the key should not be removed
+//   - after updating map, do second pass and call delete for all keys in k1 which are not marked
+//   struct:
+//   - for each field, track the *typeInfo s1
+//   - iterate through all s1, and for each one not marked, set value to zero
+//   - this involves checking the possible anonymous fields which are nil ptrs.
+//     too much work.
+//
+// ------------------------------------------
+// Error Handling is done within the library using panic.
+//
+// This way, the code doesn't have to keep checking if an error has happened,
+// and we don't have to keep sending the error value along with each call
+// or storing it in the En|Decoder and checking it constantly along the way.
+//
+// The disadvantage is that small functions which use panics cannot be inlined.
+// The code accounts for that by only using panics behind an interface;
+// since interface calls cannot be inlined anyway, nothing is lost.
+//
+// We considered storing the error in the En|Decoder.
+//   - once it has its err field set, it cannot be used again.
+//   - panicking will be optional, controlled by const flag.
+//   - code should always check error first and return early.
+// We eventually decided against it as it makes the code clumsier to always
+// check for these error conditions.
+
+import (
+	"bytes"
+	"encoding"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+)
+
+const (
+	scratchByteArrayLen = 32
+	// initCollectionCap   = 16 // 32 is defensive. 16 is preferred.
+
+	// Support encoding.(Binary|Text)(Unm|M)arshaler.
+	// This constant flag will enable or disable it.
+	supportMarshalInterfaces = true
+
+	// for debugging, set this to false, to catch panic traces.
+	// Note that this will always cause rpc tests to fail, since they need io.EOF sent via panic.
+	recoverPanicToErr = true
+
+	// arrayCacheLen is the length of the cache used in encoder or decoder for
+	// allowing zero-alloc initialization.
+	arrayCacheLen = 8
+
+	// size of the cacheline: defaulting to value for archs: amd64, arm64, 386
+	// should use "runtime/internal/sys".CacheLineSize, but that is not exposed.
+	cacheLineSize = 64
+
+	wordSizeBits = 32 << (^uint(0) >> 63) // strconv.IntSize
+	wordSize     = wordSizeBits / 8
+
+	maxLevelsEmbedding = 15 // use this, so structFieldInfo fits into 8 bytes
+)
+
+var (
+	oneByteArr    = [1]byte{0}
+	zeroByteSlice = oneByteArr[:0:0]
+)
+
+var refBitset bitset32
+var pool pooler
+var panicv panicHdl
+
+func init() {
+	pool.init()
+
+	refBitset.set(byte(reflect.Map))
+	refBitset.set(byte(reflect.Ptr))
+	refBitset.set(byte(reflect.Func))
+	refBitset.set(byte(reflect.Chan))
+}
+
+type charEncoding uint8
+
+const (
+	cRAW charEncoding = iota
+	cUTF8
+	cUTF16LE
+	cUTF16BE
+	cUTF32LE
+	cUTF32BE
+)
+
+// valueType is the stream type
+type valueType uint8
+
+const (
+	valueTypeUnset valueType = iota
+	valueTypeNil
+	valueTypeInt
+	valueTypeUint
+	valueTypeFloat
+	valueTypeBool
+	valueTypeString
+	valueTypeSymbol
+	valueTypeBytes
+	valueTypeMap
+	valueTypeArray
+	valueTypeTime
+	valueTypeExt
+
+	// valueTypeInvalid = 0xff
+)
+
+var valueTypeStrings = [...]string{
+	"Unset",
+	"Nil",
+	"Int",
+	"Uint",
+	"Float",
+	"Bool",
+	"String",
+	"Symbol",
+	"Bytes",
+	"Map",
+	"Array",
+	"Timestamp",
+	"Ext",
+}
+
+func (x valueType) String() string {
+	if int(x) < len(valueTypeStrings) {
+		return valueTypeStrings[x]
+	}
+	return strconv.FormatInt(int64(x), 10)
+}
+
+type seqType uint8
+
+const (
+	_ seqType = iota
+	seqTypeArray
+	seqTypeSlice
+	seqTypeChan
+)
+
+// note that containerMapStart and containerArrayStart are not sent.
+// This is because the ReadXXXStart and EncodeXXXStart already do these.
+type containerState uint8
+
+const (
+	_ containerState = iota
+
+	containerMapStart // slot left open, since Driver method already covers it
+	containerMapKey
+	containerMapValue
+	containerMapEnd
+	containerArrayStart // slot left open, since Driver methods already cover it
+	containerArrayElem
+	containerArrayEnd
+)
+
+// // sfiIdx used for tracking where a (field/enc)Name is seen in a []*structFieldInfo
+// type sfiIdx struct {
+// 	name  string
+// 	index int
+// }
+
+// do not recurse if a containing type refers to an embedded type
+// which refers back to its containing type (via a pointer).
+// The second time this back-reference happens, break out,
+// so as not to cause an infinite loop.
+const rgetMaxRecursion = 2
+
+// Anecdotally, we believe most types have <= 12 fields.
+// - even Java's PMD rules set TooManyFields threshold to 15.
+// However, go has embedded fields, which should be regarded as
+// top level, allowing structs to possibly double or triple.
+// In addition, we don't want to keep creating transient arrays,
+// especially for the sfi index tracking, and the evtypes tracking.
+//
+// So - try to keep typeInfoLoadArray within 2K bytes
+const (
+	typeInfoLoadArraySfisLen   = 16
+	typeInfoLoadArraySfiidxLen = 8 * 112
+	typeInfoLoadArrayEtypesLen = 12
+	typeInfoLoadArrayBLen      = 8 * 4
+)
+
+type typeInfoLoad struct {
+	// fNames   []string
+	// encNames []string
+	etypes []uintptr
+	sfis   []structFieldInfo
+}
+
+type typeInfoLoadArray struct {
+	// fNames   [typeInfoLoadArrayLen]string
+	// encNames [typeInfoLoadArrayLen]string
+	sfis   [typeInfoLoadArraySfisLen]structFieldInfo
+	sfiidx [typeInfoLoadArraySfiidxLen]byte
+	etypes [typeInfoLoadArrayEtypesLen]uintptr
+	b      [typeInfoLoadArrayBLen]byte // scratch - used for struct field names
+}
+
+// mirror json.Marshaler and json.Unmarshaler here,
+// so we don't import the encoding/json package
+
+type jsonMarshaler interface {
+	MarshalJSON() ([]byte, error)
+}
+type jsonUnmarshaler interface {
+	UnmarshalJSON([]byte) error
+}
+
+type isZeroer interface {
+	IsZero() bool
+}
+
+// type byteAccepter func(byte) bool
+
+var (
+	bigen               = binary.BigEndian
+	structInfoFieldName = "_struct"
+
+	mapStrIntfTyp  = reflect.TypeOf(map[string]interface{}(nil))
+	mapIntfIntfTyp = reflect.TypeOf(map[interface{}]interface{}(nil))
+	intfSliceTyp   = reflect.TypeOf([]interface{}(nil))
+	intfTyp        = intfSliceTyp.Elem()
+
+	reflectValTyp = reflect.TypeOf((*reflect.Value)(nil)).Elem()
+
+	stringTyp     = reflect.TypeOf("")
+	timeTyp       = reflect.TypeOf(time.Time{})
+	rawExtTyp     = reflect.TypeOf(RawExt{})
+	rawTyp        = reflect.TypeOf(Raw{})
+	uintptrTyp    = reflect.TypeOf(uintptr(0))
+	uint8Typ      = reflect.TypeOf(uint8(0))
+	uint8SliceTyp = reflect.TypeOf([]uint8(nil))
+	uintTyp       = reflect.TypeOf(uint(0))
+	intTyp        = reflect.TypeOf(int(0))
+
+	mapBySliceTyp = reflect.TypeOf((*MapBySlice)(nil)).Elem()
+
+	binaryMarshalerTyp   = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem()
+	binaryUnmarshalerTyp = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem()
+
+	textMarshalerTyp   = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
+	textUnmarshalerTyp = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
+
+	jsonMarshalerTyp   = reflect.TypeOf((*jsonMarshaler)(nil)).Elem()
+	jsonUnmarshalerTyp = reflect.TypeOf((*jsonUnmarshaler)(nil)).Elem()
+
+	selferTyp = reflect.TypeOf((*Selfer)(nil)).Elem()
+	iszeroTyp = reflect.TypeOf((*isZeroer)(nil)).Elem()
+
+	uint8TypId      = rt2id(uint8Typ)
+	uint8SliceTypId = rt2id(uint8SliceTyp)
+	rawExtTypId     = rt2id(rawExtTyp)
+	rawTypId        = rt2id(rawTyp)
+	intfTypId       = rt2id(intfTyp)
+	timeTypId       = rt2id(timeTyp)
+	stringTypId     = rt2id(stringTyp)
+
+	mapStrIntfTypId  = rt2id(mapStrIntfTyp)
+	mapIntfIntfTypId = rt2id(mapIntfIntfTyp)
+	intfSliceTypId   = rt2id(intfSliceTyp)
+	// mapBySliceTypId  = rt2id(mapBySliceTyp)
+
+	intBitsize  = uint8(intTyp.Bits())
+	uintBitsize = uint8(uintTyp.Bits())
+
+	bsAll0x00 = []byte{0, 0, 0, 0, 0, 0, 0, 0}
+	bsAll0xff = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
+
+	chkOvf checkOverflow
+
+	errNoFieldNameToStructFieldInfo = errors.New("no field name passed to parseStructFieldInfo")
+)
+
+var defTypeInfos = NewTypeInfos([]string{"codec", "json"})
+
+var immutableKindsSet = [32]bool{
+	// reflect.Invalid:  ,
+	reflect.Bool:       true,
+	reflect.Int:        true,
+	reflect.Int8:       true,
+	reflect.Int16:      true,
+	reflect.Int32:      true,
+	reflect.Int64:      true,
+	reflect.Uint:       true,
+	reflect.Uint8:      true,
+	reflect.Uint16:     true,
+	reflect.Uint32:     true,
+	reflect.Uint64:     true,
+	reflect.Uintptr:    true,
+	reflect.Float32:    true,
+	reflect.Float64:    true,
+	reflect.Complex64:  true,
+	reflect.Complex128: true,
+	// reflect.Array
+	// reflect.Chan
+	// reflect.Func: true,
+	// reflect.Interface
+	// reflect.Map
+	// reflect.Ptr
+	// reflect.Slice
+	reflect.String: true,
+	// reflect.Struct
+	// reflect.UnsafePointer
+}
+
+// Selfer defines methods by which a value can encode or decode itself.
+//
+// Any type which implements Selfer will be able to encode or decode itself.
+// Consequently, during (en|de)code, this takes precedence over
+// (text|binary)(M|Unm)arshal or extension support.
+//
+// Note: *the first set of bytes of any value MUST NOT represent nil in the format*.
+// This is because, during each decode, we first check if the next set of bytes
+// represent nil, and if so, we just set the value to nil.
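+//
+// Example (an illustrative sketch, assuming the MustEncode/MustDecode helpers):
+//    type Point struct{ X, Y int }
+//    func (p *Point) CodecEncodeSelf(e *Encoder) { e.MustEncode([2]int{p.X, p.Y}) }
+//    func (p *Point) CodecDecodeSelf(d *Decoder) {
+//        var a [2]int
+//        d.MustDecode(&a)
+//        p.X, p.Y = a[0], a[1]
+//    }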
+type Selfer interface {
+	CodecEncodeSelf(*Encoder)
+	CodecDecodeSelf(*Decoder)
+}
+
+// MapBySlice is a tag interface that denotes wrapped slice should encode as a map in the stream.
+// The slice contains a sequence of key-value pairs.
+// This affords storing a map in a specific sequence in the stream.
+//
+// Example usage:
+//    type T1 []string         // or []int or []Point or any other "slice" type
+//    func (_ T1) MapBySlice() {} // T1 now implements MapBySlice, and will be encoded as a map
+//    type T2 struct { KeyValues T1 }
+//
+//    var kvs = []string{"one", "1", "two", "2", "three", "3"}
+//    var v2 = T2{ KeyValues: T1(kvs) }
+//    // v2 will be encoded like the map: {"KeyValues": {"one": "1", "two": "2", "three": "3"} }
+//
+// The support of MapBySlice affords the following:
+//   - A slice type which implements MapBySlice will be encoded as a map
+//   - A slice can be decoded from a map in the stream
+//   - It MUST be a slice type (not a pointer receiver) that implements MapBySlice
+type MapBySlice interface {
+	MapBySlice()
+}
+
+// BasicHandle encapsulates the common options and extension functions.
+//
+// Deprecated: DO NOT USE DIRECTLY. EXPORTED FOR GODOC BENEFIT. WILL BE REMOVED.
+type BasicHandle struct {
+	// BasicHandle is always a part of a different type.
+	// It doesn't have to fit into its own cache lines.
+
+	// TypeInfos is used to get the type info for any type.
+	//
+	// If not configured, the default TypeInfos is used, which uses struct tag keys: codec, json
+	TypeInfos *TypeInfos
+
+	// Note: BasicHandle is not comparable, due to these slices here (extHandle, intf2impls).
+	// If *[]T is used instead, this becomes comparable, at the cost of extra indirection.
+	// These slices are used all the time, so keep as slices (not pointers).
+
+	extHandle
+
+	intf2impls
+
+	RPCOptions
+
+	// ---- cache line
+
+	DecodeOptions
+
+	// ---- cache line
+
+	EncodeOptions
+
+	// noBuiltInTypeChecker
+}
+
+func (x *BasicHandle) getBasicHandle() *BasicHandle {
+	return x
+}
+
+func (x *BasicHandle) getTypeInfo(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
+	if x.TypeInfos == nil {
+		return defTypeInfos.get(rtid, rt)
+	}
+	return x.TypeInfos.get(rtid, rt)
+}
+
+// Handle is the interface for a specific encoding format.
+//
+// Typically, a Handle is pre-configured before first time use,
+// and not modified while in use. Such a pre-configured Handle
+// is safe for concurrent access.
+type Handle interface {
+	Name() string
+	getBasicHandle() *BasicHandle
+	recreateEncDriver(encDriver) bool
+	newEncDriver(w *Encoder) encDriver
+	newDecDriver(r *Decoder) decDriver
+	isBinary() bool
+	hasElemSeparators() bool
+	// IsBuiltinType(rtid uintptr) bool
+}
+
+// Raw represents raw formatted bytes.
+// We "blindly" store it during encode and retrieve the raw bytes during decode.
+// Note: it is dangerous during encode, so we may gate the behaviour
+// behind an Encode flag which must be explicitly set.
+type Raw []byte
+
+// RawExt represents raw unprocessed extension data.
+// Some codecs will decode extension data as a *RawExt
+// if there is no registered extension for the tag.
+//
+// Only one of Data or Value is nil.
+// If Data is nil, then the content of the RawExt is in the Value.
+type RawExt struct {
+	Tag uint64
+	// Data is the []byte which represents the raw ext. If nil, ext is exposed in Value.
+	// Data is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of types
+	Data []byte
+	// Value represents the extension, if Data is nil.
+	// Value is used by codecs (e.g. cbor, json) which leverage the format to do
+	// custom serialization of the types.
+	Value interface{}
+}
+
+// BytesExt handles custom (de)serialization of types to/from []byte.
+// It is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types.
+type BytesExt interface {
+	// WriteExt converts a value to a []byte.
+	//
+	// Note: v is a pointer iff the registered extension type is a struct or array kind.
+	WriteExt(v interface{}) []byte
+
+	// ReadExt updates a value from a []byte.
+	//
+	// Note: dst is always a pointer kind to the registered extension type.
+	ReadExt(dst interface{}, src []byte)
+}
+
+// InterfaceExt handles custom (de)serialization of types to/from another interface{} value.
+// The Encoder or Decoder will then handle the further (de)serialization of that known type.
+//
+// It is used by codecs (e.g. cbor, json) which use the format to do custom serialization of types.
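+//
+// For example (an illustrative sketch, with a hypothetical MyTime struct type):
+// ConvertExt could return v.(*MyTime).UnixNano() as an int64, and UpdateExt
+// could set *dst.(*MyTime) from time.Unix(0, src.(int64)).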
+type InterfaceExt interface {
+	// ConvertExt converts a value into a simpler interface for easy encoding
+	// e.g. convert time.Time to int64.
+	//
+	// Note: v is a pointer iff the registered extension type is a struct or array kind.
+	ConvertExt(v interface{}) interface{}
+
+	// UpdateExt updates a value from a simpler interface for easy decoding
+	// e.g. convert int64 to time.Time.
+	//
+	// Note: dst is always a pointer kind to the registered extension type.
+	UpdateExt(dst interface{}, src interface{})
+}
+
+// Ext handles custom (de)serialization of custom types / extensions.
+type Ext interface {
+	BytesExt
+	InterfaceExt
+}
+
+// addExtWrapper is a wrapper implementation to support former AddExt exported method.
+type addExtWrapper struct {
+	encFn func(reflect.Value) ([]byte, error)
+	decFn func(reflect.Value, []byte) error
+}
+
+func (x addExtWrapper) WriteExt(v interface{}) []byte {
+	bs, err := x.encFn(reflect.ValueOf(v))
+	if err != nil {
+		panic(err)
+	}
+	return bs
+}
+
+func (x addExtWrapper) ReadExt(v interface{}, bs []byte) {
+	if err := x.decFn(reflect.ValueOf(v), bs); err != nil {
+		panic(err)
+	}
+}
+
+func (x addExtWrapper) ConvertExt(v interface{}) interface{} {
+	return x.WriteExt(v)
+}
+
+func (x addExtWrapper) UpdateExt(dest interface{}, v interface{}) {
+	x.ReadExt(dest, v.([]byte))
+}
+
+type extWrapper struct {
+	BytesExt
+	InterfaceExt
+}
+
+type bytesExtFailer struct{}
+
+func (bytesExtFailer) WriteExt(v interface{}) []byte {
+	panicv.errorstr("BytesExt.WriteExt is not supported")
+	return nil
+}
+func (bytesExtFailer) ReadExt(v interface{}, bs []byte) {
+	panicv.errorstr("BytesExt.ReadExt is not supported")
+}
+
+type interfaceExtFailer struct{}
+
+func (interfaceExtFailer) ConvertExt(v interface{}) interface{} {
+	panicv.errorstr("InterfaceExt.ConvertExt is not supported")
+	return nil
+}
+func (interfaceExtFailer) UpdateExt(dest interface{}, v interface{}) {
+	panicv.errorstr("InterfaceExt.UpdateExt is not supported")
+}
+
+type binaryEncodingType struct{}
+
+func (binaryEncodingType) isBinary() bool { return true }
+
+type textEncodingType struct{}
+
+func (textEncodingType) isBinary() bool { return false }
+
+// noBuiltInTypes is embedded into many types which do not support builtins
+// e.g. msgpack, simple, cbor.
+
+// type noBuiltInTypeChecker struct{}
+// func (noBuiltInTypeChecker) IsBuiltinType(rt uintptr) bool { return false }
+// type noBuiltInTypes struct{ noBuiltInTypeChecker }
+
+type noBuiltInTypes struct{}
+
+func (noBuiltInTypes) EncodeBuiltin(rt uintptr, v interface{}) {}
+func (noBuiltInTypes) DecodeBuiltin(rt uintptr, v interface{}) {}
+
+// type noStreamingCodec struct{}
+// func (noStreamingCodec) CheckBreak() bool { return false }
+// func (noStreamingCodec) hasElemSeparators() bool { return false }
+
+type noElemSeparators struct{}
+
+func (noElemSeparators) hasElemSeparators() (v bool)            { return }
+func (noElemSeparators) recreateEncDriver(e encDriver) (v bool) { return }
+
+// bigenHelper.
+// Users must already slice the x completely, because we will not reslice.
+type bigenHelper struct {
+	x []byte // must be correctly sliced to appropriate len. slicing is a cost.
+	w encWriter
+}
+
+func (z bigenHelper) writeUint16(v uint16) {
+	bigen.PutUint16(z.x, v)
+	z.w.writeb(z.x)
+}
+
+func (z bigenHelper) writeUint32(v uint32) {
+	bigen.PutUint32(z.x, v)
+	z.w.writeb(z.x)
+}
+
+func (z bigenHelper) writeUint64(v uint64) {
+	bigen.PutUint64(z.x, v)
+	z.w.writeb(z.x)
+}
+
+type extTypeTagFn struct {
+	rtid    uintptr
+	rtidptr uintptr
+	rt      reflect.Type
+	tag     uint64
+	ext     Ext
+	_       [1]uint64 // padding
+}
+
+type extHandle []extTypeTagFn
+
+// AddExt registers an encode and decode function for a reflect.Type.
+// To deregister an Ext, call AddExt with nil encfn and/or nil decfn.
+//
+// Deprecated: Use SetBytesExt or SetInterfaceExt on the Handle instead.
+func (o *extHandle) AddExt(rt reflect.Type, tag byte,
+	encfn func(reflect.Value) ([]byte, error),
+	decfn func(reflect.Value, []byte) error) (err error) {
+	if encfn == nil || decfn == nil {
+		return o.SetExt(rt, uint64(tag), nil)
+	}
+	return o.SetExt(rt, uint64(tag), addExtWrapper{encfn, decfn})
+}
+
+// SetExt will set the extension for a tag and reflect.Type.
+// Note that the type must be a named type, and specifically not a pointer or Interface.
+// An error is returned if that is not honored.
+// To Deregister an ext, call SetExt with nil Ext.
+//
+// Deprecated: Use SetBytesExt or SetInterfaceExt on the Handle instead.
+func (o *extHandle) SetExt(rt reflect.Type, tag uint64, ext Ext) (err error) {
+	// o is a pointer, because we may need to initialize it
+	rk := rt.Kind()
+	for rk == reflect.Ptr {
+		rt = rt.Elem()
+		rk = rt.Kind()
+	}
+
+	if rt.PkgPath() == "" || rk == reflect.Interface { // || rk == reflect.Ptr {
+		return fmt.Errorf("codec.Handle.SetExt: Takes named type, not a pointer or interface: %v", rt)
+	}
+
+	rtid := rt2id(rt)
+	switch rtid {
+	case timeTypId, rawTypId, rawExtTypId:
+		// all natively supported type, so cannot have an extension
+		return // TODO: should we silently ignore, or return an error???
+	}
+	// if o == nil {
+	// 	return errors.New("codec.Handle.SetExt: extHandle not initialized")
+	// }
+	o2 := *o
+	// if o2 == nil {
+	// 	return errors.New("codec.Handle.SetExt: extHandle not initialized")
+	// }
+	for i := range o2 {
+		v := &o2[i]
+		if v.rtid == rtid {
+			v.tag, v.ext = tag, ext
+			return
+		}
+	}
+	rtidptr := rt2id(reflect.PtrTo(rt))
+	*o = append(o2, extTypeTagFn{rtid, rtidptr, rt, tag, ext, [1]uint64{}})
+	return
+}
+
+func (o extHandle) getExt(rtid uintptr) (v *extTypeTagFn) {
+	for i := range o {
+		v = &o[i]
+		if v.rtid == rtid || v.rtidptr == rtid {
+			return
+		}
+	}
+	return nil
+}
+
+func (o extHandle) getExtForTag(tag uint64) (v *extTypeTagFn) {
+	for i := range o {
+		v = &o[i]
+		if v.tag == tag {
+			return
+		}
+	}
+	return nil
+}
+
+type intf2impl struct {
+	rtid uintptr // for intf
+	impl reflect.Type
+	// _    [1]uint64 // padding // not-needed, as *intf2impl is never returned.
+}
+
+type intf2impls []intf2impl
+
+// Intf2Impl maps an interface to an implementing type.
+// This allows us to support inferring the concrete type
+// and populating it when passed an interface.
+// e.g. var v io.Reader can be decoded as a bytes.Buffer, etc.
+//
+// Passing a nil impl will clear the mapping.
+func (o *intf2impls) Intf2Impl(intf, impl reflect.Type) (err error) {
+	if impl != nil && !impl.Implements(intf) {
+		return fmt.Errorf("Intf2Impl: %v does not implement %v", impl, intf)
+	}
+	rtid := rt2id(intf)
+	o2 := *o
+	for i := range o2 {
+		v := &o2[i]
+		if v.rtid == rtid {
+			v.impl = impl
+			return
+		}
+	}
+	*o = append(o2, intf2impl{rtid, impl})
+	return
+}
+
+func (o intf2impls) intf2impl(rtid uintptr) (rv reflect.Value) {
+	for i := range o {
+		v := &o[i]
+		if v.rtid == rtid {
+			if v.impl == nil {
+				return
+			}
+			if v.impl.Kind() == reflect.Ptr {
+				return reflect.New(v.impl.Elem())
+			}
+			return reflect.New(v.impl).Elem()
+		}
+	}
+	return
+}
+
+type structFieldInfoFlag uint8
+
+const (
+	_ structFieldInfoFlag = 1 << iota
+	structFieldInfoFlagReady
+	structFieldInfoFlagOmitEmpty
+)
+
+func (x *structFieldInfoFlag) flagSet(f structFieldInfoFlag) {
+	*x = *x | f
+}
+
+func (x *structFieldInfoFlag) flagClr(f structFieldInfoFlag) {
+	*x = *x &^ f
+}
+
+func (x structFieldInfoFlag) flagGet(f structFieldInfoFlag) bool {
+	return x&f != 0
+}
+
+func (x structFieldInfoFlag) omitEmpty() bool {
+	return x.flagGet(structFieldInfoFlagOmitEmpty)
+}
+
+func (x structFieldInfoFlag) ready() bool {
+	return x.flagGet(structFieldInfoFlagReady)
+}
+
+type structFieldInfo struct {
+	encName   string // encode name
+	fieldName string // field name
+
+	is  [maxLevelsEmbedding]uint16 // (recursive/embedded) field index in struct
+	nis uint8                      // num levels of embedding. if 1, then it's not embedded.
+	structFieldInfoFlag
+}
+
+func (si *structFieldInfo) setToZeroValue(v reflect.Value) {
+	if v, valid := si.field(v, false); valid {
+		v.Set(reflect.Zero(v.Type()))
+	}
+}
+
+// field returns the field of the struct.
+// If anonymous, it returns an invalid (zero) reflect.Value.
+func (si *structFieldInfo) field(v reflect.Value, update bool) (rv2 reflect.Value, valid bool) {
+	// replicate FieldByIndex
+	for i, x := range si.is {
+		if uint8(i) == si.nis {
+			break
+		}
+		if v, valid = baseStructRv(v, update); !valid {
+			return
+		}
+		v = v.Field(int(x))
+	}
+
+	return v, true
+}
+
+// func (si *structFieldInfo) fieldval(v reflect.Value, update bool) reflect.Value {
+// 	v, _ = si.field(v, update)
+// 	return v
+// }
+
+func parseStructInfo(stag string) (toArray, omitEmpty bool, keytype valueType) {
+	keytype = valueTypeString // default
+	if stag == "" {
+		return
+	}
+	for i, s := range strings.Split(stag, ",") {
+		if i == 0 {
+		} else {
+			switch s {
+			case "omitempty":
+				omitEmpty = true
+			case "toarray":
+				toArray = true
+			case "int":
+				keytype = valueTypeInt
+			case "uint":
+				keytype = valueTypeUint
+			case "float":
+				keytype = valueTypeFloat
+				// case "bool":
+				// 	keytype = valueTypeBool
+			case "string":
+				keytype = valueTypeString
+			}
+		}
+	}
+	return
+}
+
+func (si *structFieldInfo) parseTag(stag string) {
+	// if fname == "" {
+	// 	panic(errNoFieldNameToStructFieldInfo)
+	// }
+
+	if stag == "" {
+		return
+	}
+	for i, s := range strings.Split(stag, ",") {
+		if i == 0 {
+			if s != "" {
+				si.encName = s
+			}
+		} else {
+			switch s {
+			case "omitempty":
+				si.flagSet(structFieldInfoFlagOmitEmpty)
+				// si.omitEmpty = true
+				// case "toarray":
+				// 	si.toArray = true
+			}
+		}
+	}
+}
+
+type sfiSortedByEncName []*structFieldInfo
+
+func (p sfiSortedByEncName) Len() int {
+	return len(p)
+}
+
+func (p sfiSortedByEncName) Less(i, j int) bool {
+	return p[i].encName < p[j].encName
+}
+
+func (p sfiSortedByEncName) Swap(i, j int) {
+	p[i], p[j] = p[j], p[i]
+}
+
+const structFieldNodeNumToCache = 4
+
+type structFieldNodeCache struct {
+	rv  [structFieldNodeNumToCache]reflect.Value
+	idx [structFieldNodeNumToCache]uint32
+	num uint8
+}
+
+func (x *structFieldNodeCache) get(key uint32) (fv reflect.Value, valid bool) {
+	for i, k := range &x.idx {
+		if uint8(i) == x.num {
+			return // break
+		}
+		if key == k {
+			return x.rv[i], true
+		}
+	}
+	return
+}
+
+func (x *structFieldNodeCache) tryAdd(fv reflect.Value, key uint32) {
+	if x.num < structFieldNodeNumToCache {
+		x.rv[x.num] = fv
+		x.idx[x.num] = key
+		x.num++
+		return
+	}
+}
+
+type structFieldNode struct {
+	v      reflect.Value
+	cache2 structFieldNodeCache
+	cache3 structFieldNodeCache
+	update bool
+}
+
+func (x *structFieldNode) field(si *structFieldInfo) (fv reflect.Value) {
+	// return si.fieldval(x.v, x.update)
+	// Note: we only cache if nis=2 or nis=3 i.e. up to 2 levels of embedding
+	// This mostly saves us time on the repeated calls to v.Elem, v.Field, etc.
+	var valid bool
+	switch si.nis {
+	case 1:
+		fv = x.v.Field(int(si.is[0]))
+	case 2:
+		if fv, valid = x.cache2.get(uint32(si.is[0])); valid {
+			fv = fv.Field(int(si.is[1]))
+			return
+		}
+		fv = x.v.Field(int(si.is[0]))
+		if fv, valid = baseStructRv(fv, x.update); !valid {
+			return
+		}
+		x.cache2.tryAdd(fv, uint32(si.is[0]))
+		fv = fv.Field(int(si.is[1]))
+	case 3:
+		var key uint32 = uint32(si.is[0])<<16 | uint32(si.is[1])
+		if fv, valid = x.cache3.get(key); valid {
+			fv = fv.Field(int(si.is[2]))
+			return
+		}
+		fv = x.v.Field(int(si.is[0]))
+		if fv, valid = baseStructRv(fv, x.update); !valid {
+			return
+		}
+		fv = fv.Field(int(si.is[1]))
+		if fv, valid = baseStructRv(fv, x.update); !valid {
+			return
+		}
+		x.cache3.tryAdd(fv, key)
+		fv = fv.Field(int(si.is[2]))
+	default:
+		fv, _ = si.field(x.v, x.update)
+	}
+	return
+}
+
+func baseStructRv(v reflect.Value, update bool) (v2 reflect.Value, valid bool) {
+	for v.Kind() == reflect.Ptr {
+		if v.IsNil() {
+			if !update {
+				return
+			}
+			v.Set(reflect.New(v.Type().Elem()))
+		}
+		v = v.Elem()
+	}
+	return v, true
+}
+
+type typeInfoFlag uint8
+
+const (
+	typeInfoFlagComparable = 1 << iota
+	typeInfoFlagIsZeroer
+	typeInfoFlagIsZeroerPtr
+)
+
+// typeInfo keeps information about each (non-ptr) type referenced in the encode/decode sequence.
+//
+// During an encode/decode sequence, we work as below:
+//   - If base is a built in type, en/decode base value
+//   - If base is registered as an extension, en/decode base value
+//   - If type is binary(M/Unm)arshaler, call Binary(M/Unm)arshal method
+//   - If type is text(M/Unm)arshaler, call Text(M/Unm)arshal method
+//   - Else decode appropriately based on the reflect.Kind
+type typeInfo struct {
+	rt      reflect.Type
+	elem    reflect.Type
+	pkgpath string
+
+	rtid uintptr
+	// rv0  reflect.Value // saved zero value, used if immutableKind
+
+	numMeth uint16 // number of methods
+	kind    uint8
+	chandir uint8
+
+	anyOmitEmpty bool      // true if a struct, and any of the fields are tagged "omitempty"
+	toArray      bool      // whether this (struct) type should be encoded as an array
+	keyType      valueType // if struct, how is the field name stored in a stream? default is string
+	mbs          bool      // base type (T or *T) is a MapBySlice
+
+	// ---- cpu cache line boundary?
+	sfiSort []*structFieldInfo // sorted. Used when enc/dec struct to map.
+	sfiSrc  []*structFieldInfo // unsorted. Used when enc/dec struct to array.
+
+	key reflect.Type
+
+	// ---- cpu cache line boundary?
+	// sfis         []structFieldInfo // all sfi, in src order, as created.
+	sfiNamesSort []byte // all names, with indexes into the sfiSort
+
+	// format of marshal type fields below: [btj][mu]p? OR csp?
+
+	bm  bool // T is a binaryMarshaler
+	bmp bool // *T is a binaryMarshaler
+	bu  bool // T is a binaryUnmarshaler
+	bup bool // *T is a binaryUnmarshaler
+	tm  bool // T is a textMarshaler
+	tmp bool // *T is a textMarshaler
+	tu  bool // T is a textUnmarshaler
+	tup bool // *T is a textUnmarshaler
+
+	jm  bool // T is a jsonMarshaler
+	jmp bool // *T is a jsonMarshaler
+	ju  bool // T is a jsonUnmarshaler
+	jup bool // *T is a jsonUnmarshaler
+	cs  bool // T is a Selfer
+	csp bool // *T is a Selfer
+
+	// other flags, with individual bits representing if set.
+	flags typeInfoFlag
+
+	// _ [2]byte   // padding
+	_ [3]uint64 // padding
+}
+
+func (ti *typeInfo) isFlag(f typeInfoFlag) bool {
+	return ti.flags&f != 0
+}
+
+func (ti *typeInfo) indexForEncName(name []byte) (index int16) {
+	var sn []byte
+	if len(name)+2 <= 32 {
+		var buf [32]byte // should not escape
+		sn = buf[:len(name)+2]
+	} else {
+		sn = make([]byte, len(name)+2)
+	}
+	copy(sn[1:], name)
+	sn[0], sn[len(sn)-1] = tiSep2(name), 0xff
+	j := bytes.Index(ti.sfiNamesSort, sn)
+	if j < 0 {
+		return -1
+	}
+	index = int16(uint16(ti.sfiNamesSort[j+len(sn)+1]) | uint16(ti.sfiNamesSort[j+len(sn)])<<8)
+	return
+}
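+
+// For illustration: sfiNamesSort packs each sorted encName as
+// 	sep(name) | name bytes | 0xff | index-hi | index-lo
+// so indexForEncName is a single bytes.Index scan. A hedged sketch:
+//
+// 	// name "id" at sorted position 3 is stored as the 6 bytes
+// 	//   [tiSep("id"), 'i', 'd', 0xff, 0x00, 0x03]
+// 	// and looked up by searching for [tiSep2([]byte("id")), 'i', 'd', 0xff].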
+
+type rtid2ti struct {
+	rtid uintptr
+	ti   *typeInfo
+}
+
+// TypeInfos caches typeInfo for each type on first inspection.
+//
+// It is configured with a set of tag keys, which are used to get
+// configuration for the type.
+type TypeInfos struct {
+	// infos: formerly map[uintptr]*typeInfo, now *[]rtid2ti, 2 words expected
+	infos atomicTypeInfoSlice
+	mu    sync.Mutex
+	tags  []string
+	_     [2]uint64 // padding
+}
+
+// NewTypeInfos creates a TypeInfos given a set of struct tags keys.
+//
+// This allows users to customize the struct tag keys that hold the configuration
+// for their types.
+func NewTypeInfos(tags []string) *TypeInfos {
+	return &TypeInfos{tags: tags}
+}
+
+func (x *TypeInfos) structTag(t reflect.StructTag) (s string) {
+	// check for tags: codec, json, in that order.
+	// this allows seamless support for many configured structs.
+	for _, x := range x.tags {
+		s = t.Get(x)
+		if s != "" {
+			return s
+		}
+	}
+	return
+}
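+
+// For illustration, a hedged usage sketch of custom tag keys:
+//
+// 	// honor "mytag" first, falling back to "codec" and "json":
+// 	// ti := NewTypeInfos([]string{"mytag", "codec", "json"})
+// 	// bh.TypeInfos = ti // bh being a BasicHandle, as referenced in codecFner.reset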
+
+func (x *TypeInfos) find(s []rtid2ti, rtid uintptr) (idx int, ti *typeInfo) {
+	// binary search. adapted from sort/search.go.
+	// if sp == nil {
+	// 	return -1, nil
+	// }
+	// s := *sp
+	h, i, j := 0, 0, len(s)
+	for i < j {
+		h = i + (j-i)/2
+		if s[h].rtid < rtid {
+			i = h + 1
+		} else {
+			j = h
+		}
+	}
+	if i < len(s) && s[i].rtid == rtid {
+		return i, s[i].ti
+	}
+	return i, nil
+}
+
+func (x *TypeInfos) get(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
+	sp := x.infos.load()
+	var idx int
+	if sp != nil {
+		idx, pti = x.find(sp, rtid)
+		if pti != nil {
+			return
+		}
+	}
+
+	rk := rt.Kind()
+
+	if rk == reflect.Ptr { // || (rk == reflect.Interface && rtid != intfTypId) {
+		panicv.errorf("invalid kind passed to TypeInfos.get: %v - %v", rk, rt)
+	}
+
+	// do not hold lock while computing this.
+	// it may lead to duplication, but that's ok.
+	ti := typeInfo{rt: rt, rtid: rtid, kind: uint8(rk), pkgpath: rt.PkgPath()}
+	// ti.rv0 = reflect.Zero(rt)
+
+	// ti.comparable = rt.Comparable()
+	ti.numMeth = uint16(rt.NumMethod())
+
+	ti.bm, ti.bmp = implIntf(rt, binaryMarshalerTyp)
+	ti.bu, ti.bup = implIntf(rt, binaryUnmarshalerTyp)
+	ti.tm, ti.tmp = implIntf(rt, textMarshalerTyp)
+	ti.tu, ti.tup = implIntf(rt, textUnmarshalerTyp)
+	ti.jm, ti.jmp = implIntf(rt, jsonMarshalerTyp)
+	ti.ju, ti.jup = implIntf(rt, jsonUnmarshalerTyp)
+	ti.cs, ti.csp = implIntf(rt, selferTyp)
+
+	b1, b2 := implIntf(rt, iszeroTyp)
+	if b1 {
+		ti.flags |= typeInfoFlagIsZeroer
+	}
+	if b2 {
+		ti.flags |= typeInfoFlagIsZeroerPtr
+	}
+	if rt.Comparable() {
+		ti.flags |= typeInfoFlagComparable
+	}
+
+	switch rk {
+	case reflect.Struct:
+		var omitEmpty bool
+		if f, ok := rt.FieldByName(structInfoFieldName); ok {
+			ti.toArray, omitEmpty, ti.keyType = parseStructInfo(x.structTag(f.Tag))
+		} else {
+			ti.keyType = valueTypeString
+		}
+		pp, pi := pool.tiLoad()
+		pv := pi.(*typeInfoLoadArray)
+		pv.etypes[0] = ti.rtid
+		// vv := typeInfoLoad{pv.fNames[:0], pv.encNames[:0], pv.etypes[:1], pv.sfis[:0]}
+		vv := typeInfoLoad{pv.etypes[:1], pv.sfis[:0]}
+		x.rget(rt, rtid, omitEmpty, nil, &vv)
+		// ti.sfis = vv.sfis
+		ti.sfiSrc, ti.sfiSort, ti.sfiNamesSort, ti.anyOmitEmpty = rgetResolveSFI(rt, vv.sfis, pv)
+		pp.Put(pi)
+	case reflect.Map:
+		ti.elem = rt.Elem()
+		ti.key = rt.Key()
+	case reflect.Slice:
+		ti.mbs, _ = implIntf(rt, mapBySliceTyp)
+		ti.elem = rt.Elem()
+	case reflect.Chan:
+		ti.elem = rt.Elem()
+		ti.chandir = uint8(rt.ChanDir())
+	case reflect.Array, reflect.Ptr:
+		ti.elem = rt.Elem()
+	}
+	// sfi = sfiSrc
+
+	x.mu.Lock()
+	sp = x.infos.load()
+	if sp == nil {
+		pti = &ti
+		vs := []rtid2ti{{rtid, pti}}
+		x.infos.store(vs)
+	} else {
+		idx, pti = x.find(sp, rtid)
+		if pti == nil {
+			pti = &ti
+			vs := make([]rtid2ti, len(sp)+1)
+			copy(vs, sp[:idx])
+			copy(vs[idx+1:], sp[idx:])
+			vs[idx] = rtid2ti{rtid, pti}
+			x.infos.store(vs)
+		}
+	}
+	x.mu.Unlock()
+	return
+}
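+
+// Note on the pattern above: infos is effectively copy-on-write. Readers call
+// load() without taking x.mu; writers, under x.mu, build a fresh sorted slice
+// with the new entry spliced in at idx, then publish it atomically via store().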
+
+func (x *TypeInfos) rget(rt reflect.Type, rtid uintptr, omitEmpty bool,
+	indexstack []uint16, pv *typeInfoLoad) {
+	// Read up fields and store how to access the value.
+	//
+	// It uses Go's rules for field selectors,
+	// which say that the field with the shallowest depth is selected.
+	//
+	// Note: we consciously use slices, not a map, to simulate a set.
+	//       Typically, types have < 16 fields,
+	//       and iterating with equality checks is faster than a map lookup there.
+	flen := rt.NumField()
+	if flen > (1<<maxLevelsEmbedding - 1) {
+		panicv.errorf("codec: types with > %v fields are not supported - has %v fields",
+			(1<<maxLevelsEmbedding - 1), flen)
+	}
+	// pv.sfis = make([]structFieldInfo, flen)
+LOOP:
+	for j, jlen := uint16(0), uint16(flen); j < jlen; j++ {
+		f := rt.Field(int(j))
+		fkind := f.Type.Kind()
+		// skip if a func type, or is unexported, or structTag value == "-"
+		switch fkind {
+		case reflect.Func, reflect.Complex64, reflect.Complex128, reflect.UnsafePointer:
+			continue LOOP
+		}
+
+		isUnexported := f.PkgPath != ""
+		if isUnexported && !f.Anonymous {
+			continue
+		}
+		stag := x.structTag(f.Tag)
+		if stag == "-" {
+			continue
+		}
+		var si structFieldInfo
+		var parsed bool
+		// if anonymous and no struct tag (or it's blank),
+		// and a struct (or pointer to struct), inline it.
+		if f.Anonymous && fkind != reflect.Interface {
+			// ^^ redundant but ok: per go spec, an embedded pointer type cannot be to an interface
+			ft := f.Type
+			isPtr := ft.Kind() == reflect.Ptr
+			for ft.Kind() == reflect.Ptr {
+				ft = ft.Elem()
+			}
+			isStruct := ft.Kind() == reflect.Struct
+
+			// Ignore embedded fields of unexported non-struct types.
+			// Also, from go1.10, ignore pointers to unexported struct types
+			// because unmarshal cannot assign a new struct to an unexported field.
+			// See https://golang.org/issue/21357
+			if (isUnexported && !isStruct) || (!allowSetUnexportedEmbeddedPtr && isUnexported && isPtr) {
+				continue
+			}
+			doInline := stag == ""
+			if !doInline {
+				si.parseTag(stag)
+				parsed = true
+				doInline = si.encName == ""
+				// doInline = si.isZero()
+			}
+			if doInline && isStruct {
+				// if etypes contains this, don't call rget again (as fields are already seen here)
+				ftid := rt2id(ft)
+				// We cannot recurse forever, but we need to track other field depths.
+				// So - we break if we see a type twice (not the first time).
+				// This should be sufficient to handle an embedded type that refers to its
+				// owning type, which then refers to its embedded type.
+				processIt := true
+				numk := 0
+				for _, k := range pv.etypes {
+					if k == ftid {
+						numk++
+						if numk == rgetMaxRecursion {
+							processIt = false
+							break
+						}
+					}
+				}
+				if processIt {
+					pv.etypes = append(pv.etypes, ftid)
+					indexstack2 := make([]uint16, len(indexstack)+1)
+					copy(indexstack2, indexstack)
+					indexstack2[len(indexstack)] = j
+					// indexstack2 := append(append(make([]int, 0, len(indexstack)+4), indexstack...), j)
+					x.rget(ft, ftid, omitEmpty, indexstack2, pv)
+				}
+				continue
+			}
+		}
+
+		// after the anonymous dance: if an unexported field, skip
+		if isUnexported {
+			continue
+		}
+
+		if f.Name == "" {
+			panic(errNoFieldNameToStructFieldInfo)
+		}
+
+		// pv.fNames = append(pv.fNames, f.Name)
+		// if si.encName == "" {
+
+		if !parsed {
+			si.encName = f.Name
+			si.parseTag(stag)
+			parsed = true
+		} else if si.encName == "" {
+			si.encName = f.Name
+		}
+		si.fieldName = f.Name
+		si.flagSet(structFieldInfoFlagReady)
+
+		// pv.encNames = append(pv.encNames, si.encName)
+
+		// si.ikind = int(f.Type.Kind())
+		if len(indexstack) > maxLevelsEmbedding-1 {
+			panicv.errorf("codec: only supports up to %v depth of embedding - type has %v depth",
+				maxLevelsEmbedding-1, len(indexstack))
+		}
+		si.nis = uint8(len(indexstack)) + 1
+		copy(si.is[:], indexstack)
+		si.is[len(indexstack)] = j
+
+		if omitEmpty {
+			si.flagSet(structFieldInfoFlagOmitEmpty)
+		}
+		pv.sfis = append(pv.sfis, si)
+	}
+}
+
+func tiSep(name string) uint8 {
+	// (xn[0]%64) // (between 192-255 - outside ascii BMP)
+	// return 0xfe - (name[0] & 63)
+	// return 0xfe - (name[0] & 63) - uint8(len(name))
+	// return 0xfe - (name[0] & 63) - uint8(len(name)&63)
+	// return ((0xfe - (name[0] & 63)) & 0xf8) | (uint8(len(name) & 0x07))
+	return 0xfe - (name[0] & 63) - uint8(len(name)&63)
+}
+
+func tiSep2(name []byte) uint8 {
+	return 0xfe - (name[0] & 63) - uint8(len(name)&63)
+}
+
+// rgetResolveSFI resolves the struct field info obtained from a call to rget.
+// It returns a trimmed unsorted slice, a sorted slice, the packed encoded names, and anyOmitEmpty.
+func rgetResolveSFI(rt reflect.Type, x []structFieldInfo, pv *typeInfoLoadArray) (
+	y, z []*structFieldInfo, ss []byte, anyOmitEmpty bool) {
+	sa := pv.sfiidx[:0]
+	sn := pv.b[:]
+	n := len(x)
+
+	var xn string
+	var ui uint16
+	var sep byte
+
+	for i := range x {
+		ui = uint16(i)
+		xn = x[i].encName // fieldName or encName? use encName for now.
+		if len(xn)+2 > cap(pv.b) {
+			sn = make([]byte, len(xn)+2)
+		} else {
+			sn = sn[:len(xn)+2]
+		}
+		// use a custom sep, so that misses are less frequent,
+		// since the sep (first char in search) is as unique as first char in field name.
+		sep = tiSep(xn)
+		sn[0], sn[len(sn)-1] = sep, 0xff
+		copy(sn[1:], xn)
+		j := bytes.Index(sa, sn)
+		if j == -1 {
+			sa = append(sa, sep)
+			sa = append(sa, xn...)
+			sa = append(sa, 0xff, byte(ui>>8), byte(ui))
+		} else {
+			index := uint16(sa[j+len(sn)+1]) | uint16(sa[j+len(sn)])<<8
+			// one of them must be cleared (its ready flag dropped),
+			// and the index updated appropriately to point to the other one
+			if x[i].nis == x[index].nis {
+				// same depth: leave as-is; the first one seen keeps the index
+			} else if x[i].nis < x[index].nis {
+				sa[j+len(sn)], sa[j+len(sn)+1] = byte(ui>>8), byte(ui)
+				if x[index].ready() {
+					x[index].flagClr(structFieldInfoFlagReady)
+					n--
+				}
+			} else {
+				if x[i].ready() {
+					x[i].flagClr(structFieldInfoFlagReady)
+					n--
+				}
+			}
+		}
+
+	}
+	var w []structFieldInfo
+	sharingArray := len(x) <= typeInfoLoadArraySfisLen // sharing array with typeInfoLoadArray
+	if sharingArray {
+		w = make([]structFieldInfo, n)
+	}
+
+	// remove all the nils (non-ready)
+	y = make([]*structFieldInfo, n)
+	n = 0
+	var sslen int
+	for i := range x {
+		if !x[i].ready() {
+			continue
+		}
+		if !anyOmitEmpty && x[i].omitEmpty() {
+			anyOmitEmpty = true
+		}
+		if sharingArray {
+			w[n] = x[i]
+			y[n] = &w[n]
+		} else {
+			y[n] = &x[i]
+		}
+		sslen = sslen + len(x[i].encName) + 4
+		n++
+	}
+	if n != len(y) {
+		panicv.errorf("failure reading struct %v - expecting %d of %d valid fields, got %d",
+			rt, len(y), len(x), n)
+	}
+
+	z = make([]*structFieldInfo, len(y))
+	copy(z, y)
+	sort.Sort(sfiSortedByEncName(z))
+
+	sharingArray = len(sa) <= typeInfoLoadArraySfiidxLen
+	if sharingArray {
+		ss = make([]byte, 0, sslen)
+	} else {
+		ss = sa[:0] // reuse the newly made sa array if necessary
+	}
+	for i := range z {
+		xn = z[i].encName
+		sep = tiSep(xn)
+		ui = uint16(i)
+		ss = append(ss, sep)
+		ss = append(ss, xn...)
+		ss = append(ss, 0xff, byte(ui>>8), byte(ui))
+	}
+	return
+}
+
+func implIntf(rt, iTyp reflect.Type) (base bool, indir bool) {
+	return rt.Implements(iTyp), reflect.PtrTo(rt).Implements(iTyp)
+}
+
+// isEmptyStruct is only called from isEmptyValue, and checks if a struct is empty:
+//    - does it implement IsZero() bool
+//    - is it comparable, so we can compare directly using ==
+//    - if checkStruct, then walk through the encodable fields
+//      and check if they are empty or not.
+func isEmptyStruct(v reflect.Value, tinfos *TypeInfos, deref, checkStruct bool) bool {
+	// v is a struct kind - no need to check again.
+	// We only check isZero on a struct kind, to reduce the amount of times
+	// that we lookup the rtid and typeInfo for each type as we walk the tree.
+
+	vt := v.Type()
+	rtid := rt2id(vt)
+	if tinfos == nil {
+		tinfos = defTypeInfos
+	}
+	ti := tinfos.get(rtid, vt)
+	if ti.rtid == timeTypId {
+		return rv2i(v).(time.Time).IsZero()
+	}
+	if ti.isFlag(typeInfoFlagIsZeroerPtr) && v.CanAddr() {
+		return rv2i(v.Addr()).(isZeroer).IsZero()
+	}
+	if ti.isFlag(typeInfoFlagIsZeroer) {
+		return rv2i(v).(isZeroer).IsZero()
+	}
+	if ti.isFlag(typeInfoFlagComparable) {
+		return rv2i(v) == rv2i(reflect.Zero(vt))
+	}
+	if !checkStruct {
+		return false
+	}
+	// We only care about what we can encode/decode,
+	// so that is what we use to check omitEmpty.
+	for _, si := range ti.sfiSrc {
+		sfv, valid := si.field(v, false)
+		if valid && !isEmptyValue(sfv, tinfos, deref, checkStruct) {
+			return false
+		}
+	}
+	return true
+}
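+
+// For illustration, a hedged sketch of which fast path applies above:
+//
+// 	// type Point struct{ X, Y int }        // comparable: compared via == against the zero value
+// 	// type Blob struct{ B []byte }         // not comparable: fields are walked when checkStruct is set
+// 	// func (b *Blob) IsZero() bool {...}   // *Blob is an isZeroer: used when the value is addressable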
+
+// func roundFloat(x float64) float64 {
+// 	t := math.Trunc(x)
+// 	if math.Abs(x-t) >= 0.5 {
+// 		return t + math.Copysign(1, x)
+// 	}
+// 	return t
+// }
+
+func panicToErr(h errstrDecorator, err *error) {
+	// Note: This method MUST be the function deferred directly, i.e. defer panicToErr(...);
+	// if it is wrapped in another function, recover() returns nil and the panic escapes.
+	if recoverPanicToErr {
+		if x := recover(); x != nil {
+			// fmt.Printf("panic'ing with: %v\n", x)
+			// debug.PrintStack()
+			panicValToErr(h, x, err)
+		}
+	}
+}
+
+func panicValToErr(h errstrDecorator, v interface{}, err *error) {
+	switch xerr := v.(type) {
+	case nil:
+	case error:
+		switch xerr {
+		case nil:
+		case io.EOF, io.ErrUnexpectedEOF, errEncoderNotInitialized, errDecoderNotInitialized:
+			// treat as special (bubble up)
+			*err = xerr
+		default:
+			h.wrapErrstr(xerr.Error(), err)
+		}
+	case string:
+		if xerr != "" {
+			h.wrapErrstr(xerr, err)
+		}
+	case fmt.Stringer:
+		if xerr != nil {
+			h.wrapErrstr(xerr.String(), err)
+		}
+	default:
+		h.wrapErrstr(v, err)
+	}
+}
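+
+// For illustration, the intended call pattern is a direct defer; a hedged
+// sketch (the enclosing method is hypothetical, assuming the receiver
+// implements errstrDecorator):
+//
+// 	// func (d *Decoder) decodeChecked(v interface{}) (err error) {
+// 	// 	defer panicToErr(d, &err) // deferred directly, not wrapped
+// 	// 	// ... decoding work that may panic ...
+// 	// 	return
+// 	// }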
+
+func isImmutableKind(k reflect.Kind) (v bool) {
+	return immutableKindsSet[k]
+}
+
+// ----
+
+type codecFnInfo struct {
+	ti    *typeInfo
+	xfFn  Ext
+	xfTag uint64
+	seq   seqType
+	addrD bool
+	addrF bool // if addrD, this says whether decode function can take a value or a ptr
+	addrE bool
+	ready bool // ready to use
+}
+
+// codecFn encapsulates the captured variables and the encode function.
+// This way, we only do the calculations once, and then pass control to the
+// code block that should be called (encapsulated in a function)
+// instead of executing the checks every time.
+type codecFn struct {
+	i  codecFnInfo
+	fe func(*Encoder, *codecFnInfo, reflect.Value)
+	fd func(*Decoder, *codecFnInfo, reflect.Value)
+	_  [1]uint64 // padding
+}
+
+type codecRtidFn struct {
+	rtid uintptr
+	fn   *codecFn
+}
+
+type codecFner struct {
+	// hh Handle
+	h  *BasicHandle
+	s  []codecRtidFn
+	be bool
+	js bool
+	_  [6]byte   // padding
+	_  [3]uint64 // padding
+}
+
+func (c *codecFner) reset(hh Handle) {
+	bh := hh.getBasicHandle()
+	// only reset iff extensions changed or *TypeInfos changed
+	var hhSame = true &&
+		c.h == bh && c.h.TypeInfos == bh.TypeInfos &&
+		len(c.h.extHandle) == len(bh.extHandle) &&
+		(len(c.h.extHandle) == 0 || &c.h.extHandle[0] == &bh.extHandle[0])
+	if !hhSame {
+		// c.hh = hh
+		c.h, bh = bh, c.h // swap both
+		_, c.js = hh.(*JsonHandle)
+		c.be = hh.isBinary()
+		for i := range c.s {
+			c.s[i].fn.i.ready = false
+		}
+	}
+}
+
+func (c *codecFner) get(rt reflect.Type, checkFastpath, checkCodecSelfer bool) (fn *codecFn) {
+	rtid := rt2id(rt)
+
+	for _, x := range c.s {
+		if x.rtid == rtid {
+			// if rtid exists, then there's a *codecFn attached (non-nil)
+			fn = x.fn
+			if fn.i.ready {
+				return
+			}
+			break
+		}
+	}
+	var ti *typeInfo
+	if fn == nil {
+		fn = new(codecFn)
+		if c.s == nil {
+			c.s = make([]codecRtidFn, 0, 8)
+		}
+		c.s = append(c.s, codecRtidFn{rtid, fn})
+	} else {
+		ti = fn.i.ti
+		*fn = codecFn{}
+		fn.i.ti = ti
+		// fn.fe, fn.fd = nil, nil
+	}
+	fi := &(fn.i)
+	fi.ready = true
+	if ti == nil {
+		ti = c.h.getTypeInfo(rtid, rt)
+		fi.ti = ti
+	}
+
+	rk := reflect.Kind(ti.kind)
+
+	if checkCodecSelfer && (ti.cs || ti.csp) {
+		fn.fe = (*Encoder).selferMarshal
+		fn.fd = (*Decoder).selferUnmarshal
+		fi.addrF = true
+		fi.addrD = ti.csp
+		fi.addrE = ti.csp
+	} else if rtid == timeTypId {
+		fn.fe = (*Encoder).kTime
+		fn.fd = (*Decoder).kTime
+	} else if rtid == rawTypId {
+		fn.fe = (*Encoder).raw
+		fn.fd = (*Decoder).raw
+	} else if rtid == rawExtTypId {
+		fn.fe = (*Encoder).rawExt
+		fn.fd = (*Decoder).rawExt
+		fi.addrF = true
+		fi.addrD = true
+		fi.addrE = true
+	} else if xfFn := c.h.getExt(rtid); xfFn != nil {
+		fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext
+		fn.fe = (*Encoder).ext
+		fn.fd = (*Decoder).ext
+		fi.addrF = true
+		fi.addrD = true
+		if rk == reflect.Struct || rk == reflect.Array {
+			fi.addrE = true
+		}
+	} else if supportMarshalInterfaces && c.be && (ti.bm || ti.bmp) && (ti.bu || ti.bup) {
+		fn.fe = (*Encoder).binaryMarshal
+		fn.fd = (*Decoder).binaryUnmarshal
+		fi.addrF = true
+		fi.addrD = ti.bup
+		fi.addrE = ti.bmp
+	} else if supportMarshalInterfaces && !c.be && c.js && (ti.jm || ti.jmp) && (ti.ju || ti.jup) {
+		// If JSON, we check jsonMarshaler before textMarshaler.
+		fn.fe = (*Encoder).jsonMarshal
+		fn.fd = (*Decoder).jsonUnmarshal
+		fi.addrF = true
+		fi.addrD = ti.jup
+		fi.addrE = ti.jmp
+	} else if supportMarshalInterfaces && !c.be && (ti.tm || ti.tmp) && (ti.tu || ti.tup) {
+		fn.fe = (*Encoder).textMarshal
+		fn.fd = (*Decoder).textUnmarshal
+		fi.addrF = true
+		fi.addrD = ti.tup
+		fi.addrE = ti.tmp
+	} else {
+		if fastpathEnabled && checkFastpath && (rk == reflect.Map || rk == reflect.Slice) {
+			if ti.pkgpath == "" { // un-named slice or map
+				if idx := fastpathAV.index(rtid); idx != -1 {
+					fn.fe = fastpathAV[idx].encfn
+					fn.fd = fastpathAV[idx].decfn
+					fi.addrD = true
+					fi.addrF = false
+				}
+			} else {
+				// use the mapping for the underlying type, if one exists
+				var rtu reflect.Type
+				if rk == reflect.Map {
+					rtu = reflect.MapOf(ti.key, ti.elem)
+				} else {
+					rtu = reflect.SliceOf(ti.elem)
+				}
+				rtuid := rt2id(rtu)
+				if idx := fastpathAV.index(rtuid); idx != -1 {
+					xfnf := fastpathAV[idx].encfn
+					xrt := fastpathAV[idx].rt
+					fn.fe = func(e *Encoder, xf *codecFnInfo, xrv reflect.Value) {
+						xfnf(e, xf, xrv.Convert(xrt))
+					}
+					fi.addrD = true
+					fi.addrF = false // meaning it can be an address(ptr) or a value
+					xfnf2 := fastpathAV[idx].decfn
+					fn.fd = func(d *Decoder, xf *codecFnInfo, xrv reflect.Value) {
+						if xrv.Kind() == reflect.Ptr {
+							xfnf2(d, xf, xrv.Convert(reflect.PtrTo(xrt)))
+						} else {
+							xfnf2(d, xf, xrv.Convert(xrt))
+						}
+					}
+				}
+			}
+		}
+		if fn.fe == nil && fn.fd == nil {
+			switch rk {
+			case reflect.Bool:
+				fn.fe = (*Encoder).kBool
+				fn.fd = (*Decoder).kBool
+			case reflect.String:
+				fn.fe = (*Encoder).kString
+				fn.fd = (*Decoder).kString
+			case reflect.Int:
+				fn.fd = (*Decoder).kInt
+				fn.fe = (*Encoder).kInt
+			case reflect.Int8:
+				fn.fe = (*Encoder).kInt8
+				fn.fd = (*Decoder).kInt8
+			case reflect.Int16:
+				fn.fe = (*Encoder).kInt16
+				fn.fd = (*Decoder).kInt16
+			case reflect.Int32:
+				fn.fe = (*Encoder).kInt32
+				fn.fd = (*Decoder).kInt32
+			case reflect.Int64:
+				fn.fe = (*Encoder).kInt64
+				fn.fd = (*Decoder).kInt64
+			case reflect.Uint:
+				fn.fd = (*Decoder).kUint
+				fn.fe = (*Encoder).kUint
+			case reflect.Uint8:
+				fn.fe = (*Encoder).kUint8
+				fn.fd = (*Decoder).kUint8
+			case reflect.Uint16:
+				fn.fe = (*Encoder).kUint16
+				fn.fd = (*Decoder).kUint16
+			case reflect.Uint32:
+				fn.fe = (*Encoder).kUint32
+				fn.fd = (*Decoder).kUint32
+			case reflect.Uint64:
+				fn.fe = (*Encoder).kUint64
+				fn.fd = (*Decoder).kUint64
+			case reflect.Uintptr:
+				fn.fe = (*Encoder).kUintptr
+				fn.fd = (*Decoder).kUintptr
+			case reflect.Float32:
+				fn.fe = (*Encoder).kFloat32
+				fn.fd = (*Decoder).kFloat32
+			case reflect.Float64:
+				fn.fe = (*Encoder).kFloat64
+				fn.fd = (*Decoder).kFloat64
+			case reflect.Invalid:
+				fn.fe = (*Encoder).kInvalid
+				fn.fd = (*Decoder).kErr
+			case reflect.Chan:
+				fi.seq = seqTypeChan
+				fn.fe = (*Encoder).kSlice
+				fn.fd = (*Decoder).kSlice
+			case reflect.Slice:
+				fi.seq = seqTypeSlice
+				fn.fe = (*Encoder).kSlice
+				fn.fd = (*Decoder).kSlice
+			case reflect.Array:
+				fi.seq = seqTypeArray
+				fn.fe = (*Encoder).kSlice
+				fi.addrF = false
+				fi.addrD = false
+				rt2 := reflect.SliceOf(ti.elem)
+				fn.fd = func(d *Decoder, xf *codecFnInfo, xrv reflect.Value) {
+					d.cfer().get(rt2, true, false).fd(d, xf, xrv.Slice(0, xrv.Len()))
+				}
+				// fn.fd = (*Decoder).kArray
+			case reflect.Struct:
+				if ti.anyOmitEmpty {
+					fn.fe = (*Encoder).kStruct
+				} else {
+					fn.fe = (*Encoder).kStructNoOmitempty
+				}
+				fn.fd = (*Decoder).kStruct
+			case reflect.Map:
+				fn.fe = (*Encoder).kMap
+				fn.fd = (*Decoder).kMap
+			case reflect.Interface:
+				// encode: reflect.Interface values are handled already by preEncodeValue
+				fn.fd = (*Decoder).kInterface
+				fn.fe = (*Encoder).kErr
+			default:
+				// reflect.Ptr and reflect.Interface are handled already by preEncodeValue
+				fn.fe = (*Encoder).kErr
+				fn.fd = (*Decoder).kErr
+			}
+		}
+	}
+	return
+}
+
+type codecFnPooler struct {
+	cf  *codecFner
+	cfp *sync.Pool
+	hh  Handle
+}
+
+func (d *codecFnPooler) cfer() *codecFner {
+	if d.cf == nil {
+		var v interface{}
+		d.cfp, v = pool.codecFner()
+		d.cf = v.(*codecFner)
+		d.cf.reset(d.hh)
+	}
+	return d.cf
+}
+
+func (d *codecFnPooler) alwaysAtEnd() {
+	if d.cf != nil {
+		d.cfp.Put(d.cf)
+		d.cf, d.cfp = nil, nil
+	}
+}
+
+// ----
+
+// these "checkOverflow" functions must be inlinable, and must not call any other functions.
+// Overflow means that the value cannot be represented without wrapping/overflow.
+// Overflow=false does not mean that the value can be represented without losing precision
+// (especially for floating point).
+
+type checkOverflow struct{}
+
+// func (checkOverflow) Float16(f float64) (overflow bool) {
+// 	panicv.errorf("unimplemented")
+// 	if f < 0 {
+// 		f = -f
+// 	}
+// 	return math.MaxFloat32 < f && f <= math.MaxFloat64
+// }
+
+func (checkOverflow) Float32(v float64) (overflow bool) {
+	if v < 0 {
+		v = -v
+	}
+	return math.MaxFloat32 < v && v <= math.MaxFloat64
+}
+func (checkOverflow) Uint(v uint64, bitsize uint8) (overflow bool) {
+	if bitsize == 0 || bitsize >= 64 || v == 0 {
+		return
+	}
+	if trunc := (v << (64 - bitsize)) >> (64 - bitsize); v != trunc {
+		overflow = true
+	}
+	return
+}
+func (checkOverflow) Int(v int64, bitsize uint8) (overflow bool) {
+	if bitsize == 0 || bitsize >= 64 || v == 0 {
+		return
+	}
+	if trunc := (v << (64 - bitsize)) >> (64 - bitsize); v != trunc {
+		overflow = true
+	}
+	return
+}
+func (checkOverflow) SignedInt(v uint64) (overflow bool) {
+	// e.g. int8 ranges from -128 to 127
+	pos := (v >> 63) == 0
+	ui2 := v & 0x7fffffffffffffff
+	if pos {
+		if ui2 > math.MaxInt64 {
+			overflow = true
+		}
+	} else {
+		if ui2 > math.MaxInt64-1 {
+			overflow = true
+		}
+	}
+	return
+}
+
+func (x checkOverflow) Float32V(v float64) float64 {
+	if x.Float32(v) {
+		panicv.errorf("float32 overflow: %v", v)
+	}
+	return v
+}
+func (x checkOverflow) UintV(v uint64, bitsize uint8) uint64 {
+	if x.Uint(v, bitsize) {
+		panicv.errorf("uint64 overflow: %v", v)
+	}
+	return v
+}
+func (x checkOverflow) IntV(v int64, bitsize uint8) int64 {
+	if x.Int(v, bitsize) {
+		panicv.errorf("int64 overflow: %v", v)
+	}
+	return v
+}
+func (x checkOverflow) SignedIntV(v uint64) int64 {
+	if x.SignedInt(v) {
+		panicv.errorf("uint64 to int64 overflow: %v", v)
+	}
+	return int64(v)
+}
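+
+// For illustration, hedged examples of the checks above:
+//
+// 	// var chk checkOverflow
+// 	// chk.Uint(255, 8) // false: 255 fits in 8 bits
+// 	// chk.Uint(256, 8) // true:  256 truncates to 0 in 8 bits
+// 	// chk.Int(-128, 8) // false: int8 holds -128..127
+// 	// chk.Int(128, 8)  // true:  128 wraps to -128 in 8 bits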
+
+// ------------------ SORT -----------------
+
+func isNaN(f float64) bool { return f != f }
+
+// -----------------------
+
+type ioFlusher interface {
+	Flush() error
+}
+
+type ioPeeker interface {
+	Peek(int) ([]byte, error)
+}
+
+type ioBuffered interface {
+	Buffered() int
+}
+
+// -----------------------
+
+type intSlice []int64
+type uintSlice []uint64
+
+// type uintptrSlice []uintptr
+type floatSlice []float64
+type boolSlice []bool
+type stringSlice []string
+
+// type bytesSlice [][]byte
+
+func (p intSlice) Len() int           { return len(p) }
+func (p intSlice) Less(i, j int) bool { return p[i] < p[j] }
+func (p intSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+
+func (p uintSlice) Len() int           { return len(p) }
+func (p uintSlice) Less(i, j int) bool { return p[i] < p[j] }
+func (p uintSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+
+// func (p uintptrSlice) Len() int           { return len(p) }
+// func (p uintptrSlice) Less(i, j int) bool { return p[i] < p[j] }
+// func (p uintptrSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+
+func (p floatSlice) Len() int { return len(p) }
+func (p floatSlice) Less(i, j int) bool {
+	return p[i] < p[j] || isNaN(p[i]) && !isNaN(p[j])
+}
+func (p floatSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func (p stringSlice) Len() int           { return len(p) }
+func (p stringSlice) Less(i, j int) bool { return p[i] < p[j] }
+func (p stringSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+
+// func (p bytesSlice) Len() int           { return len(p) }
+// func (p bytesSlice) Less(i, j int) bool { return bytes.Compare(p[i], p[j]) == -1 }
+// func (p bytesSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+
+func (p boolSlice) Len() int           { return len(p) }
+func (p boolSlice) Less(i, j int) bool { return !p[i] && p[j] }
+func (p boolSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+
+// ---------------------
+
+type intRv struct {
+	v int64
+	r reflect.Value
+}
+type intRvSlice []intRv
+type uintRv struct {
+	v uint64
+	r reflect.Value
+}
+type uintRvSlice []uintRv
+type floatRv struct {
+	v float64
+	r reflect.Value
+}
+type floatRvSlice []floatRv
+type boolRv struct {
+	v bool
+	r reflect.Value
+}
+type boolRvSlice []boolRv
+type stringRv struct {
+	v string
+	r reflect.Value
+}
+type stringRvSlice []stringRv
+type bytesRv struct {
+	v []byte
+	r reflect.Value
+}
+type bytesRvSlice []bytesRv
+type timeRv struct {
+	v time.Time
+	r reflect.Value
+}
+type timeRvSlice []timeRv
+
+func (p intRvSlice) Len() int           { return len(p) }
+func (p intRvSlice) Less(i, j int) bool { return p[i].v < p[j].v }
+func (p intRvSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+
+func (p uintRvSlice) Len() int           { return len(p) }
+func (p uintRvSlice) Less(i, j int) bool { return p[i].v < p[j].v }
+func (p uintRvSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+
+func (p floatRvSlice) Len() int { return len(p) }
+func (p floatRvSlice) Less(i, j int) bool {
+	return p[i].v < p[j].v || isNaN(p[i].v) && !isNaN(p[j].v)
+}
+func (p floatRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func (p stringRvSlice) Len() int           { return len(p) }
+func (p stringRvSlice) Less(i, j int) bool { return p[i].v < p[j].v }
+func (p stringRvSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+
+func (p bytesRvSlice) Len() int           { return len(p) }
+func (p bytesRvSlice) Less(i, j int) bool { return bytes.Compare(p[i].v, p[j].v) == -1 }
+func (p bytesRvSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+
+func (p boolRvSlice) Len() int           { return len(p) }
+func (p boolRvSlice) Less(i, j int) bool { return !p[i].v && p[j].v }
+func (p boolRvSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+
+func (p timeRvSlice) Len() int           { return len(p) }
+func (p timeRvSlice) Less(i, j int) bool { return p[i].v.Before(p[j].v) }
+func (p timeRvSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+
+// -----------------
+
+type bytesI struct {
+	v []byte
+	i interface{}
+}
+
+type bytesISlice []bytesI
+
+func (p bytesISlice) Len() int           { return len(p) }
+func (p bytesISlice) Less(i, j int) bool { return bytes.Compare(p[i].v, p[j].v) == -1 }
+func (p bytesISlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+
+// -----------------
+
+type set []uintptr
+
+func (s *set) add(v uintptr) (exists bool) {
+	// e.ci is always nil, or len >= 1
+	x := *s
+	if x == nil {
+		x = make([]uintptr, 1, 8)
+		x[0] = v
+		*s = x
+		return
+	}
+	// typically, length will be 1. make this perform.
+	if len(x) == 1 {
+		if j := x[0]; j == 0 {
+			x[0] = v
+		} else if j == v {
+			exists = true
+		} else {
+			x = append(x, v)
+			*s = x
+		}
+		return
+	}
+	// check if it exists
+	for _, j := range x {
+		if j == v {
+			exists = true
+			return
+		}
+	}
+	// try to replace a "deleted" slot
+	for i, j := range x {
+		if j == 0 {
+			x[i] = v
+			return
+		}
+	}
+	// if unable to replace deleted slot, just append it.
+	x = append(x, v)
+	*s = x
+	return
+}
+
+func (s *set) remove(v uintptr) (exists bool) {
+	x := *s
+	if len(x) == 0 {
+		return
+	}
+	if len(x) == 1 {
+		if x[0] == v {
+			x[0] = 0
+		}
+		return
+	}
+	for i, j := range x {
+		if j == v {
+			exists = true
+			x[i] = 0 // set it to 0, as way to delete it.
+			// copy(x[i:], x[i+1:])
+			// x = x[:len(x)-1]
+			return
+		}
+	}
+	return
+}
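+
+// For illustration, a hedged sketch of using set for cycle detection
+// (the surrounding encode logic is hypothetical):
+//
+// 	// var seen set
+// 	// if seen.add(rt2id(rt)) {
+// 	// 	// already in flight: a self-referencing value
+// 	// }
+// 	// defer seen.remove(rt2id(rt))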
+
+// ------
+
+// bitset types are better than [256]bool, because they permit the whole
+// bitset array being on a single cache line and use less memory.
+
+// given x > 0 and n > 0 and x is exactly 2^n, then pos/x === pos>>n AND pos%x === pos&(x-1).
+// consequently, pos/32 === pos>>5, pos/16 === pos>>4, pos/8 === pos>>3, pos%8 == pos&7
+
+type bitset256 [32]byte
+
+func (x *bitset256) isset(pos byte) bool {
+	return x[pos>>3]&(1<<(pos&7)) != 0
+}
+func (x *bitset256) issetv(pos byte) byte {
+	return x[pos>>3] & (1 << (pos & 7))
+}
+func (x *bitset256) set(pos byte) {
+	x[pos>>3] |= (1 << (pos & 7))
+}
+
+// func (x *bitset256) unset(pos byte) {
+// 	x[pos>>3] &^= (1 << (pos & 7))
+// }
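+
+// For illustration: pos>>3 picks the byte and pos&7 the bit within it, e.g.
+//
+// 	// var bs bitset256
+// 	// bs.set(77)   // 77>>3 == 9, 77&7 == 5: x[9] |= 1<<5 (0x20)
+// 	// bs.isset(77) // true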
+
+type bitset128 [16]byte
+
+func (x *bitset128) isset(pos byte) bool {
+	return x[pos>>3]&(1<<(pos&7)) != 0
+}
+func (x *bitset128) set(pos byte) {
+	x[pos>>3] |= (1 << (pos & 7))
+}
+
+// func (x *bitset128) unset(pos byte) {
+// 	x[pos>>3] &^= (1 << (pos & 7))
+// }
+
+type bitset32 [4]byte
+
+func (x *bitset32) isset(pos byte) bool {
+	return x[pos>>3]&(1<<(pos&7)) != 0
+}
+func (x *bitset32) set(pos byte) {
+	x[pos>>3] |= (1 << (pos & 7))
+}
+
+// func (x *bitset32) unset(pos byte) {
+// 	x[pos>>3] &^= (1 << (pos & 7))
+// }
+
+// type bit2set256 [64]byte
+
+// func (x *bit2set256) set(pos byte, v1, v2 bool) {
+// 	var pos2 uint8 = (pos & 3) << 1 // returning 0, 2, 4 or 6
+// 	if v1 {
+// 		x[pos>>2] |= 1 << (pos2 + 1)
+// 	}
+// 	if v2 {
+// 		x[pos>>2] |= 1 << pos2
+// 	}
+// }
+// func (x *bit2set256) get(pos byte) uint8 {
+// 	var pos2 uint8 = (pos & 3) << 1     // returning 0, 2, 4 or 6
+// 	return x[pos>>2] << (6 - pos2) >> 6 // 11000000 -> 00000011
+// }
+
+// ------------
+
+type pooler struct {
+	dn                                          sync.Pool // for decNaked
+	cfn                                         sync.Pool // for codecFner
+	tiload                                      sync.Pool
+	strRv8, strRv16, strRv32, strRv64, strRv128 sync.Pool // for stringRV
+}
+
+func (p *pooler) init() {
+	p.strRv8.New = func() interface{} { return new([8]stringRv) }
+	p.strRv16.New = func() interface{} { return new([16]stringRv) }
+	p.strRv32.New = func() interface{} { return new([32]stringRv) }
+	p.strRv64.New = func() interface{} { return new([64]stringRv) }
+	p.strRv128.New = func() interface{} { return new([128]stringRv) }
+	p.dn.New = func() interface{} { x := new(decNaked); x.init(); return x }
+	p.tiload.New = func() interface{} { return new(typeInfoLoadArray) }
+	p.cfn.New = func() interface{} { return new(codecFner) }
+}
+
+func (p *pooler) stringRv8() (sp *sync.Pool, v interface{}) {
+	return &p.strRv8, p.strRv8.Get()
+}
+func (p *pooler) stringRv16() (sp *sync.Pool, v interface{}) {
+	return &p.strRv16, p.strRv16.Get()
+}
+func (p *pooler) stringRv32() (sp *sync.Pool, v interface{}) {
+	return &p.strRv32, p.strRv32.Get()
+}
+func (p *pooler) stringRv64() (sp *sync.Pool, v interface{}) {
+	return &p.strRv64, p.strRv64.Get()
+}
+func (p *pooler) stringRv128() (sp *sync.Pool, v interface{}) {
+	return &p.strRv128, p.strRv128.Get()
+}
+func (p *pooler) decNaked() (sp *sync.Pool, v interface{}) {
+	return &p.dn, p.dn.Get()
+}
+func (p *pooler) codecFner() (sp *sync.Pool, v interface{}) {
+	return &p.cfn, p.cfn.Get()
+}
+func (p *pooler) tiLoad() (sp *sync.Pool, v interface{}) {
+	return &p.tiload, p.tiload.Get()
+}
+
+// func (p *pooler) decNaked() (v *decNaked, f func(*decNaked) ) {
+// 	sp := &(p.dn)
+// 	vv := sp.Get()
+// 	return vv.(*decNaked), func(x *decNaked) { sp.Put(vv) }
+// }
+// func (p *pooler) decNakedGet() (v interface{}) {
+// 	return p.dn.Get()
+// }
+// func (p *pooler) codecFnerGet() (v interface{}) {
+// 	return p.cfn.Get()
+// }
+// func (p *pooler) tiLoadGet() (v interface{}) {
+// 	return p.tiload.Get()
+// }
+// func (p *pooler) decNakedPut(v interface{}) {
+// 	p.dn.Put(v)
+// }
+// func (p *pooler) codecFnerPut(v interface{}) {
+// 	p.cfn.Put(v)
+// }
+// func (p *pooler) tiLoadPut(v interface{}) {
+// 	p.tiload.Put(v)
+// }
+
+type panicHdl struct{}
+
+func (panicHdl) errorv(err error) {
+	if err != nil {
+		panic(err)
+	}
+}
+
+func (panicHdl) errorstr(message string) {
+	if message != "" {
+		panic(message)
+	}
+}
+
+func (panicHdl) errorf(format string, params ...interface{}) {
+	if format != "" {
+		if len(params) == 0 {
+			panic(format)
+		} else {
+			panic(fmt.Sprintf(format, params...))
+		}
+	}
+}
+
+type errstrDecorator interface {
+	wrapErrstr(interface{}, *error)
+}
+
+type errstrDecoratorDef struct{}
+
+func (errstrDecoratorDef) wrapErrstr(v interface{}, e *error) { *e = fmt.Errorf("%v", v) }
+
+type must struct{}
+
+func (must) String(s string, err error) string {
+	if err != nil {
+		panicv.errorv(err)
+	}
+	return s
+}
+func (must) Int(s int64, err error) int64 {
+	if err != nil {
+		panicv.errorv(err)
+	}
+	return s
+}
+func (must) Uint(s uint64, err error) uint64 {
+	if err != nil {
+		panicv.errorv(err)
+	}
+	return s
+}
+func (must) Float(s float64, err error) float64 {
+	if err != nil {
+		panicv.errorv(err)
+	}
+	return s
+}
+
+// xdebugf prints the message in red on the terminal.
+// Use it in place of fmt.Printf (which it calls internally)
+func xdebugf(pattern string, args ...interface{}) {
+	var delim string
+	if len(pattern) > 0 && pattern[len(pattern)-1] != '\n' {
+		delim = "\n"
+	}
+	fmt.Printf("\033[1;31m"+pattern+delim+"\033[0m", args...)
+}
+
+// func isImmutableKind(k reflect.Kind) (v bool) {
+// 	return false ||
+// 		k == reflect.Int ||
+// 		k == reflect.Int8 ||
+// 		k == reflect.Int16 ||
+// 		k == reflect.Int32 ||
+// 		k == reflect.Int64 ||
+// 		k == reflect.Uint ||
+// 		k == reflect.Uint8 ||
+// 		k == reflect.Uint16 ||
+// 		k == reflect.Uint32 ||
+// 		k == reflect.Uint64 ||
+// 		k == reflect.Uintptr ||
+// 		k == reflect.Float32 ||
+// 		k == reflect.Float64 ||
+// 		k == reflect.Bool ||
+// 		k == reflect.String
+// }
+
+// func timeLocUTCName(tzint int16) string {
+// 	if tzint == 0 {
+// 		return "UTC"
+// 	}
+// 	var tzname = []byte("UTC+00:00")
+// 	//tzname := fmt.Sprintf("UTC%s%02d:%02d", tzsign, tz/60, tz%60) //perf issue using Sprintf. inline below.
+// 	//tzhr, tzmin := tz/60, tz%60 //faster if u convert to int first
+// 	var tzhr, tzmin int16
+// 	if tzint < 0 {
+// 		tzname[3] = '-' // (TODO: verify. this works here)
+// 		tzhr, tzmin = -tzint/60, (-tzint)%60
+// 	} else {
+// 		tzhr, tzmin = tzint/60, tzint%60
+// 	}
+// 	tzname[4] = timeDigits[tzhr/10]
+// 	tzname[5] = timeDigits[tzhr%10]
+// 	tzname[7] = timeDigits[tzmin/10]
+// 	tzname[8] = timeDigits[tzmin%10]
+// 	return string(tzname)
+// 	//return time.FixedZone(string(tzname), int(tzint)*60)
+// }
diff --git a/vendor/github.com/ugorji/go/codec/helper_internal.go b/vendor/github.com/ugorji/go/codec/helper_internal.go
new file mode 100644
index 0000000..0cbd665
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/helper_internal.go
@@ -0,0 +1,121 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+// All non-std package dependencies live in this file,
+// so porting to a different environment is easy (just update functions).
+
+func pruneSignExt(v []byte, pos bool) (n int) {
+	if len(v) < 2 {
+	} else if pos && v[0] == 0 {
+		for ; v[n] == 0 && n+1 < len(v) && (v[n+1]&(1<<7) == 0); n++ {
+		}
+	} else if !pos && v[0] == 0xff {
+		for ; v[n] == 0xff && n+1 < len(v) && (v[n+1]&(1<<7) != 0); n++ {
+		}
+	}
+	return
+}
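+
+// For illustration, hedged examples: pruneSignExt counts leading bytes that
+// are pure sign extension and thus redundant.
+//
+// 	// pruneSignExt([]byte{0x00, 0x00, 0x7f}, true)  // == 2: both 0x00 bytes are redundant
+// 	// pruneSignExt([]byte{0xff, 0xff, 0x80}, false) // == 2: 0xffff80 == -128, one byte suffices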
+
+// validate that this function is correct ...
+// culled from OGRE (Object-Oriented Graphics Rendering Engine)
+// function: halfToFloatI (http://stderr.org/doc/ogre-doc/api/OgreBitwise_8h-source.html)
+func halfFloatToFloatBits(yy uint16) (d uint32) {
+	y := uint32(yy)
+	s := (y >> 15) & 0x01
+	e := (y >> 10) & 0x1f
+	m := y & 0x03ff
+
+	if e == 0 {
+		if m == 0 { // plus or minus 0
+			return s << 31
+		}
+		// Denormalized number -- renormalize it
+		for (m & 0x00000400) == 0 {
+			m <<= 1
+			e -= 1
+		}
+		e += 1
+		const zz uint32 = 0x0400
+		m &= ^zz
+	} else if e == 31 {
+		if m == 0 { // Inf
+			return (s << 31) | 0x7f800000
+		}
+		return (s << 31) | 0x7f800000 | (m << 13) // NaN
+	}
+	e = e + (127 - 15)
+	m = m << 13
+	return (s << 31) | (e << 23) | m
+}
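+
+// For illustration, hedged examples (IEEE 754 half -> single bit patterns):
+//
+// 	// halfFloatToFloatBits(0x3c00) == 0x3f800000 // 1.0
+// 	// halfFloatToFloatBits(0xc000) == 0xc0000000 // -2.0
+// 	// halfFloatToFloatBits(0x7c00) == 0x7f800000 // +Inf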
+
+// growCap returns a new capacity for a slice, given the following:
+//   - oldCap: current capacity
+//   - unit: in-memory size of an element
+//   - num: number of elements to add
+func growCap(oldCap, unit, num int) (newCap int) {
+	// appendslice logic (if cap < 1024, *2, else *1.25):
+	//   leads to many copy calls, especially when copying bytes.
+	//   bytes.Buffer model (2*cap + n): much better for bytes.
+	// smarter way is to take the byte-size of the appended element(type) into account
+
+	// maintain 3 thresholds:
+	// t1: if cap <= t1, newcap = 2x
+	// t2: if cap <= t2, newcap = 1.75x
+	// t3: if cap <= t3, newcap = 1.5x
+	//     else          newcap = 1.25x
+	//
+	// t1, t2, t3 >= 1024 always.
+	// i.e. if unit size >= 16, then always do 2x or 1.25x (i.e. t1, t2, t3 are all the same)
+	//
+	// With this, appending for bytes increase by:
+	//    100% up to 4K
+	//     75% up to 8K
+	//     50% up to 16K
+	//     25% beyond that
+
+	// unit can be 0 e.g. for struct{}{}; handle that appropriately
+	var t1, t2, t3 int // thresholds
+	if unit <= 1 {
+		t1, t2, t3 = 4*1024, 8*1024, 16*1024
+	} else if unit < 16 {
+		t3 = 16 / unit * 1024
+		t1 = t3 * 1 / 4
+		t2 = t3 * 2 / 4
+	} else {
+		t1, t2, t3 = 1024, 1024, 1024
+	}
+
+	var x int // temporary variable
+
+	// x is multiplier here: one of 5, 6, 7 or 8; incr of 25%, 50%, 75% or 100% respectively
+	if oldCap <= t1 { // [0,t1]
+		x = 8
+	} else if oldCap > t3 { // (t3,infinity]
+		x = 5
+	} else if oldCap <= t2 { // (t1,t2]
+		x = 7
+	} else { // (t2,t3]
+		x = 6
+	}
+	newCap = x * oldCap / 4
+
+	if num > 0 {
+		newCap += num
+	}
+
+	// ensure newCap is a multiple of 64 (if it is > 64) or 16.
+	if newCap > 64 {
+		if x = newCap % 64; x != 0 {
+			x = newCap / 64
+			newCap = 64 * (x + 1)
+		}
+	} else {
+		if x = newCap % 16; x != 0 {
+			x = newCap / 16
+			newCap = 16 * (x + 1)
+		}
+	}
+	return
+}
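+
+// For illustration, hedged worked examples with unit=1 (bytes):
+//
+// 	// growCap(4096, 1, 0)  == 8192  // oldCap <= t1 (4K): grow 100%
+// 	// growCap(10000, 1, 0) == 15040 // in (t2,t3]: grow 50% (15000), rounded up to a multiple of 64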
diff --git a/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go b/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go
new file mode 100644
index 0000000..fd52690
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go
@@ -0,0 +1,272 @@
+// +build !go1.7 safe appengine
+
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+	"reflect"
+	"sync/atomic"
+	"time"
+)
+
+const safeMode = true
+
+// stringView returns a view of the []byte as a string.
+// In unsafe mode, it doesn't incur allocation and copying caused by conversion.
+// In regular safe mode, it is an allocation and copy.
+//
+// Usage: Always maintain a reference to v while the result of this call is in use,
+//        and call keepAlive4StringView(v) at the point where you are done with the view.
+func stringView(v []byte) string {
+	return string(v)
+}
+
+// bytesView returns a view of the string as a []byte.
+// In unsafe mode, it doesn't incur allocation and copying caused by conversion.
+// In regular safe mode, it is an allocation and copy.
+//
+// Usage: Always maintain a reference to v while the result of this call is in use,
+//        and call keepAlive4BytesView(v) at the point where you are done with the view.
+func bytesView(v string) []byte {
+	return []byte(v)
+}
+
+func definitelyNil(v interface{}) bool {
+	// this is a best-effort option.
+	// We just return false, so we don't unnecessarily incur the cost of reflection this early.
+	return false
+}
+
+func rv2i(rv reflect.Value) interface{} {
+	return rv.Interface()
+}
+
+func rt2id(rt reflect.Type) uintptr {
+	return reflect.ValueOf(rt).Pointer()
+}
+
+func rv2rtid(rv reflect.Value) uintptr {
+	return reflect.ValueOf(rv.Type()).Pointer()
+}
+
+func i2rtid(i interface{}) uintptr {
+	return reflect.ValueOf(reflect.TypeOf(i)).Pointer()
+}
+
+// --------------------------
+
+func isEmptyValue(v reflect.Value, tinfos *TypeInfos, deref, checkStruct bool) bool {
+	switch v.Kind() {
+	case reflect.Invalid:
+		return true
+	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+		return v.Len() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Interface, reflect.Ptr:
+		if deref {
+			if v.IsNil() {
+				return true
+			}
+			return isEmptyValue(v.Elem(), tinfos, deref, checkStruct)
+		}
+		return v.IsNil()
+	case reflect.Struct:
+		return isEmptyStruct(v, tinfos, deref, checkStruct)
+	}
+	return false
+}
+
+// --------------------------
+// type ptrToRvMap struct{}
+
+// func (*ptrToRvMap) init() {}
+// func (*ptrToRvMap) get(i interface{}) reflect.Value {
+// 	return reflect.ValueOf(i).Elem()
+// }
+
+// --------------------------
+type atomicTypeInfoSlice struct { // expected to be 2 words
+	v atomic.Value
+}
+
+func (x *atomicTypeInfoSlice) load() []rtid2ti {
+	i := x.v.Load()
+	if i == nil {
+		return nil
+	}
+	return i.([]rtid2ti)
+}
+
+func (x *atomicTypeInfoSlice) store(p []rtid2ti) {
+	x.v.Store(p)
+}
+
+// --------------------------
+func (d *Decoder) raw(f *codecFnInfo, rv reflect.Value) {
+	rv.SetBytes(d.rawBytes())
+}
+
+func (d *Decoder) kString(f *codecFnInfo, rv reflect.Value) {
+	rv.SetString(d.d.DecodeString())
+}
+
+func (d *Decoder) kBool(f *codecFnInfo, rv reflect.Value) {
+	rv.SetBool(d.d.DecodeBool())
+}
+
+func (d *Decoder) kTime(f *codecFnInfo, rv reflect.Value) {
+	rv.Set(reflect.ValueOf(d.d.DecodeTime()))
+}
+
+func (d *Decoder) kFloat32(f *codecFnInfo, rv reflect.Value) {
+	fv := d.d.DecodeFloat64()
+	if chkOvf.Float32(fv) {
+		d.errorf("float32 overflow: %v", fv)
+	}
+	rv.SetFloat(fv)
+}
+
+func (d *Decoder) kFloat64(f *codecFnInfo, rv reflect.Value) {
+	rv.SetFloat(d.d.DecodeFloat64())
+}
+
+func (d *Decoder) kInt(f *codecFnInfo, rv reflect.Value) {
+	rv.SetInt(chkOvf.IntV(d.d.DecodeInt64(), intBitsize))
+}
+
+func (d *Decoder) kInt8(f *codecFnInfo, rv reflect.Value) {
+	rv.SetInt(chkOvf.IntV(d.d.DecodeInt64(), 8))
+}
+
+func (d *Decoder) kInt16(f *codecFnInfo, rv reflect.Value) {
+	rv.SetInt(chkOvf.IntV(d.d.DecodeInt64(), 16))
+}
+
+func (d *Decoder) kInt32(f *codecFnInfo, rv reflect.Value) {
+	rv.SetInt(chkOvf.IntV(d.d.DecodeInt64(), 32))
+}
+
+func (d *Decoder) kInt64(f *codecFnInfo, rv reflect.Value) {
+	rv.SetInt(d.d.DecodeInt64())
+}
+
+func (d *Decoder) kUint(f *codecFnInfo, rv reflect.Value) {
+	rv.SetUint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize))
+}
+
+func (d *Decoder) kUintptr(f *codecFnInfo, rv reflect.Value) {
+	rv.SetUint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize))
+}
+
+func (d *Decoder) kUint8(f *codecFnInfo, rv reflect.Value) {
+	rv.SetUint(chkOvf.UintV(d.d.DecodeUint64(), 8))
+}
+
+func (d *Decoder) kUint16(f *codecFnInfo, rv reflect.Value) {
+	rv.SetUint(chkOvf.UintV(d.d.DecodeUint64(), 16))
+}
+
+func (d *Decoder) kUint32(f *codecFnInfo, rv reflect.Value) {
+	rv.SetUint(chkOvf.UintV(d.d.DecodeUint64(), 32))
+}
+
+func (d *Decoder) kUint64(f *codecFnInfo, rv reflect.Value) {
+	rv.SetUint(d.d.DecodeUint64())
+}
+
+// ----------------
+
+func (e *Encoder) kBool(f *codecFnInfo, rv reflect.Value) {
+	e.e.EncodeBool(rv.Bool())
+}
+
+func (e *Encoder) kTime(f *codecFnInfo, rv reflect.Value) {
+	e.e.EncodeTime(rv2i(rv).(time.Time))
+}
+
+func (e *Encoder) kString(f *codecFnInfo, rv reflect.Value) {
+	e.e.EncodeString(cUTF8, rv.String())
+}
+
+func (e *Encoder) kFloat64(f *codecFnInfo, rv reflect.Value) {
+	e.e.EncodeFloat64(rv.Float())
+}
+
+func (e *Encoder) kFloat32(f *codecFnInfo, rv reflect.Value) {
+	e.e.EncodeFloat32(float32(rv.Float()))
+}
+
+func (e *Encoder) kInt(f *codecFnInfo, rv reflect.Value) {
+	e.e.EncodeInt(rv.Int())
+}
+
+func (e *Encoder) kInt8(f *codecFnInfo, rv reflect.Value) {
+	e.e.EncodeInt(rv.Int())
+}
+
+func (e *Encoder) kInt16(f *codecFnInfo, rv reflect.Value) {
+	e.e.EncodeInt(rv.Int())
+}
+
+func (e *Encoder) kInt32(f *codecFnInfo, rv reflect.Value) {
+	e.e.EncodeInt(rv.Int())
+}
+
+func (e *Encoder) kInt64(f *codecFnInfo, rv reflect.Value) {
+	e.e.EncodeInt(rv.Int())
+}
+
+func (e *Encoder) kUint(f *codecFnInfo, rv reflect.Value) {
+	e.e.EncodeUint(rv.Uint())
+}
+
+func (e *Encoder) kUint8(f *codecFnInfo, rv reflect.Value) {
+	e.e.EncodeUint(rv.Uint())
+}
+
+func (e *Encoder) kUint16(f *codecFnInfo, rv reflect.Value) {
+	e.e.EncodeUint(rv.Uint())
+}
+
+func (e *Encoder) kUint32(f *codecFnInfo, rv reflect.Value) {
+	e.e.EncodeUint(rv.Uint())
+}
+
+func (e *Encoder) kUint64(f *codecFnInfo, rv reflect.Value) {
+	e.e.EncodeUint(rv.Uint())
+}
+
+func (e *Encoder) kUintptr(f *codecFnInfo, rv reflect.Value) {
+	e.e.EncodeUint(rv.Uint())
+}
+
+// // keepAlive4BytesView maintains a reference to the input parameter for bytesView.
+// //
+// // Usage: call this at point where done with the bytes view.
+// func keepAlive4BytesView(v string) {}
+
+// // keepAlive4StringView maintains a reference to the input parameter for stringView.
+// //
+// // Usage: call this at point where done with the string view.
+// func keepAlive4StringView(v []byte) {}
+
+// func definitelyNil(v interface{}) bool {
+// 	rv := reflect.ValueOf(v)
+// 	switch rv.Kind() {
+// 	case reflect.Invalid:
+// 		return true
+// 	case reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Slice, reflect.Map, reflect.Func:
+// 		return rv.IsNil()
+// 	default:
+// 		return false
+// 	}
+// }
diff --git a/vendor/github.com/ugorji/go/codec/helper_unsafe.go b/vendor/github.com/ugorji/go/codec/helper_unsafe.go
new file mode 100644
index 0000000..e3df60a
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/helper_unsafe.go
@@ -0,0 +1,639 @@
+// +build !safe
+// +build !appengine
+// +build go1.7
+
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+	"reflect"
+	"sync/atomic"
+	"time"
+	"unsafe"
+)
+
+// This file has unsafe variants of some helper methods.
+// NOTE: See helper_not_unsafe.go for the usage information.
+
+// var zeroRTv [4]uintptr
+
+const safeMode = false
+const unsafeFlagIndir = 1 << 7 // keep in sync with GO_ROOT/src/reflect/value.go
+
+type unsafeString struct {
+	Data unsafe.Pointer
+	Len  int
+}
+
+type unsafeSlice struct {
+	Data unsafe.Pointer
+	Len  int
+	Cap  int
+}
+
+type unsafeIntf struct {
+	typ  unsafe.Pointer
+	word unsafe.Pointer
+}
+
+type unsafeReflectValue struct {
+	typ  unsafe.Pointer
+	ptr  unsafe.Pointer
+	flag uintptr
+}
+
+func stringView(v []byte) string {
+	if len(v) == 0 {
+		return ""
+	}
+	bx := (*unsafeSlice)(unsafe.Pointer(&v))
+	return *(*string)(unsafe.Pointer(&unsafeString{bx.Data, bx.Len}))
+}
+
+func bytesView(v string) []byte {
+	if len(v) == 0 {
+		return zeroByteSlice
+	}
+	sx := (*unsafeString)(unsafe.Pointer(&v))
+	return *(*[]byte)(unsafe.Pointer(&unsafeSlice{sx.Data, sx.Len, sx.Len}))
+}
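+
+// For illustration, a hedged sketch of the zero-copy contract: the view
+// aliases v's memory, so v must stay alive and unmodified while the view is used.
+//
+// 	// b := []byte("hello")
+// 	// s := stringView(b) // "hello", sharing b's backing array (no copy)
+// 	// b[0] = 'H'         // would also change what s observes - avoid while s is live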
+
+func definitelyNil(v interface{}) bool {
+	// There is no global way of checking if an interface is nil.
+	// For true references (map, ptr, func, chan), you can just look
+	// at the word of the interface. However, for slices, you have to dereference
+	// the word, and get a pointer to the 3-word slice value.
+	//
+	// For reference, the relative costs of the candidate approaches:
+	// - TypeOf(interface): cheap 2-line call.
+	// - ValueOf(interface{}): expensive.
+	// - type.Kind: cheap call through an interface.
+	// - Value.Type(): cheap call,
+	//   except when it's a method value (e.g. r.Read, which implies that it is a Func).
+
+	return ((*unsafeIntf)(unsafe.Pointer(&v))).word == nil
+}
+
+func rv2i(rv reflect.Value) interface{} {
+	// TODO: consider a more generally-known optimization for reflect.Value ==> Interface
+	//
+	// Currently, we use this fragile method that taps into implementation details from
+	// the Go stdlib source (reflect/value.go), and trims the implementation.
+
+	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	// true references (map, func, chan, ptr - NOT slice) may be double-referenced as flagIndir
+	var ptr unsafe.Pointer
+	if refBitset.isset(byte(urv.flag&(1<<5-1))) && urv.flag&unsafeFlagIndir != 0 {
+		ptr = *(*unsafe.Pointer)(urv.ptr)
+	} else {
+		ptr = urv.ptr
+	}
+	return *(*interface{})(unsafe.Pointer(&unsafeIntf{typ: urv.typ, word: ptr}))
+}
+
+func rt2id(rt reflect.Type) uintptr {
+	return uintptr(((*unsafeIntf)(unsafe.Pointer(&rt))).word)
+}
+
+func rv2rtid(rv reflect.Value) uintptr {
+	return uintptr((*unsafeReflectValue)(unsafe.Pointer(&rv)).typ)
+}
+
+func i2rtid(i interface{}) uintptr {
+	return uintptr(((*unsafeIntf)(unsafe.Pointer(&i))).typ)
+}
+
+// --------------------------
+
+func isEmptyValue(v reflect.Value, tinfos *TypeInfos, deref, checkStruct bool) bool {
+	urv := (*unsafeReflectValue)(unsafe.Pointer(&v))
+	if urv.flag == 0 {
+		return true
+	}
+	switch v.Kind() {
+	case reflect.Invalid:
+		return true
+	case reflect.String:
+		return (*unsafeString)(urv.ptr).Len == 0
+	case reflect.Slice:
+		return (*unsafeSlice)(urv.ptr).Len == 0
+	case reflect.Bool:
+		return !*(*bool)(urv.ptr)
+	case reflect.Int:
+		return *(*int)(urv.ptr) == 0
+	case reflect.Int8:
+		return *(*int8)(urv.ptr) == 0
+	case reflect.Int16:
+		return *(*int16)(urv.ptr) == 0
+	case reflect.Int32:
+		return *(*int32)(urv.ptr) == 0
+	case reflect.Int64:
+		return *(*int64)(urv.ptr) == 0
+	case reflect.Uint:
+		return *(*uint)(urv.ptr) == 0
+	case reflect.Uint8:
+		return *(*uint8)(urv.ptr) == 0
+	case reflect.Uint16:
+		return *(*uint16)(urv.ptr) == 0
+	case reflect.Uint32:
+		return *(*uint32)(urv.ptr) == 0
+	case reflect.Uint64:
+		return *(*uint64)(urv.ptr) == 0
+	case reflect.Uintptr:
+		return *(*uintptr)(urv.ptr) == 0
+	case reflect.Float32:
+		return *(*float32)(urv.ptr) == 0
+	case reflect.Float64:
+		return *(*float64)(urv.ptr) == 0
+	case reflect.Interface:
+		isnil := urv.ptr == nil || *(*unsafe.Pointer)(urv.ptr) == nil
+		if deref {
+			if isnil {
+				return true
+			}
+			return isEmptyValue(v.Elem(), tinfos, deref, checkStruct)
+		}
+		return isnil
+	case reflect.Ptr:
+		// isnil := urv.ptr == nil (not sufficient, as a pointer value encodes the type)
+		isnil := urv.ptr == nil || *(*unsafe.Pointer)(urv.ptr) == nil
+		if deref {
+			if isnil {
+				return true
+			}
+			return isEmptyValue(v.Elem(), tinfos, deref, checkStruct)
+		}
+		return isnil
+	case reflect.Struct:
+		return isEmptyStruct(v, tinfos, deref, checkStruct)
+	case reflect.Map, reflect.Array, reflect.Chan:
+		return v.Len() == 0
+	}
+	return false
+}
+
+// --------------------------
+
+// atomicTypeInfoSlice contains length and pointer to the array for a slice.
+// It is expected to be 2 words.
+//
+// Previously, we atomically loaded and stored the length and array pointer separately,
+// which could lead to some races.
+// We now just atomically store and load the pointer to the value directly.
+
+type atomicTypeInfoSlice struct { // expected to be 2 words
+	l int            // length of the data array (must be first in struct, for 64-bit alignment necessary for 386)
+	v unsafe.Pointer // data array - Pointer (not uintptr) to maintain GC reference
+}
+
+func (x *atomicTypeInfoSlice) load() []rtid2ti {
+	xp := unsafe.Pointer(x)
+	x2 := *(*atomicTypeInfoSlice)(atomic.LoadPointer(&xp))
+	if x2.l == 0 {
+		return nil
+	}
+	return *(*[]rtid2ti)(unsafe.Pointer(&unsafeSlice{Data: x2.v, Len: x2.l, Cap: x2.l}))
+}
+
+func (x *atomicTypeInfoSlice) store(p []rtid2ti) {
+	s := (*unsafeSlice)(unsafe.Pointer(&p))
+	xp := unsafe.Pointer(x)
+	atomic.StorePointer(&xp, unsafe.Pointer(&atomicTypeInfoSlice{l: s.Len, v: s.Data}))
+}
+
+// --------------------------
+func (d *Decoder) raw(f *codecFnInfo, rv reflect.Value) {
+	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	*(*[]byte)(urv.ptr) = d.rawBytes()
+}
+
+func (d *Decoder) kString(f *codecFnInfo, rv reflect.Value) {
+	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	*(*string)(urv.ptr) = d.d.DecodeString()
+}
+
+func (d *Decoder) kBool(f *codecFnInfo, rv reflect.Value) {
+	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	*(*bool)(urv.ptr) = d.d.DecodeBool()
+}
+
+func (d *Decoder) kTime(f *codecFnInfo, rv reflect.Value) {
+	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	*(*time.Time)(urv.ptr) = d.d.DecodeTime()
+}
+
+func (d *Decoder) kFloat32(f *codecFnInfo, rv reflect.Value) {
+	fv := d.d.DecodeFloat64()
+	if chkOvf.Float32(fv) {
+		d.errorf("float32 overflow: %v", fv)
+	}
+	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	*(*float32)(urv.ptr) = float32(fv)
+}
+
+func (d *Decoder) kFloat64(f *codecFnInfo, rv reflect.Value) {
+	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	*(*float64)(urv.ptr) = d.d.DecodeFloat64()
+}
+
+func (d *Decoder) kInt(f *codecFnInfo, rv reflect.Value) {
+	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	*(*int)(urv.ptr) = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize))
+}
+
+func (d *Decoder) kInt8(f *codecFnInfo, rv reflect.Value) {
+	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	*(*int8)(urv.ptr) = int8(chkOvf.IntV(d.d.DecodeInt64(), 8))
+}
+
+func (d *Decoder) kInt16(f *codecFnInfo, rv reflect.Value) {
+	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	*(*int16)(urv.ptr) = int16(chkOvf.IntV(d.d.DecodeInt64(), 16))
+}
+
+func (d *Decoder) kInt32(f *codecFnInfo, rv reflect.Value) {
+	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	*(*int32)(urv.ptr) = int32(chkOvf.IntV(d.d.DecodeInt64(), 32))
+}
+
+func (d *Decoder) kInt64(f *codecFnInfo, rv reflect.Value) {
+	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	*(*int64)(urv.ptr) = d.d.DecodeInt64()
+}
+
+func (d *Decoder) kUint(f *codecFnInfo, rv reflect.Value) {
+	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	*(*uint)(urv.ptr) = uint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize))
+}
+
+func (d *Decoder) kUintptr(f *codecFnInfo, rv reflect.Value) {
+	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	*(*uintptr)(urv.ptr) = uintptr(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize))
+}
+
+func (d *Decoder) kUint8(f *codecFnInfo, rv reflect.Value) {
+	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	*(*uint8)(urv.ptr) = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8))
+}
+
+func (d *Decoder) kUint16(f *codecFnInfo, rv reflect.Value) {
+	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	*(*uint16)(urv.ptr) = uint16(chkOvf.UintV(d.d.DecodeUint64(), 16))
+}
+
+func (d *Decoder) kUint32(f *codecFnInfo, rv reflect.Value) {
+	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	*(*uint32)(urv.ptr) = uint32(chkOvf.UintV(d.d.DecodeUint64(), 32))
+}
+
+func (d *Decoder) kUint64(f *codecFnInfo, rv reflect.Value) {
+	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	*(*uint64)(urv.ptr) = d.d.DecodeUint64()
+}
+
+// ------------
+
+func (e *Encoder) kBool(f *codecFnInfo, rv reflect.Value) {
+	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	e.e.EncodeBool(*(*bool)(v.ptr))
+}
+
+func (e *Encoder) kTime(f *codecFnInfo, rv reflect.Value) {
+	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	e.e.EncodeTime(*(*time.Time)(v.ptr))
+}
+
+func (e *Encoder) kString(f *codecFnInfo, rv reflect.Value) {
+	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	e.e.EncodeString(cUTF8, *(*string)(v.ptr))
+}
+
+func (e *Encoder) kFloat64(f *codecFnInfo, rv reflect.Value) {
+	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	e.e.EncodeFloat64(*(*float64)(v.ptr))
+}
+
+func (e *Encoder) kFloat32(f *codecFnInfo, rv reflect.Value) {
+	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	e.e.EncodeFloat32(*(*float32)(v.ptr))
+}
+
+func (e *Encoder) kInt(f *codecFnInfo, rv reflect.Value) {
+	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	e.e.EncodeInt(int64(*(*int)(v.ptr)))
+}
+
+func (e *Encoder) kInt8(f *codecFnInfo, rv reflect.Value) {
+	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	e.e.EncodeInt(int64(*(*int8)(v.ptr)))
+}
+
+func (e *Encoder) kInt16(f *codecFnInfo, rv reflect.Value) {
+	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	e.e.EncodeInt(int64(*(*int16)(v.ptr)))
+}
+
+func (e *Encoder) kInt32(f *codecFnInfo, rv reflect.Value) {
+	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	e.e.EncodeInt(int64(*(*int32)(v.ptr)))
+}
+
+func (e *Encoder) kInt64(f *codecFnInfo, rv reflect.Value) {
+	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	e.e.EncodeInt(int64(*(*int64)(v.ptr)))
+}
+
+func (e *Encoder) kUint(f *codecFnInfo, rv reflect.Value) {
+	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	e.e.EncodeUint(uint64(*(*uint)(v.ptr)))
+}
+
+func (e *Encoder) kUint8(f *codecFnInfo, rv reflect.Value) {
+	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	e.e.EncodeUint(uint64(*(*uint8)(v.ptr)))
+}
+
+func (e *Encoder) kUint16(f *codecFnInfo, rv reflect.Value) {
+	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	e.e.EncodeUint(uint64(*(*uint16)(v.ptr)))
+}
+
+func (e *Encoder) kUint32(f *codecFnInfo, rv reflect.Value) {
+	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	e.e.EncodeUint(uint64(*(*uint32)(v.ptr)))
+}
+
+func (e *Encoder) kUint64(f *codecFnInfo, rv reflect.Value) {
+	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	e.e.EncodeUint(uint64(*(*uint64)(v.ptr)))
+}
+
+func (e *Encoder) kUintptr(f *codecFnInfo, rv reflect.Value) {
+	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	e.e.EncodeUint(uint64(*(*uintptr)(v.ptr)))
+}
+
+// ------------
+
+// func (d *Decoder) raw(f *codecFnInfo, rv reflect.Value) {
+// 	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+// 	// if urv.flag&unsafeFlagIndir != 0 {
+// 	// 	urv.ptr = *(*unsafe.Pointer)(urv.ptr)
+// 	// }
+// 	*(*[]byte)(urv.ptr) = d.rawBytes()
+// }
+
+// func rv0t(rt reflect.Type) reflect.Value {
+// 	ut := (*unsafeIntf)(unsafe.Pointer(&rt))
+// 	// we need to determine whether ifaceIndir, and then whether to just pass 0 as the ptr
+// 	uv := unsafeReflectValue{ut.word, &zeroRTv, flag(rt.Kind())}
+// 	return *(*reflect.Value)(unsafe.Pointer(&uv))
+// }
+
+// func rv2i(rv reflect.Value) interface{} {
+// 	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+// 	// true references (map, func, chan, ptr - NOT slice) may be double-referenced as flagIndir
+// 	var ptr unsafe.Pointer
+// 	// kk := reflect.Kind(urv.flag & (1<<5 - 1))
+// 	// if (kk == reflect.Map || kk == reflect.Ptr || kk == reflect.Chan || kk == reflect.Func) && urv.flag&unsafeFlagIndir != 0 {
+// 	if refBitset.isset(byte(urv.flag&(1<<5-1))) && urv.flag&unsafeFlagIndir != 0 {
+// 		ptr = *(*unsafe.Pointer)(urv.ptr)
+// 	} else {
+// 		ptr = urv.ptr
+// 	}
+// 	return *(*interface{})(unsafe.Pointer(&unsafeIntf{typ: urv.typ, word: ptr}))
+// 	// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ}))
+// 	// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ}))
+// }
+
+// func definitelyNil(v interface{}) bool {
+// 	var ui *unsafeIntf = (*unsafeIntf)(unsafe.Pointer(&v))
+// 	if ui.word == nil {
+// 		return true
+// 	}
+// 	var tk = reflect.TypeOf(v).Kind()
+// 	return (tk == reflect.Interface || tk == reflect.Slice) && *(*unsafe.Pointer)(ui.word) == nil
+// 	fmt.Printf(">>>> definitely nil: isnil: %v, TYPE: \t%T, word: %v, *word: %v, type: %v, nil: %v\n",
+// 	v == nil, v, word, *((*unsafe.Pointer)(word)), ui.typ, nil)
+// }
+
+// func keepAlive4BytesView(v string) {
+// 	runtime.KeepAlive(v)
+// }
+
+// func keepAlive4StringView(v []byte) {
+// 	runtime.KeepAlive(v)
+// }
+
+// func rt2id(rt reflect.Type) uintptr {
+// 	return uintptr(((*unsafeIntf)(unsafe.Pointer(&rt))).word)
+// 	// var i interface{} = rt
+// 	// // ui := (*unsafeIntf)(unsafe.Pointer(&i))
+// 	// return ((*unsafeIntf)(unsafe.Pointer(&i))).word
+// }
+
+// func rv2i(rv reflect.Value) interface{} {
+// 	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+// 	// non-reference type: already indir
+// 	// reference type: depend on flagIndir property ('cos maybe was double-referenced)
+// 	// const (unsafeRvFlagKindMask    = 1<<5 - 1 , unsafeRvFlagIndir       = 1 << 7 )
+// 	// rvk := reflect.Kind(urv.flag & (1<<5 - 1))
+// 	// if (rvk == reflect.Chan ||
+// 	// 	rvk == reflect.Func ||
+// 	// 	rvk == reflect.Interface ||
+// 	// 	rvk == reflect.Map ||
+// 	// 	rvk == reflect.Ptr ||
+// 	// 	rvk == reflect.UnsafePointer) && urv.flag&(1<<8) != 0 {
+// 	// 	fmt.Printf(">>>>> ---- double indirect reference: %v, %v\n", rvk, rv.Type())
+// 	// 	return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ}))
+// 	// }
+// 	if urv.flag&(1<<5-1) == uintptr(reflect.Map) && urv.flag&(1<<7) != 0 {
+// 		// fmt.Printf(">>>>> ---- double indirect reference: %v, %v\n", rvk, rv.Type())
+// 		return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ}))
+// 	}
+// 	// fmt.Printf(">>>>> ++++ direct reference: %v, %v\n", rvk, rv.Type())
+// 	return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ}))
+// }
+
+// const (
+// 	unsafeRvFlagKindMask    = 1<<5 - 1
+// 	unsafeRvKindDirectIface = 1 << 5
+// 	unsafeRvFlagIndir       = 1 << 7
+// 	unsafeRvFlagAddr        = 1 << 8
+// 	unsafeRvFlagMethod      = 1 << 9
+
+// 	_USE_RV_INTERFACE bool = false
+// 	_UNSAFE_RV_DEBUG       = true
+// )
+
+// type unsafeRtype struct {
+// 	_    [2]uintptr
+// 	_    uint32
+// 	_    uint8
+// 	_    uint8
+// 	_    uint8
+// 	kind uint8
+// 	_    [2]uintptr
+// 	_    int32
+// }
+
+// func _rv2i(rv reflect.Value) interface{} {
+// 	// Note: From use,
+// 	//   - it's never an interface
+// 	//   - the only calls here are for ifaceIndir types.
+// 	//     (though that conditional is wrong)
+// 	//     To know for sure, we need the value of t.kind (which is not exposed).
+// 	//
+// 	// Need to validate the path: type is indirect ==> only value is indirect ==> default (value is direct)
+// 	//    - Type indirect, Value indirect: ==> numbers, boolean, slice, struct, array, string
+// 	//    - Type Direct,   Value indirect: ==> map???
+// 	//    - Type Direct,   Value direct:   ==> pointers, unsafe.Pointer, func, chan, map
+// 	//
+// 	// TRANSLATES TO:
+// 	//    if typeIndirect { } else if valueIndirect { } else { }
+// 	//
+// 	// Since we don't deal with funcs, then "flagMethod" is unset, and can be ignored.
+
+// 	if _USE_RV_INTERFACE {
+// 		return rv.Interface()
+// 	}
+// 	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+
+// 	// if urv.flag&unsafeRvFlagMethod != 0 || urv.flag&unsafeRvFlagKindMask == uintptr(reflect.Interface) {
+// 	// 	println("***** IS flag method or interface: delegating to rv.Interface()")
+// 	// 	return rv.Interface()
+// 	// }
+
+// 	// if urv.flag&unsafeRvFlagKindMask == uintptr(reflect.Interface) {
+// 	// 	println("***** IS Interface: delegate to rv.Interface")
+// 	// 	return rv.Interface()
+// 	// }
+// 	// if urv.flag&unsafeRvFlagKindMask&unsafeRvKindDirectIface == 0 {
+// 	// 	if urv.flag&unsafeRvFlagAddr == 0 {
+// 	// 		println("***** IS ifaceIndir typ")
+// 	// 		// ui := unsafeIntf{word: urv.ptr, typ: urv.typ}
+// 	// 		// return *(*interface{})(unsafe.Pointer(&ui))
+// 	// 		// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ}))
+// 	// 	}
+// 	// } else if urv.flag&unsafeRvFlagIndir != 0 {
+// 	// 	println("***** IS flagindir")
+// 	// 	// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ}))
+// 	// } else {
+// 	// 	println("***** NOT flagindir")
+// 	// 	return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ}))
+// 	// }
+// 	// println("***** default: delegate to rv.Interface")
+
+// 	urt := (*unsafeRtype)(unsafe.Pointer(urv.typ))
+// 	if _UNSAFE_RV_DEBUG {
+// 		fmt.Printf(">>>> start: %v: ", rv.Type())
+// 		fmt.Printf("%v - %v\n", *urv, *urt)
+// 	}
+// 	if urt.kind&unsafeRvKindDirectIface == 0 {
+// 		if _UNSAFE_RV_DEBUG {
+// 			fmt.Printf("**** +ifaceIndir type: %v\n", rv.Type())
+// 		}
+// 		// println("***** IS ifaceIndir typ")
+// 		// if true || urv.flag&unsafeRvFlagAddr == 0 {
+// 		// 	// println("    ***** IS NOT addr")
+// 		return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ}))
+// 		// }
+// 	} else if urv.flag&unsafeRvFlagIndir != 0 {
+// 		if _UNSAFE_RV_DEBUG {
+// 			fmt.Printf("**** +flagIndir type: %v\n", rv.Type())
+// 		}
+// 		// println("***** IS flagindir")
+// 		return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ}))
+// 	} else {
+// 		if _UNSAFE_RV_DEBUG {
+// 			fmt.Printf("**** -flagIndir type: %v\n", rv.Type())
+// 		}
+// 		// println("***** NOT flagindir")
+// 		return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ}))
+// 	}
+// 	// println("***** default: delegating to rv.Interface()")
+// 	// return rv.Interface()
+// }
+
+// var staticM0 = make(map[string]uint64)
+// var staticI0 = (int32)(-5)
+
+// func staticRv2iTest() {
+// 	i0 := (int32)(-5)
+// 	m0 := make(map[string]uint16)
+// 	m0["1"] = 1
+// 	for _, i := range []interface{}{
+// 		(int)(7),
+// 		(uint)(8),
+// 		(int16)(-9),
+// 		(uint16)(19),
+// 		(uintptr)(77),
+// 		(bool)(true),
+// 		float32(-32.7),
+// 		float64(64.9),
+// 		complex(float32(19), 5),
+// 		complex(float64(-32), 7),
+// 		[4]uint64{1, 2, 3, 4},
+// 		(chan<- int)(nil), // chan,
+// 		rv2i,              // func
+// 		io.Writer(ioutil.Discard),
+// 		make(map[string]uint),
+// 		(map[string]uint)(nil),
+// 		staticM0,
+// 		m0,
+// 		&m0,
+// 		i0,
+// 		&i0,
+// 		&staticI0,
+// 		&staticM0,
+// 		[]uint32{6, 7, 8},
+// 		"abc",
+// 		Raw{},
+// 		RawExt{},
+// 		&Raw{},
+// 		&RawExt{},
+// 		unsafe.Pointer(&i0),
+// 	} {
+// 		i2 := rv2i(reflect.ValueOf(i))
+// 		eq := reflect.DeepEqual(i, i2)
+// 		fmt.Printf(">>>> %v == %v? %v\n", i, i2, eq)
+// 	}
+// 	// os.Exit(0)
+// }
+
+// func init() {
+// 	staticRv2iTest()
+// }
+
+// func rv2i(rv reflect.Value) interface{} {
+// 	if _USE_RV_INTERFACE || rv.Kind() == reflect.Interface || rv.CanAddr() {
+// 		return rv.Interface()
+// 	}
+// 	// var i interface{}
+// 	// ui := (*unsafeIntf)(unsafe.Pointer(&i))
+// 	var ui unsafeIntf
+// 	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+// 	// fmt.Printf("urv: flag: %b, typ: %b, ptr: %b\n", urv.flag, uintptr(urv.typ), uintptr(urv.ptr))
+// 	if (urv.flag&unsafeRvFlagKindMask)&unsafeRvKindDirectIface == 0 {
+// 		if urv.flag&unsafeRvFlagAddr != 0 {
+// 			println("***** indirect and addressable! Needs typed move - delegate to rv.Interface()")
+// 			return rv.Interface()
+// 		}
+// 		println("****** indirect type/kind")
+// 		ui.word = urv.ptr
+// 	} else if urv.flag&unsafeRvFlagIndir != 0 {
+// 		println("****** unsafe rv flag indir")
+// 		ui.word = *(*unsafe.Pointer)(urv.ptr)
+// 	} else {
+// 		println("****** default: assign prt to word directly")
+// 		ui.word = urv.ptr
+// 	}
+// 	// ui.word = urv.ptr
+// 	ui.typ = urv.typ
+// 	// fmt.Printf("(pointers) ui.typ: %p, word: %p\n", ui.typ, ui.word)
+// 	// fmt.Printf("(binary)   ui.typ: %b, word: %b\n", uintptr(ui.typ), uintptr(ui.word))
+// 	return *(*interface{})(unsafe.Pointer(&ui))
+// 	// return i
+// }
diff --git a/vendor/github.com/ugorji/go/codec/json.go b/vendor/github.com/ugorji/go/codec/json.go
new file mode 100644
index 0000000..bdd1996
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/json.go
@@ -0,0 +1,1423 @@
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+// By default, this json support uses base64 encoding for bytes, because you cannot
+// store and read any arbitrary string in json (only unicode).
+// However, the user can configure how to encode/decode bytes.
+//
+// This library specifically supports UTF-8 for encoding and decoding only.
+//
+// Note that the library will happily encode/decode things which are not valid
+// json, e.g. a map[int64]string. We do it for consistency. With valid json,
+// we will encode and decode appropriately.
+// Users can specify their map type if necessary to force it.
+//
+// Note:
+//   - we cannot use strconv.Quote and strconv.Unquote because json quotes/unquotes differently.
+//     We implement it here.
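+//
+// A hedged usage sketch of the default base64 handling
+// (the input bytes and resulting output shown are illustrative):
+//
+//	var h JsonHandle
+//	var out []byte
+//	_ = NewEncoderBytes(&out, &h).Encode([]byte{0xff, 0xfe})
+//	// out is now `"//4="` - the bytes, base64-encoded inside a json string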
+
+// Top-level methods of json(Enc|Dec)Driver (which are implementations of (en|de)cDriver)
+// MUST not call one another.
+
+import (
+	"bytes"
+	"encoding/base64"
+	"math"
+	"reflect"
+	"strconv"
+	"time"
+	"unicode"
+	"unicode/utf16"
+	"unicode/utf8"
+)
+
+//--------------------------------
+
+var jsonLiterals = [...]byte{
+	'"', 't', 'r', 'u', 'e', '"',
+	'"', 'f', 'a', 'l', 's', 'e', '"',
+	'"', 'n', 'u', 'l', 'l', '"',
+}
+
+const (
+	jsonLitTrueQ  = 0
+	jsonLitTrue   = 1
+	jsonLitFalseQ = 6
+	jsonLitFalse  = 7
+	jsonLitNullQ  = 13
+	jsonLitNull   = 14
+)
+
+const (
+	jsonU4Chk2 = '0'
+	jsonU4Chk1 = 'a' - 10
+	jsonU4Chk0 = 'A' - 10
+
+	jsonScratchArrayLen = 64
+)
+
+const (
+	// If !jsonValidateSymbols, decoding will be faster, by skipping some checks:
+	//   - If we see first character of null, false or true,
+	//     do not validate subsequent characters.
+	//   - e.g. if we see a n, assume null and skip next 3 characters,
+	//     and do not validate they are ull.
+	// P.S. Do not expect a significant decoding boost from this.
+	jsonValidateSymbols = true
+
+	jsonSpacesOrTabsLen = 128
+
+	jsonAlwaysReturnInternString = false
+)
+
+var (
+	// jsonTabs and jsonSpaces are used as caches for indents
+	jsonTabs, jsonSpaces [jsonSpacesOrTabsLen]byte
+
+	jsonCharHtmlSafeSet   bitset128
+	jsonCharSafeSet       bitset128
+	jsonCharWhitespaceSet bitset256
+	jsonNumSet            bitset256
+)
+
+func init() {
+	for i := 0; i < jsonSpacesOrTabsLen; i++ {
+		jsonSpaces[i] = ' '
+		jsonTabs[i] = '\t'
+	}
+
+	// populate the safe values as true: note: ASCII control characters are (0-31)
+	// jsonCharSafeSet:     all true except (0-31) " \
+	// jsonCharHtmlSafeSet: all true except (0-31) " \ < > &
+	var i byte
+	for i = 32; i < utf8.RuneSelf; i++ {
+		switch i {
+		case '"', '\\':
+		case '<', '>', '&':
+			jsonCharSafeSet.set(i) // = true
+		default:
+			jsonCharSafeSet.set(i)
+			jsonCharHtmlSafeSet.set(i)
+		}
+	}
+	for i = 0; i <= utf8.RuneSelf; i++ {
+		switch i {
+		case ' ', '\t', '\r', '\n':
+			jsonCharWhitespaceSet.set(i)
+		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'e', 'E', '.', '+', '-':
+			jsonNumSet.set(i)
+		}
+	}
+}
+
+// ----------------
+
+type jsonEncDriverTypical struct {
+	w encWriter
+	// w  *encWriterSwitch
+	b  *[jsonScratchArrayLen]byte
+	tw bool // term white space
+	c  containerState
+}
+
+func (e *jsonEncDriverTypical) typical() {}
+
+func (e *jsonEncDriverTypical) reset(ee *jsonEncDriver) {
+	e.w = ee.ew
+	// e.w = &ee.e.encWriterSwitch
+	e.b = &ee.b
+	e.tw = ee.h.TermWhitespace
+	e.c = 0
+}
+
+func (e *jsonEncDriverTypical) WriteArrayStart(length int) {
+	e.w.writen1('[')
+	e.c = containerArrayStart
+}
+
+func (e *jsonEncDriverTypical) WriteArrayElem() {
+	if e.c != containerArrayStart {
+		e.w.writen1(',')
+	}
+	e.c = containerArrayElem
+}
+
+func (e *jsonEncDriverTypical) WriteArrayEnd() {
+	e.w.writen1(']')
+	e.c = containerArrayEnd
+}
+
+func (e *jsonEncDriverTypical) WriteMapStart(length int) {
+	e.w.writen1('{')
+	e.c = containerMapStart
+}
+
+func (e *jsonEncDriverTypical) WriteMapElemKey() {
+	if e.c != containerMapStart {
+		e.w.writen1(',')
+	}
+	e.c = containerMapKey
+}
+
+func (e *jsonEncDriverTypical) WriteMapElemValue() {
+	e.w.writen1(':')
+	e.c = containerMapValue
+}
+
+func (e *jsonEncDriverTypical) WriteMapEnd() {
+	e.w.writen1('}')
+	e.c = containerMapEnd
+}
+
+func (e *jsonEncDriverTypical) EncodeBool(b bool) {
+	if b {
+		e.w.writeb(jsonLiterals[jsonLitTrue : jsonLitTrue+4])
+	} else {
+		e.w.writeb(jsonLiterals[jsonLitFalse : jsonLitFalse+5])
+	}
+}
+
+func (e *jsonEncDriverTypical) EncodeFloat64(f float64) {
+	fmt, prec := jsonFloatStrconvFmtPrec(f)
+	e.w.writeb(strconv.AppendFloat(e.b[:0], f, fmt, prec, 64))
+}
+
+func (e *jsonEncDriverTypical) EncodeInt(v int64) {
+	e.w.writeb(strconv.AppendInt(e.b[:0], v, 10))
+}
+
+func (e *jsonEncDriverTypical) EncodeUint(v uint64) {
+	e.w.writeb(strconv.AppendUint(e.b[:0], v, 10))
+}
+
+func (e *jsonEncDriverTypical) EncodeFloat32(f float32) {
+	e.EncodeFloat64(float64(f))
+}
+
+func (e *jsonEncDriverTypical) atEndOfEncode() {
+	if e.tw {
+		e.w.writen1(' ')
+	}
+}
+
+// ----------------
+
+type jsonEncDriverGeneric struct {
+	w encWriter // encWriter // *encWriterSwitch
+	b *[jsonScratchArrayLen]byte
+	c containerState
+	// ds string // indent string
+	di int8    // indent per
+	d  bool    // indenting?
+	dt bool    // indent using tabs
+	dl uint16  // indent level
+	ks bool    // map key as string
+	is byte    // integer as string
+	tw bool    // term white space
+	_  [7]byte // padding
+}
+
+// indent is done as below:
+//   - newline and indent are added before each mapKey or arrayElem
+//   - newline and indent are added before each ending,
+//     except there was no entry (so we can have {} or [])
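+//
+// For example (illustrative), encoding map[string]int{"a": 1} with Indent: 2 yields:
+//
+//	{
+//	  "a": 1
+//	}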
+
+func (e *jsonEncDriverGeneric) reset(ee *jsonEncDriver) {
+	e.w = ee.ew
+	e.b = &ee.b
+	e.tw = ee.h.TermWhitespace
+	e.c = 0
+	e.d, e.dt, e.dl, e.di = false, false, 0, 0
+	h := ee.h
+	if h.Indent > 0 {
+		e.d = true
+		e.di = int8(h.Indent)
+	} else if h.Indent < 0 {
+		e.d = true
+		e.dt = true
+		e.di = int8(-h.Indent)
+	}
+	e.ks = h.MapKeyAsString
+	e.is = h.IntegerAsString
+}
+
+func (e *jsonEncDriverGeneric) WriteArrayStart(length int) {
+	if e.d {
+		e.dl++
+	}
+	e.w.writen1('[')
+	e.c = containerArrayStart
+}
+
+func (e *jsonEncDriverGeneric) WriteArrayElem() {
+	if e.c != containerArrayStart {
+		e.w.writen1(',')
+	}
+	if e.d {
+		e.writeIndent()
+	}
+	e.c = containerArrayElem
+}
+
+func (e *jsonEncDriverGeneric) WriteArrayEnd() {
+	if e.d {
+		e.dl--
+		if e.c != containerArrayStart {
+			e.writeIndent()
+		}
+	}
+	e.w.writen1(']')
+	e.c = containerArrayEnd
+}
+
+func (e *jsonEncDriverGeneric) WriteMapStart(length int) {
+	if e.d {
+		e.dl++
+	}
+	e.w.writen1('{')
+	e.c = containerMapStart
+}
+
+func (e *jsonEncDriverGeneric) WriteMapElemKey() {
+	if e.c != containerMapStart {
+		e.w.writen1(',')
+	}
+	if e.d {
+		e.writeIndent()
+	}
+	e.c = containerMapKey
+}
+
+func (e *jsonEncDriverGeneric) WriteMapElemValue() {
+	if e.d {
+		e.w.writen2(':', ' ')
+	} else {
+		e.w.writen1(':')
+	}
+	e.c = containerMapValue
+}
+
+func (e *jsonEncDriverGeneric) WriteMapEnd() {
+	if e.d {
+		e.dl--
+		if e.c != containerMapStart {
+			e.writeIndent()
+		}
+	}
+	e.w.writen1('}')
+	e.c = containerMapEnd
+}
+
+func (e *jsonEncDriverGeneric) writeIndent() {
+	e.w.writen1('\n')
+	x := int(e.di) * int(e.dl)
+	if e.dt {
+		for x > jsonSpacesOrTabsLen {
+			e.w.writeb(jsonTabs[:])
+			x -= jsonSpacesOrTabsLen
+		}
+		e.w.writeb(jsonTabs[:x])
+	} else {
+		for x > jsonSpacesOrTabsLen {
+			e.w.writeb(jsonSpaces[:])
+			x -= jsonSpacesOrTabsLen
+		}
+		e.w.writeb(jsonSpaces[:x])
+	}
+}
+
+func (e *jsonEncDriverGeneric) EncodeBool(b bool) {
+	if e.ks && e.c == containerMapKey {
+		if b {
+			e.w.writeb(jsonLiterals[jsonLitTrueQ : jsonLitTrueQ+6])
+		} else {
+			e.w.writeb(jsonLiterals[jsonLitFalseQ : jsonLitFalseQ+7])
+		}
+	} else {
+		if b {
+			e.w.writeb(jsonLiterals[jsonLitTrue : jsonLitTrue+4])
+		} else {
+			e.w.writeb(jsonLiterals[jsonLitFalse : jsonLitFalse+5])
+		}
+	}
+}
+
+func (e *jsonEncDriverGeneric) EncodeFloat64(f float64) {
+	// instead of using 'g', specify whether to use 'e' or 'f'
+	fmt, prec := jsonFloatStrconvFmtPrec(f)
+
+	var blen int
+	if e.ks && e.c == containerMapKey {
+		blen = 2 + len(strconv.AppendFloat(e.b[1:1], f, fmt, prec, 64))
+		e.b[0] = '"'
+		e.b[blen-1] = '"'
+	} else {
+		blen = len(strconv.AppendFloat(e.b[:0], f, fmt, prec, 64))
+	}
+	e.w.writeb(e.b[:blen])
+}
+
+func (e *jsonEncDriverGeneric) EncodeInt(v int64) {
+	x := e.is
+	if x == 'A' || x == 'L' && (v > 1<<53 || v < -(1<<53)) || (e.ks && e.c == containerMapKey) {
+		blen := 2 + len(strconv.AppendInt(e.b[1:1], v, 10))
+		e.b[0] = '"'
+		e.b[blen-1] = '"'
+		e.w.writeb(e.b[:blen])
+		return
+	}
+	e.w.writeb(strconv.AppendInt(e.b[:0], v, 10))
+}
+
+func (e *jsonEncDriverGeneric) EncodeUint(v uint64) {
+	x := e.is
+	if x == 'A' || x == 'L' && v > 1<<53 || (e.ks && e.c == containerMapKey) {
+		blen := 2 + len(strconv.AppendUint(e.b[1:1], v, 10))
+		e.b[0] = '"'
+		e.b[blen-1] = '"'
+		e.w.writeb(e.b[:blen])
+		return
+	}
+	e.w.writeb(strconv.AppendUint(e.b[:0], v, 10))
+}
+
+func (e *jsonEncDriverGeneric) EncodeFloat32(f float32) {
+	// e.encodeFloat(float64(f), 32)
+	// always encode all floats as IEEE 64-bit floating point.
+	// It also ensures that we can decode in full precision even if into a float32,
+	// as what is written is always to float64 precision.
+	e.EncodeFloat64(float64(f))
+}
+
+func (e *jsonEncDriverGeneric) atEndOfEncode() {
+	if e.tw {
+		if e.d {
+			e.w.writen1('\n')
+		} else {
+			e.w.writen1(' ')
+		}
+	}
+}
+
+// --------------------
+
+type jsonEncDriver struct {
+	noBuiltInTypes
+	e  *Encoder
+	h  *JsonHandle
+	ew encWriter // encWriter // *encWriterSwitch
+	se extWrapper
+	// ---- cpu cache line boundary?
+	bs []byte // scratch
+	// ---- cpu cache line boundary?
+	b [jsonScratchArrayLen]byte // scratch (encode time, number, etc.)
+}
+
+func (e *jsonEncDriver) EncodeNil() {
+	// We always encode nil as just null (never in quotes).
+	// This allows us to easily detect a nil in the json stream:
+	// ie the initial token is n.
+	e.ew.writeb(jsonLiterals[jsonLitNull : jsonLitNull+4])
+
+	// if e.h.MapKeyAsString && e.c == containerMapKey {
+	// 	e.ew.writeb(jsonLiterals[jsonLitNullQ : jsonLitNullQ+6])
+	// } else {
+	// 	e.ew.writeb(jsonLiterals[jsonLitNull : jsonLitNull+4])
+	// }
+}
+
+func (e *jsonEncDriver) EncodeTime(t time.Time) {
+	// Do NOT use MarshalJSON, as it allocates internally.
+	// Instead, we call AppendFormat directly, using our scratch buffer (e.b).
+	if t.IsZero() {
+		e.EncodeNil()
+	} else {
+		e.b[0] = '"'
+		b := t.AppendFormat(e.b[1:1], time.RFC3339Nano)
+		e.b[len(b)+1] = '"'
+		e.ew.writeb(e.b[:len(b)+2])
+	}
+	// v, err := t.MarshalJSON(); if err != nil { e.e.error(err) } e.ew.writeb(v)
+}
+
+func (e *jsonEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, en *Encoder) {
+	if v := ext.ConvertExt(rv); v == nil {
+		e.EncodeNil()
+	} else {
+		en.encode(v)
+	}
+}
+
+func (e *jsonEncDriver) EncodeRawExt(re *RawExt, en *Encoder) {
+	// only encodes re.Value (never re.Data)
+	if re.Value == nil {
+		e.EncodeNil()
+	} else {
+		en.encode(re.Value)
+	}
+}
+
+func (e *jsonEncDriver) EncodeString(c charEncoding, v string) {
+	e.quoteStr(v)
+}
+
+func (e *jsonEncDriver) EncodeStringBytes(c charEncoding, v []byte) {
+	// if encoding raw bytes and RawBytesExt is configured, use it to encode
+	if v == nil {
+		e.EncodeNil()
+		return
+	}
+	if c == cRAW {
+		if e.se.InterfaceExt != nil {
+			e.EncodeExt(v, 0, &e.se, e.e)
+			return
+		}
+
+		slen := base64.StdEncoding.EncodedLen(len(v))
+		if cap(e.bs) >= slen+2 {
+			e.bs = e.bs[:slen+2]
+		} else {
+			e.bs = make([]byte, slen+2)
+		}
+		e.bs[0] = '"'
+		base64.StdEncoding.Encode(e.bs[1:], v)
+		e.bs[slen+1] = '"'
+		e.ew.writeb(e.bs)
+	} else {
+		e.quoteStr(stringView(v))
+	}
+}
+
+func (e *jsonEncDriver) EncodeAsis(v []byte) {
+	e.ew.writeb(v)
+}
+
+func (e *jsonEncDriver) quoteStr(s string) {
+	// adapted from std pkg encoding/json
+	const hex = "0123456789abcdef"
+	w := e.ew
+	htmlasis := e.h.HTMLCharsAsIs
+	w.writen1('"')
+	var start int
+	for i, slen := 0, len(s); i < slen; {
+		// encode all bytes < 0x20 (except \r, \n).
+		// also encode < > & to prevent security holes when served to some browsers.
+		if b := s[i]; b < utf8.RuneSelf {
+			// if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' {
+			// if (htmlasis && jsonCharSafeSet.isset(b)) || jsonCharHtmlSafeSet.isset(b) {
+			if jsonCharHtmlSafeSet.isset(b) || (htmlasis && jsonCharSafeSet.isset(b)) {
+				i++
+				continue
+			}
+			if start < i {
+				w.writestr(s[start:i])
+			}
+			switch b {
+			case '\\', '"':
+				w.writen2('\\', b)
+			case '\n':
+				w.writen2('\\', 'n')
+			case '\r':
+				w.writen2('\\', 'r')
+			case '\b':
+				w.writen2('\\', 'b')
+			case '\f':
+				w.writen2('\\', 'f')
+			case '\t':
+				w.writen2('\\', 't')
+			default:
+				w.writestr(`\u00`)
+				w.writen2(hex[b>>4], hex[b&0xF])
+			}
+			i++
+			start = i
+			continue
+		}
+		c, size := utf8.DecodeRuneInString(s[i:])
+		if c == utf8.RuneError && size == 1 {
+			if start < i {
+				w.writestr(s[start:i])
+			}
+			w.writestr(`\ufffd`)
+			i += size
+			start = i
+			continue
+		}
+		// U+2028 is LINE SEPARATOR. U+2029 is PARAGRAPH SEPARATOR.
+		// Both technically valid JSON, but bomb on JSONP, so fix here unconditionally.
+		if c == '\u2028' || c == '\u2029' {
+			if start < i {
+				w.writestr(s[start:i])
+			}
+			w.writestr(`\u202`)
+			w.writen1(hex[c&0xF])
+			i += size
+			start = i
+			continue
+		}
+		i += size
+	}
+	if start < len(s) {
+		w.writestr(s[start:])
+	}
+	w.writen1('"')
+}
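+
+// For example (illustrative): with HTMLCharsAsIs unset, quoteStr("a<b") writes
+// the 10 bytes `"a\u003cb"`, while a literal newline in the input is written
+// using the short escape \n.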
+
+type jsonDecDriver struct {
+	noBuiltInTypes
+	d  *Decoder
+	h  *JsonHandle
+	r  decReader // *decReaderSwitch // decReader
+	se extWrapper
+
+	// ---- writable fields during execution --- *try* to keep in sep cache line
+
+	c containerState
+	// tok is used to store the token read right after skipWhiteSpace.
+	tok   uint8
+	fnull bool    // found null from appendStringAsBytes
+	bs    []byte  // scratch. Initialized from b. Used for parsing strings or numbers.
+	bstr  [8]byte // scratch used for string \UXXX parsing
+	// ---- cpu cache line boundary?
+	b  [jsonScratchArrayLen]byte // scratch 1, used for parsing strings or numbers or time.Time
+	b2 [jsonScratchArrayLen]byte // scratch 2, used only for readUntil, decNumBytes
+
+	_ [3]uint64 // padding
+	// n jsonNum
+}
+
+// func jsonIsWS(b byte) bool {
+// 	// return b == ' ' || b == '\t' || b == '\r' || b == '\n'
+// 	return jsonCharWhitespaceSet.isset(b)
+// }
+
+func (d *jsonDecDriver) uncacheRead() {
+	if d.tok != 0 {
+		d.r.unreadn1()
+		d.tok = 0
+	}
+}
+
+func (d *jsonDecDriver) ReadMapStart() int {
+	if d.tok == 0 {
+		d.tok = d.r.skip(&jsonCharWhitespaceSet)
+	}
+	const xc uint8 = '{'
+	if d.tok != xc {
+		d.d.errorf("read map - expect char '%c' but got char '%c'", xc, d.tok)
+	}
+	d.tok = 0
+	d.c = containerMapStart
+	return -1
+}
+
+func (d *jsonDecDriver) ReadArrayStart() int {
+	if d.tok == 0 {
+		d.tok = d.r.skip(&jsonCharWhitespaceSet)
+	}
+	const xc uint8 = '['
+	if d.tok != xc {
+		d.d.errorf("read array - expect char '%c' but got char '%c'", xc, d.tok)
+	}
+	d.tok = 0
+	d.c = containerArrayStart
+	return -1
+}
+
+func (d *jsonDecDriver) CheckBreak() bool {
+	if d.tok == 0 {
+		d.tok = d.r.skip(&jsonCharWhitespaceSet)
+	}
+	return d.tok == '}' || d.tok == ']'
+}
+
+// For the ReadXXX methods below, we could just delegate to helper functions
+// readContainerState(c containerState, xc uint8, check bool)
+// - ReadArrayElem would become:
+//   readContainerState(containerArrayElem, ',', d.c != containerArrayStart)
+//
+// However, until mid-stack inlining arrives in go1.11 (which supports inlining of
+// one-liners), we explicitly write out all 5 of them to elide the extra func call.
+//
+// TODO: For Go 1.11, if inlined, consider consolidating these.
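+//
+// A hedged sketch of what that consolidation could look like
+// (readContainerState is hypothetical; it is not defined in this file):
+//
+//	func (d *jsonDecDriver) readContainerState(c containerState, xc uint8, check bool) {
+//		if d.tok == 0 {
+//			d.tok = d.r.skip(&jsonCharWhitespaceSet)
+//		}
+//		if check {
+//			if d.tok != xc {
+//				d.d.errorf("expect char '%c' but got char '%c'", xc, d.tok)
+//			}
+//			d.tok = 0
+//		}
+//		d.c = c
+//	}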
+
+func (d *jsonDecDriver) ReadArrayElem() {
+	const xc uint8 = ','
+	if d.tok == 0 {
+		d.tok = d.r.skip(&jsonCharWhitespaceSet)
+	}
+	if d.c != containerArrayStart {
+		if d.tok != xc {
+			d.d.errorf("read array element - expect char '%c' but got char '%c'", xc, d.tok)
+		}
+		d.tok = 0
+	}
+	d.c = containerArrayElem
+}
+
+func (d *jsonDecDriver) ReadArrayEnd() {
+	const xc uint8 = ']'
+	if d.tok == 0 {
+		d.tok = d.r.skip(&jsonCharWhitespaceSet)
+	}
+	if d.tok != xc {
+		d.d.errorf("read array end - expect char '%c' but got char '%c'", xc, d.tok)
+	}
+	d.tok = 0
+	d.c = containerArrayEnd
+}
+
+func (d *jsonDecDriver) ReadMapElemKey() {
+	const xc uint8 = ','
+	if d.tok == 0 {
+		d.tok = d.r.skip(&jsonCharWhitespaceSet)
+	}
+	if d.c != containerMapStart {
+		if d.tok != xc {
+			d.d.errorf("read map key - expect char '%c' but got char '%c'", xc, d.tok)
+		}
+		d.tok = 0
+	}
+	d.c = containerMapKey
+}
+
+func (d *jsonDecDriver) ReadMapElemValue() {
+	const xc uint8 = ':'
+	if d.tok == 0 {
+		d.tok = d.r.skip(&jsonCharWhitespaceSet)
+	}
+	if d.tok != xc {
+		d.d.errorf("read map value - expect char '%c' but got char '%c'", xc, d.tok)
+	}
+	d.tok = 0
+	d.c = containerMapValue
+}
+
+func (d *jsonDecDriver) ReadMapEnd() {
+	const xc uint8 = '}'
+	if d.tok == 0 {
+		d.tok = d.r.skip(&jsonCharWhitespaceSet)
+	}
+	if d.tok != xc {
+		d.d.errorf("read map end - expect char '%c' but got char '%c'", xc, d.tok)
+	}
+	d.tok = 0
+	d.c = containerMapEnd
+}
+
+func (d *jsonDecDriver) readLit(length, fromIdx uint8) {
+	bs := d.r.readx(int(length))
+	d.tok = 0
+	if jsonValidateSymbols && !bytes.Equal(bs, jsonLiterals[fromIdx:fromIdx+length]) {
+		d.d.errorf("expecting %s: got %s", jsonLiterals[fromIdx:fromIdx+length], bs)
+		return
+	}
+}
+
+func (d *jsonDecDriver) TryDecodeAsNil() bool {
+	if d.tok == 0 {
+		d.tok = d.r.skip(&jsonCharWhitespaceSet)
+	}
+	// we shouldn't check whether the quoted string "null" is here.
+	// only the bare token `null` (ie not in quotes) denotes a nil.
+	if d.tok == 'n' {
+		d.readLit(3, jsonLitNull+1) // (n)ull
+		return true
+	}
+	return false
+}
+
+func (d *jsonDecDriver) DecodeBool() (v bool) {
+	if d.tok == 0 {
+		d.tok = d.r.skip(&jsonCharWhitespaceSet)
+	}
+	fquot := d.c == containerMapKey && d.tok == '"'
+	if fquot {
+		d.tok = d.r.readn1()
+	}
+	switch d.tok {
+	case 'f':
+		d.readLit(4, jsonLitFalse+1) // (f)alse
+		// v = false
+	case 't':
+		d.readLit(3, jsonLitTrue+1) // (t)rue
+		v = true
+	default:
+		d.d.errorf("decode bool: got first char %c", d.tok)
+		// v = false // "unreachable"
+	}
+	if fquot {
+		d.r.readn1()
+	}
+	return
+}
+
+func (d *jsonDecDriver) DecodeTime() (t time.Time) {
+	// read the string, and parse it as an RFC3339 time
+	d.appendStringAsBytes()
+	if d.fnull {
+		return
+	}
+	t, err := time.Parse(time.RFC3339, stringView(d.bs))
+	if err != nil {
+		d.d.errorv(err)
+	}
+	return
+}
+
+func (d *jsonDecDriver) ContainerType() (vt valueType) {
+	// check container type by checking the first char
+	if d.tok == 0 {
+		d.tok = d.r.skip(&jsonCharWhitespaceSet)
+	}
+
+	// optimize this, so we don't do 4 checks but do one computation.
+	// return jsonContainerSet[d.tok]
+
+	// ContainerType is mostly called for Map and Array,
+	// so this conditional is good enough (max 2 checks typically)
+	if b := d.tok; b == '{' {
+		return valueTypeMap
+	} else if b == '[' {
+		return valueTypeArray
+	} else if b == 'n' {
+		return valueTypeNil
+	} else if b == '"' {
+		return valueTypeString
+	}
+	return valueTypeUnset
+}
+
+func (d *jsonDecDriver) decNumBytes() (bs []byte) {
+	// stores num bytes in d.bs
+	if d.tok == 0 {
+		d.tok = d.r.skip(&jsonCharWhitespaceSet)
+	}
+	if d.tok == '"' {
+		bs = d.r.readUntil(d.b2[:0], '"')
+		bs = bs[:len(bs)-1]
+	} else {
+		d.r.unreadn1()
+		bs = d.r.readTo(d.bs[:0], &jsonNumSet)
+	}
+	d.tok = 0
+	return bs
+}
+
+func (d *jsonDecDriver) DecodeUint64() (u uint64) {
+	bs := d.decNumBytes()
+	n, neg, badsyntax, overflow := jsonParseInteger(bs)
+	if overflow {
+		d.d.errorf("overflow parsing unsigned integer: %s", bs)
+	} else if neg {
+		d.d.errorf("minus found parsing unsigned integer: %s", bs)
+	} else if badsyntax {
+		// fallback: try to decode as float, and cast
+		n = d.decUint64ViaFloat(stringView(bs))
+	}
+	return n
+}
+
+func (d *jsonDecDriver) DecodeInt64() (i int64) {
+	const cutoff = uint64(1 << uint(64-1))
+	bs := d.decNumBytes()
+	n, neg, badsyntax, overflow := jsonParseInteger(bs)
+	if overflow {
+		d.d.errorf("overflow parsing integer: %s", bs)
+	} else if badsyntax {
+		// d.d.errorf("invalid syntax for integer: %s", bs)
+		// fallback: try to decode as float, and cast
+		if neg {
+			n = d.decUint64ViaFloat(stringView(bs[1:]))
+		} else {
+			n = d.decUint64ViaFloat(stringView(bs))
+		}
+	}
+	if neg {
+		if n > cutoff {
+			d.d.errorf("overflow parsing integer: %s", bs)
+		}
+		i = -(int64(n))
+	} else {
+		if n >= cutoff {
+			d.d.errorf("overflow parsing integer: %s", bs)
+		}
+		i = int64(n)
+	}
+	return
+}
+
+func (d *jsonDecDriver) decUint64ViaFloat(s string) (u uint64) {
+	f, err := strconv.ParseFloat(s, 64)
+	if err != nil {
+		d.d.errorf("invalid syntax for integer: %s", s)
+		// d.d.errorv(err)
+	}
+	fi, ff := math.Modf(f)
+	if ff > 0 {
+		d.d.errorf("fractional part found parsing integer: %s", s)
+	} else if fi > float64(math.MaxUint64) {
+		d.d.errorf("overflow parsing integer: %s", s)
+	}
+	return uint64(fi)
+}
+
+func (d *jsonDecDriver) DecodeFloat64() (f float64) {
+	bs := d.decNumBytes()
+	f, err := strconv.ParseFloat(stringView(bs), 64)
+	if err != nil {
+		d.d.errorv(err)
+	}
+	return
+}
+
+func (d *jsonDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
+	if ext == nil {
+		re := rv.(*RawExt)
+		re.Tag = xtag
+		d.d.decode(&re.Value)
+	} else {
+		var v interface{}
+		d.d.decode(&v)
+		ext.UpdateExt(rv, v)
+	}
+	return
+}
+
+func (d *jsonDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) {
+	// if decoding into raw bytes, and the RawBytesExt is configured, use it to decode.
+	if d.se.InterfaceExt != nil {
+		bsOut = bs
+		d.DecodeExt(&bsOut, 0, &d.se)
+		return
+	}
+	if d.tok == 0 {
+		d.tok = d.r.skip(&jsonCharWhitespaceSet)
+	}
+	// check if an "array" of uint8's (see ContainerType for how to infer if an array)
+	if d.tok == '[' {
+		bsOut, _ = fastpathTV.DecSliceUint8V(bs, true, d.d)
+		return
+	}
+	d.appendStringAsBytes()
+	// base64 encodes []byte{} as "", and we encode nil []byte as null.
+	// Consequently, base64 should decode null as a nil []byte, and "" as an empty []byte{}.
+	// appendStringAsBytes returns a zero-len slice for both, so as not to reset d.bs.
+	// However, it sets a fnull field to true, so we can check if a null was found.
+	if len(d.bs) == 0 {
+		if d.fnull {
+			return nil
+		}
+		return []byte{}
+	}
+	bs0 := d.bs
+	slen := base64.StdEncoding.DecodedLen(len(bs0))
+	if slen <= cap(bs) {
+		bsOut = bs[:slen]
+	} else if zerocopy && slen <= cap(d.b2) {
+		bsOut = d.b2[:slen]
+	} else {
+		bsOut = make([]byte, slen)
+	}
+	slen2, err := base64.StdEncoding.Decode(bsOut, bs0)
+	if err != nil {
+		d.d.errorf("error decoding base64 binary '%s': %v", bs0, err)
+		return nil
+	}
+	if slen != slen2 {
+		bsOut = bsOut[:slen2]
+	}
+	return
+}
+
+func (d *jsonDecDriver) DecodeString() (s string) {
+	d.appendStringAsBytes()
+	return d.bsToString()
+}
+
+func (d *jsonDecDriver) DecodeStringAsBytes() (s []byte) {
+	d.appendStringAsBytes()
+	return d.bs
+}
+
+func (d *jsonDecDriver) appendStringAsBytes() {
+	if d.tok == 0 {
+		d.tok = d.r.skip(&jsonCharWhitespaceSet)
+	}
+
+	d.fnull = false
+	if d.tok != '"' {
+		// d.d.errorf("expect char '%c' but got char '%c'", '"', d.tok)
+		// handle non-string scalar: null, true, false or a number
+		switch d.tok {
+		case 'n':
+			d.readLit(3, jsonLitNull+1) // (n)ull
+			d.bs = d.bs[:0]
+			d.fnull = true
+		case 'f':
+			d.readLit(4, jsonLitFalse+1) // (f)alse
+			d.bs = d.bs[:5]
+			copy(d.bs, "false")
+		case 't':
+			d.readLit(3, jsonLitTrue+1) // (t)rue
+			d.bs = d.bs[:4]
+			copy(d.bs, "true")
+		default:
+			// try to parse a valid number
+			bs := d.decNumBytes()
+			if len(bs) <= cap(d.bs) {
+				d.bs = d.bs[:len(bs)]
+			} else {
+				d.bs = make([]byte, len(bs))
+			}
+			copy(d.bs, bs)
+		}
+		return
+	}
+
+	d.tok = 0
+	r := d.r
+	var cs = r.readUntil(d.b2[:0], '"')
+	var cslen = len(cs)
+	var c uint8
+	v := d.bs[:0]
+	// appending on each byte seen can be expensive, so we just
+	// keep track of where we last read a contiguous set of
+	// non-special bytes (using the cursor variable),
+	// and when we see a special byte
+	// (e.g. end-of-slice, " or \),
+	// we append the full range into the v slice before proceeding
+	for i, cursor := 0, 0; ; {
+		if i == cslen {
+			v = append(v, cs[cursor:]...)
+			cs = r.readUntil(d.b2[:0], '"')
+			cslen = len(cs)
+			i, cursor = 0, 0
+		}
+		c = cs[i]
+		if c == '"' {
+			v = append(v, cs[cursor:i]...)
+			break
+		}
+		if c != '\\' {
+			i++
+			continue
+		}
+		v = append(v, cs[cursor:i]...)
+		i++
+		c = cs[i]
+		switch c {
+		case '"', '\\', '/', '\'':
+			v = append(v, c)
+		case 'b':
+			v = append(v, '\b')
+		case 'f':
+			v = append(v, '\f')
+		case 'n':
+			v = append(v, '\n')
+		case 'r':
+			v = append(v, '\r')
+		case 't':
+			v = append(v, '\t')
+		case 'u':
+			var r rune
+			var rr uint32
+			if len(cs) < i+4 { // may help reduce bounds-checking
+				d.d.errorf("need at least 4 more bytes for unicode sequence")
+			}
+			// c = cs[i+4] // may help reduce bounds-checking
+			for j := 1; j < 5; j++ {
+				// best to use explicit if-else
+				// - not a table, etc which involve memory loads, array lookup with bounds checks, etc
+				c = cs[i+j]
+				if c >= '0' && c <= '9' {
+					rr = rr*16 + uint32(c-jsonU4Chk2)
+				} else if c >= 'a' && c <= 'f' {
+					rr = rr*16 + uint32(c-jsonU4Chk1)
+				} else if c >= 'A' && c <= 'F' {
+					rr = rr*16 + uint32(c-jsonU4Chk0)
+				} else {
+					r = unicode.ReplacementChar
+					i += 4
+					goto encode_rune
+				}
+			}
+			r = rune(rr)
+			i += 4
+			if utf16.IsSurrogate(r) {
+				if len(cs) >= i+6 && cs[i+2] == 'u' && cs[i+1] == '\\' {
+					i += 2
+					// c = cs[i+4] // may help reduce bounds-checking
+					var rr1 uint32
+					for j := 1; j < 5; j++ {
+						c = cs[i+j]
+						if c >= '0' && c <= '9' {
+							rr1 = rr1*16 + uint32(c-jsonU4Chk2)
+						} else if c >= 'a' && c <= 'f' {
+							rr1 = rr1*16 + uint32(c-jsonU4Chk1)
+						} else if c >= 'A' && c <= 'F' {
+							rr1 = rr1*16 + uint32(c-jsonU4Chk0)
+						} else {
+							r = unicode.ReplacementChar
+							i += 4
+							goto encode_rune
+						}
+					}
+					r = utf16.DecodeRune(r, rune(rr1))
+					i += 4
+				} else {
+					r = unicode.ReplacementChar
+					goto encode_rune
+				}
+			}
+		encode_rune:
+			w2 := utf8.EncodeRune(d.bstr[:], r)
+			v = append(v, d.bstr[:w2]...)
+		default:
+			d.d.errorf("unsupported escaped value: %c", c)
+		}
+		i++
+		cursor = i
+	}
+	d.bs = v
+}
+
+func (d *jsonDecDriver) nakedNum(z *decNaked, bs []byte) (err error) {
+	const cutoff = uint64(1 << uint(64-1))
+	var n uint64
+	var neg, badsyntax, overflow bool
+
+	if d.h.PreferFloat {
+		goto F
+	}
+	n, neg, badsyntax, overflow = jsonParseInteger(bs)
+	if badsyntax || overflow {
+		goto F
+	}
+	if neg {
+		if n > cutoff {
+			goto F
+		}
+		z.v = valueTypeInt
+		z.i = -(int64(n))
+	} else if d.h.SignedInteger {
+		if n >= cutoff {
+			goto F
+		}
+		z.v = valueTypeInt
+		z.i = int64(n)
+	} else {
+		z.v = valueTypeUint
+		z.u = n
+	}
+	return
+F:
+	z.v = valueTypeFloat
+	z.f, err = strconv.ParseFloat(stringView(bs), 64)
+	return
+}
+
+func (d *jsonDecDriver) bsToString() string {
+	// if x := d.s.sc; x != nil && x.so && x.st == '}' { // map key
+	if jsonAlwaysReturnInternString || d.c == containerMapKey {
+		return d.d.string(d.bs)
+	}
+	return string(d.bs)
+}
+
+func (d *jsonDecDriver) DecodeNaked() {
+	z := d.d.n
+	// var decodeFurther bool
+
+	if d.tok == 0 {
+		d.tok = d.r.skip(&jsonCharWhitespaceSet)
+	}
+	switch d.tok {
+	case 'n':
+		d.readLit(3, jsonLitNull+1) // (n)ull
+		z.v = valueTypeNil
+	case 'f':
+		d.readLit(4, jsonLitFalse+1) // (f)alse
+		z.v = valueTypeBool
+		z.b = false
+	case 't':
+		d.readLit(3, jsonLitTrue+1) // (t)rue
+		z.v = valueTypeBool
+		z.b = true
+	case '{':
+		z.v = valueTypeMap // don't consume. kInterfaceNaked will call ReadMapStart
+	case '[':
+		z.v = valueTypeArray // don't consume. kInterfaceNaked will call ReadArrayStart
+	case '"':
+		// if a string, and MapKeyAsString, then try to decode it as a nil, bool or number first
+		d.appendStringAsBytes()
+		if len(d.bs) > 0 && d.c == containerMapKey && d.h.MapKeyAsString {
+			switch stringView(d.bs) {
+			case "null":
+				z.v = valueTypeNil
+			case "true":
+				z.v = valueTypeBool
+				z.b = true
+			case "false":
+				z.v = valueTypeBool
+				z.b = false
+			default:
+				// check if a number: float, int or uint
+				if err := d.nakedNum(z, d.bs); err != nil {
+					z.v = valueTypeString
+					z.s = d.bsToString()
+				}
+			}
+		} else {
+			z.v = valueTypeString
+			z.s = d.bsToString()
+		}
+	default: // number
+		bs := d.decNumBytes()
+		if len(bs) == 0 {
+			d.d.errorf("decode number from empty string")
+			return
+		}
+		if err := d.nakedNum(z, bs); err != nil {
+			d.d.errorf("decode number from %s: %v", bs, err)
+			return
+		}
+	}
+	// if decodeFurther {
+	// 	d.s.sc.retryRead()
+	// }
+	return
+}
+
+//----------------------
+
+// JsonHandle is a handle for JSON encoding format.
+//
+// Json is comprehensively supported:
+//    - decodes numbers into interface{} as int, uint or float64
+//      based on how the number looks and some config parameters e.g. PreferFloat, SignedInteger, etc.
+//    - decode integers from float formatted numbers e.g. 1.27e+8
+//    - decode any json value (numbers, bool, etc) from quoted strings
+//    - configurable way to encode/decode []byte .
+//      by default, encodes and decodes []byte using base64 Std Encoding
+//    - UTF-8 support for encoding and decoding
+//
+// It has better performance than the encoding/json package in the standard library,
+// by leveraging the optimizations of the codec library.
+//
+// In addition, it doesn't read more bytes than necessary during a decode, which allows
+// reading multiple values from a stream containing json and non-json content.
+// For example, a user can read a json value, then a cbor value, then a msgpack value,
+// all from the same stream in sequence.
+//
+// Note that, when decoding quoted strings, invalid UTF-8 or invalid UTF-16 surrogate pairs are
+// not treated as an error. Instead, they are replaced by the Unicode replacement character U+FFFD.
+type JsonHandle struct {
+	textEncodingType
+	BasicHandle
+
+	// Indent indicates how a value is encoded.
+	//   - If positive, indent by that number of spaces.
+	//   - If negative, indent by that number of tabs.
+	Indent int8
+
+	// IntegerAsString controls how integers (signed and unsigned) are encoded.
+	//
+	// Per the JSON Spec, JSON numbers are 64-bit floating point numbers.
+	// Consequently, integers > 2^53 cannot be represented as a JSON number without losing precision.
+	// This can be mitigated by configuring how to encode integers.
+	//
+	// IntegerAsString interprets the following values:
+	//   - if 'L', then encode integers > 2^53 as a json string.
+	//   - if 'A', then encode all integers as a json string
+	//             containing the exact integer representation as a decimal.
+	//   - else    encode all integers as a json number (default)
+	IntegerAsString byte
+
+	// HTMLCharsAsIs controls how to encode the special characters < > & in html contexts.
+	//
+	// By default, we encode them as \u00XX
+	// to prevent security holes when served to some browsers.
+	HTMLCharsAsIs bool
+
+	// PreferFloat says that we will default to decoding a number as a float.
+	// If not set, we will examine the characters of the number and decode as an
+	// integer type if it doesn't have any of the characters [.eE].
+	PreferFloat bool
+
+	// TermWhitespace says that we add a whitespace character
+	// at the end of an encoding.
+	//
+	// The whitespace is important, especially if using numbers in a context
+	// where multiple items are written to a stream.
+	TermWhitespace bool
+
+	// MapKeyAsString says to encode all map keys as strings.
+	//
+	// Use this to enforce strict json output.
+	// The only caveat is that a nil value is ALWAYS written as null (never as "null").
+	MapKeyAsString bool
+
+	// _ [2]byte // padding
+
+	// Note: below, we store hardly-used items e.g. RawBytesExt is cached in the (en|de)cDriver.
+
+	// RawBytesExt, if configured, is used to encode and decode raw bytes in a custom way.
+	// If not configured, raw bytes are encoded to/from base64 text.
+	RawBytesExt InterfaceExt
+
+	_ [2]uint64 // padding
+}
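+
+// A hedged configuration sketch (the field values chosen are illustrative):
+//
+//	h := JsonHandle{
+//		Indent:          2,   // pretty-print with 2 spaces
+//		IntegerAsString: 'L', // quote integers beyond 2^53
+//		MapKeyAsString:  true,
+//	}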
+
+// Name returns the name of the handle: json
+func (h *JsonHandle) Name() string            { return "json" }
+func (h *JsonHandle) hasElemSeparators() bool { return true }
+func (h *JsonHandle) typical() bool {
+	return h.Indent == 0 && !h.MapKeyAsString && h.IntegerAsString != 'A' && h.IntegerAsString != 'L'
+}
+
+type jsonTypical interface {
+	typical()
+}
+
+func (h *JsonHandle) recreateEncDriver(ed encDriver) (v bool) {
+	_, v = ed.(jsonTypical)
+	return v != h.typical()
+}
+
+// SetInterfaceExt sets an extension
+func (h *JsonHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) {
+	return h.SetExt(rt, tag, &extWrapper{bytesExtFailer{}, ext})
+}
+
+type jsonEncDriverTypicalImpl struct {
+	jsonEncDriver
+	jsonEncDriverTypical
+	_ [1]uint64 // padding
+}
+
+func (x *jsonEncDriverTypicalImpl) reset() {
+	x.jsonEncDriver.reset()
+	x.jsonEncDriverTypical.reset(&x.jsonEncDriver)
+}
+
+type jsonEncDriverGenericImpl struct {
+	jsonEncDriver
+	jsonEncDriverGeneric
+}
+
+func (x *jsonEncDriverGenericImpl) reset() {
+	x.jsonEncDriver.reset()
+	x.jsonEncDriverGeneric.reset(&x.jsonEncDriver)
+}
+
+func (h *JsonHandle) newEncDriver(e *Encoder) (ee encDriver) {
+	var hd *jsonEncDriver
+	if h.typical() {
+		var v jsonEncDriverTypicalImpl
+		ee = &v
+		hd = &v.jsonEncDriver
+	} else {
+		var v jsonEncDriverGenericImpl
+		ee = &v
+		hd = &v.jsonEncDriver
+	}
+	hd.e, hd.h, hd.bs = e, h, hd.b[:0]
+	hd.se.BytesExt = bytesExtFailer{}
+	ee.reset()
+	return
+}
+
+func (h *JsonHandle) newDecDriver(d *Decoder) decDriver {
+	// d := jsonDecDriver{r: r.(*bytesDecReader), h: h}
+	hd := jsonDecDriver{d: d, h: h}
+	hd.se.BytesExt = bytesExtFailer{}
+	hd.bs = hd.b[:0]
+	hd.reset()
+	return &hd
+}
+
+func (e *jsonEncDriver) reset() {
+	e.ew = e.e.w // e.e.w // &e.e.encWriterSwitch
+	e.se.InterfaceExt = e.h.RawBytesExt
+	if e.bs != nil {
+		e.bs = e.bs[:0]
+	}
+}
+
+func (d *jsonDecDriver) reset() {
+	d.r = d.d.r // &d.d.decReaderSwitch // d.d.r
+	d.se.InterfaceExt = d.h.RawBytesExt
+	if d.bs != nil {
+		d.bs = d.bs[:0]
+	}
+	d.c, d.tok = 0, 0
+	// d.n.reset()
+}
+
+func jsonFloatStrconvFmtPrec(f float64) (fmt byte, prec int) {
+	prec = -1
+	var abs = math.Abs(f)
+	if abs != 0 && (abs < 1e-6 || abs >= 1e21) {
+		fmt = 'e'
+	} else {
+		fmt = 'f'
+		// set prec to 1 iff mod is 0.
+		//     better than using jsonIsFloatBytesB2 to check if there is a . or E in the float bytes.
+		// this ensures that every float has an e or .0 in it.
+		if abs <= 1 {
+			if abs == 0 || abs == 1 {
+				prec = 1
+			}
+		} else if _, mod := math.Modf(abs); mod == 0 {
+			prec = 1
+		}
+	}
+	return
+}
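+
+// For example (illustrative): jsonFloatStrconvFmtPrec(64) returns ('f', 1), so 64
+// encodes as "64.0"; jsonFloatStrconvFmtPrec(1e21) returns ('e', -1), so it
+// encodes as "1e+21". Every encoded float thus contains a '.' or an 'e'.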
+
+// custom-fitted version of strconv.Parse(Ui|I)nt.
+// Also ensures we don't have to search for .eE to determine if it is a float or not.
+func jsonParseInteger(s []byte) (n uint64, neg, badSyntax, overflow bool) {
+	const maxUint64 = (1<<64 - 1)
+	const cutoff = maxUint64/10 + 1
+
+	if len(s) == 0 {
+		badSyntax = true
+		return
+	}
+	switch s[0] {
+	case '+':
+		s = s[1:]
+	case '-':
+		s = s[1:]
+		neg = true
+	}
+	for _, c := range s {
+		if c < '0' || c > '9' {
+			badSyntax = true
+			return
+		}
+		// unsigned integer multiplication wraps silently on overflow, so check the
+		// cutoff before multiplying, e.g. (maxUint64-5)*10 would wrap around.
+		if n >= cutoff {
+			overflow = true
+			return
+		}
+		n *= 10
+		n1 := n + uint64(c-'0')
+		if n1 < n || n1 > maxUint64 {
+			overflow = true
+			return
+		}
+		n = n1
+	}
+	return
+}
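+
+// For example (illustrative):
+//
+//	jsonParseInteger([]byte("123")) // n=123, neg=false
+//	jsonParseInteger([]byte("-42")) // n=42, neg=true
+//	jsonParseInteger([]byte("1.5")) // badSyntax=true (the '.' fails the digit check)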
+
+var _ decDriver = (*jsonDecDriver)(nil)
+var _ encDriver = (*jsonEncDriverGenericImpl)(nil)
+var _ encDriver = (*jsonEncDriverTypicalImpl)(nil)
+var _ jsonTypical = (*jsonEncDriverTypical)(nil)
diff --git a/vendor/github.com/ugorji/go/codec/mammoth-test.go.tmpl b/vendor/github.com/ugorji/go/codec/mammoth-test.go.tmpl
new file mode 100644
index 0000000..90d758c
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/mammoth-test.go.tmpl
@@ -0,0 +1,154 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// Code generated from mammoth-test.go.tmpl - DO NOT EDIT.
+
+package codec
+
+import "testing"
+import "fmt"
+import "reflect"
+
+// TestMammoth has all the different paths optimized in fast-path.
+// It has all the primitives, slices and maps.
+//
+// For each of those types, it has a pointer and a non-pointer field.
+
+func init() { _ = fmt.Printf } // so we can include fmt as needed
+
+type TestMammoth struct {
+
+{{range .Values }}{{if .Primitive }}{{/*
+*/}}{{ .MethodNamePfx "F" true }} {{ .Primitive }}
+{{ .MethodNamePfx "Fptr" true }} *{{ .Primitive }}
+{{end}}{{end}}
+
+{{range .Values }}{{if not .Primitive }}{{if not .MapKey }}{{/*
+*/}}{{ .MethodNamePfx "F" false }} []{{ .Elem }}
+{{ .MethodNamePfx "Fptr" false }} *[]{{ .Elem }}
+{{end}}{{end}}{{end}}
+
+{{range .Values }}{{if not .Primitive }}{{if .MapKey }}{{/*
+*/}}{{ .MethodNamePfx "F" false }} map[{{ .MapKey }}]{{ .Elem }}
+{{ .MethodNamePfx "Fptr" false }} *map[{{ .MapKey }}]{{ .Elem }}
+{{end}}{{end}}{{end}}
+
+}
+
+{{range .Values }}{{if not .Primitive }}{{if not .MapKey }}{{/*
+*/}} type {{ .MethodNamePfx "typMbs" false }} []{{ .Elem }}
+func (_ {{ .MethodNamePfx "typMbs" false }}) MapBySlice() { }
+{{end}}{{end}}{{end}}
+
+{{range .Values }}{{if not .Primitive }}{{if .MapKey }}{{/*
+*/}} type {{ .MethodNamePfx "typMap" false }} map[{{ .MapKey }}]{{ .Elem }}
+{{end}}{{end}}{{end}}
+
+func doTestMammothSlices(t *testing.T, h Handle) {
+{{range $i, $e := .Values }}{{if not .Primitive }}{{if not .MapKey }}{{/*
+*/}}
+    var v{{$i}}va [8]{{ .Elem }}
+    for _, v := range [][]{{ .Elem }}{ nil, {}, { {{ nonzerocmd .Elem }}, {{ zerocmd .Elem }}, {{ zerocmd .Elem }}, {{ nonzerocmd .Elem }} } } { {{/*
+    // fmt.Printf(">>>> running mammoth slice v{{$i}}: %v\n", v)
+    //   - encode value to some []byte
+    //   - decode into a length-wise-equal []byte
+    //   - check if equal to initial slice
+    //   - encode ptr to the value
+    //   - check if encode bytes are same
+    //   - decode into ptrs to: nil, then 1-elem slice, equal-length, then large len slice
+    //   - decode into non-addressable slice of equal length, then larger len 
+    //   - for each decode, compare elem-by-elem to the original slice
+    //   - 
+    //   - rinse and repeat for a MapBySlice version
+    //   - 
+    */}}
+    var v{{$i}}v1, v{{$i}}v2 []{{ .Elem }}
+	v{{$i}}v1 = v
+	bs{{$i}} := testMarshalErr(v{{$i}}v1, h, t, "enc-slice-v{{$i}}")
+	if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make([]{{ .Elem }}, len(v)) }
+	testUnmarshalErr(v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}")
+	testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}")
+	if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make([]{{ .Elem }}, len(v)) }
+	testUnmarshalErr(reflect.ValueOf(v{{$i}}v2), bs{{$i}}, h, t, "dec-slice-v{{$i}}-noaddr") // non-addressable value
+	testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-noaddr")
+	// ...
+	bs{{$i}} = testMarshalErr(&v{{$i}}v1, h, t, "enc-slice-v{{$i}}-p")
+	v{{$i}}v2 = nil
+	testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p")
+	testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-p")
+	v{{$i}}va = [8]{{ .Elem }}{} // clear the array
+	v{{$i}}v2 = v{{$i}}va[:1:1]
+	testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-1")
+	testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-p-1")
+	v{{$i}}va = [8]{{ .Elem }}{} // clear the array
+	v{{$i}}v2 = v{{$i}}va[:len(v{{$i}}v1):len(v{{$i}}v1)]
+	testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-len")
+	testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-p-len")
+	v{{$i}}va = [8]{{ .Elem }}{} // clear the array
+	v{{$i}}v2 = v{{$i}}va[:]
+	testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-cap")
+	testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-p-cap")
+	if len(v{{$i}}v1) > 1 {
+	v{{$i}}va = [8]{{ .Elem }}{} // clear the array
+	testUnmarshalErr((&v{{$i}}va)[:len(v{{$i}}v1)], bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-len-noaddr")
+	testDeepEqualErr(v{{$i}}v1, v{{$i}}va[:len(v{{$i}}v1)], t, "equal-slice-v{{$i}}-p-len-noaddr")
+	v{{$i}}va = [8]{{ .Elem }}{} // clear the array
+	testUnmarshalErr((&v{{$i}}va)[:], bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-cap-noaddr")
+	testDeepEqualErr(v{{$i}}v1, v{{$i}}va[:len(v{{$i}}v1)], t, "equal-slice-v{{$i}}-p-cap-noaddr")
+    }
+    // ...
+    var v{{$i}}v3, v{{$i}}v4 {{ .MethodNamePfx "typMbs" false }}
+	v{{$i}}v2 = nil
+    if v != nil { v{{$i}}v2 = make([]{{ .Elem }}, len(v)) }
+    v{{$i}}v3 = {{ .MethodNamePfx "typMbs" false }}(v{{$i}}v1)
+    v{{$i}}v4 = {{ .MethodNamePfx "typMbs" false }}(v{{$i}}v2)
+    bs{{$i}} = testMarshalErr(v{{$i}}v3, h, t, "enc-slice-v{{$i}}-custom")
+    testUnmarshalErr(v{{$i}}v4, bs{{$i}}, h, t, "dec-slice-v{{$i}}-custom")
+    testDeepEqualErr(v{{$i}}v3, v{{$i}}v4, t, "equal-slice-v{{$i}}-custom")
+    bs{{$i}} = testMarshalErr(&v{{$i}}v3, h, t, "enc-slice-v{{$i}}-custom-p")
+    v{{$i}}v2 = nil
+    v{{$i}}v4 = {{ .MethodNamePfx "typMbs" false }}(v{{$i}}v2)
+    testUnmarshalErr(&v{{$i}}v4, bs{{$i}}, h, t, "dec-slice-v{{$i}}-custom-p")
+    testDeepEqualErr(v{{$i}}v3, v{{$i}}v4, t, "equal-slice-v{{$i}}-custom-p")
+    }
+{{end}}{{end}}{{end}}
+}
+
+func doTestMammothMaps(t *testing.T, h Handle) {
+{{range $i, $e := .Values }}{{if not .Primitive }}{{if .MapKey }}{{/*
+*/}}
+    for _, v := range []map[{{ .MapKey }}]{{ .Elem }}{ nil, {}, { {{ nonzerocmd .MapKey }}:{{ zerocmd .Elem }} {{if ne "bool" .MapKey}}, {{ nonzerocmd .MapKey }}:{{ nonzerocmd .Elem }} {{end}} } } {
+    // fmt.Printf(">>>> running mammoth map v{{$i}}: %v\n", v)
+    var v{{$i}}v1, v{{$i}}v2 map[{{ .MapKey }}]{{ .Elem }}
+	v{{$i}}v1 = v
+	bs{{$i}} := testMarshalErr(v{{$i}}v1, h, t, "enc-map-v{{$i}}")
+	if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } // reset map
+	testUnmarshalErr(v{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}")
+	testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}")
+	if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } // reset map
+	testUnmarshalErr(reflect.ValueOf(v{{$i}}v2), bs{{$i}}, h, t, "dec-map-v{{$i}}-noaddr") // decode into non-addressable map value
+	testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}-noaddr")
+	if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } // reset map
+	testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}-p-len")
+	testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}-p-len")
+	bs{{$i}} = testMarshalErr(&v{{$i}}v1, h, t, "enc-map-v{{$i}}-p")
+	v{{$i}}v2 = nil
+	testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}-p-nil")
+	testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}-p-nil")
+    // ...
+	if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } // reset map
+    var v{{$i}}v3, v{{$i}}v4 {{ .MethodNamePfx "typMap" false }}
+	v{{$i}}v3 = {{ .MethodNamePfx "typMap" false }}(v{{$i}}v1)
+	v{{$i}}v4 = {{ .MethodNamePfx "typMap" false }}(v{{$i}}v2)
+    bs{{$i}} = testMarshalErr(v{{$i}}v3, h, t, "enc-map-v{{$i}}-custom")
+	testUnmarshalErr(v{{$i}}v4, bs{{$i}}, h, t, "dec-map-v{{$i}}-custom")
+	testDeepEqualErr(v{{$i}}v3, v{{$i}}v4, t, "equal-map-v{{$i}}-custom")
+    }
+{{end}}{{end}}{{end}}
+
+}
+
+func doTestMammothMapsAndSlices(t *testing.T, h Handle) {
+	doTestMammothSlices(t, h)
+	doTestMammothMaps(t, h)
+}
diff --git a/vendor/github.com/ugorji/go/codec/mammoth2-test.go.tmpl b/vendor/github.com/ugorji/go/codec/mammoth2-test.go.tmpl
new file mode 100644
index 0000000..7cdf8f5
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/mammoth2-test.go.tmpl
@@ -0,0 +1,94 @@
+// +build !notfastpath
+
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// Code generated from mammoth2-test.go.tmpl - DO NOT EDIT.
+
+package codec
+
+// Increase code coverage by covering all the codecgen paths, in fast-path and gen-helper.go.
+//
+// Add:
+// - test file for creating a mammoth generated file as _mammoth_generated.go
+//   - generate a second mammoth file: mammoth2_generated_test.go
+//     - mammoth-test.go.tmpl will do this
+//   - run codecgen on it, into mammoth2_codecgen_generated_test.go (no build tags)
+//   - as part of TestMammoth, run it also
+//   - this will cover all the codecgen, gen-helper, etc in one full run
+//   - check in mammoth* files into github also
+// - then
+//
+// Now, add some types:
+//  - some that implement BinaryMarshal, TextMarshal, JSONMarshal, and one that implements none of them
+//  - create a wrapper type that includes TestMammoth2, with it in slices, and maps, and the custom types
+//  - this wrapper object is what we encode/decode (so that the codecgen methods are called)
+
+
+// import "encoding/binary"
+import "fmt"
+
+type TestMammoth2 struct {
+
+{{range .Values }}{{if .Primitive }}{{/*
+*/}}{{ .MethodNamePfx "F" true }} {{ .Primitive }}
+{{ .MethodNamePfx "Fptr" true }} *{{ .Primitive }}
+{{end}}{{end}}
+
+{{range .Values }}{{if not .Primitive }}{{if not .MapKey }}{{/*
+*/}}{{ .MethodNamePfx "F" false }} []{{ .Elem }}
+{{ .MethodNamePfx "Fptr" false }} *[]{{ .Elem }}
+{{end}}{{end}}{{end}}
+
+{{range .Values }}{{if not .Primitive }}{{if .MapKey }}{{/*
+*/}}{{ .MethodNamePfx "F" false }} map[{{ .MapKey }}]{{ .Elem }}
+{{ .MethodNamePfx "Fptr" false }} *map[{{ .MapKey }}]{{ .Elem }}
+{{end}}{{end}}{{end}}
+
+}
+
+// -----------
+
+type testMammoth2Binary uint64
+func (x testMammoth2Binary) MarshalBinary() (data []byte, err error) {
+data = make([]byte, 8)
+bigen.PutUint64(data, uint64(x))
+return
+}
+func (x *testMammoth2Binary) UnmarshalBinary(data []byte) (err error) {
+*x = testMammoth2Binary(bigen.Uint64(data))
+return
+}
+
+type testMammoth2Text uint64
+func (x testMammoth2Text) MarshalText() (data []byte, err error) {
+data = []byte(fmt.Sprintf("%b", uint64(x)))
+return
+}
+func (x *testMammoth2Text) UnmarshalText(data []byte) (err error) {
+_, err = fmt.Sscanf(string(data), "%b", (*uint64)(x))
+return
+}
+
+type testMammoth2Json uint64
+func (x testMammoth2Json) MarshalJSON() (data []byte, err error) {
+data = []byte(fmt.Sprintf("%v", uint64(x)))
+return
+}
+func (x *testMammoth2Json) UnmarshalJSON(data []byte) (err error) {
+_, err = fmt.Sscanf(string(data), "%v", (*uint64)(x))
+return
+}
+
+type testMammoth2Basic [4]uint64
+
+type TestMammoth2Wrapper struct {
+  V TestMammoth2
+  T testMammoth2Text
+  B testMammoth2Binary
+  J testMammoth2Json
+  C testMammoth2Basic
+  M map[testMammoth2Basic]TestMammoth2
+  L []TestMammoth2
+  A [4]int64
+}
diff --git a/vendor/github.com/ugorji/go/codec/msgpack.go b/vendor/github.com/ugorji/go/codec/msgpack.go
new file mode 100644
index 0000000..3271579
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/msgpack.go
@@ -0,0 +1,1092 @@
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+/*
+MSGPACK
+
+The msgpack-c implementation powers the C, C++, Python, Ruby, etc. libraries.
+We need to maintain compatibility with it and with how it encodes integer values
+without caring about the type.
+
+For compatibility with the behaviour of the msgpack-c reference implementation:
+  - Go intX (>0) and uintX
+       IS ENCODED AS
+    msgpack +ve fixnum, unsigned
+  - Go intX (<0)
+       IS ENCODED AS
+    msgpack -ve fixnum, signed
+*/
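+
+// Illustrative encodings, derived from the EncodeInt/EncodeUint logic below
+// (assuming default handle options, i.e. NoFixedNum=false):
+//   EncodeInt(5)    -> 0x05            (positive fixnum)
+//   EncodeInt(-3)   -> 0xfd            (negative fixnum)
+//   EncodeInt(-100) -> 0xd0 0x9c       (int8)
+//   EncodeUint(300) -> 0xcd 0x01 0x2c  (uint16, big-endian)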
+
+package codec
+
+import (
+	"fmt"
+	"io"
+	"math"
+	"net/rpc"
+	"reflect"
+	"time"
+)
+
+const (
+	mpPosFixNumMin byte = 0x00
+	mpPosFixNumMax      = 0x7f
+	mpFixMapMin         = 0x80
+	mpFixMapMax         = 0x8f
+	mpFixArrayMin       = 0x90
+	mpFixArrayMax       = 0x9f
+	mpFixStrMin         = 0xa0
+	mpFixStrMax         = 0xbf
+	mpNil               = 0xc0
+	_                   = 0xc1 // 0xc1 is reserved (never used) in the msgpack spec
+	mpFalse             = 0xc2
+	mpTrue              = 0xc3
+	mpFloat             = 0xca
+	mpDouble            = 0xcb
+	mpUint8             = 0xcc
+	mpUint16            = 0xcd
+	mpUint32            = 0xce
+	mpUint64            = 0xcf
+	mpInt8              = 0xd0
+	mpInt16             = 0xd1
+	mpInt32             = 0xd2
+	mpInt64             = 0xd3
+
+	// extensions below
+	mpBin8     = 0xc4
+	mpBin16    = 0xc5
+	mpBin32    = 0xc6
+	mpExt8     = 0xc7
+	mpExt16    = 0xc8
+	mpExt32    = 0xc9
+	mpFixExt1  = 0xd4
+	mpFixExt2  = 0xd5
+	mpFixExt4  = 0xd6
+	mpFixExt8  = 0xd7
+	mpFixExt16 = 0xd8
+
+	mpStr8  = 0xd9 // new
+	mpStr16 = 0xda
+	mpStr32 = 0xdb
+
+	mpArray16 = 0xdc
+	mpArray32 = 0xdd
+
+	mpMap16 = 0xde
+	mpMap32 = 0xdf
+
+	mpNegFixNumMin = 0xe0
+	mpNegFixNumMax = 0xff
+)
+
+var mpTimeExtTag int8 = -1
+var mpTimeExtTagU = uint8(mpTimeExtTag)
+
+// var mpdesc = map[byte]string{
+// 	mpPosFixNumMin: "PosFixNumMin",
+// 	mpPosFixNumMax: "PosFixNumMax",
+// 	mpFixMapMin:    "FixMapMin",
+// 	mpFixMapMax:    "FixMapMax",
+// 	mpFixArrayMin:  "FixArrayMin",
+// 	mpFixArrayMax:  "FixArrayMax",
+// 	mpFixStrMin:    "FixStrMin",
+// 	mpFixStrMax:    "FixStrMax",
+// 	mpNil:          "Nil",
+// 	mpFalse:        "False",
+// 	mpTrue:         "True",
+// 	mpFloat:        "Float",
+// 	mpDouble:       "Double",
+// 	mpUint8:        "Uint8",
+// 	mpUint16:       "Uint16",
+// 	mpUint32:       "Uint32",
+// 	mpUint64:       "Uint64",
+// 	mpInt8:         "Int8",
+// 	mpInt16:        "Int16",
+// 	mpInt32:        "Int32",
+// 	mpInt64:        "Int64",
+// 	mpBin8:         "Bin8",
+// 	mpBin16:        "Bin16",
+// 	mpBin32:        "Bin32",
+// 	mpExt8:         "Ext8",
+// 	mpExt16:        "Ext16",
+// 	mpExt32:        "Ext32",
+// 	mpFixExt1:      "FixExt1",
+// 	mpFixExt2:      "FixExt2",
+// 	mpFixExt4:      "FixExt4",
+// 	mpFixExt8:      "FixExt8",
+// 	mpFixExt16:     "FixExt16",
+// 	mpStr8:         "Str8",
+// 	mpStr16:        "Str16",
+// 	mpStr32:        "Str32",
+// 	mpArray16:      "Array16",
+// 	mpArray32:      "Array32",
+// 	mpMap16:        "Map16",
+// 	mpMap32:        "Map32",
+// 	mpNegFixNumMin: "NegFixNumMin",
+// 	mpNegFixNumMax: "NegFixNumMax",
+// }
+
+func mpdesc(bd byte) string {
+	switch bd {
+	case mpNil:
+		return "nil"
+	case mpFalse:
+		return "false"
+	case mpTrue:
+		return "true"
+	case mpFloat, mpDouble:
+		return "float"
+	case mpUint8, mpUint16, mpUint32, mpUint64:
+		return "uint"
+	case mpInt8, mpInt16, mpInt32, mpInt64:
+		return "int"
+	default:
+		switch {
+		case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax:
+			return "int"
+		case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax:
+			return "int"
+		case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax:
+			return "string|bytes"
+		case bd == mpBin8, bd == mpBin16, bd == mpBin32:
+			return "bytes"
+		case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax:
+			return "array"
+		case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax:
+			return "map"
+		case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32:
+			return "ext"
+		default:
+			return "unknown"
+		}
+	}
+}
+
+// MsgpackSpecRpcMultiArgs is a special type which signifies to the MsgpackSpecRpcCodec
+// that the backend RPC service takes multiple arguments, which have been arranged
+// in sequence in the slice.
+//
+// The Codec then passes it AS-IS to the rpc service (without wrapping it in an
+// array of 1 element).
+type MsgpackSpecRpcMultiArgs []interface{}
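+
+// Illustrative usage (client and reply are assumed to exist elsewhere):
+//   args := MsgpackSpecRpcMultiArgs{"db0", uint8(1)}
+//   err := client.Call("Service.Method", args, &reply)
+// encodes the request params as ["db0", 1] rather than [["db0", 1]].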
+
+// msgpackContainerType specifies the properties of each kind of msgpack container (str, bin, list, map).
+type msgpackContainerType struct {
+	fixCutoff                   int
+	bFixMin, b8, b16, b32       byte
+	hasFixMin, has8, has8Always bool
+}
+
+var (
+	msgpackContainerStr = msgpackContainerType{
+		32, mpFixStrMin, mpStr8, mpStr16, mpStr32, true, true, false,
+	}
+	msgpackContainerBin = msgpackContainerType{
+		0, 0, mpBin8, mpBin16, mpBin32, false, true, true,
+	}
+	msgpackContainerList = msgpackContainerType{
+		16, mpFixArrayMin, 0, mpArray16, mpArray32, true, false, false,
+	}
+	msgpackContainerMap = msgpackContainerType{
+		16, mpFixMapMin, 0, mpMap16, mpMap32, true, false, false,
+	}
+)
+
+//---------------------------------------------
+
+type msgpackEncDriver struct {
+	noBuiltInTypes
+	encDriverNoopContainerWriter
+	// encNoSeparator
+	e *Encoder
+	w encWriter
+	h *MsgpackHandle
+	x [8]byte
+	_ [3]uint64 // padding
+}
+
+func (e *msgpackEncDriver) EncodeNil() {
+	e.w.writen1(mpNil)
+}
+
+func (e *msgpackEncDriver) EncodeInt(i int64) {
+	// if i >= 0 {
+	// 	e.EncodeUint(uint64(i))
+	// } else if false &&
+	if i > math.MaxInt8 {
+		if i <= math.MaxInt16 {
+			e.w.writen1(mpInt16)
+			bigenHelper{e.x[:2], e.w}.writeUint16(uint16(i))
+		} else if i <= math.MaxInt32 {
+			e.w.writen1(mpInt32)
+			bigenHelper{e.x[:4], e.w}.writeUint32(uint32(i))
+		} else {
+			e.w.writen1(mpInt64)
+			bigenHelper{e.x[:8], e.w}.writeUint64(uint64(i))
+		}
+	} else if i >= -32 {
+		if e.h.NoFixedNum {
+			e.w.writen2(mpInt8, byte(i))
+		} else {
+			e.w.writen1(byte(i))
+		}
+	} else if i >= math.MinInt8 {
+		e.w.writen2(mpInt8, byte(i))
+	} else if i >= math.MinInt16 {
+		e.w.writen1(mpInt16)
+		bigenHelper{e.x[:2], e.w}.writeUint16(uint16(i))
+	} else if i >= math.MinInt32 {
+		e.w.writen1(mpInt32)
+		bigenHelper{e.x[:4], e.w}.writeUint32(uint32(i))
+	} else {
+		e.w.writen1(mpInt64)
+		bigenHelper{e.x[:8], e.w}.writeUint64(uint64(i))
+	}
+}
+
+func (e *msgpackEncDriver) EncodeUint(i uint64) {
+	if i <= math.MaxInt8 {
+		if e.h.NoFixedNum {
+			e.w.writen2(mpUint8, byte(i))
+		} else {
+			e.w.writen1(byte(i))
+		}
+	} else if i <= math.MaxUint8 {
+		e.w.writen2(mpUint8, byte(i))
+	} else if i <= math.MaxUint16 {
+		e.w.writen1(mpUint16)
+		bigenHelper{e.x[:2], e.w}.writeUint16(uint16(i))
+	} else if i <= math.MaxUint32 {
+		e.w.writen1(mpUint32)
+		bigenHelper{e.x[:4], e.w}.writeUint32(uint32(i))
+	} else {
+		e.w.writen1(mpUint64)
+		bigenHelper{e.x[:8], e.w}.writeUint64(uint64(i))
+	}
+}
+
+func (e *msgpackEncDriver) EncodeBool(b bool) {
+	if b {
+		e.w.writen1(mpTrue)
+	} else {
+		e.w.writen1(mpFalse)
+	}
+}
+
+func (e *msgpackEncDriver) EncodeFloat32(f float32) {
+	e.w.writen1(mpFloat)
+	bigenHelper{e.x[:4], e.w}.writeUint32(math.Float32bits(f))
+}
+
+func (e *msgpackEncDriver) EncodeFloat64(f float64) {
+	e.w.writen1(mpDouble)
+	bigenHelper{e.x[:8], e.w}.writeUint64(math.Float64bits(f))
+}
+
+func (e *msgpackEncDriver) EncodeTime(t time.Time) {
+	if t.IsZero() {
+		e.EncodeNil()
+		return
+	}
+	t = t.UTC()
+	sec, nsec := t.Unix(), uint64(t.Nanosecond())
+	var data64 uint64
+	var l = 4
+	if sec >= 0 && sec>>34 == 0 {
+		data64 = (nsec << 34) | uint64(sec)
+		if data64&0xffffffff00000000 != 0 {
+			l = 8
+		}
+	} else {
+		l = 12
+	}
+	if e.h.WriteExt {
+		e.encodeExtPreamble(mpTimeExtTagU, l)
+	} else {
+		e.writeContainerLen(msgpackContainerStr, l)
+	}
+	switch l {
+	case 4:
+		bigenHelper{e.x[:4], e.w}.writeUint32(uint32(data64))
+	case 8:
+		bigenHelper{e.x[:8], e.w}.writeUint64(data64)
+	case 12:
+		bigenHelper{e.x[:4], e.w}.writeUint32(uint32(nsec))
+		bigenHelper{e.x[:8], e.w}.writeUint64(uint64(sec))
+	}
+}
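+
+// EncodeTime above follows the msgpack timestamp extension layout:
+//   4 bytes:  seconds as uint32 (used when nsec == 0 and sec fits in 32 bits)
+//   8 bytes:  (nsec << 34) | sec, packed into a uint64 (sec fits in 34 bits)
+//   12 bytes: nsec as uint32, then sec as int64
+// e.g. 2000-01-01T00:00:00Z (sec=946684800, nsec=0) encodes, after the
+// ext/str preamble, as the 4-byte form 0x38 0x6d 0x43 0x80.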
+
+func (e *msgpackEncDriver) EncodeExt(v interface{}, xtag uint64, ext Ext, _ *Encoder) {
+	bs := ext.WriteExt(v)
+	if bs == nil {
+		e.EncodeNil()
+		return
+	}
+	if e.h.WriteExt {
+		e.encodeExtPreamble(uint8(xtag), len(bs))
+		e.w.writeb(bs)
+	} else {
+		e.EncodeStringBytes(cRAW, bs)
+	}
+}
+
+func (e *msgpackEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) {
+	e.encodeExtPreamble(uint8(re.Tag), len(re.Data))
+	e.w.writeb(re.Data)
+}
+
+func (e *msgpackEncDriver) encodeExtPreamble(xtag byte, l int) {
+	if l == 1 {
+		e.w.writen2(mpFixExt1, xtag)
+	} else if l == 2 {
+		e.w.writen2(mpFixExt2, xtag)
+	} else if l == 4 {
+		e.w.writen2(mpFixExt4, xtag)
+	} else if l == 8 {
+		e.w.writen2(mpFixExt8, xtag)
+	} else if l == 16 {
+		e.w.writen2(mpFixExt16, xtag)
+	} else if l < 256 {
+		e.w.writen2(mpExt8, byte(l))
+		e.w.writen1(xtag)
+	} else if l < 65536 {
+		e.w.writen1(mpExt16)
+		bigenHelper{e.x[:2], e.w}.writeUint16(uint16(l))
+		e.w.writen1(xtag)
+	} else {
+		e.w.writen1(mpExt32)
+		bigenHelper{e.x[:4], e.w}.writeUint32(uint32(l))
+		e.w.writen1(xtag)
+	}
+}
+
+func (e *msgpackEncDriver) WriteArrayStart(length int) {
+	e.writeContainerLen(msgpackContainerList, length)
+}
+
+func (e *msgpackEncDriver) WriteMapStart(length int) {
+	e.writeContainerLen(msgpackContainerMap, length)
+}
+
+func (e *msgpackEncDriver) EncodeString(c charEncoding, s string) {
+	slen := len(s)
+	if c == cRAW && e.h.WriteExt {
+		e.writeContainerLen(msgpackContainerBin, slen)
+	} else {
+		e.writeContainerLen(msgpackContainerStr, slen)
+	}
+	if slen > 0 {
+		e.w.writestr(s)
+	}
+}
+
+func (e *msgpackEncDriver) EncodeStringBytes(c charEncoding, bs []byte) {
+	if bs == nil {
+		e.EncodeNil()
+		return
+	}
+	slen := len(bs)
+	if c == cRAW && e.h.WriteExt {
+		e.writeContainerLen(msgpackContainerBin, slen)
+	} else {
+		e.writeContainerLen(msgpackContainerStr, slen)
+	}
+	if slen > 0 {
+		e.w.writeb(bs)
+	}
+}
+
+func (e *msgpackEncDriver) writeContainerLen(ct msgpackContainerType, l int) {
+	if ct.hasFixMin && l < ct.fixCutoff {
+		e.w.writen1(ct.bFixMin | byte(l))
+	} else if ct.has8 && l < 256 && (ct.has8Always || e.h.WriteExt) {
+		e.w.writen2(ct.b8, uint8(l))
+	} else if l < 65536 {
+		e.w.writen1(ct.b16)
+		bigenHelper{e.x[:2], e.w}.writeUint16(uint16(l))
+	} else {
+		e.w.writen1(ct.b32)
+		bigenHelper{e.x[:4], e.w}.writeUint32(uint32(l))
+	}
+}
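+
+// For example, derived from writeContainerLen and the container tables above
+// (with WriteExt=true):
+//   str len 5     -> 0xa5            (fixstr: 0xa0 | 5)
+//   str len 300   -> 0xda 0x01 0x2c  (str16)
+//   bin len 5     -> 0xc4 0x05       (bin8)
+//   array len 20  -> 0xdc 0x00 0x14  (array16)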
+
+//---------------------------------------------
+
+type msgpackDecDriver struct {
+	d *Decoder
+	r decReader // *Decoder decReader decReaderT
+	h *MsgpackHandle
+	// b      [scratchByteArrayLen]byte
+	bd     byte
+	bdRead bool
+	br     bool // bytes reader
+	noBuiltInTypes
+	// noStreamingCodec
+	// decNoSeparator
+	decDriverNoopContainerReader
+	_ [3]uint64 // padding
+}
+
+// Note: DecodeNaked sets d.d.n to either a primitive (int, bool, etc) for
+// non-containers, a container type, or a specific type denoting nil or extension.
+// It is called when a nil interface{} is passed, leaving it up to the DecDriver
+// to introspect the stream and decide how best to decode.
+// It deciphers the value by looking at the stream first.
+func (d *msgpackDecDriver) DecodeNaked() {
+	if !d.bdRead {
+		d.readNextBd()
+	}
+	bd := d.bd
+	n := d.d.n
+	var decodeFurther bool
+
+	switch bd {
+	case mpNil:
+		n.v = valueTypeNil
+		d.bdRead = false
+	case mpFalse:
+		n.v = valueTypeBool
+		n.b = false
+	case mpTrue:
+		n.v = valueTypeBool
+		n.b = true
+
+	case mpFloat:
+		n.v = valueTypeFloat
+		n.f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4))))
+	case mpDouble:
+		n.v = valueTypeFloat
+		n.f = math.Float64frombits(bigen.Uint64(d.r.readx(8)))
+
+	case mpUint8:
+		n.v = valueTypeUint
+		n.u = uint64(d.r.readn1())
+	case mpUint16:
+		n.v = valueTypeUint
+		n.u = uint64(bigen.Uint16(d.r.readx(2)))
+	case mpUint32:
+		n.v = valueTypeUint
+		n.u = uint64(bigen.Uint32(d.r.readx(4)))
+	case mpUint64:
+		n.v = valueTypeUint
+		n.u = uint64(bigen.Uint64(d.r.readx(8)))
+
+	case mpInt8:
+		n.v = valueTypeInt
+		n.i = int64(int8(d.r.readn1()))
+	case mpInt16:
+		n.v = valueTypeInt
+		n.i = int64(int16(bigen.Uint16(d.r.readx(2))))
+	case mpInt32:
+		n.v = valueTypeInt
+		n.i = int64(int32(bigen.Uint32(d.r.readx(4))))
+	case mpInt64:
+		n.v = valueTypeInt
+		n.i = int64(bigen.Uint64(d.r.readx(8)))
+
+	default:
+		switch {
+		case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax:
+			// positive fixnum (always signed)
+			n.v = valueTypeInt
+			n.i = int64(int8(bd))
+		case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax:
+			// negative fixnum
+			n.v = valueTypeInt
+			n.i = int64(int8(bd))
+		case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax:
+			if d.h.RawToString {
+				n.v = valueTypeString
+				n.s = d.DecodeString()
+			} else {
+				n.v = valueTypeBytes
+				n.l = d.DecodeBytes(nil, false)
+			}
+		case bd == mpBin8, bd == mpBin16, bd == mpBin32:
+			n.v = valueTypeBytes
+			n.l = d.DecodeBytes(nil, false)
+		case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax:
+			n.v = valueTypeArray
+			decodeFurther = true
+		case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax:
+			n.v = valueTypeMap
+			decodeFurther = true
+		case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32:
+			n.v = valueTypeExt
+			clen := d.readExtLen()
+			n.u = uint64(d.r.readn1())
+			if n.u == uint64(mpTimeExtTagU) {
+				n.v = valueTypeTime
+				n.t = d.decodeTime(clen)
+			} else {
+				n.l = d.r.readx(clen)
+			}
+		default:
+			d.d.errorf("cannot infer value: %s: Ox%x/%d/%s", msgBadDesc, bd, bd, mpdesc(bd))
+		}
+	}
+	if !decodeFurther {
+		d.bdRead = false
+	}
+	if n.v == valueTypeUint && d.h.SignedInteger {
+		n.v = valueTypeInt
+		n.i = int64(n.u)
+	}
+	return
+}
+
+// int can be decoded from msgpack type: intXXX or uintXXX
+func (d *msgpackDecDriver) DecodeInt64() (i int64) {
+	if !d.bdRead {
+		d.readNextBd()
+	}
+	switch d.bd {
+	case mpUint8:
+		i = int64(uint64(d.r.readn1()))
+	case mpUint16:
+		i = int64(uint64(bigen.Uint16(d.r.readx(2))))
+	case mpUint32:
+		i = int64(uint64(bigen.Uint32(d.r.readx(4))))
+	case mpUint64:
+		i = int64(bigen.Uint64(d.r.readx(8)))
+	case mpInt8:
+		i = int64(int8(d.r.readn1()))
+	case mpInt16:
+		i = int64(int16(bigen.Uint16(d.r.readx(2))))
+	case mpInt32:
+		i = int64(int32(bigen.Uint32(d.r.readx(4))))
+	case mpInt64:
+		i = int64(bigen.Uint64(d.r.readx(8)))
+	default:
+		switch {
+		case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax:
+			i = int64(int8(d.bd))
+		case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax:
+			i = int64(int8(d.bd))
+		default:
+			d.d.errorf("cannot decode signed integer: %s: %x/%s", msgBadDesc, d.bd, mpdesc(d.bd))
+			return
+		}
+	}
+	d.bdRead = false
+	return
+}
+
+// uint can be decoded from msgpack type: intXXX or uintXXX
+func (d *msgpackDecDriver) DecodeUint64() (ui uint64) {
+	if !d.bdRead {
+		d.readNextBd()
+	}
+	switch d.bd {
+	case mpUint8:
+		ui = uint64(d.r.readn1())
+	case mpUint16:
+		ui = uint64(bigen.Uint16(d.r.readx(2)))
+	case mpUint32:
+		ui = uint64(bigen.Uint32(d.r.readx(4)))
+	case mpUint64:
+		ui = bigen.Uint64(d.r.readx(8))
+	case mpInt8:
+		if i := int64(int8(d.r.readn1())); i >= 0 {
+			ui = uint64(i)
+		} else {
+			d.d.errorf("assigning negative signed value: %v, to unsigned type", i)
+			return
+		}
+	case mpInt16:
+		if i := int64(int16(bigen.Uint16(d.r.readx(2)))); i >= 0 {
+			ui = uint64(i)
+		} else {
+			d.d.errorf("assigning negative signed value: %v, to unsigned type", i)
+			return
+		}
+	case mpInt32:
+		if i := int64(int32(bigen.Uint32(d.r.readx(4)))); i >= 0 {
+			ui = uint64(i)
+		} else {
+			d.d.errorf("assigning negative signed value: %v, to unsigned type", i)
+			return
+		}
+	case mpInt64:
+		if i := int64(bigen.Uint64(d.r.readx(8))); i >= 0 {
+			ui = uint64(i)
+		} else {
+			d.d.errorf("assigning negative signed value: %v, to unsigned type", i)
+			return
+		}
+	default:
+		switch {
+		case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax:
+			ui = uint64(d.bd)
+		case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax:
+			d.d.errorf("assigning negative signed value: %v, to unsigned type", int(d.bd))
+			return
+		default:
+			d.d.errorf("cannot decode unsigned integer: %s: %x/%s", msgBadDesc, d.bd, mpdesc(d.bd))
+			return
+		}
+	}
+	d.bdRead = false
+	return
+}
+
+// float can be decoded from msgpack types: float, double, or intX
+func (d *msgpackDecDriver) DecodeFloat64() (f float64) {
+	if !d.bdRead {
+		d.readNextBd()
+	}
+	if d.bd == mpFloat {
+		f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4))))
+	} else if d.bd == mpDouble {
+		f = math.Float64frombits(bigen.Uint64(d.r.readx(8)))
+	} else {
+		f = float64(d.DecodeInt64())
+	}
+	d.bdRead = false
+	return
+}
+
+// bool can be decoded from bool, fixnum 0 or 1.
+func (d *msgpackDecDriver) DecodeBool() (b bool) {
+	if !d.bdRead {
+		d.readNextBd()
+	}
+	if d.bd == mpFalse || d.bd == 0 {
+		// b = false
+	} else if d.bd == mpTrue || d.bd == 1 {
+		b = true
+	} else {
+		d.d.errorf("cannot decode bool: %s: %x/%s", msgBadDesc, d.bd, mpdesc(d.bd))
+		return
+	}
+	d.bdRead = false
+	return
+}
+
+func (d *msgpackDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) {
+	if !d.bdRead {
+		d.readNextBd()
+	}
+
+	// check if an "array" of uint8's (see ContainerType for how to infer if an array)
+	bd := d.bd
+	// DecodeBytes could be from: bin str fixstr fixarray array ...
+	var clen int
+	vt := d.ContainerType()
+	switch vt {
+	case valueTypeBytes:
+		// valueTypeBytes may be a mpBin or an mpStr container
+		if bd == mpBin8 || bd == mpBin16 || bd == mpBin32 {
+			clen = d.readContainerLen(msgpackContainerBin)
+		} else {
+			clen = d.readContainerLen(msgpackContainerStr)
+		}
+	case valueTypeString:
+		clen = d.readContainerLen(msgpackContainerStr)
+	case valueTypeArray:
+		if zerocopy && len(bs) == 0 {
+			bs = d.d.b[:]
+		}
+		bsOut, _ = fastpathTV.DecSliceUint8V(bs, true, d.d)
+		return
+	default:
+		d.d.errorf("invalid container type: expecting bin|str|array, got: 0x%x", uint8(vt))
+		return
+	}
+
+	// these are (bin|str)(8|16|32)
+	d.bdRead = false
+	// bytes may be nil, so handle it. if nil, clen=-1.
+	if clen < 0 {
+		return nil
+	}
+	if zerocopy {
+		if d.br {
+			return d.r.readx(clen)
+		} else if len(bs) == 0 {
+			bs = d.d.b[:]
+		}
+	}
+	return decByteSlice(d.r, clen, d.h.MaxInitLen, bs)
+}
+
+func (d *msgpackDecDriver) DecodeString() (s string) {
+	return string(d.DecodeBytes(d.d.b[:], true))
+}
+
+func (d *msgpackDecDriver) DecodeStringAsBytes() (s []byte) {
+	return d.DecodeBytes(d.d.b[:], true)
+}
+
+func (d *msgpackDecDriver) readNextBd() {
+	d.bd = d.r.readn1()
+	d.bdRead = true
+}
+
+func (d *msgpackDecDriver) uncacheRead() {
+	if d.bdRead {
+		d.r.unreadn1()
+		d.bdRead = false
+	}
+}
+
+func (d *msgpackDecDriver) ContainerType() (vt valueType) {
+	if !d.bdRead {
+		d.readNextBd()
+	}
+	bd := d.bd
+	if bd == mpNil {
+		return valueTypeNil
+	} else if bd == mpBin8 || bd == mpBin16 || bd == mpBin32 ||
+		(!d.h.RawToString &&
+			(bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax))) {
+		return valueTypeBytes
+	} else if d.h.RawToString &&
+		(bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax)) {
+		return valueTypeString
+	} else if bd == mpArray16 || bd == mpArray32 || (bd >= mpFixArrayMin && bd <= mpFixArrayMax) {
+		return valueTypeArray
+	} else if bd == mpMap16 || bd == mpMap32 || (bd >= mpFixMapMin && bd <= mpFixMapMax) {
+		return valueTypeMap
+	}
+	// else {
+	// d.d.errorf("isContainerType: unsupported parameter: %v", vt)
+	// }
+	return valueTypeUnset
+}
+
+func (d *msgpackDecDriver) TryDecodeAsNil() (v bool) {
+	if !d.bdRead {
+		d.readNextBd()
+	}
+	if d.bd == mpNil {
+		d.bdRead = false
+		return true
+	}
+	return
+}
+
+func (d *msgpackDecDriver) readContainerLen(ct msgpackContainerType) (clen int) {
+	bd := d.bd
+	if bd == mpNil {
+		clen = -1 // to represent nil
+	} else if bd == ct.b8 {
+		clen = int(d.r.readn1())
+	} else if bd == ct.b16 {
+		clen = int(bigen.Uint16(d.r.readx(2)))
+	} else if bd == ct.b32 {
+		clen = int(bigen.Uint32(d.r.readx(4)))
+	} else if (ct.bFixMin & bd) == ct.bFixMin {
+		clen = int(ct.bFixMin ^ bd)
+	} else {
+		d.d.errorf("cannot read container length: %s: hex: %x, decimal: %d", msgBadDesc, bd, bd)
+		return
+	}
+	d.bdRead = false
+	return
+}
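+
+// Note: for fix containers, readContainerLen recovers the length by xor-ing
+// out the base descriptor, e.g. for fixstr 0xa5: 0xa0 ^ 0xa5 == 5.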
+
+func (d *msgpackDecDriver) ReadMapStart() int {
+	if !d.bdRead {
+		d.readNextBd()
+	}
+	return d.readContainerLen(msgpackContainerMap)
+}
+
+func (d *msgpackDecDriver) ReadArrayStart() int {
+	if !d.bdRead {
+		d.readNextBd()
+	}
+	return d.readContainerLen(msgpackContainerList)
+}
+
+func (d *msgpackDecDriver) readExtLen() (clen int) {
+	switch d.bd {
+	case mpNil:
+		clen = -1 // to represent nil
+	case mpFixExt1:
+		clen = 1
+	case mpFixExt2:
+		clen = 2
+	case mpFixExt4:
+		clen = 4
+	case mpFixExt8:
+		clen = 8
+	case mpFixExt16:
+		clen = 16
+	case mpExt8:
+		clen = int(d.r.readn1())
+	case mpExt16:
+		clen = int(bigen.Uint16(d.r.readx(2)))
+	case mpExt32:
+		clen = int(bigen.Uint32(d.r.readx(4)))
+	default:
+		d.d.errorf("decoding ext bytes: found unexpected byte: %x", d.bd)
+		return
+	}
+	return
+}
+
+func (d *msgpackDecDriver) DecodeTime() (t time.Time) {
+	// decode time from string bytes or ext
+	if !d.bdRead {
+		d.readNextBd()
+	}
+	if d.bd == mpNil {
+		d.bdRead = false
+		return
+	}
+	var clen int
+	switch d.ContainerType() {
+	case valueTypeBytes, valueTypeString:
+		clen = d.readContainerLen(msgpackContainerStr)
+	default:
+		// expect to see mpFixExt4,-1 OR mpFixExt8,-1 OR mpExt8,12,-1
+		d.bdRead = false
+		b2 := d.r.readn1()
+		if d.bd == mpFixExt4 && b2 == mpTimeExtTagU {
+			clen = 4
+		} else if d.bd == mpFixExt8 && b2 == mpTimeExtTagU {
+			clen = 8
+		} else if d.bd == mpExt8 && b2 == 12 && d.r.readn1() == mpTimeExtTagU {
+			clen = 12
+		} else {
+			d.d.errorf("invalid bytes for decoding time as extension: got 0x%x, 0x%x", d.bd, b2)
+			return
+		}
+	}
+	return d.decodeTime(clen)
+}
+
+func (d *msgpackDecDriver) decodeTime(clen int) (t time.Time) {
+	// bs = d.r.readx(clen)
+	d.bdRead = false
+	switch clen {
+	case 4:
+		t = time.Unix(int64(bigen.Uint32(d.r.readx(4))), 0).UTC()
+	case 8:
+		tv := bigen.Uint64(d.r.readx(8))
+		t = time.Unix(int64(tv&0x00000003ffffffff), int64(tv>>34)).UTC()
+	case 12:
+		nsec := bigen.Uint32(d.r.readx(4))
+		sec := bigen.Uint64(d.r.readx(8))
+		t = time.Unix(int64(sec), int64(nsec)).UTC()
+	default:
+		d.d.errorf("invalid length of bytes for decoding time - expecting 4 or 8 or 12, got %d", clen)
+		return
+	}
+	return
+}
+
+func (d *msgpackDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
+	if xtag > 0xff {
+		d.d.errorf("ext: tag must be <= 0xff; got: %v", xtag)
+		return
+	}
+	realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag))
+	realxtag = uint64(realxtag1)
+	if ext == nil {
+		re := rv.(*RawExt)
+		re.Tag = realxtag
+		re.Data = detachZeroCopyBytes(d.br, re.Data, xbs)
+	} else {
+		ext.ReadExt(rv, xbs)
+	}
+	return
+}
+
+func (d *msgpackDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) {
+	if !d.bdRead {
+		d.readNextBd()
+	}
+	xbd := d.bd
+	if xbd == mpBin8 || xbd == mpBin16 || xbd == mpBin32 {
+		xbs = d.DecodeBytes(nil, true)
+	} else if xbd == mpStr8 || xbd == mpStr16 || xbd == mpStr32 ||
+		(xbd >= mpFixStrMin && xbd <= mpFixStrMax) {
+		xbs = d.DecodeStringAsBytes()
+	} else {
+		clen := d.readExtLen()
+		xtag = d.r.readn1()
+		if verifyTag && xtag != tag {
+			d.d.errorf("wrong extension tag - got %b, expecting %v", xtag, tag)
+			return
+		}
+		xbs = d.r.readx(clen)
+	}
+	d.bdRead = false
+	return
+}
+
+//--------------------------------------------------
+
+// MsgpackHandle is a Handle for the Msgpack Schema-Free Encoding Format.
+type MsgpackHandle struct {
+	BasicHandle
+
+	// RawToString controls how raw bytes are decoded into a nil interface{}.
+	RawToString bool
+
+	// NoFixedNum says to output all signed integers as 2 bytes, never as a 1-byte fixnum.
+	NoFixedNum bool
+
+	// WriteExt flag supports encoding configured extensions with extension tags.
+	// It also controls whether other elements of the new spec are encoded (ie Str8).
+	//
+	// With WriteExt=false, configured extensions are serialized as raw bytes
+	// and Str8 is not encoded.
+	//
+	// A stream can still be decoded into a typed value, provided an appropriate value
+	// is provided, but the type cannot be inferred from the stream. If no appropriate
+	// type is provided (e.g. decoding into a nil interface{}), you get back
+	// a []byte or string based on the setting of RawToString.
+	WriteExt bool
+
+	binaryEncodingType
+	noElemSeparators
+
+	// _ [1]uint64 // padding
+}
+
+// Name returns the name of the handle: msgpack
+func (h *MsgpackHandle) Name() string { return "msgpack" }
+
+// SetBytesExt sets an extension
+func (h *MsgpackHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) {
+	return h.SetExt(rt, tag, &extWrapper{ext, interfaceExtFailer{}})
+}
+
+func (h *MsgpackHandle) newEncDriver(e *Encoder) encDriver {
+	return &msgpackEncDriver{e: e, w: e.w, h: h}
+}
+
+func (h *MsgpackHandle) newDecDriver(d *Decoder) decDriver {
+	return &msgpackDecDriver{d: d, h: h, r: d.r, br: d.bytes}
+}
+
+func (e *msgpackEncDriver) reset() {
+	e.w = e.e.w
+}
+
+func (d *msgpackDecDriver) reset() {
+	d.r, d.br = d.d.r, d.d.bytes
+	d.bd, d.bdRead = 0, false
+}
+
+//--------------------------------------------------
+
+type msgpackSpecRpcCodec struct {
+	rpcCodec
+}
+
+// /////////////// Spec RPC Codec ///////////////////
+func (c *msgpackSpecRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error {
+	// WriteRequest can write to both a Go service, and other services that do
+	// not abide by the 1 argument rule of a Go service.
+	// We discriminate based on if the body is a MsgpackSpecRpcMultiArgs
+	var bodyArr []interface{}
+	if m, ok := body.(MsgpackSpecRpcMultiArgs); ok {
+		bodyArr = ([]interface{})(m)
+	} else {
+		bodyArr = []interface{}{body}
+	}
+	r2 := []interface{}{0, uint32(r.Seq), r.ServiceMethod, bodyArr}
+	return c.write(r2, nil, false)
+}
+
+func (c *msgpackSpecRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error {
+	var moe interface{}
+	if r.Error != "" {
+		moe = r.Error
+	}
+	if moe != nil && body != nil {
+		body = nil
+	}
+	r2 := []interface{}{1, uint32(r.Seq), moe, body}
+	return c.write(r2, nil, false)
+}
+
+func (c *msgpackSpecRpcCodec) ReadResponseHeader(r *rpc.Response) error {
+	return c.parseCustomHeader(1, &r.Seq, &r.Error)
+}
+
+func (c *msgpackSpecRpcCodec) ReadRequestHeader(r *rpc.Request) error {
+	return c.parseCustomHeader(0, &r.Seq, &r.ServiceMethod)
+}
+
+func (c *msgpackSpecRpcCodec) ReadRequestBody(body interface{}) error {
+	if body == nil { // read and discard
+		return c.read(nil)
+	}
+	bodyArr := []interface{}{body}
+	return c.read(&bodyArr)
+}
+
+func (c *msgpackSpecRpcCodec) parseCustomHeader(expectTypeByte byte, msgid *uint64, methodOrError *string) (err error) {
+	if c.isClosed() {
+		return io.EOF
+	}
+
+	// We read the response header by hand
+	// so that the body can be decoded on its own from the stream at a later time.
+
+	const fia byte = 0x94 // four-item array descriptor value
+	// Not sure why the panic of EOF is swallowed above.
+	// if bs1 := c.dec.r.readn1(); bs1 != fia {
+	// 	err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, bs1)
+	// 	return
+	// }
+	var ba [1]byte
+	var n int
+	for {
+		n, err = c.r.Read(ba[:])
+		if err != nil {
+			return
+		}
+		if n == 1 {
+			break
+		}
+	}
+
+	var b = ba[0]
+	if b != fia {
+		err = fmt.Errorf("not array - %s %x/%s", msgBadDesc, b, mpdesc(b))
+	} else {
+		err = c.read(&b)
+		if err == nil {
+			if b != expectTypeByte {
+				err = fmt.Errorf("%s - expecting %v but got %x/%s",
+					msgBadDesc, expectTypeByte, b, mpdesc(b))
+			} else {
+				err = c.read(msgid)
+				if err == nil {
+					err = c.read(methodOrError)
+				}
+			}
+		}
+	}
+	return
+}
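+
+// For example, a request header per the msgpack-rpc spec, for Seq=1 and
+// ServiceMethod "Echo", starts with (illustrative):
+//   0x94                    four-element array
+//   0x00                    message type: request
+//   0x01                    msgid
+//   0xa4 'E' 'c' 'h' 'o'    method name as a fixstr
+// followed by the params array, which parseCustomHeader leaves on the stream
+// for the body read.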
+
+//--------------------------------------------------
+
+// msgpackSpecRpc is the implementation of Rpc that uses the custom communication protocol
+// defined in the msgpack-rpc spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md
+type msgpackSpecRpc struct{}
+
+// MsgpackSpecRpc implements Rpc using the communication protocol defined in
+// the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md .
+//
+// See GoRpc documentation, for information on buffering for better performance.
+var MsgpackSpecRpc msgpackSpecRpc
+
+func (x msgpackSpecRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec {
+	return &msgpackSpecRpcCodec{newRPCCodec(conn, h)}
+}
+
+func (x msgpackSpecRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec {
+	return &msgpackSpecRpcCodec{newRPCCodec(conn, h)}
+}
+
+var _ decDriver = (*msgpackDecDriver)(nil)
+var _ encDriver = (*msgpackEncDriver)(nil)
diff --git a/vendor/github.com/ugorji/go/codec/rpc.go b/vendor/github.com/ugorji/go/codec/rpc.go
new file mode 100644
index 0000000..9fb3c01
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/rpc.go
@@ -0,0 +1,232 @@
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+	"bufio"
+	"errors"
+	"io"
+	"net/rpc"
+	"sync"
+)
+
+// Rpc provides an rpc Server or Client Codec for rpc communication.
+type Rpc interface {
+	ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec
+	ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec
+}
+
+// RPCOptions holds options specific to rpc functionality
+type RPCOptions struct {
+	// RPCNoBuffer configures whether we attempt to buffer reads and writes during RPC calls.
+	//
+	// Set RPCNoBuffer=true to turn buffering off.
+	// Buffering can still be done if buffered connections are passed in, or
+	// buffering is configured on the handle.
+	RPCNoBuffer bool
+}
+
+// rpcCodec defines the struct members and common methods.
+type rpcCodec struct {
+	c io.Closer
+	r io.Reader
+	w io.Writer
+	f ioFlusher
+
+	dec *Decoder
+	enc *Encoder
+	// bw  *bufio.Writer
+	// br  *bufio.Reader
+	mu sync.Mutex
+	h  Handle
+
+	cls    bool
+	clsmu  sync.RWMutex
+	clsErr error
+}
+
+func newRPCCodec(conn io.ReadWriteCloser, h Handle) rpcCodec {
+	// return newRPCCodec2(bufio.NewReader(conn), bufio.NewWriter(conn), conn, h)
+	return newRPCCodec2(conn, conn, conn, h)
+}
+
+func newRPCCodec2(r io.Reader, w io.Writer, c io.Closer, h Handle) rpcCodec {
+	// defensive: ensure that jsonH has TermWhitespace turned on.
+	if jsonH, ok := h.(*JsonHandle); ok && !jsonH.TermWhitespace {
+		panic(errors.New("rpc requires a JsonHandle with TermWhitespace set to true"))
+	}
+	// always ensure that we use a flusher, and always flush what was written to the connection.
+	// we lose nothing by using a buffered writer internally.
+	f, ok := w.(ioFlusher)
+	bh := h.getBasicHandle()
+	if !bh.RPCNoBuffer {
+		if bh.WriterBufferSize <= 0 {
+			if !ok {
+				bw := bufio.NewWriter(w)
+				f, w = bw, bw
+			}
+		}
+		if bh.ReaderBufferSize <= 0 {
+			if _, ok = r.(ioPeeker); !ok { // check the reader (not the writer) for existing buffering
+				if _, ok = r.(ioBuffered); !ok {
+					br := bufio.NewReader(r)
+					r = br
+				}
+			}
+		}
+	}
+	return rpcCodec{
+		c:   c,
+		w:   w,
+		r:   r,
+		f:   f,
+		h:   h,
+		enc: NewEncoder(w, h),
+		dec: NewDecoder(r, h),
+	}
+}
+
+func (c *rpcCodec) write(obj1, obj2 interface{}, writeObj2 bool) (err error) {
+	if c.isClosed() {
+		return c.clsErr
+	}
+	err = c.enc.Encode(obj1)
+	if err == nil {
+		if writeObj2 {
+			err = c.enc.Encode(obj2)
+		}
+		// if err == nil && c.f != nil {
+		// 	err = c.f.Flush()
+		// }
+	}
+	if c.f != nil {
+		if err == nil {
+			err = c.f.Flush()
+		} else {
+			_ = c.f.Flush() // swallow flush error, so we maintain prior error on write
+		}
+	}
+	return
+}
+
+func (c *rpcCodec) swallow(err *error) {
+	defer panicToErr(c.dec, err)
+	c.dec.swallow()
+}
+
+func (c *rpcCodec) read(obj interface{}) (err error) {
+	if c.isClosed() {
+		return c.clsErr
+	}
+	// If nil is passed in, we should read and discard.
+	if obj == nil {
+		// var obj2 interface{}
+		// return c.dec.Decode(&obj2)
+		c.swallow(&err)
+		return
+	}
+	return c.dec.Decode(obj)
+}
+
+func (c *rpcCodec) isClosed() (b bool) {
+	if c.c != nil {
+		c.clsmu.RLock()
+		b = c.cls
+		c.clsmu.RUnlock()
+	}
+	return
+}
+
+func (c *rpcCodec) Close() error {
+	if c.c == nil || c.isClosed() {
+		return c.clsErr
+	}
+	c.clsmu.Lock()
+	c.cls = true
+	c.clsErr = c.c.Close()
+	c.clsmu.Unlock()
+	return c.clsErr
+}
+
+func (c *rpcCodec) ReadResponseBody(body interface{}) error {
+	return c.read(body)
+}
+
+// -------------------------------------
+
+type goRpcCodec struct {
+	rpcCodec
+}
+
+func (c *goRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error {
+	// Must protect for concurrent access as per API
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	return c.write(r, body, true)
+}
+
+func (c *goRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	return c.write(r, body, true)
+}
+
+func (c *goRpcCodec) ReadResponseHeader(r *rpc.Response) error {
+	return c.read(r)
+}
+
+func (c *goRpcCodec) ReadRequestHeader(r *rpc.Request) error {
+	return c.read(r)
+}
+
+func (c *goRpcCodec) ReadRequestBody(body interface{}) error {
+	return c.read(body)
+}
+
+// -------------------------------------
+
+// goRpc is the implementation of Rpc that uses the communication protocol
+// defined in the net/rpc package.
+type goRpc struct{}
+
+// GoRpc implements Rpc using the communication protocol defined in net/rpc package.
+//
+// Note: network connection (from net.Dial, of type io.ReadWriteCloser) is not buffered.
+//
+// For performance, you should configure WriterBufferSize and ReaderBufferSize on the handle.
+// This ensures we use an adequate buffer during reading and writing.
+// If not configured, we will internally initialize and use a buffer during reads and writes.
+// This can be turned off via the RPCNoBuffer option on the Handle.
+//   var handle codec.JsonHandle
+//   handle.RPCNoBuffer = true // turns off attempt by rpc module to initialize a buffer
+//
+// Example 1: one way of configuring buffering explicitly:
+//   var handle codec.JsonHandle // codec handle
+//   handle.ReaderBufferSize = 1024
+//   handle.WriterBufferSize = 1024
+//   var conn io.ReadWriteCloser // connection got from a socket
+//   var serverCodec = GoRpc.ServerCodec(conn, handle)
+//   var clientCodec = GoRpc.ClientCodec(conn, handle)
+//
+// Example 2: you can also explicitly create a buffered connection yourself,
+// and not worry about configuring the buffer sizes in the Handle.
+//   var handle codec.Handle     // codec handle
+//   var conn io.ReadWriteCloser // connection got from a socket
+//   var bufconn = struct {      // bufconn here is a buffered io.ReadWriteCloser
+//       io.Closer
+//       *bufio.Reader
+//       *bufio.Writer
+//   }{conn, bufio.NewReader(conn), bufio.NewWriter(conn)}
+//   var serverCodec = GoRpc.ServerCodec(bufconn, handle)
+//   var clientCodec = GoRpc.ClientCodec(bufconn, handle)
+//
+var GoRpc goRpc
+
+func (x goRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec {
+	return &goRpcCodec{newRPCCodec(conn, h)}
+}
+
+func (x goRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec {
+	return &goRpcCodec{newRPCCodec(conn, h)}
+}
diff --git a/vendor/github.com/ugorji/go/codec/simple.go b/vendor/github.com/ugorji/go/codec/simple.go
new file mode 100644
index 0000000..f1e181e
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/simple.go
@@ -0,0 +1,652 @@
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+	"math"
+	"reflect"
+	"time"
+)
+
+const (
+	_               uint8 = iota
+	simpleVdNil           = 1
+	simpleVdFalse         = 2
+	simpleVdTrue          = 3
+	simpleVdFloat32       = 4
+	simpleVdFloat64       = 5
+
+	// each lasts for 4 (ie n, n+1, n+2, n+3)
+	simpleVdPosInt = 8
+	simpleVdNegInt = 12
+
+	simpleVdTime = 24
+
+	// containers: each family reserves 8 values (n, n+1, ... n+7); only n .. n+4 are used
+	simpleVdString    = 216
+	simpleVdByteArray = 224
+	simpleVdArray     = 232
+	simpleVdMap       = 240
+	simpleVdExt       = 248
+)
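+
+// Descriptor arithmetic, for illustration:
+//   simpleVdPosInt+1 (9)    -> a uint encoded in 2 bytes
+//   simpleVdString+2 (218)  -> a string whose length is encoded in 2 bytes
+//   simpleVdMap (240)       -> a zero-length map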
+
+type simpleEncDriver struct {
+	noBuiltInTypes
+	// encNoSeparator
+	e *Encoder
+	h *SimpleHandle
+	w encWriter
+	b [8]byte
+	// c containerState
+	encDriverTrackContainerWriter
+	// encDriverNoopContainerWriter
+	_ [2]uint64 // padding
+}
+
+func (e *simpleEncDriver) EncodeNil() {
+	e.w.writen1(simpleVdNil)
+}
+
+func (e *simpleEncDriver) EncodeBool(b bool) {
+	if e.h.EncZeroValuesAsNil && e.c != containerMapKey && !b {
+		e.EncodeNil()
+		return
+	}
+	if b {
+		e.w.writen1(simpleVdTrue)
+	} else {
+		e.w.writen1(simpleVdFalse)
+	}
+}
+
+func (e *simpleEncDriver) EncodeFloat32(f float32) {
+	if e.h.EncZeroValuesAsNil && e.c != containerMapKey && f == 0.0 {
+		e.EncodeNil()
+		return
+	}
+	e.w.writen1(simpleVdFloat32)
+	bigenHelper{e.b[:4], e.w}.writeUint32(math.Float32bits(f))
+}
+
+func (e *simpleEncDriver) EncodeFloat64(f float64) {
+	if e.h.EncZeroValuesAsNil && e.c != containerMapKey && f == 0.0 {
+		e.EncodeNil()
+		return
+	}
+	e.w.writen1(simpleVdFloat64)
+	bigenHelper{e.b[:8], e.w}.writeUint64(math.Float64bits(f))
+}
+
+func (e *simpleEncDriver) EncodeInt(v int64) {
+	if v < 0 {
+		e.encUint(uint64(-v), simpleVdNegInt)
+	} else {
+		e.encUint(uint64(v), simpleVdPosInt)
+	}
+}
+
+func (e *simpleEncDriver) EncodeUint(v uint64) {
+	e.encUint(v, simpleVdPosInt)
+}
+
+func (e *simpleEncDriver) encUint(v uint64, bd uint8) {
+	if e.h.EncZeroValuesAsNil && e.c != containerMapKey && v == 0 {
+		e.EncodeNil()
+		return
+	}
+	if v <= math.MaxUint8 {
+		e.w.writen2(bd, uint8(v))
+	} else if v <= math.MaxUint16 {
+		e.w.writen1(bd + 1)
+		bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v))
+	} else if v <= math.MaxUint32 {
+		e.w.writen1(bd + 2)
+		bigenHelper{e.b[:4], e.w}.writeUint32(uint32(v))
+	} else { // if v <= math.MaxUint64 {
+		e.w.writen1(bd + 3)
+		bigenHelper{e.b[:8], e.w}.writeUint64(v)
+	}
+}
+
+func (e *simpleEncDriver) encLen(bd byte, length int) {
+	if length == 0 {
+		e.w.writen1(bd)
+	} else if length <= math.MaxUint8 {
+		e.w.writen1(bd + 1)
+		e.w.writen1(uint8(length))
+	} else if length <= math.MaxUint16 {
+		e.w.writen1(bd + 2)
+		bigenHelper{e.b[:2], e.w}.writeUint16(uint16(length))
+	} else if int64(length) <= math.MaxUint32 {
+		e.w.writen1(bd + 3)
+		bigenHelper{e.b[:4], e.w}.writeUint32(uint32(length))
+	} else {
+		e.w.writen1(bd + 4)
+		bigenHelper{e.b[:8], e.w}.writeUint64(uint64(length))
+	}
+}
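+
+// encLen examples, derived from the branches above:
+//   encLen(simpleVdString, 0)     -> 0xd8                     (216; no length bytes)
+//   encLen(simpleVdString, 200)   -> 0xd9 0xc8                (217; 1-byte length)
+//   encLen(simpleVdString, 70000) -> 0xdb 0x00 0x01 0x11 0x70 (219; 4-byte length)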
+
+func (e *simpleEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, _ *Encoder) {
+	bs := ext.WriteExt(rv)
+	if bs == nil {
+		e.EncodeNil()
+		return
+	}
+	e.encodeExtPreamble(uint8(xtag), len(bs))
+	e.w.writeb(bs)
+}
+
+func (e *simpleEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) {
+	e.encodeExtPreamble(uint8(re.Tag), len(re.Data))
+	e.w.writeb(re.Data)
+}
+
+func (e *simpleEncDriver) encodeExtPreamble(xtag byte, length int) {
+	e.encLen(simpleVdExt, length)
+	e.w.writen1(xtag)
+}
+
+func (e *simpleEncDriver) WriteArrayStart(length int) {
+	e.c = containerArrayStart
+	e.encLen(simpleVdArray, length)
+}
+
+func (e *simpleEncDriver) WriteMapStart(length int) {
+	e.c = containerMapStart
+	e.encLen(simpleVdMap, length)
+}
+
+func (e *simpleEncDriver) EncodeString(c charEncoding, v string) {
+	if false && e.h.EncZeroValuesAsNil && e.c != containerMapKey && v == "" {
+		e.EncodeNil()
+		return
+	}
+	e.encLen(simpleVdString, len(v))
+	e.w.writestr(v)
+}
+
+// func (e *simpleEncDriver) EncodeSymbol(v string) {
+// 	e.EncodeString(cUTF8, v)
+// }
+
+func (e *simpleEncDriver) EncodeStringBytes(c charEncoding, v []byte) {
+	// if e.h.EncZeroValuesAsNil && e.c != containerMapKey && v == nil {
+	if v == nil {
+		e.EncodeNil()
+		return
+	}
+	e.encLen(simpleVdByteArray, len(v))
+	e.w.writeb(v)
+}
+
+func (e *simpleEncDriver) EncodeTime(t time.Time) {
+	// if e.h.EncZeroValuesAsNil && e.c != containerMapKey && t.IsZero() {
+	if t.IsZero() {
+		e.EncodeNil()
+		return
+	}
+	v, err := t.MarshalBinary()
+	if err != nil {
+		e.e.errorv(err)
+		return
+	}
+	// time.Time MarshalBinary output is about 14 bytes.
+	e.w.writen2(simpleVdTime, uint8(len(v)))
+	e.w.writeb(v)
+}
+
+//------------------------------------
+
+type simpleDecDriver struct {
+	d      *Decoder
+	h      *SimpleHandle
+	r      decReader
+	bdRead bool
+	bd     byte
+	br     bool // a bytes reader?
+	c      containerState
+	// b      [scratchByteArrayLen]byte
+	noBuiltInTypes
+	// noStreamingCodec
+	decDriverNoopContainerReader
+	_ [3]uint64 // padding
+}
+
+func (d *simpleDecDriver) readNextBd() {
+	d.bd = d.r.readn1()
+	d.bdRead = true
+}
+
+func (d *simpleDecDriver) uncacheRead() {
+	if d.bdRead {
+		d.r.unreadn1()
+		d.bdRead = false
+	}
+}
+
+func (d *simpleDecDriver) ContainerType() (vt valueType) {
+	if !d.bdRead {
+		d.readNextBd()
+	}
+	switch d.bd {
+	case simpleVdNil:
+		return valueTypeNil
+	case simpleVdByteArray, simpleVdByteArray + 1,
+		simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4:
+		return valueTypeBytes
+	case simpleVdString, simpleVdString + 1,
+		simpleVdString + 2, simpleVdString + 3, simpleVdString + 4:
+		return valueTypeString
+	case simpleVdArray, simpleVdArray + 1,
+		simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4:
+		return valueTypeArray
+	case simpleVdMap, simpleVdMap + 1,
+		simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4:
+		return valueTypeMap
+		// case simpleVdTime:
+		// 	return valueTypeTime
+	}
+	// else {
+	// d.d.errorf("isContainerType: unsupported parameter: %v", vt)
+	// }
+	return valueTypeUnset
+}
+
+func (d *simpleDecDriver) TryDecodeAsNil() bool {
+	if !d.bdRead {
+		d.readNextBd()
+	}
+	if d.bd == simpleVdNil {
+		d.bdRead = false
+		return true
+	}
+	return false
+}
+
+func (d *simpleDecDriver) decCheckInteger() (ui uint64, neg bool) {
+	if !d.bdRead {
+		d.readNextBd()
+	}
+	switch d.bd {
+	case simpleVdPosInt:
+		ui = uint64(d.r.readn1())
+	case simpleVdPosInt + 1:
+		ui = uint64(bigen.Uint16(d.r.readx(2)))
+	case simpleVdPosInt + 2:
+		ui = uint64(bigen.Uint32(d.r.readx(4)))
+	case simpleVdPosInt + 3:
+		ui = uint64(bigen.Uint64(d.r.readx(8)))
+	case simpleVdNegInt:
+		ui = uint64(d.r.readn1())
+		neg = true
+	case simpleVdNegInt + 1:
+		ui = uint64(bigen.Uint16(d.r.readx(2)))
+		neg = true
+	case simpleVdNegInt + 2:
+		ui = uint64(bigen.Uint32(d.r.readx(4)))
+		neg = true
+	case simpleVdNegInt + 3:
+		ui = uint64(bigen.Uint64(d.r.readx(8)))
+		neg = true
+	default:
+		d.d.errorf("integer only valid from pos/neg integer1..8. Invalid descriptor: %v", d.bd)
+		return
+	}
+	// don't do this check, because callers may only want the unsigned value.
+	// if ui > math.MaxInt64 {
+	// 	d.d.errorf("decIntAny: Integer out of range for signed int64: %v", ui)
+	//		return
+	// }
+	return
+}
+
+func (d *simpleDecDriver) DecodeInt64() (i int64) {
+	ui, neg := d.decCheckInteger()
+	i = chkOvf.SignedIntV(ui)
+	if neg {
+		i = -i
+	}
+	d.bdRead = false
+	return
+}
+
+func (d *simpleDecDriver) DecodeUint64() (ui uint64) {
+	ui, neg := d.decCheckInteger()
+	if neg {
+		d.d.errorf("assigning negative signed value to unsigned type")
+		return
+	}
+	d.bdRead = false
+	return
+}
+
+func (d *simpleDecDriver) DecodeFloat64() (f float64) {
+	if !d.bdRead {
+		d.readNextBd()
+	}
+	if d.bd == simpleVdFloat32 {
+		f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4))))
+	} else if d.bd == simpleVdFloat64 {
+		f = math.Float64frombits(bigen.Uint64(d.r.readx(8)))
+	} else {
+		if d.bd >= simpleVdPosInt && d.bd <= simpleVdNegInt+3 {
+			f = float64(d.DecodeInt64())
+		} else {
+			d.d.errorf("float only valid from float32/64: Invalid descriptor: %v", d.bd)
+			return
+		}
+	}
+	d.bdRead = false
+	return
+}
+
+// bool can be decoded from bool only (single byte).
+func (d *simpleDecDriver) DecodeBool() (b bool) {
+	if !d.bdRead {
+		d.readNextBd()
+	}
+	if d.bd == simpleVdTrue {
+		b = true
+	} else if d.bd == simpleVdFalse {
+	} else {
+		d.d.errorf("cannot decode bool - %s: %x", msgBadDesc, d.bd)
+		return
+	}
+	d.bdRead = false
+	return
+}
+
+func (d *simpleDecDriver) ReadMapStart() (length int) {
+	if !d.bdRead {
+		d.readNextBd()
+	}
+	d.bdRead = false
+	d.c = containerMapStart
+	return d.decLen()
+}
+
+func (d *simpleDecDriver) ReadArrayStart() (length int) {
+	if !d.bdRead {
+		d.readNextBd()
+	}
+	d.bdRead = false
+	d.c = containerArrayStart
+	return d.decLen()
+}
+
+func (d *simpleDecDriver) ReadArrayElem() {
+	d.c = containerArrayElem
+}
+
+func (d *simpleDecDriver) ReadArrayEnd() {
+	d.c = containerArrayEnd
+}
+
+func (d *simpleDecDriver) ReadMapElemKey() {
+	d.c = containerMapKey
+}
+
+func (d *simpleDecDriver) ReadMapElemValue() {
+	d.c = containerMapValue
+}
+
+func (d *simpleDecDriver) ReadMapEnd() {
+	d.c = containerMapEnd
+}
+
+func (d *simpleDecDriver) decLen() int {
+	switch d.bd % 8 {
+	case 0:
+		return 0
+	case 1:
+		return int(d.r.readn1())
+	case 2:
+		return int(bigen.Uint16(d.r.readx(2)))
+	case 3:
+		ui := uint64(bigen.Uint32(d.r.readx(4)))
+		if chkOvf.Uint(ui, intBitsize) {
+			d.d.errorf("overflow integer: %v", ui)
+			return 0
+		}
+		return int(ui)
+	case 4:
+		ui := bigen.Uint64(d.r.readx(8))
+		if chkOvf.Uint(ui, intBitsize) {
+			d.d.errorf("overflow integer: %v", ui)
+			return 0
+		}
+		return int(ui)
+	}
+	d.d.errorf("cannot read length: bd%%8 must be in range 0..4. Got: %d", d.bd%8)
+	return -1
+}
+
+func (d *simpleDecDriver) DecodeString() (s string) {
+	return string(d.DecodeBytes(d.d.b[:], true))
+}
+
+func (d *simpleDecDriver) DecodeStringAsBytes() (s []byte) {
+	return d.DecodeBytes(d.d.b[:], true)
+}
+
+func (d *simpleDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) {
+	if !d.bdRead {
+		d.readNextBd()
+	}
+	if d.bd == simpleVdNil {
+		d.bdRead = false
+		return
+	}
+	// check if an "array" of uint8's (see ContainerType for how to infer if an array)
+	if d.bd >= simpleVdArray && d.bd <= simpleVdMap+4 {
+		if len(bs) == 0 && zerocopy {
+			bs = d.d.b[:]
+		}
+		bsOut, _ = fastpathTV.DecSliceUint8V(bs, true, d.d)
+		return
+	}
+
+	clen := d.decLen()
+	d.bdRead = false
+	if zerocopy {
+		if d.br {
+			return d.r.readx(clen)
+		} else if len(bs) == 0 {
+			bs = d.d.b[:]
+		}
+	}
+	return decByteSlice(d.r, clen, d.d.h.MaxInitLen, bs)
+}
+
+func (d *simpleDecDriver) DecodeTime() (t time.Time) {
+	if !d.bdRead {
+		d.readNextBd()
+	}
+	if d.bd == simpleVdNil {
+		d.bdRead = false
+		return
+	}
+	if d.bd != simpleVdTime {
+		d.d.errorf("invalid descriptor for time.Time - expect 0x%x, received 0x%x", simpleVdTime, d.bd)
+		return
+	}
+	d.bdRead = false
+	clen := int(d.r.readn1())
+	b := d.r.readx(clen)
+	if err := (&t).UnmarshalBinary(b); err != nil {
+		d.d.errorv(err)
+	}
+	return
+}
+
+func (d *simpleDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
+	if xtag > 0xff {
+		d.d.errorf("ext: tag must be <= 0xff; got: %v", xtag)
+		return
+	}
+	realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag))
+	realxtag = uint64(realxtag1)
+	if ext == nil {
+		re := rv.(*RawExt)
+		re.Tag = realxtag
+		re.Data = detachZeroCopyBytes(d.br, re.Data, xbs)
+	} else {
+		ext.ReadExt(rv, xbs)
+	}
+	return
+}
+
+func (d *simpleDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) {
+	if !d.bdRead {
+		d.readNextBd()
+	}
+	switch d.bd {
+	case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4:
+		l := d.decLen()
+		xtag = d.r.readn1()
+		if verifyTag && xtag != tag {
+			d.d.errorf("wrong extension tag. Got %b. Expecting: %v", xtag, tag)
+			return
+		}
+		xbs = d.r.readx(l)
+	case simpleVdByteArray, simpleVdByteArray + 1,
+		simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4:
+		xbs = d.DecodeBytes(nil, true)
+	default:
+		d.d.errorf("ext - %s - expecting extensions/bytearray, got: 0x%x", msgBadDesc, d.bd)
+		return
+	}
+	d.bdRead = false
+	return
+}
+
+func (d *simpleDecDriver) DecodeNaked() {
+	if !d.bdRead {
+		d.readNextBd()
+	}
+
+	n := d.d.n
+	var decodeFurther bool
+
+	switch d.bd {
+	case simpleVdNil:
+		n.v = valueTypeNil
+	case simpleVdFalse:
+		n.v = valueTypeBool
+		n.b = false
+	case simpleVdTrue:
+		n.v = valueTypeBool
+		n.b = true
+	case simpleVdPosInt, simpleVdPosInt + 1, simpleVdPosInt + 2, simpleVdPosInt + 3:
+		if d.h.SignedInteger {
+			n.v = valueTypeInt
+			n.i = d.DecodeInt64()
+		} else {
+			n.v = valueTypeUint
+			n.u = d.DecodeUint64()
+		}
+	case simpleVdNegInt, simpleVdNegInt + 1, simpleVdNegInt + 2, simpleVdNegInt + 3:
+		n.v = valueTypeInt
+		n.i = d.DecodeInt64()
+	case simpleVdFloat32:
+		n.v = valueTypeFloat
+		n.f = d.DecodeFloat64()
+	case simpleVdFloat64:
+		n.v = valueTypeFloat
+		n.f = d.DecodeFloat64()
+	case simpleVdTime:
+		n.v = valueTypeTime
+		n.t = d.DecodeTime()
+	case simpleVdString, simpleVdString + 1,
+		simpleVdString + 2, simpleVdString + 3, simpleVdString + 4:
+		n.v = valueTypeString
+		n.s = d.DecodeString()
+	case simpleVdByteArray, simpleVdByteArray + 1,
+		simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4:
+		n.v = valueTypeBytes
+		n.l = d.DecodeBytes(nil, false)
+	case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4:
+		n.v = valueTypeExt
+		l := d.decLen()
+		n.u = uint64(d.r.readn1())
+		n.l = d.r.readx(l)
+	case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2,
+		simpleVdArray + 3, simpleVdArray + 4:
+		n.v = valueTypeArray
+		decodeFurther = true
+	case simpleVdMap, simpleVdMap + 1, simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4:
+		n.v = valueTypeMap
+		decodeFurther = true
+	default:
+		d.d.errorf("cannot infer value - %s 0x%x", msgBadDesc, d.bd)
+	}
+
+	if !decodeFurther {
+		d.bdRead = false
+	}
+	return
+}
+
+//------------------------------------
+
+// SimpleHandle is a Handle for a very simple encoding format.
+//
+// simple is a simplistic codec similar to binc, but not as compact.
+//   - Encoding of a value is always preceded by the descriptor byte (bd)
+//   - True, false, nil are encoded fully in 1 byte (the descriptor)
+//   - Integers (intXXX, uintXXX) are encoded in 1, 2, 4 or 8 bytes (plus a descriptor byte).
+//     There are positive (uintXXX and intXXX >= 0) and negative (intXXX < 0) integers.
+//   - Floats are encoded in 4 or 8 bytes (plus a descriptor byte)
+//   - Length of containers (strings, bytes, array, map, extensions)
+//     are encoded in 0, 1, 2, 4 or 8 bytes.
+//     Zero-length containers have no length encoded.
+//     For others, the number of length bytes is pow(2, (bd%8)-1)
+//   - maps are encoded as [bd] [length] [[key][value]]...
+//   - arrays are encoded as [bd] [length] [value]...
+//   - extensions are encoded as [bd] [length] [tag] [byte]...
+//   - strings/bytearrays are encoded as [bd] [length] [byte]...
+//   - time.Time are encoded as [bd] [length] [byte]...
+//
+// The full spec will be published soon.
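+//
+// Worked example (illustrative): EncodeUint(300) writes [0x09 0x01 0x2c],
+// i.e. descriptor simpleVdPosInt+1, then the value as a big-endian uint16.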
+type SimpleHandle struct {
+	BasicHandle
+	binaryEncodingType
+	noElemSeparators
+	// EncZeroValuesAsNil says to encode zero values for numbers, bool, string, etc as nil
+	EncZeroValuesAsNil bool
+
+	// _ [1]uint64 // padding
+}
+
+// Name returns the name of the handle: simple
+func (h *SimpleHandle) Name() string { return "simple" }
+
+// SetBytesExt sets an extension
+func (h *SimpleHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) {
+	return h.SetExt(rt, tag, &extWrapper{ext, interfaceExtFailer{}})
+}
+
+func (h *SimpleHandle) hasElemSeparators() bool { return true } // as it implements Write(Map|Array)XXX
+
+func (h *SimpleHandle) newEncDriver(e *Encoder) encDriver {
+	return &simpleEncDriver{e: e, w: e.w, h: h}
+}
+
+func (h *SimpleHandle) newDecDriver(d *Decoder) decDriver {
+	return &simpleDecDriver{d: d, h: h, r: d.r, br: d.bytes}
+}
+
+func (e *simpleEncDriver) reset() {
+	e.c = 0
+	e.w = e.e.w
+}
+
+func (d *simpleDecDriver) reset() {
+	d.c = 0
+	d.r, d.br = d.d.r, d.d.bytes
+	d.bd, d.bdRead = 0, false
+}
+
+var _ decDriver = (*simpleDecDriver)(nil)
+var _ encDriver = (*simpleEncDriver)(nil)
diff --git a/vendor/github.com/ugorji/go/codec/test-cbor-goldens.json b/vendor/github.com/ugorji/go/codec/test-cbor-goldens.json
new file mode 100644
index 0000000..9028586
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/test-cbor-goldens.json
@@ -0,0 +1,639 @@
+[
+  {
+    "cbor": "AA==",
+    "hex": "00",
+    "roundtrip": true,
+    "decoded": 0
+  },
+  {
+    "cbor": "AQ==",
+    "hex": "01",
+    "roundtrip": true,
+    "decoded": 1
+  },
+  {
+    "cbor": "Cg==",
+    "hex": "0a",
+    "roundtrip": true,
+    "decoded": 10
+  },
+  {
+    "cbor": "Fw==",
+    "hex": "17",
+    "roundtrip": true,
+    "decoded": 23
+  },
+  {
+    "cbor": "GBg=",
+    "hex": "1818",
+    "roundtrip": true,
+    "decoded": 24
+  },
+  {
+    "cbor": "GBk=",
+    "hex": "1819",
+    "roundtrip": true,
+    "decoded": 25
+  },
+  {
+    "cbor": "GGQ=",
+    "hex": "1864",
+    "roundtrip": true,
+    "decoded": 100
+  },
+  {
+    "cbor": "GQPo",
+    "hex": "1903e8",
+    "roundtrip": true,
+    "decoded": 1000
+  },
+  {
+    "cbor": "GgAPQkA=",
+    "hex": "1a000f4240",
+    "roundtrip": true,
+    "decoded": 1000000
+  },
+  {
+    "cbor": "GwAAAOjUpRAA",
+    "hex": "1b000000e8d4a51000",
+    "roundtrip": true,
+    "decoded": 1000000000000
+  },
+  {
+    "cbor": "G///////////",
+    "hex": "1bffffffffffffffff",
+    "roundtrip": true,
+    "decoded": 18446744073709551615
+  },
+  {
+    "cbor": "wkkBAAAAAAAAAAA=",
+    "hex": "c249010000000000000000",
+    "roundtrip": true,
+    "decoded": 18446744073709551616
+  },
+  {
+    "cbor": "O///////////",
+    "hex": "3bffffffffffffffff",
+    "roundtrip": true,
+    "decoded": -18446744073709551616,
+    "skip": true
+  },
+  {
+    "cbor": "w0kBAAAAAAAAAAA=",
+    "hex": "c349010000000000000000",
+    "roundtrip": true,
+    "decoded": -18446744073709551617
+  },
+  {
+    "cbor": "IA==",
+    "hex": "20",
+    "roundtrip": true,
+    "decoded": -1
+  },
+  {
+    "cbor": "KQ==",
+    "hex": "29",
+    "roundtrip": true,
+    "decoded": -10
+  },
+  {
+    "cbor": "OGM=",
+    "hex": "3863",
+    "roundtrip": true,
+    "decoded": -100
+  },
+  {
+    "cbor": "OQPn",
+    "hex": "3903e7",
+    "roundtrip": true,
+    "decoded": -1000
+  },
+  {
+    "cbor": "+QAA",
+    "hex": "f90000",
+    "roundtrip": true,
+    "decoded": 0.0
+  },
+  {
+    "cbor": "+YAA",
+    "hex": "f98000",
+    "roundtrip": true,
+    "decoded": -0.0
+  },
+  {
+    "cbor": "+TwA",
+    "hex": "f93c00",
+    "roundtrip": true,
+    "decoded": 1.0
+  },
+  {
+    "cbor": "+z/xmZmZmZma",
+    "hex": "fb3ff199999999999a",
+    "roundtrip": true,
+    "decoded": 1.1
+  },
+  {
+    "cbor": "+T4A",
+    "hex": "f93e00",
+    "roundtrip": true,
+    "decoded": 1.5
+  },
+  {
+    "cbor": "+Xv/",
+    "hex": "f97bff",
+    "roundtrip": true,
+    "decoded": 65504.0
+  },
+  {
+    "cbor": "+kfDUAA=",
+    "hex": "fa47c35000",
+    "roundtrip": true,
+    "decoded": 100000.0
+  },
+  {
+    "cbor": "+n9///8=",
+    "hex": "fa7f7fffff",
+    "roundtrip": true,
+    "decoded": 3.4028234663852886e+38
+  },
+  {
+    "cbor": "+3435DyIAHWc",
+    "hex": "fb7e37e43c8800759c",
+    "roundtrip": true,
+    "decoded": 1.0e+300
+  },
+  {
+    "cbor": "+QAB",
+    "hex": "f90001",
+    "roundtrip": true,
+    "decoded": 5.960464477539063e-08
+  },
+  {
+    "cbor": "+QQA",
+    "hex": "f90400",
+    "roundtrip": true,
+    "decoded": 6.103515625e-05
+  },
+  {
+    "cbor": "+cQA",
+    "hex": "f9c400",
+    "roundtrip": true,
+    "decoded": -4.0
+  },
+  {
+    "cbor": "+8AQZmZmZmZm",
+    "hex": "fbc010666666666666",
+    "roundtrip": true,
+    "decoded": -4.1
+  },
+  {
+    "cbor": "+XwA",
+    "hex": "f97c00",
+    "roundtrip": true,
+    "diagnostic": "Infinity"
+  },
+  {
+    "cbor": "+X4A",
+    "hex": "f97e00",
+    "roundtrip": true,
+    "diagnostic": "NaN"
+  },
+  {
+    "cbor": "+fwA",
+    "hex": "f9fc00",
+    "roundtrip": true,
+    "diagnostic": "-Infinity"
+  },
+  {
+    "cbor": "+n+AAAA=",
+    "hex": "fa7f800000",
+    "roundtrip": false,
+    "diagnostic": "Infinity"
+  },
+  {
+    "cbor": "+n/AAAA=",
+    "hex": "fa7fc00000",
+    "roundtrip": false,
+    "diagnostic": "NaN"
+  },
+  {
+    "cbor": "+v+AAAA=",
+    "hex": "faff800000",
+    "roundtrip": false,
+    "diagnostic": "-Infinity"
+  },
+  {
+    "cbor": "+3/wAAAAAAAA",
+    "hex": "fb7ff0000000000000",
+    "roundtrip": false,
+    "diagnostic": "Infinity"
+  },
+  {
+    "cbor": "+3/4AAAAAAAA",
+    "hex": "fb7ff8000000000000",
+    "roundtrip": false,
+    "diagnostic": "NaN"
+  },
+  {
+    "cbor": "+//wAAAAAAAA",
+    "hex": "fbfff0000000000000",
+    "roundtrip": false,
+    "diagnostic": "-Infinity"
+  },
+  {
+    "cbor": "9A==",
+    "hex": "f4",
+    "roundtrip": true,
+    "decoded": false
+  },
+  {
+    "cbor": "9Q==",
+    "hex": "f5",
+    "roundtrip": true,
+    "decoded": true
+  },
+  {
+    "cbor": "9g==",
+    "hex": "f6",
+    "roundtrip": true,
+    "decoded": null
+  },
+  {
+    "cbor": "9w==",
+    "hex": "f7",
+    "roundtrip": true,
+    "diagnostic": "undefined"
+  },
+  {
+    "cbor": "8A==",
+    "hex": "f0",
+    "roundtrip": true,
+    "diagnostic": "simple(16)"
+  },
+  {
+    "cbor": "+Bg=",
+    "hex": "f818",
+    "roundtrip": true,
+    "diagnostic": "simple(24)"
+  },
+  {
+    "cbor": "+P8=",
+    "hex": "f8ff",
+    "roundtrip": true,
+    "diagnostic": "simple(255)"
+  },
+  {
+    "cbor": "wHQyMDEzLTAzLTIxVDIwOjA0OjAwWg==",
+    "hex": "c074323031332d30332d32315432303a30343a30305a",
+    "roundtrip": true,
+    "diagnostic": "0(\"2013-03-21T20:04:00Z\")"
+  },
+  {
+    "cbor": "wRpRS2ew",
+    "hex": "c11a514b67b0",
+    "roundtrip": true,
+    "diagnostic": "1(1363896240)"
+  },
+  {
+    "cbor": "wftB1FLZ7CAAAA==",
+    "hex": "c1fb41d452d9ec200000",
+    "roundtrip": true,
+    "diagnostic": "1(1363896240.5)"
+  },
+  {
+    "cbor": "10QBAgME",
+    "hex": "d74401020304",
+    "roundtrip": true,
+    "diagnostic": "23(h'01020304')"
+  },
+  {
+    "cbor": "2BhFZElFVEY=",
+    "hex": "d818456449455446",
+    "roundtrip": true,
+    "diagnostic": "24(h'6449455446')"
+  },
+  {
+    "cbor": "2CB2aHR0cDovL3d3dy5leGFtcGxlLmNvbQ==",
+    "hex": "d82076687474703a2f2f7777772e6578616d706c652e636f6d",
+    "roundtrip": true,
+    "diagnostic": "32(\"http://www.example.com\")"
+  },
+  {
+    "cbor": "QA==",
+    "hex": "40",
+    "roundtrip": true,
+    "diagnostic": "h''"
+  },
+  {
+    "cbor": "RAECAwQ=",
+    "hex": "4401020304",
+    "roundtrip": true,
+    "diagnostic": "h'01020304'"
+  },
+  {
+    "cbor": "YA==",
+    "hex": "60",
+    "roundtrip": true,
+    "decoded": ""
+  },
+  {
+    "cbor": "YWE=",
+    "hex": "6161",
+    "roundtrip": true,
+    "decoded": "a"
+  },
+  {
+    "cbor": "ZElFVEY=",
+    "hex": "6449455446",
+    "roundtrip": true,
+    "decoded": "IETF"
+  },
+  {
+    "cbor": "YiJc",
+    "hex": "62225c",
+    "roundtrip": true,
+    "decoded": "\"\\"
+  },
+  {
+    "cbor": "YsO8",
+    "hex": "62c3bc",
+    "roundtrip": true,
+    "decoded": "ü"
+  },
+  {
+    "cbor": "Y+awtA==",
+    "hex": "63e6b0b4",
+    "roundtrip": true,
+    "decoded": "水"
+  },
+  {
+    "cbor": "ZPCQhZE=",
+    "hex": "64f0908591",
+    "roundtrip": true,
+    "decoded": "𐅑"
+  },
+  {
+    "cbor": "gA==",
+    "hex": "80",
+    "roundtrip": true,
+    "decoded": [
+
+    ]
+  },
+  {
+    "cbor": "gwECAw==",
+    "hex": "83010203",
+    "roundtrip": true,
+    "decoded": [
+      1,
+      2,
+      3
+    ]
+  },
+  {
+    "cbor": "gwGCAgOCBAU=",
+    "hex": "8301820203820405",
+    "roundtrip": true,
+    "decoded": [
+      1,
+      [
+        2,
+        3
+      ],
+      [
+        4,
+        5
+      ]
+    ]
+  },
+  {
+    "cbor": "mBkBAgMEBQYHCAkKCwwNDg8QERITFBUWFxgYGBk=",
+    "hex": "98190102030405060708090a0b0c0d0e0f101112131415161718181819",
+    "roundtrip": true,
+    "decoded": [
+      1,
+      2,
+      3,
+      4,
+      5,
+      6,
+      7,
+      8,
+      9,
+      10,
+      11,
+      12,
+      13,
+      14,
+      15,
+      16,
+      17,
+      18,
+      19,
+      20,
+      21,
+      22,
+      23,
+      24,
+      25
+    ]
+  },
+  {
+    "cbor": "oA==",
+    "hex": "a0",
+    "roundtrip": true,
+    "decoded": {
+    }
+  },
+  {
+    "cbor": "ogECAwQ=",
+    "hex": "a201020304",
+    "roundtrip": true,
+    "skip": true,
+    "diagnostic": "{1: 2, 3: 4}"
+  },
+  {
+    "cbor": "omFhAWFiggID",
+    "hex": "a26161016162820203",
+    "roundtrip": true,
+    "decoded": {
+      "a": 1,
+      "b": [
+        2,
+        3
+      ]
+    }
+  },
+  {
+    "cbor": "gmFhoWFiYWM=",
+    "hex": "826161a161626163",
+    "roundtrip": true,
+    "decoded": [
+      "a",
+      {
+        "b": "c"
+      }
+    ]
+  },
+  {
+    "cbor": "pWFhYUFhYmFCYWNhQ2FkYURhZWFF",
+    "hex": "a56161614161626142616361436164614461656145",
+    "roundtrip": true,
+    "decoded": {
+      "a": "A",
+      "b": "B",
+      "c": "C",
+      "d": "D",
+      "e": "E"
+    }
+  },
+  {
+    "cbor": "X0IBAkMDBAX/",
+    "hex": "5f42010243030405ff",
+    "roundtrip": false,
+    "skip": true,
+    "diagnostic": "(_ h'0102', h'030405')"
+  },
+  {
+    "cbor": "f2VzdHJlYWRtaW5n/w==",
+    "hex": "7f657374726561646d696e67ff",
+    "roundtrip": false,
+    "decoded": "streaming"
+  },
+  {
+    "cbor": "n/8=",
+    "hex": "9fff",
+    "roundtrip": false,
+    "decoded": [
+
+    ]
+  },
+  {
+    "cbor": "nwGCAgOfBAX//w==",
+    "hex": "9f018202039f0405ffff",
+    "roundtrip": false,
+    "decoded": [
+      1,
+      [
+        2,
+        3
+      ],
+      [
+        4,
+        5
+      ]
+    ]
+  },
+  {
+    "cbor": "nwGCAgOCBAX/",
+    "hex": "9f01820203820405ff",
+    "roundtrip": false,
+    "decoded": [
+      1,
+      [
+        2,
+        3
+      ],
+      [
+        4,
+        5
+      ]
+    ]
+  },
+  {
+    "cbor": "gwGCAgOfBAX/",
+    "hex": "83018202039f0405ff",
+    "roundtrip": false,
+    "decoded": [
+      1,
+      [
+        2,
+        3
+      ],
+      [
+        4,
+        5
+      ]
+    ]
+  },
+  {
+    "cbor": "gwGfAgP/ggQF",
+    "hex": "83019f0203ff820405",
+    "roundtrip": false,
+    "decoded": [
+      1,
+      [
+        2,
+        3
+      ],
+      [
+        4,
+        5
+      ]
+    ]
+  },
+  {
+    "cbor": "nwECAwQFBgcICQoLDA0ODxAREhMUFRYXGBgYGf8=",
+    "hex": "9f0102030405060708090a0b0c0d0e0f101112131415161718181819ff",
+    "roundtrip": false,
+    "decoded": [
+      1,
+      2,
+      3,
+      4,
+      5,
+      6,
+      7,
+      8,
+      9,
+      10,
+      11,
+      12,
+      13,
+      14,
+      15,
+      16,
+      17,
+      18,
+      19,
+      20,
+      21,
+      22,
+      23,
+      24,
+      25
+    ]
+  },
+  {
+    "cbor": "v2FhAWFinwID//8=",
+    "hex": "bf61610161629f0203ffff",
+    "roundtrip": false,
+    "decoded": {
+      "a": 1,
+      "b": [
+        2,
+        3
+      ]
+    }
+  },
+  {
+    "cbor": "gmFhv2FiYWP/",
+    "hex": "826161bf61626163ff",
+    "roundtrip": false,
+    "decoded": [
+      "a",
+      {
+        "b": "c"
+      }
+    ]
+  },
+  {
+    "cbor": "v2NGdW71Y0FtdCH/",
+    "hex": "bf6346756ef563416d7421ff",
+    "roundtrip": false,
+    "decoded": {
+      "Fun": true,
+      "Amt": -2
+    }
+  }
+]
diff --git a/vendor/github.com/ugorji/go/codec/test.py b/vendor/github.com/ugorji/go/codec/test.py
new file mode 100755
index 0000000..800376f
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/test.py
@@ -0,0 +1,126 @@
+#!/usr/bin/env python
+
+# This will create golden files in a directory passed to it.
+# A test calls this internally to create the golden files,
+# so it can process them (and we don't have to check in the files).
+
+# Ensure msgpack-python and cbor are installed first, using:
+#   sudo apt-get install python-dev
+#   sudo apt-get install python-pip
+#   pip install --user msgpack-python msgpack-rpc-python cbor
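+#
+# Example: python test.py testdata /tmp/goldens
+# (writes <n>.msgpack.golden and <n>.cbor.golden files into /tmp/goldens)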
+
+# Ensure all "string" keys are unicode strings (else they are encoded as bytes)
+
+import cbor, msgpack, msgpackrpc, sys, os, threading
+
+def get_test_data_list():
+    # get list with all primitive types, and a combo type
+    l0 = [ 
+        -8,
+         -1616,
+         -32323232,
+         -6464646464646464,
+         192,
+         1616,
+         32323232,
+         6464646464646464,
+         192,
+         -3232.0,
+         -6464646464.0,
+         3232.0,
+         6464.0,
+         6464646464.0,
+         False,
+         True,
+         u"null",
+         None,
+         u"some&day>some<day",
+         1328176922000002000,
+         u"",
+         -2206187877999998000,
+         u"bytestring",
+         270,
+         u"none",
+        -2013855847999995777,
+         #-6795364578871345152,
+         ]
+    l1 = [
+        { "true": True,
+          "false": False },
+        { "true": u"True",
+          "false": False,
+          "uint16(1616)": 1616 },
+        { "list": [1616, 32323232, True, -3232.0, {"TRUE":True, "FALSE":False}, [True, False] ],
+          "int32":32323232, "bool": True, 
+          "LONG STRING": u"123456789012345678901234567890123456789012345678901234567890",
+          "SHORT STRING": u"1234567890" },
+        { True: "true", 138: False, "false": 200 }
+        ]
+    
+    l = []
+    l.extend(l0)
+    l.append(l0)
+    l.append(1)
+    l.extend(l1)
+    return l
+
+def build_test_data(destdir):
+    l = get_test_data_list()
+    for i in range(len(l)):
+        # packer = msgpack.Packer()
+        serialized = msgpack.dumps(l[i])
+        f = open(os.path.join(destdir, str(i) + '.msgpack.golden'), 'wb')
+        f.write(serialized)
+        f.close()
+        serialized = cbor.dumps(l[i])
+        f = open(os.path.join(destdir, str(i) + '.cbor.golden'), 'wb')
+        f.write(serialized)
+        f.close()
+
+def doRpcServer(port, stopTimeSec):
+    class EchoHandler(object):
+        def Echo123(self, msg1, msg2, msg3):
+            return ("1:%s 2:%s 3:%s" % (msg1, msg2, msg3))
+        def EchoStruct(self, msg):
+            return ("%s" % msg)
+    
+    addr = msgpackrpc.Address('127.0.0.1', port)
+    server = msgpackrpc.Server(EchoHandler())
+    server.listen(addr)
+    # run thread to stop it after stopTimeSec seconds if > 0
+    if stopTimeSec > 0:
+        def myStopRpcServer():
+            server.stop()
+        t = threading.Timer(stopTimeSec, myStopRpcServer)
+        t.start()
+    server.start()
+
+def doRpcClientToPythonSvc(port):
+    address = msgpackrpc.Address('127.0.0.1', port)
+    client = msgpackrpc.Client(address, unpack_encoding='utf-8')
+    print(client.call("Echo123", "A1", "B2", "C3"))
+    print(client.call("EchoStruct", {"A": "Aa", "B": "Bb", "C": "Cc"}))
+   
+def doRpcClientToGoSvc(port):
+    # print ">>>> port: ", port, " <<<<<"
+    address = msgpackrpc.Address('127.0.0.1', port)
+    client = msgpackrpc.Client(address, unpack_encoding='utf-8')
+    print(client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"]))
+    print(client.call("TestRpcInt.EchoStruct", {"A": "Aa", "B": "Bb", "C": "Cc"}))
+
+def doMain(args):
+    if len(args) == 2 and args[0] == "testdata":
+        build_test_data(args[1])
+    elif len(args) == 3 and args[0] == "rpc-server":
+        doRpcServer(int(args[1]), int(args[2]))
+    elif len(args) == 2 and args[0] == "rpc-client-python-service":
+        doRpcClientToPythonSvc(int(args[1]))
+    elif len(args) == 2 and args[0] == "rpc-client-go-service":
+        doRpcClientToGoSvc(int(args[1]))
+    else:
+        print("Usage: test.py " + 
+              "[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...")
+    
+if __name__ == "__main__":
+    doMain(sys.argv[1:])
+
diff --git a/vendor/github.com/ugorji/go/codec/xml.go b/vendor/github.com/ugorji/go/codec/xml.go
new file mode 100644
index 0000000..19fc36c
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/xml.go
@@ -0,0 +1,508 @@
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// +build ignore
+
+package codec
+
+import "reflect"
+
+/*
+
+A strict, non-validating, namespace-aware XML 1.0 parser and (en|de)coder.
+
+We are attempting this due to perceived issues with encoding/xml:
+  - Complicated. It tries to do too much, and is not as simple to use as json.
+  - Due to over-engineering, reflection is over-used AND performance suffers:
+    java is 6X faster: http://fabsk.eu/blog/category/informatique/dev/golang/
+    even PYTHON performs better: http://outgoing.typepad.com/outgoing/2014/07/exploring-golang.html
+
+The codec framework will offer the following benefits:
+  - VASTLY improved performance (when using reflection-mode or codecgen)
+  - simplicity and consistency: with the rest of the supported formats
+  - all other benefits of codec framework (streaming, codegeneration, etc)
+
+codec is not a drop-in replacement for encoding/xml.
+It is a replacement, based on the simplicity and performance of codec.
+Look at it like JAXB for Go.
+
+Challenges:
+  - Need to output XML preamble, with all namespaces at the right location in the output.
+  - Each "end" block is dynamic, so we need to maintain a context-aware stack
+  - How to decide when to use an attribute VS an element
+  - How to handle chardata, attr, comment EXPLICITLY.
+  - Should it output fragments?
+    e.g. encoding a bool should just output true OR false, which is not well-formed XML.
+
+Extend the struct tag. See representative example:
+  type X struct {
+    ID uint8 `codec:"http://ugorji.net/x-namespace xid id,omitempty,toarray,attr,cdata"`
+    // format: [namespace-uri ][namespace-prefix ]local-name, ...
+  }
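+
+  As an illustration (the exact output depends on the final encoder), a scalar
+  field tagged ",attr" as above would be written as a namespaced attribute, roughly:
+    <X xmlns:xid="http://ugorji.net/x-namespace" xid:id="5"/>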
+
+Based on this, we encode
+  - fields as elements, BUT
+    encode as attributes if struct tag contains ",attr" and is a scalar (bool, number or string)
+  - text as entity-escaped text, BUT encode as CDATA if struct tag contains ",cdata".
+
+To handle namespaces:
+  - XMLHandle is denoted as being namespace-aware.
+    Consequently, we WILL use the ns:name pair to encode and decode if defined, else use the plain name.
+  - *Encoder and *Decoder know whether the Handle "prefers" namespaces.
+  - add *Encoder.getEncName(*structFieldInfo).
+    No one calls *structFieldInfo.indexForEncName directly anymore
+  - OR better yet: indexForEncName is namespace-aware, and helper.go is all namespace-aware
+    indexForEncName takes a parameter of the form namespace:local-name OR local-name
+  - add *Decoder.getStructFieldInfo(encName string) // encName here is either like abc, or h1:nsabc
+    by being a method on *Decoder, or maybe a method on the Handle itself.
+    No one accesses .encName anymore
+  - let encode.go and decode.go use these (for consistency)
+  - only problem exists for gen.go, where we create a big switch on encName.
+    Now, we also have to add a switch on strings.HasSuffix(kName, encNsName)
+    - gen.go will need many more methods, and then double up on the 2 switch loops like:
+      switch k {
+        case "abc": x.abc()
+        case "def": x.def()
+        default:
+          switch {
+            case !nsAware: panic(...)
+            case strings.HasSuffix(k, ":abc"): x.abc()
+            case strings.HasSuffix(k, ":def"): x.def()
+            default: panic(...)
+          }
+      }
+
+The structure below accommodates this:
+
+  type typeInfo struct {
+    sfi []*structFieldInfo // sorted by encName
+    sfins // sorted by namespace
+    sfia  // sorted, to have those with attributes at the top. Needed to write XML appropriately.
+    sfip  // unsorted
+  }
+  type structFieldInfo struct {
+    encName
+    nsEncName
+    ns string
+    attr bool
+    cdata bool
+  }
+
+indexForEncName is now an internal helper function that takes a sorted array
+(one of ti.sfins or ti.sfi). It is only used by *Encoder.getStructFieldInfo(...)
+
+There will be a separate parser from the builder.
+The parser will have a next() xmlToken method, with lookahead support,
+so you can pop multiple tokens, make a determination, and push them back in the order popped.
+This will be needed to determine whether we are "nakedly" decoding a container or not.
+The stack will be implemented using a slice and push/pop happens at the [0] element.
+
+xmlToken has fields:
+  - type uint8: 0 | ElementStart | ElementEnd | AttrKey | AttrVal | Text
+  - value string
+  - ns string
+
+SEE: http://www.xml.com/pub/a/98/10/guide0.html?page=3#ENTDECL
+
+The following are skipped when parsing:
+  - External Entities (from external file)
+  - Notation Declaration e.g. <!NOTATION GIF87A SYSTEM "GIF">
+  - Entity Declarations & References
+  - XML Declaration (assume UTF-8)
+  - XML Directive i.e. <! ... >
+  - Other Declarations: Notation, etc.
+  - Comment
+  - Processing Instruction
+  - schema / DTD for validation:
+    We are not a VALIDATING parser. Validation is done elsewhere.
+    However, some parts of the DTD internal subset are used (SEE BELOW).
+    For Attribute List Declarations e.g.
+    <!ATTLIST foo:oldjoke name ID #REQUIRED label CDATA #IMPLIED status ( funny | notfunny ) 'funny' >
+    We considered using the ATTLIST to get "default" value, but not to validate the contents. (VETOED)
+
+The following XML features are supported
+  - Namespace
+  - Element
+  - Attribute
+  - cdata
+  - Unicode escape
+
+The following DTD (when as an internal sub-set) features are supported:
+  - Internal Entities e.g.
+    <!ENTITY burns "ugorji is cool"> AND entities for the set: [<>&"']
+  - Parameter entities e.g.
+    <!ENTITY % personcontent "ugorji is cool"> <!ELEMENT burns (%personcontent;)*>
+
+At decode time, a structure containing the following is kept
+  - namespace mapping
+  - default attribute values
+  - all internal entities (<>&"' and others written in the document)
+
+When decode starts, it parses XML namespace declarations and creates a map in the
+xmlDecDriver. While parsing, that map continuously gets updated.
+The only problem arises when a namespace declaration appears on the very node that uses it,
+e.g. <hn:name xmlns:hn="http://www.ugorji.net" >
+To handle this, each Element must be fully parsed in one pass,
+even if that yields multiple tokens, which are then returned one at a time on request.
+
+xmlns is a special attribute name.
+  - It is used to define namespaces, including the default
+  - It is never returned as an AttrKey or AttrVal.
+  *We may decide later to allow the user to use it, e.g. to parse the xmlns mappings into a field.*
+
+Number, bool, null, mapKey, etc can all be decoded from any xmlToken.
+This accommodates map[int]string for example.
+
+It should be possible to create a schema from the types,
+or vice versa (generate types from schema with appropriate tags).
+This is however out of scope for this parsing project.
+
+We should write all namespace information at the first point that it is referenced in the tree,
+and use the mapping for all child nodes and attributes. This means that state is maintained
+at a point in the tree. This also means that calls to Decode or MustDecode will reset some state.
+
+When decoding, it is important to keep track of entity references and default attribute values.
+It seems these can only be stored in the DTD components. We should honor them when decoding.
+
+Configuration for XMLHandle will look like this:
+
+  XMLHandle
+    DefaultNS string
+    // Encoding:
+    NS map[string]string // ns URI to key, used for encoding
+    // Decoding: in case ENTITY declared in external schema or dtd, store info needed here
+    Entities map[string]string // map of entity rep to character
+
+
+During encode, if a namespace mapping is not defined for a namespace found on a struct,
+then we create a mapping for it using nsN (where N is 1..1000000, and doesn't conflict
+with any other namespace mapping).
+
+Note that different fields in a struct can have different namespaces.
+However, all fields will default to the namespace on the _struct field (if defined).
+
+An XML document is a name, a map of attributes and a list of children.
+Consequently, we cannot "DecodeNaked" into a map[string]interface{} (for example).
+We have to "DecodeNaked" into something that resembles XML data.
+
+To support DecodeNaked (decode into nil interface{}), we have to define some "supporting" types:
+    type Name struct { // Preferred. Less allocations due to conversions.
+      Local string
+      Space string
+    }
+    type Element struct {
+      Name Name
+      Attrs map[Name]string
+      Children []interface{} // each child is either *Element or string
+    }
+Only two "supporting" types are exposed for XML: Name and Element.
+
+// ------------------
+
+We considered 'type Name string' where Name is like "Space Local" (space-separated).
+We decided against it, because each creation of a name would lead to
+double allocation (first convert []byte to string, then concatenate them into a string).
+The benefit is that it is faster to read Attrs from a map. But given that Element is a value
+object, we want to eschew methods and have public exposed variables.
+
+We also considered the following, where xml types were not value objects, and we used
+intelligent accessor methods to extract information and for performance.
+*** WE DECIDED AGAINST THIS. ***
+    type Attr struct {
+      Name Name
+      Value string
+    }
+    // Element is a ValueObject: There are no accessor methods.
+    // Make element self-contained.
+    type Element struct {
+      Name Name
+      attrsMap map[string]string // where key is "Space Local"
+      attrs []Attr
+      childrenT []string
+      childrenE []Element
+      childrenI []int // each child is a index into T or E.
+    }
+    func (x *Element) child(i) interface{} // returns string or *Element
+
+// ------------------
+
+Per XML spec and our default handling, white space is always treated as
+insignificant between elements, except in a text node. The xml:space='preserve'
+attribute is ignored.
+
+**Note: there is no xml: namespace. The xml: attributes were defined before namespaces.**
+**So treat them as just "directives" that should be interpreted to mean something**.
+
+On encoding, we support indenting aka prettifying markup in the same way we support it for json.
+
+A document or element can only be encoded/decoded from/to a struct. In this mode:
+  - struct name maps to element name (or tag-info from _struct field)
+  - fields are mapped to child elements or attributes
+
+A map is either encoded as attributes on the current element, or as a set of child elements.
+Maps are encoded as attributes iff their keys and values are primitives (number, bool, string).
+
+A list is encoded as a set of child elements.
+
+Primitives (number, bool, string) are encoded as an element, attribute or text
+depending on the context.
+
+Extensions must encode themselves as a text string.
+
+Encoding is tough, specifically when encoding mappings, because we need to encode
+as either attribute or element. To do this, we need to default to encoding as attributes,
+and then let Encoder inform the Handle when to start encoding as nodes.
+i.e. Encoder does something like:
+
+    h.EncodeMapStart()
+    h.Encode(), h.Encode(), ...
+    h.EncodeMapNotAttrSignal() // this is not a bool, because it's a signal
+    h.Encode(), h.Encode(), ...
+    h.EncodeEnd()
+
+Only XMLHandle understands this, and will set itself to start encoding as elements.
+
+This support extends to maps. For example, if a struct field is a map, and it has
+the struct tag signifying it should be attr, then all its fields are encoded as attributes.
+e.g.
+
+    type X struct {
+       M map[string]int `codec:"m,attr"` // encode keys as attributes named after the map keys
+    }
+
+Question:
+  - if encoding a map, what if map keys have spaces in them???
+    Then they cannot be attributes or child elements. Error.
+
+Options to consider adding later:
+  - For attribute values, normalize by trimming beginning and ending white space,
+    and converting every white space sequence to a single space.
+  - ATTLIST restrictions are enforced.
+    e.g. default value of xml:space, skipping xml:XYZ style attributes, etc.
+  - Consider supporting NON-STRICT mode (e.g. to handle HTML parsing).
+    Some elements e.g. br, hr, etc need not close and should be auto-closed
+    ... (see http://www.w3.org/TR/html4/loose.dtd)
+    An expansive set of entities is pre-defined.
+  - Have easy way to create a HTML parser:
+    add a HTML() method to XMLHandle, that will set Strict=false, specify AutoClose,
+    and add HTML Entities to the list.
+  - Support validating element/attribute XMLName before writing it.
+    Keep this behind a flag, which is set to false by default (for performance).
+    type XMLHandle struct {
+      CheckName bool
+    }
+
+Misc:
+
+ROADMAP (1 week):
+  - build encoder (1 day)
+  - build decoder (based off xmlParser) (1 day)
+  - implement xmlParser (2 days).
+    Look at encoding/xml for inspiration.
+  - integrate and TEST (1 day)
+  - write article and post it (1 day)
+
+// ---------- MORE NOTES FROM 2017-11-30 ------------
+
+when parsing
+- parse the attributes first
+- then parse the nodes
+
+basically:
+- if encoding a field: we use the field name for the wrapper
+- if encoding a non-field, then just use the element type name
+
+  map[string]string ==> <map><key>abc</key><value>val</value></map>... or
+                        <map key="abc">val</map>... OR
+                        <key1>val1</key1><key2>val2</key2>...                <- PREFERRED
+  []string  ==> <string>v1</string><string>v2</string>...
+  string v1 ==> <string>v1</string>
+  bool true ==> <bool>true</bool>
+  float 1.0 ==> <float>1.0</float>
+  ...
+
+  F1 map[string]string ==> <F1><key>abc</key><value>val</value></F1>... OR
+                           <F1 key="abc">val</F1>... OR
+                           <F1><abc>val</abc>...</F1>                        <- PREFERRED
+  F2 []string          ==> <F2>v1</F2><F2>v2</F2>...
+  F3 bool              ==> <F3>true</F3>
+  ...
+
+- a scalar is encoded as:
+  (value) of type T  ==> <T><value/></T>
+  (value) of field F ==> <F><value/></F>
+- A kv-pair is encoded as:
+  (key,value) ==> <map><key><value/></key></map> OR <map key="value">
+  (key,value) of field F ==> <F><key><value/></key></F> OR <F key="value">
+- A map or struct is just a list of kv-pairs
+- A list is encoded as sequences of same node e.g.
+  <F1 key1="value11">
+  <F1 key2="value12">
+  <F2>value21</F2>
+  <F2>value22</F2>
+- we may have to singularize the field name when encoding into xml,
+  and pluralize it again when decoding.
+- bi-directional encode->decode->encode is not a MUST.
+  even encoding/xml cannot decode correctly what was encoded:
+
+  see https://play.golang.org/p/224V_nyhMS
+  func main() {
+	fmt.Println("Hello, playground")
+	v := []interface{}{"hello", 1, true, nil, time.Now()}
+	s, err := xml.Marshal(v)
+	fmt.Printf("err: %v, \ns: %s\n", err, s)
+	var v2 []interface{}
+	err = xml.Unmarshal(s, &v2)
+	fmt.Printf("err: %v, \nv2: %v\n", err, v2)
+	type T struct {
+	    V []interface{}
+	}
+	v3 := T{V: v}
+	s, err = xml.Marshal(v3)
+	fmt.Printf("err: %v, \ns: %s\n", err, s)
+	var v4 T
+	err = xml.Unmarshal(s, &v4)
+	fmt.Printf("err: %v, \nv4: %v\n", err, v4)
+  }
+  Output:
+    err: <nil>,
+    s: <string>hello</string><int>1</int><bool>true</bool><Time>2009-11-10T23:00:00Z</Time>
+    err: <nil>,
+    v2: [<nil>]
+    err: <nil>,
+    s: <T><V>hello</V><V>1</V><V>true</V><V>2009-11-10T23:00:00Z</V></T>
+    err: <nil>,
+    v4: {[<nil> <nil> <nil> <nil>]}
+-
+*/
+
+// ----------- PARSER  -------------------
+
+type xmlTokenType uint8
+
+const (
+	_ xmlTokenType = iota << 1
+	xmlTokenElemStart
+	xmlTokenElemEnd
+	xmlTokenAttrKey
+	xmlTokenAttrVal
+	xmlTokenText
+)
+
+type xmlToken struct {
+	Type      xmlTokenType
+	Value     string
+	Namespace string // blank for AttrVal and Text
+}
+
+type xmlParser struct {
+	r    decReader
+	toks []xmlToken // list of tokens.
+	ptr  int        // ptr into the toks slice
+	done bool       // nothing else to parse. r now returns EOF.
+}
+
+func (x *xmlParser) next() (t *xmlToken) {
+	// once x.done and no tokens remain buffered, return nil (to signify finish)
+	if !x.done && len(x.toks) == 0 {
+		x.nextTag()
+	}
+	// parses one element at a time (into possibly many tokens)
+	if x.ptr < len(x.toks) {
+		t = &(x.toks[x.ptr])
+		x.ptr++
+		if x.ptr == len(x.toks) {
+			x.ptr = 0
+			x.toks = x.toks[:0]
+		}
+	}
+	return
+}
+
+// nextTag parses the next element and fills up toks.
+// It sets the done flag once EOF is reached.
+func (x *xmlParser) nextTag() {
+	// TODO: implement.
+}
+
+// ----------- ENCODER -------------------
+
+type xmlEncDriver struct {
+	e  *Encoder
+	w  encWriter
+	h  *XMLHandle
+	b  [64]byte // scratch
+	bs []byte   // scratch
+	// s  jsonStack
+	noBuiltInTypes
+}
+
+// ----------- DECODER -------------------
+
+type xmlDecDriver struct {
+	d    *Decoder
+	h    *XMLHandle
+	r    decReader // *bytesDecReader decReader
+	ct   valueType // container type. one of unset, array or map.
+	bstr [8]byte   // scratch used for string \UXXX parsing
+	b    [64]byte  // scratch
+
+	// wsSkipped bool // whitespace skipped
+
+	// s jsonStack
+
+	noBuiltInTypes
+}
+
+// DecodeNaked will decode into an XMLNode
+
+// XMLName is a value object representing a namespace-aware NAME
+type XMLName struct {
+	Local string
+	Space string
+}
+
+// XMLNode represents a "union" of the different types of XML Nodes.
+// Only one of the fields (Element or Text) is set.
+type XMLNode struct {
+	Element *Element
+	Text    string
+}
+
+// XMLElement is a value object representing a fully-parsed XML element.
+type XMLElement struct {
+	Name  XMLName
+	Attrs map[XMLName]string
+	// Children is a list of child nodes, each an XMLNode holding either an Element or Text
+	Children []XMLNode
+}
+
+// ----------- HANDLE  -------------------
+
+type XMLHandle struct {
+	BasicHandle
+	textEncodingType
+
+	DefaultNS string
+	NS        map[string]string // ns URI to key, for encoding
+	Entities  map[string]string // entity representation to string, for decoding.
+}
+
+func (h *XMLHandle) newEncDriver(e *Encoder) encDriver {
+	return &xmlEncDriver{e: e, w: e.w, h: h}
+}
+
+func (h *XMLHandle) newDecDriver(d *Decoder) decDriver {
+	// d := xmlDecDriver{r: r.(*bytesDecReader), h: h}
+	hd := xmlDecDriver{d: d, r: d.r, h: h}
+	hd.n.bytes = d.b[:]
+	return &hd
+}
+
+func (h *XMLHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) {
+	return h.SetExt(rt, tag, &extWrapper{bytesExtFailer{}, ext})
+}
+
+var _ decDriver = (*xmlDecDriver)(nil)
+var _ encDriver = (*xmlEncDriver)(nil)
diff --git a/vendor/github.com/xiang90/probing/.gitignore b/vendor/github.com/xiang90/probing/.gitignore
new file mode 100644
index 0000000..daf913b
--- /dev/null
+++ b/vendor/github.com/xiang90/probing/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
diff --git a/vendor/github.com/xiang90/probing/LICENSE b/vendor/github.com/xiang90/probing/LICENSE
new file mode 100644
index 0000000..cde8b8b
--- /dev/null
+++ b/vendor/github.com/xiang90/probing/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Xiang Li
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/vendor/github.com/xiang90/probing/README.md b/vendor/github.com/xiang90/probing/README.md
new file mode 100644
index 0000000..2ff6820
--- /dev/null
+++ b/vendor/github.com/xiang90/probing/README.md
@@ -0,0 +1,39 @@
+## Getting Started
+
+### Install the handler
+
+We first need to serve the probing HTTP handler.
+
+```go
+	http.Handle("/health", probing.NewHandler())
+	err := http.ListenAndServe(":12345", nil)
+	if err != nil {
+		log.Fatal("ListenAndServe: ", err)
+	}
+```
+
+### Start to probe
+
+Now we can create a prober and start to probe the endpoint.
+
+```go
+	p := probing.NewProber(nil)
+	id := "example"
+	probingInterval := 5 * time.Second
+	endpoints := []string{"http://example.com:12345/health"}
+	p.AddHTTP(id, probingInterval, endpoints)
+
+	time.Sleep(13 * time.Second)
+	status, err := p.Status(id)
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Printf("Total Probing: %d, Total Loss: %d, Estimated RTT: %v, Estimated Clock Difference: %v\n",
+		status.Total(), status.Loss(), status.SRTT(), status.ClockDiff())
+	// Total Probing: 2, Total Loss: 0, Estimated RTT: 320.771µs, Estimated Clock Difference: -35.869µs
+```
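+
+The returned Status also exposes Health() and Err(), which reflect the result of the most recent probe.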
+
+### TODOs:
+
+- TCP probing
+- UDP probing
+- Gossip based probing
+- More accurate RTT estimation
+- More accurate Clock difference estimation
+- Use a clock interface rather than the real clock
diff --git a/vendor/github.com/xiang90/probing/prober.go b/vendor/github.com/xiang90/probing/prober.go
new file mode 100644
index 0000000..c917cfd
--- /dev/null
+++ b/vendor/github.com/xiang90/probing/prober.go
@@ -0,0 +1,134 @@
+package probing
+
+import (
+	"encoding/json"
+	"errors"
+	"net/http"
+	"sync"
+	"time"
+)
+
+var (
+	ErrNotFound = errors.New("probing: id not found")
+	ErrExist    = errors.New("probing: id exists")
+)
+
+type Prober interface {
+	AddHTTP(id string, probingInterval time.Duration, endpoints []string) error
+	Remove(id string) error
+	RemoveAll()
+	Reset(id string) error
+	Status(id string) (Status, error)
+}
+
+type prober struct {
+	mu      sync.Mutex
+	targets map[string]*status
+	tr      http.RoundTripper
+}
+
+func NewProber(tr http.RoundTripper) Prober {
+	p := &prober{targets: make(map[string]*status)}
+	if tr == nil {
+		p.tr = http.DefaultTransport
+	} else {
+		p.tr = tr
+	}
+	return p
+}
+
+func (p *prober) AddHTTP(id string, probingInterval time.Duration, endpoints []string) error {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	if _, ok := p.targets[id]; ok {
+		return ErrExist
+	}
+
+	s := &status{stopC: make(chan struct{})}
+	p.targets[id] = s
+
+	ticker := time.NewTicker(probingInterval)
+
+	go func() {
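+		// pinned indexes the endpoint currently being probed;
+		// rotate to the next endpoint after any failure.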
+		pinned := 0
+		for {
+			select {
+			case <-ticker.C:
+				start := time.Now()
+				req, err := http.NewRequest("GET", endpoints[pinned], nil)
+				if err != nil {
+					panic(err)
+				}
+				resp, err := p.tr.RoundTrip(req)
+				if err != nil {
+					s.recordFailure(err)
+					pinned = (pinned + 1) % len(endpoints)
+					continue
+				}
+
+				var hh Health
+				d := json.NewDecoder(resp.Body)
+				err = d.Decode(&hh)
+				resp.Body.Close()
+				if err != nil || !hh.OK {
+					s.recordFailure(err)
+					pinned = (pinned + 1) % len(endpoints)
+					continue
+				}
+
+				s.record(time.Since(start), hh.Now)
+			case <-s.stopC:
+				ticker.Stop()
+				return
+			}
+		}
+	}()
+
+	return nil
+}
+
+func (p *prober) Remove(id string) error {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	s, ok := p.targets[id]
+	if !ok {
+		return ErrNotFound
+	}
+	close(s.stopC)
+	delete(p.targets, id)
+	return nil
+}
+
+func (p *prober) RemoveAll() {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	for _, s := range p.targets {
+		close(s.stopC)
+	}
+	p.targets = make(map[string]*status)
+}
+
+func (p *prober) Reset(id string) error {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	s, ok := p.targets[id]
+	if !ok {
+		return ErrNotFound
+	}
+	s.reset()
+	return nil
+}
+
+func (p *prober) Status(id string) (Status, error) {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	s, ok := p.targets[id]
+	if !ok {
+		return nil, ErrNotFound
+	}
+	return s, nil
+}
diff --git a/vendor/github.com/xiang90/probing/server.go b/vendor/github.com/xiang90/probing/server.go
new file mode 100644
index 0000000..0e7b797
--- /dev/null
+++ b/vendor/github.com/xiang90/probing/server.go
@@ -0,0 +1,25 @@
+package probing
+
+import (
+	"encoding/json"
+	"net/http"
+	"time"
+)
+
+func NewHandler() http.Handler {
+	return &httpHealth{}
+}
+
+type httpHealth struct {
+}
+
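+// Health is the handler's JSON response body,
+// e.g. {"OK":true,"Now":"2015-01-01T00:00:00Z"}.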
+type Health struct {
+	OK  bool
+	Now time.Time
+}
+
+func (h *httpHealth) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	health := Health{OK: true, Now: time.Now()}
+	e := json.NewEncoder(w)
+	e.Encode(health)
+}
diff --git a/vendor/github.com/xiang90/probing/status.go b/vendor/github.com/xiang90/probing/status.go
new file mode 100644
index 0000000..bb5f659
--- /dev/null
+++ b/vendor/github.com/xiang90/probing/status.go
@@ -0,0 +1,108 @@
+package probing
+
+import (
+	"sync"
+	"time"
+)
+
+var (
+	// weight factor
+	α = 0.125
+)
+
+type Status interface {
+	Total() int64
+	Loss() int64
+	Health() bool
+	Err() error
+	// Estimated smoothed round trip time
+	SRTT() time.Duration
+	// Estimated clock difference
+	ClockDiff() time.Duration
+	StopNotify() <-chan struct{}
+}
+
+type status struct {
+	mu        sync.Mutex
+	srtt      time.Duration
+	total     int64
+	loss      int64
+	health    bool
+	err       error
+	clockdiff time.Duration
+	stopC     chan struct{}
+}
+
+// SRTT = (1-α) * SRTT + α * RTT
+func (s *status) SRTT() time.Duration {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.srtt
+}
+
+func (s *status) Total() int64 {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.total
+}
+
+func (s *status) Loss() int64 {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.loss
+}
+
+func (s *status) Health() bool {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.health
+}
+
+func (s *status) Err() error {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.err
+}
+
+func (s *status) ClockDiff() time.Duration {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.clockdiff
+}
+
+func (s *status) StopNotify() <-chan struct{} {
+	return s.stopC
+}
+
+func (s *status) record(rtt time.Duration, when time.Time) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
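+	// Smoothed RTT: SRTT = (1-α)*SRTT + α*RTT. The clock-difference
+	// estimate assumes a symmetric path, i.e. a one-way delay of SRTT/2.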
+	s.total += 1
+	s.health = true
+	s.srtt = time.Duration((1-α)*float64(s.srtt) + α*float64(rtt))
+	s.clockdiff = time.Now().Sub(when) - s.srtt/2
+	s.err = nil
+}
+
+func (s *status) recordFailure(err error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	s.total++
+	s.health = false
+	s.loss += 1
+	s.err = err
+}
+
+func (s *status) reset() {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	s.srtt = 0
+	s.total = 0
+	s.loss = 0
+	s.health = false
+	s.clockdiff = 0
+	s.err = nil
+}
diff --git a/vendor/golang.org/x/crypto/AUTHORS b/vendor/golang.org/x/crypto/AUTHORS
new file mode 100644
index 0000000..2b00ddb
--- /dev/null
+++ b/vendor/golang.org/x/crypto/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at https://tip.golang.org/AUTHORS.
diff --git a/vendor/golang.org/x/crypto/CONTRIBUTORS b/vendor/golang.org/x/crypto/CONTRIBUTORS
new file mode 100644
index 0000000..1fbd3e9
--- /dev/null
+++ b/vendor/golang.org/x/crypto/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at https://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/golang.org/x/crypto/LICENSE b/vendor/golang.org/x/crypto/LICENSE
new file mode 100644
index 0000000..6a66aea
--- /dev/null
+++ b/vendor/golang.org/x/crypto/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/crypto/PATENTS b/vendor/golang.org/x/crypto/PATENTS
new file mode 100644
index 0000000..7330990
--- /dev/null
+++ b/vendor/golang.org/x/crypto/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go.  This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation.  If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/vendor/golang.org/x/crypto/bcrypt/base64.go b/vendor/golang.org/x/crypto/bcrypt/base64.go
new file mode 100644
index 0000000..fc31160
--- /dev/null
+++ b/vendor/golang.org/x/crypto/bcrypt/base64.go
@@ -0,0 +1,35 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bcrypt
+
+import "encoding/base64"
+
+const alphabet = "./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
+
+var bcEncoding = base64.NewEncoding(alphabet)
+
+func base64Encode(src []byte) []byte {
+	n := bcEncoding.EncodedLen(len(src))
+	dst := make([]byte, n)
+	bcEncoding.Encode(dst, src)
+	for dst[n-1] == '=' {
+		n--
+	}
+	return dst[:n]
+}
+
+func base64Decode(src []byte) ([]byte, error) {
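+	// bcrypt stores unpadded base64; restore the '=' padding so the
+	// standard decoder accepts the input.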
+	numOfEquals := 4 - (len(src) % 4)
+	for i := 0; i < numOfEquals; i++ {
+		src = append(src, '=')
+	}
+
+	dst := make([]byte, bcEncoding.DecodedLen(len(src)))
+	n, err := bcEncoding.Decode(dst, src)
+	if err != nil {
+		return nil, err
+	}
+	return dst[:n], nil
+}
diff --git a/vendor/golang.org/x/crypto/bcrypt/bcrypt.go b/vendor/golang.org/x/crypto/bcrypt/bcrypt.go
new file mode 100644
index 0000000..aeb73f8
--- /dev/null
+++ b/vendor/golang.org/x/crypto/bcrypt/bcrypt.go
@@ -0,0 +1,295 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package bcrypt implements Provos and Mazières's bcrypt adaptive hashing
+// algorithm. See http://www.usenix.org/event/usenix99/provos/provos.pdf
+package bcrypt // import "golang.org/x/crypto/bcrypt"
+
+// The code is a port of Provos and Mazières's C implementation.
+import (
+	"crypto/rand"
+	"crypto/subtle"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+
+	"golang.org/x/crypto/blowfish"
+)
+
+const (
+	MinCost     int = 4  // the minimum allowable cost as passed in to GenerateFromPassword
+	MaxCost     int = 31 // the maximum allowable cost as passed in to GenerateFromPassword
+	DefaultCost int = 10 // the cost that will actually be set if a cost below MinCost is passed into GenerateFromPassword
+)
+
+// The error returned from CompareHashAndPassword when a password and hash do
+// not match.
+var ErrMismatchedHashAndPassword = errors.New("crypto/bcrypt: hashedPassword is not the hash of the given password")
+
+// The error returned from CompareHashAndPassword when a hash is too short to
+// be a bcrypt hash.
+var ErrHashTooShort = errors.New("crypto/bcrypt: hashedSecret too short to be a bcrypted password")
+
+// The error returned from CompareHashAndPassword when a hash was created with
+// a bcrypt algorithm newer than this implementation.
+type HashVersionTooNewError byte
+
+func (hv HashVersionTooNewError) Error() string {
+	return fmt.Sprintf("crypto/bcrypt: bcrypt algorithm version '%c' requested is newer than current version '%c'", byte(hv), majorVersion)
+}
+
+// The error returned from CompareHashAndPassword when a hash starts with something other than '$'
+type InvalidHashPrefixError byte
+
+func (ih InvalidHashPrefixError) Error() string {
+	return fmt.Sprintf("crypto/bcrypt: bcrypt hashes must start with '$', but hashedSecret started with '%c'", byte(ih))
+}
+
+type InvalidCostError int
+
+func (ic InvalidCostError) Error() string {
+	return fmt.Sprintf("crypto/bcrypt: cost %d is outside allowed range (%d,%d)", int(ic), int(MinCost), int(MaxCost))
+}
+
+const (
+	majorVersion       = '2'
+	minorVersion       = 'a'
+	maxSaltSize        = 16
+	maxCryptedHashSize = 23
+	encodedSaltSize    = 22
+	encodedHashSize    = 31
+	minHashSize        = 59
+)
+
+// magicCipherData is an IV for the 64 Blowfish encryption calls in
+// bcrypt(). It's the string "OrpheanBeholderScryDoubt" in big-endian bytes.
+var magicCipherData = []byte{
+	0x4f, 0x72, 0x70, 0x68,
+	0x65, 0x61, 0x6e, 0x42,
+	0x65, 0x68, 0x6f, 0x6c,
+	0x64, 0x65, 0x72, 0x53,
+	0x63, 0x72, 0x79, 0x44,
+	0x6f, 0x75, 0x62, 0x74,
+}
+
+type hashed struct {
+	hash  []byte
+	salt  []byte
+	cost  int // allowed range is MinCost to MaxCost
+	major byte
+	minor byte
+}
+
+// GenerateFromPassword returns the bcrypt hash of the password at the given
+// cost. If the cost given is less than MinCost, the cost will be set to
+// DefaultCost, instead. Use CompareHashAndPassword, as defined in this package,
+// to compare the returned hashed password with its cleartext version.
+func GenerateFromPassword(password []byte, cost int) ([]byte, error) {
+	p, err := newFromPassword(password, cost)
+	if err != nil {
+		return nil, err
+	}
+	return p.Hash(), nil
+}
+
+// CompareHashAndPassword compares a bcrypt hashed password with its possible
+// plaintext equivalent. Returns nil on success, or an error on failure.
+func CompareHashAndPassword(hashedPassword, password []byte) error {
+	p, err := newFromHash(hashedPassword)
+	if err != nil {
+		return err
+	}
+
+	otherHash, err := bcrypt(password, p.cost, p.salt)
+	if err != nil {
+		return err
+	}
+
+	otherP := &hashed{otherHash, p.salt, p.cost, p.major, p.minor}
+	if subtle.ConstantTimeCompare(p.Hash(), otherP.Hash()) == 1 {
+		return nil
+	}
+
+	return ErrMismatchedHashAndPassword
+}
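+
+// A typical flow (a sketch; handle errors in real code):
+//
+//	hash, _ := GenerateFromPassword([]byte("s3cret"), DefaultCost)
+//	// store hash; later, on login:
+//	if CompareHashAndPassword(hash, []byte("s3cret")) == nil {
+//		// password matches
+//	}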
+
+// Cost returns the hashing cost used to create the given hashed
+// password. When, in the future, the hashing cost of a password system needs
+// to be increased in order to adjust for greater computational power, this
+// function allows one to establish which passwords need to be updated.
+func Cost(hashedPassword []byte) (int, error) {
+	p, err := newFromHash(hashedPassword)
+	if err != nil {
+		return 0, err
+	}
+	return p.cost, nil
+}
+
+func newFromPassword(password []byte, cost int) (*hashed, error) {
+	if cost < MinCost {
+		cost = DefaultCost
+	}
+	p := new(hashed)
+	p.major = majorVersion
+	p.minor = minorVersion
+
+	err := checkCost(cost)
+	if err != nil {
+		return nil, err
+	}
+	p.cost = cost
+
+	unencodedSalt := make([]byte, maxSaltSize)
+	_, err = io.ReadFull(rand.Reader, unencodedSalt)
+	if err != nil {
+		return nil, err
+	}
+
+	p.salt = base64Encode(unencodedSalt)
+	hash, err := bcrypt(password, p.cost, p.salt)
+	if err != nil {
+		return nil, err
+	}
+	p.hash = hash
+	return p, err
+}
+
+func newFromHash(hashedSecret []byte) (*hashed, error) {
+	if len(hashedSecret) < minHashSize {
+		return nil, ErrHashTooShort
+	}
+	p := new(hashed)
+	n, err := p.decodeVersion(hashedSecret)
+	if err != nil {
+		return nil, err
+	}
+	hashedSecret = hashedSecret[n:]
+	n, err = p.decodeCost(hashedSecret)
+	if err != nil {
+		return nil, err
+	}
+	hashedSecret = hashedSecret[n:]
+
+	// The "+2" is here because we'll have to append at most 2 '=' to the salt
+	// when base64 decoding it in expensiveBlowfishSetup().
+	p.salt = make([]byte, encodedSaltSize, encodedSaltSize+2)
+	copy(p.salt, hashedSecret[:encodedSaltSize])
+
+	hashedSecret = hashedSecret[encodedSaltSize:]
+	p.hash = make([]byte, len(hashedSecret))
+	copy(p.hash, hashedSecret)
+
+	return p, nil
+}
+
+func bcrypt(password []byte, cost int, salt []byte) ([]byte, error) {
+	cipherData := make([]byte, len(magicCipherData))
+	copy(cipherData, magicCipherData)
+
+	c, err := expensiveBlowfishSetup(password, uint32(cost), salt)
+	if err != nil {
+		return nil, err
+	}
+
+	for i := 0; i < 24; i += 8 {
+		for j := 0; j < 64; j++ {
+			c.Encrypt(cipherData[i:i+8], cipherData[i:i+8])
+		}
+	}
+
+	// Bug compatibility with C bcrypt implementations. We only encode 23 of
+	// the 24 bytes encrypted.
+	hsh := base64Encode(cipherData[:maxCryptedHashSize])
+	return hsh, nil
+}
+
+func expensiveBlowfishSetup(key []byte, cost uint32, salt []byte) (*blowfish.Cipher, error) {
+	csalt, err := base64Decode(salt)
+	if err != nil {
+		return nil, err
+	}
+
+	// Bug compatibility with C bcrypt implementations. They use the trailing
+	// NULL in the key string during expansion.
+	// We copy the key to prevent changing the underlying array.
+	ckey := append(key[:len(key):len(key)], 0)
+
+	c, err := blowfish.NewSaltedCipher(ckey, csalt)
+	if err != nil {
+		return nil, err
+	}
+
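+	// The expensive part: 2^cost rounds, each expanding the key and then the salt.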
+	var i, rounds uint64
+	rounds = 1 << cost
+	for i = 0; i < rounds; i++ {
+		blowfish.ExpandKey(ckey, c)
+		blowfish.ExpandKey(csalt, c)
+	}
+
+	return c, nil
+}
+
+func (p *hashed) Hash() []byte {
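+	// Assemble the modular crypt format: $2<minor>$<2-digit cost>$<22-char salt><31-char hash>.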
+	arr := make([]byte, 60)
+	arr[0] = '$'
+	arr[1] = p.major
+	n := 2
+	if p.minor != 0 {
+		arr[2] = p.minor
+		n = 3
+	}
+	arr[n] = '$'
+	n++
+	copy(arr[n:], []byte(fmt.Sprintf("%02d", p.cost)))
+	n += 2
+	arr[n] = '$'
+	n++
+	copy(arr[n:], p.salt)
+	n += encodedSaltSize
+	copy(arr[n:], p.hash)
+	n += encodedHashSize
+	return arr[:n]
+}
+
+func (p *hashed) decodeVersion(sbytes []byte) (int, error) {
+	if sbytes[0] != '$' {
+		return -1, InvalidHashPrefixError(sbytes[0])
+	}
+	if sbytes[1] > majorVersion {
+		return -1, HashVersionTooNewError(sbytes[1])
+	}
+	p.major = sbytes[1]
+	n := 3
+	if sbytes[2] != '$' {
+		p.minor = sbytes[2]
+		n++
+	}
+	return n, nil
+}
+
+// sbytes should begin where decodeVersion left off.
+func (p *hashed) decodeCost(sbytes []byte) (int, error) {
+	cost, err := strconv.Atoi(string(sbytes[0:2]))
+	if err != nil {
+		return -1, err
+	}
+	err = checkCost(cost)
+	if err != nil {
+		return -1, err
+	}
+	p.cost = cost
+	return 3, nil
+}
+
+func (p *hashed) String() string {
+	return fmt.Sprintf("&{hash: %#v, salt: %#v, cost: %d, major: %c, minor: %c}", string(p.hash), p.salt, p.cost, p.major, p.minor)
+}
+
+func checkCost(cost int) error {
+	if cost < MinCost || cost > MaxCost {
+		return InvalidCostError(cost)
+	}
+	return nil
+}
diff --git a/vendor/golang.org/x/crypto/blowfish/block.go b/vendor/golang.org/x/crypto/blowfish/block.go
new file mode 100644
index 0000000..9d80f19
--- /dev/null
+++ b/vendor/golang.org/x/crypto/blowfish/block.go
@@ -0,0 +1,159 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package blowfish
+
+// getNextWord returns the next big-endian uint32 value from the byte slice
+// at the given position in a circular manner, updating the position.
+func getNextWord(b []byte, pos *int) uint32 {
+	var w uint32
+	j := *pos
+	for i := 0; i < 4; i++ {
+		w = w<<8 | uint32(b[j])
+		j++
+		if j >= len(b) {
+			j = 0
+		}
+	}
+	*pos = j
+	return w
+}
+
+// ExpandKey performs a key expansion on the given *Cipher. Specifically, it
+// performs the Blowfish algorithm's key schedule, which sets up the *Cipher's
+// P-array and substitution boxes for calls to Encrypt. This is used,
+// primarily, by the bcrypt package to reuse the Blowfish key schedule during
+// its setup. It's unlikely that you need to use this directly.
+func ExpandKey(key []byte, c *Cipher) {
+	j := 0
+	for i := 0; i < 18; i++ {
+		// Using inlined getNextWord for performance.
+		var d uint32
+		for k := 0; k < 4; k++ {
+			d = d<<8 | uint32(key[j])
+			j++
+			if j >= len(key) {
+				j = 0
+			}
+		}
+		c.p[i] ^= d
+	}
+
+	var l, r uint32
+	for i := 0; i < 18; i += 2 {
+		l, r = encryptBlock(l, r, c)
+		c.p[i], c.p[i+1] = l, r
+	}
+
+	for i := 0; i < 256; i += 2 {
+		l, r = encryptBlock(l, r, c)
+		c.s0[i], c.s0[i+1] = l, r
+	}
+	for i := 0; i < 256; i += 2 {
+		l, r = encryptBlock(l, r, c)
+		c.s1[i], c.s1[i+1] = l, r
+	}
+	for i := 0; i < 256; i += 2 {
+		l, r = encryptBlock(l, r, c)
+		c.s2[i], c.s2[i+1] = l, r
+	}
+	for i := 0; i < 256; i += 2 {
+		l, r = encryptBlock(l, r, c)
+		c.s3[i], c.s3[i+1] = l, r
+	}
+}
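+
+// A minimal sketch of the bcrypt-style reuse mentioned above (assumes key,
+// salt and rounds are supplied by the caller; mirrors what the bcrypt
+// package does in its setup loop):
+//
+//	c, _ := NewSaltedCipher(key, salt)
+//	for i := 0; i < rounds; i++ {
+//		ExpandKey(key, c)
+//		ExpandKey(salt, c)
+//	}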
+
+// expandKeyWithSalt is similar to ExpandKey, but folds the salt into the key
+// schedule. ExpandKey is essentially expandKeyWithSalt with an all-zero salt,
+// but reusing ExpandKey for that case proved inefficient, so the salted
+// variant is specialized here.
+func expandKeyWithSalt(key []byte, salt []byte, c *Cipher) {
+	j := 0
+	for i := 0; i < 18; i++ {
+		c.p[i] ^= getNextWord(key, &j)
+	}
+
+	j = 0
+	var l, r uint32
+	for i := 0; i < 18; i += 2 {
+		l ^= getNextWord(salt, &j)
+		r ^= getNextWord(salt, &j)
+		l, r = encryptBlock(l, r, c)
+		c.p[i], c.p[i+1] = l, r
+	}
+
+	for i := 0; i < 256; i += 2 {
+		l ^= getNextWord(salt, &j)
+		r ^= getNextWord(salt, &j)
+		l, r = encryptBlock(l, r, c)
+		c.s0[i], c.s0[i+1] = l, r
+	}
+
+	for i := 0; i < 256; i += 2 {
+		l ^= getNextWord(salt, &j)
+		r ^= getNextWord(salt, &j)
+		l, r = encryptBlock(l, r, c)
+		c.s1[i], c.s1[i+1] = l, r
+	}
+
+	for i := 0; i < 256; i += 2 {
+		l ^= getNextWord(salt, &j)
+		r ^= getNextWord(salt, &j)
+		l, r = encryptBlock(l, r, c)
+		c.s2[i], c.s2[i+1] = l, r
+	}
+
+	for i := 0; i < 256; i += 2 {
+		l ^= getNextWord(salt, &j)
+		r ^= getNextWord(salt, &j)
+		l, r = encryptBlock(l, r, c)
+		c.s3[i], c.s3[i+1] = l, r
+	}
+}
+
+func encryptBlock(l, r uint32, c *Cipher) (uint32, uint32) {
+	xl, xr := l, r
+	xl ^= c.p[0]
+	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[1]
+	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[2]
+	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[3]
+	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[4]
+	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[5]
+	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[6]
+	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[7]
+	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[8]
+	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[9]
+	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[10]
+	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[11]
+	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[12]
+	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[13]
+	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[14]
+	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[15]
+	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[16]
+	xr ^= c.p[17]
+	return xr, xl
+}
+
+func decryptBlock(l, r uint32, c *Cipher) (uint32, uint32) {
+	xl, xr := l, r
+	xl ^= c.p[17]
+	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[16]
+	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[15]
+	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[14]
+	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[13]
+	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[12]
+	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[11]
+	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[10]
+	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[9]
+	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[8]
+	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[7]
+	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[6]
+	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[5]
+	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[4]
+	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[3]
+	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[2]
+	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[1]
+	xr ^= c.p[0]
+	return xr, xl
+}
diff --git a/vendor/golang.org/x/crypto/blowfish/cipher.go b/vendor/golang.org/x/crypto/blowfish/cipher.go
new file mode 100644
index 0000000..2641dad
--- /dev/null
+++ b/vendor/golang.org/x/crypto/blowfish/cipher.go
@@ -0,0 +1,91 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package blowfish implements Bruce Schneier's Blowfish encryption algorithm.
+package blowfish // import "golang.org/x/crypto/blowfish"
+
+// The code is a port of Bruce Schneier's C implementation.
+// See https://www.schneier.com/blowfish.html.
+
+import "strconv"
+
+// BlockSize is the Blowfish block size, in bytes.
+const BlockSize = 8
+
+// A Cipher is an instance of Blowfish encryption using a particular key.
+type Cipher struct {
+	p              [18]uint32
+	s0, s1, s2, s3 [256]uint32
+}
+
+// KeySizeError is returned from NewCipher and NewSaltedCipher when the key
+// length is out of range.
+type KeySizeError int
+
+func (k KeySizeError) Error() string {
+	return "crypto/blowfish: invalid key size " + strconv.Itoa(int(k))
+}
+
+// NewCipher creates and returns a Cipher.
+// The key argument should be the Blowfish key, from 1 to 56 bytes.
+func NewCipher(key []byte) (*Cipher, error) {
+	var result Cipher
+	if k := len(key); k < 1 || k > 56 {
+		return nil, KeySizeError(k)
+	}
+	initCipher(&result)
+	ExpandKey(key, &result)
+	return &result, nil
+}
+
+// NewSaltedCipher creates and returns a Cipher that folds a salt into its key
+// schedule. For most purposes, NewCipher, instead of NewSaltedCipher, is
+// sufficient and desirable. For bcrypt compatibility, the key can be over 56
+// bytes.
+func NewSaltedCipher(key, salt []byte) (*Cipher, error) {
+	if len(salt) == 0 {
+		return NewCipher(key)
+	}
+	var result Cipher
+	if k := len(key); k < 1 {
+		return nil, KeySizeError(k)
+	}
+	initCipher(&result)
+	expandKeyWithSalt(key, salt, &result)
+	return &result, nil
+}
+
+// BlockSize returns the Blowfish block size, 8 bytes.
+// It is necessary to satisfy the Block interface in the
+// package "crypto/cipher".
+func (c *Cipher) BlockSize() int { return BlockSize }
+
+// Encrypt encrypts the 8-byte buffer src with the cipher's key
+// and stores the result in dst.
+// Note that for amounts of data larger than a block,
+// it is not safe to just call Encrypt on successive blocks;
+// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go).
+func (c *Cipher) Encrypt(dst, src []byte) {
+	l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
+	r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
+	l, r = encryptBlock(l, r, c)
+	dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l)
+	dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r)
+}
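+
+// A minimal sketch of the multi-block guidance above (assumes caller-supplied
+// key, iv and an 8-byte-aligned plaintext; uses the standard crypto/cipher
+// package):
+//
+//	block, err := blowfish.NewCipher(key)
+//	if err != nil {
+//		// handle KeySizeError
+//	}
+//	mode := cipher.NewCBCEncrypter(block, iv) // iv must be BlockSize (8) bytes
+//	mode.CryptBlocks(ciphertext, plaintext)   // len(plaintext) must be a multiple of 8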
+
+// Decrypt decrypts the 8-byte buffer src with the cipher's key
+// and stores the result in dst.
+func (c *Cipher) Decrypt(dst, src []byte) {
+	l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
+	r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
+	l, r = decryptBlock(l, r, c)
+	dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l)
+	dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r)
+}
+
+func initCipher(c *Cipher) {
+	copy(c.p[0:], p[0:])
+	copy(c.s0[0:], s0[0:])
+	copy(c.s1[0:], s1[0:])
+	copy(c.s2[0:], s2[0:])
+	copy(c.s3[0:], s3[0:])
+}
diff --git a/vendor/golang.org/x/crypto/blowfish/const.go b/vendor/golang.org/x/crypto/blowfish/const.go
new file mode 100644
index 0000000..d040775
--- /dev/null
+++ b/vendor/golang.org/x/crypto/blowfish/const.go
@@ -0,0 +1,199 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The startup permutation array and substitution boxes.
+// They are the hexadecimal digits of PI; see:
+// https://www.schneier.com/code/constants.txt.
+
+package blowfish
+
+var s0 = [256]uint32{
+	0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7, 0xb8e1afed, 0x6a267e96,
+	0xba7c9045, 0xf12c7f99, 0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16,
+	0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e, 0x0d95748f, 0x728eb658,
+	0x718bcd58, 0x82154aee, 0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013,
+	0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef, 0x8e79dcb0, 0x603a180e,
+	0x6c9e0e8b, 0xb01e8a3e, 0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60,
+	0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440, 0x55ca396a, 0x2aab10b6,
+	0xb4cc5c34, 0x1141e8ce, 0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a,
+	0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e, 0xafd6ba33, 0x6c24cf5c,
+	0x7a325381, 0x28958677, 0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193,
+	0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032, 0xef845d5d, 0xe98575b1,
+	0xdc262302, 0xeb651b88, 0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239,
+	0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e, 0x21c66842, 0xf6e96c9a,
+	0x670c9c61, 0xabd388f0, 0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3,
+	0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98, 0xa1f1651d, 0x39af0176,
+	0x66ca593e, 0x82430e88, 0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe,
+	0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6, 0x4ed3aa62, 0x363f7706,
+	0x1bfedf72, 0x429b023d, 0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b,
+	0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7, 0xe3fe501a, 0xb6794c3b,
+	0x976ce0bd, 0x04c006ba, 0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463,
+	0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f, 0x6dfc511f, 0x9b30952c,
+	0xcc814544, 0xaf5ebd09, 0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3,
+	0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb, 0x5579c0bd, 0x1a60320a,
+	0xd6a100c6, 0x402c7279, 0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8,
+	0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab, 0x323db5fa, 0xfd238760,
+	0x53317b48, 0x3e00df82, 0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, 0xdf1769db,
+	0xd542a8f6, 0x287effc3, 0xac6732c6, 0x8c4f5573, 0x695b27b0, 0xbbca58c8,
+	0xe1ffa35d, 0xb8f011a0, 0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b,
+	0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790, 0xe1ddf2da, 0xa4cb7e33,
+	0x62fb1341, 0xcee4c6e8, 0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4,
+	0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0, 0xd08ed1d0, 0xafc725e0,
+	0x8e3c5b2f, 0x8e7594b7, 0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c,
+	0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad, 0x2f2f2218, 0xbe0e1777,
+	0xea752dfe, 0x8b021fa1, 0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299,
+	0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9, 0x165fa266, 0x80957705,
+	0x93cc7314, 0x211a1477, 0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf,
+	0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49, 0x00250e2d, 0x2071b35e,
+	0x226800bb, 0x57b8e0af, 0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa,
+	0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5, 0x83260376, 0x6295cfa9,
+	0x11c81968, 0x4e734a41, 0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915,
+	0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400, 0x08ba6fb5, 0x571be91f,
+	0xf296ec6b, 0x2a0dd915, 0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664,
+	0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a,
+}
+
+var s1 = [256]uint32{
+	0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623, 0xad6ea6b0, 0x49a7df7d,
+	0x9cee60b8, 0x8fedb266, 0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1,
+	0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e, 0x3f54989a, 0x5b429d65,
+	0x6b8fe4d6, 0x99f73fd6, 0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1,
+	0x4cdd2086, 0x8470eb26, 0x6382e9c6, 0x021ecc5e, 0x09686b3f, 0x3ebaefc9,
+	0x3c971814, 0x6b6a70a1, 0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737,
+	0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8, 0xb03ada37, 0xf0500c0d,
+	0xf01c1f04, 0x0200b3ff, 0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd,
+	0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701, 0x3ae5e581, 0x37c2dadc,
+	0xc8b57634, 0x9af3dda7, 0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41,
+	0xe238cd99, 0x3bea0e2f, 0x3280bba1, 0x183eb331, 0x4e548b38, 0x4f6db908,
+	0x6f420d03, 0xf60a04bf, 0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af,
+	0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e, 0x5512721f, 0x2e6b7124,
+	0x501adde6, 0x9f84cd87, 0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c,
+	0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2, 0xef1c1847, 0x3215d908,
+	0xdd433b37, 0x24c2ba16, 0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd,
+	0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b, 0x043556f1, 0xd7a3c76b,
+	0x3c11183b, 0x5924a509, 0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e,
+	0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3, 0x771fe71c, 0x4e3d06fa,
+	0x2965dcb9, 0x99e71d0f, 0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a,
+	0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4, 0xf2f74ea7, 0x361d2b3d,
+	0x1939260f, 0x19c27960, 0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66,
+	0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28, 0xc332ddef, 0xbe6c5aa5,
+	0x65582185, 0x68ab9802, 0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84,
+	0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510, 0x13cca830, 0xeb61bd96,
+	0x0334fe1e, 0xaa0363cf, 0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14,
+	0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e, 0x648b1eaf, 0x19bdf0ca,
+	0xa02369b9, 0x655abb50, 0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7,
+	0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8, 0xf837889a, 0x97e32d77,
+	0x11ed935f, 0x16681281, 0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99,
+	0x57f584a5, 0x1b227263, 0x9b83c3ff, 0x1ac24696, 0xcdb30aeb, 0x532e3054,
+	0x8fd948e4, 0x6dbc3128, 0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73,
+	0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0, 0x45eee2b6, 0xa3aaabea,
+	0xdb6c4f15, 0xfacb4fd0, 0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105,
+	0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250, 0xcf62a1f2, 0x5b8d2646,
+	0xfc8883a0, 0xc1c7b6a3, 0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285,
+	0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00, 0x58428d2a, 0x0c55f5ea,
+	0x1dadf43e, 0x233f7061, 0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb,
+	0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e, 0xa6078084, 0x19f8509e,
+	0xe8efd855, 0x61d99735, 0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc,
+	0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9, 0xdb73dbd3, 0x105588cd,
+	0x675fda79, 0xe3674340, 0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20,
+	0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7,
+}
+
+var s2 = [256]uint32{
+	0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934, 0x411520f7, 0x7602d4f7,
+	0xbcf46b2e, 0xd4a20068, 0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af,
+	0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840, 0x4d95fc1d, 0x96b591af,
+	0x70f4ddd3, 0x66a02f45, 0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504,
+	0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a, 0x28507825, 0x530429f4,
+	0x0a2c86da, 0xe9b66dfb, 0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee,
+	0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6, 0xaace1e7c, 0xd3375fec,
+	0xce78a399, 0x406b2a42, 0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b,
+	0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2, 0x3a6efa74, 0xdd5b4332,
+	0x6841e7f7, 0xca7820fb, 0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527,
+	0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b, 0x55a867bc, 0xa1159a58,
+	0xcca92963, 0x99e1db33, 0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c,
+	0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3, 0x95c11548, 0xe4c66d22,
+	0x48c1133f, 0xc70f86dc, 0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17,
+	0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564, 0x257b7834, 0x602a9c60,
+	0xdff8e8a3, 0x1f636c1b, 0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115,
+	0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922, 0x85b2a20e, 0xe6ba0d99,
+	0xde720c8c, 0x2da2f728, 0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0,
+	0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e, 0x0a476341, 0x992eff74,
+	0x3a6f6eab, 0xf4f8fd37, 0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d,
+	0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804, 0xf1290dc7, 0xcc00ffa3,
+	0xb5390f92, 0x690fed0b, 0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3,
+	0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb, 0x37392eb3, 0xcc115979,
+	0x8026e297, 0xf42e312d, 0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c,
+	0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350, 0x1a6b1018, 0x11caedfa,
+	0x3d25bdd8, 0xe2e1c3c9, 0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a,
+	0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe, 0x9dbc8057, 0xf0f7c086,
+	0x60787bf8, 0x6003604d, 0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc,
+	0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f, 0x77a057be, 0xbde8ae24,
+	0x55464299, 0xbf582e61, 0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2,
+	0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9, 0x7aeb2661, 0x8b1ddf84,
+	0x846a0e79, 0x915f95e2, 0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c,
+	0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e, 0xb77f19b6, 0xe0a9dc09,
+	0x662d09a1, 0xc4324633, 0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10,
+	0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169, 0xdcb7da83, 0x573906fe,
+	0xa1e2ce9b, 0x4fcd7f52, 0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027,
+	0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5, 0xf0177a28, 0xc0f586e0,
+	0x006058aa, 0x30dc7d62, 0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634,
+	0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76, 0x6f05e409, 0x4b7c0188,
+	0x39720a3d, 0x7c927c24, 0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc,
+	0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4, 0x1e50ef5e, 0xb161e6f8,
+	0xa28514d9, 0x6c51133c, 0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837,
+	0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0,
+}
+
+var s3 = [256]uint32{
+	0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b, 0x5cb0679e, 0x4fa33742,
+	0xd3822740, 0x99bc9bbe, 0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b,
+	0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4, 0x5748ab2f, 0xbc946e79,
+	0xc6a376d2, 0x6549c2c8, 0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6,
+	0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304, 0xa1fad5f0, 0x6a2d519a,
+	0x63ef8ce2, 0x9a86ee22, 0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4,
+	0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6, 0x2826a2f9, 0xa73a3ae1,
+	0x4ba99586, 0xef5562e9, 0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59,
+	0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593, 0xe990fd5a, 0x9e34d797,
+	0x2cf0b7d9, 0x022b8b51, 0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28,
+	0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c, 0xe029ac71, 0xe019a5e6,
+	0x47b0acfd, 0xed93fa9b, 0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28,
+	0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c, 0x15056dd4, 0x88f46dba,
+	0x03a16125, 0x0564f0bd, 0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a,
+	0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319, 0x7533d928, 0xb155fdf5,
+	0x03563482, 0x8aba3cbb, 0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f,
+	0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991, 0xea7a90c2, 0xfb3e7bce,
+	0x5121ce64, 0x774fbe32, 0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680,
+	0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166, 0xb39a460a, 0x6445c0dd,
+	0x586cdecf, 0x1c20c8ae, 0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb,
+	0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5, 0x72eacea8, 0xfa6484bb,
+	0x8d6612ae, 0xbf3c6f47, 0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370,
+	0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d, 0x4040cb08, 0x4eb4e2cc,
+	0x34d2466a, 0x0115af84, 0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048,
+	0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8, 0x611560b1, 0xe7933fdc,
+	0xbb3a792b, 0x344525bd, 0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9,
+	0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7, 0x1a908749, 0xd44fbd9a,
+	0xd0dadecb, 0xd50ada38, 0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f,
+	0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c, 0xbf97222c, 0x15e6fc2a,
+	0x0f91fc71, 0x9b941525, 0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1,
+	0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442, 0xe0ec6e0e, 0x1698db3b,
+	0x4c98a0be, 0x3278e964, 0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e,
+	0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8, 0xdf359f8d, 0x9b992f2e,
+	0xe60b6f47, 0x0fe3f11d, 0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f,
+	0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299, 0xf523f357, 0xa6327623,
+	0x93a83531, 0x56cccd02, 0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc,
+	0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614, 0xe6c6c7bd, 0x327a140a,
+	0x45e1d006, 0xc3f27b9a, 0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6,
+	0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b, 0x53113ec0, 0x1640e3d3,
+	0x38abbd60, 0x2547adf0, 0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060,
+	0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e, 0x1948c25c, 0x02fb8a8c,
+	0x01c36ae4, 0xd6ebe1f9, 0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f,
+	0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6,
+}
+
+var p = [18]uint32{
+	0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344, 0xa4093822, 0x299f31d0,
+	0x082efa98, 0xec4e6c89, 0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c,
+	0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917, 0x9216d5d9, 0x8979fb1b,
+}
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/terminal.go b/vendor/golang.org/x/crypto/ssh/terminal/terminal.go
new file mode 100644
index 0000000..9a88759
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/terminal/terminal.go
@@ -0,0 +1,951 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package terminal
+
+import (
+	"bytes"
+	"io"
+	"sync"
+	"unicode/utf8"
+)
+
+// EscapeCodes contains escape sequences that can be written to the terminal in
+// order to achieve different styles of text.
+type EscapeCodes struct {
+	// Foreground colors
+	Black, Red, Green, Yellow, Blue, Magenta, Cyan, White []byte
+
+	// Reset all attributes
+	Reset []byte
+}
+
+var vt100EscapeCodes = EscapeCodes{
+	Black:   []byte{keyEscape, '[', '3', '0', 'm'},
+	Red:     []byte{keyEscape, '[', '3', '1', 'm'},
+	Green:   []byte{keyEscape, '[', '3', '2', 'm'},
+	Yellow:  []byte{keyEscape, '[', '3', '3', 'm'},
+	Blue:    []byte{keyEscape, '[', '3', '4', 'm'},
+	Magenta: []byte{keyEscape, '[', '3', '5', 'm'},
+	Cyan:    []byte{keyEscape, '[', '3', '6', 'm'},
+	White:   []byte{keyEscape, '[', '3', '7', 'm'},
+
+	Reset: []byte{keyEscape, '[', '0', 'm'},
+}
+
+// Terminal contains the state for running a VT100 terminal that is capable of
+// reading lines of input.
+type Terminal struct {
+	// AutoCompleteCallback, if non-nil, is called for each keypress with
+	// the full input line and the current position of the cursor (in
+	// bytes, as an index into |line|). If it returns ok=false, the key
+	// press is processed normally. Otherwise it returns a replacement line
+	// and the new cursor position.
+	AutoCompleteCallback func(line string, pos int, key rune) (newLine string, newPos int, ok bool)
+
+	// Escape contains a pointer to the escape codes for this terminal.
+	// It's always a valid pointer, although the escape codes themselves
+	// may be empty if the terminal doesn't support them.
+	Escape *EscapeCodes
+
+	// lock protects the terminal and the state in this object from
+	// concurrent processing of a key press and a Write() call.
+	lock sync.Mutex
+
+	c      io.ReadWriter
+	prompt []rune
+
+	// line is the current line being entered.
+	line []rune
+	// pos is the logical position of the cursor in line
+	pos int
+	// echo is true if local echo is enabled
+	echo bool
+	// pasteActive is true iff there is a bracketed paste operation in
+	// progress.
+	pasteActive bool
+
+	// cursorX contains the current X value of the cursor where the left
+	// edge is 0. cursorY contains the row number where the first row of
+	// the current line is 0.
+	cursorX, cursorY int
+	// maxLine is the greatest value of cursorY so far.
+	maxLine int
+
+	termWidth, termHeight int
+
+	// outBuf contains the terminal data to be sent.
+	outBuf []byte
+	// remainder contains the remainder of any partial key sequences after
+	// a read. It aliases into inBuf.
+	remainder []byte
+	inBuf     [256]byte
+
+	// history contains previously entered commands so that they can be
+	// accessed with the up and down keys.
+	history stRingBuffer
+	// historyIndex stores the currently accessed history entry, where zero
+	// means the immediately previous entry.
+	historyIndex int
+	// When navigating up and down the history it's possible to return to
+	// the incomplete, initial line. That value is stored in
+	// historyPending.
+	historyPending string
+}
+
+// NewTerminal runs a VT100 terminal on the given ReadWriter. If the ReadWriter is
+// a local terminal, that terminal must first have been put into raw mode.
+// prompt is a string that is written at the start of each input line (e.g.
+// "> ").
+func NewTerminal(c io.ReadWriter, prompt string) *Terminal {
+	return &Terminal{
+		Escape:       &vt100EscapeCodes,
+		c:            c,
+		prompt:       []rune(prompt),
+		termWidth:    80,
+		termHeight:   24,
+		echo:         true,
+		historyIndex: -1,
+	}
+}
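+
+// A minimal usage sketch (assumes conn is an io.ReadWriter already in raw
+// mode, e.g. an SSH channel):
+//
+//	term := terminal.NewTerminal(conn, "> ")
+//	for {
+//		line, err := term.ReadLine()
+//		if err != nil {
+//			break // io.EOF when ^D is pressed on an empty line
+//		}
+//		fmt.Fprintln(term, "you typed:", line)
+//	}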
+
+const (
+	keyCtrlD     = 4
+	keyCtrlU     = 21
+	keyEnter     = '\r'
+	keyEscape    = 27
+	keyBackspace = 127
+	keyUnknown   = 0xd800 /* UTF-16 surrogate area */ + iota
+	keyUp
+	keyDown
+	keyLeft
+	keyRight
+	keyAltLeft
+	keyAltRight
+	keyHome
+	keyEnd
+	keyDeleteWord
+	keyDeleteLine
+	keyClearScreen
+	keyPasteStart
+	keyPasteEnd
+)
+
+var (
+	crlf       = []byte{'\r', '\n'}
+	pasteStart = []byte{keyEscape, '[', '2', '0', '0', '~'}
+	pasteEnd   = []byte{keyEscape, '[', '2', '0', '1', '~'}
+)
+
+// bytesToKey tries to parse a key sequence from b. If successful, it returns
+// the key and the remainder of the input. Otherwise it returns utf8.RuneError.
+func bytesToKey(b []byte, pasteActive bool) (rune, []byte) {
+	if len(b) == 0 {
+		return utf8.RuneError, nil
+	}
+
+	if !pasteActive {
+		switch b[0] {
+		case 1: // ^A
+			return keyHome, b[1:]
+		case 5: // ^E
+			return keyEnd, b[1:]
+		case 8: // ^H
+			return keyBackspace, b[1:]
+		case 11: // ^K
+			return keyDeleteLine, b[1:]
+		case 12: // ^L
+			return keyClearScreen, b[1:]
+		case 23: // ^W
+			return keyDeleteWord, b[1:]
+		}
+	}
+
+	if b[0] != keyEscape {
+		if !utf8.FullRune(b) {
+			return utf8.RuneError, b
+		}
+		r, l := utf8.DecodeRune(b)
+		return r, b[l:]
+	}
+
+	if !pasteActive && len(b) >= 3 && b[0] == keyEscape && b[1] == '[' {
+		switch b[2] {
+		case 'A':
+			return keyUp, b[3:]
+		case 'B':
+			return keyDown, b[3:]
+		case 'C':
+			return keyRight, b[3:]
+		case 'D':
+			return keyLeft, b[3:]
+		case 'H':
+			return keyHome, b[3:]
+		case 'F':
+			return keyEnd, b[3:]
+		}
+	}
+
+	if !pasteActive && len(b) >= 6 && b[0] == keyEscape && b[1] == '[' && b[2] == '1' && b[3] == ';' && b[4] == '3' {
+		switch b[5] {
+		case 'C':
+			return keyAltRight, b[6:]
+		case 'D':
+			return keyAltLeft, b[6:]
+		}
+	}
+
+	if !pasteActive && len(b) >= 6 && bytes.Equal(b[:6], pasteStart) {
+		return keyPasteStart, b[6:]
+	}
+
+	if pasteActive && len(b) >= 6 && bytes.Equal(b[:6], pasteEnd) {
+		return keyPasteEnd, b[6:]
+	}
+
+	// If we get here then we have a key that we don't recognise, or a
+	// partial sequence. It's not clear how one should find the end of a
+	// sequence without knowing them all, but it seems that [a-zA-Z~] only
+	// appears at the end of a sequence.
+	for i, c := range b[0:] {
+		if c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c == '~' {
+			return keyUnknown, b[i+1:]
+		}
+	}
+
+	return utf8.RuneError, b
+}
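+
+// For example (illustrative): given b = []byte{0x1b, '[', 'A'} outside a
+// paste, bytesToKey returns (keyUp, b[3:]); a lone 0x1b with nothing after
+// it returns (utf8.RuneError, b) so the caller can wait for more bytes.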
+
+// queue appends data to the end of t.outBuf
+func (t *Terminal) queue(data []rune) {
+	t.outBuf = append(t.outBuf, []byte(string(data))...)
+}
+
+var eraseUnderCursor = []rune{' ', keyEscape, '[', 'D'}
+var space = []rune{' '}
+
+func isPrintable(key rune) bool {
+	isInSurrogateArea := key >= 0xd800 && key <= 0xdbff
+	return key >= 32 && !isInSurrogateArea
+}
+
+// moveCursorToPos appends data to t.outBuf which will move the cursor to the
+// given logical position in the text.
+func (t *Terminal) moveCursorToPos(pos int) {
+	if !t.echo {
+		return
+	}
+
+	x := visualLength(t.prompt) + pos
+	y := x / t.termWidth
+	x = x % t.termWidth
+
+	up := 0
+	if y < t.cursorY {
+		up = t.cursorY - y
+	}
+
+	down := 0
+	if y > t.cursorY {
+		down = y - t.cursorY
+	}
+
+	left := 0
+	if x < t.cursorX {
+		left = t.cursorX - x
+	}
+
+	right := 0
+	if x > t.cursorX {
+		right = x - t.cursorX
+	}
+
+	t.cursorX = x
+	t.cursorY = y
+	t.move(up, down, left, right)
+}
+
+func (t *Terminal) move(up, down, left, right int) {
+	movement := make([]rune, 3*(up+down+left+right))
+	m := movement
+	for i := 0; i < up; i++ {
+		m[0] = keyEscape
+		m[1] = '['
+		m[2] = 'A'
+		m = m[3:]
+	}
+	for i := 0; i < down; i++ {
+		m[0] = keyEscape
+		m[1] = '['
+		m[2] = 'B'
+		m = m[3:]
+	}
+	for i := 0; i < left; i++ {
+		m[0] = keyEscape
+		m[1] = '['
+		m[2] = 'D'
+		m = m[3:]
+	}
+	for i := 0; i < right; i++ {
+		m[0] = keyEscape
+		m[1] = '['
+		m[2] = 'C'
+		m = m[3:]
+	}
+
+	t.queue(movement)
+}
+
+func (t *Terminal) clearLineToRight() {
+	op := []rune{keyEscape, '[', 'K'}
+	t.queue(op)
+}
+
+const maxLineLength = 4096
+
+func (t *Terminal) setLine(newLine []rune, newPos int) {
+	if t.echo {
+		t.moveCursorToPos(0)
+		t.writeLine(newLine)
+		for i := len(newLine); i < len(t.line); i++ {
+			t.writeLine(space)
+		}
+		t.moveCursorToPos(newPos)
+	}
+	t.line = newLine
+	t.pos = newPos
+}
+
+func (t *Terminal) advanceCursor(places int) {
+	t.cursorX += places
+	t.cursorY += t.cursorX / t.termWidth
+	if t.cursorY > t.maxLine {
+		t.maxLine = t.cursorY
+	}
+	t.cursorX = t.cursorX % t.termWidth
+
+	if places > 0 && t.cursorX == 0 {
+		// Normally terminals will advance the current position
+		// when writing a character. But that doesn't happen
+		// for the last character in a line. However, when
+		// writing a character (except a new line) that causes
+		// a line wrap, the position will be advanced two
+		// places.
+		//
+		// So, if we are stopping at the end of a line, we
+		// need to write a newline so that our cursor can be
+		// advanced to the next line.
+		t.outBuf = append(t.outBuf, '\r', '\n')
+	}
+}
+
+func (t *Terminal) eraseNPreviousChars(n int) {
+	if n == 0 {
+		return
+	}
+
+	if t.pos < n {
+		n = t.pos
+	}
+	t.pos -= n
+	t.moveCursorToPos(t.pos)
+
+	copy(t.line[t.pos:], t.line[n+t.pos:])
+	t.line = t.line[:len(t.line)-n]
+	if t.echo {
+		t.writeLine(t.line[t.pos:])
+		for i := 0; i < n; i++ {
+			t.queue(space)
+		}
+		t.advanceCursor(n)
+		t.moveCursorToPos(t.pos)
+	}
+}
+
+// countToLeftWord returns the number of characters from the cursor to the
+// start of the previous word.
+func (t *Terminal) countToLeftWord() int {
+	if t.pos == 0 {
+		return 0
+	}
+
+	pos := t.pos - 1
+	for pos > 0 {
+		if t.line[pos] != ' ' {
+			break
+		}
+		pos--
+	}
+	for pos > 0 {
+		if t.line[pos] == ' ' {
+			pos++
+			break
+		}
+		pos--
+	}
+
+	return t.pos - pos
+}
+
+// countToRightWord returns the number of characters from the cursor to the
+// start of the next word.
+func (t *Terminal) countToRightWord() int {
+	pos := t.pos
+	for pos < len(t.line) {
+		if t.line[pos] == ' ' {
+			break
+		}
+		pos++
+	}
+	for pos < len(t.line) {
+		if t.line[pos] != ' ' {
+			break
+		}
+		pos++
+	}
+	return pos - t.pos
+}
+
+// visualLength returns the number of visible glyphs in runes.
+func visualLength(runes []rune) int {
+	inEscapeSeq := false
+	length := 0
+
+	for _, r := range runes {
+		switch {
+		case inEscapeSeq:
+			if (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') {
+				inEscapeSeq = false
+			}
+		case r == '\x1b':
+			inEscapeSeq = true
+		default:
+			length++
+		}
+	}
+
+	return length
+}
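+
+// For example (illustrative): visualLength([]rune("\x1b[31mhi\x1b[0m")) is 2,
+// since the two color escape sequences contribute no visible glyphs.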
+
+// handleKey processes the given key and, optionally, returns a line of text
+// that the user has entered.
+func (t *Terminal) handleKey(key rune) (line string, ok bool) {
+	if t.pasteActive && key != keyEnter {
+		t.addKeyToLine(key)
+		return
+	}
+
+	switch key {
+	case keyBackspace:
+		if t.pos == 0 {
+			return
+		}
+		t.eraseNPreviousChars(1)
+	case keyAltLeft:
+		// move left by a word.
+		t.pos -= t.countToLeftWord()
+		t.moveCursorToPos(t.pos)
+	case keyAltRight:
+		// move right by a word.
+		t.pos += t.countToRightWord()
+		t.moveCursorToPos(t.pos)
+	case keyLeft:
+		if t.pos == 0 {
+			return
+		}
+		t.pos--
+		t.moveCursorToPos(t.pos)
+	case keyRight:
+		if t.pos == len(t.line) {
+			return
+		}
+		t.pos++
+		t.moveCursorToPos(t.pos)
+	case keyHome:
+		if t.pos == 0 {
+			return
+		}
+		t.pos = 0
+		t.moveCursorToPos(t.pos)
+	case keyEnd:
+		if t.pos == len(t.line) {
+			return
+		}
+		t.pos = len(t.line)
+		t.moveCursorToPos(t.pos)
+	case keyUp:
+		entry, ok := t.history.NthPreviousEntry(t.historyIndex + 1)
+		if !ok {
+			return "", false
+		}
+		if t.historyIndex == -1 {
+			t.historyPending = string(t.line)
+		}
+		t.historyIndex++
+		runes := []rune(entry)
+		t.setLine(runes, len(runes))
+	case keyDown:
+		switch t.historyIndex {
+		case -1:
+			return
+		case 0:
+			runes := []rune(t.historyPending)
+			t.setLine(runes, len(runes))
+			t.historyIndex--
+		default:
+			entry, ok := t.history.NthPreviousEntry(t.historyIndex - 1)
+			if ok {
+				t.historyIndex--
+				runes := []rune(entry)
+				t.setLine(runes, len(runes))
+			}
+		}
+	case keyEnter:
+		t.moveCursorToPos(len(t.line))
+		t.queue([]rune("\r\n"))
+		line = string(t.line)
+		ok = true
+		t.line = t.line[:0]
+		t.pos = 0
+		t.cursorX = 0
+		t.cursorY = 0
+		t.maxLine = 0
+	case keyDeleteWord:
+		// Delete zero or more spaces and then one or more characters.
+		t.eraseNPreviousChars(t.countToLeftWord())
+	case keyDeleteLine:
+		// Delete everything from the current cursor position to the
+		// end of line.
+		for i := t.pos; i < len(t.line); i++ {
+			t.queue(space)
+			t.advanceCursor(1)
+		}
+		t.line = t.line[:t.pos]
+		t.moveCursorToPos(t.pos)
+	case keyCtrlD:
+		// Erase the character under the current position.
+		// The EOF case when the line is empty is handled in
+		// readLine().
+		if t.pos < len(t.line) {
+			t.pos++
+			t.eraseNPreviousChars(1)
+		}
+	case keyCtrlU:
+		t.eraseNPreviousChars(t.pos)
+	case keyClearScreen:
+		// Erases the screen and moves the cursor to the home position.
+		t.queue([]rune("\x1b[2J\x1b[H"))
+		t.queue(t.prompt)
+		t.cursorX, t.cursorY = 0, 0
+		t.advanceCursor(visualLength(t.prompt))
+		t.setLine(t.line, t.pos)
+	default:
+		if t.AutoCompleteCallback != nil {
+			prefix := string(t.line[:t.pos])
+			suffix := string(t.line[t.pos:])
+
+			t.lock.Unlock()
+			newLine, newPos, completeOk := t.AutoCompleteCallback(prefix+suffix, len(prefix), key)
+			t.lock.Lock()
+
+			if completeOk {
+				t.setLine([]rune(newLine), utf8.RuneCount([]byte(newLine)[:newPos]))
+				return
+			}
+		}
+		if !isPrintable(key) {
+			return
+		}
+		if len(t.line) == maxLineLength {
+			return
+		}
+		t.addKeyToLine(key)
+	}
+	return
+}
+
+// addKeyToLine inserts the given key at the current position in the current
+// line.
+func (t *Terminal) addKeyToLine(key rune) {
+	if len(t.line) == cap(t.line) {
+		newLine := make([]rune, len(t.line), 2*(1+len(t.line)))
+		copy(newLine, t.line)
+		t.line = newLine
+	}
+	t.line = t.line[:len(t.line)+1]
+	copy(t.line[t.pos+1:], t.line[t.pos:])
+	t.line[t.pos] = key
+	if t.echo {
+		t.writeLine(t.line[t.pos:])
+	}
+	t.pos++
+	t.moveCursorToPos(t.pos)
+}
+
+func (t *Terminal) writeLine(line []rune) {
+	for len(line) != 0 {
+		remainingOnLine := t.termWidth - t.cursorX
+		todo := len(line)
+		if todo > remainingOnLine {
+			todo = remainingOnLine
+		}
+		t.queue(line[:todo])
+		t.advanceCursor(visualLength(line[:todo]))
+		line = line[todo:]
+	}
+}
+
+// writeWithCRLF writes buf to w but replaces all occurrences of \n with \r\n.
+func writeWithCRLF(w io.Writer, buf []byte) (n int, err error) {
+	for len(buf) > 0 {
+		i := bytes.IndexByte(buf, '\n')
+		todo := len(buf)
+		if i >= 0 {
+			todo = i
+		}
+
+		var nn int
+		nn, err = w.Write(buf[:todo])
+		n += nn
+		if err != nil {
+			return n, err
+		}
+		buf = buf[todo:]
+
+		if i >= 0 {
+			if _, err = w.Write(crlf); err != nil {
+				return n, err
+			}
+			n++
+			buf = buf[1:]
+		}
+	}
+
+	return n, nil
+}
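+
+// For example (illustrative): writeWithCRLF(w, []byte("a\nb")) writes
+// "a\r\nb" to w and reports n == 3, counting each original '\n' as a
+// single byte so that n matches len(buf) on success.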
+
+func (t *Terminal) Write(buf []byte) (n int, err error) {
+	t.lock.Lock()
+	defer t.lock.Unlock()
+
+	if t.cursorX == 0 && t.cursorY == 0 {
+		// This is the easy case: there's nothing on the screen that we
+		// have to move out of the way.
+		return writeWithCRLF(t.c, buf)
+	}
+
+	// We have a prompt and possibly user input on the screen. We
+	// have to clear it first.
+	t.move(0 /* up */, 0 /* down */, t.cursorX /* left */, 0 /* right */)
+	t.cursorX = 0
+	t.clearLineToRight()
+
+	for t.cursorY > 0 {
+		t.move(1 /* up */, 0, 0, 0)
+		t.cursorY--
+		t.clearLineToRight()
+	}
+
+	if _, err = t.c.Write(t.outBuf); err != nil {
+		return
+	}
+	t.outBuf = t.outBuf[:0]
+
+	if n, err = writeWithCRLF(t.c, buf); err != nil {
+		return
+	}
+
+	t.writeLine(t.prompt)
+	if t.echo {
+		t.writeLine(t.line)
+	}
+
+	t.moveCursorToPos(t.pos)
+
+	if _, err = t.c.Write(t.outBuf); err != nil {
+		return
+	}
+	t.outBuf = t.outBuf[:0]
+	return
+}
+
+// ReadPassword temporarily changes the prompt and reads a password, without
+// echo, from the terminal.
+func (t *Terminal) ReadPassword(prompt string) (line string, err error) {
+	t.lock.Lock()
+	defer t.lock.Unlock()
+
+	oldPrompt := t.prompt
+	t.prompt = []rune(prompt)
+	t.echo = false
+
+	line, err = t.readLine()
+
+	t.prompt = oldPrompt
+	t.echo = true
+
+	return
+}
+
+// ReadLine returns a line of input from the terminal.
+func (t *Terminal) ReadLine() (line string, err error) {
+	t.lock.Lock()
+	defer t.lock.Unlock()
+
+	return t.readLine()
+}
+
+func (t *Terminal) readLine() (line string, err error) {
+	// t.lock must be held at this point
+
+	if t.cursorX == 0 && t.cursorY == 0 {
+		t.writeLine(t.prompt)
+		t.c.Write(t.outBuf)
+		t.outBuf = t.outBuf[:0]
+	}
+
+	lineIsPasted := t.pasteActive
+
+	for {
+		rest := t.remainder
+		lineOk := false
+		for !lineOk {
+			var key rune
+			key, rest = bytesToKey(rest, t.pasteActive)
+			if key == utf8.RuneError {
+				break
+			}
+			if !t.pasteActive {
+				if key == keyCtrlD {
+					if len(t.line) == 0 {
+						return "", io.EOF
+					}
+				}
+				if key == keyPasteStart {
+					t.pasteActive = true
+					if len(t.line) == 0 {
+						lineIsPasted = true
+					}
+					continue
+				}
+			} else if key == keyPasteEnd {
+				t.pasteActive = false
+				continue
+			}
+			if !t.pasteActive {
+				lineIsPasted = false
+			}
+			line, lineOk = t.handleKey(key)
+		}
+		if len(rest) > 0 {
+			n := copy(t.inBuf[:], rest)
+			t.remainder = t.inBuf[:n]
+		} else {
+			t.remainder = nil
+		}
+		t.c.Write(t.outBuf)
+		t.outBuf = t.outBuf[:0]
+		if lineOk {
+			if t.echo {
+				t.historyIndex = -1
+				t.history.Add(line)
+			}
+			if lineIsPasted {
+				err = ErrPasteIndicator
+			}
+			return
+		}
+
+		// t.remainder is a slice at the beginning of t.inBuf
+		// containing a partial key sequence
+		readBuf := t.inBuf[len(t.remainder):]
+		var n int
+
+		t.lock.Unlock()
+		n, err = t.c.Read(readBuf)
+		t.lock.Lock()
+
+		if err != nil {
+			return
+		}
+
+		t.remainder = t.inBuf[:n+len(t.remainder)]
+	}
+}
+
+// SetPrompt sets the prompt to be used when reading subsequent lines.
+func (t *Terminal) SetPrompt(prompt string) {
+	t.lock.Lock()
+	defer t.lock.Unlock()
+
+	t.prompt = []rune(prompt)
+}
+
+func (t *Terminal) clearAndRepaintLinePlusNPrevious(numPrevLines int) {
+	// Move cursor to column zero at the start of the line.
+	t.move(t.cursorY, 0, t.cursorX, 0)
+	t.cursorX, t.cursorY = 0, 0
+	t.clearLineToRight()
+	for t.cursorY < numPrevLines {
+		// Move down a line
+		t.move(0, 1, 0, 0)
+		t.cursorY++
+		t.clearLineToRight()
+	}
+	// Move back to beginning.
+	t.move(t.cursorY, 0, 0, 0)
+	t.cursorX, t.cursorY = 0, 0
+
+	t.queue(t.prompt)
+	t.advanceCursor(visualLength(t.prompt))
+	t.writeLine(t.line)
+	t.moveCursorToPos(t.pos)
+}
+
+func (t *Terminal) SetSize(width, height int) error {
+	t.lock.Lock()
+	defer t.lock.Unlock()
+
+	if width == 0 {
+		width = 1
+	}
+
+	oldWidth := t.termWidth
+	t.termWidth, t.termHeight = width, height
+
+	switch {
+	case width == oldWidth:
+		// If the width didn't change then nothing else needs to be
+		// done.
+		return nil
+	case len(t.line) == 0 && t.cursorX == 0 && t.cursorY == 0:
+		// If there is nothing on current line and no prompt printed,
+		// just do nothing
+		return nil
+	case width < oldWidth:
+		// Some terminals (e.g. xterm) will truncate lines that were
+		// too long when shrinking. Others (e.g. gnome-terminal) will
+		// attempt to wrap them. For the former, repainting t.maxLine
+		// works great, but that behaviour goes badly wrong in the case
+		// of the latter because they have doubled every full line.
+
+		// We assume that we are working on a terminal that wraps lines
+		// and adjust the cursor position based on every previous line
+		// wrapping and turning into two. This causes the prompt on
+		// xterms to move upwards, which isn't great, but it avoids a
+		// huge mess with gnome-terminal.
+		if t.cursorX >= t.termWidth {
+			t.cursorX = t.termWidth - 1
+		}
+		t.cursorY *= 2
+		t.clearAndRepaintLinePlusNPrevious(t.maxLine * 2)
+	case width > oldWidth:
+		// If the terminal expands then our position calculations will
+		// be wrong in the future because we think the cursor is
+		// |t.pos| chars into the string, but there will be a gap at
+		// the end of any wrapped line.
+		//
+		// But the position will actually be correct until we move, so
+		// we can move back to the beginning and repaint everything.
+		t.clearAndRepaintLinePlusNPrevious(t.maxLine)
+	}
+
+	_, err := t.c.Write(t.outBuf)
+	t.outBuf = t.outBuf[:0]
+	return err
+}
+
+type pasteIndicatorError struct{}
+
+func (pasteIndicatorError) Error() string {
+	return "terminal: ErrPasteIndicator not correctly handled"
+}
+
+// ErrPasteIndicator may be returned from ReadLine as the error, in addition
+// to valid line data. It indicates that bracketed paste mode is enabled and
+// that the returned line consists only of pasted data. Programs may wish to
+// interpret pasted data more literally than typed data.
+var ErrPasteIndicator = pasteIndicatorError{}
+
+// SetBracketedPasteMode requests that the terminal bracket paste operations
+// with markers. Not all terminals support this but, if it is supported, then
+// enabling this mode will stop any autocomplete callback from running due to
+// pastes. Additionally, any lines that are completely pasted will be returned
+// from ReadLine with the error set to ErrPasteIndicator.
+func (t *Terminal) SetBracketedPasteMode(on bool) {
+	if on {
+		io.WriteString(t.c, "\x1b[?2004h")
+	} else {
+		io.WriteString(t.c, "\x1b[?2004l")
+	}
+}
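+
+// A minimal sketch of handling the paste indicator (assumes term came from
+// NewTerminal on a terminal that supports bracketed paste):
+//
+//	term.SetBracketedPasteMode(true)
+//	line, err := term.ReadLine()
+//	if err == terminal.ErrPasteIndicator {
+//		// line is valid but was pasted wholesale; treat it literally.
+//		err = nil
+//	}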
+
+// stRingBuffer is a ring buffer of strings.
+type stRingBuffer struct {
+	// entries contains max elements.
+	entries []string
+	max     int
+	// head contains the index of the element most recently added to the ring.
+	head int
+	// size contains the number of elements in the ring.
+	size int
+}
+
+func (s *stRingBuffer) Add(a string) {
+	if s.entries == nil {
+		const defaultNumEntries = 100
+		s.entries = make([]string, defaultNumEntries)
+		s.max = defaultNumEntries
+	}
+
+	s.head = (s.head + 1) % s.max
+	s.entries[s.head] = a
+	if s.size < s.max {
+		s.size++
+	}
+}
+
+// NthPreviousEntry returns the value passed to the nth previous call to Add.
+// If n is zero then the most recently added value is returned; if n is one,
+// the next most recent; and so on. If no such element exists then ok is
+// false.
+func (s *stRingBuffer) NthPreviousEntry(n int) (value string, ok bool) {
+	if n >= s.size {
+		return "", false
+	}
+	index := s.head - n
+	if index < 0 {
+		index += s.max
+	}
+	return s.entries[index], true
+}
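+
+// For example (illustrative): after Add("a") and then Add("b"),
+// NthPreviousEntry(0) returns ("b", true), NthPreviousEntry(1) returns
+// ("a", true), and NthPreviousEntry(2) returns ("", false).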
+
+// readPasswordLine reads from reader until it finds \n or io.EOF.
+// The slice returned does not include the \n.
+// readPasswordLine also ignores any \r it finds.
+func readPasswordLine(reader io.Reader) ([]byte, error) {
+	var buf [1]byte
+	var ret []byte
+
+	for {
+		n, err := reader.Read(buf[:])
+		if n > 0 {
+			switch buf[0] {
+			case '\n':
+				return ret, nil
+			case '\r':
+				// remove \r from passwords on Windows
+			default:
+				ret = append(ret, buf[0])
+			}
+			continue
+		}
+		if err != nil {
+			if err == io.EOF && len(ret) > 0 {
+				return ret, nil
+			}
+			return ret, err
+		}
+	}
+}
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util.go b/vendor/golang.org/x/crypto/ssh/terminal/util.go
new file mode 100644
index 0000000..3911040
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/terminal/util.go
@@ -0,0 +1,114 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build aix darwin dragonfly freebsd linux,!appengine netbsd openbsd
+
+// Package terminal provides support functions for dealing with terminals, as
+// commonly found on UNIX systems.
+//
+// Putting a terminal into raw mode is the most common requirement:
+//
+// 	oldState, err := terminal.MakeRaw(0)
+// 	if err != nil {
+// 	        panic(err)
+// 	}
+// 	defer terminal.Restore(0, oldState)
+package terminal // import "golang.org/x/crypto/ssh/terminal"
+
+import (
+	"golang.org/x/sys/unix"
+)
+
+// State contains the state of a terminal.
+type State struct {
+	termios unix.Termios
+}
+
+// IsTerminal returns whether the given file descriptor is a terminal.
+func IsTerminal(fd int) bool {
+	_, err := unix.IoctlGetTermios(fd, ioctlReadTermios)
+	return err == nil
+}
+
+// MakeRaw puts the terminal connected to the given file descriptor into raw
+// mode and returns the previous state of the terminal so that it can be
+// restored.
+func MakeRaw(fd int) (*State, error) {
+	termios, err := unix.IoctlGetTermios(fd, ioctlReadTermios)
+	if err != nil {
+		return nil, err
+	}
+
+	oldState := State{termios: *termios}
+
+	// This attempts to replicate the behaviour documented for cfmakeraw in
+	// the termios(3) manpage.
+	termios.Iflag &^= unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON
+	termios.Oflag &^= unix.OPOST
+	termios.Lflag &^= unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN
+	termios.Cflag &^= unix.CSIZE | unix.PARENB
+	termios.Cflag |= unix.CS8
+	termios.Cc[unix.VMIN] = 1
+	termios.Cc[unix.VTIME] = 0
+	if err := unix.IoctlSetTermios(fd, ioctlWriteTermios, termios); err != nil {
+		return nil, err
+	}
+
+	return &oldState, nil
+}
+
+// GetState returns the current state of a terminal which may be useful to
+// restore the terminal after a signal.
+func GetState(fd int) (*State, error) {
+	termios, err := unix.IoctlGetTermios(fd, ioctlReadTermios)
+	if err != nil {
+		return nil, err
+	}
+
+	return &State{termios: *termios}, nil
+}
+
+// Restore restores the terminal connected to the given file descriptor to a
+// previous state.
+func Restore(fd int, state *State) error {
+	return unix.IoctlSetTermios(fd, ioctlWriteTermios, &state.termios)
+}
+
+// GetSize returns the dimensions of the given terminal.
+func GetSize(fd int) (width, height int, err error) {
+	ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ)
+	if err != nil {
+		return -1, -1, err
+	}
+	return int(ws.Col), int(ws.Row), nil
+}
+
+// passwordReader is an io.Reader that reads from a specific file descriptor.
+type passwordReader int
+
+func (r passwordReader) Read(buf []byte) (int, error) {
+	return unix.Read(int(r), buf)
+}
+
+// ReadPassword reads a line of input from a terminal without local echo.  This
+// is commonly used for inputting passwords and other sensitive data. The slice
+// returned does not include the \n.
+func ReadPassword(fd int) ([]byte, error) {
+	termios, err := unix.IoctlGetTermios(fd, ioctlReadTermios)
+	if err != nil {
+		return nil, err
+	}
+
+	newState := *termios
+	newState.Lflag &^= unix.ECHO
+	newState.Lflag |= unix.ICANON | unix.ISIG
+	newState.Iflag |= unix.ICRNL
+	if err := unix.IoctlSetTermios(fd, ioctlWriteTermios, &newState); err != nil {
+		return nil, err
+	}
+
+	defer unix.IoctlSetTermios(fd, ioctlWriteTermios, termios)
+
+	return readPasswordLine(passwordReader(fd))
+}
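+
+// A minimal usage sketch (assumes os.Stdin is a terminal):
+//
+//	fmt.Print("Password: ")
+//	pw, err := terminal.ReadPassword(int(os.Stdin.Fd()))
+//	fmt.Println()
+//	if err != nil {
+//		// handle the read error
+//	}
+//	// use pw, then zero it when no longer needed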
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_aix.go b/vendor/golang.org/x/crypto/ssh/terminal/util_aix.go
new file mode 100644
index 0000000..dfcd627
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/terminal/util_aix.go
@@ -0,0 +1,12 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build aix
+
+package terminal
+
+import "golang.org/x/sys/unix"
+
+const ioctlReadTermios = unix.TCGETS
+const ioctlWriteTermios = unix.TCSETS
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go b/vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go
new file mode 100644
index 0000000..cb23a59
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go
@@ -0,0 +1,12 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd netbsd openbsd
+
+package terminal
+
+import "golang.org/x/sys/unix"
+
+const ioctlReadTermios = unix.TIOCGETA
+const ioctlWriteTermios = unix.TIOCSETA
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go b/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go
new file mode 100644
index 0000000..5fadfe8
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go
@@ -0,0 +1,10 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package terminal
+
+import "golang.org/x/sys/unix"
+
+const ioctlReadTermios = unix.TCGETS
+const ioctlWriteTermios = unix.TCSETS
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go b/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go
new file mode 100644
index 0000000..9317ac7
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go
@@ -0,0 +1,58 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package terminal provides support functions for dealing with terminals, as
+// commonly found on UNIX systems.
+//
+// Putting a terminal into raw mode is the most common requirement:
+//
+// 	oldState, err := terminal.MakeRaw(0)
+// 	if err != nil {
+// 	        panic(err)
+// 	}
+// 	defer terminal.Restore(0, oldState)
+package terminal
+
+import (
+	"fmt"
+	"runtime"
+)
+
+// State contains the state of a terminal.
+type State struct{}
+
+// IsTerminal returns whether the given file descriptor is a terminal.
+func IsTerminal(fd int) bool {
+	return false
+}
+
+// MakeRaw puts the terminal connected to the given file descriptor into raw
+// mode and returns the previous state of the terminal so that it can be
+// restored.
+func MakeRaw(fd int) (*State, error) {
+	return nil, fmt.Errorf("terminal: MakeRaw not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
+}
+
+// GetState returns the current state of a terminal which may be useful to
+// restore the terminal after a signal.
+func GetState(fd int) (*State, error) {
+	return nil, fmt.Errorf("terminal: GetState not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
+}
+
+// Restore restores the terminal connected to the given file descriptor to a
+// previous state.
+func Restore(fd int, state *State) error {
+	return fmt.Errorf("terminal: Restore not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
+}
+
+// GetSize returns the dimensions of the given terminal.
+func GetSize(fd int) (width, height int, err error) {
+	return 0, 0, fmt.Errorf("terminal: GetSize not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
+}
+
+// ReadPassword reads a line of input from a terminal without local echo.  This
+// is commonly used for inputting passwords and other sensitive data. The slice
+// returned does not include the \n.
+func ReadPassword(fd int) ([]byte, error) {
+	return nil, fmt.Errorf("terminal: ReadPassword not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
+}
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go b/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go
new file mode 100644
index 0000000..3d5f06a
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go
@@ -0,0 +1,124 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build solaris
+
+package terminal // import "golang.org/x/crypto/ssh/terminal"
+
+import (
+	"golang.org/x/sys/unix"
+	"io"
+	"syscall"
+)
+
+// State contains the state of a terminal.
+type State struct {
+	termios unix.Termios
+}
+
+// IsTerminal returns whether the given file descriptor is a terminal.
+func IsTerminal(fd int) bool {
+	_, err := unix.IoctlGetTermio(fd, unix.TCGETA)
+	return err == nil
+}
+
+// ReadPassword reads a line of input from a terminal without local echo.  This
+// is commonly used for inputting passwords and other sensitive data. The slice
+// returned does not include the \n.
+func ReadPassword(fd int) ([]byte, error) {
+	// see also: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libast/common/uwin/getpass.c
+	val, err := unix.IoctlGetTermios(fd, unix.TCGETS)
+	if err != nil {
+		return nil, err
+	}
+	oldState := *val
+
+	newState := oldState
+	newState.Lflag &^= syscall.ECHO
+	newState.Lflag |= syscall.ICANON | syscall.ISIG
+	newState.Iflag |= syscall.ICRNL
+	err = unix.IoctlSetTermios(fd, unix.TCSETS, &newState)
+	if err != nil {
+		return nil, err
+	}
+
+	defer unix.IoctlSetTermios(fd, unix.TCSETS, &oldState)
+
+	var buf [16]byte
+	var ret []byte
+	for {
+		n, err := syscall.Read(fd, buf[:])
+		if err != nil {
+			return nil, err
+		}
+		if n == 0 {
+			if len(ret) == 0 {
+				return nil, io.EOF
+			}
+			break
+		}
+		if buf[n-1] == '\n' {
+			n--
+		}
+		ret = append(ret, buf[:n]...)
+		if n < len(buf) {
+			break
+		}
+	}
+
+	return ret, nil
+}
+
+// MakeRaw puts the terminal connected to the given file descriptor into raw
+// mode and returns the previous state of the terminal so that it can be
+// restored.
+// see http://cr.illumos.org/~webrev/andy_js/1060/
+func MakeRaw(fd int) (*State, error) {
+	termios, err := unix.IoctlGetTermios(fd, unix.TCGETS)
+	if err != nil {
+		return nil, err
+	}
+
+	oldState := State{termios: *termios}
+
+	termios.Iflag &^= unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON
+	termios.Oflag &^= unix.OPOST
+	termios.Lflag &^= unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN
+	termios.Cflag &^= unix.CSIZE | unix.PARENB
+	termios.Cflag |= unix.CS8
+	termios.Cc[unix.VMIN] = 1
+	termios.Cc[unix.VTIME] = 0
+
+	if err := unix.IoctlSetTermios(fd, unix.TCSETS, termios); err != nil {
+		return nil, err
+	}
+
+	return &oldState, nil
+}
+
+// Restore restores the terminal connected to the given file descriptor to a
+// previous state.
+func Restore(fd int, oldState *State) error {
+	return unix.IoctlSetTermios(fd, unix.TCSETS, &oldState.termios)
+}
+
+// GetState returns the current state of a terminal which may be useful to
+// restore the terminal after a signal.
+func GetState(fd int) (*State, error) {
+	termios, err := unix.IoctlGetTermios(fd, unix.TCGETS)
+	if err != nil {
+		return nil, err
+	}
+
+	return &State{termios: *termios}, nil
+}
+
+// GetSize returns the dimensions of the given terminal.
+func GetSize(fd int) (width, height int, err error) {
+	ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ)
+	if err != nil {
+		return 0, 0, err
+	}
+	return int(ws.Col), int(ws.Row), nil
+}
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go b/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go
new file mode 100644
index 0000000..6cb8a95
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go
@@ -0,0 +1,103 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows
+
+// Package terminal provides support functions for dealing with terminals, as
+// commonly found on UNIX systems.
+//
+// Putting a terminal into raw mode is the most common requirement:
+//
+// 	oldState, err := terminal.MakeRaw(0)
+// 	if err != nil {
+// 	        panic(err)
+// 	}
+// 	defer terminal.Restore(0, oldState)
+package terminal
+
+import (
+	"os"
+
+	"golang.org/x/sys/windows"
+)
+
+type State struct {
+	mode uint32
+}
+
+// IsTerminal returns whether the given file descriptor is a terminal.
+func IsTerminal(fd int) bool {
+	var st uint32
+	err := windows.GetConsoleMode(windows.Handle(fd), &st)
+	return err == nil
+}
+
+// MakeRaw puts the terminal connected to the given file descriptor into raw
+// mode and returns the previous state of the terminal so that it can be
+// restored.
+func MakeRaw(fd int) (*State, error) {
+	var st uint32
+	if err := windows.GetConsoleMode(windows.Handle(fd), &st); err != nil {
+		return nil, err
+	}
+	raw := st &^ (windows.ENABLE_ECHO_INPUT | windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT | windows.ENABLE_PROCESSED_OUTPUT)
+	if err := windows.SetConsoleMode(windows.Handle(fd), raw); err != nil {
+		return nil, err
+	}
+	return &State{st}, nil
+}
+
+// GetState returns the current state of a terminal which may be useful to
+// restore the terminal after a signal.
+func GetState(fd int) (*State, error) {
+	var st uint32
+	if err := windows.GetConsoleMode(windows.Handle(fd), &st); err != nil {
+		return nil, err
+	}
+	return &State{st}, nil
+}
+
+// Restore restores the terminal connected to the given file descriptor to a
+// previous state.
+func Restore(fd int, state *State) error {
+	return windows.SetConsoleMode(windows.Handle(fd), state.mode)
+}
+
+// GetSize returns the dimensions of the given terminal.
+func GetSize(fd int) (width, height int, err error) {
+	var info windows.ConsoleScreenBufferInfo
+	if err := windows.GetConsoleScreenBufferInfo(windows.Handle(fd), &info); err != nil {
+		return 0, 0, err
+	}
+	return int(info.Size.X), int(info.Size.Y), nil
+}
+
+// ReadPassword reads a line of input from a terminal without local echo.  This
+// is commonly used for inputting passwords and other sensitive data. The slice
+// returned does not include the \n.
+func ReadPassword(fd int) ([]byte, error) {
+	var st uint32
+	if err := windows.GetConsoleMode(windows.Handle(fd), &st); err != nil {
+		return nil, err
+	}
+	old := st
+
+	st &^= (windows.ENABLE_ECHO_INPUT)
+	st |= (windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT | windows.ENABLE_PROCESSED_OUTPUT)
+	if err := windows.SetConsoleMode(windows.Handle(fd), st); err != nil {
+		return nil, err
+	}
+
+	defer windows.SetConsoleMode(windows.Handle(fd), old)
+
+	var h windows.Handle
+	p, _ := windows.GetCurrentProcess()
+	if err := windows.DuplicateHandle(p, windows.Handle(fd), p, &h, 0, false, windows.DUPLICATE_SAME_ACCESS); err != nil {
+		return nil, err
+	}
+
+	f := os.NewFile(uintptr(h), "stdin")
+	defer f.Close()
+	return readPasswordLine(f)
+}
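
A minimal usage sketch for this vendored terminal package (the prompt text and the use of os.Stdin are illustrative assumptions, not part of the vendored code):

```go
package main

import (
	"fmt"
	"os"

	"golang.org/x/crypto/ssh/terminal"
)

func main() {
	fd := int(os.Stdin.Fd())
	if !terminal.IsTerminal(fd) {
		fmt.Fprintln(os.Stderr, "stdin is not a terminal")
		os.Exit(1)
	}
	fmt.Print("Password: ")
	pw, err := terminal.ReadPassword(fd) // echo stays off until the call returns
	fmt.Println()                        // the suppressed newline is not echoed back
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("read %d bytes\n", len(pw))
}
```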
diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go
new file mode 100644
index 0000000..37dc0cf
--- /dev/null
+++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go
@@ -0,0 +1,71 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package ctxhttp provides helper functions for performing context-aware HTTP requests.
+package ctxhttp // import "golang.org/x/net/context/ctxhttp"
+
+import (
+	"context"
+	"io"
+	"net/http"
+	"net/url"
+	"strings"
+)
+
+// Do sends an HTTP request with the provided http.Client and returns
+// an HTTP response.
+//
+// If the client is nil, http.DefaultClient is used.
+//
+// The provided ctx must be non-nil. If it is canceled or times out,
+// ctx.Err() will be returned.
+func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
+	if client == nil {
+		client = http.DefaultClient
+	}
+	resp, err := client.Do(req.WithContext(ctx))
+	// If we got an error, and the context has been canceled,
+	// the context's error is probably more useful.
+	if err != nil {
+		select {
+		case <-ctx.Done():
+			err = ctx.Err()
+		default:
+		}
+	}
+	return resp, err
+}
+
+// Get issues a GET request via the Do function.
+func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
+	req, err := http.NewRequest("GET", url, nil)
+	if err != nil {
+		return nil, err
+	}
+	return Do(ctx, client, req)
+}
+
+// Head issues a HEAD request via the Do function.
+func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
+	req, err := http.NewRequest("HEAD", url, nil)
+	if err != nil {
+		return nil, err
+	}
+	return Do(ctx, client, req)
+}
+
+// Post issues a POST request via the Do function.
+func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) {
+	req, err := http.NewRequest("POST", url, body)
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Set("Content-Type", bodyType)
+	return Do(ctx, client, req)
+}
+
+// PostForm issues a POST request via the Do function.
+func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) {
+	return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
+}
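
A usage sketch for these helpers (the URL is a placeholder): a nil client falls back to http.DefaultClient as documented on Do, and a context deadline surfaces as ctx.Err() on timeout:

```go
package main

import (
	"context"
	"fmt"
	"io/ioutil"
	"time"

	"golang.org/x/net/context/ctxhttp"
)

func main() {
	// Bound the whole request/response cycle to two seconds.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	resp, err := ctxhttp.Get(ctx, nil, "https://example.com/")
	if err != nil {
		fmt.Println("request failed:", err) // ctx.Err() if the deadline fired
		return
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.Status, len(body), "bytes")
}
```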
diff --git a/vendor/golang.org/x/oauth2/.travis.yml b/vendor/golang.org/x/oauth2/.travis.yml
new file mode 100644
index 0000000..fa139db
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/.travis.yml
@@ -0,0 +1,13 @@
+language: go
+
+go:
+  - tip
+
+install:
+  - export GOPATH="$HOME/gopath"
+  - mkdir -p "$GOPATH/src/golang.org/x"
+  - mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/golang.org/x/oauth2"
+  - go get -v -t -d golang.org/x/oauth2/...
+
+script:
+  - go test -v golang.org/x/oauth2/...
diff --git a/vendor/golang.org/x/oauth2/AUTHORS b/vendor/golang.org/x/oauth2/AUTHORS
new file mode 100644
index 0000000..15167cd
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTING.md b/vendor/golang.org/x/oauth2/CONTRIBUTING.md
new file mode 100644
index 0000000..dfbed62
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/CONTRIBUTING.md
@@ -0,0 +1,26 @@
+# Contributing to Go
+
+Go is an open source project.
+
+It is the work of hundreds of contributors. We appreciate your help!
+
+## Filing issues
+
+When [filing an issue](https://github.com/golang/oauth2/issues), make sure to answer these five questions:
+
+1.  What version of Go are you using (`go version`)?
+2.  What operating system and processor architecture are you using?
+3.  What did you do?
+4.  What did you expect to see?
+5.  What did you see instead?
+
+General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker.
+The gophers there will answer or ask you to file an issue if you've tripped over a bug.
+
+## Contributing code
+
+Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html)
+before sending patches.
+
+Unless otherwise noted, the Go source files are distributed under
+the BSD-style license found in the LICENSE file.
diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTORS b/vendor/golang.org/x/oauth2/CONTRIBUTORS
new file mode 100644
index 0000000..1c4577e
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/golang.org/x/oauth2/LICENSE b/vendor/golang.org/x/oauth2/LICENSE
new file mode 100644
index 0000000..6a66aea
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md
new file mode 100644
index 0000000..68f436e
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/README.md
@@ -0,0 +1,86 @@
+# OAuth2 for Go
+
+[![Build Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2)
+[![GoDoc](https://godoc.org/golang.org/x/oauth2?status.svg)](https://godoc.org/golang.org/x/oauth2)
+
+The oauth2 package contains a client implementation for the OAuth 2.0 spec.
+
+## Installation
+
+~~~~
+go get golang.org/x/oauth2
+~~~~
+
+Or you can manually git clone the repository to
+`$(go env GOPATH)/src/golang.org/x/oauth2`.
+
+See godoc for further documentation and examples.
+
+* [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2)
+* [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google)
+
+
+## App Engine
+
+In change 96e89be (March 2015), we removed the `oauth2.Context2` type in favor
+of the [`context.Context`](https://golang.org/x/net/context#Context) type from
+the `golang.org/x/net/context` package. That type has since been superseded by
+the standard library's [`context.Context`](https://golang.org/pkg/context#Context) type.
+
+
+This means it's no longer possible to use the "Classic App Engine"
+`appengine.Context` type with the `oauth2` package. (You're using
+Classic App Engine if you import the package `"appengine"`.)
+
+To work around this, you may use the new `"google.golang.org/appengine"`
+package. This package has almost the same API as the `"appengine"` package,
+but it can be fetched with `go get` and used on "Managed VMs" as well as
+Classic App Engine.
+
+See the [new `appengine` package's readme](https://github.com/golang/appengine#updating-a-go-app-engine-app)
+for information on updating your app.
+
+If you don't want to update your entire app to use the new App Engine packages,
+you may use both sets of packages in parallel, using only the new packages
+with the `oauth2` package.
+
+```go
+import (
+	"context"
+	"golang.org/x/oauth2"
+	"golang.org/x/oauth2/google"
+	newappengine "google.golang.org/appengine"
+	newurlfetch "google.golang.org/appengine/urlfetch"
+
+	"appengine"
+)
+
+func handler(w http.ResponseWriter, r *http.Request) {
+	var c appengine.Context = appengine.NewContext(r)
+	c.Infof("Logging a message with the old package")
+
+	var ctx context.Context = newappengine.NewContext(r)
+	client := &http.Client{
+		Transport: &oauth2.Transport{
+			Source: google.AppEngineTokenSource(ctx, "scope"),
+			Base:   &newurlfetch.Transport{Context: ctx},
+		},
+	}
+	client.Get("...")
+}
+```
+
+## Policy for new packages
+
+We no longer accept new provider-specific packages in this repo. For
+defining provider endpoints and provider-specific OAuth2 behavior, we
+encourage you to create packages elsewhere. We'll keep the existing
+packages for compatibility.
+
+## Report Issues / Send Patches
+
+This repository uses Gerrit for code changes. To learn how to submit changes to
+this repository, see https://golang.org/doc/contribute.html.
+
+The main issue tracker for the oauth2 repository is located at
+https://github.com/golang/oauth2/issues.
diff --git a/vendor/golang.org/x/oauth2/go.mod b/vendor/golang.org/x/oauth2/go.mod
new file mode 100644
index 0000000..b345781
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/go.mod
@@ -0,0 +1,10 @@
+module golang.org/x/oauth2
+
+go 1.11
+
+require (
+	cloud.google.com/go v0.34.0
+	golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e
+	golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 // indirect
+	google.golang.org/appengine v1.4.0
+)
diff --git a/vendor/golang.org/x/oauth2/go.sum b/vendor/golang.org/x/oauth2/go.sum
new file mode 100644
index 0000000..6f0079b
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/go.sum
@@ -0,0 +1,12 @@
+cloud.google.com/go v0.34.0 h1:eOI3/cP2VTU6uZLDYAoic+eyzzB9YyGmJ7eIjl8rOPg=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e h1:bRhVy7zSSasaqNksaRZiA5EEI+Ei4I1nO5Jh72wfHlg=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
diff --git a/vendor/golang.org/x/oauth2/internal/client_appengine.go b/vendor/golang.org/x/oauth2/internal/client_appengine.go
new file mode 100644
index 0000000..7434871
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/internal/client_appengine.go
@@ -0,0 +1,13 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package internal
+
+import "google.golang.org/appengine/urlfetch"
+
+func init() {
+	appengineClientHook = urlfetch.Client
+}
diff --git a/vendor/golang.org/x/oauth2/internal/doc.go b/vendor/golang.org/x/oauth2/internal/doc.go
new file mode 100644
index 0000000..03265e8
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/internal/doc.go
@@ -0,0 +1,6 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package internal contains support packages for the oauth2 package.
+package internal
diff --git a/vendor/golang.org/x/oauth2/internal/oauth2.go b/vendor/golang.org/x/oauth2/internal/oauth2.go
new file mode 100644
index 0000000..c0ab196
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/internal/oauth2.go
@@ -0,0 +1,37 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/pem"
+	"errors"
+	"fmt"
+)
+
+// ParseKey converts the binary contents of a private key file
+// to an *rsa.PrivateKey. It detects whether the private key is in a
+// PEM container or not. If so, it extracts the private key
+// from the PEM container before conversion. It only supports PEM
+// containers with no passphrase.
+func ParseKey(key []byte) (*rsa.PrivateKey, error) {
+	block, _ := pem.Decode(key)
+	if block != nil {
+		key = block.Bytes
+	}
+	parsedKey, err := x509.ParsePKCS8PrivateKey(key)
+	if err != nil {
+		parsedKey, err = x509.ParsePKCS1PrivateKey(key)
+		if err != nil {
+			return nil, fmt.Errorf("private key should be a PEM or plain PKCS1 or PKCS8; parse error: %v", err)
+		}
+	}
+	parsed, ok := parsedKey.(*rsa.PrivateKey)
+	if !ok {
+		return nil, errors.New("private key is invalid")
+	}
+	return parsed, nil
+}
diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go
new file mode 100644
index 0000000..a831b77
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/internal/token.go
@@ -0,0 +1,277 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"mime"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+
+	"golang.org/x/net/context/ctxhttp"
+)
+
+// Token represents the credentials used to authorize
+// the requests to access protected resources on the OAuth 2.0
+// provider's backend.
+//
+// This type is a mirror of oauth2.Token and exists to break
+// an otherwise-circular dependency. Other internal packages
+// should convert this Token into an oauth2.Token before use.
+type Token struct {
+	// AccessToken is the token that authorizes and authenticates
+	// the requests.
+	AccessToken string
+
+	// TokenType is the type of token.
+	// The Type method returns either this or "Bearer", the default.
+	TokenType string
+
+	// RefreshToken is a token that's used by the application
+	// (as opposed to the user) to refresh the access token
+	// if it expires.
+	RefreshToken string
+
+	// Expiry is the optional expiration time of the access token.
+	//
+	// If zero, TokenSource implementations will reuse the same
+	// token forever and RefreshToken or equivalent
+	// mechanisms for that TokenSource will not be used.
+	Expiry time.Time
+
+	// Raw optionally contains extra metadata from the server
+	// when updating a token.
+	Raw interface{}
+}
+
+// tokenJSON is the struct representing the HTTP response from OAuth2
+// providers returning a token in JSON form.
+type tokenJSON struct {
+	AccessToken  string         `json:"access_token"`
+	TokenType    string         `json:"token_type"`
+	RefreshToken string         `json:"refresh_token"`
+	ExpiresIn    expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number
+	Expires      expirationTime `json:"expires"`    // broken Facebook spelling of expires_in
+}
+
+func (e *tokenJSON) expiry() (t time.Time) {
+	if v := e.ExpiresIn; v != 0 {
+		return time.Now().Add(time.Duration(v) * time.Second)
+	}
+	if v := e.Expires; v != 0 {
+		return time.Now().Add(time.Duration(v) * time.Second)
+	}
+	return
+}
+
+type expirationTime int32
+
+func (e *expirationTime) UnmarshalJSON(b []byte) error {
+	var n json.Number
+	err := json.Unmarshal(b, &n)
+	if err != nil {
+		return err
+	}
+	i, err := n.Int64()
+	if err != nil {
+		return err
+	}
+	*e = expirationTime(i)
+	return nil
+}
+
+var brokenAuthHeaderProviders = []string{
+	"https://accounts.google.com/",
+	"https://api.codeswholesale.com/oauth/token",
+	"https://api.dropbox.com/",
+	"https://api.dropboxapi.com/",
+	"https://api.instagram.com/",
+	"https://api.netatmo.net/",
+	"https://api.odnoklassniki.ru/",
+	"https://api.pushbullet.com/",
+	"https://api.soundcloud.com/",
+	"https://api.twitch.tv/",
+	"https://id.twitch.tv/",
+	"https://app.box.com/",
+	"https://api.box.com/",
+	"https://connect.stripe.com/",
+	"https://login.mailchimp.com/",
+	"https://login.microsoftonline.com/",
+	"https://login.salesforce.com/",
+	"https://login.windows.net",
+	"https://login.live.com/",
+	"https://login.live-int.com/",
+	"https://oauth.sandbox.trainingpeaks.com/",
+	"https://oauth.trainingpeaks.com/",
+	"https://oauth.vk.com/",
+	"https://openapi.baidu.com/",
+	"https://slack.com/",
+	"https://test-sandbox.auth.corp.google.com",
+	"https://test.salesforce.com/",
+	"https://user.gini.net/",
+	"https://www.douban.com/",
+	"https://www.googleapis.com/",
+	"https://www.linkedin.com/",
+	"https://www.strava.com/oauth/",
+	"https://www.wunderlist.com/oauth/",
+	"https://api.patreon.com/",
+	"https://sandbox.codeswholesale.com/oauth/token",
+	"https://api.sipgate.com/v1/authorization/oauth",
+	"https://api.medium.com/v1/tokens",
+	"https://log.finalsurge.com/oauth/token",
+	"https://multisport.todaysplan.com.au/rest/oauth/access_token",
+	"https://whats.todaysplan.com.au/rest/oauth/access_token",
+	"https://stackoverflow.com/oauth/access_token",
+	"https://account.health.nokia.com",
+	"https://accounts.zoho.com",
+	"https://gitter.im/login/oauth/token",
+	"https://openid-connect.onelogin.com/oidc",
+	"https://api.dailymotion.com/oauth/token",
+}
+
+// brokenAuthHeaderDomains lists broken providers that issue dynamic endpoints.
+var brokenAuthHeaderDomains = []string{
+	".auth0.com",
+	".force.com",
+	".myshopify.com",
+	".okta.com",
+	".oktapreview.com",
+}
+
+func RegisterBrokenAuthHeaderProvider(tokenURL string) {
+	brokenAuthHeaderProviders = append(brokenAuthHeaderProviders, tokenURL)
+}
+
+// providerAuthHeaderWorks reports whether the OAuth2 server identified by the tokenURL
+// implements the OAuth2 spec correctly.
+// See https://code.google.com/p/goauth2/issues/detail?id=31 for background.
+// In summary:
+// - Reddit only accepts client secret in the Authorization header
+// - Dropbox accepts it in either the URL param or the Auth header, but not both.
+// - Google only accepts URL param (not spec compliant?), not Auth header
+// - Stripe only accepts client secret in Auth header with Bearer method, not Basic
+func providerAuthHeaderWorks(tokenURL string) bool {
+	for _, s := range brokenAuthHeaderProviders {
+		if strings.HasPrefix(tokenURL, s) {
+			// Some sites fail to implement the OAuth2 spec fully.
+			return false
+		}
+	}
+
+	if u, err := url.Parse(tokenURL); err == nil {
+		for _, s := range brokenAuthHeaderDomains {
+			if strings.HasSuffix(u.Host, s) {
+				return false
+			}
+		}
+	}
+
+	// Assume the provider implements the spec properly
+	// otherwise. We can add more exceptions as they're
+	// discovered. We will _not_ be adding configurable hooks
+	// to this package to let users select server bugs.
+	return true
+}
+
+func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values) (*Token, error) {
+	bustedAuth := !providerAuthHeaderWorks(tokenURL)
+	if bustedAuth {
+		if clientID != "" {
+			v.Set("client_id", clientID)
+		}
+		if clientSecret != "" {
+			v.Set("client_secret", clientSecret)
+		}
+	}
+	req, err := http.NewRequest("POST", tokenURL, strings.NewReader(v.Encode()))
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+	if !bustedAuth {
+		req.SetBasicAuth(url.QueryEscape(clientID), url.QueryEscape(clientSecret))
+	}
+	r, err := ctxhttp.Do(ctx, ContextClient(ctx), req)
+	if err != nil {
+		return nil, err
+	}
+	defer r.Body.Close()
+	body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20))
+	if err != nil {
+		return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
+	}
+	if code := r.StatusCode; code < 200 || code > 299 {
+		return nil, &RetrieveError{
+			Response: r,
+			Body:     body,
+		}
+	}
+
+	var token *Token
+	content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type"))
+	switch content {
+	case "application/x-www-form-urlencoded", "text/plain":
+		vals, err := url.ParseQuery(string(body))
+		if err != nil {
+			return nil, err
+		}
+		token = &Token{
+			AccessToken:  vals.Get("access_token"),
+			TokenType:    vals.Get("token_type"),
+			RefreshToken: vals.Get("refresh_token"),
+			Raw:          vals,
+		}
+		e := vals.Get("expires_in")
+		if e == "" {
+			// TODO(jbd): Facebook's OAuth2 implementation is broken and
+			// returns expires_in field in expires. Remove the fallback to expires,
+			// when Facebook fixes their implementation.
+			e = vals.Get("expires")
+		}
+		expires, _ := strconv.Atoi(e)
+		if expires != 0 {
+			token.Expiry = time.Now().Add(time.Duration(expires) * time.Second)
+		}
+	default:
+		var tj tokenJSON
+		if err = json.Unmarshal(body, &tj); err != nil {
+			return nil, err
+		}
+		token = &Token{
+			AccessToken:  tj.AccessToken,
+			TokenType:    tj.TokenType,
+			RefreshToken: tj.RefreshToken,
+			Expiry:       tj.expiry(),
+			Raw:          make(map[string]interface{}),
+		}
+		json.Unmarshal(body, &token.Raw) // no error checks for optional fields
+	}
+	// Don't overwrite `RefreshToken` with an empty value
+	// if this was a token refreshing request.
+	if token.RefreshToken == "" {
+		token.RefreshToken = v.Get("refresh_token")
+	}
+	if token.AccessToken == "" {
+		return token, errors.New("oauth2: server response missing access_token")
+	}
+	return token, nil
+}
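
For orientation, the two token-response shapes this switch distinguishes by Content-Type look roughly like the following (values are illustrative only):

```
Content-Type: application/x-www-form-urlencoded (or text/plain)
  access_token=abc&token_type=Bearer&refresh_token=def&expires_in=3600

Content-Type: application/json (the default branch)
  {"access_token":"abc","token_type":"Bearer","refresh_token":"def","expires_in":3600}
```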
+
+type RetrieveError struct {
+	Response *http.Response
+	Body     []byte
+}
+
+func (r *RetrieveError) Error() string {
+	return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body)
+}
diff --git a/vendor/golang.org/x/oauth2/internal/transport.go b/vendor/golang.org/x/oauth2/internal/transport.go
new file mode 100644
index 0000000..572074a
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/internal/transport.go
@@ -0,0 +1,33 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+	"context"
+	"net/http"
+)
+
+// HTTPClient is the context key to use with golang.org/x/net/context's
+// WithValue function to associate an *http.Client value with a context.
+var HTTPClient ContextKey
+
+// ContextKey is just an empty struct. It exists so HTTPClient can be
+// an immutable public variable with a unique type. It's immutable
+// because code outside this internal package cannot create a ContextKey value.
+type ContextKey struct{}
+
+var appengineClientHook func(context.Context) *http.Client
+
+func ContextClient(ctx context.Context) *http.Client {
+	if ctx != nil {
+		if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok {
+			return hc
+		}
+	}
+	if appengineClientHook != nil {
+		return appengineClientHook(ctx)
+	}
+	return http.DefaultClient
+}
diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go
new file mode 100644
index 0000000..3de6331
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/oauth2.go
@@ -0,0 +1,360 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package oauth2 provides support for making
+// OAuth2 authorized and authenticated HTTP requests,
+// as specified in RFC 6749.
+// It can additionally grant authorization with Bearer JWT.
+package oauth2 // import "golang.org/x/oauth2"
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"net/http"
+	"net/url"
+	"strings"
+	"sync"
+
+	"golang.org/x/oauth2/internal"
+)
+
+// NoContext is the default context you should supply if not using
+// your own context.Context (see https://golang.org/x/net/context).
+//
+// Deprecated: Use context.Background() or context.TODO() instead.
+var NoContext = context.TODO()
+
+// RegisterBrokenAuthHeaderProvider registers an OAuth2 server
+// identified by the tokenURL prefix as an OAuth2 implementation
+// which doesn't support the HTTP Basic authentication
+// scheme to authenticate with the authorization server.
+// Once a server is registered, credentials (client_id and client_secret)
+// will be passed as parameters in the request body rather than being present
+// in the Authorization header.
+// See https://code.google.com/p/goauth2/issues/detail?id=31 for background.
+func RegisterBrokenAuthHeaderProvider(tokenURL string) {
+	internal.RegisterBrokenAuthHeaderProvider(tokenURL)
+}
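
For example, a consumer that talks to a non-conforming provider could register it once at startup; the endpoint URL below is made up for illustration:

```go
func init() {
	// Hypothetical token endpoint that rejects HTTP Basic credentials;
	// after registration, client_id/client_secret go in the request body.
	oauth2.RegisterBrokenAuthHeaderProvider("https://sso.example.internal/oauth/token")
}
```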
+
+// Config describes a typical 3-legged OAuth2 flow, with both the
+// client application information and the server's endpoint URLs.
+// For the client credentials 2-legged OAuth2 flow, see the clientcredentials
+// package (https://golang.org/x/oauth2/clientcredentials).
+type Config struct {
+	// ClientID is the application's ID.
+	ClientID string
+
+	// ClientSecret is the application's secret.
+	ClientSecret string
+
+	// Endpoint contains the resource server's token endpoint
+	// URLs. These are constants specific to each server and are
+	// often available via site-specific packages, such as
+	// google.Endpoint or github.Endpoint.
+	Endpoint Endpoint
+
+	// RedirectURL is the URL to redirect users going through
+	// the OAuth flow, after the resource owner's URLs.
+	RedirectURL string
+
+	// Scopes specifies optional requested permissions.
+	Scopes []string
+}
+
+// A TokenSource is anything that can return a token.
+type TokenSource interface {
+	// Token returns a token or an error.
+	// Token must be safe for concurrent use by multiple goroutines.
+	// The returned Token must not be modified.
+	Token() (*Token, error)
+}
+
+// Endpoint contains the OAuth 2.0 provider's authorization and token
+// endpoint URLs.
+type Endpoint struct {
+	AuthURL  string
+	TokenURL string
+}
+
+var (
+	// AccessTypeOnline and AccessTypeOffline are options passed
+	// to the Options.AuthCodeURL method. They modify the
+	// "access_type" field that gets sent in the URL returned by
+	// AuthCodeURL.
+	//
+	// Online is the default if neither is specified. If your
+	// application needs to refresh access tokens when the user
+	// is not present at the browser, then use offline. This will
+	// result in your application obtaining a refresh token the
+	// first time your application exchanges an authorization
+	// code for a user.
+	AccessTypeOnline  AuthCodeOption = SetAuthURLParam("access_type", "online")
+	AccessTypeOffline AuthCodeOption = SetAuthURLParam("access_type", "offline")
+
+	// ApprovalForce forces the users to view the consent dialog
+	// and confirm the permissions request at the URL returned
+	// from AuthCodeURL, even if they've already done so.
+	ApprovalForce AuthCodeOption = SetAuthURLParam("approval_prompt", "force")
+)
+
+// An AuthCodeOption is passed to Config.AuthCodeURL.
+type AuthCodeOption interface {
+	setValue(url.Values)
+}
+
+type setParam struct{ k, v string }
+
+func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) }
+
+// SetAuthURLParam builds an AuthCodeOption which passes key/value parameters
+// to a provider's authorization endpoint.
+func SetAuthURLParam(key, value string) AuthCodeOption {
+	return setParam{key, value}
+}
+
+// AuthCodeURL returns a URL to OAuth 2.0 provider's consent page
+// that asks for permissions for the required scopes explicitly.
+//
+// State is a token to protect the user from CSRF attacks. You must
+// always provide a non-empty string and validate that it matches the
+// state query parameter on your redirect callback.
+// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info.
+//
+// Opts may include AccessTypeOnline or AccessTypeOffline, as well
+// as ApprovalForce.
+// It can also be used to pass the PKCE challenge.
+// See https://www.oauth.com/oauth2-servers/pkce/ for more info.
+func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string {
+	var buf bytes.Buffer
+	buf.WriteString(c.Endpoint.AuthURL)
+	v := url.Values{
+		"response_type": {"code"},
+		"client_id":     {c.ClientID},
+	}
+	if c.RedirectURL != "" {
+		v.Set("redirect_uri", c.RedirectURL)
+	}
+	if len(c.Scopes) > 0 {
+		v.Set("scope", strings.Join(c.Scopes, " "))
+	}
+	if state != "" {
+		// TODO(light): Docs say never to omit state; don't allow empty.
+		v.Set("state", state)
+	}
+	for _, opt := range opts {
+		opt.setValue(v)
+	}
+	if strings.Contains(c.Endpoint.AuthURL, "?") {
+		buf.WriteByte('&')
+	} else {
+		buf.WriteByte('?')
+	}
+	buf.WriteString(v.Encode())
+	return buf.String()
+}
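
Since this version of the package has no dedicated PKCE helpers, a challenge can be threaded through SetAuthURLParam as the comment above suggests; this sketch assumes codeChallenge was derived from a stored verifier elsewhere:

```go
// consentURL builds the authorization URL for a PKCE flow. codeChallenge is
// assumed to be the base64url-encoded SHA-256 of a previously saved verifier.
func consentURL(conf *oauth2.Config, state, codeChallenge string) string {
	return conf.AuthCodeURL(state,
		oauth2.AccessTypeOffline,
		oauth2.SetAuthURLParam("code_challenge", codeChallenge),
		oauth2.SetAuthURLParam("code_challenge_method", "S256"),
	)
}
```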
+
+// PasswordCredentialsToken converts a resource owner username and password
+// pair into a token.
+//
+// Per the RFC, this grant type should only be used "when there is a high
+// degree of trust between the resource owner and the client (e.g., the client
+// is part of the device operating system or a highly privileged application),
+// and when other authorization grant types are not available."
+// See https://tools.ietf.org/html/rfc6749#section-4.3 for more info.
+//
+// The provided context optionally controls which HTTP client is used. See the HTTPClient variable.
+func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) {
+	v := url.Values{
+		"grant_type": {"password"},
+		"username":   {username},
+		"password":   {password},
+	}
+	if len(c.Scopes) > 0 {
+		v.Set("scope", strings.Join(c.Scopes, " "))
+	}
+	return retrieveToken(ctx, c, v)
+}
+
+// Exchange converts an authorization code into a token.
+//
+// It is used after a resource provider redirects the user back
+// to the Redirect URI (the URL obtained from AuthCodeURL).
+//
+// The provided context optionally controls which HTTP client is used. See the HTTPClient variable.
+//
+// The code will be in the *http.Request.FormValue("code"). Before
+// calling Exchange, be sure to validate FormValue("state").
+//
+// Opts may include the PKCE verifier code if previously used in AuthCodeURL.
+// See https://www.oauth.com/oauth2-servers/pkce/ for more info.
+func (c *Config) Exchange(ctx context.Context, code string, opts ...AuthCodeOption) (*Token, error) {
+	v := url.Values{
+		"grant_type": {"authorization_code"},
+		"code":       {code},
+	}
+	if c.RedirectURL != "" {
+		v.Set("redirect_uri", c.RedirectURL)
+	}
+	for _, opt := range opts {
+		opt.setValue(v)
+	}
+	return retrieveToken(ctx, c, v)
+}
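
Putting AuthCodeURL and Exchange together, a sketch of the full three-legged flow (endpoints and credentials are placeholders; imports needed are context, fmt, net/http, and golang.org/x/oauth2):

```go
func threeLeggedClient(ctx context.Context, code string) (*http.Client, error) {
	conf := &oauth2.Config{
		ClientID:     "CLIENT_ID",
		ClientSecret: "CLIENT_SECRET",
		Scopes:       []string{"profile"},
		RedirectURL:  "https://app.example.com/callback",
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://provider.example.com/oauth2/auth",
			TokenURL: "https://provider.example.com/oauth2/token",
		},
	}

	// 1. Send the user to the consent page (normally via an HTTP redirect).
	fmt.Println("Visit:", conf.AuthCodeURL("state"))

	// 2. On the callback, validate the state parameter, then exchange the
	//    authorization code for a token.
	tok, err := conf.Exchange(ctx, code)
	if err != nil {
		return nil, err
	}

	// 3. The returned client attaches the token and refreshes it as needed.
	return conf.Client(ctx, tok), nil
}
```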
+
+// Client returns an HTTP client using the provided token.
+// The token will auto-refresh as necessary. The underlying
+// HTTP transport will be obtained using the provided context.
+// The returned client and its Transport should not be modified.
+func (c *Config) Client(ctx context.Context, t *Token) *http.Client {
+	return NewClient(ctx, c.TokenSource(ctx, t))
+}
+
+// TokenSource returns a TokenSource that returns t until t expires,
+// automatically refreshing it as necessary using the provided context.
+//
+// Most users will use Config.Client instead.
+func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource {
+	tkr := &tokenRefresher{
+		ctx:  ctx,
+		conf: c,
+	}
+	if t != nil {
+		tkr.refreshToken = t.RefreshToken
+	}
+	return &reuseTokenSource{
+		t:   t,
+		new: tkr,
+	}
+}
+
+// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token"
+// HTTP requests to renew a token using a RefreshToken.
+type tokenRefresher struct {
+	ctx          context.Context // used to get HTTP requests
+	conf         *Config
+	refreshToken string
+}
+
+// WARNING: Token is not safe for concurrent access, as it
+// updates the tokenRefresher's refreshToken field.
+// Within this package, it is used by reuseTokenSource which
+// synchronizes calls to this method with its own mutex.
+func (tf *tokenRefresher) Token() (*Token, error) {
+	if tf.refreshToken == "" {
+		return nil, errors.New("oauth2: token expired and refresh token is not set")
+	}
+
+	tk, err := retrieveToken(tf.ctx, tf.conf, url.Values{
+		"grant_type":    {"refresh_token"},
+		"refresh_token": {tf.refreshToken},
+	})
+
+	if err != nil {
+		return nil, err
+	}
+	if tf.refreshToken != tk.RefreshToken {
+		tf.refreshToken = tk.RefreshToken
+	}
+	return tk, err
+}
+
+// reuseTokenSource is a TokenSource that holds a single token in memory
+// and validates its expiry before each call to retrieve it with
+// Token. If it's expired, it will be auto-refreshed using the
+// new TokenSource.
+type reuseTokenSource struct {
+	new TokenSource // called when t is expired.
+
+	mu sync.Mutex // guards t
+	t  *Token
+}
+
+// Token returns the current token if it's still valid, else will
+// refresh the current token (using r.Context for HTTP client
+// information) and return the new one.
+func (s *reuseTokenSource) Token() (*Token, error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if s.t.Valid() {
+		return s.t, nil
+	}
+	t, err := s.new.Token()
+	if err != nil {
+		return nil, err
+	}
+	s.t = t
+	return t, nil
+}
+
+// StaticTokenSource returns a TokenSource that always returns the same token.
+// Because the provided token t is never refreshed, StaticTokenSource is only
+// useful for tokens that never expire.
+func StaticTokenSource(t *Token) TokenSource {
+	return staticTokenSource{t}
+}
+
+// staticTokenSource is a TokenSource that always returns the same Token.
+type staticTokenSource struct {
+	t *Token
+}
+
+func (s staticTokenSource) Token() (*Token, error) {
+	return s.t, nil
+}
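
A usage sketch (the token string is a placeholder for a credential supplied out of band, such as a personal access token; imports: context, golang.org/x/oauth2):

```go
ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "PERSONAL_ACCESS_TOKEN"})
client := oauth2.NewClient(context.Background(), ts)
// Every request from client now carries "Authorization: Bearer PERSONAL_ACCESS_TOKEN".
```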
+
+// HTTPClient is the context key to use with golang.org/x/net/context's
+// WithValue function to associate an *http.Client value with a context.
+var HTTPClient internal.ContextKey
+
+// NewClient creates an *http.Client from a Context and TokenSource.
+// The returned client is not valid beyond the lifetime of the context.
+//
+// Note that if a custom *http.Client is provided via the Context it
+// is used only for token acquisition and is not used to configure the
+// *http.Client returned from NewClient.
+//
+// As a special case, if src is nil, a non-OAuth2 client is returned
+// using the provided context. This exists to support related OAuth2
+// packages.
+func NewClient(ctx context.Context, src TokenSource) *http.Client {
+	if src == nil {
+		return internal.ContextClient(ctx)
+	}
+	return &http.Client{
+		Transport: &Transport{
+			Base:   internal.ContextClient(ctx).Transport,
+			Source: ReuseTokenSource(nil, src),
+		},
+	}
+}
+
+// ReuseTokenSource returns a TokenSource which repeatedly returns the
+// same token as long as it's valid, starting with t.
+// When its cached token is invalid, a new token is obtained from src.
+//
+// ReuseTokenSource is typically used to reuse tokens from a cache
+// (such as a file on disk) between runs of a program, rather than
+// obtaining new tokens unnecessarily.
+//
+// The initial token t may be nil, in which case the TokenSource is
+// wrapped in a caching version if it isn't one already. This also
+// means it's always safe to wrap ReuseTokenSource around any other
+// TokenSource without adverse effects.
+func ReuseTokenSource(t *Token, src TokenSource) TokenSource {
+	// Don't wrap a reuseTokenSource in itself. That would work,
+	// but cause an unnecessary number of mutex operations.
+	// Just build the equivalent one.
+	if rt, ok := src.(*reuseTokenSource); ok {
+		if t == nil {
+			// Just use it directly.
+			return rt
+		}
+		src = rt.new
+	}
+	return &reuseTokenSource{
+		t:   t,
+		new: src,
+	}
+}
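
A sketch of the cache-between-runs pattern described above; loadToken is a hypothetical helper standing in for whatever persistence the program uses:

```go
func cachedClient(ctx context.Context, conf *oauth2.Config) (*http.Client, error) {
	tok, err := loadToken("token.json") // hypothetical: read a cached *oauth2.Token
	if err != nil {
		return nil, err
	}
	// Reuse the cached token until it expires, then refresh via conf.
	ts := oauth2.ReuseTokenSource(tok, conf.TokenSource(ctx, tok))
	return oauth2.NewClient(ctx, ts), nil
}
```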
diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go
new file mode 100644
index 0000000..ee4be54
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/token.go
@@ -0,0 +1,178 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package oauth2
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+
+	"golang.org/x/oauth2/internal"
+)
+
+// expiryDelta determines how much earlier a token should be considered
+// expired than its actual expiration time. It is used to avoid late
+// expirations due to client-server time mismatches.
+const expiryDelta = 10 * time.Second
+
+// Token represents the credentials used to authorize
+// the requests to access protected resources on the OAuth 2.0
+// provider's backend.
+//
+// Most users of this package should not access fields of Token
+// directly. They're exported mostly for use by related packages
+// implementing derivative OAuth2 flows.
+type Token struct {
+	// AccessToken is the token that authorizes and authenticates
+	// the requests.
+	AccessToken string `json:"access_token"`
+
+	// TokenType is the type of token.
+	// The Type method returns either this or "Bearer", the default.
+	TokenType string `json:"token_type,omitempty"`
+
+	// RefreshToken is a token that's used by the application
+	// (as opposed to the user) to refresh the access token
+	// if it expires.
+	RefreshToken string `json:"refresh_token,omitempty"`
+
+	// Expiry is the optional expiration time of the access token.
+	//
+	// If zero, TokenSource implementations will reuse the same
+	// token forever and RefreshToken or equivalent
+	// mechanisms for that TokenSource will not be used.
+	Expiry time.Time `json:"expiry,omitempty"`
+
+	// raw optionally contains extra metadata from the server
+	// when updating a token.
+	raw interface{}
+}
+
+// Type returns t.TokenType if non-empty, else "Bearer".
+func (t *Token) Type() string {
+	if strings.EqualFold(t.TokenType, "bearer") {
+		return "Bearer"
+	}
+	if strings.EqualFold(t.TokenType, "mac") {
+		return "MAC"
+	}
+	if strings.EqualFold(t.TokenType, "basic") {
+		return "Basic"
+	}
+	if t.TokenType != "" {
+		return t.TokenType
+	}
+	return "Bearer"
+}
+
+// SetAuthHeader sets the Authorization header to r using the access
+// token in t.
+//
+// This method is unnecessary when using Transport or an HTTP Client
+// returned by this package.
+func (t *Token) SetAuthHeader(r *http.Request) {
+	r.Header.Set("Authorization", t.Type()+" "+t.AccessToken)
+}
+
+// WithExtra returns a new Token that's a clone of t, but using the
+// provided raw extra map. This is only intended for use by packages
+// implementing derivative OAuth2 flows.
+func (t *Token) WithExtra(extra interface{}) *Token {
+	t2 := new(Token)
+	*t2 = *t
+	t2.raw = extra
+	return t2
+}
+
+// Extra returns an extra field.
+// Extra fields are key-value pairs returned by the server as a
+// part of the token retrieval response.
+func (t *Token) Extra(key string) interface{} {
+	if raw, ok := t.raw.(map[string]interface{}); ok {
+		return raw[key]
+	}
+
+	vals, ok := t.raw.(url.Values)
+	if !ok {
+		return nil
+	}
+
+	v := vals.Get(key)
+	switch s := strings.TrimSpace(v); strings.Count(s, ".") {
+	case 0: // Contains no "."; try to parse as int
+		if i, err := strconv.ParseInt(s, 10, 64); err == nil {
+			return i
+		}
+	case 1: // Contains a single "."; try to parse as float
+		if f, err := strconv.ParseFloat(s, 64); err == nil {
+			return f
+		}
+	}
+
+	return v
+}
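
For instance, OpenID Connect providers commonly return an id_token alongside the access token; the field name here is provider-specific, not part of this package:

```go
if idToken, ok := tok.Extra("id_token").(string); ok {
	// Hand the raw JWT to an OIDC library for verification and decoding.
	_ = idToken
}
```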
+
+// timeNow is time.Now but pulled out as a variable for tests.
+var timeNow = time.Now
+
+// expired reports whether the token is expired.
+// t must be non-nil.
+func (t *Token) expired() bool {
+	if t.Expiry.IsZero() {
+		return false
+	}
+	return t.Expiry.Round(0).Add(-expiryDelta).Before(timeNow())
+}
+
+// Valid reports whether t is non-nil, has an AccessToken, and is not expired.
+func (t *Token) Valid() bool {
+	return t != nil && t.AccessToken != "" && !t.expired()
+}
+
+// tokenFromInternal maps an *internal.Token struct into
+// a *Token struct.
+func tokenFromInternal(t *internal.Token) *Token {
+	if t == nil {
+		return nil
+	}
+	return &Token{
+		AccessToken:  t.AccessToken,
+		TokenType:    t.TokenType,
+		RefreshToken: t.RefreshToken,
+		Expiry:       t.Expiry,
+		raw:          t.Raw,
+	}
+}
+
+// retrieveToken takes a *Config and uses that to retrieve an *internal.Token.
+// This token is then mapped from *internal.Token into an *oauth2.Token which is returned along
+// with an error.
+func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) {
+	tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v)
+	if err != nil {
+		if rErr, ok := err.(*internal.RetrieveError); ok {
+			return nil, (*RetrieveError)(rErr)
+		}
+		return nil, err
+	}
+	return tokenFromInternal(tk), nil
+}
+
+// RetrieveError is the error returned when the token endpoint returns a
+// non-2XX HTTP status code.
+type RetrieveError struct {
+	Response *http.Response
+	// Body is the body that was consumed by reading Response.Body.
+	// It may be truncated.
+	Body []byte
+}
+
+func (r *RetrieveError) Error() string {
+	return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body)
+}
diff --git a/vendor/golang.org/x/oauth2/transport.go b/vendor/golang.org/x/oauth2/transport.go
new file mode 100644
index 0000000..aa0d34f
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/transport.go
@@ -0,0 +1,144 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package oauth2
+
+import (
+	"errors"
+	"io"
+	"net/http"
+	"sync"
+)
+
+// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests,
+// wrapping a base RoundTripper and adding an Authorization header
+// with a token from the supplied Sources.
+//
+// Transport is a low-level mechanism. Most code will use the
+// higher-level Config.Client method instead.
+type Transport struct {
+	// Source supplies the token to add to outgoing requests'
+	// Authorization headers.
+	Source TokenSource
+
+	// Base is the base RoundTripper used to make HTTP requests.
+	// If nil, http.DefaultTransport is used.
+	Base http.RoundTripper
+
+	mu     sync.Mutex                      // guards modReq
+	modReq map[*http.Request]*http.Request // original -> modified
+}
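
Most code reaches Transport through Config.Client, but it can be assembled directly when a custom base transport is needed; the TLS setting below is only an illustration (imports: crypto/tls, net/http, golang.org/x/oauth2):

```go
func customClient(ts oauth2.TokenSource) *http.Client {
	return &http.Client{
		Transport: &oauth2.Transport{
			Source: ts,
			Base: &http.Transport{
				TLSClientConfig: &tls.Config{MinVersion: tls.VersionTLS12},
			},
		},
	}
}
```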
+
+// RoundTrip authorizes and authenticates the request with an
+// access token from Transport's Source.
+func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
+	reqBodyClosed := false
+	if req.Body != nil {
+		defer func() {
+			if !reqBodyClosed {
+				req.Body.Close()
+			}
+		}()
+	}
+
+	if t.Source == nil {
+		return nil, errors.New("oauth2: Transport's Source is nil")
+	}
+	token, err := t.Source.Token()
+	if err != nil {
+		return nil, err
+	}
+
+	req2 := cloneRequest(req) // per RoundTripper contract
+	token.SetAuthHeader(req2)
+	t.setModReq(req, req2)
+	res, err := t.base().RoundTrip(req2)
+
+	// req.Body is assumed to have been closed by the base RoundTripper.
+	reqBodyClosed = true
+
+	if err != nil {
+		t.setModReq(req, nil)
+		return nil, err
+	}
+	res.Body = &onEOFReader{
+		rc: res.Body,
+		fn: func() { t.setModReq(req, nil) },
+	}
+	return res, nil
+}
+
+// CancelRequest cancels an in-flight request by closing its connection.
+func (t *Transport) CancelRequest(req *http.Request) {
+	type canceler interface {
+		CancelRequest(*http.Request)
+	}
+	if cr, ok := t.base().(canceler); ok {
+		t.mu.Lock()
+		modReq := t.modReq[req]
+		delete(t.modReq, req)
+		t.mu.Unlock()
+		cr.CancelRequest(modReq)
+	}
+}
+
+func (t *Transport) base() http.RoundTripper {
+	if t.Base != nil {
+		return t.Base
+	}
+	return http.DefaultTransport
+}
+
+func (t *Transport) setModReq(orig, mod *http.Request) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	if t.modReq == nil {
+		t.modReq = make(map[*http.Request]*http.Request)
+	}
+	if mod == nil {
+		delete(t.modReq, orig)
+	} else {
+		t.modReq[orig] = mod
+	}
+}
+
+// cloneRequest returns a clone of the provided *http.Request.
+// The clone is a shallow copy of the struct and its Header map.
+func cloneRequest(r *http.Request) *http.Request {
+	// shallow copy of the struct
+	r2 := new(http.Request)
+	*r2 = *r
+	// deep copy of the Header
+	r2.Header = make(http.Header, len(r.Header))
+	for k, s := range r.Header {
+		r2.Header[k] = append([]string(nil), s...)
+	}
+	return r2
+}
+
+type onEOFReader struct {
+	rc io.ReadCloser
+	fn func()
+}
+
+func (r *onEOFReader) Read(p []byte) (n int, err error) {
+	n, err = r.rc.Read(p)
+	if err == io.EOF {
+		r.runFunc()
+	}
+	return
+}
+
+func (r *onEOFReader) Close() error {
+	err := r.rc.Close()
+	r.runFunc()
+	return err
+}
+
+func (r *onEOFReader) runFunc() {
+	if fn := r.fn; fn != nil {
+		fn()
+		r.fn = nil
+	}
+}
diff --git a/vendor/golang.org/x/sys/windows/aliases.go b/vendor/golang.org/x/sys/windows/aliases.go
new file mode 100644
index 0000000..af3af60
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/aliases.go
@@ -0,0 +1,13 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows
+// +build go1.9
+
+package windows
+
+import "syscall"
+
+type Errno = syscall.Errno
+type SysProcAttr = syscall.SysProcAttr
diff --git a/vendor/golang.org/x/sys/windows/asm_windows_386.s b/vendor/golang.org/x/sys/windows/asm_windows_386.s
new file mode 100644
index 0000000..21d994d
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/asm_windows_386.s
@@ -0,0 +1,13 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//
+// System calls for 386, Windows are implemented in runtime/syscall_windows.goc
+//
+
+TEXT ·getprocaddress(SB), 7, $0-16
+	JMP	syscall·getprocaddress(SB)
+
+TEXT ·loadlibrary(SB), 7, $0-12
+	JMP	syscall·loadlibrary(SB)
diff --git a/vendor/golang.org/x/sys/windows/asm_windows_amd64.s b/vendor/golang.org/x/sys/windows/asm_windows_amd64.s
new file mode 100644
index 0000000..5bfdf79
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/asm_windows_amd64.s
@@ -0,0 +1,13 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//
+// System calls for amd64, Windows are implemented in runtime/syscall_windows.goc
+//
+
+TEXT ·getprocaddress(SB), 7, $0-32
+	JMP	syscall·getprocaddress(SB)
+
+TEXT ·loadlibrary(SB), 7, $0-24
+	JMP	syscall·loadlibrary(SB)
diff --git a/vendor/golang.org/x/sys/windows/asm_windows_arm.s b/vendor/golang.org/x/sys/windows/asm_windows_arm.s
new file mode 100644
index 0000000..55d8b91
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/asm_windows_arm.s
@@ -0,0 +1,11 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+TEXT ·getprocaddress(SB),NOSPLIT,$0
+	B	syscall·getprocaddress(SB)
+
+TEXT ·loadlibrary(SB),NOSPLIT,$0
+	B	syscall·loadlibrary(SB)
diff --git a/vendor/golang.org/x/sys/windows/dll_windows.go b/vendor/golang.org/x/sys/windows/dll_windows.go
new file mode 100644
index 0000000..e92c05b
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/dll_windows.go
@@ -0,0 +1,378 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package windows
+
+import (
+	"sync"
+	"sync/atomic"
+	"syscall"
+	"unsafe"
+)
+
+// DLLError describes reasons for DLL load failures.
+type DLLError struct {
+	Err     error
+	ObjName string
+	Msg     string
+}
+
+func (e *DLLError) Error() string { return e.Msg }
+
+// Implemented in runtime/syscall_windows.goc; we provide jumps to them in our assembly file.
+func loadlibrary(filename *uint16) (handle uintptr, err syscall.Errno)
+func getprocaddress(handle uintptr, procname *uint8) (proc uintptr, err syscall.Errno)
+
+// A DLL implements access to a single DLL.
+type DLL struct {
+	Name   string
+	Handle Handle
+}
+
+// LoadDLL loads the named DLL file into memory.
+//
+// Warning: using LoadDLL without an absolute path name is subject to
+// DLL preloading attacks. To safely load a system DLL, use LazyDLL
+// with System set to true, or use LoadLibraryEx directly.
+func LoadDLL(name string) (dll *DLL, err error) {
+	namep, err := UTF16PtrFromString(name)
+	if err != nil {
+		return nil, err
+	}
+	h, e := loadlibrary(namep)
+	if e != 0 {
+		return nil, &DLLError{
+			Err:     e,
+			ObjName: name,
+			Msg:     "Failed to load " + name + ": " + e.Error(),
+		}
+	}
+	d := &DLL{
+		Name:   name,
+		Handle: Handle(h),
+	}
+	return d, nil
+}
+
+// MustLoadDLL is like LoadDLL but panics if the load operation fails.
+func MustLoadDLL(name string) *DLL {
+	d, e := LoadDLL(name)
+	if e != nil {
+		panic(e)
+	}
+	return d
+}
+
+// FindProc searches DLL d for a procedure named name and returns a *Proc
+// if found. It returns an error if the search fails.
+func (d *DLL) FindProc(name string) (proc *Proc, err error) {
+	namep, err := BytePtrFromString(name)
+	if err != nil {
+		return nil, err
+	}
+	a, e := getprocaddress(uintptr(d.Handle), namep)
+	if e != 0 {
+		return nil, &DLLError{
+			Err:     e,
+			ObjName: name,
+			Msg:     "Failed to find " + name + " procedure in " + d.Name + ": " + e.Error(),
+		}
+	}
+	p := &Proc{
+		Dll:  d,
+		Name: name,
+		addr: a,
+	}
+	return p, nil
+}
+
+// MustFindProc is like FindProc but panics if the search fails.
+func (d *DLL) MustFindProc(name string) *Proc {
+	p, e := d.FindProc(name)
+	if e != nil {
+		panic(e)
+	}
+	return p
+}
+
+// Release unloads DLL d from memory.
+func (d *DLL) Release() (err error) {
+	return FreeLibrary(d.Handle)
+}
+
+// A Proc implements access to a procedure inside a DLL.
+type Proc struct {
+	Dll  *DLL
+	Name string
+	addr uintptr
+}
+
+// Addr returns the address of the procedure represented by p.
+// The return value can be passed to Syscall to run the procedure.
+func (p *Proc) Addr() uintptr {
+	return p.addr
+}
+
+//go:uintptrescapes
+
+// Call executes procedure p with arguments a. It panics if more than
+// 15 arguments are supplied.
+//
+// The returned error is always non-nil, constructed from the result of GetLastError.
+// Callers must inspect the primary return value to decide whether an error occurred
+// (according to the semantics of the specific function being called) before consulting
+// the error. The error is guaranteed to contain a windows.Errno.
+func (p *Proc) Call(a ...uintptr) (r1, r2 uintptr, lastErr error) {
+	switch len(a) {
+	case 0:
+		return syscall.Syscall(p.Addr(), uintptr(len(a)), 0, 0, 0)
+	case 1:
+		return syscall.Syscall(p.Addr(), uintptr(len(a)), a[0], 0, 0)
+	case 2:
+		return syscall.Syscall(p.Addr(), uintptr(len(a)), a[0], a[1], 0)
+	case 3:
+		return syscall.Syscall(p.Addr(), uintptr(len(a)), a[0], a[1], a[2])
+	case 4:
+		return syscall.Syscall6(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], 0, 0)
+	case 5:
+		return syscall.Syscall6(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], 0)
+	case 6:
+		return syscall.Syscall6(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5])
+	case 7:
+		return syscall.Syscall9(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], 0, 0)
+	case 8:
+		return syscall.Syscall9(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], 0)
+	case 9:
+		return syscall.Syscall9(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8])
+	case 10:
+		return syscall.Syscall12(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], 0, 0)
+	case 11:
+		return syscall.Syscall12(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], 0)
+	case 12:
+		return syscall.Syscall12(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11])
+	case 13:
+		return syscall.Syscall15(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], 0, 0)
+	case 14:
+		return syscall.Syscall15(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], a[13], 0)
+	case 15:
+		return syscall.Syscall15(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], a[13], a[14])
+	default:
+		panic("Call " + p.Name + " with too many arguments " + itoa(len(a)) + ".")
+	}
+}
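+
+// Illustrative only: a minimal caller-side sketch of the LoadDLL/FindProc/Call
+// flow above ("kernel32.dll" and "GetTickCount" are example names, not
+// anything this file depends on):
+//
+//	dll, err := windows.LoadDLL("kernel32.dll")
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer dll.Release()
+//	proc, err := dll.FindProc("GetTickCount")
+//	if err != nil {
+//		panic(err)
+//	}
+//	ticks, _, callErr := proc.Call()
+//	// callErr is always non-nil; per the doc comment above, consult it only
+//	// after deciding from ticks (the primary return value) that the call failed.
+//	_, _ = ticks, callErr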
+
+// A LazyDLL implements access to a single DLL.
+// It will delay the load of the DLL until the first
+// call to its Handle method or to the Addr method
+// of one of its LazyProcs.
+type LazyDLL struct {
+	Name string
+
+	// System determines whether the DLL must be loaded from the
+	// Windows System directory, bypassing the normal DLL search
+	// path.
+	System bool
+
+	mu  sync.Mutex
+	dll *DLL // non-nil once DLL is loaded
+}
+
+// Load loads DLL file d.Name into memory. It returns an error if it fails.
+// Load will not try to load the DLL if it is already loaded into memory.
+func (d *LazyDLL) Load() error {
+	// Non-racy version of:
+	// if d.dll != nil {
+	if atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&d.dll))) != nil {
+		return nil
+	}
+	d.mu.Lock()
+	defer d.mu.Unlock()
+	if d.dll != nil {
+		return nil
+	}
+
+	// kernel32.dll is special, since it's where LoadLibraryEx comes from.
+	// The kernel already special-cases its name, so it's always
+	// loaded from system32.
+	var dll *DLL
+	var err error
+	if d.Name == "kernel32.dll" {
+		dll, err = LoadDLL(d.Name)
+	} else {
+		dll, err = loadLibraryEx(d.Name, d.System)
+	}
+	if err != nil {
+		return err
+	}
+
+	// Non-racy version of:
+	// d.dll = dll
+	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&d.dll)), unsafe.Pointer(dll))
+	return nil
+}
+
+// mustLoad is like Load but panics if the load fails.
+func (d *LazyDLL) mustLoad() {
+	e := d.Load()
+	if e != nil {
+		panic(e)
+	}
+}
+
+// Handle returns d's module handle.
+func (d *LazyDLL) Handle() uintptr {
+	d.mustLoad()
+	return uintptr(d.dll.Handle)
+}
+
+// NewProc returns a LazyProc for accessing the named procedure in the DLL d.
+func (d *LazyDLL) NewProc(name string) *LazyProc {
+	return &LazyProc{l: d, Name: name}
+}
+
+// NewLazyDLL creates a new LazyDLL associated with the named DLL file.
+func NewLazyDLL(name string) *LazyDLL {
+	return &LazyDLL{Name: name}
+}
+
+// NewLazySystemDLL is like NewLazyDLL, but will only
+// search Windows System directory for the DLL if name is
+// a base name (like "advapi32.dll").
+func NewLazySystemDLL(name string) *LazyDLL {
+	return &LazyDLL{Name: name, System: true}
+}
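+
+// Illustrative only: a sketch of lazy loading. Nothing is loaded or resolved
+// until the first Addr/Call ("advapi32.dll" and "RegCloseKey" are example names):
+//
+//	mod := windows.NewLazySystemDLL("advapi32.dll")
+//	proc := mod.NewProc("RegCloseKey")
+//	// The DLL load and GetProcAddress lookup happen here, on first use:
+//	r1, _, _ := proc.Call(uintptr(0)) // arguments are illustrative only
+//	_ = r1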
+
+// A LazyProc implements access to a procedure inside a LazyDLL.
+// It delays the lookup until the Addr method is called.
+type LazyProc struct {
+	Name string
+
+	mu   sync.Mutex
+	l    *LazyDLL
+	proc *Proc
+}
+
+// Find searches the DLL for a procedure named p.Name. It returns an error
+// if the search fails. Find will not search for the procedure again if it
+// has already been found and loaded into memory.
+func (p *LazyProc) Find() error {
+	// Non-racy version of:
+	// if p.proc == nil {
+	if atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&p.proc))) == nil {
+		p.mu.Lock()
+		defer p.mu.Unlock()
+		if p.proc == nil {
+			e := p.l.Load()
+			if e != nil {
+				return e
+			}
+			proc, e := p.l.dll.FindProc(p.Name)
+			if e != nil {
+				return e
+			}
+			// Non-racy version of:
+			// p.proc = proc
+			atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&p.proc)), unsafe.Pointer(proc))
+		}
+	}
+	return nil
+}
+
+// mustFind is like Find but panics if the search fails.
+func (p *LazyProc) mustFind() {
+	e := p.Find()
+	if e != nil {
+		panic(e)
+	}
+}
+
+// Addr returns the address of the procedure represented by p.
+// The return value can be passed to Syscall to run the procedure.
+// It will panic if the procedure cannot be found.
+func (p *LazyProc) Addr() uintptr {
+	p.mustFind()
+	return p.proc.Addr()
+}
+
+//go:uintptrescapes
+
+// Call executes procedure p with arguments a. It will panic if more than
+// 15 arguments are supplied. It will also panic if the procedure cannot be found.
+//
+// The returned error is always non-nil, constructed from the result of GetLastError.
+// Callers must inspect the primary return value to decide whether an error occurred
+// (according to the semantics of the specific function being called) before consulting
+// the error. The error will be guaranteed to contain windows.Errno.
+func (p *LazyProc) Call(a ...uintptr) (r1, r2 uintptr, lastErr error) {
+	p.mustFind()
+	return p.proc.Call(a...)
+}
+
+var canDoSearchSystem32Once struct {
+	sync.Once
+	v bool
+}
+
+func initCanDoSearchSystem32() {
+	// https://msdn.microsoft.com/en-us/library/ms684179(v=vs.85).aspx says:
+	// "Windows 7, Windows Server 2008 R2, Windows Vista, and Windows
+	// Server 2008: The LOAD_LIBRARY_SEARCH_* flags are available on
+	// systems that have KB2533623 installed. To determine whether the
+	// flags are available, use GetProcAddress to get the address of the
+	// AddDllDirectory, RemoveDllDirectory, or SetDefaultDllDirectories
+	// function. If GetProcAddress succeeds, the LOAD_LIBRARY_SEARCH_*
+	// flags can be used with LoadLibraryEx."
+	canDoSearchSystem32Once.v = (modkernel32.NewProc("AddDllDirectory").Find() == nil)
+}
+
+func canDoSearchSystem32() bool {
+	canDoSearchSystem32Once.Do(initCanDoSearchSystem32)
+	return canDoSearchSystem32Once.v
+}
+
+func isBaseName(name string) bool {
+	for _, c := range name {
+		if c == ':' || c == '/' || c == '\\' {
+			return false
+		}
+	}
+	return true
+}
+
+// loadLibraryEx wraps the Windows LoadLibraryEx function.
+//
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms684179(v=vs.85).aspx
+//
+// If name is not an absolute path, LoadLibraryEx searches for the DLL
+// in a variety of automatic locations unless constrained by flags.
+// See: https://msdn.microsoft.com/en-us/library/ff919712%28VS.85%29.aspx
+func loadLibraryEx(name string, system bool) (*DLL, error) {
+	loadDLL := name
+	var flags uintptr
+	if system {
+		if canDoSearchSystem32() {
+			const LOAD_LIBRARY_SEARCH_SYSTEM32 = 0x00000800
+			flags = LOAD_LIBRARY_SEARCH_SYSTEM32
+		} else if isBaseName(name) {
+			// Windows XP or an unpatched Windows machine
+			// trying to load "foo.dll" out of the system
+			// folder, but LoadLibraryEx doesn't support
+			// that yet on their system, so emulate it.
+			windir, _ := Getenv("WINDIR") // old var; apparently works on XP
+			if windir == "" {
+				return nil, errString("%WINDIR% not defined")
+			}
+			loadDLL = windir + "\\System32\\" + name
+		}
+	}
+	h, err := LoadLibraryEx(loadDLL, 0, flags)
+	if err != nil {
+		return nil, err
+	}
+	return &DLL{Name: name, Handle: h}, nil
+}
+
+type errString string
+
+func (s errString) Error() string { return string(s) }
diff --git a/vendor/golang.org/x/sys/windows/env_windows.go b/vendor/golang.org/x/sys/windows/env_windows.go
new file mode 100644
index 0000000..bdc71e2
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/env_windows.go
@@ -0,0 +1,29 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Windows environment variables.
+
+package windows
+
+import "syscall"
+
+func Getenv(key string) (value string, found bool) {
+	return syscall.Getenv(key)
+}
+
+func Setenv(key, value string) error {
+	return syscall.Setenv(key, value)
+}
+
+func Clearenv() {
+	syscall.Clearenv()
+}
+
+func Environ() []string {
+	return syscall.Environ()
+}
+
+func Unsetenv(key string) error {
+	return syscall.Unsetenv(key)
+}
diff --git a/vendor/golang.org/x/sys/windows/eventlog.go b/vendor/golang.org/x/sys/windows/eventlog.go
new file mode 100644
index 0000000..40af946
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/eventlog.go
@@ -0,0 +1,20 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows
+
+package windows
+
+const (
+	EVENTLOG_SUCCESS          = 0
+	EVENTLOG_ERROR_TYPE       = 1
+	EVENTLOG_WARNING_TYPE     = 2
+	EVENTLOG_INFORMATION_TYPE = 4
+	EVENTLOG_AUDIT_SUCCESS    = 8
+	EVENTLOG_AUDIT_FAILURE    = 16
+)
+
+//sys	RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Handle, err error) [failretval==0] = advapi32.RegisterEventSourceW
+//sys	DeregisterEventSource(handle Handle) (err error) = advapi32.DeregisterEventSource
+//sys	ReportEvent(log Handle, etype uint16, category uint16, eventId uint32, usrSId uintptr, numStrings uint16, dataSize uint32, strings **uint16, rawData *byte) (err error) = advapi32.ReportEventW
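+
+// Illustrative only: a minimal sketch of reporting an informational event
+// with the functions above (the source name "MyApp", event ID 1 and the
+// message text are hypothetical):
+//
+//	src, _ := windows.UTF16PtrFromString("MyApp")
+//	log, err := windows.RegisterEventSource(nil, src) // nil = local computer
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer windows.DeregisterEventSource(log)
+//	msg, _ := windows.UTF16PtrFromString("service started")
+//	_ = windows.ReportEvent(log, windows.EVENTLOG_INFORMATION_TYPE, 0, 1, 0, 1, 0, &msg, nil)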
diff --git a/vendor/golang.org/x/sys/windows/exec_windows.go b/vendor/golang.org/x/sys/windows/exec_windows.go
new file mode 100644
index 0000000..3606c3a
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/exec_windows.go
@@ -0,0 +1,97 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Fork, exec, wait, etc.
+
+package windows
+
+// EscapeArg rewrites command line argument s as prescribed
+// in http://msdn.microsoft.com/en-us/library/ms880421.
+// This function returns "" (2 double quotes) if s is empty.
+// Otherwise, the following transformations are applied:
+// - every back slash (\) is doubled, but only if immediately
+//   followed by double quote (");
+// - every double quote (") is escaped by back slash (\);
+// - finally, s is wrapped with double quotes (arg -> "arg"),
+//   but only if there is space or tab inside s.
+func EscapeArg(s string) string {
+	if len(s) == 0 {
+		return "\"\""
+	}
+	n := len(s)
+	hasSpace := false
+	for i := 0; i < len(s); i++ {
+		switch s[i] {
+		case '"', '\\':
+			n++
+		case ' ', '\t':
+			hasSpace = true
+		}
+	}
+	if hasSpace {
+		n += 2
+	}
+	if n == len(s) {
+		return s
+	}
+
+	qs := make([]byte, n)
+	j := 0
+	if hasSpace {
+		qs[j] = '"'
+		j++
+	}
+	slashes := 0
+	for i := 0; i < len(s); i++ {
+		switch s[i] {
+		default:
+			slashes = 0
+			qs[j] = s[i]
+		case '\\':
+			slashes++
+			qs[j] = s[i]
+		case '"':
+			for ; slashes > 0; slashes-- {
+				qs[j] = '\\'
+				j++
+			}
+			qs[j] = '\\'
+			j++
+			qs[j] = s[i]
+		}
+		j++
+	}
+	if hasSpace {
+		for ; slashes > 0; slashes-- {
+			qs[j] = '\\'
+			j++
+		}
+		qs[j] = '"'
+		j++
+	}
+	return string(qs[:j])
+}
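+
+// For example (sketch of the rules above; backticks delimit Go raw strings):
+//
+//	EscapeArg(``)                 // `""`
+//	EscapeArg(`C:\Program Files`) // `"C:\Program Files"`
+//	EscapeArg(`he said "hi"`)     // `"he said \"hi\""`
+//	EscapeArg(`C:\dir\`)          // `C:\dir\` (unchanged: no space and no quote)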
+
+func CloseOnExec(fd Handle) {
+	SetHandleInformation(Handle(fd), HANDLE_FLAG_INHERIT, 0)
+}
+
+// FullPath retrieves the full path of the specified file.
+func FullPath(name string) (path string, err error) {
+	p, err := UTF16PtrFromString(name)
+	if err != nil {
+		return "", err
+	}
+	n := uint32(100)
+	for {
+		buf := make([]uint16, n)
+		n, err = GetFullPathName(p, uint32(len(buf)), &buf[0], nil)
+		if err != nil {
+			return "", err
+		}
+		if n <= uint32(len(buf)) {
+			return UTF16ToString(buf[:n]), nil
+		}
+	}
+}
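+
+// Illustrative only: FullPath resolves relative to the process's current
+// directory, growing the buffer until GetFullPathName fits. E.g., with the
+// current directory at C:\Work (hypothetical):
+//
+//	abs, err := windows.FullPath(`..\data.txt`) // abs == `C:\data.txt`, err == nil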
diff --git a/vendor/golang.org/x/sys/windows/memory_windows.go b/vendor/golang.org/x/sys/windows/memory_windows.go
new file mode 100644
index 0000000..f80a420
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/memory_windows.go
@@ -0,0 +1,26 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package windows
+
+const (
+	MEM_COMMIT      = 0x00001000
+	MEM_RESERVE     = 0x00002000
+	MEM_DECOMMIT    = 0x00004000
+	MEM_RELEASE     = 0x00008000
+	MEM_RESET       = 0x00080000
+	MEM_TOP_DOWN    = 0x00100000
+	MEM_WRITE_WATCH = 0x00200000
+	MEM_PHYSICAL    = 0x00400000
+	MEM_RESET_UNDO  = 0x01000000
+	MEM_LARGE_PAGES = 0x20000000
+
+	PAGE_NOACCESS          = 0x01
+	PAGE_READONLY          = 0x02
+	PAGE_READWRITE         = 0x04
+	PAGE_WRITECOPY         = 0x08
+	PAGE_EXECUTE_READ      = 0x20
+	PAGE_EXECUTE_READWRITE = 0x40
+	PAGE_EXECUTE_WRITECOPY = 0x80
+)
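+
+// Illustrative only: a minimal sketch of pairing these flags with the
+// VirtualAlloc/VirtualFree calls declared in syscall_windows.go:
+//
+//	addr, err := windows.VirtualAlloc(0, 4096,
+//		windows.MEM_COMMIT|windows.MEM_RESERVE, windows.PAGE_READWRITE)
+//	if err != nil {
+//		panic(err)
+//	}
+//	// MEM_RELEASE requires size 0: the whole reservation is freed at once.
+//	defer windows.VirtualFree(addr, 0, windows.MEM_RELEASE)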
diff --git a/vendor/golang.org/x/sys/windows/mksyscall.go b/vendor/golang.org/x/sys/windows/mksyscall.go
new file mode 100644
index 0000000..fb7db0e
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/mksyscall.go
@@ -0,0 +1,7 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package windows
+
+//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go eventlog.go service.go syscall_windows.go security_windows.go
diff --git a/vendor/golang.org/x/sys/windows/race.go b/vendor/golang.org/x/sys/windows/race.go
new file mode 100644
index 0000000..a74e3e2
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/race.go
@@ -0,0 +1,30 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows,race
+
+package windows
+
+import (
+	"runtime"
+	"unsafe"
+)
+
+const raceenabled = true
+
+func raceAcquire(addr unsafe.Pointer) {
+	runtime.RaceAcquire(addr)
+}
+
+func raceReleaseMerge(addr unsafe.Pointer) {
+	runtime.RaceReleaseMerge(addr)
+}
+
+func raceReadRange(addr unsafe.Pointer, len int) {
+	runtime.RaceReadRange(addr, len)
+}
+
+func raceWriteRange(addr unsafe.Pointer, len int) {
+	runtime.RaceWriteRange(addr, len)
+}
diff --git a/vendor/golang.org/x/sys/windows/race0.go b/vendor/golang.org/x/sys/windows/race0.go
new file mode 100644
index 0000000..e44a3cb
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/race0.go
@@ -0,0 +1,25 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows,!race
+
+package windows
+
+import (
+	"unsafe"
+)
+
+const raceenabled = false
+
+func raceAcquire(addr unsafe.Pointer) {
+}
+
+func raceReleaseMerge(addr unsafe.Pointer) {
+}
+
+func raceReadRange(addr unsafe.Pointer, len int) {
+}
+
+func raceWriteRange(addr unsafe.Pointer, len int) {
+}
diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go
new file mode 100644
index 0000000..4f17a33
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/security_windows.go
@@ -0,0 +1,478 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package windows
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+const (
+	STANDARD_RIGHTS_REQUIRED = 0xf0000
+	STANDARD_RIGHTS_READ     = 0x20000
+	STANDARD_RIGHTS_WRITE    = 0x20000
+	STANDARD_RIGHTS_EXECUTE  = 0x20000
+	STANDARD_RIGHTS_ALL      = 0x1F0000
+)
+
+const (
+	NameUnknown          = 0
+	NameFullyQualifiedDN = 1
+	NameSamCompatible    = 2
+	NameDisplay          = 3
+	NameUniqueId         = 6
+	NameCanonical        = 7
+	NameUserPrincipal    = 8
+	NameCanonicalEx      = 9
+	NameServicePrincipal = 10
+	NameDnsDomain        = 12
+)
+
+// This function returns a 1 byte BOOLEAN rather than the 4 byte BOOL.
+// http://blogs.msdn.com/b/drnick/archive/2007/12/19/windows-and-upn-format-credentials.aspx
+//sys	TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint32, translatedName *uint16, nSize *uint32) (err error) [failretval&0xff==0] = secur32.TranslateNameW
+//sys	GetUserNameEx(nameFormat uint32, nameBuffer *uint16, nSize *uint32) (err error) [failretval&0xff==0] = secur32.GetUserNameExW
+
+// TranslateAccountName converts a directory service
+// object name from one format to another.
+func TranslateAccountName(username string, from, to uint32, initSize int) (string, error) {
+	u, e := UTF16PtrFromString(username)
+	if e != nil {
+		return "", e
+	}
+	n := uint32(50)
+	for {
+		b := make([]uint16, n)
+		e = TranslateName(u, from, to, &b[0], &n)
+		if e == nil {
+			return UTF16ToString(b[:n]), nil
+		}
+		if e != ERROR_INSUFFICIENT_BUFFER {
+			return "", e
+		}
+		if n <= uint32(len(b)) {
+			return "", e
+		}
+	}
+}
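+
+// Illustrative only: converting between the Name* formats above (the account
+// name and resulting UPN are hypothetical):
+//
+//	upn, err := windows.TranslateAccountName(`DOMAIN\user`,
+//		windows.NameSamCompatible, windows.NameUserPrincipal, 50)
+//	// upn would be e.g. "user@domain.example" on success.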
+
+const (
+	// do not reorder
+	NetSetupUnknownStatus = iota
+	NetSetupUnjoined
+	NetSetupWorkgroupName
+	NetSetupDomainName
+)
+
+type UserInfo10 struct {
+	Name       *uint16
+	Comment    *uint16
+	UsrComment *uint16
+	FullName   *uint16
+}
+
+//sys	NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) = netapi32.NetUserGetInfo
+//sys	NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (neterr error) = netapi32.NetGetJoinInformation
+//sys	NetApiBufferFree(buf *byte) (neterr error) = netapi32.NetApiBufferFree
+
+const (
+	// do not reorder
+	SidTypeUser = 1 + iota
+	SidTypeGroup
+	SidTypeDomain
+	SidTypeAlias
+	SidTypeWellKnownGroup
+	SidTypeDeletedAccount
+	SidTypeInvalid
+	SidTypeUnknown
+	SidTypeComputer
+	SidTypeLabel
+)
+
+type SidIdentifierAuthority struct {
+	Value [6]byte
+}
+
+var (
+	SECURITY_NULL_SID_AUTHORITY        = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 0}}
+	SECURITY_WORLD_SID_AUTHORITY       = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 1}}
+	SECURITY_LOCAL_SID_AUTHORITY       = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 2}}
+	SECURITY_CREATOR_SID_AUTHORITY     = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 3}}
+	SECURITY_NON_UNIQUE_AUTHORITY      = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 4}}
+	SECURITY_NT_AUTHORITY              = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 5}}
+	SECURITY_MANDATORY_LABEL_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 16}}
+)
+
+const (
+	SECURITY_NULL_RID                   = 0
+	SECURITY_WORLD_RID                  = 0
+	SECURITY_LOCAL_RID                  = 0
+	SECURITY_CREATOR_OWNER_RID          = 0
+	SECURITY_CREATOR_GROUP_RID          = 1
+	SECURITY_DIALUP_RID                 = 1
+	SECURITY_NETWORK_RID                = 2
+	SECURITY_BATCH_RID                  = 3
+	SECURITY_INTERACTIVE_RID            = 4
+	SECURITY_LOGON_IDS_RID              = 5
+	SECURITY_SERVICE_RID                = 6
+	SECURITY_LOCAL_SYSTEM_RID           = 18
+	SECURITY_BUILTIN_DOMAIN_RID         = 32
+	SECURITY_PRINCIPAL_SELF_RID         = 10
+	SECURITY_CREATOR_OWNER_SERVER_RID   = 0x2
+	SECURITY_CREATOR_GROUP_SERVER_RID   = 0x3
+	SECURITY_LOGON_IDS_RID_COUNT        = 0x3
+	SECURITY_ANONYMOUS_LOGON_RID        = 0x7
+	SECURITY_PROXY_RID                  = 0x8
+	SECURITY_ENTERPRISE_CONTROLLERS_RID = 0x9
+	SECURITY_SERVER_LOGON_RID           = SECURITY_ENTERPRISE_CONTROLLERS_RID
+	SECURITY_AUTHENTICATED_USER_RID     = 0xb
+	SECURITY_RESTRICTED_CODE_RID        = 0xc
+	SECURITY_NT_NON_UNIQUE_RID          = 0x15
+)
+
+// Predefined domain-relative RIDs for local groups.
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/aa379649(v=vs.85).aspx
+const (
+	DOMAIN_ALIAS_RID_ADMINS                         = 0x220
+	DOMAIN_ALIAS_RID_USERS                          = 0x221
+	DOMAIN_ALIAS_RID_GUESTS                         = 0x222
+	DOMAIN_ALIAS_RID_POWER_USERS                    = 0x223
+	DOMAIN_ALIAS_RID_ACCOUNT_OPS                    = 0x224
+	DOMAIN_ALIAS_RID_SYSTEM_OPS                     = 0x225
+	DOMAIN_ALIAS_RID_PRINT_OPS                      = 0x226
+	DOMAIN_ALIAS_RID_BACKUP_OPS                     = 0x227
+	DOMAIN_ALIAS_RID_REPLICATOR                     = 0x228
+	DOMAIN_ALIAS_RID_RAS_SERVERS                    = 0x229
+	DOMAIN_ALIAS_RID_PREW2KCOMPACCESS               = 0x22a
+	DOMAIN_ALIAS_RID_REMOTE_DESKTOP_USERS           = 0x22b
+	DOMAIN_ALIAS_RID_NETWORK_CONFIGURATION_OPS      = 0x22c
+	DOMAIN_ALIAS_RID_INCOMING_FOREST_TRUST_BUILDERS = 0x22d
+	DOMAIN_ALIAS_RID_MONITORING_USERS               = 0x22e
+	DOMAIN_ALIAS_RID_LOGGING_USERS                  = 0x22f
+	DOMAIN_ALIAS_RID_AUTHORIZATIONACCESS            = 0x230
+	DOMAIN_ALIAS_RID_TS_LICENSE_SERVERS             = 0x231
+	DOMAIN_ALIAS_RID_DCOM_USERS                     = 0x232
+	DOMAIN_ALIAS_RID_IUSERS                         = 0x238
+	DOMAIN_ALIAS_RID_CRYPTO_OPERATORS               = 0x239
+	DOMAIN_ALIAS_RID_CACHEABLE_PRINCIPALS_GROUP     = 0x23b
+	DOMAIN_ALIAS_RID_NON_CACHEABLE_PRINCIPALS_GROUP = 0x23c
+	DOMAIN_ALIAS_RID_EVENT_LOG_READERS_GROUP        = 0x23d
+	DOMAIN_ALIAS_RID_CERTSVC_DCOM_ACCESS_GROUP      = 0x23e
+)
+
+//sys	LookupAccountSid(systemName *uint16, sid *SID, name *uint16, nameLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) = advapi32.LookupAccountSidW
+//sys	LookupAccountName(systemName *uint16, accountName *uint16, sid *SID, sidLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) = advapi32.LookupAccountNameW
+//sys	ConvertSidToStringSid(sid *SID, stringSid **uint16) (err error) = advapi32.ConvertSidToStringSidW
+//sys	ConvertStringSidToSid(stringSid *uint16, sid **SID) (err error) = advapi32.ConvertStringSidToSidW
+//sys	GetLengthSid(sid *SID) (len uint32) = advapi32.GetLengthSid
+//sys	CopySid(destSidLen uint32, destSid *SID, srcSid *SID) (err error) = advapi32.CopySid
+//sys	AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, subAuth0 uint32, subAuth1 uint32, subAuth2 uint32, subAuth3 uint32, subAuth4 uint32, subAuth5 uint32, subAuth6 uint32, subAuth7 uint32, sid **SID) (err error) = advapi32.AllocateAndInitializeSid
+//sys	FreeSid(sid *SID) (err error) [failretval!=0] = advapi32.FreeSid
+//sys	EqualSid(sid1 *SID, sid2 *SID) (isEqual bool) = advapi32.EqualSid
+
+// The security identifier (SID) structure is a variable-length
+// structure used to uniquely identify users or groups.
+type SID struct{}
+
+// StringToSid converts the string-format security identifier s
+// into a valid, functional SID.
+func StringToSid(s string) (*SID, error) {
+	var sid *SID
+	p, e := UTF16PtrFromString(s)
+	if e != nil {
+		return nil, e
+	}
+	e = ConvertStringSidToSid(p, &sid)
+	if e != nil {
+		return nil, e
+	}
+	defer LocalFree((Handle)(unsafe.Pointer(sid)))
+	return sid.Copy()
+}
+
+// LookupSID retrieves the security identifier sid for the account
+// and the name of the domain on which the account was found.
+// system specifies the target computer to search.
+func LookupSID(system, account string) (sid *SID, domain string, accType uint32, err error) {
+	if len(account) == 0 {
+		return nil, "", 0, syscall.EINVAL
+	}
+	acc, e := UTF16PtrFromString(account)
+	if e != nil {
+		return nil, "", 0, e
+	}
+	var sys *uint16
+	if len(system) > 0 {
+		sys, e = UTF16PtrFromString(system)
+		if e != nil {
+			return nil, "", 0, e
+		}
+	}
+	n := uint32(50)
+	dn := uint32(50)
+	for {
+		b := make([]byte, n)
+		db := make([]uint16, dn)
+		sid = (*SID)(unsafe.Pointer(&b[0]))
+		e = LookupAccountName(sys, acc, sid, &n, &db[0], &dn, &accType)
+		if e == nil {
+			return sid, UTF16ToString(db), accType, nil
+		}
+		if e != ERROR_INSUFFICIENT_BUFFER {
+			return nil, "", 0, e
+		}
+		if n <= uint32(len(b)) {
+			return nil, "", 0, e
+		}
+	}
+}
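+
+// Illustrative only: resolving an account name on the local computer (empty
+// system string) and formatting the result; "Administrator" is an example:
+//
+//	sid, domain, accType, err := windows.LookupSID("", "Administrator")
+//	if err != nil {
+//		panic(err)
+//	}
+//	s, _ := sid.String() // e.g. "S-1-5-21-...-500"
+//	_, _, _ = domain, accType, s // accType is one of the SidType* constants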
+
+// String converts sid to a string format
+// suitable for display, storage, or transmission.
+func (sid *SID) String() (string, error) {
+	var s *uint16
+	e := ConvertSidToStringSid(sid, &s)
+	if e != nil {
+		return "", e
+	}
+	defer LocalFree((Handle)(unsafe.Pointer(s)))
+	return UTF16ToString((*[256]uint16)(unsafe.Pointer(s))[:]), nil
+}
+
+// Len returns the length, in bytes, of a valid security identifier sid.
+func (sid *SID) Len() int {
+	return int(GetLengthSid(sid))
+}
+
+// Copy creates a duplicate of security identifier sid.
+func (sid *SID) Copy() (*SID, error) {
+	b := make([]byte, sid.Len())
+	sid2 := (*SID)(unsafe.Pointer(&b[0]))
+	e := CopySid(uint32(len(b)), sid2, sid)
+	if e != nil {
+		return nil, e
+	}
+	return sid2, nil
+}
+
+// LookupAccount retrieves the name of the account for this sid
+// and the name of the first domain on which this sid is found.
+// system specifies the target computer to search.
+func (sid *SID) LookupAccount(system string) (account, domain string, accType uint32, err error) {
+	var sys *uint16
+	if len(system) > 0 {
+		sys, err = UTF16PtrFromString(system)
+		if err != nil {
+			return "", "", 0, err
+		}
+	}
+	n := uint32(50)
+	dn := uint32(50)
+	for {
+		b := make([]uint16, n)
+		db := make([]uint16, dn)
+		e := LookupAccountSid(sys, sid, &b[0], &n, &db[0], &dn, &accType)
+		if e == nil {
+			return UTF16ToString(b), UTF16ToString(db), accType, nil
+		}
+		if e != ERROR_INSUFFICIENT_BUFFER {
+			return "", "", 0, e
+		}
+		if n <= uint32(len(b)) {
+			return "", "", 0, e
+		}
+	}
+}
+
+const (
+	// do not reorder
+	TOKEN_ASSIGN_PRIMARY = 1 << iota
+	TOKEN_DUPLICATE
+	TOKEN_IMPERSONATE
+	TOKEN_QUERY
+	TOKEN_QUERY_SOURCE
+	TOKEN_ADJUST_PRIVILEGES
+	TOKEN_ADJUST_GROUPS
+	TOKEN_ADJUST_DEFAULT
+	TOKEN_ADJUST_SESSIONID
+
+	TOKEN_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED |
+		TOKEN_ASSIGN_PRIMARY |
+		TOKEN_DUPLICATE |
+		TOKEN_IMPERSONATE |
+		TOKEN_QUERY |
+		TOKEN_QUERY_SOURCE |
+		TOKEN_ADJUST_PRIVILEGES |
+		TOKEN_ADJUST_GROUPS |
+		TOKEN_ADJUST_DEFAULT |
+		TOKEN_ADJUST_SESSIONID
+	TOKEN_READ  = STANDARD_RIGHTS_READ | TOKEN_QUERY
+	TOKEN_WRITE = STANDARD_RIGHTS_WRITE |
+		TOKEN_ADJUST_PRIVILEGES |
+		TOKEN_ADJUST_GROUPS |
+		TOKEN_ADJUST_DEFAULT
+	TOKEN_EXECUTE = STANDARD_RIGHTS_EXECUTE
+)
+
+const (
+	// do not reorder
+	TokenUser = 1 + iota
+	TokenGroups
+	TokenPrivileges
+	TokenOwner
+	TokenPrimaryGroup
+	TokenDefaultDacl
+	TokenSource
+	TokenType
+	TokenImpersonationLevel
+	TokenStatistics
+	TokenRestrictedSids
+	TokenSessionId
+	TokenGroupsAndPrivileges
+	TokenSessionReference
+	TokenSandBoxInert
+	TokenAuditPolicy
+	TokenOrigin
+	TokenElevationType
+	TokenLinkedToken
+	TokenElevation
+	TokenHasRestrictions
+	TokenAccessInformation
+	TokenVirtualizationAllowed
+	TokenVirtualizationEnabled
+	TokenIntegrityLevel
+	TokenUIAccess
+	TokenMandatoryPolicy
+	TokenLogonSid
+	MaxTokenInfoClass
+)
+
+type SIDAndAttributes struct {
+	Sid        *SID
+	Attributes uint32
+}
+
+type Tokenuser struct {
+	User SIDAndAttributes
+}
+
+type Tokenprimarygroup struct {
+	PrimaryGroup *SID
+}
+
+type Tokengroups struct {
+	GroupCount uint32
+	Groups     [1]SIDAndAttributes
+}
+
+// Authorization Functions
+//sys checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (err error) = advapi32.CheckTokenMembership
+//sys	OpenProcessToken(h Handle, access uint32, token *Token) (err error) = advapi32.OpenProcessToken
+//sys	GetTokenInformation(t Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) = advapi32.GetTokenInformation
+//sys	GetUserProfileDirectory(t Token, dir *uint16, dirLen *uint32) (err error) = userenv.GetUserProfileDirectoryW
+
+// An access token contains the security information for a logon session.
+// The system creates an access token when a user logs on, and every
+// process executed on behalf of the user has a copy of the token.
+// The token identifies the user, the user's groups, and the user's
+// privileges. The system uses the token to control access to securable
+// objects and to control the ability of the user to perform various
+// system-related operations on the local computer.
+type Token Handle
+
+// OpenCurrentProcessToken opens the access token
+// associated with the current process.
+func OpenCurrentProcessToken() (Token, error) {
+	p, e := GetCurrentProcess()
+	if e != nil {
+		return 0, e
+	}
+	var t Token
+	e = OpenProcessToken(p, TOKEN_QUERY, &t)
+	if e != nil {
+		return 0, e
+	}
+	return t, nil
+}
+
+// Close releases access to the access token t.
+func (t Token) Close() error {
+	return CloseHandle(Handle(t))
+}
+
+// getInfo retrieves a specified type of information about an access token.
+func (t Token) getInfo(class uint32, initSize int) (unsafe.Pointer, error) {
+	n := uint32(initSize)
+	for {
+		b := make([]byte, n)
+		e := GetTokenInformation(t, class, &b[0], uint32(len(b)), &n)
+		if e == nil {
+			return unsafe.Pointer(&b[0]), nil
+		}
+		if e != ERROR_INSUFFICIENT_BUFFER {
+			return nil, e
+		}
+		if n <= uint32(len(b)) {
+			return nil, e
+		}
+	}
+}
+
+// GetTokenUser retrieves the user account information of access token t.
+func (t Token) GetTokenUser() (*Tokenuser, error) {
+	i, e := t.getInfo(TokenUser, 50)
+	if e != nil {
+		return nil, e
+	}
+	return (*Tokenuser)(i), nil
+}
+
+// GetTokenGroups retrieves group accounts associated with access token t.
+func (t Token) GetTokenGroups() (*Tokengroups, error) {
+	i, e := t.getInfo(TokenGroups, 50)
+	if e != nil {
+		return nil, e
+	}
+	return (*Tokengroups)(i), nil
+}
+
+// GetTokenPrimaryGroup retrieves the primary group information of access
+// token t: a pointer to a SID structure representing the group that will
+// become the primary group of any objects created by a process using this
+// access token.
+func (t Token) GetTokenPrimaryGroup() (*Tokenprimarygroup, error) {
+	i, e := t.getInfo(TokenPrimaryGroup, 50)
+	if e != nil {
+		return nil, e
+	}
+	return (*Tokenprimarygroup)(i), nil
+}
+
+// GetUserProfileDirectory retrieves the path to the root directory
+// of the profile of the user associated with access token t.
+func (t Token) GetUserProfileDirectory() (string, error) {
+	n := uint32(100)
+	for {
+		b := make([]uint16, n)
+		e := GetUserProfileDirectory(t, &b[0], &n)
+		if e == nil {
+			return UTF16ToString(b), nil
+		}
+		if e != ERROR_INSUFFICIENT_BUFFER {
+			return "", e
+		}
+		if n <= uint32(len(b)) {
+			return "", e
+		}
+	}
+}
+
+// IsMember reports whether the access token t is a member of the provided SID.
+func (t Token) IsMember(sid *SID) (bool, error) {
+	var b int32
+	if e := checkTokenMembership(t, sid, &b); e != nil {
+		return false, e
+	}
+	return b != 0, nil
+}
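+
+// Illustrative only: checking membership in the well-known Administrators
+// alias (S-1-5-32-544). Note that CheckTokenMembership may require an
+// impersonation token rather than a primary token; this is only a sketch:
+//
+//	t, err := windows.OpenCurrentProcessToken()
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer t.Close()
+//	admins, err := windows.StringToSid("S-1-5-32-544")
+//	if err != nil {
+//		panic(err)
+//	}
+//	isAdmin, err := t.IsMember(admins)
+//	_, _ = isAdmin, err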
diff --git a/vendor/golang.org/x/sys/windows/service.go b/vendor/golang.org/x/sys/windows/service.go
new file mode 100644
index 0000000..62fc31b
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/service.go
@@ -0,0 +1,183 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows
+
+package windows
+
+const (
+	SC_MANAGER_CONNECT            = 1
+	SC_MANAGER_CREATE_SERVICE     = 2
+	SC_MANAGER_ENUMERATE_SERVICE  = 4
+	SC_MANAGER_LOCK               = 8
+	SC_MANAGER_QUERY_LOCK_STATUS  = 16
+	SC_MANAGER_MODIFY_BOOT_CONFIG = 32
+	SC_MANAGER_ALL_ACCESS         = 0xf003f
+)
+
+//sys	OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (handle Handle, err error) [failretval==0] = advapi32.OpenSCManagerW
+
+const (
+	SERVICE_KERNEL_DRIVER       = 1
+	SERVICE_FILE_SYSTEM_DRIVER  = 2
+	SERVICE_ADAPTER             = 4
+	SERVICE_RECOGNIZER_DRIVER   = 8
+	SERVICE_WIN32_OWN_PROCESS   = 16
+	SERVICE_WIN32_SHARE_PROCESS = 32
+	SERVICE_WIN32               = SERVICE_WIN32_OWN_PROCESS | SERVICE_WIN32_SHARE_PROCESS
+	SERVICE_INTERACTIVE_PROCESS = 256
+	SERVICE_DRIVER              = SERVICE_KERNEL_DRIVER | SERVICE_FILE_SYSTEM_DRIVER | SERVICE_RECOGNIZER_DRIVER
+	SERVICE_TYPE_ALL            = SERVICE_WIN32 | SERVICE_ADAPTER | SERVICE_DRIVER | SERVICE_INTERACTIVE_PROCESS
+
+	SERVICE_BOOT_START   = 0
+	SERVICE_SYSTEM_START = 1
+	SERVICE_AUTO_START   = 2
+	SERVICE_DEMAND_START = 3
+	SERVICE_DISABLED     = 4
+
+	SERVICE_ERROR_IGNORE   = 0
+	SERVICE_ERROR_NORMAL   = 1
+	SERVICE_ERROR_SEVERE   = 2
+	SERVICE_ERROR_CRITICAL = 3
+
+	SC_STATUS_PROCESS_INFO = 0
+
+	SC_ACTION_NONE        = 0
+	SC_ACTION_RESTART     = 1
+	SC_ACTION_REBOOT      = 2
+	SC_ACTION_RUN_COMMAND = 3
+
+	SERVICE_STOPPED          = 1
+	SERVICE_START_PENDING    = 2
+	SERVICE_STOP_PENDING     = 3
+	SERVICE_RUNNING          = 4
+	SERVICE_CONTINUE_PENDING = 5
+	SERVICE_PAUSE_PENDING    = 6
+	SERVICE_PAUSED           = 7
+	SERVICE_NO_CHANGE        = 0xffffffff
+
+	SERVICE_ACCEPT_STOP                  = 1
+	SERVICE_ACCEPT_PAUSE_CONTINUE        = 2
+	SERVICE_ACCEPT_SHUTDOWN              = 4
+	SERVICE_ACCEPT_PARAMCHANGE           = 8
+	SERVICE_ACCEPT_NETBINDCHANGE         = 16
+	SERVICE_ACCEPT_HARDWAREPROFILECHANGE = 32
+	SERVICE_ACCEPT_POWEREVENT            = 64
+	SERVICE_ACCEPT_SESSIONCHANGE         = 128
+
+	SERVICE_CONTROL_STOP                  = 1
+	SERVICE_CONTROL_PAUSE                 = 2
+	SERVICE_CONTROL_CONTINUE              = 3
+	SERVICE_CONTROL_INTERROGATE           = 4
+	SERVICE_CONTROL_SHUTDOWN              = 5
+	SERVICE_CONTROL_PARAMCHANGE           = 6
+	SERVICE_CONTROL_NETBINDADD            = 7
+	SERVICE_CONTROL_NETBINDREMOVE         = 8
+	SERVICE_CONTROL_NETBINDENABLE         = 9
+	SERVICE_CONTROL_NETBINDDISABLE        = 10
+	SERVICE_CONTROL_DEVICEEVENT           = 11
+	SERVICE_CONTROL_HARDWAREPROFILECHANGE = 12
+	SERVICE_CONTROL_POWEREVENT            = 13
+	SERVICE_CONTROL_SESSIONCHANGE         = 14
+
+	SERVICE_ACTIVE    = 1
+	SERVICE_INACTIVE  = 2
+	SERVICE_STATE_ALL = 3
+
+	SERVICE_QUERY_CONFIG           = 1
+	SERVICE_CHANGE_CONFIG          = 2
+	SERVICE_QUERY_STATUS           = 4
+	SERVICE_ENUMERATE_DEPENDENTS   = 8
+	SERVICE_START                  = 16
+	SERVICE_STOP                   = 32
+	SERVICE_PAUSE_CONTINUE         = 64
+	SERVICE_INTERROGATE            = 128
+	SERVICE_USER_DEFINED_CONTROL   = 256
+	SERVICE_ALL_ACCESS             = STANDARD_RIGHTS_REQUIRED | SERVICE_QUERY_CONFIG | SERVICE_CHANGE_CONFIG | SERVICE_QUERY_STATUS | SERVICE_ENUMERATE_DEPENDENTS | SERVICE_START | SERVICE_STOP | SERVICE_PAUSE_CONTINUE | SERVICE_INTERROGATE | SERVICE_USER_DEFINED_CONTROL
+	SERVICE_RUNS_IN_SYSTEM_PROCESS = 1
+	SERVICE_CONFIG_DESCRIPTION     = 1
+	SERVICE_CONFIG_FAILURE_ACTIONS = 2
+
+	NO_ERROR = 0
+
+	SC_ENUM_PROCESS_INFO = 0
+)
+
+type SERVICE_STATUS struct {
+	ServiceType             uint32
+	CurrentState            uint32
+	ControlsAccepted        uint32
+	Win32ExitCode           uint32
+	ServiceSpecificExitCode uint32
+	CheckPoint              uint32
+	WaitHint                uint32
+}
+
+type SERVICE_TABLE_ENTRY struct {
+	ServiceName *uint16
+	ServiceProc uintptr
+}
+
+type QUERY_SERVICE_CONFIG struct {
+	ServiceType      uint32
+	StartType        uint32
+	ErrorControl     uint32
+	BinaryPathName   *uint16
+	LoadOrderGroup   *uint16
+	TagId            uint32
+	Dependencies     *uint16
+	ServiceStartName *uint16
+	DisplayName      *uint16
+}
+
+type SERVICE_DESCRIPTION struct {
+	Description *uint16
+}
+
+type SERVICE_STATUS_PROCESS struct {
+	ServiceType             uint32
+	CurrentState            uint32
+	ControlsAccepted        uint32
+	Win32ExitCode           uint32
+	ServiceSpecificExitCode uint32
+	CheckPoint              uint32
+	WaitHint                uint32
+	ProcessId               uint32
+	ServiceFlags            uint32
+}
+
+type ENUM_SERVICE_STATUS_PROCESS struct {
+	ServiceName          *uint16
+	DisplayName          *uint16
+	ServiceStatusProcess SERVICE_STATUS_PROCESS
+}
+
+type SERVICE_FAILURE_ACTIONS struct {
+	ResetPeriod  uint32
+	RebootMsg    *uint16
+	Command      *uint16
+	ActionsCount uint32
+	Actions      *SC_ACTION
+}
+
+type SC_ACTION struct {
+	Type  uint32
+	Delay uint32
+}
+
+//sys	CloseServiceHandle(handle Handle) (err error) = advapi32.CloseServiceHandle
+//sys	CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access uint32, srvType uint32, startType uint32, errCtl uint32, pathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16) (handle Handle, err error) [failretval==0] = advapi32.CreateServiceW
+//sys	OpenService(mgr Handle, serviceName *uint16, access uint32) (handle Handle, err error) [failretval==0] = advapi32.OpenServiceW
+//sys	DeleteService(service Handle) (err error) = advapi32.DeleteService
+//sys	StartService(service Handle, numArgs uint32, argVectors **uint16) (err error) = advapi32.StartServiceW
+//sys	QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) = advapi32.QueryServiceStatus
+//sys	ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err error) = advapi32.ControlService
+//sys	StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) = advapi32.StartServiceCtrlDispatcherW
+//sys	SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error) = advapi32.SetServiceStatus
+//sys	ChangeServiceConfig(service Handle, serviceType uint32, startType uint32, errorControl uint32, binaryPathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16, displayName *uint16) (err error) = advapi32.ChangeServiceConfigW
+//sys	QueryServiceConfig(service Handle, serviceConfig *QUERY_SERVICE_CONFIG, bufSize uint32, bytesNeeded *uint32) (err error) = advapi32.QueryServiceConfigW
+//sys	ChangeServiceConfig2(service Handle, infoLevel uint32, info *byte) (err error) = advapi32.ChangeServiceConfig2W
+//sys	QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) = advapi32.QueryServiceConfig2W
+//sys	EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serviceState uint32, services *byte, bufSize uint32, bytesNeeded *uint32, servicesReturned *uint32, resumeHandle *uint32, groupName *uint16) (err error) = advapi32.EnumServicesStatusExW
+//sys   QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) = advapi32.QueryServiceStatusEx
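+
+// Illustrative only: a minimal sketch of querying a service's state with the
+// functions above ("Spooler" is an example service name):
+//
+//	m, err := windows.OpenSCManager(nil, nil, windows.SC_MANAGER_CONNECT)
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer windows.CloseServiceHandle(m)
+//	name, _ := windows.UTF16PtrFromString("Spooler")
+//	s, err := windows.OpenService(m, name, windows.SERVICE_QUERY_STATUS)
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer windows.CloseServiceHandle(s)
+//	var st windows.SERVICE_STATUS
+//	if err := windows.QueryServiceStatus(s, &st); err == nil {
+//		running := st.CurrentState == windows.SERVICE_RUNNING
+//		_ = running
+//	}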
diff --git a/vendor/golang.org/x/sys/windows/str.go b/vendor/golang.org/x/sys/windows/str.go
new file mode 100644
index 0000000..917cc2a
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/str.go
@@ -0,0 +1,22 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows
+
+package windows
+
+func itoa(val int) string { // do it here rather than with fmt to avoid dependency
+	if val < 0 {
+		return "-" + itoa(-val)
+	}
+	var buf [32]byte // big enough for int64
+	i := len(buf) - 1
+	for val >= 10 {
+		buf[i] = byte(val%10 + '0')
+		i--
+		val /= 10
+	}
+	buf[i] = byte(val + '0')
+	return string(buf[i:])
+}
diff --git a/vendor/golang.org/x/sys/windows/syscall.go b/vendor/golang.org/x/sys/windows/syscall.go
new file mode 100644
index 0000000..af828a9
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/syscall.go
@@ -0,0 +1,74 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows
+
+// Package windows contains an interface to the low-level operating system
+// primitives. OS details vary depending on the underlying system, and
+// by default, godoc will display the OS-specific documentation for the current
+// system. If you want godoc to display syscall documentation for another
+// system, set $GOOS and $GOARCH to the desired system. For example, if
+// you want to view documentation for freebsd/arm on linux/amd64, set $GOOS
+// to freebsd and $GOARCH to arm.
+//
+// The primary use of this package is inside other packages that provide a more
+// portable interface to the system, such as "os", "time" and "net".  Use
+// those packages rather than this one if you can.
+//
+// For details of the functions and data types in this package consult
+// the manuals for the appropriate operating system.
+//
+// These calls return err == nil to indicate success; otherwise
+// err represents an operating system error describing the failure and
+// holds a value of type syscall.Errno.
+package windows // import "golang.org/x/sys/windows"
+
+import (
+	"syscall"
+)
+
+// ByteSliceFromString returns a NUL-terminated slice of bytes
+// containing the text of s. If s contains a NUL byte at any
+// location, it returns (nil, syscall.EINVAL).
+func ByteSliceFromString(s string) ([]byte, error) {
+	for i := 0; i < len(s); i++ {
+		if s[i] == 0 {
+			return nil, syscall.EINVAL
+		}
+	}
+	a := make([]byte, len(s)+1)
+	copy(a, s)
+	return a, nil
+}
+
+// BytePtrFromString returns a pointer to a NUL-terminated array of
+// bytes containing the text of s. If s contains a NUL byte at any
+// location, it returns (nil, syscall.EINVAL).
+func BytePtrFromString(s string) (*byte, error) {
+	a, err := ByteSliceFromString(s)
+	if err != nil {
+		return nil, err
+	}
+	return &a[0], nil
+}
+
+// Single-word zero for use when we need a valid pointer to 0 bytes.
+// See mksyscall.pl.
+var _zero uintptr
+
+func (ts *Timespec) Unix() (sec int64, nsec int64) {
+	return int64(ts.Sec), int64(ts.Nsec)
+}
+
+func (tv *Timeval) Unix() (sec int64, nsec int64) {
+	return int64(tv.Sec), int64(tv.Usec) * 1000
+}
+
+func (ts *Timespec) Nano() int64 {
+	return int64(ts.Sec)*1e9 + int64(ts.Nsec)
+}
+
+func (tv *Timeval) Nano() int64 {
+	return int64(tv.Sec)*1e9 + int64(tv.Usec)*1000
+}
diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go
new file mode 100644
index 0000000..8a00b71
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/syscall_windows.go
@@ -0,0 +1,1205 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Windows system calls.
+
+package windows
+
+import (
+	errorspkg "errors"
+	"sync"
+	"syscall"
+	"unicode/utf16"
+	"unsafe"
+)
+
+type Handle uintptr
+
+const (
+	InvalidHandle = ^Handle(0)
+
+	// Flags for DefineDosDevice.
+	DDD_EXACT_MATCH_ON_REMOVE = 0x00000004
+	DDD_NO_BROADCAST_SYSTEM   = 0x00000008
+	DDD_RAW_TARGET_PATH       = 0x00000001
+	DDD_REMOVE_DEFINITION     = 0x00000002
+
+	// Return values for GetDriveType.
+	DRIVE_UNKNOWN     = 0
+	DRIVE_NO_ROOT_DIR = 1
+	DRIVE_REMOVABLE   = 2
+	DRIVE_FIXED       = 3
+	DRIVE_REMOTE      = 4
+	DRIVE_CDROM       = 5
+	DRIVE_RAMDISK     = 6
+
+	// File system flags from GetVolumeInformation and GetVolumeInformationByHandle.
+	FILE_CASE_SENSITIVE_SEARCH        = 0x00000001
+	FILE_CASE_PRESERVED_NAMES         = 0x00000002
+	FILE_FILE_COMPRESSION             = 0x00000010
+	FILE_DAX_VOLUME                   = 0x20000000
+	FILE_NAMED_STREAMS                = 0x00040000
+	FILE_PERSISTENT_ACLS              = 0x00000008
+	FILE_READ_ONLY_VOLUME             = 0x00080000
+	FILE_SEQUENTIAL_WRITE_ONCE        = 0x00100000
+	FILE_SUPPORTS_ENCRYPTION          = 0x00020000
+	FILE_SUPPORTS_EXTENDED_ATTRIBUTES = 0x00800000
+	FILE_SUPPORTS_HARD_LINKS          = 0x00400000
+	FILE_SUPPORTS_OBJECT_IDS          = 0x00010000
+	FILE_SUPPORTS_OPEN_BY_FILE_ID     = 0x01000000
+	FILE_SUPPORTS_REPARSE_POINTS      = 0x00000080
+	FILE_SUPPORTS_SPARSE_FILES        = 0x00000040
+	FILE_SUPPORTS_TRANSACTIONS        = 0x00200000
+	FILE_SUPPORTS_USN_JOURNAL         = 0x02000000
+	FILE_UNICODE_ON_DISK              = 0x00000004
+	FILE_VOLUME_IS_COMPRESSED         = 0x00008000
+	FILE_VOLUME_QUOTAS                = 0x00000020
+)
+
+// StringToUTF16 is deprecated. Use UTF16FromString instead.
+// If s contains a NUL byte this function panics instead of
+// returning an error.
+func StringToUTF16(s string) []uint16 {
+	a, err := UTF16FromString(s)
+	if err != nil {
+		panic("windows: string with NUL passed to StringToUTF16")
+	}
+	return a
+}
+
+// UTF16FromString returns the UTF-16 encoding of the UTF-8 string
+// s, with a terminating NUL added. If s contains a NUL byte at any
+// location, it returns (nil, syscall.EINVAL).
+func UTF16FromString(s string) ([]uint16, error) {
+	for i := 0; i < len(s); i++ {
+		if s[i] == 0 {
+			return nil, syscall.EINVAL
+		}
+	}
+	return utf16.Encode([]rune(s + "\x00")), nil
+}
+
+// UTF16ToString returns the UTF-8 encoding of the UTF-16 sequence s,
+// with a terminating NUL removed.
+func UTF16ToString(s []uint16) string {
+	for i, v := range s {
+		if v == 0 {
+			s = s[0:i]
+			break
+		}
+	}
+	return string(utf16.Decode(s))
+}
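+
+// Illustrative only: the two functions round-trip, with the terminating NUL
+// added on encode and everything from the first NUL dropped on decode:
+//
+//	buf, err := windows.UTF16FromString("C:\\Temp") // buf ends in a NUL code unit
+//	if err == nil {
+//		s := windows.UTF16ToString(buf) // s == "C:\\Temp"
+//		_ = s
+//	}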
+
+// StringToUTF16Ptr is deprecated. Use UTF16PtrFromString instead.
+// If s contains a NUL byte this function panics instead of
+// returning an error.
+func StringToUTF16Ptr(s string) *uint16 { return &StringToUTF16(s)[0] }
+
+// UTF16PtrFromString returns pointer to the UTF-16 encoding of
+// the UTF-8 string s, with a terminating NUL added. If s
+// contains a NUL byte at any location, it returns (nil, syscall.EINVAL).
+func UTF16PtrFromString(s string) (*uint16, error) {
+	a, err := UTF16FromString(s)
+	if err != nil {
+		return nil, err
+	}
+	return &a[0], nil
+}
+
+func Getpagesize() int { return 4096 }
+
+// NewCallback converts a Go function to a function pointer conforming to the stdcall calling convention.
+// This is useful when interoperating with Windows code requiring callbacks.
+// The argument is expected to be a function with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr.
+func NewCallback(fn interface{}) uintptr {
+	return syscall.NewCallback(fn)
+}
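+
+// Illustrative only: a sketch of a stdcall callback. The parameter shape
+// depends on the Windows API the callback is passed to (an EnumWindows-style
+// callback is just an example); every parameter must be uintptr-sized and
+// the single result must be uintptr:
+//
+//	cb := windows.NewCallback(func(hwnd uintptr, lparam uintptr) uintptr {
+//		return 1 // non-zero to continue enumeration
+//	})
+//	_ = cb // pass cb as the function-pointer argument of the API call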
+
+// NewCallbackCDecl converts a Go function to a function pointer conforming to the cdecl calling convention.
+// This is useful when interoperating with Windows code requiring callbacks.
+// The argument is expected to be a function with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr.
+func NewCallbackCDecl(fn interface{}) uintptr {
+	return syscall.NewCallbackCDecl(fn)
+}
+
+// windows api calls
+
+//sys	GetLastError() (lasterr error)
+//sys	LoadLibrary(libname string) (handle Handle, err error) = LoadLibraryW
+//sys	LoadLibraryEx(libname string, zero Handle, flags uintptr) (handle Handle, err error) = LoadLibraryExW
+//sys	FreeLibrary(handle Handle) (err error)
+//sys	GetProcAddress(module Handle, procname string) (proc uintptr, err error)
+//sys	GetVersion() (ver uint32, err error)
+//sys	FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, buf []uint16, args *byte) (n uint32, err error) = FormatMessageW
+//sys	ExitProcess(exitcode uint32)
+//sys	CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile int32) (handle Handle, err error) [failretval==InvalidHandle] = CreateFileW
+//sys	ReadFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error)
+//sys	WriteFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error)
+//sys	SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence uint32) (newlowoffset uint32, err error) [failretval==0xffffffff]
+//sys	CloseHandle(handle Handle) (err error)
+//sys	GetStdHandle(stdhandle uint32) (handle Handle, err error) [failretval==InvalidHandle]
+//sys	SetStdHandle(stdhandle uint32, handle Handle) (err error)
+//sys	findFirstFile1(name *uint16, data *win32finddata1) (handle Handle, err error) [failretval==InvalidHandle] = FindFirstFileW
+//sys	findNextFile1(handle Handle, data *win32finddata1) (err error) = FindNextFileW
+//sys	FindClose(handle Handle) (err error)
+//sys	GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (err error)
+//sys	GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) = GetCurrentDirectoryW
+//sys	SetCurrentDirectory(path *uint16) (err error) = SetCurrentDirectoryW
+//sys	CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) = CreateDirectoryW
+//sys	RemoveDirectory(path *uint16) (err error) = RemoveDirectoryW
+//sys	DeleteFile(path *uint16) (err error) = DeleteFileW
+//sys	MoveFile(from *uint16, to *uint16) (err error) = MoveFileW
+//sys	MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) = MoveFileExW
+//sys	GetComputerName(buf *uint16, n *uint32) (err error) = GetComputerNameW
+//sys	GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) = GetComputerNameExW
+//sys	SetEndOfFile(handle Handle) (err error)
+//sys	GetSystemTimeAsFileTime(time *Filetime)
+//sys	GetSystemTimePreciseAsFileTime(time *Filetime)
+//sys	GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) [failretval==0xffffffff]
+//sys	CreateIoCompletionPort(filehandle Handle, cphandle Handle, key uint32, threadcnt uint32) (handle Handle, err error)
+//sys	GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uint32, overlapped **Overlapped, timeout uint32) (err error)
+//sys	PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uint32, overlapped *Overlapped) (err error)
+//sys	CancelIo(s Handle) (err error)
+//sys	CancelIoEx(s Handle, o *Overlapped) (err error)
+//sys	CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityAttributes, threadSecurity *SecurityAttributes, inheritHandles bool, creationFlags uint32, env *uint16, currentDir *uint16, startupInfo *StartupInfo, outProcInfo *ProcessInformation) (err error) = CreateProcessW
+//sys	OpenProcess(da uint32, inheritHandle bool, pid uint32) (handle Handle, err error)
+//sys	TerminateProcess(handle Handle, exitcode uint32) (err error)
+//sys	GetExitCodeProcess(handle Handle, exitcode *uint32) (err error)
+//sys	GetStartupInfo(startupInfo *StartupInfo) (err error) = GetStartupInfoW
+//sys	GetCurrentProcess() (pseudoHandle Handle, err error)
+//sys	GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error)
+//sys	DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error)
+//sys	WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, err error) [failretval==0xffffffff]
+//sys	GetTempPath(buflen uint32, buf *uint16) (n uint32, err error) = GetTempPathW
+//sys	CreatePipe(readhandle *Handle, writehandle *Handle, sa *SecurityAttributes, size uint32) (err error)
+//sys	GetFileType(filehandle Handle) (n uint32, err error)
+//sys	CryptAcquireContext(provhandle *Handle, container *uint16, provider *uint16, provtype uint32, flags uint32) (err error) = advapi32.CryptAcquireContextW
+//sys	CryptReleaseContext(provhandle Handle, flags uint32) (err error) = advapi32.CryptReleaseContext
+//sys	CryptGenRandom(provhandle Handle, buflen uint32, buf *byte) (err error) = advapi32.CryptGenRandom
+//sys	GetEnvironmentStrings() (envs *uint16, err error) [failretval==nil] = kernel32.GetEnvironmentStringsW
+//sys	FreeEnvironmentStrings(envs *uint16) (err error) = kernel32.FreeEnvironmentStringsW
+//sys	GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32, err error) = kernel32.GetEnvironmentVariableW
+//sys	SetEnvironmentVariable(name *uint16, value *uint16) (err error) = kernel32.SetEnvironmentVariableW
+//sys	SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error)
+//sys	GetFileAttributes(name *uint16) (attrs uint32, err error) [failretval==INVALID_FILE_ATTRIBUTES] = kernel32.GetFileAttributesW
+//sys	SetFileAttributes(name *uint16, attrs uint32) (err error) = kernel32.SetFileAttributesW
+//sys	GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) = kernel32.GetFileAttributesExW
+//sys	GetCommandLine() (cmd *uint16) = kernel32.GetCommandLineW
+//sys	CommandLineToArgv(cmd *uint16, argc *int32) (argv *[8192]*[8192]uint16, err error) [failretval==nil] = shell32.CommandLineToArgvW
+//sys	LocalFree(hmem Handle) (handle Handle, err error) [failretval!=0]
+//sys	SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error)
+//sys	FlushFileBuffers(handle Handle) (err error)
+//sys	GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) (n uint32, err error) = kernel32.GetFullPathNameW
+//sys	GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err error) = kernel32.GetLongPathNameW
+//sys	GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uint32, err error) = kernel32.GetShortPathNameW
+//sys	CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxSizeHigh uint32, maxSizeLow uint32, name *uint16) (handle Handle, err error) = kernel32.CreateFileMappingW
+//sys	MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow uint32, length uintptr) (addr uintptr, err error)
+//sys	UnmapViewOfFile(addr uintptr) (err error)
+//sys	FlushViewOfFile(addr uintptr, length uintptr) (err error)
+//sys	VirtualLock(addr uintptr, length uintptr) (err error)
+//sys	VirtualUnlock(addr uintptr, length uintptr) (err error)
+//sys	VirtualAlloc(address uintptr, size uintptr, alloctype uint32, protect uint32) (value uintptr, err error) = kernel32.VirtualAlloc
+//sys	VirtualFree(address uintptr, size uintptr, freetype uint32) (err error) = kernel32.VirtualFree
+//sys	VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect *uint32) (err error) = kernel32.VirtualProtect
+//sys	TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint32, overlapped *Overlapped, transmitFileBuf *TransmitFileBuffers, flags uint32) (err error) = mswsock.TransmitFile
+//sys	ReadDirectoryChanges(handle Handle, buf *byte, buflen uint32, watchSubTree bool, mask uint32, retlen *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) = kernel32.ReadDirectoryChangesW
+//sys	CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) = crypt32.CertOpenSystemStoreW
+//sys   CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptProv uintptr, flags uint32, para uintptr) (handle Handle, err error) [failretval==InvalidHandle] = crypt32.CertOpenStore
+//sys	CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (context *CertContext, err error) [failretval==nil] = crypt32.CertEnumCertificatesInStore
+//sys   CertAddCertificateContextToStore(store Handle, certContext *CertContext, addDisposition uint32, storeContext **CertContext) (err error) = crypt32.CertAddCertificateContextToStore
+//sys	CertCloseStore(store Handle, flags uint32) (err error) = crypt32.CertCloseStore
+//sys   CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, additionalStore Handle, para *CertChainPara, flags uint32, reserved uintptr, chainCtx **CertChainContext) (err error) = crypt32.CertGetCertificateChain
+//sys   CertFreeCertificateChain(ctx *CertChainContext) = crypt32.CertFreeCertificateChain
+//sys   CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, encodedLen uint32) (context *CertContext, err error) [failretval==nil] = crypt32.CertCreateCertificateContext
+//sys   CertFreeCertificateContext(ctx *CertContext) (err error) = crypt32.CertFreeCertificateContext
+//sys   CertVerifyCertificateChainPolicy(policyOID uintptr, chain *CertChainContext, para *CertChainPolicyPara, status *CertChainPolicyStatus) (err error) = crypt32.CertVerifyCertificateChainPolicy
+//sys	RegOpenKeyEx(key Handle, subkey *uint16, options uint32, desiredAccess uint32, result *Handle) (regerrno error) = advapi32.RegOpenKeyExW
+//sys	RegCloseKey(key Handle) (regerrno error) = advapi32.RegCloseKey
+//sys	RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint32, subkeysLen *uint32, maxSubkeyLen *uint32, maxClassLen *uint32, valuesLen *uint32, maxValueNameLen *uint32, maxValueLen *uint32, saLen *uint32, lastWriteTime *Filetime) (regerrno error) = advapi32.RegQueryInfoKeyW
+//sys	RegEnumKeyEx(key Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, class *uint16, classLen *uint32, lastWriteTime *Filetime) (regerrno error) = advapi32.RegEnumKeyExW
+//sys	RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) = advapi32.RegQueryValueExW
+//sys	getCurrentProcessId() (pid uint32) = kernel32.GetCurrentProcessId
+//sys	GetConsoleMode(console Handle, mode *uint32) (err error) = kernel32.GetConsoleMode
+//sys	SetConsoleMode(console Handle, mode uint32) (err error) = kernel32.SetConsoleMode
+//sys	GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) = kernel32.GetConsoleScreenBufferInfo
+//sys	WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW
+//sys	ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW
+//sys	CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) [failretval==InvalidHandle] = kernel32.CreateToolhelp32Snapshot
+//sys	Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) = kernel32.Process32FirstW
+//sys	Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) = kernel32.Process32NextW
+//sys	DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBufferSize uint32, outBuffer *byte, outBufferSize uint32, bytesReturned *uint32, overlapped *Overlapped) (err error)
+// This function returns a 1-byte BOOLEAN rather than the 4-byte BOOL.
+//sys	CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) [failretval&0xff==0] = CreateSymbolicLinkW
+//sys	CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr) (err error) [failretval&0xff==0] = CreateHardLinkW
+//sys	GetCurrentThreadId() (id uint32)
+//sys	CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialState uint32, name *uint16) (handle Handle, err error) = kernel32.CreateEventW
+//sys	CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) = kernel32.CreateEventExW
+//sys	OpenEvent(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) = kernel32.OpenEventW
+//sys	SetEvent(event Handle) (err error) = kernel32.SetEvent
+//sys	ResetEvent(event Handle) (err error) = kernel32.ResetEvent
+//sys	PulseEvent(event Handle) (err error) = kernel32.PulseEvent
+
+// Volume Management Functions
+//sys	DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) = DefineDosDeviceW
+//sys	DeleteVolumeMountPoint(volumeMountPoint *uint16) (err error) = DeleteVolumeMountPointW
+//sys	FindFirstVolume(volumeName *uint16, bufferLength uint32) (handle Handle, err error) [failretval==InvalidHandle] = FindFirstVolumeW
+//sys	FindFirstVolumeMountPoint(rootPathName *uint16, volumeMountPoint *uint16, bufferLength uint32) (handle Handle, err error) [failretval==InvalidHandle] = FindFirstVolumeMountPointW
+//sys	FindNextVolume(findVolume Handle, volumeName *uint16, bufferLength uint32) (err error) = FindNextVolumeW
+//sys	FindNextVolumeMountPoint(findVolumeMountPoint Handle, volumeMountPoint *uint16, bufferLength uint32) (err error) = FindNextVolumeMountPointW
+//sys	FindVolumeClose(findVolume Handle) (err error)
+//sys	FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error)
+//sys	GetDriveType(rootPathName *uint16) (driveType uint32) = GetDriveTypeW
+//sys	GetLogicalDrives() (drivesBitMask uint32, err error) [failretval==0]
+//sys	GetLogicalDriveStrings(bufferLength uint32, buffer *uint16) (n uint32, err error) [failretval==0] = GetLogicalDriveStringsW
+//sys	GetVolumeInformation(rootPathName *uint16, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) = GetVolumeInformationW
+//sys	GetVolumeInformationByHandle(file Handle, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) = GetVolumeInformationByHandleW
+//sys	GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16, bufferlength uint32) (err error) = GetVolumeNameForVolumeMountPointW
+//sys	GetVolumePathName(fileName *uint16, volumePathName *uint16, bufferLength uint32) (err error) = GetVolumePathNameW
+//sys	GetVolumePathNamesForVolumeName(volumeName *uint16, volumePathNames *uint16, bufferLength uint32, returnLength *uint32) (err error) = GetVolumePathNamesForVolumeNameW
+//sys	QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) [failretval==0] = QueryDosDeviceW
+//sys	SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) = SetVolumeLabelW
+//sys	SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err error) = SetVolumeMountPointW
+
+// syscall interface implementation for other packages
+
+// GetProcAddressByOrdinal retrieves the address of the exported
+// function from module by ordinal.
+func GetProcAddressByOrdinal(module Handle, ordinal uintptr) (proc uintptr, err error) {
+	r0, _, e1 := syscall.Syscall(procGetProcAddress.Addr(), 2, uintptr(module), ordinal, 0)
+	proc = uintptr(r0)
+	if proc == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
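+// A minimal usage sketch (the module and ordinal below are illustrative):
+//
+//	mod, _ := LoadLibrary("shell32.dll")
+//	addr, err := GetProcAddressByOrdinal(mod, 6)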
+
+func Exit(code int) { ExitProcess(uint32(code)) }
+
+func makeInheritSa() *SecurityAttributes {
+	var sa SecurityAttributes
+	sa.Length = uint32(unsafe.Sizeof(sa))
+	sa.InheritHandle = 1
+	return &sa
+}
+
+func Open(path string, mode int, perm uint32) (fd Handle, err error) {
+	if len(path) == 0 {
+		return InvalidHandle, ERROR_FILE_NOT_FOUND
+	}
+	pathp, err := UTF16PtrFromString(path)
+	if err != nil {
+		return InvalidHandle, err
+	}
+	var access uint32
+	switch mode & (O_RDONLY | O_WRONLY | O_RDWR) {
+	case O_RDONLY:
+		access = GENERIC_READ
+	case O_WRONLY:
+		access = GENERIC_WRITE
+	case O_RDWR:
+		access = GENERIC_READ | GENERIC_WRITE
+	}
+	if mode&O_CREAT != 0 {
+		access |= GENERIC_WRITE
+	}
+	if mode&O_APPEND != 0 {
+		access &^= GENERIC_WRITE
+		access |= FILE_APPEND_DATA
+	}
+	sharemode := uint32(FILE_SHARE_READ | FILE_SHARE_WRITE)
+	var sa *SecurityAttributes
+	if mode&O_CLOEXEC == 0 {
+		sa = makeInheritSa()
+	}
+	var createmode uint32
+	switch {
+	case mode&(O_CREAT|O_EXCL) == (O_CREAT | O_EXCL):
+		createmode = CREATE_NEW
+	case mode&(O_CREAT|O_TRUNC) == (O_CREAT | O_TRUNC):
+		createmode = CREATE_ALWAYS
+	case mode&O_CREAT == O_CREAT:
+		createmode = OPEN_ALWAYS
+	case mode&O_TRUNC == O_TRUNC:
+		createmode = TRUNCATE_EXISTING
+	default:
+		createmode = OPEN_EXISTING
+	}
+	h, e := CreateFile(pathp, access, sharemode, sa, createmode, FILE_ATTRIBUTE_NORMAL, 0)
+	return h, e
+}
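+// For example (the path is illustrative), O_WRONLY|O_CREAT|O_APPEND maps to
+// access FILE_APPEND_DATA and createmode OPEN_ALWAYS:
+//
+//	h, err := Open(`C:\Temp\app.log`, O_WRONLY|O_CREAT|O_APPEND, 0)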
+
+func Read(fd Handle, p []byte) (n int, err error) {
+	var done uint32
+	e := ReadFile(fd, p, &done, nil)
+	if e != nil {
+		if e == ERROR_BROKEN_PIPE {
+			// NOTE(brainman): work around the fact that ERROR_BROKEN_PIPE is returned when reading EOF from stdin
+			return 0, nil
+		}
+		return 0, e
+	}
+	if raceenabled {
+		if done > 0 {
+			raceWriteRange(unsafe.Pointer(&p[0]), int(done))
+		}
+		raceAcquire(unsafe.Pointer(&ioSync))
+	}
+	return int(done), nil
+}
+
+func Write(fd Handle, p []byte) (n int, err error) {
+	if raceenabled {
+		raceReleaseMerge(unsafe.Pointer(&ioSync))
+	}
+	var done uint32
+	e := WriteFile(fd, p, &done, nil)
+	if e != nil {
+		return 0, e
+	}
+	if raceenabled && done > 0 {
+		raceReadRange(unsafe.Pointer(&p[0]), int(done))
+	}
+	return int(done), nil
+}
+
+var ioSync int64
+
+func Seek(fd Handle, offset int64, whence int) (newoffset int64, err error) {
+	var w uint32
+	switch whence {
+	case 0:
+		w = FILE_BEGIN
+	case 1:
+		w = FILE_CURRENT
+	case 2:
+		w = FILE_END
+	}
+	hi := int32(offset >> 32)
+	lo := int32(offset)
+	// Use GetFileType to check for pipes; a pipe cannot seek.
+	ft, _ := GetFileType(fd)
+	if ft == FILE_TYPE_PIPE {
+		return 0, syscall.EPIPE
+	}
+	rlo, e := SetFilePointer(fd, lo, &hi, w)
+	if e != nil {
+		return 0, e
+	}
+	return int64(hi)<<32 + int64(rlo), nil
+}
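+// (The 64-bit offset is split into 32-bit halves for SetFilePointer and
+// reassembled afterwards: e.g. offset 0x100000002 gives hi=1, lo=2.)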
+
+func Close(fd Handle) (err error) {
+	return CloseHandle(fd)
+}
+
+var (
+	Stdin  = getStdHandle(STD_INPUT_HANDLE)
+	Stdout = getStdHandle(STD_OUTPUT_HANDLE)
+	Stderr = getStdHandle(STD_ERROR_HANDLE)
+)
+
+func getStdHandle(stdhandle uint32) (fd Handle) {
+	r, _ := GetStdHandle(stdhandle)
+	CloseOnExec(r)
+	return r
+}
+
+const ImplementsGetwd = true
+
+func Getwd() (wd string, err error) {
+	b := make([]uint16, 300)
+	n, e := GetCurrentDirectory(uint32(len(b)), &b[0])
+	if e != nil {
+		return "", e
+	}
+	return string(utf16.Decode(b[0:n])), nil
+}
+
+func Chdir(path string) (err error) {
+	pathp, err := UTF16PtrFromString(path)
+	if err != nil {
+		return err
+	}
+	return SetCurrentDirectory(pathp)
+}
+
+func Mkdir(path string, mode uint32) (err error) {
+	pathp, err := UTF16PtrFromString(path)
+	if err != nil {
+		return err
+	}
+	return CreateDirectory(pathp, nil)
+}
+
+func Rmdir(path string) (err error) {
+	pathp, err := UTF16PtrFromString(path)
+	if err != nil {
+		return err
+	}
+	return RemoveDirectory(pathp)
+}
+
+func Unlink(path string) (err error) {
+	pathp, err := UTF16PtrFromString(path)
+	if err != nil {
+		return err
+	}
+	return DeleteFile(pathp)
+}
+
+func Rename(oldpath, newpath string) (err error) {
+	from, err := UTF16PtrFromString(oldpath)
+	if err != nil {
+		return err
+	}
+	to, err := UTF16PtrFromString(newpath)
+	if err != nil {
+		return err
+	}
+	return MoveFileEx(from, to, MOVEFILE_REPLACE_EXISTING)
+}
+
+func ComputerName() (name string, err error) {
+	var n uint32 = MAX_COMPUTERNAME_LENGTH + 1
+	b := make([]uint16, n)
+	e := GetComputerName(&b[0], &n)
+	if e != nil {
+		return "", e
+	}
+	return string(utf16.Decode(b[0:n])), nil
+}
+
+func Ftruncate(fd Handle, length int64) (err error) {
+	curoffset, e := Seek(fd, 0, 1)
+	if e != nil {
+		return e
+	}
+	defer Seek(fd, curoffset, 0)
+	_, e = Seek(fd, length, 0)
+	if e != nil {
+		return e
+	}
+	e = SetEndOfFile(fd)
+	if e != nil {
+		return e
+	}
+	return nil
+}
+
+func Gettimeofday(tv *Timeval) (err error) {
+	var ft Filetime
+	GetSystemTimeAsFileTime(&ft)
+	*tv = NsecToTimeval(ft.Nanoseconds())
+	return nil
+}
+
+func Pipe(p []Handle) (err error) {
+	if len(p) != 2 {
+		return syscall.EINVAL
+	}
+	var r, w Handle
+	e := CreatePipe(&r, &w, makeInheritSa(), 0)
+	if e != nil {
+		return e
+	}
+	p[0] = r
+	p[1] = w
+	return nil
+}
+
+func Utimes(path string, tv []Timeval) (err error) {
+	if len(tv) != 2 {
+		return syscall.EINVAL
+	}
+	pathp, e := UTF16PtrFromString(path)
+	if e != nil {
+		return e
+	}
+	h, e := CreateFile(pathp,
+		FILE_WRITE_ATTRIBUTES, FILE_SHARE_WRITE, nil,
+		OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, 0)
+	if e != nil {
+		return e
+	}
+	defer Close(h)
+	a := NsecToFiletime(tv[0].Nanoseconds())
+	w := NsecToFiletime(tv[1].Nanoseconds())
+	return SetFileTime(h, nil, &a, &w)
+}
+
+func UtimesNano(path string, ts []Timespec) (err error) {
+	if len(ts) != 2 {
+		return syscall.EINVAL
+	}
+	pathp, e := UTF16PtrFromString(path)
+	if e != nil {
+		return e
+	}
+	h, e := CreateFile(pathp,
+		FILE_WRITE_ATTRIBUTES, FILE_SHARE_WRITE, nil,
+		OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, 0)
+	if e != nil {
+		return e
+	}
+	defer Close(h)
+	a := NsecToFiletime(TimespecToNsec(ts[0]))
+	w := NsecToFiletime(TimespecToNsec(ts[1]))
+	return SetFileTime(h, nil, &a, &w)
+}
+
+func Fsync(fd Handle) (err error) {
+	return FlushFileBuffers(fd)
+}
+
+func Chmod(path string, mode uint32) (err error) {
+	if mode == 0 {
+		return syscall.EINVAL
+	}
+	p, e := UTF16PtrFromString(path)
+	if e != nil {
+		return e
+	}
+	attrs, e := GetFileAttributes(p)
+	if e != nil {
+		return e
+	}
+	if mode&S_IWRITE != 0 {
+		attrs &^= FILE_ATTRIBUTE_READONLY
+	} else {
+		attrs |= FILE_ATTRIBUTE_READONLY
+	}
+	return SetFileAttributes(p, attrs)
+}
+
+func LoadGetSystemTimePreciseAsFileTime() error {
+	return procGetSystemTimePreciseAsFileTime.Find()
+}
+
+func LoadCancelIoEx() error {
+	return procCancelIoEx.Find()
+}
+
+func LoadSetFileCompletionNotificationModes() error {
+	return procSetFileCompletionNotificationModes.Find()
+}
+
+// net api calls
+
+const socket_error = uintptr(^uint32(0))
+
+//sys	WSAStartup(verreq uint32, data *WSAData) (sockerr error) = ws2_32.WSAStartup
+//sys	WSACleanup() (err error) [failretval==socket_error] = ws2_32.WSACleanup
+//sys	WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbob uint32, cbbr *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) [failretval==socket_error] = ws2_32.WSAIoctl
+//sys	socket(af int32, typ int32, protocol int32) (handle Handle, err error) [failretval==InvalidHandle] = ws2_32.socket
+//sys	Setsockopt(s Handle, level int32, optname int32, optval *byte, optlen int32) (err error) [failretval==socket_error] = ws2_32.setsockopt
+//sys	Getsockopt(s Handle, level int32, optname int32, optval *byte, optlen *int32) (err error) [failretval==socket_error] = ws2_32.getsockopt
+//sys	bind(s Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socket_error] = ws2_32.bind
+//sys	connect(s Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socket_error] = ws2_32.connect
+//sys	getsockname(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) [failretval==socket_error] = ws2_32.getsockname
+//sys	getpeername(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) [failretval==socket_error] = ws2_32.getpeername
+//sys	listen(s Handle, backlog int32) (err error) [failretval==socket_error] = ws2_32.listen
+//sys	shutdown(s Handle, how int32) (err error) [failretval==socket_error] = ws2_32.shutdown
+//sys	Closesocket(s Handle) (err error) [failretval==socket_error] = ws2_32.closesocket
+//sys	AcceptEx(ls Handle, as Handle, buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, recvd *uint32, overlapped *Overlapped) (err error) = mswsock.AcceptEx
+//sys	GetAcceptExSockaddrs(buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, lrsa **RawSockaddrAny, lrsalen *int32, rrsa **RawSockaddrAny, rrsalen *int32) = mswsock.GetAcceptExSockaddrs
+//sys	WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSARecv
+//sys	WSASend(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSASend
+//sys	WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32,  from *RawSockaddrAny, fromlen *int32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSARecvFrom
+//sys	WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *RawSockaddrAny, tolen int32,  overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSASendTo
+//sys	GetHostByName(name string) (h *Hostent, err error) [failretval==nil] = ws2_32.gethostbyname
+//sys	GetServByName(name string, proto string) (s *Servent, err error) [failretval==nil] = ws2_32.getservbyname
+//sys	Ntohs(netshort uint16) (u uint16) = ws2_32.ntohs
+//sys	GetProtoByName(name string) (p *Protoent, err error) [failretval==nil] = ws2_32.getprotobyname
+//sys	DnsQuery(name string, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) = dnsapi.DnsQuery_W
+//sys	DnsRecordListFree(rl *DNSRecord, freetype uint32) = dnsapi.DnsRecordListFree
+//sys	DnsNameCompare(name1 *uint16, name2 *uint16) (same bool) = dnsapi.DnsNameCompare_W
+//sys	GetAddrInfoW(nodename *uint16, servicename *uint16, hints *AddrinfoW, result **AddrinfoW) (sockerr error) = ws2_32.GetAddrInfoW
+//sys	FreeAddrInfoW(addrinfo *AddrinfoW) = ws2_32.FreeAddrInfoW
+//sys	GetIfEntry(pIfRow *MibIfRow) (errcode error) = iphlpapi.GetIfEntry
+//sys	GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) = iphlpapi.GetAdaptersInfo
+//sys	SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error) = kernel32.SetFileCompletionNotificationModes
+//sys	WSAEnumProtocols(protocols *int32, protocolBuffer *WSAProtocolInfo, bufferLength *uint32) (n int32, err error) [failretval==-1] = ws2_32.WSAEnumProtocolsW
+//sys	GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) = iphlpapi.GetAdaptersAddresses
+//sys	GetACP() (acp uint32) = kernel32.GetACP
+//sys	MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) = kernel32.MultiByteToWideChar
+
+// For testing: clients can set this flag to force
+// creation of IPv6 sockets to return EAFNOSUPPORT.
+var SocketDisableIPv6 bool
+
+type RawSockaddrInet4 struct {
+	Family uint16
+	Port   uint16
+	Addr   [4]byte /* in_addr */
+	Zero   [8]uint8
+}
+
+type RawSockaddrInet6 struct {
+	Family   uint16
+	Port     uint16
+	Flowinfo uint32
+	Addr     [16]byte /* in6_addr */
+	Scope_id uint32
+}
+
+type RawSockaddr struct {
+	Family uint16
+	Data   [14]int8
+}
+
+type RawSockaddrAny struct {
+	Addr RawSockaddr
+	Pad  [100]int8
+}
+
+type Sockaddr interface {
+	sockaddr() (ptr unsafe.Pointer, len int32, err error) // lowercase; only we can define Sockaddrs
+}
+
+type SockaddrInet4 struct {
+	Port int
+	Addr [4]byte
+	raw  RawSockaddrInet4
+}
+
+func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, int32, error) {
+	if sa.Port < 0 || sa.Port > 0xFFFF {
+		return nil, 0, syscall.EINVAL
+	}
+	sa.raw.Family = AF_INET
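+	// The port is stored in network byte order (big endian):
+	// e.g. Port 8080 (0x1f90) becomes p[0]=0x1f, p[1]=0x90.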
+	p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port))
+	p[0] = byte(sa.Port >> 8)
+	p[1] = byte(sa.Port)
+	for i := 0; i < len(sa.Addr); i++ {
+		sa.raw.Addr[i] = sa.Addr[i]
+	}
+	return unsafe.Pointer(&sa.raw), int32(unsafe.Sizeof(sa.raw)), nil
+}
+
+type SockaddrInet6 struct {
+	Port   int
+	ZoneId uint32
+	Addr   [16]byte
+	raw    RawSockaddrInet6
+}
+
+func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, int32, error) {
+	if sa.Port < 0 || sa.Port > 0xFFFF {
+		return nil, 0, syscall.EINVAL
+	}
+	sa.raw.Family = AF_INET6
+	p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port))
+	p[0] = byte(sa.Port >> 8)
+	p[1] = byte(sa.Port)
+	sa.raw.Scope_id = sa.ZoneId
+	for i := 0; i < len(sa.Addr); i++ {
+		sa.raw.Addr[i] = sa.Addr[i]
+	}
+	return unsafe.Pointer(&sa.raw), int32(unsafe.Sizeof(sa.raw)), nil
+}
+
+type RawSockaddrUnix struct {
+	Family uint16
+	Path   [UNIX_PATH_MAX]int8
+}
+
+type SockaddrUnix struct {
+	Name string
+	raw  RawSockaddrUnix
+}
+
+func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, int32, error) {
+	name := sa.Name
+	n := len(name)
+	if n > len(sa.raw.Path) {
+		return nil, 0, syscall.EINVAL
+	}
+	if n == len(sa.raw.Path) && name[0] != '@' {
+		return nil, 0, syscall.EINVAL
+	}
+	sa.raw.Family = AF_UNIX
+	for i := 0; i < n; i++ {
+		sa.raw.Path[i] = int8(name[i])
+	}
+	// length is family (uint16), name, NUL.
+	sl := int32(2)
+	if n > 0 {
+		sl += int32(n) + 1
+	}
+	if sa.raw.Path[0] == '@' {
+		sa.raw.Path[0] = 0
+		// Don't count trailing NUL for abstract address.
+		sl--
+	}
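+	// For example, Name "sock" yields sl = 2+4+1 = 7, while the
+	// abstract name "@abc" drops the trailing NUL, yielding sl = 6.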
+
+	return unsafe.Pointer(&sa.raw), sl, nil
+}
+
+func (rsa *RawSockaddrAny) Sockaddr() (Sockaddr, error) {
+	switch rsa.Addr.Family {
+	case AF_UNIX:
+		pp := (*RawSockaddrUnix)(unsafe.Pointer(rsa))
+		sa := new(SockaddrUnix)
+		if pp.Path[0] == 0 {
+			// "Abstract" Unix domain socket.
+			// Rewrite leading NUL as @ for textual display.
+			// (This is the standard convention.)
+			// Overwriting the buffer in place is not ideal,
+			// but the callers below don't care.
+			pp.Path[0] = '@'
+		}
+
+		// Assume path ends at NUL.
+		// This is not technically the Linux semantics for
+		// abstract Unix domain sockets--they are supposed
+		// to be uninterpreted fixed-size binary blobs--but
+		// everyone uses this convention.
+		n := 0
+		for n < len(pp.Path) && pp.Path[n] != 0 {
+			n++
+		}
+		bytes := (*[10000]byte)(unsafe.Pointer(&pp.Path[0]))[0:n]
+		sa.Name = string(bytes)
+		return sa, nil
+
+	case AF_INET:
+		pp := (*RawSockaddrInet4)(unsafe.Pointer(rsa))
+		sa := new(SockaddrInet4)
+		p := (*[2]byte)(unsafe.Pointer(&pp.Port))
+		sa.Port = int(p[0])<<8 + int(p[1])
+		for i := 0; i < len(sa.Addr); i++ {
+			sa.Addr[i] = pp.Addr[i]
+		}
+		return sa, nil
+
+	case AF_INET6:
+		pp := (*RawSockaddrInet6)(unsafe.Pointer(rsa))
+		sa := new(SockaddrInet6)
+		p := (*[2]byte)(unsafe.Pointer(&pp.Port))
+		sa.Port = int(p[0])<<8 + int(p[1])
+		sa.ZoneId = pp.Scope_id
+		for i := 0; i < len(sa.Addr); i++ {
+			sa.Addr[i] = pp.Addr[i]
+		}
+		return sa, nil
+	}
+	return nil, syscall.EAFNOSUPPORT
+}
+
+func Socket(domain, typ, proto int) (fd Handle, err error) {
+	if domain == AF_INET6 && SocketDisableIPv6 {
+		return InvalidHandle, syscall.EAFNOSUPPORT
+	}
+	return socket(int32(domain), int32(typ), int32(proto))
+}
+
+func SetsockoptInt(fd Handle, level, opt int, value int) (err error) {
+	v := int32(value)
+	return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(&v)), int32(unsafe.Sizeof(v)))
+}
+
+func Bind(fd Handle, sa Sockaddr) (err error) {
+	ptr, n, err := sa.sockaddr()
+	if err != nil {
+		return err
+	}
+	return bind(fd, ptr, n)
+}
+
+func Connect(fd Handle, sa Sockaddr) (err error) {
+	ptr, n, err := sa.sockaddr()
+	if err != nil {
+		return err
+	}
+	return connect(fd, ptr, n)
+}
+
+func Getsockname(fd Handle) (sa Sockaddr, err error) {
+	var rsa RawSockaddrAny
+	l := int32(unsafe.Sizeof(rsa))
+	if err = getsockname(fd, &rsa, &l); err != nil {
+		return
+	}
+	return rsa.Sockaddr()
+}
+
+func Getpeername(fd Handle) (sa Sockaddr, err error) {
+	var rsa RawSockaddrAny
+	l := int32(unsafe.Sizeof(rsa))
+	if err = getpeername(fd, &rsa, &l); err != nil {
+		return
+	}
+	return rsa.Sockaddr()
+}
+
+func Listen(s Handle, n int) (err error) {
+	return listen(s, int32(n))
+}
+
+func Shutdown(fd Handle, how int) (err error) {
+	return shutdown(fd, int32(how))
+}
+
+func WSASendto(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to Sockaddr, overlapped *Overlapped, croutine *byte) (err error) {
+	rsa, l, err := to.sockaddr()
+	if err != nil {
+		return err
+	}
+	return WSASendTo(s, bufs, bufcnt, sent, flags, (*RawSockaddrAny)(unsafe.Pointer(rsa)), l, overlapped, croutine)
+}
+
+func LoadGetAddrInfo() error {
+	return procGetAddrInfoW.Find()
+}
+
+var connectExFunc struct {
+	once sync.Once
+	addr uintptr
+	err  error
+}
+
+func LoadConnectEx() error {
+	connectExFunc.once.Do(func() {
+		var s Handle
+		s, connectExFunc.err = Socket(AF_INET, SOCK_STREAM, IPPROTO_TCP)
+		if connectExFunc.err != nil {
+			return
+		}
+		defer CloseHandle(s)
+		var n uint32
+		connectExFunc.err = WSAIoctl(s,
+			SIO_GET_EXTENSION_FUNCTION_POINTER,
+			(*byte)(unsafe.Pointer(&WSAID_CONNECTEX)),
+			uint32(unsafe.Sizeof(WSAID_CONNECTEX)),
+			(*byte)(unsafe.Pointer(&connectExFunc.addr)),
+			uint32(unsafe.Sizeof(connectExFunc.addr)),
+			&n, nil, 0)
+	})
+	return connectExFunc.err
+}
+
+func connectEx(s Handle, name unsafe.Pointer, namelen int32, sendBuf *byte, sendDataLen uint32, bytesSent *uint32, overlapped *Overlapped) (err error) {
+	r1, _, e1 := syscall.Syscall9(connectExFunc.addr, 7, uintptr(s), uintptr(name), uintptr(namelen), uintptr(unsafe.Pointer(sendBuf)), uintptr(sendDataLen), uintptr(unsafe.Pointer(bytesSent)), uintptr(unsafe.Pointer(overlapped)), 0, 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = error(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func ConnectEx(fd Handle, sa Sockaddr, sendBuf *byte, sendDataLen uint32, bytesSent *uint32, overlapped *Overlapped) error {
+	err := LoadConnectEx()
+	if err != nil {
+		return errorspkg.New("failed to find ConnectEx: " + err.Error())
+	}
+	ptr, n, err := sa.sockaddr()
+	if err != nil {
+		return err
+	}
+	return connectEx(fd, ptr, n, sendBuf, sendDataLen, bytesSent, overlapped)
+}
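+// A hedged usage sketch: ConnectEx requires an already-bound socket and
+// completes asynchronously through the supplied Overlapped (the address
+// and port below are illustrative):
+//
+//	s, _ := Socket(AF_INET, SOCK_STREAM, IPPROTO_TCP)
+//	_ = Bind(s, &SockaddrInet4{}) // ConnectEx needs the socket bound first
+//	var o Overlapped
+//	err := ConnectEx(s, &SockaddrInet4{Port: 80, Addr: [4]byte{192, 0, 2, 1}}, nil, 0, nil, &o)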
+
+var sendRecvMsgFunc struct {
+	once     sync.Once
+	sendAddr uintptr
+	recvAddr uintptr
+	err      error
+}
+
+func loadWSASendRecvMsg() error {
+	sendRecvMsgFunc.once.Do(func() {
+		var s Handle
+		s, sendRecvMsgFunc.err = Socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP)
+		if sendRecvMsgFunc.err != nil {
+			return
+		}
+		defer CloseHandle(s)
+		var n uint32
+		sendRecvMsgFunc.err = WSAIoctl(s,
+			SIO_GET_EXTENSION_FUNCTION_POINTER,
+			(*byte)(unsafe.Pointer(&WSAID_WSARECVMSG)),
+			uint32(unsafe.Sizeof(WSAID_WSARECVMSG)),
+			(*byte)(unsafe.Pointer(&sendRecvMsgFunc.recvAddr)),
+			uint32(unsafe.Sizeof(sendRecvMsgFunc.recvAddr)),
+			&n, nil, 0)
+		if sendRecvMsgFunc.err != nil {
+			return
+		}
+		sendRecvMsgFunc.err = WSAIoctl(s,
+			SIO_GET_EXTENSION_FUNCTION_POINTER,
+			(*byte)(unsafe.Pointer(&WSAID_WSASENDMSG)),
+			uint32(unsafe.Sizeof(WSAID_WSASENDMSG)),
+			(*byte)(unsafe.Pointer(&sendRecvMsgFunc.sendAddr)),
+			uint32(unsafe.Sizeof(sendRecvMsgFunc.sendAddr)),
+			&n, nil, 0)
+	})
+	return sendRecvMsgFunc.err
+}
+
+func WSASendMsg(fd Handle, msg *WSAMsg, flags uint32, bytesSent *uint32, overlapped *Overlapped, croutine *byte) error {
+	err := loadWSASendRecvMsg()
+	if err != nil {
+		return err
+	}
+	r1, _, e1 := syscall.Syscall6(sendRecvMsgFunc.sendAddr, 6, uintptr(fd), uintptr(unsafe.Pointer(msg)), uintptr(flags), uintptr(unsafe.Pointer(bytesSent)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)))
+	if r1 == socket_error {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return err
+}
+
+func WSARecvMsg(fd Handle, msg *WSAMsg, bytesReceived *uint32, overlapped *Overlapped, croutine *byte) error {
+	err := loadWSASendRecvMsg()
+	if err != nil {
+		return err
+	}
+	r1, _, e1 := syscall.Syscall6(sendRecvMsgFunc.recvAddr, 5, uintptr(fd), uintptr(unsafe.Pointer(msg)), uintptr(unsafe.Pointer(bytesReceived)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0)
+	if r1 == socket_error {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return err
+}
+
+// Invented structures to support what package os expects.
+type Rusage struct {
+	CreationTime Filetime
+	ExitTime     Filetime
+	KernelTime   Filetime
+	UserTime     Filetime
+}
+
+type WaitStatus struct {
+	ExitCode uint32
+}
+
+func (w WaitStatus) Exited() bool { return true }
+
+func (w WaitStatus) ExitStatus() int { return int(w.ExitCode) }
+
+func (w WaitStatus) Signal() Signal { return -1 }
+
+func (w WaitStatus) CoreDump() bool { return false }
+
+func (w WaitStatus) Stopped() bool { return false }
+
+func (w WaitStatus) Continued() bool { return false }
+
+func (w WaitStatus) StopSignal() Signal { return -1 }
+
+func (w WaitStatus) Signaled() bool { return false }
+
+func (w WaitStatus) TrapCause() int { return -1 }
+
+// Timespec is an invented structure on Windows, but here for
+// consistency with the corresponding package for other operating systems.
+type Timespec struct {
+	Sec  int64
+	Nsec int64
+}
+
+func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
+
+func NsecToTimespec(nsec int64) (ts Timespec) {
+	ts.Sec = nsec / 1e9
+	ts.Nsec = nsec % 1e9
+	return
+}
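+// For example, NsecToTimespec(1500000000) yields {Sec: 1, Nsec: 500000000}.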
+
+// TODO(brainman): fix all needed for net
+
+func Accept(fd Handle) (nfd Handle, sa Sockaddr, err error) { return 0, nil, syscall.EWINDOWS }
+func Recvfrom(fd Handle, p []byte, flags int) (n int, from Sockaddr, err error) {
+	return 0, nil, syscall.EWINDOWS
+}
+func Sendto(fd Handle, p []byte, flags int, to Sockaddr) (err error)       { return syscall.EWINDOWS }
+func SetsockoptTimeval(fd Handle, level, opt int, tv *Timeval) (err error) { return syscall.EWINDOWS }
+
+// The Linger struct is wrong but we only noticed after Go 1.
+// sysLinger is the real system call structure.
+
+// BUG(brainman): The definition of Linger is not appropriate for direct use
+// with Setsockopt and Getsockopt.
+// Use SetsockoptLinger instead.
+
+type Linger struct {
+	Onoff  int32
+	Linger int32
+}
+
+type sysLinger struct {
+	Onoff  uint16
+	Linger uint16
+}
+
+type IPMreq struct {
+	Multiaddr [4]byte /* in_addr */
+	Interface [4]byte /* in_addr */
+}
+
+type IPv6Mreq struct {
+	Multiaddr [16]byte /* in6_addr */
+	Interface uint32
+}
+
+func GetsockoptInt(fd Handle, level, opt int) (int, error) { return -1, syscall.EWINDOWS }
+
+func SetsockoptLinger(fd Handle, level, opt int, l *Linger) (err error) {
+	sys := sysLinger{Onoff: uint16(l.Onoff), Linger: uint16(l.Linger)}
+	return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(&sys)), int32(unsafe.Sizeof(sys)))
+}
+
+func SetsockoptInet4Addr(fd Handle, level, opt int, value [4]byte) (err error) {
+	return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(&value[0])), 4)
+}
+func SetsockoptIPMreq(fd Handle, level, opt int, mreq *IPMreq) (err error) {
+	return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(mreq)), int32(unsafe.Sizeof(*mreq)))
+}
+func SetsockoptIPv6Mreq(fd Handle, level, opt int, mreq *IPv6Mreq) (err error) {
+	return syscall.EWINDOWS
+}
+
+func Getpid() (pid int) { return int(getCurrentProcessId()) }
+
+func FindFirstFile(name *uint16, data *Win32finddata) (handle Handle, err error) {
+	// NOTE(rsc): The Win32finddata struct is wrong for the system call:
+	// the two paths are each one uint16 short. Use the correct struct,
+	// a win32finddata1, and then copy the results out.
+	// There is no loss of expressivity here, because the final
+	// uint16, if it is used, is supposed to be a NUL, and Go doesn't need that.
+	// For Go 1.1, we might avoid the allocation of win32finddata1 here
+	// by adding a final Bug [2]uint16 field to the struct and then
+	// adjusting the fields in the result directly.
+	var data1 win32finddata1
+	handle, err = findFirstFile1(name, &data1)
+	if err == nil {
+		copyFindData(data, &data1)
+	}
+	return
+}
+
+func FindNextFile(handle Handle, data *Win32finddata) (err error) {
+	var data1 win32finddata1
+	err = findNextFile1(handle, &data1)
+	if err == nil {
+		copyFindData(data, &data1)
+	}
+	return
+}
+
+func getProcessEntry(pid int) (*ProcessEntry32, error) {
+	snapshot, err := CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0)
+	if err != nil {
+		return nil, err
+	}
+	defer CloseHandle(snapshot)
+	var procEntry ProcessEntry32
+	procEntry.Size = uint32(unsafe.Sizeof(procEntry))
+	if err = Process32First(snapshot, &procEntry); err != nil {
+		return nil, err
+	}
+	for {
+		if procEntry.ProcessID == uint32(pid) {
+			return &procEntry, nil
+		}
+		err = Process32Next(snapshot, &procEntry)
+		if err != nil {
+			return nil, err
+		}
+	}
+}
+
+func Getppid() (ppid int) {
+	pe, err := getProcessEntry(Getpid())
+	if err != nil {
+		return -1
+	}
+	return int(pe.ParentProcessID)
+}
+
+// TODO(brainman): fix all needed for os
+func Fchdir(fd Handle) (err error)             { return syscall.EWINDOWS }
+func Link(oldpath, newpath string) (err error) { return syscall.EWINDOWS }
+func Symlink(path, link string) (err error)    { return syscall.EWINDOWS }
+
+func Fchmod(fd Handle, mode uint32) (err error)        { return syscall.EWINDOWS }
+func Chown(path string, uid int, gid int) (err error)  { return syscall.EWINDOWS }
+func Lchown(path string, uid int, gid int) (err error) { return syscall.EWINDOWS }
+func Fchown(fd Handle, uid int, gid int) (err error)   { return syscall.EWINDOWS }
+
+func Getuid() (uid int)                  { return -1 }
+func Geteuid() (euid int)                { return -1 }
+func Getgid() (gid int)                  { return -1 }
+func Getegid() (egid int)                { return -1 }
+func Getgroups() (gids []int, err error) { return nil, syscall.EWINDOWS }
+
+type Signal int
+
+func (s Signal) Signal() {}
+
+func (s Signal) String() string {
+	if 0 <= s && int(s) < len(signals) {
+		str := signals[s]
+		if str != "" {
+			return str
+		}
+	}
+	return "signal " + itoa(int(s))
+}
+
+func LoadCreateSymbolicLink() error {
+	return procCreateSymbolicLinkW.Find()
+}
+
+// Readlink returns the destination of the named symbolic link.
+func Readlink(path string, buf []byte) (n int, err error) {
+	fd, err := CreateFile(StringToUTF16Ptr(path), GENERIC_READ, 0, nil, OPEN_EXISTING,
+		FILE_FLAG_OPEN_REPARSE_POINT|FILE_FLAG_BACKUP_SEMANTICS, 0)
+	if err != nil {
+		return -1, err
+	}
+	defer CloseHandle(fd)
+
+	rdbbuf := make([]byte, MAXIMUM_REPARSE_DATA_BUFFER_SIZE)
+	var bytesReturned uint32
+	err = DeviceIoControl(fd, FSCTL_GET_REPARSE_POINT, nil, 0, &rdbbuf[0], uint32(len(rdbbuf)), &bytesReturned, nil)
+	if err != nil {
+		return -1, err
+	}
+
+	rdb := (*reparseDataBuffer)(unsafe.Pointer(&rdbbuf[0]))
+	var s string
+	switch rdb.ReparseTag {
+	case IO_REPARSE_TAG_SYMLINK:
+		data := (*symbolicLinkReparseBuffer)(unsafe.Pointer(&rdb.reparseBuffer))
+		p := (*[0xffff]uint16)(unsafe.Pointer(&data.PathBuffer[0]))
+		s = UTF16ToString(p[data.PrintNameOffset/2 : (data.PrintNameOffset+data.PrintNameLength)/2])
+	case IO_REPARSE_TAG_MOUNT_POINT:
+		data := (*mountPointReparseBuffer)(unsafe.Pointer(&rdb.reparseBuffer))
+		p := (*[0xffff]uint16)(unsafe.Pointer(&data.PathBuffer[0]))
+		s = UTF16ToString(p[data.PrintNameOffset/2 : (data.PrintNameOffset+data.PrintNameLength)/2])
+	default:
+		// the path is not a symlink or junction but another type of reparse
+		// point
+		return -1, syscall.ENOENT
+	}
+	n = copy(buf, []byte(s))
+
+	return n, nil
+}
diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go
new file mode 100644
index 0000000..141ca81
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/types_windows.go
@@ -0,0 +1,1469 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package windows
+
+import "syscall"
+
+const (
+	// Windows errors.
+	ERROR_FILE_NOT_FOUND         syscall.Errno = 2
+	ERROR_PATH_NOT_FOUND         syscall.Errno = 3
+	ERROR_ACCESS_DENIED          syscall.Errno = 5
+	ERROR_NO_MORE_FILES          syscall.Errno = 18
+	ERROR_HANDLE_EOF             syscall.Errno = 38
+	ERROR_NETNAME_DELETED        syscall.Errno = 64
+	ERROR_FILE_EXISTS            syscall.Errno = 80
+	ERROR_BROKEN_PIPE            syscall.Errno = 109
+	ERROR_BUFFER_OVERFLOW        syscall.Errno = 111
+	ERROR_INSUFFICIENT_BUFFER    syscall.Errno = 122
+	ERROR_MOD_NOT_FOUND          syscall.Errno = 126
+	ERROR_PROC_NOT_FOUND         syscall.Errno = 127
+	ERROR_ALREADY_EXISTS         syscall.Errno = 183
+	ERROR_ENVVAR_NOT_FOUND       syscall.Errno = 203
+	ERROR_MORE_DATA              syscall.Errno = 234
+	ERROR_OPERATION_ABORTED      syscall.Errno = 995
+	ERROR_IO_PENDING             syscall.Errno = 997
+	ERROR_SERVICE_SPECIFIC_ERROR syscall.Errno = 1066
+	ERROR_NOT_FOUND              syscall.Errno = 1168
+	ERROR_PRIVILEGE_NOT_HELD     syscall.Errno = 1314
+	WSAEACCES                    syscall.Errno = 10013
+	WSAEMSGSIZE                  syscall.Errno = 10040
+	WSAECONNRESET                syscall.Errno = 10054
+)
+
+const (
+	// Invented values to support what package os expects.
+	O_RDONLY   = 0x00000
+	O_WRONLY   = 0x00001
+	O_RDWR     = 0x00002
+	O_CREAT    = 0x00040
+	O_EXCL     = 0x00080
+	O_NOCTTY   = 0x00100
+	O_TRUNC    = 0x00200
+	O_NONBLOCK = 0x00800
+	O_APPEND   = 0x00400
+	O_SYNC     = 0x01000
+	O_ASYNC    = 0x02000
+	O_CLOEXEC  = 0x80000
+)
+
+const (
+	// More invented values for signals
+	SIGHUP  = Signal(0x1)
+	SIGINT  = Signal(0x2)
+	SIGQUIT = Signal(0x3)
+	SIGILL  = Signal(0x4)
+	SIGTRAP = Signal(0x5)
+	SIGABRT = Signal(0x6)
+	SIGBUS  = Signal(0x7)
+	SIGFPE  = Signal(0x8)
+	SIGKILL = Signal(0x9)
+	SIGSEGV = Signal(0xb)
+	SIGPIPE = Signal(0xd)
+	SIGALRM = Signal(0xe)
+	SIGTERM = Signal(0xf)
+)
+
+var signals = [...]string{
+	1:  "hangup",
+	2:  "interrupt",
+	3:  "quit",
+	4:  "illegal instruction",
+	5:  "trace/breakpoint trap",
+	6:  "aborted",
+	7:  "bus error",
+	8:  "floating point exception",
+	9:  "killed",
+	10: "user defined signal 1",
+	11: "segmentation fault",
+	12: "user defined signal 2",
+	13: "broken pipe",
+	14: "alarm clock",
+	15: "terminated",
+}
+
+const (
+	GENERIC_READ    = 0x80000000
+	GENERIC_WRITE   = 0x40000000
+	GENERIC_EXECUTE = 0x20000000
+	GENERIC_ALL     = 0x10000000
+
+	FILE_LIST_DIRECTORY   = 0x00000001
+	FILE_APPEND_DATA      = 0x00000004
+	FILE_WRITE_ATTRIBUTES = 0x00000100
+
+	FILE_SHARE_READ   = 0x00000001
+	FILE_SHARE_WRITE  = 0x00000002
+	FILE_SHARE_DELETE = 0x00000004
+
+	FILE_ATTRIBUTE_READONLY              = 0x00000001
+	FILE_ATTRIBUTE_HIDDEN                = 0x00000002
+	FILE_ATTRIBUTE_SYSTEM                = 0x00000004
+	FILE_ATTRIBUTE_DIRECTORY             = 0x00000010
+	FILE_ATTRIBUTE_ARCHIVE               = 0x00000020
+	FILE_ATTRIBUTE_DEVICE                = 0x00000040
+	FILE_ATTRIBUTE_NORMAL                = 0x00000080
+	FILE_ATTRIBUTE_TEMPORARY             = 0x00000100
+	FILE_ATTRIBUTE_SPARSE_FILE           = 0x00000200
+	FILE_ATTRIBUTE_REPARSE_POINT         = 0x00000400
+	FILE_ATTRIBUTE_COMPRESSED            = 0x00000800
+	FILE_ATTRIBUTE_OFFLINE               = 0x00001000
+	FILE_ATTRIBUTE_NOT_CONTENT_INDEXED   = 0x00002000
+	FILE_ATTRIBUTE_ENCRYPTED             = 0x00004000
+	FILE_ATTRIBUTE_INTEGRITY_STREAM      = 0x00008000
+	FILE_ATTRIBUTE_VIRTUAL               = 0x00010000
+	FILE_ATTRIBUTE_NO_SCRUB_DATA         = 0x00020000
+	FILE_ATTRIBUTE_RECALL_ON_OPEN        = 0x00040000
+	FILE_ATTRIBUTE_RECALL_ON_DATA_ACCESS = 0x00400000
+
+	INVALID_FILE_ATTRIBUTES = 0xffffffff
+
+	CREATE_NEW        = 1
+	CREATE_ALWAYS     = 2
+	OPEN_EXISTING     = 3
+	OPEN_ALWAYS       = 4
+	TRUNCATE_EXISTING = 5
+
+	FILE_FLAG_OPEN_REPARSE_POINT = 0x00200000
+	FILE_FLAG_BACKUP_SEMANTICS   = 0x02000000
+	FILE_FLAG_OVERLAPPED         = 0x40000000
+
+	HANDLE_FLAG_INHERIT    = 0x00000001
+	STARTF_USESTDHANDLES   = 0x00000100
+	STARTF_USESHOWWINDOW   = 0x00000001
+	DUPLICATE_CLOSE_SOURCE = 0x00000001
+	DUPLICATE_SAME_ACCESS  = 0x00000002
+
+	STD_INPUT_HANDLE  = -10 & (1<<32 - 1)
+	STD_OUTPUT_HANDLE = -11 & (1<<32 - 1)
+	STD_ERROR_HANDLE  = -12 & (1<<32 - 1)
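+	// Masking with 1<<32 - 1 converts the negative DWORD constants
+	// (-10, -11, -12) to their uint32 bit patterns,
+	// e.g. STD_INPUT_HANDLE == 0xfffffff6.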
+
+	FILE_BEGIN   = 0
+	FILE_CURRENT = 1
+	FILE_END     = 2
+
+	LANG_ENGLISH       = 0x09
+	SUBLANG_ENGLISH_US = 0x01
+
+	FORMAT_MESSAGE_ALLOCATE_BUFFER = 256
+	FORMAT_MESSAGE_IGNORE_INSERTS  = 512
+	FORMAT_MESSAGE_FROM_STRING     = 1024
+	FORMAT_MESSAGE_FROM_HMODULE    = 2048
+	FORMAT_MESSAGE_FROM_SYSTEM     = 4096
+	FORMAT_MESSAGE_ARGUMENT_ARRAY  = 8192
+	FORMAT_MESSAGE_MAX_WIDTH_MASK  = 255
+
+	MAX_PATH      = 260
+	MAX_LONG_PATH = 32768
+
+	MAX_COMPUTERNAME_LENGTH = 15
+
+	TIME_ZONE_ID_UNKNOWN  = 0
+	TIME_ZONE_ID_STANDARD = 1
+	TIME_ZONE_ID_DAYLIGHT = 2
+
+	IGNORE   = 0
+	INFINITE = 0xffffffff
+
+	WAIT_TIMEOUT   = 258
+	WAIT_ABANDONED = 0x00000080
+	WAIT_OBJECT_0  = 0x00000000
+	WAIT_FAILED    = 0xFFFFFFFF
+
+	PROCESS_TERMINATE         = 1
+	PROCESS_QUERY_INFORMATION = 0x00000400
+	SYNCHRONIZE               = 0x00100000
+
+	FILE_MAP_COPY    = 0x01
+	FILE_MAP_WRITE   = 0x02
+	FILE_MAP_READ    = 0x04
+	FILE_MAP_EXECUTE = 0x20
+
+	CTRL_C_EVENT     = 0
+	CTRL_BREAK_EVENT = 1
+
+	// Windows reserves errors >= 1<<29 for application use.
+	APPLICATION_ERROR = 1 << 29
+)
+
+const (
+	// Process creation flags.
+	CREATE_BREAKAWAY_FROM_JOB        = 0x01000000
+	CREATE_DEFAULT_ERROR_MODE        = 0x04000000
+	CREATE_NEW_CONSOLE               = 0x00000010
+	CREATE_NEW_PROCESS_GROUP         = 0x00000200
+	CREATE_NO_WINDOW                 = 0x08000000
+	CREATE_PROTECTED_PROCESS         = 0x00040000
+	CREATE_PRESERVE_CODE_AUTHZ_LEVEL = 0x02000000
+	CREATE_SEPARATE_WOW_VDM          = 0x00000800
+	CREATE_SHARED_WOW_VDM            = 0x00001000
+	CREATE_SUSPENDED                 = 0x00000004
+	CREATE_UNICODE_ENVIRONMENT       = 0x00000400
+	DEBUG_ONLY_THIS_PROCESS          = 0x00000002
+	DEBUG_PROCESS                    = 0x00000001
+	DETACHED_PROCESS                 = 0x00000008
+	EXTENDED_STARTUPINFO_PRESENT     = 0x00080000
+	INHERIT_PARENT_AFFINITY          = 0x00010000
+)
+
+const (
+	// flags for CreateToolhelp32Snapshot
+	TH32CS_SNAPHEAPLIST = 0x01
+	TH32CS_SNAPPROCESS  = 0x02
+	TH32CS_SNAPTHREAD   = 0x04
+	TH32CS_SNAPMODULE   = 0x08
+	TH32CS_SNAPMODULE32 = 0x10
+	TH32CS_SNAPALL      = TH32CS_SNAPHEAPLIST | TH32CS_SNAPMODULE | TH32CS_SNAPPROCESS | TH32CS_SNAPTHREAD
+	TH32CS_INHERIT      = 0x80000000
+)
+
+const (
+	// filters for ReadDirectoryChangesW
+	FILE_NOTIFY_CHANGE_FILE_NAME   = 0x001
+	FILE_NOTIFY_CHANGE_DIR_NAME    = 0x002
+	FILE_NOTIFY_CHANGE_ATTRIBUTES  = 0x004
+	FILE_NOTIFY_CHANGE_SIZE        = 0x008
+	FILE_NOTIFY_CHANGE_LAST_WRITE  = 0x010
+	FILE_NOTIFY_CHANGE_LAST_ACCESS = 0x020
+	FILE_NOTIFY_CHANGE_CREATION    = 0x040
+	FILE_NOTIFY_CHANGE_SECURITY    = 0x100
+)
+
+const (
+	// do not reorder
+	FILE_ACTION_ADDED = iota + 1
+	FILE_ACTION_REMOVED
+	FILE_ACTION_MODIFIED
+	FILE_ACTION_RENAMED_OLD_NAME
+	FILE_ACTION_RENAMED_NEW_NAME
+)
+
+const (
+	// wincrypt.h
+	PROV_RSA_FULL                    = 1
+	PROV_RSA_SIG                     = 2
+	PROV_DSS                         = 3
+	PROV_FORTEZZA                    = 4
+	PROV_MS_EXCHANGE                 = 5
+	PROV_SSL                         = 6
+	PROV_RSA_SCHANNEL                = 12
+	PROV_DSS_DH                      = 13
+	PROV_EC_ECDSA_SIG                = 14
+	PROV_EC_ECNRA_SIG                = 15
+	PROV_EC_ECDSA_FULL               = 16
+	PROV_EC_ECNRA_FULL               = 17
+	PROV_DH_SCHANNEL                 = 18
+	PROV_SPYRUS_LYNKS                = 20
+	PROV_RNG                         = 21
+	PROV_INTEL_SEC                   = 22
+	PROV_REPLACE_OWF                 = 23
+	PROV_RSA_AES                     = 24
+	CRYPT_VERIFYCONTEXT              = 0xF0000000
+	CRYPT_NEWKEYSET                  = 0x00000008
+	CRYPT_DELETEKEYSET               = 0x00000010
+	CRYPT_MACHINE_KEYSET             = 0x00000020
+	CRYPT_SILENT                     = 0x00000040
+	CRYPT_DEFAULT_CONTAINER_OPTIONAL = 0x00000080
+
+	USAGE_MATCH_TYPE_AND = 0
+	USAGE_MATCH_TYPE_OR  = 1
+
+	/* msgAndCertEncodingType values for CertOpenStore function */
+	X509_ASN_ENCODING   = 0x00000001
+	PKCS_7_ASN_ENCODING = 0x00010000
+
+	/* storeProvider values for CertOpenStore function */
+	CERT_STORE_PROV_MSG               = 1
+	CERT_STORE_PROV_MEMORY            = 2
+	CERT_STORE_PROV_FILE              = 3
+	CERT_STORE_PROV_REG               = 4
+	CERT_STORE_PROV_PKCS7             = 5
+	CERT_STORE_PROV_SERIALIZED        = 6
+	CERT_STORE_PROV_FILENAME_A        = 7
+	CERT_STORE_PROV_FILENAME_W        = 8
+	CERT_STORE_PROV_FILENAME          = CERT_STORE_PROV_FILENAME_W
+	CERT_STORE_PROV_SYSTEM_A          = 9
+	CERT_STORE_PROV_SYSTEM_W          = 10
+	CERT_STORE_PROV_SYSTEM            = CERT_STORE_PROV_SYSTEM_W
+	CERT_STORE_PROV_COLLECTION        = 11
+	CERT_STORE_PROV_SYSTEM_REGISTRY_A = 12
+	CERT_STORE_PROV_SYSTEM_REGISTRY_W = 13
+	CERT_STORE_PROV_SYSTEM_REGISTRY   = CERT_STORE_PROV_SYSTEM_REGISTRY_W
+	CERT_STORE_PROV_PHYSICAL_W        = 14
+	CERT_STORE_PROV_PHYSICAL          = CERT_STORE_PROV_PHYSICAL_W
+	CERT_STORE_PROV_SMART_CARD_W      = 15
+	CERT_STORE_PROV_SMART_CARD        = CERT_STORE_PROV_SMART_CARD_W
+	CERT_STORE_PROV_LDAP_W            = 16
+	CERT_STORE_PROV_LDAP              = CERT_STORE_PROV_LDAP_W
+	CERT_STORE_PROV_PKCS12            = 17
+
+	/* store characteristics (low WORD of flag) for CertOpenStore function */
+	CERT_STORE_NO_CRYPT_RELEASE_FLAG            = 0x00000001
+	CERT_STORE_SET_LOCALIZED_NAME_FLAG          = 0x00000002
+	CERT_STORE_DEFER_CLOSE_UNTIL_LAST_FREE_FLAG = 0x00000004
+	CERT_STORE_DELETE_FLAG                      = 0x00000010
+	CERT_STORE_UNSAFE_PHYSICAL_FLAG             = 0x00000020
+	CERT_STORE_SHARE_STORE_FLAG                 = 0x00000040
+	CERT_STORE_SHARE_CONTEXT_FLAG               = 0x00000080
+	CERT_STORE_MANIFOLD_FLAG                    = 0x00000100
+	CERT_STORE_ENUM_ARCHIVED_FLAG               = 0x00000200
+	CERT_STORE_UPDATE_KEYID_FLAG                = 0x00000400
+	CERT_STORE_BACKUP_RESTORE_FLAG              = 0x00000800
+	CERT_STORE_MAXIMUM_ALLOWED_FLAG             = 0x00001000
+	CERT_STORE_CREATE_NEW_FLAG                  = 0x00002000
+	CERT_STORE_OPEN_EXISTING_FLAG               = 0x00004000
+	CERT_STORE_READONLY_FLAG                    = 0x00008000
+
+	/* store locations (high WORD of flag) for CertOpenStore function */
+	CERT_SYSTEM_STORE_CURRENT_USER               = 0x00010000
+	CERT_SYSTEM_STORE_LOCAL_MACHINE              = 0x00020000
+	CERT_SYSTEM_STORE_CURRENT_SERVICE            = 0x00040000
+	CERT_SYSTEM_STORE_SERVICES                   = 0x00050000
+	CERT_SYSTEM_STORE_USERS                      = 0x00060000
+	CERT_SYSTEM_STORE_CURRENT_USER_GROUP_POLICY  = 0x00070000
+	CERT_SYSTEM_STORE_LOCAL_MACHINE_GROUP_POLICY = 0x00080000
+	CERT_SYSTEM_STORE_LOCAL_MACHINE_ENTERPRISE   = 0x00090000
+	CERT_SYSTEM_STORE_UNPROTECTED_FLAG           = 0x40000000
+	CERT_SYSTEM_STORE_RELOCATE_FLAG              = 0x80000000
+
+	/* Miscellaneous high-WORD flags for CertOpenStore function */
+	CERT_REGISTRY_STORE_REMOTE_FLAG      = 0x00010000
+	CERT_REGISTRY_STORE_SERIALIZED_FLAG  = 0x00020000
+	CERT_REGISTRY_STORE_ROAMING_FLAG     = 0x00040000
+	CERT_REGISTRY_STORE_MY_IE_DIRTY_FLAG = 0x00080000
+	CERT_REGISTRY_STORE_LM_GPT_FLAG      = 0x01000000
+	CERT_REGISTRY_STORE_CLIENT_GPT_FLAG  = 0x80000000
+	CERT_FILE_STORE_COMMIT_ENABLE_FLAG   = 0x00010000
+	CERT_LDAP_STORE_SIGN_FLAG            = 0x00010000
+	CERT_LDAP_STORE_AREC_EXCLUSIVE_FLAG  = 0x00020000
+	CERT_LDAP_STORE_OPENED_FLAG          = 0x00040000
+	CERT_LDAP_STORE_UNBIND_FLAG          = 0x00080000
+
+	/* addDisposition values for CertAddCertificateContextToStore function */
+	CERT_STORE_ADD_NEW                                 = 1
+	CERT_STORE_ADD_USE_EXISTING                        = 2
+	CERT_STORE_ADD_REPLACE_EXISTING                    = 3
+	CERT_STORE_ADD_ALWAYS                              = 4
+	CERT_STORE_ADD_REPLACE_EXISTING_INHERIT_PROPERTIES = 5
+	CERT_STORE_ADD_NEWER                               = 6
+	CERT_STORE_ADD_NEWER_INHERIT_PROPERTIES            = 7
+
+	/* ErrorStatus values for CertTrustStatus struct */
+	CERT_TRUST_NO_ERROR                          = 0x00000000
+	CERT_TRUST_IS_NOT_TIME_VALID                 = 0x00000001
+	CERT_TRUST_IS_REVOKED                        = 0x00000004
+	CERT_TRUST_IS_NOT_SIGNATURE_VALID            = 0x00000008
+	CERT_TRUST_IS_NOT_VALID_FOR_USAGE            = 0x00000010
+	CERT_TRUST_IS_UNTRUSTED_ROOT                 = 0x00000020
+	CERT_TRUST_REVOCATION_STATUS_UNKNOWN         = 0x00000040
+	CERT_TRUST_IS_CYCLIC                         = 0x00000080
+	CERT_TRUST_INVALID_EXTENSION                 = 0x00000100
+	CERT_TRUST_INVALID_POLICY_CONSTRAINTS        = 0x00000200
+	CERT_TRUST_INVALID_BASIC_CONSTRAINTS         = 0x00000400
+	CERT_TRUST_INVALID_NAME_CONSTRAINTS          = 0x00000800
+	CERT_TRUST_HAS_NOT_SUPPORTED_NAME_CONSTRAINT = 0x00001000
+	CERT_TRUST_HAS_NOT_DEFINED_NAME_CONSTRAINT   = 0x00002000
+	CERT_TRUST_HAS_NOT_PERMITTED_NAME_CONSTRAINT = 0x00004000
+	CERT_TRUST_HAS_EXCLUDED_NAME_CONSTRAINT      = 0x00008000
+	CERT_TRUST_IS_PARTIAL_CHAIN                  = 0x00010000
+	CERT_TRUST_CTL_IS_NOT_TIME_VALID             = 0x00020000
+	CERT_TRUST_CTL_IS_NOT_SIGNATURE_VALID        = 0x00040000
+	CERT_TRUST_CTL_IS_NOT_VALID_FOR_USAGE        = 0x00080000
+	CERT_TRUST_HAS_WEAK_SIGNATURE                = 0x00100000
+	CERT_TRUST_IS_OFFLINE_REVOCATION             = 0x01000000
+	CERT_TRUST_NO_ISSUANCE_CHAIN_POLICY          = 0x02000000
+	CERT_TRUST_IS_EXPLICIT_DISTRUST              = 0x04000000
+	CERT_TRUST_HAS_NOT_SUPPORTED_CRITICAL_EXT    = 0x08000000
+
+	/* InfoStatus values for CertTrustStatus struct */
+	CERT_TRUST_HAS_EXACT_MATCH_ISSUER        = 0x00000001
+	CERT_TRUST_HAS_KEY_MATCH_ISSUER          = 0x00000002
+	CERT_TRUST_HAS_NAME_MATCH_ISSUER         = 0x00000004
+	CERT_TRUST_IS_SELF_SIGNED                = 0x00000008
+	CERT_TRUST_HAS_PREFERRED_ISSUER          = 0x00000100
+	CERT_TRUST_HAS_ISSUANCE_CHAIN_POLICY     = 0x00000400
+	CERT_TRUST_HAS_VALID_NAME_CONSTRAINTS    = 0x00000400
+	CERT_TRUST_IS_PEER_TRUSTED               = 0x00000800
+	CERT_TRUST_HAS_CRL_VALIDITY_EXTENDED     = 0x00001000
+	CERT_TRUST_IS_FROM_EXCLUSIVE_TRUST_STORE = 0x00002000
+	CERT_TRUST_IS_CA_TRUSTED                 = 0x00004000
+	CERT_TRUST_IS_COMPLEX_CHAIN              = 0x00010000
+
+	/* policyOID values for CertVerifyCertificateChainPolicy function */
+	CERT_CHAIN_POLICY_BASE              = 1
+	CERT_CHAIN_POLICY_AUTHENTICODE      = 2
+	CERT_CHAIN_POLICY_AUTHENTICODE_TS   = 3
+	CERT_CHAIN_POLICY_SSL               = 4
+	CERT_CHAIN_POLICY_BASIC_CONSTRAINTS = 5
+	CERT_CHAIN_POLICY_NT_AUTH           = 6
+	CERT_CHAIN_POLICY_MICROSOFT_ROOT    = 7
+	CERT_CHAIN_POLICY_EV                = 8
+	CERT_CHAIN_POLICY_SSL_F12           = 9
+
+	CERT_E_EXPIRED       = 0x800B0101
+	CERT_E_ROLE          = 0x800B0103
+	CERT_E_PURPOSE       = 0x800B0106
+	CERT_E_UNTRUSTEDROOT = 0x800B0109
+	CERT_E_CN_NO_MATCH   = 0x800B010F
+
+	/* AuthType values for SSLExtraCertChainPolicyPara struct */
+	AUTHTYPE_CLIENT = 1
+	AUTHTYPE_SERVER = 2
+
+	/* Checks values for SSLExtraCertChainPolicyPara struct */
+	SECURITY_FLAG_IGNORE_REVOCATION        = 0x00000080
+	SECURITY_FLAG_IGNORE_UNKNOWN_CA        = 0x00000100
+	SECURITY_FLAG_IGNORE_WRONG_USAGE       = 0x00000200
+	SECURITY_FLAG_IGNORE_CERT_CN_INVALID   = 0x00001000
+	SECURITY_FLAG_IGNORE_CERT_DATE_INVALID = 0x00002000
+)
+
+var (
+	OID_PKIX_KP_SERVER_AUTH = []byte("1.3.6.1.5.5.7.3.1\x00")
+	OID_SERVER_GATED_CRYPTO = []byte("1.3.6.1.4.1.311.10.3.3\x00")
+	OID_SGC_NETSCAPE        = []byte("2.16.840.1.113730.4.1\x00")
+)
+
+// Pointer represents a pointer to an arbitrary Windows type.
+//
+// Pointer-typed fields may point to one of many different types. It's
+// up to the caller to provide a pointer to the appropriate type, cast
+// to Pointer. The caller must obey the unsafe.Pointer rules while
+// doing so.
+type Pointer *struct{}
+
+// Invented values to support what package os expects.
+type Timeval struct {
+	Sec  int32
+	Usec int32
+}
+
+func (tv *Timeval) Nanoseconds() int64 {
+	return (int64(tv.Sec)*1e6 + int64(tv.Usec)) * 1e3
+}
+
+func NsecToTimeval(nsec int64) (tv Timeval) {
+	tv.Sec = int32(nsec / 1e9)
+	tv.Usec = int32(nsec % 1e9 / 1e3)
+	return
+}
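+// For example, NsecToTimeval(1500000000) yields {Sec: 1, Usec: 500000}.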
+
+type SecurityAttributes struct {
+	Length             uint32
+	SecurityDescriptor uintptr
+	InheritHandle      uint32
+}
+
+type Overlapped struct {
+	Internal     uintptr
+	InternalHigh uintptr
+	Offset       uint32
+	OffsetHigh   uint32
+	HEvent       Handle
+}
+
+type FileNotifyInformation struct {
+	NextEntryOffset uint32
+	Action          uint32
+	FileNameLength  uint32
+	FileName        uint16
+}
+
+type Filetime struct {
+	LowDateTime  uint32
+	HighDateTime uint32
+}
+
+// Nanoseconds returns Filetime ft in nanoseconds
+// since Epoch (00:00:00 UTC, January 1, 1970).
+func (ft *Filetime) Nanoseconds() int64 {
+	// 100-nanosecond intervals since January 1, 1601
+	nsec := int64(ft.HighDateTime)<<32 + int64(ft.LowDateTime)
+	// change starting time to the Epoch (00:00:00 UTC, January 1, 1970)
+	nsec -= 116444736000000000
+	// convert into nanoseconds
+	nsec *= 100
+	return nsec
+}
+
+func NsecToFiletime(nsec int64) (ft Filetime) {
+	// convert into 100-nanosecond intervals
+	nsec /= 100
+	// change starting time to January 1, 1601
+	nsec += 116444736000000000
+	// split into high / low
+	ft.LowDateTime = uint32(nsec & 0xffffffff)
+	ft.HighDateTime = uint32(nsec >> 32 & 0xffffffff)
+	return ft
+}
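+// The constant 116444736000000000 is the number of 100-nanosecond
+// intervals between January 1, 1601 and the Unix epoch, so the epoch
+// itself round-trips:
+//
+//	ft := NsecToFiletime(0) // {LowDateTime: 0xd53e8000, HighDateTime: 0x019db1de}
+//	_ = ft.Nanoseconds()    // 0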
+
+type Win32finddata struct {
+	FileAttributes    uint32
+	CreationTime      Filetime
+	LastAccessTime    Filetime
+	LastWriteTime     Filetime
+	FileSizeHigh      uint32
+	FileSizeLow       uint32
+	Reserved0         uint32
+	Reserved1         uint32
+	FileName          [MAX_PATH - 1]uint16
+	AlternateFileName [13]uint16
+}
+
+// This is the actual system call structure.
+// Win32finddata is what we committed to in Go 1.
+type win32finddata1 struct {
+	FileAttributes    uint32
+	CreationTime      Filetime
+	LastAccessTime    Filetime
+	LastWriteTime     Filetime
+	FileSizeHigh      uint32
+	FileSizeLow       uint32
+	Reserved0         uint32
+	Reserved1         uint32
+	FileName          [MAX_PATH]uint16
+	AlternateFileName [14]uint16
+}
+
+func copyFindData(dst *Win32finddata, src *win32finddata1) {
+	dst.FileAttributes = src.FileAttributes
+	dst.CreationTime = src.CreationTime
+	dst.LastAccessTime = src.LastAccessTime
+	dst.LastWriteTime = src.LastWriteTime
+	dst.FileSizeHigh = src.FileSizeHigh
+	dst.FileSizeLow = src.FileSizeLow
+	dst.Reserved0 = src.Reserved0
+	dst.Reserved1 = src.Reserved1
+
+	// The src is 1 element bigger than dst, but the extra element must be NUL.
+	copy(dst.FileName[:], src.FileName[:])
+	copy(dst.AlternateFileName[:], src.AlternateFileName[:])
+}
+
+type ByHandleFileInformation struct {
+	FileAttributes     uint32
+	CreationTime       Filetime
+	LastAccessTime     Filetime
+	LastWriteTime      Filetime
+	VolumeSerialNumber uint32
+	FileSizeHigh       uint32
+	FileSizeLow        uint32
+	NumberOfLinks      uint32
+	FileIndexHigh      uint32
+	FileIndexLow       uint32
+}
+
+const (
+	GetFileExInfoStandard = 0
+	GetFileExMaxInfoLevel = 1
+)
+
+type Win32FileAttributeData struct {
+	FileAttributes uint32
+	CreationTime   Filetime
+	LastAccessTime Filetime
+	LastWriteTime  Filetime
+	FileSizeHigh   uint32
+	FileSizeLow    uint32
+}
+
+// ShowWindow constants
+const (
+	// winuser.h
+	SW_HIDE            = 0
+	SW_NORMAL          = 1
+	SW_SHOWNORMAL      = 1
+	SW_SHOWMINIMIZED   = 2
+	SW_SHOWMAXIMIZED   = 3
+	SW_MAXIMIZE        = 3
+	SW_SHOWNOACTIVATE  = 4
+	SW_SHOW            = 5
+	SW_MINIMIZE        = 6
+	SW_SHOWMINNOACTIVE = 7
+	SW_SHOWNA          = 8
+	SW_RESTORE         = 9
+	SW_SHOWDEFAULT     = 10
+	SW_FORCEMINIMIZE   = 11
+)
+
+type StartupInfo struct {
+	Cb            uint32
+	_             *uint16
+	Desktop       *uint16
+	Title         *uint16
+	X             uint32
+	Y             uint32
+	XSize         uint32
+	YSize         uint32
+	XCountChars   uint32
+	YCountChars   uint32
+	FillAttribute uint32
+	Flags         uint32
+	ShowWindow    uint16
+	_             uint16
+	_             *byte
+	StdInput      Handle
+	StdOutput     Handle
+	StdErr        Handle
+}
+
+type ProcessInformation struct {
+	Process   Handle
+	Thread    Handle
+	ProcessId uint32
+	ThreadId  uint32
+}
+
+type ProcessEntry32 struct {
+	Size            uint32
+	Usage           uint32
+	ProcessID       uint32
+	DefaultHeapID   uintptr
+	ModuleID        uint32
+	Threads         uint32
+	ParentProcessID uint32
+	PriClassBase    int32
+	Flags           uint32
+	ExeFile         [MAX_PATH]uint16
+}
+
+type Systemtime struct {
+	Year         uint16
+	Month        uint16
+	DayOfWeek    uint16
+	Day          uint16
+	Hour         uint16
+	Minute       uint16
+	Second       uint16
+	Milliseconds uint16
+}
+
+type Timezoneinformation struct {
+	Bias         int32
+	StandardName [32]uint16
+	StandardDate Systemtime
+	StandardBias int32
+	DaylightName [32]uint16
+	DaylightDate Systemtime
+	DaylightBias int32
+}
+
+// Socket related.
+
+const (
+	AF_UNSPEC  = 0
+	AF_UNIX    = 1
+	AF_INET    = 2
+	AF_INET6   = 23
+	AF_NETBIOS = 17
+
+	SOCK_STREAM    = 1
+	SOCK_DGRAM     = 2
+	SOCK_RAW       = 3
+	SOCK_SEQPACKET = 5
+
+	IPPROTO_IP   = 0
+	IPPROTO_IPV6 = 0x29
+	IPPROTO_TCP  = 6
+	IPPROTO_UDP  = 17
+
+	SOL_SOCKET                = 0xffff
+	SO_REUSEADDR              = 4
+	SO_KEEPALIVE              = 8
+	SO_DONTROUTE              = 16
+	SO_BROADCAST              = 32
+	SO_LINGER                 = 128
+	SO_RCVBUF                 = 0x1002
+	SO_SNDBUF                 = 0x1001
+	SO_UPDATE_ACCEPT_CONTEXT  = 0x700b
+	SO_UPDATE_CONNECT_CONTEXT = 0x7010
+
+	IOC_OUT                            = 0x40000000
+	IOC_IN                             = 0x80000000
+	IOC_VENDOR                         = 0x18000000
+	IOC_INOUT                          = IOC_IN | IOC_OUT
+	IOC_WS2                            = 0x08000000
+	SIO_GET_EXTENSION_FUNCTION_POINTER = IOC_INOUT | IOC_WS2 | 6
+	SIO_KEEPALIVE_VALS                 = IOC_IN | IOC_VENDOR | 4
+	SIO_UDP_CONNRESET                  = IOC_IN | IOC_VENDOR | 12
+
+	// cf. http://support.microsoft.com/default.aspx?scid=kb;en-us;257460
+
+	IP_TOS             = 0x3
+	IP_TTL             = 0x4
+	IP_MULTICAST_IF    = 0x9
+	IP_MULTICAST_TTL   = 0xa
+	IP_MULTICAST_LOOP  = 0xb
+	IP_ADD_MEMBERSHIP  = 0xc
+	IP_DROP_MEMBERSHIP = 0xd
+
+	IPV6_V6ONLY         = 0x1b
+	IPV6_UNICAST_HOPS   = 0x4
+	IPV6_MULTICAST_IF   = 0x9
+	IPV6_MULTICAST_HOPS = 0xa
+	IPV6_MULTICAST_LOOP = 0xb
+	IPV6_JOIN_GROUP     = 0xc
+	IPV6_LEAVE_GROUP    = 0xd
+
+	MSG_OOB       = 0x1
+	MSG_PEEK      = 0x2
+	MSG_DONTROUTE = 0x4
+	MSG_WAITALL   = 0x8
+
+	MSG_TRUNC  = 0x0100
+	MSG_CTRUNC = 0x0200
+	MSG_BCAST  = 0x0400
+	MSG_MCAST  = 0x0800
+
+	SOMAXCONN = 0x7fffffff
+
+	TCP_NODELAY = 1
+
+	SHUT_RD   = 0
+	SHUT_WR   = 1
+	SHUT_RDWR = 2
+
+	WSADESCRIPTION_LEN = 256
+	WSASYS_STATUS_LEN  = 128
+)
+
+type WSABuf struct {
+	Len uint32
+	Buf *byte
+}
+
+type WSAMsg struct {
+	Name        *syscall.RawSockaddrAny
+	Namelen     int32
+	Buffers     *WSABuf
+	BufferCount uint32
+	Control     WSABuf
+	Flags       uint32
+}
+
+// Invented values to support what package os expects.
+const (
+	S_IFMT   = 0x1f000
+	S_IFIFO  = 0x1000
+	S_IFCHR  = 0x2000
+	S_IFDIR  = 0x4000
+	S_IFBLK  = 0x6000
+	S_IFREG  = 0x8000
+	S_IFLNK  = 0xa000
+	S_IFSOCK = 0xc000
+	S_ISUID  = 0x800
+	S_ISGID  = 0x400
+	S_ISVTX  = 0x200
+	S_IRUSR  = 0x100
+	S_IWRITE = 0x80
+	S_IWUSR  = 0x80
+	S_IXUSR  = 0x40
+)
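+
+// These bits follow the usual POSIX layout, so a mode is classified by
+// masking with S_IFMT (illustrative sketch):
+//
+//	func isDir(mode uint32) bool { return mode&S_IFMT == S_IFDIR }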
+
+const (
+	FILE_TYPE_CHAR    = 0x0002
+	FILE_TYPE_DISK    = 0x0001
+	FILE_TYPE_PIPE    = 0x0003
+	FILE_TYPE_REMOTE  = 0x8000
+	FILE_TYPE_UNKNOWN = 0x0000
+)
+
+type Hostent struct {
+	Name     *byte
+	Aliases  **byte
+	AddrType uint16
+	Length   uint16
+	AddrList **byte
+}
+
+type Protoent struct {
+	Name    *byte
+	Aliases **byte
+	Proto   uint16
+}
+
+const (
+	DNS_TYPE_A       = 0x0001
+	DNS_TYPE_NS      = 0x0002
+	DNS_TYPE_MD      = 0x0003
+	DNS_TYPE_MF      = 0x0004
+	DNS_TYPE_CNAME   = 0x0005
+	DNS_TYPE_SOA     = 0x0006
+	DNS_TYPE_MB      = 0x0007
+	DNS_TYPE_MG      = 0x0008
+	DNS_TYPE_MR      = 0x0009
+	DNS_TYPE_NULL    = 0x000a
+	DNS_TYPE_WKS     = 0x000b
+	DNS_TYPE_PTR     = 0x000c
+	DNS_TYPE_HINFO   = 0x000d
+	DNS_TYPE_MINFO   = 0x000e
+	DNS_TYPE_MX      = 0x000f
+	DNS_TYPE_TEXT    = 0x0010
+	DNS_TYPE_RP      = 0x0011
+	DNS_TYPE_AFSDB   = 0x0012
+	DNS_TYPE_X25     = 0x0013
+	DNS_TYPE_ISDN    = 0x0014
+	DNS_TYPE_RT      = 0x0015
+	DNS_TYPE_NSAP    = 0x0016
+	DNS_TYPE_NSAPPTR = 0x0017
+	DNS_TYPE_SIG     = 0x0018
+	DNS_TYPE_KEY     = 0x0019
+	DNS_TYPE_PX      = 0x001a
+	DNS_TYPE_GPOS    = 0x001b
+	DNS_TYPE_AAAA    = 0x001c
+	DNS_TYPE_LOC     = 0x001d
+	DNS_TYPE_NXT     = 0x001e
+	DNS_TYPE_EID     = 0x001f
+	DNS_TYPE_NIMLOC  = 0x0020
+	DNS_TYPE_SRV     = 0x0021
+	DNS_TYPE_ATMA    = 0x0022
+	DNS_TYPE_NAPTR   = 0x0023
+	DNS_TYPE_KX      = 0x0024
+	DNS_TYPE_CERT    = 0x0025
+	DNS_TYPE_A6      = 0x0026
+	DNS_TYPE_DNAME   = 0x0027
+	DNS_TYPE_SINK    = 0x0028
+	DNS_TYPE_OPT     = 0x0029
+	DNS_TYPE_DS      = 0x002B
+	DNS_TYPE_RRSIG   = 0x002E
+	DNS_TYPE_NSEC    = 0x002F
+	DNS_TYPE_DNSKEY  = 0x0030
+	DNS_TYPE_DHCID   = 0x0031
+	DNS_TYPE_UINFO   = 0x0064
+	DNS_TYPE_UID     = 0x0065
+	DNS_TYPE_GID     = 0x0066
+	DNS_TYPE_UNSPEC  = 0x0067
+	DNS_TYPE_ADDRS   = 0x00f8
+	DNS_TYPE_TKEY    = 0x00f9
+	DNS_TYPE_TSIG    = 0x00fa
+	DNS_TYPE_IXFR    = 0x00fb
+	DNS_TYPE_AXFR    = 0x00fc
+	DNS_TYPE_MAILB   = 0x00fd
+	DNS_TYPE_MAILA   = 0x00fe
+	DNS_TYPE_ALL     = 0x00ff
+	DNS_TYPE_ANY     = 0x00ff
+	DNS_TYPE_WINS    = 0xff01
+	DNS_TYPE_WINSR   = 0xff02
+	DNS_TYPE_NBSTAT  = 0xff01
+)
+
+const (
+	DNS_INFO_NO_RECORDS = 0x251D
+)
+
+const (
+	// flags inside DNSRecord.Dw
+	DnsSectionQuestion   = 0x0000
+	DnsSectionAnswer     = 0x0001
+	DnsSectionAuthority  = 0x0002
+	DnsSectionAdditional = 0x0003
+)
+
+type DNSSRVData struct {
+	Target   *uint16
+	Priority uint16
+	Weight   uint16
+	Port     uint16
+	Pad      uint16
+}
+
+type DNSPTRData struct {
+	Host *uint16
+}
+
+type DNSMXData struct {
+	NameExchange *uint16
+	Preference   uint16
+	Pad          uint16
+}
+
+type DNSTXTData struct {
+	StringCount uint16
+	StringArray [1]*uint16
+}
+
+type DNSRecord struct {
+	Next     *DNSRecord
+	Name     *uint16
+	Type     uint16
+	Length   uint16
+	Dw       uint32
+	Ttl      uint32
+	Reserved uint32
+	Data     [40]byte
+}
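+
+// Records form a singly linked list through Next, and Data holds the
+// type-specific payload (for example DNSSRVData when Type is DNS_TYPE_SRV).
+// A walk might look like this (illustrative sketch for a consuming package
+// importing "unsafe"; DnsQuery and DnsRecordListFree are generated elsewhere
+// in this package, and 1 is assumed to be the DnsFreeRecordList free type):
+//
+//	var rec *windows.DNSRecord
+//	err := windows.DnsQuery("_sip._tcp.example.com", windows.DNS_TYPE_SRV, 0, nil, &rec, nil)
+//	if err != nil {
+//		return err
+//	}
+//	defer windows.DnsRecordListFree(rec, 1)
+//	for r := rec; r != nil; r = r.Next {
+//		if r.Type != windows.DNS_TYPE_SRV {
+//			continue
+//		}
+//		srv := (*windows.DNSSRVData)(unsafe.Pointer(&r.Data[0]))
+//		_ = srv.Port
+//	}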
+
+const (
+	TF_DISCONNECT         = 1
+	TF_REUSE_SOCKET       = 2
+	TF_WRITE_BEHIND       = 4
+	TF_USE_DEFAULT_WORKER = 0
+	TF_USE_SYSTEM_THREAD  = 16
+	TF_USE_KERNEL_APC     = 32
+)
+
+type TransmitFileBuffers struct {
+	Head       uintptr
+	HeadLength uint32
+	Tail       uintptr
+	TailLength uint32
+}
+
+const (
+	IFF_UP           = 1
+	IFF_BROADCAST    = 2
+	IFF_LOOPBACK     = 4
+	IFF_POINTTOPOINT = 8
+	IFF_MULTICAST    = 16
+)
+
+const SIO_GET_INTERFACE_LIST = 0x4004747F
+
+// TODO(mattn): SockaddrGen is a union of sockaddr/sockaddr_in/sockaddr_in6_old.
+// The variable type will be changed to a more suitable one.
+
+type SockaddrGen [24]byte
+
+type InterfaceInfo struct {
+	Flags            uint32
+	Address          SockaddrGen
+	BroadcastAddress SockaddrGen
+	Netmask          SockaddrGen
+}
+
+type IpAddressString struct {
+	String [16]byte
+}
+
+type IpMaskString IpAddressString
+
+type IpAddrString struct {
+	Next      *IpAddrString
+	IpAddress IpAddressString
+	IpMask    IpMaskString
+	Context   uint32
+}
+
+const MAX_ADAPTER_NAME_LENGTH = 256
+const MAX_ADAPTER_DESCRIPTION_LENGTH = 128
+const MAX_ADAPTER_ADDRESS_LENGTH = 8
+
+type IpAdapterInfo struct {
+	Next                *IpAdapterInfo
+	ComboIndex          uint32
+	AdapterName         [MAX_ADAPTER_NAME_LENGTH + 4]byte
+	Description         [MAX_ADAPTER_DESCRIPTION_LENGTH + 4]byte
+	AddressLength       uint32
+	Address             [MAX_ADAPTER_ADDRESS_LENGTH]byte
+	Index               uint32
+	Type                uint32
+	DhcpEnabled         uint32
+	CurrentIpAddress    *IpAddrString
+	IpAddressList       IpAddrString
+	GatewayList         IpAddrString
+	DhcpServer          IpAddrString
+	HaveWins            bool
+	PrimaryWinsServer   IpAddrString
+	SecondaryWinsServer IpAddrString
+	LeaseObtained       int64
+	LeaseExpires        int64
+}
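+
+// GetAdaptersInfo (generated elsewhere in this package) reports the required
+// buffer size through its length argument, so callers typically retry once
+// with the returned size (illustrative sketch; assumes "unsafe" is imported
+// and ERROR_BUFFER_OVERFLOW is defined elsewhere in this package):
+//
+//	size := uint32(unsafe.Sizeof(windows.IpAdapterInfo{}))
+//	buf := make([]byte, size)
+//	ai := (*windows.IpAdapterInfo)(unsafe.Pointer(&buf[0]))
+//	if err := windows.GetAdaptersInfo(ai, &size); err == windows.ERROR_BUFFER_OVERFLOW {
+//		buf = make([]byte, size)
+//		ai = (*windows.IpAdapterInfo)(unsafe.Pointer(&buf[0]))
+//		_ = windows.GetAdaptersInfo(ai, &size)
+//	}
+//	for ; ai != nil; ai = ai.Next {
+//		_ = ai.IpAddressList.IpAddress.String // NUL-terminated dotted quad
+//	}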
+
+const MAXLEN_PHYSADDR = 8
+const MAX_INTERFACE_NAME_LEN = 256
+const MAXLEN_IFDESCR = 256
+
+type MibIfRow struct {
+	Name            [MAX_INTERFACE_NAME_LEN]uint16
+	Index           uint32
+	Type            uint32
+	Mtu             uint32
+	Speed           uint32
+	PhysAddrLen     uint32
+	PhysAddr        [MAXLEN_PHYSADDR]byte
+	AdminStatus     uint32
+	OperStatus      uint32
+	LastChange      uint32
+	InOctets        uint32
+	InUcastPkts     uint32
+	InNUcastPkts    uint32
+	InDiscards      uint32
+	InErrors        uint32
+	InUnknownProtos uint32
+	OutOctets       uint32
+	OutUcastPkts    uint32
+	OutNUcastPkts   uint32
+	OutDiscards     uint32
+	OutErrors       uint32
+	OutQLen         uint32
+	DescrLen        uint32
+	Descr           [MAXLEN_IFDESCR]byte
+}
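+
+// Callers set Index to the interface of interest and let GetIfEntry
+// (generated elsewhere in this package) fill in the remaining fields
+// (illustrative sketch; ifIndex is a hypothetical input):
+//
+//	row := windows.MibIfRow{Index: ifIndex}
+//	if err := windows.GetIfEntry(&row); err == nil {
+//		_ = row.OperStatus
+//	}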
+
+type CertInfo struct {
+	// Not implemented
+}
+
+type CertContext struct {
+	EncodingType uint32
+	EncodedCert  *byte
+	Length       uint32
+	CertInfo     *CertInfo
+	Store        Handle
+}
+
+type CertChainContext struct {
+	Size                       uint32
+	TrustStatus                CertTrustStatus
+	ChainCount                 uint32
+	Chains                     **CertSimpleChain
+	LowerQualityChainCount     uint32
+	LowerQualityChains         **CertChainContext
+	HasRevocationFreshnessTime uint32
+	RevocationFreshnessTime    uint32
+}
+
+type CertTrustListInfo struct {
+	// Not implemented
+}
+
+type CertSimpleChain struct {
+	Size                       uint32
+	TrustStatus                CertTrustStatus
+	NumElements                uint32
+	Elements                   **CertChainElement
+	TrustListInfo              *CertTrustListInfo
+	HasRevocationFreshnessTime uint32
+	RevocationFreshnessTime    uint32
+}
+
+type CertChainElement struct {
+	Size              uint32
+	CertContext       *CertContext
+	TrustStatus       CertTrustStatus
+	RevocationInfo    *CertRevocationInfo
+	IssuanceUsage     *CertEnhKeyUsage
+	ApplicationUsage  *CertEnhKeyUsage
+	ExtendedErrorInfo *uint16
+}
+
+type CertRevocationCrlInfo struct {
+	// Not implemented
+}
+
+type CertRevocationInfo struct {
+	Size             uint32
+	RevocationResult uint32
+	RevocationOid    *byte
+	OidSpecificInfo  Pointer
+	HasFreshnessTime uint32
+	FreshnessTime    uint32
+	CrlInfo          *CertRevocationCrlInfo
+}
+
+type CertTrustStatus struct {
+	ErrorStatus uint32
+	InfoStatus  uint32
+}
+
+type CertUsageMatch struct {
+	Type  uint32
+	Usage CertEnhKeyUsage
+}
+
+type CertEnhKeyUsage struct {
+	Length           uint32
+	UsageIdentifiers **byte
+}
+
+type CertChainPara struct {
+	Size                         uint32
+	RequestedUsage               CertUsageMatch
+	RequestedIssuancePolicy      CertUsageMatch
+	URLRetrievalTimeout          uint32
+	CheckRevocationFreshnessTime uint32
+	RevocationFreshnessTime      uint32
+	CacheResync                  *Filetime
+}
+
+type CertChainPolicyPara struct {
+	Size            uint32
+	Flags           uint32
+	ExtraPolicyPara Pointer
+}
+
+type SSLExtraCertChainPolicyPara struct {
+	Size       uint32
+	AuthType   uint32
+	Checks     uint32
+	ServerName *uint16
+}
+
+type CertChainPolicyStatus struct {
+	Size              uint32
+	Error             uint32
+	ChainIndex        uint32
+	ElementIndex      uint32
+	ExtraPolicyStatus Pointer
+}
+
+const (
+	// do not reorder
+	HKEY_CLASSES_ROOT = 0x80000000 + iota
+	HKEY_CURRENT_USER
+	HKEY_LOCAL_MACHINE
+	HKEY_USERS
+	HKEY_PERFORMANCE_DATA
+	HKEY_CURRENT_CONFIG
+	HKEY_DYN_DATA
+
+	KEY_QUERY_VALUE        = 1
+	KEY_SET_VALUE          = 2
+	KEY_CREATE_SUB_KEY     = 4
+	KEY_ENUMERATE_SUB_KEYS = 8
+	KEY_NOTIFY             = 16
+	KEY_CREATE_LINK        = 32
+	KEY_WRITE              = 0x20006
+	KEY_EXECUTE            = 0x20019
+	KEY_READ               = 0x20019
+	KEY_WOW64_64KEY        = 0x0100
+	KEY_WOW64_32KEY        = 0x0200
+	KEY_ALL_ACCESS         = 0xf003f
+)
+
+const (
+	// do not reorder
+	REG_NONE = iota
+	REG_SZ
+	REG_EXPAND_SZ
+	REG_BINARY
+	REG_DWORD_LITTLE_ENDIAN
+	REG_DWORD_BIG_ENDIAN
+	REG_LINK
+	REG_MULTI_SZ
+	REG_RESOURCE_LIST
+	REG_FULL_RESOURCE_DESCRIPTOR
+	REG_RESOURCE_REQUIREMENTS_LIST
+	REG_QWORD_LITTLE_ENDIAN
+	REG_DWORD = REG_DWORD_LITTLE_ENDIAN
+	REG_QWORD = REG_QWORD_LITTLE_ENDIAN
+)
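+
+// Reading a value combines the two blocks above: open a key with a KEY_*
+// access mask, query the value, and interpret the bytes according to the
+// reported REG_* type. Illustrative sketch (RegOpenKeyEx, RegQueryValueEx,
+// and RegCloseKey are generated elsewhere in this package; the key and
+// value names are examples, and error handling is elided):
+//
+//	sub, _ := windows.UTF16PtrFromString(`SOFTWARE\Microsoft\Windows NT\CurrentVersion`)
+//	name, _ := windows.UTF16PtrFromString("ProductName")
+//	var k windows.Handle
+//	windows.RegOpenKeyEx(windows.HKEY_LOCAL_MACHINE, sub, 0, windows.KEY_READ, &k)
+//	defer windows.RegCloseKey(k)
+//	buf := make([]byte, 256)
+//	typ, n := uint32(0), uint32(len(buf))
+//	windows.RegQueryValueEx(k, name, nil, &typ, &buf[0], &n) // expect typ == REG_SZ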
+
+type AddrinfoW struct {
+	Flags     int32
+	Family    int32
+	Socktype  int32
+	Protocol  int32
+	Addrlen   uintptr
+	Canonname *uint16
+	Addr      uintptr
+	Next      *AddrinfoW
+}
+
+const (
+	AI_PASSIVE     = 1
+	AI_CANONNAME   = 2
+	AI_NUMERICHOST = 4
+)
+
+type GUID struct {
+	Data1 uint32
+	Data2 uint16
+	Data3 uint16
+	Data4 [8]byte
+}
+
+var WSAID_CONNECTEX = GUID{
+	0x25a207b9,
+	0xddf3,
+	0x4660,
+	[8]byte{0x8e, 0xe9, 0x76, 0xe5, 0x8c, 0x74, 0x06, 0x3e},
+}
+
+var WSAID_WSASENDMSG = GUID{
+	0xa441e712,
+	0x754f,
+	0x43ca,
+	[8]byte{0x84, 0xa7, 0x0d, 0xee, 0x44, 0xcf, 0x60, 0x6d},
+}
+
+var WSAID_WSARECVMSG = GUID{
+	0xf689d7c8,
+	0x6f1f,
+	0x436b,
+	[8]byte{0x8a, 0x53, 0xe5, 0x4f, 0xe3, 0x51, 0xc3, 0x22},
+}
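+
+// These GUIDs identify Winsock extension functions whose entry points are
+// resolved at runtime via WSAIoctl with SIO_GET_EXTENSION_FUNCTION_POINTER.
+// A sketch for ConnectEx (illustrative; s is an open socket handle, "unsafe"
+// is imported, and WSAIoctl is generated elsewhere in this package):
+//
+//	var connectEx uintptr
+//	var n uint32
+//	err := windows.WSAIoctl(s, windows.SIO_GET_EXTENSION_FUNCTION_POINTER,
+//		(*byte)(unsafe.Pointer(&windows.WSAID_CONNECTEX)), uint32(unsafe.Sizeof(windows.WSAID_CONNECTEX)),
+//		(*byte)(unsafe.Pointer(&connectEx)), uint32(unsafe.Sizeof(connectEx)),
+//		&n, nil, 0)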
+
+const (
+	FILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1
+	FILE_SKIP_SET_EVENT_ON_HANDLE        = 2
+)
+
+const (
+	WSAPROTOCOL_LEN    = 255
+	MAX_PROTOCOL_CHAIN = 7
+	BASE_PROTOCOL      = 1
+	LAYERED_PROTOCOL   = 0
+
+	XP1_CONNECTIONLESS           = 0x00000001
+	XP1_GUARANTEED_DELIVERY      = 0x00000002
+	XP1_GUARANTEED_ORDER         = 0x00000004
+	XP1_MESSAGE_ORIENTED         = 0x00000008
+	XP1_PSEUDO_STREAM            = 0x00000010
+	XP1_GRACEFUL_CLOSE           = 0x00000020
+	XP1_EXPEDITED_DATA           = 0x00000040
+	XP1_CONNECT_DATA             = 0x00000080
+	XP1_DISCONNECT_DATA          = 0x00000100
+	XP1_SUPPORT_BROADCAST        = 0x00000200
+	XP1_SUPPORT_MULTIPOINT       = 0x00000400
+	XP1_MULTIPOINT_CONTROL_PLANE = 0x00000800
+	XP1_MULTIPOINT_DATA_PLANE    = 0x00001000
+	XP1_QOS_SUPPORTED            = 0x00002000
+	XP1_UNI_SEND                 = 0x00008000
+	XP1_UNI_RECV                 = 0x00010000
+	XP1_IFS_HANDLES              = 0x00020000
+	XP1_PARTIAL_MESSAGE          = 0x00040000
+	XP1_SAN_SUPPORT_SDP          = 0x00080000
+
+	PFL_MULTIPLE_PROTO_ENTRIES  = 0x00000001
+	PFL_RECOMMENDED_PROTO_ENTRY = 0x00000002
+	PFL_HIDDEN                  = 0x00000004
+	PFL_MATCHES_PROTOCOL_ZERO   = 0x00000008
+	PFL_NETWORKDIRECT_PROVIDER  = 0x00000010
+)
+
+type WSAProtocolInfo struct {
+	ServiceFlags1     uint32
+	ServiceFlags2     uint32
+	ServiceFlags3     uint32
+	ServiceFlags4     uint32
+	ProviderFlags     uint32
+	ProviderId        GUID
+	CatalogEntryId    uint32
+	ProtocolChain     WSAProtocolChain
+	Version           int32
+	AddressFamily     int32
+	MaxSockAddr       int32
+	MinSockAddr       int32
+	SocketType        int32
+	Protocol          int32
+	ProtocolMaxOffset int32
+	NetworkByteOrder  int32
+	SecurityScheme    int32
+	MessageSize       uint32
+	ProviderReserved  uint32
+	ProtocolName      [WSAPROTOCOL_LEN + 1]uint16
+}
+
+type WSAProtocolChain struct {
+	ChainLen     int32
+	ChainEntries [MAX_PROTOCOL_CHAIN]uint32
+}
+
+type TCPKeepalive struct {
+	OnOff    uint32
+	Time     uint32
+	Interval uint32
+}
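+
+// TCPKeepalive is the input block for the SIO_KEEPALIVE_VALS ioctl defined
+// above; Time and Interval are in milliseconds (illustrative sketch; s is
+// an open socket handle and "unsafe" is imported):
+//
+//	ka := windows.TCPKeepalive{OnOff: 1, Time: 30000, Interval: 1000}
+//	var n uint32
+//	err := windows.WSAIoctl(s, windows.SIO_KEEPALIVE_VALS,
+//		(*byte)(unsafe.Pointer(&ka)), uint32(unsafe.Sizeof(ka)),
+//		nil, 0, &n, nil, 0)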
+
+type symbolicLinkReparseBuffer struct {
+	SubstituteNameOffset uint16
+	SubstituteNameLength uint16
+	PrintNameOffset      uint16
+	PrintNameLength      uint16
+	Flags                uint32
+	PathBuffer           [1]uint16
+}
+
+type mountPointReparseBuffer struct {
+	SubstituteNameOffset uint16
+	SubstituteNameLength uint16
+	PrintNameOffset      uint16
+	PrintNameLength      uint16
+	PathBuffer           [1]uint16
+}
+
+type reparseDataBuffer struct {
+	ReparseTag        uint32
+	ReparseDataLength uint16
+	Reserved          uint16
+
+	// GenericReparseBuffer
+	reparseBuffer byte
+}
+
+const (
+	FSCTL_GET_REPARSE_POINT          = 0x900A8
+	MAXIMUM_REPARSE_DATA_BUFFER_SIZE = 16 * 1024
+	IO_REPARSE_TAG_MOUNT_POINT       = 0xA0000003
+	IO_REPARSE_TAG_SYMLINK           = 0xA000000C
+	SYMBOLIC_LINK_FLAG_DIRECTORY     = 0x1
+)
+
+const (
+	ComputerNameNetBIOS                   = 0
+	ComputerNameDnsHostname               = 1
+	ComputerNameDnsDomain                 = 2
+	ComputerNameDnsFullyQualified         = 3
+	ComputerNamePhysicalNetBIOS           = 4
+	ComputerNamePhysicalDnsHostname       = 5
+	ComputerNamePhysicalDnsDomain         = 6
+	ComputerNamePhysicalDnsFullyQualified = 7
+	ComputerNameMax                       = 8
+)
+
+const (
+	MOVEFILE_REPLACE_EXISTING      = 0x1
+	MOVEFILE_COPY_ALLOWED          = 0x2
+	MOVEFILE_DELAY_UNTIL_REBOOT    = 0x4
+	MOVEFILE_WRITE_THROUGH         = 0x8
+	MOVEFILE_CREATE_HARDLINK       = 0x10
+	MOVEFILE_FAIL_IF_NOT_TRACKABLE = 0x20
+)
+
+const GAA_FLAG_INCLUDE_PREFIX = 0x00000010
+
+const (
+	IF_TYPE_OTHER              = 1
+	IF_TYPE_ETHERNET_CSMACD    = 6
+	IF_TYPE_ISO88025_TOKENRING = 9
+	IF_TYPE_PPP                = 23
+	IF_TYPE_SOFTWARE_LOOPBACK  = 24
+	IF_TYPE_ATM                = 37
+	IF_TYPE_IEEE80211          = 71
+	IF_TYPE_TUNNEL             = 131
+	IF_TYPE_IEEE1394           = 144
+)
+
+type SocketAddress struct {
+	Sockaddr       *syscall.RawSockaddrAny
+	SockaddrLength int32
+}
+
+type IpAdapterUnicastAddress struct {
+	Length             uint32
+	Flags              uint32
+	Next               *IpAdapterUnicastAddress
+	Address            SocketAddress
+	PrefixOrigin       int32
+	SuffixOrigin       int32
+	DadState           int32
+	ValidLifetime      uint32
+	PreferredLifetime  uint32
+	LeaseLifetime      uint32
+	OnLinkPrefixLength uint8
+}
+
+type IpAdapterAnycastAddress struct {
+	Length  uint32
+	Flags   uint32
+	Next    *IpAdapterAnycastAddress
+	Address SocketAddress
+}
+
+type IpAdapterMulticastAddress struct {
+	Length  uint32
+	Flags   uint32
+	Next    *IpAdapterMulticastAddress
+	Address SocketAddress
+}
+
+type IpAdapterDnsServerAdapter struct {
+	Length   uint32
+	Reserved uint32
+	Next     *IpAdapterDnsServerAdapter
+	Address  SocketAddress
+}
+
+type IpAdapterPrefix struct {
+	Length       uint32
+	Flags        uint32
+	Next         *IpAdapterPrefix
+	Address      SocketAddress
+	PrefixLength uint32
+}
+
+type IpAdapterAddresses struct {
+	Length                uint32
+	IfIndex               uint32
+	Next                  *IpAdapterAddresses
+	AdapterName           *byte
+	FirstUnicastAddress   *IpAdapterUnicastAddress
+	FirstAnycastAddress   *IpAdapterAnycastAddress
+	FirstMulticastAddress *IpAdapterMulticastAddress
+	FirstDnsServerAddress *IpAdapterDnsServerAdapter
+	DnsSuffix             *uint16
+	Description           *uint16
+	FriendlyName          *uint16
+	PhysicalAddress       [syscall.MAX_ADAPTER_ADDRESS_LENGTH]byte
+	PhysicalAddressLength uint32
+	Flags                 uint32
+	Mtu                   uint32
+	IfType                uint32
+	OperStatus            uint32
+	Ipv6IfIndex           uint32
+	ZoneIndices           [16]uint32
+	FirstPrefix           *IpAdapterPrefix
+	/* more fields might be present here. */
+}
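+
+// GetAdaptersAddresses (generated elsewhere in this package) reports the
+// needed buffer size through its size argument, so callers typically retry
+// once with the returned size and then walk the Next chain (illustrative
+// sketch; assumes "unsafe" is imported and ERROR_BUFFER_OVERFLOW is defined
+// elsewhere in this package):
+//
+//	size := uint32(15 * 1024) // recommended starting size per MSDN
+//	buf := make([]byte, size)
+//	aa := (*windows.IpAdapterAddresses)(unsafe.Pointer(&buf[0]))
+//	err := windows.GetAdaptersAddresses(windows.AF_UNSPEC, windows.GAA_FLAG_INCLUDE_PREFIX, 0, aa, &size)
+//	if err == windows.ERROR_BUFFER_OVERFLOW {
+//		buf = make([]byte, size)
+//		aa = (*windows.IpAdapterAddresses)(unsafe.Pointer(&buf[0]))
+//		err = windows.GetAdaptersAddresses(windows.AF_UNSPEC, windows.GAA_FLAG_INCLUDE_PREFIX, 0, aa, &size)
+//	}
+//	for ; aa != nil; aa = aa.Next {
+//		for ua := aa.FirstUnicastAddress; ua != nil; ua = ua.Next {
+//			_ = ua.Address.Sockaddr // *syscall.RawSockaddrAny
+//		}
+//	}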
+
+const (
+	IfOperStatusUp             = 1
+	IfOperStatusDown           = 2
+	IfOperStatusTesting        = 3
+	IfOperStatusUnknown        = 4
+	IfOperStatusDormant        = 5
+	IfOperStatusNotPresent     = 6
+	IfOperStatusLowerLayerDown = 7
+)
+
+// Console related constants used for the mode parameter to SetConsoleMode. See
+// https://docs.microsoft.com/en-us/windows/console/setconsolemode for details.
+
+const (
+	ENABLE_PROCESSED_INPUT        = 0x1
+	ENABLE_LINE_INPUT             = 0x2
+	ENABLE_ECHO_INPUT             = 0x4
+	ENABLE_WINDOW_INPUT           = 0x8
+	ENABLE_MOUSE_INPUT            = 0x10
+	ENABLE_INSERT_MODE            = 0x20
+	ENABLE_QUICK_EDIT_MODE        = 0x40
+	ENABLE_EXTENDED_FLAGS         = 0x80
+	ENABLE_AUTO_POSITION          = 0x100
+	ENABLE_VIRTUAL_TERMINAL_INPUT = 0x200
+
+	ENABLE_PROCESSED_OUTPUT            = 0x1
+	ENABLE_WRAP_AT_EOL_OUTPUT          = 0x2
+	ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x4
+	DISABLE_NEWLINE_AUTO_RETURN        = 0x8
+	ENABLE_LVB_GRID_WORLDWIDE          = 0x10
+)
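+
+// For example, enabling ANSI escape-sequence handling on a console output
+// handle is a read-modify-write of the mode (illustrative sketch;
+// GetConsoleMode/SetConsoleMode/GetStdHandle and STD_OUTPUT_HANDLE are
+// defined elsewhere in this package):
+//
+//	h, _ := windows.GetStdHandle(windows.STD_OUTPUT_HANDLE)
+//	var mode uint32
+//	if err := windows.GetConsoleMode(h, &mode); err == nil {
+//		windows.SetConsoleMode(h, mode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING)
+//	}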
+
+type Coord struct {
+	X int16
+	Y int16
+}
+
+type SmallRect struct {
+	Left   int16
+	Top    int16
+	Right  int16
+	Bottom int16
+}
+
+// Used with GetConsoleScreenBufferInfo to retrieve information about a console
+// screen buffer. See
+// https://docs.microsoft.com/en-us/windows/console/console-screen-buffer-info-str
+// for details.
+
+type ConsoleScreenBufferInfo struct {
+	Size              Coord
+	CursorPosition    Coord
+	Attributes        uint16
+	Window            SmallRect
+	MaximumWindowSize Coord
+}
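+
+// The visible terminal size comes from the Window rectangle rather than
+// Size, which covers the full scrollback buffer (illustrative sketch; h is
+// a console output handle, for example from GetStdHandle, and
+// GetConsoleScreenBufferInfo is generated elsewhere in this package):
+//
+//	var info windows.ConsoleScreenBufferInfo
+//	if err := windows.GetConsoleScreenBufferInfo(h, &info); err == nil {
+//		width := int(info.Window.Right - info.Window.Left + 1)
+//		height := int(info.Window.Bottom - info.Window.Top + 1)
+//		_, _ = width, height
+//	}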
+
+const UNIX_PATH_MAX = 108 // defined in afunix.h
diff --git a/vendor/golang.org/x/sys/windows/types_windows_386.go b/vendor/golang.org/x/sys/windows/types_windows_386.go
new file mode 100644
index 0000000..fe0ddd0
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/types_windows_386.go
@@ -0,0 +1,22 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package windows
+
+type WSAData struct {
+	Version      uint16
+	HighVersion  uint16
+	Description  [WSADESCRIPTION_LEN + 1]byte
+	SystemStatus [WSASYS_STATUS_LEN + 1]byte
+	MaxSockets   uint16
+	MaxUdpDg     uint16
+	VendorInfo   *byte
+}
+
+type Servent struct {
+	Name    *byte
+	Aliases **byte
+	Port    uint16
+	Proto   *byte
+}
diff --git a/vendor/golang.org/x/sys/windows/types_windows_amd64.go b/vendor/golang.org/x/sys/windows/types_windows_amd64.go
new file mode 100644
index 0000000..7e154c2
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/types_windows_amd64.go
@@ -0,0 +1,22 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package windows
+
+type WSAData struct {
+	Version      uint16
+	HighVersion  uint16
+	MaxSockets   uint16
+	MaxUdpDg     uint16
+	VendorInfo   *byte
+	Description  [WSADESCRIPTION_LEN + 1]byte
+	SystemStatus [WSASYS_STATUS_LEN + 1]byte
+}
+
+type Servent struct {
+	Name    *byte
+	Aliases **byte
+	Proto   *byte
+	Port    uint16
+}
diff --git a/vendor/golang.org/x/sys/windows/types_windows_arm.go b/vendor/golang.org/x/sys/windows/types_windows_arm.go
new file mode 100644
index 0000000..74571e3
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/types_windows_arm.go
@@ -0,0 +1,22 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package windows
+
+type WSAData struct {
+	Version      uint16
+	HighVersion  uint16
+	Description  [WSADESCRIPTION_LEN + 1]byte
+	SystemStatus [WSASYS_STATUS_LEN + 1]byte
+	MaxSockets   uint16
+	MaxUdpDg     uint16
+	VendorInfo   *byte
+}
+
+type Servent struct {
+	Name    *byte
+	Aliases **byte
+	Port    uint16
+	Proto   *byte
+}
diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
new file mode 100644
index 0000000..fc56aec
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
@@ -0,0 +1,2700 @@
+// Code generated by 'go generate'; DO NOT EDIT.
+
+package windows
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+var _ unsafe.Pointer
+
+// Do the interface allocations only once for common
+// Errno values.
+const (
+	errnoERROR_IO_PENDING = 997
+)
+
+var (
+	errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
+)
+
+// errnoErr returns common boxed Errno values, to prevent
+// allocations at runtime.
+func errnoErr(e syscall.Errno) error {
+	switch e {
+	case 0:
+		return nil
+	case errnoERROR_IO_PENDING:
+		return errERROR_IO_PENDING
+	}
+	// TODO: add more here, after collecting data on the common
+	// error values seen on Windows. (perhaps when running
+	// all.bat?)
+	return e
+}
+
+var (
+	modadvapi32 = NewLazySystemDLL("advapi32.dll")
+	modkernel32 = NewLazySystemDLL("kernel32.dll")
+	modshell32  = NewLazySystemDLL("shell32.dll")
+	modmswsock  = NewLazySystemDLL("mswsock.dll")
+	modcrypt32  = NewLazySystemDLL("crypt32.dll")
+	modws2_32   = NewLazySystemDLL("ws2_32.dll")
+	moddnsapi   = NewLazySystemDLL("dnsapi.dll")
+	modiphlpapi = NewLazySystemDLL("iphlpapi.dll")
+	modsecur32  = NewLazySystemDLL("secur32.dll")
+	modnetapi32 = NewLazySystemDLL("netapi32.dll")
+	moduserenv  = NewLazySystemDLL("userenv.dll")
+
+	procRegisterEventSourceW               = modadvapi32.NewProc("RegisterEventSourceW")
+	procDeregisterEventSource              = modadvapi32.NewProc("DeregisterEventSource")
+	procReportEventW                       = modadvapi32.NewProc("ReportEventW")
+	procOpenSCManagerW                     = modadvapi32.NewProc("OpenSCManagerW")
+	procCloseServiceHandle                 = modadvapi32.NewProc("CloseServiceHandle")
+	procCreateServiceW                     = modadvapi32.NewProc("CreateServiceW")
+	procOpenServiceW                       = modadvapi32.NewProc("OpenServiceW")
+	procDeleteService                      = modadvapi32.NewProc("DeleteService")
+	procStartServiceW                      = modadvapi32.NewProc("StartServiceW")
+	procQueryServiceStatus                 = modadvapi32.NewProc("QueryServiceStatus")
+	procControlService                     = modadvapi32.NewProc("ControlService")
+	procStartServiceCtrlDispatcherW        = modadvapi32.NewProc("StartServiceCtrlDispatcherW")
+	procSetServiceStatus                   = modadvapi32.NewProc("SetServiceStatus")
+	procChangeServiceConfigW               = modadvapi32.NewProc("ChangeServiceConfigW")
+	procQueryServiceConfigW                = modadvapi32.NewProc("QueryServiceConfigW")
+	procChangeServiceConfig2W              = modadvapi32.NewProc("ChangeServiceConfig2W")
+	procQueryServiceConfig2W               = modadvapi32.NewProc("QueryServiceConfig2W")
+	procEnumServicesStatusExW              = modadvapi32.NewProc("EnumServicesStatusExW")
+	procQueryServiceStatusEx               = modadvapi32.NewProc("QueryServiceStatusEx")
+	procGetLastError                       = modkernel32.NewProc("GetLastError")
+	procLoadLibraryW                       = modkernel32.NewProc("LoadLibraryW")
+	procLoadLibraryExW                     = modkernel32.NewProc("LoadLibraryExW")
+	procFreeLibrary                        = modkernel32.NewProc("FreeLibrary")
+	procGetProcAddress                     = modkernel32.NewProc("GetProcAddress")
+	procGetVersion                         = modkernel32.NewProc("GetVersion")
+	procFormatMessageW                     = modkernel32.NewProc("FormatMessageW")
+	procExitProcess                        = modkernel32.NewProc("ExitProcess")
+	procCreateFileW                        = modkernel32.NewProc("CreateFileW")
+	procReadFile                           = modkernel32.NewProc("ReadFile")
+	procWriteFile                          = modkernel32.NewProc("WriteFile")
+	procSetFilePointer                     = modkernel32.NewProc("SetFilePointer")
+	procCloseHandle                        = modkernel32.NewProc("CloseHandle")
+	procGetStdHandle                       = modkernel32.NewProc("GetStdHandle")
+	procSetStdHandle                       = modkernel32.NewProc("SetStdHandle")
+	procFindFirstFileW                     = modkernel32.NewProc("FindFirstFileW")
+	procFindNextFileW                      = modkernel32.NewProc("FindNextFileW")
+	procFindClose                          = modkernel32.NewProc("FindClose")
+	procGetFileInformationByHandle         = modkernel32.NewProc("GetFileInformationByHandle")
+	procGetCurrentDirectoryW               = modkernel32.NewProc("GetCurrentDirectoryW")
+	procSetCurrentDirectoryW               = modkernel32.NewProc("SetCurrentDirectoryW")
+	procCreateDirectoryW                   = modkernel32.NewProc("CreateDirectoryW")
+	procRemoveDirectoryW                   = modkernel32.NewProc("RemoveDirectoryW")
+	procDeleteFileW                        = modkernel32.NewProc("DeleteFileW")
+	procMoveFileW                          = modkernel32.NewProc("MoveFileW")
+	procMoveFileExW                        = modkernel32.NewProc("MoveFileExW")
+	procGetComputerNameW                   = modkernel32.NewProc("GetComputerNameW")
+	procGetComputerNameExW                 = modkernel32.NewProc("GetComputerNameExW")
+	procSetEndOfFile                       = modkernel32.NewProc("SetEndOfFile")
+	procGetSystemTimeAsFileTime            = modkernel32.NewProc("GetSystemTimeAsFileTime")
+	procGetSystemTimePreciseAsFileTime     = modkernel32.NewProc("GetSystemTimePreciseAsFileTime")
+	procGetTimeZoneInformation             = modkernel32.NewProc("GetTimeZoneInformation")
+	procCreateIoCompletionPort             = modkernel32.NewProc("CreateIoCompletionPort")
+	procGetQueuedCompletionStatus          = modkernel32.NewProc("GetQueuedCompletionStatus")
+	procPostQueuedCompletionStatus         = modkernel32.NewProc("PostQueuedCompletionStatus")
+	procCancelIo                           = modkernel32.NewProc("CancelIo")
+	procCancelIoEx                         = modkernel32.NewProc("CancelIoEx")
+	procCreateProcessW                     = modkernel32.NewProc("CreateProcessW")
+	procOpenProcess                        = modkernel32.NewProc("OpenProcess")
+	procTerminateProcess                   = modkernel32.NewProc("TerminateProcess")
+	procGetExitCodeProcess                 = modkernel32.NewProc("GetExitCodeProcess")
+	procGetStartupInfoW                    = modkernel32.NewProc("GetStartupInfoW")
+	procGetCurrentProcess                  = modkernel32.NewProc("GetCurrentProcess")
+	procGetProcessTimes                    = modkernel32.NewProc("GetProcessTimes")
+	procDuplicateHandle                    = modkernel32.NewProc("DuplicateHandle")
+	procWaitForSingleObject                = modkernel32.NewProc("WaitForSingleObject")
+	procGetTempPathW                       = modkernel32.NewProc("GetTempPathW")
+	procCreatePipe                         = modkernel32.NewProc("CreatePipe")
+	procGetFileType                        = modkernel32.NewProc("GetFileType")
+	procCryptAcquireContextW               = modadvapi32.NewProc("CryptAcquireContextW")
+	procCryptReleaseContext                = modadvapi32.NewProc("CryptReleaseContext")
+	procCryptGenRandom                     = modadvapi32.NewProc("CryptGenRandom")
+	procGetEnvironmentStringsW             = modkernel32.NewProc("GetEnvironmentStringsW")
+	procFreeEnvironmentStringsW            = modkernel32.NewProc("FreeEnvironmentStringsW")
+	procGetEnvironmentVariableW            = modkernel32.NewProc("GetEnvironmentVariableW")
+	procSetEnvironmentVariableW            = modkernel32.NewProc("SetEnvironmentVariableW")
+	procSetFileTime                        = modkernel32.NewProc("SetFileTime")
+	procGetFileAttributesW                 = modkernel32.NewProc("GetFileAttributesW")
+	procSetFileAttributesW                 = modkernel32.NewProc("SetFileAttributesW")
+	procGetFileAttributesExW               = modkernel32.NewProc("GetFileAttributesExW")
+	procGetCommandLineW                    = modkernel32.NewProc("GetCommandLineW")
+	procCommandLineToArgvW                 = modshell32.NewProc("CommandLineToArgvW")
+	procLocalFree                          = modkernel32.NewProc("LocalFree")
+	procSetHandleInformation               = modkernel32.NewProc("SetHandleInformation")
+	procFlushFileBuffers                   = modkernel32.NewProc("FlushFileBuffers")
+	procGetFullPathNameW                   = modkernel32.NewProc("GetFullPathNameW")
+	procGetLongPathNameW                   = modkernel32.NewProc("GetLongPathNameW")
+	procGetShortPathNameW                  = modkernel32.NewProc("GetShortPathNameW")
+	procCreateFileMappingW                 = modkernel32.NewProc("CreateFileMappingW")
+	procMapViewOfFile                      = modkernel32.NewProc("MapViewOfFile")
+	procUnmapViewOfFile                    = modkernel32.NewProc("UnmapViewOfFile")
+	procFlushViewOfFile                    = modkernel32.NewProc("FlushViewOfFile")
+	procVirtualLock                        = modkernel32.NewProc("VirtualLock")
+	procVirtualUnlock                      = modkernel32.NewProc("VirtualUnlock")
+	procVirtualAlloc                       = modkernel32.NewProc("VirtualAlloc")
+	procVirtualFree                        = modkernel32.NewProc("VirtualFree")
+	procVirtualProtect                     = modkernel32.NewProc("VirtualProtect")
+	procTransmitFile                       = modmswsock.NewProc("TransmitFile")
+	procReadDirectoryChangesW              = modkernel32.NewProc("ReadDirectoryChangesW")
+	procCertOpenSystemStoreW               = modcrypt32.NewProc("CertOpenSystemStoreW")
+	procCertOpenStore                      = modcrypt32.NewProc("CertOpenStore")
+	procCertEnumCertificatesInStore        = modcrypt32.NewProc("CertEnumCertificatesInStore")
+	procCertAddCertificateContextToStore   = modcrypt32.NewProc("CertAddCertificateContextToStore")
+	procCertCloseStore                     = modcrypt32.NewProc("CertCloseStore")
+	procCertGetCertificateChain            = modcrypt32.NewProc("CertGetCertificateChain")
+	procCertFreeCertificateChain           = modcrypt32.NewProc("CertFreeCertificateChain")
+	procCertCreateCertificateContext       = modcrypt32.NewProc("CertCreateCertificateContext")
+	procCertFreeCertificateContext         = modcrypt32.NewProc("CertFreeCertificateContext")
+	procCertVerifyCertificateChainPolicy   = modcrypt32.NewProc("CertVerifyCertificateChainPolicy")
+	procRegOpenKeyExW                      = modadvapi32.NewProc("RegOpenKeyExW")
+	procRegCloseKey                        = modadvapi32.NewProc("RegCloseKey")
+	procRegQueryInfoKeyW                   = modadvapi32.NewProc("RegQueryInfoKeyW")
+	procRegEnumKeyExW                      = modadvapi32.NewProc("RegEnumKeyExW")
+	procRegQueryValueExW                   = modadvapi32.NewProc("RegQueryValueExW")
+	procGetCurrentProcessId                = modkernel32.NewProc("GetCurrentProcessId")
+	procGetConsoleMode                     = modkernel32.NewProc("GetConsoleMode")
+	procSetConsoleMode                     = modkernel32.NewProc("SetConsoleMode")
+	procGetConsoleScreenBufferInfo         = modkernel32.NewProc("GetConsoleScreenBufferInfo")
+	procWriteConsoleW                      = modkernel32.NewProc("WriteConsoleW")
+	procReadConsoleW                       = modkernel32.NewProc("ReadConsoleW")
+	procCreateToolhelp32Snapshot           = modkernel32.NewProc("CreateToolhelp32Snapshot")
+	procProcess32FirstW                    = modkernel32.NewProc("Process32FirstW")
+	procProcess32NextW                     = modkernel32.NewProc("Process32NextW")
+	procDeviceIoControl                    = modkernel32.NewProc("DeviceIoControl")
+	procCreateSymbolicLinkW                = modkernel32.NewProc("CreateSymbolicLinkW")
+	procCreateHardLinkW                    = modkernel32.NewProc("CreateHardLinkW")
+	procGetCurrentThreadId                 = modkernel32.NewProc("GetCurrentThreadId")
+	procCreateEventW                       = modkernel32.NewProc("CreateEventW")
+	procCreateEventExW                     = modkernel32.NewProc("CreateEventExW")
+	procOpenEventW                         = modkernel32.NewProc("OpenEventW")
+	procSetEvent                           = modkernel32.NewProc("SetEvent")
+	procResetEvent                         = modkernel32.NewProc("ResetEvent")
+	procPulseEvent                         = modkernel32.NewProc("PulseEvent")
+	procDefineDosDeviceW                   = modkernel32.NewProc("DefineDosDeviceW")
+	procDeleteVolumeMountPointW            = modkernel32.NewProc("DeleteVolumeMountPointW")
+	procFindFirstVolumeW                   = modkernel32.NewProc("FindFirstVolumeW")
+	procFindFirstVolumeMountPointW         = modkernel32.NewProc("FindFirstVolumeMountPointW")
+	procFindNextVolumeW                    = modkernel32.NewProc("FindNextVolumeW")
+	procFindNextVolumeMountPointW          = modkernel32.NewProc("FindNextVolumeMountPointW")
+	procFindVolumeClose                    = modkernel32.NewProc("FindVolumeClose")
+	procFindVolumeMountPointClose          = modkernel32.NewProc("FindVolumeMountPointClose")
+	procGetDriveTypeW                      = modkernel32.NewProc("GetDriveTypeW")
+	procGetLogicalDrives                   = modkernel32.NewProc("GetLogicalDrives")
+	procGetLogicalDriveStringsW            = modkernel32.NewProc("GetLogicalDriveStringsW")
+	procGetVolumeInformationW              = modkernel32.NewProc("GetVolumeInformationW")
+	procGetVolumeInformationByHandleW      = modkernel32.NewProc("GetVolumeInformationByHandleW")
+	procGetVolumeNameForVolumeMountPointW  = modkernel32.NewProc("GetVolumeNameForVolumeMountPointW")
+	procGetVolumePathNameW                 = modkernel32.NewProc("GetVolumePathNameW")
+	procGetVolumePathNamesForVolumeNameW   = modkernel32.NewProc("GetVolumePathNamesForVolumeNameW")
+	procQueryDosDeviceW                    = modkernel32.NewProc("QueryDosDeviceW")
+	procSetVolumeLabelW                    = modkernel32.NewProc("SetVolumeLabelW")
+	procSetVolumeMountPointW               = modkernel32.NewProc("SetVolumeMountPointW")
+	procWSAStartup                         = modws2_32.NewProc("WSAStartup")
+	procWSACleanup                         = modws2_32.NewProc("WSACleanup")
+	procWSAIoctl                           = modws2_32.NewProc("WSAIoctl")
+	procsocket                             = modws2_32.NewProc("socket")
+	procsetsockopt                         = modws2_32.NewProc("setsockopt")
+	procgetsockopt                         = modws2_32.NewProc("getsockopt")
+	procbind                               = modws2_32.NewProc("bind")
+	procconnect                            = modws2_32.NewProc("connect")
+	procgetsockname                        = modws2_32.NewProc("getsockname")
+	procgetpeername                        = modws2_32.NewProc("getpeername")
+	proclisten                             = modws2_32.NewProc("listen")
+	procshutdown                           = modws2_32.NewProc("shutdown")
+	procclosesocket                        = modws2_32.NewProc("closesocket")
+	procAcceptEx                           = modmswsock.NewProc("AcceptEx")
+	procGetAcceptExSockaddrs               = modmswsock.NewProc("GetAcceptExSockaddrs")
+	procWSARecv                            = modws2_32.NewProc("WSARecv")
+	procWSASend                            = modws2_32.NewProc("WSASend")
+	procWSARecvFrom                        = modws2_32.NewProc("WSARecvFrom")
+	procWSASendTo                          = modws2_32.NewProc("WSASendTo")
+	procgethostbyname                      = modws2_32.NewProc("gethostbyname")
+	procgetservbyname                      = modws2_32.NewProc("getservbyname")
+	procntohs                              = modws2_32.NewProc("ntohs")
+	procgetprotobyname                     = modws2_32.NewProc("getprotobyname")
+	procDnsQuery_W                         = moddnsapi.NewProc("DnsQuery_W")
+	procDnsRecordListFree                  = moddnsapi.NewProc("DnsRecordListFree")
+	procDnsNameCompare_W                   = moddnsapi.NewProc("DnsNameCompare_W")
+	procGetAddrInfoW                       = modws2_32.NewProc("GetAddrInfoW")
+	procFreeAddrInfoW                      = modws2_32.NewProc("FreeAddrInfoW")
+	procGetIfEntry                         = modiphlpapi.NewProc("GetIfEntry")
+	procGetAdaptersInfo                    = modiphlpapi.NewProc("GetAdaptersInfo")
+	procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes")
+	procWSAEnumProtocolsW                  = modws2_32.NewProc("WSAEnumProtocolsW")
+	procGetAdaptersAddresses               = modiphlpapi.NewProc("GetAdaptersAddresses")
+	procGetACP                             = modkernel32.NewProc("GetACP")
+	procMultiByteToWideChar                = modkernel32.NewProc("MultiByteToWideChar")
+	procTranslateNameW                     = modsecur32.NewProc("TranslateNameW")
+	procGetUserNameExW                     = modsecur32.NewProc("GetUserNameExW")
+	procNetUserGetInfo                     = modnetapi32.NewProc("NetUserGetInfo")
+	procNetGetJoinInformation              = modnetapi32.NewProc("NetGetJoinInformation")
+	procNetApiBufferFree                   = modnetapi32.NewProc("NetApiBufferFree")
+	procLookupAccountSidW                  = modadvapi32.NewProc("LookupAccountSidW")
+	procLookupAccountNameW                 = modadvapi32.NewProc("LookupAccountNameW")
+	procConvertSidToStringSidW             = modadvapi32.NewProc("ConvertSidToStringSidW")
+	procConvertStringSidToSidW             = modadvapi32.NewProc("ConvertStringSidToSidW")
+	procGetLengthSid                       = modadvapi32.NewProc("GetLengthSid")
+	procCopySid                            = modadvapi32.NewProc("CopySid")
+	procAllocateAndInitializeSid           = modadvapi32.NewProc("AllocateAndInitializeSid")
+	procFreeSid                            = modadvapi32.NewProc("FreeSid")
+	procEqualSid                           = modadvapi32.NewProc("EqualSid")
+	procCheckTokenMembership               = modadvapi32.NewProc("CheckTokenMembership")
+	procOpenProcessToken                   = modadvapi32.NewProc("OpenProcessToken")
+	procGetTokenInformation                = modadvapi32.NewProc("GetTokenInformation")
+	procGetUserProfileDirectoryW           = moduserenv.NewProc("GetUserProfileDirectoryW")
+)
+
+func RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Handle, err error) {
+	r0, _, e1 := syscall.Syscall(procRegisterEventSourceW.Addr(), 2, uintptr(unsafe.Pointer(uncServerName)), uintptr(unsafe.Pointer(sourceName)), 0)
+	handle = Handle(r0)
+	if handle == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func DeregisterEventSource(handle Handle) (err error) {
+	r1, _, e1 := syscall.Syscall(procDeregisterEventSource.Addr(), 1, uintptr(handle), 0, 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func ReportEvent(log Handle, etype uint16, category uint16, eventId uint32, usrSId uintptr, numStrings uint16, dataSize uint32, strings **uint16, rawData *byte) (err error) {
+	r1, _, e1 := syscall.Syscall9(procReportEventW.Addr(), 9, uintptr(log), uintptr(etype), uintptr(category), uintptr(eventId), uintptr(usrSId), uintptr(numStrings), uintptr(dataSize), uintptr(unsafe.Pointer(strings)), uintptr(unsafe.Pointer(rawData)))
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (handle Handle, err error) {
+	r0, _, e1 := syscall.Syscall(procOpenSCManagerW.Addr(), 3, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(databaseName)), uintptr(access))
+	handle = Handle(r0)
+	if handle == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func CloseServiceHandle(handle Handle) (err error) {
+	r1, _, e1 := syscall.Syscall(procCloseServiceHandle.Addr(), 1, uintptr(handle), 0, 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access uint32, srvType uint32, startType uint32, errCtl uint32, pathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16) (handle Handle, err error) {
+	r0, _, e1 := syscall.Syscall15(procCreateServiceW.Addr(), 13, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(unsafe.Pointer(displayName)), uintptr(access), uintptr(srvType), uintptr(startType), uintptr(errCtl), uintptr(unsafe.Pointer(pathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), 0, 0)
+	handle = Handle(r0)
+	if handle == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func OpenService(mgr Handle, serviceName *uint16, access uint32) (handle Handle, err error) {
+	r0, _, e1 := syscall.Syscall(procOpenServiceW.Addr(), 3, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(access))
+	handle = Handle(r0)
+	if handle == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func DeleteService(service Handle) (err error) {
+	r1, _, e1 := syscall.Syscall(procDeleteService.Addr(), 1, uintptr(service), 0, 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func StartService(service Handle, numArgs uint32, argVectors **uint16) (err error) {
+	r1, _, e1 := syscall.Syscall(procStartServiceW.Addr(), 3, uintptr(service), uintptr(numArgs), uintptr(unsafe.Pointer(argVectors)))
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) {
+	r1, _, e1 := syscall.Syscall(procQueryServiceStatus.Addr(), 2, uintptr(service), uintptr(unsafe.Pointer(status)), 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err error) {
+	r1, _, e1 := syscall.Syscall(procControlService.Addr(), 3, uintptr(service), uintptr(control), uintptr(unsafe.Pointer(status)))
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) {
+	r1, _, e1 := syscall.Syscall(procStartServiceCtrlDispatcherW.Addr(), 1, uintptr(unsafe.Pointer(serviceTable)), 0, 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error) {
+	r1, _, e1 := syscall.Syscall(procSetServiceStatus.Addr(), 2, uintptr(service), uintptr(unsafe.Pointer(serviceStatus)), 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func ChangeServiceConfig(service Handle, serviceType uint32, startType uint32, errorControl uint32, binaryPathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16, displayName *uint16) (err error) {
+	r1, _, e1 := syscall.Syscall12(procChangeServiceConfigW.Addr(), 11, uintptr(service), uintptr(serviceType), uintptr(startType), uintptr(errorControl), uintptr(unsafe.Pointer(binaryPathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), uintptr(unsafe.Pointer(displayName)), 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func QueryServiceConfig(service Handle, serviceConfig *QUERY_SERVICE_CONFIG, bufSize uint32, bytesNeeded *uint32) (err error) {
+	r1, _, e1 := syscall.Syscall6(procQueryServiceConfigW.Addr(), 4, uintptr(service), uintptr(unsafe.Pointer(serviceConfig)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func ChangeServiceConfig2(service Handle, infoLevel uint32, info *byte) (err error) {
+	r1, _, e1 := syscall.Syscall(procChangeServiceConfig2W.Addr(), 3, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(info)))
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) {
+	r1, _, e1 := syscall.Syscall6(procQueryServiceConfig2W.Addr(), 5, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serviceState uint32, services *byte, bufSize uint32, bytesNeeded *uint32, servicesReturned *uint32, resumeHandle *uint32, groupName *uint16) (err error) {
+	r1, _, e1 := syscall.Syscall12(procEnumServicesStatusExW.Addr(), 10, uintptr(mgr), uintptr(infoLevel), uintptr(serviceType), uintptr(serviceState), uintptr(unsafe.Pointer(services)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)), uintptr(unsafe.Pointer(resumeHandle)), uintptr(unsafe.Pointer(groupName)), 0, 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) {
+	r1, _, e1 := syscall.Syscall6(procQueryServiceStatusEx.Addr(), 5, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func GetLastError() (lasterr error) {
+	r0, _, _ := syscall.Syscall(procGetLastError.Addr(), 0, 0, 0, 0)
+	if r0 != 0 {
+		lasterr = syscall.Errno(r0)
+	}
+	return
+}
+
+func LoadLibrary(libname string) (handle Handle, err error) {
+	var _p0 *uint16
+	_p0, err = syscall.UTF16PtrFromString(libname)
+	if err != nil {
+		return
+	}
+	return _LoadLibrary(_p0)
+}
+
+func _LoadLibrary(libname *uint16) (handle Handle, err error) {
+	r0, _, e1 := syscall.Syscall(procLoadLibraryW.Addr(), 1, uintptr(unsafe.Pointer(libname)), 0, 0)
+	handle = Handle(r0)
+	if handle == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func LoadLibraryEx(libname string, zero Handle, flags uintptr) (handle Handle, err error) {
+	var _p0 *uint16
+	_p0, err = syscall.UTF16PtrFromString(libname)
+	if err != nil {
+		return
+	}
+	return _LoadLibraryEx(_p0, zero, flags)
+}
+
+func _LoadLibraryEx(libname *uint16, zero Handle, flags uintptr) (handle Handle, err error) {
+	r0, _, e1 := syscall.Syscall(procLoadLibraryExW.Addr(), 3, uintptr(unsafe.Pointer(libname)), uintptr(zero), uintptr(flags))
+	handle = Handle(r0)
+	if handle == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func FreeLibrary(handle Handle) (err error) {
+	r1, _, e1 := syscall.Syscall(procFreeLibrary.Addr(), 1, uintptr(handle), 0, 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func GetProcAddress(module Handle, procname string) (proc uintptr, err error) {
+	var _p0 *byte
+	_p0, err = syscall.BytePtrFromString(procname)
+	if err != nil {
+		return
+	}
+	return _GetProcAddress(module, _p0)
+}
+
+func _GetProcAddress(module Handle, procname *byte) (proc uintptr, err error) {
+	r0, _, e1 := syscall.Syscall(procGetProcAddress.Addr(), 2, uintptr(module), uintptr(unsafe.Pointer(procname)), 0)
+	proc = uintptr(r0)
+	if proc == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func GetVersion() (ver uint32, err error) {
+	r0, _, e1 := syscall.Syscall(procGetVersion.Addr(), 0, 0, 0, 0)
+	ver = uint32(r0)
+	if ver == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, buf []uint16, args *byte) (n uint32, err error) {
+	var _p0 *uint16
+	if len(buf) > 0 {
+		_p0 = &buf[0]
+	}
+	r0, _, e1 := syscall.Syscall9(procFormatMessageW.Addr(), 7, uintptr(flags), uintptr(msgsrc), uintptr(msgid), uintptr(langid), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(args)), 0, 0)
+	n = uint32(r0)
+	if n == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func ExitProcess(exitcode uint32) {
+	syscall.Syscall(procExitProcess.Addr(), 1, uintptr(exitcode), 0, 0)
+	return
+}
+
+func CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile int32) (handle Handle, err error) {
+	r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0)
+	handle = Handle(r0)
+	if handle == InvalidHandle {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func ReadFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) {
+	var _p0 *byte
+	if len(buf) > 0 {
+		_p0 = &buf[0]
+	}
+	r1, _, e1 := syscall.Syscall6(procReadFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)), 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func WriteFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) {
+	var _p0 *byte
+	if len(buf) > 0 {
+		_p0 = &buf[0]
+	}
+	r1, _, e1 := syscall.Syscall6(procWriteFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)), 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence uint32) (newlowoffset uint32, err error) {
+	r0, _, e1 := syscall.Syscall6(procSetFilePointer.Addr(), 4, uintptr(handle), uintptr(lowoffset), uintptr(unsafe.Pointer(highoffsetptr)), uintptr(whence), 0, 0)
+	newlowoffset = uint32(r0)
+	if newlowoffset == 0xffffffff {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func CloseHandle(handle Handle) (err error) {
+	r1, _, e1 := syscall.Syscall(procCloseHandle.Addr(), 1, uintptr(handle), 0, 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func GetStdHandle(stdhandle uint32) (handle Handle, err error) {
+	r0, _, e1 := syscall.Syscall(procGetStdHandle.Addr(), 1, uintptr(stdhandle), 0, 0)
+	handle = Handle(r0)
+	if handle == InvalidHandle {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func SetStdHandle(stdhandle uint32, handle Handle) (err error) {
+	r1, _, e1 := syscall.Syscall(procSetStdHandle.Addr(), 2, uintptr(stdhandle), uintptr(handle), 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func findFirstFile1(name *uint16, data *win32finddata1) (handle Handle, err error) {
+	r0, _, e1 := syscall.Syscall(procFindFirstFileW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(data)), 0)
+	handle = Handle(r0)
+	if handle == InvalidHandle {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func findNextFile1(handle Handle, data *win32finddata1) (err error) {
+	r1, _, e1 := syscall.Syscall(procFindNextFileW.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func FindClose(handle Handle) (err error) {
+	r1, _, e1 := syscall.Syscall(procFindClose.Addr(), 1, uintptr(handle), 0, 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (err error) {
+	r1, _, e1 := syscall.Syscall(procGetFileInformationByHandle.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) {
+	r0, _, e1 := syscall.Syscall(procGetCurrentDirectoryW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0)
+	n = uint32(r0)
+	if n == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func SetCurrentDirectory(path *uint16) (err error) {
+	r1, _, e1 := syscall.Syscall(procSetCurrentDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) {
+	r1, _, e1 := syscall.Syscall(procCreateDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(sa)), 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func RemoveDirectory(path *uint16) (err error) {
+	r1, _, e1 := syscall.Syscall(procRemoveDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func DeleteFile(path *uint16) (err error) {
+	r1, _, e1 := syscall.Syscall(procDeleteFileW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func MoveFile(from *uint16, to *uint16) (err error) {
+	r1, _, e1 := syscall.Syscall(procMoveFileW.Addr(), 2, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) {
+	r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags))
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func GetComputerName(buf *uint16, n *uint32) (err error) {
+	r1, _, e1 := syscall.Syscall(procGetComputerNameW.Addr(), 2, uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n)), 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) {
+	r1, _, e1 := syscall.Syscall(procGetComputerNameExW.Addr(), 3, uintptr(nametype), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n)))
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func SetEndOfFile(handle Handle) (err error) {
+	r1, _, e1 := syscall.Syscall(procSetEndOfFile.Addr(), 1, uintptr(handle), 0, 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func GetSystemTimeAsFileTime(time *Filetime) {
+	syscall.Syscall(procGetSystemTimeAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0)
+	return
+}
+
+func GetSystemTimePreciseAsFileTime(time *Filetime) {
+	syscall.Syscall(procGetSystemTimePreciseAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0)
+	return
+}
+
+func GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) {
+	r0, _, e1 := syscall.Syscall(procGetTimeZoneInformation.Addr(), 1, uintptr(unsafe.Pointer(tzi)), 0, 0)
+	rc = uint32(r0)
+	if rc == 0xffffffff {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func CreateIoCompletionPort(filehandle Handle, cphandle Handle, key uint32, threadcnt uint32) (handle Handle, err error) {
+	r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(filehandle), uintptr(cphandle), uintptr(key), uintptr(threadcnt), 0, 0)
+	handle = Handle(r0)
+	if handle == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uint32, overlapped **Overlapped, timeout uint32) (err error) {
+	r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(cphandle), uintptr(unsafe.Pointer(qty)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(overlapped)), uintptr(timeout), 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uint32, overlapped *Overlapped) (err error) {
+	r1, _, e1 := syscall.Syscall6(procPostQueuedCompletionStatus.Addr(), 4, uintptr(cphandle), uintptr(qty), uintptr(key), uintptr(unsafe.Pointer(overlapped)), 0, 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func CancelIo(s Handle) (err error) {
+	r1, _, e1 := syscall.Syscall(procCancelIo.Addr(), 1, uintptr(s), 0, 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func CancelIoEx(s Handle, o *Overlapped) (err error) {
+	r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(s), uintptr(unsafe.Pointer(o)), 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityAttributes, threadSecurity *SecurityAttributes, inheritHandles bool, creationFlags uint32, env *uint16, currentDir *uint16, startupInfo *StartupInfo, outProcInfo *ProcessInformation) (err error) {
+	var _p0 uint32
+	if inheritHandles {
+		_p0 = 1
+	} else {
+		_p0 = 0
+	}
+	r1, _, e1 := syscall.Syscall12(procCreateProcessW.Addr(), 10, uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo)), 0, 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func OpenProcess(da uint32, inheritHandle bool, pid uint32) (handle Handle, err error) {
+	var _p0 uint32
+	if inheritHandle {
+		_p0 = 1
+	} else {
+		_p0 = 0
+	}
+	r0, _, e1 := syscall.Syscall(procOpenProcess.Addr(), 3, uintptr(da), uintptr(_p0), uintptr(pid))
+	handle = Handle(r0)
+	if handle == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func TerminateProcess(handle Handle, exitcode uint32) (err error) {
+	r1, _, e1 := syscall.Syscall(procTerminateProcess.Addr(), 2, uintptr(handle), uintptr(exitcode), 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) {
+	r1, _, e1 := syscall.Syscall(procGetExitCodeProcess.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(exitcode)), 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func GetStartupInfo(startupInfo *StartupInfo) (err error) {
+	r1, _, e1 := syscall.Syscall(procGetStartupInfoW.Addr(), 1, uintptr(unsafe.Pointer(startupInfo)), 0, 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func GetCurrentProcess() (pseudoHandle Handle, err error) {
+	r0, _, e1 := syscall.Syscall(procGetCurrentProcess.Addr(), 0, 0, 0, 0)
+	pseudoHandle = Handle(r0)
+	if pseudoHandle == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error) {
+	r1, _, e1 := syscall.Syscall6(procGetProcessTimes.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(creationTime)), uintptr(unsafe.Pointer(exitTime)), uintptr(unsafe.Pointer(kernelTime)), uintptr(unsafe.Pointer(userTime)), 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error) {
+	var _p0 uint32
+	if bInheritHandle {
+		_p0 = 1
+	} else {
+		_p0 = 0
+	}
+	r1, _, e1 := syscall.Syscall9(procDuplicateHandle.Addr(), 7, uintptr(hSourceProcessHandle), uintptr(hSourceHandle), uintptr(hTargetProcessHandle), uintptr(unsafe.Pointer(lpTargetHandle)), uintptr(dwDesiredAccess), uintptr(_p0), uintptr(dwOptions), 0, 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, err error) {
+	r0, _, e1 := syscall.Syscall(procWaitForSingleObject.Addr(), 2, uintptr(handle), uintptr(waitMilliseconds), 0)
+	event = uint32(r0)
+	if event == 0xffffffff {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func GetTempPath(buflen uint32, buf *uint16) (n uint32, err error) {
+	r0, _, e1 := syscall.Syscall(procGetTempPathW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0)
+	n = uint32(r0)
+	if n == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func CreatePipe(readhandle *Handle, writehandle *Handle, sa *SecurityAttributes, size uint32) (err error) {
+	r1, _, e1 := syscall.Syscall6(procCreatePipe.Addr(), 4, uintptr(unsafe.Pointer(readhandle)), uintptr(unsafe.Pointer(writehandle)), uintptr(unsafe.Pointer(sa)), uintptr(size), 0, 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func GetFileType(filehandle Handle) (n uint32, err error) {
+	r0, _, e1 := syscall.Syscall(procGetFileType.Addr(), 1, uintptr(filehandle), 0, 0)
+	n = uint32(r0)
+	if n == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func CryptAcquireContext(provhandle *Handle, container *uint16, provider *uint16, provtype uint32, flags uint32) (err error) {
+	r1, _, e1 := syscall.Syscall6(procCryptAcquireContextW.Addr(), 5, uintptr(unsafe.Pointer(provhandle)), uintptr(unsafe.Pointer(container)), uintptr(unsafe.Pointer(provider)), uintptr(provtype), uintptr(flags), 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func CryptReleaseContext(provhandle Handle, flags uint32) (err error) {
+	r1, _, e1 := syscall.Syscall(procCryptReleaseContext.Addr(), 2, uintptr(provhandle), uintptr(flags), 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func CryptGenRandom(provhandle Handle, buflen uint32, buf *byte) (err error) {
+	r1, _, e1 := syscall.Syscall(procCryptGenRandom.Addr(), 3, uintptr(provhandle), uintptr(buflen), uintptr(unsafe.Pointer(buf)))
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func GetEnvironmentStrings() (envs *uint16, err error) {
+	r0, _, e1 := syscall.Syscall(procGetEnvironmentStringsW.Addr(), 0, 0, 0, 0)
+	envs = (*uint16)(unsafe.Pointer(r0))
+	if envs == nil {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func FreeEnvironmentStrings(envs *uint16) (err error) {
+	r1, _, e1 := syscall.Syscall(procFreeEnvironmentStringsW.Addr(), 1, uintptr(unsafe.Pointer(envs)), 0, 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32, err error) {
+	r0, _, e1 := syscall.Syscall(procGetEnvironmentVariableW.Addr(), 3, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(size))
+	n = uint32(r0)
+	if n == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func SetEnvironmentVariable(name *uint16, value *uint16) (err error) {
+	r1, _, e1 := syscall.Syscall(procSetEnvironmentVariableW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)), 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) {
+	r1, _, e1 := syscall.Syscall6(procSetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func GetFileAttributes(name *uint16) (attrs uint32, err error) {
+	r0, _, e1 := syscall.Syscall(procGetFileAttributesW.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0)
+	attrs = uint32(r0)
+	if attrs == INVALID_FILE_ATTRIBUTES {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func SetFileAttributes(name *uint16, attrs uint32) (err error) {
+	r1, _, e1 := syscall.Syscall(procSetFileAttributesW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(attrs), 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) {
+	r1, _, e1 := syscall.Syscall(procGetFileAttributesExW.Addr(), 3, uintptr(unsafe.Pointer(name)), uintptr(level), uintptr(unsafe.Pointer(info)))
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func GetCommandLine() (cmd *uint16) {
+	r0, _, _ := syscall.Syscall(procGetCommandLineW.Addr(), 0, 0, 0, 0)
+	cmd = (*uint16)(unsafe.Pointer(r0))
+	return
+}
+
+func CommandLineToArgv(cmd *uint16, argc *int32) (argv *[8192]*[8192]uint16, err error) {
+	r0, _, e1 := syscall.Syscall(procCommandLineToArgvW.Addr(), 2, uintptr(unsafe.Pointer(cmd)), uintptr(unsafe.Pointer(argc)), 0)
+	argv = (*[8192]*[8192]uint16)(unsafe.Pointer(r0))
+	if argv == nil {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func LocalFree(hmem Handle) (handle Handle, err error) {
+	r0, _, e1 := syscall.Syscall(procLocalFree.Addr(), 1, uintptr(hmem), 0, 0)
+	handle = Handle(r0)
+	if handle != 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) {
+	r1, _, e1 := syscall.Syscall(procSetHandleInformation.Addr(), 3, uintptr(handle), uintptr(mask), uintptr(flags))
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func FlushFileBuffers(handle Handle) (err error) {
+	r1, _, e1 := syscall.Syscall(procFlushFileBuffers.Addr(), 1, uintptr(handle), 0, 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) (n uint32, err error) {
+	r0, _, e1 := syscall.Syscall6(procGetFullPathNameW.Addr(), 4, uintptr(unsafe.Pointer(path)), uintptr(buflen), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(fname)), 0, 0)
+	n = uint32(r0)
+	if n == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err error) {
+	r0, _, e1 := syscall.Syscall(procGetLongPathNameW.Addr(), 3, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(buf)), uintptr(buflen))
+	n = uint32(r0)
+	if n == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uint32, err error) {
+	r0, _, e1 := syscall.Syscall(procGetShortPathNameW.Addr(), 3, uintptr(unsafe.Pointer(longpath)), uintptr(unsafe.Pointer(shortpath)), uintptr(buflen))
+	n = uint32(r0)
+	if n == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxSizeHigh uint32, maxSizeLow uint32, name *uint16) (handle Handle, err error) {
+	r0, _, e1 := syscall.Syscall6(procCreateFileMappingW.Addr(), 6, uintptr(fhandle), uintptr(unsafe.Pointer(sa)), uintptr(prot), uintptr(maxSizeHigh), uintptr(maxSizeLow), uintptr(unsafe.Pointer(name)))
+	handle = Handle(r0)
+	if handle == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow uint32, length uintptr) (addr uintptr, err error) {
+	r0, _, e1 := syscall.Syscall6(procMapViewOfFile.Addr(), 5, uintptr(handle), uintptr(access), uintptr(offsetHigh), uintptr(offsetLow), uintptr(length), 0)
+	addr = uintptr(r0)
+	if addr == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func UnmapViewOfFile(addr uintptr) (err error) {
+	r1, _, e1 := syscall.Syscall(procUnmapViewOfFile.Addr(), 1, uintptr(addr), 0, 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func FlushViewOfFile(addr uintptr, length uintptr) (err error) {
+	r1, _, e1 := syscall.Syscall(procFlushViewOfFile.Addr(), 2, uintptr(addr), uintptr(length), 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func VirtualLock(addr uintptr, length uintptr) (err error) {
+	r1, _, e1 := syscall.Syscall(procVirtualLock.Addr(), 2, uintptr(addr), uintptr(length), 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func VirtualUnlock(addr uintptr, length uintptr) (err error) {
+	r1, _, e1 := syscall.Syscall(procVirtualUnlock.Addr(), 2, uintptr(addr), uintptr(length), 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func VirtualAlloc(address uintptr, size uintptr, alloctype uint32, protect uint32) (value uintptr, err error) {
+	r0, _, e1 := syscall.Syscall6(procVirtualAlloc.Addr(), 4, uintptr(address), uintptr(size), uintptr(alloctype), uintptr(protect), 0, 0)
+	value = uintptr(r0)
+	if value == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func VirtualFree(address uintptr, size uintptr, freetype uint32) (err error) {
+	r1, _, e1 := syscall.Syscall(procVirtualFree.Addr(), 3, uintptr(address), uintptr(size), uintptr(freetype))
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect *uint32) (err error) {
+	r1, _, e1 := syscall.Syscall6(procVirtualProtect.Addr(), 4, uintptr(address), uintptr(size), uintptr(newprotect), uintptr(unsafe.Pointer(oldprotect)), 0, 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytesPerSend uint32, overlapped *Overlapped, transmitFileBuf *TransmitFileBuffers, flags uint32) (err error) {
+	r1, _, e1 := syscall.Syscall9(procTransmitFile.Addr(), 7, uintptr(s), uintptr(handle), uintptr(bytesToWrite), uintptr(bytesPerSend), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(transmitFileBuf)), uintptr(flags), 0, 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func ReadDirectoryChanges(handle Handle, buf *byte, buflen uint32, watchSubTree bool, mask uint32, retlen *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) {
+	var _p0 uint32
+	if watchSubTree {
+		_p0 = 1
+	} else {
+		_p0 = 0
+	}
+	r1, _, e1 := syscall.Syscall9(procReadDirectoryChangesW.Addr(), 8, uintptr(handle), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(_p0), uintptr(mask), uintptr(unsafe.Pointer(retlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine), 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) {
+	r0, _, e1 := syscall.Syscall(procCertOpenSystemStoreW.Addr(), 2, uintptr(hprov), uintptr(unsafe.Pointer(name)), 0)
+	store = Handle(r0)
+	if store == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptProv uintptr, flags uint32, para uintptr) (handle Handle, err error) {
+	r0, _, e1 := syscall.Syscall6(procCertOpenStore.Addr(), 5, uintptr(storeProvider), uintptr(msgAndCertEncodingType), uintptr(cryptProv), uintptr(flags), uintptr(para), 0)
+	handle = Handle(r0)
+	if handle == InvalidHandle {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (context *CertContext, err error) {
+	r0, _, e1 := syscall.Syscall(procCertEnumCertificatesInStore.Addr(), 2, uintptr(store), uintptr(unsafe.Pointer(prevContext)), 0)
+	context = (*CertContext)(unsafe.Pointer(r0))
+	if context == nil {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func CertAddCertificateContextToStore(store Handle, certContext *CertContext, addDisposition uint32, storeContext **CertContext) (err error) {
+	r1, _, e1 := syscall.Syscall6(procCertAddCertificateContextToStore.Addr(), 4, uintptr(store), uintptr(unsafe.Pointer(certContext)), uintptr(addDisposition), uintptr(unsafe.Pointer(storeContext)), 0, 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func CertCloseStore(store Handle, flags uint32) (err error) {
+	r1, _, e1 := syscall.Syscall(procCertCloseStore.Addr(), 2, uintptr(store), uintptr(flags), 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, additionalStore Handle, para *CertChainPara, flags uint32, reserved uintptr, chainCtx **CertChainContext) (err error) {
+	r1, _, e1 := syscall.Syscall9(procCertGetCertificateChain.Addr(), 8, uintptr(engine), uintptr(unsafe.Pointer(leaf)), uintptr(unsafe.Pointer(time)), uintptr(additionalStore), uintptr(unsafe.Pointer(para)), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(chainCtx)), 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func CertFreeCertificateChain(ctx *CertChainContext) {
+	syscall.Syscall(procCertFreeCertificateChain.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0)
+	return
+}
+
+func CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, encodedLen uint32) (context *CertContext, err error) {
+	r0, _, e1 := syscall.Syscall(procCertCreateCertificateContext.Addr(), 3, uintptr(certEncodingType), uintptr(unsafe.Pointer(certEncoded)), uintptr(encodedLen))
+	context = (*CertContext)(unsafe.Pointer(r0))
+	if context == nil {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func CertFreeCertificateContext(ctx *CertContext) (err error) {
+	r1, _, e1 := syscall.Syscall(procCertFreeCertificateContext.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func CertVerifyCertificateChainPolicy(policyOID uintptr, chain *CertChainContext, para *CertChainPolicyPara, status *CertChainPolicyStatus) (err error) {
+	r1, _, e1 := syscall.Syscall6(procCertVerifyCertificateChainPolicy.Addr(), 4, uintptr(policyOID), uintptr(unsafe.Pointer(chain)), uintptr(unsafe.Pointer(para)), uintptr(unsafe.Pointer(status)), 0, 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func RegOpenKeyEx(key Handle, subkey *uint16, options uint32, desiredAccess uint32, result *Handle) (regerrno error) {
+	r0, _, _ := syscall.Syscall6(procRegOpenKeyExW.Addr(), 5, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(options), uintptr(desiredAccess), uintptr(unsafe.Pointer(result)), 0)
+	if r0 != 0 {
+		regerrno = syscall.Errno(r0)
+	}
+	return
+}
+
+func RegCloseKey(key Handle) (regerrno error) {
+	r0, _, _ := syscall.Syscall(procRegCloseKey.Addr(), 1, uintptr(key), 0, 0)
+	if r0 != 0 {
+		regerrno = syscall.Errno(r0)
+	}
+	return
+}
+
+func RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint32, subkeysLen *uint32, maxSubkeyLen *uint32, maxClassLen *uint32, valuesLen *uint32, maxValueNameLen *uint32, maxValueLen *uint32, saLen *uint32, lastWriteTime *Filetime) (regerrno error) {
+	r0, _, _ := syscall.Syscall12(procRegQueryInfoKeyW.Addr(), 12, uintptr(key), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(subkeysLen)), uintptr(unsafe.Pointer(maxSubkeyLen)), uintptr(unsafe.Pointer(maxClassLen)), uintptr(unsafe.Pointer(valuesLen)), uintptr(unsafe.Pointer(maxValueNameLen)), uintptr(unsafe.Pointer(maxValueLen)), uintptr(unsafe.Pointer(saLen)), uintptr(unsafe.Pointer(lastWriteTime)))
+	if r0 != 0 {
+		regerrno = syscall.Errno(r0)
+	}
+	return
+}
+
+func RegEnumKeyEx(key Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, class *uint16, classLen *uint32, lastWriteTime *Filetime) (regerrno error) {
+	r0, _, _ := syscall.Syscall9(procRegEnumKeyExW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(lastWriteTime)), 0)
+	if r0 != 0 {
+		regerrno = syscall.Errno(r0)
+	}
+	return
+}
+
+func RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) {
+	r0, _, _ := syscall.Syscall6(procRegQueryValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen)))
+	if r0 != 0 {
+		regerrno = syscall.Errno(r0)
+	}
+	return
+}
+
+func getCurrentProcessId() (pid uint32) {
+	r0, _, _ := syscall.Syscall(procGetCurrentProcessId.Addr(), 0, 0, 0, 0)
+	pid = uint32(r0)
+	return
+}
+
+func GetConsoleMode(console Handle, mode *uint32) (err error) {
+	r1, _, e1 := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(mode)), 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func SetConsoleMode(console Handle, mode uint32) (err error) {
+	r1, _, e1 := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(console), uintptr(mode), 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) {
+	r1, _, e1 := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(info)), 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) {
+	r1, _, e1 := syscall.Syscall6(procWriteConsoleW.Addr(), 5, uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(towrite), uintptr(unsafe.Pointer(written)), uintptr(unsafe.Pointer(reserved)), 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) {
+	r1, _, e1 := syscall.Syscall6(procReadConsoleW.Addr(), 5, uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(toread), uintptr(unsafe.Pointer(read)), uintptr(unsafe.Pointer(inputControl)), 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) {
+	r0, _, e1 := syscall.Syscall(procCreateToolhelp32Snapshot.Addr(), 2, uintptr(flags), uintptr(processId), 0)
+	handle = Handle(r0)
+	if handle == InvalidHandle {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) {
+	r1, _, e1 := syscall.Syscall(procProcess32FirstW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) {
+	r1, _, e1 := syscall.Syscall(procProcess32NextW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBufferSize uint32, outBuffer *byte, outBufferSize uint32, bytesReturned *uint32, overlapped *Overlapped) (err error) {
+	r1, _, e1 := syscall.Syscall9(procDeviceIoControl.Addr(), 8, uintptr(handle), uintptr(ioControlCode), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferSize), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferSize), uintptr(unsafe.Pointer(bytesReturned)), uintptr(unsafe.Pointer(overlapped)), 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) {
+	r1, _, e1 := syscall.Syscall(procCreateSymbolicLinkW.Addr(), 3, uintptr(unsafe.Pointer(symlinkfilename)), uintptr(unsafe.Pointer(targetfilename)), uintptr(flags))
+	if r1&0xff == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr) (err error) {
+	r1, _, e1 := syscall.Syscall(procCreateHardLinkW.Addr(), 3, uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(existingfilename)), uintptr(reserved))
+	if r1&0xff == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func GetCurrentThreadId() (id uint32) {
+	r0, _, _ := syscall.Syscall(procGetCurrentThreadId.Addr(), 0, 0, 0, 0)
+	id = uint32(r0)
+	return
+}
+
+func CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialState uint32, name *uint16) (handle Handle, err error) {
+	r0, _, e1 := syscall.Syscall6(procCreateEventW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(manualReset), uintptr(initialState), uintptr(unsafe.Pointer(name)), 0, 0)
+	handle = Handle(r0)
+	if handle == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) {
+	r0, _, e1 := syscall.Syscall6(procCreateEventExW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0)
+	handle = Handle(r0)
+	if handle == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func OpenEvent(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) {
+	var _p0 uint32
+	if inheritHandle {
+		_p0 = 1
+	} else {
+		_p0 = 0
+	}
+	r0, _, e1 := syscall.Syscall(procOpenEventW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name)))
+	handle = Handle(r0)
+	if handle == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func SetEvent(event Handle) (err error) {
+	r1, _, e1 := syscall.Syscall(procSetEvent.Addr(), 1, uintptr(event), 0, 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func ResetEvent(event Handle) (err error) {
+	r1, _, e1 := syscall.Syscall(procResetEvent.Addr(), 1, uintptr(event), 0, 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func PulseEvent(event Handle) (err error) {
+	r1, _, e1 := syscall.Syscall(procPulseEvent.Addr(), 1, uintptr(event), 0, 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) {
+	r1, _, e1 := syscall.Syscall(procDefineDosDeviceW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)))
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func DeleteVolumeMountPoint(volumeMountPoint *uint16) (err error) {
+	r1, _, e1 := syscall.Syscall(procDeleteVolumeMountPointW.Addr(), 1, uintptr(unsafe.Pointer(volumeMountPoint)), 0, 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func FindFirstVolume(volumeName *uint16, bufferLength uint32) (handle Handle, err error) {
+	r0, _, e1 := syscall.Syscall(procFindFirstVolumeW.Addr(), 2, uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength), 0)
+	handle = Handle(r0)
+	if handle == InvalidHandle {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func FindFirstVolumeMountPoint(rootPathName *uint16, volumeMountPoint *uint16, bufferLength uint32) (handle Handle, err error) {
+	r0, _, e1 := syscall.Syscall(procFindFirstVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength))
+	handle = Handle(r0)
+	if handle == InvalidHandle {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func FindNextVolume(findVolume Handle, volumeName *uint16, bufferLength uint32) (err error) {
+	r1, _, e1 := syscall.Syscall(procFindNextVolumeW.Addr(), 3, uintptr(findVolume), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength))
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func FindNextVolumeMountPoint(findVolumeMountPoint Handle, volumeMountPoint *uint16, bufferLength uint32) (err error) {
+	r1, _, e1 := syscall.Syscall(procFindNextVolumeMountPointW.Addr(), 3, uintptr(findVolumeMountPoint), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength))
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func FindVolumeClose(findVolume Handle) (err error) {
+	r1, _, e1 := syscall.Syscall(procFindVolumeClose.Addr(), 1, uintptr(findVolume), 0, 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) {
+	r1, _, e1 := syscall.Syscall(procFindVolumeMountPointClose.Addr(), 1, uintptr(findVolumeMountPoint), 0, 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func GetDriveType(rootPathName *uint16) (driveType uint32) {
+	r0, _, _ := syscall.Syscall(procGetDriveTypeW.Addr(), 1, uintptr(unsafe.Pointer(rootPathName)), 0, 0)
+	driveType = uint32(r0)
+	return
+}
+
+func GetLogicalDrives() (drivesBitMask uint32, err error) {
+	r0, _, e1 := syscall.Syscall(procGetLogicalDrives.Addr(), 0, 0, 0, 0)
+	drivesBitMask = uint32(r0)
+	if drivesBitMask == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func GetLogicalDriveStrings(bufferLength uint32, buffer *uint16) (n uint32, err error) {
+	r0, _, e1 := syscall.Syscall(procGetLogicalDriveStringsW.Addr(), 2, uintptr(bufferLength), uintptr(unsafe.Pointer(buffer)), 0)
+	n = uint32(r0)
+	if n == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func GetVolumeInformation(rootPathName *uint16, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) {
+	r1, _, e1 := syscall.Syscall9(procGetVolumeInformationW.Addr(), 8, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func GetVolumeInformationByHandle(file Handle, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) {
+	r1, _, e1 := syscall.Syscall9(procGetVolumeInformationByHandleW.Addr(), 8, uintptr(file), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16, bufferlength uint32) (err error) {
+	r1, _, e1 := syscall.Syscall(procGetVolumeNameForVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferlength))
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func GetVolumePathName(fileName *uint16, volumePathName *uint16, bufferLength uint32) (err error) {
+	r1, _, e1 := syscall.Syscall(procGetVolumePathNameW.Addr(), 3, uintptr(unsafe.Pointer(fileName)), uintptr(unsafe.Pointer(volumePathName)), uintptr(bufferLength))
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func GetVolumePathNamesForVolumeName(volumeName *uint16, volumePathNames *uint16, bufferLength uint32, returnLength *uint32) (err error) {
+	r1, _, e1 := syscall.Syscall6(procGetVolumePathNamesForVolumeNameW.Addr(), 4, uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(volumePathNames)), uintptr(bufferLength), uintptr(unsafe.Pointer(returnLength)), 0, 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) {
+	r0, _, e1 := syscall.Syscall(procQueryDosDeviceW.Addr(), 3, uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), uintptr(max))
+	n = uint32(r0)
+	if n == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) {
+	r1, _, e1 := syscall.Syscall(procSetVolumeLabelW.Addr(), 2, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeName)), 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err error) {
+	r1, _, e1 := syscall.Syscall(procSetVolumeMountPointW.Addr(), 2, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func WSAStartup(verreq uint32, data *WSAData) (sockerr error) {
+	r0, _, _ := syscall.Syscall(procWSAStartup.Addr(), 2, uintptr(verreq), uintptr(unsafe.Pointer(data)), 0)
+	if r0 != 0 {
+		sockerr = syscall.Errno(r0)
+	}
+	return
+}
+
+func WSACleanup() (err error) {
+	r1, _, e1 := syscall.Syscall(procWSACleanup.Addr(), 0, 0, 0, 0)
+	if r1 == socket_error {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbob uint32, cbbr *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) {
+	r1, _, e1 := syscall.Syscall9(procWSAIoctl.Addr(), 9, uintptr(s), uintptr(iocc), uintptr(unsafe.Pointer(inbuf)), uintptr(cbif), uintptr(unsafe.Pointer(outbuf)), uintptr(cbob), uintptr(unsafe.Pointer(cbbr)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine))
+	if r1 == socket_error {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func socket(af int32, typ int32, protocol int32) (handle Handle, err error) {
+	r0, _, e1 := syscall.Syscall(procsocket.Addr(), 3, uintptr(af), uintptr(typ), uintptr(protocol))
+	handle = Handle(r0)
+	if handle == InvalidHandle {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func Setsockopt(s Handle, level int32, optname int32, optval *byte, optlen int32) (err error) {
+	r1, _, e1 := syscall.Syscall6(procsetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(optlen), 0)
+	if r1 == socket_error {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func Getsockopt(s Handle, level int32, optname int32, optval *byte, optlen *int32) (err error) {
+	r1, _, e1 := syscall.Syscall6(procgetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(unsafe.Pointer(optlen)), 0)
+	if r1 == socket_error {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func bind(s Handle, name unsafe.Pointer, namelen int32) (err error) {
+	r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen))
+	if r1 == socket_error {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func connect(s Handle, name unsafe.Pointer, namelen int32) (err error) {
+	r1, _, e1 := syscall.Syscall(procconnect.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen))
+	if r1 == socket_error {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func getsockname(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) {
+	r1, _, e1 := syscall.Syscall(procgetsockname.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
+	if r1 == socket_error {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func getpeername(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) {
+	r1, _, e1 := syscall.Syscall(procgetpeername.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
+	if r1 == socket_error {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func listen(s Handle, backlog int32) (err error) {
+	r1, _, e1 := syscall.Syscall(proclisten.Addr(), 2, uintptr(s), uintptr(backlog), 0)
+	if r1 == socket_error {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func shutdown(s Handle, how int32) (err error) {
+	r1, _, e1 := syscall.Syscall(procshutdown.Addr(), 2, uintptr(s), uintptr(how), 0)
+	if r1 == socket_error {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func Closesocket(s Handle) (err error) {
+	r1, _, e1 := syscall.Syscall(procclosesocket.Addr(), 1, uintptr(s), 0, 0)
+	if r1 == socket_error {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func AcceptEx(ls Handle, as Handle, buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, recvd *uint32, overlapped *Overlapped) (err error) {
+	r1, _, e1 := syscall.Syscall9(procAcceptEx.Addr(), 8, uintptr(ls), uintptr(as), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(overlapped)), 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func GetAcceptExSockaddrs(buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, lrsa **RawSockaddrAny, lrsalen *int32, rrsa **RawSockaddrAny, rrsalen *int32) {
+	syscall.Syscall9(procGetAcceptExSockaddrs.Addr(), 8, uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(lrsa)), uintptr(unsafe.Pointer(lrsalen)), uintptr(unsafe.Pointer(rrsa)), uintptr(unsafe.Pointer(rrsalen)), 0)
+	return
+}
+
+func WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, overlapped *Overlapped, croutine *byte) (err error) {
+	r1, _, e1 := syscall.Syscall9(procWSARecv.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0)
+	if r1 == socket_error {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func WSASend(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, overlapped *Overlapped, croutine *byte) (err error) {
+	r1, _, e1 := syscall.Syscall9(procWSASend.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0)
+	if r1 == socket_error {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, from *RawSockaddrAny, fromlen *int32, overlapped *Overlapped, croutine *byte) (err error) {
+	r1, _, e1 := syscall.Syscall9(procWSARecvFrom.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)))
+	if r1 == socket_error {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *RawSockaddrAny, tolen int32, overlapped *Overlapped, croutine *byte) (err error) {
+	r1, _, e1 := syscall.Syscall9(procWSASendTo.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(to)), uintptr(tolen), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)))
+	if r1 == socket_error {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func GetHostByName(name string) (h *Hostent, err error) {
+	var _p0 *byte
+	_p0, err = syscall.BytePtrFromString(name)
+	if err != nil {
+		return
+	}
+	return _GetHostByName(_p0)
+}
+
+func _GetHostByName(name *byte) (h *Hostent, err error) {
+	r0, _, e1 := syscall.Syscall(procgethostbyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0)
+	h = (*Hostent)(unsafe.Pointer(r0))
+	if h == nil {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func GetServByName(name string, proto string) (s *Servent, err error) {
+	var _p0 *byte
+	_p0, err = syscall.BytePtrFromString(name)
+	if err != nil {
+		return
+	}
+	var _p1 *byte
+	_p1, err = syscall.BytePtrFromString(proto)
+	if err != nil {
+		return
+	}
+	return _GetServByName(_p0, _p1)
+}
+
+func _GetServByName(name *byte, proto *byte) (s *Servent, err error) {
+	r0, _, e1 := syscall.Syscall(procgetservbyname.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(proto)), 0)
+	s = (*Servent)(unsafe.Pointer(r0))
+	if s == nil {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func Ntohs(netshort uint16) (u uint16) {
+	r0, _, _ := syscall.Syscall(procntohs.Addr(), 1, uintptr(netshort), 0, 0)
+	u = uint16(r0)
+	return
+}
+
+func GetProtoByName(name string) (p *Protoent, err error) {
+	var _p0 *byte
+	_p0, err = syscall.BytePtrFromString(name)
+	if err != nil {
+		return
+	}
+	return _GetProtoByName(_p0)
+}
+
+func _GetProtoByName(name *byte) (p *Protoent, err error) {
+	r0, _, e1 := syscall.Syscall(procgetprotobyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0)
+	p = (*Protoent)(unsafe.Pointer(r0))
+	if p == nil {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func DnsQuery(name string, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) {
+	var _p0 *uint16
+	_p0, status = syscall.UTF16PtrFromString(name)
+	if status != nil {
+		return
+	}
+	return _DnsQuery(_p0, qtype, options, extra, qrs, pr)
+}
+
+func _DnsQuery(name *uint16, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) {
+	r0, _, _ := syscall.Syscall6(procDnsQuery_W.Addr(), 6, uintptr(unsafe.Pointer(name)), uintptr(qtype), uintptr(options), uintptr(unsafe.Pointer(extra)), uintptr(unsafe.Pointer(qrs)), uintptr(unsafe.Pointer(pr)))
+	if r0 != 0 {
+		status = syscall.Errno(r0)
+	}
+	return
+}
+
+func DnsRecordListFree(rl *DNSRecord, freetype uint32) {
+	syscall.Syscall(procDnsRecordListFree.Addr(), 2, uintptr(unsafe.Pointer(rl)), uintptr(freetype), 0)
+	return
+}
+
+func DnsNameCompare(name1 *uint16, name2 *uint16) (same bool) {
+	r0, _, _ := syscall.Syscall(procDnsNameCompare_W.Addr(), 2, uintptr(unsafe.Pointer(name1)), uintptr(unsafe.Pointer(name2)), 0)
+	same = r0 != 0
+	return
+}
+
+func GetAddrInfoW(nodename *uint16, servicename *uint16, hints *AddrinfoW, result **AddrinfoW) (sockerr error) {
+	r0, _, _ := syscall.Syscall6(procGetAddrInfoW.Addr(), 4, uintptr(unsafe.Pointer(nodename)), uintptr(unsafe.Pointer(servicename)), uintptr(unsafe.Pointer(hints)), uintptr(unsafe.Pointer(result)), 0, 0)
+	if r0 != 0 {
+		sockerr = syscall.Errno(r0)
+	}
+	return
+}
+
+func FreeAddrInfoW(addrinfo *AddrinfoW) {
+	syscall.Syscall(procFreeAddrInfoW.Addr(), 1, uintptr(unsafe.Pointer(addrinfo)), 0, 0)
+	return
+}
+
+func GetIfEntry(pIfRow *MibIfRow) (errcode error) {
+	r0, _, _ := syscall.Syscall(procGetIfEntry.Addr(), 1, uintptr(unsafe.Pointer(pIfRow)), 0, 0)
+	if r0 != 0 {
+		errcode = syscall.Errno(r0)
+	}
+	return
+}
+
+func GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) {
+	r0, _, _ := syscall.Syscall(procGetAdaptersInfo.Addr(), 2, uintptr(unsafe.Pointer(ai)), uintptr(unsafe.Pointer(ol)), 0)
+	if r0 != 0 {
+		errcode = syscall.Errno(r0)
+	}
+	return
+}
+
+func SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error) {
+	r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(handle), uintptr(flags), 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func WSAEnumProtocols(protocols *int32, protocolBuffer *WSAProtocolInfo, bufferLength *uint32) (n int32, err error) {
+	r0, _, e1 := syscall.Syscall(procWSAEnumProtocolsW.Addr(), 3, uintptr(unsafe.Pointer(protocols)), uintptr(unsafe.Pointer(protocolBuffer)), uintptr(unsafe.Pointer(bufferLength)))
+	n = int32(r0)
+	if n == -1 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) {
+	r0, _, _ := syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)), 0)
+	if r0 != 0 {
+		errcode = syscall.Errno(r0)
+	}
+	return
+}
+
+func GetACP() (acp uint32) {
+	r0, _, _ := syscall.Syscall(procGetACP.Addr(), 0, 0, 0, 0)
+	acp = uint32(r0)
+	return
+}
+
+func MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) {
+	r0, _, e1 := syscall.Syscall6(procMultiByteToWideChar.Addr(), 6, uintptr(codePage), uintptr(dwFlags), uintptr(unsafe.Pointer(str)), uintptr(nstr), uintptr(unsafe.Pointer(wchar)), uintptr(nwchar))
+	nwrite = int32(r0)
+	if nwrite == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint32, translatedName *uint16, nSize *uint32) (err error) {
+	r1, _, e1 := syscall.Syscall6(procTranslateNameW.Addr(), 5, uintptr(unsafe.Pointer(accName)), uintptr(accNameFormat), uintptr(desiredNameFormat), uintptr(unsafe.Pointer(translatedName)), uintptr(unsafe.Pointer(nSize)), 0)
+	if r1&0xff == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func GetUserNameEx(nameFormat uint32, nameBuffer *uint16, nSize *uint32) (err error) {
+	r1, _, e1 := syscall.Syscall(procGetUserNameExW.Addr(), 3, uintptr(nameFormat), uintptr(unsafe.Pointer(nameBuffer)), uintptr(unsafe.Pointer(nSize)))
+	if r1&0xff == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) {
+	r0, _, _ := syscall.Syscall6(procNetUserGetInfo.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(unsafe.Pointer(buf)), 0, 0)
+	if r0 != 0 {
+		neterr = syscall.Errno(r0)
+	}
+	return
+}
+
+func NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (neterr error) {
+	r0, _, _ := syscall.Syscall(procNetGetJoinInformation.Addr(), 3, uintptr(unsafe.Pointer(server)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bufType)))
+	if r0 != 0 {
+		neterr = syscall.Errno(r0)
+	}
+	return
+}
+
+func NetApiBufferFree(buf *byte) (neterr error) {
+	r0, _, _ := syscall.Syscall(procNetApiBufferFree.Addr(), 1, uintptr(unsafe.Pointer(buf)), 0, 0)
+	if r0 != 0 {
+		neterr = syscall.Errno(r0)
+	}
+	return
+}
+
+func LookupAccountSid(systemName *uint16, sid *SID, name *uint16, nameLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) {
+	r1, _, e1 := syscall.Syscall9(procLookupAccountSidW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func LookupAccountName(systemName *uint16, accountName *uint16, sid *SID, sidLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) {
+	r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func ConvertSidToStringSid(sid *SID, stringSid **uint16) (err error) {
+	r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(stringSid)), 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func ConvertStringSidToSid(stringSid *uint16, sid **SID) (err error) {
+	r1, _, e1 := syscall.Syscall(procConvertStringSidToSidW.Addr(), 2, uintptr(unsafe.Pointer(stringSid)), uintptr(unsafe.Pointer(sid)), 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func GetLengthSid(sid *SID) (len uint32) {
+	r0, _, _ := syscall.Syscall(procGetLengthSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0)
+	len = uint32(r0)
+	return
+}
+
+func CopySid(destSidLen uint32, destSid *SID, srcSid *SID) (err error) {
+	r1, _, e1 := syscall.Syscall(procCopySid.Addr(), 3, uintptr(destSidLen), uintptr(unsafe.Pointer(destSid)), uintptr(unsafe.Pointer(srcSid)))
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, subAuth0 uint32, subAuth1 uint32, subAuth2 uint32, subAuth3 uint32, subAuth4 uint32, subAuth5 uint32, subAuth6 uint32, subAuth7 uint32, sid **SID) (err error) {
+	r1, _, e1 := syscall.Syscall12(procAllocateAndInitializeSid.Addr(), 11, uintptr(unsafe.Pointer(identAuth)), uintptr(subAuth), uintptr(subAuth0), uintptr(subAuth1), uintptr(subAuth2), uintptr(subAuth3), uintptr(subAuth4), uintptr(subAuth5), uintptr(subAuth6), uintptr(subAuth7), uintptr(unsafe.Pointer(sid)), 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func FreeSid(sid *SID) (err error) {
+	r1, _, e1 := syscall.Syscall(procFreeSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0)
+	if r1 != 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func EqualSid(sid1 *SID, sid2 *SID) (isEqual bool) {
+	r0, _, _ := syscall.Syscall(procEqualSid.Addr(), 2, uintptr(unsafe.Pointer(sid1)), uintptr(unsafe.Pointer(sid2)), 0)
+	isEqual = r0 != 0
+	return
+}
+
+func checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (err error) {
+	r1, _, e1 := syscall.Syscall(procCheckTokenMembership.Addr(), 3, uintptr(tokenHandle), uintptr(unsafe.Pointer(sidToCheck)), uintptr(unsafe.Pointer(isMember)))
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func OpenProcessToken(h Handle, access uint32, token *Token) (err error) {
+	r1, _, e1 := syscall.Syscall(procOpenProcessToken.Addr(), 3, uintptr(h), uintptr(access), uintptr(unsafe.Pointer(token)))
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func GetTokenInformation(t Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) {
+	r1, _, e1 := syscall.Syscall6(procGetTokenInformation.Addr(), 5, uintptr(t), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), uintptr(unsafe.Pointer(returnedLen)), 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func GetUserProfileDirectory(t Token, dir *uint16, dirLen *uint32) (err error) {
+	r1, _, e1 := syscall.Syscall(procGetUserProfileDirectoryW.Addr(), 3, uintptr(t), uintptr(unsafe.Pointer(dir)), uintptr(unsafe.Pointer(dirLen)))
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
diff --git a/vendor/golang.org/x/time/AUTHORS b/vendor/golang.org/x/time/AUTHORS
new file mode 100644
index 0000000..15167cd
--- /dev/null
+++ b/vendor/golang.org/x/time/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/golang.org/x/time/CONTRIBUTORS b/vendor/golang.org/x/time/CONTRIBUTORS
new file mode 100644
index 0000000..1c4577e
--- /dev/null
+++ b/vendor/golang.org/x/time/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/golang.org/x/time/LICENSE b/vendor/golang.org/x/time/LICENSE
new file mode 100644
index 0000000..6a66aea
--- /dev/null
+++ b/vendor/golang.org/x/time/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/time/PATENTS b/vendor/golang.org/x/time/PATENTS
new file mode 100644
index 0000000..7330990
--- /dev/null
+++ b/vendor/golang.org/x/time/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go.  This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation.  If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go
new file mode 100644
index 0000000..ae93e24
--- /dev/null
+++ b/vendor/golang.org/x/time/rate/rate.go
@@ -0,0 +1,374 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package rate provides a rate limiter.
+package rate
+
+import (
+	"context"
+	"fmt"
+	"math"
+	"sync"
+	"time"
+)
+
+// Limit defines the maximum frequency of some events.
+// Limit is represented as number of events per second.
+// A zero Limit allows no events.
+type Limit float64
+
+// Inf is the infinite rate limit; it allows all events (even if burst is zero).
+const Inf = Limit(math.MaxFloat64)
+
+// Every converts a minimum time interval between events to a Limit.
+func Every(interval time.Duration) Limit {
+	if interval <= 0 {
+		return Inf
+	}
+	return 1 / Limit(interval.Seconds())
+}
+
+// A Limiter controls how frequently events are allowed to happen.
+// It implements a "token bucket" of size b, initially full and refilled
+// at rate r tokens per second.
+// Informally, in any large enough time interval, the Limiter limits the
+// rate to r tokens per second, with a maximum burst size of b events.
+// As a special case, if r == Inf (the infinite rate), b is ignored.
+// See https://en.wikipedia.org/wiki/Token_bucket for more about token buckets.
+//
+// The zero value is a valid Limiter, but it will reject all events.
+// Use NewLimiter to create non-zero Limiters.
+//
+// Limiter has three main methods, Allow, Reserve, and Wait.
+// Most callers should use Wait.
+//
+// Each of the three methods consumes a single token.
+// They differ in their behavior when no token is available.
+// If no token is available, Allow returns false.
+// If no token is available, Reserve returns a reservation for a future token
+// and the amount of time the caller must wait before using it.
+// If no token is available, Wait blocks until one can be obtained
+// or its associated context.Context is canceled.
+//
+// The methods AllowN, ReserveN, and WaitN consume n tokens.
+type Limiter struct {
+	limit Limit
+	burst int
+
+	mu     sync.Mutex
+	tokens float64
+	// last is the last time the limiter's tokens field was updated
+	last time.Time
+	// lastEvent is the latest time of a rate-limited event (past or future)
+	lastEvent time.Time
+}
+
+// Limit returns the maximum overall event rate.
+func (lim *Limiter) Limit() Limit {
+	lim.mu.Lock()
+	defer lim.mu.Unlock()
+	return lim.limit
+}
+
+// Burst returns the maximum burst size. Burst is the maximum number of tokens
+// that can be consumed in a single call to Allow, Reserve, or Wait, so higher
+// Burst values allow more events to happen at once.
+// A zero Burst allows no events, unless limit == Inf.
+func (lim *Limiter) Burst() int {
+	return lim.burst
+}
+
+// NewLimiter returns a new Limiter that allows events up to rate r and permits
+// bursts of at most b tokens.
+func NewLimiter(r Limit, b int) *Limiter {
+	return &Limiter{
+		limit: r,
+		burst: b,
+	}
+}
+
+// Allow is shorthand for AllowN(time.Now(), 1).
+func (lim *Limiter) Allow() bool {
+	return lim.AllowN(time.Now(), 1)
+}
+
+// AllowN reports whether n events may happen at time now.
+// Use this method if you intend to drop / skip events that exceed the rate limit.
+// Otherwise use Reserve or Wait.
+func (lim *Limiter) AllowN(now time.Time, n int) bool {
+	return lim.reserveN(now, n, 0).ok
+}
+
+// A Reservation holds information about events that are permitted by a Limiter to happen after a delay.
+// A Reservation may be canceled, which may enable the Limiter to permit additional events.
+type Reservation struct {
+	ok        bool
+	lim       *Limiter
+	tokens    int
+	timeToAct time.Time
+	// This is the Limit at reservation time; it can change later.
+	limit Limit
+}
+
+// OK returns whether the limiter can provide the requested number of tokens
+// within the maximum wait time.  If OK is false, Delay returns InfDuration, and
+// Cancel does nothing.
+func (r *Reservation) OK() bool {
+	return r.ok
+}
+
+// Delay is shorthand for DelayFrom(time.Now()).
+func (r *Reservation) Delay() time.Duration {
+	return r.DelayFrom(time.Now())
+}
+
+// InfDuration is the duration returned by Delay when a Reservation is not OK.
+const InfDuration = time.Duration(1<<63 - 1)
+
+// DelayFrom returns the duration for which the reservation holder must wait
+// before taking the reserved action.  Zero duration means act immediately.
+// InfDuration means the limiter cannot grant the tokens requested in this
+// Reservation within the maximum wait time.
+func (r *Reservation) DelayFrom(now time.Time) time.Duration {
+	if !r.ok {
+		return InfDuration
+	}
+	delay := r.timeToAct.Sub(now)
+	if delay < 0 {
+		return 0
+	}
+	return delay
+}
+
+// Cancel is shorthand for CancelAt(time.Now()).
+func (r *Reservation) Cancel() {
+	r.CancelAt(time.Now())
+	return
+}
+
+// CancelAt indicates that the reservation holder will not perform the reserved action
+// and reverses the effects of this Reservation on the rate limit as much as possible,
+// considering that other reservations may have already been made.
+func (r *Reservation) CancelAt(now time.Time) {
+	if !r.ok {
+		return
+	}
+
+	r.lim.mu.Lock()
+	defer r.lim.mu.Unlock()
+
+	if r.lim.limit == Inf || r.tokens == 0 || r.timeToAct.Before(now) {
+		return
+	}
+
+	// calculate tokens to restore
+	// The duration between lim.lastEvent and r.timeToAct tells us how many tokens were reserved
+	// after r was obtained. These tokens should not be restored.
+	restoreTokens := float64(r.tokens) - r.limit.tokensFromDuration(r.lim.lastEvent.Sub(r.timeToAct))
+	if restoreTokens <= 0 {
+		return
+	}
+	// advance time to now
+	now, _, tokens := r.lim.advance(now)
+	// calculate new number of tokens
+	tokens += restoreTokens
+	if burst := float64(r.lim.burst); tokens > burst {
+		tokens = burst
+	}
+	// update state
+	r.lim.last = now
+	r.lim.tokens = tokens
+	if r.timeToAct == r.lim.lastEvent {
+		prevEvent := r.timeToAct.Add(r.limit.durationFromTokens(float64(-r.tokens)))
+		if !prevEvent.Before(now) {
+			r.lim.lastEvent = prevEvent
+		}
+	}
+
+	return
+}
+
+// Reserve is shorthand for ReserveN(time.Now(), 1).
+func (lim *Limiter) Reserve() *Reservation {
+	return lim.ReserveN(time.Now(), 1)
+}
+
+// ReserveN returns a Reservation that indicates how long the caller must wait before n events happen.
+// The Limiter takes this Reservation into account when allowing future events.
+// The returned Reservation is not OK (its OK method reports false) if n exceeds the Limiter's burst size.
+// Usage example:
+//   r := lim.ReserveN(time.Now(), 1)
+//   if !r.OK() {
+//     // Not allowed to act! Did you remember to set lim.burst to be > 0 ?
+//     return
+//   }
+//   time.Sleep(r.Delay())
+//   Act()
+// Use this method if you wish to wait and slow down in accordance with the rate limit without dropping events.
+// If you need to respect a deadline or cancel the delay, use Wait instead.
+// To drop or skip events exceeding rate limit, use Allow instead.
+func (lim *Limiter) ReserveN(now time.Time, n int) *Reservation {
+	r := lim.reserveN(now, n, InfDuration)
+	return &r
+}
+
+// Wait is shorthand for WaitN(ctx, 1).
+func (lim *Limiter) Wait(ctx context.Context) (err error) {
+	return lim.WaitN(ctx, 1)
+}
+
+// WaitN blocks until lim permits n events to happen.
+// It returns an error if n exceeds the Limiter's burst size, the Context is
+// canceled, or the expected wait time exceeds the Context's Deadline.
+// The burst limit is ignored if the rate limit is Inf.
+func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) {
+	if n > lim.burst && lim.limit != Inf {
+		return fmt.Errorf("rate: Wait(n=%d) exceeds limiter's burst %d", n, lim.burst)
+	}
+	// Check if ctx is already cancelled
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	default:
+	}
+	// Determine wait limit
+	now := time.Now()
+	waitLimit := InfDuration
+	if deadline, ok := ctx.Deadline(); ok {
+		waitLimit = deadline.Sub(now)
+	}
+	// Reserve
+	r := lim.reserveN(now, n, waitLimit)
+	if !r.ok {
+		return fmt.Errorf("rate: Wait(n=%d) would exceed context deadline", n)
+	}
+	// Wait if necessary
+	delay := r.DelayFrom(now)
+	if delay == 0 {
+		return nil
+	}
+	t := time.NewTimer(delay)
+	defer t.Stop()
+	select {
+	case <-t.C:
+		// We can proceed.
+		return nil
+	case <-ctx.Done():
+		// Context was canceled before we could proceed.  Cancel the
+		// reservation, which may permit other events to proceed sooner.
+		r.Cancel()
+		return ctx.Err()
+	}
+}
+
+// SetLimit is shorthand for SetLimitAt(time.Now(), newLimit).
+func (lim *Limiter) SetLimit(newLimit Limit) {
+	lim.SetLimitAt(time.Now(), newLimit)
+}
+
+// SetLimitAt sets a new Limit for the limiter. The new Limit, and Burst, may be violated
+// or underutilized by callers that reserved tokens (using Reserve or Wait) but had not
+// yet acted when SetLimitAt was called.
+func (lim *Limiter) SetLimitAt(now time.Time, newLimit Limit) {
+	lim.mu.Lock()
+	defer lim.mu.Unlock()
+
+	now, _, tokens := lim.advance(now)
+
+	lim.last = now
+	lim.tokens = tokens
+	lim.limit = newLimit
+}
+
+// reserveN is a helper method for AllowN, ReserveN, and WaitN.
+// maxFutureReserve specifies the maximum reservation wait duration allowed.
+// reserveN returns Reservation, not *Reservation, to avoid allocation in AllowN and WaitN.
+func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duration) Reservation {
+	lim.mu.Lock()
+
+	if lim.limit == Inf {
+		lim.mu.Unlock()
+		return Reservation{
+			ok:        true,
+			lim:       lim,
+			tokens:    n,
+			timeToAct: now,
+		}
+	}
+
+	now, last, tokens := lim.advance(now)
+
+	// Calculate the remaining number of tokens resulting from the request.
+	tokens -= float64(n)
+
+	// Calculate the wait duration
+	var waitDuration time.Duration
+	if tokens < 0 {
+		waitDuration = lim.limit.durationFromTokens(-tokens)
+	}
+
+	// Decide result
+	ok := n <= lim.burst && waitDuration <= maxFutureReserve
+
+	// Prepare reservation
+	r := Reservation{
+		ok:    ok,
+		lim:   lim,
+		limit: lim.limit,
+	}
+	if ok {
+		r.tokens = n
+		r.timeToAct = now.Add(waitDuration)
+	}
+
+	// Update state
+	if ok {
+		lim.last = now
+		lim.tokens = tokens
+		lim.lastEvent = r.timeToAct
+	} else {
+		lim.last = last
+	}
+
+	lim.mu.Unlock()
+	return r
+}
+
+// advance calculates and returns an updated state for lim resulting from the passage of time.
+// lim is not changed.
+func (lim *Limiter) advance(now time.Time) (newNow time.Time, newLast time.Time, newTokens float64) {
+	last := lim.last
+	if now.Before(last) {
+		last = now
+	}
+
+	// Avoid making delta overflow below when last is very old.
+	maxElapsed := lim.limit.durationFromTokens(float64(lim.burst) - lim.tokens)
+	elapsed := now.Sub(last)
+	if elapsed > maxElapsed {
+		elapsed = maxElapsed
+	}
+
+	// Calculate the new number of tokens, due to time that passed.
+	delta := lim.limit.tokensFromDuration(elapsed)
+	tokens := lim.tokens + delta
+	if burst := float64(lim.burst); tokens > burst {
+		tokens = burst
+	}
+
+	return now, last, tokens
+}
+
+// durationFromTokens is a unit conversion function from the number of tokens to the duration
+// of time it takes to accumulate them at a rate of limit tokens per second.
+func (limit Limit) durationFromTokens(tokens float64) time.Duration {
+	seconds := tokens / float64(limit)
+	return time.Nanosecond * time.Duration(1e9*seconds)
+}
+
+// tokensFromDuration is a unit conversion function from a time duration to the number of tokens
+// which could be accumulated during that duration at a rate of limit tokens per second.
+func (limit Limit) tokensFromDuration(d time.Duration) float64 {
+	return d.Seconds() * float64(limit)
+}
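
A short usage sketch for the limiter above (illustrative, not part of the
vendored file): Wait blocks until a token is available or the context ends,
which is why the package documentation steers most callers toward it.

    package main

    import (
    	"context"
    	"fmt"
    	"time"

    	"golang.org/x/time/rate"
    )

    func main() {
    	// 10 events per second (one every 100ms) with bursts of up to 5.
    	lim := rate.NewLimiter(rate.Every(100*time.Millisecond), 5)
    	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
    	defer cancel()
    	for i := 0; i < 8; i++ {
    		// The first 5 calls drain the burst; later calls pace 100ms apart.
    		if err := lim.Wait(ctx); err != nil {
    			fmt.Println("wait failed:", err) // context canceled or deadline exceeded
    			return
    		}
    		fmt.Println("event", i)
    	}
    }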
diff --git a/vendor/google.golang.org/appengine/LICENSE b/vendor/google.golang.org/appengine/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/google.golang.org/appengine/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/google.golang.org/appengine/internal/api.go b/vendor/google.golang.org/appengine/internal/api.go
new file mode 100644
index 0000000..bbc1cb9
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/api.go
@@ -0,0 +1,671 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package internal
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"log"
+	"net"
+	"net/http"
+	"net/url"
+	"os"
+	"runtime"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+	netcontext "golang.org/x/net/context"
+
+	basepb "google.golang.org/appengine/internal/base"
+	logpb "google.golang.org/appengine/internal/log"
+	remotepb "google.golang.org/appengine/internal/remote_api"
+)
+
+const (
+	apiPath             = "/rpc_http"
+	defaultTicketSuffix = "/default.20150612t184001.0"
+)
+
+var (
+	// Incoming headers.
+	ticketHeader       = http.CanonicalHeaderKey("X-AppEngine-API-Ticket")
+	dapperHeader       = http.CanonicalHeaderKey("X-Google-DapperTraceInfo")
+	traceHeader        = http.CanonicalHeaderKey("X-Cloud-Trace-Context")
+	curNamespaceHeader = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace")
+	userIPHeader       = http.CanonicalHeaderKey("X-AppEngine-User-IP")
+	remoteAddrHeader   = http.CanonicalHeaderKey("X-AppEngine-Remote-Addr")
+
+	// Outgoing headers.
+	apiEndpointHeader      = http.CanonicalHeaderKey("X-Google-RPC-Service-Endpoint")
+	apiEndpointHeaderValue = []string{"app-engine-apis"}
+	apiMethodHeader        = http.CanonicalHeaderKey("X-Google-RPC-Service-Method")
+	apiMethodHeaderValue   = []string{"/VMRemoteAPI.CallRemoteAPI"}
+	apiDeadlineHeader      = http.CanonicalHeaderKey("X-Google-RPC-Service-Deadline")
+	apiContentType         = http.CanonicalHeaderKey("Content-Type")
+	apiContentTypeValue    = []string{"application/octet-stream"}
+	logFlushHeader         = http.CanonicalHeaderKey("X-AppEngine-Log-Flush-Count")
+
+	apiHTTPClient = &http.Client{
+		Transport: &http.Transport{
+			Proxy: http.ProxyFromEnvironment,
+			Dial:  limitDial,
+		},
+	}
+
+	defaultTicketOnce     sync.Once
+	defaultTicket         string
+	backgroundContextOnce sync.Once
+	backgroundContext     netcontext.Context
+)
+
+func apiURL() *url.URL {
+	host, port := "appengine.googleapis.internal", "10001"
+	if h := os.Getenv("API_HOST"); h != "" {
+		host = h
+	}
+	if p := os.Getenv("API_PORT"); p != "" {
+		port = p
+	}
+	return &url.URL{
+		Scheme: "http",
+		Host:   host + ":" + port,
+		Path:   apiPath,
+	}
+}
+
+func handleHTTP(w http.ResponseWriter, r *http.Request) {
+	c := &context{
+		req:       r,
+		outHeader: w.Header(),
+		apiURL:    apiURL(),
+	}
+	r = r.WithContext(withContext(r.Context(), c))
+	c.req = r
+
+	stopFlushing := make(chan int)
+
+	// Patch up RemoteAddr so it looks reasonable.
+	if addr := r.Header.Get(userIPHeader); addr != "" {
+		r.RemoteAddr = addr
+	} else if addr = r.Header.Get(remoteAddrHeader); addr != "" {
+		r.RemoteAddr = addr
+	} else {
+		// Should not normally reach here, but pick a sensible default anyway.
+		r.RemoteAddr = "127.0.0.1"
+	}
+	// The address in the headers will most likely be of these forms:
+	//	123.123.123.123
+	//	2001:db8::1
+	// net/http.Request.RemoteAddr is specified to be in "IP:port" form.
+	if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil {
+		// Assume the remote address is only a host; add a default port.
+		r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80")
+	}
+
+	// Start goroutine responsible for flushing app logs.
+	// This is done after adding c to ctx.m (and stopped before removing it)
+	// because flushing logs requires making an API call.
+	go c.logFlusher(stopFlushing)
+
+	executeRequestSafely(c, r)
+	c.outHeader = nil // make sure header changes aren't respected any more
+
+	stopFlushing <- 1 // any logging beyond this point will be dropped
+
+	// Flush any pending logs asynchronously.
+	c.pendingLogs.Lock()
+	flushes := c.pendingLogs.flushes
+	if len(c.pendingLogs.lines) > 0 {
+		flushes++
+	}
+	c.pendingLogs.Unlock()
+	flushed := make(chan struct{})
+	go func() {
+		defer close(flushed)
+		// Force a log flush, because with very short requests we
+		// may not ever flush logs.
+		c.flushLog(true)
+	}()
+	w.Header().Set(logFlushHeader, strconv.Itoa(flushes))
+
+	// Avoid nil Write call if c.Write is never called.
+	if c.outCode != 0 {
+		w.WriteHeader(c.outCode)
+	}
+	if c.outBody != nil {
+		w.Write(c.outBody)
+	}
+	// Wait for the last flush to complete before returning,
+	// otherwise the security ticket will not be valid.
+	<-flushed
+}
+
+func executeRequestSafely(c *context, r *http.Request) {
+	defer func() {
+		if x := recover(); x != nil {
+			logf(c, 4, "%s", renderPanic(x)) // 4 == critical
+			c.outCode = 500
+		}
+	}()
+
+	http.DefaultServeMux.ServeHTTP(c, r)
+}
+
+func renderPanic(x interface{}) string {
+	buf := make([]byte, 16<<10) // 16 KB should be plenty
+	buf = buf[:runtime.Stack(buf, false)]
+
+	// Remove the first few stack frames:
+	//   this func
+	//   the recover closure in the caller
+	// That will root the stack trace at the site of the panic.
+	const (
+		skipStart  = "internal.renderPanic"
+		skipFrames = 2
+	)
+	start := bytes.Index(buf, []byte(skipStart))
+	p := start
+	for i := 0; i < skipFrames*2 && p+1 < len(buf); i++ {
+		p = bytes.IndexByte(buf[p+1:], '\n') + p + 1
+		if p < 0 {
+			break
+		}
+	}
+	if p >= 0 {
+		// buf[start:p+1] is the block to remove.
+		// Copy buf[p+1:] over buf[start:] and shrink buf.
+		copy(buf[start:], buf[p+1:])
+		buf = buf[:len(buf)-(p+1-start)]
+	}
+
+	// Add panic heading.
+	head := fmt.Sprintf("panic: %v\n\n", x)
+	if len(head) > len(buf) {
+		// Extremely unlikely to happen.
+		return head
+	}
+	copy(buf[len(head):], buf)
+	copy(buf, head)
+
+	return string(buf)
+}
+
+// context represents the context of an in-flight HTTP request.
+// It implements the appengine.Context and http.ResponseWriter interfaces.
+type context struct {
+	req *http.Request
+
+	outCode   int
+	outHeader http.Header
+	outBody   []byte
+
+	pendingLogs struct {
+		sync.Mutex
+		lines   []*logpb.UserAppLogLine
+		flushes int
+	}
+
+	apiURL *url.URL
+}
+
+var contextKey = "holds a *context"
+
+// jointContext joins two contexts in a superficial way.
+// It takes values and timeouts from a base context, and only values from another context.
+type jointContext struct {
+	base       netcontext.Context
+	valuesOnly netcontext.Context
+}
+
+func (c jointContext) Deadline() (time.Time, bool) {
+	return c.base.Deadline()
+}
+
+func (c jointContext) Done() <-chan struct{} {
+	return c.base.Done()
+}
+
+func (c jointContext) Err() error {
+	return c.base.Err()
+}
+
+func (c jointContext) Value(key interface{}) interface{} {
+	if val := c.base.Value(key); val != nil {
+		return val
+	}
+	return c.valuesOnly.Value(key)
+}
+
+// fromContext returns the App Engine context or nil if ctx is not
+// derived from an App Engine context.
+func fromContext(ctx netcontext.Context) *context {
+	c, _ := ctx.Value(&contextKey).(*context)
+	return c
+}
+
+func withContext(parent netcontext.Context, c *context) netcontext.Context {
+	ctx := netcontext.WithValue(parent, &contextKey, c)
+	if ns := c.req.Header.Get(curNamespaceHeader); ns != "" {
+		ctx = withNamespace(ctx, ns)
+	}
+	return ctx
+}
+
+func toContext(c *context) netcontext.Context {
+	return withContext(netcontext.Background(), c)
+}
+
+func IncomingHeaders(ctx netcontext.Context) http.Header {
+	if c := fromContext(ctx); c != nil {
+		return c.req.Header
+	}
+	return nil
+}
+
+func ReqContext(req *http.Request) netcontext.Context {
+	return req.Context()
+}
+
+func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context {
+	return jointContext{
+		base:       parent,
+		valuesOnly: req.Context(),
+	}
+}
+
+// DefaultTicket returns a ticket used for background context or dev_appserver.
+func DefaultTicket() string {
+	defaultTicketOnce.Do(func() {
+		if IsDevAppServer() {
+			defaultTicket = "testapp" + defaultTicketSuffix
+			return
+		}
+		appID := partitionlessAppID()
+		escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1)
+		majVersion := VersionID(nil)
+		if i := strings.Index(majVersion, "."); i > 0 {
+			majVersion = majVersion[:i]
+		}
+		defaultTicket = fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID())
+	})
+	return defaultTicket
+}
+
+func BackgroundContext() netcontext.Context {
+	backgroundContextOnce.Do(func() {
+		// Compute background security ticket.
+		ticket := DefaultTicket()
+
+		c := &context{
+			req: &http.Request{
+				Header: http.Header{
+					ticketHeader: []string{ticket},
+				},
+			},
+			apiURL: apiURL(),
+		}
+		backgroundContext = toContext(c)
+
+		// TODO(dsymonds): Wire up the shutdown handler to do a final flush.
+		go c.logFlusher(make(chan int))
+	})
+
+	return backgroundContext
+}
+
+// RegisterTestRequest registers the HTTP request req for testing, such that
+// any API calls are sent to the provided URL. It returns a closure to delete
+// the registration.
+// It should only be used by the aetest package.
+func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) (*http.Request, func()) {
+	c := &context{
+		req:    req,
+		apiURL: apiURL,
+	}
+	ctx := withContext(decorate(req.Context()), c)
+	req = req.WithContext(ctx)
+	c.req = req
+	return req, func() {}
+}
+
+var errTimeout = &CallError{
+	Detail:  "Deadline exceeded",
+	Code:    int32(remotepb.RpcError_CANCELLED),
+	Timeout: true,
+}
+
+func (c *context) Header() http.Header { return c.outHeader }
+
+// Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status
+// codes do not permit a response body (nor response entity headers such as
+// Content-Length, Content-Type, etc).
+func bodyAllowedForStatus(status int) bool {
+	switch {
+	case status >= 100 && status <= 199:
+		return false
+	case status == 204:
+		return false
+	case status == 304:
+		return false
+	}
+	return true
+}
+
+func (c *context) Write(b []byte) (int, error) {
+	if c.outCode == 0 {
+		c.WriteHeader(http.StatusOK)
+	}
+	if len(b) > 0 && !bodyAllowedForStatus(c.outCode) {
+		return 0, http.ErrBodyNotAllowed
+	}
+	c.outBody = append(c.outBody, b...)
+	return len(b), nil
+}
+
+func (c *context) WriteHeader(code int) {
+	if c.outCode != 0 {
+		logf(c, 3, "WriteHeader called multiple times on request.") // error level
+		return
+	}
+	c.outCode = code
+}
+
+func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) {
+	hreq := &http.Request{
+		Method: "POST",
+		URL:    c.apiURL,
+		Header: http.Header{
+			apiEndpointHeader: apiEndpointHeaderValue,
+			apiMethodHeader:   apiMethodHeaderValue,
+			apiContentType:    apiContentTypeValue,
+			apiDeadlineHeader: []string{strconv.FormatFloat(timeout.Seconds(), 'f', -1, 64)},
+		},
+		Body:          ioutil.NopCloser(bytes.NewReader(body)),
+		ContentLength: int64(len(body)),
+		Host:          c.apiURL.Host,
+	}
+	if info := c.req.Header.Get(dapperHeader); info != "" {
+		hreq.Header.Set(dapperHeader, info)
+	}
+	if info := c.req.Header.Get(traceHeader); info != "" {
+		hreq.Header.Set(traceHeader, info)
+	}
+
+	tr := apiHTTPClient.Transport.(*http.Transport)
+
+	var timedOut int32 // atomic; set to 1 if timed out
+	t := time.AfterFunc(timeout, func() {
+		atomic.StoreInt32(&timedOut, 1)
+		tr.CancelRequest(hreq)
+	})
+	defer t.Stop()
+	defer func() {
+		// Check if timeout was exceeded.
+		if atomic.LoadInt32(&timedOut) != 0 {
+			err = errTimeout
+		}
+	}()
+
+	hresp, err := apiHTTPClient.Do(hreq)
+	if err != nil {
+		return nil, &CallError{
+			Detail: fmt.Sprintf("service bridge HTTP failed: %v", err),
+			Code:   int32(remotepb.RpcError_UNKNOWN),
+		}
+	}
+	defer hresp.Body.Close()
+	hrespBody, err := ioutil.ReadAll(hresp.Body)
+	if hresp.StatusCode != 200 {
+		return nil, &CallError{
+			Detail: fmt.Sprintf("service bridge returned HTTP %d (%q)", hresp.StatusCode, hrespBody),
+			Code:   int32(remotepb.RpcError_UNKNOWN),
+		}
+	}
+	if err != nil {
+		return nil, &CallError{
+			Detail: fmt.Sprintf("service bridge response bad: %v", err),
+			Code:   int32(remotepb.RpcError_UNKNOWN),
+		}
+	}
+	return hrespBody, nil
+}
+
+func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error {
+	if ns := NamespaceFromContext(ctx); ns != "" {
+		if fn, ok := NamespaceMods[service]; ok {
+			fn(in, ns)
+		}
+	}
+
+	if f, ctx, ok := callOverrideFromContext(ctx); ok {
+		return f(ctx, service, method, in, out)
+	}
+
+	// Handle already-done contexts quickly.
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	default:
+	}
+
+	c := fromContext(ctx)
+	if c == nil {
+		// Give a good error message rather than a panic lower down.
+		return errNotAppEngineContext
+	}
+
+	// Apply transaction modifications if we're in a transaction.
+	if t := transactionFromContext(ctx); t != nil {
+		if t.finished {
+			return errors.New("transaction context has expired")
+		}
+		applyTransaction(in, &t.transaction)
+	}
+
+	// Default RPC timeout is 60s.
+	timeout := 60 * time.Second
+	if deadline, ok := ctx.Deadline(); ok {
+		timeout = deadline.Sub(time.Now())
+	}
+
+	data, err := proto.Marshal(in)
+	if err != nil {
+		return err
+	}
+
+	ticket := c.req.Header.Get(ticketHeader)
+	// Use a test ticket under test environment.
+	if ticket == "" {
+		if appid := ctx.Value(&appIDOverrideKey); appid != nil {
+			ticket = appid.(string) + defaultTicketSuffix
+		}
+	}
+	// Fall back to the background ticket when the request ticket is not available in Flex or dev_appserver.
+	if ticket == "" {
+		ticket = DefaultTicket()
+	}
+	req := &remotepb.Request{
+		ServiceName: &service,
+		Method:      &method,
+		Request:     data,
+		RequestId:   &ticket,
+	}
+	hreqBody, err := proto.Marshal(req)
+	if err != nil {
+		return err
+	}
+
+	hrespBody, err := c.post(hreqBody, timeout)
+	if err != nil {
+		return err
+	}
+
+	res := &remotepb.Response{}
+	if err := proto.Unmarshal(hrespBody, res); err != nil {
+		return err
+	}
+	if res.RpcError != nil {
+		ce := &CallError{
+			Detail: res.RpcError.GetDetail(),
+			Code:   *res.RpcError.Code,
+		}
+		switch remotepb.RpcError_ErrorCode(ce.Code) {
+		case remotepb.RpcError_CANCELLED, remotepb.RpcError_DEADLINE_EXCEEDED:
+			ce.Timeout = true
+		}
+		return ce
+	}
+	if res.ApplicationError != nil {
+		return &APIError{
+			Service: *req.ServiceName,
+			Detail:  res.ApplicationError.GetDetail(),
+			Code:    *res.ApplicationError.Code,
+		}
+	}
+	if res.Exception != nil || res.JavaException != nil {
+		// This shouldn't happen, but let's be defensive.
+		return &CallError{
+			Detail: "service bridge returned exception",
+			Code:   int32(remotepb.RpcError_UNKNOWN),
+		}
+	}
+	return proto.Unmarshal(res.Response, out)
+}
+
+func (c *context) Request() *http.Request {
+	return c.req
+}
+
+func (c *context) addLogLine(ll *logpb.UserAppLogLine) {
+	// Truncate long log lines.
+	// TODO(dsymonds): Check if this is still necessary.
+	const lim = 8 << 10
+	if len(*ll.Message) > lim {
+		suffix := fmt.Sprintf("...(length %d)", len(*ll.Message))
+		ll.Message = proto.String((*ll.Message)[:lim-len(suffix)] + suffix)
+	}
+
+	c.pendingLogs.Lock()
+	c.pendingLogs.lines = append(c.pendingLogs.lines, ll)
+	c.pendingLogs.Unlock()
+}
+
+var logLevelName = map[int64]string{
+	0: "DEBUG",
+	1: "INFO",
+	2: "WARNING",
+	3: "ERROR",
+	4: "CRITICAL",
+}
+
+func logf(c *context, level int64, format string, args ...interface{}) {
+	if c == nil {
+		panic("not an App Engine context")
+	}
+	s := fmt.Sprintf(format, args...)
+	s = strings.TrimRight(s, "\n") // Remove any trailing newline characters.
+	c.addLogLine(&logpb.UserAppLogLine{
+		TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3),
+		Level:         &level,
+		Message:       &s,
+	})
+	// Only duplicate the log to stderr if not running on App Engine second generation.
+	if !IsSecondGen() {
+		log.Print(logLevelName[level] + ": " + s)
+	}
+}
+
+// flushLog attempts to flush any pending logs to the appserver.
+// It should not be called concurrently.
+func (c *context) flushLog(force bool) (flushed bool) {
+	c.pendingLogs.Lock()
+	// Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious.
+	n, rem := 0, 30<<20
+	for ; n < len(c.pendingLogs.lines); n++ {
+		ll := c.pendingLogs.lines[n]
+		// Each log line will require about 3 bytes of overhead.
+		nb := proto.Size(ll) + 3
+		if nb > rem {
+			break
+		}
+		rem -= nb
+	}
+	lines := c.pendingLogs.lines[:n]
+	c.pendingLogs.lines = c.pendingLogs.lines[n:]
+	c.pendingLogs.Unlock()
+
+	if len(lines) == 0 && !force {
+		// Nothing to flush.
+		return false
+	}
+
+	rescueLogs := false
+	defer func() {
+		if rescueLogs {
+			c.pendingLogs.Lock()
+			c.pendingLogs.lines = append(lines, c.pendingLogs.lines...)
+			c.pendingLogs.Unlock()
+		}
+	}()
+
+	buf, err := proto.Marshal(&logpb.UserAppLogGroup{
+		LogLine: lines,
+	})
+	if err != nil {
+		log.Printf("internal.flushLog: marshaling UserAppLogGroup: %v", err)
+		rescueLogs = true
+		return false
+	}
+
+	req := &logpb.FlushRequest{
+		Logs: buf,
+	}
+	res := &basepb.VoidProto{}
+	c.pendingLogs.Lock()
+	c.pendingLogs.flushes++
+	c.pendingLogs.Unlock()
+	if err := Call(toContext(c), "logservice", "Flush", req, res); err != nil {
+		log.Printf("internal.flushLog: Flush RPC: %v", err)
+		rescueLogs = true
+		return false
+	}
+	return true
+}
+
+const (
+	// Log flushing parameters.
+	flushInterval      = 1 * time.Second
+	forceFlushInterval = 60 * time.Second
+)
+
+func (c *context) logFlusher(stop <-chan int) {
+	lastFlush := time.Now()
+	tick := time.NewTicker(flushInterval)
+	for {
+		select {
+		case <-stop:
+			// Request finished.
+			tick.Stop()
+			return
+		case <-tick.C:
+			force := time.Now().Sub(lastFlush) > forceFlushInterval
+			if c.flushLog(force) {
+				lastFlush = time.Now()
+			}
+		}
+	}
+}
+
+func ContextForTesting(req *http.Request) netcontext.Context {
+	return toContext(&context{req: req})
+}
diff --git a/vendor/google.golang.org/appengine/internal/api_classic.go b/vendor/google.golang.org/appengine/internal/api_classic.go
new file mode 100644
index 0000000..f0f40b2
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/api_classic.go
@@ -0,0 +1,169 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package internal
+
+import (
+	"errors"
+	"fmt"
+	"net/http"
+	"time"
+
+	"appengine"
+	"appengine_internal"
+	basepb "appengine_internal/base"
+
+	"github.com/golang/protobuf/proto"
+	netcontext "golang.org/x/net/context"
+)
+
+var contextKey = "holds an appengine.Context"
+
+// fromContext returns the App Engine context or nil if ctx is not
+// derived from an App Engine context.
+func fromContext(ctx netcontext.Context) appengine.Context {
+	c, _ := ctx.Value(&contextKey).(appengine.Context)
+	return c
+}
+
+// This is only for classic App Engine adapters.
+func ClassicContextFromContext(ctx netcontext.Context) (appengine.Context, error) {
+	c := fromContext(ctx)
+	if c == nil {
+		return nil, errNotAppEngineContext
+	}
+	return c, nil
+}
+
+func withContext(parent netcontext.Context, c appengine.Context) netcontext.Context {
+	ctx := netcontext.WithValue(parent, &contextKey, c)
+
+	s := &basepb.StringProto{}
+	c.Call("__go__", "GetNamespace", &basepb.VoidProto{}, s, nil)
+	if ns := s.GetValue(); ns != "" {
+		ctx = NamespacedContext(ctx, ns)
+	}
+
+	return ctx
+}
+
+func IncomingHeaders(ctx netcontext.Context) http.Header {
+	if c := fromContext(ctx); c != nil {
+		if req, ok := c.Request().(*http.Request); ok {
+			return req.Header
+		}
+	}
+	return nil
+}
+
+func ReqContext(req *http.Request) netcontext.Context {
+	return WithContext(netcontext.Background(), req)
+}
+
+func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context {
+	c := appengine.NewContext(req)
+	return withContext(parent, c)
+}
+
+type testingContext struct {
+	appengine.Context
+
+	req *http.Request
+}
+
+func (t *testingContext) FullyQualifiedAppID() string { return "dev~testcontext" }
+func (t *testingContext) Call(service, method string, _, _ appengine_internal.ProtoMessage, _ *appengine_internal.CallOptions) error {
+	if service == "__go__" && method == "GetNamespace" {
+		return nil
+	}
+	return fmt.Errorf("testingContext: unsupported Call")
+}
+func (t *testingContext) Request() interface{} { return t.req }
+
+func ContextForTesting(req *http.Request) netcontext.Context {
+	return withContext(netcontext.Background(), &testingContext{req: req})
+}
+
+func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error {
+	if ns := NamespaceFromContext(ctx); ns != "" {
+		if fn, ok := NamespaceMods[service]; ok {
+			fn(in, ns)
+		}
+	}
+
+	if f, ctx, ok := callOverrideFromContext(ctx); ok {
+		return f(ctx, service, method, in, out)
+	}
+
+	// Handle already-done contexts quickly.
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	default:
+	}
+
+	c := fromContext(ctx)
+	if c == nil {
+		// Give a good error message rather than a panic lower down.
+		return errNotAppEngineContext
+	}
+
+	// Apply transaction modifications if we're in a transaction.
+	if t := transactionFromContext(ctx); t != nil {
+		if t.finished {
+			return errors.New("transaction context has expired")
+		}
+		applyTransaction(in, &t.transaction)
+	}
+
+	var opts *appengine_internal.CallOptions
+	if d, ok := ctx.Deadline(); ok {
+		opts = &appengine_internal.CallOptions{
+			Timeout: d.Sub(time.Now()),
+		}
+	}
+
+	err := c.Call(service, method, in, out, opts)
+	switch v := err.(type) {
+	case *appengine_internal.APIError:
+		return &APIError{
+			Service: v.Service,
+			Detail:  v.Detail,
+			Code:    v.Code,
+		}
+	case *appengine_internal.CallError:
+		return &CallError{
+			Detail:  v.Detail,
+			Code:    v.Code,
+			Timeout: v.Timeout,
+		}
+	}
+	return err
+}
+
+func handleHTTP(w http.ResponseWriter, r *http.Request) {
+	panic("handleHTTP called; this should be impossible")
+}
+
+func logf(c appengine.Context, level int64, format string, args ...interface{}) {
+	var fn func(format string, args ...interface{})
+	switch level {
+	case 0:
+		fn = c.Debugf
+	case 1:
+		fn = c.Infof
+	case 2:
+		fn = c.Warningf
+	case 3:
+		fn = c.Errorf
+	case 4:
+		fn = c.Criticalf
+	default:
+		// This shouldn't happen.
+		fn = c.Criticalf
+	}
+	fn(format, args...)
+}
diff --git a/vendor/google.golang.org/appengine/internal/api_common.go b/vendor/google.golang.org/appengine/internal/api_common.go
new file mode 100644
index 0000000..e0c0b21
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/api_common.go
@@ -0,0 +1,123 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+	"errors"
+	"os"
+
+	"github.com/golang/protobuf/proto"
+	netcontext "golang.org/x/net/context"
+)
+
+var errNotAppEngineContext = errors.New("not an App Engine context")
+
+type CallOverrideFunc func(ctx netcontext.Context, service, method string, in, out proto.Message) error
+
+var callOverrideKey = "holds []CallOverrideFunc"
+
+func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Context {
+	// We avoid appending to any existing call override
+	// so we don't risk overwriting a popped stack below.
+	var cofs []CallOverrideFunc
+	if uf, ok := ctx.Value(&callOverrideKey).([]CallOverrideFunc); ok {
+		cofs = append(cofs, uf...)
+	}
+	cofs = append(cofs, f)
+	return netcontext.WithValue(ctx, &callOverrideKey, cofs)
+}
+
+func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netcontext.Context, bool) {
+	cofs, _ := ctx.Value(&callOverrideKey).([]CallOverrideFunc)
+	if len(cofs) == 0 {
+		return nil, nil, false
+	}
+	// We found a list of overrides; grab the last, and reconstitute a
+	// context that will hide it.
+	f := cofs[len(cofs)-1]
+	ctx = netcontext.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1])
+	return f, ctx, true
+}
+
+type logOverrideFunc func(level int64, format string, args ...interface{})
+
+var logOverrideKey = "holds a logOverrideFunc"
+
+func WithLogOverride(ctx netcontext.Context, f logOverrideFunc) netcontext.Context {
+	return netcontext.WithValue(ctx, &logOverrideKey, f)
+}
+
+var appIDOverrideKey = "holds a string, being the full app ID"
+
+func WithAppIDOverride(ctx netcontext.Context, appID string) netcontext.Context {
+	return netcontext.WithValue(ctx, &appIDOverrideKey, appID)
+}
+
+var namespaceKey = "holds the namespace string"
+
+func withNamespace(ctx netcontext.Context, ns string) netcontext.Context {
+	return netcontext.WithValue(ctx, &namespaceKey, ns)
+}
+
+func NamespaceFromContext(ctx netcontext.Context) string {
+	// If there's no namespace, return the empty string.
+	ns, _ := ctx.Value(&namespaceKey).(string)
+	return ns
+}
+
+// FullyQualifiedAppID returns the fully-qualified application ID.
+// This may contain a partition prefix (e.g. "s~" for High Replication apps),
+// or a domain prefix (e.g. "example.com:").
+func FullyQualifiedAppID(ctx netcontext.Context) string {
+	if id, ok := ctx.Value(&appIDOverrideKey).(string); ok {
+		return id
+	}
+	return fullyQualifiedAppID(ctx)
+}
+
+func Logf(ctx netcontext.Context, level int64, format string, args ...interface{}) {
+	if f, ok := ctx.Value(&logOverrideKey).(logOverrideFunc); ok {
+		f(level, format, args...)
+		return
+	}
+	c := fromContext(ctx)
+	if c == nil {
+		panic(errNotAppEngineContext)
+	}
+	logf(c, level, format, args...)
+}
+
+// NamespacedContext wraps a Context to support namespaces.
+func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context {
+	return withNamespace(ctx, namespace)
+}
+
+// SetTestEnv sets the env variables for testing the background ticket in Flex.
+func SetTestEnv() func() {
+	var environ = []struct {
+		key, value string
+	}{
+		{"GAE_LONG_APP_ID", "my-app-id"},
+		{"GAE_MINOR_VERSION", "067924799508853122"},
+		{"GAE_MODULE_INSTANCE", "0"},
+		{"GAE_MODULE_NAME", "default"},
+		{"GAE_MODULE_VERSION", "20150612t184001"},
+	}
+
+	for i, v := range environ {
+		old := os.Getenv(v.key)
+		os.Setenv(v.key, v.value)
+		environ[i].value = old // assign to the slice element; v is only a copy
+	}
+	return func() { // Restore old environment after the test completes.
+		for _, v := range environ {
+			if v.value == "" {
+				os.Unsetenv(v.key)
+				continue
+			}
+			os.Setenv(v.key, v.value)
+		}
+	}
+}
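
A sketch of the intended call pattern (the test name and import are
illustrative): the returned function undoes the changes, so the fake Flex
environment can be scoped to a single test.

    func TestBackgroundTicket(t *testing.T) {
    	restore := internal.SetTestEnv()
    	defer restore() // put the original GAE_* variables back

    	// ... exercise code that reads GAE_LONG_APP_ID, GAE_MODULE_NAME, etc.
    }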
diff --git a/vendor/google.golang.org/appengine/internal/app_id.go b/vendor/google.golang.org/appengine/internal/app_id.go
new file mode 100644
index 0000000..11df8c0
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/app_id.go
@@ -0,0 +1,28 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+	"strings"
+)
+
+func parseFullAppID(appid string) (partition, domain, displayID string) {
+	if i := strings.Index(appid, "~"); i != -1 {
+		partition, appid = appid[:i], appid[i+1:]
+	}
+	if i := strings.Index(appid, ":"); i != -1 {
+		domain, appid = appid[:i], appid[i+1:]
+	}
+	return partition, domain, appid
+}
+
+// appID returns "appid" or "domain.com:appid".
+func appID(fullAppID string) string {
+	_, dom, dis := parseFullAppID(fullAppID)
+	if dom != "" {
+		return dom + ":" + dis
+	}
+	return dis
+}
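
parseFullAppID peels off an optional "partition~" prefix and then an optional
"domain:" prefix, in that order. A few concrete cases (illustrative):

    parseFullAppID("s~example.com:myapp") // ("s", "example.com", "myapp")
    parseFullAppID("example.com:myapp")   // ("",  "example.com", "myapp")
    parseFullAppID("myapp")               // ("",  "",            "myapp")
    appID("s~example.com:myapp")          // "example.com:myapp"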
diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.proto b/vendor/google.golang.org/appengine/internal/base/api_base.proto
new file mode 100644
index 0000000..56cd7a3
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/base/api_base.proto
@@ -0,0 +1,33 @@
+// Built-in base types for API calls. Primarily useful as return types.
+
+syntax = "proto2";
+option go_package = "base";
+
+package appengine.base;
+
+message StringProto {
+  required string value = 1;
+}
+
+message Integer32Proto {
+  required int32 value = 1;
+}
+
+message Integer64Proto {
+  required int64 value = 1;
+}
+
+message BoolProto {
+  required bool value = 1;
+}
+
+message DoubleProto {
+  required double value = 1;
+}
+
+message BytesProto {
+  required bytes value = 1 [ctype=CORD];
+}
+
+message VoidProto {
+}
diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto
new file mode 100755
index 0000000..497b4d9
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto
@@ -0,0 +1,551 @@
+syntax = "proto2";
+option go_package = "datastore";
+
+package appengine;
+
+message Action{}
+
+message PropertyValue {
+  optional int64 int64Value = 1;
+  optional bool booleanValue = 2;
+  optional string stringValue = 3;
+  optional double doubleValue = 4;
+
+  optional group PointValue = 5 {
+    required double x = 6;
+    required double y = 7;
+  }
+
+  optional group UserValue = 8 {
+    required string email = 9;
+    required string auth_domain = 10;
+    optional string nickname = 11;
+    optional string federated_identity = 21;
+    optional string federated_provider = 22;
+  }
+
+  optional group ReferenceValue = 12 {
+    required string app = 13;
+    optional string name_space = 20;
+    repeated group PathElement = 14 {
+      required string type = 15;
+      optional int64 id = 16;
+      optional string name = 17;
+    }
+  }
+}
+
+message Property {
+  enum Meaning {
+    NO_MEANING = 0;
+    BLOB = 14;
+    TEXT = 15;
+    BYTESTRING = 16;
+
+    ATOM_CATEGORY = 1;
+    ATOM_LINK = 2;
+    ATOM_TITLE = 3;
+    ATOM_CONTENT = 4;
+    ATOM_SUMMARY = 5;
+    ATOM_AUTHOR = 6;
+
+    GD_WHEN = 7;
+    GD_EMAIL = 8;
+    GEORSS_POINT = 9;
+    GD_IM = 10;
+
+    GD_PHONENUMBER = 11;
+    GD_POSTALADDRESS = 12;
+
+    GD_RATING = 13;
+
+    BLOBKEY = 17;
+    ENTITY_PROTO = 19;
+
+    INDEX_VALUE = 18;
+  };
+
+  optional Meaning meaning = 1 [default = NO_MEANING];
+  optional string meaning_uri = 2;
+
+  required string name = 3;
+
+  required PropertyValue value = 5;
+
+  required bool multiple = 4;
+
+  optional bool searchable = 6 [default=false];
+
+  enum FtsTokenizationOption {
+    HTML = 1;
+    ATOM = 2;
+  }
+
+  optional FtsTokenizationOption fts_tokenization_option = 8;
+
+  optional string locale = 9 [default = "en"];
+}
+
+message Path {
+  repeated group Element = 1 {
+    required string type = 2;
+    optional int64 id = 3;
+    optional string name = 4;
+  }
+}
+
+message Reference {
+  required string app = 13;
+  optional string name_space = 20;
+  required Path path = 14;
+}
+
+message User {
+  required string email = 1;
+  required string auth_domain = 2;
+  optional string nickname = 3;
+  optional string federated_identity = 6;
+  optional string federated_provider = 7;
+}
+
+message EntityProto {
+  required Reference key = 13;
+  required Path entity_group = 16;
+  optional User owner = 17;
+
+  enum Kind {
+    GD_CONTACT = 1;
+    GD_EVENT = 2;
+    GD_MESSAGE = 3;
+  }
+  optional Kind kind = 4;
+  optional string kind_uri = 5;
+
+  repeated Property property = 14;
+  repeated Property raw_property = 15;
+
+  optional int32 rank = 18;
+}
+
+message CompositeProperty {
+  required int64 index_id = 1;
+  repeated string value = 2;
+}
+
+message Index {
+  required string entity_type = 1;
+  required bool ancestor = 5;
+  repeated group Property = 2 {
+    required string name = 3;
+    enum Direction {
+      ASCENDING = 1;
+      DESCENDING = 2;
+    }
+    optional Direction direction = 4 [default = ASCENDING];
+  }
+}
+
+message CompositeIndex {
+  required string app_id = 1;
+  required int64 id = 2;
+  required Index definition = 3;
+
+  enum State {
+    WRITE_ONLY = 1;
+    READ_WRITE = 2;
+    DELETED = 3;
+    ERROR = 4;
+  }
+  required State state = 4;
+
+  optional bool only_use_if_required = 6 [default = false];
+}
+
+message IndexPostfix {
+  message IndexValue {
+    required string property_name = 1;
+    required PropertyValue value = 2;
+  }
+
+  repeated IndexValue index_value = 1;
+
+  optional Reference key = 2;
+
+  optional bool before = 3 [default=true];
+}
+
+message IndexPosition {
+  optional string key = 1;
+
+  optional bool before = 2 [default=true];
+}
+
+message Snapshot {
+  enum Status {
+    INACTIVE = 0;
+    ACTIVE = 1;
+  }
+
+  required int64 ts = 1;
+}
+
+message InternalHeader {
+  optional string qos = 1;
+}
+
+message Transaction {
+  optional InternalHeader header = 4;
+  required fixed64 handle = 1;
+  required string app = 2;
+  optional bool mark_changes = 3 [default = false];
+}
+
+message Query {
+  optional InternalHeader header = 39;
+
+  required string app = 1;
+  optional string name_space = 29;
+
+  optional string kind = 3;
+  optional Reference ancestor = 17;
+
+  repeated group Filter = 4 {
+    enum Operator {
+      LESS_THAN = 1;
+      LESS_THAN_OR_EQUAL = 2;
+      GREATER_THAN = 3;
+      GREATER_THAN_OR_EQUAL = 4;
+      EQUAL = 5;
+      IN = 6;
+      EXISTS = 7;
+    }
+
+    required Operator op = 6;
+    repeated Property property = 14;
+  }
+
+  optional string search_query = 8;
+
+  repeated group Order = 9 {
+    enum Direction {
+      ASCENDING = 1;
+      DESCENDING = 2;
+    }
+
+    required string property = 10;
+    optional Direction direction = 11 [default = ASCENDING];
+  }
+
+  enum Hint {
+    ORDER_FIRST = 1;
+    ANCESTOR_FIRST = 2;
+    FILTER_FIRST = 3;
+  }
+  optional Hint hint = 18;
+
+  optional int32 count = 23;
+
+  optional int32 offset = 12 [default = 0];
+
+  optional int32 limit = 16;
+
+  optional CompiledCursor compiled_cursor = 30;
+  optional CompiledCursor end_compiled_cursor = 31;
+
+  repeated CompositeIndex composite_index = 19;
+
+  optional bool require_perfect_plan = 20 [default = false];
+
+  optional bool keys_only = 21 [default = false];
+
+  optional Transaction transaction = 22;
+
+  optional bool compile = 25 [default = false];
+
+  optional int64 failover_ms = 26;
+
+  optional bool strong = 32;
+
+  repeated string property_name = 33;
+
+  repeated string group_by_property_name = 34;
+
+  optional bool distinct = 24;
+
+  optional int64 min_safe_time_seconds = 35;
+
+  repeated string safe_replica_name = 36;
+
+  optional bool persist_offset = 37 [default=false];
+}
+
+message CompiledQuery {
+  required group PrimaryScan = 1 {
+    optional string index_name = 2;
+
+    optional string start_key = 3;
+    optional bool start_inclusive = 4;
+    optional string end_key = 5;
+    optional bool end_inclusive = 6;
+
+    repeated string start_postfix_value = 22;
+    repeated string end_postfix_value = 23;
+
+    optional int64 end_unapplied_log_timestamp_us = 19;
+  }
+
+  repeated group MergeJoinScan = 7 {
+    required string index_name = 8;
+
+    repeated string prefix_value = 9;
+
+    optional bool value_prefix = 20 [default=false];
+  }
+
+  optional Index index_def = 21;
+
+  optional int32 offset = 10 [default = 0];
+
+  optional int32 limit = 11;
+
+  required bool keys_only = 12;
+
+  repeated string property_name = 24;
+
+  optional int32 distinct_infix_size = 25;
+
+  optional group EntityFilter = 13 {
+    optional bool distinct = 14 [default=false];
+
+    optional string kind = 17;
+    optional Reference ancestor = 18;
+  }
+}
+
+message CompiledCursor {
+  optional group Position = 2 {
+    optional string start_key = 27;
+
+    repeated group IndexValue = 29 {
+      optional string property = 30;
+      required PropertyValue value = 31;
+    }
+
+    optional Reference key = 32;
+
+    optional bool start_inclusive = 28 [default=true];
+  }
+}
+
+message Cursor {
+  required fixed64 cursor = 1;
+
+  optional string app = 2;
+}
+
+message Error {
+  enum ErrorCode {
+    BAD_REQUEST = 1;
+    CONCURRENT_TRANSACTION = 2;
+    INTERNAL_ERROR = 3;
+    NEED_INDEX = 4;
+    TIMEOUT = 5;
+    PERMISSION_DENIED = 6;
+    BIGTABLE_ERROR = 7;
+    COMMITTED_BUT_STILL_APPLYING = 8;
+    CAPABILITY_DISABLED = 9;
+    TRY_ALTERNATE_BACKEND = 10;
+    SAFE_TIME_TOO_OLD = 11;
+  }
+}
+
+message Cost {
+  optional int32 index_writes = 1;
+  optional int32 index_write_bytes = 2;
+  optional int32 entity_writes = 3;
+  optional int32 entity_write_bytes = 4;
+  optional group CommitCost = 5 {
+    optional int32 requested_entity_puts = 6;
+    optional int32 requested_entity_deletes = 7;
+  };
+  optional int32 approximate_storage_delta = 8;
+  optional int32 id_sequence_updates = 9;
+}
+
+message GetRequest {
+  optional InternalHeader header = 6;
+
+  repeated Reference key = 1;
+  optional Transaction transaction = 2;
+
+  optional int64 failover_ms = 3;
+
+  optional bool strong = 4;
+
+  optional bool allow_deferred = 5 [default=false];
+}
+
+message GetResponse {
+  repeated group Entity = 1 {
+    optional EntityProto entity = 2;
+    optional Reference key = 4;
+
+    optional int64 version = 3;
+  }
+
+  repeated Reference deferred = 5;
+
+  optional bool in_order = 6 [default=true];
+}
+
+message PutRequest {
+  optional InternalHeader header = 11;
+
+  repeated EntityProto entity = 1;
+  optional Transaction transaction = 2;
+  repeated CompositeIndex composite_index = 3;
+
+  optional bool trusted = 4 [default = false];
+
+  optional bool force = 7 [default = false];
+
+  optional bool mark_changes = 8 [default = false];
+  repeated Snapshot snapshot = 9;
+
+  enum AutoIdPolicy {
+    CURRENT = 0;
+    SEQUENTIAL = 1;
+  }
+  optional AutoIdPolicy auto_id_policy = 10 [default = CURRENT];
+}
+
+message PutResponse {
+  repeated Reference key = 1;
+  optional Cost cost = 2;
+  repeated int64 version = 3;
+}
+
+message TouchRequest {
+  optional InternalHeader header = 10;
+
+  repeated Reference key = 1;
+  repeated CompositeIndex composite_index = 2;
+  optional bool force = 3 [default = false];
+  repeated Snapshot snapshot = 9;
+}
+
+message TouchResponse {
+  optional Cost cost = 1;
+}
+
+message DeleteRequest {
+  optional InternalHeader header = 10;
+
+  repeated Reference key = 6;
+  optional Transaction transaction = 5;
+
+  optional bool trusted = 4 [default = false];
+
+  optional bool force = 7 [default = false];
+
+  optional bool mark_changes = 8 [default = false];
+  repeated Snapshot snapshot = 9;
+}
+
+message DeleteResponse {
+  optional Cost cost = 1;
+  repeated int64 version = 3;
+}
+
+message NextRequest {
+  optional InternalHeader header = 5;
+
+  required Cursor cursor = 1;
+  optional int32 count = 2;
+
+  optional int32 offset = 4 [default = 0];
+
+  optional bool compile = 3 [default = false];
+}
+
+message QueryResult {
+  optional Cursor cursor = 1;
+
+  repeated EntityProto result = 2;
+
+  optional int32 skipped_results = 7;
+
+  required bool more_results = 3;
+
+  optional bool keys_only = 4;
+
+  optional bool index_only = 9;
+
+  optional bool small_ops = 10;
+
+  optional CompiledQuery compiled_query = 5;
+
+  optional CompiledCursor compiled_cursor = 6;
+
+  repeated CompositeIndex index = 8;
+
+  repeated int64 version = 11;
+}
+
+message AllocateIdsRequest {
+  optional InternalHeader header = 4;
+
+  optional Reference model_key = 1;
+
+  optional int64 size = 2;
+
+  optional int64 max = 3;
+
+  repeated Reference reserve = 5;
+}
+
+message AllocateIdsResponse {
+  required int64 start = 1;
+  required int64 end = 2;
+  optional Cost cost = 3;
+}
+
+message CompositeIndices {
+  repeated CompositeIndex index = 1;
+}
+
+message AddActionsRequest {
+  optional InternalHeader header = 3;
+
+  required Transaction transaction = 1;
+  repeated Action action = 2;
+}
+
+message AddActionsResponse {
+}
+
+message BeginTransactionRequest {
+  optional InternalHeader header = 3;
+
+  required string app = 1;
+  optional bool allow_multiple_eg = 2 [default = false];
+  optional string database_id = 4;
+
+  enum TransactionMode {
+    UNKNOWN = 0;
+    READ_ONLY = 1;
+    READ_WRITE = 2;
+  }
+  optional TransactionMode mode = 5 [default = UNKNOWN];
+
+  optional Transaction previous_transaction = 7;
+}
+
+message CommitResponse {
+  optional Cost cost = 1;
+
+  repeated group Version = 3 {
+    required Reference root_entity_key = 4;
+    required int64 version = 5;
+  }
+}
diff --git a/vendor/google.golang.org/appengine/internal/identity.go b/vendor/google.golang.org/appengine/internal/identity.go
new file mode 100644
index 0000000..9b4134e
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/identity.go
@@ -0,0 +1,55 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+	"os"
+
+	netcontext "golang.org/x/net/context"
+)
+
+var (
+	// This is set to true in identity_classic.go, which is behind the appengine build tag.
+	// The appengine build tag is set for the first generation runtimes (<= Go 1.9) but not
+	// the second generation runtimes (>= Go 1.11), so this indicates whether we're on a
+	// first-gen runtime. See IsStandard below for the second-gen check.
+	appengineStandard bool
+
+	// This is set to true in identity_flex.go, which is behind the appenginevm build tag.
+	appengineFlex bool
+)
+
+// AppID is the implementation of the wrapper function of the same name in
+// ../identity.go. See that file for commentary.
+func AppID(c netcontext.Context) string {
+	return appID(FullyQualifiedAppID(c))
+}
+
+// IsStandard is the implementation of the wrapper function of the same name in
+// ../appengine.go. See that file for commentary.
+func IsStandard() bool {
+	// appengineStandard will be true for first-gen runtimes (<= Go 1.9) but not
+	// second-gen (>= Go 1.11).
+	return appengineStandard || IsSecondGen()
+}
+
+// IsSecondGen is the implementation of the wrapper function of the same name in
+// ../appengine.go. See that file for commentary.
+func IsSecondGen() bool {
+	// Second-gen runtimes set $GAE_ENV so we use that to check if we're on a second-gen runtime.
+	return os.Getenv("GAE_ENV") == "standard"
+}
+
+// IsFlex is the implementation of the wrapper function of the same name in
+// ../appengine.go. See that file for commentary.
+func IsFlex() bool {
+	return appengineFlex
+}
+
+// IsAppEngine is the implementation of the wrapper function of the same name in
+// ../appengine.go. See that file for commentary.
+func IsAppEngine() bool {
+	return IsStandard() || IsFlex()
+}
diff --git a/vendor/google.golang.org/appengine/internal/identity_classic.go b/vendor/google.golang.org/appengine/internal/identity_classic.go
new file mode 100644
index 0000000..4e979f4
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/identity_classic.go
@@ -0,0 +1,61 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package internal
+
+import (
+	"appengine"
+
+	netcontext "golang.org/x/net/context"
+)
+
+func init() {
+	appengineStandard = true
+}
+
+func DefaultVersionHostname(ctx netcontext.Context) string {
+	c := fromContext(ctx)
+	if c == nil {
+		panic(errNotAppEngineContext)
+	}
+	return appengine.DefaultVersionHostname(c)
+}
+
+func Datacenter(_ netcontext.Context) string { return appengine.Datacenter() }
+func ServerSoftware() string                 { return appengine.ServerSoftware() }
+func InstanceID() string                     { return appengine.InstanceID() }
+func IsDevAppServer() bool                   { return appengine.IsDevAppServer() }
+
+func RequestID(ctx netcontext.Context) string {
+	c := fromContext(ctx)
+	if c == nil {
+		panic(errNotAppEngineContext)
+	}
+	return appengine.RequestID(c)
+}
+
+func ModuleName(ctx netcontext.Context) string {
+	c := fromContext(ctx)
+	if c == nil {
+		panic(errNotAppEngineContext)
+	}
+	return appengine.ModuleName(c)
+}
+func VersionID(ctx netcontext.Context) string {
+	c := fromContext(ctx)
+	if c == nil {
+		panic(errNotAppEngineContext)
+	}
+	return appengine.VersionID(c)
+}
+
+func fullyQualifiedAppID(ctx netcontext.Context) string {
+	c := fromContext(ctx)
+	if c == nil {
+		panic(errNotAppEngineContext)
+	}
+	return c.FullyQualifiedAppID()
+}
diff --git a/vendor/google.golang.org/appengine/internal/identity_flex.go b/vendor/google.golang.org/appengine/internal/identity_flex.go
new file mode 100644
index 0000000..d5e2e7b
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/identity_flex.go
@@ -0,0 +1,11 @@
+// Copyright 2018 Google LLC. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build appenginevm
+
+package internal
+
+func init() {
+	appengineFlex = true
+}
diff --git a/vendor/google.golang.org/appengine/internal/identity_vm.go b/vendor/google.golang.org/appengine/internal/identity_vm.go
new file mode 100644
index 0000000..5d80672
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/identity_vm.go
@@ -0,0 +1,134 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package internal
+
+import (
+	"log"
+	"net/http"
+	"os"
+	"strings"
+
+	netcontext "golang.org/x/net/context"
+)
+
+// These functions are implementations of the wrapper functions
+// in ../identity.go. See that file for commentary.
+
+const (
+	hDefaultVersionHostname = "X-AppEngine-Default-Version-Hostname"
+	hRequestLogId           = "X-AppEngine-Request-Log-Id"
+	hDatacenter             = "X-AppEngine-Datacenter"
+)
+
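+// ctxHeaders returns the headers of the inbound request associated with ctx,
+// or nil if ctx does not carry an App Engine request.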
+func ctxHeaders(ctx netcontext.Context) http.Header {
+	c := fromContext(ctx)
+	if c == nil {
+		return nil
+	}
+	return c.Request().Header
+}
+
+func DefaultVersionHostname(ctx netcontext.Context) string {
+	return ctxHeaders(ctx).Get(hDefaultVersionHostname)
+}
+
+func RequestID(ctx netcontext.Context) string {
+	return ctxHeaders(ctx).Get(hRequestLogId)
+}
+
+func Datacenter(ctx netcontext.Context) string {
+	if dc := ctxHeaders(ctx).Get(hDatacenter); dc != "" {
+		return dc
+	}
+	// If the header isn't set, read zone from the metadata service.
+	// It has the format projects/[NUMERIC_PROJECT_ID]/zones/[ZONE]
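+	// For example, "projects/123456/zones/us-central1-f" yields "us-central1-f".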
+	zone, err := getMetadata("instance/zone")
+	if err != nil {
+		log.Printf("Datacenter: %v", err)
+		return ""
+	}
+	parts := strings.Split(string(zone), "/")
+	if len(parts) == 0 {
+		return ""
+	}
+	return parts[len(parts)-1]
+}
+
+func ServerSoftware() string {
+	// TODO(dsymonds): Remove fallback when we've verified this.
+	if s := os.Getenv("SERVER_SOFTWARE"); s != "" {
+		return s
+	}
+	if s := os.Getenv("GAE_ENV"); s != "" {
+		return s
+	}
+	return "Google App Engine/1.x.x"
+}
+
+// TODO(dsymonds): Remove the metadata fetches.
+
+func ModuleName(_ netcontext.Context) string {
+	if s := os.Getenv("GAE_MODULE_NAME"); s != "" {
+		return s
+	}
+	if s := os.Getenv("GAE_SERVICE"); s != "" {
+		return s
+	}
+	return string(mustGetMetadata("instance/attributes/gae_backend_name"))
+}
+
+func VersionID(_ netcontext.Context) string {
+	if s1, s2 := os.Getenv("GAE_MODULE_VERSION"), os.Getenv("GAE_MINOR_VERSION"); s1 != "" && s2 != "" {
+		return s1 + "." + s2
+	}
+	if s1, s2 := os.Getenv("GAE_VERSION"), os.Getenv("GAE_DEPLOYMENT_ID"); s1 != "" && s2 != "" {
+		return s1 + "." + s2
+	}
+	return string(mustGetMetadata("instance/attributes/gae_backend_version")) + "." + string(mustGetMetadata("instance/attributes/gae_backend_minor_version"))
+}
+
+func InstanceID() string {
+	if s := os.Getenv("GAE_MODULE_INSTANCE"); s != "" {
+		return s
+	}
+	if s := os.Getenv("GAE_INSTANCE"); s != "" {
+		return s
+	}
+	return string(mustGetMetadata("instance/attributes/gae_backend_instance"))
+}
+
+func partitionlessAppID() string {
+	// gae_project has everything except the partition prefix.
+	if appID := os.Getenv("GAE_LONG_APP_ID"); appID != "" {
+		return appID
+	}
+	if project := os.Getenv("GOOGLE_CLOUD_PROJECT"); project != "" {
+		return project
+	}
+	return string(mustGetMetadata("instance/attributes/gae_project"))
+}
+
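+// fullyQualifiedAppID returns the partition-qualified app ID: for example,
+// "s~myapp" when the partition is "s" and the partitionless app ID is "myapp".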
+func fullyQualifiedAppID(_ netcontext.Context) string {
+	if s := os.Getenv("GAE_APPLICATION"); s != "" {
+		return s
+	}
+	appID := partitionlessAppID()
+
+	part := os.Getenv("GAE_PARTITION")
+	if part == "" {
+		part = string(mustGetMetadata("instance/attributes/gae_partition"))
+	}
+
+	if part != "" {
+		appID = part + "~" + appID
+	}
+	return appID
+}
+
+func IsDevAppServer() bool {
+	return os.Getenv("RUN_WITH_DEVAPPSERVER") != ""
+}
diff --git a/vendor/google.golang.org/appengine/internal/internal.go b/vendor/google.golang.org/appengine/internal/internal.go
new file mode 100644
index 0000000..051ea39
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/internal.go
@@ -0,0 +1,110 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package internal provides support for package appengine.
+//
+// Programs should not use this package directly. Its API is not stable.
+// Use packages appengine and appengine/* instead.
+package internal
+
+import (
+	"fmt"
+
+	"github.com/golang/protobuf/proto"
+
+	remotepb "google.golang.org/appengine/internal/remote_api"
+)
+
+// errorCodeMaps is a map of service name to the error code map for the service.
+var errorCodeMaps = make(map[string]map[int32]string)
+
+// RegisterErrorCodeMap is called from API implementations to register their
+// error code map. This should only be called from init functions.
+func RegisterErrorCodeMap(service string, m map[int32]string) {
+	errorCodeMaps[service] = m
+}
+
+type timeoutCodeKey struct {
+	service string
+	code    int32
+}
+
+// timeoutCodes is the set of service+code pairs that represent timeouts.
+var timeoutCodes = make(map[timeoutCodeKey]bool)
+
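+// RegisterTimeoutErrorCode records that the given error code, for the given
+// service, represents a timeout. Like RegisterErrorCodeMap, it is meant to be
+// called from init functions.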
+func RegisterTimeoutErrorCode(service string, code int32) {
+	timeoutCodes[timeoutCodeKey{service, code}] = true
+}
+
+// APIError is the type returned by appengine.Context's Call method
+// when an API call fails in an API-specific way. This may be, for instance,
+// a taskqueue API call failing with TaskQueueServiceError::UNKNOWN_QUEUE.
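+//
+// Its Error method formats as, for example,
+// "API error 5 (datastore_v3: TIMEOUT): operation took too long", assuming
+// the datastore_v3 error code map registers "TIMEOUT" for code 5.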
+type APIError struct {
+	Service string
+	Detail  string
+	Code    int32 // API-specific error code
+}
+
+func (e *APIError) Error() string {
+	if e.Code == 0 {
+		if e.Detail == "" {
+			return "APIError <empty>"
+		}
+		return e.Detail
+	}
+	s := fmt.Sprintf("API error %d", e.Code)
+	if m, ok := errorCodeMaps[e.Service]; ok {
+		s += " (" + e.Service + ": " + m[e.Code] + ")"
+	} else {
+		// Shouldn't happen, but provide a bit more detail if it does.
+		s = e.Service + " " + s
+	}
+	if e.Detail != "" {
+		s += ": " + e.Detail
+	}
+	return s
+}
+
+func (e *APIError) IsTimeout() bool {
+	return timeoutCodes[timeoutCodeKey{e.Service, e.Code}]
+}
+
+// CallError is the type returned by appengine.Context's Call method when an
+// API call fails in a generic way, such as RpcError::CAPABILITY_DISABLED.
+type CallError struct {
+	Detail string
+	Code   int32
+	// TODO: Remove this if we get a distinguishable error code.
+	Timeout bool
+}
+
+func (e *CallError) Error() string {
+	var msg string
+	switch remotepb.RpcError_ErrorCode(e.Code) {
+	case remotepb.RpcError_UNKNOWN:
+		return e.Detail
+	case remotepb.RpcError_OVER_QUOTA:
+		msg = "Over quota"
+	case remotepb.RpcError_CAPABILITY_DISABLED:
+		msg = "Capability disabled"
+	case remotepb.RpcError_CANCELLED:
+		msg = "Canceled"
+	default:
+		msg = fmt.Sprintf("Call error %d", e.Code)
+	}
+	s := msg + ": " + e.Detail
+	if e.Timeout {
+		s += " (timeout)"
+	}
+	return s
+}
+
+func (e *CallError) IsTimeout() bool {
+	return e.Timeout
+}
+
+// NamespaceMods is a map from API service to a function that will mutate an RPC request to attach a namespace.
+// The function should be prepared to be called on the same message more than once; it should only modify the
+// RPC request the first time.
+var NamespaceMods = make(map[string]func(m proto.Message, namespace string))
diff --git a/vendor/google.golang.org/appengine/internal/log/log_service.proto b/vendor/google.golang.org/appengine/internal/log/log_service.proto
new file mode 100644
index 0000000..8981dc4
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/log/log_service.proto
@@ -0,0 +1,150 @@
+syntax = "proto2";
+option go_package = "log";
+
+package appengine;
+
+message LogServiceError {
+  enum ErrorCode {
+    OK  = 0;
+    INVALID_REQUEST = 1;
+    STORAGE_ERROR = 2;
+  }
+}
+
+message UserAppLogLine {
+  required int64 timestamp_usec = 1;
+  required int64 level = 2;
+  required string message = 3;
+}
+
+message UserAppLogGroup {
+  repeated UserAppLogLine log_line = 2;
+}
+
+message FlushRequest {
+  optional bytes logs = 1;
+}
+
+message SetStatusRequest {
+  required string status = 1;
+}
+
+message LogOffset {
+  optional bytes request_id = 1;
+}
+
+message LogLine {
+  required int64 time = 1;
+  required int32 level = 2;
+  required string log_message = 3;
+}
+
+message RequestLog {
+  required string app_id = 1;
+  optional string module_id = 37 [default="default"];
+  required string version_id = 2;
+  required bytes request_id = 3;
+  optional LogOffset offset = 35;
+  required string ip = 4;
+  optional string nickname = 5;
+  required int64 start_time = 6;
+  required int64 end_time = 7;
+  required int64 latency = 8;
+  required int64 mcycles = 9;
+  required string method = 10;
+  required string resource = 11;
+  required string http_version = 12;
+  required int32 status = 13;
+  required int64 response_size = 14;
+  optional string referrer = 15;
+  optional string user_agent = 16;
+  required string url_map_entry = 17;
+  required string combined = 18;
+  optional int64 api_mcycles = 19;
+  optional string host = 20;
+  optional double cost = 21;
+
+  optional string task_queue_name = 22;
+  optional string task_name = 23;
+
+  optional bool was_loading_request = 24;
+  optional int64 pending_time = 25;
+  optional int32 replica_index = 26 [default = -1];
+  optional bool finished = 27 [default = true];
+  optional bytes clone_key = 28;
+
+  repeated LogLine line = 29;
+
+  optional bool lines_incomplete = 36;
+  optional bytes app_engine_release = 38;
+
+  optional int32 exit_reason = 30;
+  optional bool was_throttled_for_time = 31;
+  optional bool was_throttled_for_requests = 32;
+  optional int64 throttled_time = 33;
+
+  optional bytes server_name = 34;
+}
+
+message LogModuleVersion {
+  optional string module_id = 1 [default="default"];
+  optional string version_id = 2;
+}
+
+message LogReadRequest {
+  required string app_id = 1;
+  repeated string version_id = 2;
+  repeated LogModuleVersion module_version = 19;
+
+  optional int64 start_time = 3;
+  optional int64 end_time = 4;
+  optional LogOffset offset = 5;
+  repeated bytes request_id = 6;
+
+  optional int32 minimum_log_level = 7;
+  optional bool include_incomplete = 8;
+  optional int64 count = 9;
+
+  optional string combined_log_regex = 14;
+  optional string host_regex = 15;
+  optional int32 replica_index = 16;
+
+  optional bool include_app_logs = 10;
+  optional int32 app_logs_per_request = 17;
+  optional bool include_host = 11;
+  optional bool include_all = 12;
+  optional bool cache_iterator = 13;
+  optional int32 num_shards = 18;
+}
+
+message LogReadResponse {
+  repeated RequestLog log = 1;
+  optional LogOffset offset = 2;
+  optional int64 last_end_time = 3;
+}
+
+message LogUsageRecord {
+  optional string version_id = 1;
+  optional int32 start_time = 2;
+  optional int32 end_time = 3;
+  optional int64 count = 4;
+  optional int64 total_size = 5;
+  optional int32 records = 6;
+}
+
+message LogUsageRequest {
+  required string app_id = 1;
+  repeated string version_id = 2;
+  optional int32 start_time = 3;
+  optional int32 end_time = 4;
+  optional uint32 resolution_hours = 5 [default = 1];
+  optional bool combine_versions = 6;
+  optional int32 usage_version = 7;
+  optional bool versions_only = 8;
+}
+
+message LogUsageResponse {
+  repeated LogUsageRecord usage = 1;
+  optional LogUsageRecord summary = 2;
+}
diff --git a/vendor/google.golang.org/appengine/internal/main.go b/vendor/google.golang.org/appengine/internal/main.go
new file mode 100644
index 0000000..1e76531
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/main.go
@@ -0,0 +1,16 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package internal
+
+import (
+	"appengine_internal"
+)
+
+func Main() {
+	MainPath = ""
+	appengine_internal.Main()
+}
diff --git a/vendor/google.golang.org/appengine/internal/main_common.go b/vendor/google.golang.org/appengine/internal/main_common.go
new file mode 100644
index 0000000..357dce4
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/main_common.go
@@ -0,0 +1,7 @@
+package internal
+
+// MainPath stores the file path of the main package. On App Engine Standard
+// using Go version 1.9 and below, this will be unset. On App Engine Flex and
+// App Engine Standard second-gen (Go 1.11 and above), this will be the
+// filepath to package main.
+var MainPath string
diff --git a/vendor/google.golang.org/appengine/internal/main_vm.go b/vendor/google.golang.org/appengine/internal/main_vm.go
new file mode 100644
index 0000000..ddb79a3
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/main_vm.go
@@ -0,0 +1,69 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package internal
+
+import (
+	"io"
+	"log"
+	"net/http"
+	"net/url"
+	"os"
+	"path/filepath"
+	"runtime"
+)
+
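+// Main serves HTTP on the port named by $PORT (defaulting to 8080), after
+// installing a trivial /_ah/health handler if none is registered; on the dev
+// app server it binds to 127.0.0.1 only.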
+func Main() {
+	MainPath = filepath.Dir(findMainPath())
+	installHealthChecker(http.DefaultServeMux)
+
+	port := "8080"
+	if s := os.Getenv("PORT"); s != "" {
+		port = s
+	}
+
+	host := ""
+	if IsDevAppServer() {
+		host = "127.0.0.1"
+	}
+	if err := http.ListenAndServe(host+":"+port, http.HandlerFunc(handleHTTP)); err != nil {
+		log.Fatalf("http.ListenAndServe: %v", err)
+	}
+}
+
+// Find the path to package main by looking at the root Caller.
+func findMainPath() string {
+	pc := make([]uintptr, 100)
+	n := runtime.Callers(2, pc)
+	frames := runtime.CallersFrames(pc[:n])
+	for {
+		frame, more := frames.Next()
+		// Tests won't have package main; instead they have testing.tRunner.
+		if frame.Function == "main.main" || frame.Function == "testing.tRunner" {
+			return frame.File
+		}
+		if !more {
+			break
+		}
+	}
+	return ""
+}
+
+func installHealthChecker(mux *http.ServeMux) {
+	// If no health check handler has been installed by this point, add a trivial one.
+	const healthPath = "/_ah/health"
+	hreq := &http.Request{
+		Method: "GET",
+		URL: &url.URL{
+			Path: healthPath,
+		},
+	}
+	if _, pat := mux.Handler(hreq); pat != healthPath {
+		mux.HandleFunc(healthPath, func(w http.ResponseWriter, r *http.Request) {
+			io.WriteString(w, "ok")
+		})
+	}
+}
diff --git a/vendor/google.golang.org/appengine/internal/metadata.go b/vendor/google.golang.org/appengine/internal/metadata.go
new file mode 100644
index 0000000..c4ba63b
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/metadata.go
@@ -0,0 +1,60 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+// This file has code for accessing metadata.
+//
+// References:
+//	https://cloud.google.com/compute/docs/metadata
+
+import (
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+)
+
+const (
+	metadataHost = "metadata"
+	metadataPath = "/computeMetadata/v1/"
+)
+
+var (
+	metadataRequestHeaders = http.Header{
+		"Metadata-Flavor": []string{"Google"},
+	}
+)
+
+// TODO(dsymonds): Do we need to support default values, like Python?
+func mustGetMetadata(key string) []byte {
+	b, err := getMetadata(key)
+	if err != nil {
+		panic(fmt.Sprintf("Metadata fetch failed for '%s': %v", key, err))
+	}
+	return b
+}
+
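+// getMetadata fetches the value of the given key from the GCE metadata
+// service. For example, getMetadata("instance/zone") issues
+// GET http://metadata/computeMetadata/v1/instance/zone with the
+// "Metadata-Flavor: Google" header set.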
+func getMetadata(key string) ([]byte, error) {
+	// TODO(dsymonds): May need to use url.Parse to support keys with query args.
+	req := &http.Request{
+		Method: "GET",
+		URL: &url.URL{
+			Scheme: "http",
+			Host:   metadataHost,
+			Path:   metadataPath + key,
+		},
+		Header: metadataRequestHeaders,
+		Host:   metadataHost,
+	}
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != 200 {
+		return nil, fmt.Errorf("metadata server returned HTTP %d", resp.StatusCode)
+	}
+	return ioutil.ReadAll(resp.Body)
+}
diff --git a/vendor/google.golang.org/appengine/internal/net.go b/vendor/google.golang.org/appengine/internal/net.go
new file mode 100644
index 0000000..3b94cf0
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/net.go
@@ -0,0 +1,56 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+// This file implements a network dialer that limits the number of concurrent connections.
+// It is only used for API calls.
+
+import (
+	"log"
+	"net"
+	"runtime"
+	"sync"
+	"time"
+)
+
+var limitSem = make(chan int, 100) // TODO(dsymonds): Use environment variable.
+
+func limitRelease() {
+	// non-blocking
+	select {
+	case <-limitSem:
+	default:
+		// This should not normally happen.
+		log.Print("appengine: unbalanced limitSem release!")
+	}
+}
+
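+// limitDial dials addr, first blocking if the maximum number of concurrent
+// API connections (the capacity of limitSem) is already in use.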
+func limitDial(network, addr string) (net.Conn, error) {
+	limitSem <- 1
+
+	// Dial with a timeout in case the API host is MIA.
+	// The connection should normally be very fast.
+	conn, err := net.DialTimeout(network, addr, 500*time.Millisecond)
+	if err != nil {
+		limitRelease()
+		return nil, err
+	}
+	lc := &limitConn{Conn: conn}
+	runtime.SetFinalizer(lc, (*limitConn).Close) // shouldn't usually be required
+	return lc, nil
+}
+
+type limitConn struct {
+	close sync.Once
+	net.Conn
+}
+
+func (lc *limitConn) Close() error {
+	defer lc.close.Do(func() {
+		limitRelease()
+		runtime.SetFinalizer(lc, nil)
+	})
+	return lc.Conn.Close()
+}
diff --git a/vendor/google.golang.org/appengine/internal/regen.sh b/vendor/google.golang.org/appengine/internal/regen.sh
new file mode 100755
index 0000000..2fdb546
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/regen.sh
@@ -0,0 +1,40 @@
+#!/bin/bash -e
+#
+# This script rebuilds the generated code for the protocol buffers.
+# To run this you will need protoc and goprotobuf installed;
+# see https://github.com/golang/protobuf for instructions.
+
+PKG=google.golang.org/appengine
+
+function die() {
+	echo 1>&2 $*
+	exit 1
+}
+
+# Sanity check that the right tools are accessible.
+for tool in go protoc protoc-gen-go; do
+	q=$(which $tool) || die "didn't find $tool"
+	echo 1>&2 "$tool: $q"
+done
+
+echo -n 1>&2 "finding package dir... "
+pkgdir=$(go list -f '{{.Dir}}' $PKG)
+echo 1>&2 $pkgdir
+base=$(echo $pkgdir | sed "s,/$PKG\$,,")
+echo 1>&2 "base: $base"
+cd $base
+
+# Run protoc once per package.
+for dir in $(find $PKG/internal -name '*.proto' | xargs dirname | sort | uniq); do
+	echo 1>&2 "* $dir"
+	protoc --go_out=. $dir/*.proto
+done
+
+for f in $(find $PKG/internal -name '*.pb.go'); do
+  # Remove proto.RegisterEnum calls.
+  # These cause duplicate registration panics when these packages
+  # are used on classic App Engine. proto.RegisterEnum only affects
+  # parsing the text format; we don't care about that.
+  # https://code.google.com/p/googleappengine/issues/detail?id=11670#c17
+  sed -i '/proto.RegisterEnum/d' $f
+done
diff --git a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto
new file mode 100644
index 0000000..f21763a
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto
@@ -0,0 +1,44 @@
+syntax = "proto2";
+option go_package = "remote_api";
+
+package remote_api;
+
+message Request {
+  required string service_name = 2;
+  required string method = 3;
+  required bytes request = 4;
+  optional string request_id = 5;
+}
+
+message ApplicationError {
+  required int32 code = 1;
+  required string detail = 2;
+}
+
+message RpcError {
+  enum ErrorCode {
+    UNKNOWN = 0;
+    CALL_NOT_FOUND = 1;
+    PARSE_ERROR = 2;
+    SECURITY_VIOLATION = 3;
+    OVER_QUOTA = 4;
+    REQUEST_TOO_LARGE = 5;
+    CAPABILITY_DISABLED = 6;
+    FEATURE_DISABLED = 7;
+    BAD_REQUEST = 8;
+    RESPONSE_TOO_LARGE = 9;
+    CANCELLED = 10;
+    REPLAY_ERROR = 11;
+    DEADLINE_EXCEEDED = 12;
+  }
+  required int32 code = 1;
+  optional string detail = 2;
+}
+
+message Response {
+  optional bytes response = 1;
+  optional bytes exception = 2;
+  optional ApplicationError application_error = 3;
+  optional bytes java_exception = 4;
+  optional RpcError rpc_error = 5;
+}
diff --git a/vendor/google.golang.org/appengine/internal/transaction.go b/vendor/google.golang.org/appengine/internal/transaction.go
new file mode 100644
index 0000000..9006ae6
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/transaction.go
@@ -0,0 +1,115 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+// This file implements hooks for applying datastore transactions.
+
+import (
+	"errors"
+	"reflect"
+
+	"github.com/golang/protobuf/proto"
+	netcontext "golang.org/x/net/context"
+
+	basepb "google.golang.org/appengine/internal/base"
+	pb "google.golang.org/appengine/internal/datastore"
+)
+
+var transactionSetters = make(map[reflect.Type]reflect.Value)
+
+// RegisterTransactionSetter registers a function that sets transaction information
+// in a protocol buffer message. f should be a function with two arguments,
+// the first being a protocol buffer type, and the second being *datastore.Transaction.
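+// A typical setter looks like:
+//
+//	func(req *pb.Query, t *pb.Transaction) { req.Transaction = t }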
+func RegisterTransactionSetter(f interface{}) {
+	v := reflect.ValueOf(f)
+	transactionSetters[v.Type().In(0)] = v
+}
+
+// applyTransaction applies the transaction t to message pb
+// by using the relevant setter passed to RegisterTransactionSetter.
+func applyTransaction(pb proto.Message, t *pb.Transaction) {
+	v := reflect.ValueOf(pb)
+	if f, ok := transactionSetters[v.Type()]; ok {
+		f.Call([]reflect.Value{v, reflect.ValueOf(t)})
+	}
+}
+
+var transactionKey = "used for *Transaction"
+
+func transactionFromContext(ctx netcontext.Context) *transaction {
+	t, _ := ctx.Value(&transactionKey).(*transaction)
+	return t
+}
+
+func withTransaction(ctx netcontext.Context, t *transaction) netcontext.Context {
+	return netcontext.WithValue(ctx, &transactionKey, t)
+}
+
+type transaction struct {
+	transaction pb.Transaction
+	finished    bool
+}
+
+var ErrConcurrentTransaction = errors.New("internal: concurrent transaction")
+
+func RunTransactionOnce(c netcontext.Context, f func(netcontext.Context) error, xg bool, readOnly bool, previousTransaction *pb.Transaction) (*pb.Transaction, error) {
+	if transactionFromContext(c) != nil {
+		return nil, errors.New("nested transactions are not supported")
+	}
+
+	// Begin the transaction.
+	t := &transaction{}
+	req := &pb.BeginTransactionRequest{
+		App: proto.String(FullyQualifiedAppID(c)),
+	}
+	if xg {
+		req.AllowMultipleEg = proto.Bool(true)
+	}
+	if previousTransaction != nil {
+		req.PreviousTransaction = previousTransaction
+	}
+	if readOnly {
+		req.Mode = pb.BeginTransactionRequest_READ_ONLY.Enum()
+	} else {
+		req.Mode = pb.BeginTransactionRequest_READ_WRITE.Enum()
+	}
+	if err := Call(c, "datastore_v3", "BeginTransaction", req, &t.transaction); err != nil {
+		return nil, err
+	}
+
+	// Call f, rolling back the transaction if f returns a non-nil error or panics.
+	// The panic is not recovered.
+	defer func() {
+		if t.finished {
+			return
+		}
+		t.finished = true
+		// Ignore the error return value, since we are already returning a non-nil
+		// error (or we're panicking).
+		Call(c, "datastore_v3", "Rollback", &t.transaction, &basepb.VoidProto{})
+	}()
+	if err := f(withTransaction(c, t)); err != nil {
+		return &t.transaction, err
+	}
+	t.finished = true
+
+	// Commit the transaction.
+	res := &pb.CommitResponse{}
+	err := Call(c, "datastore_v3", "Commit", &t.transaction, res)
+	if ae, ok := err.(*APIError); ok {
+		/* TODO: restore this conditional
+		if appengine.IsDevAppServer() {
+		*/
+		// The Python Dev AppServer raises an ApplicationError with error code 2 (which is
+		// Error.CONCURRENT_TRANSACTION) and message "Concurrency exception.".
+		if ae.Code == int32(pb.Error_BAD_REQUEST) && ae.Detail == "ApplicationError: 2 Concurrency exception." {
+			return &t.transaction, ErrConcurrentTransaction
+		}
+		if ae.Code == int32(pb.Error_CONCURRENT_TRANSACTION) {
+			return &t.transaction, ErrConcurrentTransaction
+		}
+	}
+	return &t.transaction, err
+}
diff --git a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
new file mode 100644
index 0000000..f695edf
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
@@ -0,0 +1,64 @@
+syntax = "proto2";
+option go_package = "urlfetch";
+
+package appengine;
+
+message URLFetchServiceError {
+  enum ErrorCode {
+    OK = 0;
+    INVALID_URL = 1;
+    FETCH_ERROR = 2;
+    UNSPECIFIED_ERROR = 3;
+    RESPONSE_TOO_LARGE = 4;
+    DEADLINE_EXCEEDED = 5;
+    SSL_CERTIFICATE_ERROR = 6;
+    DNS_ERROR = 7;
+    CLOSED = 8;
+    INTERNAL_TRANSIENT_ERROR = 9;
+    TOO_MANY_REDIRECTS = 10;
+    MALFORMED_REPLY = 11;
+    CONNECTION_ERROR = 12;
+  }
+}
+
+message URLFetchRequest {
+  enum RequestMethod {
+    GET = 1;
+    POST = 2;
+    HEAD = 3;
+    PUT = 4;
+    DELETE = 5;
+    PATCH = 6;
+  }
+  required RequestMethod Method = 1;
+  required string Url = 2;
+  repeated group Header = 3 {
+    required string Key = 4;
+    required string Value = 5;
+  }
+  optional bytes Payload = 6 [ctype=CORD];
+
+  optional bool FollowRedirects = 7 [default=true];
+
+  optional double Deadline = 8;
+
+  optional bool MustValidateServerCertificate = 9 [default=true];
+}
+
+message URLFetchResponse {
+  optional bytes Content = 1;
+  required int32 StatusCode = 2;
+  repeated group Header = 3 {
+    required string Key = 4;
+    required string Value = 5;
+  }
+  optional bool ContentWasTruncated = 6 [default=false];
+  optional int64 ExternalBytesSent = 7;
+  optional int64 ExternalBytesReceived = 8;
+
+  optional string FinalUrl = 9;
+
+  optional int64 ApiCpuMilliseconds = 10 [default=0];
+  optional int64 ApiBytesSent = 11 [default=0];
+  optional int64 ApiBytesReceived = 12 [default=0];
+}
diff --git a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go
new file mode 100644
index 0000000..6ffe1e6
--- /dev/null
+++ b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go
@@ -0,0 +1,210 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package urlfetch provides an http.RoundTripper implementation
+// for fetching URLs via App Engine's urlfetch service.
+package urlfetch // import "google.golang.org/appengine/urlfetch"
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+	"golang.org/x/net/context"
+
+	"google.golang.org/appengine/internal"
+	pb "google.golang.org/appengine/internal/urlfetch"
+)
+
+// Transport is an implementation of http.RoundTripper for
+// App Engine. Users should generally create an http.Client using
+// this transport and use the Client rather than using this transport
+// directly.
+type Transport struct {
+	Context context.Context
+
+	// Controls whether the application checks the validity of SSL certificates
+	// over HTTPS connections. A value of false (the default) instructs the
+	// application to send a request to the server only if the certificate is
+	// valid and signed by a trusted certificate authority (CA), and also
+	// includes a hostname that matches the certificate. A value of true
+	// instructs the application to perform no certificate validation.
+	AllowInvalidServerCertificate bool
+}
+
+// Verify statically that *Transport implements http.RoundTripper.
+var _ http.RoundTripper = (*Transport)(nil)
+
+// Client returns an *http.Client using a default urlfetch Transport. This
+// client will have the default deadline of 5 seconds, and will check the
+// validity of SSL certificates.
+//
+// Any deadline of the provided context will be used for requests through this client;
+// if the client does not have a deadline then a 5 second default is used.
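+//
+// Typical usage from a request handler (a sketch):
+//
+//	ctx := appengine.NewContext(r)
+//	client := urlfetch.Client(ctx)
+//	resp, err := client.Get("https://www.example.com/")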
+func Client(ctx context.Context) *http.Client {
+	return &http.Client{
+		Transport: &Transport{
+			Context: ctx,
+		},
+	}
+}
+
+type bodyReader struct {
+	content   []byte
+	truncated bool
+	closed    bool
+}
+
+// ErrTruncatedBody is the error returned after the final Read() from a
+// response's Body if the body has been truncated by App Engine's proxy.
+var ErrTruncatedBody = errors.New("urlfetch: truncated body")
+
+func statusCodeToText(code int) string {
+	if t := http.StatusText(code); t != "" {
+		return t
+	}
+	return strconv.Itoa(code)
+}
+
+func (br *bodyReader) Read(p []byte) (n int, err error) {
+	if br.closed {
+		if br.truncated {
+			return 0, ErrTruncatedBody
+		}
+		return 0, io.EOF
+	}
+	n = copy(p, br.content)
+	if n > 0 {
+		br.content = br.content[n:]
+		return
+	}
+	if br.truncated {
+		br.closed = true
+		return 0, ErrTruncatedBody
+	}
+	return 0, io.EOF
+}
+
+func (br *bodyReader) Close() error {
+	br.closed = true
+	br.content = nil
+	return nil
+}
+
+// A map of the URL Fetch-accepted methods that take a request body.
+var methodAcceptsRequestBody = map[string]bool{
+	"POST":  true,
+	"PUT":   true,
+	"PATCH": true,
+}
+
+// urlString returns a valid string given a URL. This function is necessary because
+// the String method of URL doesn't correctly handle URLs with non-empty Opaque values.
+// See http://code.google.com/p/go/issues/detail?id=4860.
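+// For example, a URL with Scheme "http", Host "www.example.com", and Opaque
+// "/path" renders as "http://www.example.com/path" rather than "http:/path".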
+func urlString(u *url.URL) string {
+	if u.Opaque == "" || strings.HasPrefix(u.Opaque, "//") {
+		return u.String()
+	}
+	aux := *u
+	aux.Opaque = "//" + aux.Host + aux.Opaque
+	return aux.String()
+}
+
+// RoundTrip issues a single HTTP request and returns its response. Per the
+// http.RoundTripper interface, RoundTrip only returns an error if there
+// was an unsupported request or the URL Fetch proxy fails.
+// Note that HTTP response codes such as 5xx, 403, 404, etc. are not
+// errors as far as the transport is concerned and will be returned
+// with err set to nil.
+func (t *Transport) RoundTrip(req *http.Request) (res *http.Response, err error) {
+	methNum, ok := pb.URLFetchRequest_RequestMethod_value[req.Method]
+	if !ok {
+		return nil, fmt.Errorf("urlfetch: unsupported HTTP method %q", req.Method)
+	}
+
+	method := pb.URLFetchRequest_RequestMethod(methNum)
+
+	freq := &pb.URLFetchRequest{
+		Method:                        &method,
+		Url:                           proto.String(urlString(req.URL)),
+		FollowRedirects:               proto.Bool(false), // http.Client's responsibility
+		MustValidateServerCertificate: proto.Bool(!t.AllowInvalidServerCertificate),
+	}
+	if deadline, ok := t.Context.Deadline(); ok {
+		freq.Deadline = proto.Float64(deadline.Sub(time.Now()).Seconds())
+	}
+
+	for k, vals := range req.Header {
+		for _, val := range vals {
+			freq.Header = append(freq.Header, &pb.URLFetchRequest_Header{
+				Key:   proto.String(k),
+				Value: proto.String(val),
+			})
+		}
+	}
+	if methodAcceptsRequestBody[req.Method] && req.Body != nil {
+		// Avoid a []byte copy if req.Body has a Bytes method.
+		switch b := req.Body.(type) {
+		case interface {
+			Bytes() []byte
+		}:
+			freq.Payload = b.Bytes()
+		default:
+			freq.Payload, err = ioutil.ReadAll(req.Body)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+
+	fres := &pb.URLFetchResponse{}
+	if err := internal.Call(t.Context, "urlfetch", "Fetch", freq, fres); err != nil {
+		return nil, err
+	}
+
+	res = &http.Response{}
+	res.StatusCode = int(*fres.StatusCode)
+	res.Status = fmt.Sprintf("%d %s", res.StatusCode, statusCodeToText(res.StatusCode))
+	res.Header = make(http.Header)
+	res.Request = req
+
+	// Faked:
+	res.ProtoMajor = 1
+	res.ProtoMinor = 1
+	res.Proto = "HTTP/1.1"
+	res.Close = true
+
+	for _, h := range fres.Header {
+		hkey := http.CanonicalHeaderKey(*h.Key)
+		hval := *h.Value
+		if hkey == "Content-Length" {
+			// Will get filled in below for all but HEAD requests.
+			if req.Method == "HEAD" {
+				res.ContentLength, _ = strconv.ParseInt(hval, 10, 64)
+			}
+			continue
+		}
+		res.Header.Add(hkey, hval)
+	}
+
+	if req.Method != "HEAD" {
+		res.ContentLength = int64(len(fres.Content))
+	}
+
+	truncated := fres.GetContentWasTruncated()
+	res.Body = &bodyReader{content: fres.Content, truncated: truncated}
+	return
+}
+
+func init() {
+	internal.RegisterErrorCodeMap("urlfetch", pb.URLFetchServiceError_ErrorCode_name)
+	internal.RegisterTimeoutErrorCode("urlfetch", int32(pb.URLFetchServiceError_DEADLINE_EXCEEDED))
+}
diff --git a/vendor/google.golang.org/grpc/health/client.go b/vendor/google.golang.org/grpc/health/client.go
new file mode 100644
index 0000000..e15f04c
--- /dev/null
+++ b/vendor/google.golang.org/grpc/health/client.go
@@ -0,0 +1,107 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package health
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"time"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	healthpb "google.golang.org/grpc/health/grpc_health_v1"
+	"google.golang.org/grpc/internal"
+	"google.golang.org/grpc/internal/backoff"
+	"google.golang.org/grpc/status"
+)
+
+const maxDelay = 120 * time.Second
+
+var backoffStrategy = backoff.Exponential{MaxDelay: maxDelay}
+var backoffFunc = func(ctx context.Context, retries int) bool {
+	d := backoffStrategy.Backoff(retries)
+	timer := time.NewTimer(d)
+	select {
+	case <-timer.C:
+		return true
+	case <-ctx.Done():
+		timer.Stop()
+		return false
+	}
+}
+
+func init() {
+	internal.HealthCheckFunc = clientHealthCheck
+}
+
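+// clientHealthCheck repeatedly opens a health-check stream via newStream,
+// sends a request for service, and reports each received serving status
+// through reportHealth, backing off between failed attempts until ctx is done.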
+func clientHealthCheck(ctx context.Context, newStream func() (interface{}, error), reportHealth func(bool), service string) error {
+	tryCnt := 0
+
+retryConnection:
+	for {
+		// Backs off if the previous attempt failed in some way without receiving a message.
+		if tryCnt > 0 && !backoffFunc(ctx, tryCnt-1) {
+			return nil
+		}
+		tryCnt++
+
+		if ctx.Err() != nil {
+			return nil
+		}
+		rawS, err := newStream()
+		if err != nil {
+			continue retryConnection
+		}
+
+		s, ok := rawS.(grpc.ClientStream)
+		// Ideally, this should never happen. But if it does, the server is marked as healthy for load-balancing purposes.
+		if !ok {
+			reportHealth(true)
+			return fmt.Errorf("newStream returned %v (type %T); want grpc.ClientStream", rawS, rawS)
+		}
+
+		if err = s.SendMsg(&healthpb.HealthCheckRequest{Service: service}); err != nil && err != io.EOF {
+			// Stream should have been closed, so we can safely continue to create a new stream.
+			continue retryConnection
+		}
+		s.CloseSend()
+
+		resp := new(healthpb.HealthCheckResponse)
+		for {
+			err = s.RecvMsg(resp)
+
+			// Reports healthy for load-balancing purposes if the health check is not implemented on the server.
+			if status.Code(err) == codes.Unimplemented {
+				reportHealth(true)
+				return err
+			}
+
+			// Reports unhealthy if the server's Watch method returns an error other than UNIMPLEMENTED.
+			if err != nil {
+				reportHealth(false)
+				continue retryConnection
+			}
+
+			// As a message has been received, resets the try count so the next retry does not back off.
+			tryCnt = 0
+			reportHealth(resp.Status == healthpb.HealthCheckResponse_SERVING)
+		}
+	}
+}
diff --git a/vendor/google.golang.org/grpc/health/regenerate.sh b/vendor/google.golang.org/grpc/health/regenerate.sh
new file mode 100755
index 0000000..b11eccb
--- /dev/null
+++ b/vendor/google.golang.org/grpc/health/regenerate.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+# Copyright 2018 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -eux -o pipefail
+
+TMP=$(mktemp -d)
+
+function finish {
+  rm -rf "$TMP"
+}
+trap finish EXIT
+
+pushd "$TMP"
+mkdir -p grpc/health/v1
+curl https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/health/v1/health.proto > grpc/health/v1/health.proto
+
+protoc --go_out=plugins=grpc,paths=source_relative:. -I. grpc/health/v1/*.proto
+popd
+rm -f grpc_health_v1/*.pb.go
+cp "$TMP"/grpc/health/v1/*.pb.go grpc_health_v1/
+
diff --git a/vendor/google.golang.org/grpc/health/server.go b/vendor/google.golang.org/grpc/health/server.go
new file mode 100644
index 0000000..c86e499
--- /dev/null
+++ b/vendor/google.golang.org/grpc/health/server.go
@@ -0,0 +1,125 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+//go:generate ./regenerate.sh
+
+// Package health provides a service that exposes the server's health. It
+// must be imported to enable support for client-side health checks.
+package health
+
+import (
+	"context"
+	"sync"
+
+	"google.golang.org/grpc/codes"
+	healthgrpc "google.golang.org/grpc/health/grpc_health_v1"
+	healthpb "google.golang.org/grpc/health/grpc_health_v1"
+	"google.golang.org/grpc/status"
+)
+
+// Server implements `service Health`.
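+//
+// A typical setup (sketch):
+//
+//	grpcServer := grpc.NewServer()
+//	healthServer := health.NewServer()
+//	healthgrpc.RegisterHealthServer(grpcServer, healthServer)
+//	healthServer.SetServingStatus("your.Service", healthpb.HealthCheckResponse_SERVING)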
+type Server struct {
+	mu sync.Mutex
+	// statusMap stores the serving status of the services this Server monitors.
+	statusMap map[string]healthpb.HealthCheckResponse_ServingStatus
+	updates   map[string]map[healthgrpc.Health_WatchServer]chan healthpb.HealthCheckResponse_ServingStatus
+}
+
+// NewServer returns a new Server.
+func NewServer() *Server {
+	return &Server{
+		statusMap: map[string]healthpb.HealthCheckResponse_ServingStatus{"": healthpb.HealthCheckResponse_SERVING},
+		updates:   make(map[string]map[healthgrpc.Health_WatchServer]chan healthpb.HealthCheckResponse_ServingStatus),
+	}
+}
+
+// Check implements `service Health`.
+func (s *Server) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if servingStatus, ok := s.statusMap[in.Service]; ok {
+		return &healthpb.HealthCheckResponse{
+			Status: servingStatus,
+		}, nil
+	}
+	return nil, status.Error(codes.NotFound, "unknown service")
+}
+
+// Watch implements `service Health`.
+func (s *Server) Watch(in *healthpb.HealthCheckRequest, stream healthgrpc.Health_WatchServer) error {
+	service := in.Service
+	// update channel is used for getting service status updates.
+	update := make(chan healthpb.HealthCheckResponse_ServingStatus, 1)
+	s.mu.Lock()
+	// Puts the initial status into the channel.
+	if servingStatus, ok := s.statusMap[service]; ok {
+		update <- servingStatus
+	} else {
+		update <- healthpb.HealthCheckResponse_SERVICE_UNKNOWN
+	}
+
+	// Registers the update channel in the updates map.
+	if _, ok := s.updates[service]; !ok {
+		s.updates[service] = make(map[healthgrpc.Health_WatchServer]chan healthpb.HealthCheckResponse_ServingStatus)
+	}
+	s.updates[service][stream] = update
+	defer func() {
+		s.mu.Lock()
+		delete(s.updates[service], stream)
+		s.mu.Unlock()
+	}()
+	s.mu.Unlock()
+
+	var lastSentStatus healthpb.HealthCheckResponse_ServingStatus = -1
+	for {
+		select {
+		// Status updated. Sends the up-to-date status to the client.
+		case servingStatus := <-update:
+			if lastSentStatus == servingStatus {
+				continue
+			}
+			lastSentStatus = servingStatus
+			err := stream.Send(&healthpb.HealthCheckResponse{Status: servingStatus})
+			if err != nil {
+				return status.Error(codes.Canceled, "Stream has ended.")
+			}
+		// Context done. Removes the update channel from the updates map.
+		case <-stream.Context().Done():
+			return status.Error(codes.Canceled, "Stream has ended.")
+		}
+	}
+}
+
+// SetServingStatus is called when the serving status of a service needs to be
+// reset, or when a new service entry should be inserted into the statusMap.
+func (s *Server) SetServingStatus(service string, servingStatus healthpb.HealthCheckResponse_ServingStatus) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	s.statusMap[service] = servingStatus
+	for _, update := range s.updates[service] {
+		// Clears from the channel any previous update that has not yet been sent
+		// to the client. This can happen if the client is not reading and the
+		// server becomes flow-control limited.
+		select {
+		case <-update:
+		default:
+		}
+		// Puts the most recent update to the channel.
+		update <- servingStatus
+	}
+}
diff --git a/vendor/gopkg.in/inf.v0/LICENSE b/vendor/gopkg.in/inf.v0/LICENSE
new file mode 100644
index 0000000..87a5ced
--- /dev/null
+++ b/vendor/gopkg.in/inf.v0/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2012 Péter Surányi. Portions Copyright (c) 2009 The Go
+Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/gopkg.in/inf.v0/dec.go b/vendor/gopkg.in/inf.v0/dec.go
new file mode 100644
index 0000000..26548b6
--- /dev/null
+++ b/vendor/gopkg.in/inf.v0/dec.go
@@ -0,0 +1,615 @@
+// Package inf (type inf.Dec) implements "infinite-precision" decimal
+// arithmetic.
+// "Infinite precision" describes two characteristics: practically unlimited
+// precision for decimal number representation and no support for calculating
+// with any specific fixed precision.
+// (Although there is no practical limit on precision, inf.Dec can only
+// represent finite decimals.)
+//
+// This package is currently in an experimental stage and the API may change.
+//
+// This package does NOT support:
+//  - rounding to specific precisions (as opposed to specific decimal positions)
+//  - the notion of context (each rounding must be explicit)
+//  - NaN and Inf values, and distinguishing between positive and negative zero
+//  - conversions to and from float32/64 types
+//
+// Features considered for possible addition:
+//  + formatting options
+//  + Exp method
+//  + combined operations such as AddRound/MulAdd etc
+//  + exchanging data in decimal32/64/128 formats
+//
+package inf // import "gopkg.in/inf.v0"
+
+// TODO:
+//  - avoid excessive deep copying (quo and rounders)
+
+import (
+	"fmt"
+	"io"
+	"math/big"
+	"strings"
+)
+
+// A Dec represents a signed arbitrary-precision decimal.
+// It is a combination of a sign, an arbitrary-precision integer coefficient
+// value, and a signed fixed-precision exponent value.
+// The sign and the coefficient value are handled together as a signed value
+// and referred to as the unscaled value.
+// (Positive and negative zero values are not distinguished.)
+// Since the exponent is most commonly non-positive, it is handled in negated
+// form and referred to as scale.
+//
+// The mathematical value of a Dec equals:
+//
+//  unscaled * 10**(-scale)
+//
+// Note that different Dec representations may have equal mathematical values.
+//
+//  unscaled  scale  String()
+//  -------------------------
+//         0      0    "0"
+//         0      2    "0.00"
+//         0     -2    "0"
+//         1      0    "1"
+//       100      2    "1.00"
+//        10      0   "10"
+//         1     -1   "10"
+//
+// The zero value for a Dec represents the value 0 with scale 0.
+//
+// Operations are typically performed through the *Dec type.
+// The semantics of the assignment operation "=" for "bare" Dec values is
+// undefined and should not be relied on.
+//
+// Methods are typically of the form:
+//
+//	func (z *Dec) Op(x, y *Dec) *Dec
+//
+// and implement operations z = x Op y with the result as receiver; if it
+// is one of the operands it may be overwritten (and its memory reused).
+// To enable chaining of operations, the result is also returned. Methods
+// returning a result other than *Dec take one of the operands as the receiver.
+//
+// A "bare" Quo method (quotient / division operation) is not provided, as the
+// result is not always a finite decimal and thus in general cannot be
+// represented as a Dec.
+// Instead, in the common case when rounding is (potentially) necessary,
+// QuoRound should be used with a Scale and a Rounder.
+// QuoExact or QuoRound with RoundExact can be used in the special cases when it
+// is known that the result is always a finite decimal.
+//
+type Dec struct {
+	unscaled big.Int
+	scale    Scale
+}
+
+// Scale represents the type used for the scale of a Dec.
+type Scale int32
+
+const scaleSize = 4 // bytes in a Scale value
+
+// scaler represents a method for obtaining the scale to use for the result
+// of an operation on x and y.
+type scaler interface {
+	Scale(x *Dec, y *Dec) Scale
+}
+
+var bigInt = [...]*big.Int{
+	big.NewInt(0), big.NewInt(1), big.NewInt(2), big.NewInt(3), big.NewInt(4),
+	big.NewInt(5), big.NewInt(6), big.NewInt(7), big.NewInt(8), big.NewInt(9),
+	big.NewInt(10),
+}
+
+var exp10cache [64]big.Int = func() [64]big.Int {
+	e10, e10i := [64]big.Int{}, bigInt[1]
+	for i := range e10 {
+		e10[i].Set(e10i)
+		e10i = new(big.Int).Mul(e10i, bigInt[10])
+	}
+	return e10
+}()
+
+// NewDec allocates and returns a new Dec set to the given int64 unscaled value
+// and scale.
+func NewDec(unscaled int64, scale Scale) *Dec {
+	return new(Dec).SetUnscaled(unscaled).SetScale(scale)
+}
+
+// NewDecBig allocates and returns a new Dec set to the given *big.Int unscaled
+// value and scale.
+func NewDecBig(unscaled *big.Int, scale Scale) *Dec {
+	return new(Dec).SetUnscaledBig(unscaled).SetScale(scale)
+}
+
+// Scale returns the scale of x.
+func (x *Dec) Scale() Scale {
+	return x.scale
+}
+
+// Unscaled returns the unscaled value of x for u and true for ok when the
+// unscaled value can be represented as int64; otherwise it returns an undefined
+// int64 value for u and false for ok. Use x.UnscaledBig().Int64() to avoid
+// checking the validity of the value when the check is known to be redundant.
+func (x *Dec) Unscaled() (u int64, ok bool) {
+	u = x.unscaled.Int64()
+	var i big.Int
+	ok = i.SetInt64(u).Cmp(&x.unscaled) == 0
+	return
+}
+
+// UnscaledBig returns the unscaled value of x as *big.Int.
+func (x *Dec) UnscaledBig() *big.Int {
+	return &x.unscaled
+}
+
+// SetScale sets the scale of z, with the unscaled value unchanged, and returns
+// z.
+// The mathematical value of the Dec changes as if it was multiplied by
+// 10**(oldscale-scale).
+func (z *Dec) SetScale(scale Scale) *Dec {
+	z.scale = scale
+	return z
+}
+
+// SetUnscaled sets the unscaled value of z, with the scale unchanged, and
+// returns z.
+func (z *Dec) SetUnscaled(unscaled int64) *Dec {
+	z.unscaled.SetInt64(unscaled)
+	return z
+}
+
+// SetUnscaledBig sets the unscaled value of z, with the scale unchanged, and
+// returns z.
+func (z *Dec) SetUnscaledBig(unscaled *big.Int) *Dec {
+	z.unscaled.Set(unscaled)
+	return z
+}
+
+// Set sets z to the value of x and returns z.
+// It does nothing if z == x.
+func (z *Dec) Set(x *Dec) *Dec {
+	if z != x {
+		z.SetUnscaledBig(x.UnscaledBig())
+		z.SetScale(x.Scale())
+	}
+	return z
+}
+
+// Sign returns:
+//
+//	-1 if x <  0
+//	 0 if x == 0
+//	+1 if x >  0
+//
+func (x *Dec) Sign() int {
+	return x.UnscaledBig().Sign()
+}
+
+// Neg sets z to -x and returns z.
+func (z *Dec) Neg(x *Dec) *Dec {
+	z.SetScale(x.Scale())
+	z.UnscaledBig().Neg(x.UnscaledBig())
+	return z
+}
+
+// Cmp compares x and y and returns:
+//
+//   -1 if x <  y
+//    0 if x == y
+//   +1 if x >  y
+//
+func (x *Dec) Cmp(y *Dec) int {
+	xx, yy := upscale(x, y)
+	return xx.UnscaledBig().Cmp(yy.UnscaledBig())
+}
+
+// Abs sets z to |x| (the absolute value of x) and returns z.
+func (z *Dec) Abs(x *Dec) *Dec {
+	z.SetScale(x.Scale())
+	z.UnscaledBig().Abs(x.UnscaledBig())
+	return z
+}
+
+// Add sets z to the sum x+y and returns z.
+// The scale of z is the greater of the scales of x and y.
+func (z *Dec) Add(x, y *Dec) *Dec {
+	xx, yy := upscale(x, y)
+	z.SetScale(xx.Scale())
+	z.UnscaledBig().Add(xx.UnscaledBig(), yy.UnscaledBig())
+	return z
+}
+
+// Sub sets z to the difference x-y and returns z.
+// The scale of z is the greater of the scales of x and y.
+func (z *Dec) Sub(x, y *Dec) *Dec {
+	xx, yy := upscale(x, y)
+	z.SetScale(xx.Scale())
+	z.UnscaledBig().Sub(xx.UnscaledBig(), yy.UnscaledBig())
+	return z
+}
+
+// Mul sets z to the product x*y and returns z.
+// The scale of z is the sum of the scales of x and y.
+func (z *Dec) Mul(x, y *Dec) *Dec {
+	z.SetScale(x.Scale() + y.Scale())
+	z.UnscaledBig().Mul(x.UnscaledBig(), y.UnscaledBig())
+	return z
+}
+
+// Round sets z to the value of x rounded to Scale s using Rounder r, and
+// returns z.
+func (z *Dec) Round(x *Dec, s Scale, r Rounder) *Dec {
+	return z.QuoRound(x, NewDec(1, 0), s, r)
+}
+
+// QuoRound sets z to the quotient x/y, rounded using the given Rounder to the
+// specified scale.
+//
+// If the rounder is RoundExact but the result cannot be expressed exactly at
+// the specified scale, QuoRound returns nil, and the value of z is undefined.
+//
+// There is no corresponding Div method; the equivalent can be achieved through
+// the choice of Rounder used.
+//
+func (z *Dec) QuoRound(x, y *Dec, s Scale, r Rounder) *Dec {
+	return z.quo(x, y, sclr{s}, r)
+}
+
+func (z *Dec) quo(x, y *Dec, s scaler, r Rounder) *Dec {
+	scl := s.Scale(x, y)
+	var zzz *Dec
+	if r.UseRemainder() {
+		zz, rA, rB := new(Dec).quoRem(x, y, scl, true, new(big.Int), new(big.Int))
+		zzz = r.Round(new(Dec), zz, rA, rB)
+	} else {
+		zz, _, _ := new(Dec).quoRem(x, y, scl, false, nil, nil)
+		zzz = r.Round(new(Dec), zz, nil, nil)
+	}
+	if zzz == nil {
+		return nil
+	}
+	return z.Set(zzz)
+}
+
+// QuoExact sets z to the quotient x/y and returns z when x/y is a finite
+// decimal. Otherwise it returns nil and the value of z is undefined.
+//
+// The scale of a non-nil result is "x.Scale() - y.Scale()" or greater; it is
+// calculated so that the remainder will be zero whenever x/y is a finite
+// decimal.
+func (z *Dec) QuoExact(x, y *Dec) *Dec {
+	return z.quo(x, y, scaleQuoExact{}, RoundExact)
+}
+
+// quoRem sets z to the quotient x/y with the scale s, and if useRem is true,
+// it sets remNum and remDen to the numerator and denominator of the remainder.
+// It returns z, remNum and remDen.
+//
+// The remainder is normalized to the range -1 < r < 1 to simplify rounding;
+// that is, the results satisfy the following equation:
+//
+//  x / y = z + (remNum/remDen) * 10**(-z.Scale())
+//
+// See Rounder for more details about rounding.
+//
+func (z *Dec) quoRem(x, y *Dec, s Scale, useRem bool,
+	remNum, remDen *big.Int) (*Dec, *big.Int, *big.Int) {
+	// difference (required adjustment) compared to "canonical" result scale
+	shift := s - (x.Scale() - y.Scale())
+	// pointers to adjusted unscaled dividend and divisor
+	var ix, iy *big.Int
+	switch {
+	case shift > 0:
+		// increased scale: decimal-shift dividend left
+		ix = new(big.Int).Mul(x.UnscaledBig(), exp10(shift))
+		iy = y.UnscaledBig()
+	case shift < 0:
+		// decreased scale: decimal-shift divisor left
+		ix = x.UnscaledBig()
+		iy = new(big.Int).Mul(y.UnscaledBig(), exp10(-shift))
+	default:
+		ix = x.UnscaledBig()
+		iy = y.UnscaledBig()
+	}
+	// save a copy of iy in case it is about to be overwritten with the result
+	iy2 := iy
+	if iy == z.UnscaledBig() {
+		iy2 = new(big.Int).Set(iy)
+	}
+	// set scale
+	z.SetScale(s)
+	// set unscaled
+	if useRem {
+		// Int division
+		_, intr := z.UnscaledBig().QuoRem(ix, iy, new(big.Int))
+		// set remainder
+		remNum.Set(intr)
+		remDen.Set(iy2)
+	} else {
+		z.UnscaledBig().Quo(ix, iy)
+	}
+	return z, remNum, remDen
+}
+
+type sclr struct{ s Scale }
+
+func (s sclr) Scale(x, y *Dec) Scale {
+	return s.s
+}
+
+type scaleQuoExact struct{}
+
+func (sqe scaleQuoExact) Scale(x, y *Dec) Scale {
+	rem := new(big.Rat).SetFrac(x.UnscaledBig(), y.UnscaledBig())
+	f2, f5 := factor2(rem.Denom()), factor(rem.Denom(), bigInt[5])
+	var f10 Scale
+	if f2 > f5 {
+		f10 = Scale(f2)
+	} else {
+		f10 = Scale(f5)
+	}
+	return x.Scale() - y.Scale() + f10
+}
+
+func factor(n *big.Int, p *big.Int) int {
+	// could be improved for large factors
+	d, f := n, 0
+	for {
+		dd, dm := new(big.Int).DivMod(d, p, new(big.Int))
+		if dm.Sign() == 0 {
+			f++
+			d = dd
+		} else {
+			break
+		}
+	}
+	return f
+}
+
+func factor2(n *big.Int) int {
+	// could be improved for large factors
+	f := 0
+	for ; n.Bit(f) == 0; f++ {
+	}
+	return f
+}
+
+func upscale(a, b *Dec) (*Dec, *Dec) {
+	if a.Scale() == b.Scale() {
+		return a, b
+	}
+	if a.Scale() > b.Scale() {
+		bb := b.rescale(a.Scale())
+		return a, bb
+	}
+	aa := a.rescale(b.Scale())
+	return aa, b
+}
+
+func exp10(x Scale) *big.Int {
+	if int(x) < len(exp10cache) {
+		return &exp10cache[int(x)]
+	}
+	return new(big.Int).Exp(bigInt[10], big.NewInt(int64(x)), nil)
+}
+
+func (x *Dec) rescale(newScale Scale) *Dec {
+	shift := newScale - x.Scale()
+	switch {
+	case shift < 0:
+		e := exp10(-shift)
+		return NewDecBig(new(big.Int).Quo(x.UnscaledBig(), e), newScale)
+	case shift > 0:
+		e := exp10(shift)
+		return NewDecBig(new(big.Int).Mul(x.UnscaledBig(), e), newScale)
+	}
+	return x
+}
+
+var zeros = []byte("00000000000000000000000000000000" +
+	"00000000000000000000000000000000")
+var lzeros = Scale(len(zeros))
+
+func appendZeros(s []byte, n Scale) []byte {
+	for i := Scale(0); i < n; i += lzeros {
+		if n > i+lzeros {
+			s = append(s, zeros...)
+		} else {
+			s = append(s, zeros[0:n-i]...)
+		}
+	}
+	return s
+}
+
+func (x *Dec) String() string {
+	if x == nil {
+		return "<nil>"
+	}
+	scale := x.Scale()
+	s := []byte(x.UnscaledBig().String())
+	if scale <= 0 {
+		if scale != 0 && x.unscaled.Sign() != 0 {
+			s = appendZeros(s, -scale)
+		}
+		return string(s)
+	}
+	negbit := Scale(-((x.Sign() - 1) / 2))
+	// scale > 0
+	lens := Scale(len(s))
+	if lens-negbit <= scale {
+		ss := make([]byte, 0, scale+2)
+		if negbit == 1 {
+			ss = append(ss, '-')
+		}
+		ss = append(ss, '0', '.')
+		ss = appendZeros(ss, scale-lens+negbit)
+		ss = append(ss, s[negbit:]...)
+		return string(ss)
+	}
+	// lens > scale
+	ss := make([]byte, 0, lens+1)
+	ss = append(ss, s[:lens-scale]...)
+	ss = append(ss, '.')
+	ss = append(ss, s[lens-scale:]...)
+	return string(ss)
+}
+
+// Format is a support routine for fmt.Formatter. It accepts the decimal
+// formats 'd' and 'f', and handles both equivalently.
+// Width, precision, flags and bases 2, 8, 16 are not supported.
+func (x *Dec) Format(s fmt.State, ch rune) {
+	if ch != 'd' && ch != 'f' && ch != 'v' && ch != 's' {
+		fmt.Fprintf(s, "%%!%c(dec.Dec=%s)", ch, x.String())
+		return
+	}
+	fmt.Fprint(s, x.String())
+}
+
+func (z *Dec) scan(r io.RuneScanner) (*Dec, error) {
+	unscaled := make([]byte, 0, 256) // collects chars of unscaled as bytes
+	dp, dg := -1, -1                 // indexes of decimal point, first digit
+loop:
+	for {
+		ch, _, err := r.ReadRune()
+		if err == io.EOF {
+			break loop
+		}
+		if err != nil {
+			return nil, err
+		}
+		switch {
+		case ch == '+' || ch == '-':
+			if len(unscaled) > 0 || dp >= 0 { // must be first character
+				r.UnreadRune()
+				break loop
+			}
+		case ch == '.':
+			if dp >= 0 {
+				r.UnreadRune()
+				break loop
+			}
+			dp = len(unscaled)
+			continue // don't add to unscaled
+		case ch >= '0' && ch <= '9':
+			if dg == -1 {
+				dg = len(unscaled)
+			}
+		default:
+			r.UnreadRune()
+			break loop
+		}
+		unscaled = append(unscaled, byte(ch))
+	}
+	if dg == -1 {
+		return nil, fmt.Errorf("no digits read")
+	}
+	if dp >= 0 {
+		z.SetScale(Scale(len(unscaled) - dp))
+	} else {
+		z.SetScale(0)
+	}
+	_, ok := z.UnscaledBig().SetString(string(unscaled), 10)
+	if !ok {
+		return nil, fmt.Errorf("invalid decimal: %s", string(unscaled))
+	}
+	return z, nil
+}
+
+// SetString sets z to the value of s, interpreted as a decimal (base 10),
+// and returns z and a boolean indicating success. The scale of z is the
+// number of digits after the decimal point (including any trailing 0s),
+// or 0 if there is no decimal point. If SetString fails, the value of z
+// is undefined but the returned value is nil.
+func (z *Dec) SetString(s string) (*Dec, bool) {
+	r := strings.NewReader(s)
+	_, err := z.scan(r)
+	if err != nil {
+		return nil, false
+	}
+	_, _, err = r.ReadRune()
+	if err != io.EOF {
+		return nil, false
+	}
+	// err == io.EOF => scan consumed all of s
+	return z, true
+}
+
+// Scan is a support routine for fmt.Scanner; it sets z to the value of
+// the scanned number. It accepts the decimal formats 'd' and 'f', and
+// handles both equivalently. Bases 2, 8, 16 are not supported.
+// The scale of z is the number of digits after the decimal point
+// (including any trailing 0s), or 0 if there is no decimal point.
+func (z *Dec) Scan(s fmt.ScanState, ch rune) error {
+	if ch != 'd' && ch != 'f' && ch != 's' && ch != 'v' {
+		return fmt.Errorf("Dec.Scan: invalid verb '%c'", ch)
+	}
+	s.SkipSpace()
+	_, err := z.scan(s)
+	return err
+}
+
+// Gob encoding version
+const decGobVersion byte = 1
+
+func scaleBytes(s Scale) []byte {
+	buf := make([]byte, scaleSize)
+	i := scaleSize
+	for j := 0; j < scaleSize; j++ {
+		i--
+		buf[i] = byte(s)
+		s >>= 8
+	}
+	return buf
+}
+
+func scale(b []byte) (s Scale) {
+	for j := 0; j < scaleSize; j++ {
+		s <<= 8
+		s |= Scale(b[j])
+	}
+	return
+}
+
+// GobEncode implements the gob.GobEncoder interface.
+func (x *Dec) GobEncode() ([]byte, error) {
+	buf, err := x.UnscaledBig().GobEncode()
+	if err != nil {
+		return nil, err
+	}
+	buf = append(append(buf, scaleBytes(x.Scale())...), decGobVersion)
+	return buf, nil
+}
+
+// GobDecode implements the gob.GobDecoder interface.
+func (z *Dec) GobDecode(buf []byte) error {
+	if len(buf) == 0 {
+		return fmt.Errorf("Dec.GobDecode: no data")
+	}
+	b := buf[len(buf)-1]
+	if b != decGobVersion {
+		return fmt.Errorf("Dec.GobDecode: encoding version %d not supported", b)
+	}
+	l := len(buf) - scaleSize - 1
+	err := z.UnscaledBig().GobDecode(buf[:l])
+	if err != nil {
+		return err
+	}
+	z.SetScale(scale(buf[l : l+scaleSize]))
+	return nil
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+func (x *Dec) MarshalText() ([]byte, error) {
+	return []byte(x.String()), nil
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+func (z *Dec) UnmarshalText(data []byte) error {
+	_, ok := z.SetString(string(data))
+	if !ok {
+		return fmt.Errorf("invalid inf.Dec")
+	}
+	return nil
+}
diff --git a/vendor/gopkg.in/inf.v0/rounder.go b/vendor/gopkg.in/inf.v0/rounder.go
new file mode 100644
index 0000000..3a97ef5
--- /dev/null
+++ b/vendor/gopkg.in/inf.v0/rounder.go
@@ -0,0 +1,145 @@
+package inf
+
+import (
+	"math/big"
+)
+
+// Rounder represents a method for rounding the (possibly infinite decimal)
+// result of a division to a finite Dec. It is used by Dec.Round() and
+// Dec.QuoRound().
+//
+// See the Example for results of using each Rounder with some sample values.
+//
+type Rounder rounder
+
+// See http://speleotrove.com/decimal/damodel.html#refround for more detailed
+// definitions of these rounding modes.
+var (
+	RoundDown     Rounder // towards 0
+	RoundUp       Rounder // away from 0
+	RoundFloor    Rounder // towards -infinity
+	RoundCeil     Rounder // towards +infinity
+	RoundHalfDown Rounder // to nearest; towards 0 if same distance
+	RoundHalfUp   Rounder // to nearest; away from 0 if same distance
+	RoundHalfEven Rounder // to nearest; even last digit if same distance
+)
+
+// RoundExact is to be used in the case when rounding is not necessary.
+// When used with QuoRound or Round, it returns the result verbatim when it
+// can be expressed exactly with the given precision, and it returns nil
+// otherwise. QuoExact is a shorthand for using QuoRound with RoundExact.
+var RoundExact Rounder
+
+type rounder interface {
+
+	// When UseRemainder() returns true, the Round() method is passed the
+	// remainder of the division, expressed as the numerator and denominator of
+	// a rational.
+	UseRemainder() bool
+
+	// Round sets the rounded value of a quotient to z, and returns z.
+	// quo is rounded down (truncated towards zero) to the scale obtained from
+	// the scaler in QuoRound().
+	//
+	// When the remainder is not used, remNum and remDen are nil.
+	// When used, the remainder is normalized between -1 and 1; that is:
+	//
+	//  -|remDen| < remNum < |remDen|
+	//
+	// remDen has the same sign as y, and remNum is zero or has the same sign
+	// as x.
+	Round(z, quo *Dec, remNum, remDen *big.Int) *Dec
+}
+
+type rndr struct {
+	useRem bool
+	round  func(z, quo *Dec, remNum, remDen *big.Int) *Dec
+}
+
+func (r rndr) UseRemainder() bool {
+	return r.useRem
+}
+
+func (r rndr) Round(z, quo *Dec, remNum, remDen *big.Int) *Dec {
+	return r.round(z, quo, remNum, remDen)
+}
+
+var intSign = []*big.Int{big.NewInt(-1), big.NewInt(0), big.NewInt(1)}
+
+func roundHalf(f func(c int, odd uint) (roundUp bool)) func(z, q *Dec, rA, rB *big.Int) *Dec {
+	return func(z, q *Dec, rA, rB *big.Int) *Dec {
+		z.Set(q)
+		brA, brB := rA.BitLen(), rB.BitLen()
+		if brA < brB-1 {
+			// brA < brB-1 => |rA| < |rB/2|
+			return z
+		}
+		roundUp := false
+		srA, srB := rA.Sign(), rB.Sign()
+		s := srA * srB
+		if brA == brB-1 {
+			rA2 := new(big.Int).Lsh(rA, 1)
+			if s < 0 {
+				rA2.Neg(rA2)
+			}
+			roundUp = f(rA2.Cmp(rB)*srB, z.UnscaledBig().Bit(0))
+		} else {
+			// brA > brB-1 => |rA| > |rB/2|
+			roundUp = true
+		}
+		if roundUp {
+			z.UnscaledBig().Add(z.UnscaledBig(), intSign[s+1])
+		}
+		return z
+	}
+}
+
+func init() {
+	RoundExact = rndr{true,
+		func(z, q *Dec, rA, rB *big.Int) *Dec {
+			if rA.Sign() != 0 {
+				return nil
+			}
+			return z.Set(q)
+		}}
+	RoundDown = rndr{false,
+		func(z, q *Dec, rA, rB *big.Int) *Dec {
+			return z.Set(q)
+		}}
+	RoundUp = rndr{true,
+		func(z, q *Dec, rA, rB *big.Int) *Dec {
+			z.Set(q)
+			if rA.Sign() != 0 {
+				z.UnscaledBig().Add(z.UnscaledBig(), intSign[rA.Sign()*rB.Sign()+1])
+			}
+			return z
+		}}
+	RoundFloor = rndr{true,
+		func(z, q *Dec, rA, rB *big.Int) *Dec {
+			z.Set(q)
+			if rA.Sign()*rB.Sign() < 0 {
+				z.UnscaledBig().Add(z.UnscaledBig(), intSign[0])
+			}
+			return z
+		}}
+	RoundCeil = rndr{true,
+		func(z, q *Dec, rA, rB *big.Int) *Dec {
+			z.Set(q)
+			if rA.Sign()*rB.Sign() > 0 {
+				z.UnscaledBig().Add(z.UnscaledBig(), intSign[2])
+			}
+			return z
+		}}
+	RoundHalfDown = rndr{true, roundHalf(
+		func(c int, odd uint) bool {
+			return c > 0
+		})}
+	RoundHalfUp = rndr{true, roundHalf(
+		func(c int, odd uint) bool {
+			return c >= 0
+		})}
+	RoundHalfEven = rndr{true, roundHalf(
+		func(c int, odd uint) bool {
+			return c > 0 || c == 0 && odd == 1
+		})}
+}
diff --git a/vendor/gopkg.in/yaml.v2/.travis.yml b/vendor/gopkg.in/yaml.v2/.travis.yml
new file mode 100644
index 0000000..9f55693
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/.travis.yml
@@ -0,0 +1,12 @@
+language: go
+
+go:
+    - 1.4
+    - 1.5
+    - 1.6
+    - 1.7
+    - 1.8
+    - 1.9
+    - tip
+
+go_import_path: gopkg.in/yaml.v2
diff --git a/vendor/gopkg.in/yaml.v2/LICENSE b/vendor/gopkg.in/yaml.v2/LICENSE
new file mode 100644
index 0000000..8dada3e
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright {yyyy} {name of copyright owner}
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/gopkg.in/yaml.v2/LICENSE.libyaml b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml
new file mode 100644
index 0000000..8da58fb
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml
@@ -0,0 +1,31 @@
+The following files were ported to Go from C files of libyaml, and thus
+are still covered by their original copyright and license:
+
+    apic.go
+    emitterc.go
+    parserc.go
+    readerc.go
+    scannerc.go
+    writerc.go
+    yamlh.go
+    yamlprivateh.go
+
+Copyright (c) 2006 Kirill Simonov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/gopkg.in/yaml.v2/NOTICE b/vendor/gopkg.in/yaml.v2/NOTICE
new file mode 100644
index 0000000..866d74a
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/NOTICE
@@ -0,0 +1,13 @@
+Copyright 2011-2016 Canonical Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/gopkg.in/yaml.v2/README.md b/vendor/gopkg.in/yaml.v2/README.md
new file mode 100644
index 0000000..b50c6e8
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/README.md
@@ -0,0 +1,133 @@
+# YAML support for the Go language
+
+Introduction
+------------
+
+The yaml package enables Go programs to comfortably encode and decode YAML
+values. It was developed within [Canonical](https://www.canonical.com) as
+part of the [juju](https://juju.ubuntu.com) project, and is based on a
+pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
+C library to parse and generate YAML data quickly and reliably.
+
+Compatibility
+-------------
+
+The yaml package supports most of YAML 1.1 and 1.2, including support for
+anchors, tags, map merging, etc. Multi-document unmarshalling is not yet
+implemented, and base-60 floats from YAML 1.1 are purposefully not
+supported since they're a poor design and are gone in YAML 1.2.
+
+Installation and usage
+----------------------
+
+The import path for the package is *gopkg.in/yaml.v2*.
+
+To install it, run:
+
+    go get gopkg.in/yaml.v2
+
+API documentation
+-----------------
+
+If opened in a browser, the import path itself leads to the API documentation:
+
+  * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2)
+
+API stability
+-------------
+
+The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in).
+
+
+License
+-------
+
+The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details.
+
+
+Example
+-------
+
+```Go
+package main
+
+import (
+        "fmt"
+        "log"
+
+        "gopkg.in/yaml.v2"
+)
+
+var data = `
+a: Easy!
+b:
+  c: 2
+  d: [3, 4]
+`
+
+// Note: struct fields must be public in order for unmarshal to
+// correctly populate the data.
+type T struct {
+        A string
+        B struct {
+                RenamedC int   `yaml:"c"`
+                D        []int `yaml:",flow"`
+        }
+}
+
+func main() {
+        t := T{}
+    
+        err := yaml.Unmarshal([]byte(data), &t)
+        if err != nil {
+                log.Fatalf("error: %v", err)
+        }
+        fmt.Printf("--- t:\n%v\n\n", t)
+    
+        d, err := yaml.Marshal(&t)
+        if err != nil {
+                log.Fatalf("error: %v", err)
+        }
+        fmt.Printf("--- t dump:\n%s\n\n", string(d))
+    
+        m := make(map[interface{}]interface{})
+    
+        err = yaml.Unmarshal([]byte(data), &m)
+        if err != nil {
+                log.Fatalf("error: %v", err)
+        }
+        fmt.Printf("--- m:\n%v\n\n", m)
+    
+        d, err = yaml.Marshal(&m)
+        if err != nil {
+                log.Fatalf("error: %v", err)
+        }
+        fmt.Printf("--- m dump:\n%s\n\n", string(d))
+}
+```
+
+This example will generate the following output:
+
+```
+--- t:
+{Easy! {2 [3 4]}}
+
+--- t dump:
+a: Easy!
+b:
+  c: 2
+  d: [3, 4]
+
+
+--- m:
+map[a:Easy! b:map[c:2 d:[3 4]]]
+
+--- m dump:
+a: Easy!
+b:
+  c: 2
+  d:
+  - 3
+  - 4
+```
+
diff --git a/vendor/gopkg.in/yaml.v2/apic.go b/vendor/gopkg.in/yaml.v2/apic.go
new file mode 100644
index 0000000..1f7e87e
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/apic.go
@@ -0,0 +1,739 @@
+package yaml
+
+import (
+	"io"
+)
+
+func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
+	//fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
+
+	// Check if we can move the queue to the beginning of the buffer.
+	if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
+		if parser.tokens_head != len(parser.tokens) {
+			copy(parser.tokens, parser.tokens[parser.tokens_head:])
+		}
+		parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
+		parser.tokens_head = 0
+	}
+	parser.tokens = append(parser.tokens, *token)
+	if pos < 0 {
+		return
+	}
+	copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
+	parser.tokens[parser.tokens_head+pos] = *token
+}
+
+// Create a new parser object.
+func yaml_parser_initialize(parser *yaml_parser_t) bool {
+	*parser = yaml_parser_t{
+		raw_buffer: make([]byte, 0, input_raw_buffer_size),
+		buffer:     make([]byte, 0, input_buffer_size),
+	}
+	return true
+}
+
+// Destroy a parser object.
+func yaml_parser_delete(parser *yaml_parser_t) {
+	*parser = yaml_parser_t{}
+}
+
+// String read handler.
+func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+	if parser.input_pos == len(parser.input) {
+		return 0, io.EOF
+	}
+	n = copy(buffer, parser.input[parser.input_pos:])
+	parser.input_pos += n
+	return n, nil
+}
+
+// Reader read handler.
+func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+	return parser.input_reader.Read(buffer)
+}
+
+// Set a string input.
+func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
+	if parser.read_handler != nil {
+		panic("must set the input source only once")
+	}
+	parser.read_handler = yaml_string_read_handler
+	parser.input = input
+	parser.input_pos = 0
+}
+
+// Set a reader input.
+func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) {
+	if parser.read_handler != nil {
+		panic("must set the input source only once")
+	}
+	parser.read_handler = yaml_reader_read_handler
+	parser.input_reader = r
+}
+
+// Set the source encoding.
+func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
+	if parser.encoding != yaml_ANY_ENCODING {
+		panic("must set the encoding only once")
+	}
+	parser.encoding = encoding
+}
+
+// Create a new emitter object.
+func yaml_emitter_initialize(emitter *yaml_emitter_t) {
+	*emitter = yaml_emitter_t{
+		buffer:     make([]byte, output_buffer_size),
+		raw_buffer: make([]byte, 0, output_raw_buffer_size),
+		states:     make([]yaml_emitter_state_t, 0, initial_stack_size),
+		events:     make([]yaml_event_t, 0, initial_queue_size),
+	}
+}
+
+// Destroy an emitter object.
+func yaml_emitter_delete(emitter *yaml_emitter_t) {
+	*emitter = yaml_emitter_t{}
+}
+
+// String write handler.
+func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+	*emitter.output_buffer = append(*emitter.output_buffer, buffer...)
+	return nil
+}
+
+// yaml_writer_write_handler uses emitter.output_writer to write the
+// emitted text.
+func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+	_, err := emitter.output_writer.Write(buffer)
+	return err
+}
+
+// Set a string output.
+func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
+	if emitter.write_handler != nil {
+		panic("must set the output target only once")
+	}
+	emitter.write_handler = yaml_string_write_handler
+	emitter.output_buffer = output_buffer
+}
+
+// Set a writer output.
+func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) {
+	if emitter.write_handler != nil {
+		panic("must set the output target only once")
+	}
+	emitter.write_handler = yaml_writer_write_handler
+	emitter.output_writer = w
+}
+
+// Set the output encoding.
+func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
+	if emitter.encoding != yaml_ANY_ENCODING {
+		panic("must set the output encoding only once")
+	}
+	emitter.encoding = encoding
+}
+
+// Set the canonical output style.
+func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
+	emitter.canonical = canonical
+}
+
+// Set the indentation increment.
+func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
+	if indent < 2 || indent > 9 {
+		indent = 2
+	}
+	emitter.best_indent = indent
+}
+
+// Set the preferred line width.
+func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
+	if width < 0 {
+		width = -1
+	}
+	emitter.best_width = width
+}
+
+// Set if unescaped non-ASCII characters are allowed.
+func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
+	emitter.unicode = unicode
+}
+
+// Set the preferred line break character.
+func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
+	emitter.line_break = line_break
+}
+
+///*
+// * Destroy a token object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_token_delete(yaml_token_t *token)
+//{
+//    assert(token);  // Non-NULL token object expected.
+//
+//    switch (token.type)
+//    {
+//        case YAML_TAG_DIRECTIVE_TOKEN:
+//            yaml_free(token.data.tag_directive.handle);
+//            yaml_free(token.data.tag_directive.prefix);
+//            break;
+//
+//        case YAML_ALIAS_TOKEN:
+//            yaml_free(token.data.alias.value);
+//            break;
+//
+//        case YAML_ANCHOR_TOKEN:
+//            yaml_free(token.data.anchor.value);
+//            break;
+//
+//        case YAML_TAG_TOKEN:
+//            yaml_free(token.data.tag.handle);
+//            yaml_free(token.data.tag.suffix);
+//            break;
+//
+//        case YAML_SCALAR_TOKEN:
+//            yaml_free(token.data.scalar.value);
+//            break;
+//
+//        default:
+//            break;
+//    }
+//
+//    memset(token, 0, sizeof(yaml_token_t));
+//}
+//
+///*
+// * Check if a string is a valid UTF-8 sequence.
+// *
+// * Check 'reader.c' for more details on UTF-8 encoding.
+// */
+//
+//static int
+//yaml_check_utf8(yaml_char_t *start, size_t length)
+//{
+//    yaml_char_t *end = start+length;
+//    yaml_char_t *pointer = start;
+//
+//    while (pointer < end) {
+//        unsigned char octet;
+//        unsigned int width;
+//        unsigned int value;
+//        size_t k;
+//
+//        octet = pointer[0];
+//        width = (octet & 0x80) == 0x00 ? 1 :
+//                (octet & 0xE0) == 0xC0 ? 2 :
+//                (octet & 0xF0) == 0xE0 ? 3 :
+//                (octet & 0xF8) == 0xF0 ? 4 : 0;
+//        value = (octet & 0x80) == 0x00 ? octet & 0x7F :
+//                (octet & 0xE0) == 0xC0 ? octet & 0x1F :
+//                (octet & 0xF0) == 0xE0 ? octet & 0x0F :
+//                (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
+//        if (!width) return 0;
+//        if (pointer+width > end) return 0;
+//        for (k = 1; k < width; k ++) {
+//            octet = pointer[k];
+//            if ((octet & 0xC0) != 0x80) return 0;
+//            value = (value << 6) + (octet & 0x3F);
+//        }
+//        if (!((width == 1) ||
+//            (width == 2 && value >= 0x80) ||
+//            (width == 3 && value >= 0x800) ||
+//            (width == 4 && value >= 0x10000))) return 0;
+//
+//        pointer += width;
+//    }
+//
+//    return 1;
+//}
+//
+
+// Create STREAM-START.
+func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) {
+	*event = yaml_event_t{
+		typ:      yaml_STREAM_START_EVENT,
+		encoding: encoding,
+	}
+}
+
+// Create STREAM-END.
+func yaml_stream_end_event_initialize(event *yaml_event_t) {
+	*event = yaml_event_t{
+		typ: yaml_STREAM_END_EVENT,
+	}
+}
+
+// Create DOCUMENT-START.
+func yaml_document_start_event_initialize(
+	event *yaml_event_t,
+	version_directive *yaml_version_directive_t,
+	tag_directives []yaml_tag_directive_t,
+	implicit bool,
+) {
+	*event = yaml_event_t{
+		typ:               yaml_DOCUMENT_START_EVENT,
+		version_directive: version_directive,
+		tag_directives:    tag_directives,
+		implicit:          implicit,
+	}
+}
+
+// Create DOCUMENT-END.
+func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) {
+	*event = yaml_event_t{
+		typ:      yaml_DOCUMENT_END_EVENT,
+		implicit: implicit,
+	}
+}
+
+///*
+// * Create ALIAS.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t)
+//{
+//    mark yaml_mark_t = { 0, 0, 0 }
+//    anchor_copy *yaml_char_t = NULL
+//
+//    assert(event) // Non-NULL event object is expected.
+//    assert(anchor) // Non-NULL anchor is expected.
+//
+//    if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0
+//
+//    anchor_copy = yaml_strdup(anchor)
+//    if (!anchor_copy)
+//        return 0
+//
+//    ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark)
+//
+//    return 1
+//}
+
+// Create SCALAR.
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
+	*event = yaml_event_t{
+		typ:             yaml_SCALAR_EVENT,
+		anchor:          anchor,
+		tag:             tag,
+		value:           value,
+		implicit:        plain_implicit,
+		quoted_implicit: quoted_implicit,
+		style:           yaml_style_t(style),
+	}
+	return true
+}
+
+// Create SEQUENCE-START.
+func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
+	*event = yaml_event_t{
+		typ:      yaml_SEQUENCE_START_EVENT,
+		anchor:   anchor,
+		tag:      tag,
+		implicit: implicit,
+		style:    yaml_style_t(style),
+	}
+	return true
+}
+
+// Create SEQUENCE-END.
+func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
+	*event = yaml_event_t{
+		typ: yaml_SEQUENCE_END_EVENT,
+	}
+	return true
+}
+
+// Create MAPPING-START.
+func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) {
+	*event = yaml_event_t{
+		typ:      yaml_MAPPING_START_EVENT,
+		anchor:   anchor,
+		tag:      tag,
+		implicit: implicit,
+		style:    yaml_style_t(style),
+	}
+}
+
+// Create MAPPING-END.
+func yaml_mapping_end_event_initialize(event *yaml_event_t) {
+	*event = yaml_event_t{
+		typ: yaml_MAPPING_END_EVENT,
+	}
+}
+
+// Destroy an event object.
+func yaml_event_delete(event *yaml_event_t) {
+	*event = yaml_event_t{}
+}
+
+///*
+// * Create a document object.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_initialize(document *yaml_document_t,
+//        version_directive *yaml_version_directive_t,
+//        tag_directives_start *yaml_tag_directive_t,
+//        tag_directives_end *yaml_tag_directive_t,
+//        start_implicit int, end_implicit int)
+//{
+//    struct {
+//        error yaml_error_type_t
+//    } context
+//    struct {
+//        start *yaml_node_t
+//        end *yaml_node_t
+//        top *yaml_node_t
+//    } nodes = { NULL, NULL, NULL }
+//    version_directive_copy *yaml_version_directive_t = NULL
+//    struct {
+//        start *yaml_tag_directive_t
+//        end *yaml_tag_directive_t
+//        top *yaml_tag_directive_t
+//    } tag_directives_copy = { NULL, NULL, NULL }
+//    value yaml_tag_directive_t = { NULL, NULL }
+//    mark yaml_mark_t = { 0, 0, 0 }
+//
+//    assert(document) // Non-NULL document object is expected.
+//    assert((tag_directives_start && tag_directives_end) ||
+//            (tag_directives_start == tag_directives_end))
+//                            // Valid tag directives are expected.
+//
+//    if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
+//
+//    if (version_directive) {
+//        version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
+//        if (!version_directive_copy) goto error
+//        version_directive_copy.major = version_directive.major
+//        version_directive_copy.minor = version_directive.minor
+//    }
+//
+//    if (tag_directives_start != tag_directives_end) {
+//        tag_directive *yaml_tag_directive_t
+//        if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
+//            goto error
+//        for (tag_directive = tag_directives_start
+//                tag_directive != tag_directives_end; tag_directive ++) {
+//            assert(tag_directive.handle)
+//            assert(tag_directive.prefix)
+//            if (!yaml_check_utf8(tag_directive.handle,
+//                        strlen((char *)tag_directive.handle)))
+//                goto error
+//            if (!yaml_check_utf8(tag_directive.prefix,
+//                        strlen((char *)tag_directive.prefix)))
+//                goto error
+//            value.handle = yaml_strdup(tag_directive.handle)
+//            value.prefix = yaml_strdup(tag_directive.prefix)
+//            if (!value.handle || !value.prefix) goto error
+//            if (!PUSH(&context, tag_directives_copy, value))
+//                goto error
+//            value.handle = NULL
+//            value.prefix = NULL
+//        }
+//    }
+//
+//    DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
+//            tag_directives_copy.start, tag_directives_copy.top,
+//            start_implicit, end_implicit, mark, mark)
+//
+//    return 1
+//
+//error:
+//    STACK_DEL(&context, nodes)
+//    yaml_free(version_directive_copy)
+//    while (!STACK_EMPTY(&context, tag_directives_copy)) {
+//        value yaml_tag_directive_t = POP(&context, tag_directives_copy)
+//        yaml_free(value.handle)
+//        yaml_free(value.prefix)
+//    }
+//    STACK_DEL(&context, tag_directives_copy)
+//    yaml_free(value.handle)
+//    yaml_free(value.prefix)
+//
+//    return 0
+//}
+//
+///*
+// * Destroy a document object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_document_delete(document *yaml_document_t)
+//{
+//    struct {
+//        error yaml_error_type_t
+//    } context
+//    tag_directive *yaml_tag_directive_t
+//
+//    context.error = YAML_NO_ERROR // Eliminate a compiler warning.
+//
+//    assert(document) // Non-NULL document object is expected.
+//
+//    while (!STACK_EMPTY(&context, document.nodes)) {
+//        node yaml_node_t = POP(&context, document.nodes)
+//        yaml_free(node.tag)
+//        switch (node.type) {
+//            case YAML_SCALAR_NODE:
+//                yaml_free(node.data.scalar.value)
+//                break
+//            case YAML_SEQUENCE_NODE:
+//                STACK_DEL(&context, node.data.sequence.items)
+//                break
+//            case YAML_MAPPING_NODE:
+//                STACK_DEL(&context, node.data.mapping.pairs)
+//                break
+//            default:
+//                assert(0) // Should not happen.
+//        }
+//    }
+//    STACK_DEL(&context, document.nodes)
+//
+//    yaml_free(document.version_directive)
+//    for (tag_directive = document.tag_directives.start
+//            tag_directive != document.tag_directives.end
+//            tag_directive++) {
+//        yaml_free(tag_directive.handle)
+//        yaml_free(tag_directive.prefix)
+//    }
+//    yaml_free(document.tag_directives.start)
+//
+//    memset(document, 0, sizeof(yaml_document_t))
+//}
+//
+///**
+// * Get a document node.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_node(document *yaml_document_t, index int)
+//{
+//    assert(document) // Non-NULL document object is expected.
+//
+//    if (index > 0 && document.nodes.start + index <= document.nodes.top) {
+//        return document.nodes.start + index - 1
+//    }
+//    return NULL
+//}
+//
+///**
+// * Get the root object.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_root_node(document *yaml_document_t)
+//{
+//    assert(document) // Non-NULL document object is expected.
+//
+//    if (document.nodes.top != document.nodes.start) {
+//        return document.nodes.start
+//    }
+//    return NULL
+//}
+//
+///*
+// * Add a scalar node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_scalar(document *yaml_document_t,
+//        tag *yaml_char_t, value *yaml_char_t, length int,
+//        style yaml_scalar_style_t)
+//{
+//    struct {
+//        error yaml_error_type_t
+//    } context
+//    mark yaml_mark_t = { 0, 0, 0 }
+//    tag_copy *yaml_char_t = NULL
+//    value_copy *yaml_char_t = NULL
+//    node yaml_node_t
+//
+//    assert(document) // Non-NULL document object is expected.
+//    assert(value) // Non-NULL value is expected.
+//
+//    if (!tag) {
+//        tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
+//    }
+//
+//    if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+//    tag_copy = yaml_strdup(tag)
+//    if (!tag_copy) goto error
+//
+//    if (length < 0) {
+//        length = strlen((char *)value)
+//    }
+//
+//    if (!yaml_check_utf8(value, length)) goto error
+//    value_copy = yaml_malloc(length+1)
+//    if (!value_copy) goto error
+//    memcpy(value_copy, value, length)
+//    value_copy[length] = '\0'
+//
+//    SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
+//    if (!PUSH(&context, document.nodes, node)) goto error
+//
+//    return document.nodes.top - document.nodes.start
+//
+//error:
+//    yaml_free(tag_copy)
+//    yaml_free(value_copy)
+//
+//    return 0
+//}
+//
+///*
+// * Add a sequence node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_sequence(document *yaml_document_t,
+//        tag *yaml_char_t, style yaml_sequence_style_t)
+//{
+//    struct {
+//        error yaml_error_type_t
+//    } context
+//    mark yaml_mark_t = { 0, 0, 0 }
+//    tag_copy *yaml_char_t = NULL
+//    struct {
+//        start *yaml_node_item_t
+//        end *yaml_node_item_t
+//        top *yaml_node_item_t
+//    } items = { NULL, NULL, NULL }
+//    node yaml_node_t
+//
+//    assert(document) // Non-NULL document object is expected.
+//
+//    if (!tag) {
+//        tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
+//    }
+//
+//    if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+//    tag_copy = yaml_strdup(tag)
+//    if (!tag_copy) goto error
+//
+//    if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
+//
+//    SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
+//            style, mark, mark)
+//    if (!PUSH(&context, document.nodes, node)) goto error
+//
+//    return document.nodes.top - document.nodes.start
+//
+//error:
+//    STACK_DEL(&context, items)
+//    yaml_free(tag_copy)
+//
+//    return 0
+//}
+//
+///*
+// * Add a mapping node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_mapping(document *yaml_document_t,
+//        tag *yaml_char_t, style yaml_mapping_style_t)
+//{
+//    struct {
+//        error yaml_error_type_t
+//    } context
+//    mark yaml_mark_t = { 0, 0, 0 }
+//    tag_copy *yaml_char_t = NULL
+//    struct {
+//        start *yaml_node_pair_t
+//        end *yaml_node_pair_t
+//        top *yaml_node_pair_t
+//    } pairs = { NULL, NULL, NULL }
+//    node yaml_node_t
+//
+//    assert(document) // Non-NULL document object is expected.
+//
+//    if (!tag) {
+//        tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
+//    }
+//
+//    if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+//    tag_copy = yaml_strdup(tag)
+//    if (!tag_copy) goto error
+//
+//    if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
+//
+//    MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
+//            style, mark, mark)
+//    if (!PUSH(&context, document.nodes, node)) goto error
+//
+//    return document.nodes.top - document.nodes.start
+//
+//error:
+//    STACK_DEL(&context, pairs)
+//    yaml_free(tag_copy)
+//
+//    return 0
+//}
+//
+///*
+// * Append an item to a sequence node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_sequence_item(document *yaml_document_t,
+//        sequence int, item int)
+//{
+//    struct {
+//        error yaml_error_type_t
+//    } context
+//
+//    assert(document) // Non-NULL document is required.
+//    assert(sequence > 0
+//            && document.nodes.start + sequence <= document.nodes.top)
+//                            // Valid sequence id is required.
+//    assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
+//                            // A sequence node is required.
+//    assert(item > 0 && document.nodes.start + item <= document.nodes.top)
+//                            // Valid item id is required.
+//
+//    if (!PUSH(&context,
+//                document.nodes.start[sequence-1].data.sequence.items, item))
+//        return 0
+//
+//    return 1
+//}
+//
+///*
+// * Append a pair of a key and a value to a mapping node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_mapping_pair(document *yaml_document_t,
+//        mapping int, key int, value int)
+//{
+//    struct {
+//        error yaml_error_type_t
+//    } context
+//
+//    pair yaml_node_pair_t
+//
+//    assert(document) // Non-NULL document is required.
+//    assert(mapping > 0
+//            && document.nodes.start + mapping <= document.nodes.top)
+//                            // Valid mapping id is required.
+//    assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
+//                            // A mapping node is required.
+//    assert(key > 0 && document.nodes.start + key <= document.nodes.top)
+//                            // Valid key id is required.
+//    assert(value > 0 && document.nodes.start + value <= document.nodes.top)
+//                            // Valid value id is required.
+//
+//    pair.key = key
+//    pair.value = value
+//
+//    if (!PUSH(&context,
+//                document.nodes.start[mapping-1].data.mapping.pairs, pair))
+//        return 0
+//
+//    return 1
+//}
+//
+//
diff --git a/vendor/gopkg.in/yaml.v2/decode.go b/vendor/gopkg.in/yaml.v2/decode.go
new file mode 100644
index 0000000..e4e56e2
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/decode.go
@@ -0,0 +1,775 @@
+package yaml
+
+import (
+	"encoding"
+	"encoding/base64"
+	"fmt"
+	"io"
+	"math"
+	"reflect"
+	"strconv"
+	"time"
+)
+
+const (
+	documentNode = 1 << iota
+	mappingNode
+	sequenceNode
+	scalarNode
+	aliasNode
+)
+
+type node struct {
+	kind         int
+	line, column int
+	tag          string
+	// For an alias node, alias holds the resolved alias.
+	alias    *node
+	value    string
+	implicit bool
+	children []*node
+	anchors  map[string]*node
+}
+
+// ----------------------------------------------------------------------------
+// Parser, produces a node tree out of a libyaml event stream.
+
+type parser struct {
+	parser   yaml_parser_t
+	event    yaml_event_t
+	doc      *node
+	doneInit bool
+}
+
+func newParser(b []byte) *parser {
+	p := parser{}
+	if !yaml_parser_initialize(&p.parser) {
+		panic("failed to initialize YAML parser")
+	}
+	if len(b) == 0 {
+		b = []byte{'\n'}
+	}
+	yaml_parser_set_input_string(&p.parser, b)
+	return &p
+}
+
+func newParserFromReader(r io.Reader) *parser {
+	p := parser{}
+	if !yaml_parser_initialize(&p.parser) {
+		panic("failed to initialize YAML parser")
+	}
+	yaml_parser_set_input_reader(&p.parser, r)
+	return &p
+}
+
+func (p *parser) init() {
+	if p.doneInit {
+		return
+	}
+	p.expect(yaml_STREAM_START_EVENT)
+	p.doneInit = true
+}
+
+func (p *parser) destroy() {
+	if p.event.typ != yaml_NO_EVENT {
+		yaml_event_delete(&p.event)
+	}
+	yaml_parser_delete(&p.parser)
+}
+
+// expect consumes an event from the event stream and
+// checks that it's of the expected type.
+func (p *parser) expect(e yaml_event_type_t) {
+	if p.event.typ == yaml_NO_EVENT {
+		if !yaml_parser_parse(&p.parser, &p.event) {
+			p.fail()
+		}
+	}
+	if p.event.typ == yaml_STREAM_END_EVENT {
+		failf("attempted to go past the end of stream; corrupted value?")
+	}
+	if p.event.typ != e {
+		p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ)
+		p.fail()
+	}
+	yaml_event_delete(&p.event)
+	p.event.typ = yaml_NO_EVENT
+}
+
+// peek peeks at the next event in the event stream,
+// puts the results into p.event and returns the event type.
+func (p *parser) peek() yaml_event_type_t {
+	if p.event.typ != yaml_NO_EVENT {
+		return p.event.typ
+	}
+	if !yaml_parser_parse(&p.parser, &p.event) {
+		p.fail()
+	}
+	return p.event.typ
+}
+
+func (p *parser) fail() {
+	var where string
+	var line int
+	if p.parser.problem_mark.line != 0 {
+		line = p.parser.problem_mark.line
+		// Scanner errors don't increment the line number before returning, so do it here.
+		if p.parser.error == yaml_SCANNER_ERROR {
+			line++
+		}
+	} else if p.parser.context_mark.line != 0 {
+		line = p.parser.context_mark.line
+	}
+	if line != 0 {
+		where = "line " + strconv.Itoa(line) + ": "
+	}
+	var msg string
+	if len(p.parser.problem) > 0 {
+		msg = p.parser.problem
+	} else {
+		msg = "unknown problem parsing YAML content"
+	}
+	failf("%s%s", where, msg)
+}
+
+func (p *parser) anchor(n *node, anchor []byte) {
+	if anchor != nil {
+		p.doc.anchors[string(anchor)] = n
+	}
+}
+
+func (p *parser) parse() *node {
+	p.init()
+	switch p.peek() {
+	case yaml_SCALAR_EVENT:
+		return p.scalar()
+	case yaml_ALIAS_EVENT:
+		return p.alias()
+	case yaml_MAPPING_START_EVENT:
+		return p.mapping()
+	case yaml_SEQUENCE_START_EVENT:
+		return p.sequence()
+	case yaml_DOCUMENT_START_EVENT:
+		return p.document()
+	case yaml_STREAM_END_EVENT:
+		// Happens when attempting to decode an empty buffer.
+		return nil
+	default:
+		panic("attempted to parse unknown event: " + p.event.typ.String())
+	}
+}
+
+func (p *parser) node(kind int) *node {
+	return &node{
+		kind:   kind,
+		line:   p.event.start_mark.line,
+		column: p.event.start_mark.column,
+	}
+}
+
+func (p *parser) document() *node {
+	n := p.node(documentNode)
+	n.anchors = make(map[string]*node)
+	p.doc = n
+	p.expect(yaml_DOCUMENT_START_EVENT)
+	n.children = append(n.children, p.parse())
+	p.expect(yaml_DOCUMENT_END_EVENT)
+	return n
+}
+
+func (p *parser) alias() *node {
+	n := p.node(aliasNode)
+	n.value = string(p.event.anchor)
+	n.alias = p.doc.anchors[n.value]
+	if n.alias == nil {
+		failf("unknown anchor '%s' referenced", n.value)
+	}
+	p.expect(yaml_ALIAS_EVENT)
+	return n
+}
+
+func (p *parser) scalar() *node {
+	n := p.node(scalarNode)
+	n.value = string(p.event.value)
+	n.tag = string(p.event.tag)
+	n.implicit = p.event.implicit
+	p.anchor(n, p.event.anchor)
+	p.expect(yaml_SCALAR_EVENT)
+	return n
+}
+
+func (p *parser) sequence() *node {
+	n := p.node(sequenceNode)
+	p.anchor(n, p.event.anchor)
+	p.expect(yaml_SEQUENCE_START_EVENT)
+	for p.peek() != yaml_SEQUENCE_END_EVENT {
+		n.children = append(n.children, p.parse())
+	}
+	p.expect(yaml_SEQUENCE_END_EVENT)
+	return n
+}
+
+func (p *parser) mapping() *node {
+	n := p.node(mappingNode)
+	p.anchor(n, p.event.anchor)
+	p.expect(yaml_MAPPING_START_EVENT)
+	for p.peek() != yaml_MAPPING_END_EVENT {
+		n.children = append(n.children, p.parse(), p.parse())
+	}
+	p.expect(yaml_MAPPING_END_EVENT)
+	return n
+}
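+
+// Illustrative sketch (not in the upstream source): for a small document such as
+//
+//	a: 1
+//	b: [x, y]
+//
+// the functions above build a tree shaped roughly like
+//
+//	documentNode
+//	    mappingNode
+//	        scalarNode "a"
+//	        scalarNode "1"
+//	        scalarNode "b"
+//	        sequenceNode
+//	            scalarNode "x"
+//	            scalarNode "y"
+//
+// Mapping children alternate key, value, key, value, which is why mapping()
+// calls p.parse() twice per iteration.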
+
+// ----------------------------------------------------------------------------
+// Decoder, unmarshals a node into a provided value.
+
+type decoder struct {
+	doc     *node
+	aliases map[*node]bool
+	mapType reflect.Type
+	terrors []string
+	strict  bool
+}
+
+var (
+	mapItemType    = reflect.TypeOf(MapItem{})
+	durationType   = reflect.TypeOf(time.Duration(0))
+	defaultMapType = reflect.TypeOf(map[interface{}]interface{}{})
+	ifaceType      = defaultMapType.Elem()
+	timeType       = reflect.TypeOf(time.Time{})
+	ptrTimeType    = reflect.TypeOf(&time.Time{})
+)
+
+func newDecoder(strict bool) *decoder {
+	d := &decoder{mapType: defaultMapType, strict: strict}
+	d.aliases = make(map[*node]bool)
+	return d
+}
+
+func (d *decoder) terror(n *node, tag string, out reflect.Value) {
+	if n.tag != "" {
+		tag = n.tag
+	}
+	value := n.value
+	if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG {
+		if len(value) > 10 {
+			value = " `" + value[:7] + "...`"
+		} else {
+			value = " `" + value + "`"
+		}
+	}
+	d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type()))
+}
+
+func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) {
+	terrlen := len(d.terrors)
+	err := u.UnmarshalYAML(func(v interface{}) (err error) {
+		defer handleErr(&err)
+		d.unmarshal(n, reflect.ValueOf(v))
+		if len(d.terrors) > terrlen {
+			issues := d.terrors[terrlen:]
+			d.terrors = d.terrors[:terrlen]
+			return &TypeError{issues}
+		}
+		return nil
+	})
+	if e, ok := err.(*TypeError); ok {
+		d.terrors = append(d.terrors, e.Errors...)
+		return false
+	}
+	if err != nil {
+		fail(err)
+	}
+	return true
+}
+
+// d.prepare initializes and dereferences pointers and calls UnmarshalYAML
+// if a value is found to implement it.
+// It returns the initialized and dereferenced out value, whether
+// unmarshalling was already done by UnmarshalYAML, and if so whether
+// it succeeded.
+//
+// If n holds a null value, prepare returns before doing anything.
+func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
+	if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) {
+		return out, false, false
+	}
+	again := true
+	for again {
+		again = false
+		if out.Kind() == reflect.Ptr {
+			if out.IsNil() {
+				out.Set(reflect.New(out.Type().Elem()))
+			}
+			out = out.Elem()
+			again = true
+		}
+		if out.CanAddr() {
+			if u, ok := out.Addr().Interface().(Unmarshaler); ok {
+				good = d.callUnmarshaler(n, u)
+				return out, true, good
+			}
+		}
+	}
+	return out, false, false
+}
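+
+// Hedged usage sketch (the example type and its parsing are assumptions, not
+// part of this package): any type whose pointer implements Unmarshaler is
+// picked up by prepare() above and takes over its own decoding. Assuming
+// strconv and strings are imported:
+//
+//	type Celsius float64
+//
+//	func (c *Celsius) UnmarshalYAML(unmarshal func(interface{}) error) error {
+//		var s string
+//		if err := unmarshal(&s); err != nil {
+//			return err
+//		}
+//		v, err := strconv.ParseFloat(strings.TrimSuffix(s, "C"), 64)
+//		if err != nil {
+//			return err
+//		}
+//		*c = Celsius(v)
+//		return nil
+//	}
+//
+// Decoding "temp: 21.5C" into a struct with a Celsius field then routes the
+// scalar through UnmarshalYAML instead of the default scalar handling.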
+
+func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) {
+	switch n.kind {
+	case documentNode:
+		return d.document(n, out)
+	case aliasNode:
+		return d.alias(n, out)
+	}
+	out, unmarshaled, good := d.prepare(n, out)
+	if unmarshaled {
+		return good
+	}
+	switch n.kind {
+	case scalarNode:
+		good = d.scalar(n, out)
+	case mappingNode:
+		good = d.mapping(n, out)
+	case sequenceNode:
+		good = d.sequence(n, out)
+	default:
+		panic("internal error: unknown node kind: " + strconv.Itoa(n.kind))
+	}
+	return good
+}
+
+func (d *decoder) document(n *node, out reflect.Value) (good bool) {
+	if len(n.children) == 1 {
+		d.doc = n
+		d.unmarshal(n.children[0], out)
+		return true
+	}
+	return false
+}
+
+func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
+	if d.aliases[n] {
+		// TODO this could actually be allowed in some circumstances.
+		failf("anchor '%s' value contains itself", n.value)
+	}
+	d.aliases[n] = true
+	good = d.unmarshal(n.alias, out)
+	delete(d.aliases, n)
+	return good
+}
+
+var zeroValue reflect.Value
+
+func resetMap(out reflect.Value) {
+	for _, k := range out.MapKeys() {
+		out.SetMapIndex(k, zeroValue)
+	}
+}
+
+func (d *decoder) scalar(n *node, out reflect.Value) bool {
+	var tag string
+	var resolved interface{}
+	if n.tag == "" && !n.implicit {
+		tag = yaml_STR_TAG
+		resolved = n.value
+	} else {
+		tag, resolved = resolve(n.tag, n.value)
+		if tag == yaml_BINARY_TAG {
+			data, err := base64.StdEncoding.DecodeString(resolved.(string))
+			if err != nil {
+				failf("!!binary value contains invalid base64 data")
+			}
+			resolved = string(data)
+		}
+	}
+	if resolved == nil {
+		if out.Kind() == reflect.Map && !out.CanAddr() {
+			resetMap(out)
+		} else {
+			out.Set(reflect.Zero(out.Type()))
+		}
+		return true
+	}
+	if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
+		// We've resolved to exactly the type we want, so use that.
+		out.Set(resolvedv)
+		return true
+	}
+	// Perhaps the target implements encoding.TextUnmarshaler and
+	// can be set from the scalar's text.
+	if out.CanAddr() {
+		u, ok := out.Addr().Interface().(encoding.TextUnmarshaler)
+		if ok {
+			var text []byte
+			if tag == yaml_BINARY_TAG {
+				text = []byte(resolved.(string))
+			} else {
+				// We let any value be unmarshaled into TextUnmarshaler.
+				// That might be more lax than we'd like, but the
+				// TextUnmarshaler itself should reject any dubious values.
+				text = []byte(n.value)
+			}
+			err := u.UnmarshalText(text)
+			if err != nil {
+				fail(err)
+			}
+			return true
+		}
+	}
+	switch out.Kind() {
+	case reflect.String:
+		if tag == yaml_BINARY_TAG {
+			out.SetString(resolved.(string))
+			return true
+		}
+		if resolved != nil {
+			out.SetString(n.value)
+			return true
+		}
+	case reflect.Interface:
+		if resolved == nil {
+			out.Set(reflect.Zero(out.Type()))
+		} else if tag == yaml_TIMESTAMP_TAG {
+			// It looks like a timestamp but for backward compatibility
+			// reasons we set it as a string, so that code that unmarshals
+			// timestamp-like values into interface{} will continue to
+			// see a string and not a time.Time.
+			// TODO(v3) Drop this.
+			out.Set(reflect.ValueOf(n.value))
+		} else {
+			out.Set(reflect.ValueOf(resolved))
+		}
+		return true
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		switch resolved := resolved.(type) {
+		case int:
+			if !out.OverflowInt(int64(resolved)) {
+				out.SetInt(int64(resolved))
+				return true
+			}
+		case int64:
+			if !out.OverflowInt(resolved) {
+				out.SetInt(resolved)
+				return true
+			}
+		case uint64:
+			if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+				out.SetInt(int64(resolved))
+				return true
+			}
+		case float64:
+			if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+				out.SetInt(int64(resolved))
+				return true
+			}
+		case string:
+			if out.Type() == durationType {
+				d, err := time.ParseDuration(resolved)
+				if err == nil {
+					out.SetInt(int64(d))
+					return true
+				}
+			}
+		}
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		switch resolved := resolved.(type) {
+		case int:
+			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				return true
+			}
+		case int64:
+			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				return true
+			}
+		case uint64:
+			if !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				return true
+			}
+		case float64:
+			if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				return true
+			}
+		}
+	case reflect.Bool:
+		switch resolved := resolved.(type) {
+		case bool:
+			out.SetBool(resolved)
+			return true
+		}
+	case reflect.Float32, reflect.Float64:
+		switch resolved := resolved.(type) {
+		case int:
+			out.SetFloat(float64(resolved))
+			return true
+		case int64:
+			out.SetFloat(float64(resolved))
+			return true
+		case uint64:
+			out.SetFloat(float64(resolved))
+			return true
+		case float64:
+			out.SetFloat(resolved)
+			return true
+		}
+	case reflect.Struct:
+		if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
+			out.Set(resolvedv)
+			return true
+		}
+	case reflect.Ptr:
+		if out.Type().Elem() == reflect.TypeOf(resolved) {
+			// TODO Does this make sense? When is out a Ptr except when decoding a nil value?
+			elem := reflect.New(out.Type().Elem())
+			elem.Elem().Set(reflect.ValueOf(resolved))
+			out.Set(elem)
+			return true
+		}
+	}
+	d.terror(n, tag, out)
+	return false
+}
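+
+// Hedged sketch (assumed usage, not upstream code): because scalar() above
+// falls back to encoding.TextUnmarshaler, standard-library types such as
+// net.IP decode from plain scalars with no YAML-specific support:
+//
+//	var addr net.IP
+//	err := yaml.Unmarshal([]byte("127.0.0.1"), &addr) // uses IP.UnmarshalText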
+
+func settableValueOf(i interface{}) reflect.Value {
+	v := reflect.ValueOf(i)
+	sv := reflect.New(v.Type()).Elem()
+	sv.Set(v)
+	return sv
+}
+
+func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
+	l := len(n.children)
+
+	var iface reflect.Value
+	switch out.Kind() {
+	case reflect.Slice:
+		out.Set(reflect.MakeSlice(out.Type(), l, l))
+	case reflect.Array:
+		if l != out.Len() {
+			failf("invalid array: want %d elements but got %d", out.Len(), l)
+		}
+	case reflect.Interface:
+		// No type hints. Will have to use a generic sequence.
+		iface = out
+		out = settableValueOf(make([]interface{}, l))
+	default:
+		d.terror(n, yaml_SEQ_TAG, out)
+		return false
+	}
+	et := out.Type().Elem()
+
+	j := 0
+	for i := 0; i < l; i++ {
+		e := reflect.New(et).Elem()
+		if ok := d.unmarshal(n.children[i], e); ok {
+			out.Index(j).Set(e)
+			j++
+		}
+	}
+	if out.Kind() != reflect.Array {
+		out.Set(out.Slice(0, j))
+	}
+	if iface.IsValid() {
+		iface.Set(out)
+	}
+	return true
+}
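+
+// Hedged sketch (assumption): sequence() above sizes slices eagerly and
+// enforces exact lengths for arrays, as seen from a client of this package:
+//
+//	var xs []int
+//	_ = yaml.Unmarshal([]byte("[1, 2, 3]"), &xs) // xs == []int{1, 2, 3}
+//
+//	var arr [2]int
+//	err := yaml.Unmarshal([]byte("[1, 2, 3]"), &arr)
+//	// err reports: invalid array: want 2 elements but got 3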
+
+func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
+	switch out.Kind() {
+	case reflect.Struct:
+		return d.mappingStruct(n, out)
+	case reflect.Slice:
+		return d.mappingSlice(n, out)
+	case reflect.Map:
+		// okay
+	case reflect.Interface:
+		if d.mapType.Kind() == reflect.Map {
+			iface := out
+			out = reflect.MakeMap(d.mapType)
+			iface.Set(out)
+		} else {
+			slicev := reflect.New(d.mapType).Elem()
+			if !d.mappingSlice(n, slicev) {
+				return false
+			}
+			out.Set(slicev)
+			return true
+		}
+	default:
+		d.terror(n, yaml_MAP_TAG, out)
+		return false
+	}
+	outt := out.Type()
+	kt := outt.Key()
+	et := outt.Elem()
+
+	mapType := d.mapType
+	if outt.Key() == ifaceType && outt.Elem() == ifaceType {
+		d.mapType = outt
+	}
+
+	if out.IsNil() {
+		out.Set(reflect.MakeMap(outt))
+	}
+	l := len(n.children)
+	for i := 0; i < l; i += 2 {
+		if isMerge(n.children[i]) {
+			d.merge(n.children[i+1], out)
+			continue
+		}
+		k := reflect.New(kt).Elem()
+		if d.unmarshal(n.children[i], k) {
+			kkind := k.Kind()
+			if kkind == reflect.Interface {
+				kkind = k.Elem().Kind()
+			}
+			if kkind == reflect.Map || kkind == reflect.Slice {
+				failf("invalid map key: %#v", k.Interface())
+			}
+			e := reflect.New(et).Elem()
+			if d.unmarshal(n.children[i+1], e) {
+				d.setMapIndex(n.children[i+1], out, k, e)
+			}
+		}
+	}
+	d.mapType = mapType
+	return true
+}
+
+func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) {
+	if d.strict && out.MapIndex(k) != zeroValue {
+		d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface()))
+		return
+	}
+	out.SetMapIndex(k, v)
+}
+
+func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) {
+	outt := out.Type()
+	if outt.Elem() != mapItemType {
+		d.terror(n, yaml_MAP_TAG, out)
+		return false
+	}
+
+	mapType := d.mapType
+	d.mapType = outt
+
+	var slice []MapItem
+	var l = len(n.children)
+	for i := 0; i < l; i += 2 {
+		if isMerge(n.children[i]) {
+			d.merge(n.children[i+1], out)
+			continue
+		}
+		item := MapItem{}
+		k := reflect.ValueOf(&item.Key).Elem()
+		if d.unmarshal(n.children[i], k) {
+			v := reflect.ValueOf(&item.Value).Elem()
+			if d.unmarshal(n.children[i+1], v) {
+				slice = append(slice, item)
+			}
+		}
+	}
+	out.Set(reflect.ValueOf(slice))
+	d.mapType = mapType
+	return true
+}
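+
+// Hedged usage sketch (assumption): decoding into MapSlice goes through
+// mappingSlice() above and preserves the document's key order, which a Go
+// map would lose:
+//
+//	var ms yaml.MapSlice
+//	_ = yaml.Unmarshal([]byte("b: 2\na: 1\n"), &ms)
+//	// ms == yaml.MapSlice{{Key: "b", Value: 2}, {Key: "a", Value: 1}}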
+
+func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
+	sinfo, err := getStructInfo(out.Type())
+	if err != nil {
+		panic(err)
+	}
+	name := settableValueOf("")
+	l := len(n.children)
+
+	var inlineMap reflect.Value
+	var elemType reflect.Type
+	if sinfo.InlineMap != -1 {
+		inlineMap = out.Field(sinfo.InlineMap)
+		inlineMap.Set(reflect.New(inlineMap.Type()).Elem())
+		elemType = inlineMap.Type().Elem()
+	}
+
+	var doneFields []bool
+	if d.strict {
+		doneFields = make([]bool, len(sinfo.FieldsList))
+	}
+	for i := 0; i < l; i += 2 {
+		ni := n.children[i]
+		if isMerge(ni) {
+			d.merge(n.children[i+1], out)
+			continue
+		}
+		if !d.unmarshal(ni, name) {
+			continue
+		}
+		if info, ok := sinfo.FieldsMap[name.String()]; ok {
+			if d.strict {
+				if doneFields[info.Id] {
+					d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type()))
+					continue
+				}
+				doneFields[info.Id] = true
+			}
+			var field reflect.Value
+			if info.Inline == nil {
+				field = out.Field(info.Num)
+			} else {
+				field = out.FieldByIndex(info.Inline)
+			}
+			d.unmarshal(n.children[i+1], field)
+		} else if sinfo.InlineMap != -1 {
+			if inlineMap.IsNil() {
+				inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
+			}
+			value := reflect.New(elemType).Elem()
+			d.unmarshal(n.children[i+1], value)
+			d.setMapIndex(n.children[i+1], inlineMap, name, value)
+		} else if d.strict {
+			d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type()))
+		}
+	}
+	return true
+}
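+
+// Hedged sketch (assumption): mappingStruct() above matches YAML keys against
+// the field info built by getStructInfo, so the usual yaml struct tags apply:
+//
+//	type Config struct {
+//		Name  string            `yaml:"name"`
+//		Port  int               `yaml:"port"`
+//		Extra map[string]string `yaml:",inline"` // collects unknown keys
+//	}
+//
+// With strict decoding and no inline map, a key that matches no field is
+// reported as a type error instead of being silently dropped.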
+
+func failWantMap() {
+	failf("map merge requires map or sequence of maps as the value")
+}
+
+func (d *decoder) merge(n *node, out reflect.Value) {
+	switch n.kind {
+	case mappingNode:
+		d.unmarshal(n, out)
+	case aliasNode:
+		an, ok := d.doc.anchors[n.value]
+		if ok && an.kind != mappingNode {
+			failWantMap()
+		}
+		d.unmarshal(n, out)
+	case sequenceNode:
+		// Step backwards as earlier nodes take precedence.
+		for i := len(n.children) - 1; i >= 0; i-- {
+			ni := n.children[i]
+			if ni.kind == aliasNode {
+				an, ok := d.doc.anchors[ni.value]
+				if ok && an.kind != mappingNode {
+					failWantMap()
+				}
+			} else if ni.kind != mappingNode {
+				failWantMap()
+			}
+			d.unmarshal(ni, out)
+		}
+	default:
+		failWantMap()
+	}
+}
+
+func isMerge(n *node) bool {
+	return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG)
+}
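+
+// Hedged sketch (assumption): merge() and isMerge() above implement YAML's
+// "<<" merge key, so an alias can supply defaults that later keys override:
+//
+//	defaults: &defaults
+//	  adapter: postgres
+//	  host: localhost
+//
+//	development:
+//	  <<: *defaults
+//	  database: dev
+//
+// Decoding "development" yields adapter, host, and database, because merge()
+// unmarshals the aliased mapping into the same destination value.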
diff --git a/vendor/gopkg.in/yaml.v2/emitterc.go b/vendor/gopkg.in/yaml.v2/emitterc.go
new file mode 100644
index 0000000..a1c2cc5
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/emitterc.go
@@ -0,0 +1,1685 @@
+package yaml
+
+import (
+	"bytes"
+	"fmt"
+)
+
+// Flush the buffer if needed.
+func flush(emitter *yaml_emitter_t) bool {
+	if emitter.buffer_pos+5 >= len(emitter.buffer) {
+		return yaml_emitter_flush(emitter)
+	}
+	return true
+}
+
+// Put a character into the output buffer.
+func put(emitter *yaml_emitter_t, value byte) bool {
+	if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+		return false
+	}
+	emitter.buffer[emitter.buffer_pos] = value
+	emitter.buffer_pos++
+	emitter.column++
+	return true
+}
+
+// Put a line break into the output buffer.
+func put_break(emitter *yaml_emitter_t) bool {
+	if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+		return false
+	}
+	switch emitter.line_break {
+	case yaml_CR_BREAK:
+		emitter.buffer[emitter.buffer_pos] = '\r'
+		emitter.buffer_pos += 1
+	case yaml_LN_BREAK:
+		emitter.buffer[emitter.buffer_pos] = '\n'
+		emitter.buffer_pos += 1
+	case yaml_CRLN_BREAK:
+		emitter.buffer[emitter.buffer_pos+0] = '\r'
+		emitter.buffer[emitter.buffer_pos+1] = '\n'
+		emitter.buffer_pos += 2
+	default:
+		panic("unknown line break setting")
+	}
+	emitter.column = 0
+	emitter.line++
+	return true
+}
+
+// Copy a character from a string into the buffer.
+func write(emitter *yaml_emitter_t, s []byte, i *int) bool {
+	if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+		return false
+	}
+	p := emitter.buffer_pos
+	w := width(s[*i])
+	switch w {
+	case 4:
+		emitter.buffer[p+3] = s[*i+3]
+		fallthrough
+	case 3:
+		emitter.buffer[p+2] = s[*i+2]
+		fallthrough
+	case 2:
+		emitter.buffer[p+1] = s[*i+1]
+		fallthrough
+	case 1:
+		emitter.buffer[p+0] = s[*i+0]
+	default:
+		panic("unknown character width")
+	}
+	emitter.column++
+	emitter.buffer_pos += w
+	*i += w
+	return true
+}
+
+// Write a whole string into the buffer.
+func write_all(emitter *yaml_emitter_t, s []byte) bool {
+	for i := 0; i < len(s); {
+		if !write(emitter, s, &i) {
+			return false
+		}
+	}
+	return true
+}
+
+// Copy a line break character from a string into the buffer.
+func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool {
+	if s[*i] == '\n' {
+		if !put_break(emitter) {
+			return false
+		}
+		*i++
+	} else {
+		if !write(emitter, s, i) {
+			return false
+		}
+		emitter.column = 0
+		emitter.line++
+	}
+	return true
+}
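+
+// Note (an inference, not an upstream comment): the "+5" headroom in the
+// buffer checks above appears to cover the widest single write, a 4-byte
+// UTF-8 sequence, so a flush is never needed halfway through one character.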
+
+// Set an emitter error and return false.
+func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool {
+	emitter.error = yaml_EMITTER_ERROR
+	emitter.problem = problem
+	return false
+}
+
+// Emit an event.
+func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	emitter.events = append(emitter.events, *event)
+	for !yaml_emitter_need_more_events(emitter) {
+		event := &emitter.events[emitter.events_head]
+		if !yaml_emitter_analyze_event(emitter, event) {
+			return false
+		}
+		if !yaml_emitter_state_machine(emitter, event) {
+			return false
+		}
+		yaml_event_delete(event)
+		emitter.events_head++
+	}
+	return true
+}
+
+// Check if we need to accumulate more events before emitting.
+//
+// We accumulate extra events before emitting:
+//  - 1 event for DOCUMENT-START
+//  - 2 events for SEQUENCE-START
+//  - 3 events for MAPPING-START
+//
+func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool {
+	if emitter.events_head == len(emitter.events) {
+		return true
+	}
+	var accumulate int
+	switch emitter.events[emitter.events_head].typ {
+	case yaml_DOCUMENT_START_EVENT:
+		accumulate = 1
+	case yaml_SEQUENCE_START_EVENT:
+		accumulate = 2
+	case yaml_MAPPING_START_EVENT:
+		accumulate = 3
+	default:
+		return false
+	}
+	if len(emitter.events)-emitter.events_head > accumulate {
+		return false
+	}
+	var level int
+	for i := emitter.events_head; i < len(emitter.events); i++ {
+		switch emitter.events[i].typ {
+		case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT:
+			level++
+		case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT:
+			level--
+		}
+		if level == 0 {
+			return false
+		}
+	}
+	return true
+}
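+
+// Illustrative note (assumption): the look-ahead above is what lets checks
+// such as yaml_emitter_check_empty_sequence see one event past the opener.
+// For an empty sequence the buffered events are
+//
+//	SEQUENCE-START, SEQUENCE-END
+//
+// and both must be present before the emitter can decide to write "[]" in
+// flow style.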
+
+// Append a directive to the directives stack.
+func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool {
+	for i := 0; i < len(emitter.tag_directives); i++ {
+		if bytes.Equal(value.handle, emitter.tag_directives[i].handle) {
+			if allow_duplicates {
+				return true
+			}
+			return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive")
+		}
+	}
+
+	// [Go] Do we actually need to copy this given garbage collection
+	// and the lack of deallocating destructors?
+	tag_copy := yaml_tag_directive_t{
+		handle: make([]byte, len(value.handle)),
+		prefix: make([]byte, len(value.prefix)),
+	}
+	copy(tag_copy.handle, value.handle)
+	copy(tag_copy.prefix, value.prefix)
+	emitter.tag_directives = append(emitter.tag_directives, tag_copy)
+	return true
+}
+
+// Increase the indentation level.
+func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool {
+	emitter.indents = append(emitter.indents, emitter.indent)
+	if emitter.indent < 0 {
+		if flow {
+			emitter.indent = emitter.best_indent
+		} else {
+			emitter.indent = 0
+		}
+	} else if !indentless {
+		emitter.indent += emitter.best_indent
+	}
+	return true
+}
+
+// State dispatcher.
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	switch emitter.state {
+	case yaml_EMIT_STREAM_START_STATE:
+		return yaml_emitter_emit_stream_start(emitter, event)
+
+	case yaml_EMIT_FIRST_DOCUMENT_START_STATE:
+		return yaml_emitter_emit_document_start(emitter, event, true)
+
+	case yaml_EMIT_DOCUMENT_START_STATE:
+		return yaml_emitter_emit_document_start(emitter, event, false)
+
+	case yaml_EMIT_DOCUMENT_CONTENT_STATE:
+		return yaml_emitter_emit_document_content(emitter, event)
+
+	case yaml_EMIT_DOCUMENT_END_STATE:
+		return yaml_emitter_emit_document_end(emitter, event)
+
+	case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE:
+		return yaml_emitter_emit_flow_sequence_item(emitter, event, true)
+
+	case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE:
+		return yaml_emitter_emit_flow_sequence_item(emitter, event, false)
+
+	case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE:
+		return yaml_emitter_emit_flow_mapping_key(emitter, event, true)
+
+	case yaml_EMIT_FLOW_MAPPING_KEY_STATE:
+		return yaml_emitter_emit_flow_mapping_key(emitter, event, false)
+
+	case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE:
+		return yaml_emitter_emit_flow_mapping_value(emitter, event, true)
+
+	case yaml_EMIT_FLOW_MAPPING_VALUE_STATE:
+		return yaml_emitter_emit_flow_mapping_value(emitter, event, false)
+
+	case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE:
+		return yaml_emitter_emit_block_sequence_item(emitter, event, true)
+
+	case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE:
+		return yaml_emitter_emit_block_sequence_item(emitter, event, false)
+
+	case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE:
+		return yaml_emitter_emit_block_mapping_key(emitter, event, true)
+
+	case yaml_EMIT_BLOCK_MAPPING_KEY_STATE:
+		return yaml_emitter_emit_block_mapping_key(emitter, event, false)
+
+	case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE:
+		return yaml_emitter_emit_block_mapping_value(emitter, event, true)
+
+	case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE:
+		return yaml_emitter_emit_block_mapping_value(emitter, event, false)
+
+	case yaml_EMIT_END_STATE:
+		return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END")
+	}
+	panic("invalid emitter state")
+}
+
+// Expect STREAM-START.
+func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if event.typ != yaml_STREAM_START_EVENT {
+		return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START")
+	}
+	if emitter.encoding == yaml_ANY_ENCODING {
+		emitter.encoding = event.encoding
+		if emitter.encoding == yaml_ANY_ENCODING {
+			emitter.encoding = yaml_UTF8_ENCODING
+		}
+	}
+	if emitter.best_indent < 2 || emitter.best_indent > 9 {
+		emitter.best_indent = 2
+	}
+	if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 {
+		emitter.best_width = 80
+	}
+	if emitter.best_width < 0 {
+		emitter.best_width = 1<<31 - 1
+	}
+	if emitter.line_break == yaml_ANY_BREAK {
+		emitter.line_break = yaml_LN_BREAK
+	}
+
+	emitter.indent = -1
+	emitter.line = 0
+	emitter.column = 0
+	emitter.whitespace = true
+	emitter.indention = true
+
+	if emitter.encoding != yaml_UTF8_ENCODING {
+		if !yaml_emitter_write_bom(emitter) {
+			return false
+		}
+	}
+	emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE
+	return true
+}
+
+// Expect DOCUMENT-START or STREAM-END.
+func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+
+	if event.typ == yaml_DOCUMENT_START_EVENT {
+
+		if event.version_directive != nil {
+			if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) {
+				return false
+			}
+		}
+
+		for i := 0; i < len(event.tag_directives); i++ {
+			tag_directive := &event.tag_directives[i]
+			if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) {
+				return false
+			}
+			if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) {
+				return false
+			}
+		}
+
+		for i := 0; i < len(default_tag_directives); i++ {
+			tag_directive := &default_tag_directives[i]
+			if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) {
+				return false
+			}
+		}
+
+		implicit := event.implicit
+		if !first || emitter.canonical {
+			implicit = false
+		}
+
+		if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) {
+			if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+				return false
+			}
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+
+		if event.version_directive != nil {
+			implicit = false
+			if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) {
+				return false
+			}
+			if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) {
+				return false
+			}
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+
+		if len(event.tag_directives) > 0 {
+			implicit = false
+			for i := 0; i < len(event.tag_directives); i++ {
+				tag_directive := &event.tag_directives[i]
+				if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) {
+					return false
+				}
+				if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) {
+					return false
+				}
+				if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) {
+					return false
+				}
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+			}
+		}
+
+		if yaml_emitter_check_empty_document(emitter) {
+			implicit = false
+		}
+		if !implicit {
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+			if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) {
+				return false
+			}
+			if emitter.canonical {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+			}
+		}
+
+		emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE
+		return true
+	}
+
+	if event.typ == yaml_STREAM_END_EVENT {
+		if emitter.open_ended {
+			if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+				return false
+			}
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+		if !yaml_emitter_flush(emitter) {
+			return false
+		}
+		emitter.state = yaml_EMIT_END_STATE
+		return true
+	}
+
+	return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END")
+}
+
+// Expect the root node.
+func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE)
+	return yaml_emitter_emit_node(emitter, event, true, false, false, false)
+}
+
+// Expect DOCUMENT-END.
+func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if event.typ != yaml_DOCUMENT_END_EVENT {
+		return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END")
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if !event.implicit {
+		// [Go] Allocate the slice elsewhere.
+		if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+			return false
+		}
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+	}
+	if !yaml_emitter_flush(emitter) {
+		return false
+	}
+	emitter.state = yaml_EMIT_DOCUMENT_START_STATE
+	emitter.tag_directives = emitter.tag_directives[:0]
+	return true
+}
+
+// Expect a flow item node.
+func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+	if first {
+		if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) {
+			return false
+		}
+		if !yaml_emitter_increase_indent(emitter, true, false) {
+			return false
+		}
+		emitter.flow_level++
+	}
+
+	if event.typ == yaml_SEQUENCE_END_EVENT {
+		emitter.flow_level--
+		emitter.indent = emitter.indents[len(emitter.indents)-1]
+		emitter.indents = emitter.indents[:len(emitter.indents)-1]
+		if emitter.canonical && !first {
+			if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+				return false
+			}
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+		if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) {
+			return false
+		}
+		emitter.state = emitter.states[len(emitter.states)-1]
+		emitter.states = emitter.states[:len(emitter.states)-1]
+
+		return true
+	}
+
+	if !first {
+		if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+			return false
+		}
+	}
+
+	if emitter.canonical || emitter.column > emitter.best_width {
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE)
+	return yaml_emitter_emit_node(emitter, event, false, true, false, false)
+}
+
+// Expect a flow key node.
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+	if first {
+		if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) {
+			return false
+		}
+		if !yaml_emitter_increase_indent(emitter, true, false) {
+			return false
+		}
+		emitter.flow_level++
+	}
+
+	if event.typ == yaml_MAPPING_END_EVENT {
+		emitter.flow_level--
+		emitter.indent = emitter.indents[len(emitter.indents)-1]
+		emitter.indents = emitter.indents[:len(emitter.indents)-1]
+		if emitter.canonical && !first {
+			if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+				return false
+			}
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+		if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) {
+			return false
+		}
+		emitter.state = emitter.states[len(emitter.states)-1]
+		emitter.states = emitter.states[:len(emitter.states)-1]
+		return true
+	}
+
+	if !first {
+		if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+			return false
+		}
+	}
+	if emitter.canonical || emitter.column > emitter.best_width {
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+	}
+
+	if !emitter.canonical && yaml_emitter_check_simple_key(emitter) {
+		emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE)
+		return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) {
+		return false
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE)
+	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a flow value node.
+func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+	if simple {
+		if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+			return false
+		}
+	} else {
+		if emitter.canonical || emitter.column > emitter.best_width {
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+		if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) {
+			return false
+		}
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE)
+	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block item node.
+func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+	if first {
+		if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) {
+			return false
+		}
+	}
+	if event.typ == yaml_SEQUENCE_END_EVENT {
+		emitter.indent = emitter.indents[len(emitter.indents)-1]
+		emitter.indents = emitter.indents[:len(emitter.indents)-1]
+		emitter.state = emitter.states[len(emitter.states)-1]
+		emitter.states = emitter.states[:len(emitter.states)-1]
+		return true
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) {
+		return false
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE)
+	return yaml_emitter_emit_node(emitter, event, false, true, false, false)
+}
+
+// Expect a block key node.
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+	if first {
+		if !yaml_emitter_increase_indent(emitter, false, false) {
+			return false
+		}
+	}
+	if event.typ == yaml_MAPPING_END_EVENT {
+		emitter.indent = emitter.indents[len(emitter.indents)-1]
+		emitter.indents = emitter.indents[:len(emitter.indents)-1]
+		emitter.state = emitter.states[len(emitter.states)-1]
+		emitter.states = emitter.states[:len(emitter.states)-1]
+		return true
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if yaml_emitter_check_simple_key(emitter) {
+		emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
+		return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) {
+		return false
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE)
+	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block value node.
+func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+	if simple {
+		if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+			return false
+		}
+	} else {
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+		if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) {
+			return false
+		}
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE)
+	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a node.
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t,
+	root bool, sequence bool, mapping bool, simple_key bool) bool {
+
+	emitter.root_context = root
+	emitter.sequence_context = sequence
+	emitter.mapping_context = mapping
+	emitter.simple_key_context = simple_key
+
+	switch event.typ {
+	case yaml_ALIAS_EVENT:
+		return yaml_emitter_emit_alias(emitter, event)
+	case yaml_SCALAR_EVENT:
+		return yaml_emitter_emit_scalar(emitter, event)
+	case yaml_SEQUENCE_START_EVENT:
+		return yaml_emitter_emit_sequence_start(emitter, event)
+	case yaml_MAPPING_START_EVENT:
+		return yaml_emitter_emit_mapping_start(emitter, event)
+	default:
+		return yaml_emitter_set_emitter_error(emitter,
+			fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ))
+	}
+}
+
+// Expect ALIAS.
+func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if !yaml_emitter_process_anchor(emitter) {
+		return false
+	}
+	emitter.state = emitter.states[len(emitter.states)-1]
+	emitter.states = emitter.states[:len(emitter.states)-1]
+	return true
+}
+
+// Expect SCALAR.
+func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if !yaml_emitter_select_scalar_style(emitter, event) {
+		return false
+	}
+	if !yaml_emitter_process_anchor(emitter) {
+		return false
+	}
+	if !yaml_emitter_process_tag(emitter) {
+		return false
+	}
+	if !yaml_emitter_increase_indent(emitter, true, false) {
+		return false
+	}
+	if !yaml_emitter_process_scalar(emitter) {
+		return false
+	}
+	emitter.indent = emitter.indents[len(emitter.indents)-1]
+	emitter.indents = emitter.indents[:len(emitter.indents)-1]
+	emitter.state = emitter.states[len(emitter.states)-1]
+	emitter.states = emitter.states[:len(emitter.states)-1]
+	return true
+}
+
+// Expect SEQUENCE-START.
+func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if !yaml_emitter_process_anchor(emitter) {
+		return false
+	}
+	if !yaml_emitter_process_tag(emitter) {
+		return false
+	}
+	if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE ||
+		yaml_emitter_check_empty_sequence(emitter) {
+		emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE
+	} else {
+		emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE
+	}
+	return true
+}
+
+// Expect MAPPING-START.
+func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if !yaml_emitter_process_anchor(emitter) {
+		return false
+	}
+	if !yaml_emitter_process_tag(emitter) {
+		return false
+	}
+	if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE ||
+		yaml_emitter_check_empty_mapping(emitter) {
+		emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE
+	} else {
+		emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE
+	}
+	return true
+}
+
+// Check if the document content is an empty scalar.
+func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool {
+	return false // [Go] Empty-document detection is not implemented; this check always fails.
+}
+
+// Check if the next events represent an empty sequence.
+func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool {
+	if len(emitter.events)-emitter.events_head < 2 {
+		return false
+	}
+	return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT &&
+		emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT
+}
+
+// Check if the next events represent an empty mapping.
+func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool {
+	if len(emitter.events)-emitter.events_head < 2 {
+		return false
+	}
+	return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT &&
+		emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT
+}
+
+// Check if the next node can be expressed as a simple key.
+func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool {
+	length := 0
+	switch emitter.events[emitter.events_head].typ {
+	case yaml_ALIAS_EVENT:
+		length += len(emitter.anchor_data.anchor)
+	case yaml_SCALAR_EVENT:
+		if emitter.scalar_data.multiline {
+			return false
+		}
+		length += len(emitter.anchor_data.anchor) +
+			len(emitter.tag_data.handle) +
+			len(emitter.tag_data.suffix) +
+			len(emitter.scalar_data.value)
+	case yaml_SEQUENCE_START_EVENT:
+		if !yaml_emitter_check_empty_sequence(emitter) {
+			return false
+		}
+		length += len(emitter.anchor_data.anchor) +
+			len(emitter.tag_data.handle) +
+			len(emitter.tag_data.suffix)
+	case yaml_MAPPING_START_EVENT:
+		if !yaml_emitter_check_empty_mapping(emitter) {
+			return false
+		}
+		length += len(emitter.anchor_data.anchor) +
+			len(emitter.tag_data.handle) +
+			len(emitter.tag_data.suffix)
+	default:
+		return false
+	}
+	return length <= 128
+}
+
+// Determine an acceptable scalar style.
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+	no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0
+	if no_tag && !event.implicit && !event.quoted_implicit {
+		return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified")
+	}
+
+	style := event.scalar_style()
+	if style == yaml_ANY_SCALAR_STYLE {
+		style = yaml_PLAIN_SCALAR_STYLE
+	}
+	if emitter.canonical {
+		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+	}
+	if emitter.simple_key_context && emitter.scalar_data.multiline {
+		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+	}
+
+	if style == yaml_PLAIN_SCALAR_STYLE {
+		if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed ||
+			emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed {
+			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+		}
+		if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) {
+			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+		}
+		if no_tag && !event.implicit {
+			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+		}
+	}
+	if style == yaml_SINGLE_QUOTED_SCALAR_STYLE {
+		if !emitter.scalar_data.single_quoted_allowed {
+			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+		}
+	}
+	if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE {
+		if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context {
+			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+		}
+	}
+
+	if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE {
+		emitter.tag_data.handle = []byte{'!'}
+	}
+	emitter.scalar_data.style = style
+	return true
+}
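+
+// Illustrative summary (assumption) of the demotions above:
+//
+//	plain  -> single-quoted  when plain is not allowed in the current context,
+//	                         when the value is empty in a flow or key position,
+//	                         or when there is no tag and the event is not implicit
+//	single -> double-quoted  when single quotes are not allowed for the value
+//	block  -> double-quoted  when block styles are not allowed, inside flow
+//	                         context, or in a simple key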
+
+// Write an anchor.
+func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
+	if emitter.anchor_data.anchor == nil {
+		return true
+	}
+	c := []byte{'&'}
+	if emitter.anchor_data.alias {
+		c[0] = '*'
+	}
+	if !yaml_emitter_write_indicator(emitter, c, true, false, false) {
+		return false
+	}
+	return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor)
+}
+
+// Write a tag.
+func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool {
+	if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 {
+		return true
+	}
+	if len(emitter.tag_data.handle) > 0 {
+		if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) {
+			return false
+		}
+		if len(emitter.tag_data.suffix) > 0 {
+			if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+				return false
+			}
+		}
+	} else {
+		// [Go] Allocate these slices elsewhere.
+		if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) {
+			return false
+		}
+		if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+			return false
+		}
+		if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) {
+			return false
+		}
+	}
+	return true
+}
+
+// Write a scalar.
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool {
+	switch emitter.scalar_data.style {
+	case yaml_PLAIN_SCALAR_STYLE:
+		return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+	case yaml_SINGLE_QUOTED_SCALAR_STYLE:
+		return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+	case yaml_DOUBLE_QUOTED_SCALAR_STYLE:
+		return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+	case yaml_LITERAL_SCALAR_STYLE:
+		return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value)
+
+	case yaml_FOLDED_SCALAR_STYLE:
+		return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value)
+	}
+	panic("unknown scalar style")
+}
+
+// Check if a %YAML directive is valid.
+func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool {
+	if version_directive.major != 1 || version_directive.minor != 1 {
+		return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive")
+	}
+	return true
+}
+
+// Check if a %TAG directive is valid.
+func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool {
+	handle := tag_directive.handle
+	prefix := tag_directive.prefix
+	if len(handle) == 0 {
+		return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty")
+	}
+	if handle[0] != '!' {
+		return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'")
+	}
+	if handle[len(handle)-1] != '!' {
+		return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'")
+	}
+	for i := 1; i < len(handle)-1; i += width(handle[i]) {
+		if !is_alpha(handle, i) {
+			return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumeric characters only")
+		}
+	}
+	if len(prefix) == 0 {
+		return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty")
+	}
+	return true
+}
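+
+// Illustrative example (assumption) of a directive that passes the checks
+// above:
+//
+//	%TAG !e! tag:example.com,2000:app/
+//
+// The handle starts and ends with '!', contains only alphanumeric characters
+// in between, and the prefix is non-empty.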
+
+// Check if an anchor is valid.
+func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool {
+	if len(anchor) == 0 {
+		problem := "anchor value must not be empty"
+		if alias {
+			problem = "alias value must not be empty"
+		}
+		return yaml_emitter_set_emitter_error(emitter, problem)
+	}
+	for i := 0; i < len(anchor); i += width(anchor[i]) {
+		if !is_alpha(anchor, i) {
+			problem := "anchor value must contain alphanumeric characters only"
+			if alias {
+				problem = "alias value must contain alphanumeric characters only"
+			}
+			return yaml_emitter_set_emitter_error(emitter, problem)
+		}
+	}
+	emitter.anchor_data.anchor = anchor
+	emitter.anchor_data.alias = alias
+	return true
+}
+
+// Check if a tag is valid.
+func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool {
+	if len(tag) == 0 {
+		return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty")
+	}
+	for i := 0; i < len(emitter.tag_directives); i++ {
+		tag_directive := &emitter.tag_directives[i]
+		if bytes.HasPrefix(tag, tag_directive.prefix) {
+			emitter.tag_data.handle = tag_directive.handle
+			emitter.tag_data.suffix = tag[len(tag_directive.prefix):]
+			return true
+		}
+	}
+	emitter.tag_data.suffix = tag
+	return true
+}
+
+// Check if a scalar is valid.
+func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
+	var (
+		block_indicators   = false
+		flow_indicators    = false
+		line_breaks        = false
+		special_characters = false
+
+		leading_space  = false
+		leading_break  = false
+		trailing_space = false
+		trailing_break = false
+		break_space    = false
+		space_break    = false
+
+		preceded_by_whitespace = false
+		followed_by_whitespace = false
+		previous_space         = false
+		previous_break         = false
+	)
+
+	emitter.scalar_data.value = value
+
+	if len(value) == 0 {
+		emitter.scalar_data.multiline = false
+		emitter.scalar_data.flow_plain_allowed = false
+		emitter.scalar_data.block_plain_allowed = true
+		emitter.scalar_data.single_quoted_allowed = true
+		emitter.scalar_data.block_allowed = false
+		return true
+	}
+
+	if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) {
+		block_indicators = true
+		flow_indicators = true
+	}
+
+	preceded_by_whitespace = true
+	for i, w := 0, 0; i < len(value); i += w {
+		w = width(value[i])
+		followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w)
+
+		if i == 0 {
+			switch value[i] {
+			case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`':
+				flow_indicators = true
+				block_indicators = true
+			case '?', ':':
+				flow_indicators = true
+				if followed_by_whitespace {
+					block_indicators = true
+				}
+			case '-':
+				if followed_by_whitespace {
+					flow_indicators = true
+					block_indicators = true
+				}
+			}
+		} else {
+			switch value[i] {
+			case ',', '?', '[', ']', '{', '}':
+				flow_indicators = true
+			case ':':
+				flow_indicators = true
+				if followed_by_whitespace {
+					block_indicators = true
+				}
+			case '#':
+				if preceded_by_whitespace {
+					flow_indicators = true
+					block_indicators = true
+				}
+			}
+		}
+
+		if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode {
+			special_characters = true
+		}
+		if is_space(value, i) {
+			if i == 0 {
+				leading_space = true
+			}
+			if i+width(value[i]) == len(value) {
+				trailing_space = true
+			}
+			if previous_break {
+				break_space = true
+			}
+			previous_space = true
+			previous_break = false
+		} else if is_break(value, i) {
+			line_breaks = true
+			if i == 0 {
+				leading_break = true
+			}
+			if i+width(value[i]) == len(value) {
+				trailing_break = true
+			}
+			if previous_space {
+				space_break = true
+			}
+			previous_space = false
+			previous_break = true
+		} else {
+			previous_space = false
+			previous_break = false
+		}
+
+		// [Go] is_blankz also treats the end of the string as whitespace,
+		// but i can never point there because that is the loop condition,
+		// so in practice only the blank and break checks matter here.
+		preceded_by_whitespace = is_blankz(value, i)
+	}
+
+	emitter.scalar_data.multiline = line_breaks
+	emitter.scalar_data.flow_plain_allowed = true
+	emitter.scalar_data.block_plain_allowed = true
+	emitter.scalar_data.single_quoted_allowed = true
+	emitter.scalar_data.block_allowed = true
+
+	if leading_space || leading_break || trailing_space || trailing_break {
+		emitter.scalar_data.flow_plain_allowed = false
+		emitter.scalar_data.block_plain_allowed = false
+	}
+	if trailing_space {
+		emitter.scalar_data.block_allowed = false
+	}
+	if break_space {
+		emitter.scalar_data.flow_plain_allowed = false
+		emitter.scalar_data.block_plain_allowed = false
+		emitter.scalar_data.single_quoted_allowed = false
+	}
+	if space_break || special_characters {
+		emitter.scalar_data.flow_plain_allowed = false
+		emitter.scalar_data.block_plain_allowed = false
+		emitter.scalar_data.single_quoted_allowed = false
+		emitter.scalar_data.block_allowed = false
+	}
+	if line_breaks {
+		emitter.scalar_data.flow_plain_allowed = false
+		emitter.scalar_data.block_plain_allowed = false
+	}
+	if flow_indicators {
+		emitter.scalar_data.flow_plain_allowed = false
+	}
+	if block_indicators {
+		emitter.scalar_data.block_plain_allowed = false
+	}
+	return true
+}
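+
+// Illustrative consequences of the analysis above (a summary, not upstream
+// commentary): a value such as " a" (leading space) rules out both plain
+// styles; "a\nb" (an internal line break) rules out the plain styles and
+// marks the scalar as multiline; and a non-printable byte such as "\x07"
+// leaves the double-quoted style as the only permitted one.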
+
+// Check if the event data is valid.
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+	emitter.anchor_data.anchor = nil
+	emitter.tag_data.handle = nil
+	emitter.tag_data.suffix = nil
+	emitter.scalar_data.value = nil
+
+	switch event.typ {
+	case yaml_ALIAS_EVENT:
+		if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) {
+			return false
+		}
+
+	case yaml_SCALAR_EVENT:
+		if len(event.anchor) > 0 {
+			if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+				return false
+			}
+		}
+		if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) {
+			if !yaml_emitter_analyze_tag(emitter, event.tag) {
+				return false
+			}
+		}
+		if !yaml_emitter_analyze_scalar(emitter, event.value) {
+			return false
+		}
+
+	case yaml_SEQUENCE_START_EVENT:
+		if len(event.anchor) > 0 {
+			if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+				return false
+			}
+		}
+		if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+			if !yaml_emitter_analyze_tag(emitter, event.tag) {
+				return false
+			}
+		}
+
+	case yaml_MAPPING_START_EVENT:
+		if len(event.anchor) > 0 {
+			if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+				return false
+			}
+		}
+		if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+			if !yaml_emitter_analyze_tag(emitter, event.tag) {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+// Write the BOM character.
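+// The three bytes written below are the UTF-8 encoding of U+FEFF.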
+func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool {
+	if !flush(emitter) {
+		return false
+	}
+	pos := emitter.buffer_pos
+	emitter.buffer[pos+0] = '\xEF'
+	emitter.buffer[pos+1] = '\xBB'
+	emitter.buffer[pos+2] = '\xBF'
+	emitter.buffer_pos += 3
+	return true
+}
+
+func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool {
+	indent := emitter.indent
+	if indent < 0 {
+		indent = 0
+	}
+	if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) {
+		if !put_break(emitter) {
+			return false
+		}
+	}
+	for emitter.column < indent {
+		if !put(emitter, ' ') {
+			return false
+		}
+	}
+	emitter.whitespace = true
+	emitter.indention = true
+	return true
+}
+
+func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool {
+	if need_whitespace && !emitter.whitespace {
+		if !put(emitter, ' ') {
+			return false
+		}
+	}
+	if !write_all(emitter, indicator) {
+		return false
+	}
+	emitter.whitespace = is_whitespace
+	emitter.indention = (emitter.indention && is_indention)
+	emitter.open_ended = false
+	return true
+}
+
+func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool {
+	if !write_all(emitter, value) {
+		return false
+	}
+	emitter.whitespace = false
+	emitter.indention = false
+	return true
+}
+
+func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool {
+	if !emitter.whitespace {
+		if !put(emitter, ' ') {
+			return false
+		}
+	}
+	if !write_all(emitter, value) {
+		return false
+	}
+	emitter.whitespace = false
+	emitter.indention = false
+	return true
+}
+
+func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool {
+	if need_whitespace && !emitter.whitespace {
+		if !put(emitter, ' ') {
+			return false
+		}
+	}
+	for i := 0; i < len(value); {
+		var must_write bool
+		switch value[i] {
+		case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']':
+			must_write = true
+		default:
+			must_write = is_alpha(value, i)
+		}
+		if must_write {
+			if !write(emitter, value, &i) {
+				return false
+			}
+		} else {
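+			// The byte is not a URI character: percent-encode each byte
+			// of its UTF-8 sequence as %XX using uppercase hex digits.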
+			w := width(value[i])
+			for k := 0; k < w; k++ {
+				octet := value[i]
+				i++
+				if !put(emitter, '%') {
+					return false
+				}
+
+				c := octet >> 4
+				if c < 10 {
+					c += '0'
+				} else {
+					c += 'A' - 10
+				}
+				if !put(emitter, c) {
+					return false
+				}
+
+				c = octet & 0x0f
+				if c < 10 {
+					c += '0'
+				} else {
+					c += 'A' - 10
+				}
+				if !put(emitter, c) {
+					return false
+				}
+			}
+		}
+	}
+	emitter.whitespace = false
+	emitter.indention = false
+	return true
+}
+
+func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+	if !emitter.whitespace {
+		if !put(emitter, ' ') {
+			return false
+		}
+	}
+
+	spaces := false
+	breaks := false
+	for i := 0; i < len(value); {
+		if is_space(value, i) {
+			if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+				i += width(value[i])
+			} else {
+				if !write(emitter, value, &i) {
+					return false
+				}
+			}
+			spaces = true
+		} else if is_break(value, i) {
+			if !breaks && value[i] == '\n' {
+				if !put_break(emitter) {
+					return false
+				}
+			}
+			if !write_break(emitter, value, &i) {
+				return false
+			}
+			emitter.indention = true
+			breaks = true
+		} else {
+			if breaks {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+			}
+			if !write(emitter, value, &i) {
+				return false
+			}
+			emitter.indention = false
+			spaces = false
+			breaks = false
+		}
+	}
+
+	emitter.whitespace = false
+	emitter.indention = false
+	if emitter.root_context {
+		emitter.open_ended = true
+	}
+
+	return true
+}
+
+func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+
+	if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) {
+		return false
+	}
+
+	spaces := false
+	breaks := false
+	for i := 0; i < len(value); {
+		if is_space(value, i) {
+			if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+				i += width(value[i])
+			} else {
+				if !write(emitter, value, &i) {
+					return false
+				}
+			}
+			spaces = true
+		} else if is_break(value, i) {
+			if !breaks && value[i] == '\n' {
+				if !put_break(emitter) {
+					return false
+				}
+			}
+			if !write_break(emitter, value, &i) {
+				return false
+			}
+			emitter.indention = true
+			breaks = true
+		} else {
+			if breaks {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+			}
+			if value[i] == '\'' {
+				if !put(emitter, '\'') {
+					return false
+				}
+			}
+			if !write(emitter, value, &i) {
+				return false
+			}
+			emitter.indention = false
+			spaces = false
+			breaks = false
+		}
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) {
+		return false
+	}
+	emitter.whitespace = false
+	emitter.indention = false
+	return true
+}
+
+func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+	spaces := false
+	if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) {
+		return false
+	}
+
+	for i := 0; i < len(value); {
+		if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) ||
+			is_bom(value, i) || is_break(value, i) ||
+			value[i] == '"' || value[i] == '\\' {
+
+			octet := value[i]
+
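+			// Decode one UTF-8 rune by hand: the leading byte selects the
+			// sequence width w and contributes the high-order bits of v.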
+			var w int
+			var v rune
+			switch {
+			case octet&0x80 == 0x00:
+				w, v = 1, rune(octet&0x7F)
+			case octet&0xE0 == 0xC0:
+				w, v = 2, rune(octet&0x1F)
+			case octet&0xF0 == 0xE0:
+				w, v = 3, rune(octet&0x0F)
+			case octet&0xF8 == 0xF0:
+				w, v = 4, rune(octet&0x07)
+			}
+			for k := 1; k < w; k++ {
+				octet = value[i+k]
+				v = (v << 6) + (rune(octet) & 0x3F)
+			}
+			i += w
+
+			if !put(emitter, '\\') {
+				return false
+			}
+
+			var ok bool
+			switch v {
+			case 0x00:
+				ok = put(emitter, '0')
+			case 0x07:
+				ok = put(emitter, 'a')
+			case 0x08:
+				ok = put(emitter, 'b')
+			case 0x09:
+				ok = put(emitter, 't')
+			case 0x0A:
+				ok = put(emitter, 'n')
+			case 0x0b:
+				ok = put(emitter, 'v')
+			case 0x0c:
+				ok = put(emitter, 'f')
+			case 0x0d:
+				ok = put(emitter, 'r')
+			case 0x1b:
+				ok = put(emitter, 'e')
+			case 0x22:
+				ok = put(emitter, '"')
+			case 0x5c:
+				ok = put(emitter, '\\')
+			case 0x85:
+				ok = put(emitter, 'N')
+			case 0xA0:
+				ok = put(emitter, '_')
+			case 0x2028:
+				ok = put(emitter, 'L')
+			case 0x2029:
+				ok = put(emitter, 'P')
+			default:
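+				// No single-character escape exists, so fall back to
+				// \xXX, \uXXXX or \UXXXXXXXX based on the code point size.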
+				if v <= 0xFF {
+					ok = put(emitter, 'x')
+					w = 2
+				} else if v <= 0xFFFF {
+					ok = put(emitter, 'u')
+					w = 4
+				} else {
+					ok = put(emitter, 'U')
+					w = 8
+				}
+				for k := (w - 1) * 4; ok && k >= 0; k -= 4 {
+					digit := byte((v >> uint(k)) & 0x0F)
+					if digit < 10 {
+						ok = put(emitter, digit+'0')
+					} else {
+						ok = put(emitter, digit+'A'-10)
+					}
+				}
+			}
+			if !ok {
+				return false
+			}
+			spaces = false
+		} else if is_space(value, i) {
+			if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+				if is_space(value, i+1) {
+					if !put(emitter, '\\') {
+						return false
+					}
+				}
+				i += width(value[i])
+			} else if !write(emitter, value, &i) {
+				return false
+			}
+			spaces = true
+		} else {
+			if !write(emitter, value, &i) {
+				return false
+			}
+			spaces = false
+		}
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) {
+		return false
+	}
+	emitter.whitespace = false
+	emitter.indention = false
+	return true
+}
+
+func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool {
+	if is_space(value, 0) || is_break(value, 0) {
+		indent_hint := []byte{'0' + byte(emitter.best_indent)}
+		if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) {
+			return false
+		}
+	}
+
+	emitter.open_ended = false
+
+	var chomp_hint [1]byte
+	if len(value) == 0 {
+		chomp_hint[0] = '-'
+	} else {
+		i := len(value) - 1
+		for value[i]&0xC0 == 0x80 {
+			i--
+		}
+		if !is_break(value, i) {
+			chomp_hint[0] = '-'
+		} else if i == 0 {
+			chomp_hint[0] = '+'
+			emitter.open_ended = true
+		} else {
+			i--
+			for value[i]&0xC0 == 0x80 {
+				i--
+			}
+			if is_break(value, i) {
+				chomp_hint[0] = '+'
+				emitter.open_ended = true
+			}
+		}
+	}
+	if chomp_hint[0] != 0 {
+		if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) {
+			return false
+		}
+	}
+	return true
+}
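+
+// For example (an illustration of the logic above, not upstream commentary):
+// "text" with no trailing break gets the '-' strip hint, "text\n\n" gets the
+// '+' keep hint, and "text\n" with exactly one trailing break needs no
+// chomping hint at all.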
+
+func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool {
+	if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) {
+		return false
+	}
+	if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+		return false
+	}
+	if !put_break(emitter) {
+		return false
+	}
+	emitter.indention = true
+	emitter.whitespace = true
+	breaks := true
+	for i := 0; i < len(value); {
+		if is_break(value, i) {
+			if !write_break(emitter, value, &i) {
+				return false
+			}
+			emitter.indention = true
+			breaks = true
+		} else {
+			if breaks {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+			}
+			if !write(emitter, value, &i) {
+				return false
+			}
+			emitter.indention = false
+			breaks = false
+		}
+	}
+
+	return true
+}
+
+func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool {
+	if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) {
+		return false
+	}
+	if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+		return false
+	}
+
+	if !put_break(emitter) {
+		return false
+	}
+	emitter.indention = true
+	emitter.whitespace = true
+
+	breaks := true
+	leading_spaces := true
+	for i := 0; i < len(value); {
+		if is_break(value, i) {
+			if !breaks && !leading_spaces && value[i] == '\n' {
+				k := 0
+				for is_break(value, k) {
+					k += width(value[k])
+				}
+				if !is_blankz(value, k) {
+					if !put_break(emitter) {
+						return false
+					}
+				}
+			}
+			if !write_break(emitter, value, &i) {
+				return false
+			}
+			emitter.indention = true
+			breaks = true
+		} else {
+			if breaks {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+				leading_spaces = is_blank(value, i)
+			}
+			if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+				i += width(value[i])
+			} else {
+				if !write(emitter, value, &i) {
+					return false
+				}
+			}
+			emitter.indention = false
+			breaks = false
+		}
+	}
+	return true
+}
diff --git a/vendor/gopkg.in/yaml.v2/encode.go b/vendor/gopkg.in/yaml.v2/encode.go
new file mode 100644
index 0000000..0ee738e
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/encode.go
@@ -0,0 +1,390 @@
+package yaml
+
+import (
+	"encoding"
+	"fmt"
+	"io"
+	"reflect"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+	"unicode/utf8"
+)
+
+// jsonNumber is the interface of the encoding/json.Number datatype.
+// Repeating the interface here avoids a dependency on encoding/json, and also
+// supports other libraries like jsoniter, which use a similar datatype with
+// the same interface. Detecting this interface is useful when dealing with
+// structures containing json.Number, which is a string under the hood. The
+// encoder should prefer the use of Int64(), Float64() and String(), in that
+// order, when encoding this type.
+type jsonNumber interface {
+	Float64() (float64, error)
+	Int64() (int64, error)
+	String() string
+}
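+
+// For illustration (assumed behavior of a json.Number-like value, not an
+// additional API): a value whose String() is "3" passes Int64() and encodes
+// as the integer 3; "3.5" fails Int64() but passes Float64() and encodes as
+// the float 3.5; and "3x" fails both conversions and falls back to encoding
+// the raw string "3x".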
+
+type encoder struct {
+	emitter yaml_emitter_t
+	event   yaml_event_t
+	out     []byte
+	flow    bool
+	// doneInit holds whether the initial stream_start_event has been
+	// emitted.
+	doneInit bool
+}
+
+func newEncoder() *encoder {
+	e := &encoder{}
+	yaml_emitter_initialize(&e.emitter)
+	yaml_emitter_set_output_string(&e.emitter, &e.out)
+	yaml_emitter_set_unicode(&e.emitter, true)
+	return e
+}
+
+func newEncoderWithWriter(w io.Writer) *encoder {
+	e := &encoder{}
+	yaml_emitter_initialize(&e.emitter)
+	yaml_emitter_set_output_writer(&e.emitter, w)
+	yaml_emitter_set_unicode(&e.emitter, true)
+	return e
+}
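+
+// A minimal usage sketch (illustrative; this mirrors how the package-level
+// Marshal drives the encoder, it is not an exported API):
+//
+//	e := newEncoder()
+//	e.marshalDoc("", reflect.ValueOf(map[string]int{"a": 1}))
+//	e.finish()
+//	// e.out now holds "a: 1\n"
+//	e.destroy()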
+
+func (e *encoder) init() {
+	if e.doneInit {
+		return
+	}
+	yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)
+	e.emit()
+	e.doneInit = true
+}
+
+func (e *encoder) finish() {
+	e.emitter.open_ended = false
+	yaml_stream_end_event_initialize(&e.event)
+	e.emit()
+}
+
+func (e *encoder) destroy() {
+	yaml_emitter_delete(&e.emitter)
+}
+
+func (e *encoder) emit() {
+	// This will internally delete the e.event value.
+	e.must(yaml_emitter_emit(&e.emitter, &e.event))
+}
+
+func (e *encoder) must(ok bool) {
+	if !ok {
+		msg := e.emitter.problem
+		if msg == "" {
+			msg = "unknown problem generating YAML content"
+		}
+		failf("%s", msg)
+	}
+}
+
+func (e *encoder) marshalDoc(tag string, in reflect.Value) {
+	e.init()
+	yaml_document_start_event_initialize(&e.event, nil, nil, true)
+	e.emit()
+	e.marshal(tag, in)
+	yaml_document_end_event_initialize(&e.event, true)
+	e.emit()
+}
+
+func (e *encoder) marshal(tag string, in reflect.Value) {
+	if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() {
+		e.nilv()
+		return
+	}
+	iface := in.Interface()
+	switch m := iface.(type) {
+	case jsonNumber:
+		integer, err := m.Int64()
+		if err == nil {
+			// In this case the json.Number is a valid int64
+			in = reflect.ValueOf(integer)
+			break
+		}
+		float, err := m.Float64()
+		if err == nil {
+			// In this case the json.Number is a valid float64
+			in = reflect.ValueOf(float)
+			break
+		}
+		// Fallback case: neither an int64 nor a float64 could be obtained.
+		in = reflect.ValueOf(m.String())
+	case time.Time, *time.Time:
+		// Although time.Time implements TextMarshaler,
+		// we don't want to treat it as a string for YAML
+		// purposes because YAML has special support for
+		// timestamps.
+	case Marshaler:
+		v, err := m.MarshalYAML()
+		if err != nil {
+			fail(err)
+		}
+		if v == nil {
+			e.nilv()
+			return
+		}
+		in = reflect.ValueOf(v)
+	case encoding.TextMarshaler:
+		text, err := m.MarshalText()
+		if err != nil {
+			fail(err)
+		}
+		in = reflect.ValueOf(string(text))
+	case nil:
+		e.nilv()
+		return
+	}
+	switch in.Kind() {
+	case reflect.Interface:
+		e.marshal(tag, in.Elem())
+	case reflect.Map:
+		e.mapv(tag, in)
+	case reflect.Ptr:
+		if in.Type() == ptrTimeType {
+			e.timev(tag, in.Elem())
+		} else {
+			e.marshal(tag, in.Elem())
+		}
+	case reflect.Struct:
+		if in.Type() == timeType {
+			e.timev(tag, in)
+		} else {
+			e.structv(tag, in)
+		}
+	case reflect.Slice, reflect.Array:
+		if in.Type().Elem() == mapItemType {
+			e.itemsv(tag, in)
+		} else {
+			e.slicev(tag, in)
+		}
+	case reflect.String:
+		e.stringv(tag, in)
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		if in.Type() == durationType {
+			e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String()))
+		} else {
+			e.intv(tag, in)
+		}
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		e.uintv(tag, in)
+	case reflect.Float32, reflect.Float64:
+		e.floatv(tag, in)
+	case reflect.Bool:
+		e.boolv(tag, in)
+	default:
+		panic("cannot marshal type: " + in.Type().String())
+	}
+}
+
+func (e *encoder) mapv(tag string, in reflect.Value) {
+	e.mappingv(tag, func() {
+		keys := keyList(in.MapKeys())
+		sort.Sort(keys)
+		for _, k := range keys {
+			e.marshal("", k)
+			e.marshal("", in.MapIndex(k))
+		}
+	})
+}
+
+func (e *encoder) itemsv(tag string, in reflect.Value) {
+	e.mappingv(tag, func() {
+		slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem)
+		for _, item := range slice {
+			e.marshal("", reflect.ValueOf(item.Key))
+			e.marshal("", reflect.ValueOf(item.Value))
+		}
+	})
+}
+
+func (e *encoder) structv(tag string, in reflect.Value) {
+	sinfo, err := getStructInfo(in.Type())
+	if err != nil {
+		panic(err)
+	}
+	e.mappingv(tag, func() {
+		for _, info := range sinfo.FieldsList {
+			var value reflect.Value
+			if info.Inline == nil {
+				value = in.Field(info.Num)
+			} else {
+				value = in.FieldByIndex(info.Inline)
+			}
+			if info.OmitEmpty && isZero(value) {
+				continue
+			}
+			e.marshal("", reflect.ValueOf(info.Key))
+			e.flow = info.Flow
+			e.marshal("", value)
+		}
+		if sinfo.InlineMap >= 0 {
+			m := in.Field(sinfo.InlineMap)
+			if m.Len() > 0 {
+				e.flow = false
+				keys := keyList(m.MapKeys())
+				sort.Sort(keys)
+				for _, k := range keys {
+					if _, found := sinfo.FieldsMap[k.String()]; found {
+						panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String()))
+					}
+					e.marshal("", k)
+					e.flow = false
+					e.marshal("", m.MapIndex(k))
+				}
+			}
+		}
+	})
+}
+
+func (e *encoder) mappingv(tag string, f func()) {
+	implicit := tag == ""
+	style := yaml_BLOCK_MAPPING_STYLE
+	if e.flow {
+		e.flow = false
+		style = yaml_FLOW_MAPPING_STYLE
+	}
+	yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
+	e.emit()
+	f()
+	yaml_mapping_end_event_initialize(&e.event)
+	e.emit()
+}
+
+func (e *encoder) slicev(tag string, in reflect.Value) {
+	implicit := tag == ""
+	style := yaml_BLOCK_SEQUENCE_STYLE
+	if e.flow {
+		e.flow = false
+		style = yaml_FLOW_SEQUENCE_STYLE
+	}
+	e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
+	e.emit()
+	n := in.Len()
+	for i := 0; i < n; i++ {
+		e.marshal("", in.Index(i))
+	}
+	e.must(yaml_sequence_end_event_initialize(&e.event))
+	e.emit()
+}
+
+// isBase60Float returns whether s is in base 60 notation as defined in YAML 1.1.
+//
+// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
+// in YAML 1.2 and by this package, but such strings should be marshalled
+// quoted for the time being, for compatibility with other parsers.
+func isBase60Float(s string) (result bool) {
+	// Fast path.
+	if s == "" {
+		return false
+	}
+	c := s[0]
+	if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
+		return false
+	}
+	// Do the full match.
+	return base60float.MatchString(s)
+}
+
+// From http://yaml.org/type/float.html, except the regular expression there
+// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
+var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
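+
+// Illustrative matches (not upstream documentation): "190:20:30" and
+// "1:20.5" are base 60 floats and are therefore emitted quoted, while
+// "1.5" and "12:34pm" do not match and may be emitted plain.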
+
+func (e *encoder) stringv(tag string, in reflect.Value) {
+	var style yaml_scalar_style_t
+	s := in.String()
+	canUsePlain := true
+	switch {
+	case !utf8.ValidString(s):
+		if tag == yaml_BINARY_TAG {
+			failf("explicitly tagged !!binary data must be base64-encoded")
+		}
+		if tag != "" {
+			failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
+		}
+		// It can't be encoded directly as YAML so use a binary tag
+		// and encode it as base64.
+		tag = yaml_BINARY_TAG
+		s = encodeBase64(s)
+	case tag == "":
+		// Check to see if it would resolve to a specific
+		// tag when encoded unquoted. If it doesn't,
+		// there's no need to quote it.
+		rtag, _ := resolve("", s)
+		canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s)
+	}
+	// Note: it's possible for user code to emit invalid YAML if it
+	// explicitly specifies a tag and a string containing text that is
+	// incompatible with that tag.
+	switch {
+	case strings.Contains(s, "\n"):
+		style = yaml_LITERAL_SCALAR_STYLE
+	case canUsePlain:
+		style = yaml_PLAIN_SCALAR_STYLE
+	default:
+		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+	}
+	e.emitScalar(s, "", tag, style)
+}
+
+func (e *encoder) boolv(tag string, in reflect.Value) {
+	var s string
+	if in.Bool() {
+		s = "true"
+	} else {
+		s = "false"
+	}
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) intv(tag string, in reflect.Value) {
+	s := strconv.FormatInt(in.Int(), 10)
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) uintv(tag string, in reflect.Value) {
+	s := strconv.FormatUint(in.Uint(), 10)
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) timev(tag string, in reflect.Value) {
+	t := in.Interface().(time.Time)
+	s := t.Format(time.RFC3339Nano)
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) floatv(tag string, in reflect.Value) {
+	// Issue #352: When formatting, use the precision of the underlying value
+	precision := 64
+	if in.Kind() == reflect.Float32 {
+		precision = 32
+	}
+
+	s := strconv.FormatFloat(in.Float(), 'g', -1, precision)
+	switch s {
+	case "+Inf":
+		s = ".inf"
+	case "-Inf":
+		s = "-.inf"
+	case "NaN":
+		s = ".nan"
+	}
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) nilv() {
+	e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
+	implicit := tag == ""
+	e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
+	e.emit()
+}
diff --git a/vendor/gopkg.in/yaml.v2/go.mod b/vendor/gopkg.in/yaml.v2/go.mod
new file mode 100644
index 0000000..1934e87
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/go.mod
@@ -0,0 +1,5 @@
+module "gopkg.in/yaml.v2"
+
+require (
+	"gopkg.in/check.v1" v0.0.0-20161208181325-20d25e280405
+)
diff --git a/vendor/gopkg.in/yaml.v2/parserc.go b/vendor/gopkg.in/yaml.v2/parserc.go
new file mode 100644
index 0000000..81d05df
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/parserc.go
@@ -0,0 +1,1095 @@
+package yaml
+
+import (
+	"bytes"
+)
+
+// The parser implements the following grammar:
+//
+// stream               ::= STREAM-START implicit_document? explicit_document* STREAM-END
+// implicit_document    ::= block_node DOCUMENT-END*
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// block_node_or_indentless_sequence    ::=
+//                          ALIAS
+//                          | properties (block_content | indentless_block_sequence)?
+//                          | block_content
+//                          | indentless_block_sequence
+// block_node           ::= ALIAS
+//                          | properties block_content?
+//                          | block_content
+// flow_node            ::= ALIAS
+//                          | properties flow_content?
+//                          | flow_content
+// properties           ::= TAG ANCHOR? | ANCHOR TAG?
+// block_content        ::= block_collection | flow_collection | SCALAR
+// flow_content         ::= flow_collection | SCALAR
+// block_collection     ::= block_sequence | block_mapping
+// flow_collection      ::= flow_sequence | flow_mapping
+// block_sequence       ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+// indentless_sequence  ::= (BLOCK-ENTRY block_node?)+
+// block_mapping        ::= BLOCK-MAPPING_START
+//                          ((KEY block_node_or_indentless_sequence?)?
+//                          (VALUE block_node_or_indentless_sequence?)?)*
+//                          BLOCK-END
+// flow_sequence        ::= FLOW-SEQUENCE-START
+//                          (flow_sequence_entry FLOW-ENTRY)*
+//                          flow_sequence_entry?
+//                          FLOW-SEQUENCE-END
+// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// flow_mapping         ::= FLOW-MAPPING-START
+//                          (flow_mapping_entry FLOW-ENTRY)*
+//                          flow_mapping_entry?
+//                          FLOW-MAPPING-END
+// flow_mapping_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
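+//
+// For example (an illustration, not part of the original grammar notes), the
+// document "a: [b, c]" is scanned into the token stream
+//
+//	STREAM-START BLOCK-MAPPING-START KEY SCALAR("a") VALUE
+//	FLOW-SEQUENCE-START SCALAR("b") FLOW-ENTRY SCALAR("c")
+//	FLOW-SEQUENCE-END BLOCK-END STREAM-END
+//
+// which the productions above reduce to mapping and sequence events.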
+
+// Peek the next token in the token queue.
+func peek_token(parser *yaml_parser_t) *yaml_token_t {
+	if parser.token_available || yaml_parser_fetch_more_tokens(parser) {
+		return &parser.tokens[parser.tokens_head]
+	}
+	return nil
+}
+
+// Remove the next token from the queue (must be called after peek_token).
+func skip_token(parser *yaml_parser_t) {
+	parser.token_available = false
+	parser.tokens_parsed++
+	parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN
+	parser.tokens_head++
+}
+
+// Get the next event.
+func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool {
+	// Erase the event object.
+	*event = yaml_event_t{}
+
+	// No events after the end of the stream or error.
+	if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE {
+		return true
+	}
+
+	// Generate the next event.
+	return yaml_parser_state_machine(parser, event)
+}
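+
+// Callers typically pull events in a loop along these lines (an illustrative
+// sketch assuming an initialized parser, not code from this package):
+//
+//	var event yaml_event_t
+//	for yaml_parser_parse(&parser, &event) {
+//		if event.typ == yaml_STREAM_END_EVENT {
+//			break
+//		}
+//		// ... handle event ...
+//	}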
+
+// Set parser error.
+func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool {
+	parser.error = yaml_PARSER_ERROR
+	parser.problem = problem
+	parser.problem_mark = problem_mark
+	return false
+}
+
+func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool {
+	parser.error = yaml_PARSER_ERROR
+	parser.context = context
+	parser.context_mark = context_mark
+	parser.problem = problem
+	parser.problem_mark = problem_mark
+	return false
+}
+
+// State dispatcher.
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool {
+	//trace("yaml_parser_state_machine", "state:", parser.state.String())
+
+	switch parser.state {
+	case yaml_PARSE_STREAM_START_STATE:
+		return yaml_parser_parse_stream_start(parser, event)
+
+	case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+		return yaml_parser_parse_document_start(parser, event, true)
+
+	case yaml_PARSE_DOCUMENT_START_STATE:
+		return yaml_parser_parse_document_start(parser, event, false)
+
+	case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+		return yaml_parser_parse_document_content(parser, event)
+
+	case yaml_PARSE_DOCUMENT_END_STATE:
+		return yaml_parser_parse_document_end(parser, event)
+
+	case yaml_PARSE_BLOCK_NODE_STATE:
+		return yaml_parser_parse_node(parser, event, true, false)
+
+	case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+		return yaml_parser_parse_node(parser, event, true, true)
+
+	case yaml_PARSE_FLOW_NODE_STATE:
+		return yaml_parser_parse_node(parser, event, false, false)
+
+	case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+		return yaml_parser_parse_block_sequence_entry(parser, event, true)
+
+	case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+		return yaml_parser_parse_block_sequence_entry(parser, event, false)
+
+	case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+		return yaml_parser_parse_indentless_sequence_entry(parser, event)
+
+	case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+		return yaml_parser_parse_block_mapping_key(parser, event, true)
+
+	case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+		return yaml_parser_parse_block_mapping_key(parser, event, false)
+
+	case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+		return yaml_parser_parse_block_mapping_value(parser, event)
+
+	case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+		return yaml_parser_parse_flow_sequence_entry(parser, event, true)
+
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+		return yaml_parser_parse_flow_sequence_entry(parser, event, false)
+
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+		return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event)
+
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+		return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event)
+
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+		return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event)
+
+	case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+		return yaml_parser_parse_flow_mapping_key(parser, event, true)
+
+	case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+		return yaml_parser_parse_flow_mapping_key(parser, event, false)
+
+	case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+		return yaml_parser_parse_flow_mapping_value(parser, event, false)
+
+	case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+		return yaml_parser_parse_flow_mapping_value(parser, event, true)
+
+	default:
+		panic("invalid parser state")
+	}
+}
+
+// Parse the production:
+// stream   ::= STREAM-START implicit_document? explicit_document* STREAM-END
+//              ************
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ != yaml_STREAM_START_TOKEN {
+		return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
+	}
+	parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+	*event = yaml_event_t{
+		typ:        yaml_STREAM_START_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.end_mark,
+		encoding:   token.encoding,
+	}
+	skip_token(parser)
+	return true
+}
+
+// Parse the productions:
+// implicit_document    ::= block_node DOCUMENT-END*
+//                          *
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//                          *************************
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	// Parse extra document end indicators.
+	if !implicit {
+		for token.typ == yaml_DOCUMENT_END_TOKEN {
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+		}
+	}
+
+	if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+		token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+		token.typ != yaml_DOCUMENT_START_TOKEN &&
+		token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an implicit document.
+		if !yaml_parser_process_directives(parser, nil, nil) {
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+		*event = yaml_event_t{
+			typ:        yaml_DOCUMENT_START_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+
+	} else if token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an explicit document.
+		var version_directive *yaml_version_directive_t
+		var tag_directives []yaml_tag_directive_t
+		start_mark := token.start_mark
+		if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+			return false
+		}
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_DOCUMENT_START_TOKEN {
+			yaml_parser_set_parser_error(parser,
+				"did not find expected <document start>", token.start_mark)
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+		end_mark := token.end_mark
+
+		*event = yaml_event_t{
+			typ:               yaml_DOCUMENT_START_EVENT,
+			start_mark:        start_mark,
+			end_mark:          end_mark,
+			version_directive: version_directive,
+			tag_directives:    tag_directives,
+			implicit:          false,
+		}
+		skip_token(parser)
+
+	} else {
+		// Parse the stream end.
+		parser.state = yaml_PARSE_END_STATE
+		*event = yaml_event_t{
+			typ:        yaml_STREAM_END_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+		skip_token(parser)
+	}
+
+	return true
+}
+
+// Parse the productions:
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//                                                    ***********
+//
+func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ == yaml_VERSION_DIRECTIVE_TOKEN ||
+		token.typ == yaml_TAG_DIRECTIVE_TOKEN ||
+		token.typ == yaml_DOCUMENT_START_TOKEN ||
+		token.typ == yaml_DOCUMENT_END_TOKEN ||
+		token.typ == yaml_STREAM_END_TOKEN {
+		parser.state = parser.states[len(parser.states)-1]
+		parser.states = parser.states[:len(parser.states)-1]
+		return yaml_parser_process_empty_scalar(parser, event,
+			token.start_mark)
+	}
+	return yaml_parser_parse_node(parser, event, true, false)
+}
+
+// Parse the productions:
+// implicit_document    ::= block_node DOCUMENT-END*
+//                                     *************
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//
+func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	start_mark := token.start_mark
+	end_mark := token.start_mark
+
+	implicit := true
+	if token.typ == yaml_DOCUMENT_END_TOKEN {
+		end_mark = token.end_mark
+		skip_token(parser)
+		implicit = false
+	}
+
+	parser.tag_directives = parser.tag_directives[:0]
+
+	parser.state = yaml_PARSE_DOCUMENT_START_STATE
+	*event = yaml_event_t{
+		typ:        yaml_DOCUMENT_END_EVENT,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		implicit:   implicit,
+	}
+	return true
+}
+
+// Parse the productions:
+// block_node_or_indentless_sequence    ::=
+//                          ALIAS
+//                          *****
+//                          | properties (block_content | indentless_block_sequence)?
+//                            **********  *
+//                          | block_content | indentless_block_sequence
+//                            *
+// block_node           ::= ALIAS
+//                          *****
+//                          | properties block_content?
+//                            ********** *
+//                          | block_content
+//                            *
+// flow_node            ::= ALIAS
+//                          *****
+//                          | properties flow_content?
+//                            ********** *
+//                          | flow_content
+//                            *
+// properties           ::= TAG ANCHOR? | ANCHOR TAG?
+//                          *************************
+// block_content        ::= block_collection | flow_collection | SCALAR
+//                                                               ******
+// flow_content         ::= flow_collection | SCALAR
+//                                            ******
+func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool {
+	//defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)()
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	if token.typ == yaml_ALIAS_TOKEN {
+		parser.state = parser.states[len(parser.states)-1]
+		parser.states = parser.states[:len(parser.states)-1]
+		*event = yaml_event_t{
+			typ:        yaml_ALIAS_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+			anchor:     token.value,
+		}
+		skip_token(parser)
+		return true
+	}
+
+	start_mark := token.start_mark
+	end_mark := token.start_mark
+
+	var tag_token bool
+	var tag_handle, tag_suffix, anchor []byte
+	var tag_mark yaml_mark_t
+	if token.typ == yaml_ANCHOR_TOKEN {
+		anchor = token.value
+		start_mark = token.start_mark
+		end_mark = token.end_mark
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ == yaml_TAG_TOKEN {
+			tag_token = true
+			tag_handle = token.value
+			tag_suffix = token.suffix
+			tag_mark = token.start_mark
+			end_mark = token.end_mark
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+		}
+	} else if token.typ == yaml_TAG_TOKEN {
+		tag_token = true
+		tag_handle = token.value
+		tag_suffix = token.suffix
+		start_mark = token.start_mark
+		tag_mark = token.start_mark
+		end_mark = token.end_mark
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ == yaml_ANCHOR_TOKEN {
+			anchor = token.value
+			end_mark = token.end_mark
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+		}
+	}
+
+	var tag []byte
+	if tag_token {
+		if len(tag_handle) == 0 {
+			tag = tag_suffix
+			tag_suffix = nil
+		} else {
+			for i := range parser.tag_directives {
+				if bytes.Equal(parser.tag_directives[i].handle, tag_handle) {
+					tag = append([]byte(nil), parser.tag_directives[i].prefix...)
+					tag = append(tag, tag_suffix...)
+					break
+				}
+			}
+			if len(tag) == 0 {
+				yaml_parser_set_parser_error_context(parser,
+					"while parsing a node", start_mark,
+					"found undefined tag handle", tag_mark)
+				return false
+			}
+		}
+	}
+
+	implicit := len(tag) == 0
+	if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN {
+		end_mark = token.end_mark
+		parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+		*event = yaml_event_t{
+			typ:        yaml_SEQUENCE_START_EVENT,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			anchor:     anchor,
+			tag:        tag,
+			implicit:   implicit,
+			style:      yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+		}
+		return true
+	}
+	if token.typ == yaml_SCALAR_TOKEN {
+		var plain_implicit, quoted_implicit bool
+		end_mark = token.end_mark
+		if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') {
+			plain_implicit = true
+		} else if len(tag) == 0 {
+			quoted_implicit = true
+		}
+		parser.state = parser.states[len(parser.states)-1]
+		parser.states = parser.states[:len(parser.states)-1]
+
+		*event = yaml_event_t{
+			typ:             yaml_SCALAR_EVENT,
+			start_mark:      start_mark,
+			end_mark:        end_mark,
+			anchor:          anchor,
+			tag:             tag,
+			value:           token.value,
+			implicit:        plain_implicit,
+			quoted_implicit: quoted_implicit,
+			style:           yaml_style_t(token.style),
+		}
+		skip_token(parser)
+		return true
+	}
+	if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN {
+		// [Go] Some of the events below can be merged as they differ only on style.
+		end_mark = token.end_mark
+		parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE
+		*event = yaml_event_t{
+			typ:        yaml_SEQUENCE_START_EVENT,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			anchor:     anchor,
+			tag:        tag,
+			implicit:   implicit,
+			style:      yaml_style_t(yaml_FLOW_SEQUENCE_STYLE),
+		}
+		return true
+	}
+	if token.typ == yaml_FLOW_MAPPING_START_TOKEN {
+		end_mark = token.end_mark
+		parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE
+		*event = yaml_event_t{
+			typ:        yaml_MAPPING_START_EVENT,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			anchor:     anchor,
+			tag:        tag,
+			implicit:   implicit,
+			style:      yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+		}
+		return true
+	}
+	if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN {
+		end_mark = token.end_mark
+		parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE
+		*event = yaml_event_t{
+			typ:        yaml_SEQUENCE_START_EVENT,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			anchor:     anchor,
+			tag:        tag,
+			implicit:   implicit,
+			style:      yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+		}
+		return true
+	}
+	if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN {
+		end_mark = token.end_mark
+		parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
+		*event = yaml_event_t{
+			typ:        yaml_MAPPING_START_EVENT,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			anchor:     anchor,
+			tag:        tag,
+			implicit:   implicit,
+			style:      yaml_style_t(yaml_BLOCK_MAPPING_STYLE),
+		}
+		return true
+	}
+	if len(anchor) > 0 || len(tag) > 0 {
+		parser.state = parser.states[len(parser.states)-1]
+		parser.states = parser.states[:len(parser.states)-1]
+
+		*event = yaml_event_t{
+			typ:             yaml_SCALAR_EVENT,
+			start_mark:      start_mark,
+			end_mark:        end_mark,
+			anchor:          anchor,
+			tag:             tag,
+			implicit:        implicit,
+			quoted_implicit: false,
+			style:           yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+		}
+		return true
+	}
+
+	context := "while parsing a flow node"
+	if block {
+		context = "while parsing a block node"
+	}
+	yaml_parser_set_parser_error_context(parser, context, start_mark,
+		"did not find expected node content", token.start_mark)
+	return false
+}
+
+// Parse the productions:
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+//                    ********************  *********** *             *********
+//
+func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+	if first {
+		token := peek_token(parser)
+		parser.marks = append(parser.marks, token.start_mark)
+		skip_token(parser)
+	}
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+		mark := token.end_mark
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE)
+			return yaml_parser_parse_node(parser, event, true, false)
+		} else {
+			parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE
+			return yaml_parser_process_empty_scalar(parser, event, mark)
+		}
+	}
+	if token.typ == yaml_BLOCK_END_TOKEN {
+		parser.state = parser.states[len(parser.states)-1]
+		parser.states = parser.states[:len(parser.states)-1]
+		parser.marks = parser.marks[:len(parser.marks)-1]
+
+		*event = yaml_event_t{
+			typ:        yaml_SEQUENCE_END_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+
+		skip_token(parser)
+		return true
+	}
+
+	context_mark := parser.marks[len(parser.marks)-1]
+	parser.marks = parser.marks[:len(parser.marks)-1]
+	return yaml_parser_set_parser_error_context(parser,
+		"while parsing a block collection", context_mark,
+		"did not find expected '-' indicator", token.start_mark)
+}
+
+// Parse the productions:
+// indentless_sequence  ::= (BLOCK-ENTRY block_node?)+
+//                           *********** *
+func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+		mark := token.end_mark
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_BLOCK_ENTRY_TOKEN &&
+			token.typ != yaml_KEY_TOKEN &&
+			token.typ != yaml_VALUE_TOKEN &&
+			token.typ != yaml_BLOCK_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE)
+			return yaml_parser_parse_node(parser, event, true, false)
+		}
+		parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+		return yaml_parser_process_empty_scalar(parser, event, mark)
+	}
+	parser.state = parser.states[len(parser.states)-1]
+	parser.states = parser.states[:len(parser.states)-1]
+
+	*event = yaml_event_t{
+		typ:        yaml_SEQUENCE_END_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.start_mark, // [Go] Shouldn't this be token.end_mark?
+	}
+	return true
+}
+
+// Parse the productions:
+// block_mapping        ::= BLOCK-MAPPING_START
+//                          *******************
+//                          ((KEY block_node_or_indentless_sequence?)?
+//                            *** *
+//                          (VALUE block_node_or_indentless_sequence?)?)*
+//
+//                          BLOCK-END
+//                          *********
+//
+func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+	if first {
+		token := peek_token(parser)
+		parser.marks = append(parser.marks, token.start_mark)
+		skip_token(parser)
+	}
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	if token.typ == yaml_KEY_TOKEN {
+		mark := token.end_mark
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_KEY_TOKEN &&
+			token.typ != yaml_VALUE_TOKEN &&
+			token.typ != yaml_BLOCK_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE)
+			return yaml_parser_parse_node(parser, event, true, true)
+		} else {
+			parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
+			return yaml_parser_process_empty_scalar(parser, event, mark)
+		}
+	} else if token.typ == yaml_BLOCK_END_TOKEN {
+		parser.state = parser.states[len(parser.states)-1]
+		parser.states = parser.states[:len(parser.states)-1]
+		parser.marks = parser.marks[:len(parser.marks)-1]
+		*event = yaml_event_t{
+			typ:        yaml_MAPPING_END_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+		skip_token(parser)
+		return true
+	}
+
+	context_mark := parser.marks[len(parser.marks)-1]
+	parser.marks = parser.marks[:len(parser.marks)-1]
+	return yaml_parser_set_parser_error_context(parser,
+		"while parsing a block mapping", context_mark,
+		"did not find expected key", token.start_mark)
+}
+
+// Parse the productions:
+// block_mapping        ::= BLOCK-MAPPING_START
+//
+//                          ((KEY block_node_or_indentless_sequence?)?
+//
+//                          (VALUE block_node_or_indentless_sequence?)?)*
+//                           ***** *
+//                          BLOCK-END
+//
+//
+func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ == yaml_VALUE_TOKEN {
+		mark := token.end_mark
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_KEY_TOKEN &&
+			token.typ != yaml_VALUE_TOKEN &&
+			token.typ != yaml_BLOCK_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE)
+			return yaml_parser_parse_node(parser, event, true, true)
+		}
+		parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+		return yaml_parser_process_empty_scalar(parser, event, mark)
+	}
+	parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+	return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence        ::= FLOW-SEQUENCE-START
+//                          *******************
+//                          (flow_sequence_entry FLOW-ENTRY)*
+//                           *                   **********
+//                          flow_sequence_entry?
+//                          *
+//                          FLOW-SEQUENCE-END
+//                          *****************
+// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//                          *
+//
+func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+	if first {
+		token := peek_token(parser)
+		parser.marks = append(parser.marks, token.start_mark)
+		skip_token(parser)
+	}
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+		if !first {
+			if token.typ == yaml_FLOW_ENTRY_TOKEN {
+				skip_token(parser)
+				token = peek_token(parser)
+				if token == nil {
+					return false
+				}
+			} else {
+				context_mark := parser.marks[len(parser.marks)-1]
+				parser.marks = parser.marks[:len(parser.marks)-1]
+				return yaml_parser_set_parser_error_context(parser,
+					"while parsing a flow sequence", context_mark,
+					"did not find expected ',' or ']'", token.start_mark)
+			}
+		}
+
+		if token.typ == yaml_KEY_TOKEN {
+			parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE
+			*event = yaml_event_t{
+				typ:        yaml_MAPPING_START_EVENT,
+				start_mark: token.start_mark,
+				end_mark:   token.end_mark,
+				implicit:   true,
+				style:      yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+			}
+			skip_token(parser)
+			return true
+		} else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE)
+			return yaml_parser_parse_node(parser, event, false, false)
+		}
+	}
+
+	parser.state = parser.states[len(parser.states)-1]
+	parser.states = parser.states[:len(parser.states)-1]
+	parser.marks = parser.marks[:len(parser.marks)-1]
+
+	*event = yaml_event_t{
+		typ:        yaml_SEQUENCE_END_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.end_mark,
+	}
+
+	skip_token(parser)
+	return true
+}
+
+//
+// Parse the productions:
+// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//                                      *** *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ != yaml_VALUE_TOKEN &&
+		token.typ != yaml_FLOW_ENTRY_TOKEN &&
+		token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+		parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE)
+		return yaml_parser_parse_node(parser, event, false, false)
+	}
+	mark := token.end_mark
+	skip_token(parser)
+	parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE
+	return yaml_parser_process_empty_scalar(parser, event, mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//                                                      ***** *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ == yaml_VALUE_TOKEN {
+		skip_token(parser)
+		token := peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE)
+			return yaml_parser_parse_node(parser, event, false, false)
+		}
+	}
+	parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE
+	return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//                                                                      *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE
+	*event = yaml_event_t{
+		typ:        yaml_MAPPING_END_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.start_mark, // [Go] Shouldn't this be end_mark?
+	}
+	return true
+}
+
+// Parse the productions:
+// flow_mapping         ::= FLOW-MAPPING-START
+//                          ******************
+//                          (flow_mapping_entry FLOW-ENTRY)*
+//                           *                  **********
+//                          flow_mapping_entry?
+//                          ******************
+//                          FLOW-MAPPING-END
+//                          ****************
+// flow_mapping_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//                          *           *** *
+//
+func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+	if first {
+		token := peek_token(parser)
+		parser.marks = append(parser.marks, token.start_mark)
+		skip_token(parser)
+	}
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+		if !first {
+			if token.typ == yaml_FLOW_ENTRY_TOKEN {
+				skip_token(parser)
+				token = peek_token(parser)
+				if token == nil {
+					return false
+				}
+			} else {
+				context_mark := parser.marks[len(parser.marks)-1]
+				parser.marks = parser.marks[:len(parser.marks)-1]
+				return yaml_parser_set_parser_error_context(parser,
+					"while parsing a flow mapping", context_mark,
+					"did not find expected ',' or '}'", token.start_mark)
+			}
+		}
+
+		if token.typ == yaml_KEY_TOKEN {
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+			if token.typ != yaml_VALUE_TOKEN &&
+				token.typ != yaml_FLOW_ENTRY_TOKEN &&
+				token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+				parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE)
+				return yaml_parser_parse_node(parser, event, false, false)
+			} else {
+				parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE
+				return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+			}
+		} else if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE)
+			return yaml_parser_parse_node(parser, event, false, false)
+		}
+	}
+
+	parser.state = parser.states[len(parser.states)-1]
+	parser.states = parser.states[:len(parser.states)-1]
+	parser.marks = parser.marks[:len(parser.marks)-1]
+	*event = yaml_event_t{
+		typ:        yaml_MAPPING_END_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.end_mark,
+	}
+	skip_token(parser)
+	return true
+}
+
+// Parse the productions:
+// flow_mapping_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//                                   *                  ***** *
+//
+func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if empty {
+		parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+		return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+	}
+	if token.typ == yaml_VALUE_TOKEN {
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE)
+			return yaml_parser_parse_node(parser, event, false, false)
+		}
+	}
+	parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+	return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Generate an empty scalar event.
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool {
+	*event = yaml_event_t{
+		typ:        yaml_SCALAR_EVENT,
+		start_mark: mark,
+		end_mark:   mark,
+		value:      nil, // Empty
+		implicit:   true,
+		style:      yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+	}
+	return true
+}
+
+var default_tag_directives = []yaml_tag_directive_t{
+	{[]byte("!"), []byte("!")},
+	{[]byte("!!"), []byte("tag:yaml.org,2002:")},
+}
+
+// Parse directives.
+func yaml_parser_process_directives(parser *yaml_parser_t,
+	version_directive_ref **yaml_version_directive_t,
+	tag_directives_ref *[]yaml_tag_directive_t) bool {
+
+	var version_directive *yaml_version_directive_t
+	var tag_directives []yaml_tag_directive_t
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+		if token.typ == yaml_VERSION_DIRECTIVE_TOKEN {
+			if version_directive != nil {
+				yaml_parser_set_parser_error(parser,
+					"found duplicate %YAML directive", token.start_mark)
+				return false
+			}
+			if token.major != 1 || token.minor != 1 {
+				yaml_parser_set_parser_error(parser,
+					"found incompatible YAML document", token.start_mark)
+				return false
+			}
+			version_directive = &yaml_version_directive_t{
+				major: token.major,
+				minor: token.minor,
+			}
+		} else if token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+			value := yaml_tag_directive_t{
+				handle: token.value,
+				prefix: token.prefix,
+			}
+			if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) {
+				return false
+			}
+			tag_directives = append(tag_directives, value)
+		}
+
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+	}
+
+	for i := range default_tag_directives {
+		if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) {
+			return false
+		}
+	}
+
+	if version_directive_ref != nil {
+		*version_directive_ref = version_directive
+	}
+	if tag_directives_ref != nil {
+		*tag_directives_ref = tag_directives
+	}
+	return true
+}
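+
+// Illustrative sketch (editor's note, using a hypothetical document): after
+// processing the directives of
+//
+//      %YAML 1.1
+//      %TAG !e! tag:example.com,2018:
+//      ---
+//
+// version_directive holds {major: 1, minor: 1} and the returned
+// tag_directives slice holds only the explicit {handle: "!e!",
+// prefix: "tag:example.com,2018:"} entry, while parser.tag_directives also
+// receives the two defaults above (skipped when a handle is already present).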
+
+// Append a tag directive to the directives stack.
+func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool {
+	for i := range parser.tag_directives {
+		if bytes.Equal(value.handle, parser.tag_directives[i].handle) {
+			if allow_duplicates {
+				return true
+			}
+			return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark)
+		}
+	}
+
+	// [Go] I suspect the copy is unnecessary. This was likely done
+	// because there was no way to track ownership of the data.
+	value_copy := yaml_tag_directive_t{
+		handle: make([]byte, len(value.handle)),
+		prefix: make([]byte, len(value.prefix)),
+	}
+	copy(value_copy.handle, value.handle)
+	copy(value_copy.prefix, value.prefix)
+	parser.tag_directives = append(parser.tag_directives, value_copy)
+	return true
+}
diff --git a/vendor/gopkg.in/yaml.v2/readerc.go b/vendor/gopkg.in/yaml.v2/readerc.go
new file mode 100644
index 0000000..7c1f5fa
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/readerc.go
@@ -0,0 +1,412 @@
+package yaml
+
+import (
+	"io"
+)
+
+// Set the reader error and return false.
+func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
+	parser.error = yaml_READER_ERROR
+	parser.problem = problem
+	parser.problem_offset = offset
+	parser.problem_value = value
+	return false
+}
+
+// Byte order marks.
+const (
+	bom_UTF8    = "\xef\xbb\xbf"
+	bom_UTF16LE = "\xff\xfe"
+	bom_UTF16BE = "\xfe\xff"
+)
+
+// Determine the input stream encoding by checking the BOM symbol. If no BOM is
+// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
+func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
+	// Ensure that we had enough bytes in the raw buffer.
+	for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
+		if !yaml_parser_update_raw_buffer(parser) {
+			return false
+		}
+	}
+
+	// Determine the encoding.
+	buf := parser.raw_buffer
+	pos := parser.raw_buffer_pos
+	avail := len(buf) - pos
+	if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
+		parser.encoding = yaml_UTF16LE_ENCODING
+		parser.raw_buffer_pos += 2
+		parser.offset += 2
+	} else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
+		parser.encoding = yaml_UTF16BE_ENCODING
+		parser.raw_buffer_pos += 2
+		parser.offset += 2
+	} else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
+		parser.encoding = yaml_UTF8_ENCODING
+		parser.raw_buffer_pos += 3
+		parser.offset += 3
+	} else {
+		parser.encoding = yaml_UTF8_ENCODING
+	}
+	return true
+}
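+
+// For illustration (editor's note): given the first raw bytes of a stream,
+// the detection above yields
+//
+//      ff fe ...      ->  yaml_UTF16LE_ENCODING (BOM consumed)
+//      fe ff ...      ->  yaml_UTF16BE_ENCODING (BOM consumed)
+//      ef bb bf ...   ->  yaml_UTF8_ENCODING    (BOM consumed)
+//      anything else  ->  yaml_UTF8_ENCODING    (assumed, nothing consumed)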
+
+// Update the raw buffer.
+func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
+	size_read := 0
+
+	// Return if the raw buffer is full.
+	if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
+		return true
+	}
+
+	// Return on EOF.
+	if parser.eof {
+		return true
+	}
+
+	// Move the remaining bytes in the raw buffer to the beginning.
+	if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
+		copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
+	}
+	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
+	parser.raw_buffer_pos = 0
+
+	// Call the read handler to fill the buffer.
+	size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
+	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
+	if err == io.EOF {
+		parser.eof = true
+	} else if err != nil {
+		return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
+	}
+	return true
+}
+
+// Ensure that the buffer contains at least `length` characters.
+// Return true on success, false on failure.
+//
+// The length is supposed to be significantly less than the buffer size.
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
+	if parser.read_handler == nil {
+		panic("read handler must be set")
+	}
+
+	// [Go] This function was changed to guarantee the requested length size at EOF.
+	// The fact we need to do this is pretty awful, but the description above implies
+	// that this is expected to be the case, and there are tests that rely on it.
+
+	// If the EOF flag is set and the raw buffer is empty, do nothing.
+	if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
+		// [Go] ACTUALLY! Read the documentation of this function above.
+		// This is just broken. To return true, we need to have the
+		// given length in the buffer. Not doing that means every single
+		// check that calls this function to ensure the buffer has a
+		// given length would panic (in Go) or access invalid memory (in C).
+		//return true
+	}
+
+	// Return if the buffer contains enough characters.
+	if parser.unread >= length {
+		return true
+	}
+
+	// Determine the input encoding if it is not known yet.
+	if parser.encoding == yaml_ANY_ENCODING {
+		if !yaml_parser_determine_encoding(parser) {
+			return false
+		}
+	}
+
+	// Move the unread characters to the beginning of the buffer.
+	buffer_len := len(parser.buffer)
+	if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
+		copy(parser.buffer, parser.buffer[parser.buffer_pos:])
+		buffer_len -= parser.buffer_pos
+		parser.buffer_pos = 0
+	} else if parser.buffer_pos == buffer_len {
+		buffer_len = 0
+		parser.buffer_pos = 0
+	}
+
+	// Open the whole buffer for writing, and cut it before returning.
+	parser.buffer = parser.buffer[:cap(parser.buffer)]
+
+	// Fill the buffer until it has enough characters.
+	first := true
+	for parser.unread < length {
+
+		// Fill the raw buffer if necessary.
+		if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
+			if !yaml_parser_update_raw_buffer(parser) {
+				parser.buffer = parser.buffer[:buffer_len]
+				return false
+			}
+		}
+		first = false
+
+		// Decode the raw buffer.
+	inner:
+		for parser.raw_buffer_pos != len(parser.raw_buffer) {
+			var value rune
+			var width int
+
+			raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
+
+			// Decode the next character.
+			switch parser.encoding {
+			case yaml_UTF8_ENCODING:
+				// Decode a UTF-8 character.  Check RFC 3629
+				// (http://www.ietf.org/rfc/rfc3629.txt) for more details.
+				//
+				// The following table (taken from the RFC) is used for
+				// decoding.
+				//
+				//    Char. number range |        UTF-8 octet sequence
+				//      (hexadecimal)    |              (binary)
+				//   --------------------+------------------------------------
+				//   0000 0000-0000 007F | 0xxxxxxx
+				//   0000 0080-0000 07FF | 110xxxxx 10xxxxxx
+				//   0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
+				//   0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+				//
+				// Additionally, the characters in the range 0xD800-0xDFFF
+				// are prohibited as they are reserved for use with UTF-16
+				// surrogate pairs.
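+				//
+				// Worked example (editor's note): the Euro sign U+20AC is
+				// encoded as E2 82 AC.  The leading octet E2 matches
+				// 1110xxxx, so width is 3 and the value starts as 0x2; each
+				// trailing octet then contributes six bits:
+				//      (0x2<<6)+0x02 = 0x82, (0x82<<6)+0x2C = 0x20AC.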
+
+				// Determine the length of the UTF-8 sequence.
+				octet := parser.raw_buffer[parser.raw_buffer_pos]
+				switch {
+				case octet&0x80 == 0x00:
+					width = 1
+				case octet&0xE0 == 0xC0:
+					width = 2
+				case octet&0xF0 == 0xE0:
+					width = 3
+				case octet&0xF8 == 0xF0:
+					width = 4
+				default:
+					// The leading octet is invalid.
+					return yaml_parser_set_reader_error(parser,
+						"invalid leading UTF-8 octet",
+						parser.offset, int(octet))
+				}
+
+				// Check if the raw buffer contains an incomplete character.
+				if width > raw_unread {
+					if parser.eof {
+						return yaml_parser_set_reader_error(parser,
+							"incomplete UTF-8 octet sequence",
+							parser.offset, -1)
+					}
+					break inner
+				}
+
+				// Decode the leading octet.
+				switch {
+				case octet&0x80 == 0x00:
+					value = rune(octet & 0x7F)
+				case octet&0xE0 == 0xC0:
+					value = rune(octet & 0x1F)
+				case octet&0xF0 == 0xE0:
+					value = rune(octet & 0x0F)
+				case octet&0xF8 == 0xF0:
+					value = rune(octet & 0x07)
+				default:
+					value = 0
+				}
+
+				// Check and decode the trailing octets.
+				for k := 1; k < width; k++ {
+					octet = parser.raw_buffer[parser.raw_buffer_pos+k]
+
+					// Check if the octet is valid.
+					if (octet & 0xC0) != 0x80 {
+						return yaml_parser_set_reader_error(parser,
+							"invalid trailing UTF-8 octet",
+							parser.offset+k, int(octet))
+					}
+
+					// Decode the octet.
+					value = (value << 6) + rune(octet&0x3F)
+				}
+
+				// Check the length of the sequence against the value.
+				switch {
+				case width == 1:
+				case width == 2 && value >= 0x80:
+				case width == 3 && value >= 0x800:
+				case width == 4 && value >= 0x10000:
+				default:
+					return yaml_parser_set_reader_error(parser,
+						"invalid length of a UTF-8 sequence",
+						parser.offset, -1)
+				}
+
+				// Check the range of the value.
+				if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
+					return yaml_parser_set_reader_error(parser,
+						"invalid Unicode character",
+						parser.offset, int(value))
+				}
+
+			case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
+				var low, high int
+				if parser.encoding == yaml_UTF16LE_ENCODING {
+					low, high = 0, 1
+				} else {
+					low, high = 1, 0
+				}
+
+				// The UTF-16 encoding is not as simple as one might
+				// naively think.  Check RFC 2781
+				// (http://www.ietf.org/rfc/rfc2781.txt).
+				//
+				// Normally, two subsequent bytes describe a Unicode
+				// character.  However a special technique (called a
+				// surrogate pair) is used for specifying character
+				// values larger than 0xFFFF.
+				//
+				// A surrogate pair consists of two pseudo-characters:
+				//      high surrogate area (0xD800-0xDBFF)
+				//      low surrogate area (0xDC00-0xDFFF)
+				//
+				// The following formulas are used for decoding
+				// and encoding characters using surrogate pairs:
+				//
+				//  U  = U' + 0x10000   (0x01 00 00 <= U <= 0x10 FF FF)
+				//  U' = yyyyyyyyyyxxxxxxxxxx   (0 <= U' <= 0x0F FF FF)
+				//  W1 = 110110yyyyyyyyyy
+				//  W2 = 110111xxxxxxxxxx
+				//
+				// where U is the character value, W1 is the high surrogate
+				// area, W2 is the low surrogate area.
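+				//
+				// Worked example (editor's note): for U+1F600,
+				// U' = 0xF600, so W1 = 0xD800 + (0xF600>>10) = 0xD83D and
+				// W2 = 0xDC00 + (0xF600&0x3FF) = 0xDE00; the decoding below
+				// recovers 0x10000 + ((0xD83D&0x3FF)<<10) + (0xDE00&0x3FF) = 0x1F600.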
+
+				// Check for incomplete UTF-16 character.
+				if raw_unread < 2 {
+					if parser.eof {
+						return yaml_parser_set_reader_error(parser,
+							"incomplete UTF-16 character",
+							parser.offset, -1)
+					}
+					break inner
+				}
+
+				// Get the character.
+				value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
+					(rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
+
+				// Check for unexpected low surrogate area.
+				if value&0xFC00 == 0xDC00 {
+					return yaml_parser_set_reader_error(parser,
+						"unexpected low surrogate area",
+						parser.offset, int(value))
+				}
+
+				// Check for a high surrogate area.
+				if value&0xFC00 == 0xD800 {
+					width = 4
+
+					// Check for incomplete surrogate pair.
+					if raw_unread < 4 {
+						if parser.eof {
+							return yaml_parser_set_reader_error(parser,
+								"incomplete UTF-16 surrogate pair",
+								parser.offset, -1)
+						}
+						break inner
+					}
+
+					// Get the next character.
+					value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
+						(rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
+
+					// Check for a low surrogate area.
+					if value2&0xFC00 != 0xDC00 {
+						return yaml_parser_set_reader_error(parser,
+							"expected low surrogate area",
+							parser.offset+2, int(value2))
+					}
+
+					// Generate the value of the surrogate pair.
+					value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
+				} else {
+					width = 2
+				}
+
+			default:
+				panic("impossible")
+			}
+
+			// Check if the character is in the allowed range:
+			//      #x9 | #xA | #xD | [#x20-#x7E]               (8 bit)
+			//      | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD]    (16 bit)
+			//      | [#x10000-#x10FFFF]                        (32 bit)
+			switch {
+			case value == 0x09:
+			case value == 0x0A:
+			case value == 0x0D:
+			case value >= 0x20 && value <= 0x7E:
+			case value == 0x85:
+			case value >= 0xA0 && value <= 0xD7FF:
+			case value >= 0xE000 && value <= 0xFFFD:
+			case value >= 0x10000 && value <= 0x10FFFF:
+			default:
+				return yaml_parser_set_reader_error(parser,
+					"control characters are not allowed",
+					parser.offset, int(value))
+			}
+
+			// Move the raw pointers.
+			parser.raw_buffer_pos += width
+			parser.offset += width
+
+			// Finally put the character into the buffer.
+			if value <= 0x7F {
+				// 0000 0000-0000 007F . 0xxxxxxx
+				parser.buffer[buffer_len+0] = byte(value)
+				buffer_len += 1
+			} else if value <= 0x7FF {
+				// 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
+				parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
+				parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
+				buffer_len += 2
+			} else if value <= 0xFFFF {
+				// 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
+				parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
+				parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
+				parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
+				buffer_len += 3
+			} else {
+				// 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+				parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
+				parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
+				parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
+				parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
+				buffer_len += 4
+			}
+
+			parser.unread++
+		}
+
+		// On EOF, put NUL into the buffer and return.
+		if parser.eof {
+			parser.buffer[buffer_len] = 0
+			buffer_len++
+			parser.unread++
+			break
+		}
+	}
+	// [Go] Read the documentation of this function above. To return true,
+	// we need to have the given length in the buffer. Not doing that means
+	// every single check that calls this function to ensure the buffer has
+	// a given length would panic (in Go) or access invalid memory (in C).
+	// This happens here due to the EOF above breaking early.
+	for buffer_len < length {
+		parser.buffer[buffer_len] = 0
+		buffer_len++
+	}
+	parser.buffer = parser.buffer[:buffer_len]
+	return true
+}
diff --git a/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/gopkg.in/yaml.v2/resolve.go
new file mode 100644
index 0000000..6c151db
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/resolve.go
@@ -0,0 +1,258 @@
+package yaml
+
+import (
+	"encoding/base64"
+	"math"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+)
+
+type resolveMapItem struct {
+	value interface{}
+	tag   string
+}
+
+var resolveTable = make([]byte, 256)
+var resolveMap = make(map[string]resolveMapItem)
+
+func init() {
+	t := resolveTable
+	t[int('+')] = 'S' // Sign
+	t[int('-')] = 'S'
+	for _, c := range "0123456789" {
+		t[int(c)] = 'D' // Digit
+	}
+	for _, c := range "yYnNtTfFoO~" {
+		t[int(c)] = 'M' // In map
+	}
+	t[int('.')] = '.' // Float (potentially in map)
+
+	var resolveMapList = []struct {
+		v   interface{}
+		tag string
+		l   []string
+	}{
+		{true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}},
+		{true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}},
+		{true, yaml_BOOL_TAG, []string{"on", "On", "ON"}},
+		{false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}},
+		{false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}},
+		{false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}},
+		{nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}},
+		{math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}},
+		{math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}},
+		{math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}},
+		{math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}},
+		{"<<", yaml_MERGE_TAG, []string{"<<"}},
+	}
+
+	m := resolveMap
+	for _, item := range resolveMapList {
+		for _, s := range item.l {
+			m[s] = resolveMapItem{item.v, item.tag}
+		}
+	}
+}
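+
+// Illustrative examples (editor's note): with the tables built above,
+// resolve (defined below) maps untagged plain scalars as
+//
+//      resolve("", "yes")   ->  (yaml_BOOL_TAG,  true)
+//      resolve("", "~")     ->  (yaml_NULL_TAG,  nil)
+//      resolve("", ".inf")  ->  (yaml_FLOAT_TAG, math.Inf(+1))
+//      resolve("", "0x1F")  ->  (yaml_INT_TAG,   31)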
+
+const longTagPrefix = "tag:yaml.org,2002:"
+
+func shortTag(tag string) string {
+	// TODO This can easily be made faster and produce less garbage.
+	if strings.HasPrefix(tag, longTagPrefix) {
+		return "!!" + tag[len(longTagPrefix):]
+	}
+	return tag
+}
+
+func longTag(tag string) string {
+	if strings.HasPrefix(tag, "!!") {
+		return longTagPrefix + tag[2:]
+	}
+	return tag
+}
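+
+// For example (editor's note): shortTag("tag:yaml.org,2002:str") returns
+// "!!str" and longTag("!!str") returns "tag:yaml.org,2002:str"; any other
+// tag passes through unchanged.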
+
+func resolvableTag(tag string) bool {
+	switch tag {
+	case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG:
+		return true
+	}
+	return false
+}
+
+var yamlStyleFloat = regexp.MustCompile(`^[-+]?[0-9]*\.?[0-9]+([eE][-+][0-9]+)?$`)
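+
+// Editor's note on the pattern above: it accepts "685.230", ".5" and
+// "6.8523015e+5", but rejects "1e5" (the exponent sign is mandatory here)
+// and "0x1F"; strings it rejects are simply not treated as floats by
+// resolve below.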
+
+func resolve(tag string, in string) (rtag string, out interface{}) {
+	if !resolvableTag(tag) {
+		return tag, in
+	}
+
+	defer func() {
+		switch tag {
+		case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG:
+			return
+		case yaml_FLOAT_TAG:
+			if rtag == yaml_INT_TAG {
+				switch v := out.(type) {
+				case int64:
+					rtag = yaml_FLOAT_TAG
+					out = float64(v)
+					return
+				case int:
+					rtag = yaml_FLOAT_TAG
+					out = float64(v)
+					return
+				}
+			}
+		}
+		failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
+	}()
+
+	// Any data is accepted as a !!str or !!binary.
+	// Otherwise, the prefix is enough of a hint about what it might be.
+	hint := byte('N')
+	if in != "" {
+		hint = resolveTable[in[0]]
+	}
+	if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG {
+		// Handle things we can lookup in a map.
+		if item, ok := resolveMap[in]; ok {
+			return item.tag, item.value
+		}
+
+		// Base 60 floats are a bad idea, were dropped in YAML 1.2, and
+		// are purposefully unsupported here. They're still quoted on
+		// the way out for compatibility with other parsers, though.
+
+		switch hint {
+		case 'M':
+			// We've already checked the map above.
+
+		case '.':
+			// Not in the map, so maybe a normal float.
+			floatv, err := strconv.ParseFloat(in, 64)
+			if err == nil {
+				return yaml_FLOAT_TAG, floatv
+			}
+
+		case 'D', 'S':
+			// Int, float, or timestamp.
+			// Only try values as a timestamp if the value is unquoted or there's an explicit
+			// !!timestamp tag.
+			if tag == "" || tag == yaml_TIMESTAMP_TAG {
+				t, ok := parseTimestamp(in)
+				if ok {
+					return yaml_TIMESTAMP_TAG, t
+				}
+			}
+
+			plain := strings.Replace(in, "_", "", -1)
+			intv, err := strconv.ParseInt(plain, 0, 64)
+			if err == nil {
+				if intv == int64(int(intv)) {
+					return yaml_INT_TAG, int(intv)
+				} else {
+					return yaml_INT_TAG, intv
+				}
+			}
+			uintv, err := strconv.ParseUint(plain, 0, 64)
+			if err == nil {
+				return yaml_INT_TAG, uintv
+			}
+			if yamlStyleFloat.MatchString(plain) {
+				floatv, err := strconv.ParseFloat(plain, 64)
+				if err == nil {
+					return yaml_FLOAT_TAG, floatv
+				}
+			}
+			if strings.HasPrefix(plain, "0b") {
+				intv, err := strconv.ParseInt(plain[2:], 2, 64)
+				if err == nil {
+					if intv == int64(int(intv)) {
+						return yaml_INT_TAG, int(intv)
+					} else {
+						return yaml_INT_TAG, intv
+					}
+				}
+				uintv, err := strconv.ParseUint(plain[2:], 2, 64)
+				if err == nil {
+					return yaml_INT_TAG, uintv
+				}
+			} else if strings.HasPrefix(plain, "-0b") {
+				intv, err := strconv.ParseInt("-"+plain[3:], 2, 64)
+				if err == nil {
+					if intv == int64(int(intv)) {
+						return yaml_INT_TAG, int(intv)
+					} else {
+						return yaml_INT_TAG, intv
+					}
+				}
+			}
+		default:
+			panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
+		}
+	}
+	return yaml_STR_TAG, in
+}
+
+// encodeBase64 encodes s as base64 that is broken up into multiple lines
+// as appropriate for the resulting length.
+func encodeBase64(s string) string {
+	const lineLen = 70
+	encLen := base64.StdEncoding.EncodedLen(len(s))
+	lines := encLen/lineLen + 1
+	buf := make([]byte, encLen*2+lines)
+	in := buf[0:encLen]
+	out := buf[encLen:]
+	base64.StdEncoding.Encode(in, []byte(s))
+	k := 0
+	for i := 0; i < len(in); i += lineLen {
+		j := i + lineLen
+		if j > len(in) {
+			j = len(in)
+		}
+		k += copy(out[k:], in[i:j])
+		if lines > 1 {
+			out[k] = '\n'
+			k++
+		}
+	}
+	return string(out[:k])
+}
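+
+// For example (editor's note): a 100-byte input encodes to 136 base64
+// characters, which the loop above emits as a 70-character line and a
+// 66-character line, each followed by '\n'; an input that encodes within
+// lineLen stays on a single line with no trailing newline.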
+
+// This is a subset of the formats allowed by the regular expression
+// defined at http://yaml.org/type/timestamp.html.
+var allowedTimestampFormats = []string{
+	"2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields.
+	"2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
+	"2006-1-2 15:4:5.999999999",       // space separated with no time zone
+	"2006-1-2",                        // date only
+	// Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5"
+	// from the set of examples.
+}
+
+// parseTimestamp parses s as a timestamp string and
+// returns the timestamp and reports whether it succeeded.
+// Timestamp formats are defined at http://yaml.org/type/timestamp.html
+func parseTimestamp(s string) (time.Time, bool) {
+	// TODO write code to check all the formats supported by
+	// http://yaml.org/type/timestamp.html instead of using time.Parse.
+
+	// Quick check: all date formats start with YYYY-.
+	i := 0
+	for ; i < len(s); i++ {
+		if c := s[i]; c < '0' || c > '9' {
+			break
+		}
+	}
+	if i != 4 || i == len(s) || s[i] != '-' {
+		return time.Time{}, false
+	}
+	for _, format := range allowedTimestampFormats {
+		if t, err := time.Parse(format, s); err == nil {
+			return t, true
+		}
+	}
+	return time.Time{}, false
+}
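+
+// For example (editor's note): parseTimestamp("2001-12-14") and
+// parseTimestamp("2001-12-15T02:59:43.1Z") both succeed, while
+// parseTimestamp("12:30") fails the YYYY- prefix check above and
+// parseTimestamp("2001-12-14 21:59:43.10 -5") fails as noted in the
+// format list.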
diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/gopkg.in/yaml.v2/scannerc.go
new file mode 100644
index 0000000..077fd1d
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/scannerc.go
@@ -0,0 +1,2696 @@
+package yaml
+
+import (
+	"bytes"
+	"fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/1.2/spec.html).  We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// Parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, on the contrary,
+// is a straightforward implementation of a recursive-descent parser (or an
+// LL(1) parser, as it is usually called).
+//
+// Actually there are two aspects of Scanning that might be called "clever";
+// the rest is quite straightforward.  The issues are "block collection start"
+// and "simple keys".  Both issues are explained below in detail.
+//
+// Here the Scanning step is explained and implemented.  We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+//      STREAM-START(encoding)          # The stream start.
+//      STREAM-END                      # The stream end.
+//      VERSION-DIRECTIVE(major,minor)  # The '%YAML' directive.
+//      TAG-DIRECTIVE(handle,prefix)    # The '%TAG' directive.
+//      DOCUMENT-START                  # '---'
+//      DOCUMENT-END                    # '...'
+//      BLOCK-SEQUENCE-START            # Indentation increase denoting a block
+//      BLOCK-MAPPING-START             # sequence or a block mapping.
+//      BLOCK-END                       # Indentation decrease.
+//      FLOW-SEQUENCE-START             # '['
+//      FLOW-SEQUENCE-END               # ']'
+//      FLOW-MAPPING-START              # '{'
+//      FLOW-MAPPING-END                # '}'
+//      BLOCK-ENTRY                     # '-'
+//      FLOW-ENTRY                      # ','
+//      KEY                             # '?' or nothing (simple keys).
+//      VALUE                           # ':'
+//      ALIAS(anchor)                   # '*anchor'
+//      ANCHOR(anchor)                  # '&anchor'
+//      TAG(handle,suffix)              # '!handle!suffix'
+//      SCALAR(value,style)             # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+//      STREAM-START(encoding)
+//      STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for tags:
+//
+//      VERSION-DIRECTIVE(major,minor)
+//      TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+//      %YAML   1.1
+//      %TAG    !   !foo
+//      %TAG    !yaml!  tag:yaml.org,2002:
+//      ---
+//
+// The corresponding sequence of tokens:
+//
+//      STREAM-START(utf-8)
+//      VERSION-DIRECTIVE(1,1)
+//      TAG-DIRECTIVE("!","!foo")
+//      TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+//      DOCUMENT-START
+//      STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+//      DOCUMENT-START
+//      DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
+// produced.
+//
+// In the following examples, we present whole documents together with the
+// produced tokens.
+//
+//      1. An implicit document:
+//
+//          'a scalar'
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          SCALAR("a scalar",single-quoted)
+//          STREAM-END
+//
+//      2. An explicit document:
+//
+//          ---
+//          'a scalar'
+//          ...
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          DOCUMENT-START
+//          SCALAR("a scalar",single-quoted)
+//          DOCUMENT-END
+//          STREAM-END
+//
+//      3. Several documents in a stream:
+//
+//          'a scalar'
+//          ---
+//          'another scalar'
+//          ---
+//          'yet another scalar'
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          SCALAR("a scalar",single-quoted)
+//          DOCUMENT-START
+//          SCALAR("another scalar",single-quoted)
+//          DOCUMENT-START
+//          SCALAR("yet another scalar",single-quoted)
+//          STREAM-END
+//
+// We have already introduced the SCALAR token above.  The following tokens are
+// used to describe aliases, anchors, tags, and scalars:
+//
+//      ALIAS(anchor)
+//      ANCHOR(anchor)
+//      TAG(handle,suffix)
+//      SCALAR(value,style)
+//
+// The following series of examples illustrates the usage of these tokens:
+//
+//      1. A recursive sequence:
+//
+//          &A [ *A ]
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          ANCHOR("A")
+//          FLOW-SEQUENCE-START
+//          ALIAS("A")
+//          FLOW-SEQUENCE-END
+//          STREAM-END
+//
+//      2. A tagged scalar:
+//
+//          !!float "3.14"  # A good approximation.
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          TAG("!!","float")
+//          SCALAR("3.14",double-quoted)
+//          STREAM-END
+//
+//      3. Various scalar styles:
+//
+//          --- # Implicit empty plain scalars do not produce tokens.
+//          --- a plain scalar
+//          --- 'a single-quoted scalar'
+//          --- "a double-quoted scalar"
+//          --- |-
+//            a literal scalar
+//          --- >-
+//            a folded
+//            scalar
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          DOCUMENT-START
+//          DOCUMENT-START
+//          SCALAR("a plain scalar",plain)
+//          DOCUMENT-START
+//          SCALAR("a single-quoted scalar",single-quoted)
+//          DOCUMENT-START
+//          SCALAR("a double-quoted scalar",double-quoted)
+//          DOCUMENT-START
+//          SCALAR("a literal scalar",literal)
+//          DOCUMENT-START
+//          SCALAR("a folded scalar",folded)
+//          STREAM-END
+//
+// Now it's time to review collection-related tokens. We will start with
+// flow collections:
+//
+//      FLOW-SEQUENCE-START
+//      FLOW-SEQUENCE-END
+//      FLOW-MAPPING-START
+//      FLOW-MAPPING-END
+//      FLOW-ENTRY
+//      KEY
+//      VALUE
+//
+// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
+// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
+// correspondingly.  FLOW-ENTRY represents the ',' indicator.  Finally, the
+// indicators '?' and ':', which are used for denoting mapping keys and values,
+// are represented by the KEY and VALUE tokens.
+//
+// The following examples show flow collections:
+//
+//      1. A flow sequence:
+//
+//          [item 1, item 2, item 3]
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          FLOW-SEQUENCE-START
+//          SCALAR("item 1",plain)
+//          FLOW-ENTRY
+//          SCALAR("item 2",plain)
+//          FLOW-ENTRY
+//          SCALAR("item 3",plain)
+//          FLOW-SEQUENCE-END
+//          STREAM-END
+//
+//      2. A flow mapping:
+//
+//          {
+//              a simple key: a value,  # Note that the KEY token is produced.
+//              ? a complex key: another value,
+//          }
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          FLOW-MAPPING-START
+//          KEY
+//          SCALAR("a simple key",plain)
+//          VALUE
+//          SCALAR("a value",plain)
+//          FLOW-ENTRY
+//          KEY
+//          SCALAR("a complex key",plain)
+//          VALUE
+//          SCALAR("another value",plain)
+//          FLOW-ENTRY
+//          FLOW-MAPPING-END
+//          STREAM-END
+//
+// A simple key is a key which is not denoted by the '?' indicator.  Note that
+// the Scanner still produces the KEY token whenever it encounters a simple key.
+//
+// For scanning block collections, the following tokens are used (note that we
+// repeat KEY and VALUE here):
+//
+//      BLOCK-SEQUENCE-START
+//      BLOCK-MAPPING-START
+//      BLOCK-END
+//      BLOCK-ENTRY
+//      KEY
+//      VALUE
+//
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote the
+// indentation increase that precedes a block collection (cf. the INDENT token
+// in Python).  The token BLOCK-END denotes the indentation decrease that ends
+// a block collection (cf. the DEDENT token in Python).  However YAML has some
+// syntax peculiarities that make detection of these tokens more complex.
+//
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+// '-', '?', and ':' correspondingly.
+//
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+//
+//      1. Block sequences:
+//
+//          - item 1
+//          - item 2
+//          -
+//            - item 3.1
+//            - item 3.2
+//          -
+//            key 1: value 1
+//            key 2: value 2
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 2",plain)
+//          BLOCK-ENTRY
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 3.1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 3.2",plain)
+//          BLOCK-END
+//          BLOCK-ENTRY
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("key 1",plain)
+//          VALUE
+//          SCALAR("value 1",plain)
+//          KEY
+//          SCALAR("key 2",plain)
+//          VALUE
+//          SCALAR("value 2",plain)
+//          BLOCK-END
+//          BLOCK-END
+//          STREAM-END
+//
+//      2. Block mappings:
+//
+//          a simple key: a value   # The KEY token is produced here.
+//          ? a complex key
+//          : another value
+//          a mapping:
+//            key 1: value 1
+//            key 2: value 2
+//          a sequence:
+//            - item 1
+//            - item 2
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("a simple key",plain)
+//          VALUE
+//          SCALAR("a value",plain)
+//          KEY
+//          SCALAR("a complex key",plain)
+//          VALUE
+//          SCALAR("another value",plain)
+//          KEY
+//          SCALAR("a mapping",plain)
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("key 1",plain)
+//          VALUE
+//          SCALAR("value 1",plain)
+//          KEY
+//          SCALAR("key 2",plain)
+//          VALUE
+//          SCALAR("value 2",plain)
+//          BLOCK-END
+//          KEY
+//          SCALAR("a sequence",plain)
+//          VALUE
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 2",plain)
+//          BLOCK-END
+//          BLOCK-END
+//          STREAM-END
+//
+// YAML does not always require a new block collection to start on a new
+// line.  If the current line contains only '-', '?', and ':' indicators, a new
+// block collection may start at the current line.  The following examples
+// illustrate this case:
+//
+//      1. Collections in a sequence:
+//
+//          - - item 1
+//            - item 2
+//          - key 1: value 1
+//            key 2: value 2
+//          - ? complex key
+//            : complex value
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 2",plain)
+//          BLOCK-END
+//          BLOCK-ENTRY
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("key 1",plain)
+//          VALUE
+//          SCALAR("value 1",plain)
+//          KEY
+//          SCALAR("key 2",plain)
+//          VALUE
+//          SCALAR("value 2",plain)
+//          BLOCK-END
+//          BLOCK-ENTRY
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("complex key")
+//          VALUE
+//          SCALAR("complex value")
+//          BLOCK-END
+//          BLOCK-END
+//          STREAM-END
+//
+//      2. Collections in a mapping:
+//
+//          ? a sequence
+//          : - item 1
+//            - item 2
+//          ? a mapping
+//          : key 1: value 1
+//            key 2: value 2
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("a sequence",plain)
+//          VALUE
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 2",plain)
+//          BLOCK-END
+//          KEY
+//          SCALAR("a mapping",plain)
+//          VALUE
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("key 1",plain)
+//          VALUE
+//          SCALAR("value 1",plain)
+//          KEY
+//          SCALAR("key 2",plain)
+//          VALUE
+//          SCALAR("value 2",plain)
+//          BLOCK-END
+//          BLOCK-END
+//          STREAM-END
+//
+// YAML also permits non-indented sequences if they are included in a block
+// mapping.  In this case, the token BLOCK-SEQUENCE-START is not produced:
+//
+//      key:
+//      - item 1    # BLOCK-SEQUENCE-START is NOT produced here.
+//      - item 2
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      BLOCK-MAPPING-START
+//      KEY
+//      SCALAR("key",plain)
+//      VALUE
+//      BLOCK-ENTRY
+//      SCALAR("item 1",plain)
+//      BLOCK-ENTRY
+//      SCALAR("item 2",plain)
+//      BLOCK-END
+//
+
+// Ensure that the buffer contains the required number of characters.
+// Return true on success, false on failure (reader error or memory error).
+func cache(parser *yaml_parser_t, length int) bool {
+	// [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B)
+	return parser.unread >= length || yaml_parser_update_buffer(parser, length)
+}
+
+// Advance the buffer pointer.
+func skip(parser *yaml_parser_t) {
+	parser.mark.index++
+	parser.mark.column++
+	parser.unread--
+	parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+}
+
+func skip_line(parser *yaml_parser_t) {
+	if is_crlf(parser.buffer, parser.buffer_pos) {
+		parser.mark.index += 2
+		parser.mark.column = 0
+		parser.mark.line++
+		parser.unread -= 2
+		parser.buffer_pos += 2
+	} else if is_break(parser.buffer, parser.buffer_pos) {
+		parser.mark.index++
+		parser.mark.column = 0
+		parser.mark.line++
+		parser.unread--
+		parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+	}
+}
+
+// Copy a character to a string buffer and advance pointers.
+func read(parser *yaml_parser_t, s []byte) []byte {
+	w := width(parser.buffer[parser.buffer_pos])
+	if w == 0 {
+		panic("invalid character sequence")
+	}
+	if len(s) == 0 {
+		s = make([]byte, 0, 32)
+	}
+	if w == 1 && len(s)+w <= cap(s) {
+		s = s[:len(s)+1]
+		s[len(s)-1] = parser.buffer[parser.buffer_pos]
+		parser.buffer_pos++
+	} else {
+		s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...)
+		parser.buffer_pos += w
+	}
+	parser.mark.index++
+	parser.mark.column++
+	parser.unread--
+	return s
+}
+
+// Copy a line break character to a string buffer and advance pointers.
+func read_line(parser *yaml_parser_t, s []byte) []byte {
+	buf := parser.buffer
+	pos := parser.buffer_pos
+	switch {
+	case buf[pos] == '\r' && buf[pos+1] == '\n':
+		// CR LF . LF
+		s = append(s, '\n')
+		parser.buffer_pos += 2
+		parser.mark.index++
+		parser.unread--
+	case buf[pos] == '\r' || buf[pos] == '\n':
+		// CR|LF . LF
+		s = append(s, '\n')
+		parser.buffer_pos += 1
+	case buf[pos] == '\xC2' && buf[pos+1] == '\x85':
+		// NEL . LF
+		s = append(s, '\n')
+		parser.buffer_pos += 2
+	case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'):
+		// LS|PS . LS|PS
+		s = append(s, buf[parser.buffer_pos:pos+3]...)
+		parser.buffer_pos += 3
+	default:
+		return s
+	}
+	parser.mark.index++
+	parser.mark.column = 0
+	parser.mark.line++
+	parser.unread--
+	return s
+}
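+
+// For illustration (editor's note): read_line normalizes every YAML line
+// break to a single '\n' in the output, so "\r\n", "\r", "\n" and the NEL
+// sequence C2 85 each append '\n', while the LS/PS sequences
+// (E2 80 A8 / E2 80 A9) are copied through unchanged.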
+
+// Get the next token.
+func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool {
+	// Erase the token object.
+	*token = yaml_token_t{} // [Go] Is this necessary?
+
+	// No tokens after STREAM-END or error.
+	if parser.stream_end_produced || parser.error != yaml_NO_ERROR {
+		return true
+	}
+
+	// Ensure that the tokens queue contains enough tokens.
+	if !parser.token_available {
+		if !yaml_parser_fetch_more_tokens(parser) {
+			return false
+		}
+	}
+
+	// Fetch the next token from the queue.
+	*token = parser.tokens[parser.tokens_head]
+	parser.tokens_head++
+	parser.tokens_parsed++
+	parser.token_available = false
+
+	if token.typ == yaml_STREAM_END_TOKEN {
+		parser.stream_end_produced = true
+	}
+	return true
+}
+
+// Set the scanner error and return false.
+func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool {
+	parser.error = yaml_SCANNER_ERROR
+	parser.context = context
+	parser.context_mark = context_mark
+	parser.problem = problem
+	parser.problem_mark = parser.mark
+	return false
+}
+
+func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool {
+	context := "while parsing a tag"
+	if directive {
+		context = "while parsing a %TAG directive"
+	}
+	return yaml_parser_set_scanner_error(parser, context, context_mark, problem)
+}
+
+func trace(args ...interface{}) func() {
+	pargs := append([]interface{}{"+++"}, args...)
+	fmt.Println(pargs...)
+	pargs = append([]interface{}{"---"}, args...)
+	return func() { fmt.Println(pargs...) }
+}
+
+// Ensure that the tokens queue contains at least one token which can be
+// returned to the Parser.
+func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
+	// While we need more tokens to fetch, do it.
+	for {
+		// Check if we really need to fetch more tokens.
+		need_more_tokens := false
+
+		if parser.tokens_head == len(parser.tokens) {
+			// Queue is empty.
+			need_more_tokens = true
+		} else {
+			// Check if any potential simple key may occupy the head position.
+			if !yaml_parser_stale_simple_keys(parser) {
+				return false
+			}
+
+			for i := range parser.simple_keys {
+				simple_key := &parser.simple_keys[i]
+				if simple_key.possible && simple_key.token_number == parser.tokens_parsed {
+					need_more_tokens = true
+					break
+				}
+			}
+		}
+
+		// We are finished.
+		if !need_more_tokens {
+			break
+		}
+		// Fetch the next token.
+		if !yaml_parser_fetch_next_token(parser) {
+			return false
+		}
+	}
+
+	parser.token_available = true
+	return true
+}
+
+// The dispatcher for token fetchers.
+func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool {
+	// Ensure that the buffer is initialized.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	// Check if we just started scanning.  Fetch STREAM-START then.
+	if !parser.stream_start_produced {
+		return yaml_parser_fetch_stream_start(parser)
+	}
+
+	// Eat whitespaces and comments until we reach the next token.
+	if !yaml_parser_scan_to_next_token(parser) {
+		return false
+	}
+
+	// Remove obsolete potential simple keys.
+	if !yaml_parser_stale_simple_keys(parser) {
+		return false
+	}
+
+	// Check the indentation level against the current column.
+	if !yaml_parser_unroll_indent(parser, parser.mark.column) {
+		return false
+	}
+
+	// Ensure that the buffer contains at least 4 characters.  4 is the length
+	// of the longest indicators ('--- ' and '... ').
+	if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+		return false
+	}
+
+	// Is it the end of the stream?
+	if is_z(parser.buffer, parser.buffer_pos) {
+		return yaml_parser_fetch_stream_end(parser)
+	}
+
+	// Is it a directive?
+	if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' {
+		return yaml_parser_fetch_directive(parser)
+	}
+
+	buf := parser.buffer
+	pos := parser.buffer_pos
+
+	// Is it the document start indicator?
+	if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) {
+		return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN)
+	}
+
+	// Is it the document end indicator?
+	if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) {
+		return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN)
+	}
+
+	// Is it the flow sequence start indicator?
+	if buf[pos] == '[' {
+		return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN)
+	}
+
+	// Is it the flow mapping start indicator?
+	if parser.buffer[parser.buffer_pos] == '{' {
+		return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN)
+	}
+
+	// Is it the flow sequence end indicator?
+	if parser.buffer[parser.buffer_pos] == ']' {
+		return yaml_parser_fetch_flow_collection_end(parser,
+			yaml_FLOW_SEQUENCE_END_TOKEN)
+	}
+
+	// Is it the flow mapping end indicator?
+	if parser.buffer[parser.buffer_pos] == '}' {
+		return yaml_parser_fetch_flow_collection_end(parser,
+			yaml_FLOW_MAPPING_END_TOKEN)
+	}
+
+	// Is it the flow entry indicator?
+	if parser.buffer[parser.buffer_pos] == ',' {
+		return yaml_parser_fetch_flow_entry(parser)
+	}
+
+	// Is it the block entry indicator?
+	if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) {
+		return yaml_parser_fetch_block_entry(parser)
+	}
+
+	// Is it the key indicator?
+	if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+		return yaml_parser_fetch_key(parser)
+	}
+
+	// Is it the value indicator?
+	if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+		return yaml_parser_fetch_value(parser)
+	}
+
+	// Is it an alias?
+	if parser.buffer[parser.buffer_pos] == '*' {
+		return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN)
+	}
+
+	// Is it an anchor?
+	if parser.buffer[parser.buffer_pos] == '&' {
+		return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN)
+	}
+
+	// Is it a tag?
+	if parser.buffer[parser.buffer_pos] == '!' {
+		return yaml_parser_fetch_tag(parser)
+	}
+
+	// Is it a literal scalar?
+	if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 {
+		return yaml_parser_fetch_block_scalar(parser, true)
+	}
+
+	// Is it a folded scalar?
+	if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 {
+		return yaml_parser_fetch_block_scalar(parser, false)
+	}
+
+	// Is it a single-quoted scalar?
+	if parser.buffer[parser.buffer_pos] == '\'' {
+		return yaml_parser_fetch_flow_scalar(parser, true)
+	}
+
+	// Is it a double-quoted scalar?
+	if parser.buffer[parser.buffer_pos] == '"' {
+		return yaml_parser_fetch_flow_scalar(parser, false)
+	}
+
+	// Is it a plain scalar?
+	//
+	// A plain scalar may start with any non-blank characters except
+	//
+	//      '-', '?', ':', ',', '[', ']', '{', '}',
+	//      '#', '&', '*', '!', '|', '>', '\'', '\"',
+	//      '%', '@', '`'.
+	//
+	// In the block context (and, for the '-' indicator, in the flow context
+	// too), it may also start with the characters
+	//
+	//      '-', '?', ':'
+	//
+	// if it is followed by a non-space character.
+	//
+	// The last rule is more restrictive than the specification requires.
+	// [Go] Make this logic more reasonable.
+	//switch parser.buffer[parser.buffer_pos] {
+	//case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`':
+	//}
+	if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' ||
+		parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' ||
+		parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' ||
+		parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+		parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' ||
+		parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' ||
+		parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' ||
+		parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' ||
+		parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' ||
+		parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') ||
+		(parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) ||
+		(parser.flow_level == 0 &&
+			(parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') &&
+			!is_blankz(parser.buffer, parser.buffer_pos+1)) {
+		return yaml_parser_fetch_plain_scalar(parser)
+	}
+
+	// If we don't determine the token type so far, it is an error.
+	return yaml_parser_set_scanner_error(parser,
+		"while scanning for the next token", parser.mark,
+		"found character that cannot start any token")
+}
+
+// Check the list of potential simple keys and remove the positions that
+// cannot contain simple keys anymore.
+func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool {
+	// Check for a potential simple key for each flow level.
+	for i := range parser.simple_keys {
+		simple_key := &parser.simple_keys[i]
+
+		// The specification requires that a simple key
+		//
+		//  - is limited to a single line,
+		//  - is shorter than 1024 characters.
+		if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) {
+
+			// Check if the potential simple key to be removed is required.
+			if simple_key.required {
+				return yaml_parser_set_scanner_error(parser,
+					"while scanning a simple key", simple_key.mark,
+					"could not find expected ':'")
+			}
+			simple_key.possible = false
+		}
+	}
+	return true
+}
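+
+// For example (editor's note): in the flow mapping "{ a: 1 }", "a" is a
+// potential simple key; had the scanner reached a new line, or moved more
+// than 1024 characters past the key's mark, before finding ':', the check
+// above would discard the candidate (or report an error if it was required).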
+
+// Check if a simple key may start at the current position and add it if
+// needed.
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
+	// A simple key is required at the current position if the scanner is in
+	// the block context and the current column coincides with the indentation
+	// level.
+
+	required := parser.flow_level == 0 && parser.indent == parser.mark.column
+
+	//
+	// If the current position may start a simple key, save it.
+	//
+	if parser.simple_key_allowed {
+		simple_key := yaml_simple_key_t{
+			possible:     true,
+			required:     required,
+			token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+		}
+		simple_key.mark = parser.mark
+
+		if !yaml_parser_remove_simple_key(parser) {
+			return false
+		}
+		parser.simple_keys[len(parser.simple_keys)-1] = simple_key
+	}
+	return true
+}
+
+// Remove a potential simple key at the current flow level.
+func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
+	i := len(parser.simple_keys) - 1
+	if parser.simple_keys[i].possible {
+		// If the key is required, it is an error.
+		if parser.simple_keys[i].required {
+			return yaml_parser_set_scanner_error(parser,
+				"while scanning a simple key", parser.simple_keys[i].mark,
+				"could not find expected ':'")
+		}
+	}
+	// Remove the key from the stack.
+	parser.simple_keys[i].possible = false
+	return true
+}
+
+// Increase the flow level and resize the simple key list if needed.
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
+	// Reset the simple key on the next level.
+	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+	// Increase the flow level.
+	parser.flow_level++
+	return true
+}
+
+// Decrease the flow level.
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+	if parser.flow_level > 0 {
+		parser.flow_level--
+		parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1]
+	}
+	return true
+}
+
+// Push the current indentation level to the stack and set the new level if
+// the current column is greater than the indentation level.  In this case,
+// append or insert the specified token into the token queue.
+func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
+	// In the flow context, do nothing.
+	if parser.flow_level > 0 {
+		return true
+	}
+
+	if parser.indent < column {
+		// Push the current indentation level to the stack and set the new
+		// indentation level.
+		parser.indents = append(parser.indents, parser.indent)
+		parser.indent = column
+
+		// Create a token and insert it into the queue.
+		token := yaml_token_t{
+			typ:        typ,
+			start_mark: mark,
+			end_mark:   mark,
+		}
+		if number > -1 {
+			number -= parser.tokens_parsed
+		}
+		yaml_insert_token(parser, number, &token)
+	}
+	return true
+}
+
+// Pop indentation levels from the indents stack until the current level
+// becomes less than or equal to the column.  For each popped level, append
+// the BLOCK-END token.
+func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool {
+	// In the flow context, do nothing.
+	if parser.flow_level > 0 {
+		return true
+	}
+
+	// Loop through the indentation levels in the stack.
+	for parser.indent > column {
+		// Create a token and append it to the queue.
+		token := yaml_token_t{
+			typ:        yaml_BLOCK_END_TOKEN,
+			start_mark: parser.mark,
+			end_mark:   parser.mark,
+		}
+		yaml_insert_token(parser, -1, &token)
+
+		// Pop the indentation level.
+		parser.indent = parser.indents[len(parser.indents)-1]
+		parser.indents = parser.indents[:len(parser.indents)-1]
+	}
+	return true
+}
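+
+// For illustration (editor's note): while scanning
+//
+//      a:
+//        b: c
+//      d: e
+//
+// the indentation level grows -1 -> 0 -> 2; when the scanner reaches "d" in
+// column 0, yaml_parser_unroll_indent(parser, 0) pops the level 2 and emits
+// a single BLOCK-END token closing the inner mapping.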
+
+// Initialize the scanner and produce the STREAM-START token.
+func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
+
+	// Set the initial indentation.
+	parser.indent = -1
+
+	// Initialize the simple key stack.
+	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+	// A simple key is allowed at the beginning of the stream.
+	parser.simple_key_allowed = true
+
+	// We have started.
+	parser.stream_start_produced = true
+
+	// Create the STREAM-START token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_STREAM_START_TOKEN,
+		start_mark: parser.mark,
+		end_mark:   parser.mark,
+		encoding:   parser.encoding,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the STREAM-END token and shut down the scanner.
+func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool {
+
+	// Force new line.
+	if parser.mark.column != 0 {
+		parser.mark.column = 0
+		parser.mark.line++
+	}
+
+	// Reset the indentation level.
+	if !yaml_parser_unroll_indent(parser, -1) {
+		return false
+	}
+
+	// Reset simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	parser.simple_key_allowed = false
+
+	// Create the STREAM-END token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_STREAM_END_TOKEN,
+		start_mark: parser.mark,
+		end_mark:   parser.mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
+func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
+	// Reset the indentation level.
+	if !yaml_parser_unroll_indent(parser, -1) {
+		return false
+	}
+
+	// Reset simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	parser.simple_key_allowed = false
+
+	// Create the YAML-DIRECTIVE or TAG-DIRECTIVE token.
+	token := yaml_token_t{}
+	if !yaml_parser_scan_directive(parser, &token) {
+		return false
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the DOCUMENT-START or DOCUMENT-END token.
+func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// Reset the indentation level.
+	if !yaml_parser_unroll_indent(parser, -1) {
+		return false
+	}
+
+	// Reset simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	parser.simple_key_allowed = false
+
+	// Consume the token.
+	start_mark := parser.mark
+
+	skip(parser)
+	skip(parser)
+	skip(parser)
+
+	end_mark := parser.mark
+
+	// Create the DOCUMENT-START or DOCUMENT-END token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// The indicators '[' and '{' may start a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// Increase the flow level.
+	if !yaml_parser_increase_flow_level(parser) {
+		return false
+	}
+
+	// A simple key may follow the indicators '[' and '{'.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// Reset any potential simple key on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Decrease the flow level.
+	if !yaml_parser_decrease_flow_level(parser) {
+		return false
+	}
+
+	// No simple keys after the indicators ']' and '}'.
+	parser.simple_key_allowed = false
+
+	// Consume the token.
+
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-ENTRY token.
+func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after ','.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-ENTRY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_FLOW_ENTRY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the BLOCK-ENTRY token.
+func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
+	// Check if the scanner is in the block context.
+	if parser.flow_level == 0 {
+		// Check if we are allowed to start a new entry.
+		if !parser.simple_key_allowed {
+			return yaml_parser_set_scanner_error(parser, "", parser.mark,
+				"block sequence entries are not allowed in this context")
+		}
+		// Add the BLOCK-SEQUENCE-START token if needed.
+		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
+			return false
+		}
+	} else {
+		// It is an error for the '-' indicator to occur in the flow context,
+		// but we let the Parser detect and report about it because the Parser
+		// is able to point to the context.
+	}
+
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after '-'.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the BLOCK-ENTRY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_BLOCK_ENTRY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the KEY token.
+func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
+
+	// In the block context, additional checks are required.
+	if parser.flow_level == 0 {
+		// Check if we are allowed to start a new key (not necessarily simple).
+		if !parser.simple_key_allowed {
+			return yaml_parser_set_scanner_error(parser, "", parser.mark,
+				"mapping keys are not allowed in this context")
+		}
+		// Add the BLOCK-MAPPING-START token if needed.
+		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+			return false
+		}
+	}
+
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after '?' in the block context.
+	parser.simple_key_allowed = parser.flow_level == 0
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the KEY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_KEY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the VALUE token.
+func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
+
+	simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
+
+	// Have we found a simple key?
+	if simple_key.possible {
+		// Create the KEY token and insert it into the queue.
+		token := yaml_token_t{
+			typ:        yaml_KEY_TOKEN,
+			start_mark: simple_key.mark,
+			end_mark:   simple_key.mark,
+		}
+		yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
+
+		// In the block context, we may need to add the BLOCK-MAPPING-START token.
+		if !yaml_parser_roll_indent(parser, simple_key.mark.column,
+			simple_key.token_number,
+			yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) {
+			return false
+		}
+
+		// Remove the simple key.
+		simple_key.possible = false
+
+		// A simple key cannot follow another simple key.
+		parser.simple_key_allowed = false
+
+	} else {
+		// The ':' indicator follows a complex key.
+
+		// In the block context, extra checks are required.
+		if parser.flow_level == 0 {
+
+			// Check if we are allowed to start a complex value.
+			if !parser.simple_key_allowed {
+				return yaml_parser_set_scanner_error(parser, "", parser.mark,
+					"mapping values are not allowed in this context")
+			}
+
+			// Add the BLOCK-MAPPING-START token if needed.
+			if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+				return false
+			}
+		}
+
+		// Simple keys after ':' are allowed in the block context.
+		parser.simple_key_allowed = parser.flow_level == 0
+	}
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the VALUE token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_VALUE_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
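+
+// An illustrative trace (not part of the upstream sources): for the input
+// "a: 1", the scanner first saves a potential simple key at 'a' and queues
+// SCALAR("a",plain). When ':' is reached, the branch above inserts KEY at
+// the saved token number and rolls the indent at the key's column, so the
+// queue becomes:
+//
+//	BLOCK-MAPPING-START
+//	KEY
+//	SCALAR("a",plain)
+//	VALUE
+//	SCALAR("1",plain)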
+
+// Produce the ALIAS or ANCHOR token.
+func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// An anchor or an alias could be a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// A simple key cannot follow an anchor or an alias.
+	parser.simple_key_allowed = false
+
+	// Create the ALIAS or ANCHOR token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_anchor(parser, &token, typ) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the TAG token.
+func yaml_parser_fetch_tag(parser *yaml_parser_t) bool {
+	// A tag could be a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// A simple key cannot follow a tag.
+	parser.simple_key_allowed = false
+
+	// Create the TAG token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_tag(parser, &token) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens.
+func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool {
+	// Remove any potential simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// A simple key may follow a block scalar.
+	parser.simple_key_allowed = true
+
+	// Create the SCALAR token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_block_scalar(parser, &token, literal) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens.
+func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool {
+	// A quoted scalar could be a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// A simple key cannot follow a flow scalar.
+	parser.simple_key_allowed = false
+
+	// Create the SCALAR token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_flow_scalar(parser, &token, single) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the SCALAR(...,plain) token.
+func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool {
+	// A plain scalar could be a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// A simple key cannot follow a plain scalar.
+	parser.simple_key_allowed = false
+
+	// Create the SCALAR token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_plain_scalar(parser, &token) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Eat whitespaces and comments until the next token is found.
+func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {
+
+	// Loop until the next token is found.
+	for {
+		// Allow the BOM mark to start a line.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+		}
+
+		// Eat whitespaces.
+		// Tabs are allowed:
+		//  - in the flow context
+		//  - in the block context, but not at the beginning of the line or
+		//  after '-', '?', or ':' (complex value).
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+
+		for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') {
+			skip(parser)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+
+		// Eat a comment until a line break.
+		if parser.buffer[parser.buffer_pos] == '#' {
+			for !is_breakz(parser.buffer, parser.buffer_pos) {
+				skip(parser)
+				if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+					return false
+				}
+			}
+		}
+
+		// If it is a line break, eat it.
+		if is_break(parser.buffer, parser.buffer_pos) {
+			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+				return false
+			}
+			skip_line(parser)
+
+			// In the block context, a new line may start a simple key.
+			if parser.flow_level == 0 {
+				parser.simple_key_allowed = true
+			}
+		} else {
+			break // We have found a token.
+		}
+	}
+
+	return true
+}
+
+// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token.
+//
+// Scope:
+//      %YAML    1.1    # a comment \n
+//      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//      %TAG    !yaml!  tag:yaml.org,2002:  \n
+//      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool {
+	// Eat '%'.
+	start_mark := parser.mark
+	skip(parser)
+
+	// Scan the directive name.
+	var name []byte
+	if !yaml_parser_scan_directive_name(parser, start_mark, &name) {
+		return false
+	}
+
+	// Is it a YAML directive?
+	if bytes.Equal(name, []byte("YAML")) {
+		// Scan the VERSION directive value.
+		var major, minor int8
+		if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) {
+			return false
+		}
+		end_mark := parser.mark
+
+		// Create a VERSION-DIRECTIVE token.
+		*token = yaml_token_t{
+			typ:        yaml_VERSION_DIRECTIVE_TOKEN,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			major:      major,
+			minor:      minor,
+		}
+
+		// Is it a TAG directive?
+	} else if bytes.Equal(name, []byte("TAG")) {
+		// Scan the TAG directive value.
+		var handle, prefix []byte
+		if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) {
+			return false
+		}
+		end_mark := parser.mark
+
+		// Create a TAG-DIRECTIVE token.
+		*token = yaml_token_t{
+			typ:        yaml_TAG_DIRECTIVE_TOKEN,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			value:      handle,
+			prefix:     prefix,
+		}
+
+		// Unknown directive.
+	} else {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "found unknown directive name")
+		return false
+	}
+
+	// Eat the rest of the line including any comments.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	if parser.buffer[parser.buffer_pos] == '#' {
+		for !is_breakz(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+	}
+
+	// Check if we are at the end of the line.
+	if !is_breakz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "did not find expected comment or line break")
+		return false
+	}
+
+	// Eat a line break.
+	if is_break(parser.buffer, parser.buffer_pos) {
+		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+			return false
+		}
+		skip_line(parser)
+	}
+
+	return true
+}
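+
+// For example (illustrative): the line "%TAG !e! tag:example.com,2014:"
+// produces a single TAG-DIRECTIVE token whose value (the handle) is "!e!"
+// and whose prefix is "tag:example.com,2014:".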
+
+// Scan the directive name.
+//
+// Scope:
+//      %YAML   1.1     # a comment \n
+//       ^^^^
+//      %TAG    !yaml!  tag:yaml.org,2002:  \n
+//       ^^^
+//
+func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
+	// Consume the directive name.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	var s []byte
+	for is_alpha(parser.buffer, parser.buffer_pos) {
+		s = read(parser, s)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Check if the name is empty.
+	if len(s) == 0 {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "could not find expected directive name")
+		return false
+	}
+
+	// Check for a blank character after the name.
+	if !is_blankz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "found unexpected non-alphabetical character")
+		return false
+	}
+	*name = s
+	return true
+}
+
+// Scan the value of VERSION-DIRECTIVE.
+//
+// Scope:
+//      %YAML   1.1     # a comment \n
+//           ^^^^^^
+func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
+	// Eat whitespaces.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Consume the major version number.
+	if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
+		return false
+	}
+
+	// Eat '.'.
+	if parser.buffer[parser.buffer_pos] != '.' {
+		return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+			start_mark, "did not find expected digit or '.' character")
+	}
+
+	skip(parser)
+
+	// Consume the minor version number.
+	if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
+		return false
+	}
+	return true
+}
+
+const max_number_length = 2
+
+// Scan the version number of VERSION-DIRECTIVE.
+//
+// Scope:
+//      %YAML   1.1     # a comment \n
+//              ^
+//      %YAML   1.1     # a comment \n
+//                ^
+func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool {
+
+	// Repeat while the next character is a digit.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	var value, length int8
+	for is_digit(parser.buffer, parser.buffer_pos) {
+		// Check if the number is too long.
+		length++
+		if length > max_number_length {
+			return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+				start_mark, "found extremely long version number")
+		}
+		value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos))
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Check if the number was present.
+	if length == 0 {
+		return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+			start_mark, "did not find expected version number")
+	}
+	*number = value
+	return true
+}
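+
+// For example (illustrative): "%YAML 1.1" yields major = 1 and minor = 1.
+// Because max_number_length is 2, a component of three or more digits,
+// such as the minor part of "%YAML 1.100", fails with "found extremely
+// long version number".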
+
+// Scan the value of a TAG-DIRECTIVE token.
+//
+// Scope:
+//      %TAG    !yaml!  tag:yaml.org,2002:  \n
+//          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool {
+	var handle_value, prefix_value []byte
+
+	// Eat whitespaces.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Scan a handle.
+	if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) {
+		return false
+	}
+
+	// Expect a whitespace.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	if !is_blank(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+			start_mark, "did not find expected whitespace")
+		return false
+	}
+
+	// Eat whitespaces.
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Scan a prefix.
+	if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) {
+		return false
+	}
+
+	// Expect a whitespace or line break.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	if !is_blankz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+			start_mark, "did not find expected whitespace or line break")
+		return false
+	}
+
+	*handle = handle_value
+	*prefix = prefix_value
+	return true
+}
+
+func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool {
+	var s []byte
+
+	// Eat the indicator character.
+	start_mark := parser.mark
+	skip(parser)
+
+	// Consume the value.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	for is_alpha(parser.buffer, parser.buffer_pos) {
+		s = read(parser, s)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	end_mark := parser.mark
+
+	// Check if the length of the anchor is greater than 0 and it is followed
+	// by a whitespace character or one of the indicators:
+	//
+	//      '?', ':', ',', ']', '}', '%', '@', '`'.
+
+	if len(s) == 0 ||
+		!(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' ||
+			parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' ||
+			parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' ||
+			parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' ||
+			parser.buffer[parser.buffer_pos] == '`') {
+		context := "while scanning an alias"
+		if typ == yaml_ANCHOR_TOKEN {
+			context = "while scanning an anchor"
+		}
+		yaml_parser_set_scanner_error(parser, context, start_mark,
+			"did not find expected alphabetic or numeric character")
+		return false
+	}
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      s,
+	}
+
+	return true
+}
+
+// Scan a TAG token.
+func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool {
+	var handle, suffix []byte
+
+	start_mark := parser.mark
+
+	// Check if the tag is in the canonical form.
+	if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+		return false
+	}
+
+	if parser.buffer[parser.buffer_pos+1] == '<' {
+		// Keep the handle as ''
+
+		// Eat '!<'
+		skip(parser)
+		skip(parser)
+
+		// Consume the tag value.
+		if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+			return false
+		}
+
+		// Check for '>' and eat it.
+		if parser.buffer[parser.buffer_pos] != '>' {
+			yaml_parser_set_scanner_error(parser, "while scanning a tag",
+				start_mark, "did not find the expected '>'")
+			return false
+		}
+
+		skip(parser)
+	} else {
+		// The tag has either the '!suffix' or the '!handle!suffix' form.
+
+		// First, try to scan a handle.
+		if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) {
+			return false
+		}
+
+		// Check if it is, indeed, a handle.
+		if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' {
+			// Scan the suffix now.
+			if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+				return false
+			}
+		} else {
+			// It wasn't a handle after all.  Scan the rest of the tag.
+			if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) {
+				return false
+			}
+
+			// Set the handle to '!'.
+			handle = []byte{'!'}
+
+			// A special case: the '!' tag.  Set the handle to '' and the
+			// suffix to '!'.
+			if len(suffix) == 0 {
+				handle, suffix = suffix, handle
+			}
+		}
+	}
+
+	// Check the character which ends the tag.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	if !is_blankz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a tag",
+			start_mark, "did not find expected whitespace or line break")
+		return false
+	}
+
+	end_mark := parser.mark
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        yaml_TAG_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      handle,
+		suffix:     suffix,
+	}
+	return true
+}
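+
+// Illustrative examples of the forms handled above (not part of the
+// upstream sources):
+//
+//	!<tag:yaml.org,2002:str>  -> handle "",   suffix "tag:yaml.org,2002:str"
+//	!!str                     -> handle "!!", suffix "str"
+//	!local                    -> handle "!",  suffix "local"
+//	!                         -> handle "",   suffix "!"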
+
+// Scan a tag handle.
+func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool {
+	// Check the initial '!' character.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	if parser.buffer[parser.buffer_pos] != '!' {
+		yaml_parser_set_scanner_tag_error(parser, directive,
+			start_mark, "did not find expected '!'")
+		return false
+	}
+
+	var s []byte
+
+	// Copy the '!' character.
+	s = read(parser, s)
+
+	// Copy all subsequent alphabetical and numerical characters.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	for is_alpha(parser.buffer, parser.buffer_pos) {
+		s = read(parser, s)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Check if the trailing character is '!' and copy it.
+	if parser.buffer[parser.buffer_pos] == '!' {
+		s = read(parser, s)
+	} else {
+		// It's either the '!' tag or not really a tag handle.  If it's a %TAG
+		// directive, it's an error.  If it's a tag token, it must be a part of the URI.
+		if directive && string(s) != "!" {
+			yaml_parser_set_scanner_tag_error(parser, directive,
+				start_mark, "did not find expected '!'")
+			return false
+		}
+	}
+
+	*handle = s
+	return true
+}
+
+// Scan a tag.
+func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
+	//size_t length = head ? strlen((char *)head) : 0
+	var s []byte
+	hasTag := len(head) > 0
+
+	// Copy the head if needed.
+	//
+	// Note that we don't copy the leading '!' character.
+	if len(head) > 1 {
+		s = append(s, head[1:]...)
+	}
+
+	// Scan the tag.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	// The set of characters that may appear in URI is as follows:
+	//
+	//      '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&',
+	//      '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']',
+	//      '%'.
+	// [Go] Convert this into more reasonable logic.
+	for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' ||
+		parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' ||
+		parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' ||
+		parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' ||
+		parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' ||
+		parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' ||
+		parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' ||
+		parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' ||
+		parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' ||
+		parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' ||
+		parser.buffer[parser.buffer_pos] == '%' {
+		// Check if it is a URI-escape sequence.
+		if parser.buffer[parser.buffer_pos] == '%' {
+			if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) {
+				return false
+			}
+		} else {
+			s = read(parser, s)
+		}
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		hasTag = true
+	}
+
+	if !hasTag {
+		yaml_parser_set_scanner_tag_error(parser, directive,
+			start_mark, "did not find expected tag URI")
+		return false
+	}
+	*uri = s
+	return true
+}
+
+// Decode a URI-escape sequence corresponding to a single UTF-8 character.
+func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool {
+
+	// Decode the required number of characters.
+	w := 1024
+	for w > 0 {
+		// Check for a URI-escaped octet.
+		if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+			return false
+		}
+
+		if !(parser.buffer[parser.buffer_pos] == '%' &&
+			is_hex(parser.buffer, parser.buffer_pos+1) &&
+			is_hex(parser.buffer, parser.buffer_pos+2)) {
+			return yaml_parser_set_scanner_tag_error(parser, directive,
+				start_mark, "did not find URI escaped octet")
+		}
+
+		// Get the octet.
+		octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2))
+
+		// If it is the leading octet, determine the length of the UTF-8 sequence.
+		if w == 1024 {
+			w = width(octet)
+			if w == 0 {
+				return yaml_parser_set_scanner_tag_error(parser, directive,
+					start_mark, "found an incorrect leading UTF-8 octet")
+			}
+		} else {
+			// Check if the trailing octet is correct.
+			if octet&0xC0 != 0x80 {
+				return yaml_parser_set_scanner_tag_error(parser, directive,
+					start_mark, "found an incorrect trailing UTF-8 octet")
+			}
+		}
+
+		// Copy the octet and move the pointers.
+		*s = append(*s, octet)
+		skip(parser)
+		skip(parser)
+		skip(parser)
+		w--
+	}
+	return true
+}
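+
+// For example (illustrative): the escaped sequence "%C3%A9" in a tag URI
+// decodes to the two octets 0xC3 0xA9, the UTF-8 encoding of 'é'. The
+// first octet sets w = width(0xC3) = 2, and the second must match the
+// trailing-octet pattern 10xxxxxx checked above.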
+
+// Scan a block scalar.
+func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool {
+	// Eat the indicator '|' or '>'.
+	start_mark := parser.mark
+	skip(parser)
+
+	// Scan the additional block scalar indicators.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	// Check for a chomping indicator.
+	var chomping, increment int
+	if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+		// Set the chomping method and eat the indicator.
+		if parser.buffer[parser.buffer_pos] == '+' {
+			chomping = +1
+		} else {
+			chomping = -1
+		}
+		skip(parser)
+
+		// Check for an indentation indicator.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		if is_digit(parser.buffer, parser.buffer_pos) {
+			// Check that the indentation is greater than 0.
+			if parser.buffer[parser.buffer_pos] == '0' {
+				yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+					start_mark, "found an indentation indicator equal to 0")
+				return false
+			}
+
+			// Get the indentation level and eat the indicator.
+			increment = as_digit(parser.buffer, parser.buffer_pos)
+			skip(parser)
+		}
+
+	} else if is_digit(parser.buffer, parser.buffer_pos) {
+		// Do the same as above, but in the opposite order.
+
+		if parser.buffer[parser.buffer_pos] == '0' {
+			yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+				start_mark, "found an indentation indicator equal to 0")
+			return false
+		}
+		increment = as_digit(parser.buffer, parser.buffer_pos)
+		skip(parser)
+
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+			if parser.buffer[parser.buffer_pos] == '+' {
+				chomping = +1
+			} else {
+				chomping = -1
+			}
+			skip(parser)
+		}
+	}
+
+	// Eat whitespaces and comments to the end of the line.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+	if parser.buffer[parser.buffer_pos] == '#' {
+		for !is_breakz(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+	}
+
+	// Check if we are at the end of the line.
+	if !is_breakz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+			start_mark, "did not find expected comment or line break")
+		return false
+	}
+
+	// Eat a line break.
+	if is_break(parser.buffer, parser.buffer_pos) {
+		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+			return false
+		}
+		skip_line(parser)
+	}
+
+	end_mark := parser.mark
+
+	// Set the indentation level if it was specified.
+	var indent int
+	if increment > 0 {
+		if parser.indent >= 0 {
+			indent = parser.indent + increment
+		} else {
+			indent = increment
+		}
+	}
+
+	// Scan the leading line breaks and determine the indentation level if needed.
+	var s, leading_break, trailing_breaks []byte
+	if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+		return false
+	}
+
+	// Scan the block scalar content.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	var leading_blank, trailing_blank bool
+	for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) {
+		// We are at the beginning of a non-empty line.
+
+		// Is it a trailing whitespace?
+		trailing_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+		// Check if we need to fold the leading line break.
+		if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' {
+			// Do we need to join the lines by space?
+			if len(trailing_breaks) == 0 {
+				s = append(s, ' ')
+			}
+		} else {
+			s = append(s, leading_break...)
+		}
+		leading_break = leading_break[:0]
+
+		// Append the remaining line breaks.
+		s = append(s, trailing_breaks...)
+		trailing_breaks = trailing_breaks[:0]
+
+		// Is it a leading whitespace?
+		leading_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+		// Consume the current line.
+		for !is_breakz(parser.buffer, parser.buffer_pos) {
+			s = read(parser, s)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+
+		// Consume the line break.
+		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+			return false
+		}
+
+		leading_break = read_line(parser, leading_break)
+
+		// Eat the following indentation spaces and line breaks.
+		if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+			return false
+		}
+	}
+
+	// Chomp the tail.
+	if chomping != -1 {
+		s = append(s, leading_break...)
+	}
+	if chomping == 1 {
+		s = append(s, trailing_breaks...)
+	}
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        yaml_SCALAR_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      s,
+		style:      yaml_LITERAL_SCALAR_STYLE,
+	}
+	if !literal {
+		token.style = yaml_FOLDED_SCALAR_STYLE
+	}
+	return true
+}
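+
+// Illustrative behaviour of the indicators handled above (not part of the
+// upstream sources), for the scalar body "foo\nbar\n\n":
+//
+//	|   (clip)  -> "foo\nbar\n"
+//	|-  (strip) -> "foo\nbar"
+//	|+  (keep)  -> "foo\nbar\n\n"
+//	>   (fold)  -> "foo bar\n"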
+
+// Scan indentation spaces and line breaks for a block scalar.  Determine the
+// indentation level if needed.
+func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool {
+	*end_mark = parser.mark
+
+	// Eat the indentation spaces and line breaks.
+	max_indent := 0
+	for {
+		// Eat the indentation spaces.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+		if parser.mark.column > max_indent {
+			max_indent = parser.mark.column
+		}
+
+		// Check for a tab character messing the indentation.
+		if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
+			return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+				start_mark, "found a tab character where an indentation space is expected")
+		}
+
+		// Have we found a non-empty line?
+		if !is_break(parser.buffer, parser.buffer_pos) {
+			break
+		}
+
+		// Consume the line break.
+		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+			return false
+		}
+		// [Go] Should really be returning breaks instead.
+		*breaks = read_line(parser, *breaks)
+		*end_mark = parser.mark
+	}
+
+	// Determine the indentation level if needed.
+	if *indent == 0 {
+		*indent = max_indent
+		if *indent < parser.indent+1 {
+			*indent = parser.indent + 1
+		}
+		if *indent < 1 {
+			*indent = 1
+		}
+	}
+	return true
+}
+
+// Scan a quoted scalar.
+func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
+	// Eat the left quote.
+	start_mark := parser.mark
+	skip(parser)
+
+	// Consume the content of the quoted scalar.
+	var s, leading_break, trailing_breaks, whitespaces []byte
+	for {
+		// Check that there are no document indicators at the beginning of the line.
+		if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+			return false
+		}
+
+		if parser.mark.column == 0 &&
+			((parser.buffer[parser.buffer_pos+0] == '-' &&
+				parser.buffer[parser.buffer_pos+1] == '-' &&
+				parser.buffer[parser.buffer_pos+2] == '-') ||
+				(parser.buffer[parser.buffer_pos+0] == '.' &&
+					parser.buffer[parser.buffer_pos+1] == '.' &&
+					parser.buffer[parser.buffer_pos+2] == '.')) &&
+			is_blankz(parser.buffer, parser.buffer_pos+3) {
+			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+				start_mark, "found unexpected document indicator")
+			return false
+		}
+
+		// Check for EOF.
+		if is_z(parser.buffer, parser.buffer_pos) {
+			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+				start_mark, "found unexpected end of stream")
+			return false
+		}
+
+		// Consume non-blank characters.
+		leading_blanks := false
+		for !is_blankz(parser.buffer, parser.buffer_pos) {
+			if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+				// It is an escaped single quote.
+				s = append(s, '\'')
+				skip(parser)
+				skip(parser)
+
+			} else if single && parser.buffer[parser.buffer_pos] == '\'' {
+				// It is a right single quote.
+				break
+			} else if !single && parser.buffer[parser.buffer_pos] == '"' {
+				// It is a right double quote.
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+				// It is an escaped line break.
+				if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+					return false
+				}
+				skip(parser)
+				skip_line(parser)
+				leading_blanks = true
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+				// It is an escape sequence.
+				code_length := 0
+
+				// Check the escape character.
+				switch parser.buffer[parser.buffer_pos+1] {
+				case '0':
+					s = append(s, 0)
+				case 'a':
+					s = append(s, '\x07')
+				case 'b':
+					s = append(s, '\x08')
+				case 't', '\t':
+					s = append(s, '\x09')
+				case 'n':
+					s = append(s, '\x0A')
+				case 'v':
+					s = append(s, '\x0B')
+				case 'f':
+					s = append(s, '\x0C')
+				case 'r':
+					s = append(s, '\x0D')
+				case 'e':
+					s = append(s, '\x1B')
+				case ' ':
+					s = append(s, '\x20')
+				case '"':
+					s = append(s, '"')
+				case '\'':
+					s = append(s, '\'')
+				case '\\':
+					s = append(s, '\\')
+				case 'N': // NEL (#x85)
+					s = append(s, '\xC2')
+					s = append(s, '\x85')
+				case '_': // #xA0
+					s = append(s, '\xC2')
+					s = append(s, '\xA0')
+				case 'L': // LS (#x2028)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA8')
+				case 'P': // PS (#x2029)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA9')
+				case 'x':
+					code_length = 2
+				case 'u':
+					code_length = 4
+				case 'U':
+					code_length = 8
+				default:
+					yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+						start_mark, "found unknown escape character")
+					return false
+				}
+
+				skip(parser)
+				skip(parser)
+
+				// Consume an arbitrary escape code.
+				if code_length > 0 {
+					var value int
+
+					// Scan the character value.
+					if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+						return false
+					}
+					for k := 0; k < code_length; k++ {
+						if !is_hex(parser.buffer, parser.buffer_pos+k) {
+							yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+								start_mark, "did not find expected hexadecimal number")
+							return false
+						}
+						value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+					}
+
+					// Check the value and write the character.
+					if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
+						yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+							start_mark, "found invalid Unicode character escape code")
+						return false
+					}
+					if value <= 0x7F {
+						s = append(s, byte(value))
+					} else if value <= 0x7FF {
+						s = append(s, byte(0xC0+(value>>6)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					} else if value <= 0xFFFF {
+						s = append(s, byte(0xE0+(value>>12)))
+						s = append(s, byte(0x80+((value>>6)&0x3F)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					} else {
+						s = append(s, byte(0xF0+(value>>18)))
+						s = append(s, byte(0x80+((value>>12)&0x3F)))
+						s = append(s, byte(0x80+((value>>6)&0x3F)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					}
+
+					// Advance the pointer.
+					for k := 0; k < code_length; k++ {
+						skip(parser)
+					}
+				}
+			} else {
+				// It is a non-escaped non-blank character.
+				s = read(parser, s)
+			}
+			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+				return false
+			}
+		}
+
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+
+		// Check if we are at the end of the scalar.
+		if single {
+			if parser.buffer[parser.buffer_pos] == '\'' {
+				break
+			}
+		} else {
+			if parser.buffer[parser.buffer_pos] == '"' {
+				break
+			}
+		}
+
+		// Consume blank characters.
+		for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+			if is_blank(parser.buffer, parser.buffer_pos) {
+				// Consume a space or a tab character.
+				if !leading_blanks {
+					whitespaces = read(parser, whitespaces)
+				} else {
+					skip(parser)
+				}
+			} else {
+				if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+					return false
+				}
+
+				// Check if it is a first line break.
+				if !leading_blanks {
+					whitespaces = whitespaces[:0]
+					leading_break = read_line(parser, leading_break)
+					leading_blanks = true
+				} else {
+					trailing_breaks = read_line(parser, trailing_breaks)
+				}
+			}
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+
+		// Join the whitespaces or fold line breaks.
+		if leading_blanks {
+			// Do we need to fold line breaks?
+			if len(leading_break) > 0 && leading_break[0] == '\n' {
+				if len(trailing_breaks) == 0 {
+					s = append(s, ' ')
+				} else {
+					s = append(s, trailing_breaks...)
+				}
+			} else {
+				s = append(s, leading_break...)
+				s = append(s, trailing_breaks...)
+			}
+			trailing_breaks = trailing_breaks[:0]
+			leading_break = leading_break[:0]
+		} else {
+			s = append(s, whitespaces...)
+			whitespaces = whitespaces[:0]
+		}
+	}
+
+	// Eat the right quote.
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        yaml_SCALAR_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      s,
+		style:      yaml_SINGLE_QUOTED_SCALAR_STYLE,
+	}
+	if !single {
+		token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+	}
+	return true
+}
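+
+// Illustrative examples (not part of the upstream sources): the
+// single-quoted scalar 'it''s' yields "it's" via the doubled-quote escape,
+// and the double-quoted scalar "a\u263Ab" yields "a☺b", since the escape
+// code 0x263A is written out above as the three UTF-8 octets E2 98 BA.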
+
+// Scan a plain scalar.
+func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool {
+
+	var s, leading_break, trailing_breaks, whitespaces []byte
+	var leading_blanks bool
+	var indent = parser.indent + 1
+
+	start_mark := parser.mark
+	end_mark := parser.mark
+
+	// Consume the content of the plain scalar.
+	for {
+		// Check for a document indicator.
+		if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+			return false
+		}
+		if parser.mark.column == 0 &&
+			((parser.buffer[parser.buffer_pos+0] == '-' &&
+				parser.buffer[parser.buffer_pos+1] == '-' &&
+				parser.buffer[parser.buffer_pos+2] == '-') ||
+				(parser.buffer[parser.buffer_pos+0] == '.' &&
+					parser.buffer[parser.buffer_pos+1] == '.' &&
+					parser.buffer[parser.buffer_pos+2] == '.')) &&
+			is_blankz(parser.buffer, parser.buffer_pos+3) {
+			break
+		}
+
+		// Check for a comment.
+		if parser.buffer[parser.buffer_pos] == '#' {
+			break
+		}
+
+		// Consume non-blank characters.
+		for !is_blankz(parser.buffer, parser.buffer_pos) {
+
+			// Check for indicators that may end a plain scalar.
+			if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) ||
+				(parser.flow_level > 0 &&
+					(parser.buffer[parser.buffer_pos] == ',' ||
+						parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' ||
+						parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+						parser.buffer[parser.buffer_pos] == '}')) {
+				break
+			}
+
+			// Check if we need to join whitespaces and breaks.
+			if leading_blanks || len(whitespaces) > 0 {
+				if leading_blanks {
+					// Do we need to fold line breaks?
+					if leading_break[0] == '\n' {
+						if len(trailing_breaks) == 0 {
+							s = append(s, ' ')
+						} else {
+							s = append(s, trailing_breaks...)
+						}
+					} else {
+						s = append(s, leading_break...)
+						s = append(s, trailing_breaks...)
+					}
+					trailing_breaks = trailing_breaks[:0]
+					leading_break = leading_break[:0]
+					leading_blanks = false
+				} else {
+					s = append(s, whitespaces...)
+					whitespaces = whitespaces[:0]
+				}
+			}
+
+			// Copy the character.
+			s = read(parser, s)
+
+			end_mark = parser.mark
+			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+				return false
+			}
+		}
+
+		// Is it the end?
+		if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) {
+			break
+		}
+
+		// Consume blank characters.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+
+		for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+			if is_blank(parser.buffer, parser.buffer_pos) {
+
+				// Check for tab characters that abuse indentation.
+				if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
+					yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
+						start_mark, "found a tab character that violates indentation")
+					return false
+				}
+
+				// Consume a space or a tab character.
+				if !leading_blanks {
+					whitespaces = read(parser, whitespaces)
+				} else {
+					skip(parser)
+				}
+			} else {
+				if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+					return false
+				}
+
+				// Check if it is a first line break.
+				if !leading_blanks {
+					whitespaces = whitespaces[:0]
+					leading_break = read_line(parser, leading_break)
+					leading_blanks = true
+				} else {
+					trailing_breaks = read_line(parser, trailing_breaks)
+				}
+			}
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+
+		// Check indentation level.
+		if parser.flow_level == 0 && parser.mark.column < indent {
+			break
+		}
+	}
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        yaml_SCALAR_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      s,
+		style:      yaml_PLAIN_SCALAR_STYLE,
+	}
+
+	// Note that we change the 'simple_key_allowed' flag.
+	if leading_blanks {
+		parser.simple_key_allowed = true
+	}
+	return true
+}
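+
+// Illustrative examples (not part of the upstream sources): in the block
+// context the plain scalar in "key: some value # note" ends before the
+// " #" comment, while in the flow context "[a, b]" the indicators ',' and
+// ']' end the plain scalars "a" and "b", per the checks above.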
diff --git a/vendor/gopkg.in/yaml.v2/sorter.go b/vendor/gopkg.in/yaml.v2/sorter.go
new file mode 100644
index 0000000..4c45e66
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/sorter.go
@@ -0,0 +1,113 @@
+package yaml
+
+import (
+	"reflect"
+	"unicode"
+)
+
+type keyList []reflect.Value
+
+func (l keyList) Len() int      { return len(l) }
+func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
+func (l keyList) Less(i, j int) bool {
+	a := l[i]
+	b := l[j]
+	ak := a.Kind()
+	bk := b.Kind()
+	for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
+		a = a.Elem()
+		ak = a.Kind()
+	}
+	for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
+		b = b.Elem()
+		bk = b.Kind()
+	}
+	af, aok := keyFloat(a)
+	bf, bok := keyFloat(b)
+	if aok && bok {
+		if af != bf {
+			return af < bf
+		}
+		if ak != bk {
+			return ak < bk
+		}
+		return numLess(a, b)
+	}
+	if ak != reflect.String || bk != reflect.String {
+		return ak < bk
+	}
+	ar, br := []rune(a.String()), []rune(b.String())
+	for i := 0; i < len(ar) && i < len(br); i++ {
+		if ar[i] == br[i] {
+			continue
+		}
+		al := unicode.IsLetter(ar[i])
+		bl := unicode.IsLetter(br[i])
+		if al && bl {
+			return ar[i] < br[i]
+		}
+		if al || bl {
+			return bl
+		}
+		var ai, bi int
+		var an, bn int64
+		if ar[i] == '0' || br[i] == '0' {
+			for j := i - 1; j >= 0 && unicode.IsDigit(ar[j]); j-- {
+				if ar[j] != '0' {
+					an = 1
+					bn = 1
+					break
+				}
+			}
+		}
+		for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
+			an = an*10 + int64(ar[ai]-'0')
+		}
+		for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
+			bn = bn*10 + int64(br[bi]-'0')
+		}
+		if an != bn {
+			return an < bn
+		}
+		if ai != bi {
+			return ai < bi
+		}
+		return ar[i] < br[i]
+	}
+	return len(ar) < len(br)
+}
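+
+// Illustrative ordering (not part of the upstream sources): because digit
+// runs are compared numerically and letters lexically, sorting the string
+// keys "a10", "a9", "b1" with this comparator yields
+//
+//	a9, a10, b1
+//
+// rather than the plain lexical order a10, a9, b1.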
+
+// keyFloat returns a float value for v if it is a number or a bool,
+// and reports whether it is one.
+func keyFloat(v reflect.Value) (f float64, ok bool) {
+	switch v.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return float64(v.Int()), true
+	case reflect.Float32, reflect.Float64:
+		return v.Float(), true
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return float64(v.Uint()), true
+	case reflect.Bool:
+		if v.Bool() {
+			return 1, true
+		}
+		return 0, true
+	}
+	return 0, false
+}
+
+// numLess returns whether a < b.
+// a and b must have the same kind.
+func numLess(a, b reflect.Value) bool {
+	switch a.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return a.Int() < b.Int()
+	case reflect.Float32, reflect.Float64:
+		return a.Float() < b.Float()
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return a.Uint() < b.Uint()
+	case reflect.Bool:
+		return !a.Bool() && b.Bool()
+	}
+	panic("not a number")
+}
diff --git a/vendor/gopkg.in/yaml.v2/writerc.go b/vendor/gopkg.in/yaml.v2/writerc.go
new file mode 100644
index 0000000..a2dde60
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/writerc.go
@@ -0,0 +1,26 @@
+package yaml
+
+// Set the writer error and return false.
+func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
+	emitter.error = yaml_WRITER_ERROR
+	emitter.problem = problem
+	return false
+}
+
+// Flush the output buffer.
+func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
+	if emitter.write_handler == nil {
+		panic("write handler not set")
+	}
+
+	// Check if the buffer is empty.
+	if emitter.buffer_pos == 0 {
+		return true
+	}
+
+	if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
+		return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
+	}
+	emitter.buffer_pos = 0
+	return true
+}
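+
+// A minimal sketch of a compatible handler (illustrative; the concrete
+// yaml_write_handler_t type is defined elsewhere in this package):
+//
+//	emitter.write_handler = func(e *yaml_emitter_t, buf []byte) error {
+//		_, err := w.Write(buf) // w is an io.Writer captured by the closure
+//		return err
+//	}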
diff --git a/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/gopkg.in/yaml.v2/yaml.go
new file mode 100644
index 0000000..de85aa4
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/yaml.go
@@ -0,0 +1,466 @@
+// Package yaml implements YAML support for the Go language.
+//
+// Source code and other details for the project are available at GitHub:
+//
+//   https://github.com/go-yaml/yaml
+//
+package yaml
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"reflect"
+	"strings"
+	"sync"
+)
+
+// MapSlice encodes and decodes as a YAML map.
+// The order of keys is preserved when encoding and decoding.
+type MapSlice []MapItem
+
+// MapItem is an item in a MapSlice.
+type MapItem struct {
+	Key, Value interface{}
+}
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a YAML document. The UnmarshalYAML
+// method receives a function that may be called to unmarshal the original
+// YAML value into a field or variable. It is safe to call the unmarshal
+// function parameter more than once if necessary.
+type Unmarshaler interface {
+	UnmarshalYAML(unmarshal func(interface{}) error) error
+}
+
+// The Marshaler interface may be implemented by types to customize their
+// behavior when being marshaled into a YAML document. The returned value
+// is marshaled in place of the original value implementing Marshaler.
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
+type Marshaler interface {
+	MarshalYAML() (interface{}, error)
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to type
+// mismatches, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+//     type T struct {
+//         F int `yaml:"a,omitempty"`
+//         B int
+//     }
+//     var t T
+//     yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+//
+func Unmarshal(in []byte, out interface{}) (err error) {
+	return unmarshal(in, out, false)
+}
+
+// UnmarshalStrict is like Unmarshal except that any fields that are found
+// in the data that do not have corresponding struct members, or mapping
+// keys that are duplicates, will result in an error.
+func UnmarshalStrict(in []byte, out interface{}) (err error) {
+	return unmarshal(in, out, true)
+}
+
+// A Decoder reads and decodes YAML values from an input stream.
+type Decoder struct {
+	strict bool
+	parser *parser
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may read
+// data from r beyond the YAML values requested.
+func NewDecoder(r io.Reader) *Decoder {
+	return &Decoder{
+		parser: newParserFromReader(r),
+	}
+}
+
+// SetStrict sets whether strict decoding behaviour is enabled when
+// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict.
+func (dec *Decoder) SetStrict(strict bool) {
+	dec.strict = strict
+}
+
+// Decode reads the next YAML-encoded value from its input
+// and stores it in the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
+func (dec *Decoder) Decode(v interface{}) (err error) {
+	d := newDecoder(dec.strict)
+	defer handleErr(&err)
+	node := dec.parser.parse()
+	if node == nil {
+		return io.EOF
+	}
+	out := reflect.ValueOf(v)
+	if out.Kind() == reflect.Ptr && !out.IsNil() {
+		out = out.Elem()
+	}
+	d.unmarshal(node, out)
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
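+
+// An illustrative streaming loop (a sketch, not part of the upstream docs;
+// r is any io.Reader carrying one or more YAML documents):
+//
+//     dec := yaml.NewDecoder(r)
+//     for {
+//         var doc map[string]interface{}
+//         if err := dec.Decode(&doc); err != nil {
+//             if err == io.EOF {
+//                 break // no more documents in the stream
+//             }
+//             return err
+//         }
+//         // ... use doc ...
+//     }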
+
+func unmarshal(in []byte, out interface{}, strict bool) (err error) {
+	defer handleErr(&err)
+	d := newDecoder(strict)
+	p := newParser(in)
+	defer p.destroy()
+	node := p.parse()
+	if node != nil {
+		v := reflect.ValueOf(out)
+		if v.Kind() == reflect.Ptr && !v.IsNil() {
+			v = v.Elem()
+		}
+		d.unmarshal(node, v)
+	}
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+//     `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+//     omitempty    Only include the field if it's not set to the zero
+//                  value for the type or to empty slices or maps.
+//                  Zero valued structs will be omitted if all their public
+//                  fields are zero, unless they implement an IsZero
+//                  method (see the IsZeroer interface type), in which
+//                  case the field will be excluded if that method returns true.
+//
+//     flow         Marshal using a flow style (useful for structs,
+//                  sequences and maps).
+//
+//     inline       Inline the field, which must be a struct or a map,
+//                  causing all of its fields or keys to be processed as if
+//                  they were part of the outer struct. For maps, keys must
+//                  not conflict with the yaml keys of other struct fields.
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+//     type T struct {
+//         F int `yaml:"a,omitempty"`
+//         B int
+//     }
+//     yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+//     yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+//
+func Marshal(in interface{}) (out []byte, err error) {
+	defer handleErr(&err)
+	e := newEncoder()
+	defer e.destroy()
+	e.marshalDoc("", reflect.ValueOf(in))
+	e.finish()
+	out = e.out
+	return
+}
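+
+// For illustration (not part of the upstream docs), the flow and inline
+// flags described above:
+//
+//     type Inner struct {
+//         X int `yaml:"x"`
+//     }
+//     type Outer struct {
+//         P     []int `yaml:"p,flow"`
+//         Inner `yaml:",inline"`
+//     }
+//     out, _ := yaml.Marshal(&Outer{P: []int{1, 2}, Inner: Inner{X: 3}})
+//     // string(out) == "p: [1, 2]\nx: 3\n"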
+
+// An Encoder writes YAML values to an output stream.
+type Encoder struct {
+	encoder *encoder
+}
+
+// NewEncoder returns a new encoder that writes to w.
+// The Encoder should be closed after use to flush all data
+// to w.
+func NewEncoder(w io.Writer) *Encoder {
+	return &Encoder{
+		encoder: newEncoderWithWriter(w),
+	}
+}
+
+// Encode writes the YAML encoding of v to the stream.
+// If multiple items are encoded to the stream, the
+// second and subsequent documents will be preceded
+// by a "---" document separator, but the first will not.
+//
+// See the documentation for Marshal for details about the conversion of Go
+// values to YAML.
+func (e *Encoder) Encode(v interface{}) (err error) {
+	defer handleErr(&err)
+	e.encoder.marshalDoc("", reflect.ValueOf(v))
+	return nil
+}
+
+// Close closes the encoder by writing any remaining data.
+// It does not write a stream terminating string "...".
+func (e *Encoder) Close() (err error) {
+	defer handleErr(&err)
+	e.encoder.finish()
+	return nil
+}
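+
+// An illustrative multi-document write (a sketch, not part of the upstream
+// docs; w is any io.Writer):
+//
+//     enc := yaml.NewEncoder(w)
+//     _ = enc.Encode(map[string]int{"a": 1}) // first document, no separator
+//     _ = enc.Encode(map[string]int{"b": 2}) // preceded by "---"
+//     _ = enc.Close()                        // flush remaining data to w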
+
+func handleErr(err *error) {
+	if v := recover(); v != nil {
+		if e, ok := v.(yamlError); ok {
+			*err = e.err
+		} else {
+			panic(v)
+		}
+	}
+}
+
+type yamlError struct {
+	err error
+}
+
+func fail(err error) {
+	panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+	panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+	Errors []string
+}
+
+func (e *TypeError) Error() string {
+	return fmt.Sprintf("yaml: unmarshal errors:\n  %s", strings.Join(e.Errors, "\n  "))
+}
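+
+// For illustration (not part of the upstream docs), partial decoding in the
+// presence of a type mismatch:
+//
+//     type T struct{ A, B int }
+//     var t T
+//     err := yaml.Unmarshal([]byte("a: x\nb: 2"), &t)
+//     // err is a *yaml.TypeError for key "a", yet t.B == 2.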
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+// The code in this section was copied from mgo/bson.
+
+// structInfo holds details for the serialization of fields of
+// a given struct.
+type structInfo struct {
+	FieldsMap  map[string]fieldInfo
+	FieldsList []fieldInfo
+
+	// InlineMap is the number of the field in the struct that
+	// contains an ,inline map, or -1 if there's none.
+	InlineMap int
+}
+
+type fieldInfo struct {
+	Key       string
+	Num       int
+	OmitEmpty bool
+	Flow      bool
+	// Id holds the unique field identifier, so we can cheaply
+	// check for field duplicates without maintaining an extra map.
+	Id int
+
+	// Inline holds the field index if the field is part of an inlined struct.
+	Inline []int
+}
+
+var structMap = make(map[reflect.Type]*structInfo)
+var fieldMapMutex sync.RWMutex
+
+func getStructInfo(st reflect.Type) (*structInfo, error) {
+	fieldMapMutex.RLock()
+	sinfo, found := structMap[st]
+	fieldMapMutex.RUnlock()
+	if found {
+		return sinfo, nil
+	}
+
+	n := st.NumField()
+	fieldsMap := make(map[string]fieldInfo)
+	fieldsList := make([]fieldInfo, 0, n)
+	inlineMap := -1
+	for i := 0; i != n; i++ {
+		field := st.Field(i)
+		if field.PkgPath != "" && !field.Anonymous {
+			continue // Private field
+		}
+
+		info := fieldInfo{Num: i}
+
+		tag := field.Tag.Get("yaml")
+		if tag == "" && !strings.Contains(string(field.Tag), ":") {
+			tag = string(field.Tag)
+		}
+		if tag == "-" {
+			continue
+		}
+
+		inline := false
+		fields := strings.Split(tag, ",")
+		if len(fields) > 1 {
+			for _, flag := range fields[1:] {
+				switch flag {
+				case "omitempty":
+					info.OmitEmpty = true
+				case "flow":
+					info.Flow = true
+				case "inline":
+					inline = true
+				default:
+					return nil, fmt.Errorf("Unsupported flag %q in tag %q of type %s", flag, tag, st)
+				}
+			}
+			tag = fields[0]
+		}
+
+		if inline {
+			switch field.Type.Kind() {
+			case reflect.Map:
+				if inlineMap >= 0 {
+					return nil, errors.New("Multiple ,inline maps in struct " + st.String())
+				}
+				if field.Type.Key() != reflect.TypeOf("") {
+					return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
+				}
+				inlineMap = info.Num
+			case reflect.Struct:
+				sinfo, err := getStructInfo(field.Type)
+				if err != nil {
+					return nil, err
+				}
+				for _, finfo := range sinfo.FieldsList {
+					if _, found := fieldsMap[finfo.Key]; found {
+						msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
+						return nil, errors.New(msg)
+					}
+					if finfo.Inline == nil {
+						finfo.Inline = []int{i, finfo.Num}
+					} else {
+						finfo.Inline = append([]int{i}, finfo.Inline...)
+					}
+					finfo.Id = len(fieldsList)
+					fieldsMap[finfo.Key] = finfo
+					fieldsList = append(fieldsList, finfo)
+				}
+			default:
+				//return nil, errors.New("Option ,inline needs a struct value or map field")
+				return nil, errors.New("Option ,inline needs a struct value field")
+			}
+			continue
+		}
+
+		if tag != "" {
+			info.Key = tag
+		} else {
+			info.Key = strings.ToLower(field.Name)
+		}
+
+		if _, found = fieldsMap[info.Key]; found {
+			msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
+			return nil, errors.New(msg)
+		}
+
+		info.Id = len(fieldsList)
+		fieldsList = append(fieldsList, info)
+		fieldsMap[info.Key] = info
+	}
+
+	sinfo = &structInfo{
+		FieldsMap:  fieldsMap,
+		FieldsList: fieldsList,
+		InlineMap:  inlineMap,
+	}
+
+	fieldMapMutex.Lock()
+	structMap[st] = sinfo
+	fieldMapMutex.Unlock()
+	return sinfo, nil
+}
+
+// IsZeroer is used to check whether an object is zero to
+// determine whether it should be omitted when marshaling
+// with the omitempty flag. One notable implementation
+// is time.Time.
+type IsZeroer interface {
+	IsZero() bool
+}
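+
+// A minimal sketch (illustrative; the Stamp type is hypothetical): a struct
+// whose zero value means "unset" can opt into omitempty handling:
+//
+//     type Stamp struct{ T time.Time }
+//
+//     func (s Stamp) IsZero() bool { return s.T.IsZero() }
+//
+// A Stamp field tagged with ",omitempty" is then skipped whenever its
+// IsZero method reports true.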
+
+func isZero(v reflect.Value) bool {
+	kind := v.Kind()
+	if z, ok := v.Interface().(IsZeroer); ok {
+		if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() {
+			return true
+		}
+		return z.IsZero()
+	}
+	switch kind {
+	case reflect.String:
+		return len(v.String()) == 0
+	case reflect.Interface, reflect.Ptr:
+		return v.IsNil()
+	case reflect.Slice:
+		return v.Len() == 0
+	case reflect.Map:
+		return v.Len() == 0
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Struct:
+		vt := v.Type()
+		for i := v.NumField() - 1; i >= 0; i-- {
+			if vt.Field(i).PkgPath != "" {
+				continue // Private field
+			}
+			if !isZero(v.Field(i)) {
+				return false
+			}
+		}
+		return true
+	}
+	return false
+}
diff --git a/vendor/gopkg.in/yaml.v2/yamlh.go b/vendor/gopkg.in/yaml.v2/yamlh.go
new file mode 100644
index 0000000..e25cee5
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/yamlh.go
@@ -0,0 +1,738 @@
+package yaml
+
+import (
+	"fmt"
+	"io"
+)
+
+// The version directive data.
+type yaml_version_directive_t struct {
+	major int8 // The major version number.
+	minor int8 // The minor version number.
+}
+
+// The tag directive data.
+type yaml_tag_directive_t struct {
+	handle []byte // The tag handle.
+	prefix []byte // The tag prefix.
+}
+
+type yaml_encoding_t int
+
+// The stream encoding.
+const (
+	// Let the parser choose the encoding.
+	yaml_ANY_ENCODING yaml_encoding_t = iota
+
+	yaml_UTF8_ENCODING    // The default UTF-8 encoding.
+	yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
+	yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
+)
+
+type yaml_break_t int
+
+// Line break types.
+const (
+	// Let the parser choose the break type.
+	yaml_ANY_BREAK yaml_break_t = iota
+
+	yaml_CR_BREAK   // Use CR for line breaks (Mac style).
+	yaml_LN_BREAK   // Use LN for line breaks (Unix style).
+	yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
+)
+
+type yaml_error_type_t int
+
+// Many bad things could happen with the parser and emitter.
+const (
+	// No error is produced.
+	yaml_NO_ERROR yaml_error_type_t = iota
+
+	yaml_MEMORY_ERROR   // Cannot allocate or reallocate a block of memory.
+	yaml_READER_ERROR   // Cannot read or decode the input stream.
+	yaml_SCANNER_ERROR  // Cannot scan the input stream.
+	yaml_PARSER_ERROR   // Cannot parse the input stream.
+	yaml_COMPOSER_ERROR // Cannot compose a YAML document.
+	yaml_WRITER_ERROR   // Cannot write to the output stream.
+	yaml_EMITTER_ERROR  // Cannot emit a YAML stream.
+)
+
+// The pointer position.
+type yaml_mark_t struct {
+	index  int // The position index.
+	line   int // The position line.
+	column int // The position column.
+}
+
+// Node Styles
+
+type yaml_style_t int8
+
+type yaml_scalar_style_t yaml_style_t
+
+// Scalar styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
+
+	yaml_PLAIN_SCALAR_STYLE         // The plain scalar style.
+	yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
+	yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
+	yaml_LITERAL_SCALAR_STYLE       // The literal scalar style.
+	yaml_FOLDED_SCALAR_STYLE        // The folded scalar style.
+)
+
+type yaml_sequence_style_t yaml_style_t
+
+// Sequence styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
+
+	yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
+	yaml_FLOW_SEQUENCE_STYLE  // The flow sequence style.
+)
+
+type yaml_mapping_style_t yaml_style_t
+
+// Mapping styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
+
+	yaml_BLOCK_MAPPING_STYLE // The block mapping style.
+	yaml_FLOW_MAPPING_STYLE  // The flow mapping style.
+)
+
+// Tokens
+
+type yaml_token_type_t int
+
+// Token types.
+const (
+	// An empty token.
+	yaml_NO_TOKEN yaml_token_type_t = iota
+
+	yaml_STREAM_START_TOKEN // A STREAM-START token.
+	yaml_STREAM_END_TOKEN   // A STREAM-END token.
+
+	yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+	yaml_TAG_DIRECTIVE_TOKEN     // A TAG-DIRECTIVE token.
+	yaml_DOCUMENT_START_TOKEN    // A DOCUMENT-START token.
+	yaml_DOCUMENT_END_TOKEN      // A DOCUMENT-END token.
+
+	yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+	yaml_BLOCK_MAPPING_START_TOKEN  // A BLOCK-MAPPING-START token.
+	yaml_BLOCK_END_TOKEN            // A BLOCK-END token.
+
+	yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+	yaml_FLOW_SEQUENCE_END_TOKEN   // A FLOW-SEQUENCE-END token.
+	yaml_FLOW_MAPPING_START_TOKEN  // A FLOW-MAPPING-START token.
+	yaml_FLOW_MAPPING_END_TOKEN    // A FLOW-MAPPING-END token.
+
+	yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+	yaml_FLOW_ENTRY_TOKEN  // A FLOW-ENTRY token.
+	yaml_KEY_TOKEN         // A KEY token.
+	yaml_VALUE_TOKEN       // A VALUE token.
+
+	yaml_ALIAS_TOKEN  // An ALIAS token.
+	yaml_ANCHOR_TOKEN // An ANCHOR token.
+	yaml_TAG_TOKEN    // A TAG token.
+	yaml_SCALAR_TOKEN // A SCALAR token.
+)
+
+func (tt yaml_token_type_t) String() string {
+	switch tt {
+	case yaml_NO_TOKEN:
+		return "yaml_NO_TOKEN"
+	case yaml_STREAM_START_TOKEN:
+		return "yaml_STREAM_START_TOKEN"
+	case yaml_STREAM_END_TOKEN:
+		return "yaml_STREAM_END_TOKEN"
+	case yaml_VERSION_DIRECTIVE_TOKEN:
+		return "yaml_VERSION_DIRECTIVE_TOKEN"
+	case yaml_TAG_DIRECTIVE_TOKEN:
+		return "yaml_TAG_DIRECTIVE_TOKEN"
+	case yaml_DOCUMENT_START_TOKEN:
+		return "yaml_DOCUMENT_START_TOKEN"
+	case yaml_DOCUMENT_END_TOKEN:
+		return "yaml_DOCUMENT_END_TOKEN"
+	case yaml_BLOCK_SEQUENCE_START_TOKEN:
+		return "yaml_BLOCK_SEQUENCE_START_TOKEN"
+	case yaml_BLOCK_MAPPING_START_TOKEN:
+		return "yaml_BLOCK_MAPPING_START_TOKEN"
+	case yaml_BLOCK_END_TOKEN:
+		return "yaml_BLOCK_END_TOKEN"
+	case yaml_FLOW_SEQUENCE_START_TOKEN:
+		return "yaml_FLOW_SEQUENCE_START_TOKEN"
+	case yaml_FLOW_SEQUENCE_END_TOKEN:
+		return "yaml_FLOW_SEQUENCE_END_TOKEN"
+	case yaml_FLOW_MAPPING_START_TOKEN:
+		return "yaml_FLOW_MAPPING_START_TOKEN"
+	case yaml_FLOW_MAPPING_END_TOKEN:
+		return "yaml_FLOW_MAPPING_END_TOKEN"
+	case yaml_BLOCK_ENTRY_TOKEN:
+		return "yaml_BLOCK_ENTRY_TOKEN"
+	case yaml_FLOW_ENTRY_TOKEN:
+		return "yaml_FLOW_ENTRY_TOKEN"
+	case yaml_KEY_TOKEN:
+		return "yaml_KEY_TOKEN"
+	case yaml_VALUE_TOKEN:
+		return "yaml_VALUE_TOKEN"
+	case yaml_ALIAS_TOKEN:
+		return "yaml_ALIAS_TOKEN"
+	case yaml_ANCHOR_TOKEN:
+		return "yaml_ANCHOR_TOKEN"
+	case yaml_TAG_TOKEN:
+		return "yaml_TAG_TOKEN"
+	case yaml_SCALAR_TOKEN:
+		return "yaml_SCALAR_TOKEN"
+	}
+	return "<unknown token>"
+}
+
+// The token structure.
+type yaml_token_t struct {
+	// The token type.
+	typ yaml_token_type_t
+
+	// The start/end of the token.
+	start_mark, end_mark yaml_mark_t
+
+	// The stream encoding (for yaml_STREAM_START_TOKEN).
+	encoding yaml_encoding_t
+
+	// The alias/anchor/scalar value or tag/tag directive handle
+	// (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
+	value []byte
+
+	// The tag suffix (for yaml_TAG_TOKEN).
+	suffix []byte
+
+	// The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
+	prefix []byte
+
+	// The scalar style (for yaml_SCALAR_TOKEN).
+	style yaml_scalar_style_t
+
+	// The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
+	major, minor int8
+}
+
+// Events
+
+type yaml_event_type_t int8
+
+// Event types.
+const (
+	// An empty event.
+	yaml_NO_EVENT yaml_event_type_t = iota
+
+	yaml_STREAM_START_EVENT   // A STREAM-START event.
+	yaml_STREAM_END_EVENT     // A STREAM-END event.
+	yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
+	yaml_DOCUMENT_END_EVENT   // A DOCUMENT-END event.
+	yaml_ALIAS_EVENT          // An ALIAS event.
+	yaml_SCALAR_EVENT         // A SCALAR event.
+	yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
+	yaml_SEQUENCE_END_EVENT   // A SEQUENCE-END event.
+	yaml_MAPPING_START_EVENT  // A MAPPING-START event.
+	yaml_MAPPING_END_EVENT    // A MAPPING-END event.
+)
+
+var eventStrings = []string{
+	yaml_NO_EVENT:             "none",
+	yaml_STREAM_START_EVENT:   "stream start",
+	yaml_STREAM_END_EVENT:     "stream end",
+	yaml_DOCUMENT_START_EVENT: "document start",
+	yaml_DOCUMENT_END_EVENT:   "document end",
+	yaml_ALIAS_EVENT:          "alias",
+	yaml_SCALAR_EVENT:         "scalar",
+	yaml_SEQUENCE_START_EVENT: "sequence start",
+	yaml_SEQUENCE_END_EVENT:   "sequence end",
+	yaml_MAPPING_START_EVENT:  "mapping start",
+	yaml_MAPPING_END_EVENT:    "mapping end",
+}
+
+func (e yaml_event_type_t) String() string {
+	if e < 0 || int(e) >= len(eventStrings) {
+		return fmt.Sprintf("unknown event %d", e)
+	}
+	return eventStrings[e]
+}
+
+// The event structure.
+type yaml_event_t struct {
+
+	// The event type.
+	typ yaml_event_type_t
+
+	// The start and end of the event.
+	start_mark, end_mark yaml_mark_t
+
+	// The document encoding (for yaml_STREAM_START_EVENT).
+	encoding yaml_encoding_t
+
+	// The version directive (for yaml_DOCUMENT_START_EVENT).
+	version_directive *yaml_version_directive_t
+
+	// The list of tag directives (for yaml_DOCUMENT_START_EVENT).
+	tag_directives []yaml_tag_directive_t
+
+	// The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
+	anchor []byte
+
+	// The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+	tag []byte
+
+	// The scalar value (for yaml_SCALAR_EVENT).
+	value []byte
+
+	// Is the document start/end indicator implicit, or the tag optional?
+	// (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
+	implicit bool
+
+	// Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
+	quoted_implicit bool
+
+	// The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+	style yaml_style_t
+}
+
+func (e *yaml_event_t) scalar_style() yaml_scalar_style_t     { return yaml_scalar_style_t(e.style) }
+func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
+func (e *yaml_event_t) mapping_style() yaml_mapping_style_t   { return yaml_mapping_style_t(e.style) }
+
+// Nodes
+
+const (
+	yaml_NULL_TAG      = "tag:yaml.org,2002:null"      // The tag !!null with the only possible value: null.
+	yaml_BOOL_TAG      = "tag:yaml.org,2002:bool"      // The tag !!bool with the values: true and false.
+	yaml_STR_TAG       = "tag:yaml.org,2002:str"       // The tag !!str for string values.
+	yaml_INT_TAG       = "tag:yaml.org,2002:int"       // The tag !!int for integer values.
+	yaml_FLOAT_TAG     = "tag:yaml.org,2002:float"     // The tag !!float for float values.
+	yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
+
+	yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
+	yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mappings.
+
+	// Not in original libyaml.
+	yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
+	yaml_MERGE_TAG  = "tag:yaml.org,2002:merge"
+
+	yaml_DEFAULT_SCALAR_TAG   = yaml_STR_TAG // The default scalar tag is !!str.
+	yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
+	yaml_DEFAULT_MAPPING_TAG  = yaml_MAP_TAG // The default mapping tag is !!map.
+)
+
+type yaml_node_type_t int
+
+// Node types.
+const (
+	// An empty node.
+	yaml_NO_NODE yaml_node_type_t = iota
+
+	yaml_SCALAR_NODE   // A scalar node.
+	yaml_SEQUENCE_NODE // A sequence node.
+	yaml_MAPPING_NODE  // A mapping node.
+)
+
+// An element of a sequence node.
+type yaml_node_item_t int
+
+// An element of a mapping node.
+type yaml_node_pair_t struct {
+	key   int // The key of the element.
+	value int // The value of the element.
+}
+
+// The node structure.
+type yaml_node_t struct {
+	typ yaml_node_type_t // The node type.
+	tag []byte           // The node tag.
+
+	// The node data.
+
+	// The scalar parameters (for yaml_SCALAR_NODE).
+	scalar struct {
+		value  []byte              // The scalar value.
+		length int                 // The length of the scalar value.
+		style  yaml_scalar_style_t // The scalar style.
+	}
+
+	// The sequence parameters (for YAML_SEQUENCE_NODE).
+	sequence struct {
+		items_data []yaml_node_item_t    // The stack of sequence items.
+		style      yaml_sequence_style_t // The sequence style.
+	}
+
+	// The mapping parameters (for yaml_MAPPING_NODE).
+	mapping struct {
+		pairs_data  []yaml_node_pair_t   // The stack of mapping pairs (key, value).
+		pairs_start *yaml_node_pair_t    // The beginning of the stack.
+		pairs_end   *yaml_node_pair_t    // The end of the stack.
+		pairs_top   *yaml_node_pair_t    // The top of the stack.
+		style       yaml_mapping_style_t // The mapping style.
+	}
+
+	start_mark yaml_mark_t // The beginning of the node.
+	end_mark   yaml_mark_t // The end of the node.
+
+}
+
+// The document structure.
+type yaml_document_t struct {
+
+	// The document nodes.
+	nodes []yaml_node_t
+
+	// The version directive.
+	version_directive *yaml_version_directive_t
+
+	// The list of tag directives.
+	tag_directives_data  []yaml_tag_directive_t
+	tag_directives_start int // The beginning of the tag directives list.
+	tag_directives_end   int // The end of the tag directives list.
+
+	start_implicit int // Is the document start indicator implicit?
+	end_implicit   int // Is the document end indicator implicit?
+
+	// The start/end of the document.
+	start_mark, end_mark yaml_mark_t
+}
+
+// The prototype of a read handler.
+//
+// The read handler is called when the parser needs to read more bytes from the
+// source. The handler should write not more than size bytes to the buffer.
+// The number of written bytes should be set to the size_read variable.
+//
+// [in,out]   data        A pointer to an application data specified by
+//                        yaml_parser_set_input().
+// [out]      buffer      The buffer to write the data from the source.
+// [in]       size        The size of the buffer.
+// [out]      size_read   The actual number of bytes read from the source.
+//
+// On success, the handler should return 1.  If the handler failed,
+// the returned value should be 0. On EOF, the handler should set the
+// size_read to 0 and return 1.
+type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
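+
+// An illustrative handler (a sketch, not part of the upstream docs): a
+// reader-backed handler can simply delegate to the underlying io.Reader, e.g.
+//
+//     func reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+//         return parser.input_reader.Read(buffer)
+//     }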
+
+// This structure holds information about a potential simple key.
+type yaml_simple_key_t struct {
+	possible     bool        // Is a simple key possible?
+	required     bool        // Is a simple key required?
+	token_number int         // The number of the token.
+	mark         yaml_mark_t // The position mark.
+}
+
+// The states of the parser.
+type yaml_parser_state_t int
+
+const (
+	yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
+
+	yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE           // Expect the beginning of an implicit document.
+	yaml_PARSE_DOCUMENT_START_STATE                    // Expect DOCUMENT-START.
+	yaml_PARSE_DOCUMENT_CONTENT_STATE                  // Expect the content of a document.
+	yaml_PARSE_DOCUMENT_END_STATE                      // Expect DOCUMENT-END.
+	yaml_PARSE_BLOCK_NODE_STATE                        // Expect a block node.
+	yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+	yaml_PARSE_FLOW_NODE_STATE                         // Expect a flow node.
+	yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE        // Expect the first entry of a block sequence.
+	yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE              // Expect an entry of a block sequence.
+	yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE         // Expect an entry of an indentless sequence.
+	yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE           // Expect the first key of a block mapping.
+	yaml_PARSE_BLOCK_MAPPING_KEY_STATE                 // Expect a block mapping key.
+	yaml_PARSE_BLOCK_MAPPING_VALUE_STATE               // Expect a block mapping value.
+	yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE         // Expect the first entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE               // Expect an entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE   // Expect a key of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE   // Expect the end of an ordered mapping entry.
+	yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE            // Expect the first key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_KEY_STATE                  // Expect a key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_VALUE_STATE                // Expect a value of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE          // Expect an empty value of a flow mapping.
+	yaml_PARSE_END_STATE                               // Expect nothing.
+)
+
+func (ps yaml_parser_state_t) String() string {
+	switch ps {
+	case yaml_PARSE_STREAM_START_STATE:
+		return "yaml_PARSE_STREAM_START_STATE"
+	case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+		return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
+	case yaml_PARSE_DOCUMENT_START_STATE:
+		return "yaml_PARSE_DOCUMENT_START_STATE"
+	case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+		return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
+	case yaml_PARSE_DOCUMENT_END_STATE:
+		return "yaml_PARSE_DOCUMENT_END_STATE"
+	case yaml_PARSE_BLOCK_NODE_STATE:
+		return "yaml_PARSE_BLOCK_NODE_STATE"
+	case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+		return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
+	case yaml_PARSE_FLOW_NODE_STATE:
+		return "yaml_PARSE_FLOW_NODE_STATE"
+	case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+		return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
+	case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
+	case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
+	case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
+	case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
+	case yaml_PARSE_END_STATE:
+		return "yaml_PARSE_END_STATE"
+	}
+	return "<unknown parser state>"
+}
+
+// This structure holds aliases data.
+type yaml_alias_data_t struct {
+	anchor []byte      // The anchor.
+	index  int         // The node id.
+	mark   yaml_mark_t // The anchor mark.
+}
+
+// The parser structure.
+//
+// All members are internal. Manage the structure using the
+// yaml_parser_ family of functions.
+type yaml_parser_t struct {
+
+	// Error handling
+
+	error yaml_error_type_t // Error type.
+
+	problem string // Error description.
+
+	// The byte about which the problem occurred.
+	problem_offset int
+	problem_value  int
+	problem_mark   yaml_mark_t
+
+	// The error context.
+	context      string
+	context_mark yaml_mark_t
+
+	// Reader stuff
+
+	read_handler yaml_read_handler_t // Read handler.
+
+	input_reader io.Reader // File input data.
+	input        []byte    // String input data.
+	input_pos    int
+
+	eof bool // EOF flag
+
+	buffer     []byte // The working buffer.
+	buffer_pos int    // The current position of the buffer.
+
+	unread int // The number of unread characters in the buffer.
+
+	raw_buffer     []byte // The raw buffer.
+	raw_buffer_pos int    // The current position of the buffer.
+
+	encoding yaml_encoding_t // The input encoding.
+
+	offset int         // The offset of the current position (in bytes).
+	mark   yaml_mark_t // The mark of the current position.
+
+	// Scanner stuff
+
+	stream_start_produced bool // Have we started to scan the input stream?
+	stream_end_produced   bool // Have we reached the end of the input stream?
+
+	flow_level int // The number of unclosed '[' and '{' indicators.
+
+	tokens          []yaml_token_t // The tokens queue.
+	tokens_head     int            // The head of the tokens queue.
+	tokens_parsed   int            // The number of tokens fetched from the queue.
+	token_available bool           // Does the tokens queue contain a token ready for dequeueing.
+
+	indent  int   // The current indentation level.
+	indents []int // The indentation levels stack.
+
+	simple_key_allowed bool                // May a simple key occur at the current position?
+	simple_keys        []yaml_simple_key_t // The stack of simple keys.
+
+	// Parser stuff
+
+	state          yaml_parser_state_t    // The current parser state.
+	states         []yaml_parser_state_t  // The parser states stack.
+	marks          []yaml_mark_t          // The stack of marks.
+	tag_directives []yaml_tag_directive_t // The list of TAG directives.
+
+	// Dumper stuff
+
+	aliases []yaml_alias_data_t // The alias data.
+
+	document *yaml_document_t // The currently parsed document.
+}
+
+// Emitter Definitions
+
+// The prototype of a write handler.
+//
+// The write handler is called when the emitter needs to flush the accumulated
+// characters to the output.  The handler should write @a size bytes of the
+// @a buffer to the output.
+//
+// @param[in,out]   data        A pointer to an application data specified by
+//                              yaml_emitter_set_output().
+// @param[in]       buffer      The buffer with bytes to be written.
+// @param[in]       size        The size of the buffer.
+//
+// @returns On success, the handler should return @c 1.  If the handler failed,
+// the returned value should be @c 0.
+//
+type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
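+
+// An illustrative handler (a sketch, not part of the upstream docs): a
+// writer-backed handler can simply delegate to the underlying io.Writer, e.g.
+//
+//     func writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+//         _, err := emitter.output_writer.Write(buffer)
+//         return err
+//     }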
+
+type yaml_emitter_state_t int
+
+// The emitter states.
+const (
+	// Expect STREAM-START.
+	yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
+
+	yaml_EMIT_FIRST_DOCUMENT_START_STATE       // Expect the first DOCUMENT-START or STREAM-END.
+	yaml_EMIT_DOCUMENT_START_STATE             // Expect DOCUMENT-START or STREAM-END.
+	yaml_EMIT_DOCUMENT_CONTENT_STATE           // Expect the content of a document.
+	yaml_EMIT_DOCUMENT_END_STATE               // Expect DOCUMENT-END.
+	yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE   // Expect the first item of a flow sequence.
+	yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE         // Expect an item of a flow sequence.
+	yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE     // Expect the first key of a flow mapping.
+	yaml_EMIT_FLOW_MAPPING_KEY_STATE           // Expect a key of a flow mapping.
+	yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE  // Expect a value for a simple key of a flow mapping.
+	yaml_EMIT_FLOW_MAPPING_VALUE_STATE         // Expect a value of a flow mapping.
+	yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE  // Expect the first item of a block sequence.
+	yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE        // Expect an item of a block sequence.
+	yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE    // Expect the first key of a block mapping.
+	yaml_EMIT_BLOCK_MAPPING_KEY_STATE          // Expect the key of a block mapping.
+	yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
+	yaml_EMIT_BLOCK_MAPPING_VALUE_STATE        // Expect a value of a block mapping.
+	yaml_EMIT_END_STATE                        // Expect nothing.
+)
+
+// The emitter structure.
+//
+// All members are internal.  Manage the structure using the @c yaml_emitter_
+// family of functions.
+type yaml_emitter_t struct {
+
+	// Error handling
+
+	error   yaml_error_type_t // Error type.
+	problem string            // Error description.
+
+	// Writer stuff
+
+	write_handler yaml_write_handler_t // Write handler.
+
+	output_buffer *[]byte   // String output data.
+	output_writer io.Writer // File output data.
+
+	buffer     []byte // The working buffer.
+	buffer_pos int    // The current position of the buffer.
+
+	raw_buffer     []byte // The raw buffer.
+	raw_buffer_pos int    // The current position of the buffer.
+
+	encoding yaml_encoding_t // The stream encoding.
+
+	// Emitter stuff
+
+	canonical   bool         // If the output is in the canonical style?
+	best_indent int          // The number of indentation spaces.
+	best_width  int          // The preferred width of the output lines.
+	unicode     bool         // Allow unescaped non-ASCII characters?
+	line_break  yaml_break_t // The preferred line break.
+
+	state  yaml_emitter_state_t   // The current emitter state.
+	states []yaml_emitter_state_t // The stack of states.
+
+	events      []yaml_event_t // The event queue.
+	events_head int            // The head of the event queue.
+
+	indents []int // The stack of indentation levels.
+
+	tag_directives []yaml_tag_directive_t // The list of tag directives.
+
+	indent int // The current indentation level.
+
+	flow_level int // The current flow level.
+
+	root_context       bool // Is it the document root context?
+	sequence_context   bool // Is it a sequence context?
+	mapping_context    bool // Is it a mapping context?
+	simple_key_context bool // Is it a simple mapping key context?
+
+	line       int  // The current line.
+	column     int  // The current column.
+	whitespace bool // If the last character was a whitespace?
+	indention  bool // If the last character was an indentation character (' ', '-', '?', ':')?
+	open_ended bool // If an explicit document end is required?
+
+	// Anchor analysis.
+	anchor_data struct {
+		anchor []byte // The anchor value.
+		alias  bool   // Is it an alias?
+	}
+
+	// Tag analysis.
+	tag_data struct {
+		handle []byte // The tag handle.
+		suffix []byte // The tag suffix.
+	}
+
+	// Scalar analysis.
+	scalar_data struct {
+		value                 []byte              // The scalar value.
+		multiline             bool                // Does the scalar contain line breaks?
+		flow_plain_allowed    bool                // Can the scalar be expressed in the flow plain style?
+		block_plain_allowed   bool                // Can the scalar be expressed in the block plain style?
+		single_quoted_allowed bool                // Can the scalar be expressed in the single quoted style?
+		block_allowed         bool                // Can the scalar be expressed in the literal or folded styles?
+		style                 yaml_scalar_style_t // The output style.
+	}
+
+	// Dumper stuff
+
+	opened bool // If the stream was already opened?
+	closed bool // If the stream was already closed?
+
+	// The information associated with the document nodes.
+	anchors *struct {
+		references int  // The number of references.
+		anchor     int  // The anchor id.
+		serialized bool // If the node has been emitted?
+	}
+
+	last_anchor_id int // The last assigned anchor id.
+
+	document *yaml_document_t // The currently emitted document.
+}
diff --git a/vendor/gopkg.in/yaml.v2/yamlprivateh.go b/vendor/gopkg.in/yaml.v2/yamlprivateh.go
new file mode 100644
index 0000000..8110ce3
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/yamlprivateh.go
@@ -0,0 +1,173 @@
+package yaml
+
+const (
+	// The size of the input raw buffer.
+	input_raw_buffer_size = 512
+
+	// The size of the input buffer.
+	// It should be possible to decode the whole raw buffer.
+	input_buffer_size = input_raw_buffer_size * 3
+
+	// The size of the output buffer.
+	output_buffer_size = 128
+
+	// The size of the output raw buffer.
+	// It should be possible to encode the whole output buffer.
+	output_raw_buffer_size = (output_buffer_size*2 + 2)
+
+	// The size of other stacks and queues.
+	initial_stack_size  = 16
+	initial_queue_size  = 16
+	initial_string_size = 16
+)
+
+// Check if the character at the specified position is an alphabetical
+// character, a digit, '_', or '-'.
+func is_alpha(b []byte, i int) bool {
+	return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
+}
+
+// Check if the character at the specified position is a digit.
+func is_digit(b []byte, i int) bool {
+	return b[i] >= '0' && b[i] <= '9'
+}
+
+// Get the value of a digit.
+func as_digit(b []byte, i int) int {
+	return int(b[i]) - '0'
+}
+
+// Check if the character at the specified position is a hex-digit.
+func is_hex(b []byte, i int) bool {
+	return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
+}
+
+// Get the value of a hex-digit.
+func as_hex(b []byte, i int) int {
+	bi := b[i]
+	if bi >= 'A' && bi <= 'F' {
+		return int(bi) - 'A' + 10
+	}
+	if bi >= 'a' && bi <= 'f' {
+		return int(bi) - 'a' + 10
+	}
+	return int(bi) - '0'
+}
+
+// Check if the character is ASCII.
+func is_ascii(b []byte, i int) bool {
+	return b[i] <= 0x7F
+}
+
+// Check if the character at the start of the buffer can be printed unescaped.
+func is_printable(b []byte, i int) bool {
+	return ((b[i] == 0x0A) || // . == #x0A
+		(b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
+		(b[i] == 0xC2 && b[i+1] >= 0xA0) || // #xA0 <= . <= #xD7FF
+		(b[i] > 0xC2 && b[i] < 0xED) ||
+		(b[i] == 0xED && b[i+1] < 0xA0) ||
+		(b[i] == 0xEE) ||
+		(b[i] == 0xEF && // #xE000 <= . <= #xFFFD
+			!(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
+			!(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
+}
+
+// Check if the character at the specified position is NUL.
+func is_z(b []byte, i int) bool {
+	return b[i] == 0x00
+}
+
+// Check if the beginning of the buffer is a BOM.
+func is_bom(b []byte, i int) bool {
+	return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
+}
+
+// Check if the character at the specified position is space.
+func is_space(b []byte, i int) bool {
+	return b[i] == ' '
+}
+
+// Check if the character at the specified position is tab.
+func is_tab(b []byte, i int) bool {
+	return b[i] == '\t'
+}
+
+// Check if the character at the specified position is blank (space or tab).
+func is_blank(b []byte, i int) bool {
+	//return is_space(b, i) || is_tab(b, i)
+	return b[i] == ' ' || b[i] == '\t'
+}
+
+// Check if the character at the specified position is a line break.
+func is_break(b []byte, i int) bool {
+	return (b[i] == '\r' || // CR (#xD)
+		b[i] == '\n' || // LF (#xA)
+		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
+}
+
+func is_crlf(b []byte, i int) bool {
+	return b[i] == '\r' && b[i+1] == '\n'
+}
+
+// Check if the character is a line break or NUL.
+func is_breakz(b []byte, i int) bool {
+	//return is_break(b, i) || is_z(b, i)
+	return (        // is_break:
+	b[i] == '\r' || // CR (#xD)
+		b[i] == '\n' || // LF (#xA)
+		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+		// is_z:
+		b[i] == 0)
+}
+
+// Check if the character is a line break, space, or NUL.
+func is_spacez(b []byte, i int) bool {
+	//return is_space(b, i) || is_breakz(b, i)
+	return ( // is_space:
+	b[i] == ' ' ||
+		// is_breakz:
+		b[i] == '\r' || // CR (#xD)
+		b[i] == '\n' || // LF (#xA)
+		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+		b[i] == 0)
+}
+
+// Check if the character is a line break, space, tab, or NUL.
+func is_blankz(b []byte, i int) bool {
+	//return is_blank(b, i) || is_breakz(b, i)
+	return ( // is_blank:
+	b[i] == ' ' || b[i] == '\t' ||
+		// is_breakz:
+		b[i] == '\r' || // CR (#xD)
+		b[i] == '\n' || // LF (#xA)
+		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+		b[i] == 0)
+}
+
+// Determine the width of the character.
+func width(b byte) int {
+	// Don't replace these by a switch without first
+	// confirming that it is being inlined.
+	if b&0x80 == 0x00 {
+		return 1
+	}
+	if b&0xE0 == 0xC0 {
+		return 2
+	}
+	if b&0xF0 == 0xE0 {
+		return 3
+	}
+	if b&0xF8 == 0xF0 {
+		return 4
+	}
+	return 0
+
+}
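+
+// For illustration (not part of the upstream code): width('a') == 1,
+// width(0xC2) == 2, width(0xE2) == 3, and width(0xF0) == 4, matching the
+// UTF-8 lead-byte patterns tested above; continuation bytes (0x80..0xBF)
+// return 0.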
diff --git a/vendor/k8s.io/api/LICENSE b/vendor/k8s.io/api/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/k8s.io/api/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go
new file mode 100644
index 0000000..d29913c
--- /dev/null
+++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go
@@ -0,0 +1,25 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:openapi-gen=true
+// +groupName=admissionregistration.k8s.io
+
+// Package v1alpha1 is the v1alpha1 version of the API.
+// AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration.
+// InitializerConfiguration and ValidatingWebhookConfiguration are for the
+// new dynamic admission controller configuration.
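+//
+// An illustrative manifest (a sketch, not taken from the upstream docs; the
+// names used are hypothetical):
+//
+//     apiVersion: admissionregistration.k8s.io/v1alpha1
+//     kind: InitializerConfiguration
+//     metadata:
+//       name: example-config
+//     initializers:
+//       - name: podimage.example.com
+//         rules:
+//           - apiGroups: [""]
+//             apiVersions: ["v1"]
+//             resources: ["pods"]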
+package v1alpha1 // import "k8s.io/api/admissionregistration/v1alpha1"
diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto
new file mode 100644
index 0000000..98e9a57
--- /dev/null
+++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto
@@ -0,0 +1,107 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.api.admissionregistration.v1alpha1;
+
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1alpha1";
+
+// Initializer describes the name and the failure policy of an initializer, and
+// what resources it applies to.
+message Initializer {
+  // Name is the identifier of the initializer. It will be added to the
+  // object that needs to be initialized.
+  // Name should be fully qualified, e.g., alwayspullimages.kubernetes.io, where
+  // "alwayspullimages" is the name of the webhook, and kubernetes.io is the name
+  // of the organization.
+  // Required
+  optional string name = 1;
+
+  // Rules describes what resources/subresources the initializer cares about.
+  // The initializer cares about an operation if it matches _any_ Rule.
+  // Rule.Resources must not include subresources.
+  repeated Rule rules = 2;
+}
+
+// InitializerConfiguration describes the configuration of initializers.
+message InitializerConfiguration {
+  // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Initializers is a list of resources and their default initializers.
+  // Order-sensitive.
+  // When merging multiple InitializerConfigurations, we sort the initializers
+  // from different InitializerConfigurations by the name of the
+  // InitializerConfigurations; the order of the initializers from the same
+  // InitializerConfiguration is preserved.
+  // +patchMergeKey=name
+  // +patchStrategy=merge
+  // +optional
+  repeated Initializer initializers = 2;
+}
+
+// InitializerConfigurationList is a list of InitializerConfiguration.
+message InitializerConfigurationList {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // List of InitializerConfiguration.
+  repeated InitializerConfiguration items = 2;
+}
+
+// Rule is a tuple of APIGroups, APIVersion, and Resources. It is recommended
+// to make sure that all the tuple expansions are valid.
+message Rule {
+  // APIGroups is the API groups the resources belong to. '*' is all groups.
+  // If '*' is present, the length of the slice must be one.
+  // Required.
+  repeated string apiGroups = 1;
+
+  // APIVersions is the API versions the resources belong to. '*' is all versions.
+  // If '*' is present, the length of the slice must be one.
+  // Required.
+  repeated string apiVersions = 2;
+
+  // Resources is a list of resources this rule applies to.
+  //
+  // For example:
+  // 'pods' means pods.
+  // 'pods/log' means the log subresource of pods.
+  // '*' means all resources, but not subresources.
+  // 'pods/*' means all subresources of pods.
+  // '*/scale' means all scale subresources.
+  // '*/*' means all resources and their subresources.
+  //
+  // If wildcard is present, the validation rule will ensure resources do not
+  // overlap with each other.
+  //
+  // Depending on the enclosing object, subresources might not be allowed.
+  // Required.
+  repeated string resources = 3;
+}
+
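Reviewer note: the wildcard semantics documented on Rule.resources above can be captured in a few lines of Go. This is an illustrative sketch of the documented matching rules only, not the apiserver's actual matcher:

package main

import (
	"fmt"
	"strings"
)

// matches reports whether a requested resource (e.g. "pods" or "pods/log")
// is covered by a single Rule.resources entry, per the comment above.
func matches(ruleResource, requested string) bool {
	resource, sub := requested, ""
	if i := strings.Index(requested, "/"); i >= 0 {
		resource, sub = requested[:i], requested[i+1:]
	}
	switch {
	case ruleResource == "*/*":
		return true // all resources and their subresources
	case ruleResource == "*":
		return sub == "" // all resources, but not subresources
	case strings.HasSuffix(ruleResource, "/*"):
		return sub != "" && resource == strings.TrimSuffix(ruleResource, "/*")
	case strings.HasPrefix(ruleResource, "*/"):
		return sub != "" && sub == strings.TrimPrefix(ruleResource, "*/")
	default:
		return ruleResource == requested
	}
}

func main() {
	fmt.Println(matches("pods/*", "pods/log")) // true
	fmt.Println(matches("*", "pods/log"))      // false: '*' excludes subresources
}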
diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/register.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/register.go
new file mode 100644
index 0000000..e9a4164
--- /dev/null
+++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/register.go
@@ -0,0 +1,51 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+const GroupName = "admissionregistration.k8s.io"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
+	// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+	SchemeBuilder      = runtime.NewSchemeBuilder(addKnownTypes)
+	localSchemeBuilder = &SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&InitializerConfiguration{},
+		&InitializerConfigurationList{},
+	)
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
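For context, consumers register these types by calling AddToScheme before encoding or decoding them. A minimal sketch using only this package and apimachinery:

package main

import (
	"fmt"

	admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// Build a fresh scheme and register the v1alpha1 types into it.
	scheme := runtime.NewScheme()
	if err := admissionregistrationv1alpha1.AddToScheme(scheme); err != nil {
		panic(err)
	}

	// The scheme now recognizes the kinds added by addKnownTypes above.
	gvk := admissionregistrationv1alpha1.SchemeGroupVersion.WithKind("InitializerConfiguration")
	fmt.Println(scheme.Recognizes(gvk)) // true
}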
diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go
new file mode 100644
index 0000000..a245f1e
--- /dev/null
+++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go
@@ -0,0 +1,106 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// InitializerConfiguration describes the configuration of initializers.
+type InitializerConfiguration struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Initializers is a list of resources and their default initializers.
+	// Order-sensitive.
+	// When merging multiple InitializerConfigurations, we sort the initializers
+	// from different InitializerConfigurations by the name of the
+	// InitializerConfigurations; the order of the initializers from the same
+	// InitializerConfiguration is preserved.
+	// +patchMergeKey=name
+	// +patchStrategy=merge
+	// +optional
+	Initializers []Initializer `json:"initializers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=initializers"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// InitializerConfigurationList is a list of InitializerConfiguration.
+type InitializerConfigurationList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// List of InitializerConfiguration.
+	Items []InitializerConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// Initializer describes the name and the failure policy of an initializer, and
+// what resources it applies to.
+type Initializer struct {
+	// Name is the identifier of the initializer. It will be added to the
+	// object that needs to be initialized.
+	// Name should be fully qualified, e.g., alwayspullimages.kubernetes.io, where
+	// "alwayspullimages" is the name of the webhook, and kubernetes.io is the name
+	// of the organization.
+	// Required
+	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+
+	// Rules describes what resources/subresources the initializer cares about.
+	// The initializer cares about an operation if it matches _any_ Rule.
+	// Rule.Resources must not include subresources.
+	Rules []Rule `json:"rules,omitempty" protobuf:"bytes,2,rep,name=rules"`
+}
+
+// Rule is a tuple of APIGroups, APIVersion, and Resources. It is recommended
+// to make sure that all the tuple expansions are valid.
+type Rule struct {
+	// APIGroups is the API groups the resources belong to. '*' is all groups.
+	// If '*' is present, the length of the slice must be one.
+	// Required.
+	APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,1,rep,name=apiGroups"`
+
+	// APIVersions is the API versions the resources belong to. '*' is all versions.
+	// If '*' is present, the length of the slice must be one.
+	// Required.
+	APIVersions []string `json:"apiVersions,omitempty" protobuf:"bytes,2,rep,name=apiVersions"`
+
+	// Resources is a list of resources this rule applies to.
+	//
+	// For example:
+	// 'pods' means pods.
+	// 'pods/log' means the log subresource of pods.
+	// '*' means all resources, but not subresources.
+	// 'pods/*' means all subresources of pods.
+	// '*/scale' means all scale subresources.
+	// '*/*' means all resources and their subresources.
+	//
+	// If wildcard is present, the validation rule will ensure resources do not
+	// overlap with each other.
+	//
+	// Depending on the enclosing object, subresources might not be allowed.
+	// Required.
+	Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"`
+}
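To show how the types above compose, here is a sketch that builds an InitializerConfiguration; the names example-config and podimage.example.com are hypothetical:

package main

import (
	"fmt"

	admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// One initializer that wants to see every pod in the core/v1 group.
	cfg := admissionregistrationv1alpha1.InitializerConfiguration{
		ObjectMeta: metav1.ObjectMeta{Name: "example-config"},
		Initializers: []admissionregistrationv1alpha1.Initializer{{
			Name: "podimage.example.com", // hypothetical initializer
			Rules: []admissionregistrationv1alpha1.Rule{{
				APIGroups:   []string{""}, // "" is the core group
				APIVersions: []string{"v1"},
				Resources:   []string{"pods"}, // subresources are not allowed here
			}},
		}},
	}
	fmt.Println(cfg.Initializers[0].Name)
}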
diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go
new file mode 100644
index 0000000..69e4b7c
--- /dev/null
+++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go
@@ -0,0 +1,71 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple lines or blocks that you want to ignore, use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_Initializer = map[string]string{
+	"":      "Initializer describes the name and the failure policy of an initializer, and what resources it applies to.",
+	"name":  "Name is the identifier of the initializer. It will be added to the object that needs to be initialized. Name should be fully qualified, e.g., alwayspullimages.kubernetes.io, where \"alwayspullimages\" is the name of the webhook, and kubernetes.io is the name of the organization. Required",
+	"rules": "Rules describes what resources/subresources the initializer cares about. The initializer cares about an operation if it matches _any_ Rule. Rule.Resources must not include subresources.",
+}
+
+func (Initializer) SwaggerDoc() map[string]string {
+	return map_Initializer
+}
+
+var map_InitializerConfiguration = map[string]string{
+	"":             "InitializerConfiguration describes the configuration of initializers.",
+	"metadata":     "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.",
+	"initializers": "Initializers is a list of resources and their default initializers Order-sensitive. When merging multiple InitializerConfigurations, we sort the initializers from different InitializerConfigurations by the name of the InitializerConfigurations; the order of the initializers from the same InitializerConfiguration is preserved.",
+}
+
+func (InitializerConfiguration) SwaggerDoc() map[string]string {
+	return map_InitializerConfiguration
+}
+
+var map_InitializerConfigurationList = map[string]string{
+	"":         "InitializerConfigurationList is a list of InitializerConfiguration.",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
+	"items":    "List of InitializerConfiguration.",
+}
+
+func (InitializerConfigurationList) SwaggerDoc() map[string]string {
+	return map_InitializerConfigurationList
+}
+
+var map_Rule = map[string]string{
+	"":            "Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended to make sure that all the tuple expansions are valid.",
+	"apiGroups":   "APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required.",
+	"apiVersions": "APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required.",
+	"resources":   "Resources is a list of resources this rule applies to.\n\nFor example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources.\n\nIf wildcard is present, the validation rule will ensure resources do not overlap with each other.\n\nDepending on the enclosing object, subresources might not be allowed. Required.",
+}
+
+func (Rule) SwaggerDoc() map[string]string {
+	return map_Rule
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
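These maps are reached through the SwaggerDoc methods, so any consumer (go-restful or otherwise) can read the generated field documentation directly; a trivial sketch:

package main

import (
	"fmt"

	admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
)

func main() {
	// The empty key holds the doc string for the type itself;
	// the other keys are per-field.
	docs := admissionregistrationv1alpha1.Rule{}.SwaggerDoc()
	fmt.Println(docs[""])
	fmt.Println(docs["apiGroups"])
}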
diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..9f636b4
--- /dev/null
+++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,145 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Initializer) DeepCopyInto(out *Initializer) {
+	*out = *in
+	if in.Rules != nil {
+		in, out := &in.Rules, &out.Rules
+		*out = make([]Rule, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Initializer.
+func (in *Initializer) DeepCopy() *Initializer {
+	if in == nil {
+		return nil
+	}
+	out := new(Initializer)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InitializerConfiguration) DeepCopyInto(out *InitializerConfiguration) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	if in.Initializers != nil {
+		in, out := &in.Initializers, &out.Initializers
+		*out = make([]Initializer, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitializerConfiguration.
+func (in *InitializerConfiguration) DeepCopy() *InitializerConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(InitializerConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *InitializerConfiguration) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InitializerConfigurationList) DeepCopyInto(out *InitializerConfigurationList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]InitializerConfiguration, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitializerConfigurationList.
+func (in *InitializerConfigurationList) DeepCopy() *InitializerConfigurationList {
+	if in == nil {
+		return nil
+	}
+	out := new(InitializerConfigurationList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *InitializerConfigurationList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Rule) DeepCopyInto(out *Rule) {
+	*out = *in
+	if in.APIGroups != nil {
+		in, out := &in.APIGroups, &out.APIGroups
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.APIVersions != nil {
+		in, out := &in.APIVersions, &out.APIVersions
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Rule.
+func (in *Rule) DeepCopy() *Rule {
+	if in == nil {
+		return nil
+	}
+	out := new(Rule)
+	in.DeepCopyInto(out)
+	return out
+}
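The generated deep-copy functions exist so that a copy never aliases the original's slices or nested objects; a quick sketch of that property:

package main

import (
	"fmt"

	admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
)

func main() {
	orig := &admissionregistrationv1alpha1.Rule{Resources: []string{"pods"}}
	cp := orig.DeepCopy()

	// The copy owns its own backing array, so mutating it leaves the
	// original untouched.
	cp.Resources[0] = "secrets"
	fmt.Println(orig.Resources[0], cp.Resources[0]) // pods secrets
}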
diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go b/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go
new file mode 100644
index 0000000..2b29efa
--- /dev/null
+++ b/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go
@@ -0,0 +1,25 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:openapi-gen=true
+// +groupName=admissionregistration.k8s.io
+
+// Package v1beta1 is the v1beta1 version of the API.
+// AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration;
+// InitializerConfiguration and ValidatingWebhookConfiguration are for the
+// new dynamic admission controller configuration.
+package v1beta1 // import "k8s.io/api/admissionregistration/v1beta1"
diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto
new file mode 100644
index 0000000..1c40ae5
--- /dev/null
+++ b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto
@@ -0,0 +1,269 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.api.admissionregistration.v1beta1;
+
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1beta1";
+
+// MutatingWebhookConfiguration describes the configuration of an admission webhook that accepts or rejects an object and may change it.
+message MutatingWebhookConfiguration {
+  // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Webhooks is a list of webhooks and the affected resources and operations.
+  // +optional
+  // +patchMergeKey=name
+  // +patchStrategy=merge
+  repeated Webhook Webhooks = 2;
+}
+
+// MutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration.
+message MutatingWebhookConfigurationList {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // List of MutatingWebhookConfiguration.
+  repeated MutatingWebhookConfiguration items = 2;
+}
+
+// Rule is a tuple of APIGroups, APIVersion, and Resources. It is recommended
+// to make sure that all the tuple expansions are valid.
+message Rule {
+  // APIGroups is the API groups the resources belong to. '*' is all groups.
+  // If '*' is present, the length of the slice must be one.
+  // Required.
+  repeated string apiGroups = 1;
+
+  // APIVersions is the API versions the resources belong to. '*' is all versions.
+  // If '*' is present, the length of the slice must be one.
+  // Required.
+  repeated string apiVersions = 2;
+
+  // Resources is a list of resources this rule applies to.
+  //
+  // For example:
+  // 'pods' means pods.
+  // 'pods/log' means the log subresource of pods.
+  // '*' means all resources, but not subresources.
+  // 'pods/*' means all subresources of pods.
+  // '*/scale' means all scale subresources.
+  // '*/*' means all resources and their subresources.
+  //
+  // If wildcard is present, the validation rule will ensure resources do not
+  // overlap with each other.
+  //
+  // Depending on the enclosing object, subresources might not be allowed.
+  // Required.
+  repeated string resources = 3;
+}
+
+// RuleWithOperations is a tuple of Operations and Resources. It is recommended to make
+// sure that all the tuple expansions are valid.
+message RuleWithOperations {
+  // Operations is the operations the admission hook cares about - CREATE, UPDATE, or *
+  // for all operations.
+  // If '*' is present, the length of the slice must be one.
+  // Required.
+  repeated string operations = 1;
+
+  // Rule is embedded; it describes other criteria of the rule, like
+  // APIGroups, APIVersions, Resources, etc.
+  optional Rule rule = 2;
+}
+
+// ServiceReference holds a reference to Service.legacy.k8s.io
+message ServiceReference {
+  // `namespace` is the namespace of the service.
+  // Required
+  optional string namespace = 1;
+
+  // `name` is the name of the service.
+  // Required
+  optional string name = 2;
+
+  // `path` is an optional URL path which will be sent in any request to
+  // this service.
+  // +optional
+  optional string path = 3;
+}
+
+// ValidatingWebhookConfiguration describes the configuration of an admission webhook that accepts or rejects an object without changing it.
+message ValidatingWebhookConfiguration {
+  // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Webhooks is a list of webhooks and the affected resources and operations.
+  // +optional
+  // +patchMergeKey=name
+  // +patchStrategy=merge
+  repeated Webhook Webhooks = 2;
+}
+
+// ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration.
+message ValidatingWebhookConfigurationList {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // List of ValidatingWebhookConfiguration.
+  repeated ValidatingWebhookConfiguration items = 2;
+}
+
+// Webhook describes an admission webhook and the resources and operations it applies to.
+message Webhook {
+  // The name of the admission webhook.
+  // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where
+  // "imagepolicy" is the name of the webhook, and kubernetes.io is the name
+  // of the organization.
+  // Required.
+  optional string name = 1;
+
+  // ClientConfig defines how to communicate with the hook.
+  // Required
+  optional WebhookClientConfig clientConfig = 2;
+
+  // Rules describes what operations on what resources/subresources the webhook cares about.
+  // The webhook cares about an operation if it matches _any_ Rule.
+  // However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks
+  // from putting the cluster in a state which cannot be recovered from without completely
+  // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called
+  // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.
+  repeated RuleWithOperations rules = 3;
+
+  // FailurePolicy defines how unrecognized errors from the admission endpoint are handled -
+  // allowed values are Ignore or Fail. Defaults to Ignore.
+  // +optional
+  optional string failurePolicy = 4;
+
+  // NamespaceSelector decides whether to run the webhook on an object based
+  // on whether the namespace for that object matches the selector. If the
+  // object itself is a namespace, the matching is performed on
+  // object.metadata.labels. If the object is another cluster scoped resource,
+  // it never skips the webhook.
+  //
+  // For example, to run the webhook on any objects whose namespace is not
+  // associated with "runlevel" of "0" or "1";  you will set the selector as
+  // follows:
+  // "namespaceSelector": {
+  //   "matchExpressions": [
+  //     {
+  //       "key": "runlevel",
+  //       "operator": "NotIn",
+  //       "values": [
+  //         "0",
+  //         "1"
+  //       ]
+  //     }
+  //   ]
+  // }
+  //
+  // If instead you want to only run the webhook on any objects whose
+  // namespace is associated with the "environment" of "prod" or "staging",
+  // you will set the selector as follows:
+  // "namespaceSelector": {
+  //   "matchExpressions": [
+  //     {
+  //       "key": "environment",
+  //       "operator": "In",
+  //       "values": [
+  //         "prod",
+  //         "staging"
+  //       ]
+  //     }
+  //   ]
+  // }
+  //
+  // See
+  // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+  // for more examples of label selectors.
+  //
+  // Defaults to the empty LabelSelector, which matches everything.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5;
+
+  // SideEffects states whether this webhook has side effects.
+  // Acceptable values are: Unknown, None, Some, NoneOnDryRun.
+  // Webhooks with side effects MUST implement a reconciliation system, since a request may be
+  // rejected by a future step in the admission chain and the side effects therefore need to be undone.
+  // Requests with the dryRun attribute will be auto-rejected if they match a webhook with
+  // sideEffects == Unknown or Some. Defaults to Unknown.
+  // +optional
+  optional string sideEffects = 6;
+}
+
+// WebhookClientConfig contains the information to make a TLS
+// connection with the webhook
+message WebhookClientConfig {
+  // `url` gives the location of the webhook, in standard URL form
+  // (`scheme://host:port/path`). Exactly one of `url` or `service`
+  // must be specified.
+  //
+  // The `host` should not refer to a service running in the cluster; use
+  // the `service` field instead. The host might be resolved via external
+  // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve
+  // in-cluster DNS as that would be a layering violation). `host` may
+  // also be an IP address.
+  //
+  // Please note that using `localhost` or `127.0.0.1` as a `host` is
+  // risky unless you take great care to run this webhook on all hosts
+  // which run an apiserver which might need to make calls to this
+  // webhook. Such installs are likely to be non-portable, i.e., not easy
+  // to turn up in a new cluster.
+  //
+  // The scheme must be "https"; the URL must begin with "https://".
+  //
+  // A path is optional, and if present may be any string permissible in
+  // a URL. You may use the path to pass an arbitrary string to the
+  // webhook, for example, a cluster identifier.
+  //
+  // Attempting to use a user or basic auth e.g. "user:password@" is not
+  // allowed. Fragments ("#...") and query parameters ("?...") are not
+  // allowed, either.
+  //
+  // +optional
+  optional string url = 3;
+
+  // `service` is a reference to the service for this webhook. Either
+  // `service` or `url` must be specified.
+  //
+  // If the webhook is running within the cluster, then you should use `service`.
+  //
+  // Port 443 will be used if it is open, otherwise it is an error.
+  //
+  // +optional
+  optional ServiceReference service = 1;
+
+  // `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate.
+  // If unspecified, system trust roots on the apiserver are used.
+  // +optional
+  optional bytes caBundle = 2;
+}
+
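The runlevel namespaceSelector JSON shown in the Webhook comment above corresponds to this apimachinery construction (a sketch reusing the comment's own example key and values):

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Matches any namespace whose "runlevel" label is not "0" or "1".
	sel := metav1.LabelSelector{
		MatchExpressions: []metav1.LabelSelectorRequirement{{
			Key:      "runlevel",
			Operator: metav1.LabelSelectorOpNotIn,
			Values:   []string{"0", "1"},
		}},
	}
	fmt.Println(sel.MatchExpressions[0].Key)
}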
diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/register.go b/vendor/k8s.io/api/admissionregistration/v1beta1/register.go
new file mode 100644
index 0000000..d126da9
--- /dev/null
+++ b/vendor/k8s.io/api/admissionregistration/v1beta1/register.go
@@ -0,0 +1,53 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+const GroupName = "admissionregistration.k8s.io"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
+	// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+	SchemeBuilder      = runtime.NewSchemeBuilder(addKnownTypes)
+	localSchemeBuilder = &SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&ValidatingWebhookConfiguration{},
+		&ValidatingWebhookConfigurationList{},
+		&MutatingWebhookConfiguration{},
+		&MutatingWebhookConfigurationList{},
+	)
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go
new file mode 100644
index 0000000..49d94ec
--- /dev/null
+++ b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go
@@ -0,0 +1,306 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// Rule is a tuple of APIGroups, APIVersion, and Resources. It is recommended
+// to make sure that all the tuple expansions are valid.
+type Rule struct {
+	// APIGroups is the API groups the resources belong to. '*' is all groups.
+	// If '*' is present, the length of the slice must be one.
+	// Required.
+	APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,1,rep,name=apiGroups"`
+
+	// APIVersions is the API versions the resources belong to. '*' is all versions.
+	// If '*' is present, the length of the slice must be one.
+	// Required.
+	APIVersions []string `json:"apiVersions,omitempty" protobuf:"bytes,2,rep,name=apiVersions"`
+
+	// Resources is a list of resources this rule applies to.
+	//
+	// For example:
+	// 'pods' means pods.
+	// 'pods/log' means the log subresource of pods.
+	// '*' means all resources, but not subresources.
+	// 'pods/*' means all subresources of pods.
+	// '*/scale' means all scale subresources.
+	// '*/*' means all resources and their subresources.
+	//
+	// If wildcard is present, the validation rule will ensure resources do not
+	// overlap with each other.
+	//
+	// Depending on the enclosing object, subresources might not be allowed.
+	// Required.
+	Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"`
+}
+
+type FailurePolicyType string
+
+const (
+	// Ignore means that an error calling the webhook is ignored.
+	Ignore FailurePolicyType = "Ignore"
+	// Fail means that an error calling the webhook causes the admission to fail.
+	Fail FailurePolicyType = "Fail"
+)
+
+type SideEffectClass string
+
+const (
+	// SideEffectClassUnknown means that no information is known about the side effects of calling the webhook.
+	// If a request with the dry-run attribute would trigger a call to this webhook, the request will instead fail.
+	SideEffectClassUnknown SideEffectClass = "Unknown"
+	// SideEffectClassNone means that calling the webhook will have no side effects.
+	SideEffectClassNone SideEffectClass = "None"
+	// SideEffectClassSome means that calling the webhook will possibly have side effects.
+	// If a request with the dry-run attribute would trigger a call to this webhook, the request will instead fail.
+	SideEffectClassSome SideEffectClass = "Some"
+	// SideEffectClassNoneOnDryRun means that calling the webhook will possibly have side effects, but if the
+	// request being reviewed has the dry-run attribute, the side effects will be suppressed.
+	SideEffectClassNoneOnDryRun SideEffectClass = "NoneOnDryRun"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ValidatingWebhookConfiguration describes the configuration of an admission webhook that accepts or rejects an object without changing it.
+type ValidatingWebhookConfiguration struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+	// Webhooks is a list of webhooks and the affected resources and operations.
+	// +optional
+	// +patchMergeKey=name
+	// +patchStrategy=merge
+	Webhooks []Webhook `json:"webhooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=Webhooks"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration.
+type ValidatingWebhookConfigurationList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+	// List of ValidatingWebhookConfiguration.
+	Items []ValidatingWebhookConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// MutatingWebhookConfiguration describes the configuration of an admission webhook that accepts or rejects an object and may change it.
+type MutatingWebhookConfiguration struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+	// Webhooks is a list of webhooks and the affected resources and operations.
+	// +optional
+	// +patchMergeKey=name
+	// +patchStrategy=merge
+	Webhooks []Webhook `json:"webhooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=Webhooks"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// MutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration.
+type MutatingWebhookConfigurationList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+	// List of MutatingWebhookConfiguration.
+	Items []MutatingWebhookConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// Webhook describes an admission webhook and the resources and operations it applies to.
+type Webhook struct {
+	// The name of the admission webhook.
+	// Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where
+	// "imagepolicy" is the name of the webhook, and kubernetes.io is the name
+	// of the organization.
+	// Required.
+	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+
+	// ClientConfig defines how to communicate with the hook.
+	// Required
+	ClientConfig WebhookClientConfig `json:"clientConfig" protobuf:"bytes,2,opt,name=clientConfig"`
+
+	// Rules describes what operations on what resources/subresources the webhook cares about.
+	// The webhook cares about an operation if it matches _any_ Rule.
+	// However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks
+	// from putting the cluster in a state which cannot be recovered from without completely
+	// disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called
+	// on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.
+	Rules []RuleWithOperations `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"`
+
+	// FailurePolicy defines how unrecognized errors from the admission endpoint are handled -
+	// allowed values are Ignore or Fail. Defaults to Ignore.
+	// +optional
+	FailurePolicy *FailurePolicyType `json:"failurePolicy,omitempty" protobuf:"bytes,4,opt,name=failurePolicy,casttype=FailurePolicyType"`
+
+	// NamespaceSelector decides whether to run the webhook on an object based
+	// on whether the namespace for that object matches the selector. If the
+	// object itself is a namespace, the matching is performed on
+	// object.metadata.labels. If the object is another cluster scoped resource,
+	// it never skips the webhook.
+	//
+	// For example, to run the webhook on any objects whose namespace is not
+	// associated with "runlevel" of "0" or "1";  you will set the selector as
+	// follows:
+	// "namespaceSelector": {
+	//   "matchExpressions": [
+	//     {
+	//       "key": "runlevel",
+	//       "operator": "NotIn",
+	//       "values": [
+	//         "0",
+	//         "1"
+	//       ]
+	//     }
+	//   ]
+	// }
+	//
+	// If instead you want to only run the webhook on any objects whose
+	// namespace is associated with the "environment" of "prod" or "staging",
+	// you will set the selector as follows:
+	// "namespaceSelector": {
+	//   "matchExpressions": [
+	//     {
+	//       "key": "environment",
+	//       "operator": "In",
+	//       "values": [
+	//         "prod",
+	//         "staging"
+	//       ]
+	//     }
+	//   ]
+	// }
+	//
+	// See
+	// https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+	// for more examples of label selectors.
+	//
+	// Defaults to the empty LabelSelector, which matches everything.
+	// +optional
+	NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,5,opt,name=namespaceSelector"`
+
+	// SideEffects states whether this webhook has side effects.
+	// Acceptable values are: Unknown, None, Some, NoneOnDryRun.
+	// Webhooks with side effects MUST implement a reconciliation system, since a request may be
+	// rejected by a future step in the admission chain and the side effects therefore need to be undone.
+	// Requests with the dryRun attribute will be auto-rejected if they match a webhook with
+	// sideEffects == Unknown or Some. Defaults to Unknown.
+	// +optional
+	SideEffects *SideEffectClass `json:"sideEffects,omitempty" protobuf:"bytes,6,opt,name=sideEffects,casttype=SideEffectClass"`
+}
+
+// RuleWithOperations is a tuple of Operations and Resources. It is recommended to make
+// sure that all the tuple expansions are valid.
+type RuleWithOperations struct {
+	// Operations is the operations the admission hook cares about - CREATE, UPDATE, or *
+	// for all operations.
+	// If '*' is present, the length of the slice must be one.
+	// Required.
+	Operations []OperationType `json:"operations,omitempty" protobuf:"bytes,1,rep,name=operations,casttype=OperationType"`
+	// Rule is embedded; it describes other criteria of the rule, like
+	// APIGroups, APIVersions, Resources, etc.
+	Rule `json:",inline" protobuf:"bytes,2,opt,name=rule"`
+}
+
+type OperationType string
+
+// The constants should be kept in sync with those defined in k8s.io/kubernetes/pkg/admission/interface.go.
+const (
+	OperationAll OperationType = "*"
+	Create       OperationType = "CREATE"
+	Update       OperationType = "UPDATE"
+	Delete       OperationType = "DELETE"
+	Connect      OperationType = "CONNECT"
+)
+
+// WebhookClientConfig contains the information to make a TLS
+// connection with the webhook
+type WebhookClientConfig struct {
+	// `url` gives the location of the webhook, in standard URL form
+	// (`scheme://host:port/path`). Exactly one of `url` or `service`
+	// must be specified.
+	//
+	// The `host` should not refer to a service running in the cluster; use
+	// the `service` field instead. The host might be resolved via external
+	// DNS in some apiservers (e.g., `kube-apiserver` cannot resolve
+	// in-cluster DNS as that would be a layering violation). `host` may
+	// also be an IP address.
+	//
+	// Please note that using `localhost` or `127.0.0.1` as a `host` is
+	// risky unless you take great care to run this webhook on all hosts
+	// which run an apiserver which might need to make calls to this
+	// webhook. Such installs are likely to be non-portable, i.e., not easy
+	// to turn up in a new cluster.
+	//
+	// The scheme must be "https"; the URL must begin with "https://".
+	//
+	// A path is optional, and if present may be any string permissible in
+	// a URL. You may use the path to pass an arbitrary string to the
+	// webhook, for example, a cluster identifier.
+	//
+	// Attempting to use a user or basic auth e.g. "user:password@" is not
+	// allowed. Fragments ("#...") and query parameters ("?...") are not
+	// allowed, either.
+	//
+	// +optional
+	URL *string `json:"url,omitempty" protobuf:"bytes,3,opt,name=url"`
+
+	// `service` is a reference to the service for this webhook. Either
+	// `service` or `url` must be specified.
+	//
+	// If the webhook is running within the cluster, then you should use `service`.
+	//
+	// Port 443 will be used if it is open, otherwise it is an error.
+	//
+	// +optional
+	Service *ServiceReference `json:"service,omitempty" protobuf:"bytes,1,opt,name=service"`
+
+	// `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate.
+	// If unspecified, system trust roots on the apiserver are used.
+	// +optional
+	CABundle []byte `json:"caBundle,omitempty" protobuf:"bytes,2,opt,name=caBundle"`
+}
+
+// ServiceReference holds a reference to Service.legacy.k8s.io
+type ServiceReference struct {
+	// `namespace` is the namespace of the service.
+	// Required
+	Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"`
+	// `name` is the name of the service.
+	// Required
+	Name string `json:"name" protobuf:"bytes,2,opt,name=name"`
+
+	// `path` is an optional URL path which will be sent in any request to
+	// this service.
+	// +optional
+	Path *string `json:"path,omitempty" protobuf:"bytes,3,opt,name=path"`
+}
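Putting the v1beta1 types together, a sketch that assembles a ValidatingWebhookConfiguration; every concrete name here (example-validator, podpolicy.example.com, the default/example-webhook service, the placeholder CA bundle) is hypothetical:

package main

import (
	"fmt"

	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	failurePolicy := admissionregistrationv1beta1.Fail
	path := "/validate"

	// A webhook that validates pod CREATE requests via an in-cluster service.
	cfg := admissionregistrationv1beta1.ValidatingWebhookConfiguration{
		ObjectMeta: metav1.ObjectMeta{Name: "example-validator"},
		Webhooks: []admissionregistrationv1beta1.Webhook{{
			Name: "podpolicy.example.com",
			ClientConfig: admissionregistrationv1beta1.WebhookClientConfig{
				Service: &admissionregistrationv1beta1.ServiceReference{
					Namespace: "default",
					Name:      "example-webhook",
					Path:      &path,
				},
				CABundle: []byte("<PEM bundle goes here>"), // placeholder
			},
			Rules: []admissionregistrationv1beta1.RuleWithOperations{{
				Operations: []admissionregistrationv1beta1.OperationType{
					admissionregistrationv1beta1.Create,
				},
				Rule: admissionregistrationv1beta1.Rule{
					APIGroups:   []string{""},
					APIVersions: []string{"v1"},
					Resources:   []string{"pods"},
				},
			}},
			FailurePolicy: &failurePolicy,
		}},
	}
	fmt.Println(cfg.Webhooks[0].Name)
}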
diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go
new file mode 100644
index 0000000..e97628a
--- /dev/null
+++ b/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go
@@ -0,0 +1,126 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple lines or blocks that you want to ignore, use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_MutatingWebhookConfiguration = map[string]string{
+	"":         "MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object.",
+	"metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.",
+	"webhooks": "Webhooks is a list of webhooks and the affected resources and operations.",
+}
+
+func (MutatingWebhookConfiguration) SwaggerDoc() map[string]string {
+	return map_MutatingWebhookConfiguration
+}
+
+var map_MutatingWebhookConfigurationList = map[string]string{
+	"":         "MutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration.",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
+	"items":    "List of MutatingWebhookConfiguration.",
+}
+
+func (MutatingWebhookConfigurationList) SwaggerDoc() map[string]string {
+	return map_MutatingWebhookConfigurationList
+}
+
+var map_Rule = map[string]string{
+	"":            "Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended to make sure that all the tuple expansions are valid.",
+	"apiGroups":   "APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required.",
+	"apiVersions": "APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required.",
+	"resources":   "Resources is a list of resources this rule applies to.\n\nFor example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources.\n\nIf wildcard is present, the validation rule will ensure resources do not overlap with each other.\n\nDepending on the enclosing object, subresources might not be allowed. Required.",
+}
+
+func (Rule) SwaggerDoc() map[string]string {
+	return map_Rule
+}
+
+var map_RuleWithOperations = map[string]string{
+	"":           "RuleWithOperations is a tuple of Operations and Resources. It is recommended to make sure that all the tuple expansions are valid.",
+	"operations": "Operations is the operations the admission hook cares about - CREATE, UPDATE, or * for all operations. If '*' is present, the length of the slice must be one. Required.",
+}
+
+func (RuleWithOperations) SwaggerDoc() map[string]string {
+	return map_RuleWithOperations
+}
+
+var map_ServiceReference = map[string]string{
+	"":          "ServiceReference holds a reference to Service.legacy.k8s.io",
+	"namespace": "`namespace` is the namespace of the service. Required",
+	"name":      "`name` is the name of the service. Required",
+	"path":      "`path` is an optional URL path which will be sent in any request to this service.",
+}
+
+func (ServiceReference) SwaggerDoc() map[string]string {
+	return map_ServiceReference
+}
+
+var map_ValidatingWebhookConfiguration = map[string]string{
+	"":         "ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it.",
+	"metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.",
+	"webhooks": "Webhooks is a list of webhooks and the affected resources and operations.",
+}
+
+func (ValidatingWebhookConfiguration) SwaggerDoc() map[string]string {
+	return map_ValidatingWebhookConfiguration
+}
+
+var map_ValidatingWebhookConfigurationList = map[string]string{
+	"":         "ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration.",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
+	"items":    "List of ValidatingWebhookConfiguration.",
+}
+
+func (ValidatingWebhookConfigurationList) SwaggerDoc() map[string]string {
+	return map_ValidatingWebhookConfigurationList
+}
+
+var map_Webhook = map[string]string{
+	"":                  "Webhook describes an admission webhook and the resources and operations it applies to.",
+	"name":              "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.",
+	"clientConfig":      "ClientConfig defines how to communicate with the hook. Required",
+	"rules":             "Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule. However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks from putting the cluster in a state which cannot be recovered from without completely disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.",
+	"failurePolicy":     "FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Ignore.",
+	"namespaceSelector": "NamespaceSelector decides whether to run the webhook on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. If the object is another cluster scoped resource, it never skips the webhook.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\";  you will set the selector as follows: \"namespaceSelector\": {\n  \"matchExpressions\": [\n    {\n      \"key\": \"runlevel\",\n      \"operator\": \"NotIn\",\n      \"values\": [\n        \"0\",\n        \"1\"\n      ]\n    }\n  ]\n}\n\nIf instead you want to only run the webhook on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n  \"matchExpressions\": [\n    {\n      \"key\": \"environment\",\n      \"operator\": \"In\",\n      \"values\": [\n        \"prod\",\n        \"staging\"\n      ]\n    }\n  ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.",
+	"sideEffects":       "SideEffects states whether this webhookk has side effects. Acceptable values are: Unknown, None, Some, NoneOnDryRun Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission change and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. Defaults to Unknown.",
+}
+
+func (Webhook) SwaggerDoc() map[string]string {
+	return map_Webhook
+}
+
+var map_WebhookClientConfig = map[string]string{
+	"":         "WebhookClientConfig contains the information to make a TLS connection with the webhook",
+	"url":      "`url` gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified.\n\nThe `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.\n\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\n\nThe scheme must be \"https\"; the URL must begin with \"https://\".\n\nA path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\n\nAttempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either.",
+	"service":  "`service` is a reference to the service for this webhook. Either `service` or `url` must be specified.\n\nIf the webhook is running within the cluster, then you should use `service`.\n\nPort 443 will be used if it is open, otherwise it is an error.",
+	"caBundle": "`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used.",
+}
+
+func (WebhookClientConfig) SwaggerDoc() map[string]string {
+	return map_WebhookClientConfig
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
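These generated SwaggerDoc maps key each JSON field name to its description, with the empty key holding the type-level summary. A minimal sketch of how a documentation tool might consume them; the printDocs helper is hypothetical and not part of the vendored package:

package main

import (
	"fmt"
	"sort"

	admissionv1beta1 "k8s.io/api/admissionregistration/v1beta1"
)

// printDocs is a hypothetical helper: it prints the type-level summary
// (the empty key) first, then each field's doc string in a stable order.
func printDocs(docs map[string]string) {
	if summary, ok := docs[""]; ok {
		fmt.Println(summary)
	}
	fields := make([]string, 0, len(docs))
	for field := range docs {
		if field != "" {
			fields = append(fields, field)
		}
	}
	sort.Strings(fields)
	for _, field := range fields {
		fmt.Printf("  %s: %s\n", field, docs[field])
	}
}

func main() {
	printDocs(admissionv1beta1.WebhookClientConfig{}.SwaggerDoc())
}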
diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..c6867be
--- /dev/null
+++ b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go
@@ -0,0 +1,302 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MutatingWebhookConfiguration) DeepCopyInto(out *MutatingWebhookConfiguration) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	if in.Webhooks != nil {
+		in, out := &in.Webhooks, &out.Webhooks
+		*out = make([]Webhook, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingWebhookConfiguration.
+func (in *MutatingWebhookConfiguration) DeepCopy() *MutatingWebhookConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(MutatingWebhookConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *MutatingWebhookConfiguration) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MutatingWebhookConfigurationList) DeepCopyInto(out *MutatingWebhookConfigurationList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]MutatingWebhookConfiguration, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingWebhookConfigurationList.
+func (in *MutatingWebhookConfigurationList) DeepCopy() *MutatingWebhookConfigurationList {
+	if in == nil {
+		return nil
+	}
+	out := new(MutatingWebhookConfigurationList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *MutatingWebhookConfigurationList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Rule) DeepCopyInto(out *Rule) {
+	*out = *in
+	if in.APIGroups != nil {
+		in, out := &in.APIGroups, &out.APIGroups
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.APIVersions != nil {
+		in, out := &in.APIVersions, &out.APIVersions
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Rule.
+func (in *Rule) DeepCopy() *Rule {
+	if in == nil {
+		return nil
+	}
+	out := new(Rule)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RuleWithOperations) DeepCopyInto(out *RuleWithOperations) {
+	*out = *in
+	if in.Operations != nil {
+		in, out := &in.Operations, &out.Operations
+		*out = make([]OperationType, len(*in))
+		copy(*out, *in)
+	}
+	in.Rule.DeepCopyInto(&out.Rule)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleWithOperations.
+func (in *RuleWithOperations) DeepCopy() *RuleWithOperations {
+	if in == nil {
+		return nil
+	}
+	out := new(RuleWithOperations)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceReference) DeepCopyInto(out *ServiceReference) {
+	*out = *in
+	if in.Path != nil {
+		in, out := &in.Path, &out.Path
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceReference.
+func (in *ServiceReference) DeepCopy() *ServiceReference {
+	if in == nil {
+		return nil
+	}
+	out := new(ServiceReference)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ValidatingWebhookConfiguration) DeepCopyInto(out *ValidatingWebhookConfiguration) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	if in.Webhooks != nil {
+		in, out := &in.Webhooks, &out.Webhooks
+		*out = make([]Webhook, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingWebhookConfiguration.
+func (in *ValidatingWebhookConfiguration) DeepCopy() *ValidatingWebhookConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(ValidatingWebhookConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ValidatingWebhookConfiguration) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ValidatingWebhookConfigurationList) DeepCopyInto(out *ValidatingWebhookConfigurationList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]ValidatingWebhookConfiguration, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingWebhookConfigurationList.
+func (in *ValidatingWebhookConfigurationList) DeepCopy() *ValidatingWebhookConfigurationList {
+	if in == nil {
+		return nil
+	}
+	out := new(ValidatingWebhookConfigurationList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ValidatingWebhookConfigurationList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Webhook) DeepCopyInto(out *Webhook) {
+	*out = *in
+	in.ClientConfig.DeepCopyInto(&out.ClientConfig)
+	if in.Rules != nil {
+		in, out := &in.Rules, &out.Rules
+		*out = make([]RuleWithOperations, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.FailurePolicy != nil {
+		in, out := &in.FailurePolicy, &out.FailurePolicy
+		*out = new(FailurePolicyType)
+		**out = **in
+	}
+	if in.NamespaceSelector != nil {
+		in, out := &in.NamespaceSelector, &out.NamespaceSelector
+		*out = new(v1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SideEffects != nil {
+		in, out := &in.SideEffects, &out.SideEffects
+		*out = new(SideEffectClass)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Webhook.
+func (in *Webhook) DeepCopy() *Webhook {
+	if in == nil {
+		return nil
+	}
+	out := new(Webhook)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WebhookClientConfig) DeepCopyInto(out *WebhookClientConfig) {
+	*out = *in
+	if in.URL != nil {
+		in, out := &in.URL, &out.URL
+		*out = new(string)
+		**out = **in
+	}
+	if in.Service != nil {
+		in, out := &in.Service, &out.Service
+		*out = new(ServiceReference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.CABundle != nil {
+		in, out := &in.CABundle, &out.CABundle
+		*out = make([]byte, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookClientConfig.
+func (in *WebhookClientConfig) DeepCopy() *WebhookClientConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(WebhookClientConfig)
+	in.DeepCopyInto(out)
+	return out
+}
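The point of these generated deepcopy functions is that a copy never aliases the original's pointer, slice, or map fields. A minimal sketch under that assumption; the field values here are illustrative only:

package main

import (
	"fmt"

	admissionv1beta1 "k8s.io/api/admissionregistration/v1beta1"
)

func main() {
	url := "https://hook.example.com/validate" // illustrative value
	orig := admissionv1beta1.WebhookClientConfig{
		URL:      &url,
		CABundle: []byte("pem-bytes"),
	}

	// DeepCopy allocates fresh memory for URL and CABundle, so edits
	// to the copy cannot leak back into orig.
	cp := orig.DeepCopy()
	*cp.URL = "https://other.example.com"
	cp.CABundle[0] = 'X'

	fmt.Println(*orig.URL)             // still the original URL
	fmt.Println(string(orig.CABundle)) // still "pem-bytes"
}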
diff --git a/vendor/k8s.io/api/apps/v1/doc.go b/vendor/k8s.io/api/apps/v1/doc.go
new file mode 100644
index 0000000..1d66c22
--- /dev/null
+++ b/vendor/k8s.io/api/apps/v1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:openapi-gen=true
+
+package v1 // import "k8s.io/api/apps/v1"
diff --git a/vendor/k8s.io/api/apps/v1/generated.proto b/vendor/k8s.io/api/apps/v1/generated.proto
new file mode 100644
index 0000000..fea8192
--- /dev/null
+++ b/vendor/k8s.io/api/apps/v1/generated.proto
@@ -0,0 +1,701 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.api.apps.v1;
+
+import "k8s.io/api/core/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+import "k8s.io/apimachinery/pkg/util/intstr/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1";
+
+// ControllerRevision implements an immutable snapshot of state data. Clients
+// are responsible for serializing and deserializing the objects that contain
+// their internal state.
+// Once a ControllerRevision has been successfully created, it can not be updated.
+// The API Server will fail validation of all requests that attempt to mutate
+// the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both
+// the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However,
+// it may be subject to name and representation changes in future releases, and clients should not
+// depend on its stability. It is primarily for internal use by controllers.
+message ControllerRevision {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Data is the serialized representation of the state.
+  optional k8s.io.apimachinery.pkg.runtime.RawExtension data = 2;
+
+  // Revision indicates the revision of the state represented by Data.
+  optional int64 revision = 3;
+}
+
+// ControllerRevisionList is a resource containing a list of ControllerRevision objects.
+message ControllerRevisionList {
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is the list of ControllerRevisions
+  repeated ControllerRevision items = 2;
+}
+
+// DaemonSet represents the configuration of a daemon set.
+message DaemonSet {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // The desired behavior of this daemon set.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+  // +optional
+  optional DaemonSetSpec spec = 2;
+
+  // The current status of this daemon set. This data may be
+  // out of date by some window of time.
+  // Populated by the system.
+  // Read-only.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+  // +optional
+  optional DaemonSetStatus status = 3;
+}
+
+// DaemonSetCondition describes the state of a DaemonSet at a certain point.
+message DaemonSetCondition {
+  // Type of DaemonSet condition.
+  optional string type = 1;
+
+  // Status of the condition, one of True, False, Unknown.
+  optional string status = 2;
+
+  // Last time the condition transitioned from one status to another.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
+
+  // The reason for the condition's last transition.
+  // +optional
+  optional string reason = 4;
+
+  // A human readable message indicating details about the transition.
+  // +optional
+  optional string message = 5;
+}
+
+// DaemonSetList is a collection of daemon sets.
+message DaemonSetList {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // A list of daemon sets.
+  repeated DaemonSet items = 2;
+}
+
+// DaemonSetSpec is the specification of a daemon set.
+message DaemonSetSpec {
+  // A label query over pods that are managed by the daemon set.
+  // Must match in order to be controlled.
+  // It must match the pod template's labels.
+  // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1;
+
+  // An object that describes the pod that will be created.
+  // The DaemonSet will create exactly one copy of this pod on every node
+  // that matches the template's node selector (or on every node if no node
+  // selector is specified).
+  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
+  optional k8s.io.api.core.v1.PodTemplateSpec template = 2;
+
+  // An update strategy to replace existing DaemonSet pods with new pods.
+  // +optional
+  optional DaemonSetUpdateStrategy updateStrategy = 3;
+
+  // The minimum number of seconds for which a newly created DaemonSet pod should
+  // be ready without any of its container crashing, for it to be considered
+  // available. Defaults to 0 (pod will be considered available as soon as it
+  // is ready).
+  // +optional
+  optional int32 minReadySeconds = 4;
+
+  // The number of old history entries to retain to allow rollback.
+  // This is a pointer to distinguish between explicit zero and not specified.
+  // Defaults to 10.
+  // +optional
+  optional int32 revisionHistoryLimit = 6;
+}
+
+// DaemonSetStatus represents the current status of a daemon set.
+message DaemonSetStatus {
+  // The number of nodes that are running at least 1
+  // daemon pod and are supposed to run the daemon pod.
+  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
+  optional int32 currentNumberScheduled = 1;
+
+  // The number of nodes that are running the daemon pod, but are
+  // not supposed to run the daemon pod.
+  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
+  optional int32 numberMisscheduled = 2;
+
+  // The total number of nodes that should be running the daemon
+  // pod (including nodes correctly running the daemon pod).
+  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
+  optional int32 desiredNumberScheduled = 3;
+
+  // The number of nodes that should be running the daemon pod and have one
+  // or more of the daemon pod running and ready.
+  optional int32 numberReady = 4;
+
+  // The most recent generation observed by the daemon set controller.
+  // +optional
+  optional int64 observedGeneration = 5;
+
+  // The total number of nodes that are running an updated daemon pod.
+  // +optional
+  optional int32 updatedNumberScheduled = 6;
+
+  // The number of nodes that should be running the
+  // daemon pod and have one or more of the daemon pod running and
+  // available (ready for at least spec.minReadySeconds)
+  // +optional
+  optional int32 numberAvailable = 7;
+
+  // The number of nodes that should be running the
+  // daemon pod and have none of the daemon pod running and available
+  // (ready for at least spec.minReadySeconds)
+  // +optional
+  optional int32 numberUnavailable = 8;
+
+  // Count of hash collisions for the DaemonSet. The DaemonSet controller
+  // uses this field as a collision avoidance mechanism when it needs to
+  // create the name for the newest ControllerRevision.
+  // +optional
+  optional int32 collisionCount = 9;
+
+  // Represents the latest available observations of a DaemonSet's current state.
+  // +optional
+  // +patchMergeKey=type
+  // +patchStrategy=merge
+  repeated DaemonSetCondition conditions = 10;
+}
+
+// DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet.
+message DaemonSetUpdateStrategy {
+  // Type of daemon set update. Can be "RollingUpdate" or "OnDelete". Default is RollingUpdate.
+  // +optional
+  optional string type = 1;
+
+  // Rolling update config params. Present only if type = "RollingUpdate".
+  // ---
+  // TODO: Update this to follow our convention for oneOf, whatever we decide it
+  // to be. Same as Deployment `strategy.rollingUpdate`.
+  // See https://github.com/kubernetes/kubernetes/issues/35345
+  // +optional
+  optional RollingUpdateDaemonSet rollingUpdate = 2;
+}
+
+// Deployment enables declarative updates for Pods and ReplicaSets.
+message Deployment {
+  // Standard object metadata.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Specification of the desired behavior of the Deployment.
+  // +optional
+  optional DeploymentSpec spec = 2;
+
+  // Most recently observed status of the Deployment.
+  // +optional
+  optional DeploymentStatus status = 3;
+}
+
+// DeploymentCondition describes the state of a deployment at a certain point.
+message DeploymentCondition {
+  // Type of deployment condition.
+  optional string type = 1;
+
+  // Status of the condition, one of True, False, Unknown.
+  optional string status = 2;
+
+  // The last time this condition was updated.
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6;
+
+  // Last time the condition transitioned from one status to another.
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7;
+
+  // The reason for the condition's last transition.
+  optional string reason = 4;
+
+  // A human readable message indicating details about the transition.
+  optional string message = 5;
+}
+
+// DeploymentList is a list of Deployments.
+message DeploymentList {
+  // Standard list metadata.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is the list of Deployments.
+  repeated Deployment items = 2;
+}
+
+// DeploymentSpec is the specification of the desired behavior of the Deployment.
+message DeploymentSpec {
+  // Number of desired pods. This is a pointer to distinguish between explicit
+  // zero and not specified. Defaults to 1.
+  // +optional
+  optional int32 replicas = 1;
+
+  // Label selector for pods. Existing ReplicaSets whose pods are
+  // selected by this will be the ones affected by this deployment.
+  // It must match the pod template's labels.
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
+
+  // Template describes the pods that will be created.
+  optional k8s.io.api.core.v1.PodTemplateSpec template = 3;
+
+  // The deployment strategy to use to replace existing pods with new ones.
+  // +optional
+  // +patchStrategy=retainKeys
+  optional DeploymentStrategy strategy = 4;
+
+  // Minimum number of seconds for which a newly created pod should be ready
+  // without any of its container crashing, for it to be considered available.
+  // Defaults to 0 (pod will be considered available as soon as it is ready)
+  // +optional
+  optional int32 minReadySeconds = 5;
+
+  // The number of old ReplicaSets to retain to allow rollback.
+  // This is a pointer to distinguish between explicit zero and not specified.
+  // Defaults to 10.
+  // +optional
+  optional int32 revisionHistoryLimit = 6;
+
+  // Indicates that the deployment is paused.
+  // +optional
+  optional bool paused = 7;
+
+  // The maximum time in seconds for a deployment to make progress before it
+  // is considered to be failed. The deployment controller will continue to
+  // process failed deployments and a condition with a ProgressDeadlineExceeded
+  // reason will be surfaced in the deployment status. Note that progress will
+  // not be estimated during the time a deployment is paused. Defaults to 600s.
+  optional int32 progressDeadlineSeconds = 9;
+}
+
+// DeploymentStatus is the most recently observed status of the Deployment.
+message DeploymentStatus {
+  // The generation observed by the deployment controller.
+  // +optional
+  optional int64 observedGeneration = 1;
+
+  // Total number of non-terminated pods targeted by this deployment (their labels match the selector).
+  // +optional
+  optional int32 replicas = 2;
+
+  // Total number of non-terminated pods targeted by this deployment that have the desired template spec.
+  // +optional
+  optional int32 updatedReplicas = 3;
+
+  // Total number of ready pods targeted by this deployment.
+  // +optional
+  optional int32 readyReplicas = 7;
+
+  // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.
+  // +optional
+  optional int32 availableReplicas = 4;
+
+  // Total number of unavailable pods targeted by this deployment. This is the total number of
+  // pods that are still required for the deployment to have 100% available capacity. They may
+  // either be pods that are running but not yet available or pods that still have not been created.
+  // +optional
+  optional int32 unavailableReplicas = 5;
+
+  // Represents the latest available observations of a deployment's current state.
+  // +patchMergeKey=type
+  // +patchStrategy=merge
+  repeated DeploymentCondition conditions = 6;
+
+  // Count of hash collisions for the Deployment. The Deployment controller uses this
+  // field as a collision avoidance mechanism when it needs to create the name for the
+  // newest ReplicaSet.
+  // +optional
+  optional int32 collisionCount = 8;
+}
+
+// DeploymentStrategy describes how to replace existing pods with new ones.
+message DeploymentStrategy {
+  // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate.
+  // +optional
+  optional string type = 1;
+
+  // Rolling update config params. Present only if DeploymentStrategyType =
+  // RollingUpdate.
+  // ---
+  // TODO: Update this to follow our convention for oneOf, whatever we decide it
+  // to be.
+  // +optional
+  optional RollingUpdateDeployment rollingUpdate = 2;
+}
+
+// ReplicaSet ensures that a specified number of pod replicas are running at any given time.
+message ReplicaSet {
+  // If the Labels of a ReplicaSet are empty, they are defaulted to
+  // be the same as the Pod(s) that the ReplicaSet manages.
+  // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Spec defines the specification of the desired behavior of the ReplicaSet.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+  // +optional
+  optional ReplicaSetSpec spec = 2;
+
+  // Status is the most recently observed status of the ReplicaSet.
+  // This data may be out of date by some window of time.
+  // Populated by the system.
+  // Read-only.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+  // +optional
+  optional ReplicaSetStatus status = 3;
+}
+
+// ReplicaSetCondition describes the state of a replica set at a certain point.
+message ReplicaSetCondition {
+  // Type of replica set condition.
+  optional string type = 1;
+
+  // Status of the condition, one of True, False, Unknown.
+  optional string status = 2;
+
+  // The last time the condition transitioned from one status to another.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
+
+  // The reason for the condition's last transition.
+  // +optional
+  optional string reason = 4;
+
+  // A human readable message indicating details about the transition.
+  // +optional
+  optional string message = 5;
+}
+
+// ReplicaSetList is a collection of ReplicaSets.
+message ReplicaSetList {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // List of ReplicaSets.
+  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller
+  repeated ReplicaSet items = 2;
+}
+
+// ReplicaSetSpec is the specification of a ReplicaSet.
+message ReplicaSetSpec {
+  // Replicas is the number of desired replicas.
+  // This is a pointer to distinguish between explicit zero and unspecified.
+  // Defaults to 1.
+  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
+  // +optional
+  optional int32 replicas = 1;
+
+  // Minimum number of seconds for which a newly created pod should be ready
+  // without any of its container crashing, for it to be considered available.
+  // Defaults to 0 (pod will be considered available as soon as it is ready)
+  // +optional
+  optional int32 minReadySeconds = 4;
+
+  // Selector is a label query over pods that should match the replica count.
+  // Label keys and values that must match in order to be controlled by this replica set.
+  // It must match the pod template's labels.
+  // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
+
+  // Template is the object that describes the pod that will be created if
+  // insufficient replicas are detected.
+  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
+  // +optional
+  optional k8s.io.api.core.v1.PodTemplateSpec template = 3;
+}
+
+// ReplicaSetStatus represents the current status of a ReplicaSet.
+message ReplicaSetStatus {
+  // Replicas is the most recently observed number of replicas.
+  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
+  optional int32 replicas = 1;
+
+  // The number of pods that have labels matching the labels of the pod template of the replicaset.
+  // +optional
+  optional int32 fullyLabeledReplicas = 2;
+
+  // The number of ready replicas for this replica set.
+  // +optional
+  optional int32 readyReplicas = 4;
+
+  // The number of available replicas (ready for at least minReadySeconds) for this replica set.
+  // +optional
+  optional int32 availableReplicas = 5;
+
+  // ObservedGeneration reflects the generation of the most recently observed ReplicaSet.
+  // +optional
+  optional int64 observedGeneration = 3;
+
+  // Represents the latest available observations of a replica set's current state.
+  // +optional
+  // +patchMergeKey=type
+  // +patchStrategy=merge
+  repeated ReplicaSetCondition conditions = 6;
+}
+
+// Spec to control the desired behavior of daemon set rolling update.
+message RollingUpdateDaemonSet {
+  // The maximum number of DaemonSet pods that can be unavailable during the
+  // update. Value can be an absolute number (ex: 5) or a percentage of total
+  // number of DaemonSet pods at the start of the update (ex: 10%). Absolute
+  // number is calculated from percentage by rounding up.
+  // This cannot be 0.
+  // Default value is 1.
+  // Example: when this is set to 30%, at most 30% of the total number of nodes
+  // that should be running the daemon pod (i.e. status.desiredNumberScheduled)
+  // can have their pods stopped for an update at any given
+  // time. The update starts by stopping at most 30% of those DaemonSet pods
+  // and then brings up new DaemonSet pods in their place. Once the new pods
+  // are available, it then proceeds onto other DaemonSet pods, thus ensuring
+  // that at least 70% of original number of DaemonSet pods are available at
+  // all times during the update.
+  // +optional
+  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1;
+}
+
+// Spec to control the desired behavior of rolling update.
+message RollingUpdateDeployment {
+  // The maximum number of pods that can be unavailable during the update.
+  // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+  // Absolute number is calculated from percentage by rounding down.
+  // This can not be 0 if MaxSurge is 0.
+  // Defaults to 25%.
+  // Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods
+  // immediately when the rolling update starts. Once new pods are ready, old ReplicaSet
+  // can be scaled down further, followed by scaling up the new ReplicaSet, ensuring
+  // that the total number of pods available at all times during the update is at
+  // least 70% of desired pods.
+  // +optional
+  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1;
+
+  // The maximum number of pods that can be scheduled above the desired number of
+  // pods.
+  // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+  // This can not be 0 if MaxUnavailable is 0.
+  // Absolute number is calculated from percentage by rounding up.
+  // Defaults to 25%.
+  // Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when
+  // the rolling update starts, such that the total number of old and new pods does not exceed
+  // 130% of desired pods. Once old pods have been killed,
+  // new ReplicaSet can be scaled up further, ensuring that total number of pods running
+  // at any time during the update is at most 130% of desired pods.
+  // +optional
+  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2;
+}
+
+// RollingUpdateStatefulSetStrategy is used to communicate parameters for RollingUpdateStatefulSetStrategyType.
+message RollingUpdateStatefulSetStrategy {
+  // Partition indicates the ordinal at which the StatefulSet should be
+  // partitioned.
+  // Default value is 0.
+  // +optional
+  optional int32 partition = 1;
+}
+
+// StatefulSet represents a set of pods with consistent identities.
+// Identities are defined as:
+//  - Network: A single stable DNS and hostname.
+//  - Storage: As many VolumeClaims as requested.
+// The StatefulSet guarantees that a given network identity will always
+// map to the same storage identity.
+message StatefulSet {
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Spec defines the desired identities of pods in this set.
+  // +optional
+  optional StatefulSetSpec spec = 2;
+
+  // Status is the current status of Pods in this StatefulSet. This data
+  // may be out of date by some window of time.
+  // +optional
+  optional StatefulSetStatus status = 3;
+}
+
+// StatefulSetCondition describes the state of a statefulset at a certain point.
+message StatefulSetCondition {
+  // Type of statefulset condition.
+  optional string type = 1;
+
+  // Status of the condition, one of True, False, Unknown.
+  optional string status = 2;
+
+  // Last time the condition transitioned from one status to another.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
+
+  // The reason for the condition's last transition.
+  // +optional
+  optional string reason = 4;
+
+  // A human readable message indicating details about the transition.
+  // +optional
+  optional string message = 5;
+}
+
+// StatefulSetList is a collection of StatefulSets.
+message StatefulSetList {
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  repeated StatefulSet items = 2;
+}
+
+// A StatefulSetSpec is the specification of a StatefulSet.
+message StatefulSetSpec {
+  // replicas is the desired number of replicas of the given Template.
+  // These are replicas in the sense that they are instantiations of the
+  // same Template, but individual replicas also have a consistent identity.
+  // If unspecified, defaults to 1.
+  // TODO: Consider a rename of this field.
+  // +optional
+  optional int32 replicas = 1;
+
+  // selector is a label query over pods that should match the replica count.
+  // It must match the pod template's labels.
+  // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
+
+  // template is the object that describes the pod that will be created if
+  // insufficient replicas are detected. Each pod stamped out by the StatefulSet
+  // will fulfill this Template, but have a unique identity from the rest
+  // of the StatefulSet.
+  optional k8s.io.api.core.v1.PodTemplateSpec template = 3;
+
+  // volumeClaimTemplates is a list of claims that pods are allowed to reference.
+  // The StatefulSet controller is responsible for mapping network identities to
+  // claims in a way that maintains the identity of a pod. Every claim in
+  // this list must have at least one matching (by name) volumeMount in one
+  // container in the template. A claim in this list takes precedence over
+  // any volumes in the template, with the same name.
+  // TODO: Define the behavior if a claim already exists with the same name.
+  // +optional
+  repeated k8s.io.api.core.v1.PersistentVolumeClaim volumeClaimTemplates = 4;
+
+  // serviceName is the name of the service that governs this StatefulSet.
+  // This service must exist before the StatefulSet, and is responsible for
+  // the network identity of the set. Pods get DNS/hostnames that follow the
+  // pattern: pod-specific-string.serviceName.default.svc.cluster.local
+  // where "pod-specific-string" is managed by the StatefulSet controller.
+  optional string serviceName = 5;
+
+  // podManagementPolicy controls how pods are created during initial scale up,
+  // when replacing pods on nodes, or when scaling down. The default policy is
+  // `OrderedReady`, where pods are created in increasing order (pod-0, then
+  // pod-1, etc) and the controller will wait until each pod is ready before
+  // continuing. When scaling down, the pods are removed in the opposite order.
+  // The alternative policy is `Parallel` which will create pods in parallel
+  // to match the desired scale without waiting, and on scale down will delete
+  // all pods at once.
+  // +optional
+  optional string podManagementPolicy = 6;
+
+  // updateStrategy indicates the StatefulSetUpdateStrategy that will be
+  // employed to update Pods in the StatefulSet when a revision is made to
+  // Template.
+  optional StatefulSetUpdateStrategy updateStrategy = 7;
+
+  // revisionHistoryLimit is the maximum number of revisions that will
+  // be maintained in the StatefulSet's revision history. The revision history
+  // consists of all revisions not represented by a currently applied
+  // StatefulSetSpec version. The default value is 10.
+  optional int32 revisionHistoryLimit = 8;
+}
+
+// StatefulSetStatus represents the current state of a StatefulSet.
+message StatefulSetStatus {
+  // observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the
+  // StatefulSet's generation, which is updated on mutation by the API Server.
+  // +optional
+  optional int64 observedGeneration = 1;
+
+  // replicas is the number of Pods created by the StatefulSet controller.
+  optional int32 replicas = 2;
+
+  // readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition.
+  optional int32 readyReplicas = 3;
+
+  // currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version
+  // indicated by currentRevision.
+  optional int32 currentReplicas = 4;
+
+  // updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version
+  // indicated by updateRevision.
+  optional int32 updatedReplicas = 5;
+
+  // currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the
+  // sequence [0,currentReplicas).
+  optional string currentRevision = 6;
+
+  // updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence
+  // [replicas-updatedReplicas,replicas)
+  optional string updateRevision = 7;
+
+  // collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller
+  // uses this field as a collision avoidance mechanism when it needs to create the name for the
+  // newest ControllerRevision.
+  // +optional
+  optional int32 collisionCount = 9;
+
+  // Represents the latest available observations of a statefulset's current state.
+  // +optional
+  // +patchMergeKey=type
+  // +patchStrategy=merge
+  repeated StatefulSetCondition conditions = 10;
+}
+
+// StatefulSetUpdateStrategy indicates the strategy that the StatefulSet
+// controller will use to perform updates. It includes any additional parameters
+// necessary to perform the update for the indicated strategy.
+message StatefulSetUpdateStrategy {
+  // Type indicates the type of the StatefulSetUpdateStrategy.
+  // Default is RollingUpdate.
+  // +optional
+  optional string type = 1;
+
+  // RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType.
+  // +optional
+  optional RollingUpdateStatefulSetStrategy rollingUpdate = 2;
+}
+
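The maxUnavailable/maxSurge comments above pin down the rounding direction: unavailable counts round down, surge counts round up. A small sketch of that arithmetic using plain integers, rather than the real IntOrString helpers in k8s.io/apimachinery:

package main

import "fmt"

// scaledValue applies a percentage to a replica count, rounding down
// for maxUnavailable and up for maxSurge, mirroring the rules in the
// proto comments above. This is illustrative arithmetic, not the
// controller's actual implementation.
func scaledValue(percent, desired int, roundUp bool) int {
	v := percent * desired
	if roundUp {
		return (v + 99) / 100
	}
	return v / 100
}

func main() {
	desired := 7
	// With maxUnavailable=30% and maxSurge=30% on 7 desired pods:
	fmt.Println(scaledValue(30, desired, false)) // maxUnavailable: floor(2.1) = 2
	fmt.Println(scaledValue(30, desired, true))  // maxSurge: ceil(2.1) = 3
}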
diff --git a/vendor/k8s.io/api/apps/v1/register.go b/vendor/k8s.io/api/apps/v1/register.go
new file mode 100644
index 0000000..0271010
--- /dev/null
+++ b/vendor/k8s.io/api/apps/v1/register.go
@@ -0,0 +1,60 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "apps"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
+	// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+	SchemeBuilder      = runtime.NewSchemeBuilder(addKnownTypes)
+	localSchemeBuilder = &SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&Deployment{},
+		&DeploymentList{},
+		&StatefulSet{},
+		&StatefulSetList{},
+		&DaemonSet{},
+		&DaemonSetList{},
+		&ReplicaSet{},
+		&ReplicaSetList{},
+		&ControllerRevision{},
+		&ControllerRevisionList{},
+	)
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
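addKnownTypes above is what makes apps/v1 objects recognizable to a runtime.Scheme. A minimal sketch of registering the group and querying a kind back out, with error handling kept to panics for brevity:

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	// AddToScheme registers Deployment, StatefulSet, DaemonSet,
	// ReplicaSet, ControllerRevision, and their list types under apps/v1.
	if err := appsv1.AddToScheme(scheme); err != nil {
		panic(err)
	}

	// ObjectKinds reports the registered group/version/kind for a value.
	gvks, _, err := scheme.ObjectKinds(&appsv1.Deployment{})
	if err != nil {
		panic(err)
	}
	fmt.Println(gvks[0]) // apps/v1, Kind=Deployment
}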
diff --git a/vendor/k8s.io/api/apps/v1/types.go b/vendor/k8s.io/api/apps/v1/types.go
new file mode 100644
index 0000000..68ac55b
--- /dev/null
+++ b/vendor/k8s.io/api/apps/v1/types.go
@@ -0,0 +1,826 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/intstr"
+)
+
+const (
+	ControllerRevisionHashLabelKey = "controller-revision-hash"
+	StatefulSetRevisionLabel       = ControllerRevisionHashLabelKey
+	DeprecatedRollbackTo           = "deprecated.deployment.rollback.to"
+	DeprecatedTemplateGeneration   = "deprecated.daemonset.template.generation"
+	StatefulSetPodNameLabel        = "statefulset.kubernetes.io/pod-name"
+)
+
+// +genclient
+// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale
+// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// StatefulSet represents a set of pods with consistent identities.
+// Identities are defined as:
+//  - Network: A single stable DNS and hostname.
+//  - Storage: As many VolumeClaims as requested.
+// The StatefulSet guarantees that a given network identity will always
+// map to the same storage identity.
+type StatefulSet struct {
+	metav1.TypeMeta `json:",inline"`
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Spec defines the desired identities of pods in this set.
+	// +optional
+	Spec StatefulSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+	// Status is the current status of Pods in this StatefulSet. This data
+	// may be out of date by some window of time.
+	// +optional
+	Status StatefulSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// PodManagementPolicyType defines the policy for creating pods under a stateful set.
+type PodManagementPolicyType string
+
+const (
+	// OrderedReadyPodManagement will create pods in strictly increasing order on
+	// scale up and strictly decreasing order on scale down, progressing only when
+	// the previous pod is ready or terminated. At most one pod will be changed
+	// at any time.
+	OrderedReadyPodManagement PodManagementPolicyType = "OrderedReady"
+	// ParallelPodManagement will create and delete pods as soon as the stateful set
+	// replica count is changed, and will not wait for pods to be ready or complete
+	// termination.
+	ParallelPodManagement = "Parallel"
+)
+
+// StatefulSetUpdateStrategy indicates the strategy that the StatefulSet
+// controller will use to perform updates. It includes any additional parameters
+// necessary to perform the update for the indicated strategy.
+type StatefulSetUpdateStrategy struct {
+	// Type indicates the type of the StatefulSetUpdateStrategy.
+	// Default is RollingUpdate.
+	// +optional
+	Type StatefulSetUpdateStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=StatefulSetStrategyType"`
+	// RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType.
+	// +optional
+	RollingUpdate *RollingUpdateStatefulSetStrategy `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"`
+}
+
+// StatefulSetUpdateStrategyType is a string enumeration type that enumerates
+// all possible update strategies for the StatefulSet controller.
+type StatefulSetUpdateStrategyType string
+
+const (
+	// RollingUpdateStatefulSetStrategyType indicates that update will be
+	// applied to all Pods in the StatefulSet with respect to the StatefulSet
+	// ordering constraints. When a scale operation is performed with this
+	// strategy, new Pods will be created from the specification version indicated
+	// by the StatefulSet's updateRevision.
+	RollingUpdateStatefulSetStrategyType = "RollingUpdate"
+	// OnDeleteStatefulSetStrategyType triggers the legacy behavior. Version
+	// tracking and ordered rolling restarts are disabled. Pods are recreated
+	// from the StatefulSetSpec when they are manually deleted. When a scale
+	// operation is performed with this strategy, new Pods will be created from
+	// the specification version indicated by the StatefulSet's currentRevision.
+	OnDeleteStatefulSetStrategyType = "OnDelete"
+)
+
+// RollingUpdateStatefulSetStrategy is used to communicate parameters for RollingUpdateStatefulSetStrategyType.
+type RollingUpdateStatefulSetStrategy struct {
+	// Partition indicates the ordinal at which the StatefulSet should be
+	// partitioned.
+	// Default value is 0.
+	// +optional
+	Partition *int32 `json:"partition,omitempty" protobuf:"varint,1,opt,name=partition"`
+}
+
+// A StatefulSetSpec is the specification of a StatefulSet.
+type StatefulSetSpec struct {
+	// replicas is the desired number of replicas of the given Template.
+	// These are replicas in the sense that they are instantiations of the
+	// same Template, but individual replicas also have a consistent identity.
+	// If unspecified, defaults to 1.
+	// TODO: Consider a rename of this field.
+	// +optional
+	Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
+
+	// selector is a label query over pods that should match the replica count.
+	// It must match the pod template's labels.
+	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+	Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"`
+
+	// template is the object that describes the pod that will be created if
+	// insufficient replicas are detected. Each pod stamped out by the StatefulSet
+	// will fulfill this Template, but have a unique identity from the rest
+	// of the StatefulSet.
+	Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"`
+
+	// volumeClaimTemplates is a list of claims that pods are allowed to reference.
+	// The StatefulSet controller is responsible for mapping network identities to
+	// claims in a way that maintains the identity of a pod. Every claim in
+	// this list must have at least one matching (by name) volumeMount in one
+	// container in the template. A claim in this list takes precedence over
+	// any volumes in the template, with the same name.
+	// TODO: Define the behavior if a claim already exists with the same name.
+	// +optional
+	VolumeClaimTemplates []v1.PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty" protobuf:"bytes,4,rep,name=volumeClaimTemplates"`
+
+	// serviceName is the name of the service that governs this StatefulSet.
+	// This service must exist before the StatefulSet, and is responsible for
+	// the network identity of the set. Pods get DNS/hostnames that follow the
+	// pattern: pod-specific-string.serviceName.default.svc.cluster.local
+	// where "pod-specific-string" is managed by the StatefulSet controller.
+	ServiceName string `json:"serviceName" protobuf:"bytes,5,opt,name=serviceName"`
+
+	// podManagementPolicy controls how pods are created during initial scale up,
+	// when replacing pods on nodes, or when scaling down. The default policy is
+	// `OrderedReady`, where pods are created in increasing order (pod-0, then
+	// pod-1, etc) and the controller will wait until each pod is ready before
+	// continuing. When scaling down, the pods are removed in the opposite order.
+	// The alternative policy is `Parallel` which will create pods in parallel
+	// to match the desired scale without waiting, and on scale down will delete
+	// all pods at once.
+	// +optional
+	PodManagementPolicy PodManagementPolicyType `json:"podManagementPolicy,omitempty" protobuf:"bytes,6,opt,name=podManagementPolicy,casttype=PodManagementPolicyType"`
+
+	// updateStrategy indicates the StatefulSetUpdateStrategy that will be
+	// employed to update Pods in the StatefulSet when a revision is made to
+	// Template.
+	UpdateStrategy StatefulSetUpdateStrategy `json:"updateStrategy,omitempty" protobuf:"bytes,7,opt,name=updateStrategy"`
+
+	// revisionHistoryLimit is the maximum number of revisions that will
+	// be maintained in the StatefulSet's revision history. The revision history
+	// consists of all revisions not represented by a currently applied
+	// StatefulSetSpec version. The default value is 10.
+	RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,8,opt,name=revisionHistoryLimit"`
+}
+
+// StatefulSetStatus represents the current state of a StatefulSet.
+type StatefulSetStatus struct {
+	// observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the
+	// StatefulSet's generation, which is updated on mutation by the API Server.
+	// +optional
+	ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"`
+
+	// replicas is the number of Pods created by the StatefulSet controller.
+	Replicas int32 `json:"replicas" protobuf:"varint,2,opt,name=replicas"`
+
+	// readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition.
+	ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,3,opt,name=readyReplicas"`
+
+	// currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version
+	// indicated by currentRevision.
+	CurrentReplicas int32 `json:"currentReplicas,omitempty" protobuf:"varint,4,opt,name=currentReplicas"`
+
+	// updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version
+	// indicated by updateRevision.
+	UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,5,opt,name=updatedReplicas"`
+
+	// currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the
+	// sequence [0,currentReplicas).
+	CurrentRevision string `json:"currentRevision,omitempty" protobuf:"bytes,6,opt,name=currentRevision"`
+
+	// updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence
+	// [replicas-updatedReplicas,replicas)
+	UpdateRevision string `json:"updateRevision,omitempty" protobuf:"bytes,7,opt,name=updateRevision"`
+
+	// collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller
+	// uses this field as a collision avoidance mechanism when it needs to create the name for the
+	// newest ControllerRevision.
+	// +optional
+	CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,9,opt,name=collisionCount"`
+
+	// Represents the latest available observations of a statefulset's current state.
+	// +optional
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	Conditions []StatefulSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"`
+}
+
+type StatefulSetConditionType string
+
+// StatefulSetCondition describes the state of a statefulset at a certain point.
+type StatefulSetCondition struct {
+	// Type of statefulset condition.
+	Type StatefulSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=StatefulSetConditionType"`
+	// Status of the condition, one of True, False, Unknown.
+	Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"`
+	// Last time the condition transitioned from one status to another.
+	// +optional
+	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
+	// The reason for the condition's last transition.
+	// +optional
+	Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
+	// A human readable message indicating details about the transition.
+	// +optional
+	Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// StatefulSetList is a collection of StatefulSets.
+type StatefulSetList struct {
+	metav1.TypeMeta `json:",inline"`
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+	Items           []StatefulSet `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient
+// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale
+// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Deployment enables declarative updates for Pods and ReplicaSets.
+type Deployment struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object metadata.
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Specification of the desired behavior of the Deployment.
+	// +optional
+	Spec DeploymentSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+	// Most recently observed status of the Deployment.
+	// +optional
+	Status DeploymentStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// DeploymentSpec is the specification of the desired behavior of the Deployment.
+type DeploymentSpec struct {
+	// Number of desired pods. This is a pointer to distinguish between explicit
+	// zero and not specified. Defaults to 1.
+	// +optional
+	Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
+
+	// Label selector for pods. Existing ReplicaSets whose pods are
+	// selected by this will be the ones affected by this deployment.
+	// It must match the pod template's labels.
+	Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"`
+
+	// Template describes the pods that will be created.
+	Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"`
+
+	// The deployment strategy to use to replace existing pods with new ones.
+	// +optional
+	// +patchStrategy=retainKeys
+	Strategy DeploymentStrategy `json:"strategy,omitempty" patchStrategy:"retainKeys" protobuf:"bytes,4,opt,name=strategy"`
+
+	// Minimum number of seconds for which a newly created pod should be ready
+	// without any of its containers crashing, for it to be considered available.
+	// Defaults to 0 (pod will be considered available as soon as it is ready).
+	// +optional
+	MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,5,opt,name=minReadySeconds"`
+
+	// The number of old ReplicaSets to retain to allow rollback.
+	// This is a pointer to distinguish between explicit zero and not specified.
+	// Defaults to 10.
+	// +optional
+	RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"`
+
+	// Indicates that the deployment is paused.
+	// +optional
+	Paused bool `json:"paused,omitempty" protobuf:"varint,7,opt,name=paused"`
+
+	// The maximum time in seconds for a deployment to make progress before it
+	// is considered to be failed. The deployment controller will continue to
+	// process failed deployments and a condition with a ProgressDeadlineExceeded
+	// reason will be surfaced in the deployment status. Note that progress will
+	// not be estimated during the time a deployment is paused. Defaults to 600s.
+	ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty" protobuf:"varint,9,opt,name=progressDeadlineSeconds"`
+}
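+
+// Illustrative sketch (editor's addition, not upstream code): constructing a
+// minimal Deployment. Note that Spec.Selector must match the pod template's
+// labels, and Replicas is a pointer so an explicit zero survives serialization.
+// The names used here are examples only.
+func exampleDeployment() *Deployment {
+	replicas := int32(3)
+	labels := map[string]string{"app": "example"}
+	return &Deployment{
+		ObjectMeta: metav1.ObjectMeta{Name: "example"},
+		Spec: DeploymentSpec{
+			Replicas: &replicas,
+			Selector: &metav1.LabelSelector{MatchLabels: labels},
+			Template: v1.PodTemplateSpec{
+				ObjectMeta: metav1.ObjectMeta{Labels: labels},
+			},
+		},
+	}
+}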
+
+const (
+	// DefaultDeploymentUniqueLabelKey is the default key of the selector that is added
+	// to existing ReplicaSets (and the label key that is added to their pods) to prevent
+	// the existing ReplicaSets from selecting new pods (and the new ReplicaSet from selecting old pods).
+	DefaultDeploymentUniqueLabelKey string = "pod-template-hash"
+)
+
+// DeploymentStrategy describes how to replace existing pods with new ones.
+type DeploymentStrategy struct {
+	// Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate.
+	// +optional
+	Type DeploymentStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=DeploymentStrategyType"`
+
+	// Rolling update config params. Present only if DeploymentStrategyType =
+	// RollingUpdate.
+	//---
+	// TODO: Update this to follow our convention for oneOf, whatever we decide it
+	// to be.
+	// +optional
+	RollingUpdate *RollingUpdateDeployment `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"`
+}
+
+type DeploymentStrategyType string
+
+const (
+	// Kill all existing pods before creating new ones.
+	RecreateDeploymentStrategyType DeploymentStrategyType = "Recreate"
+
+	// Replace the old ReplicaSets with a new one using a rolling update, i.e. gradually scale down the old ReplicaSets and scale up the new one.
+	RollingUpdateDeploymentStrategyType DeploymentStrategyType = "RollingUpdate"
+)
+
+// Spec to control the desired behavior of rolling update.
+type RollingUpdateDeployment struct {
+	// The maximum number of pods that can be unavailable during the update.
+	// Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+	// Absolute number is calculated from percentage by rounding down.
+	// This cannot be 0 if MaxSurge is 0.
+	// Defaults to 25%.
+	// Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods
+	// immediately when the rolling update starts. Once new pods are ready, old ReplicaSet
+	// can be scaled down further, followed by scaling up the new ReplicaSet, ensuring
+	// that the total number of pods available at all times during the update is at
+	// least 70% of desired pods.
+	// +optional
+	MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"`
+
+	// The maximum number of pods that can be scheduled above the desired number of
+	// pods.
+	// Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+	// This cannot be 0 if MaxUnavailable is 0.
+	// Absolute number is calculated from percentage by rounding up.
+	// Defaults to 25%.
+	// Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when
+	// the rolling update starts, such that the total number of old and new pods does not exceed
+	// 130% of desired pods. Once old pods have been killed, the
+	// new ReplicaSet can be scaled up further, ensuring that the total number of pods running
+	// at any time during the update is at most 130% of desired pods.
+	// +optional
+	MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"`
+}
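+
+// Worked example (editor's addition, not upstream code) of the rounding rules
+// documented above: with 10 desired pods and the 25% defaults, maxUnavailable
+// rounds down to 2 (so at least 8 pods stay available) and maxSurge rounds up
+// to 3 (so at most 13 pods run during the update).
+func exampleRollingUpdateBounds(desired int) (minAvailable, maxTotal int) {
+	maxUnavailable := desired * 25 / 100 // percentage rounded down
+	maxSurge := (desired*25 + 99) / 100  // percentage rounded up
+	return desired - maxUnavailable, desired + maxSurge
+}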
+
+// DeploymentStatus is the most recently observed status of the Deployment.
+type DeploymentStatus struct {
+	// The generation observed by the deployment controller.
+	// +optional
+	ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"`
+
+	// Total number of non-terminated pods targeted by this deployment (their labels match the selector).
+	// +optional
+	Replicas int32 `json:"replicas,omitempty" protobuf:"varint,2,opt,name=replicas"`
+
+	// Total number of non-terminated pods targeted by this deployment that have the desired template spec.
+	// +optional
+	UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,3,opt,name=updatedReplicas"`
+
+	// Total number of ready pods targeted by this deployment.
+	// +optional
+	ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,7,opt,name=readyReplicas"`
+
+	// Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.
+	// +optional
+	AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,4,opt,name=availableReplicas"`
+
+	// Total number of unavailable pods targeted by this deployment. This is the total number of
+	// pods that are still required for the deployment to have 100% available capacity. They may
+	// either be pods that are running but not yet available or pods that still have not been created.
+	// +optional
+	UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,5,opt,name=unavailableReplicas"`
+
+	// Represents the latest available observations of a deployment's current state.
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	Conditions []DeploymentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"`
+
+	// Count of hash collisions for the Deployment. The Deployment controller uses this
+	// field as a collision avoidance mechanism when it needs to create the name for the
+	// newest ReplicaSet.
+	// +optional
+	CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,8,opt,name=collisionCount"`
+}
+
+type DeploymentConditionType string
+
+// These are valid conditions of a deployment.
+const (
+	// Available means the deployment is available, i.e. at least the minimum available
+	// replicas required are up and running for at least minReadySeconds.
+	DeploymentAvailable DeploymentConditionType = "Available"
+	// Progressing means the deployment is progressing. Progress for a deployment is
+	// considered when a new replica set is created or adopted, and when new pods scale
+	// up or old pods scale down. Progress is not estimated for paused deployments or
+	// when progressDeadlineSeconds is not specified.
+	DeploymentProgressing DeploymentConditionType = "Progressing"
+	// ReplicaFailure is added in a deployment when one of its pods fails to be created
+	// or deleted.
+	DeploymentReplicaFailure DeploymentConditionType = "ReplicaFailure"
+)
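+
+// Illustrative sketch (editor's addition, not upstream code): conditions are
+// looked up by type, e.g. to check whether a Deployment has become Available.
+func exampleDeploymentCondition(status DeploymentStatus, condType DeploymentConditionType) *DeploymentCondition {
+	for i := range status.Conditions {
+		if status.Conditions[i].Type == condType {
+			return &status.Conditions[i]
+		}
+	}
+	return nil
+}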
+
+// DeploymentCondition describes the state of a deployment at a certain point.
+type DeploymentCondition struct {
+	// Type of deployment condition.
+	Type DeploymentConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DeploymentConditionType"`
+	// Status of the condition, one of True, False, Unknown.
+	Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"`
+	// The last time this condition was updated.
+	LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,6,opt,name=lastUpdateTime"`
+	// Last time the condition transitioned from one status to another.
+	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,7,opt,name=lastTransitionTime"`
+	// The reason for the condition's last transition.
+	Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
+	// A human readable message indicating details about the transition.
+	Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// DeploymentList is a list of Deployments.
+type DeploymentList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is the list of Deployments.
+	Items []Deployment `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet.
+type DaemonSetUpdateStrategy struct {
+	// Type of daemon set update. Can be "RollingUpdate" or "OnDelete". Default is RollingUpdate.
+	// +optional
+	Type DaemonSetUpdateStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type"`
+
+	// Rolling update config params. Present only if type = "RollingUpdate".
+	//---
+	// TODO: Update this to follow our convention for oneOf, whatever we decide it
+	// to be. Same as Deployment `strategy.rollingUpdate`.
+	// See https://github.com/kubernetes/kubernetes/issues/35345
+	// +optional
+	RollingUpdate *RollingUpdateDaemonSet `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"`
+}
+
+type DaemonSetUpdateStrategyType string
+
+const (
+	// Replace the old daemons with new ones using a rolling update, i.e. replace them on each node one after the other.
+	RollingUpdateDaemonSetStrategyType DaemonSetUpdateStrategyType = "RollingUpdate"
+
+	// Replace the old daemons only when they are killed.
+	OnDeleteDaemonSetStrategyType DaemonSetUpdateStrategyType = "OnDelete"
+)
+
+// Spec to control the desired behavior of daemon set rolling update.
+type RollingUpdateDaemonSet struct {
+	// The maximum number of DaemonSet pods that can be unavailable during the
+	// update. Value can be an absolute number (ex: 5) or a percentage of total
+	// number of DaemonSet pods at the start of the update (ex: 10%). Absolute
+	// number is calculated from percentage by rounding up.
+	// This cannot be 0.
+	// Default value is 1.
+	// Example: when this is set to 30%, at most 30% of the total number of nodes
+	// that should be running the daemon pod (i.e. status.desiredNumberScheduled)
+	// can have their pods stopped for an update at any given
+	// time. The update starts by stopping at most 30% of those DaemonSet pods
+	// and then brings up new DaemonSet pods in their place. Once the new pods
+	// are available, it then proceeds onto other DaemonSet pods, thus ensuring
+	// that at least 70% of the original number of DaemonSet pods are available at
+	// all times during the update.
+	// +optional
+	MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"`
+}
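+
+// Illustrative sketch (editor's addition, not upstream code): an update
+// strategy that tolerates at most 10% of DaemonSet pods being down at once;
+// the 10% value is an arbitrary example.
+func exampleDaemonSetUpdateStrategy() DaemonSetUpdateStrategy {
+	maxUnavailable := intstr.FromString("10%")
+	return DaemonSetUpdateStrategy{
+		Type:          RollingUpdateDaemonSetStrategyType,
+		RollingUpdate: &RollingUpdateDaemonSet{MaxUnavailable: &maxUnavailable},
+	}
+}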
+
+// DaemonSetSpec is the specification of a daemon set.
+type DaemonSetSpec struct {
+	// A label query over pods that are managed by the daemon set.
+	// Must match in order to be controlled.
+	// It must match the pod template's labels.
+	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+	Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,1,opt,name=selector"`
+
+	// An object that describes the pod that will be created.
+	// The DaemonSet will create exactly one copy of this pod on every node
+	// that matches the template's node selector (or on every node if no node
+	// selector is specified).
+	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
+	Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,2,opt,name=template"`
+
+	// An update strategy to replace existing DaemonSet pods with new pods.
+	// +optional
+	UpdateStrategy DaemonSetUpdateStrategy `json:"updateStrategy,omitempty" protobuf:"bytes,3,opt,name=updateStrategy"`
+
+	// The minimum number of seconds for which a newly created DaemonSet pod should
+	// be ready without any of its containers crashing, for it to be considered
+	// available. Defaults to 0 (pod will be considered available as soon as it
+	// is ready).
+	// +optional
+	MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"`
+
+	// The number of old history entries to retain to allow rollback.
+	// This is a pointer to distinguish between explicit zero and not specified.
+	// Defaults to 10.
+	// +optional
+	RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"`
+}
+
+// DaemonSetStatus represents the current status of a daemon set.
+type DaemonSetStatus struct {
+	// The number of nodes that are running at least 1
+	// daemon pod and are supposed to run the daemon pod.
+	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
+	CurrentNumberScheduled int32 `json:"currentNumberScheduled" protobuf:"varint,1,opt,name=currentNumberScheduled"`
+
+	// The number of nodes that are running the daemon pod, but are
+	// not supposed to run the daemon pod.
+	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
+	NumberMisscheduled int32 `json:"numberMisscheduled" protobuf:"varint,2,opt,name=numberMisscheduled"`
+
+	// The total number of nodes that should be running the daemon
+	// pod (including nodes correctly running the daemon pod).
+	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
+	DesiredNumberScheduled int32 `json:"desiredNumberScheduled" protobuf:"varint,3,opt,name=desiredNumberScheduled"`
+
+	// The number of nodes that should be running the daemon pod and have one
+	// or more of the daemon pod running and ready.
+	NumberReady int32 `json:"numberReady" protobuf:"varint,4,opt,name=numberReady"`
+
+	// The most recent generation observed by the daemon set controller.
+	// +optional
+	ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,5,opt,name=observedGeneration"`
+
+	// The total number of nodes that are running the updated daemon pod.
+	// +optional
+	UpdatedNumberScheduled int32 `json:"updatedNumberScheduled,omitempty" protobuf:"varint,6,opt,name=updatedNumberScheduled"`
+
+	// The number of nodes that should be running the
+	// daemon pod and have one or more of the daemon pod running and
+	// available (ready for at least spec.minReadySeconds)
+	// +optional
+	NumberAvailable int32 `json:"numberAvailable,omitempty" protobuf:"varint,7,opt,name=numberAvailable"`
+
+	// The number of nodes that should be running the
+	// daemon pod and have none of the daemon pod running and available
+	// (ready for at least spec.minReadySeconds)
+	// +optional
+	NumberUnavailable int32 `json:"numberUnavailable,omitempty" protobuf:"varint,8,opt,name=numberUnavailable"`
+
+	// Count of hash collisions for the DaemonSet. The DaemonSet controller
+	// uses this field as a collision avoidance mechanism when it needs to
+	// create the name for the newest ControllerRevision.
+	// +optional
+	CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,9,opt,name=collisionCount"`
+
+	// Represents the latest available observations of a DaemonSet's current state.
+	// +optional
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	Conditions []DaemonSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"`
+}
+
+type DaemonSetConditionType string
+
+// TODO: Add valid condition types of a DaemonSet.
+
+// DaemonSetCondition describes the state of a DaemonSet at a certain point.
+type DaemonSetCondition struct {
+	// Type of DaemonSet condition.
+	Type DaemonSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DaemonSetConditionType"`
+	// Status of the condition, one of True, False, Unknown.
+	Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"`
+	// Last time the condition transitioned from one status to another.
+	// +optional
+	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
+	// The reason for the condition's last transition.
+	// +optional
+	Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
+	// A human readable message indicating details about the transition.
+	// +optional
+	Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// DaemonSet represents the configuration of a daemon set.
+type DaemonSet struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// The desired behavior of this daemon set.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+	// +optional
+	Spec DaemonSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+	// The current status of this daemon set. This data may be
+	// out of date by some window of time.
+	// Populated by the system.
+	// Read-only.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+	// +optional
+	Status DaemonSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+const (
+	// DefaultDaemonSetUniqueLabelKey is the default label key that is added
+	// to existing DaemonSet pods to distinguish between old and new
+	// DaemonSet pods during DaemonSet template updates.
+	DefaultDaemonSetUniqueLabelKey = ControllerRevisionHashLabelKey
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// DaemonSetList is a collection of daemon sets.
+type DaemonSetList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// A list of daemon sets.
+	Items []DaemonSet `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient
+// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale
+// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ReplicaSet ensures that a specified number of pod replicas are running at any given time.
+type ReplicaSet struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// If the Labels of a ReplicaSet are empty, they are defaulted to
+	// be the same as the Pod(s) that the ReplicaSet manages.
+	// Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Spec defines the specification of the desired behavior of the ReplicaSet.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+	// +optional
+	Spec ReplicaSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+	// Status is the most recently observed status of the ReplicaSet.
+	// This data may be out of date by some window of time.
+	// Populated by the system.
+	// Read-only.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+	// +optional
+	Status ReplicaSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ReplicaSetList is a collection of ReplicaSets.
+type ReplicaSetList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// List of ReplicaSets.
+	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller
+	Items []ReplicaSet `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// ReplicaSetSpec is the specification of a ReplicaSet.
+type ReplicaSetSpec struct {
+	// Replicas is the number of desired replicas.
+	// This is a pointer to distinguish between explicit zero and unspecified.
+	// Defaults to 1.
+	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
+	// +optional
+	Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
+
+	// Minimum number of seconds for which a newly created pod should be ready
+	// without any of its containers crashing, for it to be considered available.
+	// Defaults to 0 (pod will be considered available as soon as it is ready).
+	// +optional
+	MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"`
+
+	// Selector is a label query over pods that should match the replica count.
+	// Label keys and values that must match in order to be controlled by this replica set.
+	// It must match the pod template's labels.
+	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+	Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"`
+
+	// Template is the object that describes the pod that will be created if
+	// insufficient replicas are detected.
+	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
+	// +optional
+	Template v1.PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"`
+}
+
+// ReplicaSetStatus represents the current status of a ReplicaSet.
+type ReplicaSetStatus struct {
+	// Replicas is the most recently observed number of replicas.
+	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
+	Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"`
+
+	// The number of pods that have labels matching the labels of the pod template of the replicaset.
+	// +optional
+	FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty" protobuf:"varint,2,opt,name=fullyLabeledReplicas"`
+
+	// The number of ready replicas for this replica set.
+	// +optional
+	ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,4,opt,name=readyReplicas"`
+
+	// The number of available replicas (ready for at least minReadySeconds) for this replica set.
+	// +optional
+	AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,5,opt,name=availableReplicas"`
+
+	// ObservedGeneration reflects the generation of the most recently observed ReplicaSet.
+	// +optional
+	ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"`
+
+	// Represents the latest available observations of a replica set's current state.
+	// +optional
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	Conditions []ReplicaSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"`
+}
+
+type ReplicaSetConditionType string
+
+// These are valid conditions of a replica set.
+const (
+	// ReplicaSetReplicaFailure is added in a replica set when one of its pods fails to be created
+	// due to insufficient quota, limit ranges, pod security policy, node selectors, etc., or fails
+	// to be deleted due to the kubelet being down or finalizers failing.
+	ReplicaSetReplicaFailure ReplicaSetConditionType = "ReplicaFailure"
+)
+
+// ReplicaSetCondition describes the state of a replica set at a certain point.
+type ReplicaSetCondition struct {
+	// Type of replica set condition.
+	Type ReplicaSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ReplicaSetConditionType"`
+	// Status of the condition, one of True, False, Unknown.
+	Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"`
+	// The last time the condition transitioned from one status to another.
+	// +optional
+	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
+	// The reason for the condition's last transition.
+	// +optional
+	Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
+	// A human readable message indicating details about the transition.
+	// +optional
+	Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ControllerRevision implements an immutable snapshot of state data. Clients
+// are responsible for serializing and deserializing the objects that contain
+// their internal state.
+// Once a ControllerRevision has been successfully created, it cannot be updated.
+// The API Server will fail validation of all requests that attempt to mutate
+// the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both
+// the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However,
+// it may be subject to name and representation changes in future releases, and clients should not
+// depend on its stability. It is primarily for internal use by controllers.
+type ControllerRevision struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Data is the serialized representation of the state.
+	Data runtime.RawExtension `json:"data,omitempty" protobuf:"bytes,2,opt,name=data"`
+
+	// Revision indicates the revision of the state represented by Data.
+	Revision int64 `json:"revision" protobuf:"varint,3,opt,name=revision"`
+}
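+
+// Illustrative sketch (editor's addition, not upstream code): clients store
+// their serialized state (e.g. raw JSON bytes) in Data via runtime.RawExtension;
+// once the object is created, Data is immutable.
+func exampleControllerRevision(state []byte, revision int64) *ControllerRevision {
+	return &ControllerRevision{
+		ObjectMeta: metav1.ObjectMeta{Name: "example-revision"},
+		Data:       runtime.RawExtension{Raw: state},
+		Revision:   revision,
+	}
+}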
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ControllerRevisionList is a resource containing a list of ControllerRevision objects.
+type ControllerRevisionList struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is the list of ControllerRevisions
+	Items []ControllerRevision `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
diff --git a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go
new file mode 100644
index 0000000..7e992c5
--- /dev/null
+++ b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go
@@ -0,0 +1,365 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_ControllerRevision = map[string]string{
+	"":         "ControllerRevision implements an immutable snapshot of state data. Clients are responsible for serializing and deserializing the objects that contain their internal state. Once a ControllerRevision has been successfully created, it can not be updated. The API Server will fail validation of all requests that attempt to mutate the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, it may be subject to name and representation changes in future releases, and clients should not depend on its stability. It is primarily for internal use by controllers.",
+	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"data":     "Data is the serialized representation of the state.",
+	"revision": "Revision indicates the revision of the state represented by Data.",
+}
+
+func (ControllerRevision) SwaggerDoc() map[string]string {
+	return map_ControllerRevision
+}
+
+var map_ControllerRevisionList = map[string]string{
+	"":         "ControllerRevisionList is a resource containing a list of ControllerRevision objects.",
+	"metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"items":    "Items is the list of ControllerRevisions",
+}
+
+func (ControllerRevisionList) SwaggerDoc() map[string]string {
+	return map_ControllerRevisionList
+}
+
+var map_DaemonSet = map[string]string{
+	"":         "DaemonSet represents the configuration of a daemon set.",
+	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"spec":     "The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+	"status":   "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+}
+
+func (DaemonSet) SwaggerDoc() map[string]string {
+	return map_DaemonSet
+}
+
+var map_DaemonSetCondition = map[string]string{
+	"":                   "DaemonSetCondition describes the state of a DaemonSet at a certain point.",
+	"type":               "Type of DaemonSet condition.",
+	"status":             "Status of the condition, one of True, False, Unknown.",
+	"lastTransitionTime": "Last time the condition transitioned from one status to another.",
+	"reason":             "The reason for the condition's last transition.",
+	"message":            "A human readable message indicating details about the transition.",
+}
+
+func (DaemonSetCondition) SwaggerDoc() map[string]string {
+	return map_DaemonSetCondition
+}
+
+var map_DaemonSetList = map[string]string{
+	"":         "DaemonSetList is a collection of daemon sets.",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"items":    "A list of daemon sets.",
+}
+
+func (DaemonSetList) SwaggerDoc() map[string]string {
+	return map_DaemonSetList
+}
+
+var map_DaemonSetSpec = map[string]string{
+	"":                     "DaemonSetSpec is the specification of a daemon set.",
+	"selector":             "A label query over pods that are managed by the daemon set. Must match in order to be controlled. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
+	"template":             "An object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template",
+	"updateStrategy":       "An update strategy to replace existing DaemonSet pods with new pods.",
+	"minReadySeconds":      "The minimum number of seconds for which a newly created DaemonSet pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).",
+	"revisionHistoryLimit": "The number of old history to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.",
+}
+
+func (DaemonSetSpec) SwaggerDoc() map[string]string {
+	return map_DaemonSetSpec
+}
+
+var map_DaemonSetStatus = map[string]string{
+	"":                       "DaemonSetStatus represents the current status of a daemon set.",
+	"currentNumberScheduled": "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",
+	"numberMisscheduled":     "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",
+	"desiredNumberScheduled": "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",
+	"numberReady":            "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready.",
+	"observedGeneration":     "The most recent generation observed by the daemon set controller.",
+	"updatedNumberScheduled": "The total number of nodes that are running updated daemon pod",
+	"numberAvailable":        "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available (ready for at least spec.minReadySeconds)",
+	"numberUnavailable":      "The number of nodes that should be running the daemon pod and have none of the daemon pod running and available (ready for at least spec.minReadySeconds)",
+	"collisionCount":         "Count of hash collisions for the DaemonSet. The DaemonSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.",
+	"conditions":             "Represents the latest available observations of a DaemonSet's current state.",
+}
+
+func (DaemonSetStatus) SwaggerDoc() map[string]string {
+	return map_DaemonSetStatus
+}
+
+var map_DaemonSetUpdateStrategy = map[string]string{
+	"":              "DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet.",
+	"type":          "Type of daemon set update. Can be \"RollingUpdate\" or \"OnDelete\". Default is RollingUpdate.",
+	"rollingUpdate": "Rolling update config params. Present only if type = \"RollingUpdate\".",
+}
+
+func (DaemonSetUpdateStrategy) SwaggerDoc() map[string]string {
+	return map_DaemonSetUpdateStrategy
+}
+
+var map_Deployment = map[string]string{
+	"":         "Deployment enables declarative updates for Pods and ReplicaSets.",
+	"metadata": "Standard object metadata.",
+	"spec":     "Specification of the desired behavior of the Deployment.",
+	"status":   "Most recently observed status of the Deployment.",
+}
+
+func (Deployment) SwaggerDoc() map[string]string {
+	return map_Deployment
+}
+
+var map_DeploymentCondition = map[string]string{
+	"":                   "DeploymentCondition describes the state of a deployment at a certain point.",
+	"type":               "Type of deployment condition.",
+	"status":             "Status of the condition, one of True, False, Unknown.",
+	"lastUpdateTime":     "The last time this condition was updated.",
+	"lastTransitionTime": "Last time the condition transitioned from one status to another.",
+	"reason":             "The reason for the condition's last transition.",
+	"message":            "A human readable message indicating details about the transition.",
+}
+
+func (DeploymentCondition) SwaggerDoc() map[string]string {
+	return map_DeploymentCondition
+}
+
+var map_DeploymentList = map[string]string{
+	"":         "DeploymentList is a list of Deployments.",
+	"metadata": "Standard list metadata.",
+	"items":    "Items is the list of Deployments.",
+}
+
+func (DeploymentList) SwaggerDoc() map[string]string {
+	return map_DeploymentList
+}
+
+var map_DeploymentSpec = map[string]string{
+	"":                        "DeploymentSpec is the specification of the desired behavior of the Deployment.",
+	"replicas":                "Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.",
+	"selector":                "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment. It must match the pod template's labels.",
+	"template":                "Template describes the pods that will be created.",
+	"strategy":                "The deployment strategy to use to replace existing pods with new ones.",
+	"minReadySeconds":         "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)",
+	"revisionHistoryLimit":    "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.",
+	"paused":                  "Indicates that the deployment is paused.",
+	"progressDeadlineSeconds": "The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. Defaults to 600s.",
+}
+
+func (DeploymentSpec) SwaggerDoc() map[string]string {
+	return map_DeploymentSpec
+}
+
+var map_DeploymentStatus = map[string]string{
+	"":                    "DeploymentStatus is the most recently observed status of the Deployment.",
+	"observedGeneration":  "The generation observed by the deployment controller.",
+	"replicas":            "Total number of non-terminated pods targeted by this deployment (their labels match the selector).",
+	"updatedReplicas":     "Total number of non-terminated pods targeted by this deployment that have the desired template spec.",
+	"readyReplicas":       "Total number of ready pods targeted by this deployment.",
+	"availableReplicas":   "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.",
+	"unavailableReplicas": "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.",
+	"conditions":          "Represents the latest available observations of a deployment's current state.",
+	"collisionCount":      "Count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.",
+}
+
+func (DeploymentStatus) SwaggerDoc() map[string]string {
+	return map_DeploymentStatus
+}
+
+var map_DeploymentStrategy = map[string]string{
+	"":              "DeploymentStrategy describes how to replace existing pods with new ones.",
+	"type":          "Type of deployment. Can be \"Recreate\" or \"RollingUpdate\". Default is RollingUpdate.",
+	"rollingUpdate": "Rolling update config params. Present only if DeploymentStrategyType = RollingUpdate.",
+}
+
+func (DeploymentStrategy) SwaggerDoc() map[string]string {
+	return map_DeploymentStrategy
+}
+
+var map_ReplicaSet = map[string]string{
+	"":         "ReplicaSet ensures that a specified number of pod replicas are running at any given time.",
+	"metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"spec":     "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+	"status":   "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+}
+
+func (ReplicaSet) SwaggerDoc() map[string]string {
+	return map_ReplicaSet
+}
+
+var map_ReplicaSetCondition = map[string]string{
+	"":                   "ReplicaSetCondition describes the state of a replica set at a certain point.",
+	"type":               "Type of replica set condition.",
+	"status":             "Status of the condition, one of True, False, Unknown.",
+	"lastTransitionTime": "The last time the condition transitioned from one status to another.",
+	"reason":             "The reason for the condition's last transition.",
+	"message":            "A human readable message indicating details about the transition.",
+}
+
+func (ReplicaSetCondition) SwaggerDoc() map[string]string {
+	return map_ReplicaSetCondition
+}
+
+var map_ReplicaSetList = map[string]string{
+	"":         "ReplicaSetList is a collection of ReplicaSets.",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
+	"items":    "List of ReplicaSets. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller",
+}
+
+func (ReplicaSetList) SwaggerDoc() map[string]string {
+	return map_ReplicaSetList
+}
+
+var map_ReplicaSetSpec = map[string]string{
+	"":                "ReplicaSetSpec is the specification of a ReplicaSet.",
+	"replicas":        "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller",
+	"minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)",
+	"selector":        "Selector is a label query over pods that should match the replica count. Label keys and values that must match in order to be controlled by this replica set. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
+	"template":        "Template is the object that describes the pod that will be created if insufficient replicas are detected. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template",
+}
+
+func (ReplicaSetSpec) SwaggerDoc() map[string]string {
+	return map_ReplicaSetSpec
+}
+
+var map_ReplicaSetStatus = map[string]string{
+	"":                     "ReplicaSetStatus represents the current status of a ReplicaSet.",
+	"replicas":             "Replicas is the most recently oberved number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller",
+	"fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replicaset.",
+	"readyReplicas":        "The number of ready replicas for this replica set.",
+	"availableReplicas":    "The number of available replicas (ready for at least minReadySeconds) for this replica set.",
+	"observedGeneration":   "ObservedGeneration reflects the generation of the most recently observed ReplicaSet.",
+	"conditions":           "Represents the latest available observations of a replica set's current state.",
+}
+
+func (ReplicaSetStatus) SwaggerDoc() map[string]string {
+	return map_ReplicaSetStatus
+}
+
+var map_RollingUpdateDaemonSet = map[string]string{
+	"":               "Spec to control the desired behavior of daemon set rolling update.",
+	"maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0. Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.",
+}
+
+func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string {
+	return map_RollingUpdateDaemonSet
+}
+
+var map_RollingUpdateDeployment = map[string]string{
+	"":               "Spec to control the desired behavior of rolling update.",
+	"maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. Defaults to 25%. Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old ReplicaSet can be scaled down further, followed by scaling up the new ReplicaSet, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.",
+	"maxSurge":       "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new ReplicaSet can be scaled up further, ensuring that total number of pods running at any time during the update is at most 130% of desired pods.",
+}
+
+func (RollingUpdateDeployment) SwaggerDoc() map[string]string {
+	return map_RollingUpdateDeployment
+}
+
+var map_RollingUpdateStatefulSetStrategy = map[string]string{
+	"":          "RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.",
+	"partition": "Partition indicates the ordinal at which the StatefulSet should be partitioned. Default value is 0.",
+}
+
+func (RollingUpdateStatefulSetStrategy) SwaggerDoc() map[string]string {
+	return map_RollingUpdateStatefulSetStrategy
+}
+
+var map_StatefulSet = map[string]string{
+	"":       "StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.",
+	"spec":   "Spec defines the desired identities of pods in this set.",
+	"status": "Status is the current status of Pods in this StatefulSet. This data may be out of date by some window of time.",
+}
+
+func (StatefulSet) SwaggerDoc() map[string]string {
+	return map_StatefulSet
+}
+
+var map_StatefulSetCondition = map[string]string{
+	"":                   "StatefulSetCondition describes the state of a statefulset at a certain point.",
+	"type":               "Type of statefulset condition.",
+	"status":             "Status of the condition, one of True, False, Unknown.",
+	"lastTransitionTime": "Last time the condition transitioned from one status to another.",
+	"reason":             "The reason for the condition's last transition.",
+	"message":            "A human readable message indicating details about the transition.",
+}
+
+func (StatefulSetCondition) SwaggerDoc() map[string]string {
+	return map_StatefulSetCondition
+}
+
+var map_StatefulSetList = map[string]string{
+	"": "StatefulSetList is a collection of StatefulSets.",
+}
+
+func (StatefulSetList) SwaggerDoc() map[string]string {
+	return map_StatefulSetList
+}
+
+var map_StatefulSetSpec = map[string]string{
+	"":                     "A StatefulSetSpec is the specification of a StatefulSet.",
+	"replicas":             "replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.",
+	"selector":             "selector is a label query over pods that should match the replica count. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
+	"template":             "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet.",
+	"volumeClaimTemplates": "volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.",
+	"serviceName":          "serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.",
+	"podManagementPolicy":  "podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.",
+	"updateStrategy":       "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.",
+	"revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.",
+}
+
+func (StatefulSetSpec) SwaggerDoc() map[string]string {
+	return map_StatefulSetSpec
+}
+
+var map_StatefulSetStatus = map[string]string{
+	"":                   "StatefulSetStatus represents the current state of a StatefulSet.",
+	"observedGeneration": "observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the StatefulSet's generation, which is updated on mutation by the API Server.",
+	"replicas":           "replicas is the number of Pods created by the StatefulSet controller.",
+	"readyReplicas":      "readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition.",
+	"currentReplicas":    "currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by currentRevision.",
+	"updatedReplicas":    "updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by updateRevision.",
+	"currentRevision":    "currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas).",
+	"updateRevision":     "updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas)",
+	"collisionCount":     "collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.",
+	"conditions":         "Represents the latest available observations of a statefulset's current state.",
+}
+
+func (StatefulSetStatus) SwaggerDoc() map[string]string {
+	return map_StatefulSetStatus
+}
+
+var map_StatefulSetUpdateStrategy = map[string]string{
+	"":              "StatefulSetUpdateStrategy indicates the strategy that the StatefulSet controller will use to perform updates. It includes any additional parameters necessary to perform the update for the indicated strategy.",
+	"type":          "Type indicates the type of the StatefulSetUpdateStrategy. Default is RollingUpdate.",
+	"rollingUpdate": "RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType.",
+}
+
+func (StatefulSetUpdateStrategy) SwaggerDoc() map[string]string {
+	return map_StatefulSetUpdateStrategy
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
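+
+// Illustrative sketch (editor's addition, not generated code): consumers such
+// as go-restful read these maps via SwaggerDoc(); the empty key carries the
+// type-level documentation and each field key carries that field's doc string.
+func exampleSwaggerDoc() (typeDoc, fieldDoc string) {
+	docs := (DeploymentSpec{}).SwaggerDoc()
+	return docs[""], docs["replicas"]
+}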
diff --git a/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..885203f
--- /dev/null
+++ b/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go
@@ -0,0 +1,772 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	intstr "k8s.io/apimachinery/pkg/util/intstr"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControllerRevision) DeepCopyInto(out *ControllerRevision) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Data.DeepCopyInto(&out.Data)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRevision.
+func (in *ControllerRevision) DeepCopy() *ControllerRevision {
+	if in == nil {
+		return nil
+	}
+	out := new(ControllerRevision)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ControllerRevision) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControllerRevisionList) DeepCopyInto(out *ControllerRevisionList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]ControllerRevision, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRevisionList.
+func (in *ControllerRevisionList) DeepCopy() *ControllerRevisionList {
+	if in == nil {
+		return nil
+	}
+	out := new(ControllerRevisionList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ControllerRevisionList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DaemonSet) DeepCopyInto(out *DaemonSet) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSet.
+func (in *DaemonSet) DeepCopy() *DaemonSet {
+	if in == nil {
+		return nil
+	}
+	out := new(DaemonSet)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DaemonSet) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DaemonSetCondition) DeepCopyInto(out *DaemonSetCondition) {
+	*out = *in
+	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetCondition.
+func (in *DaemonSetCondition) DeepCopy() *DaemonSetCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(DaemonSetCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DaemonSetList) DeepCopyInto(out *DaemonSetList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]DaemonSet, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetList.
+func (in *DaemonSetList) DeepCopy() *DaemonSetList {
+	if in == nil {
+		return nil
+	}
+	out := new(DaemonSetList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DaemonSetList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DaemonSetSpec) DeepCopyInto(out *DaemonSetSpec) {
+	*out = *in
+	if in.Selector != nil {
+		in, out := &in.Selector, &out.Selector
+		*out = new(metav1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	in.Template.DeepCopyInto(&out.Template)
+	in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy)
+	if in.RevisionHistoryLimit != nil {
+		in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit
+		*out = new(int32)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetSpec.
+func (in *DaemonSetSpec) DeepCopy() *DaemonSetSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(DaemonSetSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DaemonSetStatus) DeepCopyInto(out *DaemonSetStatus) {
+	*out = *in
+	if in.CollisionCount != nil {
+		in, out := &in.CollisionCount, &out.CollisionCount
+		*out = new(int32)
+		**out = **in
+	}
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]DaemonSetCondition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetStatus.
+func (in *DaemonSetStatus) DeepCopy() *DaemonSetStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(DaemonSetStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DaemonSetUpdateStrategy) DeepCopyInto(out *DaemonSetUpdateStrategy) {
+	*out = *in
+	if in.RollingUpdate != nil {
+		in, out := &in.RollingUpdate, &out.RollingUpdate
+		*out = new(RollingUpdateDaemonSet)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetUpdateStrategy.
+func (in *DaemonSetUpdateStrategy) DeepCopy() *DaemonSetUpdateStrategy {
+	if in == nil {
+		return nil
+	}
+	out := new(DaemonSetUpdateStrategy)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Deployment) DeepCopyInto(out *Deployment) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Deployment.
+func (in *Deployment) DeepCopy() *Deployment {
+	if in == nil {
+		return nil
+	}
+	out := new(Deployment)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Deployment) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeploymentCondition) DeepCopyInto(out *DeploymentCondition) {
+	*out = *in
+	in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime)
+	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentCondition.
+func (in *DeploymentCondition) DeepCopy() *DeploymentCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(DeploymentCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeploymentList) DeepCopyInto(out *DeploymentList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Deployment, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentList.
+func (in *DeploymentList) DeepCopy() *DeploymentList {
+	if in == nil {
+		return nil
+	}
+	out := new(DeploymentList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DeploymentList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeploymentSpec) DeepCopyInto(out *DeploymentSpec) {
+	*out = *in
+	if in.Replicas != nil {
+		in, out := &in.Replicas, &out.Replicas
+		*out = new(int32)
+		**out = **in
+	}
+	if in.Selector != nil {
+		in, out := &in.Selector, &out.Selector
+		*out = new(metav1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	in.Template.DeepCopyInto(&out.Template)
+	in.Strategy.DeepCopyInto(&out.Strategy)
+	if in.RevisionHistoryLimit != nil {
+		in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit
+		*out = new(int32)
+		**out = **in
+	}
+	if in.ProgressDeadlineSeconds != nil {
+		in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds
+		*out = new(int32)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentSpec.
+func (in *DeploymentSpec) DeepCopy() *DeploymentSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(DeploymentSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeploymentStatus) DeepCopyInto(out *DeploymentStatus) {
+	*out = *in
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]DeploymentCondition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.CollisionCount != nil {
+		in, out := &in.CollisionCount, &out.CollisionCount
+		*out = new(int32)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStatus.
+func (in *DeploymentStatus) DeepCopy() *DeploymentStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(DeploymentStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeploymentStrategy) DeepCopyInto(out *DeploymentStrategy) {
+	*out = *in
+	if in.RollingUpdate != nil {
+		in, out := &in.RollingUpdate, &out.RollingUpdate
+		*out = new(RollingUpdateDeployment)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStrategy.
+func (in *DeploymentStrategy) DeepCopy() *DeploymentStrategy {
+	if in == nil {
+		return nil
+	}
+	out := new(DeploymentStrategy)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplicaSet) DeepCopyInto(out *ReplicaSet) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSet.
+func (in *ReplicaSet) DeepCopy() *ReplicaSet {
+	if in == nil {
+		return nil
+	}
+	out := new(ReplicaSet)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ReplicaSet) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplicaSetCondition) DeepCopyInto(out *ReplicaSetCondition) {
+	*out = *in
+	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetCondition.
+func (in *ReplicaSetCondition) DeepCopy() *ReplicaSetCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(ReplicaSetCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplicaSetList) DeepCopyInto(out *ReplicaSetList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]ReplicaSet, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetList.
+func (in *ReplicaSetList) DeepCopy() *ReplicaSetList {
+	if in == nil {
+		return nil
+	}
+	out := new(ReplicaSetList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ReplicaSetList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplicaSetSpec) DeepCopyInto(out *ReplicaSetSpec) {
+	*out = *in
+	if in.Replicas != nil {
+		in, out := &in.Replicas, &out.Replicas
+		*out = new(int32)
+		**out = **in
+	}
+	if in.Selector != nil {
+		in, out := &in.Selector, &out.Selector
+		*out = new(metav1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	in.Template.DeepCopyInto(&out.Template)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetSpec.
+func (in *ReplicaSetSpec) DeepCopy() *ReplicaSetSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ReplicaSetSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplicaSetStatus) DeepCopyInto(out *ReplicaSetStatus) {
+	*out = *in
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]ReplicaSetCondition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetStatus.
+func (in *ReplicaSetStatus) DeepCopy() *ReplicaSetStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ReplicaSetStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RollingUpdateDaemonSet) DeepCopyInto(out *RollingUpdateDaemonSet) {
+	*out = *in
+	if in.MaxUnavailable != nil {
+		in, out := &in.MaxUnavailable, &out.MaxUnavailable
+		*out = new(intstr.IntOrString)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDaemonSet.
+func (in *RollingUpdateDaemonSet) DeepCopy() *RollingUpdateDaemonSet {
+	if in == nil {
+		return nil
+	}
+	out := new(RollingUpdateDaemonSet)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RollingUpdateDeployment) DeepCopyInto(out *RollingUpdateDeployment) {
+	*out = *in
+	if in.MaxUnavailable != nil {
+		in, out := &in.MaxUnavailable, &out.MaxUnavailable
+		*out = new(intstr.IntOrString)
+		**out = **in
+	}
+	if in.MaxSurge != nil {
+		in, out := &in.MaxSurge, &out.MaxSurge
+		*out = new(intstr.IntOrString)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDeployment.
+func (in *RollingUpdateDeployment) DeepCopy() *RollingUpdateDeployment {
+	if in == nil {
+		return nil
+	}
+	out := new(RollingUpdateDeployment)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RollingUpdateStatefulSetStrategy) DeepCopyInto(out *RollingUpdateStatefulSetStrategy) {
+	*out = *in
+	if in.Partition != nil {
+		in, out := &in.Partition, &out.Partition
+		*out = new(int32)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateStatefulSetStrategy.
+func (in *RollingUpdateStatefulSetStrategy) DeepCopy() *RollingUpdateStatefulSetStrategy {
+	if in == nil {
+		return nil
+	}
+	out := new(RollingUpdateStatefulSetStrategy)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StatefulSet) DeepCopyInto(out *StatefulSet) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSet.
+func (in *StatefulSet) DeepCopy() *StatefulSet {
+	if in == nil {
+		return nil
+	}
+	out := new(StatefulSet)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *StatefulSet) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StatefulSetCondition) DeepCopyInto(out *StatefulSetCondition) {
+	*out = *in
+	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetCondition.
+func (in *StatefulSetCondition) DeepCopy() *StatefulSetCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(StatefulSetCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StatefulSetList) DeepCopyInto(out *StatefulSetList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]StatefulSet, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetList.
+func (in *StatefulSetList) DeepCopy() *StatefulSetList {
+	if in == nil {
+		return nil
+	}
+	out := new(StatefulSetList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *StatefulSetList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StatefulSetSpec) DeepCopyInto(out *StatefulSetSpec) {
+	*out = *in
+	if in.Replicas != nil {
+		in, out := &in.Replicas, &out.Replicas
+		*out = new(int32)
+		**out = **in
+	}
+	if in.Selector != nil {
+		in, out := &in.Selector, &out.Selector
+		*out = new(metav1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	in.Template.DeepCopyInto(&out.Template)
+	if in.VolumeClaimTemplates != nil {
+		in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates
+		*out = make([]corev1.PersistentVolumeClaim, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy)
+	if in.RevisionHistoryLimit != nil {
+		in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit
+		*out = new(int32)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetSpec.
+func (in *StatefulSetSpec) DeepCopy() *StatefulSetSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(StatefulSetSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StatefulSetStatus) DeepCopyInto(out *StatefulSetStatus) {
+	*out = *in
+	if in.CollisionCount != nil {
+		in, out := &in.CollisionCount, &out.CollisionCount
+		*out = new(int32)
+		**out = **in
+	}
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]StatefulSetCondition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetStatus.
+func (in *StatefulSetStatus) DeepCopy() *StatefulSetStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(StatefulSetStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StatefulSetUpdateStrategy) DeepCopyInto(out *StatefulSetUpdateStrategy) {
+	*out = *in
+	if in.RollingUpdate != nil {
+		in, out := &in.RollingUpdate, &out.RollingUpdate
+		*out = new(RollingUpdateStatefulSetStrategy)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetUpdateStrategy.
+func (in *StatefulSetUpdateStrategy) DeepCopy() *StatefulSetUpdateStrategy {
+	if in == nil {
+		return nil
+	}
+	out := new(StatefulSetUpdateStrategy)
+	in.DeepCopyInto(out)
+	return out
+}
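
The deepcopy-gen output above follows a fixed pattern: DeepCopyInto writes into a caller-supplied value, DeepCopy allocates and returns a fresh one, and DeepCopyObject satisfies runtime.Object so these types can flow through the scheme machinery. A minimal sketch of the intended usage, assuming a caller that mutates a copy (the replica value is illustrative, not part of this change):

    package main

    import (
        "fmt"

        appsv1 "k8s.io/api/apps/v1"
    )

    func main() {
        replicas := int32(3)
        orig := &appsv1.Deployment{}
        orig.Spec.Replicas = &replicas

        // DeepCopy duplicates pointer fields such as Spec.Replicas,
        // so mutating the clone leaves the original untouched.
        clone := orig.DeepCopy()
        *clone.Spec.Replicas = 5

        fmt.Println(*orig.Spec.Replicas, *clone.Spec.Replicas) // prints: 3 5
    }
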
diff --git a/vendor/k8s.io/api/apps/v1beta1/doc.go b/vendor/k8s.io/api/apps/v1beta1/doc.go
new file mode 100644
index 0000000..6047ed5
--- /dev/null
+++ b/vendor/k8s.io/api/apps/v1beta1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:openapi-gen=true
+
+package v1beta1 // import "k8s.io/api/apps/v1beta1"
diff --git a/vendor/k8s.io/api/apps/v1beta1/generated.proto b/vendor/k8s.io/api/apps/v1beta1/generated.proto
new file mode 100644
index 0000000..f87f39f
--- /dev/null
+++ b/vendor/k8s.io/api/apps/v1beta1/generated.proto
@@ -0,0 +1,484 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.api.apps.v1beta1;
+
+import "k8s.io/api/core/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+import "k8s.io/apimachinery/pkg/util/intstr/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1beta1";
+
+// DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1beta2/ControllerRevision. See the
+// release notes for more information.
+// ControllerRevision implements an immutable snapshot of state data. Clients
+// are responsible for serializing and deserializing the objects that contain
+// their internal state.
+// Once a ControllerRevision has been successfully created, it can not be updated.
+// The API Server will fail validation of all requests that attempt to mutate
+// the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both
+// the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However,
+// it may be subject to name and representation changes in future releases, and clients should not
+// depend on its stability. It is primarily for internal use by controllers.
+message ControllerRevision {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Data is the serialized representation of the state.
+  optional k8s.io.apimachinery.pkg.runtime.RawExtension data = 2;
+
+  // Revision indicates the revision of the state represented by Data.
+  optional int64 revision = 3;
+}
+
+// ControllerRevisionList is a resource containing a list of ControllerRevision objects.
+message ControllerRevisionList {
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is the list of ControllerRevisions
+  repeated ControllerRevision items = 2;
+}
+
+// DEPRECATED - This group version of Deployment is deprecated by apps/v1beta2/Deployment. See the release notes for
+// more information.
+// Deployment enables declarative updates for Pods and ReplicaSets.
+message Deployment {
+  // Standard object metadata.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Specification of the desired behavior of the Deployment.
+  // +optional
+  optional DeploymentSpec spec = 2;
+
+  // Most recently observed status of the Deployment.
+  // +optional
+  optional DeploymentStatus status = 3;
+}
+
+// DeploymentCondition describes the state of a deployment at a certain point.
+message DeploymentCondition {
+  // Type of deployment condition.
+  optional string type = 1;
+
+  // Status of the condition, one of True, False, Unknown.
+  optional string status = 2;
+
+  // The last time this condition was updated.
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6;
+
+  // Last time the condition transitioned from one status to another.
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7;
+
+  // The reason for the condition's last transition.
+  optional string reason = 4;
+
+  // A human readable message indicating details about the transition.
+  optional string message = 5;
+}
+
+// DeploymentList is a list of Deployments.
+message DeploymentList {
+  // Standard list metadata.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is the list of Deployments.
+  repeated Deployment items = 2;
+}
+
+// DEPRECATED.
+// DeploymentRollback stores the information required to rollback a deployment.
+message DeploymentRollback {
+  // Required: This must match the Name of a deployment.
+  optional string name = 1;
+
+  // The annotations to be updated to a deployment
+  // +optional
+  map<string, string> updatedAnnotations = 2;
+
+  // The config of this deployment rollback.
+  optional RollbackConfig rollbackTo = 3;
+}
+
+// DeploymentSpec is the specification of the desired behavior of the Deployment.
+message DeploymentSpec {
+  // Number of desired pods. This is a pointer to distinguish between explicit
+  // zero and not specified. Defaults to 1.
+  // +optional
+  optional int32 replicas = 1;
+
+  // Label selector for pods. Existing ReplicaSets whose pods are
+  // selected by this will be the ones affected by this deployment.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
+
+  // Template describes the pods that will be created.
+  optional k8s.io.api.core.v1.PodTemplateSpec template = 3;
+
+  // The deployment strategy to use to replace existing pods with new ones.
+  // +optional
+  // +patchStrategy=retainKeys
+  optional DeploymentStrategy strategy = 4;
+
+  // Minimum number of seconds for which a newly created pod should be ready
+  // without any of its container crashing, for it to be considered available.
+  // Defaults to 0 (pod will be considered available as soon as it is ready)
+  // +optional
+  optional int32 minReadySeconds = 5;
+
+  // The number of old ReplicaSets to retain to allow rollback.
+  // This is a pointer to distinguish between explicit zero and not specified.
+  // Defaults to 2.
+  // +optional
+  optional int32 revisionHistoryLimit = 6;
+
+  // Indicates that the deployment is paused.
+  // +optional
+  optional bool paused = 7;
+
+  // DEPRECATED.
+  // The config this deployment is rolling back to. Will be cleared after rollback is done.
+  // +optional
+  optional RollbackConfig rollbackTo = 8;
+
+  // The maximum time in seconds for a deployment to make progress before it
+  // is considered to be failed. The deployment controller will continue to
+  // process failed deployments and a condition with a ProgressDeadlineExceeded
+  // reason will be surfaced in the deployment status. Note that progress will
+  // not be estimated during the time a deployment is paused. Defaults to 600s.
+  // +optional
+  optional int32 progressDeadlineSeconds = 9;
+}
+
+// DeploymentStatus is the most recently observed status of the Deployment.
+message DeploymentStatus {
+  // The generation observed by the deployment controller.
+  // +optional
+  optional int64 observedGeneration = 1;
+
+  // Total number of non-terminated pods targeted by this deployment (their labels match the selector).
+  // +optional
+  optional int32 replicas = 2;
+
+  // Total number of non-terminated pods targeted by this deployment that have the desired template spec.
+  // +optional
+  optional int32 updatedReplicas = 3;
+
+  // Total number of ready pods targeted by this deployment.
+  // +optional
+  optional int32 readyReplicas = 7;
+
+  // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.
+  // +optional
+  optional int32 availableReplicas = 4;
+
+  // Total number of unavailable pods targeted by this deployment. This is the total number of
+  // pods that are still required for the deployment to have 100% available capacity. They may
+  // either be pods that are running but not yet available or pods that still have not been created.
+  // +optional
+  optional int32 unavailableReplicas = 5;
+
+  // Represents the latest available observations of a deployment's current state.
+  // +patchMergeKey=type
+  // +patchStrategy=merge
+  repeated DeploymentCondition conditions = 6;
+
+  // Count of hash collisions for the Deployment. The Deployment controller uses this
+  // field as a collision avoidance mechanism when it needs to create the name for the
+  // newest ReplicaSet.
+  // +optional
+  optional int32 collisionCount = 8;
+}
+
+// DeploymentStrategy describes how to replace existing pods with new ones.
+message DeploymentStrategy {
+  // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate.
+  // +optional
+  optional string type = 1;
+
+  // Rolling update config params. Present only if DeploymentStrategyType =
+  // RollingUpdate.
+  // ---
+  // TODO: Update this to follow our convention for oneOf, whatever we decide it
+  // to be.
+  // +optional
+  optional RollingUpdateDeployment rollingUpdate = 2;
+}
+
+// DEPRECATED.
+message RollbackConfig {
+  // The revision to rollback to. If set to 0, rollback to the last revision.
+  // +optional
+  optional int64 revision = 1;
+}
+
+// Spec to control the desired behavior of rolling update.
+message RollingUpdateDeployment {
+  // The maximum number of pods that can be unavailable during the update.
+  // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+  // Absolute number is calculated from percentage by rounding down.
+  // This cannot be 0 if MaxSurge is 0.
+  // Defaults to 25%.
+  // Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods
+  // immediately when the rolling update starts. Once new pods are ready, old ReplicaSet
+  // can be scaled down further, followed by scaling up the new ReplicaSet, ensuring
+  // that the total number of pods available at all times during the update is at
+  // least 70% of desired pods.
+  // +optional
+  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1;
+
+  // The maximum number of pods that can be scheduled above the desired number of
+  // pods.
+  // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+  // This cannot be 0 if MaxUnavailable is 0.
+  // Absolute number is calculated from percentage by rounding up.
+  // Defaults to 25%.
+  // Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when
+  // the rolling update starts, such that the total number of old and new pods does not exceed
+  // 130% of desired pods. Once old pods have been killed, the
+  // new ReplicaSet can be scaled up further, ensuring that the total number of pods running
+  // at any time during the update is at most 130% of desired pods.
+  // +optional
+  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2;
+}
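
The rounding directions above (down for maxUnavailable, up for maxSurge) are easy to check with the intstr helper these fields are declared with. A short sketch, assuming 10 desired pods and the 25% defaults:

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/util/intstr"
    )

    func main() {
        maxUnavailable := intstr.FromString("25%")
        maxSurge := intstr.FromString("25%")

        // For 10 desired pods: 25% is 2.5, so maxUnavailable rounds
        // down to 2 and maxSurge rounds up to 3.
        down, _ := intstr.GetValueFromIntOrPercent(&maxUnavailable, 10, false)
        up, _ := intstr.GetValueFromIntOrPercent(&maxSurge, 10, true)

        fmt.Println(down, up) // prints: 2 3
    }
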
+
+// RollingUpdateStatefulSetStrategy is used to communicate parameters for RollingUpdateStatefulSetStrategyType.
+message RollingUpdateStatefulSetStrategy {
+  // Partition indicates the ordinal at which the StatefulSet should be
+  // partitioned.
+  optional int32 partition = 1;
+}
+
+// Scale represents a scaling request for a resource.
+message Scale {
+  // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.
+  // +optional
+  optional ScaleSpec spec = 2;
+
+  // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only.
+  // +optional
+  optional ScaleStatus status = 3;
+}
+
+// ScaleSpec describes the attributes of a scale subresource
+message ScaleSpec {
+  // desired number of instances for the scaled object.
+  // +optional
+  optional int32 replicas = 1;
+}
+
+// ScaleStatus represents the current status of a scale subresource.
+message ScaleStatus {
+  // actual number of observed instances of the scaled object.
+  optional int32 replicas = 1;
+
+  // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors
+  // +optional
+  map<string, string> selector = 2;
+
+  // label selector for pods that should match the replicas count. This is a serialized
+  // version of both map-based and more expressive set-based selectors. This is done to
+  // avoid introspection in the clients. The string will be in the same format as the
+  // query-param syntax. If the target type only supports map-based selectors, both this
+  // field and map-based selector field are populated.
+  // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+  // +optional
+  optional string targetSelector = 3;
+}
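
The query-param syntax mentioned for targetSelector is what the apimachinery helper produces when it flattens a LabelSelector into a string. A small sketch (the label keys and values are illustrative):

    package main

    import (
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func main() {
        sel := &metav1.LabelSelector{
            MatchLabels: map[string]string{"app": "web"},
            MatchExpressions: []metav1.LabelSelectorRequirement{
                {Key: "tier", Operator: metav1.LabelSelectorOpIn, Values: []string{"frontend"}},
            },
        }
        // LabelSelectorAsSelector merges map-based and set-based terms
        // into one labels.Selector; String() yields the query-param form,
        // e.g. "app=web,tier in (frontend)".
        s, err := metav1.LabelSelectorAsSelector(sel)
        if err != nil {
            panic(err)
        }
        fmt.Println(s.String())
    }
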
+
+// DEPRECATED - This group version of StatefulSet is deprecated by apps/v1beta2/StatefulSet. See the release notes for
+// more information.
+// StatefulSet represents a set of pods with consistent identities.
+// Identities are defined as:
+//  - Network: A single stable DNS and hostname.
+//  - Storage: As many VolumeClaims as requested.
+// The StatefulSet guarantees that a given network identity will always
+// map to the same storage identity.
+message StatefulSet {
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Spec defines the desired identities of pods in this set.
+  // +optional
+  optional StatefulSetSpec spec = 2;
+
+  // Status is the current status of Pods in this StatefulSet. This data
+  // may be out of date by some window of time.
+  // +optional
+  optional StatefulSetStatus status = 3;
+}
+
+// StatefulSetCondition describes the state of a statefulset at a certain point.
+message StatefulSetCondition {
+  // Type of statefulset condition.
+  optional string type = 1;
+
+  // Status of the condition, one of True, False, Unknown.
+  optional string status = 2;
+
+  // Last time the condition transitioned from one status to another.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
+
+  // The reason for the condition's last transition.
+  // +optional
+  optional string reason = 4;
+
+  // A human readable message indicating details about the transition.
+  // +optional
+  optional string message = 5;
+}
+
+// StatefulSetList is a collection of StatefulSets.
+message StatefulSetList {
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  repeated StatefulSet items = 2;
+}
+
+// A StatefulSetSpec is the specification of a StatefulSet.
+message StatefulSetSpec {
+  // replicas is the desired number of replicas of the given Template.
+  // These are replicas in the sense that they are instantiations of the
+  // same Template, but individual replicas also have a consistent identity.
+  // If unspecified, defaults to 1.
+  // TODO: Consider a rename of this field.
+  // +optional
+  optional int32 replicas = 1;
+
+  // selector is a label query over pods that should match the replica count.
+  // If empty, defaulted to labels on the pod template.
+  // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
+
+  // template is the object that describes the pod that will be created if
+  // insufficient replicas are detected. Each pod stamped out by the StatefulSet
+  // will fulfill this Template, but have a unique identity from the rest
+  // of the StatefulSet.
+  optional k8s.io.api.core.v1.PodTemplateSpec template = 3;
+
+  // volumeClaimTemplates is a list of claims that pods are allowed to reference.
+  // The StatefulSet controller is responsible for mapping network identities to
+  // claims in a way that maintains the identity of a pod. Every claim in
+  // this list must have at least one matching (by name) volumeMount in one
+  // container in the template. A claim in this list takes precedence over
+  // any volumes in the template, with the same name.
+  // TODO: Define the behavior if a claim already exists with the same name.
+  // +optional
+  repeated k8s.io.api.core.v1.PersistentVolumeClaim volumeClaimTemplates = 4;
+
+  // serviceName is the name of the service that governs this StatefulSet.
+  // This service must exist before the StatefulSet, and is responsible for
+  // the network identity of the set. Pods get DNS/hostnames that follow the
+  // pattern: pod-specific-string.serviceName.default.svc.cluster.local
+  // where "pod-specific-string" is managed by the StatefulSet controller.
+  optional string serviceName = 5;
+
+  // podManagementPolicy controls how pods are created during initial scale up,
+  // when replacing pods on nodes, or when scaling down. The default policy is
+  // `OrderedReady`, where pods are created in increasing order (pod-0, then
+  // pod-1, etc) and the controller will wait until each pod is ready before
+  // continuing. When scaling down, the pods are removed in the opposite order.
+  // The alternative policy is `Parallel` which will create pods in parallel
+  // to match the desired scale without waiting, and on scale down will delete
+  // all pods at once.
+  // +optional
+  optional string podManagementPolicy = 6;
+
+  // updateStrategy indicates the StatefulSetUpdateStrategy that will be
+  // employed to update Pods in the StatefulSet when a revision is made to
+  // Template.
+  optional StatefulSetUpdateStrategy updateStrategy = 7;
+
+  // revisionHistoryLimit is the maximum number of revisions that will
+  // be maintained in the StatefulSet's revision history. The revision history
+  // consists of all revisions not represented by a currently applied
+  // StatefulSetSpec version. The default value is 10.
+  optional int32 revisionHistoryLimit = 8;
+}
+
+// StatefulSetStatus represents the current state of a StatefulSet.
+message StatefulSetStatus {
+  // observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the
+  // StatefulSet's generation, which is updated on mutation by the API Server.
+  // +optional
+  optional int64 observedGeneration = 1;
+
+  // replicas is the number of Pods created by the StatefulSet controller.
+  optional int32 replicas = 2;
+
+  // readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition.
+  optional int32 readyReplicas = 3;
+
+  // currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version
+  // indicated by currentRevision.
+  optional int32 currentReplicas = 4;
+
+  // updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version
+  // indicated by updateRevision.
+  optional int32 updatedReplicas = 5;
+
+  // currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the
+  // sequence [0,currentReplicas).
+  optional string currentRevision = 6;
+
+  // updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence
+  // [replicas-updatedReplicas,replicas).
+  optional string updateRevision = 7;
+
+  // collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller
+  // uses this field as a collision avoidance mechanism when it needs to create the name for the
+  // newest ControllerRevision.
+  // +optional
+  optional int32 collisionCount = 9;
+
+  // Represents the latest available observations of a statefulset's current state.
+  // +optional
+  // +patchMergeKey=type
+  // +patchStrategy=merge
+  repeated StatefulSetCondition conditions = 10;
+}
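
To make the two half-open sequences above concrete (the counts are illustrative): with replicas=5, currentReplicas=3 and updatedReplicas=2, pods 0 through 2 fall in [0,3) and were generated from currentRevision, while pods 3 and 4 fall in [5-2,5) = [3,5) and were generated from updateRevision. Once the rollout completes, the controller sets currentRevision equal to updateRevision and the two sequences coincide.
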
+
+// StatefulSetUpdateStrategy indicates the strategy that the StatefulSet
+// controller will use to perform updates. It includes any additional parameters
+// necessary to perform the update for the indicated strategy.
+message StatefulSetUpdateStrategy {
+  // Type indicates the type of the StatefulSetUpdateStrategy.
+  optional string type = 1;
+
+  // RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType.
+  optional RollingUpdateStatefulSetStrategy rollingUpdate = 2;
+}
+
diff --git a/vendor/k8s.io/api/apps/v1beta1/register.go b/vendor/k8s.io/api/apps/v1beta1/register.go
new file mode 100644
index 0000000..5b16819
--- /dev/null
+++ b/vendor/k8s.io/api/apps/v1beta1/register.go
@@ -0,0 +1,58 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name use in this package
+const GroupName = "apps"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
+	// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+	SchemeBuilder      = runtime.NewSchemeBuilder(addKnownTypes)
+	localSchemeBuilder = &SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&Deployment{},
+		&DeploymentList{},
+		&DeploymentRollback{},
+		&Scale{},
+		&StatefulSet{},
+		&StatefulSetList{},
+		&ControllerRevision{},
+		&ControllerRevisionList{},
+	)
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
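
A sketch of how a consumer wires these registrations into a scheme; the kind lookup at the end is illustrative, showing that addKnownTypes keyed the types by the apps/v1beta1 GroupVersion:

    package main

    import (
        "fmt"

        appsv1beta1 "k8s.io/api/apps/v1beta1"
        "k8s.io/apimachinery/pkg/runtime"
    )

    func main() {
        scheme := runtime.NewScheme()

        // AddToScheme registers the eight types listed in addKnownTypes.
        if err := appsv1beta1.AddToScheme(scheme); err != nil {
            panic(err)
        }

        gvks, _, err := scheme.ObjectKinds(&appsv1beta1.StatefulSet{})
        if err != nil {
            panic(err)
        }
        fmt.Println(gvks[0]) // prints: apps/v1beta1, Kind=StatefulSet
    }
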
diff --git a/vendor/k8s.io/api/apps/v1beta1/types.go b/vendor/k8s.io/api/apps/v1beta1/types.go
new file mode 100644
index 0000000..326902f
--- /dev/null
+++ b/vendor/k8s.io/api/apps/v1beta1/types.go
@@ -0,0 +1,567 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	"k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/intstr"
+)
+
+const (
+	ControllerRevisionHashLabelKey = "controller-revision-hash"
+	StatefulSetRevisionLabel       = ControllerRevisionHashLabelKey
+	StatefulSetPodNameLabel        = "statefulset.kubernetes.io/pod-name"
+)
+
+// ScaleSpec describes the attributes of a scale subresource
+type ScaleSpec struct {
+	// desired number of instances for the scaled object.
+	// +optional
+	Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
+}
+
+// ScaleStatus represents the current status of a scale subresource.
+type ScaleStatus struct {
+	// actual number of observed instances of the scaled object.
+	Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"`
+
+	// label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors
+	// +optional
+	Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"`
+
+	// label selector for pods that should match the replicas count. This is a serialized
+	// version of both map-based and more expressive set-based selectors. This is done to
+	// avoid introspection in the clients. The string will be in the same format as the
+	// query-param syntax. If the target type only supports map-based selectors, both this
+	// field and map-based selector field are populated.
+	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+	// +optional
+	TargetSelector string `json:"targetSelector,omitempty" protobuf:"bytes,3,opt,name=targetSelector"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Scale represents a scaling request for a resource.
+type Scale struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.
+	// +optional
+	Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+	// current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only.
+	// +optional
+	Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// DEPRECATED - This group version of StatefulSet is deprecated by apps/v1beta2/StatefulSet. See the release notes for
+// more information.
+// StatefulSet represents a set of pods with consistent identities.
+// Identities are defined as:
+//  - Network: A single stable DNS and hostname.
+//  - Storage: As many VolumeClaims as requested.
+// The StatefulSet guarantees that a given network identity will always
+// map to the same storage identity.
+type StatefulSet struct {
+	metav1.TypeMeta `json:",inline"`
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Spec defines the desired identities of pods in this set.
+	// +optional
+	Spec StatefulSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+	// Status is the current status of Pods in this StatefulSet. This data
+	// may be out of date by some window of time.
+	// +optional
+	Status StatefulSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// PodManagementPolicyType defines the policy for creating pods under a stateful set.
+type PodManagementPolicyType string
+
+const (
+	// OrderedReadyPodManagement will create pods in strictly increasing order on
+	// scale up and strictly decreasing order on scale down, progressing only when
+	// the previous pod is ready or terminated. At most one pod will be changed
+	// at any time.
+	OrderedReadyPodManagement PodManagementPolicyType = "OrderedReady"
+	// ParallelPodManagement will create and delete pods as soon as the stateful set
+	// replica count is changed, and will not wait for pods to be ready or complete
+	// termination.
+	ParallelPodManagement = "Parallel"
+)
+
+// StatefulSetUpdateStrategy indicates the strategy that the StatefulSet
+// controller will use to perform updates. It includes any additional parameters
+// necessary to perform the update for the indicated strategy.
+type StatefulSetUpdateStrategy struct {
+	// Type indicates the type of the StatefulSetUpdateStrategy.
+	Type StatefulSetUpdateStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=StatefulSetStrategyType"`
+	// RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType.
+	RollingUpdate *RollingUpdateStatefulSetStrategy `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"`
+}
+
+// StatefulSetUpdateStrategyType is a string enumeration type that enumerates
+// all possible update strategies for the StatefulSet controller.
+type StatefulSetUpdateStrategyType string
+
+const (
+	// RollingUpdateStatefulSetStrategyType indicates that update will be
+	// applied to all Pods in the StatefulSet with respect to the StatefulSet
+	// ordering constraints. When a scale operation is performed with this
+	// strategy, new Pods will be created from the specification version indicated
+	// by the StatefulSet's updateRevision.
+	RollingUpdateStatefulSetStrategyType = "RollingUpdate"
+	// OnDeleteStatefulSetStrategyType triggers the legacy behavior. Version
+	// tracking and ordered rolling restarts are disabled. Pods are recreated
+	// from the StatefulSetSpec when they are manually deleted. When a scale
+	// operation is performed with this strategy, new Pods will be created from
+	// the specification version indicated by the StatefulSet's currentRevision.
+	OnDeleteStatefulSetStrategyType = "OnDelete"
+)
+
+// RollingUpdateStatefulSetStrategy is used to communicate parameters for RollingUpdateStatefulSetStrategyType.
+type RollingUpdateStatefulSetStrategy struct {
+	// Partition indicates the ordinal at which the StatefulSet should be
+	// partitioned.
+	Partition *int32 `json:"partition,omitempty" protobuf:"varint,1,opt,name=partition"`
+}
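
A minimal sketch of populating this strategy pair, assuming the usual partition semantics (pods with an ordinal greater than or equal to Partition are moved to updateRevision during a rolling update; lower ordinals stay on currentRevision until the partition is lowered):

    package main

    import (
        appsv1beta1 "k8s.io/api/apps/v1beta1"
    )

    func main() {
        // With Partition = 3 and five replicas, only pods 3 and 4 are
        // updated; pods 0-2 keep the current revision.
        partition := int32(3)
        _ = appsv1beta1.StatefulSetUpdateStrategy{
            Type: appsv1beta1.RollingUpdateStatefulSetStrategyType,
            RollingUpdate: &appsv1beta1.RollingUpdateStatefulSetStrategy{
                Partition: &partition,
            },
        }

        // The alternative disables rolling updates entirely.
        _ = appsv1beta1.StatefulSetUpdateStrategy{
            Type: appsv1beta1.OnDeleteStatefulSetStrategyType,
        }
    }
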
+
+// A StatefulSetSpec is the specification of a StatefulSet.
+type StatefulSetSpec struct {
+	// replicas is the desired number of replicas of the given Template.
+	// These are replicas in the sense that they are instantiations of the
+	// same Template, but individual replicas also have a consistent identity.
+	// If unspecified, defaults to 1.
+	// TODO: Consider a rename of this field.
+	// +optional
+	Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
+
+	// selector is a label query over pods that should match the replica count.
+	// If empty, defaulted to labels on the pod template.
+	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+	// +optional
+	Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"`
+
+	// template is the object that describes the pod that will be created if
+	// insufficient replicas are detected. Each pod stamped out by the StatefulSet
+	// will fulfill this Template, but have a unique identity from the rest
+	// of the StatefulSet.
+	Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"`
+
+	// volumeClaimTemplates is a list of claims that pods are allowed to reference.
+	// The StatefulSet controller is responsible for mapping network identities to
+	// claims in a way that maintains the identity of a pod. Every claim in
+	// this list must have at least one matching (by name) volumeMount in one
+	// container in the template. A claim in this list takes precedence over
+	// any volumes in the template, with the same name.
+	// TODO: Define the behavior if a claim already exists with the same name.
+	// +optional
+	VolumeClaimTemplates []v1.PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty" protobuf:"bytes,4,rep,name=volumeClaimTemplates"`
+
+	// serviceName is the name of the service that governs this StatefulSet.
+	// This service must exist before the StatefulSet, and is responsible for
+	// the network identity of the set. Pods get DNS/hostnames that follow the
+	// pattern: pod-specific-string.serviceName.default.svc.cluster.local
+	// where "pod-specific-string" is managed by the StatefulSet controller.
+	ServiceName string `json:"serviceName" protobuf:"bytes,5,opt,name=serviceName"`
+
+	// podManagementPolicy controls how pods are created during initial scale up,
+	// when replacing pods on nodes, or when scaling down. The default policy is
+	// `OrderedReady`, where pods are created in increasing order (pod-0, then
+	// pod-1, etc) and the controller will wait until each pod is ready before
+	// continuing. When scaling down, the pods are removed in the opposite order.
+	// The alternative policy is `Parallel` which will create pods in parallel
+	// to match the desired scale without waiting, and on scale down will delete
+	// all pods at once.
+	// +optional
+	PodManagementPolicy PodManagementPolicyType `json:"podManagementPolicy,omitempty" protobuf:"bytes,6,opt,name=podManagementPolicy,casttype=PodManagementPolicyType"`
+
+	// updateStrategy indicates the StatefulSetUpdateStrategy that will be
+	// employed to update Pods in the StatefulSet when a revision is made to
+	// Template.
+	UpdateStrategy StatefulSetUpdateStrategy `json:"updateStrategy,omitempty" protobuf:"bytes,7,opt,name=updateStrategy"`
+
+	// revisionHistoryLimit is the maximum number of revisions that will
+	// be maintained in the StatefulSet's revision history. The revision history
+	// consists of all revisions not represented by a currently applied
+	// StatefulSetSpec version. The default value is 10.
+	RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,8,opt,name=revisionHistoryLimit"`
+}
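+
+// Editorial sketch (not part of the upstream file): a minimal, hypothetical
+// StatefulSetSpec combining the fields above. Replicas is a *int32 so an
+// explicit 0 can be distinguished from "unset"; the names "web" and
+// "web-headless" are invented for illustration.
+func exampleStatefulSetSpec() StatefulSetSpec {
+	replicas := int32(3)
+	return StatefulSetSpec{
+		Replicas:    &replicas,
+		ServiceName: "web-headless", // governing headless Service; must exist before the set
+		Selector: &metav1.LabelSelector{
+			MatchLabels: map[string]string{"app": "web"},
+		},
+		Template: v1.PodTemplateSpec{
+			ObjectMeta: metav1.ObjectMeta{
+				Labels: map[string]string{"app": "web"},
+			},
+		},
+		PodManagementPolicy: "Parallel", // alternative to the default OrderedReady
+	}
+}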
+
+// StatefulSetStatus represents the current state of a StatefulSet.
+type StatefulSetStatus struct {
+	// observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the
+	// StatefulSet's generation, which is updated on mutation by the API Server.
+	// +optional
+	ObservedGeneration *int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"`
+
+	// replicas is the number of Pods created by the StatefulSet controller.
+	Replicas int32 `json:"replicas" protobuf:"varint,2,opt,name=replicas"`
+
+	// readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition.
+	ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,3,opt,name=readyReplicas"`
+
+	// currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version
+	// indicated by currentRevision.
+	CurrentReplicas int32 `json:"currentReplicas,omitempty" protobuf:"varint,4,opt,name=currentReplicas"`
+
+	// updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version
+	// indicated by updateRevision.
+	UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,5,opt,name=updatedReplicas"`
+
+	// currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the
+	// sequence [0,currentReplicas).
+	CurrentRevision string `json:"currentRevision,omitempty" protobuf:"bytes,6,opt,name=currentRevision"`
+
+	// updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence
+	// [replicas-updatedReplicas,replicas)
+	UpdateRevision string `json:"updateRevision,omitempty" protobuf:"bytes,7,opt,name=updateRevision"`
+
+	// collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller
+	// uses this field as a collision avoidance mechanism when it needs to create the name for the
+	// newest ControllerRevision.
+	// +optional
+	CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,9,opt,name=collisionCount"`
+
+	// Represents the latest available observations of a statefulset's current state.
+	// +optional
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	Conditions []StatefulSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"`
+}
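+
+// Editorial sketch (not part of the upstream file): one way a client might
+// combine the fields above to decide whether a rolling update has finished.
+// This reflects typical usage, not an API guarantee.
+func statefulSetUpdateComplete(spec StatefulSetSpec, status StatefulSetStatus) bool {
+	if spec.Replicas != nil && status.UpdatedReplicas != *spec.Replicas {
+		return false // some Pods were not yet created from updateRevision
+	}
+	// On completion the controller reports currentRevision == updateRevision.
+	return status.CurrentRevision == status.UpdateRevision
+}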
+
+type StatefulSetConditionType string
+
+// StatefulSetCondition describes the state of a statefulset at a certain point.
+type StatefulSetCondition struct {
+	// Type of statefulset condition.
+	Type StatefulSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=StatefulSetConditionType"`
+	// Status of the condition, one of True, False, Unknown.
+	Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"`
+	// Last time the condition transitioned from one status to another.
+	// +optional
+	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
+	// The reason for the condition's last transition.
+	// +optional
+	Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
+	// A human readable message indicating details about the transition.
+	// +optional
+	Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// StatefulSetList is a collection of StatefulSets.
+type StatefulSetList struct {
+	metav1.TypeMeta `json:",inline"`
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+	Items           []StatefulSet `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// DEPRECATED - This group version of Deployment is deprecated by apps/v1beta2/Deployment. See the release notes for
+// more information.
+// Deployment enables declarative updates for Pods and ReplicaSets.
+type Deployment struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object metadata.
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Specification of the desired behavior of the Deployment.
+	// +optional
+	Spec DeploymentSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+	// Most recently observed status of the Deployment.
+	// +optional
+	Status DeploymentStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// DeploymentSpec is the specification of the desired behavior of the Deployment.
+type DeploymentSpec struct {
+	// Number of desired pods. This is a pointer to distinguish between explicit
+	// zero and not specified. Defaults to 1.
+	// +optional
+	Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
+
+	// Label selector for pods. Existing ReplicaSets whose pods are
+	// selected by this will be the ones affected by this deployment.
+	// +optional
+	Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"`
+
+	// Template describes the pods that will be created.
+	Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"`
+
+	// The deployment strategy to use to replace existing pods with new ones.
+	// +optional
+	// +patchStrategy=retainKeys
+	Strategy DeploymentStrategy `json:"strategy,omitempty" patchStrategy:"retainKeys" protobuf:"bytes,4,opt,name=strategy"`
+
+	// Minimum number of seconds for which a newly created pod should be ready
+	// without any of its container crashing, for it to be considered available.
+	// Defaults to 0 (pod will be considered available as soon as it is ready)
+	// +optional
+	MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,5,opt,name=minReadySeconds"`
+
+	// The number of old ReplicaSets to retain to allow rollback.
+	// This is a pointer to distinguish between explicit zero and not specified.
+	// Defaults to 2.
+	// +optional
+	RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"`
+
+	// Indicates that the deployment is paused.
+	// +optional
+	Paused bool `json:"paused,omitempty" protobuf:"varint,7,opt,name=paused"`
+
+	// DEPRECATED.
+	// The config this deployment is rolling back to. Will be cleared after rollback is done.
+	// +optional
+	RollbackTo *RollbackConfig `json:"rollbackTo,omitempty" protobuf:"bytes,8,opt,name=rollbackTo"`
+
+	// The maximum time in seconds for a deployment to make progress before it
+	// is considered to be failed. The deployment controller will continue to
+	// process failed deployments and a condition with a ProgressDeadlineExceeded
+	// reason will be surfaced in the deployment status. Note that progress will
+	// not be estimated during the time a deployment is paused. Defaults to 600s.
+	// +optional
+	ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty" protobuf:"varint,9,opt,name=progressDeadlineSeconds"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// DEPRECATED.
+// DeploymentRollback stores the information required to roll back a deployment.
+type DeploymentRollback struct {
+	metav1.TypeMeta `json:",inline"`
+	// Required: This must match the Name of a deployment.
+	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+	// The annotations to be updated to a deployment
+	// +optional
+	UpdatedAnnotations map[string]string `json:"updatedAnnotations,omitempty" protobuf:"bytes,2,rep,name=updatedAnnotations"`
+	// The config of this deployment rollback.
+	RollbackTo RollbackConfig `json:"rollbackTo" protobuf:"bytes,3,opt,name=rollbackTo"`
+}
+
+// DEPRECATED.
+type RollbackConfig struct {
+	// The revision to roll back to. If set to 0, roll back to the last revision.
+	// +optional
+	Revision int64 `json:"revision,omitempty" protobuf:"varint,1,opt,name=revision"`
+}
+
+const (
+	// DefaultDeploymentUniqueLabelKey is the default key of the selector that is added
+	// to existing ReplicaSets (and the label key that is added to their pods) to prevent the existing ReplicaSets
+	// from selecting new pods (and old pods from being selected by the new ReplicaSet).
+	DefaultDeploymentUniqueLabelKey string = "pod-template-hash"
+)
+
+// DeploymentStrategy describes how to replace existing pods with new ones.
+type DeploymentStrategy struct {
+	// Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate.
+	// +optional
+	Type DeploymentStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=DeploymentStrategyType"`
+
+	// Rolling update config params. Present only if DeploymentStrategyType =
+	// RollingUpdate.
+	//---
+	// TODO: Update this to follow our convention for oneOf, whatever we decide it
+	// to be.
+	// +optional
+	RollingUpdate *RollingUpdateDeployment `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"`
+}
+
+type DeploymentStrategyType string
+
+const (
+	// Kill all existing pods before creating new ones.
+	RecreateDeploymentStrategyType DeploymentStrategyType = "Recreate"
+
+	// Replace the old ReplicaSets with a new one using a rolling update, i.e. gradually scale down the old ReplicaSets and scale up the new one.
+	RollingUpdateDeploymentStrategyType DeploymentStrategyType = "RollingUpdate"
+)
+
+// Spec to control the desired behavior of rolling update.
+type RollingUpdateDeployment struct {
+	// The maximum number of pods that can be unavailable during the update.
+	// Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+	// Absolute number is calculated from percentage by rounding down.
+	// This can not be 0 if MaxSurge is 0.
+	// Defaults to 25%.
+	// Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods
+	// immediately when the rolling update starts. Once new pods are ready, old ReplicaSet
+	// can be scaled down further, followed by scaling up the new ReplicaSet, ensuring
+	// that the total number of pods available at all times during the update is at
+	// least 70% of desired pods.
+	// +optional
+	MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"`
+
+	// The maximum number of pods that can be scheduled above the desired number of
+	// pods.
+	// Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+	// This can not be 0 if MaxUnavailable is 0.
+	// Absolute number is calculated from percentage by rounding up.
+	// Defaults to 25%.
+	// Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when
+	// the rolling update starts, such that the total number of old and new pods does not exceed
+	// 130% of desired pods. Once old pods have been killed,
+	// new ReplicaSet can be scaled up further, ensuring that total number of pods running
+	// at any time during the update is at most 130% of desired pods.
+	// +optional
+	MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"`
+}
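+
+// Editorial sketch (not part of the upstream file): both fields accept an
+// absolute count or a percentage through intstr.IntOrString, e.g.:
+func exampleRollingUpdateDeployment() RollingUpdateDeployment {
+	maxUnavailable := intstr.FromInt(1)  // absolute number of pods
+	maxSurge := intstr.FromString("25%") // percentage of desired pods
+	return RollingUpdateDeployment{
+		MaxUnavailable: &maxUnavailable,
+		MaxSurge:       &maxSurge,
+	}
+}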
+
+// DeploymentStatus is the most recently observed status of the Deployment.
+type DeploymentStatus struct {
+	// The generation observed by the deployment controller.
+	// +optional
+	ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"`
+
+	// Total number of non-terminated pods targeted by this deployment (their labels match the selector).
+	// +optional
+	Replicas int32 `json:"replicas,omitempty" protobuf:"varint,2,opt,name=replicas"`
+
+	// Total number of non-terminated pods targeted by this deployment that have the desired template spec.
+	// +optional
+	UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,3,opt,name=updatedReplicas"`
+
+	// Total number of ready pods targeted by this deployment.
+	// +optional
+	ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,7,opt,name=readyReplicas"`
+
+	// Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.
+	// +optional
+	AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,4,opt,name=availableReplicas"`
+
+	// Total number of unavailable pods targeted by this deployment. This is the total number of
+	// pods that are still required for the deployment to have 100% available capacity. They may
+	// either be pods that are running but not yet available or pods that still have not been created.
+	// +optional
+	UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,5,opt,name=unavailableReplicas"`
+
+	// Represents the latest available observations of a deployment's current state.
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	Conditions []DeploymentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"`
+
+	// Count of hash collisions for the Deployment. The Deployment controller uses this
+	// field as a collision avoidance mechanism when it needs to create the name for the
+	// newest ReplicaSet.
+	// +optional
+	CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,8,opt,name=collisionCount"`
+}
+
+type DeploymentConditionType string
+
+// These are valid conditions of a deployment.
+const (
+	// Available means the deployment is available, i.e. at least the minimum available
+	// replicas required are up and running for at least minReadySeconds.
+	DeploymentAvailable DeploymentConditionType = "Available"
+	// Progressing means the deployment is progressing. Progress for a deployment is
+	// considered when a new replica set is created or adopted, and when new pods scale
+	// up or old pods scale down. Progress is not estimated for paused deployments or
+	// when progressDeadlineSeconds is not specified.
+	DeploymentProgressing DeploymentConditionType = "Progressing"
+	// ReplicaFailure is added in a deployment when one of its pods fails to be created
+	// or deleted.
+	DeploymentReplicaFailure DeploymentConditionType = "ReplicaFailure"
+)
+
+// DeploymentCondition describes the state of a deployment at a certain point.
+type DeploymentCondition struct {
+	// Type of deployment condition.
+	Type DeploymentConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DeploymentConditionType"`
+	// Status of the condition, one of True, False, Unknown.
+	Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"`
+	// The last time this condition was updated.
+	LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,6,opt,name=lastUpdateTime"`
+	// Last time the condition transitioned from one status to another.
+	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,7,opt,name=lastTransitionTime"`
+	// The reason for the condition's last transition.
+	Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
+	// A human readable message indicating details about the transition.
+	Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
+}
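+
+// Editorial sketch (not part of the upstream file): a typical client-side
+// helper for reading the Conditions slice of a DeploymentStatus.
+func findDeploymentCondition(status DeploymentStatus, t DeploymentConditionType) *DeploymentCondition {
+	for i := range status.Conditions {
+		if status.Conditions[i].Type == t {
+			return &status.Conditions[i]
+		}
+	}
+	return nil
+}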
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// DeploymentList is a list of Deployments.
+type DeploymentList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is the list of Deployments.
+	Items []Deployment `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1beta2/ControllerRevision. See the
+// release notes for more information.
+// ControllerRevision implements an immutable snapshot of state data. Clients
+// are responsible for serializing and deserializing the objects that contain
+// their internal state.
+// Once a ControllerRevision has been successfully created, it can not be updated.
+// The API Server will fail validation of all requests that attempt to mutate
+// the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both
+// the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However,
+// it may be subject to name and representation changes in future releases, and clients should not
+// depend on its stability. It is primarily for internal use by controllers.
+type ControllerRevision struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Data is the serialized representation of the state.
+	Data runtime.RawExtension `json:"data,omitempty" protobuf:"bytes,2,opt,name=data"`
+
+	// Revision indicates the revision of the state represented by Data.
+	Revision int64 `json:"revision" protobuf:"varint,3,opt,name=revision"`
+}
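+
+// Editorial sketch (not part of the upstream file): Data is an opaque
+// runtime.RawExtension, so a controller stores a pre-serialized (for
+// example JSON) snapshot and must decode it itself on rollback:
+func exampleControllerRevision(snapshot []byte, revision int64) ControllerRevision {
+	return ControllerRevision{
+		Data:     runtime.RawExtension{Raw: snapshot}, // opaque serialized state
+		Revision: revision,
+	}
+}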
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ControllerRevisionList is a resource containing a list of ControllerRevision objects.
+type ControllerRevisionList struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is the list of ControllerRevisions
+	Items []ControllerRevision `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
diff --git a/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go
new file mode 100644
index 0000000..68ebef3
--- /dev/null
+++ b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go
@@ -0,0 +1,273 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_ControllerRevision = map[string]string{
+	"":         "DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1beta2/ControllerRevision. See the release notes for more information. ControllerRevision implements an immutable snapshot of state data. Clients are responsible for serializing and deserializing the objects that contain their internal state. Once a ControllerRevision has been successfully created, it can not be updated. The API Server will fail validation of all requests that attempt to mutate the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, it may be subject to name and representation changes in future releases, and clients should not depend on its stability. It is primarily for internal use by controllers.",
+	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"data":     "Data is the serialized representation of the state.",
+	"revision": "Revision indicates the revision of the state represented by Data.",
+}
+
+func (ControllerRevision) SwaggerDoc() map[string]string {
+	return map_ControllerRevision
+}
+
+var map_ControllerRevisionList = map[string]string{
+	"":         "ControllerRevisionList is a resource containing a list of ControllerRevision objects.",
+	"metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"items":    "Items is the list of ControllerRevisions",
+}
+
+func (ControllerRevisionList) SwaggerDoc() map[string]string {
+	return map_ControllerRevisionList
+}
+
+var map_Deployment = map[string]string{
+	"":         "DEPRECATED - This group version of Deployment is deprecated by apps/v1beta2/Deployment. See the release notes for more information. Deployment enables declarative updates for Pods and ReplicaSets.",
+	"metadata": "Standard object metadata.",
+	"spec":     "Specification of the desired behavior of the Deployment.",
+	"status":   "Most recently observed status of the Deployment.",
+}
+
+func (Deployment) SwaggerDoc() map[string]string {
+	return map_Deployment
+}
+
+var map_DeploymentCondition = map[string]string{
+	"":                   "DeploymentCondition describes the state of a deployment at a certain point.",
+	"type":               "Type of deployment condition.",
+	"status":             "Status of the condition, one of True, False, Unknown.",
+	"lastUpdateTime":     "The last time this condition was updated.",
+	"lastTransitionTime": "Last time the condition transitioned from one status to another.",
+	"reason":             "The reason for the condition's last transition.",
+	"message":            "A human readable message indicating details about the transition.",
+}
+
+func (DeploymentCondition) SwaggerDoc() map[string]string {
+	return map_DeploymentCondition
+}
+
+var map_DeploymentList = map[string]string{
+	"":         "DeploymentList is a list of Deployments.",
+	"metadata": "Standard list metadata.",
+	"items":    "Items is the list of Deployments.",
+}
+
+func (DeploymentList) SwaggerDoc() map[string]string {
+	return map_DeploymentList
+}
+
+var map_DeploymentRollback = map[string]string{
+	"":                   "DEPRECATED. DeploymentRollback stores the information required to rollback a deployment.",
+	"name":               "Required: This must match the Name of a deployment.",
+	"updatedAnnotations": "The annotations to be updated to a deployment",
+	"rollbackTo":         "The config of this deployment rollback.",
+}
+
+func (DeploymentRollback) SwaggerDoc() map[string]string {
+	return map_DeploymentRollback
+}
+
+var map_DeploymentSpec = map[string]string{
+	"":                        "DeploymentSpec is the specification of the desired behavior of the Deployment.",
+	"replicas":                "Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.",
+	"selector":                "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment.",
+	"template":                "Template describes the pods that will be created.",
+	"strategy":                "The deployment strategy to use to replace existing pods with new ones.",
+	"minReadySeconds":         "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)",
+	"revisionHistoryLimit":    "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 2.",
+	"paused":                  "Indicates that the deployment is paused.",
+	"rollbackTo":              "DEPRECATED. The config this deployment is rolling back to. Will be cleared after rollback is done.",
+	"progressDeadlineSeconds": "The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. Defaults to 600s.",
+}
+
+func (DeploymentSpec) SwaggerDoc() map[string]string {
+	return map_DeploymentSpec
+}
+
+var map_DeploymentStatus = map[string]string{
+	"":                    "DeploymentStatus is the most recently observed status of the Deployment.",
+	"observedGeneration":  "The generation observed by the deployment controller.",
+	"replicas":            "Total number of non-terminated pods targeted by this deployment (their labels match the selector).",
+	"updatedReplicas":     "Total number of non-terminated pods targeted by this deployment that have the desired template spec.",
+	"readyReplicas":       "Total number of ready pods targeted by this deployment.",
+	"availableReplicas":   "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.",
+	"unavailableReplicas": "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.",
+	"conditions":          "Represents the latest available observations of a deployment's current state.",
+	"collisionCount":      "Count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.",
+}
+
+func (DeploymentStatus) SwaggerDoc() map[string]string {
+	return map_DeploymentStatus
+}
+
+var map_DeploymentStrategy = map[string]string{
+	"":              "DeploymentStrategy describes how to replace existing pods with new ones.",
+	"type":          "Type of deployment. Can be \"Recreate\" or \"RollingUpdate\". Default is RollingUpdate.",
+	"rollingUpdate": "Rolling update config params. Present only if DeploymentStrategyType = RollingUpdate.",
+}
+
+func (DeploymentStrategy) SwaggerDoc() map[string]string {
+	return map_DeploymentStrategy
+}
+
+var map_RollbackConfig = map[string]string{
+	"":         "DEPRECATED.",
+	"revision": "The revision to rollback to. If set to 0, rollback to the last revision.",
+}
+
+func (RollbackConfig) SwaggerDoc() map[string]string {
+	return map_RollbackConfig
+}
+
+var map_RollingUpdateDeployment = map[string]string{
+	"":               "Spec to control the desired behavior of rolling update.",
+	"maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. Defaults to 25%. Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old ReplicaSet can be scaled down further, followed by scaling up the new ReplicaSet, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.",
+	"maxSurge":       "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new ReplicaSet can be scaled up further, ensuring that total number of pods running at any time during the update is atmost 130% of desired pods.",
+}
+
+func (RollingUpdateDeployment) SwaggerDoc() map[string]string {
+	return map_RollingUpdateDeployment
+}
+
+var map_RollingUpdateStatefulSetStrategy = map[string]string{
+	"":          "RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.",
+	"partition": "Partition indicates the ordinal at which the StatefulSet should be partitioned.",
+}
+
+func (RollingUpdateStatefulSetStrategy) SwaggerDoc() map[string]string {
+	return map_RollingUpdateStatefulSetStrategy
+}
+
+var map_Scale = map[string]string{
+	"":         "Scale represents a scaling request for a resource.",
+	"metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.",
+	"spec":     "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.",
+	"status":   "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only.",
+}
+
+func (Scale) SwaggerDoc() map[string]string {
+	return map_Scale
+}
+
+var map_ScaleSpec = map[string]string{
+	"":         "ScaleSpec describes the attributes of a scale subresource",
+	"replicas": "desired number of instances for the scaled object.",
+}
+
+func (ScaleSpec) SwaggerDoc() map[string]string {
+	return map_ScaleSpec
+}
+
+var map_ScaleStatus = map[string]string{
+	"":               "ScaleStatus represents the current status of a scale subresource.",
+	"replicas":       "actual number of observed instances of the scaled object.",
+	"selector":       "label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors",
+	"targetSelector": "label selector for pods that should match the replicas count. This is a serializated version of both map-based and more expressive set-based selectors. This is done to avoid introspection in the clients. The string will be in the same format as the query-param syntax. If the target type only supports map-based selectors, both this field and map-based selector field are populated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
+}
+
+func (ScaleStatus) SwaggerDoc() map[string]string {
+	return map_ScaleStatus
+}
+
+var map_StatefulSet = map[string]string{
+	"":       "DEPRECATED - This group version of StatefulSet is deprecated by apps/v1beta2/StatefulSet. See the release notes for more information. StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.",
+	"spec":   "Spec defines the desired identities of pods in this set.",
+	"status": "Status is the current status of Pods in this StatefulSet. This data may be out of date by some window of time.",
+}
+
+func (StatefulSet) SwaggerDoc() map[string]string {
+	return map_StatefulSet
+}
+
+var map_StatefulSetCondition = map[string]string{
+	"":                   "StatefulSetCondition describes the state of a statefulset at a certain point.",
+	"type":               "Type of statefulset condition.",
+	"status":             "Status of the condition, one of True, False, Unknown.",
+	"lastTransitionTime": "Last time the condition transitioned from one status to another.",
+	"reason":             "The reason for the condition's last transition.",
+	"message":            "A human readable message indicating details about the transition.",
+}
+
+func (StatefulSetCondition) SwaggerDoc() map[string]string {
+	return map_StatefulSetCondition
+}
+
+var map_StatefulSetList = map[string]string{
+	"": "StatefulSetList is a collection of StatefulSets.",
+}
+
+func (StatefulSetList) SwaggerDoc() map[string]string {
+	return map_StatefulSetList
+}
+
+var map_StatefulSetSpec = map[string]string{
+	"":                     "A StatefulSetSpec is the specification of a StatefulSet.",
+	"replicas":             "replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.",
+	"selector":             "selector is a label query over pods that should match the replica count. If empty, defaulted to labels on the pod template. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
+	"template":             "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet.",
+	"volumeClaimTemplates": "volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.",
+	"serviceName":          "serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.",
+	"podManagementPolicy":  "podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.",
+	"updateStrategy":       "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.",
+	"revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.",
+}
+
+func (StatefulSetSpec) SwaggerDoc() map[string]string {
+	return map_StatefulSetSpec
+}
+
+var map_StatefulSetStatus = map[string]string{
+	"":                   "StatefulSetStatus represents the current state of a StatefulSet.",
+	"observedGeneration": "observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the StatefulSet's generation, which is updated on mutation by the API Server.",
+	"replicas":           "replicas is the number of Pods created by the StatefulSet controller.",
+	"readyReplicas":      "readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition.",
+	"currentReplicas":    "currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by currentRevision.",
+	"updatedReplicas":    "updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by updateRevision.",
+	"currentRevision":    "currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas).",
+	"updateRevision":     "updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas)",
+	"collisionCount":     "collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.",
+	"conditions":         "Represents the latest available observations of a statefulset's current state.",
+}
+
+func (StatefulSetStatus) SwaggerDoc() map[string]string {
+	return map_StatefulSetStatus
+}
+
+var map_StatefulSetUpdateStrategy = map[string]string{
+	"":              "StatefulSetUpdateStrategy indicates the strategy that the StatefulSet controller will use to perform updates. It includes any additional parameters necessary to perform the update for the indicated strategy.",
+	"type":          "Type indicates the type of the StatefulSetUpdateStrategy.",
+	"rollingUpdate": "RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType.",
+}
+
+func (StatefulSetUpdateStrategy) SwaggerDoc() map[string]string {
+	return map_StatefulSetUpdateStrategy
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
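+
+// Editorial sketch (not part of the upstream file): consumers such as
+// go-restful read these maps via the SwaggerDoc methods; the empty key holds
+// the type-level description and each field key holds that field's doc text.
+func exampleSwaggerLookup() (typeDoc, fieldDoc string) {
+	docs := StatefulSetSpec{}.SwaggerDoc()
+	return docs[""], docs["replicas"]
+}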
diff --git a/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..93892bf
--- /dev/null
+++ b/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go
@@ -0,0 +1,594 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	corev1 "k8s.io/api/core/v1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	intstr "k8s.io/apimachinery/pkg/util/intstr"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControllerRevision) DeepCopyInto(out *ControllerRevision) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Data.DeepCopyInto(&out.Data)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRevision.
+func (in *ControllerRevision) DeepCopy() *ControllerRevision {
+	if in == nil {
+		return nil
+	}
+	out := new(ControllerRevision)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ControllerRevision) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControllerRevisionList) DeepCopyInto(out *ControllerRevisionList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]ControllerRevision, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRevisionList.
+func (in *ControllerRevisionList) DeepCopy() *ControllerRevisionList {
+	if in == nil {
+		return nil
+	}
+	out := new(ControllerRevisionList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ControllerRevisionList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Deployment) DeepCopyInto(out *Deployment) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Deployment.
+func (in *Deployment) DeepCopy() *Deployment {
+	if in == nil {
+		return nil
+	}
+	out := new(Deployment)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Deployment) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeploymentCondition) DeepCopyInto(out *DeploymentCondition) {
+	*out = *in
+	in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime)
+	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentCondition.
+func (in *DeploymentCondition) DeepCopy() *DeploymentCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(DeploymentCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeploymentList) DeepCopyInto(out *DeploymentList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Deployment, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentList.
+func (in *DeploymentList) DeepCopy() *DeploymentList {
+	if in == nil {
+		return nil
+	}
+	out := new(DeploymentList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DeploymentList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeploymentRollback) DeepCopyInto(out *DeploymentRollback) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.UpdatedAnnotations != nil {
+		in, out := &in.UpdatedAnnotations, &out.UpdatedAnnotations
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	out.RollbackTo = in.RollbackTo
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentRollback.
+func (in *DeploymentRollback) DeepCopy() *DeploymentRollback {
+	if in == nil {
+		return nil
+	}
+	out := new(DeploymentRollback)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DeploymentRollback) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeploymentSpec) DeepCopyInto(out *DeploymentSpec) {
+	*out = *in
+	if in.Replicas != nil {
+		in, out := &in.Replicas, &out.Replicas
+		*out = new(int32)
+		**out = **in
+	}
+	if in.Selector != nil {
+		in, out := &in.Selector, &out.Selector
+		*out = new(v1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	in.Template.DeepCopyInto(&out.Template)
+	in.Strategy.DeepCopyInto(&out.Strategy)
+	if in.RevisionHistoryLimit != nil {
+		in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit
+		*out = new(int32)
+		**out = **in
+	}
+	if in.RollbackTo != nil {
+		in, out := &in.RollbackTo, &out.RollbackTo
+		*out = new(RollbackConfig)
+		**out = **in
+	}
+	if in.ProgressDeadlineSeconds != nil {
+		in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds
+		*out = new(int32)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentSpec.
+func (in *DeploymentSpec) DeepCopy() *DeploymentSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(DeploymentSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeploymentStatus) DeepCopyInto(out *DeploymentStatus) {
+	*out = *in
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]DeploymentCondition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.CollisionCount != nil {
+		in, out := &in.CollisionCount, &out.CollisionCount
+		*out = new(int32)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStatus.
+func (in *DeploymentStatus) DeepCopy() *DeploymentStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(DeploymentStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeploymentStrategy) DeepCopyInto(out *DeploymentStrategy) {
+	*out = *in
+	if in.RollingUpdate != nil {
+		in, out := &in.RollingUpdate, &out.RollingUpdate
+		*out = new(RollingUpdateDeployment)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStrategy.
+func (in *DeploymentStrategy) DeepCopy() *DeploymentStrategy {
+	if in == nil {
+		return nil
+	}
+	out := new(DeploymentStrategy)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RollbackConfig) DeepCopyInto(out *RollbackConfig) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollbackConfig.
+func (in *RollbackConfig) DeepCopy() *RollbackConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(RollbackConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RollingUpdateDeployment) DeepCopyInto(out *RollingUpdateDeployment) {
+	*out = *in
+	if in.MaxUnavailable != nil {
+		in, out := &in.MaxUnavailable, &out.MaxUnavailable
+		*out = new(intstr.IntOrString)
+		**out = **in
+	}
+	if in.MaxSurge != nil {
+		in, out := &in.MaxSurge, &out.MaxSurge
+		*out = new(intstr.IntOrString)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDeployment.
+func (in *RollingUpdateDeployment) DeepCopy() *RollingUpdateDeployment {
+	if in == nil {
+		return nil
+	}
+	out := new(RollingUpdateDeployment)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RollingUpdateStatefulSetStrategy) DeepCopyInto(out *RollingUpdateStatefulSetStrategy) {
+	*out = *in
+	if in.Partition != nil {
+		in, out := &in.Partition, &out.Partition
+		*out = new(int32)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateStatefulSetStrategy.
+func (in *RollingUpdateStatefulSetStrategy) DeepCopy() *RollingUpdateStatefulSetStrategy {
+	if in == nil {
+		return nil
+	}
+	out := new(RollingUpdateStatefulSetStrategy)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Scale) DeepCopyInto(out *Scale) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	out.Spec = in.Spec
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scale.
+func (in *Scale) DeepCopy() *Scale {
+	if in == nil {
+		return nil
+	}
+	out := new(Scale)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Scale) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ScaleSpec) DeepCopyInto(out *ScaleSpec) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleSpec.
+func (in *ScaleSpec) DeepCopy() *ScaleSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ScaleSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ScaleStatus) DeepCopyInto(out *ScaleStatus) {
+	*out = *in
+	if in.Selector != nil {
+		in, out := &in.Selector, &out.Selector
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleStatus.
+func (in *ScaleStatus) DeepCopy() *ScaleStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ScaleStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StatefulSet) DeepCopyInto(out *StatefulSet) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSet.
+func (in *StatefulSet) DeepCopy() *StatefulSet {
+	if in == nil {
+		return nil
+	}
+	out := new(StatefulSet)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *StatefulSet) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StatefulSetCondition) DeepCopyInto(out *StatefulSetCondition) {
+	*out = *in
+	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetCondition.
+func (in *StatefulSetCondition) DeepCopy() *StatefulSetCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(StatefulSetCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StatefulSetList) DeepCopyInto(out *StatefulSetList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]StatefulSet, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetList.
+func (in *StatefulSetList) DeepCopy() *StatefulSetList {
+	if in == nil {
+		return nil
+	}
+	out := new(StatefulSetList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *StatefulSetList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StatefulSetSpec) DeepCopyInto(out *StatefulSetSpec) {
+	*out = *in
+	if in.Replicas != nil {
+		in, out := &in.Replicas, &out.Replicas
+		*out = new(int32)
+		**out = **in
+	}
+	if in.Selector != nil {
+		in, out := &in.Selector, &out.Selector
+		*out = new(v1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	in.Template.DeepCopyInto(&out.Template)
+	if in.VolumeClaimTemplates != nil {
+		in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates
+		*out = make([]corev1.PersistentVolumeClaim, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy)
+	if in.RevisionHistoryLimit != nil {
+		in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit
+		*out = new(int32)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetSpec.
+func (in *StatefulSetSpec) DeepCopy() *StatefulSetSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(StatefulSetSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StatefulSetStatus) DeepCopyInto(out *StatefulSetStatus) {
+	*out = *in
+	if in.ObservedGeneration != nil {
+		in, out := &in.ObservedGeneration, &out.ObservedGeneration
+		*out = new(int64)
+		**out = **in
+	}
+	if in.CollisionCount != nil {
+		in, out := &in.CollisionCount, &out.CollisionCount
+		*out = new(int32)
+		**out = **in
+	}
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]StatefulSetCondition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetStatus.
+func (in *StatefulSetStatus) DeepCopy() *StatefulSetStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(StatefulSetStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StatefulSetUpdateStrategy) DeepCopyInto(out *StatefulSetUpdateStrategy) {
+	*out = *in
+	if in.RollingUpdate != nil {
+		in, out := &in.RollingUpdate, &out.RollingUpdate
+		*out = new(RollingUpdateStatefulSetStrategy)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetUpdateStrategy.
+func (in *StatefulSetUpdateStrategy) DeepCopy() *StatefulSetUpdateStrategy {
+	if in == nil {
+		return nil
+	}
+	out := new(StatefulSetUpdateStrategy)
+	in.DeepCopyInto(out)
+	return out
+}
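+
+// Editorial sketch (not part of the upstream file): the generated copies
+// share no pointers with their source, so mutating the copy leaves the
+// original untouched.
+func exampleDeepCopyIsIndependent() bool {
+	replicas := int32(3)
+	in := &DeploymentSpec{Replicas: &replicas}
+	out := in.DeepCopy()
+	*out.Replicas = 5
+	return *in.Replicas == 3 // true: the copy owns its own int32
+}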
diff --git a/vendor/k8s.io/api/apps/v1beta2/doc.go b/vendor/k8s.io/api/apps/v1beta2/doc.go
new file mode 100644
index 0000000..e93e164
--- /dev/null
+++ b/vendor/k8s.io/api/apps/v1beta2/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:openapi-gen=true
+
+package v1beta2 // import "k8s.io/api/apps/v1beta2"
diff --git a/vendor/k8s.io/api/apps/v1beta2/generated.proto b/vendor/k8s.io/api/apps/v1beta2/generated.proto
new file mode 100644
index 0000000..5d11cbe
--- /dev/null
+++ b/vendor/k8s.io/api/apps/v1beta2/generated.proto
@@ -0,0 +1,752 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.api.apps.v1beta2;
+
+import "k8s.io/api/core/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+import "k8s.io/apimachinery/pkg/util/intstr/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1beta2";
+
+// DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1/ControllerRevision. See the
+// release notes for more information.
+// ControllerRevision implements an immutable snapshot of state data. Clients
+// are responsible for serializing and deserializing the objects that contain
+// their internal state.
+// Once a ControllerRevision has been successfully created, it can not be updated.
+// The API Server will fail validation of all requests that attempt to mutate
+// the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both
+// the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However,
+// it may be subject to name and representation changes in future releases, and clients should not
+// depend on its stability. It is primarily for internal use by controllers.
+message ControllerRevision {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Data is the serialized representation of the state.
+  optional k8s.io.apimachinery.pkg.runtime.RawExtension data = 2;
+
+  // Revision indicates the revision of the state represented by Data.
+  optional int64 revision = 3;
+}
+
+// ControllerRevisionList is a resource containing a list of ControllerRevision objects.
+message ControllerRevisionList {
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is the list of ControllerRevisions
+  repeated ControllerRevision items = 2;
+}
+
+// DEPRECATED - This group version of DaemonSet is deprecated by apps/v1/DaemonSet. See the release notes for
+// more information.
+// DaemonSet represents the configuration of a daemon set.
+message DaemonSet {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // The desired behavior of this daemon set.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+  // +optional
+  optional DaemonSetSpec spec = 2;
+
+  // The current status of this daemon set. This data may be
+  // out of date by some window of time.
+  // Populated by the system.
+  // Read-only.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+  // +optional
+  optional DaemonSetStatus status = 3;
+}
+
+// DaemonSetCondition describes the state of a DaemonSet at a certain point.
+message DaemonSetCondition {
+  // Type of DaemonSet condition.
+  optional string type = 1;
+
+  // Status of the condition, one of True, False, Unknown.
+  optional string status = 2;
+
+  // Last time the condition transitioned from one status to another.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
+
+  // The reason for the condition's last transition.
+  // +optional
+  optional string reason = 4;
+
+  // A human readable message indicating details about the transition.
+  // +optional
+  optional string message = 5;
+}
+
+// DaemonSetList is a collection of daemon sets.
+message DaemonSetList {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // A list of daemon sets.
+  repeated DaemonSet items = 2;
+}
+
+// DaemonSetSpec is the specification of a daemon set.
+message DaemonSetSpec {
+  // A label query over pods that are managed by the daemon set.
+  // Must match in order to be controlled.
+  // It must match the pod template's labels.
+  // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1;
+
+  // An object that describes the pod that will be created.
+  // The DaemonSet will create exactly one copy of this pod on every node
+  // that matches the template's node selector (or on every node if no node
+  // selector is specified).
+  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
+  optional k8s.io.api.core.v1.PodTemplateSpec template = 2;
+
+  // An update strategy to replace existing DaemonSet pods with new pods.
+  // +optional
+  optional DaemonSetUpdateStrategy updateStrategy = 3;
+
+  // The minimum number of seconds for which a newly created DaemonSet pod should
+  // be ready without any of its container crashing, for it to be considered
+  // available. Defaults to 0 (pod will be considered available as soon as it
+  // is ready).
+  // +optional
+  optional int32 minReadySeconds = 4;
+
+  // The number of old ControllerRevisions to retain to allow rollback.
+  // This is a pointer to distinguish between explicit zero and not specified.
+  // Defaults to 10.
+  // +optional
+  optional int32 revisionHistoryLimit = 6;
+}
+
+// DaemonSetStatus represents the current status of a daemon set.
+message DaemonSetStatus {
+  // The number of nodes that are running at least 1
+  // daemon pod and are supposed to run the daemon pod.
+  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
+  optional int32 currentNumberScheduled = 1;
+
+  // The number of nodes that are running the daemon pod, but are
+  // not supposed to run the daemon pod.
+  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
+  optional int32 numberMisscheduled = 2;
+
+  // The total number of nodes that should be running the daemon
+  // pod (including nodes correctly running the daemon pod).
+  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
+  optional int32 desiredNumberScheduled = 3;
+
+  // The number of nodes that should be running the daemon pod and have one
+  // or more of the daemon pod running and ready.
+  optional int32 numberReady = 4;
+
+  // The most recent generation observed by the daemon set controller.
+  // +optional
+  optional int64 observedGeneration = 5;
+
+  // The total number of nodes that are running an updated daemon pod.
+  // +optional
+  optional int32 updatedNumberScheduled = 6;
+
+  // The number of nodes that should be running the
+  // daemon pod and have one or more of the daemon pod running and
+  // available (ready for at least spec.minReadySeconds)
+  // +optional
+  optional int32 numberAvailable = 7;
+
+  // The number of nodes that should be running the
+  // daemon pod and have none of the daemon pod running and available
+  // (ready for at least spec.minReadySeconds)
+  // +optional
+  optional int32 numberUnavailable = 8;
+
+  // Count of hash collisions for the DaemonSet. The DaemonSet controller
+  // uses this field as a collision avoidance mechanism when it needs to
+  // create the name for the newest ControllerRevision.
+  // +optional
+  optional int32 collisionCount = 9;
+
+  // Represents the latest available observations of a DaemonSet's current state.
+  // +optional
+  // +patchMergeKey=type
+  // +patchStrategy=merge
+  repeated DaemonSetCondition conditions = 10;
+}
+
+// DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet.
+message DaemonSetUpdateStrategy {
+  // Type of daemon set update. Can be "RollingUpdate" or "OnDelete". Default is RollingUpdate.
+  // +optional
+  optional string type = 1;
+
+  // Rolling update config params. Present only if type = "RollingUpdate".
+  // ---
+  // TODO: Update this to follow our convention for oneOf, whatever we decide it
+  // to be. Same as Deployment `strategy.rollingUpdate`.
+  // See https://github.com/kubernetes/kubernetes/issues/35345
+  // +optional
+  optional RollingUpdateDaemonSet rollingUpdate = 2;
+}
+
+// DEPRECATED - This group version of Deployment is deprecated by apps/v1/Deployment. See the release notes for
+// more information.
+// Deployment enables declarative updates for Pods and ReplicaSets.
+message Deployment {
+  // Standard object metadata.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Specification of the desired behavior of the Deployment.
+  // +optional
+  optional DeploymentSpec spec = 2;
+
+  // Most recently observed status of the Deployment.
+  // +optional
+  optional DeploymentStatus status = 3;
+}
+
+// DeploymentCondition describes the state of a deployment at a certain point.
+message DeploymentCondition {
+  // Type of deployment condition.
+  optional string type = 1;
+
+  // Status of the condition, one of True, False, Unknown.
+  optional string status = 2;
+
+  // The last time this condition was updated.
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6;
+
+  // Last time the condition transitioned from one status to another.
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7;
+
+  // The reason for the condition's last transition.
+  optional string reason = 4;
+
+  // A human readable message indicating details about the transition.
+  optional string message = 5;
+}
+
+// DeploymentList is a list of Deployments.
+message DeploymentList {
+  // Standard list metadata.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is the list of Deployments.
+  repeated Deployment items = 2;
+}
+
+// DeploymentSpec is the specification of the desired behavior of the Deployment.
+message DeploymentSpec {
+  // Number of desired pods. This is a pointer to distinguish between explicit
+  // zero and not specified. Defaults to 1.
+  // +optional
+  optional int32 replicas = 1;
+
+  // Label selector for pods. Existing ReplicaSets whose pods are
+  // selected by this will be the ones affected by this deployment.
+  // It must match the pod template's labels.
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
+
+  // Template describes the pods that will be created.
+  optional k8s.io.api.core.v1.PodTemplateSpec template = 3;
+
+  // The deployment strategy to use to replace existing pods with new ones.
+  // +optional
+  // +patchStrategy=retainKeys
+  optional DeploymentStrategy strategy = 4;
+
+  // Minimum number of seconds for which a newly created pod should be ready
+  // without any of its container crashing, for it to be considered available.
+  // Defaults to 0 (pod will be considered available as soon as it is ready)
+  // +optional
+  optional int32 minReadySeconds = 5;
+
+  // The number of old ReplicaSets to retain to allow rollback.
+  // This is a pointer to distinguish between explicit zero and not specified.
+  // Defaults to 10.
+  // +optional
+  optional int32 revisionHistoryLimit = 6;
+
+  // Indicates that the deployment is paused.
+  // +optional
+  optional bool paused = 7;
+
+  // The maximum time in seconds for a deployment to make progress before it
+  // is considered to be failed. The deployment controller will continue to
+  // process failed deployments and a condition with a ProgressDeadlineExceeded
+  // reason will be surfaced in the deployment status. Note that progress will
+  // not be estimated during the time a deployment is paused. Defaults to 600s.
+  optional int32 progressDeadlineSeconds = 9;
+}
+
+// DeploymentStatus is the most recently observed status of the Deployment.
+message DeploymentStatus {
+  // The generation observed by the deployment controller.
+  // +optional
+  optional int64 observedGeneration = 1;
+
+  // Total number of non-terminated pods targeted by this deployment (their labels match the selector).
+  // +optional
+  optional int32 replicas = 2;
+
+  // Total number of non-terminated pods targeted by this deployment that have the desired template spec.
+  // +optional
+  optional int32 updatedReplicas = 3;
+
+  // Total number of ready pods targeted by this deployment.
+  // +optional
+  optional int32 readyReplicas = 7;
+
+  // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.
+  // +optional
+  optional int32 availableReplicas = 4;
+
+  // Total number of unavailable pods targeted by this deployment. This is the total number of
+  // pods that are still required for the deployment to have 100% available capacity. They may
+  // either be pods that are running but not yet available or pods that still have not been created.
+  // +optional
+  optional int32 unavailableReplicas = 5;
+
+  // Represents the latest available observations of a deployment's current state.
+  // +patchMergeKey=type
+  // +patchStrategy=merge
+  repeated DeploymentCondition conditions = 6;
+
+  // Count of hash collisions for the Deployment. The Deployment controller uses this
+  // field as a collision avoidance mechanism when it needs to create the name for the
+  // newest ReplicaSet.
+  // +optional
+  optional int32 collisionCount = 8;
+}
+
+// DeploymentStrategy describes how to replace existing pods with new ones.
+message DeploymentStrategy {
+  // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate.
+  // +optional
+  optional string type = 1;
+
+  // Rolling update config params. Present only if DeploymentStrategyType =
+  // RollingUpdate.
+  // ---
+  // TODO: Update this to follow our convention for oneOf, whatever we decide it
+  // to be.
+  // +optional
+  optional RollingUpdateDeployment rollingUpdate = 2;
+}
+
+// DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1/ReplicaSet. See the release notes for
+// more information.
+// ReplicaSet ensures that a specified number of pod replicas are running at any given time.
+message ReplicaSet {
+  // If the Labels of a ReplicaSet are empty, they are defaulted to
+  // be the same as the Pod(s) that the ReplicaSet manages.
+  // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Spec defines the specification of the desired behavior of the ReplicaSet.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+  // +optional
+  optional ReplicaSetSpec spec = 2;
+
+  // Status is the most recently observed status of the ReplicaSet.
+  // This data may be out of date by some window of time.
+  // Populated by the system.
+  // Read-only.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+  // +optional
+  optional ReplicaSetStatus status = 3;
+}
+
+// ReplicaSetCondition describes the state of a replica set at a certain point.
+message ReplicaSetCondition {
+  // Type of replica set condition.
+  optional string type = 1;
+
+  // Status of the condition, one of True, False, Unknown.
+  optional string status = 2;
+
+  // The last time the condition transitioned from one status to another.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
+
+  // The reason for the condition's last transition.
+  // +optional
+  optional string reason = 4;
+
+  // A human readable message indicating details about the transition.
+  // +optional
+  optional string message = 5;
+}
+
+// ReplicaSetList is a collection of ReplicaSets.
+message ReplicaSetList {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // List of ReplicaSets.
+  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller
+  repeated ReplicaSet items = 2;
+}
+
+// ReplicaSetSpec is the specification of a ReplicaSet.
+message ReplicaSetSpec {
+  // Replicas is the number of desired replicas.
+  // This is a pointer to distinguish between explicit zero and unspecified.
+  // Defaults to 1.
+  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
+  // +optional
+  optional int32 replicas = 1;
+
+  // Minimum number of seconds for which a newly created pod should be ready
+  // without any of its container crashing, for it to be considered available.
+  // Defaults to 0 (pod will be considered available as soon as it is ready)
+  // +optional
+  optional int32 minReadySeconds = 4;
+
+  // Selector is a label query over pods that should match the replica count.
+  // Label keys and values that must match in order to be controlled by this replica set.
+  // It must match the pod template's labels.
+  // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
+
+  // Template is the object that describes the pod that will be created if
+  // insufficient replicas are detected.
+  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
+  // +optional
+  optional k8s.io.api.core.v1.PodTemplateSpec template = 3;
+}
+
+// ReplicaSetStatus represents the current status of a ReplicaSet.
+message ReplicaSetStatus {
+  // Replicas is the most recently observed number of replicas.
+  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
+  optional int32 replicas = 1;
+
+  // The number of pods that have labels matching the labels of the pod template of the replicaset.
+  // +optional
+  optional int32 fullyLabeledReplicas = 2;
+
+  // The number of ready replicas for this replica set.
+  // +optional
+  optional int32 readyReplicas = 4;
+
+  // The number of available replicas (ready for at least minReadySeconds) for this replica set.
+  // +optional
+  optional int32 availableReplicas = 5;
+
+  // ObservedGeneration reflects the generation of the most recently observed ReplicaSet.
+  // +optional
+  optional int64 observedGeneration = 3;
+
+  // Represents the latest available observations of a replica set's current state.
+  // +optional
+  // +patchMergeKey=type
+  // +patchStrategy=merge
+  repeated ReplicaSetCondition conditions = 6;
+}
+
+// Spec to control the desired behavior of daemon set rolling update.
+message RollingUpdateDaemonSet {
+  // The maximum number of DaemonSet pods that can be unavailable during the
+  // update. Value can be an absolute number (ex: 5) or a percentage of total
+  // number of DaemonSet pods at the start of the update (ex: 10%). Absolute
+  // number is calculated from percentage by rounding up.
+  // This cannot be 0.
+  // Default value is 1.
+  // Example: when this is set to 30%, at most 30% of the total number of nodes
+  // that should be running the daemon pod (i.e. status.desiredNumberScheduled)
+  // can have their pods stopped for an update at any given
+  // time. The update starts by stopping at most 30% of those DaemonSet pods
+  // and then brings up new DaemonSet pods in their place. Once the new pods
+  // are available, it then proceeds onto other DaemonSet pods, thus ensuring
+  // that at least 70% of the original number of DaemonSet pods are available at
+  // all times during the update.
+  // +optional
+  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1;
+}
+
+// Spec to control the desired behavior of rolling update.
+message RollingUpdateDeployment {
+  // The maximum number of pods that can be unavailable during the update.
+  // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+  // Absolute number is calculated from percentage by rounding down.
+  // This can not be 0 if MaxSurge is 0.
+  // Defaults to 25%.
+  // Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods
+  // immediately when the rolling update starts. Once new pods are ready, old ReplicaSet
+  // can be scaled down further, followed by scaling up the new ReplicaSet, ensuring
+  // that the total number of pods available at all times during the update is at
+  // least 70% of desired pods.
+  // +optional
+  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1;
+
+  // The maximum number of pods that can be scheduled above the desired number of
+  // pods.
+  // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+  // This can not be 0 if MaxUnavailable is 0.
+  // Absolute number is calculated from percentage by rounding up.
+  // Defaults to 25%.
+  // Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when
+  // the rolling update starts, such that the total number of old and new pods do not exceed
+  // 130% of desired pods. Once old pods have been killed,
+  // new ReplicaSet can be scaled up further, ensuring that total number of pods running
+  // at any time during the update is at most 130% of desired pods.
+  // +optional
+  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2;
+}
+
+// RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.
+message RollingUpdateStatefulSetStrategy {
+  // Partition indicates the ordinal at which the StatefulSet should be
+  // partitioned.
+  // Default value is 0.
+  // +optional
+  optional int32 partition = 1;
+}
+
+// Scale represents a scaling request for a resource.
+message Scale {
+  // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.
+  // +optional
+  optional ScaleSpec spec = 2;
+
+  // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only.
+  // +optional
+  optional ScaleStatus status = 3;
+}
+
+// ScaleSpec describes the attributes of a scale subresource
+message ScaleSpec {
+  // desired number of instances for the scaled object.
+  // +optional
+  optional int32 replicas = 1;
+}
+
+// ScaleStatus represents the current status of a scale subresource.
+message ScaleStatus {
+  // actual number of observed instances of the scaled object.
+  optional int32 replicas = 1;
+
+  // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors
+  // +optional
+  map<string, string> selector = 2;
+
+  // label selector for pods that should match the replicas count. This is a serialized
+  // version of both map-based and more expressive set-based selectors. This is done to
+  // avoid introspection in the clients. The string will be in the same format as the
+  // query-param syntax. If the target type only supports map-based selectors, both this
+  // field and map-based selector field are populated.
+  // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+  // +optional
+  optional string targetSelector = 3;
+}
+
+// DEPRECATED - This group version of StatefulSet is deprecated by apps/v1/StatefulSet. See the release notes for
+// more information.
+// StatefulSet represents a set of pods with consistent identities.
+// Identities are defined as:
+//  - Network: A single stable DNS and hostname.
+//  - Storage: As many VolumeClaims as requested.
+// The StatefulSet guarantees that a given network identity will always
+// map to the same storage identity.
+message StatefulSet {
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Spec defines the desired identities of pods in this set.
+  // +optional
+  optional StatefulSetSpec spec = 2;
+
+  // Status is the current status of Pods in this StatefulSet. This data
+  // may be out of date by some window of time.
+  // +optional
+  optional StatefulSetStatus status = 3;
+}
+
+// StatefulSetCondition describes the state of a statefulset at a certain point.
+message StatefulSetCondition {
+  // Type of statefulset condition.
+  optional string type = 1;
+
+  // Status of the condition, one of True, False, Unknown.
+  optional string status = 2;
+
+  // Last time the condition transitioned from one status to another.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
+
+  // The reason for the condition's last transition.
+  // +optional
+  optional string reason = 4;
+
+  // A human readable message indicating details about the transition.
+  // +optional
+  optional string message = 5;
+}
+
+// StatefulSetList is a collection of StatefulSets.
+message StatefulSetList {
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  repeated StatefulSet items = 2;
+}
+
+// A StatefulSetSpec is the specification of a StatefulSet.
+message StatefulSetSpec {
+  // replicas is the desired number of replicas of the given Template.
+  // These are replicas in the sense that they are instantiations of the
+  // same Template, but individual replicas also have a consistent identity.
+  // If unspecified, defaults to 1.
+  // TODO: Consider a rename of this field.
+  // +optional
+  optional int32 replicas = 1;
+
+  // selector is a label query over pods that should match the replica count.
+  // It must match the pod template's labels.
+  // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
+
+  // template is the object that describes the pod that will be created if
+  // insufficient replicas are detected. Each pod stamped out by the StatefulSet
+  // will fulfill this Template, but have a unique identity from the rest
+  // of the StatefulSet.
+  optional k8s.io.api.core.v1.PodTemplateSpec template = 3;
+
+  // volumeClaimTemplates is a list of claims that pods are allowed to reference.
+  // The StatefulSet controller is responsible for mapping network identities to
+  // claims in a way that maintains the identity of a pod. Every claim in
+  // this list must have at least one matching (by name) volumeMount in one
+  // container in the template. A claim in this list takes precedence over
+  // any volumes in the template, with the same name.
+  // TODO: Define the behavior if a claim already exists with the same name.
+  // +optional
+  repeated k8s.io.api.core.v1.PersistentVolumeClaim volumeClaimTemplates = 4;
+
+  // serviceName is the name of the service that governs this StatefulSet.
+  // This service must exist before the StatefulSet, and is responsible for
+  // the network identity of the set. Pods get DNS/hostnames that follow the
+  // pattern: pod-specific-string.serviceName.default.svc.cluster.local
+  // where "pod-specific-string" is managed by the StatefulSet controller.
+  optional string serviceName = 5;
+
+  // podManagementPolicy controls how pods are created during initial scale up,
+  // when replacing pods on nodes, or when scaling down. The default policy is
+  // `OrderedReady`, where pods are created in increasing order (pod-0, then
+  // pod-1, etc) and the controller will wait until each pod is ready before
+  // continuing. When scaling down, the pods are removed in the opposite order.
+  // The alternative policy is `Parallel` which will create pods in parallel
+  // to match the desired scale without waiting, and on scale down will delete
+  // all pods at once.
+  // +optional
+  optional string podManagementPolicy = 6;
+
+  // updateStrategy indicates the StatefulSetUpdateStrategy that will be
+  // employed to update Pods in the StatefulSet when a revision is made to
+  // Template.
+  optional StatefulSetUpdateStrategy updateStrategy = 7;
+
+  // revisionHistoryLimit is the maximum number of revisions that will
+  // be maintained in the StatefulSet's revision history. The revision history
+  // consists of all revisions not represented by a currently applied
+  // StatefulSetSpec version. The default value is 10.
+  optional int32 revisionHistoryLimit = 8;
+}
+
+// StatefulSetStatus represents the current state of a StatefulSet.
+message StatefulSetStatus {
+  // observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the
+  // StatefulSet's generation, which is updated on mutation by the API Server.
+  // +optional
+  optional int64 observedGeneration = 1;
+
+  // replicas is the number of Pods created by the StatefulSet controller.
+  optional int32 replicas = 2;
+
+  // readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition.
+  optional int32 readyReplicas = 3;
+
+  // currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version
+  // indicated by currentRevision.
+  optional int32 currentReplicas = 4;
+
+  // updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version
+  // indicated by updateRevision.
+  optional int32 updatedReplicas = 5;
+
+  // currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the
+  // sequence [0,currentReplicas).
+  optional string currentRevision = 6;
+
+  // updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence
+  // [replicas-updatedReplicas,replicas)
+  optional string updateRevision = 7;
+
+  // collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller
+  // uses this field as a collision avoidance mechanism when it needs to create the name for the
+  // newest ControllerRevision.
+  // +optional
+  optional int32 collisionCount = 9;
+
+  // Represents the latest available observations of a statefulset's current state.
+  // +optional
+  // +patchMergeKey=type
+  // +patchStrategy=merge
+  repeated StatefulSetCondition conditions = 10;
+}
+
+// StatefulSetUpdateStrategy indicates the strategy that the StatefulSet
+// controller will use to perform updates. It includes any additional parameters
+// necessary to perform the update for the indicated strategy.
+message StatefulSetUpdateStrategy {
+  // Type indicates the type of the StatefulSetUpdateStrategy.
+  // Default is RollingUpdate.
+  // +optional
+  optional string type = 1;
+
+  // RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType.
+  // +optional
+  optional RollingUpdateStatefulSetStrategy rollingUpdate = 2;
+}
+
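The maxUnavailable/maxSurge fields above accept either an absolute count or a percentage, and the comments specify the rounding: percentages round down for maxUnavailable and up for maxSurge. A self-contained sketch of that arithmetic (apimachinery's intstr package ships comparable helpers; the function below is illustrative only):

	package main

	import (
		"fmt"
		"math"
	)

	// podsFromPercent converts a percentage of desired replicas into an
	// absolute pod count. maxUnavailable rounds down; maxSurge rounds up.
	func podsFromPercent(percent, desired int, roundUp bool) int {
		v := float64(percent) * float64(desired) / 100.0
		if roundUp {
			return int(math.Ceil(v))
		}
		return int(math.Floor(v))
	}

	func main() {
		desired := 10
		fmt.Println(podsFromPercent(25, desired, false)) // maxUnavailable 25% of 10 -> 2
		fmt.Println(podsFromPercent(25, desired, true))  // maxSurge 25% of 10 -> 3
	}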
diff --git a/vendor/k8s.io/api/apps/v1beta2/register.go b/vendor/k8s.io/api/apps/v1beta2/register.go
new file mode 100644
index 0000000..2784ee3
--- /dev/null
+++ b/vendor/k8s.io/api/apps/v1beta2/register.go
@@ -0,0 +1,61 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta2
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "apps"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta2"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
+	// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+	SchemeBuilder      = runtime.NewSchemeBuilder(addKnownTypes)
+	localSchemeBuilder = &SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&Deployment{},
+		&DeploymentList{},
+		&Scale{},
+		&StatefulSet{},
+		&StatefulSetList{},
+		&DaemonSet{},
+		&DaemonSetList{},
+		&ReplicaSet{},
+		&ReplicaSetList{},
+		&ControllerRevision{},
+		&ControllerRevisionList{},
+	)
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
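register.go wires the v1beta2 kinds into a runtime.Scheme through SchemeBuilder. A minimal sketch of how a consumer of this vendored package would register the group/version before encoding or decoding objects (the printed check is illustrative only):

	package main

	import (
		"fmt"

		appsv1beta2 "k8s.io/api/apps/v1beta2"
		"k8s.io/apimachinery/pkg/runtime"
	)

	func main() {
		scheme := runtime.NewScheme()
		// AddToScheme registers every kind listed in addKnownTypes under
		// apps/v1beta2 so codecs built on this scheme can handle them.
		if err := appsv1beta2.AddToScheme(scheme); err != nil {
			panic(err)
		}
		gvk := appsv1beta2.SchemeGroupVersion.WithKind("Deployment")
		fmt.Println(scheme.Recognizes(gvk)) // prints: true
	}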
diff --git a/vendor/k8s.io/api/apps/v1beta2/types.go b/vendor/k8s.io/api/apps/v1beta2/types.go
new file mode 100644
index 0000000..e75589a
--- /dev/null
+++ b/vendor/k8s.io/api/apps/v1beta2/types.go
@@ -0,0 +1,876 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta2
+
+import (
+	"k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/intstr"
+)
+
+const (
+	ControllerRevisionHashLabelKey = "controller-revision-hash"
+	StatefulSetRevisionLabel       = ControllerRevisionHashLabelKey
+	DeprecatedRollbackTo           = "deprecated.deployment.rollback.to"
+	DeprecatedTemplateGeneration   = "deprecated.daemonset.template.generation"
+	StatefulSetPodNameLabel        = "statefulset.kubernetes.io/pod-name"
+)
+
+// ScaleSpec describes the attributes of a scale subresource
+type ScaleSpec struct {
+	// desired number of instances for the scaled object.
+	// +optional
+	Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
+}
+
+// ScaleStatus represents the current status of a scale subresource.
+type ScaleStatus struct {
+	// actual number of observed instances of the scaled object.
+	Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"`
+
+	// label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors
+	// +optional
+	Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"`
+
+	// label selector for pods that should match the replicas count. This is a serialized
+	// version of both map-based and more expressive set-based selectors. This is done to
+	// avoid introspection in the clients. The string will be in the same format as the
+	// query-param syntax. If the target type only supports map-based selectors, both this
+	// field and map-based selector field are populated.
+	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+	// +optional
+	TargetSelector string `json:"targetSelector,omitempty" protobuf:"bytes,3,opt,name=targetSelector"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Scale represents a scaling request for a resource.
+type Scale struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.
+	// +optional
+	Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+	// current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only.
+	// +optional
+	Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +genclient
+// +genclient:method=GetScale,verb=get,subresource=scale,result=Scale
+// +genclient:method=UpdateScale,verb=update,subresource=scale,input=Scale,result=Scale
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// DEPRECATED - This group version of StatefulSet is deprecated by apps/v1/StatefulSet. See the release notes for
+// more information.
+// StatefulSet represents a set of pods with consistent identities.
+// Identities are defined as:
+//  - Network: A single stable DNS and hostname.
+//  - Storage: As many VolumeClaims as requested.
+// The StatefulSet guarantees that a given network identity will always
+// map to the same storage identity.
+type StatefulSet struct {
+	metav1.TypeMeta `json:",inline"`
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Spec defines the desired identities of pods in this set.
+	// +optional
+	Spec StatefulSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+	// Status is the current status of Pods in this StatefulSet. This data
+	// may be out of date by some window of time.
+	// +optional
+	Status StatefulSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// PodManagementPolicyType defines the policy for creating pods under a stateful set.
+type PodManagementPolicyType string
+
+const (
+	// OrderedReadyPodManagement will create pods in strictly increasing order on
+	// scale up and strictly decreasing order on scale down, progressing only when
+	// the previous pod is ready or terminated. At most one pod will be changed
+	// at any time.
+	OrderedReadyPodManagement PodManagementPolicyType = "OrderedReady"
+	// ParallelPodManagement will create and delete pods as soon as the stateful set
+	// replica count is changed, and will not wait for pods to be ready or complete
+	// termination.
+	ParallelPodManagement PodManagementPolicyType = "Parallel"
+)
+
+// StatefulSetUpdateStrategy indicates the strategy that the StatefulSet
+// controller will use to perform updates. It includes any additional parameters
+// necessary to perform the update for the indicated strategy.
+type StatefulSetUpdateStrategy struct {
+	// Type indicates the type of the StatefulSetUpdateStrategy.
+	// Default is RollingUpdate.
+	// +optional
+	Type StatefulSetUpdateStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=StatefulSetStrategyType"`
+	// RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType.
+	// +optional
+	RollingUpdate *RollingUpdateStatefulSetStrategy `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"`
+}
+
+// StatefulSetUpdateStrategyType is a string enumeration type that enumerates
+// all possible update strategies for the StatefulSet controller.
+type StatefulSetUpdateStrategyType string
+
+const (
+	// RollingUpdateStatefulSetStrategyType indicates that update will be
+	// applied to all Pods in the StatefulSet with respect to the StatefulSet
+	// ordering constraints. When a scale operation is performed with this
+	// strategy, new Pods will be created from the specification version indicated
+	// by the StatefulSet's updateRevision.
+	RollingUpdateStatefulSetStrategyType StatefulSetUpdateStrategyType = "RollingUpdate"
+	// OnDeleteStatefulSetStrategyType triggers the legacy behavior. Version
+	// tracking and ordered rolling restarts are disabled. Pods are recreated
+	// from the StatefulSetSpec when they are manually deleted. When a scale
+	// operation is performed with this strategy, new Pods will be created from
+	// the specification version indicated by the StatefulSet's currentRevision.
+	OnDeleteStatefulSetStrategyType StatefulSetUpdateStrategyType = "OnDelete"
+)
+
+// RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.
+type RollingUpdateStatefulSetStrategy struct {
+	// Partition indicates the ordinal at which the StatefulSet should be
+	// partitioned.
+	// Default value is 0.
+	// +optional
+	Partition *int32 `json:"partition,omitempty" protobuf:"varint,1,opt,name=partition"`
+}
+
+// A StatefulSetSpec is the specification of a StatefulSet.
+type StatefulSetSpec struct {
+	// replicas is the desired number of replicas of the given Template.
+	// These are replicas in the sense that they are instantiations of the
+	// same Template, but individual replicas also have a consistent identity.
+	// If unspecified, defaults to 1.
+	// TODO: Consider a rename of this field.
+	// +optional
+	Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
+
+	// selector is a label query over pods that should match the replica count.
+	// It must match the pod template's labels.
+	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+	Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"`
+
+	// template is the object that describes the pod that will be created if
+	// insufficient replicas are detected. Each pod stamped out by the StatefulSet
+	// will fulfill this Template, but have a unique identity from the rest
+	// of the StatefulSet.
+	Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"`
+
+	// volumeClaimTemplates is a list of claims that pods are allowed to reference.
+	// The StatefulSet controller is responsible for mapping network identities to
+	// claims in a way that maintains the identity of a pod. Every claim in
+	// this list must have at least one matching (by name) volumeMount in one
+	// container in the template. A claim in this list takes precedence over
+	// any volumes in the template, with the same name.
+	// TODO: Define the behavior if a claim already exists with the same name.
+	// +optional
+	VolumeClaimTemplates []v1.PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty" protobuf:"bytes,4,rep,name=volumeClaimTemplates"`
+
+	// serviceName is the name of the service that governs this StatefulSet.
+	// This service must exist before the StatefulSet, and is responsible for
+	// the network identity of the set. Pods get DNS/hostnames that follow the
+	// pattern: pod-specific-string.serviceName.default.svc.cluster.local
+	// where "pod-specific-string" is managed by the StatefulSet controller.
+	ServiceName string `json:"serviceName" protobuf:"bytes,5,opt,name=serviceName"`
+
+	// podManagementPolicy controls how pods are created during initial scale up,
+	// when replacing pods on nodes, or when scaling down. The default policy is
+	// `OrderedReady`, where pods are created in increasing order (pod-0, then
+	// pod-1, etc) and the controller will wait until each pod is ready before
+	// continuing. When scaling down, the pods are removed in the opposite order.
+	// The alternative policy is `Parallel` which will create pods in parallel
+	// to match the desired scale without waiting, and on scale down will delete
+	// all pods at once.
+	// +optional
+	PodManagementPolicy PodManagementPolicyType `json:"podManagementPolicy,omitempty" protobuf:"bytes,6,opt,name=podManagementPolicy,casttype=PodManagementPolicyType"`
+
+	// updateStrategy indicates the StatefulSetUpdateStrategy that will be
+	// employed to update Pods in the StatefulSet when a revision is made to
+	// Template.
+	UpdateStrategy StatefulSetUpdateStrategy `json:"updateStrategy,omitempty" protobuf:"bytes,7,opt,name=updateStrategy"`
+
+	// revisionHistoryLimit is the maximum number of revisions that will
+	// be maintained in the StatefulSet's revision history. The revision history
+	// consists of all revisions not represented by a currently applied
+	// StatefulSetSpec version. The default value is 10.
+	RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,8,opt,name=revisionHistoryLimit"`
+}
+
+// StatefulSetStatus represents the current state of a StatefulSet.
+type StatefulSetStatus struct {
+	// observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the
+	// StatefulSet's generation, which is updated on mutation by the API Server.
+	// +optional
+	ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"`
+
+	// replicas is the number of Pods created by the StatefulSet controller.
+	Replicas int32 `json:"replicas" protobuf:"varint,2,opt,name=replicas"`
+
+	// readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition.
+	ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,3,opt,name=readyReplicas"`
+
+	// currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version
+	// indicated by currentRevision.
+	CurrentReplicas int32 `json:"currentReplicas,omitempty" protobuf:"varint,4,opt,name=currentReplicas"`
+
+	// updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version
+	// indicated by updateRevision.
+	UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,5,opt,name=updatedReplicas"`
+
+	// currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the
+	// sequence [0,currentReplicas).
+	CurrentRevision string `json:"currentRevision,omitempty" protobuf:"bytes,6,opt,name=currentRevision"`
+
+	// updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence
+	// [replicas-updatedReplicas,replicas)
+	UpdateRevision string `json:"updateRevision,omitempty" protobuf:"bytes,7,opt,name=updateRevision"`
+
+	// collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller
+	// uses this field as a collision avoidance mechanism when it needs to create the name for the
+	// newest ControllerRevision.
+	// +optional
+	CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,9,opt,name=collisionCount"`
+
+	// Represents the latest available observations of a statefulset's current state.
+	// +optional
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	Conditions []StatefulSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"`
+}
+
+type StatefulSetConditionType string
+
+// StatefulSetCondition describes the state of a statefulset at a certain point.
+type StatefulSetCondition struct {
+	// Type of statefulset condition.
+	Type StatefulSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=StatefulSetConditionType"`
+	// Status of the condition, one of True, False, Unknown.
+	Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"`
+	// Last time the condition transitioned from one status to another.
+	// +optional
+	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
+	// The reason for the condition's last transition.
+	// +optional
+	Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
+	// A human readable message indicating details about the transition.
+	// +optional
+	Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// StatefulSetList is a collection of StatefulSets.
+type StatefulSetList struct {
+	metav1.TypeMeta `json:",inline"`
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+	Items           []StatefulSet `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// DEPRECATED - This group version of Deployment is deprecated by apps/v1/Deployment. See the release notes for
+// more information.
+// Deployment enables declarative updates for Pods and ReplicaSets.
+type Deployment struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object metadata.
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Specification of the desired behavior of the Deployment.
+	// +optional
+	Spec DeploymentSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+	// Most recently observed status of the Deployment.
+	// +optional
+	Status DeploymentStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// DeploymentSpec is the specification of the desired behavior of the Deployment.
+type DeploymentSpec struct {
+	// Number of desired pods. This is a pointer to distinguish between explicit
+	// zero and not specified. Defaults to 1.
+	// +optional
+	Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
+
+	// Label selector for pods. Existing ReplicaSets whose pods are
+	// selected by this will be the ones affected by this deployment.
+	// It must match the pod template's labels.
+	Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"`
+
+	// Template describes the pods that will be created.
+	Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"`
+
+	// The deployment strategy to use to replace existing pods with new ones.
+	// +optional
+	// +patchStrategy=retainKeys
+	Strategy DeploymentStrategy `json:"strategy,omitempty" patchStrategy:"retainKeys" protobuf:"bytes,4,opt,name=strategy"`
+
+	// Minimum number of seconds for which a newly created pod should be ready
+	// without any of its container crashing, for it to be considered available.
+	// Defaults to 0 (pod will be considered available as soon as it is ready)
+	// +optional
+	MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,5,opt,name=minReadySeconds"`
+
+	// The number of old ReplicaSets to retain to allow rollback.
+	// This is a pointer to distinguish between explicit zero and not specified.
+	// Defaults to 10.
+	// +optional
+	RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"`
+
+	// Indicates that the deployment is paused.
+	// +optional
+	Paused bool `json:"paused,omitempty" protobuf:"varint,7,opt,name=paused"`
+
+	// The maximum time in seconds for a deployment to make progress before it
+	// is considered to be failed. The deployment controller will continue to
+	// process failed deployments and a condition with a ProgressDeadlineExceeded
+	// reason will be surfaced in the deployment status. Note that progress will
+	// not be estimated during the time a deployment is paused. Defaults to 600s.
+	ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty" protobuf:"varint,9,opt,name=progressDeadlineSeconds"`
+}
+
+const (
+	// DefaultDeploymentUniqueLabelKey is the default key of the selector that is added
+	// to existing ReplicaSets (and the label key that is added to its pods) to prevent the existing ReplicaSets
+	// from selecting new pods (and old pods from being selected by the new ReplicaSet).
+	DefaultDeploymentUniqueLabelKey string = "pod-template-hash"
+)
+
+// DeploymentStrategy describes how to replace existing pods with new ones.
+type DeploymentStrategy struct {
+	// Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate.
+	// +optional
+	Type DeploymentStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=DeploymentStrategyType"`
+
+	// Rolling update config params. Present only if DeploymentStrategyType =
+	// RollingUpdate.
+	//---
+	// TODO: Update this to follow our convention for oneOf, whatever we decide it
+	// to be.
+	// +optional
+	RollingUpdate *RollingUpdateDeployment `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"`
+}
+
+type DeploymentStrategyType string
+
+const (
+	// Kill all existing pods before creating new ones.
+	RecreateDeploymentStrategyType DeploymentStrategyType = "Recreate"
+
+	// Replace the old ReplicaSets with a new one using a rolling update, i.e. gradually scale down the old ReplicaSets and scale up the new one.
+	RollingUpdateDeploymentStrategyType DeploymentStrategyType = "RollingUpdate"
+)
+
+// Spec to control the desired behavior of rolling update.
+type RollingUpdateDeployment struct {
+	// The maximum number of pods that can be unavailable during the update.
+	// Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+	// Absolute number is calculated from percentage by rounding down.
+	// This can not be 0 if MaxSurge is 0.
+	// Defaults to 25%.
+	// Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods
+	// immediately when the rolling update starts. Once new pods are ready, old ReplicaSet
+	// can be scaled down further, followed by scaling up the new ReplicaSet, ensuring
+	// that the total number of pods available at all times during the update is at
+	// least 70% of desired pods.
+	// +optional
+	MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"`
+
+	// The maximum number of pods that can be scheduled above the desired number of
+	// pods.
+	// Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+	// This can not be 0 if MaxUnavailable is 0.
+	// Absolute number is calculated from percentage by rounding up.
+	// Defaults to 25%.
+	// Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when
+	// the rolling update starts, such that the total number of old and new pods do not exceed
+	// 130% of desired pods. Once old pods have been killed,
+	// new ReplicaSet can be scaled up further, ensuring that total number of pods running
+	// at any time during the update is at most 130% of desired pods.
+	// +optional
+	MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"`
+}
+
+// DeploymentStatus is the most recently observed status of the Deployment.
+type DeploymentStatus struct {
+	// The generation observed by the deployment controller.
+	// +optional
+	ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"`
+
+	// Total number of non-terminated pods targeted by this deployment (their labels match the selector).
+	// +optional
+	Replicas int32 `json:"replicas,omitempty" protobuf:"varint,2,opt,name=replicas"`
+
+	// Total number of non-terminated pods targeted by this deployment that have the desired template spec.
+	// +optional
+	UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,3,opt,name=updatedReplicas"`
+
+	// Total number of ready pods targeted by this deployment.
+	// +optional
+	ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,7,opt,name=readyReplicas"`
+
+	// Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.
+	// +optional
+	AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,4,opt,name=availableReplicas"`
+
+	// Total number of unavailable pods targeted by this deployment. This is the total number of
+	// pods that are still required for the deployment to have 100% available capacity. They may
+	// either be pods that are running but not yet available or pods that still have not been created.
+	// +optional
+	UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,5,opt,name=unavailableReplicas"`
+
+	// Represents the latest available observations of a deployment's current state.
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	Conditions []DeploymentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"`
+
+	// Count of hash collisions for the Deployment. The Deployment controller uses this
+	// field as a collision avoidance mechanism when it needs to create the name for the
+	// newest ReplicaSet.
+	// +optional
+	CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,8,opt,name=collisionCount"`
+}
+
+type DeploymentConditionType string
+
+// These are valid conditions of a deployment.
+const (
+	// Available means the deployment is available, i.e. at least the minimum available
+	// replicas required are up and running for at least minReadySeconds.
+	DeploymentAvailable DeploymentConditionType = "Available"
+	// Progressing means the deployment is progressing. Progress for a deployment is
+	// considered when a new replica set is created or adopted, and when new pods scale
+	// up or old pods scale down. Progress is not estimated for paused deployments or
+	// when progressDeadlineSeconds is not specified.
+	DeploymentProgressing DeploymentConditionType = "Progressing"
+	// ReplicaFailure is added in a deployment when one of its pods fails to be created
+	// or deleted.
+	DeploymentReplicaFailure DeploymentConditionType = "ReplicaFailure"
+)
+
+// DeploymentCondition describes the state of a deployment at a certain point.
+type DeploymentCondition struct {
+	// Type of deployment condition.
+	Type DeploymentConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DeploymentConditionType"`
+	// Status of the condition, one of True, False, Unknown.
+	Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"`
+	// The last time this condition was updated.
+	LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,6,opt,name=lastUpdateTime"`
+	// Last time the condition transitioned from one status to another.
+	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,7,opt,name=lastTransitionTime"`
+	// The reason for the condition's last transition.
+	Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
+	// A human readable message indicating details about the transition.
+	Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
+}
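Conditions are appended and updated in place by the controller, so consumers typically scan the slice by type. A hypothetical helper for illustration (getDeploymentCondition is not part of this API; the real helpers live in client libraries):

	// getDeploymentCondition returns the condition with the given type,
	// or nil if the status does not carry one.
	func getDeploymentCondition(status DeploymentStatus, t DeploymentConditionType) *DeploymentCondition {
		for i := range status.Conditions {
			if status.Conditions[i].Type == t {
				return &status.Conditions[i]
			}
		}
		return nil
	}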
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// DeploymentList is a list of Deployments.
+type DeploymentList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is the list of Deployments.
+	Items []Deployment `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet.
+type DaemonSetUpdateStrategy struct {
+	// Type of daemon set update. Can be "RollingUpdate" or "OnDelete". Default is RollingUpdate.
+	// +optional
+	Type DaemonSetUpdateStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type"`
+
+	// Rolling update config params. Present only if type = "RollingUpdate".
+	//---
+	// TODO: Update this to follow our convention for oneOf, whatever we decide it
+	// to be. Same as Deployment `strategy.rollingUpdate`.
+	// See https://github.com/kubernetes/kubernetes/issues/35345
+	// +optional
+	RollingUpdate *RollingUpdateDaemonSet `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"`
+}
+
+type DaemonSetUpdateStrategyType string
+
+const (
+	// Replace the old daemons by new ones using a rolling update, i.e. replace them on each node one after the other.
+	RollingUpdateDaemonSetStrategyType DaemonSetUpdateStrategyType = "RollingUpdate"
+
+	// Replace the old daemons only when they are killed.
+	OnDeleteDaemonSetStrategyType DaemonSetUpdateStrategyType = "OnDelete"
+)
+
+// Spec to control the desired behavior of daemon set rolling update.
+type RollingUpdateDaemonSet struct {
+	// The maximum number of DaemonSet pods that can be unavailable during the
+	// update. Value can be an absolute number (ex: 5) or a percentage of total
+	// number of DaemonSet pods at the start of the update (ex: 10%). Absolute
+	// number is calculated from percentage by rounding up.
+	// This cannot be 0.
+	// Default value is 1.
+	// Example: when this is set to 30%, at most 30% of the total number of nodes
+	// that should be running the daemon pod (i.e. status.desiredNumberScheduled)
+	// can have their pods stopped for an update at any given
+	// time. The update starts by stopping at most 30% of those DaemonSet pods
+	// and then brings up new DaemonSet pods in their place. Once the new pods
+	// are available, it then proceeds onto other DaemonSet pods, thus ensuring
+	// that at least 70% of the original number of DaemonSet pods are available at
+	// all times during the update.
+	// +optional
+	MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"`
+}
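Unlike Deployments, DaemonSets in this version expose only maxUnavailable (there is no surge field), so the single knob fully describes the rolling update. A minimal illustrative sketch, again assuming the intstr constructors:

	// Illustrative only: replace daemon pods one node at a time.
	one := intstr.FromInt(1)
	update := DaemonSetUpdateStrategy{
		Type:          RollingUpdateDaemonSetStrategyType,
		RollingUpdate: &RollingUpdateDaemonSet{MaxUnavailable: &one},
	}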
+
+// DaemonSetSpec is the specification of a daemon set.
+type DaemonSetSpec struct {
+	// A label query over pods that are managed by the daemon set.
+	// Must match in order to be controlled.
+	// It must match the pod template's labels.
+	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+	Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,1,opt,name=selector"`
+
+	// An object that describes the pod that will be created.
+	// The DaemonSet will create exactly one copy of this pod on every node
+	// that matches the template's node selector (or on every node if no node
+	// selector is specified).
+	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
+	Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,2,opt,name=template"`
+
+	// An update strategy to replace existing DaemonSet pods with new pods.
+	// +optional
+	UpdateStrategy DaemonSetUpdateStrategy `json:"updateStrategy,omitempty" protobuf:"bytes,3,opt,name=updateStrategy"`
+
+	// The minimum number of seconds for which a newly created DaemonSet pod should
+	// be ready without any of its container crashing, for it to be considered
+	// available. Defaults to 0 (pod will be considered available as soon as it
+	// is ready).
+	// +optional
+	MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"`
+
+	// The number of old history entries to retain to allow rollback.
+	// This is a pointer to distinguish between explicit zero and not specified.
+	// Defaults to 10.
+	// +optional
+	RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"`
+}
+
+// DaemonSetStatus represents the current status of a daemon set.
+type DaemonSetStatus struct {
+	// The number of nodes that are running at least 1
+	// daemon pod and are supposed to run the daemon pod.
+	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
+	CurrentNumberScheduled int32 `json:"currentNumberScheduled" protobuf:"varint,1,opt,name=currentNumberScheduled"`
+
+	// The number of nodes that are running the daemon pod, but are
+	// not supposed to run the daemon pod.
+	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
+	NumberMisscheduled int32 `json:"numberMisscheduled" protobuf:"varint,2,opt,name=numberMisscheduled"`
+
+	// The total number of nodes that should be running the daemon
+	// pod (including nodes correctly running the daemon pod).
+	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
+	DesiredNumberScheduled int32 `json:"desiredNumberScheduled" protobuf:"varint,3,opt,name=desiredNumberScheduled"`
+
+	// The number of nodes that should be running the daemon pod and have one
+	// or more of the daemon pod running and ready.
+	NumberReady int32 `json:"numberReady" protobuf:"varint,4,opt,name=numberReady"`
+
+	// The most recent generation observed by the daemon set controller.
+	// +optional
+	ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,5,opt,name=observedGeneration"`
+
+	// The total number of nodes that are running the updated daemon pod.
+	// +optional
+	UpdatedNumberScheduled int32 `json:"updatedNumberScheduled,omitempty" protobuf:"varint,6,opt,name=updatedNumberScheduled"`
+
+	// The number of nodes that should be running the
+	// daemon pod and have one or more of the daemon pod running and
+	// available (ready for at least spec.minReadySeconds).
+	// +optional
+	NumberAvailable int32 `json:"numberAvailable,omitempty" protobuf:"varint,7,opt,name=numberAvailable"`
+
+	// The number of nodes that should be running the
+	// daemon pod and have none of the daemon pod running and available
+	// (ready for at least spec.minReadySeconds).
+	// +optional
+	NumberUnavailable int32 `json:"numberUnavailable,omitempty" protobuf:"varint,8,opt,name=numberUnavailable"`
+
+	// Count of hash collisions for the DaemonSet. The DaemonSet controller
+	// uses this field as a collision avoidance mechanism when it needs to
+	// create the name for the newest ControllerRevision.
+	// +optional
+	CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,9,opt,name=collisionCount"`
+
+	// Represents the latest available observations of a DaemonSet's current state.
+	// +optional
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	Conditions []DaemonSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"`
+}
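Taken together these counters let a client decide whether an update has converged. A rough sketch of such a check, under the assumption that comparing the updated/available counts against desiredNumberScheduled and checking the observed generation is sufficient (the real kubectl rollout logic is more involved):

	// daemonSetRolledOut is an illustrative convergence check, not the
	// exact rule used by kubectl rollout status.
	func daemonSetRolledOut(ds *DaemonSet) bool {
		s := ds.Status
		return s.ObservedGeneration >= ds.Generation &&
			s.UpdatedNumberScheduled == s.DesiredNumberScheduled &&
			s.NumberAvailable == s.DesiredNumberScheduled
	}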
+
+type DaemonSetConditionType string
+
+// TODO: Add valid condition types of a DaemonSet.
+
+// DaemonSetCondition describes the state of a DaemonSet at a certain point.
+type DaemonSetCondition struct {
+	// Type of DaemonSet condition.
+	Type DaemonSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DaemonSetConditionType"`
+	// Status of the condition, one of True, False, Unknown.
+	Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"`
+	// Last time the condition transitioned from one status to another.
+	// +optional
+	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
+	// The reason for the condition's last transition.
+	// +optional
+	Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
+	// A human readable message indicating details about the transition.
+	// +optional
+	Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// DEPRECATED - This group version of DaemonSet is deprecated by apps/v1/DaemonSet. See the release notes for
+// more information.
+// DaemonSet represents the configuration of a daemon set.
+type DaemonSet struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// The desired behavior of this daemon set.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+	// +optional
+	Spec DaemonSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+	// The current status of this daemon set. This data may be
+	// out of date by some window of time.
+	// Populated by the system.
+	// Read-only.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+	// +optional
+	Status DaemonSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+const (
+	// DefaultDaemonSetUniqueLabelKey is the default label key that is added
+	// to existing DaemonSet pods to distinguish between old and new
+	// DaemonSet pods during DaemonSet template updates.
+	DefaultDaemonSetUniqueLabelKey = ControllerRevisionHashLabelKey
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// DaemonSetList is a collection of daemon sets.
+type DaemonSetList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// A list of daemon sets.
+	Items []DaemonSet `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1/ReplicaSet. See the release notes for
+// more information.
+// ReplicaSet ensures that a specified number of pod replicas are running at any given time.
+type ReplicaSet struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// If the Labels of a ReplicaSet are empty, they are defaulted to
+	// be the same as the Pod(s) that the ReplicaSet manages.
+	// Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Spec defines the specification of the desired behavior of the ReplicaSet.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+	// +optional
+	Spec ReplicaSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+	// Status is the most recently observed status of the ReplicaSet.
+	// This data may be out of date by some window of time.
+	// Populated by the system.
+	// Read-only.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+	// +optional
+	Status ReplicaSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ReplicaSetList is a collection of ReplicaSets.
+type ReplicaSetList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// List of ReplicaSets.
+	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller
+	Items []ReplicaSet `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// ReplicaSetSpec is the specification of a ReplicaSet.
+type ReplicaSetSpec struct {
+	// Replicas is the number of desired replicas.
+	// This is a pointer to distinguish between explicit zero and unspecified.
+	// Defaults to 1.
+	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
+	// +optional
+	Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
+
+	// Minimum number of seconds for which a newly created pod should be ready
+	// without any of its container crashing, for it to be considered available.
+	// Defaults to 0 (pod will be considered available as soon as it is ready)
+	// +optional
+	MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"`
+
+	// Selector is a label query over pods that should match the replica count.
+	// Label keys and values that must match in order to be controlled by this replica set.
+	// It must match the pod template's labels.
+	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+	Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"`
+
+	// Template is the object that describes the pod that will be created if
+	// insufficient replicas are detected.
+	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
+	// +optional
+	Template v1.PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"`
+}
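Because Selector is required and must match the template's labels, the two are normally built from the same map. A minimal illustrative sketch (the "app": "demo" label is hypothetical):

	// Illustrative only: selector and template labels built from one map.
	labels := map[string]string{"app": "demo"}
	replicas := int32(3)
	spec := ReplicaSetSpec{
		Replicas: &replicas,
		Selector: &metav1.LabelSelector{MatchLabels: labels},
		Template: v1.PodTemplateSpec{
			ObjectMeta: metav1.ObjectMeta{Labels: labels}, // must match Selector
		},
	}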
+
+// ReplicaSetStatus represents the current status of a ReplicaSet.
+type ReplicaSetStatus struct {
+	// Replicas is the most recently observed number of replicas.
+	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
+	Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"`
+
+	// The number of pods that have labels matching the labels of the pod template of the replicaset.
+	// +optional
+	FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty" protobuf:"varint,2,opt,name=fullyLabeledReplicas"`
+
+	// The number of ready replicas for this replica set.
+	// +optional
+	ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,4,opt,name=readyReplicas"`
+
+	// The number of available replicas (ready for at least minReadySeconds) for this replica set.
+	// +optional
+	AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,5,opt,name=availableReplicas"`
+
+	// ObservedGeneration reflects the generation of the most recently observed ReplicaSet.
+	// +optional
+	ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"`
+
+	// Represents the latest available observations of a replica set's current state.
+	// +optional
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	Conditions []ReplicaSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"`
+}
+
+type ReplicaSetConditionType string
+
+// These are valid conditions of a replica set.
+const (
+	// ReplicaSetReplicaFailure is added in a replica set when one of its pods fails to be created
+	// due to insufficient quota, limit ranges, pod security policy, node selectors, etc. or deleted
+	// due to kubelet being down or finalizers failing.
+	ReplicaSetReplicaFailure ReplicaSetConditionType = "ReplicaFailure"
+)
+
+// ReplicaSetCondition describes the state of a replica set at a certain point.
+type ReplicaSetCondition struct {
+	// Type of replica set condition.
+	Type ReplicaSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ReplicaSetConditionType"`
+	// Status of the condition, one of True, False, Unknown.
+	Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"`
+	// The last time the condition transitioned from one status to another.
+	// +optional
+	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
+	// The reason for the condition's last transition.
+	// +optional
+	Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
+	// A human readable message indicating details about the transition.
+	// +optional
+	Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1/ControllerRevision. See the
+// release notes for more information.
+// ControllerRevision implements an immutable snapshot of state data. Clients
+// are responsible for serializing and deserializing the objects that contain
+// their internal state.
+// Once a ControllerRevision has been successfully created, it cannot be updated.
+// The API Server will fail validation of all requests that attempt to mutate
+// the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both
+// the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However,
+// it may be subject to name and representation changes in future releases, and clients should not
+// depend on its stability. It is primarily for internal use by controllers.
+type ControllerRevision struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Data is the serialized representation of the state.
+	Data runtime.RawExtension `json:"data,omitempty" protobuf:"bytes,2,opt,name=data"`
+
+	// Revision indicates the revision of the state represented by Data.
+	Revision int64 `json:"revision" protobuf:"varint,3,opt,name=revision"`
+}
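Since Data is an opaque runtime.RawExtension, the owning controller serializes its state itself, typically to JSON. A hedged sketch (the payload shape is hypothetical; encoding/json is assumed to be imported):

	// Illustrative only: snapshot some controller state into a revision.
	raw, err := json.Marshal(map[string]string{"templateHash": "abc123"}) // hypothetical payload
	if err != nil {
		panic(err) // illustrative; real code would propagate the error
	}
	rev := ControllerRevision{
		Data:     runtime.RawExtension{Raw: raw},
		Revision: 7,
	}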
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ControllerRevisionList is a resource containing a list of ControllerRevision objects.
+type ControllerRevisionList struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is the list of ControllerRevisions.
+	Items []ControllerRevision `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
diff --git a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go
new file mode 100644
index 0000000..f8229ce
--- /dev/null
+++ b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go
@@ -0,0 +1,396 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta2
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored by the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple lines or blocks that you want to ignore, use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_ControllerRevision = map[string]string{
+	"":         "DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1/ControllerRevision. See the release notes for more information. ControllerRevision implements an immutable snapshot of state data. Clients are responsible for serializing and deserializing the objects that contain their internal state. Once a ControllerRevision has been successfully created, it can not be updated. The API Server will fail validation of all requests that attempt to mutate the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, it may be subject to name and representation changes in future releases, and clients should not depend on its stability. It is primarily for internal use by controllers.",
+	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"data":     "Data is the serialized representation of the state.",
+	"revision": "Revision indicates the revision of the state represented by Data.",
+}
+
+func (ControllerRevision) SwaggerDoc() map[string]string {
+	return map_ControllerRevision
+}
+
+var map_ControllerRevisionList = map[string]string{
+	"":         "ControllerRevisionList is a resource containing a list of ControllerRevision objects.",
+	"metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"items":    "Items is the list of ControllerRevisions",
+}
+
+func (ControllerRevisionList) SwaggerDoc() map[string]string {
+	return map_ControllerRevisionList
+}
+
+var map_DaemonSet = map[string]string{
+	"":         "DEPRECATED - This group version of DaemonSet is deprecated by apps/v1/DaemonSet. See the release notes for more information. DaemonSet represents the configuration of a daemon set.",
+	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"spec":     "The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+	"status":   "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+}
+
+func (DaemonSet) SwaggerDoc() map[string]string {
+	return map_DaemonSet
+}
+
+var map_DaemonSetCondition = map[string]string{
+	"":                   "DaemonSetCondition describes the state of a DaemonSet at a certain point.",
+	"type":               "Type of DaemonSet condition.",
+	"status":             "Status of the condition, one of True, False, Unknown.",
+	"lastTransitionTime": "Last time the condition transitioned from one status to another.",
+	"reason":             "The reason for the condition's last transition.",
+	"message":            "A human readable message indicating details about the transition.",
+}
+
+func (DaemonSetCondition) SwaggerDoc() map[string]string {
+	return map_DaemonSetCondition
+}
+
+var map_DaemonSetList = map[string]string{
+	"":         "DaemonSetList is a collection of daemon sets.",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"items":    "A list of daemon sets.",
+}
+
+func (DaemonSetList) SwaggerDoc() map[string]string {
+	return map_DaemonSetList
+}
+
+var map_DaemonSetSpec = map[string]string{
+	"":                     "DaemonSetSpec is the specification of a daemon set.",
+	"selector":             "A label query over pods that are managed by the daemon set. Must match in order to be controlled. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
+	"template":             "An object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template",
+	"updateStrategy":       "An update strategy to replace existing DaemonSet pods with new pods.",
+	"minReadySeconds":      "The minimum number of seconds for which a newly created DaemonSet pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).",
+	"revisionHistoryLimit": "The number of old history to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.",
+}
+
+func (DaemonSetSpec) SwaggerDoc() map[string]string {
+	return map_DaemonSetSpec
+}
+
+var map_DaemonSetStatus = map[string]string{
+	"":                       "DaemonSetStatus represents the current status of a daemon set.",
+	"currentNumberScheduled": "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",
+	"numberMisscheduled":     "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",
+	"desiredNumberScheduled": "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",
+	"numberReady":            "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready.",
+	"observedGeneration":     "The most recent generation observed by the daemon set controller.",
+	"updatedNumberScheduled": "The total number of nodes that are running updated daemon pod",
+	"numberAvailable":        "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available (ready for at least spec.minReadySeconds)",
+	"numberUnavailable":      "The number of nodes that should be running the daemon pod and have none of the daemon pod running and available (ready for at least spec.minReadySeconds)",
+	"collisionCount":         "Count of hash collisions for the DaemonSet. The DaemonSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.",
+	"conditions":             "Represents the latest available observations of a DaemonSet's current state.",
+}
+
+func (DaemonSetStatus) SwaggerDoc() map[string]string {
+	return map_DaemonSetStatus
+}
+
+var map_DaemonSetUpdateStrategy = map[string]string{
+	"":              "DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet.",
+	"type":          "Type of daemon set update. Can be \"RollingUpdate\" or \"OnDelete\". Default is RollingUpdate.",
+	"rollingUpdate": "Rolling update config params. Present only if type = \"RollingUpdate\".",
+}
+
+func (DaemonSetUpdateStrategy) SwaggerDoc() map[string]string {
+	return map_DaemonSetUpdateStrategy
+}
+
+var map_Deployment = map[string]string{
+	"":         "DEPRECATED - This group version of Deployment is deprecated by apps/v1/Deployment. See the release notes for more information. Deployment enables declarative updates for Pods and ReplicaSets.",
+	"metadata": "Standard object metadata.",
+	"spec":     "Specification of the desired behavior of the Deployment.",
+	"status":   "Most recently observed status of the Deployment.",
+}
+
+func (Deployment) SwaggerDoc() map[string]string {
+	return map_Deployment
+}
+
+var map_DeploymentCondition = map[string]string{
+	"":                   "DeploymentCondition describes the state of a deployment at a certain point.",
+	"type":               "Type of deployment condition.",
+	"status":             "Status of the condition, one of True, False, Unknown.",
+	"lastUpdateTime":     "The last time this condition was updated.",
+	"lastTransitionTime": "Last time the condition transitioned from one status to another.",
+	"reason":             "The reason for the condition's last transition.",
+	"message":            "A human readable message indicating details about the transition.",
+}
+
+func (DeploymentCondition) SwaggerDoc() map[string]string {
+	return map_DeploymentCondition
+}
+
+var map_DeploymentList = map[string]string{
+	"":         "DeploymentList is a list of Deployments.",
+	"metadata": "Standard list metadata.",
+	"items":    "Items is the list of Deployments.",
+}
+
+func (DeploymentList) SwaggerDoc() map[string]string {
+	return map_DeploymentList
+}
+
+var map_DeploymentSpec = map[string]string{
+	"":                        "DeploymentSpec is the specification of the desired behavior of the Deployment.",
+	"replicas":                "Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.",
+	"selector":                "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment. It must match the pod template's labels.",
+	"template":                "Template describes the pods that will be created.",
+	"strategy":                "The deployment strategy to use to replace existing pods with new ones.",
+	"minReadySeconds":         "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)",
+	"revisionHistoryLimit":    "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.",
+	"paused":                  "Indicates that the deployment is paused.",
+	"progressDeadlineSeconds": "The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. Defaults to 600s.",
+}
+
+func (DeploymentSpec) SwaggerDoc() map[string]string {
+	return map_DeploymentSpec
+}
+
+var map_DeploymentStatus = map[string]string{
+	"":                    "DeploymentStatus is the most recently observed status of the Deployment.",
+	"observedGeneration":  "The generation observed by the deployment controller.",
+	"replicas":            "Total number of non-terminated pods targeted by this deployment (their labels match the selector).",
+	"updatedReplicas":     "Total number of non-terminated pods targeted by this deployment that have the desired template spec.",
+	"readyReplicas":       "Total number of ready pods targeted by this deployment.",
+	"availableReplicas":   "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.",
+	"unavailableReplicas": "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.",
+	"conditions":          "Represents the latest available observations of a deployment's current state.",
+	"collisionCount":      "Count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.",
+}
+
+func (DeploymentStatus) SwaggerDoc() map[string]string {
+	return map_DeploymentStatus
+}
+
+var map_DeploymentStrategy = map[string]string{
+	"":              "DeploymentStrategy describes how to replace existing pods with new ones.",
+	"type":          "Type of deployment. Can be \"Recreate\" or \"RollingUpdate\". Default is RollingUpdate.",
+	"rollingUpdate": "Rolling update config params. Present only if DeploymentStrategyType = RollingUpdate.",
+}
+
+func (DeploymentStrategy) SwaggerDoc() map[string]string {
+	return map_DeploymentStrategy
+}
+
+var map_ReplicaSet = map[string]string{
+	"":         "DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1/ReplicaSet. See the release notes for more information. ReplicaSet ensures that a specified number of pod replicas are running at any given time.",
+	"metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"spec":     "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+	"status":   "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+}
+
+func (ReplicaSet) SwaggerDoc() map[string]string {
+	return map_ReplicaSet
+}
+
+var map_ReplicaSetCondition = map[string]string{
+	"":                   "ReplicaSetCondition describes the state of a replica set at a certain point.",
+	"type":               "Type of replica set condition.",
+	"status":             "Status of the condition, one of True, False, Unknown.",
+	"lastTransitionTime": "The last time the condition transitioned from one status to another.",
+	"reason":             "The reason for the condition's last transition.",
+	"message":            "A human readable message indicating details about the transition.",
+}
+
+func (ReplicaSetCondition) SwaggerDoc() map[string]string {
+	return map_ReplicaSetCondition
+}
+
+var map_ReplicaSetList = map[string]string{
+	"":         "ReplicaSetList is a collection of ReplicaSets.",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
+	"items":    "List of ReplicaSets. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller",
+}
+
+func (ReplicaSetList) SwaggerDoc() map[string]string {
+	return map_ReplicaSetList
+}
+
+var map_ReplicaSetSpec = map[string]string{
+	"":                "ReplicaSetSpec is the specification of a ReplicaSet.",
+	"replicas":        "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller",
+	"minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)",
+	"selector":        "Selector is a label query over pods that should match the replica count. Label keys and values that must match in order to be controlled by this replica set. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
+	"template":        "Template is the object that describes the pod that will be created if insufficient replicas are detected. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template",
+}
+
+func (ReplicaSetSpec) SwaggerDoc() map[string]string {
+	return map_ReplicaSetSpec
+}
+
+var map_ReplicaSetStatus = map[string]string{
+	"":                     "ReplicaSetStatus represents the current status of a ReplicaSet.",
+	"replicas":             "Replicas is the most recently oberved number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller",
+	"fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replicaset.",
+	"readyReplicas":        "The number of ready replicas for this replica set.",
+	"availableReplicas":    "The number of available replicas (ready for at least minReadySeconds) for this replica set.",
+	"observedGeneration":   "ObservedGeneration reflects the generation of the most recently observed ReplicaSet.",
+	"conditions":           "Represents the latest available observations of a replica set's current state.",
+}
+
+func (ReplicaSetStatus) SwaggerDoc() map[string]string {
+	return map_ReplicaSetStatus
+}
+
+var map_RollingUpdateDaemonSet = map[string]string{
+	"":               "Spec to control the desired behavior of daemon set rolling update.",
+	"maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0. Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.",
+}
+
+func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string {
+	return map_RollingUpdateDaemonSet
+}
+
+var map_RollingUpdateDeployment = map[string]string{
+	"":               "Spec to control the desired behavior of rolling update.",
+	"maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. Defaults to 25%. Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old ReplicaSet can be scaled down further, followed by scaling up the new ReplicaSet, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.",
+	"maxSurge":       "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new ReplicaSet can be scaled up further, ensuring that total number of pods running at any time during the update is atmost 130% of desired pods.",
+}
+
+func (RollingUpdateDeployment) SwaggerDoc() map[string]string {
+	return map_RollingUpdateDeployment
+}
+
+var map_RollingUpdateStatefulSetStrategy = map[string]string{
+	"":          "RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.",
+	"partition": "Partition indicates the ordinal at which the StatefulSet should be partitioned. Default value is 0.",
+}
+
+func (RollingUpdateStatefulSetStrategy) SwaggerDoc() map[string]string {
+	return map_RollingUpdateStatefulSetStrategy
+}
+
+var map_Scale = map[string]string{
+	"":         "Scale represents a scaling request for a resource.",
+	"metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.",
+	"spec":     "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.",
+	"status":   "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only.",
+}
+
+func (Scale) SwaggerDoc() map[string]string {
+	return map_Scale
+}
+
+var map_ScaleSpec = map[string]string{
+	"":         "ScaleSpec describes the attributes of a scale subresource",
+	"replicas": "desired number of instances for the scaled object.",
+}
+
+func (ScaleSpec) SwaggerDoc() map[string]string {
+	return map_ScaleSpec
+}
+
+var map_ScaleStatus = map[string]string{
+	"":               "ScaleStatus represents the current status of a scale subresource.",
+	"replicas":       "actual number of observed instances of the scaled object.",
+	"selector":       "label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors",
+	"targetSelector": "label selector for pods that should match the replicas count. This is a serializated version of both map-based and more expressive set-based selectors. This is done to avoid introspection in the clients. The string will be in the same format as the query-param syntax. If the target type only supports map-based selectors, both this field and map-based selector field are populated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
+}
+
+func (ScaleStatus) SwaggerDoc() map[string]string {
+	return map_ScaleStatus
+}
+
+var map_StatefulSet = map[string]string{
+	"":       "DEPRECATED - This group version of StatefulSet is deprecated by apps/v1/StatefulSet. See the release notes for more information. StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.",
+	"spec":   "Spec defines the desired identities of pods in this set.",
+	"status": "Status is the current status of Pods in this StatefulSet. This data may be out of date by some window of time.",
+}
+
+func (StatefulSet) SwaggerDoc() map[string]string {
+	return map_StatefulSet
+}
+
+var map_StatefulSetCondition = map[string]string{
+	"":                   "StatefulSetCondition describes the state of a statefulset at a certain point.",
+	"type":               "Type of statefulset condition.",
+	"status":             "Status of the condition, one of True, False, Unknown.",
+	"lastTransitionTime": "Last time the condition transitioned from one status to another.",
+	"reason":             "The reason for the condition's last transition.",
+	"message":            "A human readable message indicating details about the transition.",
+}
+
+func (StatefulSetCondition) SwaggerDoc() map[string]string {
+	return map_StatefulSetCondition
+}
+
+var map_StatefulSetList = map[string]string{
+	"": "StatefulSetList is a collection of StatefulSets.",
+}
+
+func (StatefulSetList) SwaggerDoc() map[string]string {
+	return map_StatefulSetList
+}
+
+var map_StatefulSetSpec = map[string]string{
+	"":                     "A StatefulSetSpec is the specification of a StatefulSet.",
+	"replicas":             "replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.",
+	"selector":             "selector is a label query over pods that should match the replica count. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
+	"template":             "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet.",
+	"volumeClaimTemplates": "volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.",
+	"serviceName":          "serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.",
+	"podManagementPolicy":  "podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.",
+	"updateStrategy":       "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.",
+	"revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.",
+}
+
+func (StatefulSetSpec) SwaggerDoc() map[string]string {
+	return map_StatefulSetSpec
+}
+
+var map_StatefulSetStatus = map[string]string{
+	"":                   "StatefulSetStatus represents the current state of a StatefulSet.",
+	"observedGeneration": "observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the StatefulSet's generation, which is updated on mutation by the API Server.",
+	"replicas":           "replicas is the number of Pods created by the StatefulSet controller.",
+	"readyReplicas":      "readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition.",
+	"currentReplicas":    "currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by currentRevision.",
+	"updatedReplicas":    "updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by updateRevision.",
+	"currentRevision":    "currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas).",
+	"updateRevision":     "updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas)",
+	"collisionCount":     "collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.",
+	"conditions":         "Represents the latest available observations of a statefulset's current state.",
+}
+
+func (StatefulSetStatus) SwaggerDoc() map[string]string {
+	return map_StatefulSetStatus
+}
+
+var map_StatefulSetUpdateStrategy = map[string]string{
+	"":              "StatefulSetUpdateStrategy indicates the strategy that the StatefulSet controller will use to perform updates. It includes any additional parameters necessary to perform the update for the indicated strategy.",
+	"type":          "Type indicates the type of the StatefulSetUpdateStrategy. Default is RollingUpdate.",
+	"rollingUpdate": "RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType.",
+}
+
+func (StatefulSetUpdateStrategy) SwaggerDoc() map[string]string {
+	return map_StatefulSetUpdateStrategy
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
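These maps are keyed by JSON field name, with the empty key holding the type-level description, so documentation tooling can look things up without reflection. For illustration (assumes fmt is imported; not part of the vendored file):

	// Illustrative only: fetch the generated doc strings for one type.
	var ds DeploymentStatus
	docs := ds.SwaggerDoc()
	fmt.Println(docs["availableReplicas"]) // field-level description
	fmt.Println(docs[""])                  // type-level description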
diff --git a/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go b/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go
new file mode 100644
index 0000000..8a0bad2
--- /dev/null
+++ b/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go
@@ -0,0 +1,839 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1beta2
+
+import (
+	corev1 "k8s.io/api/core/v1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	intstr "k8s.io/apimachinery/pkg/util/intstr"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControllerRevision) DeepCopyInto(out *ControllerRevision) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Data.DeepCopyInto(&out.Data)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRevision.
+func (in *ControllerRevision) DeepCopy() *ControllerRevision {
+	if in == nil {
+		return nil
+	}
+	out := new(ControllerRevision)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ControllerRevision) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
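The value of the generated deep copies is that nothing is aliased: mutating the copy leaves the original untouched, including the Raw byte slice inside Data, which the generated RawExtension deepcopy clones rather than shares. An illustrative sketch under that assumption:

	// Illustrative only: a deep copy shares no mutable state.
	orig := &ControllerRevision{
		Revision: 1,
		Data:     runtime.RawExtension{Raw: []byte(`{"phase":"v1"}`)},
	}
	cp := orig.DeepCopy()
	cp.Revision = 2      // orig.Revision is still 1
	cp.Data.Raw[1] = 'x' // orig.Data.Raw is unchanged: the slice was cloned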
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControllerRevisionList) DeepCopyInto(out *ControllerRevisionList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]ControllerRevision, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRevisionList.
+func (in *ControllerRevisionList) DeepCopy() *ControllerRevisionList {
+	if in == nil {
+		return nil
+	}
+	out := new(ControllerRevisionList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ControllerRevisionList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DaemonSet) DeepCopyInto(out *DaemonSet) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSet.
+func (in *DaemonSet) DeepCopy() *DaemonSet {
+	if in == nil {
+		return nil
+	}
+	out := new(DaemonSet)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DaemonSet) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DaemonSetCondition) DeepCopyInto(out *DaemonSetCondition) {
+	*out = *in
+	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetCondition.
+func (in *DaemonSetCondition) DeepCopy() *DaemonSetCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(DaemonSetCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DaemonSetList) DeepCopyInto(out *DaemonSetList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]DaemonSet, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetList.
+func (in *DaemonSetList) DeepCopy() *DaemonSetList {
+	if in == nil {
+		return nil
+	}
+	out := new(DaemonSetList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DaemonSetList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DaemonSetSpec) DeepCopyInto(out *DaemonSetSpec) {
+	*out = *in
+	if in.Selector != nil {
+		in, out := &in.Selector, &out.Selector
+		*out = new(v1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	in.Template.DeepCopyInto(&out.Template)
+	in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy)
+	if in.RevisionHistoryLimit != nil {
+		in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit
+		*out = new(int32)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetSpec.
+func (in *DaemonSetSpec) DeepCopy() *DaemonSetSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(DaemonSetSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DaemonSetStatus) DeepCopyInto(out *DaemonSetStatus) {
+	*out = *in
+	if in.CollisionCount != nil {
+		in, out := &in.CollisionCount, &out.CollisionCount
+		*out = new(int32)
+		**out = **in
+	}
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]DaemonSetCondition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetStatus.
+func (in *DaemonSetStatus) DeepCopy() *DaemonSetStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(DaemonSetStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DaemonSetUpdateStrategy) DeepCopyInto(out *DaemonSetUpdateStrategy) {
+	*out = *in
+	if in.RollingUpdate != nil {
+		in, out := &in.RollingUpdate, &out.RollingUpdate
+		*out = new(RollingUpdateDaemonSet)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetUpdateStrategy.
+func (in *DaemonSetUpdateStrategy) DeepCopy() *DaemonSetUpdateStrategy {
+	if in == nil {
+		return nil
+	}
+	out := new(DaemonSetUpdateStrategy)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Deployment) DeepCopyInto(out *Deployment) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Deployment.
+func (in *Deployment) DeepCopy() *Deployment {
+	if in == nil {
+		return nil
+	}
+	out := new(Deployment)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Deployment) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeploymentCondition) DeepCopyInto(out *DeploymentCondition) {
+	*out = *in
+	in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime)
+	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentCondition.
+func (in *DeploymentCondition) DeepCopy() *DeploymentCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(DeploymentCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeploymentList) DeepCopyInto(out *DeploymentList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Deployment, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentList.
+func (in *DeploymentList) DeepCopy() *DeploymentList {
+	if in == nil {
+		return nil
+	}
+	out := new(DeploymentList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DeploymentList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeploymentSpec) DeepCopyInto(out *DeploymentSpec) {
+	*out = *in
+	if in.Replicas != nil {
+		in, out := &in.Replicas, &out.Replicas
+		*out = new(int32)
+		**out = **in
+	}
+	if in.Selector != nil {
+		in, out := &in.Selector, &out.Selector
+		*out = new(v1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	in.Template.DeepCopyInto(&out.Template)
+	in.Strategy.DeepCopyInto(&out.Strategy)
+	if in.RevisionHistoryLimit != nil {
+		in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit
+		*out = new(int32)
+		**out = **in
+	}
+	if in.ProgressDeadlineSeconds != nil {
+		in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds
+		*out = new(int32)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentSpec.
+func (in *DeploymentSpec) DeepCopy() *DeploymentSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(DeploymentSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeploymentStatus) DeepCopyInto(out *DeploymentStatus) {
+	*out = *in
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]DeploymentCondition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.CollisionCount != nil {
+		in, out := &in.CollisionCount, &out.CollisionCount
+		*out = new(int32)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStatus.
+func (in *DeploymentStatus) DeepCopy() *DeploymentStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(DeploymentStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeploymentStrategy) DeepCopyInto(out *DeploymentStrategy) {
+	*out = *in
+	if in.RollingUpdate != nil {
+		in, out := &in.RollingUpdate, &out.RollingUpdate
+		*out = new(RollingUpdateDeployment)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStrategy.
+func (in *DeploymentStrategy) DeepCopy() *DeploymentStrategy {
+	if in == nil {
+		return nil
+	}
+	out := new(DeploymentStrategy)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplicaSet) DeepCopyInto(out *ReplicaSet) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSet.
+func (in *ReplicaSet) DeepCopy() *ReplicaSet {
+	if in == nil {
+		return nil
+	}
+	out := new(ReplicaSet)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ReplicaSet) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplicaSetCondition) DeepCopyInto(out *ReplicaSetCondition) {
+	*out = *in
+	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetCondition.
+func (in *ReplicaSetCondition) DeepCopy() *ReplicaSetCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(ReplicaSetCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplicaSetList) DeepCopyInto(out *ReplicaSetList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]ReplicaSet, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetList.
+func (in *ReplicaSetList) DeepCopy() *ReplicaSetList {
+	if in == nil {
+		return nil
+	}
+	out := new(ReplicaSetList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ReplicaSetList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplicaSetSpec) DeepCopyInto(out *ReplicaSetSpec) {
+	*out = *in
+	if in.Replicas != nil {
+		in, out := &in.Replicas, &out.Replicas
+		*out = new(int32)
+		**out = **in
+	}
+	if in.Selector != nil {
+		in, out := &in.Selector, &out.Selector
+		*out = new(v1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	in.Template.DeepCopyInto(&out.Template)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetSpec.
+func (in *ReplicaSetSpec) DeepCopy() *ReplicaSetSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ReplicaSetSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplicaSetStatus) DeepCopyInto(out *ReplicaSetStatus) {
+	*out = *in
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]ReplicaSetCondition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetStatus.
+func (in *ReplicaSetStatus) DeepCopy() *ReplicaSetStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ReplicaSetStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RollingUpdateDaemonSet) DeepCopyInto(out *RollingUpdateDaemonSet) {
+	*out = *in
+	if in.MaxUnavailable != nil {
+		in, out := &in.MaxUnavailable, &out.MaxUnavailable
+		*out = new(intstr.IntOrString)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDaemonSet.
+func (in *RollingUpdateDaemonSet) DeepCopy() *RollingUpdateDaemonSet {
+	if in == nil {
+		return nil
+	}
+	out := new(RollingUpdateDaemonSet)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RollingUpdateDeployment) DeepCopyInto(out *RollingUpdateDeployment) {
+	*out = *in
+	if in.MaxUnavailable != nil {
+		in, out := &in.MaxUnavailable, &out.MaxUnavailable
+		*out = new(intstr.IntOrString)
+		**out = **in
+	}
+	if in.MaxSurge != nil {
+		in, out := &in.MaxSurge, &out.MaxSurge
+		*out = new(intstr.IntOrString)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDeployment.
+func (in *RollingUpdateDeployment) DeepCopy() *RollingUpdateDeployment {
+	if in == nil {
+		return nil
+	}
+	out := new(RollingUpdateDeployment)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RollingUpdateStatefulSetStrategy) DeepCopyInto(out *RollingUpdateStatefulSetStrategy) {
+	*out = *in
+	if in.Partition != nil {
+		in, out := &in.Partition, &out.Partition
+		*out = new(int32)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateStatefulSetStrategy.
+func (in *RollingUpdateStatefulSetStrategy) DeepCopy() *RollingUpdateStatefulSetStrategy {
+	if in == nil {
+		return nil
+	}
+	out := new(RollingUpdateStatefulSetStrategy)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Scale) DeepCopyInto(out *Scale) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	out.Spec = in.Spec
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scale.
+func (in *Scale) DeepCopy() *Scale {
+	if in == nil {
+		return nil
+	}
+	out := new(Scale)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Scale) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ScaleSpec) DeepCopyInto(out *ScaleSpec) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleSpec.
+func (in *ScaleSpec) DeepCopy() *ScaleSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ScaleSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ScaleStatus) DeepCopyInto(out *ScaleStatus) {
+	*out = *in
+	if in.Selector != nil {
+		in, out := &in.Selector, &out.Selector
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleStatus.
+func (in *ScaleStatus) DeepCopy() *ScaleStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ScaleStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StatefulSet) DeepCopyInto(out *StatefulSet) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSet.
+func (in *StatefulSet) DeepCopy() *StatefulSet {
+	if in == nil {
+		return nil
+	}
+	out := new(StatefulSet)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *StatefulSet) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StatefulSetCondition) DeepCopyInto(out *StatefulSetCondition) {
+	*out = *in
+	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetCondition.
+func (in *StatefulSetCondition) DeepCopy() *StatefulSetCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(StatefulSetCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StatefulSetList) DeepCopyInto(out *StatefulSetList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]StatefulSet, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetList.
+func (in *StatefulSetList) DeepCopy() *StatefulSetList {
+	if in == nil {
+		return nil
+	}
+	out := new(StatefulSetList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *StatefulSetList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StatefulSetSpec) DeepCopyInto(out *StatefulSetSpec) {
+	*out = *in
+	if in.Replicas != nil {
+		in, out := &in.Replicas, &out.Replicas
+		*out = new(int32)
+		**out = **in
+	}
+	if in.Selector != nil {
+		in, out := &in.Selector, &out.Selector
+		*out = new(v1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	in.Template.DeepCopyInto(&out.Template)
+	if in.VolumeClaimTemplates != nil {
+		in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates
+		*out = make([]corev1.PersistentVolumeClaim, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy)
+	if in.RevisionHistoryLimit != nil {
+		in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit
+		*out = new(int32)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetSpec.
+func (in *StatefulSetSpec) DeepCopy() *StatefulSetSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(StatefulSetSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StatefulSetStatus) DeepCopyInto(out *StatefulSetStatus) {
+	*out = *in
+	if in.CollisionCount != nil {
+		in, out := &in.CollisionCount, &out.CollisionCount
+		*out = new(int32)
+		**out = **in
+	}
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]StatefulSetCondition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetStatus.
+func (in *StatefulSetStatus) DeepCopy() *StatefulSetStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(StatefulSetStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StatefulSetUpdateStrategy) DeepCopyInto(out *StatefulSetUpdateStrategy) {
+	*out = *in
+	if in.RollingUpdate != nil {
+		in, out := &in.RollingUpdate, &out.RollingUpdate
+		*out = new(RollingUpdateStatefulSetStrategy)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetUpdateStrategy.
+func (in *StatefulSetUpdateStrategy) DeepCopy() *StatefulSetUpdateStrategy {
+	if in == nil {
+		return nil
+	}
+	out := new(StatefulSetUpdateStrategy)
+	in.DeepCopyInto(out)
+	return out
+}
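The hunk above vendors autogenerated deepcopy helpers for the apps API group. As a usage sketch (assuming the file vendored here is k8s.io/api/apps/v1beta2, since it carries DaemonSet, ReplicaSet, and Scale together; the import alias and values are illustrative only), DeepCopy allocates fresh pointer targets, so writes through the copy never alias the original:

package main

import (
	"fmt"

	appsv1beta2 "k8s.io/api/apps/v1beta2"
)

func main() {
	replicas := int32(3)
	orig := &appsv1beta2.Deployment{
		Spec: appsv1beta2.DeploymentSpec{Replicas: &replicas},
	}

	// DeepCopyInto clones pointer fields such as Spec.Replicas into new
	// allocations, so mutating the copy does not touch the original.
	cp := orig.DeepCopy()
	*cp.Spec.Replicas = 5

	fmt.Println(*orig.Spec.Replicas, *cp.Spec.Replicas) // 3 5
}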
diff --git a/vendor/k8s.io/api/auditregistration/v1alpha1/doc.go b/vendor/k8s.io/api/auditregistration/v1alpha1/doc.go
new file mode 100644
index 0000000..c0d184a
--- /dev/null
+++ b/vendor/k8s.io/api/auditregistration/v1alpha1/doc.go
@@ -0,0 +1,22 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:openapi-gen=true
+
+// +groupName=auditregistration.k8s.io
+
+package v1alpha1 // import "k8s.io/api/auditregistration/v1alpha1"
diff --git a/vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto b/vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto
new file mode 100644
index 0000000..70801a6
--- /dev/null
+++ b/vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto
@@ -0,0 +1,158 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.api.auditregistration.v1alpha1;
+
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1alpha1";
+
+// AuditSink represents a cluster-level audit sink
+message AuditSink {
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Spec defines the audit configuration spec
+  optional AuditSinkSpec spec = 2;
+}
+
+// AuditSinkList is a list of AuditSink items.
+message AuditSinkList {
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // List of audit configurations.
+  repeated AuditSink items = 2;
+}
+
+// AuditSinkSpec holds the spec for the audit sink
+message AuditSinkSpec {
+  // Policy defines the policy for selecting which events should be sent to the webhook
+  // required
+  optional Policy policy = 1;
+
+  // Webhook to send events
+  // required
+  optional Webhook webhook = 2;
+}
+
+// Policy defines the configuration of how audit events are logged
+message Policy {
+  // The Level that all requests are recorded at.
+  // Available options: None, Metadata, Request, RequestResponse
+  // required
+  optional string level = 1;
+
+  // Stages is a list of stages for which events are created.
+  // +optional
+  repeated string stages = 2;
+}
+
+// ServiceReference holds a reference to Service.legacy.k8s.io
+message ServiceReference {
+  // `namespace` is the namespace of the service.
+  // Required
+  optional string namespace = 1;
+
+  // `name` is the name of the service.
+  // Required
+  optional string name = 2;
+
+  // `path` is an optional URL path which will be sent in any request to
+  // this service.
+  // +optional
+  optional string path = 3;
+}
+
+// Webhook holds the configuration of the webhook
+message Webhook {
+  // Throttle holds the options for throttling the webhook
+  // +optional
+  optional WebhookThrottleConfig throttle = 1;
+
+  // ClientConfig holds the connection parameters for the webhook
+  // required
+  optional WebhookClientConfig clientConfig = 2;
+}
+
+// WebhookClientConfig contains the information to make a connection with the webhook
+message WebhookClientConfig {
+  // `url` gives the location of the webhook, in standard URL form
+  // (`scheme://host:port/path`). Exactly one of `url` or `service`
+  // must be specified.
+  //
+  // The `host` should not refer to a service running in the cluster; use
+  // the `service` field instead. The host might be resolved via external
+  // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve
+  // in-cluster DNS as that would be a layering violation). `host` may
+  // also be an IP address.
+  //
+  // Please note that using `localhost` or `127.0.0.1` as a `host` is
+  // risky unless you take great care to run this webhook on all hosts
+  // which run an apiserver which might need to make calls to this
+  // webhook. Such installs are likely to be non-portable, i.e., not easy
+  // to turn up in a new cluster.
+  //
+  // The scheme must be "https"; the URL must begin with "https://".
+  //
+  // A path is optional, and if present may be any string permissible in
+  // a URL. You may use the path to pass an arbitrary string to the
+  // webhook, for example, a cluster identifier.
+  //
+  // Attempting to use a user or basic auth e.g. "user:password@" is not
+  // allowed. Fragments ("#...") and query parameters ("?...") are not
+  // allowed, either.
+  //
+  // +optional
+  optional string url = 1;
+
+  // `service` is a reference to the service for this webhook. Either
+  // `service` or `url` must be specified.
+  //
+  // If the webhook is running within the cluster, then you should use `service`.
+  //
+  // Port 443 will be used if it is open, otherwise it is an error.
+  //
+  // +optional
+  optional ServiceReference service = 2;
+
+  // `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate.
+  // If unspecified, system trust roots on the apiserver are used.
+  // +optional
+  optional bytes caBundle = 3;
+}
+
+// WebhookThrottleConfig holds the configuration for throttling events
+message WebhookThrottleConfig {
+  // ThrottleQPS is the maximum number of batches per second
+  // default 10 QPS
+  // +optional
+  optional int64 qps = 1;
+
+  // ThrottleBurst is the maximum number of events sent at the same moment
+  // default 15
+  // +optional
+  optional int64 burst = 2;
+}
+
diff --git a/vendor/k8s.io/api/auditregistration/v1alpha1/register.go b/vendor/k8s.io/api/auditregistration/v1alpha1/register.go
new file mode 100644
index 0000000..d627160
--- /dev/null
+++ b/vendor/k8s.io/api/auditregistration/v1alpha1/register.go
@@ -0,0 +1,56 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "auditregistration.k8s.io"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	SchemeBuilder      runtime.SchemeBuilder
+	localSchemeBuilder = &SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+)
+
+func init() {
+	// We only register manually written functions here. The registration of the
+	// generated functions takes place in the generated files. The separation
+	// makes the code compile even when the generated files are missing.
+	localSchemeBuilder.Register(addKnownTypes)
+}
+
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&AuditSink{},
+		&AuditSinkList{},
+	)
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
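As a minimal sketch of how this registration is consumed (the scheme construction and kind lookup below are illustrative, not part of this change), AddToScheme wires the AuditSink types into a runtime.Scheme so they can be resolved to their GroupVersionKind:

package main

import (
	"fmt"

	auditregv1alpha1 "k8s.io/api/auditregistration/v1alpha1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	if err := auditregv1alpha1.AddToScheme(scheme); err != nil {
		panic(err)
	}

	// The scheme can now resolve the kind for a registered Go type.
	gvks, _, err := scheme.ObjectKinds(&auditregv1alpha1.AuditSink{})
	if err != nil {
		panic(err)
	}
	fmt.Println(gvks[0]) // auditregistration.k8s.io/v1alpha1, Kind=AuditSink
}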
diff --git a/vendor/k8s.io/api/auditregistration/v1alpha1/types.go b/vendor/k8s.io/api/auditregistration/v1alpha1/types.go
new file mode 100644
index 0000000..af31cfe
--- /dev/null
+++ b/vendor/k8s.io/api/auditregistration/v1alpha1/types.go
@@ -0,0 +1,194 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:openapi-gen=true
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// Level defines the amount of information logged during auditing
+type Level string
+
+// Valid audit levels
+const (
+	// LevelNone disables auditing
+	LevelNone Level = "None"
+	// LevelMetadata provides the basic level of auditing.
+	LevelMetadata Level = "Metadata"
+	// LevelRequest provides Metadata level of auditing, and additionally
+	// logs the request object (does not apply for non-resource requests).
+	LevelRequest Level = "Request"
+	// LevelRequestResponse provides Request level of auditing, and additionally
+	// logs the response object (does not apply for non-resource requests and watches).
+	LevelRequestResponse Level = "RequestResponse"
+)
+
+// Stage defines the stages in request handling during which audit events may be generated.
+type Stage string
+
+// Valid audit stages.
+const (
+	// The stage for events generated after the audit handler receives the request, but before it
+	// is delegated down the handler chain.
+	StageRequestReceived = "RequestReceived"
+	// The stage for events generated after the response headers are sent, but before the response body
+	// is sent. This stage is only generated for long-running requests (e.g. watch).
+	StageResponseStarted = "ResponseStarted"
+	// The stage for events generated after the response body has been completed, and no more bytes
+	// will be sent.
+	StageResponseComplete = "ResponseComplete"
+	// The stage for events generated when a panic occurred.
+	StagePanic = "Panic"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// AuditSink represents a cluster-level audit sink
+type AuditSink struct {
+	metav1.TypeMeta `json:",inline"`
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Spec defines the audit configuration spec
+	Spec AuditSinkSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+}
+
+// AuditSinkSpec holds the spec for the audit sink
+type AuditSinkSpec struct {
+	// Policy defines the policy for selecting which events should be sent to the webhook
+	// required
+	Policy Policy `json:"policy" protobuf:"bytes,1,opt,name=policy"`
+
+	// Webhook to send events
+	// required
+	Webhook Webhook `json:"webhook" protobuf:"bytes,2,opt,name=webhook"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// AuditSinkList is a list of AuditSink items.
+type AuditSinkList struct {
+	metav1.TypeMeta `json:",inline"`
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// List of audit configurations.
+	Items []AuditSink `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// Policy defines the configuration of how audit events are logged
+type Policy struct {
+	// The Level that all requests are recorded at.
+	// Available options: None, Metadata, Request, RequestResponse
+	// required
+	Level Level `json:"level" protobuf:"bytes,1,opt,name=level"`
+
+	// Stages is a list of stages for which events are created.
+	// +optional
+	Stages []Stage `json:"stages" protobuf:"bytes,2,opt,name=stages"`
+}
+
+// Webhook holds the configuration of the webhook
+type Webhook struct {
+	// Throttle holds the options for throttling the webhook
+	// +optional
+	Throttle *WebhookThrottleConfig `json:"throttle,omitempty" protobuf:"bytes,1,opt,name=throttle"`
+
+	// ClientConfig holds the connection parameters for the webhook
+	// required
+	ClientConfig WebhookClientConfig `json:"clientConfig" protobuf:"bytes,2,opt,name=clientConfig"`
+}
+
+// WebhookThrottleConfig holds the configuration for throttling events
+type WebhookThrottleConfig struct {
+	// ThrottleQPS is the maximum number of batches per second
+	// default 10 QPS
+	// +optional
+	QPS *int64 `json:"qps,omitempty" protobuf:"bytes,1,opt,name=qps"`
+
+	// ThrottleBurst is the maximum number of events sent at the same moment
+	// default 15
+	// +optional
+	Burst *int64 `json:"burst,omitempty" protobuf:"bytes,2,opt,name=burst"`
+}
+
+// WebhookClientConfig contains the information to make a connection with the webhook
+type WebhookClientConfig struct {
+	// `url` gives the location of the webhook, in standard URL form
+	// (`scheme://host:port/path`). Exactly one of `url` or `service`
+	// must be specified.
+	//
+	// The `host` should not refer to a service running in the cluster; use
+	// the `service` field instead. The host might be resolved via external
+	// DNS in some apiservers (e.g., `kube-apiserver` cannot resolve
+	// in-cluster DNS as that would be a layering violation). `host` may
+	// also be an IP address.
+	//
+	// Please note that using `localhost` or `127.0.0.1` as a `host` is
+	// risky unless you take great care to run this webhook on all hosts
+	// which run an apiserver which might need to make calls to this
+	// webhook. Such installs are likely to be non-portable, i.e., not easy
+	// to turn up in a new cluster.
+	//
+	// The scheme must be "https"; the URL must begin with "https://".
+	//
+	// A path is optional, and if present may be any string permissible in
+	// a URL. You may use the path to pass an arbitrary string to the
+	// webhook, for example, a cluster identifier.
+	//
+	// Attempting to use a user or basic auth e.g. "user:password@" is not
+	// allowed. Fragments ("#...") and query parameters ("?...") are not
+	// allowed, either.
+	//
+	// +optional
+	URL *string `json:"url,omitempty" protobuf:"bytes,1,opt,name=url"`
+
+	// `service` is a reference to the service for this webhook. Either
+	// `service` or `url` must be specified.
+	//
+	// If the webhook is running within the cluster, then you should use `service`.
+	//
+	// Port 443 will be used if it is open, otherwise it is an error.
+	//
+	// +optional
+	Service *ServiceReference `json:"service,omitempty" protobuf:"bytes,2,opt,name=service"`
+
+	// `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate.
+	// If unspecified, system trust roots on the apiserver are used.
+	// +optional
+	CABundle []byte `json:"caBundle,omitempty" protobuf:"bytes,3,opt,name=caBundle"`
+}
+
+// ServiceReference holds a reference to Service.legacy.k8s.io
+type ServiceReference struct {
+	// `namespace` is the namespace of the service.
+	// Required
+	Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"`
+
+	// `name` is the name of the service.
+	// Required
+	Name string `json:"name" protobuf:"bytes,2,opt,name=name"`
+
+	// `path` is an optional URL path which will be sent in any request to
+	// this service.
+	// +optional
+	Path *string `json:"path,omitempty" protobuf:"bytes,3,opt,name=path"`
+}
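A minimal construction sketch for these types (the sink name and webhook URL are hypothetical; per the comments above, only Policy and Webhook.ClientConfig are required):

package main

import (
	"fmt"

	auditregv1alpha1 "k8s.io/api/auditregistration/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	url := "https://audit.example.com/events" // hypothetical endpoint; the scheme must be https
	qps := int64(10)

	sink := auditregv1alpha1.AuditSink{
		ObjectMeta: metav1.ObjectMeta{Name: "example-sink"},
		Spec: auditregv1alpha1.AuditSinkSpec{
			Policy: auditregv1alpha1.Policy{
				Level:  auditregv1alpha1.LevelMetadata,
				Stages: []auditregv1alpha1.Stage{auditregv1alpha1.StageResponseComplete},
			},
			Webhook: auditregv1alpha1.Webhook{
				// Throttle is optional; the defaults apply when it is nil.
				Throttle:     &auditregv1alpha1.WebhookThrottleConfig{QPS: &qps},
				ClientConfig: auditregv1alpha1.WebhookClientConfig{URL: &url},
			},
		},
	}
	fmt.Println(sink.Name, sink.Spec.Policy.Level) // example-sink Metadata
}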
diff --git a/vendor/k8s.io/api/auditregistration/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/auditregistration/v1alpha1/types_swagger_doc_generated.go
new file mode 100644
index 0000000..edd608f
--- /dev/null
+++ b/vendor/k8s.io/api/auditregistration/v1alpha1/types_swagger_doc_generated.go
@@ -0,0 +1,110 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored by the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple lines or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_AuditSink = map[string]string{
+	"":     "AuditSink represents a cluster level audit sink",
+	"spec": "Spec defines the audit configuration spec",
+}
+
+func (AuditSink) SwaggerDoc() map[string]string {
+	return map_AuditSink
+}
+
+var map_AuditSinkList = map[string]string{
+	"":      "AuditSinkList is a list of AuditSink items.",
+	"items": "List of audit configurations.",
+}
+
+func (AuditSinkList) SwaggerDoc() map[string]string {
+	return map_AuditSinkList
+}
+
+var map_AuditSinkSpec = map[string]string{
+	"":        "AuditSinkSpec holds the spec for the audit sink",
+	"policy":  "Policy defines the policy for selecting which events should be sent to the webhook required",
+	"webhook": "Webhook to send events required",
+}
+
+func (AuditSinkSpec) SwaggerDoc() map[string]string {
+	return map_AuditSinkSpec
+}
+
+var map_Policy = map[string]string{
+	"":       "Policy defines the configuration of how audit events are logged",
+	"level":  "The Level that all requests are recorded at. available options: None, Metadata, Request, RequestResponse required",
+	"stages": "Stages is a list of stages for which events are created.",
+}
+
+func (Policy) SwaggerDoc() map[string]string {
+	return map_Policy
+}
+
+var map_ServiceReference = map[string]string{
+	"":          "ServiceReference holds a reference to Service.legacy.k8s.io",
+	"namespace": "`namespace` is the namespace of the service. Required",
+	"name":      "`name` is the name of the service. Required",
+	"path":      "`path` is an optional URL path which will be sent in any request to this service.",
+}
+
+func (ServiceReference) SwaggerDoc() map[string]string {
+	return map_ServiceReference
+}
+
+var map_Webhook = map[string]string{
+	"":             "Webhook holds the configuration of the webhook",
+	"throttle":     "Throttle holds the options for throttling the webhook",
+	"clientConfig": "ClientConfig holds the connection parameters for the webhook required",
+}
+
+func (Webhook) SwaggerDoc() map[string]string {
+	return map_Webhook
+}
+
+var map_WebhookClientConfig = map[string]string{
+	"":         "WebhookClientConfig contains the information to make a connection with the webhook",
+	"url":      "`url` gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified.\n\nThe `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.\n\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\n\nThe scheme must be \"https\"; the URL must begin with \"https://\".\n\nA path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\n\nAttempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either.",
+	"service":  "`service` is a reference to the service for this webhook. Either `service` or `url` must be specified.\n\nIf the webhook is running within the cluster, then you should use `service`.\n\nPort 443 will be used if it is open, otherwise it is an error.",
+	"caBundle": "`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used.",
+}
+
+func (WebhookClientConfig) SwaggerDoc() map[string]string {
+	return map_WebhookClientConfig
+}
+
+var map_WebhookThrottleConfig = map[string]string{
+	"":      "WebhookThrottleConfig holds the configuration for throttling events",
+	"qps":   "ThrottleQPS maximum number of batches per second default 10 QPS",
+	"burst": "ThrottleBurst is the maximum number of events sent at the same moment default 15 QPS",
+}
+
+func (WebhookThrottleConfig) SwaggerDoc() map[string]string {
+	return map_WebhookThrottleConfig
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
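These maps back the value-receiver SwaggerDoc() methods; a quick lookup sketch (illustrative only):

package main

import (
	"fmt"

	auditregv1alpha1 "k8s.io/api/auditregistration/v1alpha1"
)

func main() {
	// SwaggerDoc returns the field documentation keyed by JSON field name;
	// the empty key holds the type-level description.
	docs := auditregv1alpha1.AuditSink{}.SwaggerDoc()
	fmt.Println(docs[""])
	fmt.Println(docs["spec"])
}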
diff --git a/vendor/k8s.io/api/auditregistration/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/auditregistration/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..e71deff
--- /dev/null
+++ b/vendor/k8s.io/api/auditregistration/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,224 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuditSink) DeepCopyInto(out *AuditSink) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditSink.
+func (in *AuditSink) DeepCopy() *AuditSink {
+	if in == nil {
+		return nil
+	}
+	out := new(AuditSink)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *AuditSink) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuditSinkList) DeepCopyInto(out *AuditSinkList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]AuditSink, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditSinkList.
+func (in *AuditSinkList) DeepCopy() *AuditSinkList {
+	if in == nil {
+		return nil
+	}
+	out := new(AuditSinkList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *AuditSinkList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuditSinkSpec) DeepCopyInto(out *AuditSinkSpec) {
+	*out = *in
+	in.Policy.DeepCopyInto(&out.Policy)
+	in.Webhook.DeepCopyInto(&out.Webhook)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditSinkSpec.
+func (in *AuditSinkSpec) DeepCopy() *AuditSinkSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(AuditSinkSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Policy) DeepCopyInto(out *Policy) {
+	*out = *in
+	if in.Stages != nil {
+		in, out := &in.Stages, &out.Stages
+		*out = make([]Stage, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Policy.
+func (in *Policy) DeepCopy() *Policy {
+	if in == nil {
+		return nil
+	}
+	out := new(Policy)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceReference) DeepCopyInto(out *ServiceReference) {
+	*out = *in
+	if in.Path != nil {
+		in, out := &in.Path, &out.Path
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceReference.
+func (in *ServiceReference) DeepCopy() *ServiceReference {
+	if in == nil {
+		return nil
+	}
+	out := new(ServiceReference)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Webhook) DeepCopyInto(out *Webhook) {
+	*out = *in
+	if in.Throttle != nil {
+		in, out := &in.Throttle, &out.Throttle
+		*out = new(WebhookThrottleConfig)
+		(*in).DeepCopyInto(*out)
+	}
+	in.ClientConfig.DeepCopyInto(&out.ClientConfig)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Webhook.
+func (in *Webhook) DeepCopy() *Webhook {
+	if in == nil {
+		return nil
+	}
+	out := new(Webhook)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WebhookClientConfig) DeepCopyInto(out *WebhookClientConfig) {
+	*out = *in
+	if in.URL != nil {
+		in, out := &in.URL, &out.URL
+		*out = new(string)
+		**out = **in
+	}
+	if in.Service != nil {
+		in, out := &in.Service, &out.Service
+		*out = new(ServiceReference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.CABundle != nil {
+		in, out := &in.CABundle, &out.CABundle
+		*out = make([]byte, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookClientConfig.
+func (in *WebhookClientConfig) DeepCopy() *WebhookClientConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(WebhookClientConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WebhookThrottleConfig) DeepCopyInto(out *WebhookThrottleConfig) {
+	*out = *in
+	if in.QPS != nil {
+		in, out := &in.QPS, &out.QPS
+		*out = new(int64)
+		**out = **in
+	}
+	if in.Burst != nil {
+		in, out := &in.Burst, &out.Burst
+		*out = new(int64)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookThrottleConfig.
+func (in *WebhookThrottleConfig) DeepCopy() *WebhookThrottleConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(WebhookThrottleConfig)
+	in.DeepCopyInto(out)
+	return out
+}
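Because every top-level type here also implements DeepCopyObject, callers can clone a mixed batch of API objects through the runtime.Object interface without knowing the concrete types. A sketch (cloneAll is a hypothetical helper, not part of this change):

package main

import (
	"fmt"

	auditregv1alpha1 "k8s.io/api/auditregistration/v1alpha1"
	"k8s.io/apimachinery/pkg/runtime"
)

// cloneAll deep-copies each object via the interface method that the
// generated DeepCopyObject implementations satisfy.
func cloneAll(objs []runtime.Object) []runtime.Object {
	out := make([]runtime.Object, 0, len(objs))
	for _, o := range objs {
		out = append(out, o.DeepCopyObject())
	}
	return out
}

func main() {
	orig := &auditregv1alpha1.AuditSink{}
	orig.Name = "sink-a"

	clone := cloneAll([]runtime.Object{orig})[0].(*auditregv1alpha1.AuditSink)
	clone.Name = "sink-b"

	fmt.Println(orig.Name, clone.Name) // sink-a sink-b
}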
diff --git a/vendor/k8s.io/api/authentication/v1/doc.go b/vendor/k8s.io/api/authentication/v1/doc.go
new file mode 100644
index 0000000..193f154
--- /dev/null
+++ b/vendor/k8s.io/api/authentication/v1/doc.go
@@ -0,0 +1,21 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +groupName=authentication.k8s.io
+// +k8s:openapi-gen=true
+
+package v1 // import "k8s.io/api/authentication/v1"
diff --git a/vendor/k8s.io/api/authentication/v1/generated.proto b/vendor/k8s.io/api/authentication/v1/generated.proto
new file mode 100644
index 0000000..b69636a
--- /dev/null
+++ b/vendor/k8s.io/api/authentication/v1/generated.proto
@@ -0,0 +1,179 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.api.authentication.v1;
+
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1";
+
+// BoundObjectReference is a reference to an object that a token is bound to.
+message BoundObjectReference {
+  // Kind of the referent. Valid kinds are 'Pod' and 'Secret'.
+  // +optional
+  optional string kind = 1;
+
+  // API version of the referent.
+  // +optional
+  optional string aPIVersion = 2;
+
+  // Name of the referent.
+  // +optional
+  optional string name = 3;
+
+  // UID of the referent.
+  // +optional
+  optional string uID = 4;
+}
+
+// ExtraValue masks the value so protobuf can generate
+// +protobuf.nullable=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+message ExtraValue {
+  // items, if empty, will result in an empty slice
+
+  repeated string items = 1;
+}
+
+// TokenRequest requests a token for a given service account.
+message TokenRequest {
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  optional TokenRequestSpec spec = 2;
+
+  // +optional
+  optional TokenRequestStatus status = 3;
+}
+
+// TokenRequestSpec contains client provided parameters of a token request.
+message TokenRequestSpec {
+  // Audiences are the intended audiences of the token. A recipient of a
+  // token must identify itself with an identifier in the list of
+  // audiences of the token, and otherwise should reject the token. A
+  // token issued for multiple audiences may be used to authenticate
+  // against any of the audiences listed but implies a high degree of
+  // trust between the target audiences.
+  repeated string audiences = 1;
+
+  // ExpirationSeconds is the requested duration of validity of the request. The
+  // token issuer may return a token with a different validity duration, so a
+  // client needs to check the 'expiration' field in a response.
+  // +optional
+  optional int64 expirationSeconds = 4;
+
+  // BoundObjectRef is a reference to an object that the token will be bound to.
+  // The token will only be valid for as long as the bound object exists.
+  // +optional
+  optional BoundObjectReference boundObjectRef = 3;
+}
+
+// TokenRequestStatus is the result of a token request.
+message TokenRequestStatus {
+  // Token is the opaque bearer token.
+  optional string token = 1;
+
+  // ExpirationTimestamp is the time of expiration of the returned token.
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time expirationTimestamp = 2;
+}
+
+// TokenReview attempts to authenticate a token to a known user.
+// Note: TokenReview requests may be cached by the webhook token authenticator
+// plugin in the kube-apiserver.
+message TokenReview {
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Spec holds information about the request being evaluated
+  optional TokenReviewSpec spec = 2;
+
+  // Status is filled in by the server and indicates whether the request can be authenticated.
+  // +optional
+  optional TokenReviewStatus status = 3;
+}
+
+// TokenReviewSpec is a description of the token authentication request.
+message TokenReviewSpec {
+  // Token is the opaque bearer token.
+  // +optional
+  optional string token = 1;
+
+  // Audiences is a list of the identifiers that the resource server presented
+  // with the token identifies as. Audience-aware token authenticators will
+  // verify that the token was intended for at least one of the audiences in
+  // this list. If no audiences are provided, the audience will default to the
+  // audience of the Kubernetes apiserver.
+  // +optional
+  repeated string audiences = 2;
+}
+
+// TokenReviewStatus is the result of the token authentication request.
+message TokenReviewStatus {
+  // Authenticated indicates that the token was associated with a known user.
+  // +optional
+  optional bool authenticated = 1;
+
+  // User is the UserInfo associated with the provided token.
+  // +optional
+  optional UserInfo user = 2;
+
+  // Audiences are audience identifiers chosen by the authenticator that are
+  // compatible with both the TokenReview and token. An identifier is any
+  // identifier in the intersection of the TokenReviewSpec audiences and the
+  // token's audiences. A client of the TokenReview API that sets the
+  // spec.audiences field should validate that a compatible audience identifier
+  // is returned in the status.audiences field to ensure that the TokenReview
+  // server is audience aware. If a TokenReview returns an empty
+  // status.audiences field where status.authenticated is "true", the token is
+  // valid against the audience of the Kubernetes API server.
+  // +optional
+  repeated string audiences = 4;
+
+  // Error indicates that the token couldn't be checked
+  // +optional
+  optional string error = 3;
+}
+
+// UserInfo holds the information about the user needed to implement the
+// user.Info interface.
+message UserInfo {
+  // The name that uniquely identifies this user among all active users.
+  // +optional
+  optional string username = 1;
+
+  // A unique value that identifies this user across time. If this user is
+  // deleted and another user by the same name is added, they will have
+  // different UIDs.
+  // +optional
+  optional string uid = 2;
+
+  // The names of groups this user is a part of.
+  // +optional
+  repeated string groups = 3;
+
+  // Any additional information provided by the authenticator.
+  // +optional
+  map<string, ExtraValue> extra = 4;
+}
+
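The TokenRequestSpec and TokenRequestStatus messages above map one-to-one onto the Go types vendored later in this change (types.go). As a rough illustration of how a consumer would populate them, the following sketch builds a ten-minute, Pod-bound token request; the audience, namespace, and object names are placeholder values:

package main

import (
	authenticationv1 "k8s.io/api/authentication/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func exampleTokenRequest() *authenticationv1.TokenRequest {
	// The issuer may shorten or lengthen this; always check
	// status.expirationTimestamp in the response.
	expiry := int64(600)
	return &authenticationv1.TokenRequest{
		ObjectMeta: metav1.ObjectMeta{Name: "my-serviceaccount", Namespace: "default"},
		Spec: authenticationv1.TokenRequestSpec{
			Audiences:         []string{"https://vault.example.com"}, // placeholder audience
			ExpirationSeconds: &expiry,
			// Tie the token's lifetime to a Pod: once the Pod is
			// deleted, the token stops validating.
			BoundObjectRef: &authenticationv1.BoundObjectReference{
				Kind:       "Pod",
				APIVersion: "v1",
				Name:       "my-pod",
			},
		},
	}
}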
diff --git a/vendor/k8s.io/api/authentication/v1/register.go b/vendor/k8s.io/api/authentication/v1/register.go
new file mode 100644
index 0000000..c522e4a
--- /dev/null
+++ b/vendor/k8s.io/api/authentication/v1/register.go
@@ -0,0 +1,52 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "authentication.k8s.io"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
+	// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+	SchemeBuilder      = runtime.NewSchemeBuilder(addKnownTypes)
+	localSchemeBuilder = &SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&TokenReview{},
+		&TokenRequest{},
+	)
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
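For context, AddToScheme is what client code calls to make these types known to a runtime.Scheme before encoding or decoding them. A minimal sketch, with illustrative error handling:

package main

import (
	authenticationv1 "k8s.io/api/authentication/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func newAuthnScheme() (*runtime.Scheme, error) {
	scheme := runtime.NewScheme()
	// Registers TokenReview and TokenRequest under authentication.k8s.io/v1,
	// plus the shared meta/v1 types for that group-version.
	if err := authenticationv1.AddToScheme(scheme); err != nil {
		return nil, err
	}
	return scheme, nil
}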
diff --git a/vendor/k8s.io/api/authentication/v1/types.go b/vendor/k8s.io/api/authentication/v1/types.go
new file mode 100644
index 0000000..d348c6f
--- /dev/null
+++ b/vendor/k8s.io/api/authentication/v1/types.go
@@ -0,0 +1,186 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"fmt"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+)
+
+const (
+	// ImpersonateUserHeader is used to impersonate a particular user during an API server request
+	ImpersonateUserHeader = "Impersonate-User"
+
+	// ImpersonateGroupHeader is used to impersonate a particular group during an API server request.
+	// It can be repeated multiple times for multiple groups.
+	ImpersonateGroupHeader = "Impersonate-Group"
+
+	// ImpersonateUserExtraHeaderPrefix is a prefix for any header used to impersonate an entry in the
+	// extra map[string][]string for user.Info.  The key will be everything after the prefix.
+	// It can be repeated multiple times for multiple map keys and the same key can be repeated multiple
+	// times to have multiple elements in the slice under a single key.
+	ImpersonateUserExtraHeaderPrefix = "Impersonate-Extra-"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +genclient:noVerbs
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// TokenReview attempts to authenticate a token to a known user.
+// Note: TokenReview requests may be cached by the webhook token authenticator
+// plugin in the kube-apiserver.
+type TokenReview struct {
+	metav1.TypeMeta `json:",inline"`
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Spec holds information about the request being evaluated
+	Spec TokenReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+
+	// Status is filled in by the server and indicates whether the request can be authenticated.
+	// +optional
+	Status TokenReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// TokenReviewSpec is a description of the token authentication request.
+type TokenReviewSpec struct {
+	// Token is the opaque bearer token.
+	// +optional
+	Token string `json:"token,omitempty" protobuf:"bytes,1,opt,name=token"`
+	// Audiences is a list of the identifiers that the resource server presented
+	// with the token identifies as. Audience-aware token authenticators will
+	// verify that the token was intended for at least one of the audiences in
+	// this list. If no audiences are provided, the audience will default to the
+	// audience of the Kubernetes apiserver.
+	// +optional
+	Audiences []string `json:"audiences,omitempty" protobuf:"bytes,2,rep,name=audiences"`
+}
+
+// TokenReviewStatus is the result of the token authentication request.
+type TokenReviewStatus struct {
+	// Authenticated indicates that the token was associated with a known user.
+	// +optional
+	Authenticated bool `json:"authenticated,omitempty" protobuf:"varint,1,opt,name=authenticated"`
+	// User is the UserInfo associated with the provided token.
+	// +optional
+	User UserInfo `json:"user,omitempty" protobuf:"bytes,2,opt,name=user"`
+	// Audiences are audience identifiers chosen by the authenticator that are
+	// compatible with both the TokenReview and token. An identifier is any
+	// identifier in the intersection of the TokenReviewSpec audiences and the
+	// token's audiences. A client of the TokenReview API that sets the
+	// spec.audiences field should validate that a compatible audience identifier
+	// is returned in the status.audiences field to ensure that the TokenReview
+	// server is audience aware. If a TokenReview returns an empty
+	// status.audiences field where status.authenticated is "true", the token is
+	// valid against the audience of the Kubernetes API server.
+	// +optional
+	Audiences []string `json:"audiences,omitempty" protobuf:"bytes,4,rep,name=audiences"`
+	// Error indicates that the token couldn't be checked
+	// +optional
+	Error string `json:"error,omitempty" protobuf:"bytes,3,opt,name=error"`
+}
+
+// UserInfo holds the information about the user needed to implement the
+// user.Info interface.
+type UserInfo struct {
+	// The name that uniquely identifies this user among all active users.
+	// +optional
+	Username string `json:"username,omitempty" protobuf:"bytes,1,opt,name=username"`
+	// A unique value that identifies this user across time. If this user is
+	// deleted and another user by the same name is added, they will have
+	// different UIDs.
+	// +optional
+	UID string `json:"uid,omitempty" protobuf:"bytes,2,opt,name=uid"`
+	// The names of groups this user is a part of.
+	// +optional
+	Groups []string `json:"groups,omitempty" protobuf:"bytes,3,rep,name=groups"`
+	// Any additional information provided by the authenticator.
+	// +optional
+	Extra map[string]ExtraValue `json:"extra,omitempty" protobuf:"bytes,4,rep,name=extra"`
+}
+
+// ExtraValue masks the value so protobuf can generate
+// +protobuf.nullable=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+type ExtraValue []string
+
+func (t ExtraValue) String() string {
+	return fmt.Sprintf("%v", []string(t))
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// TokenRequest requests a token for a given service account.
+type TokenRequest struct {
+	metav1.TypeMeta `json:",inline"`
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	Spec TokenRequestSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+	// +optional
+	Status TokenRequestStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// TokenRequestSpec contains client provided parameters of a token request.
+type TokenRequestSpec struct {
+	// Audiences are the intended audiences of the token. A recipient of a
+	// token must identify itself with an identifier in the list of
+	// audiences of the token, and otherwise should reject the token. A
+	// token issued for multiple audiences may be used to authenticate
+	// against any of the audiences listed but implies a high degree of
+	// trust between the target audiences.
+	Audiences []string `json:"audiences" protobuf:"bytes,1,rep,name=audiences"`
+
+	// ExpirationSeconds is the requested duration of validity of the request. The
+	// token issuer may return a token with a different validity duration so a
+	// client needs to check the 'expiration' field in a response.
+	// +optional
+	ExpirationSeconds *int64 `json:"expirationSeconds" protobuf:"varint,4,opt,name=expirationSeconds"`
+
+	// BoundObjectRef is a reference to an object that the token will be bound to.
+	// The token will only be valid for as long as the bound object exists.
+	// +optional
+	BoundObjectRef *BoundObjectReference `json:"boundObjectRef" protobuf:"bytes,3,opt,name=boundObjectRef"`
+}
+
+// TokenRequestStatus is the result of a token request.
+type TokenRequestStatus struct {
+	// Token is the opaque bearer token.
+	Token string `json:"token" protobuf:"bytes,1,opt,name=token"`
+	// ExpirationTimestamp is the time of expiration of the returned token.
+	ExpirationTimestamp metav1.Time `json:"expirationTimestamp" protobuf:"bytes,2,opt,name=expirationTimestamp"`
+}
+
+// BoundObjectReference is a reference to an object that a token is bound to.
+type BoundObjectReference struct {
+	// Kind of the referent. Valid kinds are 'Pod' and 'Secret'.
+	// +optional
+	Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"`
+	// API version of the referent.
+	// +optional
+	APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,2,opt,name=aPIVersion"`
+
+	// Name of the referent.
+	// +optional
+	Name string `json:"name,omitempty" protobuf:"bytes,3,opt,name=name"`
+	// UID of the referent.
+	// +optional
+	UID types.UID `json:"uid,omitempty" protobuf:"bytes,4,opt,name=uID,casttype=k8s.io/apimachinery/pkg/types.UID"`
+}
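To make the spec/status split concrete: the client fills in TokenReviewSpec, and the authenticator (or the webhook behind it) writes TokenReviewStatus back. A hedged sketch of the server-side half, where the username, UID, and extra values are purely illustrative:

package main

import authenticationv1 "k8s.io/api/authentication/v1"

// reviewToken sketches what an authenticator writes back into a TokenReview.
func reviewToken(in *authenticationv1.TokenReview) {
	in.Status = authenticationv1.TokenReviewStatus{
		Authenticated: true,
		User: authenticationv1.UserInfo{
			Username: "system:serviceaccount:default:my-serviceaccount",
			UID:      "b3d7e1c2-illustrative",
			Groups:   []string{"system:serviceaccounts", "system:authenticated"},
			Extra: map[string]authenticationv1.ExtraValue{
				"example.com/illustrative-key": {"my-pod"},
			},
		},
		// An audience-aware authenticator returns the intersection of
		// spec.audiences and the token's audiences; copying the whole
		// request list here is a simplification.
		Audiences: in.Spec.Audiences,
	}
}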
diff --git a/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go
new file mode 100644
index 0000000..f2c9b95
--- /dev/null
+++ b/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go
@@ -0,0 +1,115 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple lines or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_BoundObjectReference = map[string]string{
+	"":           "BoundObjectReference is a reference to an object that a token is bound to.",
+	"kind":       "Kind of the referent. Valid kinds are 'Pod' and 'Secret'.",
+	"apiVersion": "API version of the referent.",
+	"name":       "Name of the referent.",
+	"uid":        "UID of the referent.",
+}
+
+func (BoundObjectReference) SwaggerDoc() map[string]string {
+	return map_BoundObjectReference
+}
+
+var map_TokenRequest = map[string]string{
+	"": "TokenRequest requests a token for a given service account.",
+}
+
+func (TokenRequest) SwaggerDoc() map[string]string {
+	return map_TokenRequest
+}
+
+var map_TokenRequestSpec = map[string]string{
+	"":                  "TokenRequestSpec contains client provided parameters of a token request.",
+	"audiences":         "Audiences are the intendend audiences of the token. A recipient of a token must identitfy themself with an identifier in the list of audiences of the token, and otherwise should reject the token. A token issued for multiple audiences may be used to authenticate against any of the audiences listed but implies a high degree of trust between the target audiences.",
+	"expirationSeconds": "ExpirationSeconds is the requested duration of validity of the request. The token issuer may return a token with a different validity duration so a client needs to check the 'expiration' field in a response.",
+	"boundObjectRef":    "BoundObjectRef is a reference to an object that the token will be bound to. The token will only be valid for as long as the bound objet exists.",
+}
+
+func (TokenRequestSpec) SwaggerDoc() map[string]string {
+	return map_TokenRequestSpec
+}
+
+var map_TokenRequestStatus = map[string]string{
+	"":                    "TokenRequestStatus is the result of a token request.",
+	"token":               "Token is the opaque bearer token.",
+	"expirationTimestamp": "ExpirationTimestamp is the time of expiration of the returned token.",
+}
+
+func (TokenRequestStatus) SwaggerDoc() map[string]string {
+	return map_TokenRequestStatus
+}
+
+var map_TokenReview = map[string]string{
+	"":       "TokenReview attempts to authenticate a token to a known user. Note: TokenReview requests may be cached by the webhook token authenticator plugin in the kube-apiserver.",
+	"spec":   "Spec holds information about the request being evaluated",
+	"status": "Status is filled in by the server and indicates whether the request can be authenticated.",
+}
+
+func (TokenReview) SwaggerDoc() map[string]string {
+	return map_TokenReview
+}
+
+var map_TokenReviewSpec = map[string]string{
+	"":          "TokenReviewSpec is a description of the token authentication request.",
+	"token":     "Token is the opaque bearer token.",
+	"audiences": "Audiences is a list of the identifiers that the resource server presented with the token identifies as. Audience-aware token authenticators will verify that the token was intended for at least one of the audiences in this list. If no audiences are provided, the audience will default to the audience of the Kubernetes apiserver.",
+}
+
+func (TokenReviewSpec) SwaggerDoc() map[string]string {
+	return map_TokenReviewSpec
+}
+
+var map_TokenReviewStatus = map[string]string{
+	"":              "TokenReviewStatus is the result of the token authentication request.",
+	"authenticated": "Authenticated indicates that the token was associated with a known user.",
+	"user":          "User is the UserInfo associated with the provided token.",
+	"audiences":     "Audiences are audience identifiers chosen by the authenticator that are compatible with both the TokenReview and token. An identifier is any identifier in the intersection of the TokenReviewSpec audiences and the token's audiences. A client of the TokenReview API that sets the spec.audiences field should validate that a compatible audience identifier is returned in the status.audiences field to ensure that the TokenReview server is audience aware. If a TokenReview returns an empty status.audience field where status.authenticated is \"true\", the token is valid against the audience of the Kubernetes API server.",
+	"error":         "Error indicates that the token couldn't be checked",
+}
+
+func (TokenReviewStatus) SwaggerDoc() map[string]string {
+	return map_TokenReviewStatus
+}
+
+var map_UserInfo = map[string]string{
+	"":         "UserInfo holds the information about the user needed to implement the user.Info interface.",
+	"username": "The name that uniquely identifies this user among all active users.",
+	"uid":      "A unique value that identifies this user across time. If this user is deleted and another user by the same name is added, they will have different UIDs.",
+	"groups":   "The names of groups this user is a part of.",
+	"extra":    "Any additional information provided by the authenticator.",
+}
+
+func (UserInfo) SwaggerDoc() map[string]string {
+	return map_UserInfo
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
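These maps exist to feed go-restful's Swagger generation, but they can also be read directly; a small sketch:

package main

import (
	"fmt"

	authenticationv1 "k8s.io/api/authentication/v1"
)

func main() {
	// SwaggerDoc is defined on the value type, so a zero value suffices.
	docs := authenticationv1.TokenRequestSpec{}.SwaggerDoc()
	fmt.Println(docs["expirationSeconds"]) // prints that field's documentation string
}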
diff --git a/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..aca99c4
--- /dev/null
+++ b/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go
@@ -0,0 +1,244 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BoundObjectReference) DeepCopyInto(out *BoundObjectReference) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BoundObjectReference.
+func (in *BoundObjectReference) DeepCopy() *BoundObjectReference {
+	if in == nil {
+		return nil
+	}
+	out := new(BoundObjectReference)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in ExtraValue) DeepCopyInto(out *ExtraValue) {
+	{
+		in := &in
+		*out = make(ExtraValue, len(*in))
+		copy(*out, *in)
+		return
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraValue.
+func (in ExtraValue) DeepCopy() ExtraValue {
+	if in == nil {
+		return nil
+	}
+	out := new(ExtraValue)
+	in.DeepCopyInto(out)
+	return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TokenRequest) DeepCopyInto(out *TokenRequest) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenRequest.
+func (in *TokenRequest) DeepCopy() *TokenRequest {
+	if in == nil {
+		return nil
+	}
+	out := new(TokenRequest)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *TokenRequest) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TokenRequestSpec) DeepCopyInto(out *TokenRequestSpec) {
+	*out = *in
+	if in.Audiences != nil {
+		in, out := &in.Audiences, &out.Audiences
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.ExpirationSeconds != nil {
+		in, out := &in.ExpirationSeconds, &out.ExpirationSeconds
+		*out = new(int64)
+		**out = **in
+	}
+	if in.BoundObjectRef != nil {
+		in, out := &in.BoundObjectRef, &out.BoundObjectRef
+		*out = new(BoundObjectReference)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenRequestSpec.
+func (in *TokenRequestSpec) DeepCopy() *TokenRequestSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(TokenRequestSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TokenRequestStatus) DeepCopyInto(out *TokenRequestStatus) {
+	*out = *in
+	in.ExpirationTimestamp.DeepCopyInto(&out.ExpirationTimestamp)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenRequestStatus.
+func (in *TokenRequestStatus) DeepCopy() *TokenRequestStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(TokenRequestStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TokenReview) DeepCopyInto(out *TokenReview) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenReview.
+func (in *TokenReview) DeepCopy() *TokenReview {
+	if in == nil {
+		return nil
+	}
+	out := new(TokenReview)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *TokenReview) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TokenReviewSpec) DeepCopyInto(out *TokenReviewSpec) {
+	*out = *in
+	if in.Audiences != nil {
+		in, out := &in.Audiences, &out.Audiences
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenReviewSpec.
+func (in *TokenReviewSpec) DeepCopy() *TokenReviewSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(TokenReviewSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TokenReviewStatus) DeepCopyInto(out *TokenReviewStatus) {
+	*out = *in
+	in.User.DeepCopyInto(&out.User)
+	if in.Audiences != nil {
+		in, out := &in.Audiences, &out.Audiences
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenReviewStatus.
+func (in *TokenReviewStatus) DeepCopy() *TokenReviewStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(TokenReviewStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UserInfo) DeepCopyInto(out *UserInfo) {
+	*out = *in
+	if in.Groups != nil {
+		in, out := &in.Groups, &out.Groups
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Extra != nil {
+		in, out := &in.Extra, &out.Extra
+		*out = make(map[string]ExtraValue, len(*in))
+		for key, val := range *in {
+			var outVal []string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				in, out := &val, &outVal
+				*out = make(ExtraValue, len(*in))
+				copy(*out, *in)
+			}
+			(*out)[key] = outVal
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserInfo.
+func (in *UserInfo) DeepCopy() *UserInfo {
+	if in == nil {
+		return nil
+	}
+	out := new(UserInfo)
+	in.DeepCopyInto(out)
+	return out
+}
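The practical point of these generated helpers is aliasing safety: DeepCopy duplicates every nested slice and map, so mutating the copy never leaks into the original. A small sketch:

package main

import authenticationv1 "k8s.io/api/authentication/v1"

func main() {
	orig := &authenticationv1.UserInfo{
		Groups: []string{"system:authenticated"},
		Extra:  map[string]authenticationv1.ExtraValue{"scopes": {"read"}},
	}
	cp := orig.DeepCopy()
	cp.Groups[0] = "system:masters"
	cp.Extra["scopes"] = authenticationv1.ExtraValue{"write"}
	// orig.Groups[0] is still "system:authenticated" and orig.Extra["scopes"]
	// is still {"read"}: the copy shares no backing storage with the original.
}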
diff --git a/vendor/k8s.io/api/authentication/v1beta1/doc.go b/vendor/k8s.io/api/authentication/v1beta1/doc.go
new file mode 100644
index 0000000..919f3c4
--- /dev/null
+++ b/vendor/k8s.io/api/authentication/v1beta1/doc.go
@@ -0,0 +1,21 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +groupName=authentication.k8s.io
+// +k8s:openapi-gen=true
+
+package v1beta1 // import "k8s.io/api/authentication/v1beta1"
diff --git a/vendor/k8s.io/api/authentication/v1beta1/generated.proto b/vendor/k8s.io/api/authentication/v1beta1/generated.proto
new file mode 100644
index 0000000..caf2a6a
--- /dev/null
+++ b/vendor/k8s.io/api/authentication/v1beta1/generated.proto
@@ -0,0 +1,118 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.api.authentication.v1beta1;
+
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1beta1";
+
+// ExtraValue masks the value so protobuf can generate
+// +protobuf.nullable=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+message ExtraValue {
+  // items, if empty, will result in an empty slice
+
+  repeated string items = 1;
+}
+
+// TokenReview attempts to authenticate a token to a known user.
+// Note: TokenReview requests may be cached by the webhook token authenticator
+// plugin in the kube-apiserver.
+message TokenReview {
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Spec holds information about the request being evaluated
+  optional TokenReviewSpec spec = 2;
+
+  // Status is filled in by the server and indicates whether the request can be authenticated.
+  // +optional
+  optional TokenReviewStatus status = 3;
+}
+
+// TokenReviewSpec is a description of the token authentication request.
+message TokenReviewSpec {
+  // Token is the opaque bearer token.
+  // +optional
+  optional string token = 1;
+
+  // Audiences is a list of the identifiers that the resource server presented
+  // with the token identifies as. Audience-aware token authenticators will
+  // verify that the token was intended for at least one of the audiences in
+  // this list. If no audiences are provided, the audience will default to the
+  // audience of the Kubernetes apiserver.
+  // +optional
+  repeated string audiences = 2;
+}
+
+// TokenReviewStatus is the result of the token authentication request.
+message TokenReviewStatus {
+  // Authenticated indicates that the token was associated with a known user.
+  // +optional
+  optional bool authenticated = 1;
+
+  // User is the UserInfo associated with the provided token.
+  // +optional
+  optional UserInfo user = 2;
+
+  // Audiences are audience identifiers chosen by the authenticator that are
+  // compatible with both the TokenReview and token. An identifier is any
+  // identifier in the intersection of the TokenReviewSpec audiences and the
+  // token's audiences. A client of the TokenReview API that sets the
+  // spec.audiences field should validate that a compatible audience identifier
+  // is returned in the status.audiences field to ensure that the TokenReview
+  // server is audience aware. If a TokenReview returns an empty
+  // status.audiences field where status.authenticated is "true", the token is
+  // valid against the audience of the Kubernetes API server.
+  // +optional
+  repeated string audiences = 4;
+
+  // Error indicates that the token couldn't be checked
+  // +optional
+  optional string error = 3;
+}
+
+// UserInfo holds the information about the user needed to implement the
+// user.Info interface.
+message UserInfo {
+  // The name that uniquely identifies this user among all active users.
+  // +optional
+  optional string username = 1;
+
+  // A unique value that identifies this user across time. If this user is
+  // deleted and another user by the same name is added, they will have
+  // different UIDs.
+  // +optional
+  optional string uid = 2;
+
+  // The names of groups this user is a part of.
+  // +optional
+  repeated string groups = 3;
+
+  // Any additional information provided by the authenticator.
+  // +optional
+  map<string, ExtraValue> extra = 4;
+}
+
diff --git a/vendor/k8s.io/api/authentication/v1beta1/register.go b/vendor/k8s.io/api/authentication/v1beta1/register.go
new file mode 100644
index 0000000..ed23e50
--- /dev/null
+++ b/vendor/k8s.io/api/authentication/v1beta1/register.go
@@ -0,0 +1,51 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "authentication.k8s.io"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
+	// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+	SchemeBuilder      = runtime.NewSchemeBuilder(addKnownTypes)
+	localSchemeBuilder = &SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&TokenReview{},
+	)
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
diff --git a/vendor/k8s.io/api/authentication/v1beta1/types.go b/vendor/k8s.io/api/authentication/v1beta1/types.go
new file mode 100644
index 0000000..0b6cba8
--- /dev/null
+++ b/vendor/k8s.io/api/authentication/v1beta1/types.go
@@ -0,0 +1,110 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	"fmt"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +genclient:noVerbs
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// TokenReview attempts to authenticate a token to a known user.
+// Note: TokenReview requests may be cached by the webhook token authenticator
+// plugin in the kube-apiserver.
+type TokenReview struct {
+	metav1.TypeMeta `json:",inline"`
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Spec holds information about the request being evaluated
+	Spec TokenReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+
+	// Status is filled in by the server and indicates whether the request can be authenticated.
+	// +optional
+	Status TokenReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// TokenReviewSpec is a description of the token authentication request.
+type TokenReviewSpec struct {
+	// Token is the opaque bearer token.
+	// +optional
+	Token string `json:"token,omitempty" protobuf:"bytes,1,opt,name=token"`
+	// Audiences is a list of the identifiers that the resource server presented
+	// with the token identifies as. Audience-aware token authenticators will
+	// verify that the token was intended for at least one of the audiences in
+	// this list. If no audiences are provided, the audience will default to the
+	// audience of the Kubernetes apiserver.
+	// +optional
+	Audiences []string `json:"audiences,omitempty" protobuf:"bytes,2,rep,name=audiences"`
+}
+
+// TokenReviewStatus is the result of the token authentication request.
+type TokenReviewStatus struct {
+	// Authenticated indicates that the token was associated with a known user.
+	// +optional
+	Authenticated bool `json:"authenticated,omitempty" protobuf:"varint,1,opt,name=authenticated"`
+	// User is the UserInfo associated with the provided token.
+	// +optional
+	User UserInfo `json:"user,omitempty" protobuf:"bytes,2,opt,name=user"`
+	// Audiences are audience identifiers chosen by the authenticator that are
+	// compatible with both the TokenReview and token. An identifier is any
+	// identifier in the intersection of the TokenReviewSpec audiences and the
+	// token's audiences. A client of the TokenReview API that sets the
+	// spec.audiences field should validate that a compatible audience identifier
+	// is returned in the status.audiences field to ensure that the TokenReview
+	// server is audience aware. If a TokenReview returns an empty
+	// status.audiences field where status.authenticated is "true", the token is
+	// valid against the audience of the Kubernetes API server.
+	// +optional
+	Audiences []string `json:"audiences,omitempty" protobuf:"bytes,4,rep,name=audiences"`
+	// Error indicates that the token couldn't be checked
+	// +optional
+	Error string `json:"error,omitempty" protobuf:"bytes,3,opt,name=error"`
+}
+
+// UserInfo holds the information about the user needed to implement the
+// user.Info interface.
+type UserInfo struct {
+	// The name that uniquely identifies this user among all active users.
+	// +optional
+	Username string `json:"username,omitempty" protobuf:"bytes,1,opt,name=username"`
+	// A unique value that identifies this user across time. If this user is
+	// deleted and another user by the same name is added, they will have
+	// different UIDs.
+	// +optional
+	UID string `json:"uid,omitempty" protobuf:"bytes,2,opt,name=uid"`
+	// The names of groups this user is a part of.
+	// +optional
+	Groups []string `json:"groups,omitempty" protobuf:"bytes,3,rep,name=groups"`
+	// Any additional information provided by the authenticator.
+	// +optional
+	Extra map[string]ExtraValue `json:"extra,omitempty" protobuf:"bytes,4,rep,name=extra"`
+}
+
+// ExtraValue masks the value so protobuf can generate
+// +protobuf.nullable=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+type ExtraValue []string
+
+func (t ExtraValue) String() string {
+	return fmt.Sprintf("%v", []string(t))
+}
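The v1beta1 TokenReview is the version exchanged, as JSON, with the kube-apiserver's webhook token authenticator. A rough sketch of the request body such a webhook receives; the apiVersion/kind values follow the group registered in register.go, and the token is a placeholder:

package main

import (
	"encoding/json"
	"fmt"

	authenticationv1beta1 "k8s.io/api/authentication/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	review := authenticationv1beta1.TokenReview{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "authentication.k8s.io/v1beta1",
			Kind:       "TokenReview",
		},
		Spec: authenticationv1beta1.TokenReviewSpec{Token: "<opaque bearer token>"},
	}
	body, _ := json.MarshalIndent(review, "", "  ")
	fmt.Println(string(body)) // what the webhook authenticator would be sent
}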
diff --git a/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go
new file mode 100644
index 0000000..8c9acfb
--- /dev/null
+++ b/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go
@@ -0,0 +1,74 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple lines or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_TokenReview = map[string]string{
+	"":       "TokenReview attempts to authenticate a token to a known user. Note: TokenReview requests may be cached by the webhook token authenticator plugin in the kube-apiserver.",
+	"spec":   "Spec holds information about the request being evaluated",
+	"status": "Status is filled in by the server and indicates whether the request can be authenticated.",
+}
+
+func (TokenReview) SwaggerDoc() map[string]string {
+	return map_TokenReview
+}
+
+var map_TokenReviewSpec = map[string]string{
+	"":          "TokenReviewSpec is a description of the token authentication request.",
+	"token":     "Token is the opaque bearer token.",
+	"audiences": "Audiences is a list of the identifiers that the resource server presented with the token identifies as. Audience-aware token authenticators will verify that the token was intended for at least one of the audiences in this list. If no audiences are provided, the audience will default to the audience of the Kubernetes apiserver.",
+}
+
+func (TokenReviewSpec) SwaggerDoc() map[string]string {
+	return map_TokenReviewSpec
+}
+
+var map_TokenReviewStatus = map[string]string{
+	"":              "TokenReviewStatus is the result of the token authentication request.",
+	"authenticated": "Authenticated indicates that the token was associated with a known user.",
+	"user":          "User is the UserInfo associated with the provided token.",
+	"audiences":     "Audiences are audience identifiers chosen by the authenticator that are compatible with both the TokenReview and token. An identifier is any identifier in the intersection of the TokenReviewSpec audiences and the token's audiences. A client of the TokenReview API that sets the spec.audiences field should validate that a compatible audience identifier is returned in the status.audiences field to ensure that the TokenReview server is audience aware. If a TokenReview returns an empty status.audience field where status.authenticated is \"true\", the token is valid against the audience of the Kubernetes API server.",
+	"error":         "Error indicates that the token couldn't be checked",
+}
+
+func (TokenReviewStatus) SwaggerDoc() map[string]string {
+	return map_TokenReviewStatus
+}
+
+var map_UserInfo = map[string]string{
+	"":         "UserInfo holds the information about the user needed to implement the user.Info interface.",
+	"username": "The name that uniquely identifies this user among all active users.",
+	"uid":      "A unique value that identifies this user across time. If this user is deleted and another user by the same name is added, they will have different UIDs.",
+	"groups":   "The names of groups this user is a part of.",
+	"extra":    "Any additional information provided by the authenticator.",
+}
+
+func (UserInfo) SwaggerDoc() map[string]string {
+	return map_UserInfo
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..a5d82a8
--- /dev/null
+++ b/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go
@@ -0,0 +1,152 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in ExtraValue) DeepCopyInto(out *ExtraValue) {
+	{
+		in := &in
+		*out = make(ExtraValue, len(*in))
+		copy(*out, *in)
+		return
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraValue.
+func (in ExtraValue) DeepCopy() ExtraValue {
+	if in == nil {
+		return nil
+	}
+	out := new(ExtraValue)
+	in.DeepCopyInto(out)
+	return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TokenReview) DeepCopyInto(out *TokenReview) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenReview.
+func (in *TokenReview) DeepCopy() *TokenReview {
+	if in == nil {
+		return nil
+	}
+	out := new(TokenReview)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *TokenReview) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TokenReviewSpec) DeepCopyInto(out *TokenReviewSpec) {
+	*out = *in
+	if in.Audiences != nil {
+		in, out := &in.Audiences, &out.Audiences
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenReviewSpec.
+func (in *TokenReviewSpec) DeepCopy() *TokenReviewSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(TokenReviewSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TokenReviewStatus) DeepCopyInto(out *TokenReviewStatus) {
+	*out = *in
+	in.User.DeepCopyInto(&out.User)
+	if in.Audiences != nil {
+		in, out := &in.Audiences, &out.Audiences
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenReviewStatus.
+func (in *TokenReviewStatus) DeepCopy() *TokenReviewStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(TokenReviewStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UserInfo) DeepCopyInto(out *UserInfo) {
+	*out = *in
+	if in.Groups != nil {
+		in, out := &in.Groups, &out.Groups
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Extra != nil {
+		in, out := &in.Extra, &out.Extra
+		*out = make(map[string]ExtraValue, len(*in))
+		for key, val := range *in {
+			var outVal []string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				in, out := &val, &outVal
+				*out = make(ExtraValue, len(*in))
+				copy(*out, *in)
+			}
+			(*out)[key] = outVal
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserInfo.
+func (in *UserInfo) DeepCopy() *UserInfo {
+	if in == nil {
+		return nil
+	}
+	out := new(UserInfo)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/vendor/k8s.io/api/authorization/v1/doc.go b/vendor/k8s.io/api/authorization/v1/doc.go
new file mode 100644
index 0000000..c63ac28
--- /dev/null
+++ b/vendor/k8s.io/api/authorization/v1/doc.go
@@ -0,0 +1,22 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:openapi-gen=true
+
+// +groupName=authorization.k8s.io
+
+package v1 // import "k8s.io/api/authorization/v1"
diff --git a/vendor/k8s.io/api/authorization/v1/generated.proto b/vendor/k8s.io/api/authorization/v1/generated.proto
new file mode 100644
index 0000000..f68a04e
--- /dev/null
+++ b/vendor/k8s.io/api/authorization/v1/generated.proto
@@ -0,0 +1,272 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.api.authorization.v1;
+
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1";
+
+// ExtraValue masks the value so protobuf can generate
+// +protobuf.nullable=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+message ExtraValue {
+  // items, if empty, will result in an empty slice
+
+  repeated string items = 1;
+}
+
+// LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace.
+// Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions
+// checking.
+message LocalSubjectAccessReview {
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Spec holds information about the request being evaluated.  spec.namespace must be equal to the namespace
+  // you made the request against.  If empty, it is defaulted.
+  optional SubjectAccessReviewSpec spec = 2;
+
+  // Status is filled in by the server and indicates whether the request is allowed or not
+  // +optional
+  optional SubjectAccessReviewStatus status = 3;
+}
+
+// NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface
+message NonResourceAttributes {
+  // Path is the URL path of the request
+  // +optional
+  optional string path = 1;
+
+  // Verb is the standard HTTP verb
+  // +optional
+  optional string verb = 2;
+}
+
+// NonResourceRule holds information that describes a rule for the non-resource
+message NonResourceRule {
+  // Verbs is a list of kubernetes non-resource API verbs, like: get, post, put, delete, patch, head, options.  "*" means all.
+  repeated string verbs = 1;
+
+  // NonResourceURLs is a set of partial URLs that a user should have access to.  *s are allowed, but only as the full,
+  // final step in the path.  "*" means all.
+  // +optional
+  repeated string nonResourceURLs = 2;
+}
+
+// ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface
+message ResourceAttributes {
+  // Namespace is the namespace of the action being requested.  Currently, there is no distinction between no namespace and all namespaces
+  // "" (empty) is defaulted for LocalSubjectAccessReviews
+  // "" (empty) is empty for cluster-scoped resources
+  // "" (empty) means "all" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview
+  // +optional
+  optional string namespace = 1;
+
+  // Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy.  "*" means all.
+  // +optional
+  optional string verb = 2;
+
+  // Group is the API Group of the Resource.  "*" means all.
+  // +optional
+  optional string group = 3;
+
+  // Version is the API Version of the Resource.  "*" means all.
+  // +optional
+  optional string version = 4;
+
+  // Resource is one of the existing resource types.  "*" means all.
+  // +optional
+  optional string resource = 5;
+
+  // Subresource is one of the existing resource types.  "" means none.
+  // +optional
+  optional string subresource = 6;
+
+  // Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all.
+  // +optional
+  optional string name = 7;
+}
+
+// ResourceRule is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant,
+// may contain duplicates, and possibly be incomplete.
+message ResourceRule {
+  // Verbs is a list of kubernetes resource API verbs, like: get, list, watch, create, update, delete, proxy.  "*" means all.
+  repeated string verbs = 1;
+
+  // APIGroups is the name of the APIGroup that contains the resources.  If multiple API groups are specified, any action requested against one of
+  // the enumerated resources in any API group will be allowed.  "*" means all.
+  // +optional
+  repeated string apiGroups = 2;
+
+  // Resources is a list of resources this rule applies to.  "*" means all in the specified apiGroups.
+  //  "*/foo" represents the subresource 'foo' for all resources in the specified apiGroups.
+  // +optional
+  repeated string resources = 3;
+
+  // ResourceNames is an optional white list of names that the rule applies to.  An empty set means that everything is allowed.  "*" means all.
+  // +optional
+  repeated string resourceNames = 4;
+}
+
+// SelfSubjectAccessReview checks whether or not the current user can perform an action.  Not filling in a
+// spec.namespace means "in all namespaces".  Self is a special case, because users should always be able
+// to check whether they can perform an action
+message SelfSubjectAccessReview {
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Spec holds information about the request being evaluated.  user and groups must be empty
+  optional SelfSubjectAccessReviewSpec spec = 2;
+
+  // Status is filled in by the server and indicates whether the request is allowed or not
+  // +optional
+  optional SubjectAccessReviewStatus status = 3;
+}
+
+// SelfSubjectAccessReviewSpec is a description of the access request.  Exactly one of ResourceAuthorizationAttributes
+// and NonResourceAuthorizationAttributes must be set
+message SelfSubjectAccessReviewSpec {
+  // ResourceAuthorizationAttributes describes information for a resource access request
+  // +optional
+  optional ResourceAttributes resourceAttributes = 1;
+
+  // NonResourceAttributes describes information for a non-resource access request
+  // +optional
+  optional NonResourceAttributes nonResourceAttributes = 2;
+}
+
+// SelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace.
+// The returned list of actions may be incomplete depending on the server's authorization mode,
+// and any errors experienced during the evaluation. SelfSubjectRulesReview should be used by UIs to show/hide actions,
+// or to quickly let an end user reason about their permissions. It should NOT be used by external systems to
+// drive authorization decisions as this raises confused deputy, cache lifetime/revocation, and correctness concerns.
+// SubjectAccessReview and LocalSubjectAccessReview are the correct way to defer authorization decisions to the API server.
+message SelfSubjectRulesReview {
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Spec holds information about the request being evaluated.
+  optional SelfSubjectRulesReviewSpec spec = 2;
+
+  // Status is filled in by the server and indicates the set of actions a user can perform.
+  // +optional
+  optional SubjectRulesReviewStatus status = 3;
+}
+
+message SelfSubjectRulesReviewSpec {
+  // Namespace to evaluate rules for. Required.
+  optional string namespace = 1;
+}
+
+// SubjectAccessReview checks whether or not a user or group can perform an action.
+message SubjectAccessReview {
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Spec holds information about the request being evaluated
+  optional SubjectAccessReviewSpec spec = 2;
+
+  // Status is filled in by the server and indicates whether the request is allowed or not
+  // +optional
+  optional SubjectAccessReviewStatus status = 3;
+}
+
+// SubjectAccessReviewSpec is a description of the access request.  Exactly one of ResourceAuthorizationAttributes
+// and NonResourceAuthorizationAttributes must be set
+message SubjectAccessReviewSpec {
+  // ResourceAuthorizationAttributes describes information for a resource access request
+  // +optional
+  optional ResourceAttributes resourceAttributes = 1;
+
+  // NonResourceAttributes describes information for a non-resource access request
+  // +optional
+  optional NonResourceAttributes nonResourceAttributes = 2;
+
+  // User is the user you're testing for.
+  // If you specify "User" but not "Groups", then it is interpreted as "What if User were not a member of any groups?"
+  // +optional
+  optional string user = 3;
+
+  // Groups is the groups you're testing for.
+  // +optional
+  repeated string groups = 4;
+
+  // Extra corresponds to the user.Info.GetExtra() method from the authenticator.  Since that is input to the authorizer
+  // it needs a reflection here.
+  // +optional
+  map<string, ExtraValue> extra = 5;
+
+  // UID information about the requesting user.
+  // +optional
+  optional string uid = 6;
+}
+
+// SubjectAccessReviewStatus
+message SubjectAccessReviewStatus {
+  // Allowed is required. True if the action would be allowed, false otherwise.
+  optional bool allowed = 1;
+
+  // Denied is optional. True if the action would be denied, otherwise
+  // false. If both allowed is false and denied is false, then the
+  // authorizer has no opinion on whether to authorize the action. Denied
+  // may not be true if Allowed is true.
+  // +optional
+  optional bool denied = 4;
+
+  // Reason is optional.  It indicates why a request was allowed or denied.
+  // +optional
+  optional string reason = 2;
+
+  // EvaluationError is an indication that some error occurred during the authorization check.
+  // It is entirely possible to get an error and be able to continue to determine authorization status in spite of it.
+  // For instance, RBAC can be missing a role, but enough roles are still present and bound to reason about the request.
+  // +optional
+  optional string evaluationError = 3;
+}
+
+// SubjectRulesReviewStatus contains the result of a rules check. This check can be incomplete depending on
+// the set of authorizers the server is configured with and any errors experienced during evaluation.
+// Because authorization rules are additive, if a rule appears in a list it's safe to assume the subject has that permission,
+// even if that list is incomplete.
+message SubjectRulesReviewStatus {
+  // ResourceRules is the list of actions the subject is allowed to perform on resources.
+  // The list ordering isn't significant, may contain duplicates, and possibly be incomplete.
+  repeated ResourceRule resourceRules = 1;
+
+  // NonResourceRules is the list of actions the subject is allowed to perform on non-resources.
+  // The list ordering isn't significant, may contain duplicates, and possibly be incomplete.
+  repeated NonResourceRule nonResourceRules = 2;
+
+  // Incomplete is true when the rules returned by this call are incomplete. This is most commonly
+  // encountered when an authorizer, such as an external authorizer, doesn't support rules evaluation.
+  optional bool incomplete = 3;
+
+  // EvaluationError can appear in combination with Rules. It indicates an error occurred during
+  // rule evaluation, such as an authorizer that doesn't support rule evaluation, and that
+  // ResourceRules and/or NonResourceRules may be incomplete.
+  // +optional
+  optional string evaluationError = 4;
+}
+
diff --git a/vendor/k8s.io/api/authorization/v1/register.go b/vendor/k8s.io/api/authorization/v1/register.go
new file mode 100644
index 0000000..5931198
--- /dev/null
+++ b/vendor/k8s.io/api/authorization/v1/register.go
@@ -0,0 +1,55 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "authorization.k8s.io"
+
+// SchemeGroupVersion is the group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
+	// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+	SchemeBuilder      = runtime.NewSchemeBuilder(addKnownTypes)
+	localSchemeBuilder = &SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+)
+
+// addKnownTypes adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&SelfSubjectRulesReview{},
+		&SelfSubjectAccessReview{},
+		&SubjectAccessReview{},
+		&LocalSubjectAccessReview{},
+	)
+
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
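+
+// exampleScheme is an illustrative sketch, not part of the upstream file:
+// it shows how a caller would typically register these types before
+// encoding or decoding them. The helper name is hypothetical.
+func exampleScheme() (*runtime.Scheme, error) {
+	scheme := runtime.NewScheme()
+	// AddToScheme registers SubjectAccessReview and the other kinds above,
+	// plus the meta/v1 types for this group version.
+	if err := AddToScheme(scheme); err != nil {
+		return nil, err
+	}
+	return scheme, nil
+}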
diff --git a/vendor/k8s.io/api/authorization/v1/types.go b/vendor/k8s.io/api/authorization/v1/types.go
new file mode 100644
index 0000000..86b05c5
--- /dev/null
+++ b/vendor/k8s.io/api/authorization/v1/types.go
@@ -0,0 +1,268 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"fmt"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +genclient:noVerbs
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// SubjectAccessReview checks whether or not a user or group can perform an action.
+type SubjectAccessReview struct {
+	metav1.TypeMeta `json:",inline"`
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Spec holds information about the request being evaluated
+	Spec SubjectAccessReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+
+	// Status is filled in by the server and indicates whether the request is allowed or not
+	// +optional
+	Status SubjectAccessReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +genclient:noVerbs
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// SelfSubjectAccessReview checks whether or not the current user can perform an action.  Not filling in a
+// spec.namespace means "in all namespaces".  Self is a special case, because users should always be able
+// to check whether they can perform an action.
+type SelfSubjectAccessReview struct {
+	metav1.TypeMeta `json:",inline"`
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Spec holds information about the request being evaluated.  user and groups must be empty
+	Spec SelfSubjectAccessReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+
+	// Status is filled in by the server and indicates whether the request is allowed or not
+	// +optional
+	Status SubjectAccessReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +genclient
+// +genclient:noVerbs
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace.
+// Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions
+// checking.
+type LocalSubjectAccessReview struct {
+	metav1.TypeMeta `json:",inline"`
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Spec holds information about the request being evaluated.  spec.namespace must be equal to the namespace
+	// you made the request against.  If empty, it is defaulted.
+	Spec SubjectAccessReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+
+	// Status is filled in by the server and indicates whether the request is allowed or not
+	// +optional
+	Status SubjectAccessReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface
+type ResourceAttributes struct {
+	// Namespace is the namespace of the action being requested.  Currently, there is no distinction between no namespace and all namespaces
+	// "" (empty) is defaulted for LocalSubjectAccessReviews
+	// "" (empty) is empty for cluster-scoped resources
+	// "" (empty) means "all" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview
+	// +optional
+	Namespace string `json:"namespace,omitempty" protobuf:"bytes,1,opt,name=namespace"`
+	// Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy.  "*" means all.
+	// +optional
+	Verb string `json:"verb,omitempty" protobuf:"bytes,2,opt,name=verb"`
+	// Group is the API Group of the Resource.  "*" means all.
+	// +optional
+	Group string `json:"group,omitempty" protobuf:"bytes,3,opt,name=group"`
+	// Version is the API Version of the Resource.  "*" means all.
+	// +optional
+	Version string `json:"version,omitempty" protobuf:"bytes,4,opt,name=version"`
+	// Resource is one of the existing resource types.  "*" means all.
+	// +optional
+	Resource string `json:"resource,omitempty" protobuf:"bytes,5,opt,name=resource"`
+	// Subresource is one of the existing resource types.  "" means none.
+	// +optional
+	Subresource string `json:"subresource,omitempty" protobuf:"bytes,6,opt,name=subresource"`
+	// Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all.
+	// +optional
+	Name string `json:"name,omitempty" protobuf:"bytes,7,opt,name=name"`
+}
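+
+// exampleResourceAttributes is an illustrative sketch, not part of the
+// upstream file: the attributes for asking "can the subject get pods in
+// the default namespace?". The namespace and resource values are
+// hypothetical.
+func exampleResourceAttributes() *ResourceAttributes {
+	return &ResourceAttributes{
+		Namespace: "default",
+		Verb:      "get",
+		Group:     "", // "" is the core API group
+		Version:   "v1",
+		Resource:  "pods",
+	}
+}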
+
+// NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface
+type NonResourceAttributes struct {
+	// Path is the URL path of the request
+	// +optional
+	Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
+	// Verb is the standard HTTP verb
+	// +optional
+	Verb string `json:"verb,omitempty" protobuf:"bytes,2,opt,name=verb"`
+}
+
+// SubjectAccessReviewSpec is a description of the access request.  Exactly one of ResourceAttributes
+// and NonResourceAttributes must be set.
+type SubjectAccessReviewSpec struct {
+	// ResourceAttributes describes information for a resource access request
+	// +optional
+	ResourceAttributes *ResourceAttributes `json:"resourceAttributes,omitempty" protobuf:"bytes,1,opt,name=resourceAttributes"`
+	// NonResourceAttributes describes information for a non-resource access request
+	// +optional
+	NonResourceAttributes *NonResourceAttributes `json:"nonResourceAttributes,omitempty" protobuf:"bytes,2,opt,name=nonResourceAttributes"`
+
+	// User is the user you're testing for.
+	// If you specify "User" but not "Groups", then it is interpreted as "What if User were not a member of any groups?"
+	// +optional
+	User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"`
+	// Groups is the groups you're testing for.
+	// +optional
+	Groups []string `json:"groups,omitempty" protobuf:"bytes,4,rep,name=groups"`
+	// Extra corresponds to the user.Info.GetExtra() method from the authenticator.  Since that is input to the authorizer
+	// it needs a reflection here.
+	// +optional
+	Extra map[string]ExtraValue `json:"extra,omitempty" protobuf:"bytes,5,rep,name=extra"`
+	// UID information about the requesting user.
+	// +optional
+	UID string `json:"uid,omitempty" protobuf:"bytes,6,opt,name=uid"`
+}
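+
+// exampleSubjectAccessReview is an illustrative sketch, not part of the
+// upstream file: a complete review asking whether a hypothetical user
+// "jane" may list deployments. Exactly one of ResourceAttributes and
+// NonResourceAttributes is set, as the spec requires.
+func exampleSubjectAccessReview() *SubjectAccessReview {
+	return &SubjectAccessReview{
+		Spec: SubjectAccessReviewSpec{
+			User:   "jane",
+			Groups: []string{"system:authenticated"},
+			ResourceAttributes: &ResourceAttributes{
+				Namespace: "default",
+				Verb:      "list",
+				Group:     "apps",
+				Version:   "v1",
+				Resource:  "deployments",
+			},
+		},
+	}
+}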
+
+// ExtraValue masks the value so protobuf can generate
+// +protobuf.nullable=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+type ExtraValue []string
+
+func (t ExtraValue) String() string {
+	return fmt.Sprintf("%v", []string(t))
+}
+
+// SelfSubjectAccessReviewSpec is a description of the access request.  Exactly one of ResourceAttributes
+// and NonResourceAttributes must be set.
+type SelfSubjectAccessReviewSpec struct {
+	// ResourceAttributes describes information for a resource access request
+	// +optional
+	ResourceAttributes *ResourceAttributes `json:"resourceAttributes,omitempty" protobuf:"bytes,1,opt,name=resourceAttributes"`
+	// NonResourceAttributes describes information for a non-resource access request
+	// +optional
+	NonResourceAttributes *NonResourceAttributes `json:"nonResourceAttributes,omitempty" protobuf:"bytes,2,opt,name=nonResourceAttributes"`
+}
+
+// SubjectAccessReviewStatus describes the result of an access review.
+type SubjectAccessReviewStatus struct {
+	// Allowed is required. True if the action would be allowed, false otherwise.
+	Allowed bool `json:"allowed" protobuf:"varint,1,opt,name=allowed"`
+	// Denied is optional. True if the action would be denied, otherwise
+	// false. If both allowed is false and denied is false, then the
+	// authorizer has no opinion on whether to authorize the action. Denied
+	// may not be true if Allowed is true.
+	// +optional
+	Denied bool `json:"denied,omitempty" protobuf:"varint,4,opt,name=denied"`
+	// Reason is optional.  It indicates why a request was allowed or denied.
+	// +optional
+	Reason string `json:"reason,omitempty" protobuf:"bytes,2,opt,name=reason"`
+	// EvaluationError is an indication that some error occurred during the authorization check.
+	// It is entirely possible to get an error and be able to continue to determine authorization status in spite of it.
+	// For instance, RBAC can be missing a role, but enough roles are still present and bound to reason about the request.
+	// +optional
+	EvaluationError string `json:"evaluationError,omitempty" protobuf:"bytes,3,opt,name=evaluationError"`
+}
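+
+// exampleDecision is an illustrative sketch, not part of the upstream
+// file: the conventional three-way reading of the status fields. Allowed
+// grants access, an explicit deny is a deny, and otherwise this
+// authorizer expressed no opinion.
+func exampleDecision(s SubjectAccessReviewStatus) string {
+	switch {
+	case s.Allowed:
+		return "allowed"
+	case s.Denied:
+		return "denied"
+	default:
+		return "no opinion"
+	}
+}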
+
+// +genclient
+// +genclient:nonNamespaced
+// +genclient:noVerbs
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// SelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace.
+// The returned list of actions may be incomplete depending on the server's authorization mode,
+// and any errors experienced during the evaluation. SelfSubjectRulesReview should be used by UIs to show/hide actions,
+// or to quickly let an end user reason about their permissions. It should NOT be used by external systems to
+// drive authorization decisions as this raises confused deputy, cache lifetime/revocation, and correctness concerns.
+// SubjectAccessReview and LocalSubjectAccessReview are the correct way to defer authorization decisions to the API server.
+type SelfSubjectRulesReview struct {
+	metav1.TypeMeta `json:",inline"`
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Spec holds information about the request being evaluated.
+	Spec SelfSubjectRulesReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+
+	// Status is filled in by the server and indicates the set of actions a user can perform.
+	// +optional
+	Status SubjectRulesReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+type SelfSubjectRulesReviewSpec struct {
+	// Namespace to evaluate rules for. Required.
+	Namespace string `json:"namespace,omitempty" protobuf:"bytes,1,opt,name=namespace"`
+}
+
+// SubjectRulesReviewStatus contains the result of a rules check. This check can be incomplete depending on
+// the set of authorizers the server is configured with and any errors experienced during evaluation.
+// Because authorization rules are additive, if a rule appears in a list it's safe to assume the subject has that permission,
+// even if that list is incomplete.
+type SubjectRulesReviewStatus struct {
+	// ResourceRules is the list of actions the subject is allowed to perform on resources.
+	// The list ordering isn't significant, may contain duplicates, and possibly be incomplete.
+	ResourceRules []ResourceRule `json:"resourceRules" protobuf:"bytes,1,rep,name=resourceRules"`
+	// NonResourceRules is the list of actions the subject is allowed to perform on non-resources.
+	// The list ordering isn't significant, may contain duplicates, and possibly be incomplete.
+	NonResourceRules []NonResourceRule `json:"nonResourceRules" protobuf:"bytes,2,rep,name=nonResourceRules"`
+	// Incomplete is true when the rules returned by this call are incomplete. This is most commonly
+	// encountered when an authorizer, such as an external authorizer, doesn't support rules evaluation.
+	Incomplete bool `json:"incomplete" protobuf:"varint,3,opt,name=incomplete"`
+	// EvaluationError can appear in combination with Rules. It indicates an error occurred during
+	// rule evaluation, such as an authorizer that doesn't support rule evaluation, and that
+	// ResourceRules and/or NonResourceRules may be incomplete.
+	// +optional
+	EvaluationError string `json:"evaluationError,omitempty" protobuf:"bytes,4,opt,name=evaluationError"`
+}
+
+// ResourceRule is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant,
+// may contain duplicates, and possibly be incomplete.
+type ResourceRule struct {
+	// Verbs is a list of kubernetes resource API verbs, like: get, list, watch, create, update, delete, proxy.  "*" means all.
+	Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"`
+
+	// APIGroups is the name of the APIGroup that contains the resources.  If multiple API groups are specified, any action requested against one of
+	// the enumerated resources in any API group will be allowed.  "*" means all.
+	// +optional
+	APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,2,rep,name=apiGroups"`
+	// Resources is a list of resources this rule applies to.  "*" means all in the specified apiGroups.
+	//  "*/foo" represents the subresource 'foo' for all resources in the specified apiGroups.
+	// +optional
+	Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"`
+	// ResourceNames is an optional white list of names that the rule applies to.  An empty set means that everything is allowed.  "*" means all.
+	// +optional
+	ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,4,rep,name=resourceNames"`
+}
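+
+// exampleRuleCovers is an illustrative sketch, not part of the upstream
+// file: a naive check of whether a ResourceRule covers a verb and
+// resource, honoring the documented "*" wildcard. Real Kubernetes rule
+// evaluation is more involved; this only mirrors the comments above.
+func exampleRuleCovers(r ResourceRule, verb, resource string) bool {
+	match := func(list []string, want string) bool {
+		for _, v := range list {
+			if v == "*" || v == want {
+				return true
+			}
+		}
+		return false
+	}
+	// Verbs is required; Resources is optional, so an absent list is
+	// treated here as "no resource constraint" (an assumption).
+	return match(r.Verbs, verb) && (len(r.Resources) == 0 || match(r.Resources, resource))
+}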
+
+// NonResourceRule holds information that describes a rule for the non-resource
+type NonResourceRule struct {
+	// Verbs is a list of kubernetes non-resource API verbs, like: get, post, put, delete, patch, head, options.  "*" means all.
+	Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"`
+
+	// NonResourceURLs is a set of partial URLs that a user should have access to.  *s are allowed, but only as the full,
+	// final step in the path.  "*" means all.
+	// +optional
+	NonResourceURLs []string `json:"nonResourceURLs,omitempty" protobuf:"bytes,2,rep,name=nonResourceURLs"`
+}
diff --git a/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go
new file mode 100644
index 0000000..8445f71
--- /dev/null
+++ b/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go
@@ -0,0 +1,173 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multi-line comments or blocks that you want to ignore, use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_LocalSubjectAccessReview = map[string]string{
+	"":       "LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions checking.",
+	"spec":   "Spec holds information about the request being evaluated.  spec.namespace must be equal to the namespace you made the request against.  If empty, it is defaulted.",
+	"status": "Status is filled in by the server and indicates whether the request is allowed or not",
+}
+
+func (LocalSubjectAccessReview) SwaggerDoc() map[string]string {
+	return map_LocalSubjectAccessReview
+}
+
+var map_NonResourceAttributes = map[string]string{
+	"":     "NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface",
+	"path": "Path is the URL path of the request",
+	"verb": "Verb is the standard HTTP verb",
+}
+
+func (NonResourceAttributes) SwaggerDoc() map[string]string {
+	return map_NonResourceAttributes
+}
+
+var map_NonResourceRule = map[string]string{
+	"":                "NonResourceRule holds information that describes a rule for the non-resource",
+	"verbs":           "Verb is a list of kubernetes non-resource API verbs, like: get, post, put, delete, patch, head, options.  \"*\" means all.",
+	"nonResourceURLs": "NonResourceURLs is a set of partial urls that a user should have access to.  *s are allowed, but only as the full, final step in the path.  \"*\" means all.",
+}
+
+func (NonResourceRule) SwaggerDoc() map[string]string {
+	return map_NonResourceRule
+}
+
+var map_ResourceAttributes = map[string]string{
+	"":            "ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface",
+	"namespace":   "Namespace is the namespace of the action being requested.  Currently, there is no distinction between no namespace and all namespaces \"\" (empty) is defaulted for LocalSubjectAccessReviews \"\" (empty) is empty for cluster-scoped resources \"\" (empty) means \"all\" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview",
+	"verb":        "Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy.  \"*\" means all.",
+	"group":       "Group is the API Group of the Resource.  \"*\" means all.",
+	"version":     "Version is the API Version of the Resource.  \"*\" means all.",
+	"resource":    "Resource is one of the existing resource types.  \"*\" means all.",
+	"subresource": "Subresource is one of the existing resource types.  \"\" means none.",
+	"name":        "Name is the name of the resource being requested for a \"get\" or deleted for a \"delete\". \"\" (empty) means all.",
+}
+
+func (ResourceAttributes) SwaggerDoc() map[string]string {
+	return map_ResourceAttributes
+}
+
+var map_ResourceRule = map[string]string{
+	"":              "ResourceRule is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, may contain duplicates, and possibly be incomplete.",
+	"verbs":         "Verb is a list of kubernetes resource API verbs, like: get, list, watch, create, update, delete, proxy.  \"*\" means all.",
+	"apiGroups":     "APIGroups is the name of the APIGroup that contains the resources.  If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed.  \"*\" means all.",
+	"resources":     "Resources is a list of resources this rule applies to.  \"*\" means all in the specified apiGroups.\n \"*/foo\" represents the subresource 'foo' for all resources in the specified apiGroups.",
+	"resourceNames": "ResourceNames is an optional white list of names that the rule applies to.  An empty set means that everything is allowed.  \"*\" means all.",
+}
+
+func (ResourceRule) SwaggerDoc() map[string]string {
+	return map_ResourceRule
+}
+
+var map_SelfSubjectAccessReview = map[string]string{
+	"":       "SelfSubjectAccessReview checks whether or the current user can perform an action.  Not filling in a spec.namespace means \"in all namespaces\".  Self is a special case, because users should always be able to check whether they can perform an action",
+	"spec":   "Spec holds information about the request being evaluated.  user and groups must be empty",
+	"status": "Status is filled in by the server and indicates whether the request is allowed or not",
+}
+
+func (SelfSubjectAccessReview) SwaggerDoc() map[string]string {
+	return map_SelfSubjectAccessReview
+}
+
+var map_SelfSubjectAccessReviewSpec = map[string]string{
+	"":                      "SelfSubjectAccessReviewSpec is a description of the access request.  Exactly one of ResourceAuthorizationAttributes and NonResourceAuthorizationAttributes must be set",
+	"resourceAttributes":    "ResourceAuthorizationAttributes describes information for a resource access request",
+	"nonResourceAttributes": "NonResourceAttributes describes information for a non-resource access request",
+}
+
+func (SelfSubjectAccessReviewSpec) SwaggerDoc() map[string]string {
+	return map_SelfSubjectAccessReviewSpec
+}
+
+var map_SelfSubjectRulesReview = map[string]string{
+	"":       "SelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace. The returned list of actions may be incomplete depending on the server's authorization mode, and any errors experienced during the evaluation. SelfSubjectRulesReview should be used by UIs to show/hide actions, or to quickly let an end user reason about their permissions. It should NOT Be used by external systems to drive authorization decisions as this raises confused deputy, cache lifetime/revocation, and correctness concerns. SubjectAccessReview, and LocalAccessReview are the correct way to defer authorization decisions to the API server.",
+	"spec":   "Spec holds information about the request being evaluated.",
+	"status": "Status is filled in by the server and indicates the set of actions a user can perform.",
+}
+
+func (SelfSubjectRulesReview) SwaggerDoc() map[string]string {
+	return map_SelfSubjectRulesReview
+}
+
+var map_SelfSubjectRulesReviewSpec = map[string]string{
+	"namespace": "Namespace to evaluate rules for. Required.",
+}
+
+func (SelfSubjectRulesReviewSpec) SwaggerDoc() map[string]string {
+	return map_SelfSubjectRulesReviewSpec
+}
+
+var map_SubjectAccessReview = map[string]string{
+	"":       "SubjectAccessReview checks whether or not a user or group can perform an action.",
+	"spec":   "Spec holds information about the request being evaluated",
+	"status": "Status is filled in by the server and indicates whether the request is allowed or not",
+}
+
+func (SubjectAccessReview) SwaggerDoc() map[string]string {
+	return map_SubjectAccessReview
+}
+
+var map_SubjectAccessReviewSpec = map[string]string{
+	"":                      "SubjectAccessReviewSpec is a description of the access request.  Exactly one of ResourceAuthorizationAttributes and NonResourceAuthorizationAttributes must be set",
+	"resourceAttributes":    "ResourceAuthorizationAttributes describes information for a resource access request",
+	"nonResourceAttributes": "NonResourceAttributes describes information for a non-resource access request",
+	"user":                  "User is the user you're testing for. If you specify \"User\" but not \"Groups\", then is it interpreted as \"What if User were not a member of any groups",
+	"groups":                "Groups is the groups you're testing for.",
+	"extra":                 "Extra corresponds to the user.Info.GetExtra() method from the authenticator.  Since that is input to the authorizer it needs a reflection here.",
+	"uid":                   "UID information about the requesting user.",
+}
+
+func (SubjectAccessReviewSpec) SwaggerDoc() map[string]string {
+	return map_SubjectAccessReviewSpec
+}
+
+var map_SubjectAccessReviewStatus = map[string]string{
+	"":                "SubjectAccessReviewStatus",
+	"allowed":         "Allowed is required. True if the action would be allowed, false otherwise.",
+	"denied":          "Denied is optional. True if the action would be denied, otherwise false. If both allowed is false and denied is false, then the authorizer has no opinion on whether to authorize the action. Denied may not be true if Allowed is true.",
+	"reason":          "Reason is optional.  It indicates why a request was allowed or denied.",
+	"evaluationError": "EvaluationError is an indication that some error occurred during the authorization check. It is entirely possible to get an error and be able to continue determine authorization status in spite of it. For instance, RBAC can be missing a role, but enough roles are still present and bound to reason about the request.",
+}
+
+func (SubjectAccessReviewStatus) SwaggerDoc() map[string]string {
+	return map_SubjectAccessReviewStatus
+}
+
+var map_SubjectRulesReviewStatus = map[string]string{
+	"":                 "SubjectRulesReviewStatus contains the result of a rules check. This check can be incomplete depending on the set of authorizers the server is configured with and any errors experienced during evaluation. Because authorization rules are additive, if a rule appears in a list it's safe to assume the subject has that permission, even if that list is incomplete.",
+	"resourceRules":    "ResourceRules is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, may contain duplicates, and possibly be incomplete.",
+	"nonResourceRules": "NonResourceRules is the list of actions the subject is allowed to perform on non-resources. The list ordering isn't significant, may contain duplicates, and possibly be incomplete.",
+	"incomplete":       "Incomplete is true when the rules returned by this call are incomplete. This is most commonly encountered when an authorizer, such as an external authorizer, doesn't support rules evaluation.",
+	"evaluationError":  "EvaluationError can appear in combination with Rules. It indicates an error occurred during rule evaluation, such as an authorizer that doesn't support rule evaluation, and that ResourceRules and/or NonResourceRules may be incomplete.",
+}
+
+func (SubjectRulesReviewStatus) SwaggerDoc() map[string]string {
+	return map_SubjectRulesReviewStatus
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
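+
+// exampleFieldDoc is an illustrative sketch, not part of the generated
+// functions above: documentation consumers look up a field's description
+// by its JSON name, with the empty key holding the type-level description.
+func exampleFieldDoc() string {
+	return SubjectAccessReviewStatus{}.SwaggerDoc()["allowed"]
+}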
diff --git a/vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..1d11b38
--- /dev/null
+++ b/vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go
@@ -0,0 +1,385 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in ExtraValue) DeepCopyInto(out *ExtraValue) {
+	{
+		in := &in
+		*out = make(ExtraValue, len(*in))
+		copy(*out, *in)
+		return
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraValue.
+func (in ExtraValue) DeepCopy() ExtraValue {
+	if in == nil {
+		return nil
+	}
+	out := new(ExtraValue)
+	in.DeepCopyInto(out)
+	return *out
+}
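+
+// exampleDeepCopyIsolation is an illustrative sketch, not part of the
+// generated file: a deep copy must be independent of its source, so
+// mutating the copy leaves the original untouched.
+func exampleDeepCopyIsolation() bool {
+	orig := ExtraValue{"a", "b"}
+	cp := orig.DeepCopy()
+	cp[0] = "changed"
+	return orig[0] == "a" // true: the copy does not alias the original
+}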
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LocalSubjectAccessReview) DeepCopyInto(out *LocalSubjectAccessReview) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	out.Status = in.Status
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalSubjectAccessReview.
+func (in *LocalSubjectAccessReview) DeepCopy() *LocalSubjectAccessReview {
+	if in == nil {
+		return nil
+	}
+	out := new(LocalSubjectAccessReview)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *LocalSubjectAccessReview) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NonResourceAttributes) DeepCopyInto(out *NonResourceAttributes) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NonResourceAttributes.
+func (in *NonResourceAttributes) DeepCopy() *NonResourceAttributes {
+	if in == nil {
+		return nil
+	}
+	out := new(NonResourceAttributes)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NonResourceRule) DeepCopyInto(out *NonResourceRule) {
+	*out = *in
+	if in.Verbs != nil {
+		in, out := &in.Verbs, &out.Verbs
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.NonResourceURLs != nil {
+		in, out := &in.NonResourceURLs, &out.NonResourceURLs
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NonResourceRule.
+func (in *NonResourceRule) DeepCopy() *NonResourceRule {
+	if in == nil {
+		return nil
+	}
+	out := new(NonResourceRule)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceAttributes) DeepCopyInto(out *ResourceAttributes) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAttributes.
+func (in *ResourceAttributes) DeepCopy() *ResourceAttributes {
+	if in == nil {
+		return nil
+	}
+	out := new(ResourceAttributes)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceRule) DeepCopyInto(out *ResourceRule) {
+	*out = *in
+	if in.Verbs != nil {
+		in, out := &in.Verbs, &out.Verbs
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.APIGroups != nil {
+		in, out := &in.APIGroups, &out.APIGroups
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.ResourceNames != nil {
+		in, out := &in.ResourceNames, &out.ResourceNames
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRule.
+func (in *ResourceRule) DeepCopy() *ResourceRule {
+	if in == nil {
+		return nil
+	}
+	out := new(ResourceRule)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SelfSubjectAccessReview) DeepCopyInto(out *SelfSubjectAccessReview) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	out.Status = in.Status
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectAccessReview.
+func (in *SelfSubjectAccessReview) DeepCopy() *SelfSubjectAccessReview {
+	if in == nil {
+		return nil
+	}
+	out := new(SelfSubjectAccessReview)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *SelfSubjectAccessReview) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SelfSubjectAccessReviewSpec) DeepCopyInto(out *SelfSubjectAccessReviewSpec) {
+	*out = *in
+	if in.ResourceAttributes != nil {
+		in, out := &in.ResourceAttributes, &out.ResourceAttributes
+		*out = new(ResourceAttributes)
+		**out = **in
+	}
+	if in.NonResourceAttributes != nil {
+		in, out := &in.NonResourceAttributes, &out.NonResourceAttributes
+		*out = new(NonResourceAttributes)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectAccessReviewSpec.
+func (in *SelfSubjectAccessReviewSpec) DeepCopy() *SelfSubjectAccessReviewSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(SelfSubjectAccessReviewSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SelfSubjectRulesReview) DeepCopyInto(out *SelfSubjectRulesReview) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	out.Spec = in.Spec
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectRulesReview.
+func (in *SelfSubjectRulesReview) DeepCopy() *SelfSubjectRulesReview {
+	if in == nil {
+		return nil
+	}
+	out := new(SelfSubjectRulesReview)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *SelfSubjectRulesReview) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SelfSubjectRulesReviewSpec) DeepCopyInto(out *SelfSubjectRulesReviewSpec) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectRulesReviewSpec.
+func (in *SelfSubjectRulesReviewSpec) DeepCopy() *SelfSubjectRulesReviewSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(SelfSubjectRulesReviewSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SubjectAccessReview) DeepCopyInto(out *SubjectAccessReview) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	out.Status = in.Status
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectAccessReview.
+func (in *SubjectAccessReview) DeepCopy() *SubjectAccessReview {
+	if in == nil {
+		return nil
+	}
+	out := new(SubjectAccessReview)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *SubjectAccessReview) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SubjectAccessReviewSpec) DeepCopyInto(out *SubjectAccessReviewSpec) {
+	*out = *in
+	if in.ResourceAttributes != nil {
+		in, out := &in.ResourceAttributes, &out.ResourceAttributes
+		*out = new(ResourceAttributes)
+		**out = **in
+	}
+	if in.NonResourceAttributes != nil {
+		in, out := &in.NonResourceAttributes, &out.NonResourceAttributes
+		*out = new(NonResourceAttributes)
+		**out = **in
+	}
+	if in.Groups != nil {
+		in, out := &in.Groups, &out.Groups
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Extra != nil {
+		in, out := &in.Extra, &out.Extra
+		*out = make(map[string]ExtraValue, len(*in))
+		for key, val := range *in {
+			var outVal []string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				in, out := &val, &outVal
+				*out = make(ExtraValue, len(*in))
+				copy(*out, *in)
+			}
+			(*out)[key] = outVal
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectAccessReviewSpec.
+func (in *SubjectAccessReviewSpec) DeepCopy() *SubjectAccessReviewSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(SubjectAccessReviewSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SubjectAccessReviewStatus) DeepCopyInto(out *SubjectAccessReviewStatus) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectAccessReviewStatus.
+func (in *SubjectAccessReviewStatus) DeepCopy() *SubjectAccessReviewStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(SubjectAccessReviewStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SubjectRulesReviewStatus) DeepCopyInto(out *SubjectRulesReviewStatus) {
+	*out = *in
+	if in.ResourceRules != nil {
+		in, out := &in.ResourceRules, &out.ResourceRules
+		*out = make([]ResourceRule, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.NonResourceRules != nil {
+		in, out := &in.NonResourceRules, &out.NonResourceRules
+		*out = make([]NonResourceRule, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectRulesReviewStatus.
+func (in *SubjectRulesReviewStatus) DeepCopy() *SubjectRulesReviewStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(SubjectRulesReviewStatus)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/vendor/k8s.io/api/authorization/v1beta1/doc.go b/vendor/k8s.io/api/authorization/v1beta1/doc.go
new file mode 100644
index 0000000..324f293
--- /dev/null
+++ b/vendor/k8s.io/api/authorization/v1beta1/doc.go
@@ -0,0 +1,22 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:openapi-gen=true
+
+// +groupName=authorization.k8s.io
+
+package v1beta1 // import "k8s.io/api/authorization/v1beta1"
diff --git a/vendor/k8s.io/api/authorization/v1beta1/generated.proto b/vendor/k8s.io/api/authorization/v1beta1/generated.proto
new file mode 100644
index 0000000..3876a3e
--- /dev/null
+++ b/vendor/k8s.io/api/authorization/v1beta1/generated.proto
@@ -0,0 +1,272 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.api.authorization.v1beta1;
+
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1beta1";
+
+// ExtraValue masks the value so protobuf can generate
+// +protobuf.nullable=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+message ExtraValue {
+  // items, if empty, will result in an empty slice
+
+  repeated string items = 1;
+}
+
+// LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace.
+// Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions
+// checking.
+message LocalSubjectAccessReview {
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Spec holds information about the request being evaluated.  spec.namespace must be equal to the namespace
+  // you made the request against.  If empty, it is defaulted.
+  optional SubjectAccessReviewSpec spec = 2;
+
+  // Status is filled in by the server and indicates whether the request is allowed or not
+  // +optional
+  optional SubjectAccessReviewStatus status = 3;
+}
+
+// NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface
+message NonResourceAttributes {
+  // Path is the URL path of the request
+  // +optional
+  optional string path = 1;
+
+  // Verb is the standard HTTP verb
+  // +optional
+  optional string verb = 2;
+}
+
+// NonResourceRule holds information that describes a rule for the non-resource
+message NonResourceRule {
+  // Verbs is a list of kubernetes non-resource API verbs, like: get, post, put, delete, patch, head, options.  "*" means all.
+  repeated string verbs = 1;
+
+  // NonResourceURLs is a set of partial URLs that a user should have access to.  *s are allowed, but only as the full,
+  // final step in the path.  "*" means all.
+  // +optional
+  repeated string nonResourceURLs = 2;
+}
+
+// ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface
+message ResourceAttributes {
+  // Namespace is the namespace of the action being requested.  Currently, there is no distinction between no namespace and all namespaces
+  // "" (empty) is defaulted for LocalSubjectAccessReviews
+  // "" (empty) is empty for cluster-scoped resources
+  // "" (empty) means "all" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview
+  // +optional
+  optional string namespace = 1;
+
+  // Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy.  "*" means all.
+  // +optional
+  optional string verb = 2;
+
+  // Group is the API Group of the Resource.  "*" means all.
+  // +optional
+  optional string group = 3;
+
+  // Version is the API Version of the Resource.  "*" means all.
+  // +optional
+  optional string version = 4;
+
+  // Resource is one of the existing resource types.  "*" means all.
+  // +optional
+  optional string resource = 5;
+
+  // Subresource is one of the existing resource types.  "" means none.
+  // +optional
+  optional string subresource = 6;
+
+  // Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all.
+  // +optional
+  optional string name = 7;
+}
+
+// ResourceRule is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant,
+// may contain duplicates, and possibly be incomplete.
+message ResourceRule {
+  // Verbs is a list of kubernetes resource API verbs, like: get, list, watch, create, update, delete, proxy.  "*" means all.
+  repeated string verbs = 1;
+
+  // APIGroups is the name of the APIGroup that contains the resources.  If multiple API groups are specified, any action requested against one of
+  // the enumerated resources in any API group will be allowed.  "*" means all.
+  // +optional
+  repeated string apiGroups = 2;
+
+  // Resources is a list of resources this rule applies to.  "*" means all in the specified apiGroups.
+  //  "*/foo" represents the subresource 'foo' for all resources in the specified apiGroups.
+  // +optional
+  repeated string resources = 3;
+
+  // ResourceNames is an optional white list of names that the rule applies to.  An empty set means that everything is allowed.  "*" means all.
+  // +optional
+  repeated string resourceNames = 4;
+}
+
+// SelfSubjectAccessReview checks whether or not the current user can perform an action.  Not filling in a
+// spec.namespace means "in all namespaces".  Self is a special case, because users should always be able
+// to check whether they can perform an action.
+message SelfSubjectAccessReview {
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Spec holds information about the request being evaluated.  user and groups must be empty
+  optional SelfSubjectAccessReviewSpec spec = 2;
+
+  // Status is filled in by the server and indicates whether the request is allowed or not
+  // +optional
+  optional SubjectAccessReviewStatus status = 3;
+}
+
+// SelfSubjectAccessReviewSpec is a description of the access request.  Exactly one of ResourceAttributes
+// and NonResourceAttributes must be set.
+message SelfSubjectAccessReviewSpec {
+  // ResourceAttributes describes information for a resource access request
+  // +optional
+  optional ResourceAttributes resourceAttributes = 1;
+
+  // NonResourceAttributes describes information for a non-resource access request
+  // +optional
+  optional NonResourceAttributes nonResourceAttributes = 2;
+}
+
+// SelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace.
+// The returned list of actions may be incomplete depending on the server's authorization mode,
+// and any errors experienced during the evaluation. SelfSubjectRulesReview should be used by UIs to show/hide actions,
+// or to quickly let an end user reason about their permissions. It should NOT be used by external systems to
+// drive authorization decisions as this raises confused deputy, cache lifetime/revocation, and correctness concerns.
+// SubjectAccessReview and LocalSubjectAccessReview are the correct way to defer authorization decisions to the API server.
+message SelfSubjectRulesReview {
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Spec holds information about the request being evaluated.
+  optional SelfSubjectRulesReviewSpec spec = 2;
+
+  // Status is filled in by the server and indicates the set of actions a user can perform.
+  // +optional
+  optional SubjectRulesReviewStatus status = 3;
+}
+
+message SelfSubjectRulesReviewSpec {
+  // Namespace to evaluate rules for. Required.
+  optional string namespace = 1;
+}
+
+// SubjectAccessReview checks whether or not a user or group can perform an action.
+message SubjectAccessReview {
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Spec holds information about the request being evaluated
+  optional SubjectAccessReviewSpec spec = 2;
+
+  // Status is filled in by the server and indicates whether the request is allowed or not
+  // +optional
+  optional SubjectAccessReviewStatus status = 3;
+}
+
+// SubjectAccessReviewSpec is a description of the access request.  Exactly one of ResourceAttributes
+// and NonResourceAttributes must be set.
+message SubjectAccessReviewSpec {
+  // ResourceAttributes describes information for a resource access request
+  // +optional
+  optional ResourceAttributes resourceAttributes = 1;
+
+  // NonResourceAttributes describes information for a non-resource access request
+  // +optional
+  optional NonResourceAttributes nonResourceAttributes = 2;
+
+  // User is the user you're testing for.
+  // If you specify "User" but not "Group", then it is interpreted as "What if User were not a member of any groups?"
+  // +optional
+  optional string user = 3;
+
+  // Groups is the groups you're testing for.
+  // +optional
+  repeated string group = 4;
+
+  // Extra corresponds to the user.Info.GetExtra() method from the authenticator.  Since that is input to the authorizer
+  // it needs a reflection here.
+  // +optional
+  map<string, ExtraValue> extra = 5;
+
+  // UID information about the requesting user.
+  // +optional
+  optional string uid = 6;
+}
+
+// SubjectAccessReviewStatus describes the result of an access review.
+message SubjectAccessReviewStatus {
+  // Allowed is required. True if the action would be allowed, false otherwise.
+  optional bool allowed = 1;
+
+  // Denied is optional. True if the action would be denied, otherwise
+  // false. If both allowed is false and denied is false, then the
+  // authorizer has no opinion on whether to authorize the action. Denied
+  // may not be true if Allowed is true.
+  // +optional
+  optional bool denied = 4;
+
+  // Reason is optional.  It indicates why a request was allowed or denied.
+  // +optional
+  optional string reason = 2;
+
+  // EvaluationError is an indication that some error occurred during the authorization check.
+  // It is entirely possible to get an error and be able to continue to determine authorization status in spite of it.
+  // For instance, RBAC can be missing a role, but enough roles are still present and bound to reason about the request.
+  // +optional
+  optional string evaluationError = 3;
+}
+
+// SubjectRulesReviewStatus contains the result of a rules check. This check can be incomplete depending on
+// the set of authorizers the server is configured with and any errors experienced during evaluation.
+// Because authorization rules are additive, if a rule appears in a list it's safe to assume the subject has that permission,
+// even if that list is incomplete.
+message SubjectRulesReviewStatus {
+  // ResourceRules is the list of actions the subject is allowed to perform on resources.
+  // The list ordering isn't significant, may contain duplicates, and possibly be incomplete.
+  repeated ResourceRule resourceRules = 1;
+
+  // NonResourceRules is the list of actions the subject is allowed to perform on non-resources.
+  // The list ordering isn't significant, may contain duplicates, and possibly be incomplete.
+  repeated NonResourceRule nonResourceRules = 2;
+
+  // Incomplete is true when the rules returned by this call are incomplete. This is most commonly
+  // encountered when an authorizer, such as an external authorizer, doesn't support rules evaluation.
+  optional bool incomplete = 3;
+
+  // EvaluationError can appear in combination with Rules. It indicates an error occurred during
+  // rule evaluation, such as an authorizer that doesn't support rule evaluation, and that
+  // ResourceRules and/or NonResourceRules may be incomplete.
+  // +optional
+  optional string evaluationError = 4;
+}
+
diff --git a/vendor/k8s.io/api/authorization/v1beta1/register.go b/vendor/k8s.io/api/authorization/v1beta1/register.go
new file mode 100644
index 0000000..84255dd
--- /dev/null
+++ b/vendor/k8s.io/api/authorization/v1beta1/register.go
@@ -0,0 +1,55 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "authorization.k8s.io"
+
+// SchemeGroupVersion is the group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
+	// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+	SchemeBuilder      = runtime.NewSchemeBuilder(addKnownTypes)
+	localSchemeBuilder = &SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&SelfSubjectRulesReview{},
+		&SelfSubjectAccessReview{},
+		&SubjectAccessReview{},
+		&LocalSubjectAccessReview{},
+	)
+
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
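For context, a minimal sketch of how the registration helpers above are typically exercised, assuming the vendored import path k8s.io/api/authorization/v1beta1:

package main

import (
	"fmt"

	authzv1beta1 "k8s.io/api/authorization/v1beta1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// AddToScheme registers the four review kinds plus the meta/v1
	// types for the authorization.k8s.io/v1beta1 group-version.
	scheme := runtime.NewScheme()
	if err := authzv1beta1.AddToScheme(scheme); err != nil {
		panic(err)
	}

	// Resource qualifies a bare resource name with the group.
	fmt.Println(authzv1beta1.Resource("subjectaccessreviews"))
	// Output: subjectaccessreviews.authorization.k8s.io
}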
diff --git a/vendor/k8s.io/api/authorization/v1beta1/types.go b/vendor/k8s.io/api/authorization/v1beta1/types.go
new file mode 100644
index 0000000..618ff8c
--- /dev/null
+++ b/vendor/k8s.io/api/authorization/v1beta1/types.go
@@ -0,0 +1,268 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	"fmt"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +genclient:noVerbs
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// SubjectAccessReview checks whether or not a user or group can perform an action.
+type SubjectAccessReview struct {
+	metav1.TypeMeta `json:",inline"`
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Spec holds information about the request being evaluated
+	Spec SubjectAccessReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+
+	// Status is filled in by the server and indicates whether the request is allowed or not
+	// +optional
+	Status SubjectAccessReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +genclient:noVerbs
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// SelfSubjectAccessReview checks whether or not the current user can perform an action.  Not filling in a
+// spec.namespace means "in all namespaces".  Self is a special case, because users should always be able
+// to check whether they can perform an action.
+type SelfSubjectAccessReview struct {
+	metav1.TypeMeta `json:",inline"`
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Spec holds information about the request being evaluated.  user and groups must be empty
+	Spec SelfSubjectAccessReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+
+	// Status is filled in by the server and indicates whether the request is allowed or not
+	// +optional
+	Status SubjectAccessReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +genclient
+// +genclient:noVerbs
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace.
+// Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions
+// checking.
+type LocalSubjectAccessReview struct {
+	metav1.TypeMeta `json:",inline"`
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Spec holds information about the request being evaluated.  spec.namespace must be equal to the namespace
+	// you made the request against.  If empty, it is defaulted.
+	Spec SubjectAccessReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+
+	// Status is filled in by the server and indicates whether the request is allowed or not
+	// +optional
+	Status SubjectAccessReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface
+type ResourceAttributes struct {
+	// Namespace is the namespace of the action being requested.  Currently, there is no distinction between no namespace and all namespaces
+	// "" (empty) is defaulted for LocalSubjectAccessReviews
+	// "" (empty) is empty for cluster-scoped resources
+	// "" (empty) means "all" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview
+	// +optional
+	Namespace string `json:"namespace,omitempty" protobuf:"bytes,1,opt,name=namespace"`
+	// Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy.  "*" means all.
+	// +optional
+	Verb string `json:"verb,omitempty" protobuf:"bytes,2,opt,name=verb"`
+	// Group is the API Group of the Resource.  "*" means all.
+	// +optional
+	Group string `json:"group,omitempty" protobuf:"bytes,3,opt,name=group"`
+	// Version is the API Version of the Resource.  "*" means all.
+	// +optional
+	Version string `json:"version,omitempty" protobuf:"bytes,4,opt,name=version"`
+	// Resource is one of the existing resource types.  "*" means all.
+	// +optional
+	Resource string `json:"resource,omitempty" protobuf:"bytes,5,opt,name=resource"`
+	// Subresource is one of the existing resource types.  "" means none.
+	// +optional
+	Subresource string `json:"subresource,omitempty" protobuf:"bytes,6,opt,name=subresource"`
+	// Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all.
+	// +optional
+	Name string `json:"name,omitempty" protobuf:"bytes,7,opt,name=name"`
+}
+
+// NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface
+type NonResourceAttributes struct {
+	// Path is the URL path of the request
+	// +optional
+	Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
+	// Verb is the standard HTTP verb
+	// +optional
+	Verb string `json:"verb,omitempty" protobuf:"bytes,2,opt,name=verb"`
+}
+
+// SubjectAccessReviewSpec is a description of the access request.  Exactly one of ResourceAttributes
+// and NonResourceAttributes must be set.
+type SubjectAccessReviewSpec struct {
+	// ResourceAttributes describes information for a resource access request
+	// +optional
+	ResourceAttributes *ResourceAttributes `json:"resourceAttributes,omitempty" protobuf:"bytes,1,opt,name=resourceAttributes"`
+	// NonResourceAttributes describes information for a non-resource access request
+	// +optional
+	NonResourceAttributes *NonResourceAttributes `json:"nonResourceAttributes,omitempty" protobuf:"bytes,2,opt,name=nonResourceAttributes"`
+
+	// User is the user you're testing for.
+	// If you specify "User" but not "Group", then it is interpreted as "What if User were not a member of any groups?"
+	// +optional
+	User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"`
+	// Groups is the groups you're testing for.
+	// +optional
+	Groups []string `json:"group,omitempty" protobuf:"bytes,4,rep,name=group"`
+	// Extra corresponds to the user.Info.GetExtra() method from the authenticator.  Since that is input to the authorizer
+	// it needs to be reflected here.
+	// +optional
+	Extra map[string]ExtraValue `json:"extra,omitempty" protobuf:"bytes,5,rep,name=extra"`
+	// UID information about the requesting user.
+	// +optional
+	UID string `json:"uid,omitempty" protobuf:"bytes,6,opt,name=uid"`
+}
+
+// ExtraValue masks the value so protobuf can generate
+// +protobuf.nullable=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+type ExtraValue []string
+
+func (t ExtraValue) String() string {
+	return fmt.Sprintf("%v", []string(t))
+}
+
+// SelfSubjectAccessReviewSpec is a description of the access request.  Exactly one of ResourceAttributes
+// and NonResourceAttributes must be set.
+type SelfSubjectAccessReviewSpec struct {
+	// ResourceAttributes describes information for a resource access request
+	// +optional
+	ResourceAttributes *ResourceAttributes `json:"resourceAttributes,omitempty" protobuf:"bytes,1,opt,name=resourceAttributes"`
+	// NonResourceAttributes describes information for a non-resource access request
+	// +optional
+	NonResourceAttributes *NonResourceAttributes `json:"nonResourceAttributes,omitempty" protobuf:"bytes,2,opt,name=nonResourceAttributes"`
+}
+
+// SubjectAccessReviewStatus is the result of an access review.
+type SubjectAccessReviewStatus struct {
+	// Allowed is required. True if the action would be allowed, false otherwise.
+	Allowed bool `json:"allowed" protobuf:"varint,1,opt,name=allowed"`
+	// Denied is optional. True if the action would be denied, otherwise
+	// false. If both allowed is false and denied is false, then the
+	// authorizer has no opinion on whether to authorize the action. Denied
+	// may not be true if Allowed is true.
+	// +optional
+	Denied bool `json:"denied,omitempty" protobuf:"varint,4,opt,name=denied"`
+	// Reason is optional.  It indicates why a request was allowed or denied.
+	// +optional
+	Reason string `json:"reason,omitempty" protobuf:"bytes,2,opt,name=reason"`
+	// EvaluationError is an indication that some error occurred during the authorization check.
+	// It is entirely possible to get an error and still be able to determine the authorization status in spite of it.
+	// For instance, RBAC can be missing a role, but enough roles are still present and bound to reason about the request.
+	// +optional
+	EvaluationError string `json:"evaluationError,omitempty" protobuf:"bytes,3,opt,name=evaluationError"`
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +genclient:noVerbs
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// SelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace.
+// The returned list of actions may be incomplete depending on the server's authorization mode,
+// and any errors experienced during the evaluation. SelfSubjectRulesReview should be used by UIs to show/hide actions,
+// or to quickly let an end user reason about their permissions. It should NOT be used by external systems to
+// drive authorization decisions as this raises confused deputy, cache lifetime/revocation, and correctness concerns.
+// SubjectAccessReview and LocalSubjectAccessReview are the correct way to defer authorization decisions to the API server.
+type SelfSubjectRulesReview struct {
+	metav1.TypeMeta `json:",inline"`
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Spec holds information about the request being evaluated.
+	Spec SelfSubjectRulesReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+
+	// Status is filled in by the server and indicates the set of actions a user can perform.
+	// +optional
+	Status SubjectRulesReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+type SelfSubjectRulesReviewSpec struct {
+	// Namespace to evaluate rules for. Required.
+	Namespace string `json:"namespace,omitempty" protobuf:"bytes,1,opt,name=namespace"`
+}
+
+// SubjectRulesReviewStatus contains the result of a rules check. This check can be incomplete depending on
+// the set of authorizers the server is configured with and any errors experienced during evaluation.
+// Because authorization rules are additive, if a rule appears in a list it's safe to assume the subject has that permission,
+// even if that list is incomplete.
+type SubjectRulesReviewStatus struct {
+	// ResourceRules is the list of actions the subject is allowed to perform on resources.
+	// The list ordering isn't significant, may contain duplicates, and may be incomplete.
+	ResourceRules []ResourceRule `json:"resourceRules" protobuf:"bytes,1,rep,name=resourceRules"`
+	// NonResourceRules is the list of actions the subject is allowed to perform on non-resources.
+	// The list ordering isn't significant, may contain duplicates, and may be incomplete.
+	NonResourceRules []NonResourceRule `json:"nonResourceRules" protobuf:"bytes,2,rep,name=nonResourceRules"`
+	// Incomplete is true when the rules returned by this call are incomplete. This is most commonly
+	// encountered when an authorizer, such as an external authorizer, doesn't support rules evaluation.
+	Incomplete bool `json:"incomplete" protobuf:"bytes,3,rep,name=incomplete"`
+	// EvaluationError can appear in combination with Rules. It indicates an error occurred during
+	// rule evaluation, such as an authorizer that doesn't support rule evaluation, and that
+	// ResourceRules and/or NonResourceRules may be incomplete.
+	// +optional
+	EvaluationError string `json:"evaluationError,omitempty" protobuf:"bytes,4,opt,name=evaluationError"`
+}
+
+// ResourceRule is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant,
+// may contain duplicates, and may be incomplete.
+type ResourceRule struct {
+	// Verbs is a list of kubernetes resource API verbs, like: get, list, watch, create, update, delete, proxy.  "*" means all.
+	Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"`
+
+	// APIGroups is the name of the APIGroup that contains the resources.  If multiple API groups are specified, any action requested against one of
+	// the enumerated resources in any API group will be allowed.  "*" means all.
+	// +optional
+	APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,2,rep,name=apiGroups"`
+	// Resources is a list of resources this rule applies to.  "*" means all in the specified apiGroups.
+	//  "*/foo" represents the subresource 'foo' for all resources in the specified apiGroups.
+	// +optional
+	Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"`
+	// ResourceNames is an optional white list of names that the rule applies to.  An empty set means that everything is allowed.  "*" means all.
+	// +optional
+	ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,4,rep,name=resourceNames"`
+}
+
+// NonResourceRule holds information that describes a rule for non-resource requests
+type NonResourceRule struct {
+	// Verbs is a list of kubernetes non-resource API verbs, like: get, post, put, delete, patch, head, options.  "*" means all.
+	Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"`
+
+	// NonResourceURLs is a set of partial urls that a user should have access to.  *s are allowed, but only as the full,
+	// final step in the path.  "*" means all.
+	// +optional
+	NonResourceURLs []string `json:"nonResourceURLs,omitempty" protobuf:"bytes,2,rep,name=nonResourceURLs"`
+}
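To make the types above concrete, here is a minimal sketch of building a SelfSubjectAccessReview asking "can the current user list pods in the default namespace?". In a real program the object would be submitted to the API server (e.g. via a generated client), which is outside the scope of this vendoring change.

package main

import (
	"fmt"

	authzv1beta1 "k8s.io/api/authorization/v1beta1"
)

func main() {
	// Exactly one of ResourceAttributes / NonResourceAttributes may be
	// set, per the spec comments above.
	review := &authzv1beta1.SelfSubjectAccessReview{
		Spec: authzv1beta1.SelfSubjectAccessReviewSpec{
			ResourceAttributes: &authzv1beta1.ResourceAttributes{
				Namespace: "default",
				Verb:      "list",
				Resource:  "pods",
			},
		},
	}
	fmt.Printf("%+v\n", review.Spec.ResourceAttributes)
}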
diff --git a/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go
new file mode 100644
index 0000000..3ae6e72
--- /dev/null
+++ b/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go
@@ -0,0 +1,173 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multi-line comments or blocks that you want to ignore, use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_LocalSubjectAccessReview = map[string]string{
+	"":       "LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions checking.",
+	"spec":   "Spec holds information about the request being evaluated.  spec.namespace must be equal to the namespace you made the request against.  If empty, it is defaulted.",
+	"status": "Status is filled in by the server and indicates whether the request is allowed or not",
+}
+
+func (LocalSubjectAccessReview) SwaggerDoc() map[string]string {
+	return map_LocalSubjectAccessReview
+}
+
+var map_NonResourceAttributes = map[string]string{
+	"":     "NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface",
+	"path": "Path is the URL path of the request",
+	"verb": "Verb is the standard HTTP verb",
+}
+
+func (NonResourceAttributes) SwaggerDoc() map[string]string {
+	return map_NonResourceAttributes
+}
+
+var map_NonResourceRule = map[string]string{
+	"":                "NonResourceRule holds information that describes a rule for the non-resource",
+	"verbs":           "Verb is a list of kubernetes non-resource API verbs, like: get, post, put, delete, patch, head, options.  \"*\" means all.",
+	"nonResourceURLs": "NonResourceURLs is a set of partial urls that a user should have access to.  *s are allowed, but only as the full, final step in the path.  \"*\" means all.",
+}
+
+func (NonResourceRule) SwaggerDoc() map[string]string {
+	return map_NonResourceRule
+}
+
+var map_ResourceAttributes = map[string]string{
+	"":            "ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface",
+	"namespace":   "Namespace is the namespace of the action being requested.  Currently, there is no distinction between no namespace and all namespaces \"\" (empty) is defaulted for LocalSubjectAccessReviews \"\" (empty) is empty for cluster-scoped resources \"\" (empty) means \"all\" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview",
+	"verb":        "Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy.  \"*\" means all.",
+	"group":       "Group is the API Group of the Resource.  \"*\" means all.",
+	"version":     "Version is the API Version of the Resource.  \"*\" means all.",
+	"resource":    "Resource is one of the existing resource types.  \"*\" means all.",
+	"subresource": "Subresource is one of the existing resource types.  \"\" means none.",
+	"name":        "Name is the name of the resource being requested for a \"get\" or deleted for a \"delete\". \"\" (empty) means all.",
+}
+
+func (ResourceAttributes) SwaggerDoc() map[string]string {
+	return map_ResourceAttributes
+}
+
+var map_ResourceRule = map[string]string{
+	"":              "ResourceRule is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, may contain duplicates, and possibly be incomplete.",
+	"verbs":         "Verb is a list of kubernetes resource API verbs, like: get, list, watch, create, update, delete, proxy.  \"*\" means all.",
+	"apiGroups":     "APIGroups is the name of the APIGroup that contains the resources.  If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed.  \"*\" means all.",
+	"resources":     "Resources is a list of resources this rule applies to.  \"*\" means all in the specified apiGroups.\n \"*/foo\" represents the subresource 'foo' for all resources in the specified apiGroups.",
+	"resourceNames": "ResourceNames is an optional white list of names that the rule applies to.  An empty set means that everything is allowed.  \"*\" means all.",
+}
+
+func (ResourceRule) SwaggerDoc() map[string]string {
+	return map_ResourceRule
+}
+
+var map_SelfSubjectAccessReview = map[string]string{
+	"":       "SelfSubjectAccessReview checks whether or the current user can perform an action.  Not filling in a spec.namespace means \"in all namespaces\".  Self is a special case, because users should always be able to check whether they can perform an action",
+	"spec":   "Spec holds information about the request being evaluated.  user and groups must be empty",
+	"status": "Status is filled in by the server and indicates whether the request is allowed or not",
+}
+
+func (SelfSubjectAccessReview) SwaggerDoc() map[string]string {
+	return map_SelfSubjectAccessReview
+}
+
+var map_SelfSubjectAccessReviewSpec = map[string]string{
+	"":                      "SelfSubjectAccessReviewSpec is a description of the access request.  Exactly one of ResourceAuthorizationAttributes and NonResourceAuthorizationAttributes must be set",
+	"resourceAttributes":    "ResourceAuthorizationAttributes describes information for a resource access request",
+	"nonResourceAttributes": "NonResourceAttributes describes information for a non-resource access request",
+}
+
+func (SelfSubjectAccessReviewSpec) SwaggerDoc() map[string]string {
+	return map_SelfSubjectAccessReviewSpec
+}
+
+var map_SelfSubjectRulesReview = map[string]string{
+	"":       "SelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace. The returned list of actions may be incomplete depending on the server's authorization mode, and any errors experienced during the evaluation. SelfSubjectRulesReview should be used by UIs to show/hide actions, or to quickly let an end user reason about their permissions. It should NOT Be used by external systems to drive authorization decisions as this raises confused deputy, cache lifetime/revocation, and correctness concerns. SubjectAccessReview, and LocalAccessReview are the correct way to defer authorization decisions to the API server.",
+	"spec":   "Spec holds information about the request being evaluated.",
+	"status": "Status is filled in by the server and indicates the set of actions a user can perform.",
+}
+
+func (SelfSubjectRulesReview) SwaggerDoc() map[string]string {
+	return map_SelfSubjectRulesReview
+}
+
+var map_SelfSubjectRulesReviewSpec = map[string]string{
+	"namespace": "Namespace to evaluate rules for. Required.",
+}
+
+func (SelfSubjectRulesReviewSpec) SwaggerDoc() map[string]string {
+	return map_SelfSubjectRulesReviewSpec
+}
+
+var map_SubjectAccessReview = map[string]string{
+	"":       "SubjectAccessReview checks whether or not a user or group can perform an action.",
+	"spec":   "Spec holds information about the request being evaluated",
+	"status": "Status is filled in by the server and indicates whether the request is allowed or not",
+}
+
+func (SubjectAccessReview) SwaggerDoc() map[string]string {
+	return map_SubjectAccessReview
+}
+
+var map_SubjectAccessReviewSpec = map[string]string{
+	"":                      "SubjectAccessReviewSpec is a description of the access request.  Exactly one of ResourceAuthorizationAttributes and NonResourceAuthorizationAttributes must be set",
+	"resourceAttributes":    "ResourceAuthorizationAttributes describes information for a resource access request",
+	"nonResourceAttributes": "NonResourceAttributes describes information for a non-resource access request",
+	"user":                  "User is the user you're testing for. If you specify \"User\" but not \"Group\", then is it interpreted as \"What if User were not a member of any groups",
+	"group":                 "Groups is the groups you're testing for.",
+	"extra":                 "Extra corresponds to the user.Info.GetExtra() method from the authenticator.  Since that is input to the authorizer it needs a reflection here.",
+	"uid":                   "UID information about the requesting user.",
+}
+
+func (SubjectAccessReviewSpec) SwaggerDoc() map[string]string {
+	return map_SubjectAccessReviewSpec
+}
+
+var map_SubjectAccessReviewStatus = map[string]string{
+	"":                "SubjectAccessReviewStatus",
+	"allowed":         "Allowed is required. True if the action would be allowed, false otherwise.",
+	"denied":          "Denied is optional. True if the action would be denied, otherwise false. If both allowed is false and denied is false, then the authorizer has no opinion on whether to authorize the action. Denied may not be true if Allowed is true.",
+	"reason":          "Reason is optional.  It indicates why a request was allowed or denied.",
+	"evaluationError": "EvaluationError is an indication that some error occurred during the authorization check. It is entirely possible to get an error and be able to continue determine authorization status in spite of it. For instance, RBAC can be missing a role, but enough roles are still present and bound to reason about the request.",
+}
+
+func (SubjectAccessReviewStatus) SwaggerDoc() map[string]string {
+	return map_SubjectAccessReviewStatus
+}
+
+var map_SubjectRulesReviewStatus = map[string]string{
+	"":                 "SubjectRulesReviewStatus contains the result of a rules check. This check can be incomplete depending on the set of authorizers the server is configured with and any errors experienced during evaluation. Because authorization rules are additive, if a rule appears in a list it's safe to assume the subject has that permission, even if that list is incomplete.",
+	"resourceRules":    "ResourceRules is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, may contain duplicates, and possibly be incomplete.",
+	"nonResourceRules": "NonResourceRules is the list of actions the subject is allowed to perform on non-resources. The list ordering isn't significant, may contain duplicates, and possibly be incomplete.",
+	"incomplete":       "Incomplete is true when the rules returned by this call are incomplete. This is most commonly encountered when an authorizer, such as an external authorizer, doesn't support rules evaluation.",
+	"evaluationError":  "EvaluationError can appear in combination with Rules. It indicates an error occurred during rule evaluation, such as an authorizer that doesn't support rule evaluation, and that ResourceRules and/or NonResourceRules may be incomplete.",
+}
+
+func (SubjectRulesReviewStatus) SwaggerDoc() map[string]string {
+	return map_SubjectRulesReviewStatus
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
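The generated SwaggerDoc maps above are plain field-name-to-description lookups; a minimal sketch of reading them (the "" key documents the type itself), again assuming the vendored import path:

package main

import (
	"fmt"

	authzv1beta1 "k8s.io/api/authorization/v1beta1"
)

func main() {
	docs := authzv1beta1.ResourceAttributes{}.SwaggerDoc()
	fmt.Println(docs[""])     // type-level description
	fmt.Println(docs["verb"]) // per-field description
}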
diff --git a/vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..58b2dfe
--- /dev/null
+++ b/vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go
@@ -0,0 +1,385 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in ExtraValue) DeepCopyInto(out *ExtraValue) {
+	{
+		in := &in
+		*out = make(ExtraValue, len(*in))
+		copy(*out, *in)
+		return
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraValue.
+func (in ExtraValue) DeepCopy() ExtraValue {
+	if in == nil {
+		return nil
+	}
+	out := new(ExtraValue)
+	in.DeepCopyInto(out)
+	return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LocalSubjectAccessReview) DeepCopyInto(out *LocalSubjectAccessReview) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	out.Status = in.Status
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalSubjectAccessReview.
+func (in *LocalSubjectAccessReview) DeepCopy() *LocalSubjectAccessReview {
+	if in == nil {
+		return nil
+	}
+	out := new(LocalSubjectAccessReview)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *LocalSubjectAccessReview) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NonResourceAttributes) DeepCopyInto(out *NonResourceAttributes) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NonResourceAttributes.
+func (in *NonResourceAttributes) DeepCopy() *NonResourceAttributes {
+	if in == nil {
+		return nil
+	}
+	out := new(NonResourceAttributes)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NonResourceRule) DeepCopyInto(out *NonResourceRule) {
+	*out = *in
+	if in.Verbs != nil {
+		in, out := &in.Verbs, &out.Verbs
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.NonResourceURLs != nil {
+		in, out := &in.NonResourceURLs, &out.NonResourceURLs
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NonResourceRule.
+func (in *NonResourceRule) DeepCopy() *NonResourceRule {
+	if in == nil {
+		return nil
+	}
+	out := new(NonResourceRule)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceAttributes) DeepCopyInto(out *ResourceAttributes) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAttributes.
+func (in *ResourceAttributes) DeepCopy() *ResourceAttributes {
+	if in == nil {
+		return nil
+	}
+	out := new(ResourceAttributes)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceRule) DeepCopyInto(out *ResourceRule) {
+	*out = *in
+	if in.Verbs != nil {
+		in, out := &in.Verbs, &out.Verbs
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.APIGroups != nil {
+		in, out := &in.APIGroups, &out.APIGroups
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.ResourceNames != nil {
+		in, out := &in.ResourceNames, &out.ResourceNames
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRule.
+func (in *ResourceRule) DeepCopy() *ResourceRule {
+	if in == nil {
+		return nil
+	}
+	out := new(ResourceRule)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SelfSubjectAccessReview) DeepCopyInto(out *SelfSubjectAccessReview) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	out.Status = in.Status
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectAccessReview.
+func (in *SelfSubjectAccessReview) DeepCopy() *SelfSubjectAccessReview {
+	if in == nil {
+		return nil
+	}
+	out := new(SelfSubjectAccessReview)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *SelfSubjectAccessReview) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SelfSubjectAccessReviewSpec) DeepCopyInto(out *SelfSubjectAccessReviewSpec) {
+	*out = *in
+	if in.ResourceAttributes != nil {
+		in, out := &in.ResourceAttributes, &out.ResourceAttributes
+		*out = new(ResourceAttributes)
+		**out = **in
+	}
+	if in.NonResourceAttributes != nil {
+		in, out := &in.NonResourceAttributes, &out.NonResourceAttributes
+		*out = new(NonResourceAttributes)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectAccessReviewSpec.
+func (in *SelfSubjectAccessReviewSpec) DeepCopy() *SelfSubjectAccessReviewSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(SelfSubjectAccessReviewSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SelfSubjectRulesReview) DeepCopyInto(out *SelfSubjectRulesReview) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	out.Spec = in.Spec
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectRulesReview.
+func (in *SelfSubjectRulesReview) DeepCopy() *SelfSubjectRulesReview {
+	if in == nil {
+		return nil
+	}
+	out := new(SelfSubjectRulesReview)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *SelfSubjectRulesReview) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SelfSubjectRulesReviewSpec) DeepCopyInto(out *SelfSubjectRulesReviewSpec) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectRulesReviewSpec.
+func (in *SelfSubjectRulesReviewSpec) DeepCopy() *SelfSubjectRulesReviewSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(SelfSubjectRulesReviewSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SubjectAccessReview) DeepCopyInto(out *SubjectAccessReview) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	out.Status = in.Status
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectAccessReview.
+func (in *SubjectAccessReview) DeepCopy() *SubjectAccessReview {
+	if in == nil {
+		return nil
+	}
+	out := new(SubjectAccessReview)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *SubjectAccessReview) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SubjectAccessReviewSpec) DeepCopyInto(out *SubjectAccessReviewSpec) {
+	*out = *in
+	if in.ResourceAttributes != nil {
+		in, out := &in.ResourceAttributes, &out.ResourceAttributes
+		*out = new(ResourceAttributes)
+		**out = **in
+	}
+	if in.NonResourceAttributes != nil {
+		in, out := &in.NonResourceAttributes, &out.NonResourceAttributes
+		*out = new(NonResourceAttributes)
+		**out = **in
+	}
+	if in.Groups != nil {
+		in, out := &in.Groups, &out.Groups
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Extra != nil {
+		in, out := &in.Extra, &out.Extra
+		*out = make(map[string]ExtraValue, len(*in))
+		for key, val := range *in {
+			var outVal []string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				in, out := &val, &outVal
+				*out = make(ExtraValue, len(*in))
+				copy(*out, *in)
+			}
+			(*out)[key] = outVal
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectAccessReviewSpec.
+func (in *SubjectAccessReviewSpec) DeepCopy() *SubjectAccessReviewSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(SubjectAccessReviewSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SubjectAccessReviewStatus) DeepCopyInto(out *SubjectAccessReviewStatus) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectAccessReviewStatus.
+func (in *SubjectAccessReviewStatus) DeepCopy() *SubjectAccessReviewStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(SubjectAccessReviewStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SubjectRulesReviewStatus) DeepCopyInto(out *SubjectRulesReviewStatus) {
+	*out = *in
+	if in.ResourceRules != nil {
+		in, out := &in.ResourceRules, &out.ResourceRules
+		*out = make([]ResourceRule, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.NonResourceRules != nil {
+		in, out := &in.NonResourceRules, &out.NonResourceRules
+		*out = make([]NonResourceRule, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectRulesReviewStatus.
+func (in *SubjectRulesReviewStatus) DeepCopy() *SubjectRulesReviewStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(SubjectRulesReviewStatus)
+	in.DeepCopyInto(out)
+	return out
+}
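The point of these generated deep copies is that reference fields (slices, maps) do not alias the original. A minimal sketch demonstrating that with SubjectAccessReviewSpec (the user, group, and extra values are made up for illustration):

package main

import (
	"fmt"

	authzv1beta1 "k8s.io/api/authorization/v1beta1"
)

func main() {
	orig := &authzv1beta1.SubjectAccessReviewSpec{
		User:   "jane",
		Groups: []string{"dev"},
		Extra:  map[string]authzv1beta1.ExtraValue{"scopes": {"read"}},
	}
	cp := orig.DeepCopy()

	// Mutating the original must not leak into the copy.
	orig.Groups[0] = "ops"
	orig.Extra["scopes"][0] = "write"

	fmt.Println(cp.Groups[0])          // dev
	fmt.Println(cp.Extra["scopes"][0]) // read
}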
diff --git a/vendor/k8s.io/api/autoscaling/v1/doc.go b/vendor/k8s.io/api/autoscaling/v1/doc.go
new file mode 100644
index 0000000..9c3be84
--- /dev/null
+++ b/vendor/k8s.io/api/autoscaling/v1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:openapi-gen=true
+
+package v1 // import "k8s.io/api/autoscaling/v1"
diff --git a/vendor/k8s.io/api/autoscaling/v1/generated.proto b/vendor/k8s.io/api/autoscaling/v1/generated.proto
new file mode 100644
index 0000000..5b56b2a
--- /dev/null
+++ b/vendor/k8s.io/api/autoscaling/v1/generated.proto
@@ -0,0 +1,415 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.api.autoscaling.v1;
+
+import "k8s.io/api/core/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/api/resource/generated.proto";
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1";
+
+// CrossVersionObjectReference contains enough information to let you identify the referred resource.
+message CrossVersionObjectReference {
+  // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+  optional string kind = 1;
+
+  // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names
+  optional string name = 2;
+
+  // API version of the referent
+  // +optional
+  optional string apiVersion = 3;
+}
+
+// ExternalMetricSource indicates how to scale on a metric not associated with
+// any Kubernetes object (for example, the length of a queue in a cloud
+// messaging service, or QPS from a load balancer running outside of the cluster).
+message ExternalMetricSource {
+  // metricName is the name of the metric in question.
+  optional string metricName = 1;
+
+  // metricSelector is used to identify a specific time series
+  // within a given metric.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2;
+
+  // targetValue is the target value of the metric (as a quantity).
+  // Mutually exclusive with TargetAverageValue.
+  // +optional
+  optional k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3;
+
+  // targetAverageValue is the target per-pod value of global metric (as a quantity).
+  // Mutually exclusive with TargetValue.
+  // +optional
+  optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 4;
+}
+
+// ExternalMetricStatus indicates the current value of a global metric
+// not associated with any Kubernetes object.
+message ExternalMetricStatus {
+  // metricName is the name of a metric used for autoscaling in the
+  // metrics system.
+  optional string metricName = 1;
+
+  // metricSelector is used to identify a specific time series
+  // within a given metric.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2;
+
+  // currentValue is the current value of the metric (as a quantity)
+  optional k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3;
+
+  // currentAverageValue is the current value of metric averaged over autoscaled pods.
+  // +optional
+  optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 4;
+}
+
+// configuration of a horizontal pod autoscaler.
+message HorizontalPodAutoscaler {
+  // Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // behavior of the autoscaler. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.
+  // +optional
+  optional HorizontalPodAutoscalerSpec spec = 2;
+
+  // current information about the autoscaler.
+  // +optional
+  optional HorizontalPodAutoscalerStatus status = 3;
+}
+
+// HorizontalPodAutoscalerCondition describes the state of
+// a HorizontalPodAutoscaler at a certain point.
+message HorizontalPodAutoscalerCondition {
+  // type describes the current condition
+  optional string type = 1;
+
+  // status is the status of the condition (True, False, Unknown)
+  optional string status = 2;
+
+  // lastTransitionTime is the last time the condition transitioned from
+  // one status to another
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
+
+  // reason is the reason for the condition's last transition.
+  // +optional
+  optional string reason = 4;
+
+  // message is a human-readable explanation containing details about
+  // the transition
+  // +optional
+  optional string message = 5;
+}
+
+// list of horizontal pod autoscaler objects.
+message HorizontalPodAutoscalerList {
+  // Standard list metadata.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // list of horizontal pod autoscaler objects.
+  repeated HorizontalPodAutoscaler items = 2;
+}
+
+// specification of a horizontal pod autoscaler.
+message HorizontalPodAutoscalerSpec {
+  // reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption
+  // and will set the desired number of pods by using its Scale subresource.
+  optional CrossVersionObjectReference scaleTargetRef = 1;
+
+  // lower limit for the number of pods that can be set by the autoscaler, default 1.
+  // +optional
+  optional int32 minReplicas = 2;
+
+  // upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.
+  optional int32 maxReplicas = 3;
+
+  // target average CPU utilization (represented as a percentage of requested CPU) over all the pods;
+  // if not specified the default autoscaling policy will be used.
+  // +optional
+  optional int32 targetCPUUtilizationPercentage = 4;
+}
+
+// current status of a horizontal pod autoscaler
+message HorizontalPodAutoscalerStatus {
+  // most recent generation observed by this autoscaler.
+  // +optional
+  optional int64 observedGeneration = 1;
+
+  // last time the HorizontalPodAutoscaler scaled the number of pods;
+  // used by the autoscaler to control how often the number of pods is changed.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2;
+
+  // current number of replicas of pods managed by this autoscaler.
+  optional int32 currentReplicas = 3;
+
+  // desired number of replicas of pods managed by this autoscaler.
+  optional int32 desiredReplicas = 4;
+
+  // current average CPU utilization over all pods, represented as a percentage of requested CPU,
+  // e.g. 70 means that an average pod is now using 70% of its requested CPU.
+  // +optional
+  optional int32 currentCPUUtilizationPercentage = 5;
+}
+
+// MetricSpec specifies how to scale based on a single metric
+// (only `type` and one other matching field should be set at once).
+message MetricSpec {
+  // type is the type of metric source.  It should be one of "Object",
+  // "Pods" or "Resource", each mapping to a matching field in the object.
+  optional string type = 1;
+
+  // object refers to a metric describing a single kubernetes object
+  // (for example, hits-per-second on an Ingress object).
+  // +optional
+  optional ObjectMetricSource object = 2;
+
+  // pods refers to a metric describing each pod in the current scale target
+  // (for example, transactions-processed-per-second).  The values will be
+  // averaged together before being compared to the target value.
+  // +optional
+  optional PodsMetricSource pods = 3;
+
+  // resource refers to a resource metric (such as those specified in
+  // requests and limits) known to Kubernetes describing each pod in the
+  // current scale target (e.g. CPU or memory). Such metrics are built in to
+  // Kubernetes, and have special scaling options on top of those available
+  // to normal per-pod metrics using the "pods" source.
+  // +optional
+  optional ResourceMetricSource resource = 4;
+
+  // external refers to a global metric that is not associated
+  // with any Kubernetes object. It allows autoscaling based on information
+  // coming from components running outside of the cluster
+  // (for example, the length of a queue in a cloud messaging service, or
+  // QPS from a load balancer running outside of the cluster).
+  // +optional
+  optional ExternalMetricSource external = 5;
+}
+
+// MetricStatus describes the last-read state of a single metric.
+message MetricStatus {
+  // type is the type of metric source.  It will be one of "Object",
+  // "Pods" or "Resource", each corresponds to a matching field in the object.
+  optional string type = 1;
+
+  // object refers to a metric describing a single kubernetes object
+  // (for example, hits-per-second on an Ingress object).
+  // +optional
+  optional ObjectMetricStatus object = 2;
+
+  // pods refers to a metric describing each pod in the current scale target
+  // (for example, transactions-processed-per-second).  The values will be
+  // averaged together before being compared to the target value.
+  // +optional
+  optional PodsMetricStatus pods = 3;
+
+  // resource refers to a resource metric (such as those specified in
+  // requests and limits) known to Kubernetes describing each pod in the
+  // current scale target (e.g. CPU or memory). Such metrics are built in to
+  // Kubernetes, and have special scaling options on top of those available
+  // to normal per-pod metrics using the "pods" source.
+  // +optional
+  optional ResourceMetricStatus resource = 4;
+
+  // external refers to a global metric that is not associated
+  // with any Kubernetes object. It allows autoscaling based on information
+  // coming from components running outside of the cluster
+  // (for example, the length of a queue in a cloud messaging service, or
+  // QPS from a load balancer running outside of the cluster).
+  // +optional
+  optional ExternalMetricStatus external = 5;
+}
+
+// ObjectMetricSource indicates how to scale on a metric describing a
+// kubernetes object (for example, hits-per-second on an Ingress object).
+message ObjectMetricSource {
+  // target is the described Kubernetes object.
+  optional CrossVersionObjectReference target = 1;
+
+  // metricName is the name of the metric in question.
+  optional string metricName = 2;
+
+  // targetValue is the target value of the metric (as a quantity).
+  optional k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3;
+
+  // selector is the string-encoded form of a standard kubernetes label selector for the given metric.
+  // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping
+  // When unset, just the metricName will be used to gather metrics.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4;
+
+  // averageValue is the target value of the average of the
+  // metric across all relevant pods (as a quantity)
+  // +optional
+  optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 5;
+}
+
+// ObjectMetricStatus indicates the current value of a metric describing a
+// kubernetes object (for example, hits-per-second on an Ingress object).
+message ObjectMetricStatus {
+  // target is the described Kubernetes object.
+  optional CrossVersionObjectReference target = 1;
+
+  // metricName is the name of the metric in question.
+  optional string metricName = 2;
+
+  // currentValue is the current value of the metric (as a quantity).
+  optional k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3;
+
+  // selector is the string-encoded form of a standard kubernetes label selector for the given metric
+  // When set in the ObjectMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping.
+  // When unset, just the metricName will be used to gather metrics.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4;
+
+  // averageValue is the current value of the average of the
+  // metric across all relevant pods (as a quantity)
+  // +optional
+  optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 5;
+}
+
+// PodsMetricSource indicates how to scale on a metric describing each pod in
+// the current scale target (for example, transactions-processed-per-second).
+// The values will be averaged together before being compared to the target
+// value.
+message PodsMetricSource {
+  // metricName is the name of the metric in question
+  optional string metricName = 1;
+
+  // targetAverageValue is the target value of the average of the
+  // metric across all relevant pods (as a quantity)
+  optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 2;
+
+  // selector is the string-encoded form of a standard kubernetes label selector for the given metric
+  // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping
+  // When unset, just the metricName will be used to gather metrics.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3;
+}
+
+// PodsMetricStatus indicates the current value of a metric describing each pod in
+// the current scale target (for example, transactions-processed-per-second).
+message PodsMetricStatus {
+  // metricName is the name of the metric in question
+  optional string metricName = 1;
+
+  // currentAverageValue is the current value of the average of the
+  // metric across all relevant pods (as a quantity)
+  optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 2;
+
+  // selector is the string-encoded form of a standard kubernetes label selector for the given metric
+  // When set in the PodsMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping.
+  // When unset, just the metricName will be used to gather metrics.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3;
+}
+
+// ResourceMetricSource indicates how to scale on a resource metric known to
+// Kubernetes, as specified in requests and limits, describing each pod in the
+// current scale target (e.g. CPU or memory).  The values will be averaged
+// together before being compared to the target.  Such metrics are built in to
+// Kubernetes, and have special scaling options on top of those available to
+// normal per-pod metrics using the "pods" source.  Only one "target" type
+// should be set.
+message ResourceMetricSource {
+  // name is the name of the resource in question.
+  optional string name = 1;
+
+  // targetAverageUtilization is the target value of the average of the
+  // resource metric across all relevant pods, represented as a percentage of
+  // the requested value of the resource for the pods.
+  // +optional
+  optional int32 targetAverageUtilization = 2;
+
+  // targetAverageValue is the target value of the average of the
+  // resource metric across all relevant pods, as a raw value (instead of as
+  // a percentage of the request), similar to the "pods" metric source type.
+  // +optional
+  optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3;
+}
+
+// ResourceMetricStatus indicates the current value of a resource metric known to
+// Kubernetes, as specified in requests and limits, describing each pod in the
+// current scale target (e.g. CPU or memory).  Such metrics are built in to
+// Kubernetes, and have special scaling options on top of those available to
+// normal per-pod metrics using the "pods" source.
+message ResourceMetricStatus {
+  // name is the name of the resource in question.
+  optional string name = 1;
+
+  // currentAverageUtilization is the current value of the average of the
+  // resource metric across all relevant pods, represented as a percentage of
+  // the requested value of the resource for the pods.  It will only be
+  // present if `targetAverageUtilization` was set in the corresponding metric
+  // specification.
+  // +optional
+  optional int32 currentAverageUtilization = 2;
+
+  // currentAverageValue is the current value of the average of the
+  // resource metric across all relevant pods, as a raw value (instead of as
+  // a percentage of the request), similar to the "pods" metric source type.
+  // It will always be set, regardless of the corresponding metric specification.
+  optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3;
+}
+
+// Scale represents a scaling request for a resource.
+message Scale {
+  // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.
+  // +optional
+  optional ScaleSpec spec = 2;
+
+  // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only.
+  // +optional
+  optional ScaleStatus status = 3;
+}
+
+// ScaleSpec describes the attributes of a scale subresource.
+message ScaleSpec {
+  // desired number of instances for the scaled object.
+  // +optional
+  optional int32 replicas = 1;
+}
+
+// ScaleStatus represents the current status of a scale subresource.
+message ScaleStatus {
+  // actual number of observed instances of the scaled object.
+  optional int32 replicas = 1;
+
+  // label query over pods that should match the replicas count. This is the same
+  // as the label selector but in the string format to avoid introspection
+  // by clients. The string will be in the same format as the query-param syntax.
+  // More info about label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors
+  // +optional
+  optional string selector = 2;
+}
+
diff --git a/vendor/k8s.io/api/autoscaling/v1/register.go b/vendor/k8s.io/api/autoscaling/v1/register.go
new file mode 100644
index 0000000..8dfe361
--- /dev/null
+++ b/vendor/k8s.io/api/autoscaling/v1/register.go
@@ -0,0 +1,53 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "autoscaling"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
+	// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+	SchemeBuilder      = runtime.NewSchemeBuilder(addKnownTypes)
+	localSchemeBuilder = &SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&HorizontalPodAutoscaler{},
+		&HorizontalPodAutoscalerList{},
+		&Scale{},
+	)
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
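+
+// Example (illustrative sketch, not part of the upstream file): a consumer
+// typically builds a runtime.Scheme and registers this group/version with
+// AddToScheme before encoding or decoding autoscaling/v1 objects.
+//
+//	scheme := runtime.NewScheme()
+//	if err := AddToScheme(scheme); err != nil {
+//		panic(err)
+//	}
+//	// The scheme now recognizes the kinds registered in addKnownTypes.
+//	known := scheme.Recognizes(SchemeGroupVersion.WithKind("HorizontalPodAutoscaler"))
+//	_ = known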
diff --git a/vendor/k8s.io/api/autoscaling/v1/types.go b/vendor/k8s.io/api/autoscaling/v1/types.go
new file mode 100644
index 0000000..c03af13
--- /dev/null
+++ b/vendor/k8s.io/api/autoscaling/v1/types.go
@@ -0,0 +1,428 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// CrossVersionObjectReference contains enough information to let you identify the referred resource.
+type CrossVersionObjectReference struct {
+	// Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+	Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"`
+	// Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names
+	Name string `json:"name" protobuf:"bytes,2,opt,name=name"`
+	// API version of the referent
+	// +optional
+	APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"`
+}
+
+// specification of a horizontal pod autoscaler.
+type HorizontalPodAutoscalerSpec struct {
+	// reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption
+	// and will set the desired number of pods by using its Scale subresource.
+	ScaleTargetRef CrossVersionObjectReference `json:"scaleTargetRef" protobuf:"bytes,1,opt,name=scaleTargetRef"`
+	// lower limit for the number of pods that can be set by the autoscaler, default 1.
+	// +optional
+	MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"`
+	// upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.
+	MaxReplicas int32 `json:"maxReplicas" protobuf:"varint,3,opt,name=maxReplicas"`
+	// target average CPU utilization (represented as a percentage of requested CPU) over all the pods;
+	// if not specified the default autoscaling policy will be used.
+	// +optional
+	TargetCPUUtilizationPercentage *int32 `json:"targetCPUUtilizationPercentage,omitempty" protobuf:"varint,4,opt,name=targetCPUUtilizationPercentage"`
+}
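+
+// Example (illustrative sketch, not part of the upstream file): MinReplicas
+// and TargetCPUUtilizationPercentage are pointers so that "unset" is distinct
+// from an explicit zero; "my-app" below is a hypothetical target name.
+//
+//	minReplicas := int32(2)
+//	targetCPU := int32(80)
+//	spec := HorizontalPodAutoscalerSpec{
+//		ScaleTargetRef: CrossVersionObjectReference{
+//			Kind:       "Deployment",
+//			Name:       "my-app",
+//			APIVersion: "apps/v1",
+//		},
+//		MinReplicas:                    &minReplicas,
+//		MaxReplicas:                    10,
+//		TargetCPUUtilizationPercentage: &targetCPU,
+//	}
+//	_ = spec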
+
+// current status of a horizontal pod autoscaler
+type HorizontalPodAutoscalerStatus struct {
+	// most recent generation observed by this autoscaler.
+	// +optional
+	ObservedGeneration *int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"`
+
+	// last time the HorizontalPodAutoscaler scaled the number of pods;
+	// used by the autoscaler to control how often the number of pods is changed.
+	// +optional
+	LastScaleTime *metav1.Time `json:"lastScaleTime,omitempty" protobuf:"bytes,2,opt,name=lastScaleTime"`
+
+	// current number of replicas of pods managed by this autoscaler.
+	CurrentReplicas int32 `json:"currentReplicas" protobuf:"varint,3,opt,name=currentReplicas"`
+
+	// desired number of replicas of pods managed by this autoscaler.
+	DesiredReplicas int32 `json:"desiredReplicas" protobuf:"varint,4,opt,name=desiredReplicas"`
+
+	// current average CPU utilization over all pods, represented as a percentage of requested CPU,
+	// e.g. 70 means that an average pod is now using 70% of its requested CPU.
+	// +optional
+	CurrentCPUUtilizationPercentage *int32 `json:"currentCPUUtilizationPercentage,omitempty" protobuf:"varint,5,opt,name=currentCPUUtilizationPercentage"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// configuration of a horizontal pod autoscaler.
+type HorizontalPodAutoscaler struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.
+	// +optional
+	Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+	// current information about the autoscaler.
+	// +optional
+	Status HorizontalPodAutoscalerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// list of horizontal pod autoscaler objects.
+type HorizontalPodAutoscalerList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// list of horizontal pod autoscaler objects.
+	Items []HorizontalPodAutoscaler `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Scale represents a scaling request for a resource.
+type Scale struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.
+	// +optional
+	Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+	// current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only.
+	// +optional
+	Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// ScaleSpec describes the attributes of a scale subresource.
+type ScaleSpec struct {
+	// desired number of instances for the scaled object.
+	// +optional
+	Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
+}
+
+// ScaleStatus represents the current status of a scale subresource.
+type ScaleStatus struct {
+	// actual number of observed instances of the scaled object.
+	Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"`
+
+	// label query over pods that should match the replicas count. This is the same
+	// as the label selector but in the string format to avoid introspection
+	// by clients. The string will be in the same format as the query-param syntax.
+	// More info about label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors
+	// +optional
+	Selector string `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"`
+}
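+
+// Example (illustrative sketch, not part of the upstream file): Scale is the
+// wire type of the scale subresource; clients request a new size by writing
+// spec.replicas. With encoding/json imported, a request body marshals as:
+//
+//	s := Scale{Spec: ScaleSpec{Replicas: 5}}
+//	body, _ := json.Marshal(s) // {"metadata":{...},"spec":{"replicas":5},"status":{"replicas":0}}
+//	_ = body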
+
+// the types below are used in the alpha metrics annotation
+
+// MetricSourceType indicates the type of metric.
+type MetricSourceType string
+
+var (
+	// ObjectMetricSourceType is a metric describing a kubernetes object
+	// (for example, hits-per-second on an Ingress object).
+	ObjectMetricSourceType MetricSourceType = "Object"
+	// PodsMetricSourceType is a metric describing each pod in the current scale
+	// target (for example, transactions-processed-per-second).  The values
+	// will be averaged together before being compared to the target value.
+	PodsMetricSourceType MetricSourceType = "Pods"
+	// ResourceMetricSourceType is a resource metric known to Kubernetes, as
+	// specified in requests and limits, describing each pod in the current
+	// scale target (e.g. CPU or memory).  Such metrics are built in to
+	// Kubernetes, and have special scaling options on top of those available
+	// to normal per-pod metrics (the "pods" source).
+	ResourceMetricSourceType MetricSourceType = "Resource"
+	// ExternalMetricSourceType is a global metric that is not associated
+	// with any Kubernetes object. It allows autoscaling based on information
+	// coming from components running outside of the cluster
+	// (for example, the length of a queue in a cloud messaging service, or
+	// QPS from a load balancer running outside of the cluster).
+	ExternalMetricSourceType MetricSourceType = "External"
+)
+
+// MetricSpec specifies how to scale based on a single metric
+// (only `type` and one other matching field should be set at once).
+type MetricSpec struct {
+	// type is the type of metric source.  It should be one of "Object",
+	// "Pods", "Resource" or "External", each mapping to a matching field in the object.
+	Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"`
+
+	// object refers to a metric describing a single kubernetes object
+	// (for example, hits-per-second on an Ingress object).
+	// +optional
+	Object *ObjectMetricSource `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"`
+	// pods refers to a metric describing each pod in the current scale target
+	// (for example, transactions-processed-per-second).  The values will be
+	// averaged together before being compared to the target value.
+	// +optional
+	Pods *PodsMetricSource `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"`
+	// resource refers to a resource metric (such as those specified in
+	// requests and limits) known to Kubernetes describing each pod in the
+	// current scale target (e.g. CPU or memory). Such metrics are built in to
+	// Kubernetes, and have special scaling options on top of those available
+	// to normal per-pod metrics using the "pods" source.
+	// +optional
+	Resource *ResourceMetricSource `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"`
+	// external refers to a global metric that is not associated
+	// with any Kubernetes object. It allows autoscaling based on information
+	// coming from components running outside of the cluster
+	// (for example, the length of a queue in a cloud messaging service, or
+	// QPS from a load balancer running outside of the cluster).
+	// +optional
+	External *ExternalMetricSource `json:"external,omitempty" protobuf:"bytes,5,opt,name=external"`
+}
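+
+// Example (illustrative sketch, not part of the upstream file): exactly one
+// source field should be set, and it should match Type. A hypothetical
+// helper checking that contract:
+//
+//	func oneSourceSet(m MetricSpec) bool {
+//		n := 0
+//		for _, set := range []bool{m.Object != nil, m.Pods != nil, m.Resource != nil, m.External != nil} {
+//			if set {
+//				n++
+//			}
+//		}
+//		return n == 1
+//	}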
+
+// ObjectMetricSource indicates how to scale on a metric describing a
+// kubernetes object (for example, hits-per-second on an Ingress object).
+type ObjectMetricSource struct {
+	// target is the described Kubernetes object.
+	Target CrossVersionObjectReference `json:"target" protobuf:"bytes,1,name=target"`
+
+	// metricName is the name of the metric in question.
+	MetricName string `json:"metricName" protobuf:"bytes,2,name=metricName"`
+	// targetValue is the target value of the metric (as a quantity).
+	TargetValue resource.Quantity `json:"targetValue" protobuf:"bytes,3,name=targetValue"`
+
+	// selector is the string-encoded form of a standard kubernetes label selector for the given metric.
+	// When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping
+	// When unset, just the metricName will be used to gather metrics.
+	// +optional
+	Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,name=selector"`
+	// averageValue is the target value of the average of the
+	// metric across all relevant pods (as a quantity)
+	// +optional
+	AverageValue *resource.Quantity `json:"averageValue,omitempty" protobuf:"bytes,5,name=averageValue"`
+}
+
+// PodsMetricSource indicates how to scale on a metric describing each pod in
+// the current scale target (for example, transactions-processed-per-second).
+// The values will be averaged together before being compared to the target
+// value.
+type PodsMetricSource struct {
+	// metricName is the name of the metric in question
+	MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"`
+	// targetAverageValue is the target value of the average of the
+	// metric across all relevant pods (as a quantity)
+	TargetAverageValue resource.Quantity `json:"targetAverageValue" protobuf:"bytes,2,name=targetAverageValue"`
+
+	// selector is the string-encoded form of a standard kubernetes label selector for the given metric
+	// When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping
+	// When unset, just the metricName will be used to gather metrics.
+	// +optional
+	Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,3,name=selector"`
+}
+
+// ResourceMetricSource indicates how to scale on a resource metric known to
+// Kubernetes, as specified in requests and limits, describing each pod in the
+// current scale target (e.g. CPU or memory).  The values will be averaged
+// together before being compared to the target.  Such metrics are built in to
+// Kubernetes, and have special scaling options on top of those available to
+// normal per-pod metrics using the "pods" source.  Only one "target" type
+// should be set.
+type ResourceMetricSource struct {
+	// name is the name of the resource in question.
+	Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"`
+	// targetAverageUtilization is the target value of the average of the
+	// resource metric across all relevant pods, represented as a percentage of
+	// the requested value of the resource for the pods.
+	// +optional
+	TargetAverageUtilization *int32 `json:"targetAverageUtilization,omitempty" protobuf:"varint,2,opt,name=targetAverageUtilization"`
+	// targetAverageValue is the target value of the average of the
+	// resource metric across all relevant pods, as a raw value (instead of as
+	// a percentage of the request), similar to the "pods" metric source type.
+	// +optional
+	TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty" protobuf:"bytes,3,opt,name=targetAverageValue"`
+}
+
+// ExternalMetricSource indicates how to scale on a metric not associated with
+// any Kubernetes object (for example, the length of a queue in a cloud
+// messaging service, or QPS from a load balancer running outside of the cluster).
+type ExternalMetricSource struct {
+	// metricName is the name of the metric in question.
+	MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"`
+	// metricSelector is used to identify a specific time series
+	// within a given metric.
+	// +optional
+	MetricSelector *metav1.LabelSelector `json:"metricSelector,omitempty" protobuf:"bytes,2,opt,name=metricSelector"`
+	// targetValue is the target value of the metric (as a quantity).
+	// Mutually exclusive with TargetAverageValue.
+	// +optional
+	TargetValue *resource.Quantity `json:"targetValue,omitempty" protobuf:"bytes,3,opt,name=targetValue"`
+	// targetAverageValue is the target per-pod value of a global metric (as a quantity).
+	// Mutually exclusive with TargetValue.
+	// +optional
+	TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty" protobuf:"bytes,4,opt,name=targetAverageValue"`
+}
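+
+// Example (illustrative sketch, not part of the upstream file): TargetValue
+// and TargetAverageValue are mutually exclusive, so a source targeting a
+// total queue length of 100 sets only TargetValue ("queue_length" is a
+// hypothetical metric name).
+//
+//	qty := resource.MustParse("100")
+//	src := ExternalMetricSource{
+//		MetricName:  "queue_length",
+//		TargetValue: &qty, // TargetAverageValue stays nil
+//	}
+//	_ = src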
+
+// MetricStatus describes the last-read state of a single metric.
+type MetricStatus struct {
+	// type is the type of metric source.  It will be one of "Object",
+	// "Pods", "Resource" or "External", each corresponding to a matching field in the object.
+	Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"`
+
+	// object refers to a metric describing a single kubernetes object
+	// (for example, hits-per-second on an Ingress object).
+	// +optional
+	Object *ObjectMetricStatus `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"`
+	// pods refers to a metric describing each pod in the current scale target
+	// (for example, transactions-processed-per-second).  The values will be
+	// averaged together before being compared to the target value.
+	// +optional
+	Pods *PodsMetricStatus `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"`
+	// resource refers to a resource metric (such as those specified in
+	// requests and limits) known to Kubernetes describing each pod in the
+	// current scale target (e.g. CPU or memory). Such metrics are built in to
+	// Kubernetes, and have special scaling options on top of those available
+	// to normal per-pod metrics using the "pods" source.
+	// +optional
+	Resource *ResourceMetricStatus `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"`
+	// external refers to a global metric that is not associated
+	// with any Kubernetes object. It allows autoscaling based on information
+	// coming from components running outside of the cluster
+	// (for example, the length of a queue in a cloud messaging service, or
+	// QPS from a load balancer running outside of the cluster).
+	// +optional
+	External *ExternalMetricStatus `json:"external,omitempty" protobuf:"bytes,5,opt,name=external"`
+}
+
+// HorizontalPodAutoscalerConditionType are the valid conditions of
+// a HorizontalPodAutoscaler.
+type HorizontalPodAutoscalerConditionType string
+
+var (
+	// ScalingActive indicates that the HPA controller is able to scale if necessary:
+	// it's correctly configured, can fetch the desired metrics, and isn't disabled.
+	ScalingActive HorizontalPodAutoscalerConditionType = "ScalingActive"
+	// AbleToScale indicates a lack of transient issues which prevent scaling from occurring,
+	// such as being in a backoff window, or being unable to access/update the target scale.
+	AbleToScale HorizontalPodAutoscalerConditionType = "AbleToScale"
+	// ScalingLimited indicates that the calculated scale based on metrics would be above or
+	// below the range for the HPA, and has thus been capped.
+	ScalingLimited HorizontalPodAutoscalerConditionType = "ScalingLimited"
+)
+
+// HorizontalPodAutoscalerCondition describes the state of
+// a HorizontalPodAutoscaler at a certain point.
+type HorizontalPodAutoscalerCondition struct {
+	// type describes the current condition
+	Type HorizontalPodAutoscalerConditionType `json:"type" protobuf:"bytes,1,name=type"`
+	// status is the status of the condition (True, False, Unknown)
+	Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,name=status"`
+	// lastTransitionTime is the last time the condition transitioned from
+	// one status to another
+	// +optional
+	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
+	// reason is the reason for the condition's last transition.
+	// +optional
+	Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
+	// message is a human-readable explanation containing details about
+	// the transition
+	// +optional
+	Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
+}
+
+// ObjectMetricStatus indicates the current value of a metric describing a
+// kubernetes object (for example, hits-per-second on an Ingress object).
+type ObjectMetricStatus struct {
+	// target is the described Kubernetes object.
+	Target CrossVersionObjectReference `json:"target" protobuf:"bytes,1,name=target"`
+
+	// metricName is the name of the metric in question.
+	MetricName string `json:"metricName" protobuf:"bytes,2,name=metricName"`
+	// currentValue is the current value of the metric (as a quantity).
+	CurrentValue resource.Quantity `json:"currentValue" protobuf:"bytes,3,name=currentValue"`
+
+	// selector is the string-encoded form of a standard kubernetes label selector for the given metric
+	// When set in the ObjectMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping.
+	// When unset, just the metricName will be used to gather metrics.
+	// +optional
+	Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,name=selector"`
+	// averageValue is the current value of the average of the
+	// metric across all relevant pods (as a quantity)
+	// +optional
+	AverageValue *resource.Quantity `json:"averageValue,omitempty" protobuf:"bytes,5,name=averageValue"`
+}
+
+// PodsMetricStatus indicates the current value of a metric describing each pod in
+// the current scale target (for example, transactions-processed-per-second).
+type PodsMetricStatus struct {
+	// metricName is the name of the metric in question
+	MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"`
+	// currentAverageValue is the current value of the average of the
+	// metric across all relevant pods (as a quantity)
+	CurrentAverageValue resource.Quantity `json:"currentAverageValue" protobuf:"bytes,2,name=currentAverageValue"`
+
+	// selector is the string-encoded form of a standard kubernetes label selector for the given metric
+	// When set in the PodsMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping.
+	// When unset, just the metricName will be used to gather metrics.
+	// +optional
+	Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,3,name=selector"`
+}
+
+// ResourceMetricStatus indicates the current value of a resource metric known to
+// Kubernetes, as specified in requests and limits, describing each pod in the
+// current scale target (e.g. CPU or memory).  Such metrics are built in to
+// Kubernetes, and have special scaling options on top of those available to
+// normal per-pod metrics using the "pods" source.
+type ResourceMetricStatus struct {
+	// name is the name of the resource in question.
+	Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"`
+	// currentAverageUtilization is the current value of the average of the
+	// resource metric across all relevant pods, represented as a percentage of
+	// the requested value of the resource for the pods.  It will only be
+	// present if `targetAverageUtilization` was set in the corresponding metric
+	// specification.
+	// +optional
+	CurrentAverageUtilization *int32 `json:"currentAverageUtilization,omitempty" protobuf:"bytes,2,opt,name=currentAverageUtilization"`
+	// currentAverageValue is the current value of the average of the
+	// resource metric across all relevant pods, as a raw value (instead of as
+	// a percentage of the request), similar to the "pods" metric source type.
+	// It will always be set, regardless of the corresponding metric specification.
+	CurrentAverageValue resource.Quantity `json:"currentAverageValue" protobuf:"bytes,3,name=currentAverageValue"`
+}
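+
+// Example (illustrative sketch, not part of the upstream file): because
+// CurrentAverageUtilization is optional, readers must nil-check it, while
+// CurrentAverageValue can be read directly (fmt is assumed imported).
+//
+//	func describe(st ResourceMetricStatus) string {
+//		if st.CurrentAverageUtilization != nil {
+//			return fmt.Sprintf("%s at %d%% of requests", st.Name, *st.CurrentAverageUtilization)
+//		}
+//		return fmt.Sprintf("%s at %s", st.Name, st.CurrentAverageValue.String())
+//	}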
+
+// ExternalMetricStatus indicates the current value of a global metric
+// not associated with any Kubernetes object.
+type ExternalMetricStatus struct {
+	// metricName is the name of a metric used for autoscaling in
+	// the metric system.
+	MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"`
+	// metricSelector is used to identify a specific time series
+	// within a given metric.
+	// +optional
+	MetricSelector *metav1.LabelSelector `json:"metricSelector,omitempty" protobuf:"bytes,2,opt,name=metricSelector"`
+	// currentValue is the current value of the metric (as a quantity)
+	CurrentValue resource.Quantity `json:"currentValue" protobuf:"bytes,3,name=currentValue"`
+	// currentAverageValue is the current value of the metric averaged over autoscaled pods.
+	// +optional
+	CurrentAverageValue *resource.Quantity `json:"currentAverageValue,omitempty" protobuf:"bytes,4,opt,name=currentAverageValue"`
+}
diff --git a/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go
new file mode 100644
index 0000000..72ac972
--- /dev/null
+++ b/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go
@@ -0,0 +1,250 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored by the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple lines or blocks that you want to ignore, use ---.
+// Any content after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_CrossVersionObjectReference = map[string]string{
+	"":           "CrossVersionObjectReference contains enough information to let you identify the referred resource.",
+	"kind":       "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds\"",
+	"name":       "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names",
+	"apiVersion": "API version of the referent",
+}
+
+func (CrossVersionObjectReference) SwaggerDoc() map[string]string {
+	return map_CrossVersionObjectReference
+}
+
+var map_ExternalMetricSource = map[string]string{
+	"":                   "ExternalMetricSource indicates how to scale on a metric not associated with any Kubernetes object (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).",
+	"metricName":         "metricName is the name of the metric in question.",
+	"metricSelector":     "metricSelector is used to identify a specific time series within a given metric.",
+	"targetValue":        "targetValue is the target value of the metric (as a quantity). Mutually exclusive with TargetAverageValue.",
+	"targetAverageValue": "targetAverageValue is the target per-pod value of global metric (as a quantity). Mutually exclusive with TargetValue.",
+}
+
+func (ExternalMetricSource) SwaggerDoc() map[string]string {
+	return map_ExternalMetricSource
+}
+
+var map_ExternalMetricStatus = map[string]string{
+	"":                    "ExternalMetricStatus indicates the current value of a global metric not associated with any Kubernetes object.",
+	"metricName":          "metricName is the name of a metric used for autoscaling in metric system.",
+	"metricSelector":      "metricSelector is used to identify a specific time series within a given metric.",
+	"currentValue":        "currentValue is the current value of the metric (as a quantity)",
+	"currentAverageValue": "currentAverageValue is the current value of metric averaged over autoscaled pods.",
+}
+
+func (ExternalMetricStatus) SwaggerDoc() map[string]string {
+	return map_ExternalMetricStatus
+}
+
+var map_HorizontalPodAutoscaler = map[string]string{
+	"":         "configuration of a horizontal pod autoscaler.",
+	"metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"spec":     "behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.",
+	"status":   "current information about the autoscaler.",
+}
+
+func (HorizontalPodAutoscaler) SwaggerDoc() map[string]string {
+	return map_HorizontalPodAutoscaler
+}
+
+var map_HorizontalPodAutoscalerCondition = map[string]string{
+	"":                   "HorizontalPodAutoscalerCondition describes the state of a HorizontalPodAutoscaler at a certain point.",
+	"type":               "type describes the current condition",
+	"status":             "status is the status of the condition (True, False, Unknown)",
+	"lastTransitionTime": "lastTransitionTime is the last time the condition transitioned from one status to another",
+	"reason":             "reason is the reason for the condition's last transition.",
+	"message":            "message is a human-readable explanation containing details about the transition",
+}
+
+func (HorizontalPodAutoscalerCondition) SwaggerDoc() map[string]string {
+	return map_HorizontalPodAutoscalerCondition
+}
+
+var map_HorizontalPodAutoscalerList = map[string]string{
+	"":         "list of horizontal pod autoscaler objects.",
+	"metadata": "Standard list metadata.",
+	"items":    "list of horizontal pod autoscaler objects.",
+}
+
+func (HorizontalPodAutoscalerList) SwaggerDoc() map[string]string {
+	return map_HorizontalPodAutoscalerList
+}
+
+var map_HorizontalPodAutoscalerSpec = map[string]string{
+	"":                               "specification of a horizontal pod autoscaler.",
+	"scaleTargetRef":                 "reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption and will set the desired number of pods by using its Scale subresource.",
+	"minReplicas":                    "lower limit for the number of pods that can be set by the autoscaler, default 1.",
+	"maxReplicas":                    "upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.",
+	"targetCPUUtilizationPercentage": "target average CPU utilization (represented as a percentage of requested CPU) over all the pods; if not specified the default autoscaling policy will be used.",
+}
+
+func (HorizontalPodAutoscalerSpec) SwaggerDoc() map[string]string {
+	return map_HorizontalPodAutoscalerSpec
+}
+
+var map_HorizontalPodAutoscalerStatus = map[string]string{
+	"":                                "current status of a horizontal pod autoscaler",
+	"observedGeneration":              "most recent generation observed by this autoscaler.",
+	"lastScaleTime":                   "last time the HorizontalPodAutoscaler scaled the number of pods; used by the autoscaler to control how often the number of pods is changed.",
+	"currentReplicas":                 "current number of replicas of pods managed by this autoscaler.",
+	"desiredReplicas":                 "desired number of replicas of pods managed by this autoscaler.",
+	"currentCPUUtilizationPercentage": "current average CPU utilization over all pods, represented as a percentage of requested CPU, e.g. 70 means that an average pod is using now 70% of its requested CPU.",
+}
+
+func (HorizontalPodAutoscalerStatus) SwaggerDoc() map[string]string {
+	return map_HorizontalPodAutoscalerStatus
+}
+
+var map_MetricSpec = map[string]string{
+	"":         "MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).",
+	"type":     "type is the type of metric source.  It should be one of \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object.",
+	"object":   "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).",
+	"pods":     "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second).  The values will be averaged together before being compared to the target value.",
+	"resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.",
+	"external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).",
+}
+
+func (MetricSpec) SwaggerDoc() map[string]string {
+	return map_MetricSpec
+}
+
+var map_MetricStatus = map[string]string{
+	"":         "MetricStatus describes the last-read state of a single metric.",
+	"type":     "type is the type of metric source.  It will be one of \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object.",
+	"object":   "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).",
+	"pods":     "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second).  The values will be averaged together before being compared to the target value.",
+	"resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.",
+	"external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).",
+}
+
+func (MetricStatus) SwaggerDoc() map[string]string {
+	return map_MetricStatus
+}
+
+var map_ObjectMetricSource = map[string]string{
+	"":             "ObjectMetricSource indicates how to scale on a metric describing a kubernetes object (for example, hits-per-second on an Ingress object).",
+	"target":       "target is the described Kubernetes object.",
+	"metricName":   "metricName is the name of the metric in question.",
+	"targetValue":  "targetValue is the target value of the metric (as a quantity).",
+	"selector":     "selector is the string-encoded form of a standard kubernetes label selector for the given metric. When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping When unset, just the metricName will be used to gather metrics.",
+	"averageValue": "averageValue is the target value of the average of the metric across all relevant pods (as a quantity)",
+}
+
+func (ObjectMetricSource) SwaggerDoc() map[string]string {
+	return map_ObjectMetricSource
+}
+
+var map_ObjectMetricStatus = map[string]string{
+	"":             "ObjectMetricStatus indicates the current value of a metric describing a kubernetes object (for example, hits-per-second on an Ingress object).",
+	"target":       "target is the described Kubernetes object.",
+	"metricName":   "metricName is the name of the metric in question.",
+	"currentValue": "currentValue is the current value of the metric (as a quantity).",
+	"selector":     "selector is the string-encoded form of a standard kubernetes label selector for the given metric When set in the ObjectMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.",
+	"averageValue": "averageValue is the current value of the average of the metric across all relevant pods (as a quantity)",
+}
+
+func (ObjectMetricStatus) SwaggerDoc() map[string]string {
+	return map_ObjectMetricStatus
+}
+
+var map_PodsMetricSource = map[string]string{
+	"":                   "PodsMetricSource indicates how to scale on a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.",
+	"metricName":         "metricName is the name of the metric in question",
+	"targetAverageValue": "targetAverageValue is the target value of the average of the metric across all relevant pods (as a quantity)",
+	"selector":           "selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping When unset, just the metricName will be used to gather metrics.",
+}
+
+func (PodsMetricSource) SwaggerDoc() map[string]string {
+	return map_PodsMetricSource
+}
+
+var map_PodsMetricStatus = map[string]string{
+	"":                    "PodsMetricStatus indicates the current value of a metric describing each pod in the current scale target (for example, transactions-processed-per-second).",
+	"metricName":          "metricName is the name of the metric in question",
+	"currentAverageValue": "currentAverageValue is the current value of the average of the metric across all relevant pods (as a quantity)",
+	"selector":            "selector is the string-encoded form of a standard kubernetes label selector for the given metric When set in the PodsMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.",
+}
+
+func (PodsMetricStatus) SwaggerDoc() map[string]string {
+	return map_PodsMetricStatus
+}
+
+var map_ResourceMetricSource = map[string]string{
+	"":                         "ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory).  The values will be averaged together before being compared to the target.  Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.  Only one \"target\" type should be set.",
+	"name":                     "name is the name of the resource in question.",
+	"targetAverageUtilization": "targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.",
+	"targetAverageValue":       "targetAverageValue is the target value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type.",
+}
+
+func (ResourceMetricSource) SwaggerDoc() map[string]string {
+	return map_ResourceMetricSource
+}
+
+var map_ResourceMetricStatus = map[string]string{
+	"":                          "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory).  Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.",
+	"name":                      "name is the name of the resource in question.",
+	"currentAverageUtilization": "currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.  It will only be present if `targetAverageValue` was set in the corresponding metric specification.",
+	"currentAverageValue":       "currentAverageValue is the current value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type. It will always be set, regardless of the corresponding metric specification.",
+}
+
+func (ResourceMetricStatus) SwaggerDoc() map[string]string {
+	return map_ResourceMetricStatus
+}
+
+var map_Scale = map[string]string{
+	"":         "Scale represents a scaling request for a resource.",
+	"metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.",
+	"spec":     "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.",
+	"status":   "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only.",
+}
+
+func (Scale) SwaggerDoc() map[string]string {
+	return map_Scale
+}
+
+var map_ScaleSpec = map[string]string{
+	"":         "ScaleSpec describes the attributes of a scale subresource.",
+	"replicas": "desired number of instances for the scaled object.",
+}
+
+func (ScaleSpec) SwaggerDoc() map[string]string {
+	return map_ScaleSpec
+}
+
+var map_ScaleStatus = map[string]string{
+	"":         "ScaleStatus represents the current status of a scale subresource.",
+	"replicas": "actual number of observed instances of the scaled object.",
+	"selector": "label query over pods that should match the replicas count. This is same as the label selector but in the string format to avoid introspection by clients. The string will be in the same format as the query-param syntax. More info about label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors",
+}
+
+func (ScaleStatus) SwaggerDoc() map[string]string {
+	return map_ScaleStatus
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
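+
+// Example (illustrative sketch, not part of the upstream file): go-restful
+// and related tooling read these maps through SwaggerDoc(); keys are JSON
+// field names and "" holds the type-level description.
+//
+//	docs := HorizontalPodAutoscalerSpec{}.SwaggerDoc()
+//	typeDoc := docs[""]             // "specification of a horizontal pod autoscaler."
+//	fieldDoc := docs["maxReplicas"] // per-field description
+//	_, _ = typeDoc, fieldDoc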
diff --git a/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..3fda47d
--- /dev/null
+++ b/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go
@@ -0,0 +1,515 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CrossVersionObjectReference) DeepCopyInto(out *CrossVersionObjectReference) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrossVersionObjectReference.
+func (in *CrossVersionObjectReference) DeepCopy() *CrossVersionObjectReference {
+	if in == nil {
+		return nil
+	}
+	out := new(CrossVersionObjectReference)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExternalMetricSource) DeepCopyInto(out *ExternalMetricSource) {
+	*out = *in
+	if in.MetricSelector != nil {
+		in, out := &in.MetricSelector, &out.MetricSelector
+		*out = new(metav1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.TargetValue != nil {
+		in, out := &in.TargetValue, &out.TargetValue
+		x := (*in).DeepCopy()
+		*out = &x
+	}
+	if in.TargetAverageValue != nil {
+		in, out := &in.TargetAverageValue, &out.TargetAverageValue
+		x := (*in).DeepCopy()
+		*out = &x
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalMetricSource.
+func (in *ExternalMetricSource) DeepCopy() *ExternalMetricSource {
+	if in == nil {
+		return nil
+	}
+	out := new(ExternalMetricSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExternalMetricStatus) DeepCopyInto(out *ExternalMetricStatus) {
+	*out = *in
+	if in.MetricSelector != nil {
+		in, out := &in.MetricSelector, &out.MetricSelector
+		*out = new(metav1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	out.CurrentValue = in.CurrentValue.DeepCopy()
+	if in.CurrentAverageValue != nil {
+		in, out := &in.CurrentAverageValue, &out.CurrentAverageValue
+		x := (*in).DeepCopy()
+		*out = &x
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalMetricStatus.
+func (in *ExternalMetricStatus) DeepCopy() *ExternalMetricStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ExternalMetricStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HorizontalPodAutoscaler) DeepCopyInto(out *HorizontalPodAutoscaler) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscaler.
+func (in *HorizontalPodAutoscaler) DeepCopy() *HorizontalPodAutoscaler {
+	if in == nil {
+		return nil
+	}
+	out := new(HorizontalPodAutoscaler)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *HorizontalPodAutoscaler) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
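+
+// Example (illustrative sketch, not part of the upstream file): DeepCopy
+// returns a fully independent object, so mutating the copy never aliases the
+// original; DeepCopyObject wraps it to satisfy runtime.Object.
+//
+//	orig := &HorizontalPodAutoscaler{}
+//	orig.Name = "a"
+//	cp := orig.DeepCopy()
+//	cp.Name = "b" // orig.Name is still "a"
+//	var obj runtime.Object = orig.DeepCopyObject()
+//	_ = obj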
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HorizontalPodAutoscalerCondition) DeepCopyInto(out *HorizontalPodAutoscalerCondition) {
+	*out = *in
+	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerCondition.
+func (in *HorizontalPodAutoscalerCondition) DeepCopy() *HorizontalPodAutoscalerCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(HorizontalPodAutoscalerCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HorizontalPodAutoscalerList) DeepCopyInto(out *HorizontalPodAutoscalerList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]HorizontalPodAutoscaler, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerList.
+func (in *HorizontalPodAutoscalerList) DeepCopy() *HorizontalPodAutoscalerList {
+	if in == nil {
+		return nil
+	}
+	out := new(HorizontalPodAutoscalerList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *HorizontalPodAutoscalerList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HorizontalPodAutoscalerSpec) DeepCopyInto(out *HorizontalPodAutoscalerSpec) {
+	*out = *in
+	out.ScaleTargetRef = in.ScaleTargetRef
+	if in.MinReplicas != nil {
+		in, out := &in.MinReplicas, &out.MinReplicas
+		*out = new(int32)
+		**out = **in
+	}
+	if in.TargetCPUUtilizationPercentage != nil {
+		in, out := &in.TargetCPUUtilizationPercentage, &out.TargetCPUUtilizationPercentage
+		*out = new(int32)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerSpec.
+func (in *HorizontalPodAutoscalerSpec) DeepCopy() *HorizontalPodAutoscalerSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(HorizontalPodAutoscalerSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HorizontalPodAutoscalerStatus) DeepCopyInto(out *HorizontalPodAutoscalerStatus) {
+	*out = *in
+	if in.ObservedGeneration != nil {
+		in, out := &in.ObservedGeneration, &out.ObservedGeneration
+		*out = new(int64)
+		**out = **in
+	}
+	if in.LastScaleTime != nil {
+		in, out := &in.LastScaleTime, &out.LastScaleTime
+		*out = (*in).DeepCopy()
+	}
+	if in.CurrentCPUUtilizationPercentage != nil {
+		in, out := &in.CurrentCPUUtilizationPercentage, &out.CurrentCPUUtilizationPercentage
+		*out = new(int32)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerStatus.
+func (in *HorizontalPodAutoscalerStatus) DeepCopy() *HorizontalPodAutoscalerStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(HorizontalPodAutoscalerStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MetricSpec) DeepCopyInto(out *MetricSpec) {
+	*out = *in
+	if in.Object != nil {
+		in, out := &in.Object, &out.Object
+		*out = new(ObjectMetricSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Pods != nil {
+		in, out := &in.Pods, &out.Pods
+		*out = new(PodsMetricSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Resource != nil {
+		in, out := &in.Resource, &out.Resource
+		*out = new(ResourceMetricSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.External != nil {
+		in, out := &in.External, &out.External
+		*out = new(ExternalMetricSource)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricSpec.
+func (in *MetricSpec) DeepCopy() *MetricSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(MetricSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MetricStatus) DeepCopyInto(out *MetricStatus) {
+	*out = *in
+	if in.Object != nil {
+		in, out := &in.Object, &out.Object
+		*out = new(ObjectMetricStatus)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Pods != nil {
+		in, out := &in.Pods, &out.Pods
+		*out = new(PodsMetricStatus)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Resource != nil {
+		in, out := &in.Resource, &out.Resource
+		*out = new(ResourceMetricStatus)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.External != nil {
+		in, out := &in.External, &out.External
+		*out = new(ExternalMetricStatus)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricStatus.
+func (in *MetricStatus) DeepCopy() *MetricStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(MetricStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectMetricSource) DeepCopyInto(out *ObjectMetricSource) {
+	*out = *in
+	out.Target = in.Target
+	out.TargetValue = in.TargetValue.DeepCopy()
+	if in.Selector != nil {
+		in, out := &in.Selector, &out.Selector
+		*out = new(metav1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.AverageValue != nil {
+		in, out := &in.AverageValue, &out.AverageValue
+		x := (*in).DeepCopy()
+		*out = &x
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMetricSource.
+func (in *ObjectMetricSource) DeepCopy() *ObjectMetricSource {
+	if in == nil {
+		return nil
+	}
+	out := new(ObjectMetricSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectMetricStatus) DeepCopyInto(out *ObjectMetricStatus) {
+	*out = *in
+	out.Target = in.Target
+	out.CurrentValue = in.CurrentValue.DeepCopy()
+	if in.Selector != nil {
+		in, out := &in.Selector, &out.Selector
+		*out = new(metav1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.AverageValue != nil {
+		in, out := &in.AverageValue, &out.AverageValue
+		x := (*in).DeepCopy()
+		*out = &x
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMetricStatus.
+func (in *ObjectMetricStatus) DeepCopy() *ObjectMetricStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ObjectMetricStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodsMetricSource) DeepCopyInto(out *PodsMetricSource) {
+	*out = *in
+	out.TargetAverageValue = in.TargetAverageValue.DeepCopy()
+	if in.Selector != nil {
+		in, out := &in.Selector, &out.Selector
+		*out = new(metav1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsMetricSource.
+func (in *PodsMetricSource) DeepCopy() *PodsMetricSource {
+	if in == nil {
+		return nil
+	}
+	out := new(PodsMetricSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodsMetricStatus) DeepCopyInto(out *PodsMetricStatus) {
+	*out = *in
+	out.CurrentAverageValue = in.CurrentAverageValue.DeepCopy()
+	if in.Selector != nil {
+		in, out := &in.Selector, &out.Selector
+		*out = new(metav1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsMetricStatus.
+func (in *PodsMetricStatus) DeepCopy() *PodsMetricStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(PodsMetricStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceMetricSource) DeepCopyInto(out *ResourceMetricSource) {
+	*out = *in
+	if in.TargetAverageUtilization != nil {
+		in, out := &in.TargetAverageUtilization, &out.TargetAverageUtilization
+		*out = new(int32)
+		**out = **in
+	}
+	if in.TargetAverageValue != nil {
+		in, out := &in.TargetAverageValue, &out.TargetAverageValue
+		x := (*in).DeepCopy()
+		*out = &x
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceMetricSource.
+func (in *ResourceMetricSource) DeepCopy() *ResourceMetricSource {
+	if in == nil {
+		return nil
+	}
+	out := new(ResourceMetricSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceMetricStatus) DeepCopyInto(out *ResourceMetricStatus) {
+	*out = *in
+	if in.CurrentAverageUtilization != nil {
+		in, out := &in.CurrentAverageUtilization, &out.CurrentAverageUtilization
+		*out = new(int32)
+		**out = **in
+	}
+	out.CurrentAverageValue = in.CurrentAverageValue.DeepCopy()
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceMetricStatus.
+func (in *ResourceMetricStatus) DeepCopy() *ResourceMetricStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ResourceMetricStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Scale) DeepCopyInto(out *Scale) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	out.Spec = in.Spec
+	out.Status = in.Status
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scale.
+func (in *Scale) DeepCopy() *Scale {
+	if in == nil {
+		return nil
+	}
+	out := new(Scale)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Scale) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ScaleSpec) DeepCopyInto(out *ScaleSpec) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleSpec.
+func (in *ScaleSpec) DeepCopy() *ScaleSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ScaleSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ScaleStatus) DeepCopyInto(out *ScaleStatus) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleStatus.
+func (in *ScaleStatus) DeepCopy() *ScaleStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ScaleStatus)
+	in.DeepCopyInto(out)
+	return out
+}
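
The generated DeepCopyInto functions above all follow one pattern: copy the
struct wholesale with *out = *in, then re-allocate every pointer field so the
copy shares no memory with the receiver. A minimal hand-written sketch of the
same pattern, using a hypothetical type T (not part of this package):

    package sketch

    // T is a hypothetical type with one pointer field.
    type T struct {
    	Count *int32
    }

    // DeepCopyInto writes a deep copy of in into out; out must be non-nil.
    func (in *T) DeepCopyInto(out *T) {
    	*out = *in // copies the pointer itself, so out.Count aliases in.Count
    	if in.Count != nil {
    		out.Count = new(int32) // fresh allocation breaks the alias
    		*out.Count = *in.Count
    	}
    }

    // DeepCopy allocates a new T and deep-copies in into it.
    func (in *T) DeepCopy() *T {
    	if in == nil {
    		return nil
    	}
    	out := new(T)
    	in.DeepCopyInto(out)
    	return out
    }
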
diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/doc.go b/vendor/k8s.io/api/autoscaling/v2beta1/doc.go
new file mode 100644
index 0000000..da9789e
--- /dev/null
+++ b/vendor/k8s.io/api/autoscaling/v2beta1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:openapi-gen=true
+
+package v2beta1 // import "k8s.io/api/autoscaling/v2beta1"
diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto b/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto
new file mode 100644
index 0000000..04bc0ed
--- /dev/null
+++ b/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto
@@ -0,0 +1,397 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.api.autoscaling.v2beta1;
+
+import "k8s.io/api/core/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/api/resource/generated.proto";
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v2beta1";
+
+// CrossVersionObjectReference contains enough information to let you identify the referred resource.
+message CrossVersionObjectReference {
+  // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+  optional string kind = 1;
+
+  // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names
+  optional string name = 2;
+
+  // API version of the referent
+  // +optional
+  optional string apiVersion = 3;
+}
+
+// ExternalMetricSource indicates how to scale on a metric not associated with
+// any Kubernetes object (for example, the length of a queue in a cloud
+// messaging service, or QPS from a load balancer running outside of the cluster).
+// Exactly one "target" type should be set.
+message ExternalMetricSource {
+  // metricName is the name of the metric in question.
+  optional string metricName = 1;
+
+  // metricSelector is used to identify a specific time series
+  // within a given metric.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2;
+
+  // targetValue is the target value of the metric (as a quantity).
+  // Mutually exclusive with TargetAverageValue.
+  // +optional
+  optional k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3;
+
+  // targetAverageValue is the target per-pod value of global metric (as a quantity).
+  // Mutually exclusive with TargetValue.
+  // +optional
+  optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 4;
+}
+
+// ExternalMetricStatus indicates the current value of a global metric
+// not associated with any Kubernetes object.
+message ExternalMetricStatus {
+  // metricName is the name of a metric used for autoscaling in
+  // the metrics system.
+  optional string metricName = 1;
+
+  // metricSelector is used to identify a specific time series
+  // within a given metric.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2;
+
+  // currentValue is the current value of the metric (as a quantity)
+  optional k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3;
+
+  // currentAverageValue is the current value of metric averaged over autoscaled pods.
+  // +optional
+  optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 4;
+}
+
+// HorizontalPodAutoscaler is the configuration for a horizontal pod
+// autoscaler, which automatically manages the replica count of any resource
+// implementing the scale subresource based on the metrics specified.
+message HorizontalPodAutoscaler {
+  // metadata is the standard object metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // spec is the specification for the behaviour of the autoscaler.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.
+  // +optional
+  optional HorizontalPodAutoscalerSpec spec = 2;
+
+  // status is the current information about the autoscaler.
+  // +optional
+  optional HorizontalPodAutoscalerStatus status = 3;
+}
+
+// HorizontalPodAutoscalerCondition describes the state of
+// a HorizontalPodAutoscaler at a certain point.
+message HorizontalPodAutoscalerCondition {
+  // type describes the current condition
+  optional string type = 1;
+
+  // status is the status of the condition (True, False, Unknown)
+  optional string status = 2;
+
+  // lastTransitionTime is the last time the condition transitioned from
+  // one status to another
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
+
+  // reason is the reason for the condition's last transition.
+  // +optional
+  optional string reason = 4;
+
+  // message is a human-readable explanation containing details about
+  // the transition
+  // +optional
+  optional string message = 5;
+}
+
+// HorizontalPodAutoscalerList is a list of horizontal pod autoscaler objects.
+message HorizontalPodAutoscalerList {
+  // metadata is the standard list metadata.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // items is the list of horizontal pod autoscaler objects.
+  repeated HorizontalPodAutoscaler items = 2;
+}
+
+// HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler.
+message HorizontalPodAutoscalerSpec {
+  // scaleTargetRef points to the target resource to scale, and is used to identify the pods for which metrics
+  // should be collected, as well as to actually change the replica count.
+  optional CrossVersionObjectReference scaleTargetRef = 1;
+
+  // minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down.
+  // It defaults to 1 pod.
+  // +optional
+  optional int32 minReplicas = 2;
+
+  // maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up.
+  // It cannot be less than minReplicas.
+  optional int32 maxReplicas = 3;
+
+  // metrics contains the specifications used to calculate the desired
+  // replica count (the maximum replica count across all metrics will be
+  // used).  The desired replica count is calculated by multiplying the
+  // ratio between the target value and the current value by the current
+  // number of pods.  Ergo, metrics used must decrease as the pod count is
+  // increased, and vice versa.  See the individual metric source types for
+  // more information about how each type of metric must respond.
+  // +optional
+  repeated MetricSpec metrics = 4;
+}
+
+// HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler.
+message HorizontalPodAutoscalerStatus {
+  // observedGeneration is the most recent generation observed by this autoscaler.
+  // +optional
+  optional int64 observedGeneration = 1;
+
+  // lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods,
+  // used by the autoscaler to control how often the number of pods is changed.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2;
+
+  // currentReplicas is current number of replicas of pods managed by this autoscaler,
+  // as last seen by the autoscaler.
+  optional int32 currentReplicas = 3;
+
+  // desiredReplicas is the desired number of replicas of pods managed by this autoscaler,
+  // as last calculated by the autoscaler.
+  optional int32 desiredReplicas = 4;
+
+  // currentMetrics is the last read state of the metrics used by this autoscaler.
+  // +optional
+  repeated MetricStatus currentMetrics = 5;
+
+  // conditions is the set of conditions required for this autoscaler to scale its target,
+  // and indicates whether or not those conditions are met.
+  repeated HorizontalPodAutoscalerCondition conditions = 6;
+}
+
+// MetricSpec specifies how to scale based on a single metric
+// (only `type` and one other matching field should be set at once).
+message MetricSpec {
+  // type is the type of metric source.  It should be one of "Object",
+  // "Pods", "Resource" or "External", each mapping to a matching field
+  // in the object.
+  optional string type = 1;
+
+  // object refers to a metric describing a single kubernetes object
+  // (for example, hits-per-second on an Ingress object).
+  // +optional
+  optional ObjectMetricSource object = 2;
+
+  // pods refers to a metric describing each pod in the current scale target
+  // (for example, transactions-processed-per-second).  The values will be
+  // averaged together before being compared to the target value.
+  // +optional
+  optional PodsMetricSource pods = 3;
+
+  // resource refers to a resource metric (such as those specified in
+  // requests and limits) known to Kubernetes describing each pod in the
+  // current scale target (e.g. CPU or memory). Such metrics are built in to
+  // Kubernetes, and have special scaling options on top of those available
+  // to normal per-pod metrics using the "pods" source.
+  // +optional
+  optional ResourceMetricSource resource = 4;
+
+  // external refers to a global metric that is not associated
+  // with any Kubernetes object. It allows autoscaling based on information
+  // coming from components running outside of the cluster
+  // (for example, the length of a queue in a cloud messaging service, or
+  // QPS from a load balancer running outside of the cluster).
+  // +optional
+  optional ExternalMetricSource external = 5;
+}
+
+// MetricStatus describes the last-read state of a single metric.
+message MetricStatus {
+  // type is the type of metric source.  It will be one of "Object",
+  // "Pods", "Resource" or "External", each corresponding to a matching
+  // field in the object.
+  optional string type = 1;
+
+  // object refers to a metric describing a single kubernetes object
+  // (for example, hits-per-second on an Ingress object).
+  // +optional
+  optional ObjectMetricStatus object = 2;
+
+  // pods refers to a metric describing each pod in the current scale target
+  // (for example, transactions-processed-per-second).  The values will be
+  // averaged together before being compared to the target value.
+  // +optional
+  optional PodsMetricStatus pods = 3;
+
+  // resource refers to a resource metric (such as those specified in
+  // requests and limits) known to Kubernetes describing each pod in the
+  // current scale target (e.g. CPU or memory). Such metrics are built in to
+  // Kubernetes, and have special scaling options on top of those available
+  // to normal per-pod metrics using the "pods" source.
+  // +optional
+  optional ResourceMetricStatus resource = 4;
+
+  // external refers to a global metric that is not associated
+  // with any Kubernetes object. It allows autoscaling based on information
+  // coming from components running outside of the cluster
+  // (for example, the length of a queue in a cloud messaging service, or
+  // QPS from a load balancer running outside of the cluster).
+  // +optional
+  optional ExternalMetricStatus external = 5;
+}
+
+// ObjectMetricSource indicates how to scale on a metric describing a
+// kubernetes object (for example, hits-per-second on an Ingress object).
+message ObjectMetricSource {
+  // target is the described Kubernetes object.
+  optional CrossVersionObjectReference target = 1;
+
+  // metricName is the name of the metric in question.
+  optional string metricName = 2;
+
+  // targetValue is the target value of the metric (as a quantity).
+  optional k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3;
+
+  // selector is the string-encoded form of a standard kubernetes label selector for the given metric
+  // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping
+  // When unset, just the metricName will be used to gather metrics.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4;
+
+  // averageValue is the target value of the average of the
+  // metric across all relevant pods (as a quantity)
+  // +optional
+  optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 5;
+}
+
+// ObjectMetricStatus indicates the current value of a metric describing a
+// kubernetes object (for example, hits-per-second on an Ingress object).
+message ObjectMetricStatus {
+  // target is the described Kubernetes object.
+  optional CrossVersionObjectReference target = 1;
+
+  // metricName is the name of the metric in question.
+  optional string metricName = 2;
+
+  // currentValue is the current value of the metric (as a quantity).
+  optional k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3;
+
+  // selector is the string-encoded form of a standard kubernetes label selector for the given metric
+  // When set in the ObjectMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping.
+  // When unset, just the metricName will be used to gather metrics.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4;
+
+  // averageValue is the current value of the average of the
+  // metric across all relevant pods (as a quantity)
+  // +optional
+  optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 5;
+}
+
+// PodsMetricSource indicates how to scale on a metric describing each pod in
+// the current scale target (for example, transactions-processed-per-second).
+// The values will be averaged together before being compared to the target
+// value.
+message PodsMetricSource {
+  // metricName is the name of the metric in question
+  optional string metricName = 1;
+
+  // targetAverageValue is the target value of the average of the
+  // metric across all relevant pods (as a quantity)
+  optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 2;
+
+  // selector is the string-encoded form of a standard kubernetes label selector for the given metric
+  // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping
+  // When unset, just the metricName will be used to gather metrics.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3;
+}
+
+// PodsMetricStatus indicates the current value of a metric describing each pod in
+// the current scale target (for example, transactions-processed-per-second).
+message PodsMetricStatus {
+  // metricName is the name of the metric in question
+  optional string metricName = 1;
+
+  // currentAverageValue is the current value of the average of the
+  // metric across all relevant pods (as a quantity)
+  optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 2;
+
+  // selector is the string-encoded form of a standard kubernetes label selector for the given metric
+  // When set in the PodsMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping.
+  // When unset, just the metricName will be used to gather metrics.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3;
+}
+
+// ResourceMetricSource indicates how to scale on a resource metric known to
+// Kubernetes, as specified in requests and limits, describing each pod in the
+// current scale target (e.g. CPU or memory).  The values will be averaged
+// together before being compared to the target.  Such metrics are built in to
+// Kubernetes, and have special scaling options on top of those available to
+// normal per-pod metrics using the "pods" source.  Only one "target" type
+// should be set.
+message ResourceMetricSource {
+  // name is the name of the resource in question.
+  optional string name = 1;
+
+  // targetAverageUtilization is the target value of the average of the
+  // resource metric across all relevant pods, represented as a percentage of
+  // the requested value of the resource for the pods.
+  // +optional
+  optional int32 targetAverageUtilization = 2;
+
+  // targetAverageValue is the target value of the average of the
+  // resource metric across all relevant pods, as a raw value (instead of as
+  // a percentage of the request), similar to the "pods" metric source type.
+  // +optional
+  optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3;
+}
+
+// ResourceMetricStatus indicates the current value of a resource metric known to
+// Kubernetes, as specified in requests and limits, describing each pod in the
+// current scale target (e.g. CPU or memory).  Such metrics are built in to
+// Kubernetes, and have special scaling options on top of those available to
+// normal per-pod metrics using the "pods" source.
+message ResourceMetricStatus {
+  // name is the name of the resource in question.
+  optional string name = 1;
+
+  // currentAverageUtilization is the current value of the average of the
+  // resource metric across all relevant pods, represented as a percentage of
+  // the requested value of the resource for the pods.  It will only be
+  // present if `targetAverageValue` was set in the corresponding metric
+  // specification.
+  // +optional
+  optional int32 currentAverageUtilization = 2;
+
+  // currentAverageValue is the current value of the average of the
+  // resource metric across all relevant pods, as a raw value (instead of as
+  // a percentage of the request), similar to the "pods" metric source type.
+  // It will always be set, regardless of the corresponding metric specification.
+  optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3;
+}
+
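
A worked sketch of the replica calculation described in the
HorizontalPodAutoscalerSpec comments above: the current pod count is scaled by
the ratio of the current metric value to the target value, then clamped to
[minReplicas, maxReplicas]. The real controller adds tolerances and takes the
maximum across all metrics; this sketch shows only the core ratio rule:

    package main

    import (
    	"fmt"
    	"math"
    )

    // desiredReplicas applies the ratio rule from the spec comments: scale the
    // current pod count by currentValue/targetValue, round up, and clamp.
    func desiredReplicas(current int32, currentValue, targetValue float64, min, max int32) int32 {
    	d := int32(math.Ceil(float64(current) * currentValue / targetValue))
    	if d < min {
    		d = min
    	}
    	if d > max {
    		d = max
    	}
    	return d
    }

    func main() {
    	// 4 pods at 80% average CPU against a 50% target: ceil(4*80/50) = 7.
    	fmt.Println(desiredReplicas(4, 80, 50, 1, 10))
    }
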
diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/register.go b/vendor/k8s.io/api/autoscaling/v2beta1/register.go
new file mode 100644
index 0000000..12d697f
--- /dev/null
+++ b/vendor/k8s.io/api/autoscaling/v2beta1/register.go
@@ -0,0 +1,52 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v2beta1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "autoscaling"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v2beta1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
+	// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+	SchemeBuilder      = runtime.NewSchemeBuilder(addKnownTypes)
+	localSchemeBuilder = &SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&HorizontalPodAutoscaler{},
+		&HorizontalPodAutoscalerList{},
+	)
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
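
A minimal usage sketch for the registration code above, assuming a client-side
component that wants to decode these types; the surrounding program is
illustrative only:

    package main

    import (
    	"fmt"

    	v2beta1 "k8s.io/api/autoscaling/v2beta1"
    	"k8s.io/apimachinery/pkg/runtime"
    )

    func main() {
    	// Register the v2beta1 autoscaling types into a fresh scheme.
    	scheme := runtime.NewScheme()
    	if err := v2beta1.AddToScheme(scheme); err != nil {
    		panic(err)
    	}
    	// The scheme now recognizes the HorizontalPodAutoscaler kind.
    	gvk := v2beta1.SchemeGroupVersion.WithKind("HorizontalPodAutoscaler")
    	fmt.Println(scheme.Recognizes(gvk)) // true
    }
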
diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/types.go b/vendor/k8s.io/api/autoscaling/v2beta1/types.go
new file mode 100644
index 0000000..6a30e67
--- /dev/null
+++ b/vendor/k8s.io/api/autoscaling/v2beta1/types.go
@@ -0,0 +1,405 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v2beta1
+
+import (
+	"k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// CrossVersionObjectReference contains enough information to let you identify the referred resource.
+type CrossVersionObjectReference struct {
+	// Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+	Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"`
+	// Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names
+	Name string `json:"name" protobuf:"bytes,2,opt,name=name"`
+	// API version of the referent
+	// +optional
+	APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"`
+}
+
+// HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler.
+type HorizontalPodAutoscalerSpec struct {
+	// scaleTargetRef points to the target resource to scale, and is used to identify the pods for which metrics
+	// should be collected, as well as to actually change the replica count.
+	ScaleTargetRef CrossVersionObjectReference `json:"scaleTargetRef" protobuf:"bytes,1,opt,name=scaleTargetRef"`
+	// minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down.
+	// It defaults to 1 pod.
+	// +optional
+	MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"`
+	// maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up.
+	// It cannot be less than minReplicas.
+	MaxReplicas int32 `json:"maxReplicas" protobuf:"varint,3,opt,name=maxReplicas"`
+	// metrics contains the specifications used to calculate the desired
+	// replica count (the maximum replica count across all metrics will be
+	// used).  The desired replica count is calculated by multiplying the
+	// ratio between the target value and the current value by the current
+	// number of pods.  Ergo, metrics used must decrease as the pod count is
+	// increased, and vice versa.  See the individual metric source types for
+	// more information about how each type of metric must respond.
+	// +optional
+	Metrics []MetricSpec `json:"metrics,omitempty" protobuf:"bytes,4,rep,name=metrics"`
+}
+
+// MetricSourceType indicates the type of metric.
+type MetricSourceType string
+
+var (
+	// ObjectMetricSourceType is a metric describing a kubernetes object
+	// (for example, hits-per-second on an Ingress object).
+	ObjectMetricSourceType MetricSourceType = "Object"
+	// PodsMetricSourceType is a metric describing each pod in the current scale
+	// target (for example, transactions-processed-per-second).  The values
+	// will be averaged together before being compared to the target value.
+	PodsMetricSourceType MetricSourceType = "Pods"
+	// ResourceMetricSourceType is a resource metric known to Kubernetes, as
+	// specified in requests and limits, describing each pod in the current
+	// scale target (e.g. CPU or memory).  Such metrics are built in to
+	// Kubernetes, and have special scaling options on top of those available
+	// to normal per-pod metrics (the "pods" source).
+	ResourceMetricSourceType MetricSourceType = "Resource"
+	// ExternalMetricSourceType is a global metric that is not associated
+	// with any Kubernetes object. It allows autoscaling based on information
+	// coming from components running outside of the cluster
+	// (for example, the length of a queue in a cloud messaging service, or
+	// QPS from a load balancer running outside of the cluster).
+	ExternalMetricSourceType MetricSourceType = "External"
+)
+
+// MetricSpec specifies how to scale based on a single metric
+// (only `type` and one other matching field should be set at once).
+type MetricSpec struct {
+	// type is the type of metric source.  It should be one of "Object",
+	// "Pods", "Resource" or "External", each mapping to a matching field
+	// in the object.
+	Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"`
+
+	// object refers to a metric describing a single kubernetes object
+	// (for example, hits-per-second on an Ingress object).
+	// +optional
+	Object *ObjectMetricSource `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"`
+	// pods refers to a metric describing each pod in the current scale target
+	// (for example, transactions-processed-per-second).  The values will be
+	// averaged together before being compared to the target value.
+	// +optional
+	Pods *PodsMetricSource `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"`
+	// resource refers to a resource metric (such as those specified in
+	// requests and limits) known to Kubernetes describing each pod in the
+	// current scale target (e.g. CPU or memory). Such metrics are built in to
+	// Kubernetes, and have special scaling options on top of those available
+	// to normal per-pod metrics using the "pods" source.
+	// +optional
+	Resource *ResourceMetricSource `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"`
+	// external refers to a global metric that is not associated
+	// with any Kubernetes object. It allows autoscaling based on information
+	// coming from components running outside of the cluster
+	// (for example, the length of a queue in a cloud messaging service, or
+	// QPS from a load balancer running outside of the cluster).
+	// +optional
+	External *ExternalMetricSource `json:"external,omitempty" protobuf:"bytes,5,opt,name=external"`
+}
+
+// ObjectMetricSource indicates how to scale on a metric describing a
+// kubernetes object (for example, hits-per-second on an Ingress object).
+type ObjectMetricSource struct {
+	// target is the described Kubernetes object.
+	Target CrossVersionObjectReference `json:"target" protobuf:"bytes,1,name=target"`
+
+	// metricName is the name of the metric in question.
+	MetricName string `json:"metricName" protobuf:"bytes,2,name=metricName"`
+	// targetValue is the target value of the metric (as a quantity).
+	TargetValue resource.Quantity `json:"targetValue" protobuf:"bytes,3,name=targetValue"`
+
+	// selector is the string-encoded form of a standard kubernetes label selector for the given metric
+	// When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping
+	// When unset, just the metricName will be used to gather metrics.
+	// +optional
+	Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,name=selector"`
+	// averageValue is the target value of the average of the
+	// metric across all relevant pods (as a quantity)
+	// +optional
+	AverageValue *resource.Quantity `json:"averageValue,omitempty" protobuf:"bytes,5,name=averageValue"`
+}
+
+// PodsMetricSource indicates how to scale on a metric describing each pod in
+// the current scale target (for example, transactions-processed-per-second).
+// The values will be averaged together before being compared to the target
+// value.
+type PodsMetricSource struct {
+	// metricName is the name of the metric in question
+	MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"`
+	// targetAverageValue is the target value of the average of the
+	// metric across all relevant pods (as a quantity)
+	TargetAverageValue resource.Quantity `json:"targetAverageValue" protobuf:"bytes,2,name=targetAverageValue"`
+
+	// selector is the string-encoded form of a standard kubernetes label selector for the given metric
+	// When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping
+	// When unset, just the metricName will be used to gather metrics.
+	// +optional
+	Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,3,name=selector"`
+}
+
+// ResourceMetricSource indicates how to scale on a resource metric known to
+// Kubernetes, as specified in requests and limits, describing each pod in the
+// current scale target (e.g. CPU or memory).  The values will be averaged
+// together before being compared to the target.  Such metrics are built in to
+// Kubernetes, and have special scaling options on top of those available to
+// normal per-pod metrics using the "pods" source.  Only one "target" type
+// should be set.
+type ResourceMetricSource struct {
+	// name is the name of the resource in question.
+	Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"`
+	// targetAverageUtilization is the target value of the average of the
+	// resource metric across all relevant pods, represented as a percentage of
+	// the requested value of the resource for the pods.
+	// +optional
+	TargetAverageUtilization *int32 `json:"targetAverageUtilization,omitempty" protobuf:"varint,2,opt,name=targetAverageUtilization"`
+	// targetAverageValue is the target value of the average of the
+	// resource metric across all relevant pods, as a raw value (instead of as
+	// a percentage of the request), similar to the "pods" metric source type.
+	// +optional
+	TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty" protobuf:"bytes,3,opt,name=targetAverageValue"`
+}
+
+// ExternalMetricSource indicates how to scale on a metric not associated with
+// any Kubernetes object (for example, the length of a queue in a cloud
+// messaging service, or QPS from a load balancer running outside of the cluster).
+// Exactly one "target" type should be set.
+type ExternalMetricSource struct {
+	// metricName is the name of the metric in question.
+	MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"`
+	// metricSelector is used to identify a specific time series
+	// within a given metric.
+	// +optional
+	MetricSelector *metav1.LabelSelector `json:"metricSelector,omitempty" protobuf:"bytes,2,opt,name=metricSelector"`
+	// targetValue is the target value of the metric (as a quantity).
+	// Mutually exclusive with TargetAverageValue.
+	// +optional
+	TargetValue *resource.Quantity `json:"targetValue,omitempty" protobuf:"bytes,3,opt,name=targetValue"`
+	// targetAverageValue is the target per-pod value of global metric (as a quantity).
+	// Mutually exclusive with TargetValue.
+	// +optional
+	TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty" protobuf:"bytes,4,opt,name=targetAverageValue"`
+}
+
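The mutual-exclusivity rule in the ExternalMetricSource comments above is
enforced by validation code elsewhere in Kubernetes; a hedged sketch of what
such a check could look like (illustrative only, not the actual validation):

    package sketch

    import (
    	"errors"

    	v2beta1 "k8s.io/api/autoscaling/v2beta1"
    )

    // exactlyOneExternalTarget checks that exactly one of targetValue and
    // targetAverageValue is set, per the comments on ExternalMetricSource.
    func exactlyOneExternalTarget(src v2beta1.ExternalMetricSource) error {
    	if (src.TargetValue != nil) == (src.TargetAverageValue != nil) {
    		return errors.New("exactly one of targetValue or targetAverageValue must be set")
    	}
    	return nil
    }
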
+// HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler.
+type HorizontalPodAutoscalerStatus struct {
+	// observedGeneration is the most recent generation observed by this autoscaler.
+	// +optional
+	ObservedGeneration *int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"`
+
+	// lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods,
+	// used by the autoscaler to control how often the number of pods is changed.
+	// +optional
+	LastScaleTime *metav1.Time `json:"lastScaleTime,omitempty" protobuf:"bytes,2,opt,name=lastScaleTime"`
+
+	// currentReplicas is current number of replicas of pods managed by this autoscaler,
+	// as last seen by the autoscaler.
+	CurrentReplicas int32 `json:"currentReplicas" protobuf:"varint,3,opt,name=currentReplicas"`
+
+	// desiredReplicas is the desired number of replicas of pods managed by this autoscaler,
+	// as last calculated by the autoscaler.
+	DesiredReplicas int32 `json:"desiredReplicas" protobuf:"varint,4,opt,name=desiredReplicas"`
+
+	// currentMetrics is the last read state of the metrics used by this autoscaler.
+	// +optional
+	CurrentMetrics []MetricStatus `json:"currentMetrics" protobuf:"bytes,5,rep,name=currentMetrics"`
+
+	// conditions is the set of conditions required for this autoscaler to scale its target,
+	// and indicates whether or not those conditions are met.
+	Conditions []HorizontalPodAutoscalerCondition `json:"conditions" protobuf:"bytes,6,rep,name=conditions"`
+}
+
+// HorizontalPodAutoscalerConditionType are the valid conditions of
+// a HorizontalPodAutoscaler.
+type HorizontalPodAutoscalerConditionType string
+
+var (
+	// ScalingActive indicates that the HPA controller is able to scale if necessary:
+	// it's correctly configured, can fetch the desired metrics, and isn't disabled.
+	ScalingActive HorizontalPodAutoscalerConditionType = "ScalingActive"
+	// AbleToScale indicates a lack of transient issues which prevent scaling from occurring,
+	// such as being in a backoff window, or being unable to access/update the target scale.
+	AbleToScale HorizontalPodAutoscalerConditionType = "AbleToScale"
+	// ScalingLimited indicates that the calculated scale based on metrics would be above or
+	// below the range for the HPA, and has thus been capped.
+	ScalingLimited HorizontalPodAutoscalerConditionType = "ScalingLimited"
+)
+
+// HorizontalPodAutoscalerCondition describes the state of
+// a HorizontalPodAutoscaler at a certain point.
+type HorizontalPodAutoscalerCondition struct {
+	// type describes the current condition
+	Type HorizontalPodAutoscalerConditionType `json:"type" protobuf:"bytes,1,name=type"`
+	// status is the status of the condition (True, False, Unknown)
+	Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,name=status"`
+	// lastTransitionTime is the last time the condition transitioned from
+	// one status to another
+	// +optional
+	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
+	// reason is the reason for the condition's last transition.
+	// +optional
+	Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
+	// message is a human-readable explanation containing details about
+	// the transition
+	// +optional
+	Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
+}
+
+// MetricStatus describes the last-read state of a single metric.
+type MetricStatus struct {
+	// type is the type of metric source.  It will be one of "Object",
+	// "Pods", "Resource" or "External", each corresponding to a matching
+	// field in the object.
+	Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"`
+
+	// object refers to a metric describing a single kubernetes object
+	// (for example, hits-per-second on an Ingress object).
+	// +optional
+	Object *ObjectMetricStatus `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"`
+	// pods refers to a metric describing each pod in the current scale target
+	// (for example, transactions-processed-per-second).  The values will be
+	// averaged together before being compared to the target value.
+	// +optional
+	Pods *PodsMetricStatus `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"`
+	// resource refers to a resource metric (such as those specified in
+	// requests and limits) known to Kubernetes describing each pod in the
+	// current scale target (e.g. CPU or memory). Such metrics are built in to
+	// Kubernetes, and have special scaling options on top of those available
+	// to normal per-pod metrics using the "pods" source.
+	// +optional
+	Resource *ResourceMetricStatus `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"`
+	// external refers to a global metric that is not associated
+	// with any Kubernetes object. It allows autoscaling based on information
+	// coming from components running outside of the cluster
+	// (for example, the length of a queue in a cloud messaging service, or
+	// QPS from a load balancer running outside of the cluster).
+	// +optional
+	External *ExternalMetricStatus `json:"external,omitempty" protobuf:"bytes,5,opt,name=external"`
+}
+
+// ObjectMetricStatus indicates the current value of a metric describing a
+// kubernetes object (for example, hits-per-second on an Ingress object).
+type ObjectMetricStatus struct {
+	// target is the described Kubernetes object.
+	Target CrossVersionObjectReference `json:"target" protobuf:"bytes,1,name=target"`
+
+	// metricName is the name of the metric in question.
+	MetricName string `json:"metricName" protobuf:"bytes,2,name=metricName"`
+	// currentValue is the current value of the metric (as a quantity).
+	CurrentValue resource.Quantity `json:"currentValue" protobuf:"bytes,3,name=currentValue"`
+
+	// selector is the string-encoded form of a standard kubernetes label selector for the given metric
+	// When set in the ObjectMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping.
+	// When unset, just the metricName will be used to gather metrics.
+	// +optional
+	Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,name=selector"`
+	// averageValue is the current value of the average of the
+	// metric across all relevant pods (as a quantity)
+	// +optional
+	AverageValue *resource.Quantity `json:"averageValue,omitempty" protobuf:"bytes,5,name=averageValue"`
+}
+
+// PodsMetricStatus indicates the current value of a metric describing each pod in
+// the current scale target (for example, transactions-processed-per-second).
+type PodsMetricStatus struct {
+	// metricName is the name of the metric in question
+	MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"`
+	// currentAverageValue is the current value of the average of the
+	// metric across all relevant pods (as a quantity)
+	CurrentAverageValue resource.Quantity `json:"currentAverageValue" protobuf:"bytes,2,name=currentAverageValue"`
+
+	// selector is the string-encoded form of a standard kubernetes label selector for the given metric
+	// When set in the PodsMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping.
+	// When unset, just the metricName will be used to gather metrics.
+	// +optional
+	Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,3,name=selector"`
+}
+
+// ResourceMetricStatus indicates the current value of a resource metric known to
+// Kubernetes, as specified in requests and limits, describing each pod in the
+// current scale target (e.g. CPU or memory).  Such metrics are built in to
+// Kubernetes, and have special scaling options on top of those available to
+// normal per-pod metrics using the "pods" source.
+type ResourceMetricStatus struct {
+	// name is the name of the resource in question.
+	Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"`
+	// currentAverageUtilization is the current value of the average of the
+	// resource metric across all relevant pods, represented as a percentage of
+	// the requested value of the resource for the pods.  It will only be
+	// present if `targetAverageValue` was set in the corresponding metric
+	// specification.
+	// +optional
+	CurrentAverageUtilization *int32 `json:"currentAverageUtilization,omitempty" protobuf:"varint,2,opt,name=currentAverageUtilization"`
+	// currentAverageValue is the current value of the average of the
+	// resource metric across all relevant pods, as a raw value (instead of as
+	// a percentage of the request), similar to the "pods" metric source type.
+	// It will always be set, regardless of the corresponding metric specification.
+	CurrentAverageValue resource.Quantity `json:"currentAverageValue" protobuf:"bytes,3,name=currentAverageValue"`
+}
+
+// ExternalMetricStatus indicates the current value of a global metric
+// not associated with any Kubernetes object.
+type ExternalMetricStatus struct {
+	// metricName is the name of a metric used for autoscaling in
+	// the metrics system.
+	MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"`
+	// metricSelector is used to identify a specific time series
+	// within a given metric.
+	// +optional
+	MetricSelector *metav1.LabelSelector `json:"metricSelector,omitempty" protobuf:"bytes,2,opt,name=metricSelector"`
+	// currentValue is the current value of the metric (as a quantity)
+	CurrentValue resource.Quantity `json:"currentValue" protobuf:"bytes,3,name=currentValue"`
+	// currentAverageValue is the current value of metric averaged over autoscaled pods.
+	// +optional
+	CurrentAverageValue *resource.Quantity `json:"currentAverageValue,omitempty" protobuf:"bytes,4,opt,name=currentAverageValue"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// HorizontalPodAutoscaler is the configuration for a horizontal pod
+// autoscaler, which automatically manages the replica count of any resource
+// implementing the scale subresource based on the metrics specified.
+type HorizontalPodAutoscaler struct {
+	metav1.TypeMeta `json:",inline"`
+	// metadata is the standard object metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// spec is the specification for the behaviour of the autoscaler.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.
+	// +optional
+	Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+	// status is the current information about the autoscaler.
+	// +optional
+	Status HorizontalPodAutoscalerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// HorizontalPodAutoscalerList is a list of horizontal pod autoscaler objects.
+type HorizontalPodAutoscalerList struct {
+	metav1.TypeMeta `json:",inline"`
+	// metadata is the standard list metadata.
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// items is the list of horizontal pod autoscaler objects.
+	Items []HorizontalPodAutoscaler `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
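
A minimal construction sketch for the types above, scaling a Deployment on
average CPU utilization; all names and values here are illustrative:

    package main

    import (
    	"fmt"

    	v2beta1 "k8s.io/api/autoscaling/v2beta1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func main() {
    	minReplicas := int32(2)
    	targetCPU := int32(50)
    	hpa := v2beta1.HorizontalPodAutoscaler{
    		ObjectMeta: metav1.ObjectMeta{Name: "web", Namespace: "default"},
    		Spec: v2beta1.HorizontalPodAutoscalerSpec{
    			// Scale the "web" Deployment between 2 and 10 replicas.
    			ScaleTargetRef: v2beta1.CrossVersionObjectReference{
    				APIVersion: "apps/v1",
    				Kind:       "Deployment",
    				Name:       "web",
    			},
    			MinReplicas: &minReplicas,
    			MaxReplicas: 10,
    			// Target 50% average CPU utilization across the pods.
    			Metrics: []v2beta1.MetricSpec{{
    				Type: v2beta1.ResourceMetricSourceType,
    				Resource: &v2beta1.ResourceMetricSource{
    					Name:                     "cpu",
    					TargetAverageUtilization: &targetCPU,
    				},
    			}},
    		},
    	}
    	fmt.Println(hpa.Spec.ScaleTargetRef.Name, hpa.Spec.MaxReplicas)
    }
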
diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go
new file mode 100644
index 0000000..589408a
--- /dev/null
+++ b/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go
@@ -0,0 +1,221 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v2beta1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_CrossVersionObjectReference = map[string]string{
+	"":           "CrossVersionObjectReference contains enough information to let you identify the referred resource.",
+	"kind":       "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds\"",
+	"name":       "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names",
+	"apiVersion": "API version of the referent",
+}
+
+func (CrossVersionObjectReference) SwaggerDoc() map[string]string {
+	return map_CrossVersionObjectReference
+}
+
+var map_ExternalMetricSource = map[string]string{
+	"":                   "ExternalMetricSource indicates how to scale on a metric not associated with any Kubernetes object (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster). Exactly one \"target\" type should be set.",
+	"metricName":         "metricName is the name of the metric in question.",
+	"metricSelector":     "metricSelector is used to identify a specific time series within a given metric.",
+	"targetValue":        "targetValue is the target value of the metric (as a quantity). Mutually exclusive with TargetAverageValue.",
+	"targetAverageValue": "targetAverageValue is the target per-pod value of global metric (as a quantity). Mutually exclusive with TargetValue.",
+}
+
+func (ExternalMetricSource) SwaggerDoc() map[string]string {
+	return map_ExternalMetricSource
+}
+
+var map_ExternalMetricStatus = map[string]string{
+	"":                    "ExternalMetricStatus indicates the current value of a global metric not associated with any Kubernetes object.",
+	"metricName":          "metricName is the name of a metric used for autoscaling in metric system.",
+	"metricSelector":      "metricSelector is used to identify a specific time series within a given metric.",
+	"currentValue":        "currentValue is the current value of the metric (as a quantity)",
+	"currentAverageValue": "currentAverageValue is the current value of metric averaged over autoscaled pods.",
+}
+
+func (ExternalMetricStatus) SwaggerDoc() map[string]string {
+	return map_ExternalMetricStatus
+}
+
+var map_HorizontalPodAutoscaler = map[string]string{
+	"":         "HorizontalPodAutoscaler is the configuration for a horizontal pod autoscaler, which automatically manages the replica count of any resource implementing the scale subresource based on the metrics specified.",
+	"metadata": "metadata is the standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"spec":     "spec is the specification for the behaviour of the autoscaler. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.",
+	"status":   "status is the current information about the autoscaler.",
+}
+
+func (HorizontalPodAutoscaler) SwaggerDoc() map[string]string {
+	return map_HorizontalPodAutoscaler
+}
+
+var map_HorizontalPodAutoscalerCondition = map[string]string{
+	"":                   "HorizontalPodAutoscalerCondition describes the state of a HorizontalPodAutoscaler at a certain point.",
+	"type":               "type describes the current condition",
+	"status":             "status is the status of the condition (True, False, Unknown)",
+	"lastTransitionTime": "lastTransitionTime is the last time the condition transitioned from one status to another",
+	"reason":             "reason is the reason for the condition's last transition.",
+	"message":            "message is a human-readable explanation containing details about the transition",
+}
+
+func (HorizontalPodAutoscalerCondition) SwaggerDoc() map[string]string {
+	return map_HorizontalPodAutoscalerCondition
+}
+
+var map_HorizontalPodAutoscalerList = map[string]string{
+	"":         "HorizontalPodAutoscaler is a list of horizontal pod autoscaler objects.",
+	"metadata": "metadata is the standard list metadata.",
+	"items":    "items is the list of horizontal pod autoscaler objects.",
+}
+
+func (HorizontalPodAutoscalerList) SwaggerDoc() map[string]string {
+	return map_HorizontalPodAutoscalerList
+}
+
+var map_HorizontalPodAutoscalerSpec = map[string]string{
+	"":               "HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler.",
+	"scaleTargetRef": "scaleTargetRef points to the target resource to scale, and is used to the pods for which metrics should be collected, as well as to actually change the replica count.",
+	"minReplicas":    "minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod.",
+	"maxReplicas":    "maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. It cannot be less that minReplicas.",
+	"metrics":        "metrics contains the specifications for which to use to calculate the desired replica count (the maximum replica count across all metrics will be used).  The desired replica count is calculated multiplying the ratio between the target value and the current value by the current number of pods.  Ergo, metrics used must decrease as the pod count is increased, and vice-versa.  See the individual metric source types for more information about how each type of metric must respond.",
+}
+
+func (HorizontalPodAutoscalerSpec) SwaggerDoc() map[string]string {
+	return map_HorizontalPodAutoscalerSpec
+}
+
+var map_HorizontalPodAutoscalerStatus = map[string]string{
+	"":                   "HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler.",
+	"observedGeneration": "observedGeneration is the most recent generation observed by this autoscaler.",
+	"lastScaleTime":      "lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods, used by the autoscaler to control how often the number of pods is changed.",
+	"currentReplicas":    "currentReplicas is current number of replicas of pods managed by this autoscaler, as last seen by the autoscaler.",
+	"desiredReplicas":    "desiredReplicas is the desired number of replicas of pods managed by this autoscaler, as last calculated by the autoscaler.",
+	"currentMetrics":     "currentMetrics is the last read state of the metrics used by this autoscaler.",
+	"conditions":         "conditions is the set of conditions required for this autoscaler to scale its target, and indicates whether or not those conditions are met.",
+}
+
+func (HorizontalPodAutoscalerStatus) SwaggerDoc() map[string]string {
+	return map_HorizontalPodAutoscalerStatus
+}
+
+var map_MetricSpec = map[string]string{
+	"":         "MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).",
+	"type":     "type is the type of metric source.  It should be one of \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object.",
+	"object":   "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).",
+	"pods":     "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second).  The values will be averaged together before being compared to the target value.",
+	"resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.",
+	"external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).",
+}
+
+func (MetricSpec) SwaggerDoc() map[string]string {
+	return map_MetricSpec
+}
+
+var map_MetricStatus = map[string]string{
+	"":         "MetricStatus describes the last-read state of a single metric.",
+	"type":     "type is the type of metric source.  It will be one of \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object.",
+	"object":   "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).",
+	"pods":     "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second).  The values will be averaged together before being compared to the target value.",
+	"resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.",
+	"external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).",
+}
+
+func (MetricStatus) SwaggerDoc() map[string]string {
+	return map_MetricStatus
+}
+
+var map_ObjectMetricSource = map[string]string{
+	"":             "ObjectMetricSource indicates how to scale on a metric describing a kubernetes object (for example, hits-per-second on an Ingress object).",
+	"target":       "target is the described Kubernetes object.",
+	"metricName":   "metricName is the name of the metric in question.",
+	"targetValue":  "targetValue is the target value of the metric (as a quantity).",
+	"selector":     "selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping When unset, just the metricName will be used to gather metrics.",
+	"averageValue": "averageValue is the target value of the average of the metric across all relevant pods (as a quantity)",
+}
+
+func (ObjectMetricSource) SwaggerDoc() map[string]string {
+	return map_ObjectMetricSource
+}
+
+var map_ObjectMetricStatus = map[string]string{
+	"":             "ObjectMetricStatus indicates the current value of a metric describing a kubernetes object (for example, hits-per-second on an Ingress object).",
+	"target":       "target is the described Kubernetes object.",
+	"metricName":   "metricName is the name of the metric in question.",
+	"currentValue": "currentValue is the current value of the metric (as a quantity).",
+	"selector":     "selector is the string-encoded form of a standard kubernetes label selector for the given metric When set in the ObjectMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.",
+	"averageValue": "averageValue is the current value of the average of the metric across all relevant pods (as a quantity)",
+}
+
+func (ObjectMetricStatus) SwaggerDoc() map[string]string {
+	return map_ObjectMetricStatus
+}
+
+var map_PodsMetricSource = map[string]string{
+	"":                   "PodsMetricSource indicates how to scale on a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.",
+	"metricName":         "metricName is the name of the metric in question",
+	"targetAverageValue": "targetAverageValue is the target value of the average of the metric across all relevant pods (as a quantity)",
+	"selector":           "selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping When unset, just the metricName will be used to gather metrics.",
+}
+
+func (PodsMetricSource) SwaggerDoc() map[string]string {
+	return map_PodsMetricSource
+}
+
+var map_PodsMetricStatus = map[string]string{
+	"":                    "PodsMetricStatus indicates the current value of a metric describing each pod in the current scale target (for example, transactions-processed-per-second).",
+	"metricName":          "metricName is the name of the metric in question",
+	"currentAverageValue": "currentAverageValue is the current value of the average of the metric across all relevant pods (as a quantity)",
+	"selector":            "selector is the string-encoded form of a standard kubernetes label selector for the given metric When set in the PodsMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.",
+}
+
+func (PodsMetricStatus) SwaggerDoc() map[string]string {
+	return map_PodsMetricStatus
+}
+
+var map_ResourceMetricSource = map[string]string{
+	"":                         "ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory).  The values will be averaged together before being compared to the target.  Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.  Only one \"target\" type should be set.",
+	"name":                     "name is the name of the resource in question.",
+	"targetAverageUtilization": "targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.",
+	"targetAverageValue":       "targetAverageValue is the target value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type.",
+}
+
+func (ResourceMetricSource) SwaggerDoc() map[string]string {
+	return map_ResourceMetricSource
+}
+
+var map_ResourceMetricStatus = map[string]string{
+	"":                          "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory).  Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.",
+	"name":                      "name is the name of the resource in question.",
+	"currentAverageUtilization": "currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.  It will only be present if `targetAverageValue` was set in the corresponding metric specification.",
+	"currentAverageValue":       "currentAverageValue is the current value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type. It will always be set, regardless of the corresponding metric specification.",
+}
+
+func (ResourceMetricStatus) SwaggerDoc() map[string]string {
+	return map_ResourceMetricStatus
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..2ec7e61
--- /dev/null
+++ b/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go
@@ -0,0 +1,466 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v2beta1
+
+import (
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CrossVersionObjectReference) DeepCopyInto(out *CrossVersionObjectReference) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrossVersionObjectReference.
+func (in *CrossVersionObjectReference) DeepCopy() *CrossVersionObjectReference {
+	if in == nil {
+		return nil
+	}
+	out := new(CrossVersionObjectReference)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExternalMetricSource) DeepCopyInto(out *ExternalMetricSource) {
+	*out = *in
+	if in.MetricSelector != nil {
+		in, out := &in.MetricSelector, &out.MetricSelector
+		*out = new(v1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.TargetValue != nil {
+		in, out := &in.TargetValue, &out.TargetValue
+		x := (*in).DeepCopy()
+		*out = &x
+	}
+	if in.TargetAverageValue != nil {
+		in, out := &in.TargetAverageValue, &out.TargetAverageValue
+		x := (*in).DeepCopy()
+		*out = &x
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalMetricSource.
+func (in *ExternalMetricSource) DeepCopy() *ExternalMetricSource {
+	if in == nil {
+		return nil
+	}
+	out := new(ExternalMetricSource)
+	in.DeepCopyInto(out)
+	return out
+}
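
Editor's note: the shape of these generated functions is uniform: `*out = *in` shallow-copies all value fields, then each non-nil pointer field is re-allocated so the copy shares no memory with the original. A minimal standalone sketch of the same pattern, using a hypothetical type rather than anything from this package:

```go
package main

import "fmt"

type Inner struct{ N int }

type Outer struct {
	Val   int
	Inner *Inner
}

// DeepCopyInto follows the generated pattern: shallow-copy value fields,
// then re-allocate each non-nil pointer field to break the aliasing.
func (in *Outer) DeepCopyInto(out *Outer) {
	*out = *in // copies Val, but aliases Inner
	if in.Inner != nil {
		out.Inner = new(Inner)
		*out.Inner = *in.Inner
	}
}

func main() {
	a := Outer{Val: 1, Inner: &Inner{N: 2}}
	var b Outer
	a.DeepCopyInto(&b)
	b.Inner.N = 99
	fmt.Println(a.Inner.N) // 2: the copy is independent
}
```
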
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExternalMetricStatus) DeepCopyInto(out *ExternalMetricStatus) {
+	*out = *in
+	if in.MetricSelector != nil {
+		in, out := &in.MetricSelector, &out.MetricSelector
+		*out = new(v1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	out.CurrentValue = in.CurrentValue.DeepCopy()
+	if in.CurrentAverageValue != nil {
+		in, out := &in.CurrentAverageValue, &out.CurrentAverageValue
+		x := (*in).DeepCopy()
+		*out = &x
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalMetricStatus.
+func (in *ExternalMetricStatus) DeepCopy() *ExternalMetricStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ExternalMetricStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HorizontalPodAutoscaler) DeepCopyInto(out *HorizontalPodAutoscaler) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscaler.
+func (in *HorizontalPodAutoscaler) DeepCopy() *HorizontalPodAutoscaler {
+	if in == nil {
+		return nil
+	}
+	out := new(HorizontalPodAutoscaler)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *HorizontalPodAutoscaler) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HorizontalPodAutoscalerCondition) DeepCopyInto(out *HorizontalPodAutoscalerCondition) {
+	*out = *in
+	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerCondition.
+func (in *HorizontalPodAutoscalerCondition) DeepCopy() *HorizontalPodAutoscalerCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(HorizontalPodAutoscalerCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HorizontalPodAutoscalerList) DeepCopyInto(out *HorizontalPodAutoscalerList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]HorizontalPodAutoscaler, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerList.
+func (in *HorizontalPodAutoscalerList) DeepCopy() *HorizontalPodAutoscalerList {
+	if in == nil {
+		return nil
+	}
+	out := new(HorizontalPodAutoscalerList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *HorizontalPodAutoscalerList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HorizontalPodAutoscalerSpec) DeepCopyInto(out *HorizontalPodAutoscalerSpec) {
+	*out = *in
+	out.ScaleTargetRef = in.ScaleTargetRef
+	if in.MinReplicas != nil {
+		in, out := &in.MinReplicas, &out.MinReplicas
+		*out = new(int32)
+		**out = **in
+	}
+	if in.Metrics != nil {
+		in, out := &in.Metrics, &out.Metrics
+		*out = make([]MetricSpec, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerSpec.
+func (in *HorizontalPodAutoscalerSpec) DeepCopy() *HorizontalPodAutoscalerSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(HorizontalPodAutoscalerSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HorizontalPodAutoscalerStatus) DeepCopyInto(out *HorizontalPodAutoscalerStatus) {
+	*out = *in
+	if in.ObservedGeneration != nil {
+		in, out := &in.ObservedGeneration, &out.ObservedGeneration
+		*out = new(int64)
+		**out = **in
+	}
+	if in.LastScaleTime != nil {
+		in, out := &in.LastScaleTime, &out.LastScaleTime
+		*out = (*in).DeepCopy()
+	}
+	if in.CurrentMetrics != nil {
+		in, out := &in.CurrentMetrics, &out.CurrentMetrics
+		*out = make([]MetricStatus, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]HorizontalPodAutoscalerCondition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerStatus.
+func (in *HorizontalPodAutoscalerStatus) DeepCopy() *HorizontalPodAutoscalerStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(HorizontalPodAutoscalerStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MetricSpec) DeepCopyInto(out *MetricSpec) {
+	*out = *in
+	if in.Object != nil {
+		in, out := &in.Object, &out.Object
+		*out = new(ObjectMetricSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Pods != nil {
+		in, out := &in.Pods, &out.Pods
+		*out = new(PodsMetricSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Resource != nil {
+		in, out := &in.Resource, &out.Resource
+		*out = new(ResourceMetricSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.External != nil {
+		in, out := &in.External, &out.External
+		*out = new(ExternalMetricSource)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricSpec.
+func (in *MetricSpec) DeepCopy() *MetricSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(MetricSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MetricStatus) DeepCopyInto(out *MetricStatus) {
+	*out = *in
+	if in.Object != nil {
+		in, out := &in.Object, &out.Object
+		*out = new(ObjectMetricStatus)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Pods != nil {
+		in, out := &in.Pods, &out.Pods
+		*out = new(PodsMetricStatus)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Resource != nil {
+		in, out := &in.Resource, &out.Resource
+		*out = new(ResourceMetricStatus)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.External != nil {
+		in, out := &in.External, &out.External
+		*out = new(ExternalMetricStatus)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricStatus.
+func (in *MetricStatus) DeepCopy() *MetricStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(MetricStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectMetricSource) DeepCopyInto(out *ObjectMetricSource) {
+	*out = *in
+	out.Target = in.Target
+	out.TargetValue = in.TargetValue.DeepCopy()
+	if in.Selector != nil {
+		in, out := &in.Selector, &out.Selector
+		*out = new(v1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.AverageValue != nil {
+		in, out := &in.AverageValue, &out.AverageValue
+		x := (*in).DeepCopy()
+		*out = &x
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMetricSource.
+func (in *ObjectMetricSource) DeepCopy() *ObjectMetricSource {
+	if in == nil {
+		return nil
+	}
+	out := new(ObjectMetricSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectMetricStatus) DeepCopyInto(out *ObjectMetricStatus) {
+	*out = *in
+	out.Target = in.Target
+	out.CurrentValue = in.CurrentValue.DeepCopy()
+	if in.Selector != nil {
+		in, out := &in.Selector, &out.Selector
+		*out = new(v1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.AverageValue != nil {
+		in, out := &in.AverageValue, &out.AverageValue
+		x := (*in).DeepCopy()
+		*out = &x
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMetricStatus.
+func (in *ObjectMetricStatus) DeepCopy() *ObjectMetricStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ObjectMetricStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodsMetricSource) DeepCopyInto(out *PodsMetricSource) {
+	*out = *in
+	out.TargetAverageValue = in.TargetAverageValue.DeepCopy()
+	if in.Selector != nil {
+		in, out := &in.Selector, &out.Selector
+		*out = new(v1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsMetricSource.
+func (in *PodsMetricSource) DeepCopy() *PodsMetricSource {
+	if in == nil {
+		return nil
+	}
+	out := new(PodsMetricSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodsMetricStatus) DeepCopyInto(out *PodsMetricStatus) {
+	*out = *in
+	out.CurrentAverageValue = in.CurrentAverageValue.DeepCopy()
+	if in.Selector != nil {
+		in, out := &in.Selector, &out.Selector
+		*out = new(v1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsMetricStatus.
+func (in *PodsMetricStatus) DeepCopy() *PodsMetricStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(PodsMetricStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceMetricSource) DeepCopyInto(out *ResourceMetricSource) {
+	*out = *in
+	if in.TargetAverageUtilization != nil {
+		in, out := &in.TargetAverageUtilization, &out.TargetAverageUtilization
+		*out = new(int32)
+		**out = **in
+	}
+	if in.TargetAverageValue != nil {
+		in, out := &in.TargetAverageValue, &out.TargetAverageValue
+		x := (*in).DeepCopy()
+		*out = &x
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceMetricSource.
+func (in *ResourceMetricSource) DeepCopy() *ResourceMetricSource {
+	if in == nil {
+		return nil
+	}
+	out := new(ResourceMetricSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceMetricStatus) DeepCopyInto(out *ResourceMetricStatus) {
+	*out = *in
+	if in.CurrentAverageUtilization != nil {
+		in, out := &in.CurrentAverageUtilization, &out.CurrentAverageUtilization
+		*out = new(int32)
+		**out = **in
+	}
+	out.CurrentAverageValue = in.CurrentAverageValue.DeepCopy()
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceMetricStatus.
+func (in *ResourceMetricStatus) DeepCopy() *ResourceMetricStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ResourceMetricStatus)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/doc.go b/vendor/k8s.io/api/autoscaling/v2beta2/doc.go
new file mode 100644
index 0000000..7c7d2b6
--- /dev/null
+++ b/vendor/k8s.io/api/autoscaling/v2beta2/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:openapi-gen=true
+
+package v2beta2 // import "k8s.io/api/autoscaling/v2beta2"
diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto b/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto
new file mode 100644
index 0000000..b4e4c95
--- /dev/null
+++ b/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto
@@ -0,0 +1,369 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.api.autoscaling.v2beta2;
+
+import "k8s.io/api/core/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/api/resource/generated.proto";
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v2beta2";
+
+// CrossVersionObjectReference contains enough information to let you identify the referred resource.
+message CrossVersionObjectReference {
+  // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+  optional string kind = 1;
+
+  // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names
+  optional string name = 2;
+
+  // API version of the referent
+  // +optional
+  optional string apiVersion = 3;
+}
+
+// ExternalMetricSource indicates how to scale on a metric not associated with
+// any Kubernetes object (for example, the length of a queue in a cloud
+// messaging service, or QPS from a load balancer running outside of the cluster).
+message ExternalMetricSource {
+  // metric identifies the target metric by name and selector
+  optional MetricIdentifier metric = 1;
+
+  // target specifies the target value for the given metric
+  optional MetricTarget target = 2;
+}
+
+// ExternalMetricStatus indicates the current value of a global metric
+// not associated with any Kubernetes object.
+message ExternalMetricStatus {
+  // metric identifies the target metric by name and selector
+  optional MetricIdentifier metric = 1;
+
+  // current contains the current value for the given metric
+  optional MetricValueStatus current = 2;
+}
+
+// HorizontalPodAutoscaler is the configuration for a horizontal pod
+// autoscaler, which automatically manages the replica count of any resource
+// implementing the scale subresource based on the metrics specified.
+message HorizontalPodAutoscaler {
+  // metadata is the standard object metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // spec is the specification for the behaviour of the autoscaler.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.
+  // +optional
+  optional HorizontalPodAutoscalerSpec spec = 2;
+
+  // status is the current information about the autoscaler.
+  // +optional
+  optional HorizontalPodAutoscalerStatus status = 3;
+}
+
+// HorizontalPodAutoscalerCondition describes the state of
+// a HorizontalPodAutoscaler at a certain point.
+message HorizontalPodAutoscalerCondition {
+  // type describes the current condition
+  optional string type = 1;
+
+  // status is the status of the condition (True, False, Unknown)
+  optional string status = 2;
+
+  // lastTransitionTime is the last time the condition transitioned from
+  // one status to another
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
+
+  // reason is the reason for the condition's last transition.
+  // +optional
+  optional string reason = 4;
+
+  // message is a human-readable explanation containing details about
+  // the transition
+  // +optional
+  optional string message = 5;
+}
+
+// HorizontalPodAutoscalerList is a list of horizontal pod autoscaler objects.
+message HorizontalPodAutoscalerList {
+  // metadata is the standard list metadata.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // items is the list of horizontal pod autoscaler objects.
+  repeated HorizontalPodAutoscaler items = 2;
+}
+
+// HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler.
+message HorizontalPodAutoscalerSpec {
+  // scaleTargetRef points to the target resource to scale, and is used to identify the pods for which metrics
+  // should be collected, as well as to actually change the replica count.
+  optional CrossVersionObjectReference scaleTargetRef = 1;
+
+  // minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down.
+  // It defaults to 1 pod.
+  // +optional
+  optional int32 minReplicas = 2;
+
+  // maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up.
+  // It cannot be less than minReplicas.
+  optional int32 maxReplicas = 3;
+
+  // metrics contains the specifications used to calculate the
+  // desired replica count (the maximum replica count across all metrics will
+  // be used).  The desired replica count is calculated by multiplying the
+  // ratio between the target value and the current value by the current
+  // number of pods.  Ergo, metrics used must decrease as the pod count is
+  // increased, and vice-versa.  See the individual metric source types for
+  // more information about how each type of metric must respond.
+  // If not set, the default metric will be set to 80% average CPU utilization.
+  // +optional
+  repeated MetricSpec metrics = 4;
+}
+
+// HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler.
+message HorizontalPodAutoscalerStatus {
+  // observedGeneration is the most recent generation observed by this autoscaler.
+  // +optional
+  optional int64 observedGeneration = 1;
+
+  // lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods,
+  // used by the autoscaler to control how often the number of pods is changed.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2;
+
+  // currentReplicas is the current number of replicas of pods managed by this autoscaler,
+  // as last seen by the autoscaler.
+  optional int32 currentReplicas = 3;
+
+  // desiredReplicas is the desired number of replicas of pods managed by this autoscaler,
+  // as last calculated by the autoscaler.
+  optional int32 desiredReplicas = 4;
+
+  // currentMetrics is the last read state of the metrics used by this autoscaler.
+  // +optional
+  repeated MetricStatus currentMetrics = 5;
+
+  // conditions is the set of conditions required for this autoscaler to scale its target,
+  // and indicates whether or not those conditions are met.
+  repeated HorizontalPodAutoscalerCondition conditions = 6;
+}
+
+// MetricIdentifier defines the name and optionally selector for a metric
+message MetricIdentifier {
+  // name is the name of the given metric
+  optional string name = 1;
+
+  // selector is the string-encoded form of a standard kubernetes label selector for the given metric.
+  // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping.
+  // When unset, just the metricName will be used to gather metrics.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
+}
+
+// MetricSpec specifies how to scale based on a single metric
+// (only `type` and one other matching field should be set at once).
+message MetricSpec {
+  // type is the type of metric source.  It should be one of "Object",
+  // "Pods", "Resource" or "External", each mapping to a matching field in the object.
+  optional string type = 1;
+
+  // object refers to a metric describing a single kubernetes object
+  // (for example, hits-per-second on an Ingress object).
+  // +optional
+  optional ObjectMetricSource object = 2;
+
+  // pods refers to a metric describing each pod in the current scale target
+  // (for example, transactions-processed-per-second).  The values will be
+  // averaged together before being compared to the target value.
+  // +optional
+  optional PodsMetricSource pods = 3;
+
+  // resource refers to a resource metric (such as those specified in
+  // requests and limits) known to Kubernetes describing each pod in the
+  // current scale target (e.g. CPU or memory). Such metrics are built in to
+  // Kubernetes, and have special scaling options on top of those available
+  // to normal per-pod metrics using the "pods" source.
+  // +optional
+  optional ResourceMetricSource resource = 4;
+
+  // external refers to a global metric that is not associated
+  // with any Kubernetes object. It allows autoscaling based on information
+  // coming from components running outside of the cluster
+  // (for example, the length of a queue in a cloud messaging service, or
+  // QPS from a load balancer running outside of the cluster).
+  // +optional
+  optional ExternalMetricSource external = 5;
+}
+
+// MetricStatus describes the last-read state of a single metric.
+message MetricStatus {
+  // type is the type of metric source.  It will be one of "Object",
+  // "Pods", "Resource" or "External", each corresponding to a matching field in the object.
+  optional string type = 1;
+
+  // object refers to a metric describing a single kubernetes object
+  // (for example, hits-per-second on an Ingress object).
+  // +optional
+  optional ObjectMetricStatus object = 2;
+
+  // pods refers to a metric describing each pod in the current scale target
+  // (for example, transactions-processed-per-second).  The values will be
+  // averaged together before being compared to the target value.
+  // +optional
+  optional PodsMetricStatus pods = 3;
+
+  // resource refers to a resource metric (such as those specified in
+  // requests and limits) known to Kubernetes describing each pod in the
+  // current scale target (e.g. CPU or memory). Such metrics are built in to
+  // Kubernetes, and have special scaling options on top of those available
+  // to normal per-pod metrics using the "pods" source.
+  // +optional
+  optional ResourceMetricStatus resource = 4;
+
+  // external refers to a global metric that is not associated
+  // with any Kubernetes object. It allows autoscaling based on information
+  // coming from components running outside of the cluster
+  // (for example, the length of a queue in a cloud messaging service, or
+  // QPS from a load balancer running outside of the cluster).
+  // +optional
+  optional ExternalMetricStatus external = 5;
+}
+
+// MetricTarget defines the target value, average value, or average utilization of a specific metric
+message MetricTarget {
+  // type represents whether the metric type is Utilization, Value, or AverageValue
+  optional string type = 1;
+
+  // value is the target value of the metric (as a quantity).
+  // +optional
+  optional k8s.io.apimachinery.pkg.api.resource.Quantity value = 2;
+
+  // averageValue is the target value of the average of the
+  // metric across all relevant pods (as a quantity)
+  // +optional
+  optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 3;
+
+  // averageUtilization is the target value of the average of the
+  // resource metric across all relevant pods, represented as a percentage of
+  // the requested value of the resource for the pods.
+  // Currently only valid for Resource metric source type
+  // +optional
+  optional int32 averageUtilization = 4;
+}
+
+// MetricValueStatus holds the current value for a metric
+message MetricValueStatus {
+  // value is the current value of the metric (as a quantity).
+  // +optional
+  optional k8s.io.apimachinery.pkg.api.resource.Quantity value = 1;
+
+  // averageValue is the current value of the average of the
+  // metric across all relevant pods (as a quantity)
+  // +optional
+  optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 2;
+
+  // averageUtilization is the current value of the average of the
+  // resource metric across all relevant pods, represented as a percentage of
+  // the requested value of the resource for the pods.
+  // +optional
+  optional int32 averageUtilization = 3;
+}
+
+// ObjectMetricSource indicates how to scale on a metric describing a
+// kubernetes object (for example, hits-per-second on an Ingress object).
+message ObjectMetricSource {
+  optional CrossVersionObjectReference describedObject = 1;
+
+  // target specifies the target value for the given metric
+  optional MetricTarget target = 2;
+
+  // metric identifies the target metric by name and selector
+  optional MetricIdentifier metric = 3;
+}
+
+// ObjectMetricStatus indicates the current value of a metric describing a
+// kubernetes object (for example, hits-per-second on an Ingress object).
+message ObjectMetricStatus {
+  // metric identifies the target metric by name and selector
+  optional MetricIdentifier metric = 1;
+
+  // current contains the current value for the given metric
+  optional MetricValueStatus current = 2;
+
+  optional CrossVersionObjectReference describedObject = 3;
+}
+
+// PodsMetricSource indicates how to scale on a metric describing each pod in
+// the current scale target (for example, transactions-processed-per-second).
+// The values will be averaged together before being compared to the target
+// value.
+message PodsMetricSource {
+  // metric identifies the target metric by name and selector
+  optional MetricIdentifier metric = 1;
+
+  // target specifies the target value for the given metric
+  optional MetricTarget target = 2;
+}
+
+// PodsMetricStatus indicates the current value of a metric describing each pod in
+// the current scale target (for example, transactions-processed-per-second).
+message PodsMetricStatus {
+  // metric identifies the target metric by name and selector
+  optional MetricIdentifier metric = 1;
+
+  // current contains the current value for the given metric
+  optional MetricValueStatus current = 2;
+}
+
+// ResourceMetricSource indicates how to scale on a resource metric known to
+// Kubernetes, as specified in requests and limits, describing each pod in the
+// current scale target (e.g. CPU or memory).  The values will be averaged
+// together before being compared to the target.  Such metrics are built in to
+// Kubernetes, and have special scaling options on top of those available to
+// normal per-pod metrics using the "pods" source.  Only one "target" type
+// should be set.
+message ResourceMetricSource {
+  // name is the name of the resource in question.
+  optional string name = 1;
+
+  // target specifies the target value for the given metric
+  optional MetricTarget target = 2;
+}
+
+// ResourceMetricStatus indicates the current value of a resource metric known to
+// Kubernetes, as specified in requests and limits, describing each pod in the
+// current scale target (e.g. CPU or memory).  Such metrics are built in to
+// Kubernetes, and have special scaling options on top of those available to
+// normal per-pod metrics using the "pods" source.
+message ResourceMetricStatus {
+  // name is the name of the resource in question.
+  optional string name = 1;
+
+  // current contains the current value for the given metric
+  optional MetricValueStatus current = 2;
+}
+
diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/register.go b/vendor/k8s.io/api/autoscaling/v2beta2/register.go
new file mode 100644
index 0000000..eb1265c
--- /dev/null
+++ b/vendor/k8s.io/api/autoscaling/v2beta2/register.go
@@ -0,0 +1,50 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v2beta2
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "autoscaling"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v2beta2"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	SchemeBuilder      = runtime.NewSchemeBuilder(addKnownTypes)
+	localSchemeBuilder = &SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+)
+
+// addKnownTypes adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&HorizontalPodAutoscaler{},
+		&HorizontalPodAutoscalerList{},
+	)
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
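
Editor's note: for context, a consumer wires these types into a runtime.Scheme through the exported AddToScheme variable registered above. A minimal usage sketch, assuming only the apimachinery packages this file already imports:

```go
package main

import (
	"fmt"

	v2beta2 "k8s.io/api/autoscaling/v2beta2"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	if err := v2beta2.AddToScheme(scheme); err != nil {
		panic(err)
	}
	// The scheme now recognizes the autoscaling/v2beta2 kinds.
	gvk := v2beta2.SchemeGroupVersion.WithKind("HorizontalPodAutoscaler")
	fmt.Println(scheme.Recognizes(gvk)) // true
}
```
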
diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/types.go b/vendor/k8s.io/api/autoscaling/v2beta2/types.go
new file mode 100644
index 0000000..2d33795
--- /dev/null
+++ b/vendor/k8s.io/api/autoscaling/v2beta2/types.go
@@ -0,0 +1,393 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:openapi-gen=true
+
+package v2beta2
+
+import (
+	"k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// HorizontalPodAutoscaler is the configuration for a horizontal pod
+// autoscaler, which automatically manages the replica count of any resource
+// implementing the scale subresource based on the metrics specified.
+type HorizontalPodAutoscaler struct {
+	metav1.TypeMeta `json:",inline"`
+	// metadata is the standard object metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// spec is the specification for the behaviour of the autoscaler.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.
+	// +optional
+	Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+	// status is the current information about the autoscaler.
+	// +optional
+	Status HorizontalPodAutoscalerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler.
+type HorizontalPodAutoscalerSpec struct {
+	// scaleTargetRef points to the target resource to scale, and is used to identify the pods for which metrics
+	// should be collected, as well as to actually change the replica count.
+	ScaleTargetRef CrossVersionObjectReference `json:"scaleTargetRef" protobuf:"bytes,1,opt,name=scaleTargetRef"`
+	// minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down.
+	// It defaults to 1 pod.
+	// +optional
+	MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"`
+	// maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up.
+	// It cannot be less than minReplicas.
+	MaxReplicas int32 `json:"maxReplicas" protobuf:"varint,3,opt,name=maxReplicas"`
+	// metrics contains the specifications used to calculate the
+	// desired replica count (the maximum replica count across all metrics will
+	// be used).  The desired replica count is calculated by multiplying the
+	// ratio between the target value and the current value by the current
+	// number of pods.  Ergo, metrics used must decrease as the pod count is
+	// increased, and vice-versa.  See the individual metric source types for
+	// more information about how each type of metric must respond.
+	// If not set, the default metric will be set to 80% average CPU utilization.
+	// +optional
+	Metrics []MetricSpec `json:"metrics,omitempty" protobuf:"bytes,4,rep,name=metrics"`
+}
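
Editor's note: to make the field layout concrete, here is an illustrative construction of a spec targeting 80% average CPU (the default mentioned in the metrics comment above), using the types defined later in this file; the Deployment name is hypothetical:

```go
package main

import (
	"fmt"

	v2beta2 "k8s.io/api/autoscaling/v2beta2"
	v1 "k8s.io/api/core/v1"
)

func main() {
	minReplicas := int32(1)
	avgUtil := int32(80)
	spec := v2beta2.HorizontalPodAutoscalerSpec{
		ScaleTargetRef: v2beta2.CrossVersionObjectReference{
			APIVersion: "apps/v1",
			Kind:       "Deployment",
			Name:       "my-app", // hypothetical workload name
		},
		MinReplicas: &minReplicas,
		MaxReplicas: 10,
		Metrics: []v2beta2.MetricSpec{{
			Type: v2beta2.ResourceMetricSourceType,
			Resource: &v2beta2.ResourceMetricSource{
				Name: v1.ResourceCPU,
				Target: v2beta2.MetricTarget{
					Type:               v2beta2.UtilizationMetricType,
					AverageUtilization: &avgUtil,
				},
			},
		}},
	}
	fmt.Printf("%+v\n", spec)
}
```
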
+
+// CrossVersionObjectReference contains enough information to let you identify the referred resource.
+type CrossVersionObjectReference struct {
+	// Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+	Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"`
+	// Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names
+	Name string `json:"name" protobuf:"bytes,2,opt,name=name"`
+	// API version of the referent
+	// +optional
+	APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"`
+}
+
+// MetricSpec specifies how to scale based on a single metric
+// (only `type` and one other matching field should be set at once).
+type MetricSpec struct {
+	// type is the type of metric source.  It should be one of "Object",
+	// "Pods", "Resource" or "External", each mapping to a matching field in the object.
+	Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"`
+
+	// object refers to a metric describing a single kubernetes object
+	// (for example, hits-per-second on an Ingress object).
+	// +optional
+	Object *ObjectMetricSource `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"`
+	// pods refers to a metric describing each pod in the current scale target
+	// (for example, transactions-processed-per-second).  The values will be
+	// averaged together before being compared to the target value.
+	// +optional
+	Pods *PodsMetricSource `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"`
+	// resource refers to a resource metric (such as those specified in
+	// requests and limits) known to Kubernetes describing each pod in the
+	// current scale target (e.g. CPU or memory). Such metrics are built in to
+	// Kubernetes, and have special scaling options on top of those available
+	// to normal per-pod metrics using the "pods" source.
+	// +optional
+	Resource *ResourceMetricSource `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"`
+	// external refers to a global metric that is not associated
+	// with any Kubernetes object. It allows autoscaling based on information
+	// coming from components running outside of the cluster
+	// (for example, the length of a queue in a cloud messaging service, or
+	// QPS from a load balancer running outside of the cluster).
+	// +optional
+	External *ExternalMetricSource `json:"external,omitempty" protobuf:"bytes,5,opt,name=external"`
+}
+
+// MetricSourceType indicates the type of metric.
+type MetricSourceType string
+
+var (
+	// ObjectMetricSourceType is a metric describing a kubernetes object
+	// (for example, hits-per-second on an Ingress object).
+	ObjectMetricSourceType MetricSourceType = "Object"
+	// PodsMetricSourceType is a metric describing each pod in the current scale
+	// target (for example, transactions-processed-per-second).  The values
+	// will be averaged together before being compared to the target value.
+	PodsMetricSourceType MetricSourceType = "Pods"
+	// ResourceMetricSourceType is a resource metric known to Kubernetes, as
+	// specified in requests and limits, describing each pod in the current
+	// scale target (e.g. CPU or memory).  Such metrics are built in to
+	// Kubernetes, and have special scaling options on top of those available
+	// to normal per-pod metrics (the "pods" source).
+	ResourceMetricSourceType MetricSourceType = "Resource"
+	// ExternalMetricSourceType is a global metric that is not associated
+	// with any Kubernetes object. It allows autoscaling based on information
+	// coming from components running outside of the cluster
+	// (for example, the length of a queue in a cloud messaging service, or
+	// QPS from a load balancer running outside of the cluster).
+	ExternalMetricSourceType MetricSourceType = "External"
+)
+
+// ObjectMetricSource indicates how to scale on a metric describing a
+// kubernetes object (for example, hits-per-second on an Ingress object).
+type ObjectMetricSource struct {
+	DescribedObject CrossVersionObjectReference `json:"describedObject" protobuf:"bytes,1,name=describedObject"`
+	// target specifies the target value for the given metric
+	Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"`
+	// metric identifies the target metric by name and selector
+	Metric MetricIdentifier `json:"metric" protobuf:"bytes,3,name=metric"`
+}
+
+// PodsMetricSource indicates how to scale on a metric describing each pod in
+// the current scale target (for example, transactions-processed-per-second).
+// The values will be averaged together before being compared to the target
+// value.
+type PodsMetricSource struct {
+	// metric identifies the target metric by name and selector
+	Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"`
+	// target specifies the target value for the given metric
+	Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"`
+}
+
+// ResourceMetricSource indicates how to scale on a resource metric known to
+// Kubernetes, as specified in requests and limits, describing each pod in the
+// current scale target (e.g. CPU or memory).  The values will be averaged
+// together before being compared to the target.  Such metrics are built in to
+// Kubernetes, and have special scaling options on top of those available to
+// normal per-pod metrics using the "pods" source.  Only one "target" type
+// should be set.
+type ResourceMetricSource struct {
+	// name is the name of the resource in question.
+	Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"`
+	// target specifies the target value for the given metric
+	Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"`
+}
+
+// ExternalMetricSource indicates how to scale on a metric not associated with
+// any Kubernetes object (for example, the length of a queue in a cloud
+// messaging service, or QPS from a load balancer running outside of the cluster).
+type ExternalMetricSource struct {
+	// metric identifies the target metric by name and selector
+	Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"`
+	// target specifies the target value for the given metric
+	Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"`
+}
+
+// MetricIdentifier defines the name and optionally selector for a metric
+type MetricIdentifier struct {
+	// name is the name of the given metric
+	Name string `json:"name" protobuf:"bytes,1,name=name"`
+	// selector is the string-encoded form of a standard kubernetes label selector for the given metric.
+	// When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping.
+	// When unset, just the metricName will be used to gather metrics.
+	// +optional
+	Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,name=selector"`
+}
+
+// MetricTarget defines the target value, average value, or average utilization of a specific metric
+type MetricTarget struct {
+	// type represents whether the metric type is Utilization, Value, or AverageValue
+	Type MetricTargetType `json:"type" protobuf:"bytes,1,name=type"`
+	// value is the target value of the metric (as a quantity).
+	// +optional
+	Value *resource.Quantity `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"`
+	// averageValue is the target value of the average of the
+	// metric across all relevant pods (as a quantity)
+	// +optional
+	AverageValue *resource.Quantity `json:"averageValue,omitempty" protobuf:"bytes,3,opt,name=averageValue"`
+	// averageUtilization is the target value of the average of the
+	// resource metric across all relevant pods, represented as a percentage of
+	// the requested value of the resource for the pods.
+	// Currently only valid for Resource metric source type
+	// +optional
+	AverageUtilization *int32 `json:"averageUtilization,omitempty" protobuf:"bytes,4,opt,name=averageUtilization"`
+}
+
+// MetricTargetType specifies the type of metric being targeted, and should be one of
+// "Value", "AverageValue", or "Utilization"
+type MetricTargetType string
+
+var (
+	// UtilizationMetricType declares a MetricTarget is an AverageUtilization value
+	UtilizationMetricType MetricTargetType = "Utilization"
+	// ValueMetricType declares a MetricTarget is a raw value
+	ValueMetricType MetricTargetType = "Value"
+	// AverageValueMetricType declares a MetricTarget is an average value of the metric across all relevant pods (as a quantity)
+	AverageValueMetricType MetricTargetType = "AverageValue"
+)
+
+// HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler.
+type HorizontalPodAutoscalerStatus struct {
+	// observedGeneration is the most recent generation observed by this autoscaler.
+	// +optional
+	ObservedGeneration *int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"`
+
+	// lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods,
+	// used by the autoscaler to control how often the number of pods is changed.
+	// +optional
+	LastScaleTime *metav1.Time `json:"lastScaleTime,omitempty" protobuf:"bytes,2,opt,name=lastScaleTime"`
+
+	// currentReplicas is the current number of replicas of pods managed by this autoscaler,
+	// as last seen by the autoscaler.
+	CurrentReplicas int32 `json:"currentReplicas" protobuf:"varint,3,opt,name=currentReplicas"`
+
+	// desiredReplicas is the desired number of replicas of pods managed by this autoscaler,
+	// as last calculated by the autoscaler.
+	DesiredReplicas int32 `json:"desiredReplicas" protobuf:"varint,4,opt,name=desiredReplicas"`
+
+	// currentMetrics is the last read state of the metrics used by this autoscaler.
+	// +optional
+	CurrentMetrics []MetricStatus `json:"currentMetrics" protobuf:"bytes,5,rep,name=currentMetrics"`
+
+	// conditions is the set of conditions required for this autoscaler to scale its target,
+	// and indicates whether or not those conditions are met.
+	Conditions []HorizontalPodAutoscalerCondition `json:"conditions" protobuf:"bytes,6,rep,name=conditions"`
+}
+
+// HorizontalPodAutoscalerConditionType are the valid conditions of
+// a HorizontalPodAutoscaler.
+type HorizontalPodAutoscalerConditionType string
+
+var (
+	// ScalingActive indicates that the HPA controller is able to scale if necessary:
+	// it's correctly configured, can fetch the desired metrics, and isn't disabled.
+	ScalingActive HorizontalPodAutoscalerConditionType = "ScalingActive"
+	// AbleToScale indicates a lack of transient issues which prevent scaling from occurring,
+	// such as being in a backoff window, or being unable to access/update the target scale.
+	AbleToScale HorizontalPodAutoscalerConditionType = "AbleToScale"
+	// ScalingLimited indicates that the calculated scale based on metrics would be above or
+	// below the range for the HPA, and has thus been capped.
+	ScalingLimited HorizontalPodAutoscalerConditionType = "ScalingLimited"
+)
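
Editor's note: these condition types are typically consumed by scanning HorizontalPodAutoscalerStatus.Conditions. A hypothetical lookup helper (not part of this package) might read:

```go
package main

import (
	"fmt"

	v2beta2 "k8s.io/api/autoscaling/v2beta2"
)

// findCondition scans Conditions for the first entry of the given type,
// returning nil if none is present.
func findCondition(st v2beta2.HorizontalPodAutoscalerStatus, t v2beta2.HorizontalPodAutoscalerConditionType) *v2beta2.HorizontalPodAutoscalerCondition {
	for i := range st.Conditions {
		if st.Conditions[i].Type == t {
			return &st.Conditions[i]
		}
	}
	return nil
}

func main() {
	st := v2beta2.HorizontalPodAutoscalerStatus{
		Conditions: []v2beta2.HorizontalPodAutoscalerCondition{
			{Type: v2beta2.ScalingLimited, Status: "True"},
		},
	}
	if c := findCondition(st, v2beta2.ScalingLimited); c != nil {
		fmt.Println("scale capped:", c.Status)
	}
}
```
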
+
+// HorizontalPodAutoscalerCondition describes the state of
+// a HorizontalPodAutoscaler at a certain point.
+type HorizontalPodAutoscalerCondition struct {
+	// type describes the current condition
+	Type HorizontalPodAutoscalerConditionType `json:"type" protobuf:"bytes,1,name=type"`
+	// status is the status of the condition (True, False, Unknown)
+	Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,name=status"`
+	// lastTransitionTime is the last time the condition transitioned from
+	// one status to another
+	// +optional
+	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
+	// reason is the reason for the condition's last transition.
+	// +optional
+	Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
+	// message is a human-readable explanation containing details about
+	// the transition
+	// +optional
+	Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
+}
+
+// MetricStatus describes the last-read state of a single metric.
+type MetricStatus struct {
+	// type is the type of metric source.  It will be one of "Object",
+	// "Pods", "Resource" or "External", each corresponding to a matching field in the object.
+	Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"`
+
+	// object refers to a metric describing a single kubernetes object
+	// (for example, hits-per-second on an Ingress object).
+	// +optional
+	Object *ObjectMetricStatus `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"`
+	// pods refers to a metric describing each pod in the current scale target
+	// (for example, transactions-processed-per-second).  The values will be
+	// averaged together before being compared to the target value.
+	// +optional
+	Pods *PodsMetricStatus `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"`
+	// resource refers to a resource metric (such as those specified in
+	// requests and limits) known to Kubernetes describing each pod in the
+	// current scale target (e.g. CPU or memory). Such metrics are built in to
+	// Kubernetes, and have special scaling options on top of those available
+	// to normal per-pod metrics using the "pods" source.
+	// +optional
+	Resource *ResourceMetricStatus `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"`
+	// external refers to a global metric that is not associated
+	// with any Kubernetes object. It allows autoscaling based on information
+	// coming from components running outside of the cluster
+	// (for example, the length of a queue in a cloud messaging service, or
+	// QPS from a load balancer running outside of the cluster).
+	// +optional
+	External *ExternalMetricStatus `json:"external,omitempty" protobuf:"bytes,5,opt,name=external"`
+}
+
+// ObjectMetricStatus indicates the current value of a metric describing a
+// kubernetes object (for example, hits-per-second on an Ingress object).
+type ObjectMetricStatus struct {
+	// metric identifies the target metric by name and selector
+	Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"`
+	// current contains the current value for the given metric
+	Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"`
+
+	// describedObject identifies the object to which the metric refers
+	// (kind, name, and apiVersion).
+	DescribedObject CrossVersionObjectReference `json:"describedObject" protobuf:"bytes,3,name=describedObject"`
+}
+
+// PodsMetricStatus indicates the current value of a metric describing each pod in
+// the current scale target (for example, transactions-processed-per-second).
+type PodsMetricStatus struct {
+	// metric identifies the target metric by name and selector
+	Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"`
+	// current contains the current value for the given metric
+	Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"`
+}
+
+// ResourceMetricStatus indicates the current value of a resource metric known to
+// Kubernetes, as specified in requests and limits, describing each pod in the
+// current scale target (e.g. CPU or memory).  Such metrics are built in to
+// Kubernetes, and have special scaling options on top of those available to
+// normal per-pod metrics using the "pods" source.
+type ResourceMetricStatus struct {
+	// Name is the name of the resource in question.
+	Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"`
+	// current contains the current value for the given metric
+	Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"`
+}
+
+// ExternalMetricStatus indicates the current value of a global metric
+// not associated with any Kubernetes object.
+type ExternalMetricStatus struct {
+	// metric identifies the target metric by name and selector
+	Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"`
+	// current contains the current value for the given metric
+	Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"`
+}
+
+// MetricValueStatus holds the current value for a metric
+type MetricValueStatus struct {
+	// value is the current value of the metric (as a quantity).
+	// +optional
+	Value *resource.Quantity `json:"value,omitempty" protobuf:"bytes,1,opt,name=value"`
+	// averageValue is the current value of the average of the
+	// metric across all relevant pods (as a quantity)
+	// +optional
+	AverageValue *resource.Quantity `json:"averageValue,omitempty" protobuf:"bytes,2,opt,name=averageValue"`
+	// averageUtilization is the current value of the average of the
+	// resource metric across all relevant pods, represented as a percentage of
+	// the requested value of the resource for the pods.
+	// +optional
+	AverageUtilization *int32 `json:"averageUtilization,omitempty" protobuf:"bytes,3,opt,name=averageUtilization"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// HorizontalPodAutoscalerList is a list of horizontal pod autoscaler objects.
+type HorizontalPodAutoscalerList struct {
+	metav1.TypeMeta `json:",inline"`
+	// metadata is the standard list metadata.
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// items is the list of horizontal pod autoscaler objects.
+	Items []HorizontalPodAutoscaler `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
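
As a sanity check on how these v2beta2 types fit together, here is a minimal sketch of client code that builds a HorizontalPodAutoscaler with a single resource-metric target. It is not part of the vendored diff; the names ("web"), the 80% CPU target, and the replica bounds are illustrative assumptions.

package main

import (
	"fmt"

	autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	minReplicas := int32(2)        // illustrative lower bound
	targetUtilization := int32(80) // illustrative CPU target

	hpa := autoscalingv2beta2.HorizontalPodAutoscaler{
		ObjectMeta: metav1.ObjectMeta{Name: "web"},
		Spec: autoscalingv2beta2.HorizontalPodAutoscalerSpec{
			// scaleTargetRef identifies the workload whose replica count is managed.
			ScaleTargetRef: autoscalingv2beta2.CrossVersionObjectReference{
				Kind:       "Deployment",
				Name:       "web",
				APIVersion: "apps/v1",
			},
			MinReplicas: &minReplicas,
			MaxReplicas: 10,
			// A single Resource metric: scale to keep average CPU at 80%.
			Metrics: []autoscalingv2beta2.MetricSpec{{
				Type: autoscalingv2beta2.ResourceMetricSourceType,
				Resource: &autoscalingv2beta2.ResourceMetricSource{
					Name: corev1.ResourceCPU,
					Target: autoscalingv2beta2.MetricTarget{
						Type:               autoscalingv2beta2.UtilizationMetricType,
						AverageUtilization: &targetUtilization,
					},
				},
			}},
		},
	}
	fmt.Println(hpa.Spec.MaxReplicas)
}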
diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go
new file mode 100644
index 0000000..996dc18
--- /dev/null
+++ b/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go
@@ -0,0 +1,240 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v2beta2
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_CrossVersionObjectReference = map[string]string{
+	"":           "CrossVersionObjectReference contains enough information to let you identify the referred resource.",
+	"kind":       "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds\"",
+	"name":       "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names",
+	"apiVersion": "API version of the referent",
+}
+
+func (CrossVersionObjectReference) SwaggerDoc() map[string]string {
+	return map_CrossVersionObjectReference
+}
+
+var map_ExternalMetricSource = map[string]string{
+	"":       "ExternalMetricSource indicates how to scale on a metric not associated with any Kubernetes object (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).",
+	"metric": "metric identifies the target metric by name and selector",
+	"target": "target specifies the target value for the given metric",
+}
+
+func (ExternalMetricSource) SwaggerDoc() map[string]string {
+	return map_ExternalMetricSource
+}
+
+var map_ExternalMetricStatus = map[string]string{
+	"":        "ExternalMetricStatus indicates the current value of a global metric not associated with any Kubernetes object.",
+	"metric":  "metric identifies the target metric by name and selector",
+	"current": "current contains the current value for the given metric",
+}
+
+func (ExternalMetricStatus) SwaggerDoc() map[string]string {
+	return map_ExternalMetricStatus
+}
+
+var map_HorizontalPodAutoscaler = map[string]string{
+	"":         "HorizontalPodAutoscaler is the configuration for a horizontal pod autoscaler, which automatically manages the replica count of any resource implementing the scale subresource based on the metrics specified.",
+	"metadata": "metadata is the standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"spec":     "spec is the specification for the behaviour of the autoscaler. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.",
+	"status":   "status is the current information about the autoscaler.",
+}
+
+func (HorizontalPodAutoscaler) SwaggerDoc() map[string]string {
+	return map_HorizontalPodAutoscaler
+}
+
+var map_HorizontalPodAutoscalerCondition = map[string]string{
+	"":                   "HorizontalPodAutoscalerCondition describes the state of a HorizontalPodAutoscaler at a certain point.",
+	"type":               "type describes the current condition",
+	"status":             "status is the status of the condition (True, False, Unknown)",
+	"lastTransitionTime": "lastTransitionTime is the last time the condition transitioned from one status to another",
+	"reason":             "reason is the reason for the condition's last transition.",
+	"message":            "message is a human-readable explanation containing details about the transition",
+}
+
+func (HorizontalPodAutoscalerCondition) SwaggerDoc() map[string]string {
+	return map_HorizontalPodAutoscalerCondition
+}
+
+var map_HorizontalPodAutoscalerList = map[string]string{
+	"":         "HorizontalPodAutoscalerList is a list of horizontal pod autoscaler objects.",
+	"metadata": "metadata is the standard list metadata.",
+	"items":    "items is the list of horizontal pod autoscaler objects.",
+}
+
+func (HorizontalPodAutoscalerList) SwaggerDoc() map[string]string {
+	return map_HorizontalPodAutoscalerList
+}
+
+var map_HorizontalPodAutoscalerSpec = map[string]string{
+	"":               "HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler.",
+	"scaleTargetRef": "scaleTargetRef points to the target resource to scale, and is used to the pods for which metrics should be collected, as well as to actually change the replica count.",
+	"minReplicas":    "minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod.",
+	"maxReplicas":    "maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. It cannot be less that minReplicas.",
+	"metrics":        "metrics contains the specifications for which to use to calculate the desired replica count (the maximum replica count across all metrics will be used).  The desired replica count is calculated multiplying the ratio between the target value and the current value by the current number of pods.  Ergo, metrics used must decrease as the pod count is increased, and vice-versa.  See the individual metric source types for more information about how each type of metric must respond. If not set, the default metric will be set to 80% average CPU utilization.",
+}
+
+func (HorizontalPodAutoscalerSpec) SwaggerDoc() map[string]string {
+	return map_HorizontalPodAutoscalerSpec
+}
+
+var map_HorizontalPodAutoscalerStatus = map[string]string{
+	"":                   "HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler.",
+	"observedGeneration": "observedGeneration is the most recent generation observed by this autoscaler.",
+	"lastScaleTime":      "lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods, used by the autoscaler to control how often the number of pods is changed.",
+	"currentReplicas":    "currentReplicas is current number of replicas of pods managed by this autoscaler, as last seen by the autoscaler.",
+	"desiredReplicas":    "desiredReplicas is the desired number of replicas of pods managed by this autoscaler, as last calculated by the autoscaler.",
+	"currentMetrics":     "currentMetrics is the last read state of the metrics used by this autoscaler.",
+	"conditions":         "conditions is the set of conditions required for this autoscaler to scale its target, and indicates whether or not those conditions are met.",
+}
+
+func (HorizontalPodAutoscalerStatus) SwaggerDoc() map[string]string {
+	return map_HorizontalPodAutoscalerStatus
+}
+
+var map_MetricIdentifier = map[string]string{
+	"":         "MetricIdentifier defines the name and optionally selector for a metric",
+	"name":     "name is the name of the given metric",
+	"selector": "selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.",
+}
+
+func (MetricIdentifier) SwaggerDoc() map[string]string {
+	return map_MetricIdentifier
+}
+
+var map_MetricSpec = map[string]string{
+	"":         "MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).",
+	"type":     "type is the type of metric source.  It should be one of \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object.",
+	"object":   "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).",
+	"pods":     "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second).  The values will be averaged together before being compared to the target value.",
+	"resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.",
+	"external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).",
+}
+
+func (MetricSpec) SwaggerDoc() map[string]string {
+	return map_MetricSpec
+}
+
+var map_MetricStatus = map[string]string{
+	"":         "MetricStatus describes the last-read state of a single metric.",
+	"type":     "type is the type of metric source.  It will be one of \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object.",
+	"object":   "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).",
+	"pods":     "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second).  The values will be averaged together before being compared to the target value.",
+	"resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.",
+	"external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).",
+}
+
+func (MetricStatus) SwaggerDoc() map[string]string {
+	return map_MetricStatus
+}
+
+var map_MetricTarget = map[string]string{
+	"":                   "MetricTarget defines the target value, average value, or average utilization of a specific metric",
+	"type":               "type represents whether the metric type is Utilization, Value, or AverageValue",
+	"value":              "value is the target value of the metric (as a quantity).",
+	"averageValue":       "averageValue is the target value of the average of the metric across all relevant pods (as a quantity)",
+	"averageUtilization": "averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type",
+}
+
+func (MetricTarget) SwaggerDoc() map[string]string {
+	return map_MetricTarget
+}
+
+var map_MetricValueStatus = map[string]string{
+	"":                   "MetricValueStatus holds the current value for a metric",
+	"value":              "value is the current value of the metric (as a quantity).",
+	"averageValue":       "averageValue is the current value of the average of the metric across all relevant pods (as a quantity)",
+	"averageUtilization": "currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.",
+}
+
+func (MetricValueStatus) SwaggerDoc() map[string]string {
+	return map_MetricValueStatus
+}
+
+var map_ObjectMetricSource = map[string]string{
+	"":       "ObjectMetricSource indicates how to scale on a metric describing a kubernetes object (for example, hits-per-second on an Ingress object).",
+	"target": "target specifies the target value for the given metric",
+	"metric": "metric identifies the target metric by name and selector",
+}
+
+func (ObjectMetricSource) SwaggerDoc() map[string]string {
+	return map_ObjectMetricSource
+}
+
+var map_ObjectMetricStatus = map[string]string{
+	"":        "ObjectMetricStatus indicates the current value of a metric describing a kubernetes object (for example, hits-per-second on an Ingress object).",
+	"metric":  "metric identifies the target metric by name and selector",
+	"current": "current contains the current value for the given metric",
+}
+
+func (ObjectMetricStatus) SwaggerDoc() map[string]string {
+	return map_ObjectMetricStatus
+}
+
+var map_PodsMetricSource = map[string]string{
+	"":       "PodsMetricSource indicates how to scale on a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.",
+	"metric": "metric identifies the target metric by name and selector",
+	"target": "target specifies the target value for the given metric",
+}
+
+func (PodsMetricSource) SwaggerDoc() map[string]string {
+	return map_PodsMetricSource
+}
+
+var map_PodsMetricStatus = map[string]string{
+	"":        "PodsMetricStatus indicates the current value of a metric describing each pod in the current scale target (for example, transactions-processed-per-second).",
+	"metric":  "metric identifies the target metric by name and selector",
+	"current": "current contains the current value for the given metric",
+}
+
+func (PodsMetricStatus) SwaggerDoc() map[string]string {
+	return map_PodsMetricStatus
+}
+
+var map_ResourceMetricSource = map[string]string{
+	"":       "ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory).  The values will be averaged together before being compared to the target.  Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.  Only one \"target\" type should be set.",
+	"name":   "name is the name of the resource in question.",
+	"target": "target specifies the target value for the given metric",
+}
+
+func (ResourceMetricSource) SwaggerDoc() map[string]string {
+	return map_ResourceMetricSource
+}
+
+var map_ResourceMetricStatus = map[string]string{
+	"":        "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory).  Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.",
+	"name":    "Name is the name of the resource in question.",
+	"current": "current contains the current value for the given metric",
+}
+
+func (ResourceMetricStatus) SwaggerDoc() map[string]string {
+	return map_ResourceMetricStatus
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
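
A brief sketch of how these generated maps are consumed. go-restful discovers the SwaggerDoc method via reflection when building API documentation; the direct call below is only for illustration.

package main

import (
	"fmt"

	"k8s.io/api/autoscaling/v2beta2"
)

func main() {
	// The empty key holds the type-level description; the remaining keys
	// document individual fields by their JSON names.
	docs := (v2beta2.HorizontalPodAutoscaler{}).SwaggerDoc()
	fmt.Println(docs[""])
	fmt.Println(docs["spec"])
}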
diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go b/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go
new file mode 100644
index 0000000..a6a9565
--- /dev/null
+++ b/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go
@@ -0,0 +1,487 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v2beta2
+
+import (
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CrossVersionObjectReference) DeepCopyInto(out *CrossVersionObjectReference) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrossVersionObjectReference.
+func (in *CrossVersionObjectReference) DeepCopy() *CrossVersionObjectReference {
+	if in == nil {
+		return nil
+	}
+	out := new(CrossVersionObjectReference)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExternalMetricSource) DeepCopyInto(out *ExternalMetricSource) {
+	*out = *in
+	in.Metric.DeepCopyInto(&out.Metric)
+	in.Target.DeepCopyInto(&out.Target)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalMetricSource.
+func (in *ExternalMetricSource) DeepCopy() *ExternalMetricSource {
+	if in == nil {
+		return nil
+	}
+	out := new(ExternalMetricSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExternalMetricStatus) DeepCopyInto(out *ExternalMetricStatus) {
+	*out = *in
+	in.Metric.DeepCopyInto(&out.Metric)
+	in.Current.DeepCopyInto(&out.Current)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalMetricStatus.
+func (in *ExternalMetricStatus) DeepCopy() *ExternalMetricStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ExternalMetricStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HorizontalPodAutoscaler) DeepCopyInto(out *HorizontalPodAutoscaler) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscaler.
+func (in *HorizontalPodAutoscaler) DeepCopy() *HorizontalPodAutoscaler {
+	if in == nil {
+		return nil
+	}
+	out := new(HorizontalPodAutoscaler)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *HorizontalPodAutoscaler) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HorizontalPodAutoscalerCondition) DeepCopyInto(out *HorizontalPodAutoscalerCondition) {
+	*out = *in
+	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerCondition.
+func (in *HorizontalPodAutoscalerCondition) DeepCopy() *HorizontalPodAutoscalerCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(HorizontalPodAutoscalerCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HorizontalPodAutoscalerList) DeepCopyInto(out *HorizontalPodAutoscalerList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]HorizontalPodAutoscaler, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerList.
+func (in *HorizontalPodAutoscalerList) DeepCopy() *HorizontalPodAutoscalerList {
+	if in == nil {
+		return nil
+	}
+	out := new(HorizontalPodAutoscalerList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *HorizontalPodAutoscalerList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HorizontalPodAutoscalerSpec) DeepCopyInto(out *HorizontalPodAutoscalerSpec) {
+	*out = *in
+	out.ScaleTargetRef = in.ScaleTargetRef
+	if in.MinReplicas != nil {
+		in, out := &in.MinReplicas, &out.MinReplicas
+		*out = new(int32)
+		**out = **in
+	}
+	if in.Metrics != nil {
+		in, out := &in.Metrics, &out.Metrics
+		*out = make([]MetricSpec, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerSpec.
+func (in *HorizontalPodAutoscalerSpec) DeepCopy() *HorizontalPodAutoscalerSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(HorizontalPodAutoscalerSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HorizontalPodAutoscalerStatus) DeepCopyInto(out *HorizontalPodAutoscalerStatus) {
+	*out = *in
+	if in.ObservedGeneration != nil {
+		in, out := &in.ObservedGeneration, &out.ObservedGeneration
+		*out = new(int64)
+		**out = **in
+	}
+	if in.LastScaleTime != nil {
+		in, out := &in.LastScaleTime, &out.LastScaleTime
+		*out = (*in).DeepCopy()
+	}
+	if in.CurrentMetrics != nil {
+		in, out := &in.CurrentMetrics, &out.CurrentMetrics
+		*out = make([]MetricStatus, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]HorizontalPodAutoscalerCondition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerStatus.
+func (in *HorizontalPodAutoscalerStatus) DeepCopy() *HorizontalPodAutoscalerStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(HorizontalPodAutoscalerStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MetricIdentifier) DeepCopyInto(out *MetricIdentifier) {
+	*out = *in
+	if in.Selector != nil {
+		in, out := &in.Selector, &out.Selector
+		*out = new(v1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricIdentifier.
+func (in *MetricIdentifier) DeepCopy() *MetricIdentifier {
+	if in == nil {
+		return nil
+	}
+	out := new(MetricIdentifier)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MetricSpec) DeepCopyInto(out *MetricSpec) {
+	*out = *in
+	if in.Object != nil {
+		in, out := &in.Object, &out.Object
+		*out = new(ObjectMetricSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Pods != nil {
+		in, out := &in.Pods, &out.Pods
+		*out = new(PodsMetricSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Resource != nil {
+		in, out := &in.Resource, &out.Resource
+		*out = new(ResourceMetricSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.External != nil {
+		in, out := &in.External, &out.External
+		*out = new(ExternalMetricSource)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricSpec.
+func (in *MetricSpec) DeepCopy() *MetricSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(MetricSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MetricStatus) DeepCopyInto(out *MetricStatus) {
+	*out = *in
+	if in.Object != nil {
+		in, out := &in.Object, &out.Object
+		*out = new(ObjectMetricStatus)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Pods != nil {
+		in, out := &in.Pods, &out.Pods
+		*out = new(PodsMetricStatus)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Resource != nil {
+		in, out := &in.Resource, &out.Resource
+		*out = new(ResourceMetricStatus)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.External != nil {
+		in, out := &in.External, &out.External
+		*out = new(ExternalMetricStatus)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricStatus.
+func (in *MetricStatus) DeepCopy() *MetricStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(MetricStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MetricTarget) DeepCopyInto(out *MetricTarget) {
+	*out = *in
+	if in.Value != nil {
+		in, out := &in.Value, &out.Value
+		x := (*in).DeepCopy()
+		*out = &x
+	}
+	if in.AverageValue != nil {
+		in, out := &in.AverageValue, &out.AverageValue
+		x := (*in).DeepCopy()
+		*out = &x
+	}
+	if in.AverageUtilization != nil {
+		in, out := &in.AverageUtilization, &out.AverageUtilization
+		*out = new(int32)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricTarget.
+func (in *MetricTarget) DeepCopy() *MetricTarget {
+	if in == nil {
+		return nil
+	}
+	out := new(MetricTarget)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MetricValueStatus) DeepCopyInto(out *MetricValueStatus) {
+	*out = *in
+	if in.Value != nil {
+		in, out := &in.Value, &out.Value
+		x := (*in).DeepCopy()
+		*out = &x
+	}
+	if in.AverageValue != nil {
+		in, out := &in.AverageValue, &out.AverageValue
+		x := (*in).DeepCopy()
+		*out = &x
+	}
+	if in.AverageUtilization != nil {
+		in, out := &in.AverageUtilization, &out.AverageUtilization
+		*out = new(int32)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricValueStatus.
+func (in *MetricValueStatus) DeepCopy() *MetricValueStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(MetricValueStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectMetricSource) DeepCopyInto(out *ObjectMetricSource) {
+	*out = *in
+	out.DescribedObject = in.DescribedObject
+	in.Target.DeepCopyInto(&out.Target)
+	in.Metric.DeepCopyInto(&out.Metric)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMetricSource.
+func (in *ObjectMetricSource) DeepCopy() *ObjectMetricSource {
+	if in == nil {
+		return nil
+	}
+	out := new(ObjectMetricSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectMetricStatus) DeepCopyInto(out *ObjectMetricStatus) {
+	*out = *in
+	in.Metric.DeepCopyInto(&out.Metric)
+	in.Current.DeepCopyInto(&out.Current)
+	out.DescribedObject = in.DescribedObject
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMetricStatus.
+func (in *ObjectMetricStatus) DeepCopy() *ObjectMetricStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ObjectMetricStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodsMetricSource) DeepCopyInto(out *PodsMetricSource) {
+	*out = *in
+	in.Metric.DeepCopyInto(&out.Metric)
+	in.Target.DeepCopyInto(&out.Target)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsMetricSource.
+func (in *PodsMetricSource) DeepCopy() *PodsMetricSource {
+	if in == nil {
+		return nil
+	}
+	out := new(PodsMetricSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodsMetricStatus) DeepCopyInto(out *PodsMetricStatus) {
+	*out = *in
+	in.Metric.DeepCopyInto(&out.Metric)
+	in.Current.DeepCopyInto(&out.Current)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsMetricStatus.
+func (in *PodsMetricStatus) DeepCopy() *PodsMetricStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(PodsMetricStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceMetricSource) DeepCopyInto(out *ResourceMetricSource) {
+	*out = *in
+	in.Target.DeepCopyInto(&out.Target)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceMetricSource.
+func (in *ResourceMetricSource) DeepCopy() *ResourceMetricSource {
+	if in == nil {
+		return nil
+	}
+	out := new(ResourceMetricSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceMetricStatus) DeepCopyInto(out *ResourceMetricStatus) {
+	*out = *in
+	in.Current.DeepCopyInto(&out.Current)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceMetricStatus.
+func (in *ResourceMetricStatus) DeepCopy() *ResourceMetricStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ResourceMetricStatus)
+	in.DeepCopyInto(out)
+	return out
+}
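
To show why these generated helpers matter, a small sketch (a standalone main package, not part of this change) demonstrating that DeepCopy duplicates pointer fields instead of aliasing them:

package main

import (
	"fmt"

	"k8s.io/api/autoscaling/v2beta2"
)

func main() {
	min := int32(1)
	orig := &v2beta2.HorizontalPodAutoscalerSpec{MinReplicas: &min, MaxReplicas: 5}

	// DeepCopy allocates a fresh *int32 for MinReplicas, so mutating the
	// copy leaves the original untouched.
	cp := orig.DeepCopy()
	*cp.MinReplicas = 3

	fmt.Println(*orig.MinReplicas, *cp.MinReplicas) // prints: 1 3
}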
diff --git a/vendor/k8s.io/api/batch/v1/doc.go b/vendor/k8s.io/api/batch/v1/doc.go
new file mode 100644
index 0000000..0449180
--- /dev/null
+++ b/vendor/k8s.io/api/batch/v1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:openapi-gen=true
+
+package v1 // import "k8s.io/api/batch/v1"
diff --git a/vendor/k8s.io/api/batch/v1/generated.proto b/vendor/k8s.io/api/batch/v1/generated.proto
new file mode 100644
index 0000000..039149d
--- /dev/null
+++ b/vendor/k8s.io/api/batch/v1/generated.proto
@@ -0,0 +1,184 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.api.batch.v1;
+
+import "k8s.io/api/core/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1";
+
+// Job represents the configuration of a single job.
+message Job {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Specification of the desired behavior of a job.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+  // +optional
+  optional JobSpec spec = 2;
+
+  // Current status of a job.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+  // +optional
+  optional JobStatus status = 3;
+}
+
+// JobCondition describes current state of a job.
+message JobCondition {
+  // Type of job condition, Complete or Failed.
+  optional string type = 1;
+
+  // Status of the condition, one of True, False, Unknown.
+  optional string status = 2;
+
+  // Last time the condition was checked.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3;
+
+  // Last time the condition transitioned from one status to another.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4;
+
+  // (brief) reason for the condition's last transition.
+  // +optional
+  optional string reason = 5;
+
+  // Human readable message indicating details about last transition.
+  // +optional
+  optional string message = 6;
+}
+
+// JobList is a collection of jobs.
+message JobList {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // items is the list of Jobs.
+  repeated Job items = 2;
+}
+
+// JobSpec describes what the job execution will look like.
+message JobSpec {
+  // Specifies the maximum desired number of pods the job should
+  // run at any given time. The actual number of pods running in steady state will
+  // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism),
+  // i.e. when the work left to do is less than max parallelism.
+  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/
+  // +optional
+  optional int32 parallelism = 1;
+
+  // Specifies the desired number of successfully finished pods the
+  // job should be run with.  Setting to nil means that the success of any
+  // pod signals the success of all pods, and allows parallelism to have any positive
+  // value.  Setting to 1 means that parallelism is limited to 1 and the success of that
+  // pod signals the success of the job.
+  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/
+  // +optional
+  optional int32 completions = 2;
+
+  // Specifies the duration in seconds relative to the startTime that the job may be active
+  // before the system tries to terminate it; the value must be a positive integer.
+  // +optional
+  optional int64 activeDeadlineSeconds = 3;
+
+  // Specifies the number of retries before marking this job failed.
+  // Defaults to 6.
+  // +optional
+  optional int32 backoffLimit = 7;
+
+  // A label query over pods that should match the pod count.
+  // Normally, the system sets this field for you.
+  // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4;
+
+  // manualSelector controls generation of pod labels and pod selectors.
+  // Leave `manualSelector` unset unless you are certain what you are doing.
+  // When false or unset, the system picks labels unique to this job
+  // and appends those labels to the pod template.  When true,
+  // the user is responsible for picking unique labels and specifying
+  // the selector.  Failure to pick a unique label may cause this
+  // and other jobs to not function correctly.  However, you may see
+  // `manualSelector=true` in jobs that were created with the old `extensions/v1beta1`
+  // API.
+  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector
+  // +optional
+  optional bool manualSelector = 5;
+
+  // Describes the pod that will be created when executing a job.
+  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/
+  optional k8s.io.api.core.v1.PodTemplateSpec template = 6;
+
+  // ttlSecondsAfterFinished limits the lifetime of a Job that has finished
+  // execution (either Complete or Failed). If this field is set,
+  // ttlSecondsAfterFinished after the Job finishes, it is eligible to be
+  // automatically deleted. When the Job is being deleted, its lifecycle
+  // guarantees (e.g. finalizers) will be honored. If this field is unset,
+  // the Job won't be automatically deleted. If this field is set to zero,
+  // the Job becomes eligible to be deleted immediately after it finishes.
+  // This field is alpha-level and is only honored by servers that enable the
+  // TTLAfterFinished feature.
+  // +optional
+  optional int32 ttlSecondsAfterFinished = 8;
+}
+
+// JobStatus represents the current state of a Job.
+message JobStatus {
+  // The latest available observations of an object's current state.
+  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/
+  // +optional
+  // +patchMergeKey=type
+  // +patchStrategy=merge
+  repeated JobCondition conditions = 1;
+
+  // Represents time when the job was acknowledged by the job controller.
+  // It is not guaranteed to be set in happens-before order across separate operations.
+  // It is represented in RFC3339 form and is in UTC.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startTime = 2;
+
+  // Represents time when the job was completed. It is not guaranteed to
+  // be set in happens-before order across separate operations.
+  // It is represented in RFC3339 form and is in UTC.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time completionTime = 3;
+
+  // The number of actively running pods.
+  // +optional
+  optional int32 active = 4;
+
+  // The number of pods which reached phase Succeeded.
+  // +optional
+  optional int32 succeeded = 5;
+
+  // The number of pods which reached phase Failed.
+  // +optional
+  optional int32 failed = 6;
+}
+
diff --git a/vendor/k8s.io/api/batch/v1/register.go b/vendor/k8s.io/api/batch/v1/register.go
new file mode 100644
index 0000000..32fa51f
--- /dev/null
+++ b/vendor/k8s.io/api/batch/v1/register.go
@@ -0,0 +1,52 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "batch"
+
+// SchemeGroupVersion is the group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
+	// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+	SchemeBuilder      = runtime.NewSchemeBuilder(addKnownTypes)
+	localSchemeBuilder = &SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&Job{},
+		&JobList{},
+	)
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
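
For context, a minimal sketch of how this registration is typically consumed by client code (the scheme construction and the Recognizes check are illustrative):

package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	if err := batchv1.AddToScheme(scheme); err != nil {
		panic(err)
	}
	// Job and JobList are now resolvable by group/version/kind.
	fmt.Println(scheme.Recognizes(batchv1.SchemeGroupVersion.WithKind("Job")))
}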
diff --git a/vendor/k8s.io/api/batch/v1/types.go b/vendor/k8s.io/api/batch/v1/types.go
new file mode 100644
index 0000000..8dad904
--- /dev/null
+++ b/vendor/k8s.io/api/batch/v1/types.go
@@ -0,0 +1,193 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Job represents the configuration of a single job.
+type Job struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Specification of the desired behavior of a job.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+	// +optional
+	Spec JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+	// Current status of a job.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+	// +optional
+	Status JobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// JobList is a collection of jobs.
+type JobList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// items is the list of Jobs.
+	Items []Job `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// JobSpec describes what the job execution will look like.
+type JobSpec struct {
+
+	// Specifies the maximum desired number of pods the job should
+	// run at any given time. The actual number of pods running in steady state will
+	// be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism),
+	// i.e. when the work left to do is less than max parallelism.
+	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/
+	// +optional
+	Parallelism *int32 `json:"parallelism,omitempty" protobuf:"varint,1,opt,name=parallelism"`
+
+	// Specifies the desired number of successfully finished pods the
+	// job should be run with.  Setting to nil means that the success of any
+	// pod signals the success of all pods, and allows parallelism to have any positive
+	// value.  Setting to 1 means that parallelism is limited to 1 and the success of that
+	// pod signals the success of the job.
+	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/
+	// +optional
+	Completions *int32 `json:"completions,omitempty" protobuf:"varint,2,opt,name=completions"`
+
+	// Specifies the duration in seconds relative to the startTime that the job may be active
+	// before the system tries to terminate it; the value must be a positive integer.
+	// +optional
+	ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,3,opt,name=activeDeadlineSeconds"`
+
+	// Specifies the number of retries before marking this job failed.
+	// Defaults to 6.
+	// +optional
+	BackoffLimit *int32 `json:"backoffLimit,omitempty" protobuf:"varint,7,opt,name=backoffLimit"`
+
+	// TODO: enable it when https://github.com/kubernetes/kubernetes/issues/28486 has been fixed
+	// Optional number of failed pods to retain.
+	// +optional
+	// FailedPodsLimit *int32 `json:"failedPodsLimit,omitempty" protobuf:"varint,9,opt,name=failedPodsLimit"`
+
+	// A label query over pods that should match the pod count.
+	// Normally, the system sets this field for you.
+	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+	// +optional
+	Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"`
+
+	// manualSelector controls generation of pod labels and pod selectors.
+	// Leave `manualSelector` unset unless you are certain what you are doing.
+	// When false or unset, the system picks labels unique to this job
+	// and appends those labels to the pod template.  When true,
+	// the user is responsible for picking unique labels and specifying
+	// the selector.  Failure to pick a unique label may cause this
+	// and other jobs to not function correctly.  However, you may see
+	// `manualSelector=true` in jobs that were created with the old `extensions/v1beta1`
+	// API.
+	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector
+	// +optional
+	ManualSelector *bool `json:"manualSelector,omitempty" protobuf:"varint,5,opt,name=manualSelector"`
+
+	// Describes the pod that will be created when executing a job.
+	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/
+	Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,6,opt,name=template"`
+
+	// ttlSecondsAfterFinished limits the lifetime of a Job that has finished
+	// execution (either Complete or Failed). If this field is set,
+	// ttlSecondsAfterFinished after the Job finishes, it is eligible to be
+	// automatically deleted. When the Job is being deleted, its lifecycle
+	// guarantees (e.g. finalizers) will be honored. If this field is unset,
+	// the Job won't be automatically deleted. If this field is set to zero,
+	// the Job becomes eligible to be deleted immediately after it finishes.
+	// This field is alpha-level and is only honored by servers that enable the
+	// TTLAfterFinished feature.
+	// +optional
+	TTLSecondsAfterFinished *int32 `json:"ttlSecondsAfterFinished,omitempty" protobuf:"varint,8,opt,name=ttlSecondsAfterFinished"`
+}
+
+// JobStatus represents the current state of a Job.
+type JobStatus struct {
+	// The latest available observations of an object's current state.
+	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/
+	// +optional
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	Conditions []JobCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
+
+	// Represents time when the job was acknowledged by the job controller.
+	// It is not guaranteed to be set in happens-before order across separate operations.
+	// It is represented in RFC3339 form and is in UTC.
+	// +optional
+	StartTime *metav1.Time `json:"startTime,omitempty" protobuf:"bytes,2,opt,name=startTime"`
+
+	// Represents time when the job was completed. It is not guaranteed to
+	// be set in happens-before order across separate operations.
+	// It is represented in RFC3339 form and is in UTC.
+	// +optional
+	CompletionTime *metav1.Time `json:"completionTime,omitempty" protobuf:"bytes,3,opt,name=completionTime"`
+
+	// The number of actively running pods.
+	// +optional
+	Active int32 `json:"active,omitempty" protobuf:"varint,4,opt,name=active"`
+
+	// The number of pods which reached phase Succeeded.
+	// +optional
+	Succeeded int32 `json:"succeeded,omitempty" protobuf:"varint,5,opt,name=succeeded"`
+
+	// The number of pods which reached phase Failed.
+	// +optional
+	Failed int32 `json:"failed,omitempty" protobuf:"varint,6,opt,name=failed"`
+}
+
+// JobConditionType is the type of a Job condition.
+type JobConditionType string
+
+// These are valid conditions of a job.
+const (
+	// JobComplete means the job has completed its execution.
+	JobComplete JobConditionType = "Complete"
+	// JobFailed means the job has failed its execution.
+	JobFailed JobConditionType = "Failed"
+)
+
+// JobCondition describes current state of a job.
+type JobCondition struct {
+	// Type of job condition, Complete or Failed.
+	Type JobConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=JobConditionType"`
+	// Status of the condition, one of True, False, Unknown.
+	Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"`
+	// Last time the condition was checked.
+	// +optional
+	LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"`
+	// Last time the condition transitioned from one status to another.
+	// +optional
+	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"`
+	// (brief) reason for the condition's last transition.
+	// +optional
+	Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"`
+	// Human readable message indicating details about last transition.
+	// +optional
+	Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
+}
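
Putting the batch/v1 types together, a hedged sketch of a Job that needs four successful completions with at most two pods running in parallel; the image, command, and names are illustrative assumptions, not part of this change.

package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	parallelism := int32(2) // at most 2 pods at once
	completions := int32(4) // 4 successes finish the job
	backoff := int32(6)     // matches the documented default

	job := batchv1.Job{
		ObjectMeta: metav1.ObjectMeta{Name: "example"},
		Spec: batchv1.JobSpec{
			Parallelism:  &parallelism,
			Completions:  &completions,
			BackoffLimit: &backoff,
			Template: corev1.PodTemplateSpec{
				Spec: corev1.PodSpec{
					// Job pods must not restart in place; the controller
					// replaces failed pods, subject to backoffLimit.
					RestartPolicy: corev1.RestartPolicyNever,
					Containers: []corev1.Container{{
						Name:    "worker",
						Image:   "busybox",
						Command: []string{"sh", "-c", "echo done"},
					}},
				},
			},
		},
	}
	fmt.Println(job.Name)
}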
diff --git a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go
new file mode 100644
index 0000000..d8e2bdd
--- /dev/null
+++ b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go
@@ -0,0 +1,95 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_Job = map[string]string{
+	"":         "Job represents the configuration of a single job.",
+	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"spec":     "Specification of the desired behavior of a job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+	"status":   "Current status of a job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+}
+
+func (Job) SwaggerDoc() map[string]string {
+	return map_Job
+}
+
+var map_JobCondition = map[string]string{
+	"":                   "JobCondition describes current state of a job.",
+	"type":               "Type of job condition, Complete or Failed.",
+	"status":             "Status of the condition, one of True, False, Unknown.",
+	"lastProbeTime":      "Last time the condition was checked.",
+	"lastTransitionTime": "Last time the condition transit from one status to another.",
+	"reason":             "(brief) reason for the condition's last transition.",
+	"message":            "Human readable message indicating details about last transition.",
+}
+
+func (JobCondition) SwaggerDoc() map[string]string {
+	return map_JobCondition
+}
+
+var map_JobList = map[string]string{
+	"":         "JobList is a collection of jobs.",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"items":    "items is the list of Jobs.",
+}
+
+func (JobList) SwaggerDoc() map[string]string {
+	return map_JobList
+}
+
+var map_JobSpec = map[string]string{
+	"":                        "JobSpec describes how the job execution will look like.",
+	"parallelism":             "Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/",
+	"completions":             "Specifies the desired number of successfully finished pods the job should be run with.  Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value.  Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/",
+	"activeDeadlineSeconds":   "Specifies the duration in seconds relative to the startTime that the job may be active before the system tries to terminate it; value must be positive integer",
+	"backoffLimit":            "Specifies the number of retries before marking this job failed. Defaults to 6",
+	"selector":                "A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
+	"manualSelector":          "manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template.  When true, the user is responsible for picking unique labels and specifying the selector.  Failure to pick a unique label may cause this and other jobs to not function correctly.  However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector",
+	"template":                "Describes the pod that will be created when executing a job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/",
+	"ttlSecondsAfterFinished": "ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes. This field is alpha-level and is only honored by servers that enable the TTLAfterFinished feature.",
+}
+
+func (JobSpec) SwaggerDoc() map[string]string {
+	return map_JobSpec
+}
+
+var map_JobStatus = map[string]string{
+	"":               "JobStatus represents the current state of a Job.",
+	"conditions":     "The latest available observations of an object's current state. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/",
+	"startTime":      "Represents time when the job was acknowledged by the job controller. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.",
+	"completionTime": "Represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.",
+	"active":         "The number of actively running pods.",
+	"succeeded":      "The number of pods which reached phase Succeeded.",
+	"failed":         "The number of pods which reached phase Failed.",
+}
+
+func (JobStatus) SwaggerDoc() map[string]string {
+	return map_JobStatus
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
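These generated maps can also be read directly by application code; a small sketch, assuming the usual batchv1 import alias:

package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
)

func main() {
	// Each map is keyed by JSON field name; the empty key holds the
	// type-level description.
	docs := batchv1.Job{}.SwaggerDoc()
	fmt.Println(docs[""])
	fmt.Println(docs["spec"])
}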
diff --git a/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..88cb016
--- /dev/null
+++ b/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go
@@ -0,0 +1,188 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Job) DeepCopyInto(out *Job) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Job.
+func (in *Job) DeepCopy() *Job {
+	if in == nil {
+		return nil
+	}
+	out := new(Job)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Job) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *JobCondition) DeepCopyInto(out *JobCondition) {
+	*out = *in
+	in.LastProbeTime.DeepCopyInto(&out.LastProbeTime)
+	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobCondition.
+func (in *JobCondition) DeepCopy() *JobCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(JobCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *JobList) DeepCopyInto(out *JobList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Job, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobList.
+func (in *JobList) DeepCopy() *JobList {
+	if in == nil {
+		return nil
+	}
+	out := new(JobList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *JobList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *JobSpec) DeepCopyInto(out *JobSpec) {
+	*out = *in
+	if in.Parallelism != nil {
+		in, out := &in.Parallelism, &out.Parallelism
+		*out = new(int32)
+		**out = **in
+	}
+	if in.Completions != nil {
+		in, out := &in.Completions, &out.Completions
+		*out = new(int32)
+		**out = **in
+	}
+	if in.ActiveDeadlineSeconds != nil {
+		in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds
+		*out = new(int64)
+		**out = **in
+	}
+	if in.BackoffLimit != nil {
+		in, out := &in.BackoffLimit, &out.BackoffLimit
+		*out = new(int32)
+		**out = **in
+	}
+	if in.Selector != nil {
+		in, out := &in.Selector, &out.Selector
+		*out = new(metav1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ManualSelector != nil {
+		in, out := &in.ManualSelector, &out.ManualSelector
+		*out = new(bool)
+		**out = **in
+	}
+	in.Template.DeepCopyInto(&out.Template)
+	if in.TTLSecondsAfterFinished != nil {
+		in, out := &in.TTLSecondsAfterFinished, &out.TTLSecondsAfterFinished
+		*out = new(int32)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobSpec.
+func (in *JobSpec) DeepCopy() *JobSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(JobSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *JobStatus) DeepCopyInto(out *JobStatus) {
+	*out = *in
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]JobCondition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.StartTime != nil {
+		in, out := &in.StartTime, &out.StartTime
+		*out = (*in).DeepCopy()
+	}
+	if in.CompletionTime != nil {
+		in, out := &in.CompletionTime, &out.CompletionTime
+		*out = (*in).DeepCopy()
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobStatus.
+func (in *JobStatus) DeepCopy() *JobStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(JobStatus)
+	in.DeepCopyInto(out)
+	return out
+}
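The value of the generated deep copies is aliasing safety: pointer, slice and struct fields are cloned rather than shared. A brief sketch of that guarantee (the values are made up):

package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
)

func main() {
	parallelism := int32(2)
	orig := &batchv1.Job{Spec: batchv1.JobSpec{Parallelism: &parallelism}}

	cp := orig.DeepCopy()
	*cp.Spec.Parallelism = 5 // mutates the copy's own int32, not orig's

	fmt.Println(*orig.Spec.Parallelism, *cp.Spec.Parallelism) // 2 5
}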
diff --git a/vendor/k8s.io/api/batch/v1beta1/doc.go b/vendor/k8s.io/api/batch/v1beta1/doc.go
new file mode 100644
index 0000000..43020ed
--- /dev/null
+++ b/vendor/k8s.io/api/batch/v1beta1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:openapi-gen=true
+
+package v1beta1 // import "k8s.io/api/batch/v1beta1"
diff --git a/vendor/k8s.io/api/batch/v1beta1/generated.proto b/vendor/k8s.io/api/batch/v1beta1/generated.proto
new file mode 100644
index 0000000..043b355
--- /dev/null
+++ b/vendor/k8s.io/api/batch/v1beta1/generated.proto
@@ -0,0 +1,137 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.api.batch.v1beta1;
+
+import "k8s.io/api/batch/v1/generated.proto";
+import "k8s.io/api/core/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1beta1";
+
+// CronJob represents the configuration of a single cron job.
+message CronJob {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Specification of the desired behavior of a cron job, including the schedule.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+  // +optional
+  optional CronJobSpec spec = 2;
+
+  // Current status of a cron job.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+  // +optional
+  optional CronJobStatus status = 3;
+}
+
+// CronJobList is a collection of cron jobs.
+message CronJobList {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // items is the list of CronJobs.
+  repeated CronJob items = 2;
+}
+
+// CronJobSpec describes what the job execution will look like and when it will actually run.
+message CronJobSpec {
+  // The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.
+  optional string schedule = 1;
+
+  // Optional deadline in seconds for starting the job if it misses its scheduled
+  // time for any reason.  Missed job executions will be counted as failed ones.
+  // +optional
+  optional int64 startingDeadlineSeconds = 2;
+
+  // Specifies how to treat concurrent executions of a Job.
+  // Valid values are:
+  // - "Allow" (default): allows CronJobs to run concurrently;
+  // - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet;
+  // - "Replace": cancels currently running job and replaces it with a new one
+  // +optional
+  optional string concurrencyPolicy = 3;
+
+  // This flag tells the controller to suspend subsequent executions; it does
+  // not apply to already started executions.  Defaults to false.
+  // +optional
+  optional bool suspend = 4;
+
+  // Specifies the job that will be created when executing a CronJob.
+  optional JobTemplateSpec jobTemplate = 5;
+
+  // The number of successful finished jobs to retain.
+  // This is a pointer to distinguish between explicit zero and not specified.
+  // Defaults to 3.
+  // +optional
+  optional int32 successfulJobsHistoryLimit = 6;
+
+  // The number of failed finished jobs to retain.
+  // This is a pointer to distinguish between explicit zero and not specified.
+  // Defaults to 1.
+  // +optional
+  optional int32 failedJobsHistoryLimit = 7;
+}
+
+// CronJobStatus represents the current state of a cron job.
+message CronJobStatus {
+  // A list of pointers to currently running jobs.
+  // +optional
+  repeated k8s.io.api.core.v1.ObjectReference active = 1;
+
+  // Information about when the job was last successfully scheduled.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScheduleTime = 4;
+}
+
+// JobTemplate describes a template for creating copies of a predefined pod.
+message JobTemplate {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Defines jobs that will be created from this template.
+  // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+  // +optional
+  optional JobTemplateSpec template = 2;
+}
+
+// JobTemplateSpec describes the data a Job should have when created from a template.
+message JobTemplateSpec {
+  // Standard object's metadata of the jobs created from this template.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Specification of the desired behavior of the job.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+  // +optional
+  optional k8s.io.api.batch.v1.JobSpec spec = 2;
+}
+
diff --git a/vendor/k8s.io/api/batch/v1beta1/register.go b/vendor/k8s.io/api/batch/v1beta1/register.go
new file mode 100644
index 0000000..226de49
--- /dev/null
+++ b/vendor/k8s.io/api/batch/v1beta1/register.go
@@ -0,0 +1,53 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "batch"
+
+// SchemeGroupVersion is the group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
+	// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+	SchemeBuilder      = runtime.NewSchemeBuilder(addKnownTypes)
+	localSchemeBuilder = &SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&JobTemplate{},
+		&CronJob{},
+		&CronJobList{},
+	)
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
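A sketch of how a consumer registers this group/version with a runtime.Scheme; error handling is deliberately minimal:

package main

import (
	batchv1beta1 "k8s.io/api/batch/v1beta1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	// Registers JobTemplate, CronJob and CronJobList under batch/v1beta1,
	// plus the shared meta/v1 types for that group version.
	if err := batchv1beta1.AddToScheme(scheme); err != nil {
		panic(err)
	}
}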
diff --git a/vendor/k8s.io/api/batch/v1beta1/types.go b/vendor/k8s.io/api/batch/v1beta1/types.go
new file mode 100644
index 0000000..cb5c9ba
--- /dev/null
+++ b/vendor/k8s.io/api/batch/v1beta1/types.go
@@ -0,0 +1,158 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	batchv1 "k8s.io/api/batch/v1"
+	"k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// JobTemplate describes a template for creating copies of a predefined pod.
+type JobTemplate struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Defines jobs that will be created from this template.
+	// https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+	// +optional
+	Template JobTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"`
+}
+
+// JobTemplateSpec describes the data a Job should have when created from a template.
+type JobTemplateSpec struct {
+	// Standard object's metadata of the jobs created from this template.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Specification of the desired behavior of the job.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+	// +optional
+	Spec batchv1.JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// CronJob represents the configuration of a single cron job.
+type CronJob struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Specification of the desired behavior of a cron job, including the schedule.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+	// +optional
+	Spec CronJobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+	// Current status of a cron job.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+	// +optional
+	Status CronJobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// CronJobList is a collection of cron jobs.
+type CronJobList struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// items is the list of CronJobs.
+	Items []CronJob `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// CronJobSpec describes what the job execution will look like and when it will actually run.
+type CronJobSpec struct {
+
+	// The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.
+	Schedule string `json:"schedule" protobuf:"bytes,1,opt,name=schedule"`
+
+	// Optional deadline in seconds for starting the job if it misses its scheduled
+	// time for any reason.  Missed job executions will be counted as failed ones.
+	// +optional
+	StartingDeadlineSeconds *int64 `json:"startingDeadlineSeconds,omitempty" protobuf:"varint,2,opt,name=startingDeadlineSeconds"`
+
+	// Specifies how to treat concurrent executions of a Job.
+	// Valid values are:
+	// - "Allow" (default): allows CronJobs to run concurrently;
+	// - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet;
+	// - "Replace": cancels currently running job and replaces it with a new one
+	// +optional
+	ConcurrencyPolicy ConcurrencyPolicy `json:"concurrencyPolicy,omitempty" protobuf:"bytes,3,opt,name=concurrencyPolicy,casttype=ConcurrencyPolicy"`
+
+	// This flag tells the controller to suspend subsequent executions; it does
+	// not apply to already started executions.  Defaults to false.
+	// +optional
+	Suspend *bool `json:"suspend,omitempty" protobuf:"varint,4,opt,name=suspend"`
+
+	// Specifies the job that will be created when executing a CronJob.
+	JobTemplate JobTemplateSpec `json:"jobTemplate" protobuf:"bytes,5,opt,name=jobTemplate"`
+
+	// The number of successful finished jobs to retain.
+	// This is a pointer to distinguish between explicit zero and not specified.
+	// Defaults to 3.
+	// +optional
+	SuccessfulJobsHistoryLimit *int32 `json:"successfulJobsHistoryLimit,omitempty" protobuf:"varint,6,opt,name=successfulJobsHistoryLimit"`
+
+	// The number of failed finished jobs to retain.
+	// This is a pointer to distinguish between explicit zero and not specified.
+	// Defaults to 1.
+	// +optional
+	FailedJobsHistoryLimit *int32 `json:"failedJobsHistoryLimit,omitempty" protobuf:"varint,7,opt,name=failedJobsHistoryLimit"`
+}
+
+// ConcurrencyPolicy describes how the job will be handled.
+// Only one of the following concurrent policies may be specified.
+// If none of the following policies is specified, the default one
+// is AllowConcurrent.
+type ConcurrencyPolicy string
+
+const (
+	// AllowConcurrent allows CronJobs to run concurrently.
+	AllowConcurrent ConcurrencyPolicy = "Allow"
+
+	// ForbidConcurrent forbids concurrent runs, skipping next run if previous
+	// hasn't finished yet.
+	ForbidConcurrent ConcurrencyPolicy = "Forbid"
+
+	// ReplaceConcurrent cancels currently running job and replaces it with a new one.
+	ReplaceConcurrent ConcurrencyPolicy = "Replace"
+)
+
+// CronJobStatus represents the current state of a cron job.
+type CronJobStatus struct {
+	// A list of pointers to currently running jobs.
+	// +optional
+	Active []v1.ObjectReference `json:"active,omitempty" protobuf:"bytes,1,rep,name=active"`
+
+	// Information about when the job was last successfully scheduled.
+	// +optional
+	LastScheduleTime *metav1.Time `json:"lastScheduleTime,omitempty" protobuf:"bytes,4,opt,name=lastScheduleTime"`
+}
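To show how the pieces above fit together, an illustrative CronJob literal; the name, schedule and history limit are made-up values and the pod template is elided:

package main

import (
	batchv1beta1 "k8s.io/api/batch/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	keep := int32(3)
	cj := batchv1beta1.CronJob{
		ObjectMeta: metav1.ObjectMeta{Name: "nightly-report"},
		Spec: batchv1beta1.CronJobSpec{
			Schedule:                   "0 2 * * *", // 02:00 every day
			ConcurrencyPolicy:          batchv1beta1.ForbidConcurrent,
			JobTemplate:                batchv1beta1.JobTemplateSpec{}, // pod spec elided
			SuccessfulJobsHistoryLimit: &keep, // pointer distinguishes 0 from unset
		},
	}
	_ = cj
}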
diff --git a/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go
new file mode 100644
index 0000000..abbdfec
--- /dev/null
+++ b/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go
@@ -0,0 +1,96 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored by the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multi-line comments or blocks that you want ignored, use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_CronJob = map[string]string{
+	"":         "CronJob represents the configuration of a single cron job.",
+	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"spec":     "Specification of the desired behavior of a cron job, including the schedule. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+	"status":   "Current status of a cron job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+}
+
+func (CronJob) SwaggerDoc() map[string]string {
+	return map_CronJob
+}
+
+var map_CronJobList = map[string]string{
+	"":         "CronJobList is a collection of cron jobs.",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"items":    "items is the list of CronJobs.",
+}
+
+func (CronJobList) SwaggerDoc() map[string]string {
+	return map_CronJobList
+}
+
+var map_CronJobSpec = map[string]string{
+	"":                           "CronJobSpec describes how the job execution will look like and when it will actually run.",
+	"schedule":                   "The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.",
+	"startingDeadlineSeconds":    "Optional deadline in seconds for starting the job if it misses scheduled time for any reason.  Missed jobs executions will be counted as failed ones.",
+	"concurrencyPolicy":          "Specifies how to treat concurrent executions of a Job. Valid values are: - \"Allow\" (default): allows CronJobs to run concurrently; - \"Forbid\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \"Replace\": cancels currently running job and replaces it with a new one",
+	"suspend":                    "This flag tells the controller to suspend subsequent executions, it does not apply to already started executions.  Defaults to false.",
+	"jobTemplate":                "Specifies the job that will be created when executing a CronJob.",
+	"successfulJobsHistoryLimit": "The number of successful finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified. Defaults to 3.",
+	"failedJobsHistoryLimit":     "The number of failed finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.",
+}
+
+func (CronJobSpec) SwaggerDoc() map[string]string {
+	return map_CronJobSpec
+}
+
+var map_CronJobStatus = map[string]string{
+	"":                 "CronJobStatus represents the current state of a cron job.",
+	"active":           "A list of pointers to currently running jobs.",
+	"lastScheduleTime": "Information when was the last time the job was successfully scheduled.",
+}
+
+func (CronJobStatus) SwaggerDoc() map[string]string {
+	return map_CronJobStatus
+}
+
+var map_JobTemplate = map[string]string{
+	"":         "JobTemplate describes a template for creating copies of a predefined pod.",
+	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"template": "Defines jobs that will be created from this template. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+}
+
+func (JobTemplate) SwaggerDoc() map[string]string {
+	return map_JobTemplate
+}
+
+var map_JobTemplateSpec = map[string]string{
+	"":         "JobTemplateSpec describes the data a Job should have when created from a template",
+	"metadata": "Standard object's metadata of the jobs created from this template. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"spec":     "Specification of the desired behavior of the job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+}
+
+func (JobTemplateSpec) SwaggerDoc() map[string]string {
+	return map_JobTemplateSpec
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..1c8bc44
--- /dev/null
+++ b/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go
@@ -0,0 +1,194 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	v1 "k8s.io/api/core/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CronJob) DeepCopyInto(out *CronJob) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronJob.
+func (in *CronJob) DeepCopy() *CronJob {
+	if in == nil {
+		return nil
+	}
+	out := new(CronJob)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CronJob) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CronJobList) DeepCopyInto(out *CronJobList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]CronJob, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronJobList.
+func (in *CronJobList) DeepCopy() *CronJobList {
+	if in == nil {
+		return nil
+	}
+	out := new(CronJobList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CronJobList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CronJobSpec) DeepCopyInto(out *CronJobSpec) {
+	*out = *in
+	if in.StartingDeadlineSeconds != nil {
+		in, out := &in.StartingDeadlineSeconds, &out.StartingDeadlineSeconds
+		*out = new(int64)
+		**out = **in
+	}
+	if in.Suspend != nil {
+		in, out := &in.Suspend, &out.Suspend
+		*out = new(bool)
+		**out = **in
+	}
+	in.JobTemplate.DeepCopyInto(&out.JobTemplate)
+	if in.SuccessfulJobsHistoryLimit != nil {
+		in, out := &in.SuccessfulJobsHistoryLimit, &out.SuccessfulJobsHistoryLimit
+		*out = new(int32)
+		**out = **in
+	}
+	if in.FailedJobsHistoryLimit != nil {
+		in, out := &in.FailedJobsHistoryLimit, &out.FailedJobsHistoryLimit
+		*out = new(int32)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronJobSpec.
+func (in *CronJobSpec) DeepCopy() *CronJobSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(CronJobSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CronJobStatus) DeepCopyInto(out *CronJobStatus) {
+	*out = *in
+	if in.Active != nil {
+		in, out := &in.Active, &out.Active
+		*out = make([]v1.ObjectReference, len(*in))
+		copy(*out, *in)
+	}
+	if in.LastScheduleTime != nil {
+		in, out := &in.LastScheduleTime, &out.LastScheduleTime
+		*out = (*in).DeepCopy()
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronJobStatus.
+func (in *CronJobStatus) DeepCopy() *CronJobStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(CronJobStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *JobTemplate) DeepCopyInto(out *JobTemplate) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Template.DeepCopyInto(&out.Template)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobTemplate.
+func (in *JobTemplate) DeepCopy() *JobTemplate {
+	if in == nil {
+		return nil
+	}
+	out := new(JobTemplate)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *JobTemplate) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *JobTemplateSpec) DeepCopyInto(out *JobTemplateSpec) {
+	*out = *in
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobTemplateSpec.
+func (in *JobTemplateSpec) DeepCopy() *JobTemplateSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(JobTemplateSpec)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/vendor/k8s.io/api/batch/v2alpha1/doc.go b/vendor/k8s.io/api/batch/v2alpha1/doc.go
new file mode 100644
index 0000000..f4ed01a
--- /dev/null
+++ b/vendor/k8s.io/api/batch/v2alpha1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:openapi-gen=true
+
+package v2alpha1 // import "k8s.io/api/batch/v2alpha1"
diff --git a/vendor/k8s.io/api/batch/v2alpha1/generated.proto b/vendor/k8s.io/api/batch/v2alpha1/generated.proto
new file mode 100644
index 0000000..4321c33
--- /dev/null
+++ b/vendor/k8s.io/api/batch/v2alpha1/generated.proto
@@ -0,0 +1,135 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.api.batch.v2alpha1;
+
+import "k8s.io/api/batch/v1/generated.proto";
+import "k8s.io/api/core/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v2alpha1";
+
+// CronJob represents the configuration of a single cron job.
+message CronJob {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Specification of the desired behavior of a cron job, including the schedule.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+  // +optional
+  optional CronJobSpec spec = 2;
+
+  // Current status of a cron job.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+  // +optional
+  optional CronJobStatus status = 3;
+}
+
+// CronJobList is a collection of cron jobs.
+message CronJobList {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // items is the list of CronJobs.
+  repeated CronJob items = 2;
+}
+
+// CronJobSpec describes what the job execution will look like and when it will actually run.
+message CronJobSpec {
+  // The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.
+  optional string schedule = 1;
+
+  // Optional deadline in seconds for starting the job if it misses its scheduled
+  // time for any reason.  Missed job executions will be counted as failed ones.
+  // +optional
+  optional int64 startingDeadlineSeconds = 2;
+
+  // Specifies how to treat concurrent executions of a Job.
+  // Valid values are:
+  // - "Allow" (default): allows CronJobs to run concurrently;
+  // - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet;
+  // - "Replace": cancels currently running job and replaces it with a new one
+  // +optional
+  optional string concurrencyPolicy = 3;
+
+  // This flag tells the controller to suspend subsequent executions; it does
+  // not apply to already started executions.  Defaults to false.
+  // +optional
+  optional bool suspend = 4;
+
+  // Specifies the job that will be created when executing a CronJob.
+  optional JobTemplateSpec jobTemplate = 5;
+
+  // The number of successful finished jobs to retain.
+  // This is a pointer to distinguish between explicit zero and not specified.
+  // +optional
+  optional int32 successfulJobsHistoryLimit = 6;
+
+  // The number of failed finished jobs to retain.
+  // This is a pointer to distinguish between explicit zero and not specified.
+  // +optional
+  optional int32 failedJobsHistoryLimit = 7;
+}
+
+// CronJobStatus represents the current state of a cron job.
+message CronJobStatus {
+  // A list of pointers to currently running jobs.
+  // +optional
+  repeated k8s.io.api.core.v1.ObjectReference active = 1;
+
+  // Information about when the job was last successfully scheduled.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScheduleTime = 4;
+}
+
+// JobTemplate describes a template for creating copies of a predefined pod.
+message JobTemplate {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Defines jobs that will be created from this template.
+  // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+  // +optional
+  optional JobTemplateSpec template = 2;
+}
+
+// JobTemplateSpec describes the data a Job should have when created from a template.
+message JobTemplateSpec {
+  // Standard object's metadata of the jobs created from this template.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Specification of the desired behavior of the job.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+  // +optional
+  optional k8s.io.api.batch.v1.JobSpec spec = 2;
+}
+
diff --git a/vendor/k8s.io/api/batch/v2alpha1/register.go b/vendor/k8s.io/api/batch/v2alpha1/register.go
new file mode 100644
index 0000000..ac7fa50
--- /dev/null
+++ b/vendor/k8s.io/api/batch/v2alpha1/register.go
@@ -0,0 +1,53 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v2alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "batch"
+
+// SchemeGroupVersion is the group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v2alpha1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
+	// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+	SchemeBuilder      = runtime.NewSchemeBuilder(addKnownTypes)
+	localSchemeBuilder = &SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&JobTemplate{},
+		&CronJob{},
+		&CronJobList{},
+	)
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
diff --git a/vendor/k8s.io/api/batch/v2alpha1/types.go b/vendor/k8s.io/api/batch/v2alpha1/types.go
new file mode 100644
index 0000000..cccff94
--- /dev/null
+++ b/vendor/k8s.io/api/batch/v2alpha1/types.go
@@ -0,0 +1,156 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v2alpha1
+
+import (
+	batchv1 "k8s.io/api/batch/v1"
+	"k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// JobTemplate describes a template for creating copies of a predefined pod.
+type JobTemplate struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Defines jobs that will be created from this template.
+	// https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+	// +optional
+	Template JobTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"`
+}
+
+// JobTemplateSpec describes the data a Job should have when created from a template.
+type JobTemplateSpec struct {
+	// Standard object's metadata of the jobs created from this template.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Specification of the desired behavior of the job.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+	// +optional
+	Spec batchv1.JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// CronJob represents the configuration of a single cron job.
+type CronJob struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Specification of the desired behavior of a cron job, including the schedule.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+	// +optional
+	Spec CronJobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+	// Current status of a cron job.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+	// +optional
+	Status CronJobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// CronJobList is a collection of cron jobs.
+type CronJobList struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// items is the list of CronJobs.
+	Items []CronJob `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// CronJobSpec describes what the job execution will look like and when it will actually run.
+type CronJobSpec struct {
+
+	// The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.
+	Schedule string `json:"schedule" protobuf:"bytes,1,opt,name=schedule"`
+
+	// Optional deadline in seconds for starting the job if it misses its scheduled
+	// time for any reason.  Missed job executions will be counted as failed ones.
+	// +optional
+	StartingDeadlineSeconds *int64 `json:"startingDeadlineSeconds,omitempty" protobuf:"varint,2,opt,name=startingDeadlineSeconds"`
+
+	// Specifies how to treat concurrent executions of a Job.
+	// Valid values are:
+	// - "Allow" (default): allows CronJobs to run concurrently;
+	// - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet;
+	// - "Replace": cancels currently running job and replaces it with a new one
+	// +optional
+	ConcurrencyPolicy ConcurrencyPolicy `json:"concurrencyPolicy,omitempty" protobuf:"bytes,3,opt,name=concurrencyPolicy,casttype=ConcurrencyPolicy"`
+
+	// This flag tells the controller to suspend subsequent executions; it does
+	// not apply to already started executions.  Defaults to false.
+	// +optional
+	Suspend *bool `json:"suspend,omitempty" protobuf:"varint,4,opt,name=suspend"`
+
+	// Specifies the job that will be created when executing a CronJob.
+	JobTemplate JobTemplateSpec `json:"jobTemplate" protobuf:"bytes,5,opt,name=jobTemplate"`
+
+	// The number of successful finished jobs to retain.
+	// This is a pointer to distinguish between explicit zero and not specified.
+	// +optional
+	SuccessfulJobsHistoryLimit *int32 `json:"successfulJobsHistoryLimit,omitempty" protobuf:"varint,6,opt,name=successfulJobsHistoryLimit"`
+
+	// The number of failed finished jobs to retain.
+	// This is a pointer to distinguish between explicit zero and not specified.
+	// +optional
+	FailedJobsHistoryLimit *int32 `json:"failedJobsHistoryLimit,omitempty" protobuf:"varint,7,opt,name=failedJobsHistoryLimit"`
+}
+
+// ConcurrencyPolicy describes how the job will be handled.
+// Only one of the following concurrent policies may be specified.
+// If none of the following policies is specified, the default one
+// is AllowConcurrent.
+type ConcurrencyPolicy string
+
+const (
+	// AllowConcurrent allows CronJobs to run concurrently.
+	AllowConcurrent ConcurrencyPolicy = "Allow"
+
+	// ForbidConcurrent forbids concurrent runs, skipping next run if previous
+	// hasn't finished yet.
+	ForbidConcurrent ConcurrencyPolicy = "Forbid"
+
+	// ReplaceConcurrent cancels currently running job and replaces it with a new one.
+	ReplaceConcurrent ConcurrencyPolicy = "Replace"
+)
+
+// CronJobStatus represents the current state of a cron job.
+type CronJobStatus struct {
+	// A list of pointers to currently running jobs.
+	// +optional
+	Active []v1.ObjectReference `json:"active,omitempty" protobuf:"bytes,1,rep,name=active"`
+
+	// Information about when the job was last successfully scheduled.
+	// +optional
+	LastScheduleTime *metav1.Time `json:"lastScheduleTime,omitempty" protobuf:"bytes,4,opt,name=lastScheduleTime"`
+}
diff --git a/vendor/k8s.io/api/batch/v2alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v2alpha1/types_swagger_doc_generated.go
new file mode 100644
index 0000000..f448a92
--- /dev/null
+++ b/vendor/k8s.io/api/batch/v2alpha1/types_swagger_doc_generated.go
@@ -0,0 +1,96 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v2alpha1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored by the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multi-line comments or blocks that you want ignored, use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_CronJob = map[string]string{
+	"":         "CronJob represents the configuration of a single cron job.",
+	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"spec":     "Specification of the desired behavior of a cron job, including the schedule. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+	"status":   "Current status of a cron job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+}
+
+func (CronJob) SwaggerDoc() map[string]string {
+	return map_CronJob
+}
+
+var map_CronJobList = map[string]string{
+	"":         "CronJobList is a collection of cron jobs.",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"items":    "items is the list of CronJobs.",
+}
+
+func (CronJobList) SwaggerDoc() map[string]string {
+	return map_CronJobList
+}
+
+var map_CronJobSpec = map[string]string{
+	"":                           "CronJobSpec describes how the job execution will look like and when it will actually run.",
+	"schedule":                   "The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.",
+	"startingDeadlineSeconds":    "Optional deadline in seconds for starting the job if it misses scheduled time for any reason.  Missed jobs executions will be counted as failed ones.",
+	"concurrencyPolicy":          "Specifies how to treat concurrent executions of a Job. Valid values are: - \"Allow\" (default): allows CronJobs to run concurrently; - \"Forbid\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \"Replace\": cancels currently running job and replaces it with a new one",
+	"suspend":                    "This flag tells the controller to suspend subsequent executions, it does not apply to already started executions.  Defaults to false.",
+	"jobTemplate":                "Specifies the job that will be created when executing a CronJob.",
+	"successfulJobsHistoryLimit": "The number of successful finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified.",
+	"failedJobsHistoryLimit":     "The number of failed finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified.",
+}
+
+func (CronJobSpec) SwaggerDoc() map[string]string {
+	return map_CronJobSpec
+}
+
+var map_CronJobStatus = map[string]string{
+	"":                 "CronJobStatus represents the current state of a cron job.",
+	"active":           "A list of pointers to currently running jobs.",
+	"lastScheduleTime": "Information when was the last time the job was successfully scheduled.",
+}
+
+func (CronJobStatus) SwaggerDoc() map[string]string {
+	return map_CronJobStatus
+}
+
+var map_JobTemplate = map[string]string{
+	"":         "JobTemplate describes a template for creating copies of a predefined pod.",
+	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"template": "Defines jobs that will be created from this template. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+}
+
+func (JobTemplate) SwaggerDoc() map[string]string {
+	return map_JobTemplate
+}
+
+var map_JobTemplateSpec = map[string]string{
+	"":         "JobTemplateSpec describes the data a Job should have when created from a template",
+	"metadata": "Standard object's metadata of the jobs created from this template. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"spec":     "Specification of the desired behavior of the job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+}
+
+func (JobTemplateSpec) SwaggerDoc() map[string]string {
+	return map_JobTemplateSpec
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
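The SwaggerDoc maps above are keyed by JSON field name, with the empty key holding the type-level description. A minimal sketch of how a consumer might read them, assuming the vendored k8s.io/api packages in this change are on the import path:

    package main

    import (
    	"fmt"

    	batchv2alpha1 "k8s.io/api/batch/v2alpha1"
    )

    func main() {
    	docs := batchv2alpha1.CronJobSpec{}.SwaggerDoc()
    	fmt.Println("type:", docs[""])             // type-level description
    	fmt.Println("schedule:", docs["schedule"]) // per-field description
    }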
diff --git a/vendor/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..20d87e7
--- /dev/null
+++ b/vendor/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,194 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v2alpha1
+
+import (
+	v1 "k8s.io/api/core/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CronJob) DeepCopyInto(out *CronJob) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronJob.
+func (in *CronJob) DeepCopy() *CronJob {
+	if in == nil {
+		return nil
+	}
+	out := new(CronJob)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CronJob) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CronJobList) DeepCopyInto(out *CronJobList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]CronJob, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronJobList.
+func (in *CronJobList) DeepCopy() *CronJobList {
+	if in == nil {
+		return nil
+	}
+	out := new(CronJobList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CronJobList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CronJobSpec) DeepCopyInto(out *CronJobSpec) {
+	*out = *in
+	if in.StartingDeadlineSeconds != nil {
+		in, out := &in.StartingDeadlineSeconds, &out.StartingDeadlineSeconds
+		*out = new(int64)
+		**out = **in
+	}
+	if in.Suspend != nil {
+		in, out := &in.Suspend, &out.Suspend
+		*out = new(bool)
+		**out = **in
+	}
+	in.JobTemplate.DeepCopyInto(&out.JobTemplate)
+	if in.SuccessfulJobsHistoryLimit != nil {
+		in, out := &in.SuccessfulJobsHistoryLimit, &out.SuccessfulJobsHistoryLimit
+		*out = new(int32)
+		**out = **in
+	}
+	if in.FailedJobsHistoryLimit != nil {
+		in, out := &in.FailedJobsHistoryLimit, &out.FailedJobsHistoryLimit
+		*out = new(int32)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronJobSpec.
+func (in *CronJobSpec) DeepCopy() *CronJobSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(CronJobSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CronJobStatus) DeepCopyInto(out *CronJobStatus) {
+	*out = *in
+	if in.Active != nil {
+		in, out := &in.Active, &out.Active
+		*out = make([]v1.ObjectReference, len(*in))
+		copy(*out, *in)
+	}
+	if in.LastScheduleTime != nil {
+		in, out := &in.LastScheduleTime, &out.LastScheduleTime
+		*out = (*in).DeepCopy()
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronJobStatus.
+func (in *CronJobStatus) DeepCopy() *CronJobStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(CronJobStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *JobTemplate) DeepCopyInto(out *JobTemplate) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Template.DeepCopyInto(&out.Template)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobTemplate.
+func (in *JobTemplate) DeepCopy() *JobTemplate {
+	if in == nil {
+		return nil
+	}
+	out := new(JobTemplate)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *JobTemplate) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *JobTemplateSpec) DeepCopyInto(out *JobTemplateSpec) {
+	*out = *in
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobTemplateSpec.
+func (in *JobTemplateSpec) DeepCopy() *JobTemplateSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(JobTemplateSpec)
+	in.DeepCopyInto(out)
+	return out
+}
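CronJobSpec carries pointer fields (Suspend and the two history limits), so a plain struct assignment would alias them; the generated DeepCopy duplicates the pointed-to values. A small sketch illustrating the difference:

    package main

    import (
    	"fmt"

    	batchv2alpha1 "k8s.io/api/batch/v2alpha1"
    )

    func main() {
    	suspend := false
    	orig := &batchv2alpha1.CronJob{}
    	orig.Spec.Suspend = &suspend

    	clone := orig.DeepCopy()
    	*clone.Spec.Suspend = true // mutates only the clone's copy of the pointer target

    	fmt.Println(*orig.Spec.Suspend, *clone.Spec.Suspend) // false true
    }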
diff --git a/vendor/k8s.io/api/certificates/v1beta1/doc.go b/vendor/k8s.io/api/certificates/v1beta1/doc.go
new file mode 100644
index 0000000..8473b64
--- /dev/null
+++ b/vendor/k8s.io/api/certificates/v1beta1/doc.go
@@ -0,0 +1,22 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:openapi-gen=true
+
+// +groupName=certificates.k8s.io
+
+package v1beta1 // import "k8s.io/api/certificates/v1beta1"
diff --git a/vendor/k8s.io/api/certificates/v1beta1/generated.proto b/vendor/k8s.io/api/certificates/v1beta1/generated.proto
new file mode 100644
index 0000000..5200224
--- /dev/null
+++ b/vendor/k8s.io/api/certificates/v1beta1/generated.proto
@@ -0,0 +1,121 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.api.certificates.v1beta1;
+
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1beta1";
+
+// Describes a certificate signing request
+message CertificateSigningRequest {
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // The certificate request itself and any additional information.
+  // +optional
+  optional CertificateSigningRequestSpec spec = 2;
+
+  // Derived information about the request.
+  // +optional
+  optional CertificateSigningRequestStatus status = 3;
+}
+
+message CertificateSigningRequestCondition {
+  // request approval state, currently Approved or Denied.
+  optional string type = 1;
+
+  // brief reason for the request state
+  // +optional
+  optional string reason = 2;
+
+  // human readable message with details about the request state
+  // +optional
+  optional string message = 3;
+
+  // timestamp for the last update to this condition
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 4;
+}
+
+message CertificateSigningRequestList {
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  repeated CertificateSigningRequest items = 2;
+}
+
+// This information is immutable after the request is created. Only the Request
+// and Usages fields can be set on creation, other fields are derived by
+// Kubernetes and cannot be modified by users.
+message CertificateSigningRequestSpec {
+  // Base64-encoded PKCS#10 CSR data
+  optional bytes request = 1;
+
+  // usages specifies a set of usage contexts the key will be
+  // valid for.
+  // See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3
+  //      https://tools.ietf.org/html/rfc5280#section-4.2.1.12
+  repeated string usages = 5;
+
+  // Information about the requesting user.
+  // See user.Info interface for details.
+  // +optional
+  optional string username = 2;
+
+  // UID information about the requesting user.
+  // See user.Info interface for details.
+  // +optional
+  optional string uid = 3;
+
+  // Group information about the requesting user.
+  // See user.Info interface for details.
+  // +optional
+  repeated string groups = 4;
+
+  // Extra information about the requesting user.
+  // See user.Info interface for details.
+  // +optional
+  map<string, ExtraValue> extra = 6;
+}
+
+message CertificateSigningRequestStatus {
+  // Conditions applied to the request, such as approval or denial.
+  // +optional
+  repeated CertificateSigningRequestCondition conditions = 1;
+
+  // If request was approved, the controller will place the issued certificate here.
+  // +optional
+  optional bytes certificate = 2;
+}
+
+// ExtraValue masks the value so protobuf can generate
+// +protobuf.nullable=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+message ExtraValue {
+  // items, if empty, will result in an empty slice
+
+  repeated string items = 1;
+}
+
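The proto field numbers above mirror the protobuf tags on the Go structs defined later in this change; the JSON wire form uses the lower-camel field names. A quick sketch of the JSON shape of a condition (the reason string is illustrative):

    package main

    import (
    	"encoding/json"
    	"fmt"

    	certsv1beta1 "k8s.io/api/certificates/v1beta1"
    )

    func main() {
    	cond := certsv1beta1.CertificateSigningRequestCondition{
    		Type:   certsv1beta1.CertificateApproved,
    		Reason: "AutoApproved", // hypothetical reason string
    	}
    	out, _ := json.Marshal(cond)
    	fmt.Println(string(out)) // {"type":"Approved","reason":"AutoApproved","lastUpdateTime":null}
    }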
diff --git a/vendor/k8s.io/api/certificates/v1beta1/register.go b/vendor/k8s.io/api/certificates/v1beta1/register.go
new file mode 100644
index 0000000..b4f3af9
--- /dev/null
+++ b/vendor/k8s.io/api/certificates/v1beta1/register.go
@@ -0,0 +1,59 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "certificates.k8s.io"
+
+// SchemeGroupVersion is the group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
+
+// Kind takes an unqualified kind and returns a Group qualified GroupKind
+func Kind(kind string) schema.GroupKind {
+	return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
+	// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+	SchemeBuilder      = runtime.NewSchemeBuilder(addKnownTypes)
+	localSchemeBuilder = &SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&CertificateSigningRequest{},
+		&CertificateSigningRequestList{},
+	)
+
+	// Add the watch version that applies
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
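AddToScheme is how client code wires these types into a runtime.Scheme, while Kind and Resource qualify plain names with the group. A minimal sketch, assuming the vendored apimachinery packages in this change:

    package main

    import (
    	"fmt"

    	certsv1beta1 "k8s.io/api/certificates/v1beta1"
    	"k8s.io/apimachinery/pkg/runtime"
    )

    func main() {
    	scheme := runtime.NewScheme()
    	if err := certsv1beta1.AddToScheme(scheme); err != nil {
    		panic(err)
    	}
    	gvk := certsv1beta1.SchemeGroupVersion.WithKind("CertificateSigningRequest")
    	fmt.Println(certsv1beta1.Kind("CertificateSigningRequest")) // CertificateSigningRequest.certificates.k8s.io
    	fmt.Println(scheme.Recognizes(gvk))                         // true
    }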
diff --git a/vendor/k8s.io/api/certificates/v1beta1/types.go b/vendor/k8s.io/api/certificates/v1beta1/types.go
new file mode 100644
index 0000000..bb9e82d
--- /dev/null
+++ b/vendor/k8s.io/api/certificates/v1beta1/types.go
@@ -0,0 +1,155 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	"fmt"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Describes a certificate signing request
+type CertificateSigningRequest struct {
+	metav1.TypeMeta `json:",inline"`
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// The certificate request itself and any additional information.
+	// +optional
+	Spec CertificateSigningRequestSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+	// Derived information about the request.
+	// +optional
+	Status CertificateSigningRequestStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// This information is immutable after the request is created. Only the Request
+// and Usages fields can be set on creation, other fields are derived by
+// Kubernetes and cannot be modified by users.
+type CertificateSigningRequestSpec struct {
+	// Base64-encoded PKCS#10 CSR data
+	Request []byte `json:"request" protobuf:"bytes,1,opt,name=request"`
+
+	// usages specifies a set of usage contexts the key will be
+	// valid for.
+	// See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3
+	//      https://tools.ietf.org/html/rfc5280#section-4.2.1.12
+	Usages []KeyUsage `json:"usages,omitempty" protobuf:"bytes,5,opt,name=usages"`
+
+	// Information about the requesting user.
+	// See user.Info interface for details.
+	// +optional
+	Username string `json:"username,omitempty" protobuf:"bytes,2,opt,name=username"`
+	// UID information about the requesting user.
+	// See user.Info interface for details.
+	// +optional
+	UID string `json:"uid,omitempty" protobuf:"bytes,3,opt,name=uid"`
+	// Group information about the requesting user.
+	// See user.Info interface for details.
+	// +optional
+	Groups []string `json:"groups,omitempty" protobuf:"bytes,4,rep,name=groups"`
+	// Extra information about the requesting user.
+	// See user.Info interface for details.
+	// +optional
+	Extra map[string]ExtraValue `json:"extra,omitempty" protobuf:"bytes,6,rep,name=extra"`
+}
+
+// ExtraValue masks the value so protobuf can generate
+// +protobuf.nullable=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+type ExtraValue []string
+
+func (t ExtraValue) String() string {
+	return fmt.Sprintf("%v", []string(t))
+}
+
+type CertificateSigningRequestStatus struct {
+	// Conditions applied to the request, such as approval or denial.
+	// +optional
+	Conditions []CertificateSigningRequestCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"`
+
+	// If request was approved, the controller will place the issued certificate here.
+	// +optional
+	Certificate []byte `json:"certificate,omitempty" protobuf:"bytes,2,opt,name=certificate"`
+}
+
+type RequestConditionType string
+
+// These are the possible conditions for a certificate request.
+const (
+	CertificateApproved RequestConditionType = "Approved"
+	CertificateDenied   RequestConditionType = "Denied"
+)
+
+type CertificateSigningRequestCondition struct {
+	// request approval state, currently Approved or Denied.
+	Type RequestConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=RequestConditionType"`
+	// brief reason for the request state
+	// +optional
+	Reason string `json:"reason,omitempty" protobuf:"bytes,2,opt,name=reason"`
+	// human readable message with details about the request state
+	// +optional
+	Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"`
+	// timestamp for the last update to this condition
+	// +optional
+	LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,4,opt,name=lastUpdateTime"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+type CertificateSigningRequestList struct {
+	metav1.TypeMeta `json:",inline"`
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	Items []CertificateSigningRequest `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// KeyUsage specifies valid usage contexts for keys.
+// See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3
+//      https://tools.ietf.org/html/rfc5280#section-4.2.1.12
+type KeyUsage string
+
+const (
+	UsageSigning            KeyUsage = "signing"
+	UsageDigitalSignature   KeyUsage = "digital signature"
+	UsageContentCommittment KeyUsage = "content commitment"
+	UsageKeyEncipherment    KeyUsage = "key encipherment"
+	UsageKeyAgreement       KeyUsage = "key agreement"
+	UsageDataEncipherment   KeyUsage = "data encipherment"
+	UsageCertSign           KeyUsage = "cert sign"
+	UsageCRLSign            KeyUsage = "crl sign"
+	UsageEncipherOnly       KeyUsage = "encipher only"
+	UsageDecipherOnly       KeyUsage = "decipher only"
+	UsageAny                KeyUsage = "any"
+	UsageServerAuth         KeyUsage = "server auth"
+	UsageClientAuth         KeyUsage = "client auth"
+	UsageCodeSigning        KeyUsage = "code signing"
+	UsageEmailProtection    KeyUsage = "email protection"
+	UsageSMIME              KeyUsage = "s/mime"
+	UsageIPsecEndSystem     KeyUsage = "ipsec end system"
+	UsageIPsecTunnel        KeyUsage = "ipsec tunnel"
+	UsageIPsecUser          KeyUsage = "ipsec user"
+	UsageTimestamping       KeyUsage = "timestamping"
+	UsageOCSPSigning        KeyUsage = "ocsp signing"
+	UsageMicrosoftSGC       KeyUsage = "microsoft sgc"
+	UsageNetscapSGC         KeyUsage = "netscape sgc"
+)
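Putting the spec together: Request holds a PEM-encoded PKCS#10 CSR (the []byte is base64-encoded in the JSON representation), and Usages picks from the KeyUsage constants above. A sketch using only the standard library plus the vendored packages; the names "demo-csr" and "demo-client" are illustrative:

    package main

    import (
    	"crypto/ecdsa"
    	"crypto/elliptic"
    	"crypto/rand"
    	"crypto/x509"
    	"crypto/x509/pkix"
    	"encoding/pem"
    	"fmt"

    	certsv1beta1 "k8s.io/api/certificates/v1beta1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func main() {
    	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
    	if err != nil {
    		panic(err)
    	}
    	der, err := x509.CreateCertificateRequest(rand.Reader,
    		&x509.CertificateRequest{Subject: pkix.Name{CommonName: "demo-client"}}, key)
    	if err != nil {
    		panic(err)
    	}

    	csr := certsv1beta1.CertificateSigningRequest{
    		ObjectMeta: metav1.ObjectMeta{Name: "demo-csr"},
    		Spec: certsv1beta1.CertificateSigningRequestSpec{
    			// Request carries the PEM-encoded PKCS#10 CSR bytes.
    			Request: pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE REQUEST", Bytes: der}),
    			Usages:  []certsv1beta1.KeyUsage{certsv1beta1.UsageClientAuth, certsv1beta1.UsageDigitalSignature},
    		},
    	}
    	fmt.Println(csr.Name, len(csr.Spec.Request) > 0) // demo-csr true
    }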
diff --git a/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go
new file mode 100644
index 0000000..f6a7e16
--- /dev/null
+++ b/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go
@@ -0,0 +1,74 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multi-line TODOs or blocks that you want to ignore, use ---.
+// Any context after a --- is ignored.
+//
+// These methods can be generated by running hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_CertificateSigningRequest = map[string]string{
+	"":       "Describes a certificate signing request",
+	"spec":   "The certificate request itself and any additional information.",
+	"status": "Derived information about the request.",
+}
+
+func (CertificateSigningRequest) SwaggerDoc() map[string]string {
+	return map_CertificateSigningRequest
+}
+
+var map_CertificateSigningRequestCondition = map[string]string{
+	"type":           "request approval state, currently Approved or Denied.",
+	"reason":         "brief reason for the request state",
+	"message":        "human readable message with details about the request state",
+	"lastUpdateTime": "timestamp for the last update to this condition",
+}
+
+func (CertificateSigningRequestCondition) SwaggerDoc() map[string]string {
+	return map_CertificateSigningRequestCondition
+}
+
+var map_CertificateSigningRequestSpec = map[string]string{
+	"":         "This information is immutable after the request is created. Only the Request and Usages fields can be set on creation, other fields are derived by Kubernetes and cannot be modified by users.",
+	"request":  "Base64-encoded PKCS#10 CSR data",
+	"usages":   "allowedUsages specifies a set of usage contexts the key will be valid for. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3\n     https://tools.ietf.org/html/rfc5280#section-4.2.1.12",
+	"username": "Information about the requesting user. See user.Info interface for details.",
+	"uid":      "UID information about the requesting user. See user.Info interface for details.",
+	"groups":   "Group information about the requesting user. See user.Info interface for details.",
+	"extra":    "Extra information about the requesting user. See user.Info interface for details.",
+}
+
+func (CertificateSigningRequestSpec) SwaggerDoc() map[string]string {
+	return map_CertificateSigningRequestSpec
+}
+
+var map_CertificateSigningRequestStatus = map[string]string{
+	"conditions":  "Conditions applied to the request, such as approval or denial.",
+	"certificate": "If request was approved, the controller will place the issued certificate here.",
+}
+
+func (CertificateSigningRequestStatus) SwaggerDoc() map[string]string {
+	return map_CertificateSigningRequestStatus
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..1b103f1
--- /dev/null
+++ b/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go
@@ -0,0 +1,197 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CertificateSigningRequest) DeepCopyInto(out *CertificateSigningRequest) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateSigningRequest.
+func (in *CertificateSigningRequest) DeepCopy() *CertificateSigningRequest {
+	if in == nil {
+		return nil
+	}
+	out := new(CertificateSigningRequest)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CertificateSigningRequest) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CertificateSigningRequestCondition) DeepCopyInto(out *CertificateSigningRequestCondition) {
+	*out = *in
+	in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateSigningRequestCondition.
+func (in *CertificateSigningRequestCondition) DeepCopy() *CertificateSigningRequestCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(CertificateSigningRequestCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CertificateSigningRequestList) DeepCopyInto(out *CertificateSigningRequestList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]CertificateSigningRequest, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateSigningRequestList.
+func (in *CertificateSigningRequestList) DeepCopy() *CertificateSigningRequestList {
+	if in == nil {
+		return nil
+	}
+	out := new(CertificateSigningRequestList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CertificateSigningRequestList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CertificateSigningRequestSpec) DeepCopyInto(out *CertificateSigningRequestSpec) {
+	*out = *in
+	if in.Request != nil {
+		in, out := &in.Request, &out.Request
+		*out = make([]byte, len(*in))
+		copy(*out, *in)
+	}
+	if in.Usages != nil {
+		in, out := &in.Usages, &out.Usages
+		*out = make([]KeyUsage, len(*in))
+		copy(*out, *in)
+	}
+	if in.Groups != nil {
+		in, out := &in.Groups, &out.Groups
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Extra != nil {
+		in, out := &in.Extra, &out.Extra
+		*out = make(map[string]ExtraValue, len(*in))
+		for key, val := range *in {
+			var outVal []string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				in, out := &val, &outVal
+				*out = make(ExtraValue, len(*in))
+				copy(*out, *in)
+			}
+			(*out)[key] = outVal
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateSigningRequestSpec.
+func (in *CertificateSigningRequestSpec) DeepCopy() *CertificateSigningRequestSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(CertificateSigningRequestSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CertificateSigningRequestStatus) DeepCopyInto(out *CertificateSigningRequestStatus) {
+	*out = *in
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]CertificateSigningRequestCondition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Certificate != nil {
+		in, out := &in.Certificate, &out.Certificate
+		*out = make([]byte, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateSigningRequestStatus.
+func (in *CertificateSigningRequestStatus) DeepCopy() *CertificateSigningRequestStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(CertificateSigningRequestStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in ExtraValue) DeepCopyInto(out *ExtraValue) {
+	{
+		in := &in
+		*out = make(ExtraValue, len(*in))
+		copy(*out, *in)
+		return
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraValue.
+func (in ExtraValue) DeepCopy() ExtraValue {
+	if in == nil {
+		return nil
+	}
+	out := new(ExtraValue)
+	in.DeepCopyInto(out)
+	return *out
+}
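The Extra map above needs the per-key slice copy because ExtraValue is itself a slice; a shallow map copy would share backing arrays between original and copy. A short sketch of the resulting independence:

    package main

    import (
    	"fmt"

    	certsv1beta1 "k8s.io/api/certificates/v1beta1"
    )

    func main() {
    	spec := certsv1beta1.CertificateSigningRequestSpec{
    		Extra: map[string]certsv1beta1.ExtraValue{
    			"scopes": {"system:masters"}, // illustrative key/value
    		},
    	}
    	clone := spec.DeepCopy()
    	clone.Extra["scopes"][0] = "changed"

    	fmt.Println(spec.Extra["scopes"][0]) // system:masters -- the original is untouched
    }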
diff --git a/vendor/k8s.io/api/coordination/v1beta1/doc.go b/vendor/k8s.io/api/coordination/v1beta1/doc.go
new file mode 100644
index 0000000..bc95fd1
--- /dev/null
+++ b/vendor/k8s.io/api/coordination/v1beta1/doc.go
@@ -0,0 +1,22 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:openapi-gen=true
+
+// +groupName=coordination.k8s.io
+
+package v1beta1 // import "k8s.io/api/coordination/v1beta1"
diff --git a/vendor/k8s.io/api/coordination/v1beta1/generated.proto b/vendor/k8s.io/api/coordination/v1beta1/generated.proto
new file mode 100644
index 0000000..918e0de
--- /dev/null
+++ b/vendor/k8s.io/api/coordination/v1beta1/generated.proto
@@ -0,0 +1,80 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.api.coordination.v1beta1;
+
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1beta1";
+
+// Lease defines a lease concept.
+message Lease {
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Specification of the Lease.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+  // +optional
+  optional LeaseSpec spec = 2;
+}
+
+// LeaseList is a list of Lease objects.
+message LeaseList {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is a list of schema objects.
+  repeated Lease items = 2;
+}
+
+// LeaseSpec is a specification of a Lease.
+message LeaseSpec {
+  // holderIdentity contains the identity of the holder of a current lease.
+  // +optional
+  optional string holderIdentity = 1;
+
+  // leaseDurationSeconds is a duration that candidates for a lease need
+  // to wait to force acquire it. This is measured against the time of the
+  // last observed RenewTime.
+  // +optional
+  optional int32 leaseDurationSeconds = 2;
+
+  // acquireTime is a time when the current lease was acquired.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime acquireTime = 3;
+
+  // renewTime is a time when the current holder of a lease has last
+  // updated the lease.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime renewTime = 4;
+
+  // leaseTransitions is the number of transitions of a lease between
+  // holders.
+  // +optional
+  optional int32 leaseTransitions = 5;
+}
+
diff --git a/vendor/k8s.io/api/coordination/v1beta1/register.go b/vendor/k8s.io/api/coordination/v1beta1/register.go
new file mode 100644
index 0000000..85efaa6
--- /dev/null
+++ b/vendor/k8s.io/api/coordination/v1beta1/register.go
@@ -0,0 +1,53 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "coordination.k8s.io"
+
+// SchemeGroupVersion is the group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
+	// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+	SchemeBuilder      = runtime.NewSchemeBuilder(addKnownTypes)
+	localSchemeBuilder = &SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&Lease{},
+		&LeaseList{},
+	)
+
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
diff --git a/vendor/k8s.io/api/coordination/v1beta1/types.go b/vendor/k8s.io/api/coordination/v1beta1/types.go
new file mode 100644
index 0000000..846f728
--- /dev/null
+++ b/vendor/k8s.io/api/coordination/v1beta1/types.go
@@ -0,0 +1,74 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Lease defines a lease concept.
+type Lease struct {
+	metav1.TypeMeta `json:",inline"`
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Specification of the Lease.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+	// +optional
+	Spec LeaseSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+}
+
+// LeaseSpec is a specification of a Lease.
+type LeaseSpec struct {
+	// holderIdentity contains the identity of the holder of a current lease.
+	// +optional
+	HolderIdentity *string `json:"holderIdentity,omitempty" protobuf:"bytes,1,opt,name=holderIdentity"`
+	// leaseDurationSeconds is a duration that candidates for a lease need
+	// to wait to force acquire it. This is measured against the time of the
+	// last observed RenewTime.
+	// +optional
+	LeaseDurationSeconds *int32 `json:"leaseDurationSeconds,omitempty" protobuf:"varint,2,opt,name=leaseDurationSeconds"`
+	// acquireTime is a time when the current lease was acquired.
+	// +optional
+	AcquireTime *metav1.MicroTime `json:"acquireTime,omitempty" protobuf:"bytes,3,opt,name=acquireTime"`
+	// renewTime is a time when the current holder of a lease has last
+	// updated the lease.
+	// +optional
+	RenewTime *metav1.MicroTime `json:"renewTime,omitempty" protobuf:"bytes,4,opt,name=renewTime"`
+	// leaseTransitions is the number of transitions of a lease between
+	// holders.
+	// +optional
+	LeaseTransitions *int32 `json:"leaseTransitions,omitempty" protobuf:"varint,5,opt,name=leaseTransitions"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// LeaseList is a list of Lease objects.
+type LeaseList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is a list of schema objects.
+	Items []Lease `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
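All LeaseSpec fields are pointers so that "unset" stays distinguishable from a zero value. A sketch of filling one in, roughly as a node-heartbeat lease might look; the holder name and duration are illustrative:

    package main

    import (
    	"fmt"
    	"time"

    	coordv1beta1 "k8s.io/api/coordination/v1beta1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func main() {
    	holder := "node-1"    // hypothetical holder identity
    	duration := int32(15) // seconds a challenger must wait before take-over
    	now := metav1.NewMicroTime(time.Now())

    	lease := coordv1beta1.Lease{
    		ObjectMeta: metav1.ObjectMeta{Name: "node-1", Namespace: "kube-node-lease"},
    		Spec: coordv1beta1.LeaseSpec{
    			HolderIdentity:       &holder,
    			LeaseDurationSeconds: &duration,
    			AcquireTime:          &now,
    			RenewTime:            &now,
    		},
    	}
    	fmt.Println(*lease.Spec.HolderIdentity, *lease.Spec.LeaseDurationSeconds)
    }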
diff --git a/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go
new file mode 100644
index 0000000..4532d32
--- /dev/null
+++ b/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go
@@ -0,0 +1,63 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multi-line TODOs or blocks that you want to ignore, use ---.
+// Any context after a --- is ignored.
+//
+// These methods can be generated by running hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_Lease = map[string]string{
+	"":         "Lease defines a lease concept.",
+	"metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"spec":     "Specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+}
+
+func (Lease) SwaggerDoc() map[string]string {
+	return map_Lease
+}
+
+var map_LeaseList = map[string]string{
+	"":         "LeaseList is a list of Lease objects.",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"items":    "Items is a list of schema objects.",
+}
+
+func (LeaseList) SwaggerDoc() map[string]string {
+	return map_LeaseList
+}
+
+var map_LeaseSpec = map[string]string{
+	"":                     "LeaseSpec is a specification of a Lease.",
+	"holderIdentity":       "holderIdentity contains the identity of the holder of a current lease.",
+	"leaseDurationSeconds": "leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measure against time of last observed RenewTime.",
+	"acquireTime":          "acquireTime is a time when the current lease was acquired.",
+	"renewTime":            "renewTime is a time when the current holder of a lease has last updated the lease.",
+	"leaseTransitions":     "leaseTransitions is the number of transitions of a lease between holders.",
+}
+
+func (LeaseSpec) SwaggerDoc() map[string]string {
+	return map_LeaseSpec
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..a628ac1
--- /dev/null
+++ b/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go
@@ -0,0 +1,124 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Lease) DeepCopyInto(out *Lease) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Lease.
+func (in *Lease) DeepCopy() *Lease {
+	if in == nil {
+		return nil
+	}
+	out := new(Lease)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Lease) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LeaseList) DeepCopyInto(out *LeaseList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Lease, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaseList.
+func (in *LeaseList) DeepCopy() *LeaseList {
+	if in == nil {
+		return nil
+	}
+	out := new(LeaseList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *LeaseList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LeaseSpec) DeepCopyInto(out *LeaseSpec) {
+	*out = *in
+	if in.HolderIdentity != nil {
+		in, out := &in.HolderIdentity, &out.HolderIdentity
+		*out = new(string)
+		**out = **in
+	}
+	if in.LeaseDurationSeconds != nil {
+		in, out := &in.LeaseDurationSeconds, &out.LeaseDurationSeconds
+		*out = new(int32)
+		**out = **in
+	}
+	if in.AcquireTime != nil {
+		in, out := &in.AcquireTime, &out.AcquireTime
+		*out = (*in).DeepCopy()
+	}
+	if in.RenewTime != nil {
+		in, out := &in.RenewTime, &out.RenewTime
+		*out = (*in).DeepCopy()
+	}
+	if in.LeaseTransitions != nil {
+		in, out := &in.LeaseTransitions, &out.LeaseTransitions
+		*out = new(int32)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaseSpec.
+func (in *LeaseSpec) DeepCopy() *LeaseSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(LeaseSpec)
+	in.DeepCopyInto(out)
+	return out
+}
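Because LeaseSpec is all pointers, the generated DeepCopy is the safe way to produce a mutated copy for an update call, for example a renewal that stamps a fresh RenewTime. A minimal sketch:

    package main

    import (
    	"fmt"
    	"time"

    	coordv1beta1 "k8s.io/api/coordination/v1beta1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    // renewed deep-copies the lease and stamps a fresh RenewTime, leaving the
    // input untouched -- the usual read-modify-update pattern for API objects.
    func renewed(in *coordv1beta1.Lease) *coordv1beta1.Lease {
    	out := in.DeepCopy()
    	now := metav1.NewMicroTime(time.Now())
    	out.Spec.RenewTime = &now
    	return out
    }

    func main() {
    	old := metav1.NewMicroTime(time.Now().Add(-10 * time.Second))
    	lease := &coordv1beta1.Lease{Spec: coordv1beta1.LeaseSpec{RenewTime: &old}}
    	fmt.Println(renewed(lease).Spec.RenewTime.After(lease.Spec.RenewTime.Time)) // true
    }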
diff --git a/vendor/k8s.io/api/core/v1/annotation_key_constants.go b/vendor/k8s.io/api/core/v1/annotation_key_constants.go
new file mode 100644
index 0000000..2c72ec2
--- /dev/null
+++ b/vendor/k8s.io/api/core/v1/annotation_key_constants.go
@@ -0,0 +1,100 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file should be consistent with pkg/api/annotation_key_constants.go.
+
+package v1
+
+const (
+	// ImagePolicyFailedOpenKey is added to pods that were admitted by failing open
+	// when the image policy webhook backend fails.
+	ImagePolicyFailedOpenKey string = "alpha.image-policy.k8s.io/failed-open"
+
+	// PodPresetOptOutAnnotationKey represents the annotation key for a pod to exempt itself from pod preset manipulation
+	PodPresetOptOutAnnotationKey string = "podpreset.admission.kubernetes.io/exclude"
+
+	// MirrorPodAnnotationKey represents the annotation key set by kubelets when creating mirror pods
+	MirrorPodAnnotationKey string = "kubernetes.io/config.mirror"
+
+	// TolerationsAnnotationKey represents the key of tolerations data (json serialized)
+	// in the Annotations of a Pod.
+	TolerationsAnnotationKey string = "scheduler.alpha.kubernetes.io/tolerations"
+
+	// TaintsAnnotationKey represents the key of taints data (json serialized)
+	// in the Annotations of a Node.
+	TaintsAnnotationKey string = "scheduler.alpha.kubernetes.io/taints"
+
+	// SeccompPodAnnotationKey represents the key of a seccomp profile applied
+	// to all containers of a pod.
+	SeccompPodAnnotationKey string = "seccomp.security.alpha.kubernetes.io/pod"
+
+	// SeccompContainerAnnotationKeyPrefix represents the key of a seccomp profile applied
+	// to one container of a pod.
+	SeccompContainerAnnotationKeyPrefix string = "container.seccomp.security.alpha.kubernetes.io/"
+
+	// SeccompProfileRuntimeDefault represents the default seccomp profile used by container runtime.
+	SeccompProfileRuntimeDefault string = "runtime/default"
+
+	// DeprecatedSeccompProfileDockerDefault represents the default seccomp profile used by docker.
+	// This is now deprecated and should be replaced by SeccompProfileRuntimeDefault.
+	DeprecatedSeccompProfileDockerDefault string = "docker/default"
+
+	// PreferAvoidPodsAnnotationKey represents the key of preferAvoidPods data (json serialized)
+	// in the Annotations of a Node.
+	PreferAvoidPodsAnnotationKey string = "scheduler.alpha.kubernetes.io/preferAvoidPods"
+
+	// ObjectTTLAnnotationKey represents a suggestion for kubelet for how long it can cache
+	// an object (e.g. secret, config map) before fetching it again from apiserver.
+	// This annotation can be attached to a node.
+	ObjectTTLAnnotationKey string = "node.alpha.kubernetes.io/ttl"
+
+	// NonConvertibleAnnotationPrefix is the annotation key prefix used to identify non-convertible JSON paths.
+	NonConvertibleAnnotationPrefix = "non-convertible.kubernetes.io"
+
+	kubectlPrefix = "kubectl.kubernetes.io/"
+
+	// LastAppliedConfigAnnotation is the annotation used to store the previous
+	// configuration of a resource for use in a three way diff by UpdateApplyAnnotation.
+	LastAppliedConfigAnnotation = kubectlPrefix + "last-applied-configuration"
+
+	// AnnotationLoadBalancerSourceRangesKey is the key of the annotation on a service to set allowed ingress ranges on their LoadBalancers
+	//
+	// It should be a comma-separated list of CIDRs, e.g. `0.0.0.0/0` to
+	// allow full access (the default) or `18.0.0.0/8,56.0.0.0/8` to allow
+	// access only from the CIDRs currently allocated to MIT & the USPS.
+	//
+	// Not all cloud providers support this annotation, though AWS & GCE do.
+	AnnotationLoadBalancerSourceRangesKey = "service.beta.kubernetes.io/load-balancer-source-ranges"
+
+	// EndpointsLastChangeTriggerTime is the annotation key, set for endpoints objects, that
+	// represents the timestamp (stored as RFC 3339 date-time string, e.g. '2018-10-22T19:32:52.1Z')
+	// of the last change, of some Pod or Service object, that triggered the endpoints object change.
+	// In other words, if a Pod / Service changed at time T0, that change was observed by endpoints
+	// controller at T1, and the Endpoints object was changed at T2, the
+	// EndpointsLastChangeTriggerTime would be set to T0.
+	//
+	// The "endpoints change trigger" here means any Pod or Service change that resulted in the
+	// Endpoints object change.
+	//
+	// Given the definition of the "endpoints change trigger", please note that this annotation will
+	// be set ONLY for endpoints object changes triggered by either Pod or Service change. If the
+	// Endpoints object changes due to other reasons, this annotation won't be set (or updated if it's
+	// already set).
+	//
+	// This annotation will be used to compute the in-cluster network programming latency SLI, see
+	// https://github.com/kubernetes/community/blob/master/sig-scalability/slos/network_programming_latency.md
+	EndpointsLastChangeTriggerTime = "endpoints.kubernetes.io/last-change-trigger-time"
+)
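These constants are plain annotation keys; callers read and write them through ObjectMeta.Annotations. A sketch with the load-balancer source-ranges key (the service name and CIDRs are illustrative):

    package main

    import (
    	"fmt"

    	v1 "k8s.io/api/core/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func main() {
    	svc := v1.Service{
    		ObjectMeta: metav1.ObjectMeta{
    			Name: "demo", // hypothetical service
    			Annotations: map[string]string{
    				v1.AnnotationLoadBalancerSourceRangesKey: "10.0.0.0/8,192.168.0.0/16",
    			},
    		},
    	}
    	if ranges, ok := svc.Annotations[v1.AnnotationLoadBalancerSourceRangesKey]; ok {
    		fmt.Println("allowed source ranges:", ranges)
    	}
    }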
diff --git a/vendor/k8s.io/api/core/v1/doc.go b/vendor/k8s.io/api/core/v1/doc.go
new file mode 100644
index 0000000..96994c6
--- /dev/null
+++ b/vendor/k8s.io/api/core/v1/doc.go
@@ -0,0 +1,21 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:openapi-gen=true
+// +k8s:deepcopy-gen=package
+
+// Package v1 is the v1 version of the core API.
+package v1 // import "k8s.io/api/core/v1"
diff --git a/vendor/k8s.io/api/core/v1/generated.proto b/vendor/k8s.io/api/core/v1/generated.proto
new file mode 100644
index 0000000..165aa2a
--- /dev/null
+++ b/vendor/k8s.io/api/core/v1/generated.proto
@@ -0,0 +1,4789 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.api.core.v1;
+
+import "k8s.io/apimachinery/pkg/api/resource/generated.proto";
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+import "k8s.io/apimachinery/pkg/util/intstr/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1";
+
+// Represents a Persistent Disk resource in AWS.
+//
+// An AWS EBS disk must exist before mounting to a container. The disk
+// must also be in the same AWS zone as the kubelet. An AWS EBS disk
+// can only be mounted as read/write once. AWS EBS volumes support
+// ownership management and SELinux relabeling.
+message AWSElasticBlockStoreVolumeSource {
+  // Unique ID of the persistent disk resource in AWS (Amazon EBS volume).
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+  optional string volumeID = 1;
+
+  // Filesystem type of the volume that you want to mount.
+  // Tip: Ensure that the filesystem type is supported by the host operating system.
+  // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+  // TODO: how do we prevent errors in the filesystem from compromising the machine
+  // +optional
+  optional string fsType = 2;
+
+  // The partition in the volume that you want to mount.
+  // If omitted, the default is to mount by volume name.
+  // Examples: For volume /dev/sda1, you specify the partition as "1".
+  // Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
+  // +optional
+  optional int32 partition = 3;
+
+  // Specify "true" to force and set the ReadOnly property in VolumeMounts to "true".
+  // If omitted, the default is "false".
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+  // +optional
+  optional bool readOnly = 4;
+}
+
+// Affinity is a group of affinity scheduling rules.
+message Affinity {
+  // Describes node affinity scheduling rules for the pod.
+  // +optional
+  optional NodeAffinity nodeAffinity = 1;
+
+  // Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
+  // +optional
+  optional PodAffinity podAffinity = 2;
+
+  // Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
+  // +optional
+  optional PodAntiAffinity podAntiAffinity = 3;
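+
+  // For example (hypothetical combination), a single Affinity may pair
+  // nodeAffinity ("prefer nodes labeled disktype=ssd") with podAntiAffinity
+  // ("avoid nodes already running a replica of this app").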
+}
+
+// AttachedVolume describes a volume attached to a node
+message AttachedVolume {
+  // Name of the attached volume
+  optional string name = 1;
+
+  // DevicePath represents the device path where the volume should be available
+  optional string devicePath = 2;
+}
+
+// AvoidPods describes pods that should avoid this node. This is the value for a
+// Node annotation with key scheduler.alpha.kubernetes.io/preferAvoidPods and
+// will eventually become a field of NodeStatus.
+message AvoidPods {
+  // Bounded-sized list of signatures of pods that should avoid this node, sorted
+  // in timestamp order from oldest to newest. Size of the slice is unspecified.
+  // +optional
+  repeated PreferAvoidPodsEntry preferAvoidPods = 1;
+}
+
+// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
+message AzureDiskVolumeSource {
+  // The Name of the data disk in the blob storage
+  optional string diskName = 1;
+
+  // The URI of the data disk in the blob storage
+  optional string diskURI = 2;
+
+  // Host Caching mode: None, Read Only, Read Write.
+  // +optional
+  optional string cachingMode = 3;
+
+  // Filesystem type to mount.
+  // Must be a filesystem type supported by the host operating system.
+  // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+  // +optional
+  optional string fsType = 4;
+
+  // Defaults to false (read/write). ReadOnly here will force
+  // the ReadOnly setting in VolumeMounts.
+  // +optional
+  optional bool readOnly = 5;
+
+  // Expected values: "Shared" (multiple blob disks per storage account),
+  // "Dedicated" (single blob disk per storage account), "Managed" (Azure
+  // managed data disk, only in a managed availability set). Defaults to "Shared".
+  optional string kind = 6;
+}
+
+// AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
+message AzureFilePersistentVolumeSource {
+  // the name of secret that contains Azure Storage Account Name and Key
+  optional string secretName = 1;
+
+  // Share Name
+  optional string shareName = 2;
+
+  // Defaults to false (read/write). ReadOnly here will force
+  // the ReadOnly setting in VolumeMounts.
+  // +optional
+  optional bool readOnly = 3;
+
+  // the namespace of the secret that contains Azure Storage Account Name and Key
+  // default is the same as the Pod
+  // +optional
+  optional string secretNamespace = 4;
+}
+
+// AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
+message AzureFileVolumeSource {
+  // the name of secret that contains Azure Storage Account Name and Key
+  optional string secretName = 1;
+
+  // Share Name
+  optional string shareName = 2;
+
+  // Defaults to false (read/write). ReadOnly here will force
+  // the ReadOnly setting in VolumeMounts.
+  // +optional
+  optional bool readOnly = 3;
+}
+
+// Binding ties one object to another; for example, a pod is bound to a node by a scheduler.
+// Deprecated in 1.7; please use the bindings subresource of pods instead.
+message Binding {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // The target object that you want to bind to the standard object.
+  optional ObjectReference target = 2;
+}
+
+// Represents storage that is managed by an external CSI volume driver (Beta feature)
+message CSIPersistentVolumeSource {
+  // Driver is the name of the driver to use for this volume.
+  // Required.
+  optional string driver = 1;
+
+  // VolumeHandle is the unique volume name returned by the CSI volume
+  // plugin's CreateVolume to refer to the volume on all subsequent calls.
+  // Required.
+  optional string volumeHandle = 2;
+
+  // Optional: The value to pass to ControllerPublishVolumeRequest.
+  // Defaults to false (read/write).
+  // +optional
+  optional bool readOnly = 3;
+
+  // Filesystem type to mount.
+  // Must be a filesystem type supported by the host operating system.
+  // Ex. "ext4", "xfs", "ntfs".
+  // +optional
+  optional string fsType = 4;
+
+  // Attributes of the volume to publish.
+  // +optional
+  map<string, string> volumeAttributes = 5;
+
+  // ControllerPublishSecretRef is a reference to the secret object containing
+  // sensitive information to pass to the CSI driver to complete the CSI
+  // ControllerPublishVolume and ControllerUnpublishVolume calls.
+  // This field is optional, and may be empty if no secret is required. If the
+  // secret object contains more than one secret, all secrets are passed.
+  // +optional
+  optional SecretReference controllerPublishSecretRef = 6;
+
+  // NodeStageSecretRef is a reference to the secret object containing sensitive
+  // information to pass to the CSI driver to complete the CSI NodeStageVolume
+  // and NodeUnstageVolume calls.
+  // This field is optional, and may be empty if no secret is required. If the
+  // secret object contains more than one secret, all secrets are passed.
+  // +optional
+  optional SecretReference nodeStageSecretRef = 7;
+
+  // NodePublishSecretRef is a reference to the secret object containing
+  // sensitive information to pass to the CSI driver to complete the CSI
+  // NodePublishVolume and NodeUnpublishVolume calls.
+  // This field is optional, and may be empty if no secret is required. If the
+  // secret object contains more than one secret, all secrets are passed.
+  // +optional
+  optional SecretReference nodePublishSecretRef = 8;
+}
+
+// Adds and removes POSIX capabilities from running containers.
+message Capabilities {
+  // Added capabilities
+  // +optional
+  repeated string add = 1;
+
+  // Removed capabilities
+  // +optional
+  repeated string drop = 2;
+}
+
+// Represents a Ceph Filesystem mount that lasts the lifetime of a pod
+// Cephfs volumes do not support ownership management or SELinux relabeling.
+message CephFSPersistentVolumeSource {
+  // Required: Monitors is a collection of Ceph monitors
+  // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
+  repeated string monitors = 1;
+
+  // Optional: Used as the mounted root, rather than the full Ceph tree, default is /
+  // +optional
+  optional string path = 2;
+
+  // Optional: User is the rados user name, default is admin
+  // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
+  // +optional
+  optional string user = 3;
+
+  // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret
+  // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
+  // +optional
+  optional string secretFile = 4;
+
+  // Optional: SecretRef is reference to the authentication secret for User, default is empty.
+  // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
+  // +optional
+  optional SecretReference secretRef = 5;
+
+  // Optional: Defaults to false (read/write). ReadOnly here will force
+  // the ReadOnly setting in VolumeMounts.
+  // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
+  // +optional
+  optional bool readOnly = 6;
+}
+
+// Represents a Ceph Filesystem mount that lasts the lifetime of a pod
+// Cephfs volumes do not support ownership management or SELinux relabeling.
+message CephFSVolumeSource {
+  // Required: Monitors is a collection of Ceph monitors
+  // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
+  repeated string monitors = 1;
+
+  // Optional: Used as the mounted root, rather than the full Ceph tree, default is /
+  // +optional
+  optional string path = 2;
+
+  // Optional: User is the rados user name, default is admin
+  // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
+  // +optional
+  optional string user = 3;
+
+  // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret
+  // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
+  // +optional
+  optional string secretFile = 4;
+
+  // Optional: SecretRef is reference to the authentication secret for User, default is empty.
+  // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
+  // +optional
+  optional LocalObjectReference secretRef = 5;
+
+  // Optional: Defaults to false (read/write). ReadOnly here will force
+  // the ReadOnly setting in VolumeMounts.
+  // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
+  // +optional
+  optional bool readOnly = 6;
+}
+
+// Represents a cinder volume resource in OpenStack.
+// A Cinder volume must exist before mounting to a container.
+// The volume must also be in the same region as the kubelet.
+// Cinder volumes support ownership management and SELinux relabeling.
+message CinderPersistentVolumeSource {
+  // volume id used to identify the volume in cinder
+  // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
+  optional string volumeID = 1;
+
+  // Filesystem type to mount.
+  // Must be a filesystem type supported by the host operating system.
+  // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+  // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
+  // +optional
+  optional string fsType = 2;
+
+  // Optional: Defaults to false (read/write). ReadOnly here will force
+  // the ReadOnly setting in VolumeMounts.
+  // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
+  // +optional
+  optional bool readOnly = 3;
+
+  // Optional: points to a secret object containing parameters used to connect
+  // to OpenStack.
+  // +optional
+  optional SecretReference secretRef = 4;
+}
+
+// Represents a cinder volume resource in OpenStack.
+// A Cinder volume must exist before mounting to a container.
+// The volume must also be in the same region as the kubelet.
+// Cinder volumes support ownership management and SELinux relabeling.
+message CinderVolumeSource {
+  // volume id used to identify the volume in cinder
+  // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
+  optional string volumeID = 1;
+
+  // Filesystem type to mount.
+  // Must be a filesystem type supported by the host operating system.
+  // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+  // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
+  // +optional
+  optional string fsType = 2;
+
+  // Optional: Defaults to false (read/write). ReadOnly here will force
+  // the ReadOnly setting in VolumeMounts.
+  // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
+  // +optional
+  optional bool readOnly = 3;
+
+  // Optional: points to a secret object containing parameters used to connect
+  // to OpenStack.
+  // +optional
+  optional LocalObjectReference secretRef = 4;
+}
+
+// ClientIPConfig represents the configurations of Client IP based session affinity.
+message ClientIPConfig {
+  // timeoutSeconds specifies the seconds of ClientIP type session sticky time.
+  // The value must be >0 && <=86400 (for 1 day) if ServiceAffinity == "ClientIP".
+  // Default value is 10800 (for 3 hours).
+  // +optional
+  optional int32 timeoutSeconds = 1;
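+
+  // For example, a hypothetical timeoutSeconds of 3600 keeps a given client
+  // routed to the same backend for up to one hour.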
+}
+
+// Information about the condition of a component.
+message ComponentCondition {
+  // Type of condition for a component.
+  // Valid value: "Healthy"
+  optional string type = 1;
+
+  // Status of the condition for a component.
+  // Valid values for "Healthy": "True", "False", or "Unknown".
+  optional string status = 2;
+
+  // Message about the condition for a component.
+  // For example, information about a health check.
+  // +optional
+  optional string message = 3;
+
+  // Condition error code for a component.
+  // For example, a health check error code.
+  // +optional
+  optional string error = 4;
+}
+
+// ComponentStatus (and ComponentStatusList) holds the cluster validation info.
+message ComponentStatus {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // List of component conditions observed
+  // +optional
+  // +patchMergeKey=type
+  // +patchStrategy=merge
+  repeated ComponentCondition conditions = 2;
+}
+
+// Status of all the conditions for the component as a list of ComponentStatus objects.
+message ComponentStatusList {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // List of ComponentStatus objects.
+  repeated ComponentStatus items = 2;
+}
+
+// ConfigMap holds configuration data for pods to consume.
+message ConfigMap {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Data contains the configuration data.
+  // Each key must consist of alphanumeric characters, '-', '_' or '.'.
+  // Values with non-UTF-8 byte sequences must use the BinaryData field.
+  // The keys stored in Data must not overlap with the keys in
+  // the BinaryData field; this is enforced during the validation process.
+  // +optional
+  map<string, string> data = 2;
+
+  // BinaryData contains the binary data.
+  // Each key must consist of alphanumeric characters, '-', '_' or '.'.
+  // BinaryData can contain byte sequences that are not in the UTF-8 range.
+  // The keys stored in BinaryData must not overlap with the ones in
+  // the Data field; this is enforced during the validation process.
+  // Using this field will require 1.10+ apiserver and
+  // kubelet.
+  // +optional
+  map<string, bytes> binaryData = 3;
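+
+  // For example (hypothetical keys), data could hold {"log.level": "debug"}
+  // while binaryData holds raw bytes under "cert.p12"; "cert.p12" could then
+  // not also appear as a key in data.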
+}
+
+// ConfigMapEnvSource selects a ConfigMap to populate the environment
+// variables with.
+//
+// The contents of the target ConfigMap's Data field will represent the
+// key-value pairs as environment variables.
+message ConfigMapEnvSource {
+  // The ConfigMap to select from.
+  optional LocalObjectReference localObjectReference = 1;
+
+  // Specify whether the ConfigMap must be defined
+  // +optional
+  optional bool optional = 2;
+}
+
+// Selects a key from a ConfigMap.
+message ConfigMapKeySelector {
+  // The ConfigMap to select from.
+  optional LocalObjectReference localObjectReference = 1;
+
+  // The key to select.
+  optional string key = 2;
+
+  // Specify whether the ConfigMap or its key must be defined
+  // +optional
+  optional bool optional = 3;
+}
+
+// ConfigMapList is a resource containing a list of ConfigMap objects.
+message ConfigMapList {
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is the list of ConfigMaps.
+  repeated ConfigMap items = 2;
+}
+
+// ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node.
+message ConfigMapNodeConfigSource {
+  // Namespace is the metadata.namespace of the referenced ConfigMap.
+  // This field is required in all cases.
+  optional string namespace = 1;
+
+  // Name is the metadata.name of the referenced ConfigMap.
+  // This field is required in all cases.
+  optional string name = 2;
+
+  // UID is the metadata.UID of the referenced ConfigMap.
+  // This field is forbidden in Node.Spec, and required in Node.Status.
+  // +optional
+  optional string uid = 3;
+
+  // ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap.
+  // This field is forbidden in Node.Spec, and required in Node.Status.
+  // +optional
+  optional string resourceVersion = 4;
+
+  // KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure
+  // This field is required in all cases.
+  optional string kubeletConfigKey = 5;
+}
+
+// Adapts a ConfigMap into a projected volume.
+//
+// The contents of the target ConfigMap's Data field will be presented in a
+// projected volume as files using the keys in the Data field as the file names,
+// unless the items element is populated with specific mappings of keys to paths.
+// Note that this is identical to a configmap volume source without the default
+// mode.
+message ConfigMapProjection {
+  optional LocalObjectReference localObjectReference = 1;
+
+  // If unspecified, each key-value pair in the Data field of the referenced
+  // ConfigMap will be projected into the volume as a file whose name is the
+  // key and content is the value. If specified, the listed keys will be
+  // projected into the specified paths, and unlisted keys will not be
+  // present. If a key is specified which is not present in the ConfigMap,
+  // the volume setup will error unless it is marked optional. Paths must be
+  // relative and may not contain the '..' path or start with '..'.
+  // +optional
+  repeated KeyToPath items = 2;
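+
+  // For example (hypothetical names), an item mapping key "app.conf" to path
+  // "conf/app.conf" projects only that key, as the file conf/app.conf.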
+
+  // Specify whether the ConfigMap or its keys must be defined
+  // +optional
+  optional bool optional = 4;
+}
+
+// Adapts a ConfigMap into a volume.
+//
+// The contents of the target ConfigMap's Data field will be presented in a
+// volume as files using the keys in the Data field as the file names, unless
+// the items element is populated with specific mappings of keys to paths.
+// ConfigMap volumes support ownership management and SELinux relabeling.
+message ConfigMapVolumeSource {
+  optional LocalObjectReference localObjectReference = 1;
+
+  // If unspecified, each key-value pair in the Data field of the referenced
+  // ConfigMap will be projected into the volume as a file whose name is the
+  // key and content is the value. If specified, the listed keys will be
+  // projected into the specified paths, and unlisted keys will not be
+  // present. If a key is specified which is not present in the ConfigMap,
+  // the volume setup will error unless it is marked optional. Paths must be
+  // relative and may not contain the '..' path or start with '..'.
+  // +optional
+  repeated KeyToPath items = 2;
+
+  // Optional: mode bits to use on created files by default. Must be a
+  // value between 0 and 0777. Defaults to 0644.
+  // Directories within the path are not affected by this setting.
+  // This might be in conflict with other options that affect the file
+  // mode, like fsGroup, and the result can be other mode bits set.
+  // +optional
+  optional int32 defaultMode = 3;
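+
+  // For example, the default 0644 (decimal 420, since JSON has no octal
+  // literals) produces rw-r--r-- files.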
+
+  // Specify whether the ConfigMap or its keys must be defined
+  // +optional
+  optional bool optional = 4;
+}
+
+// A single application container that you want to run within a pod.
+message Container {
+  // Name of the container specified as a DNS_LABEL.
+  // Each container in a pod must have a unique name (DNS_LABEL).
+  // Cannot be updated.
+  optional string name = 1;
+
+  // Docker image name.
+  // More info: https://kubernetes.io/docs/concepts/containers/images
+  // This field is optional to allow higher level config management to default or override
+  // container images in workload controllers like Deployments and StatefulSets.
+  // +optional
+  optional string image = 2;
+
+  // Entrypoint array. Not executed within a shell.
+  // The docker image's ENTRYPOINT is used if this is not provided.
+  // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+  // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax
+  // can be escaped with a double $$, i.e. $$(VAR_NAME). Escaped references will never be expanded,
+  // regardless of whether the variable exists or not.
+  // Cannot be updated.
+  // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+  // +optional
+  repeated string command = 3;
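+
+  // For example, with HOME=/root in the environment, the entry "$(HOME)/bin"
+  // expands to "/root/bin", while the escaped "$$(HOME)/bin" is passed through
+  // as "$(HOME)/bin".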
+
+  // Arguments to the entrypoint.
+  // The docker image's CMD is used if this is not provided.
+  // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+  // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax
+  // can be escaped with a double $$, i.e. $$(VAR_NAME). Escaped references will never be expanded,
+  // regardless of whether the variable exists or not.
+  // Cannot be updated.
+  // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+  // +optional
+  repeated string args = 4;
+
+  // Container's working directory.
+  // If not specified, the container runtime's default will be used, which
+  // might be configured in the container image.
+  // Cannot be updated.
+  // +optional
+  optional string workingDir = 5;
+
+  // List of ports to expose from the container. Exposing a port here gives
+  // the system additional information about the network connections a
+  // container uses, but is primarily informational. Not specifying a port here
+  // DOES NOT prevent that port from being exposed. Any port which is
+  // listening on the default "0.0.0.0" address inside a container will be
+  // accessible from the network.
+  // Cannot be updated.
+  // +optional
+  // +patchMergeKey=containerPort
+  // +patchStrategy=merge
+  // +listType=map
+  // +listMapKey=containerPort
+  // +listMapKey=protocol
+  repeated ContainerPort ports = 6;
+
+  // List of sources to populate environment variables in the container.
+  // The keys defined within a source must be a C_IDENTIFIER. All invalid keys
+  // will be reported as an event when the container is starting. When a key exists in multiple
+  // sources, the value associated with the last source will take precedence.
+  // Values defined by an Env with a duplicate key will take precedence.
+  // Cannot be updated.
+  // +optional
+  repeated EnvFromSource envFrom = 19;
+
+  // List of environment variables to set in the container.
+  // Cannot be updated.
+  // +optional
+  // +patchMergeKey=name
+  // +patchStrategy=merge
+  repeated EnvVar env = 7;
+
+  // Compute Resources required by this container.
+  // Cannot be updated.
+  // More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
+  // +optional
+  optional ResourceRequirements resources = 8;
+
+  // Pod volumes to mount into the container's filesystem.
+  // Cannot be updated.
+  // +optional
+  // +patchMergeKey=mountPath
+  // +patchStrategy=merge
+  repeated VolumeMount volumeMounts = 9;
+
+  // volumeDevices is the list of block devices to be used by the container.
+  // This is a beta feature.
+  // +patchMergeKey=devicePath
+  // +patchStrategy=merge
+  // +optional
+  repeated VolumeDevice volumeDevices = 21;
+
+  // Periodic probe of container liveness.
+  // Container will be restarted if the probe fails.
+  // Cannot be updated.
+  // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+  // +optional
+  optional Probe livenessProbe = 10;
+
+  // Periodic probe of container service readiness.
+  // Container will be removed from service endpoints if the probe fails.
+  // Cannot be updated.
+  // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+  // +optional
+  optional Probe readinessProbe = 11;
+
+  // Actions that the management system should take in response to container lifecycle events.
+  // Cannot be updated.
+  // +optional
+  optional Lifecycle lifecycle = 12;
+
+  // Optional: Path at which the file to which the container's termination message
+  // will be written is mounted into the container's filesystem.
+  // The message written is intended to be a brief final status, such as an assertion failure message.
+  // Will be truncated by the node if greater than 4096 bytes. The total message length across
+  // all containers will be limited to 12kb.
+  // Defaults to /dev/termination-log.
+  // Cannot be updated.
+  // +optional
+  optional string terminationMessagePath = 13;
+
+  // Indicate how the termination message should be populated. File will use the contents of
+  // terminationMessagePath to populate the container status message on both success and failure.
+  // FallbackToLogsOnError will use the last chunk of container log output if the termination
+  // message file is empty and the container exited with an error.
+  // The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+  // Defaults to File.
+  // Cannot be updated.
+  // +optional
+  optional string terminationMessagePolicy = 20;
+
+  // Image pull policy.
+  // One of Always, Never, IfNotPresent.
+  // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
+  // Cannot be updated.
+  // More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
+  // +optional
+  optional string imagePullPolicy = 14;
+
+  // Security options the pod should run with.
+  // More info: https://kubernetes.io/docs/concepts/policy/security-context/
+  // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+  // +optional
+  optional SecurityContext securityContext = 15;
+
+  // Whether this container should allocate a buffer for stdin in the container runtime. If this
+  // is not set, reads from stdin in the container will always result in EOF.
+  // Default is false.
+  // +optional
+  optional bool stdin = 16;
+
+  // Whether the container runtime should close the stdin channel after it has been opened by
+  // a single attach. When stdin is true the stdin stream will remain open across multiple attach
+  // sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
+  // first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+  // at which time stdin is closed and remains closed until the container is restarted. If this
+  // flag is false, a container process that reads from stdin will never receive an EOF.
+  // Default is false.
+  // +optional
+  optional bool stdinOnce = 17;
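+
+  // For example, with stdin and stdinOnce both true, a single interactive
+  // attach session (e.g. a hypothetical `kubectl attach -i` run) can feed
+  // input once; after that client disconnects, stdin stays closed until the
+  // container restarts.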
+
+  // Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+  // Default is false.
+  // +optional
+  optional bool tty = 18;
+}
+
+// Describe a container image
+message ContainerImage {
+  // Names by which this image is known.
+  // e.g. ["k8s.gcr.io/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"]
+  repeated string names = 1;
+
+  // The size of the image in bytes.
+  // +optional
+  optional int64 sizeBytes = 2;
+}
+
+// ContainerPort represents a network port in a single container.
+message ContainerPort {
+  // If specified, this must be an IANA_SVC_NAME and unique within the pod. Each
+  // named port in a pod must have a unique name. Name for the port that can be
+  // referred to by services.
+  // +optional
+  optional string name = 1;
+
+  // Number of port to expose on the host.
+  // If specified, this must be a valid port number, 0 < x < 65536.
+  // If HostNetwork is specified, this must match ContainerPort.
+  // Most containers do not need this.
+  // +optional
+  optional int32 hostPort = 2;
+
+  // Number of port to expose on the pod's IP address.
+  // This must be a valid port number, 0 < x < 65536.
+  optional int32 containerPort = 3;
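+
+  // For example (hypothetical ports), containerPort: 8080 with hostPort: 80
+  // serves the container's port 8080 on the node's port 80.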
+
+  // Protocol for port. Must be UDP, TCP, or SCTP.
+  // Defaults to "TCP".
+  // +optional
+  optional string protocol = 4;
+
+  // What host IP to bind the external port to.
+  // +optional
+  optional string hostIP = 5;
+}
+
+// ContainerState holds a possible state of container.
+// Only one of its members may be specified.
+// If none of them is specified, the default one is ContainerStateWaiting.
+message ContainerState {
+  // Details about a waiting container
+  // +optional
+  optional ContainerStateWaiting waiting = 1;
+
+  // Details about a running container
+  // +optional
+  optional ContainerStateRunning running = 2;
+
+  // Details about a terminated container
+  // +optional
+  optional ContainerStateTerminated terminated = 3;
+}
+
+// ContainerStateRunning is a running state of a container.
+message ContainerStateRunning {
+  // Time at which the container was last (re-)started
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startedAt = 1;
+}
+
+// ContainerStateTerminated is a terminated state of a container.
+message ContainerStateTerminated {
+  // Exit status from the last termination of the container
+  optional int32 exitCode = 1;
+
+  // Signal from the last termination of the container
+  // +optional
+  optional int32 signal = 2;
+
+  // (brief) reason from the last termination of the container
+  // +optional
+  optional string reason = 3;
+
+  // Message regarding the last termination of the container
+  // +optional
+  optional string message = 4;
+
+  // Time at which previous execution of the container started
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startedAt = 5;
+
+  // Time at which the container last terminated
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time finishedAt = 6;
+
+  // Container's ID in the format 'docker://<container_id>'
+  // +optional
+  optional string containerID = 7;
+}
+
+// ContainerStateWaiting is a waiting state of a container.
+message ContainerStateWaiting {
+  // (brief) reason the container is not yet running.
+  // +optional
+  optional string reason = 1;
+
+  // Message regarding why the container is not yet running.
+  // +optional
+  optional string message = 2;
+}
+
+// ContainerStatus contains details for the current status of this container.
+message ContainerStatus {
+  // This must be a DNS_LABEL. Each container in a pod must have a unique name.
+  // Cannot be updated.
+  optional string name = 1;
+
+  // Details about the container's current condition.
+  // +optional
+  optional ContainerState state = 2;
+
+  // Details about the container's last termination condition.
+  // +optional
+  optional ContainerState lastState = 3;
+
+  // Specifies whether the container has passed its readiness probe.
+  optional bool ready = 4;
+
+  // The number of times the container has been restarted, currently based on
+  // the number of dead containers that have not yet been removed.
+  // Note that this is calculated from dead containers. Because those containers
+  // are subject to garbage collection, this value will get capped at 5 by GC.
+  optional int32 restartCount = 5;
+
+  // The image the container is running.
+  // More info: https://kubernetes.io/docs/concepts/containers/images
+  // TODO(dchen1107): Which image the container is running with?
+  optional string image = 6;
+
+  // ImageID of the container's image.
+  optional string imageID = 7;
+
+  // Container's ID in the format 'docker://<container_id>'.
+  // +optional
+  optional string containerID = 8;
+}
+
+// DaemonEndpoint contains information about a single Daemon endpoint.
+message DaemonEndpoint {
+  // Port number of the given endpoint.
+  optional int32 Port = 1;
+}
+
+// Represents downward API info for projecting into a projected volume.
+// Note that this is identical to a downwardAPI volume source without the default
+// mode.
+message DownwardAPIProjection {
+  // Items is a list of DownwardAPIVolume file
+  // +optional
+  repeated DownwardAPIVolumeFile items = 1;
+}
+
+// DownwardAPIVolumeFile represents information to create the file containing the pod field
+message DownwardAPIVolumeFile {
+  // Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be UTF-8 encoded. The first item of the relative path must not start with '..'
+  optional string path = 1;
+
+  // Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.
+  // +optional
+  optional ObjectFieldSelector fieldRef = 2;
+
+  // Selects a resource of the container: only resources limits and requests
+  // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
+  // +optional
+  optional ResourceFieldSelector resourceFieldRef = 3;
+
+  // Optional: mode bits to use on this file, must be a value between 0
+  // and 0777. If not specified, the volume defaultMode will be used.
+  // This might be in conflict with other options that affect the file
+  // mode, like fsGroup, and the result can be other mode bits set.
+  // +optional
+  optional int32 mode = 4;
+}
+
+// DownwardAPIVolumeSource represents a volume containing downward API info.
+// Downward API volumes support ownership management and SELinux relabeling.
+message DownwardAPIVolumeSource {
+  // Items is a list of downward API volume file
+  // +optional
+  repeated DownwardAPIVolumeFile items = 1;
+
+  // Optional: mode bits to use on created files by default. Must be a
+  // value between 0 and 0777. Defaults to 0644.
+  // Directories within the path are not affected by this setting.
+  // This might be in conflict with other options that affect the file
+  // mode, like fsGroup, and the result can be other mode bits set.
+  // +optional
+  optional int32 defaultMode = 2;
+}
+
+// Represents an empty directory for a pod.
+// Empty directory volumes support ownership management and SELinux relabeling.
+message EmptyDirVolumeSource {
+  // What type of storage medium should back this directory.
+  // The default is "" which means to use the node's default medium.
+  // Must be an empty string (default) or Memory.
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+  // +optional
+  optional string medium = 1;
+
+  // Total amount of local storage required for this EmptyDir volume.
+  // The size limit is also applicable for memory medium.
+  // The maximum usage on memory medium EmptyDir would be the minimum value between
+  // the SizeLimit specified here and the sum of memory limits of all containers in a pod.
+  // The default is nil which means that the limit is undefined.
+  // More info: http://kubernetes.io/docs/user-guide/volumes#emptydir
+  // +optional
+  optional k8s.io.apimachinery.pkg.api.resource.Quantity sizeLimit = 2;
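+
+  // For example, medium: "Memory" with a hypothetical sizeLimit of "1Gi"
+  // caps the volume at min(1Gi, sum of the pod's container memory limits).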
+}
+
+// EndpointAddress is a tuple that describes a single IP address.
+message EndpointAddress {
+  // The IP of this endpoint.
+  // May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16),
+  // or link-local multicast (224.0.0.0/24).
+  // IPv6 is also accepted but not fully supported on all platforms. Also, certain
+  // kubernetes components, like kube-proxy, are not IPv6 ready.
+  // TODO: This should allow hostname or IP, See #4447.
+  optional string ip = 1;
+
+  // The Hostname of this endpoint
+  // +optional
+  optional string hostname = 3;
+
+  // Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node.
+  // +optional
+  optional string nodeName = 4;
+
+  // Reference to object providing the endpoint.
+  // +optional
+  optional ObjectReference targetRef = 2;
+}
+
+// EndpointPort is a tuple that describes a single port.
+message EndpointPort {
+  // The name of this port (corresponds to ServicePort.Name).
+  // Must be a DNS_LABEL.
+  // Optional only if one port is defined.
+  // +optional
+  optional string name = 1;
+
+  // The port number of the endpoint.
+  optional int32 port = 2;
+
+  // The IP protocol for this port.
+  // Must be UDP, TCP, or SCTP.
+  // Default is TCP.
+  // +optional
+  optional string protocol = 3;
+}
+
+// EndpointSubset is a group of addresses with a common set of ports. The
+// expanded set of endpoints is the Cartesian product of Addresses x Ports.
+// For example, given:
+//   {
+//     Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}],
+//     Ports:     [{"name": "a", "port": 8675}, {"name": "b", "port": 309}]
+//   }
+// The resulting set of endpoints can be viewed as:
+//     a: [ 10.10.1.1:8675, 10.10.2.2:8675 ],
+//     b: [ 10.10.1.1:309, 10.10.2.2:309 ]
+message EndpointSubset {
+  // IP addresses which offer the related ports that are marked as ready. These endpoints
+  // should be considered safe for load balancers and clients to utilize.
+  // +optional
+  repeated EndpointAddress addresses = 1;
+
+  // IP addresses which offer the related ports but are not currently marked as ready
+  // because they have not yet finished starting, have recently failed a readiness check,
+  // or have recently failed a liveness check.
+  // +optional
+  repeated EndpointAddress notReadyAddresses = 2;
+
+  // Port numbers available on the related IP addresses.
+  // +optional
+  repeated EndpointPort ports = 3;
+}
+
+// Endpoints is a collection of endpoints that implement the actual service. Example:
+//   Name: "mysvc",
+//   Subsets: [
+//     {
+//       Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}],
+//       Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}]
+//     },
+//     {
+//       Addresses: [{"ip": "10.10.3.3"}],
+//       Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}]
+//     },
+//  ]
+message Endpoints {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // The set of all endpoints is the union of all subsets. Addresses are placed into
+  // subsets according to the IPs they share. A single address with multiple ports,
+  // some of which are ready and some of which are not (because they come from
+  // different containers) will result in the address being displayed in different
+  // subsets for the different ports. No address will appear in both Addresses and
+  // NotReadyAddresses in the same subset.
+  // Sets of addresses and ports that comprise a service.
+  // +optional
+  repeated EndpointSubset subsets = 2;
+}
+
+// EndpointsList is a list of endpoints.
+message EndpointsList {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // List of endpoints.
+  repeated Endpoints items = 2;
+}
+
+// EnvFromSource represents the source of a set of environment variables (a ConfigMap or a Secret)
+message EnvFromSource {
+  // An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
+  // +optional
+  optional string prefix = 1;
+
+  // The ConfigMap to select from
+  // +optional
+  optional ConfigMapEnvSource configMapRef = 2;
+
+  // The Secret to select from
+  // +optional
+  optional SecretEnvSource secretRef = 3;
+}
+
+// EnvVar represents an environment variable present in a Container.
+message EnvVar {
+  // Name of the environment variable. Must be a C_IDENTIFIER.
+  optional string name = 1;
+
+  // Variable references $(VAR_NAME) are expanded
+  // using the previous defined environment variables in the container and
+  // any service environment variables. If a variable cannot be resolved,
+  // the reference in the input string will be unchanged. The $(VAR_NAME)
+  // syntax can be escaped with a double $$, i.e. $$(VAR_NAME). Escaped
+  // references will never be expanded, regardless of whether the variable
+  // exists or not.
+  // Defaults to "".
+  // +optional
+  optional string value = 2;
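+
+  // For example, with FOO=bar already defined, the value "$(FOO)-suffix"
+  // resolves to "bar-suffix", while an unresolvable "$(MISSING)" is left
+  // unchanged.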
+
+  // Source for the environment variable's value. Cannot be used if value is not empty.
+  // +optional
+  optional EnvVarSource valueFrom = 3;
+}
+
+// EnvVarSource represents a source for the value of an EnvVar.
+message EnvVarSource {
+  // Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations,
+  // spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP.
+  // +optional
+  optional ObjectFieldSelector fieldRef = 1;
+
+  // Selects a resource of the container: only resources limits and requests
+  // (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
+  // +optional
+  optional ResourceFieldSelector resourceFieldRef = 2;
+
+  // Selects a key of a ConfigMap.
+  // +optional
+  optional ConfigMapKeySelector configMapKeyRef = 3;
+
+  // Selects a key of a secret in the pod's namespace
+  // +optional
+  optional SecretKeySelector secretKeyRef = 4;
+}
+
+// Event is a report of an event somewhere in the cluster.
+message Event {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // The object that this event is about.
+  optional ObjectReference involvedObject = 2;
+
+  // This should be a short, machine understandable string that gives the reason
+  // for the transition into the object's current status.
+  // TODO: provide exact specification for format.
+  // +optional
+  optional string reason = 3;
+
+  // A human-readable description of the status of this operation.
+  // TODO: decide on maximum length.
+  // +optional
+  optional string message = 4;
+
+  // The component reporting this event. Should be a short machine understandable string.
+  // +optional
+  optional EventSource source = 5;
+
+  // The time at which the event was first recorded. (Time of server receipt is in TypeMeta.)
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time firstTimestamp = 6;
+
+  // The time at which the most recent occurrence of this event was recorded.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTimestamp = 7;
+
+  // The number of times this event has occurred.
+  // +optional
+  optional int32 count = 8;
+
+  // Type of this event (Normal, Warning), new types could be added in the future
+  // +optional
+  optional string type = 9;
+
+  // Time when this Event was first observed.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime eventTime = 10;
+
+  // Data about the Event series this event represents or nil if it's a singleton Event.
+  // +optional
+  optional EventSeries series = 11;
+
+  // What action was taken or failed regarding the Regarding object.
+  // +optional
+  optional string action = 12;
+
+  // Optional secondary object for more complex actions.
+  // +optional
+  optional ObjectReference related = 13;
+
+  // Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`.
+  // +optional
+  optional string reportingComponent = 14;
+
+  // ID of the controller instance, e.g. `kubelet-xyzf`.
+  // +optional
+  optional string reportingInstance = 15;
+}
+
+// EventList is a list of events.
+message EventList {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // List of events
+  repeated Event items = 2;
+}
+
+// EventSeries contains information on a series of events, i.e. a thing that was or is happening
+// continuously for some time.
+message EventSeries {
+  // Number of occurrences in this series up to the last heartbeat time
+  optional int32 count = 1;
+
+  // Time of the last occurrence observed
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime lastObservedTime = 2;
+
+  // State of this Series: Ongoing or Finished
+  optional string state = 3;
+}
+
+// EventSource contains information for an event.
+message EventSource {
+  // Component from which the event is generated.
+  // +optional
+  optional string component = 1;
+
+  // Node name on which the event is generated.
+  // +optional
+  optional string host = 2;
+}
+
+// ExecAction describes a "run in container" action.
+message ExecAction {
+  // Command is the command line to execute inside the container, the working directory for the
+  // command is root ('/') in the container's filesystem. The command is simply exec'd; it is
+  // not run inside a shell, so traditional shell instructions ('|', etc.) won't work. To use
+  // a shell, you need to explicitly call out to that shell.
+  // Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+  // +optional
+  repeated string command = 1;
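+
+  // For example, ["ls", "|", "wc"] does not create a pipeline; to get one,
+  // call a shell explicitly, e.g. ["/bin/sh", "-c", "ls | wc -l"].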
+}
+
+// Represents a Fibre Channel volume.
+// Fibre Channel volumes can only be mounted as read/write once.
+// Fibre Channel volumes support ownership management and SELinux relabeling.
+message FCVolumeSource {
+  // Optional: FC target worldwide names (WWNs)
+  // +optional
+  repeated string targetWWNs = 1;
+
+  // Optional: FC target lun number
+  // +optional
+  optional int32 lun = 2;
+
+  // Filesystem type to mount.
+  // Must be a filesystem type supported by the host operating system.
+  // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+  // TODO: how do we prevent errors in the filesystem from compromising the machine
+  // +optional
+  optional string fsType = 3;
+
+  // Optional: Defaults to false (read/write). ReadOnly here will force
+  // the ReadOnly setting in VolumeMounts.
+  // +optional
+  optional bool readOnly = 4;
+
+  // Optional: FC volume world wide identifiers (wwids)
+  // Either wwids or the combination of targetWWNs and lun must be set, but not both simultaneously.
+  // +optional
+  repeated string wwids = 5;
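+
+  // For example (hypothetical identifiers), set either
+  // wwids: ["3600508b400105e210000900000490000"] or
+  // targetWWNs: ["50060e801049cfd1"] together with lun: 0, never both.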
+}
+
+// FlexPersistentVolumeSource represents a generic persistent volume resource that is
+// provisioned/attached using an exec based plugin.
+message FlexPersistentVolumeSource {
+  // Driver is the name of the driver to use for this volume.
+  optional string driver = 1;
+
+  // Filesystem type to mount.
+  // Must be a filesystem type supported by the host operating system.
+  // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
+  // +optional
+  optional string fsType = 2;
+
+  // Optional: SecretRef is reference to the secret object containing
+  // sensitive information to pass to the plugin scripts. This may be
+  // empty if no secret object is specified. If the secret object
+  // contains more than one secret, all secrets are passed to the plugin
+  // scripts.
+  // +optional
+  optional SecretReference secretRef = 3;
+
+  // Optional: Defaults to false (read/write). ReadOnly here will force
+  // the ReadOnly setting in VolumeMounts.
+  // +optional
+  optional bool readOnly = 4;
+
+  // Optional: Extra command options if any.
+  // +optional
+  map<string, string> options = 5;
+}
+
+// FlexVolume represents a generic volume resource that is
+// provisioned/attached using an exec based plugin.
+message FlexVolumeSource {
+  // Driver is the name of the driver to use for this volume.
+  optional string driver = 1;
+
+  // Filesystem type to mount.
+  // Must be a filesystem type supported by the host operating system.
+  // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
+  // +optional
+  optional string fsType = 2;
+
+  // Optional: SecretRef is reference to the secret object containing
+  // sensitive information to pass to the plugin scripts. This may be
+  // empty if no secret object is specified. If the secret object
+  // contains more than one secret, all secrets are passed to the plugin
+  // scripts.
+  // +optional
+  optional LocalObjectReference secretRef = 3;
+
+  // Optional: Defaults to false (read/write). ReadOnly here will force
+  // the ReadOnly setting in VolumeMounts.
+  // +optional
+  optional bool readOnly = 4;
+
+  // Optional: Extra command options if any.
+  // +optional
+  map<string, string> options = 5;
+}
+
+// Represents a Flocker volume mounted by the Flocker agent.
+// One and only one of datasetName and datasetUUID should be set.
+// Flocker volumes do not support ownership management or SELinux relabeling.
+message FlockerVolumeSource {
+  // Name of the dataset, stored as metadata -> name on the dataset for Flocker.
+  // This field should be considered deprecated.
+  // +optional
+  optional string datasetName = 1;
+
+  // UUID of the dataset. This is the unique identifier of a Flocker dataset.
+  // +optional
+  optional string datasetUUID = 2;
+}
+
+// Represents a Persistent Disk resource in Google Compute Engine.
+//
+// A GCE PD must exist before mounting to a container. The disk must
+// also be in the same GCE project and zone as the kubelet. A GCE PD
+// can only be mounted as read/write once or read-only many times. GCE
+// PDs support ownership management and SELinux relabeling.
+message GCEPersistentDiskVolumeSource {
+  // Unique name of the PD resource in GCE. Used to identify the disk in GCE.
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+  optional string pdName = 1;
+
+  // Filesystem type of the volume that you want to mount.
+  // Tip: Ensure that the filesystem type is supported by the host operating system.
+  // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+  // TODO: how do we prevent errors in the filesystem from compromising the machine
+  // +optional
+  optional string fsType = 2;
+
+  // The partition in the volume that you want to mount.
+  // If omitted, the default is to mount by volume name.
+  // Examples: For volume /dev/sda1, you specify the partition as "1".
+  // Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+  // +optional
+  optional int32 partition = 3;
+
+  // ReadOnly here will force the ReadOnly setting in VolumeMounts.
+  // Defaults to false.
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+  // +optional
+  optional bool readOnly = 4;
+}
+
+// Represents a volume that is populated with the contents of a git repository.
+// Git repo volumes do not support ownership management.
+// Git repo volumes support SELinux relabeling.
+//
+// DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an
+// EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
+// into the Pod's container.
+message GitRepoVolumeSource {
+  // Repository URL
+  optional string repository = 1;
+
+  // Commit hash for the specified revision.
+  // +optional
+  optional string revision = 2;
+
+  // Target directory name.
+  // Must not contain or start with '..'.  If '.' is supplied, the volume directory will be the
+  // git repository.  Otherwise, if specified, the volume will contain the git repository in
+  // the subdirectory with the given name.
+  // +optional
+  optional string directory = 3;
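+
+  // For example, directory: "." checks the repository out at the volume root,
+  // while a hypothetical directory: "src" places it under <mount>/src.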
+}
+
+// Represents a Glusterfs mount that lasts the lifetime of a pod.
+// Glusterfs volumes do not support ownership management or SELinux relabeling.
+message GlusterfsPersistentVolumeSource {
+  // EndpointsName is the endpoint name that details Glusterfs topology.
+  // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
+  optional string endpoints = 1;
+
+  // Path is the Glusterfs volume path.
+  // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
+  optional string path = 2;
+
+  // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions.
+  // Defaults to false.
+  // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
+  // +optional
+  optional bool readOnly = 3;
+
+  // EndpointsNamespace is the namespace that contains Glusterfs endpoint.
+  // If this field is empty, the EndpointsNamespace defaults to the same namespace as the bound PVC.
+  // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
+  // +optional
+  optional string endpointsNamespace = 4;
+}
+
+// Represents a Glusterfs mount that lasts the lifetime of a pod.
+// Glusterfs volumes do not support ownership management or SELinux relabeling.
+message GlusterfsVolumeSource {
+  // EndpointsName is the endpoint name that details Glusterfs topology.
+  // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
+  optional string endpoints = 1;
+
+  // Path is the Glusterfs volume path.
+  // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
+  optional string path = 2;
+
+  // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions.
+  // Defaults to false.
+  // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
+  // +optional
+  optional bool readOnly = 3;
+}
+
+// HTTPGetAction describes an action based on HTTP Get requests.
+message HTTPGetAction {
+  // Path to access on the HTTP server.
+  // +optional
+  optional string path = 1;
+
+  // Name or number of the port to access on the container.
+  // Number must be in the range 1 to 65535.
+  // Name must be an IANA_SVC_NAME.
+  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2;
+
+  // Host name to connect to, defaults to the pod IP. You probably want to set
+  // "Host" in httpHeaders instead.
+  // +optional
+  optional string host = 3;
+
+  // Scheme to use for connecting to the host.
+  // Defaults to HTTP.
+  // +optional
+  optional string scheme = 4;
+
+  // Custom headers to set in the request. HTTP allows repeated headers.
+  // +optional
+  repeated HTTPHeader httpHeaders = 5;
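+
+  // For example (hypothetical probe), path: "/healthz" with port: 8080 and
+  // scheme: "HTTPS" probes https://<pod-ip>:8080/healthz.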
+}
+
+// HTTPHeader describes a custom header to be used in HTTP probes
+message HTTPHeader {
+  // The header field name
+  optional string name = 1;
+
+  // The header field value
+  optional string value = 2;
+}
+
+// Handler defines a specific action that should be taken
+// TODO: pass structured data to these actions, and document that data here.
+message Handler {
+  // One and only one of the following should be specified.
+  // Exec specifies the action to take.
+  // +optional
+  optional ExecAction exec = 1;
+
+  // HTTPGet specifies the http request to perform.
+  // +optional
+  optional HTTPGetAction httpGet = 2;
+
+  // TCPSocket specifies an action involving a TCP port.
+  // TCP hooks not yet supported
+  // TODO: implement a realistic TCP lifecycle hook
+  // +optional
+  optional TCPSocketAction tcpSocket = 3;
+}
+
+// HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the
+// pod's hosts file.
+message HostAlias {
+  // IP address of the host file entry.
+  optional string ip = 1;
+
+  // Hostnames for the above IP address.
+  repeated string hostnames = 2;
+}
+
+// Represents a host path mapped into a pod.
+// Host path volumes do not support ownership management or SELinux relabeling.
+message HostPathVolumeSource {
+  // Path of the directory on the host.
+  // If the path is a symlink, it will follow the link to the real path.
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+  optional string path = 1;
+
+  // Type for HostPath Volume
+  // Defaults to ""
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+  // +optional
+  optional string type = 2;
+}
+
+// ISCSIPersistentVolumeSource represents an ISCSI disk.
+// ISCSI volumes can only be mounted as read/write once.
+// ISCSI volumes support ownership management and SELinux relabeling.
+message ISCSIPersistentVolumeSource {
+  // iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port
+  // is other than default (typically TCP ports 860 and 3260).
+  optional string targetPortal = 1;
+
+  // Target iSCSI Qualified Name.
+  optional string iqn = 2;
+
+  // iSCSI Target LUN number.
+  optional int32 lun = 3;
+
+  // iSCSI Interface Name that uses an iSCSI transport.
+  // Defaults to 'default' (tcp).
+  // +optional
+  optional string iscsiInterface = 4;
+
+  // Filesystem type of the volume that you want to mount.
+  // Tip: Ensure that the filesystem type is supported by the host operating system.
+  // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
+  // TODO: how do we prevent errors in the filesystem from compromising the machine
+  // +optional
+  optional string fsType = 5;
+
+  // ReadOnly here will force the ReadOnly setting in VolumeMounts.
+  // Defaults to false.
+  // +optional
+  optional bool readOnly = 6;
+
+  // iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port
+  // is other than default (typically TCP ports 860 and 3260).
+  // +optional
+  repeated string portals = 7;
+
+  // Whether to support iSCSI Discovery CHAP authentication.
+  // +optional
+  optional bool chapAuthDiscovery = 8;
+
+  // Whether to support iSCSI Session CHAP authentication.
+  // +optional
+  optional bool chapAuthSession = 11;
+
+  // CHAP Secret for iSCSI target and initiator authentication
+  // +optional
+  optional SecretReference secretRef = 10;
+
+  // Custom iSCSI Initiator Name.
+  // If initiatorName is specified together with iscsiInterface, a new iSCSI interface
+  // <target portal>:<volume name> will be created for the connection.
+  // +optional
+  optional string initiatorName = 12;
+}
+
+// Represents an ISCSI disk.
+// ISCSI volumes can only be mounted as read/write once.
+// ISCSI volumes support ownership management and SELinux relabeling.
+message ISCSIVolumeSource {
+  // iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port
+  // is other than default (typically TCP ports 860 and 3260).
+  optional string targetPortal = 1;
+
+  // Target iSCSI Qualified Name.
+  optional string iqn = 2;
+
+  // iSCSI Target LUN number.
+  optional int32 lun = 3;
+
+  // iSCSI Interface Name that uses an iSCSI transport.
+  // Defaults to 'default' (tcp).
+  // +optional
+  optional string iscsiInterface = 4;
+
+  // Filesystem type of the volume that you want to mount.
+  // Tip: Ensure that the filesystem type is supported by the host operating system.
+  // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
+  // TODO: how do we prevent errors in the filesystem from compromising the machine
+  // +optional
+  optional string fsType = 5;
+
+  // ReadOnly here will force the ReadOnly setting in VolumeMounts.
+  // Defaults to false.
+  // +optional
+  optional bool readOnly = 6;
+
+  // iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port
+  // is other than default (typically TCP ports 860 and 3260).
+  // +optional
+  repeated string portals = 7;
+
+  // Whether to support iSCSI Discovery CHAP authentication.
+  // +optional
+  optional bool chapAuthDiscovery = 8;
+
+  // Whether to support iSCSI Session CHAP authentication.
+  // +optional
+  optional bool chapAuthSession = 11;
+
+  // CHAP Secret for iSCSI target and initiator authentication
+  // +optional
+  optional LocalObjectReference secretRef = 10;
+
+  // Custom iSCSI Initiator Name.
+  // If initiatorName is specified together with iscsiInterface, a new iSCSI interface
+  // <target portal>:<volume name> will be created for the connection.
+  // +optional
+  optional string initiatorName = 12;
+}
+
+// Maps a string key to a path within a volume.
+message KeyToPath {
+  // The key to project.
+  optional string key = 1;
+
+  // The relative path of the file to map the key to.
+  // May not be an absolute path.
+  // May not contain the path element '..'.
+  // May not start with the string '..'.
+  optional string path = 2;
+
+  // Optional: mode bits to use on this file, must be a value between 0
+  // and 0777. If not specified, the volume defaultMode will be used.
+  // This might be in conflict with other options that affect the file
+  // mode, like fsGroup, and as a result other mode bits could be set.
+  // +optional
+  optional int32 mode = 3;
+}
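+
+// Illustrative Go sketch (not part of the generated file): projecting a key
+// to a relative file path with an explicit mode, using the generated
+// k8s.io/api/core/v1 types; the key, path, and mode are examples only.
+//
+//   mode := int32(0644)
+//   item := v1.KeyToPath{
+//       Key:  "config.yaml",
+//       Path: "app/config.yaml", // relative: no leading '/' and no '..'
+//       Mode: &mode,
+//   }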
+
+// Lifecycle describes actions that the management system should take in response to container lifecycle
+// events. For the PostStart and PreStop lifecycle handlers, management of the container blocks
+// until the action is complete, unless the container process fails, in which case the handler is aborted.
+message Lifecycle {
+  // PostStart is called immediately after a container is created. If the handler fails,
+  // the container is terminated and restarted according to its restart policy.
+  // Other management of the container blocks until the hook completes.
+  // More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+  // +optional
+  optional Handler postStart = 1;
+
+  // PreStop is called immediately before a container is terminated.
+  // The container is terminated after the handler completes.
+  // The reason for termination is passed to the handler.
+  // Regardless of the outcome of the handler, the container is eventually terminated.
+  // Other management of the container blocks until the hook completes.
+  // More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+  // +optional
+  optional Handler preStop = 2;
+}
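+
+// Illustrative Go sketch (not part of the generated file): a lifecycle with
+// both hooks, built from the generated k8s.io/api/core/v1 types; the shell
+// command and shutdown endpoint are placeholders.
+//
+//   lc := v1.Lifecycle{
+//       PostStart: &v1.Handler{
+//           Exec: &v1.ExecAction{Command: []string{"/bin/sh", "-c", "echo started"}},
+//       },
+//       PreStop: &v1.Handler{
+//           HTTPGet: &v1.HTTPGetAction{Path: "/shutdown", Port: intstr.FromInt(8080)},
+//       },
+//   }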
+
+// LimitRange sets resource usage limits for each kind of resource in a Namespace.
+message LimitRange {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Spec defines the limits enforced.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+  // +optional
+  optional LimitRangeSpec spec = 2;
+}
+
+// LimitRangeItem defines a min/max usage limit for any resource that matches on kind.
+message LimitRangeItem {
+  // Type of resource that this limit applies to.
+  // +optional
+  optional string type = 1;
+
+  // Max usage constraints on this kind by resource name.
+  // +optional
+  map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> max = 2;
+
+  // Min usage constraints on this kind by resource name.
+  // +optional
+  map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> min = 3;
+
+  // Default resource requirement limit value by resource name if resource limit is omitted.
+  // +optional
+  map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> default = 4;
+
+  // DefaultRequest is the default resource requirement request value by resource name if resource request is omitted.
+  // +optional
+  map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> defaultRequest = 5;
+
+  // MaxLimitRequestRatio if specified, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource.
+  // +optional
+  map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> maxLimitRequestRatio = 6;
+}
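+
+// Illustrative Go sketch (not part of the generated file): a per-container
+// CPU limit built from the generated k8s.io/api/core/v1 types; the
+// quantities are examples only (resource.MustParse comes from
+// k8s.io/apimachinery/pkg/api/resource).
+//
+//   item := v1.LimitRangeItem{
+//       Type:    v1.LimitTypeContainer,
+//       Max:     v1.ResourceList{v1.ResourceCPU: resource.MustParse("2")},
+//       Min:     v1.ResourceList{v1.ResourceCPU: resource.MustParse("100m")},
+//       Default: v1.ResourceList{v1.ResourceCPU: resource.MustParse("500m")},
+//   }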
+
+// LimitRangeList is a list of LimitRange items.
+message LimitRangeList {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is a list of LimitRange objects.
+  // More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
+  repeated LimitRange items = 2;
+}
+
+// LimitRangeSpec defines a min/max usage limit for resources that match on kind.
+message LimitRangeSpec {
+  // Limits is the list of LimitRangeItem objects that are enforced.
+  repeated LimitRangeItem limits = 1;
+}
+
+// List holds a list of objects, which may not be known by the server.
+message List {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // List of objects
+  repeated k8s.io.apimachinery.pkg.runtime.RawExtension items = 2;
+}
+
+// LoadBalancerIngress represents the status of a load-balancer ingress point:
+// traffic intended for the service should be sent to an ingress point.
+message LoadBalancerIngress {
+  // IP is set for load-balancer ingress points that are IP based
+  // (typically GCE or OpenStack load-balancers)
+  // +optional
+  optional string ip = 1;
+
+  // Hostname is set for load-balancer ingress points that are DNS based
+  // (typically AWS load-balancers)
+  // +optional
+  optional string hostname = 2;
+}
+
+// LoadBalancerStatus represents the status of a load-balancer.
+message LoadBalancerStatus {
+  // Ingress is a list containing ingress points for the load-balancer.
+  // Traffic intended for the service should be sent to these ingress points.
+  // +optional
+  repeated LoadBalancerIngress ingress = 1;
+}
+
+// LocalObjectReference contains enough information to let you locate the
+// referenced object inside the same namespace.
+message LocalObjectReference {
+  // Name of the referent.
+  // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+  // TODO: Add other useful fields. apiVersion, kind, uid?
+  // +optional
+  optional string name = 1;
+}
+
+// Local represents directly-attached storage with node affinity (Beta feature)
+message LocalVolumeSource {
+  // The full path to the volume on the node.
+  // It can be either a directory or block device (disk, partition, ...).
+  optional string path = 1;
+
+  // Filesystem type to mount.
+  // It applies only when the Path is a block device.
+  // Must be a filesystem type supported by the host operating system.
+  // Ex. "ext4", "xfs", "ntfs". The default value is to auto-select a fileystem if unspecified.
+  // +optional
+  optional string fsType = 2;
+}
+
+// Represents an NFS mount that lasts the lifetime of a pod.
+// NFS volumes do not support ownership management or SELinux relabeling.
+message NFSVolumeSource {
+  // Server is the hostname or IP address of the NFS server.
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+  optional string server = 1;
+
+  // Path that is exported by the NFS server.
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+  optional string path = 2;
+
+  // ReadOnly here will force
+  // the NFS export to be mounted with read-only permissions.
+  // Defaults to false.
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+  // +optional
+  optional bool readOnly = 3;
+}
+
+// Namespace provides a scope for Names.
+// Use of multiple namespaces is optional.
+message Namespace {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Spec defines the behavior of the Namespace.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+  // +optional
+  optional NamespaceSpec spec = 2;
+
+  // Status describes the current status of a Namespace.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+  // +optional
+  optional NamespaceStatus status = 3;
+}
+
+// NamespaceList is a list of Namespaces.
+message NamespaceList {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is the list of Namespace objects in the list.
+  // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
+  repeated Namespace items = 2;
+}
+
+// NamespaceSpec describes the attributes on a Namespace.
+message NamespaceSpec {
+  // Finalizers is an opaque list of values that must be empty to permanently remove the object from storage.
+  // More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/
+  // +optional
+  repeated string finalizers = 1;
+}
+
+// NamespaceStatus is information about the current status of a Namespace.
+message NamespaceStatus {
+  // Phase is the current lifecycle phase of the namespace.
+  // More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/
+  // +optional
+  optional string phase = 1;
+}
+
+// Node is a worker node in Kubernetes.
+// Each node will have a unique identifier in the cache (i.e. in etcd).
+message Node {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Spec defines the behavior of a node.
+  // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+  // +optional
+  optional NodeSpec spec = 2;
+
+  // Most recently observed status of the node.
+  // Populated by the system.
+  // Read-only.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+  // +optional
+  optional NodeStatus status = 3;
+}
+
+// NodeAddress contains information for the node's address.
+message NodeAddress {
+  // Node address type, one of Hostname, ExternalIP or InternalIP.
+  optional string type = 1;
+
+  // The node address.
+  optional string address = 2;
+}
+
+// Node affinity is a group of node affinity scheduling rules.
+message NodeAffinity {
+  // If the affinity requirements specified by this field are not met at
+  // scheduling time, the pod will not be scheduled onto the node.
+  // If the affinity requirements specified by this field cease to be met
+  // at some point during pod execution (e.g. due to an update), the system
+  // may or may not try to eventually evict the pod from its node.
+  // +optional
+  optional NodeSelector requiredDuringSchedulingIgnoredDuringExecution = 1;
+
+  // The scheduler will prefer to schedule pods to nodes that satisfy
+  // the affinity expressions specified by this field, but it may choose
+  // a node that violates one or more of the expressions. The node that is
+  // most preferred is the one with the greatest sum of weights, i.e.
+  // for each node that meets all of the scheduling requirements (resource
+  // request, requiredDuringScheduling affinity expressions, etc.),
+  // compute a sum by iterating through the elements of this field and adding
+  // "weight" to the sum if the node matches the corresponding matchExpressions; the
+  // node(s) with the highest sum are the most preferred.
+  // +optional
+  repeated PreferredSchedulingTerm preferredDuringSchedulingIgnoredDuringExecution = 2;
+}
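+
+// Illustrative Go sketch (not part of the generated file): a soft preference
+// for nodes labeled disktype=ssd, using the generated k8s.io/api/core/v1
+// types; the label key, value, and weight are placeholders.
+//
+//   aff := v1.NodeAffinity{
+//       PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{{
+//           Weight: 10,
+//           Preference: v1.NodeSelectorTerm{
+//               MatchExpressions: []v1.NodeSelectorRequirement{{
+//                   Key: "disktype", Operator: v1.NodeSelectorOpIn, Values: []string{"ssd"},
+//               }},
+//           },
+//       }},
+//   }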
+
+// NodeCondition contains condition information for a node.
+message NodeCondition {
+  // Type of node condition.
+  optional string type = 1;
+
+  // Status of the condition, one of True, False, Unknown.
+  optional string status = 2;
+
+  // Last time we got an update on a given condition.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastHeartbeatTime = 3;
+
+  // Last time the condition transitioned from one status to another.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4;
+
+  // (brief) reason for the condition's last transition.
+  // +optional
+  optional string reason = 5;
+
+  // Human readable message indicating details about last transition.
+  // +optional
+  optional string message = 6;
+}
+
+// NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil.
+message NodeConfigSource {
+  // ConfigMap is a reference to a Node's ConfigMap
+  optional ConfigMapNodeConfigSource configMap = 2;
+}
+
+// NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource.
+message NodeConfigStatus {
+  // Assigned reports the checkpointed config the node will try to use.
+  // When Node.Spec.ConfigSource is updated, the node checkpoints the associated
+  // config payload to local disk, along with a record indicating intended
+  // config. The node refers to this record to choose its config checkpoint, and
+  // reports this record in Assigned. Assigned only updates in the status after
+  // the record has been checkpointed to disk. When the Kubelet is restarted,
+  // it tries to make the Assigned config the Active config by loading and
+  // validating the checkpointed payload identified by Assigned.
+  // +optional
+  optional NodeConfigSource assigned = 1;
+
+  // Active reports the checkpointed config the node is actively using.
+  // Active will represent either the current version of the Assigned config,
+  // or the current LastKnownGood config, depending on whether attempting to use the
+  // Assigned config results in an error.
+  // +optional
+  optional NodeConfigSource active = 2;
+
+  // LastKnownGood reports the checkpointed config the node will fall back to
+  // when it encounters an error attempting to use the Assigned config.
+  // The Assigned config becomes the LastKnownGood config when the node determines
+  // that the Assigned config is stable and correct.
+  // This is currently implemented as a 10-minute soak period starting when the local
+  // record of Assigned config is updated. If the Assigned config is Active at the end
+  // of this period, it becomes the LastKnownGood. Note that if Spec.ConfigSource is
+  // reset to nil (use local defaults), the LastKnownGood is also immediately reset to nil,
+  // because the local default config is always assumed good.
+  // You should not make assumptions about the node's method of determining config stability
+  // and correctness, as this may change or become configurable in the future.
+  // +optional
+  optional NodeConfigSource lastKnownGood = 3;
+
+  // Error describes any problems reconciling the Spec.ConfigSource to the Active config.
+  // Errors may occur, for example, attempting to checkpoint Spec.ConfigSource to the local Assigned
+  // record, attempting to checkpoint the payload associated with Spec.ConfigSource, attempting
+  // to load or validate the Assigned config, etc.
+  // Errors may occur at different points while syncing config. Earlier errors (e.g. download or
+  // checkpointing errors) will not result in a rollback to LastKnownGood, and may resolve across
+  // Kubelet retries. Later errors (e.g. loading or validating a checkpointed config) will result in
+  // a rollback to LastKnownGood. In the latter case, it is usually possible to resolve the error
+  // by fixing the config assigned in Spec.ConfigSource.
+  // You can find additional information for debugging by searching the error message in the Kubelet log.
+  // Error is a human-readable description of the error state; machines can check whether or not Error
+  // is empty, but should not rely on the stability of the Error text across Kubelet versions.
+  // +optional
+  optional string error = 4;
+}
+
+// NodeDaemonEndpoints lists ports opened by daemons running on the Node.
+message NodeDaemonEndpoints {
+  // Endpoint on which Kubelet is listening.
+  // +optional
+  optional DaemonEndpoint kubeletEndpoint = 1;
+}
+
+// NodeList is the whole list of all Nodes which have been registered with the master.
+message NodeList {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // List of nodes
+  repeated Node items = 2;
+}
+
+// NodeProxyOptions is the query options to a Node's proxy call.
+message NodeProxyOptions {
+  // Path is the URL path to use for the current proxy request to node.
+  // +optional
+  optional string path = 1;
+}
+
+// NodeResources is an object for conveying resource information about a node.
+// see http://releases.k8s.io/HEAD/docs/design/resources.md for more details.
+message NodeResources {
+  // Capacity represents the available resources of a node
+  map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> capacity = 1;
+}
+
+// A node selector represents the union of the results of one or more label queries
+// over a set of nodes; that is, it represents the OR of the selectors represented
+// by the node selector terms.
+message NodeSelector {
+  // Required. A list of node selector terms. The terms are ORed.
+  repeated NodeSelectorTerm nodeSelectorTerms = 1;
+}
+
+// A node selector requirement is a selector that contains values, a key, and an operator
+// that relates the key and values.
+message NodeSelectorRequirement {
+  // The label key that the selector applies to.
+  optional string key = 1;
+
+  // Represents a key's relationship to a set of values.
+  // Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+  optional string operator = 2;
+
+  // An array of string values. If the operator is In or NotIn,
+  // the values array must be non-empty. If the operator is Exists or DoesNotExist,
+  // the values array must be empty. If the operator is Gt or Lt, the values
+  // array must have a single element, which will be interpreted as an integer.
+  // This array is replaced during a strategic merge patch.
+  // +optional
+  repeated string values = 3;
+}
+
+// A null or empty node selector term matches no objects. Its requirements
+// are ANDed.
+// The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+message NodeSelectorTerm {
+  // A list of node selector requirements by node's labels.
+  // +optional
+  repeated NodeSelectorRequirement matchExpressions = 1;
+
+  // A list of node selector requirements by node's fields.
+  // +optional
+  repeated NodeSelectorRequirement matchFields = 2;
+}
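+
+// Illustrative Go sketch (not part of the generated file): terms in a
+// NodeSelector are ORed, while the requirements inside one term are ANDed.
+// This single-term selector, built from the generated k8s.io/api/core/v1
+// types, uses made-up hostnames.
+//
+//   sel := v1.NodeSelector{
+//       NodeSelectorTerms: []v1.NodeSelectorTerm{{
+//           MatchExpressions: []v1.NodeSelectorRequirement{{
+//               Key:      "kubernetes.io/hostname",
+//               Operator: v1.NodeSelectorOpIn,
+//               Values:   []string{"node-a", "node-b"},
+//           }},
+//       }},
+//   }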
+
+// NodeSpec describes the attributes that a node is created with.
+message NodeSpec {
+  // PodCIDR represents the pod IP range assigned to the node.
+  // +optional
+  optional string podCIDR = 1;
+
+  // ID of the node assigned by the cloud provider in the format: <ProviderName>://<ProviderSpecificNodeID>
+  // +optional
+  optional string providerID = 3;
+
+  // Unschedulable controls node schedulability of new pods. By default, the node is schedulable.
+  // More info: https://kubernetes.io/docs/concepts/nodes/node/#manual-node-administration
+  // +optional
+  optional bool unschedulable = 4;
+
+  // If specified, the node's taints.
+  // +optional
+  repeated Taint taints = 5;
+
+  // If specified, the source to get node configuration from.
+  // The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field.
+  // +optional
+  optional NodeConfigSource configSource = 6;
+
+  // Deprecated. Not all kubelets will set this field. Remove field after 1.13.
+  // see: https://issues.k8s.io/61966
+  // +optional
+  optional string externalID = 2;
+}
+
+// NodeStatus is information about the current status of a node.
+message NodeStatus {
+  // Capacity represents the total resources of a node.
+  // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity
+  // +optional
+  map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> capacity = 1;
+
+  // Allocatable represents the resources of a node that are available for scheduling.
+  // Defaults to Capacity.
+  // +optional
+  map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> allocatable = 2;
+
+  // NodePhase is the recently observed lifecycle phase of the node.
+  // More info: https://kubernetes.io/docs/concepts/nodes/node/#phase
+  // The field is never populated, and is now deprecated.
+  // +optional
+  optional string phase = 3;
+
+  // Conditions is an array of current observed node conditions.
+  // More info: https://kubernetes.io/docs/concepts/nodes/node/#condition
+  // +optional
+  // +patchMergeKey=type
+  // +patchStrategy=merge
+  repeated NodeCondition conditions = 4;
+
+  // List of addresses at which the node is reachable.
+  // Queried from cloud provider, if available.
+  // More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses
+  // +optional
+  // +patchMergeKey=type
+  // +patchStrategy=merge
+  repeated NodeAddress addresses = 5;
+
+  // Endpoints of daemons running on the Node.
+  // +optional
+  optional NodeDaemonEndpoints daemonEndpoints = 6;
+
+  // Set of ids/uuids to uniquely identify the node.
+  // More info: https://kubernetes.io/docs/concepts/nodes/node/#info
+  // +optional
+  optional NodeSystemInfo nodeInfo = 7;
+
+  // List of container images on this node
+  // +optional
+  repeated ContainerImage images = 8;
+
+  // List of attachable volumes in use (mounted) by the node.
+  // +optional
+  repeated string volumesInUse = 9;
+
+  // List of volumes that are attached to the node.
+  // +optional
+  repeated AttachedVolume volumesAttached = 10;
+
+  // Status of the config assigned to the node via the dynamic Kubelet config feature.
+  // +optional
+  optional NodeConfigStatus config = 11;
+}
+
+// NodeSystemInfo is a set of ids/uuids to uniquely identify the node.
+message NodeSystemInfo {
+  // MachineID reported by the node. For unique machine identification
+  // in the cluster this field is preferred. Learn more from man(5)
+  // machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html
+  optional string machineID = 1;
+
+  // SystemUUID reported by the node. For unique machine identification
+  // MachineID is preferred. This field is specific to Red Hat hosts
+  // https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html
+  optional string systemUUID = 2;
+
+  // Boot ID reported by the node.
+  optional string bootID = 3;
+
+  // Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64).
+  optional string kernelVersion = 4;
+
+  // OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).
+  optional string osImage = 5;
+
+  // ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0).
+  optional string containerRuntimeVersion = 6;
+
+  // Kubelet Version reported by the node.
+  optional string kubeletVersion = 7;
+
+  // KubeProxy Version reported by the node.
+  optional string kubeProxyVersion = 8;
+
+  // The Operating System reported by the node
+  optional string operatingSystem = 9;
+
+  // The Architecture reported by the node
+  optional string architecture = 10;
+}
+
+// ObjectFieldSelector selects an APIVersioned field of an object.
+message ObjectFieldSelector {
+  // Version of the schema the FieldPath is written in terms of, defaults to "v1".
+  // +optional
+  optional string apiVersion = 1;
+
+  // Path of the field to select in the specified API version.
+  optional string fieldPath = 2;
+}
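+
+// Illustrative Go sketch (not part of the generated file): the common
+// downward-API use, exposing the pod name as an environment variable via the
+// generated k8s.io/api/core/v1 types; the variable name is arbitrary.
+//
+//   env := v1.EnvVar{
+//       Name: "POD_NAME",
+//       ValueFrom: &v1.EnvVarSource{
+//           FieldRef: &v1.ObjectFieldSelector{FieldPath: "metadata.name"},
+//       },
+//   }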
+
+// ObjectReference contains enough information to let you inspect or modify the referred object.
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+message ObjectReference {
+  // Kind of the referent.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+  // +optional
+  optional string kind = 1;
+
+  // Namespace of the referent.
+  // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
+  // +optional
+  optional string namespace = 2;
+
+  // Name of the referent.
+  // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+  // +optional
+  optional string name = 3;
+
+  // UID of the referent.
+  // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
+  // +optional
+  optional string uid = 4;
+
+  // API version of the referent.
+  // +optional
+  optional string apiVersion = 5;
+
+  // Specific resourceVersion to which this reference is made, if any.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency
+  // +optional
+  optional string resourceVersion = 6;
+
+  // If referring to a piece of an object instead of an entire object, this string
+  // should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
+  // For example, if the object reference is to a container within a pod, this would take on a value like:
+  // "spec.containers{name}" (where "name" refers to the name of the container that triggered
+  // the event) or if no container name is specified "spec.containers[2]" (container with
+  // index 2 in this pod). This syntax is chosen only to have some well-defined way of
+  // referencing a part of an object.
+  // TODO: this design is not final and this field is subject to change in the future.
+  // +optional
+  optional string fieldPath = 7;
+}
+
+// PersistentVolume (PV) is a storage resource provisioned by an administrator.
+// It is analogous to a node.
+// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes
+message PersistentVolume {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Spec defines a specification of a persistent volume owned by the cluster.
+  // Provisioned by an administrator.
+  // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes
+  // +optional
+  optional PersistentVolumeSpec spec = 2;
+
+  // Status represents the current information/status for the persistent volume.
+  // Populated by the system.
+  // Read-only.
+  // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes
+  // +optional
+  optional PersistentVolumeStatus status = 3;
+}
+
+// PersistentVolumeClaim is a user's request for and claim to a persistent volume
+message PersistentVolumeClaim {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Spec defines the desired characteristics of a volume requested by a pod author.
+  // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
+  // +optional
+  optional PersistentVolumeClaimSpec spec = 2;
+
+  // Status represents the current information/status of a persistent volume claim.
+  // Read-only.
+  // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
+  // +optional
+  optional PersistentVolumeClaimStatus status = 3;
+}
+
+// PersistentVolumeClaimCondition contains details about the state of a PVC.
+message PersistentVolumeClaimCondition {
+  optional string type = 1;
+
+  optional string status = 2;
+
+  // Last time we probed the condition.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3;
+
+  // Last time the condition transitioned from one status to another.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4;
+
+  // Unique, this should be a short, machine-understandable string that gives the reason
+  // for the condition's last transition. If it reports "ResizeStarted", the underlying
+  // persistent volume is being resized.
+  // +optional
+  optional string reason = 5;
+
+  // Human-readable message indicating details about last transition.
+  // +optional
+  optional string message = 6;
+}
+
+// PersistentVolumeClaimList is a list of PersistentVolumeClaim items.
+message PersistentVolumeClaimList {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // A list of persistent volume claims.
+  // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
+  repeated PersistentVolumeClaim items = 2;
+}
+
+// PersistentVolumeClaimSpec describes the common attributes of storage devices
+// and allows a Source for provider-specific attributes
+message PersistentVolumeClaimSpec {
+  // AccessModes contains the desired access modes the volume should have.
+  // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+  // +optional
+  repeated string accessModes = 1;
+
+  // A label query over volumes to consider for binding.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4;
+
+  // Resources represents the minimum resources the volume should have.
+  // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
+  // +optional
+  optional ResourceRequirements resources = 2;
+
+  // VolumeName is the binding reference to the PersistentVolume backing this claim.
+  // +optional
+  optional string volumeName = 3;
+
+  // Name of the StorageClass required by the claim.
+  // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
+  // +optional
+  optional string storageClassName = 5;
+
+  // volumeMode defines what type of volume is required by the claim.
+  // Value of Filesystem is implied when not included in claim spec.
+  // This is a beta feature.
+  // +optional
+  optional string volumeMode = 6;
+
+  // This field requires the VolumeSnapshotDataSource alpha feature gate to be
+  // enabled and currently VolumeSnapshot is the only supported data source.
+  // If the provisioner can support VolumeSnapshot data source, it will create
+  // a new volume and data will be restored to the volume at the same time.
+  // If the provisioner does not support VolumeSnapshot data source, volume will
+  // not be created and the failure will be reported as an event.
+  // In the future, we plan to support more data source types and the behavior
+  // of the provisioner may change.
+  // +optional
+  optional TypedLocalObjectReference dataSource = 7;
+}
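+
+// Illustrative Go sketch (not part of the generated file): a minimal claim
+// spec using the generated k8s.io/api/core/v1 types; the requested size and
+// storage class name are placeholders.
+//
+//   sc := "standard"
+//   spec := v1.PersistentVolumeClaimSpec{
+//       AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
+//       Resources: v1.ResourceRequirements{
+//           Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("10Gi")},
+//       },
+//       StorageClassName: &sc,
+//   }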
+
+// PersistentVolumeClaimStatus is the current status of a persistent volume claim.
+message PersistentVolumeClaimStatus {
+  // Phase represents the current phase of PersistentVolumeClaim.
+  // +optional
+  optional string phase = 1;
+
+  // AccessModes contains the actual access modes the volume backing the PVC has.
+  // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+  // +optional
+  repeated string accessModes = 2;
+
+  // Represents the actual resources of the underlying volume.
+  // +optional
+  map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> capacity = 3;
+
+  // Current conditions of the persistent volume claim. If the underlying persistent volume is
+  // being resized, the condition will be set to 'ResizeStarted'.
+  // +optional
+  // +patchMergeKey=type
+  // +patchStrategy=merge
+  repeated PersistentVolumeClaimCondition conditions = 4;
+}
+
+// PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace.
+// This volume finds the bound PV and mounts that volume for the pod. A
+// PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another
+// type of volume that is owned by someone else (the system).
+message PersistentVolumeClaimVolumeSource {
+  // ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume.
+  // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
+  optional string claimName = 1;
+
+  // Will force the ReadOnly setting in VolumeMounts.
+  // Default false.
+  // +optional
+  optional bool readOnly = 2;
+}
+
+// PersistentVolumeList is a list of PersistentVolume items.
+message PersistentVolumeList {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // List of persistent volumes.
+  // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes
+  repeated PersistentVolume items = 2;
+}
+
+// PersistentVolumeSource is similar to VolumeSource but meant for the
+// administrator who creates PVs. Exactly one of its members must be set.
+message PersistentVolumeSource {
+  // GCEPersistentDisk represents a GCE Disk resource that is attached to a
+  // kubelet's host machine and then exposed to the pod. Provisioned by an admin.
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+  // +optional
+  optional GCEPersistentDiskVolumeSource gcePersistentDisk = 1;
+
+  // AWSElasticBlockStore represents an AWS Disk resource that is attached to a
+  // kubelet's host machine and then exposed to the pod.
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+  // +optional
+  optional AWSElasticBlockStoreVolumeSource awsElasticBlockStore = 2;
+
+  // HostPath represents a directory on the host.
+  // Provisioned by a developer or tester.
+  // This is useful for single-node development and testing only!
+  // On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster.
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+  // +optional
+  optional HostPathVolumeSource hostPath = 3;
+
+  // Glusterfs represents a Glusterfs volume that is attached to a host and
+  // exposed to the pod. Provisioned by an admin.
+  // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md
+  // +optional
+  optional GlusterfsPersistentVolumeSource glusterfs = 4;
+
+  // NFS represents an NFS mount on the host. Provisioned by an admin.
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+  // +optional
+  optional NFSVolumeSource nfs = 5;
+
+  // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime.
+  // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md
+  // +optional
+  optional RBDPersistentVolumeSource rbd = 6;
+
+  // ISCSI represents an ISCSI Disk resource that is attached to a
+  // kubelet's host machine and then exposed to the pod. Provisioned by an admin.
+  // +optional
+  optional ISCSIPersistentVolumeSource iscsi = 7;
+
+  // Cinder represents a cinder volume attached and mounted on the kubelet's host machine
+  // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
+  // +optional
+  optional CinderPersistentVolumeSource cinder = 8;
+
+  // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime
+  // +optional
+  optional CephFSPersistentVolumeSource cephfs = 9;
+
+  // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.
+  // +optional
+  optional FCVolumeSource fc = 10;
+
+  // Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service running.
+  // +optional
+  optional FlockerVolumeSource flocker = 11;
+
+  // FlexVolume represents a generic volume resource that is
+  // provisioned/attached using an exec based plugin.
+  // +optional
+  optional FlexPersistentVolumeSource flexVolume = 12;
+
+  // AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
+  // +optional
+  optional AzureFilePersistentVolumeSource azureFile = 13;
+
+  // VsphereVolume represents a vSphere volume attached and mounted on the kubelet's host machine
+  // +optional
+  optional VsphereVirtualDiskVolumeSource vsphereVolume = 14;
+
+  // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime
+  // +optional
+  optional QuobyteVolumeSource quobyte = 15;
+
+  // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
+  // +optional
+  optional AzureDiskVolumeSource azureDisk = 16;
+
+  // PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on the kubelet's host machine
+  optional PhotonPersistentDiskVolumeSource photonPersistentDisk = 17;
+
+  // PortworxVolume represents a portworx volume attached and mounted on the kubelet's host machine
+  // +optional
+  optional PortworxVolumeSource portworxVolume = 18;
+
+  // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
+  // +optional
+  optional ScaleIOPersistentVolumeSource scaleIO = 19;
+
+  // Local represents directly-attached storage with node affinity
+  // +optional
+  optional LocalVolumeSource local = 20;
+
+  // StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod
+  // More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md
+  // +optional
+  optional StorageOSPersistentVolumeSource storageos = 21;
+
+  // CSI represents storage that is handled by an external CSI driver (Beta feature).
+  // +optional
+  optional CSIPersistentVolumeSource csi = 22;
+}
+
+// PersistentVolumeSpec is the specification of a persistent volume.
+message PersistentVolumeSpec {
+  // A description of the persistent volume's resources and capacity.
+  // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity
+  // +optional
+  map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> capacity = 1;
+
+  // The actual volume backing the persistent volume.
+  optional PersistentVolumeSource persistentVolumeSource = 2;
+
+  // AccessModes contains all ways the volume can be mounted.
+  // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes
+  // +optional
+  repeated string accessModes = 3;
+
+  // ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim.
+  // Expected to be non-nil when bound.
+  // claim.VolumeName is the authoritative bind between PV and PVC.
+  // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#binding
+  // +optional
+  optional ObjectReference claimRef = 4;
+
+  // What happens to a persistent volume when released from its claim.
+  // Valid options are Retain (default for manually created PersistentVolumes), Delete (default
+  // for dynamically provisioned PersistentVolumes), and Recycle (deprecated).
+  // Recycle must be supported by the volume plugin underlying this PersistentVolume.
+  // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming
+  // +optional
+  optional string persistentVolumeReclaimPolicy = 5;
+
+  // Name of StorageClass to which this persistent volume belongs. Empty value
+  // means that this volume does not belong to any StorageClass.
+  // +optional
+  optional string storageClassName = 6;
+
+  // A list of mount options, e.g. ["ro", "soft"]. Not validated - mount will
+  // simply fail if one is invalid.
+  // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options
+  // +optional
+  repeated string mountOptions = 7;
+
+  // volumeMode defines if a volume is intended to be used with a formatted filesystem
+  // or to remain in raw block state. Value of Filesystem is implied when not included in spec.
+  // This is a beta feature.
+  // +optional
+  optional string volumeMode = 8;
+
+  // NodeAffinity defines constraints that limit what nodes this volume can be accessed from.
+  // This field influences the scheduling of pods that use this volume.
+  // +optional
+  optional VolumeNodeAffinity nodeAffinity = 9;
+}
+
+// PersistentVolumeStatus is the current status of a persistent volume.
+message PersistentVolumeStatus {
+  // Phase indicates if a volume is available, bound to a claim, or released by a claim.
+  // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase
+  // +optional
+  optional string phase = 1;
+
+  // A human-readable message indicating details about why the volume is in this state.
+  // +optional
+  optional string message = 2;
+
+  // Reason is a brief CamelCase string that describes any failure and is meant
+  // for machine parsing and tidy display in the CLI.
+  // +optional
+  optional string reason = 3;
+}
+
+// Represents a Photon Controller persistent disk resource.
+message PhotonPersistentDiskVolumeSource {
+  // ID that identifies the Photon Controller persistent disk
+  optional string pdID = 1;
+
+  // Filesystem type to mount.
+  // Must be a filesystem type supported by the host operating system.
+  // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+  optional string fsType = 2;
+}
+
+// Pod is a collection of containers that can run on a host. This resource is created
+// by clients and scheduled onto hosts.
+message Pod {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Specification of the desired behavior of the pod.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+  // +optional
+  optional PodSpec spec = 2;
+
+  // Most recently observed status of the pod.
+  // This data may not be up to date.
+  // Populated by the system.
+  // Read-only.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+  // +optional
+  optional PodStatus status = 3;
+}
+
+// Pod affinity is a group of inter-pod affinity scheduling rules.
+message PodAffinity {
+  // If the affinity requirements specified by this field are not met at
+  // scheduling time, the pod will not be scheduled onto the node.
+  // If the affinity requirements specified by this field cease to be met
+  // at some point during pod execution (e.g. due to a pod label update), the
+  // system may or may not try to eventually evict the pod from its node.
+  // When there are multiple elements, the lists of nodes corresponding to each
+  // podAffinityTerm are intersected, i.e. all terms must be satisfied.
+  // +optional
+  repeated PodAffinityTerm requiredDuringSchedulingIgnoredDuringExecution = 1;
+
+  // The scheduler will prefer to schedule pods to nodes that satisfy
+  // the affinity expressions specified by this field, but it may choose
+  // a node that violates one or more of the expressions. The node that is
+  // most preferred is the one with the greatest sum of weights, i.e.
+  // for each node that meets all of the scheduling requirements (resource
+  // request, requiredDuringScheduling affinity expressions, etc.),
+  // compute a sum by iterating through the elements of this field and adding
+  // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
+  // node(s) with the highest sum are the most preferred.
+  // +optional
+  repeated WeightedPodAffinityTerm preferredDuringSchedulingIgnoredDuringExecution = 2;
+}
+
+// Defines a set of pods (namely those matching the labelSelector
+// relative to the given namespace(s)) that this pod should be
+// co-located (affinity) or not co-located (anti-affinity) with,
+// where co-located is defined as running on a node whose value of
+// the label with key <topologyKey> matches that of any node on which
+// a pod of the set of pods is running
+message PodAffinityTerm {
+  // A label query over a set of resources, in this case pods.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 1;
+
+  // namespaces specifies which namespaces the labelSelector applies to (matches against);
+  // null or empty list means "this pod's namespace"
+  // +optional
+  repeated string namespaces = 2;
+
+  // This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+  // the labelSelector in the specified namespaces, where co-located is defined as running on a node
+  // whose value of the label with key topologyKey matches that of any node on which any of the
+  // selected pods is running.
+  // Empty topologyKey is not allowed.
+  optional string topologyKey = 3;
+}
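+
+// Illustrative Go sketch (not part of the generated file): a term that keeps
+// pods labeled app=web apart per node when used in anti-affinity, built from
+// the generated k8s.io/api/core/v1 and k8s.io/apimachinery/pkg/apis/meta/v1
+// types; the label is an example.
+//
+//   term := v1.PodAffinityTerm{
+//       LabelSelector: &metav1.LabelSelector{
+//           MatchLabels: map[string]string{"app": "web"},
+//       },
+//       TopologyKey: "kubernetes.io/hostname",
+//   }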
+
+// Pod anti-affinity is a group of inter-pod anti-affinity scheduling rules.
+message PodAntiAffinity {
+  // If the anti-affinity requirements specified by this field are not met at
+  // scheduling time, the pod will not be scheduled onto the node.
+  // If the anti-affinity requirements specified by this field cease to be met
+  // at some point during pod execution (e.g. due to a pod label update), the
+  // system may or may not try to eventually evict the pod from its node.
+  // When there are multiple elements, the lists of nodes corresponding to each
+  // podAffinityTerm are intersected, i.e. all terms must be satisfied.
+  // +optional
+  repeated PodAffinityTerm requiredDuringSchedulingIgnoredDuringExecution = 1;
+
+  // The scheduler will prefer to schedule pods to nodes that satisfy
+  // the anti-affinity expressions specified by this field, but it may choose
+  // a node that violates one or more of the expressions. The node that is
+  // most preferred is the one with the greatest sum of weights, i.e.
+  // for each node that meets all of the scheduling requirements (resource
+  // request, requiredDuringScheduling anti-affinity expressions, etc.),
+  // compute a sum by iterating through the elements of this field and adding
+  // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
+  // node(s) with the highest sum are the most preferred.
+  // +optional
+  repeated WeightedPodAffinityTerm preferredDuringSchedulingIgnoredDuringExecution = 2;
+}
+
+// PodAttachOptions is the query options to a Pod's remote attach call.
+// ---
+// TODO: merge w/ PodExecOptions below for stdin, stdout, etc
+// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stderr and TTY
+message PodAttachOptions {
+  // Stdin if true, redirects the standard input stream of the pod for this call.
+  // Defaults to false.
+  // +optional
+  optional bool stdin = 1;
+
+  // Stdout if true indicates that stdout is to be redirected for the attach call.
+  // Defaults to true.
+  // +optional
+  optional bool stdout = 2;
+
+  // Stderr if true indicates that stderr is to be redirected for the attach call.
+  // Defaults to true.
+  // +optional
+  optional bool stderr = 3;
+
+  // TTY if true indicates that a tty will be allocated for the attach call.
+  // This is passed through the container runtime so the tty
+  // is allocated on the worker node by the container runtime.
+  // Defaults to false.
+  // +optional
+  optional bool tty = 4;
+
+  // The container in which to execute the command.
+  // Defaults to the only container if there is only one container in the pod.
+  // +optional
+  optional string container = 5;
+}
+
+// PodCondition contains details for the current condition of this pod.
+message PodCondition {
+  // Type is the type of the condition.
+  // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
+  optional string type = 1;
+
+  // Status is the status of the condition.
+  // Can be True, False, Unknown.
+  // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
+  optional string status = 2;
+
+  // Last time we probed the condition.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3;
+
+  // Last time the condition transitioned from one status to another.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4;
+
+  // Unique, one-word, CamelCase reason for the condition's last transition.
+  // +optional
+  optional string reason = 5;
+
+  // Human-readable message indicating details about last transition.
+  // +optional
+  optional string message = 6;
+}
+
+// PodDNSConfig defines the DNS parameters of a pod in addition to
+// those generated from DNSPolicy.
+message PodDNSConfig {
+  // A list of DNS name server IP addresses.
+  // This will be appended to the base nameservers generated from DNSPolicy.
+  // Duplicated nameservers will be removed.
+  // +optional
+  repeated string nameservers = 1;
+
+  // A list of DNS search domains for host-name lookup.
+  // This will be appended to the base search paths generated from DNSPolicy.
+  // Duplicated search paths will be removed.
+  // +optional
+  repeated string searches = 2;
+
+  // A list of DNS resolver options.
+  // This will be merged with the base options generated from DNSPolicy.
+  // Duplicated entries will be removed. Resolution options given in Options
+  // will override those that appear in the base DNSPolicy.
+  // +optional
+  repeated PodDNSConfigOption options = 3;
+}
+
+// PodDNSConfigOption defines DNS resolver options of a pod.
+message PodDNSConfigOption {
+  // Required.
+  optional string name = 1;
+
+  // +optional
+  optional string value = 2;
+}
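+
+// Illustrative Go sketch (not part of the generated file): extra resolver
+// configuration via the generated k8s.io/api/core/v1 types; the nameserver,
+// search path, and ndots value are placeholders.
+//
+//   ndots := "2"
+//   dns := v1.PodDNSConfig{
+//       Nameservers: []string{"1.2.3.4"},
+//       Searches:    []string{"ns1.svc.cluster.local"},
+//       Options:     []v1.PodDNSConfigOption{{Name: "ndots", Value: &ndots}},
+//   }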
+
+// PodExecOptions is the query options to a Pod's remote exec call.
+// ---
+// TODO: This is largely identical to PodAttachOptions above, make sure they stay in sync and see about merging
+// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stderr and TTY
+message PodExecOptions {
+  // Redirect the standard input stream of the pod for this call.
+  // Defaults to false.
+  // +optional
+  optional bool stdin = 1;
+
+  // Redirect the standard output stream of the pod for this call.
+  // Defaults to true.
+  // +optional
+  optional bool stdout = 2;
+
+  // Redirect the standard error stream of the pod for this call.
+  // Defaults to true.
+  // +optional
+  optional bool stderr = 3;
+
+  // TTY if true indicates that a tty will be allocated for the exec call.
+  // Defaults to false.
+  // +optional
+  optional bool tty = 4;
+
+  // Container in which to execute the command.
+  // Defaults to the only container if there is only one container in the pod.
+  // +optional
+  optional string container = 5;
+
+  // Command is the remote command to execute. argv array. Not executed within a shell.
+  repeated string command = 6;
+}
+
+// PodList is a list of Pods.
+message PodList {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // List of pods.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md
+  repeated Pod items = 2;
+}
+
+// PodLogOptions is the query options for a Pod's logs REST call.
+message PodLogOptions {
+  // The container for which to stream logs. Defaults to the only container if there is one container in the pod.
+  // +optional
+  optional string container = 1;
+
+  // Follow the log stream of the pod. Defaults to false.
+  // +optional
+  optional bool follow = 2;
+
+  // Return previous terminated container logs. Defaults to false.
+  // +optional
+  optional bool previous = 3;
+
+  // A relative time in seconds before the current time from which to show logs. If this value
+  // precedes the time a pod was started, only logs since the pod start will be returned.
+  // If this value is in the future, no logs will be returned.
+  // Only one of sinceSeconds or sinceTime may be specified.
+  // +optional
+  optional int64 sinceSeconds = 4;
+
+  // An RFC3339 timestamp from which to show logs. If this value
+  // precedes the time a pod was started, only logs since the pod start will be returned.
+  // If this value is in the future, no logs will be returned.
+  // Only one of sinceSeconds or sinceTime may be specified.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time sinceTime = 5;
+
+  // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line
+  // of log output. Defaults to false.
+  // +optional
+  optional bool timestamps = 6;
+
+  // If set, the number of lines from the end of the logs to show. If not specified,
+  // logs are shown from the creation of the container, or from sinceSeconds or sinceTime.
+  // +optional
+  optional int64 tailLines = 7;
+
+  // If set, the number of bytes to read from the server before terminating the
+  // log output. This may not display a complete final line of logging, and may return
+  // slightly more or slightly less than the specified limit.
+  // +optional
+  optional int64 limitBytes = 8;
+}
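+
+// Illustrative sketch (not part of the generated API): assuming the generated
+// Go types (k8s.io/api/core/v1 as v1), tailing the last 100 lines of a log
+// stream with timestamps could look like:
+//
+//   tail := int64(100)
+//   opts := v1.PodLogOptions{
+//       Container:  "app", // hypothetical container name
+//       Follow:     true,
+//       Timestamps: true,
+//       TailLines:  &tail, // sinceSeconds/sinceTime left unset
+//   }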
+
+// PodPortForwardOptions is the query options to a Pod's port forward call
+// when using WebSockets.
+// The `port` query parameter must specify the port or
+// ports (comma separated) to forward over.
+// Port forwarding over SPDY does not use these options. It requires the port
+  // to be passed in the `port` header as part of the request.
+message PodPortForwardOptions {
+  // List of ports to forward
+  // Required when using WebSockets
+  // +optional
+  repeated int32 ports = 1;
+}
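+
+// Illustrative sketch (not part of the generated API): when forwarding over
+// WebSockets, the ports above travel as a comma-separated `port` query
+// parameter, e.g. a request URL ending in
+//
+//   .../pods/<pod>/portforward?port=8080,9090
+//
+// corresponds to v1.PodPortForwardOptions{Ports: []int32{8080, 9090}} in the
+// generated Go types (k8s.io/api/core/v1 as v1).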
+
+// PodProxyOptions is the query options to a Pod's proxy call.
+message PodProxyOptions {
+  // Path is the URL path to use for the current proxy request to pod.
+  // +optional
+  optional string path = 1;
+}
+
+// PodReadinessGate contains the reference to a pod condition
+message PodReadinessGate {
+  // ConditionType refers to a condition in the pod's condition list with matching type.
+  optional string conditionType = 1;
+}
+
+// PodSecurityContext holds pod-level security attributes and common container settings.
+// Some fields are also present in container.securityContext.  Field values of
+// container.securityContext take precedence over field values of PodSecurityContext.
+message PodSecurityContext {
+  // The SELinux context to be applied to all containers.
+  // If unspecified, the container runtime will allocate a random SELinux context for each
+  // container.  May also be set in SecurityContext.  If set in
+  // both SecurityContext and PodSecurityContext, the value specified in SecurityContext
+  // takes precedence for that container.
+  // +optional
+  optional SELinuxOptions seLinuxOptions = 1;
+
+  // The UID to run the entrypoint of the container process.
+  // Defaults to user specified in image metadata if unspecified.
+  // May also be set in SecurityContext.  If set in both SecurityContext and
+  // PodSecurityContext, the value specified in SecurityContext takes precedence
+  // for that container.
+  // +optional
+  optional int64 runAsUser = 2;
+
+  // The GID to run the entrypoint of the container process.
+  // Uses runtime default if unset.
+  // May also be set in SecurityContext.  If set in both SecurityContext and
+  // PodSecurityContext, the value specified in SecurityContext takes precedence
+  // for that container.
+  // +optional
+  optional int64 runAsGroup = 6;
+
+  // Indicates that the container must run as a non-root user.
+  // If true, the Kubelet will validate the image at runtime to ensure that it
+  // does not run as UID 0 (root) and fail to start the container if it does.
+  // If unset or false, no such validation will be performed.
+  // May also be set in SecurityContext.  If set in both SecurityContext and
+  // PodSecurityContext, the value specified in SecurityContext takes precedence.
+  // +optional
+  optional bool runAsNonRoot = 3;
+
+  // A list of groups applied to the first process run in each container, in addition
+  // to the container's primary GID.  If unspecified, no groups will be added to
+  // any container.
+  // +optional
+  repeated int64 supplementalGroups = 4;
+
+  // A special supplemental group that applies to all containers in a pod.
+  // Some volume types allow the Kubelet to change the ownership of that volume
+  // to be owned by the pod:
+  //
+  // 1. The owning GID will be the FSGroup
+  // 2. The setgid bit is set (new files created in the volume will be owned by FSGroup)
+  // 3. The permission bits are OR'd with rw-rw----
+  //
+  // If unset, the Kubelet will not modify the ownership and permissions of any volume.
+  // +optional
+  optional int64 fsGroup = 5;
+
+  // Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported
+  // sysctls (by the container runtime) might fail to launch.
+  // +optional
+  repeated Sysctl sysctls = 7;
+}
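+
+// Illustrative sketch (not part of the generated API): assuming the generated
+// Go types (k8s.io/api/core/v1 as v1), running every container as a fixed
+// non-root user and letting the Kubelet chown supported volumes could be:
+//
+//   uid, gid, fsg := int64(1000), int64(3000), int64(2000)
+//   nonRoot := true
+//   sc := v1.PodSecurityContext{
+//       RunAsUser:    &uid,
+//       RunAsGroup:   &gid,
+//       RunAsNonRoot: &nonRoot,
+//       FSGroup:      &fsg, // volumes become group-owned by 2000, setgid set
+//   }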
+
+// Describes the class of pods that should avoid this node.
+// Exactly one field should be set.
+message PodSignature {
+  // Reference to controller whose pods should avoid this node.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference podController = 1;
+}
+
+// PodSpec is a description of a pod.
+message PodSpec {
+  // List of volumes that can be mounted by containers belonging to the pod.
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes
+  // +optional
+  // +patchMergeKey=name
+  // +patchStrategy=merge,retainKeys
+  repeated Volume volumes = 1;
+
+  // List of initialization containers belonging to the pod.
+  // Init containers are executed in order prior to containers being started. If any
+  // init container fails, the pod is considered to have failed and is handled according
+  // to its restartPolicy. The name for an init container or normal container must be
+  // unique among all containers.
+  // Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes.
+  // The resourceRequirements of an init container are taken into account during scheduling
+  // by finding the highest request/limit for each resource type, and then using the max
+  // of that value or the sum of the normal containers. Limits are applied to init containers
+  // in a similar fashion.
+  // Init containers cannot currently be added or removed.
+  // Cannot be updated.
+  // More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
+  // +patchMergeKey=name
+  // +patchStrategy=merge
+  repeated Container initContainers = 20;
+
+  // List of containers belonging to the pod.
+  // Containers cannot currently be added or removed.
+  // There must be at least one container in a Pod.
+  // Cannot be updated.
+  // +patchMergeKey=name
+  // +patchStrategy=merge
+  repeated Container containers = 2;
+
+  // Restart policy for all containers within the pod.
+  // One of Always, OnFailure, Never.
+  // Defaults to Always.
+  // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy
+  // +optional
+  optional string restartPolicy = 3;
+
+  // Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request.
+  // Value must be a non-negative integer. The value zero indicates delete immediately.
+  // If this value is nil, the default grace period will be used instead.
+  // The grace period is the duration in seconds between when the processes running in the pod
+  // are sent a termination signal and when the processes are forcibly halted with a kill signal.
+  // Set this value longer than the expected cleanup time for your process.
+  // Defaults to 30 seconds.
+  // +optional
+  optional int64 terminationGracePeriodSeconds = 4;
+
+  // Optional duration in seconds the pod may be active on the node relative to
+  // StartTime before the system will actively try to mark it failed and kill associated containers.
+  // Value must be a positive integer.
+  // +optional
+  optional int64 activeDeadlineSeconds = 5;
+
+  // Set DNS policy for the pod.
+  // Defaults to "ClusterFirst".
+  // Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'.
+  // DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy.
+  // To have DNS options set along with hostNetwork, you have to specify DNS policy
+  // explicitly to 'ClusterFirstWithHostNet'.
+  // +optional
+  optional string dnsPolicy = 6;
+
+  // NodeSelector is a selector which must match a node's labels for the pod to be scheduled on that node.
+  // More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+  // +optional
+  map<string, string> nodeSelector = 7;
+
+  // ServiceAccountName is the name of the ServiceAccount to use to run this pod.
+  // More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+  // +optional
+  optional string serviceAccountName = 8;
+
+  // DeprecatedServiceAccount is a deprecated alias for ServiceAccountName.
+  // Deprecated: Use serviceAccountName instead.
+  // +k8s:conversion-gen=false
+  // +optional
+  optional string serviceAccount = 9;
+
+  // AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.
+  // +optional
+  optional bool automountServiceAccountToken = 21;
+
+  // NodeName is a request to schedule this pod onto a specific node. If it is non-empty,
+  // the scheduler simply schedules this pod onto that node, assuming that it fits resource
+  // requirements.
+  // +optional
+  optional string nodeName = 10;
+
+  // Host networking requested for this pod. Use the host's network namespace.
+  // If this option is set, the ports that will be used must be specified.
+  // Defaults to false.
+  // +k8s:conversion-gen=false
+  // +optional
+  optional bool hostNetwork = 11;
+
+  // Use the host's pid namespace.
+  // Optional: Defaults to false.
+  // +k8s:conversion-gen=false
+  // +optional
+  optional bool hostPID = 12;
+
+  // Use the host's ipc namespace.
+  // Optional: Defaults to false.
+  // +k8s:conversion-gen=false
+  // +optional
+  optional bool hostIPC = 13;
+
+  // Share a single process namespace between all of the containers in a pod.
+  // When this is set containers will be able to view and signal processes from other containers
+  // in the same pod, and the first process in each container will not be assigned PID 1.
+  // HostPID and ShareProcessNamespace cannot both be set.
+  // Optional: Defaults to false.
+  // This field is beta-level and may be disabled with the PodShareProcessNamespace feature.
+  // +k8s:conversion-gen=false
+  // +optional
+  optional bool shareProcessNamespace = 27;
+
+  // SecurityContext holds pod-level security attributes and common container settings.
+  // Optional: Defaults to empty.  See type description for default values of each field.
+  // +optional
+  optional PodSecurityContext securityContext = 14;
+
+  // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
+  // If specified, these secrets will be passed to individual puller implementations for them to use. For example,
+  // in the case of docker, only DockerConfig type secrets are honored.
+  // More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod
+  // +optional
+  // +patchMergeKey=name
+  // +patchStrategy=merge
+  repeated LocalObjectReference imagePullSecrets = 15;
+
+  // Specifies the hostname of the Pod
+  // If not specified, the pod's hostname will be set to a system-defined value.
+  // +optional
+  optional string hostname = 16;
+
+  // If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>".
+  // If not specified, the pod will not have a domain name at all.
+  // +optional
+  optional string subdomain = 17;
+
+  // If specified, the pod's scheduling constraints
+  // +optional
+  optional Affinity affinity = 18;
+
+  // If specified, the pod will be dispatched by specified scheduler.
+  // If not specified, the pod will be dispatched by default scheduler.
+  // +optional
+  optional string schedulerName = 19;
+
+  // If specified, the pod's tolerations.
+  // +optional
+  repeated Toleration tolerations = 22;
+
+  // HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts
+  // file if specified. This is only valid for non-hostNetwork pods.
+  // +optional
+  // +patchMergeKey=ip
+  // +patchStrategy=merge
+  repeated HostAlias hostAliases = 23;
+
+  // If specified, indicates the pod's priority. "system-node-critical" and
+  // "system-cluster-critical" are two special keywords which indicate the
+  // highest priorities with the former being the highest priority. Any other
+  // name must be defined by creating a PriorityClass object with that name.
+  // If not specified, the pod priority will be the default priority, or zero if there
+  // is no default.
+  // +optional
+  optional string priorityClassName = 24;
+
+  // The priority value. Various system components use this field to find the
+  // priority of the pod. When Priority Admission Controller is enabled, it
+  // prevents users from setting this field. The admission controller populates
+  // this field from PriorityClassName.
+  // The higher the value, the higher the priority.
+  // +optional
+  optional int32 priority = 25;
+
+  // Specifies the DNS parameters of a pod.
+  // Parameters specified here will be merged to the generated DNS
+  // configuration based on DNSPolicy.
+  // +optional
+  optional PodDNSConfig dnsConfig = 26;
+
+  // If specified, all readiness gates will be evaluated for pod readiness.
+  // A pod is ready when all its containers are ready AND
+  // all conditions specified in the readiness gates have status equal to "True"
+  // More info: https://github.com/kubernetes/community/blob/master/keps/sig-network/0007-pod-ready%2B%2B.md
+  // +optional
+  repeated PodReadinessGate readinessGates = 28;
+
+  // RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used
+  // to run this pod.  If no RuntimeClass resource matches the named class, the pod will not be run.
+  // If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an
+  // empty definition that uses the default runtime handler.
+  // More info: https://github.com/kubernetes/community/blob/master/keps/sig-node/0014-runtime-class.md
+  // This is an alpha feature and may change in the future.
+  // +optional
+  optional string runtimeClassName = 29;
+
+  // EnableServiceLinks indicates whether information about services should be injected into pod's
+  // environment variables, matching the syntax of Docker links.
+  // +optional
+  optional bool enableServiceLinks = 30;
+}
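+
+// Illustrative sketch (not part of the generated API): assuming the generated
+// Go types (k8s.io/api/core/v1 as v1), a minimal spec exercising only the
+// required container list plus a couple of the optional fields above:
+//
+//   grace := int64(30)
+//   spec := v1.PodSpec{
+//       Containers: []v1.Container{
+//           {Name: "app", Image: "nginx:1.15"}, // hypothetical image
+//       },
+//       RestartPolicy:                 v1.RestartPolicyAlways,
+//       TerminationGracePeriodSeconds: &grace,
+//       DNSPolicy:                     v1.DNSClusterFirst,
+//   }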
+
+// PodStatus represents information about the status of a pod. Status may trail the actual
+// state of a system, especially if the node that hosts the pod cannot contact the control
+// plane.
+message PodStatus {
+  // The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle.
+  // The conditions array, the reason and message fields, and the individual container status
+  // arrays contain more detail about the pod's status.
+  // There are five possible phase values:
+  //
+  // Pending: The pod has been accepted by the Kubernetes system, but one or more of the
+  // container images has not been created. This includes time before being scheduled as
+  // well as time spent downloading images over the network, which could take a while.
+  // Running: The pod has been bound to a node, and all of the containers have been created.
+  // At least one container is still running, or is in the process of starting or restarting.
+  // Succeeded: All containers in the pod have terminated in success, and will not be restarted.
+  // Failed: All containers in the pod have terminated, and at least one container has
+  // terminated in failure. The container either exited with non-zero status or was terminated
+  // by the system.
+  // Unknown: For some reason the state of the pod could not be obtained, typically due to an
+  // error in communicating with the host of the pod.
+  //
+  // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase
+  // +optional
+  optional string phase = 1;
+
+  // Current service state of pod.
+  // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
+  // +optional
+  // +patchMergeKey=type
+  // +patchStrategy=merge
+  repeated PodCondition conditions = 2;
+
+  // A human readable message indicating details about why the pod is in this condition.
+  // +optional
+  optional string message = 3;
+
+  // A brief CamelCase message indicating details about why the pod is in this state.
+  // e.g. 'Evicted'
+  // +optional
+  optional string reason = 4;
+
+  // nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be
+  // scheduled right away as preemption victims receive their graceful termination periods.
+  // This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide
+  // to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to
+  // give the resources on this node to a higher priority pod that is created after preemption.
+  // As a result, this field may be different than PodSpec.nodeName when the pod is
+  // scheduled.
+  // +optional
+  optional string nominatedNodeName = 11;
+
+  // IP address of the host to which the pod is assigned. Empty if not yet scheduled.
+  // +optional
+  optional string hostIP = 5;
+
+  // IP address allocated to the pod. Routable at least within the cluster.
+  // Empty if not yet allocated.
+  // +optional
+  optional string podIP = 6;
+
+  // RFC 3339 date and time at which the object was acknowledged by the Kubelet.
+  // This is before the Kubelet pulled the container image(s) for the pod.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startTime = 7;
+
+  // The list has one entry per init container in the manifest. The most recent successful
+  // init container will have ready = true; the most recently started container will have
+  // startTime set.
+  // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status
+  repeated ContainerStatus initContainerStatuses = 10;
+
+  // The list has one entry per container in the manifest. Each entry is currently the output
+  // of `docker inspect`.
+  // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status
+  // +optional
+  repeated ContainerStatus containerStatuses = 8;
+
+  // The Quality of Service (QOS) classification assigned to the pod based on resource requirements.
+  // See the PodQOSClass type for available QOS classes.
+  // More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md
+  // +optional
+  optional string qosClass = 9;
+}
+
+// PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encoded/decoded
+message PodStatusResult {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Most recently observed status of the pod.
+  // This data may not be up to date.
+  // Populated by the system.
+  // Read-only.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+  // +optional
+  optional PodStatus status = 2;
+}
+
+// PodTemplate describes a template for creating copies of a predefined pod.
+message PodTemplate {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Template defines the pods that will be created from this pod template.
+  // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+  // +optional
+  optional PodTemplateSpec template = 2;
+}
+
+// PodTemplateList is a list of PodTemplates.
+message PodTemplateList {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // List of pod templates
+  repeated PodTemplate items = 2;
+}
+
+// PodTemplateSpec describes the data a pod should have when created from a template
+message PodTemplateSpec {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Specification of the desired behavior of the pod.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+  // +optional
+  optional PodSpec spec = 2;
+}
+
+// PortworxVolumeSource represents a Portworx volume resource.
+message PortworxVolumeSource {
+  // VolumeID uniquely identifies a Portworx volume
+  optional string volumeID = 1;
+
+  // FSType represents the filesystem type to mount
+  // Must be a filesystem type supported by the host operating system.
+  // Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified.
+  optional string fsType = 2;
+
+  // Defaults to false (read/write). ReadOnly here will force
+  // the ReadOnly setting in VolumeMounts.
+  // +optional
+  optional bool readOnly = 3;
+}
+
+// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.
+// +k8s:openapi-gen=false
+message Preconditions {
+  // Specifies the target UID.
+  // +optional
+  optional string uid = 1;
+}
+
+// Describes a class of pods that should avoid this node.
+message PreferAvoidPodsEntry {
+  // The class of pods.
+  optional PodSignature podSignature = 1;
+
+  // Time at which this entry was added to the list.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time evictionTime = 2;
+
+  // (brief) reason why this entry was added to the list.
+  // +optional
+  optional string reason = 3;
+
+  // Human readable message indicating why this entry was added to the list.
+  // +optional
+  optional string message = 4;
+}
+
+// An empty preferred scheduling term matches all objects with implicit weight 0
+// (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+message PreferredSchedulingTerm {
+  // Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
+  optional int32 weight = 1;
+
+  // A node selector term, associated with the corresponding weight.
+  optional NodeSelectorTerm preference = 2;
+}
+
+// Probe describes a health check to be performed against a container to determine whether it is
+// alive or ready to receive traffic.
+message Probe {
+  // The action taken to determine the health of a container
+  optional Handler handler = 1;
+
+  // Number of seconds after the container has started before liveness probes are initiated.
+  // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+  // +optional
+  optional int32 initialDelaySeconds = 2;
+
+  // Number of seconds after which the probe times out.
+  // Defaults to 1 second. Minimum value is 1.
+  // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+  // +optional
+  optional int32 timeoutSeconds = 3;
+
+  // How often (in seconds) to perform the probe.
+  // Defaults to 10 seconds. Minimum value is 1.
+  // +optional
+  optional int32 periodSeconds = 4;
+
+  // Minimum consecutive successes for the probe to be considered successful after having failed.
+  // Defaults to 1. Must be 1 for liveness. Minimum value is 1.
+  // +optional
+  optional int32 successThreshold = 5;
+
+  // Minimum consecutive failures for the probe to be considered failed after having succeeded.
+  // Defaults to 3. Minimum value is 1.
+  // +optional
+  optional int32 failureThreshold = 6;
+}
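+
+// Illustrative sketch (not part of the generated API): assuming the generated
+// Go types (k8s.io/api/core/v1 as v1, plus the apimachinery intstr package),
+// an HTTP liveness probe with the documented defaults spelled out:
+//
+//   probe := v1.Probe{
+//       Handler: v1.Handler{
+//           HTTPGet: &v1.HTTPGetAction{Path: "/healthz", Port: intstr.FromInt(8080)},
+//       },
+//       InitialDelaySeconds: 5,
+//       PeriodSeconds:       10, // the documented default
+//       SuccessThreshold:    1,  // must be 1 for liveness
+//       FailureThreshold:    3,
+//   }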
+
+// Represents a projected volume source
+message ProjectedVolumeSource {
+  // list of volume projections
+  repeated VolumeProjection sources = 1;
+
+  // Mode bits to use on created files by default. Must be a value between
+  // 0 and 0777.
+  // Directories within the path are not affected by this setting.
+  // This might be in conflict with other options that affect the file
+  // mode, like fsGroup, and the result can be other mode bits set.
+  // +optional
+  optional int32 defaultMode = 2;
+}
+
+// Represents a Quobyte mount that lasts the lifetime of a pod.
+// Quobyte volumes do not support ownership management or SELinux relabeling.
+message QuobyteVolumeSource {
+  // Registry represents a single or multiple Quobyte Registry services
+  // specified as a string of host:port pairs (multiple entries are separated with commas)
+  // which acts as the central registry for volumes
+  optional string registry = 1;
+
+  // Volume is a string that references an already created Quobyte volume by name.
+  optional string volume = 2;
+
+  // ReadOnly here will force the Quobyte volume to be mounted with read-only permissions.
+  // Defaults to false.
+  // +optional
+  optional bool readOnly = 3;
+
+  // User to map volume access to
+  // Defaults to serviceaccount user
+  // +optional
+  optional string user = 4;
+
+  // Group to map volume access to
+  // Default is no group
+  // +optional
+  optional string group = 5;
+}
+
+// Represents a Rados Block Device mount that lasts the lifetime of a pod.
+// RBD volumes support ownership management and SELinux relabeling.
+message RBDPersistentVolumeSource {
+  // A collection of Ceph monitors.
+  // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
+  repeated string monitors = 1;
+
+  // The rados image name.
+  // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
+  optional string image = 2;
+
+  // Filesystem type of the volume that you want to mount.
+  // Tip: Ensure that the filesystem type is supported by the host operating system.
+  // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
+  // TODO: how do we prevent errors in the filesystem from compromising the machine
+  // +optional
+  optional string fsType = 3;
+
+  // The rados pool name.
+  // Default is rbd.
+  // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
+  // +optional
+  optional string pool = 4;
+
+  // The rados user name.
+  // Default is admin.
+  // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
+  // +optional
+  optional string user = 5;
+
+  // Keyring is the path to the keyring for RBDUser.
+  // Default is /etc/ceph/keyring.
+  // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
+  // +optional
+  optional string keyring = 6;
+
+  // SecretRef is the name of the authentication secret for RBDUser. If provided,
+  // it overrides keyring.
+  // Default is nil.
+  // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
+  // +optional
+  optional SecretReference secretRef = 7;
+
+  // ReadOnly here will force the ReadOnly setting in VolumeMounts.
+  // Defaults to false.
+  // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
+  // +optional
+  optional bool readOnly = 8;
+}
+
+// Represents a Rados Block Device mount that lasts the lifetime of a pod.
+// RBD volumes support ownership management and SELinux relabeling.
+message RBDVolumeSource {
+  // A collection of Ceph monitors.
+  // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
+  repeated string monitors = 1;
+
+  // The rados image name.
+  // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
+  optional string image = 2;
+
+  // Filesystem type of the volume that you want to mount.
+  // Tip: Ensure that the filesystem type is supported by the host operating system.
+  // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
+  // TODO: how do we prevent errors in the filesystem from compromising the machine
+  // +optional
+  optional string fsType = 3;
+
+  // The rados pool name.
+  // Default is rbd.
+  // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
+  // +optional
+  optional string pool = 4;
+
+  // The rados user name.
+  // Default is admin.
+  // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
+  // +optional
+  optional string user = 5;
+
+  // Keyring is the path to the keyring for RBDUser.
+  // Default is /etc/ceph/keyring.
+  // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
+  // +optional
+  optional string keyring = 6;
+
+  // SecretRef is the name of the authentication secret for RBDUser. If provided,
+  // it overrides keyring.
+  // Default is nil.
+  // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
+  // +optional
+  optional LocalObjectReference secretRef = 7;
+
+  // ReadOnly here will force the ReadOnly setting in VolumeMounts.
+  // Defaults to false.
+  // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
+  // +optional
+  optional bool readOnly = 8;
+}
+
+// RangeAllocation is not a public type.
+message RangeAllocation {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Range is a string that identifies the range represented by 'data'.
+  optional string range = 2;
+
+  // Data is a bit array containing all allocated addresses in the previous segment.
+  optional bytes data = 3;
+}
+
+// ReplicationController represents the configuration of a replication controller.
+message ReplicationController {
+  // If the Labels of a ReplicationController are empty, they are defaulted to
+  // be the same as the Pod(s) that the replication controller manages.
+  // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Spec defines the specification of the desired behavior of the replication controller.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+  // +optional
+  optional ReplicationControllerSpec spec = 2;
+
+  // Status is the most recently observed status of the replication controller.
+  // This data may be out of date by some window of time.
+  // Populated by the system.
+  // Read-only.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+  // +optional
+  optional ReplicationControllerStatus status = 3;
+}
+
+// ReplicationControllerCondition describes the state of a replication controller at a certain point.
+message ReplicationControllerCondition {
+  // Type of replication controller condition.
+  optional string type = 1;
+
+  // Status of the condition, one of True, False, Unknown.
+  optional string status = 2;
+
+  // The last time the condition transitioned from one status to another.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
+
+  // The reason for the condition's last transition.
+  // +optional
+  optional string reason = 4;
+
+  // A human readable message indicating details about the transition.
+  // +optional
+  optional string message = 5;
+}
+
+// ReplicationControllerList is a collection of replication controllers.
+message ReplicationControllerList {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // List of replication controllers.
+  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller
+  repeated ReplicationController items = 2;
+}
+
+// ReplicationControllerSpec is the specification of a replication controller.
+message ReplicationControllerSpec {
+  // Replicas is the number of desired replicas.
+  // This is a pointer to distinguish between explicit zero and unspecified.
+  // Defaults to 1.
+  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller
+  // +optional
+  optional int32 replicas = 1;
+
+  // Minimum number of seconds for which a newly created pod should be ready
+  // without any of its containers crashing, for it to be considered available.
+  // Defaults to 0 (pod will be considered available as soon as it is ready)
+  // +optional
+  optional int32 minReadySeconds = 4;
+
+  // Selector is a label query over pods that should match the Replicas count.
+  // Label keys and values must match in order for a pod to be controlled by this
+  // replication controller. If Selector is empty, it is defaulted to the labels
+  // present on the Pod template.
+  // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+  // +optional
+  map<string, string> selector = 2;
+
+  // Template is the object that describes the pod that will be created if
+  // insufficient replicas are detected. This takes precedence over a TemplateRef.
+  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
+  // +optional
+  optional PodTemplateSpec template = 3;
+}
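+
+// Illustrative sketch (not part of the generated API): assuming the generated
+// Go types (k8s.io/api/core/v1 as v1, apis/meta/v1 as metav1), a spec keeping
+// three replicas of a labeled pod template:
+//
+//   replicas := int32(3)
+//   spec := v1.ReplicationControllerSpec{
+//       Replicas: &replicas, // pointer distinguishes unset from explicit 0
+//       Selector: map[string]string{"app": "web"}, // hypothetical labels
+//       Template: &v1.PodTemplateSpec{
+//           ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "web"}},
+//           Spec:       v1.PodSpec{Containers: []v1.Container{{Name: "web", Image: "nginx"}}},
+//       },
+//   }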
+
+// ReplicationControllerStatus represents the current status of a replication
+// controller.
+message ReplicationControllerStatus {
+  // Replicas is the most recently observed number of replicas.
+  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller
+  optional int32 replicas = 1;
+
+  // The number of pods that have labels matching the labels of the pod template of the replication controller.
+  // +optional
+  optional int32 fullyLabeledReplicas = 2;
+
+  // The number of ready replicas for this replication controller.
+  // +optional
+  optional int32 readyReplicas = 4;
+
+  // The number of available replicas (ready for at least minReadySeconds) for this replication controller.
+  // +optional
+  optional int32 availableReplicas = 5;
+
+  // ObservedGeneration reflects the generation of the most recently observed replication controller.
+  // +optional
+  optional int64 observedGeneration = 3;
+
+  // Represents the latest available observations of a replication controller's current state.
+  // +optional
+  // +patchMergeKey=type
+  // +patchStrategy=merge
+  repeated ReplicationControllerCondition conditions = 6;
+}
+
+// ResourceFieldSelector represents container resources (cpu, memory) and their output format
+message ResourceFieldSelector {
+  // Container name: required for volumes, optional for env vars
+  // +optional
+  optional string containerName = 1;
+
+  // Required: resource to select
+  optional string resource = 2;
+
+  // Specifies the output format of the exposed resources, defaults to "1"
+  // +optional
+  optional k8s.io.apimachinery.pkg.api.resource.Quantity divisor = 3;
+}
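+
+// Illustrative sketch (not part of the generated API): assuming the generated
+// Go types (k8s.io/api/core/v1 as v1 and the apimachinery resource package),
+// exposing a container's CPU limit in millicores via the divisor:
+//
+//   sel := v1.ResourceFieldSelector{
+//       ContainerName: "app", // required for volumes, optional for env vars
+//       Resource:      "limits.cpu",
+//       Divisor:       resource.MustParse("1m"), // report in units of 1m
+//   }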
+
+// ResourceQuota sets aggregate quota restrictions enforced per namespace
+message ResourceQuota {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Spec defines the desired quota.
+  // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+  // +optional
+  optional ResourceQuotaSpec spec = 2;
+
+  // Status defines the actual enforced quota and its current usage.
+  // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+  // +optional
+  optional ResourceQuotaStatus status = 3;
+}
+
+// ResourceQuotaList is a list of ResourceQuota items.
+message ResourceQuotaList {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is a list of ResourceQuota objects.
+  // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/
+  repeated ResourceQuota items = 2;
+}
+
+// ResourceQuotaSpec defines the desired hard limits to enforce for Quota.
+message ResourceQuotaSpec {
+  // hard is the set of desired hard limits for each named resource.
+  // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/
+  // +optional
+  map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> hard = 1;
+
+  // A collection of filters that must match each object tracked by a quota.
+  // If not specified, the quota matches all objects.
+  // +optional
+  repeated string scopes = 2;
+
+  // scopeSelector is also a collection of filters like scopes that must match each object tracked by a quota
+  // but expressed using ScopeSelectorOperator in combination with possible values.
+  // For a resource to match, both scopes AND scopeSelector (if specified in spec), must be matched.
+  // +optional
+  optional ScopeSelector scopeSelector = 3;
+}
+
+// ResourceQuotaStatus defines the enforced hard limits and observed use.
+message ResourceQuotaStatus {
+  // Hard is the set of enforced hard limits for each named resource.
+  // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/
+  // +optional
+  map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> hard = 1;
+
+  // Used is the current observed total usage of the resource in the namespace.
+  // +optional
+  map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> used = 2;
+}
+
+// ResourceRequirements describes the compute resource requirements.
+message ResourceRequirements {
+  // Limits describes the maximum amount of compute resources allowed.
+  // More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
+  // +optional
+  map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> limits = 1;
+
+  // Requests describes the minimum amount of compute resources required.
+  // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+  // otherwise to an implementation-defined value.
+  // More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
+  // +optional
+  map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> requests = 2;
+}
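+
+// Illustrative sketch (not part of the generated API): assuming the generated
+// Go types (k8s.io/api/core/v1 as v1 and the apimachinery resource package),
+// requests set below limits for both cpu and memory:
+//
+//   reqs := v1.ResourceRequirements{
+//       Requests: v1.ResourceList{
+//           v1.ResourceCPU:    resource.MustParse("250m"),
+//           v1.ResourceMemory: resource.MustParse("64Mi"),
+//       },
+//       Limits: v1.ResourceList{
+//           v1.ResourceCPU:    resource.MustParse("500m"),
+//           v1.ResourceMemory: resource.MustParse("128Mi"),
+//       },
+//   }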
+
+// SELinuxOptions are the labels to be applied to the container
+message SELinuxOptions {
+  // User is a SELinux user label that applies to the container.
+  // +optional
+  optional string user = 1;
+
+  // Role is a SELinux role label that applies to the container.
+  // +optional
+  optional string role = 2;
+
+  // Type is a SELinux type label that applies to the container.
+  // +optional
+  optional string type = 3;
+
+  // Level is a SELinux level label that applies to the container.
+  // +optional
+  optional string level = 4;
+}
+
+// ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume
+message ScaleIOPersistentVolumeSource {
+  // The host address of the ScaleIO API Gateway.
+  optional string gateway = 1;
+
+  // The name of the storage system as configured in ScaleIO.
+  optional string system = 2;
+
+  // SecretRef references the secret for the ScaleIO user and other
+  // sensitive information. If this is not provided, the Login operation will fail.
+  optional SecretReference secretRef = 3;
+
+  // Flag to enable/disable SSL communication with Gateway, default false
+  // +optional
+  optional bool sslEnabled = 4;
+
+  // The name of the ScaleIO Protection Domain for the configured storage.
+  // +optional
+  optional string protectionDomain = 5;
+
+  // The ScaleIO Storage Pool associated with the protection domain.
+  // +optional
+  optional string storagePool = 6;
+
+  // Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
+  // Default is ThinProvisioned.
+  // +optional
+  optional string storageMode = 7;
+
+  // The name of a volume already created in the ScaleIO system
+  // that is associated with this volume source.
+  optional string volumeName = 8;
+
+  // Filesystem type to mount.
+  // Must be a filesystem type supported by the host operating system.
+  // Ex. "ext4", "xfs", "ntfs".
+  // Default is "xfs"
+  // +optional
+  optional string fsType = 9;
+
+  // Defaults to false (read/write). ReadOnly here will force
+  // the ReadOnly setting in VolumeMounts.
+  // +optional
+  optional bool readOnly = 10;
+}
+
+// ScaleIOVolumeSource represents a persistent ScaleIO volume
+message ScaleIOVolumeSource {
+  // The host address of the ScaleIO API Gateway.
+  optional string gateway = 1;
+
+  // The name of the storage system as configured in ScaleIO.
+  optional string system = 2;
+
+  // SecretRef references the secret for the ScaleIO user and other
+  // sensitive information. If this is not provided, the Login operation will fail.
+  optional LocalObjectReference secretRef = 3;
+
+  // Flag to enable/disable SSL communication with Gateway, default false
+  // +optional
+  optional bool sslEnabled = 4;
+
+  // The name of the ScaleIO Protection Domain for the configured storage.
+  // +optional
+  optional string protectionDomain = 5;
+
+  // The ScaleIO Storage Pool associated with the protection domain.
+  // +optional
+  optional string storagePool = 6;
+
+  // Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
+  // Default is ThinProvisioned.
+  // +optional
+  optional string storageMode = 7;
+
+  // The name of a volume already created in the ScaleIO system
+  // that is associated with this volume source.
+  optional string volumeName = 8;
+
+  // Filesystem type to mount.
+  // Must be a filesystem type supported by the host operating system.
+  // Ex. "ext4", "xfs", "ntfs".
+  // Default is "xfs".
+  // +optional
+  optional string fsType = 9;
+
+  // Defaults to false (read/write). ReadOnly here will force
+  // the ReadOnly setting in VolumeMounts.
+  // +optional
+  optional bool readOnly = 10;
+}
+
+// A scope selector represents the AND of the selectors represented
+// by the scoped-resource selector requirements.
+message ScopeSelector {
+  // A list of scope selector requirements by scope of the resources.
+  // +optional
+  repeated ScopedResourceSelectorRequirement matchExpressions = 1;
+}
+
+// A scoped-resource selector requirement is a selector that contains values, a scope name, and an operator
+// that relates the scope name and values.
+message ScopedResourceSelectorRequirement {
+  // The name of the scope that the selector applies to.
+  optional string scopeName = 1;
+
+  // Represents a scope's relationship to a set of values.
+  // Valid operators are In, NotIn, Exists, DoesNotExist.
+  optional string operator = 2;
+
+  // An array of string values. If the operator is In or NotIn,
+  // the values array must be non-empty. If the operator is Exists or DoesNotExist,
+  // the values array must be empty.
+  // This array is replaced during a strategic merge patch.
+  // +optional
+  repeated string values = 3;
+}
+
+// Secret holds secret data of a certain type. The total bytes of the values in
+// the Data field must be less than MaxSecretSize bytes.
+message Secret {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Data contains the secret data. Each key must consist of alphanumeric
+  // characters, '-', '_' or '.'. The serialized form of the secret data is a
+  // base64 encoded string, representing the arbitrary (possibly non-string)
+  // data value here. Described in https://tools.ietf.org/html/rfc4648#section-4
+  // +optional
+  map<string, bytes> data = 2;
+
+  // stringData allows specifying non-binary secret data in string form.
+  // It is provided as a write-only convenience method.
+  // All keys and values are merged into the data field on write, overwriting any existing values.
+  // It is never output when reading from the API.
+  // +k8s:conversion-gen=false
+  // +optional
+  map<string, string> stringData = 4;
+
+  // Used to facilitate programmatic handling of secret data.
+  // +optional
+  optional string type = 3;
+}
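+
+// Illustrative sketch (not part of the generated API): assuming the generated
+// Go types (k8s.io/api/core/v1 as v1), the data/stringData split above means
+// binary and plain-string values can be set side by side; stringData is merged
+// into data on write:
+//
+//   sec := v1.Secret{
+//       Type:       v1.SecretTypeOpaque,
+//       Data:       map[string][]byte{"cert": certBytes}, // certBytes assumed
+//       StringData: map[string]string{"password": "s3cret"},
+//   }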
+
+// SecretEnvSource selects a Secret to populate the environment
+// variables with.
+//
+// The contents of the target Secret's Data field will represent the
+// key-value pairs as environment variables.
+message SecretEnvSource {
+  // The Secret to select from.
+  optional LocalObjectReference localObjectReference = 1;
+
+  // Specify whether the Secret must be defined
+  // +optional
+  optional bool optional = 2;
+}
+
+// SecretKeySelector selects a key of a Secret.
+message SecretKeySelector {
+  // The name of the secret in the pod's namespace to select from.
+  optional LocalObjectReference localObjectReference = 1;
+
+  // The key of the secret to select from.  Must be a valid secret key.
+  optional string key = 2;
+
+  // Specify whether the Secret or its key must be defined
+  // +optional
+  optional bool optional = 3;
+}
+
+// SecretList is a list of Secret.
+message SecretList {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is a list of secret objects.
+  // More info: https://kubernetes.io/docs/concepts/configuration/secret
+  repeated Secret items = 2;
+}
+
+// Adapts a secret into a projected volume.
+//
+// The contents of the target Secret's Data field will be presented in a
+// projected volume as files using the keys in the Data field as the file names.
+// Note that this is identical to a secret volume source without the default
+// mode.
+message SecretProjection {
+  optional LocalObjectReference localObjectReference = 1;
+
+  // If unspecified, each key-value pair in the Data field of the referenced
+  // Secret will be projected into the volume as a file whose name is the
+  // key and content is the value. If specified, the listed keys will be
+  // projected into the specified paths, and unlisted keys will not be
+  // present. If a key is specified which is not present in the Secret,
+  // the volume setup will error unless it is marked optional. Paths must be
+  // relative and may not contain the '..' path or start with '..'.
+  // +optional
+  repeated KeyToPath items = 2;
+
+  // Specify whether the Secret or its key must be defined
+  // +optional
+  optional bool optional = 4;
+}
+
+// SecretReference represents a Secret Reference. It has enough information to retrieve a secret
+// in any namespace.
+message SecretReference {
+  // Name is unique within a namespace to reference a secret resource.
+  // +optional
+  optional string name = 1;
+
+  // Namespace defines the space within which the secret name must be unique.
+  // +optional
+  optional string namespace = 2;
+}
+
+// Adapts a Secret into a volume.
+//
+// The contents of the target Secret's Data field will be presented in a volume
+// as files using the keys in the Data field as the file names.
+// Secret volumes support ownership management and SELinux relabeling.
+message SecretVolumeSource {
+  // Name of the secret in the pod's namespace to use.
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
+  // +optional
+  optional string secretName = 1;
+
+  // If unspecified, each key-value pair in the Data field of the referenced
+  // Secret will be projected into the volume as a file whose name is the
+  // key and content is the value. If specified, the listed keys will be
+  // projected into the specified paths, and unlisted keys will not be
+  // present. If a key is specified which is not present in the Secret,
+  // the volume setup will error unless it is marked optional. Paths must be
+  // relative and may not contain the '..' path or start with '..'.
+  // +optional
+  repeated KeyToPath items = 2;
+
+  // Optional: mode bits to use on created files by default. Must be a
+  // value between 0 and 0777. Defaults to 0644.
+  // Directories within the path are not affected by this setting.
+  // This might be in conflict with other options that affect the file
+  // mode, like fsGroup, and the result can be other mode bits set.
+  // +optional
+  optional int32 defaultMode = 3;
+
+  // Specify whether the Secret or its keys must be defined
+  // +optional
+  optional bool optional = 4;
+}
+
+// SecurityContext holds security configuration that will be applied to a container.
+// Some fields are present in both SecurityContext and PodSecurityContext.  When both
+// are set, the values in SecurityContext take precedence.
+message SecurityContext {
+  // The capabilities to add/drop when running containers.
+  // Defaults to the default set of capabilities granted by the container runtime.
+  // +optional
+  optional Capabilities capabilities = 1;
+
+  // Run container in privileged mode.
+  // Processes in privileged containers are essentially equivalent to root on the host.
+  // Defaults to false.
+  // +optional
+  optional bool privileged = 2;
+
+  // The SELinux context to be applied to the container.
+  // If unspecified, the container runtime will allocate a random SELinux context for each
+  // container.  May also be set in PodSecurityContext.  If set in both SecurityContext and
+  // PodSecurityContext, the value specified in SecurityContext takes precedence.
+  // +optional
+  optional SELinuxOptions seLinuxOptions = 3;
+
+  // The UID to run the entrypoint of the container process.
+  // Defaults to user specified in image metadata if unspecified.
+  // May also be set in PodSecurityContext.  If set in both SecurityContext and
+  // PodSecurityContext, the value specified in SecurityContext takes precedence.
+  // +optional
+  optional int64 runAsUser = 4;
+
+  // The GID to run the entrypoint of the container process.
+  // Uses runtime default if unset.
+  // May also be set in PodSecurityContext.  If set in both SecurityContext and
+  // PodSecurityContext, the value specified in SecurityContext takes precedence.
+  // +optional
+  optional int64 runAsGroup = 8;
+
+  // Indicates that the container must run as a non-root user.
+  // If true, the Kubelet will validate the image at runtime to ensure that it
+  // does not run as UID 0 (root) and fail to start the container if it does.
+  // If unset or false, no such validation will be performed.
+  // May also be set in PodSecurityContext.  If set in both SecurityContext and
+  // PodSecurityContext, the value specified in SecurityContext takes precedence.
+  // +optional
+  optional bool runAsNonRoot = 5;
+
+  // Whether this container has a read-only root filesystem.
+  // Default is false.
+  // +optional
+  optional bool readOnlyRootFilesystem = 6;
+
+  // AllowPrivilegeEscalation controls whether a process can gain more
+  // privileges than its parent process. This bool directly controls if
+  // the no_new_privs flag will be set on the container process.
+  // AllowPrivilegeEscalation is always true when the container:
+  // 1) is run as Privileged, or
+  // 2) has CAP_SYS_ADMIN
+  // +optional
+  optional bool allowPrivilegeEscalation = 7;
+
+  // procMount denotes the type of proc mount to use for the containers.
+  // The default is DefaultProcMount which uses the container runtime defaults for
+  // readonly paths and masked paths.
+  // This requires the ProcMountType feature flag to be enabled.
+  // +optional
+  optional string procMount = 9;
+}
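+
+// Illustrative sketch (not part of the generated API): assuming the generated
+// Go types (k8s.io/api/core/v1 as v1), a locked-down container-level context;
+// these values would override any conflicting PodSecurityContext fields:
+//
+//   nonRoot, noEsc, roFS := true, false, true
+//   sc := v1.SecurityContext{
+//       RunAsNonRoot:             &nonRoot,
+//       AllowPrivilegeEscalation: &noEsc, // sets no_new_privs on the process
+//       ReadOnlyRootFilesystem:   &roFS,
+//       Capabilities:             &v1.Capabilities{Drop: []v1.Capability{"ALL"}},
+//   }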
+
+// SerializedReference is a reference to serialized object.
+message SerializedReference {
+  // The reference to an object in the system.
+  // +optional
+  optional ObjectReference reference = 1;
+}
+
+// Service is a named abstraction of a software service (for example, mysql) consisting of a local
+// port (for example 3306) that the proxy listens on, and the selector that determines which pods
+// will answer requests sent through the proxy.
+message Service {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Spec defines the behavior of a service.
+  // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+  // +optional
+  optional ServiceSpec spec = 2;
+
+  // Most recently observed status of the service.
+  // Populated by the system.
+  // Read-only.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+  // +optional
+  optional ServiceStatus status = 3;
+}
+
+// ServiceAccount binds together:
+// * a name, understood by users, and perhaps by peripheral systems, for an identity
+// * a principal that can be authenticated and authorized
+// * a set of secrets
+message ServiceAccount {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Secrets is the list of secrets allowed to be used by pods running as this ServiceAccount.
+  // More info: https://kubernetes.io/docs/concepts/configuration/secret
+  // +optional
+  // +patchMergeKey=name
+  // +patchStrategy=merge
+  repeated ObjectReference secrets = 2;
+
+  // ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images
+  // in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets
+  // can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet.
+  // More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
+  // +optional
+  repeated LocalObjectReference imagePullSecrets = 3;
+
+  // AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted.
+  // Can be overridden at the pod level.
+  // +optional
+  optional bool automountServiceAccountToken = 4;
+}
+
+// ServiceAccountList is a list of ServiceAccount objects
+message ServiceAccountList {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // List of ServiceAccounts.
+  // More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+  repeated ServiceAccount items = 2;
+}
+
+// ServiceAccountTokenProjection represents a projected service account token
+// volume. This projection can be used to insert a service account token into
+// the pod's runtime filesystem for use against APIs (Kubernetes API Server or
+// otherwise).
+message ServiceAccountTokenProjection {
+  // Audience is the intended audience of the token. A recipient of a token
+  // must identify itself with an identifier specified in the audience of the
+  // token, and otherwise should reject the token. The audience defaults to the
+  // identifier of the apiserver.
+  // +optional
+  optional string audience = 1;
+
+  // ExpirationSeconds is the requested duration of validity of the service
+  // account token. As the token approaches expiration, the kubelet volume
+  // plugin will proactively rotate the service account token. The kubelet will
+  // start trying to rotate the token if the token is older than 80 percent of
+  // its time to live or if the token is older than 24 hours. Defaults to 1 hour
+  // and must be at least 10 minutes.
+  // +optional
+  optional int64 expirationSeconds = 2;
+
+  // Path is the path relative to the mount point of the file to project the
+  // token into.
+  optional string path = 3;
+}
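+
+// Illustrative sketch (not part of the generated API): assuming the generated
+// Go types (k8s.io/api/core/v1 as v1), a projected token scoped to a custom
+// audience and rotated well before its one-hour default expiry:
+//
+//   exp := int64(3600) // must be at least 600 (10 minutes)
+//   proj := v1.ServiceAccountTokenProjection{
+//       Audience:          "vault", // hypothetical audience
+//       ExpirationSeconds: &exp,
+//       Path:              "token",
+//   }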
+
+// ServiceList holds a list of services.
+message ServiceList {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // List of services
+  repeated Service items = 2;
+}
+
+// ServicePort contains information on a service's port.
+message ServicePort {
+  // The name of this port within the service. This must be a DNS_LABEL.
+  // All ports within a ServiceSpec must have unique names. This maps to
+  // the 'Name' field in EndpointPort objects.
+  // Optional if only one ServicePort is defined on this service.
+  // +optional
+  optional string name = 1;
+
+  // The IP protocol for this port. Supports "TCP", "UDP", and "SCTP".
+  // Default is TCP.
+  // +optional
+  optional string protocol = 2;
+
+  // The port that will be exposed by this service.
+  optional int32 port = 3;
+
+  // Number or name of the port to access on the pods targeted by the service.
+  // Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
+  // If this is a string, it will be looked up as a named port in the
+  // target Pod's container ports. If this is not specified, the value
+  // of the 'port' field is used (an identity map).
+  // This field is ignored for services with clusterIP=None, and should be
+  // omitted or set equal to the 'port' field.
+  // More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service
+  // +optional
+  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString targetPort = 4;
+
+  // The port on each node on which this service is exposed when type=NodePort or LoadBalancer.
+  // Usually assigned by the system. If specified, it will be allocated to the service
+  // if unused or else creation of the service will fail.
+  // Default is to auto-allocate a port if the ServiceType of this Service requires one.
+  // More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+  // +optional
+  optional int32 nodePort = 5;
+}
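+
+// A minimal sketch of one entry in a ServiceSpec's ports list (the names and
+// numbers are illustrative):
+//
+//   - name: http
+//     protocol: TCP
+//     port: 80          # exposed by the service
+//     targetPort: 8080  # container port (number or name) on the selected pods
+//     nodePort: 30080   # only meaningful for type=NodePort or LoadBalancer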
+
+// ServiceProxyOptions is the query options to a Service's proxy call.
+message ServiceProxyOptions {
+  // Path is the part of URLs that include service endpoints, suffixes,
+  // and parameters to use for the current proxy request to the service.
+  // For example, the whole request URL is
+  // http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy.
+  // Path is _search?q=user:kimchy.
+  // +optional
+  optional string path = 1;
+}
+
+// ServiceSpec describes the attributes that a user creates on a service.
+message ServiceSpec {
+  // The list of ports that are exposed by this service.
+  // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+  // +patchMergeKey=port
+  // +patchStrategy=merge
+  repeated ServicePort ports = 1;
+
+  // Route service traffic to pods with label keys and values matching this
+  // selector. If empty or not present, the service is assumed to have an
+  // external process managing its endpoints, which Kubernetes will not
+  // modify. Only applies to types ClusterIP, NodePort, and LoadBalancer.
+  // Ignored if type is ExternalName.
+  // More info: https://kubernetes.io/docs/concepts/services-networking/service/
+  // +optional
+  map<string, string> selector = 2;
+
+  // clusterIP is the IP address of the service and is usually assigned
+  // randomly by the master. If an address is specified manually and is not in
+  // use by others, it will be allocated to the service; otherwise, creation
+  // of the service will fail. This field cannot be changed through updates.
+  // Valid values are "None", empty string (""), or a valid IP address. "None"
+  // can be specified for headless services when proxying is not required.
+  // Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if
+  // type is ExternalName.
+  // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+  // +optional
+  optional string clusterIP = 3;
+
+  // type determines how the Service is exposed. Defaults to ClusterIP. Valid
+  // options are ExternalName, ClusterIP, NodePort, and LoadBalancer.
+  // "ExternalName" maps to the specified externalName.
+  // "ClusterIP" allocates a cluster-internal IP address for load-balancing to
+  // endpoints. Endpoints are determined by the selector or if that is not
+  // specified, by manual construction of an Endpoints object. If clusterIP is
+  // "None", no virtual IP is allocated and the endpoints are published as a
+  // set of endpoints rather than a stable IP.
+  // "NodePort" builds on ClusterIP and allocates a port on every node which
+  // routes to the clusterIP.
+  // "LoadBalancer" builds on NodePort and creates an
+  // external load-balancer (if supported in the current cloud) which routes
+  // to the clusterIP.
+  // More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services---service-types
+  // +optional
+  optional string type = 4;
+
+  // externalIPs is a list of IP addresses for which nodes in the cluster
+  // will also accept traffic for this service.  These IPs are not managed by
+  // Kubernetes.  The user is responsible for ensuring that traffic arrives
+  // at a node with this IP.  A common example is external load-balancers
+  // that are not part of the Kubernetes system.
+  // +optional
+  repeated string externalIPs = 5;
+
+  // Used to maintain session affinity: "ClientIP" enables client-IP-based
+  // session affinity; "None" disables it.
+  // Must be ClientIP or None. Defaults to None.
+  // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+  // +optional
+  optional string sessionAffinity = 7;
+
+  // Only applies to Service Type: LoadBalancer.
+  // The load balancer will be created with the IP specified in this field.
+  // This feature depends on whether the underlying cloud-provider supports specifying
+  // the loadBalancerIP when a load balancer is created.
+  // This field will be ignored if the cloud-provider does not support the feature.
+  // +optional
+  optional string loadBalancerIP = 8;
+
+  // If specified and supported by the platform, traffic through the
+  // cloud-provider load-balancer will be restricted to the specified client IPs.
+  // This field will be ignored if the cloud-provider does not support the feature.
+  // More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/
+  // +optional
+  repeated string loadBalancerSourceRanges = 9;
+
+  // externalName is the external reference that kubedns or equivalent will
+  // return as a CNAME record for this service. No proxying will be involved.
+  // Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123)
+  // and requires Type to be ExternalName.
+  // +optional
+  optional string externalName = 10;
+
+  // externalTrafficPolicy denotes if this Service desires to route external
+  // traffic to node-local or cluster-wide endpoints. "Local" preserves the
+  // client source IP and avoids a second hop for LoadBalancer and NodePort
+  // type services, but risks potentially imbalanced traffic spreading.
+  // "Cluster" obscures the client source IP and may cause a second hop to
+  // another node, but should have good overall load-spreading.
+  // +optional
+  optional string externalTrafficPolicy = 11;
+
+  // healthCheckNodePort specifies the healthcheck nodePort for the service.
+  // If not specified, HealthCheckNodePort is created by the service API
+  // backend with the allocated nodePort. Will use the user-specified nodePort
+  // value if specified by the client. Only takes effect when Type is set to
+  // LoadBalancer and ExternalTrafficPolicy is set to Local.
+  // +optional
+  optional int32 healthCheckNodePort = 12;
+
+  // publishNotReadyAddresses, when set to true, indicates that DNS implementations
+  // must publish the notReadyAddresses of subsets for the Endpoints associated with
+  // the Service. The default value is false.
+  // The primary use case for setting this field is to use a StatefulSet's Headless Service
+  // to propagate SRV records for its Pods without respect to their readiness,
+  // for the purpose of peer discovery.
+  // +optional
+  optional bool publishNotReadyAddresses = 13;
+
+  // sessionAffinityConfig contains the configurations of session affinity.
+  // +optional
+  optional SessionAffinityConfig sessionAffinityConfig = 14;
+}
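+
+// A minimal spec sketch combining the fields above (the labels and port
+// numbers are illustrative):
+//
+//   spec:
+//     type: ClusterIP
+//     selector:
+//       app: web
+//     ports:
+//     - port: 80
+//       targetPort: 8080
+//     sessionAffinity: ClientIP
+//
+// Setting clusterIP: None instead would make this a headless service with no
+// virtual IP, as described for the type and clusterIP fields above.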
+
+// ServiceStatus represents the current status of a service.
+message ServiceStatus {
+  // LoadBalancer contains the current status of the load-balancer,
+  // if one is present.
+  // +optional
+  optional LoadBalancerStatus loadBalancer = 1;
+}
+
+// SessionAffinityConfig represents the configurations of session affinity.
+message SessionAffinityConfig {
+  // clientIP contains the configurations of Client IP based session affinity.
+  // +optional
+  optional ClientIPConfig clientIP = 1;
+}
+
+// Represents a StorageOS persistent volume resource.
+message StorageOSPersistentVolumeSource {
+  // VolumeName is the human-readable name of the StorageOS volume.  Volume
+  // names are only unique within a namespace.
+  optional string volumeName = 1;
+
+  // VolumeNamespace specifies the scope of the volume within StorageOS.  If no
+  // namespace is specified then the Pod's namespace will be used.  This allows the
+  // Kubernetes name scoping to be mirrored within StorageOS for tighter integration.
+  // Set VolumeName to any name to override the default behaviour.
+  // Set to "default" if you are not using namespaces within StorageOS.
+  // Namespaces that do not pre-exist within StorageOS will be created.
+  // +optional
+  optional string volumeNamespace = 2;
+
+  // Filesystem type to mount.
+  // Must be a filesystem type supported by the host operating system.
+  // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+  // +optional
+  optional string fsType = 3;
+
+  // Defaults to false (read/write). ReadOnly here will force
+  // the ReadOnly setting in VolumeMounts.
+  // +optional
+  optional bool readOnly = 4;
+
+  // SecretRef specifies the secret to use for obtaining the StorageOS API
+  // credentials.  If not specified, default values will be attempted.
+  // +optional
+  optional ObjectReference secretRef = 5;
+}
+
+// Represents a StorageOS persistent volume resource.
+message StorageOSVolumeSource {
+  // VolumeName is the human-readable name of the StorageOS volume.  Volume
+  // names are only unique within a namespace.
+  optional string volumeName = 1;
+
+  // VolumeNamespace specifies the scope of the volume within StorageOS.  If no
+  // namespace is specified then the Pod's namespace will be used.  This allows the
+  // Kubernetes name scoping to be mirrored within StorageOS for tighter integration.
+  // Set VolumeName to any name to override the default behaviour.
+  // Set to "default" if you are not using namespaces within StorageOS.
+  // Namespaces that do not pre-exist within StorageOS will be created.
+  // +optional
+  optional string volumeNamespace = 2;
+
+  // Filesystem type to mount.
+  // Must be a filesystem type supported by the host operating system.
+  // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+  // +optional
+  optional string fsType = 3;
+
+  // Defaults to false (read/write). ReadOnly here will force
+  // the ReadOnly setting in VolumeMounts.
+  // +optional
+  optional bool readOnly = 4;
+
+  // SecretRef specifies the secret to use for obtaining the StorageOS API
+  // credentials.  If not specified, default values will be attempted.
+  // +optional
+  optional LocalObjectReference secretRef = 5;
+}
+
+// Sysctl defines a kernel parameter to be set
+message Sysctl {
+  // Name of a property to set
+  optional string name = 1;
+
+  // Value of a property to set
+  optional string value = 2;
+}
+
+// TCPSocketAction describes an action based on opening a socket
+message TCPSocketAction {
+  // Number or name of the port to access on the container.
+  // Number must be in the range 1 to 65535.
+  // Name must be an IANA_SVC_NAME.
+  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 1;
+
+  // Optional: Host name to connect to, defaults to the pod IP.
+  // +optional
+  optional string host = 2;
+}
+
+// The node this Taint is attached to has the "effect" on
+// any pod that does not tolerate the Taint.
+message Taint {
+  // Required. The taint key to be applied to a node.
+  optional string key = 1;
+
+  // Required. The taint value corresponding to the taint key.
+  // +optional
+  optional string value = 2;
+
+  // Required. The effect of the taint on pods
+  // that do not tolerate the taint.
+  // Valid effects are NoSchedule, PreferNoSchedule and NoExecute.
+  optional string effect = 3;
+
+  // TimeAdded represents the time at which the taint was added.
+  // It is only written for NoExecute taints.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time timeAdded = 4;
+}
+
+// The pod this Toleration is attached to tolerates any taint that matches
+// the triple <key,value,effect> using the matching operator <operator>.
+message Toleration {
+  // Key is the taint key that the toleration applies to. Empty means match all taint keys.
+  // If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+  // +optional
+  optional string key = 1;
+
+  // Operator represents a key's relationship to the value.
+  // Valid operators are Exists and Equal. Defaults to Equal.
+  // Exists is equivalent to wildcard for value, so that a pod can
+  // tolerate all taints of a particular category.
+  // +optional
+  optional string operator = 2;
+
+  // Value is the taint value the toleration matches to.
+  // If the operator is Exists, the value should be empty, otherwise just a regular string.
+  // +optional
+  optional string value = 3;
+
+  // Effect indicates the taint effect to match. Empty means match all taint effects.
+  // When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+  // +optional
+  optional string effect = 4;
+
+  // TolerationSeconds represents the period of time the toleration (which must be
+  // of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+  // it is not set, which means tolerate the taint forever (do not evict). Zero and
+  // negative values will be treated as 0 (evict immediately) by the system.
+  // +optional
+  optional int64 tolerationSeconds = 5;
+}
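+
+// A minimal sketch pairing a node taint with a pod toleration (the key and
+// value are illustrative):
+//
+//   # node taint: dedicated=gpu:NoSchedule
+//   tolerations:
+//   - key: dedicated
+//     operator: Equal
+//     value: gpu
+//     effect: NoSchedule
+//
+// Using operator: Exists with no value would tolerate every value of the
+// "dedicated" taint, per the operator rules above.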
+
+// A topology selector requirement is a selector that matches a given label.
+// This is an alpha feature and may change in the future.
+message TopologySelectorLabelRequirement {
+  // The label key that the selector applies to.
+  optional string key = 1;
+
+  // An array of string values. One value must match the label to be selected.
+  // Each entry in Values is ORed.
+  repeated string values = 2;
+}
+
+// A topology selector term represents the result of label queries.
+// A null or empty topology selector term matches no objects.
+// Its requirements are ANDed.
+// It provides a subset of the functionality of NodeSelectorTerm.
+// This is an alpha feature and may change in the future.
+message TopologySelectorTerm {
+  // A list of topology selector requirements by labels.
+  // +optional
+  repeated TopologySelectorLabelRequirement matchLabelExpressions = 1;
+}
+
+// TypedLocalObjectReference contains enough information to let you locate the
+// typed referenced object inside the same namespace.
+message TypedLocalObjectReference {
+  // APIGroup is the group for the resource being referenced.
+  // If APIGroup is not specified, the specified Kind must be in the core API group.
+  // For any other third-party types, APIGroup is required.
+  // +optional
+  optional string apiGroup = 1;
+
+  // Kind is the type of the resource being referenced
+  optional string kind = 2;
+
+  // Name is the name of the resource being referenced
+  optional string name = 3;
+}
+
+// Volume represents a named volume in a pod that may be accessed by any container in the pod.
+message Volume {
+  // Volume's name.
+  // Must be a DNS_LABEL and unique within the pod.
+  // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+  optional string name = 1;
+
+  // VolumeSource represents the location and type of the mounted volume.
+  // If not specified, the Volume is implied to be an EmptyDir.
+  // This implied behavior is deprecated and will be removed in a future version.
+  optional VolumeSource volumeSource = 2;
+}
+
+// volumeDevice describes a mapping of a raw block device within a container.
+message VolumeDevice {
+  // name must match the name of a persistentVolumeClaim in the pod
+  optional string name = 1;
+
+  // devicePath is the path inside of the container that the device will be mapped to.
+  optional string devicePath = 2;
+}
+
+// VolumeMount describes a mounting of a Volume within a container.
+message VolumeMount {
+  // This must match the Name of a Volume.
+  optional string name = 1;
+
+  // Mounted read-only if true, read-write otherwise (false or unspecified).
+  // Defaults to false.
+  // +optional
+  optional bool readOnly = 2;
+
+  // Path within the container at which the volume should be mounted.  Must
+  // not contain ':'.
+  optional string mountPath = 3;
+
+  // Path within the volume from which the container's volume should be mounted.
+  // Defaults to "" (volume's root).
+  // +optional
+  optional string subPath = 4;
+
+  // mountPropagation determines how mounts are propagated from the host
+  // to container and the other way around.
+  // When not set, MountPropagationNone is used.
+  // This field is beta in 1.10.
+  // +optional
+  optional string mountPropagation = 5;
+}
+
+// VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from.
+message VolumeNodeAffinity {
+  // Required specifies hard node constraints that must be met.
+  optional NodeSelector required = 1;
+}
+
+// A projection that may be combined with other supported volume types.
+message VolumeProjection {
+  // information about the secret data to project
+  // +optional
+  optional SecretProjection secret = 1;
+
+  // information about the downwardAPI data to project
+  // +optional
+  optional DownwardAPIProjection downwardAPI = 2;
+
+  // information about the configMap data to project
+  // +optional
+  optional ConfigMapProjection configMap = 3;
+
+  // information about the serviceAccountToken data to project
+  // +optional
+  optional ServiceAccountTokenProjection serviceAccountToken = 4;
+}
+
+// Represents the source of a volume to mount.
+// Only one of its members may be specified.
+message VolumeSource {
+  // HostPath represents a pre-existing file or directory on the host
+  // machine that is directly exposed to the container. This is generally
+  // used for system agents or other privileged things that are allowed
+  // to see the host machine. Most containers will NOT need this.
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+  // ---
+  // TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not
+  // mount host directories as read/write.
+  // +optional
+  optional HostPathVolumeSource hostPath = 1;
+
+  // EmptyDir represents a temporary directory that shares a pod's lifetime.
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+  // +optional
+  optional EmptyDirVolumeSource emptyDir = 2;
+
+  // GCEPersistentDisk represents a GCE Disk resource that is attached to a
+  // kubelet's host machine and then exposed to the pod.
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+  // +optional
+  optional GCEPersistentDiskVolumeSource gcePersistentDisk = 3;
+
+  // AWSElasticBlockStore represents an AWS Disk resource that is attached to a
+  // kubelet's host machine and then exposed to the pod.
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+  // +optional
+  optional AWSElasticBlockStoreVolumeSource awsElasticBlockStore = 4;
+
+  // GitRepo represents a git repository at a particular revision.
+  // DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an
+  // EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
+  // into the Pod's container.
+  // +optional
+  optional GitRepoVolumeSource gitRepo = 5;
+
+  // Secret represents a secret that should populate this volume.
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
+  // +optional
+  optional SecretVolumeSource secret = 6;
+
+  // NFS represents an NFS mount on the host that shares a pod's lifetime
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+  // +optional
+  optional NFSVolumeSource nfs = 7;
+
+  // ISCSI represents an ISCSI Disk resource that is attached to a
+  // kubelet's host machine and then exposed to the pod.
+  // More info: https://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md
+  // +optional
+  optional ISCSIVolumeSource iscsi = 8;
+
+  // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
+  // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md
+  // +optional
+  optional GlusterfsVolumeSource glusterfs = 9;
+
+  // PersistentVolumeClaimVolumeSource represents a reference to a
+  // PersistentVolumeClaim in the same namespace.
+  // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
+  // +optional
+  optional PersistentVolumeClaimVolumeSource persistentVolumeClaim = 10;
+
+  // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime.
+  // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md
+  // +optional
+  optional RBDVolumeSource rbd = 11;
+
+  // FlexVolume represents a generic volume resource that is
+  // provisioned/attached using an exec based plugin.
+  // +optional
+  optional FlexVolumeSource flexVolume = 12;
+
+  // Cinder represents a cinder volume attached and mounted on the kubelet's host machine
+  // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
+  // +optional
+  optional CinderVolumeSource cinder = 13;
+
+  // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime
+  // +optional
+  optional CephFSVolumeSource cephfs = 14;
+
+  // Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running
+  // +optional
+  optional FlockerVolumeSource flocker = 15;
+
+  // DownwardAPI represents downward API about the pod that should populate this volume
+  // +optional
+  optional DownwardAPIVolumeSource downwardAPI = 16;
+
+  // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.
+  // +optional
+  optional FCVolumeSource fc = 17;
+
+  // AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
+  // +optional
+  optional AzureFileVolumeSource azureFile = 18;
+
+  // ConfigMap represents a configMap that should populate this volume
+  // +optional
+  optional ConfigMapVolumeSource configMap = 19;
+
+  // VsphereVolume represents a vSphere volume attached and mounted on the kubelet's host machine
+  // +optional
+  optional VsphereVirtualDiskVolumeSource vsphereVolume = 20;
+
+  // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime
+  // +optional
+  optional QuobyteVolumeSource quobyte = 21;
+
+  // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
+  // +optional
+  optional AzureDiskVolumeSource azureDisk = 22;
+
+  // PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on the kubelet's host machine
+  optional PhotonPersistentDiskVolumeSource photonPersistentDisk = 23;
+
+  // Items for all-in-one resources: secrets, configmaps, and downward API
+  optional ProjectedVolumeSource projected = 26;
+
+  // PortworxVolume represents a portworx volume attached and mounted on the kubelet's host machine
+  // +optional
+  optional PortworxVolumeSource portworxVolume = 24;
+
+  // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
+  // +optional
+  optional ScaleIOVolumeSource scaleIO = 25;
+
+  // StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.
+  // +optional
+  optional StorageOSVolumeSource storageos = 27;
+}
+
+// Represents a vSphere volume resource.
+message VsphereVirtualDiskVolumeSource {
+  // Path that identifies vSphere volume vmdk
+  optional string volumePath = 1;
+
+  // Filesystem type to mount.
+  // Must be a filesystem type supported by the host operating system.
+  // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+  // +optional
+  optional string fsType = 2;
+
+  // Storage Policy Based Management (SPBM) profile name.
+  // +optional
+  optional string storagePolicyName = 3;
+
+  // Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.
+  // +optional
+  optional string storagePolicyID = 4;
+}
+
+// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
+message WeightedPodAffinityTerm {
+  // weight associated with matching the corresponding podAffinityTerm,
+  // in the range 1-100.
+  optional int32 weight = 1;
+
+  // Required. A pod affinity term, associated with the corresponding weight.
+  optional PodAffinityTerm podAffinityTerm = 2;
+}
+
diff --git a/vendor/k8s.io/api/core/v1/objectreference.go b/vendor/k8s.io/api/core/v1/objectreference.go
new file mode 100644
index 0000000..ee5335e
--- /dev/null
+++ b/vendor/k8s.io/api/core/v1/objectreference.go
@@ -0,0 +1,33 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// SetGroupVersionKind sets the APIVersion and Kind of the object reference
+// from the provided GroupVersionKind.
+func (obj *ObjectReference) SetGroupVersionKind(gvk schema.GroupVersionKind) {
+	obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind()
+}
+
+// GroupVersionKind returns the GroupVersionKind of the object reference.
+func (obj *ObjectReference) GroupVersionKind() schema.GroupVersionKind {
+	return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind)
+}
+
+// GetObjectKind returns the object reference itself, allowing ObjectReference
+// to be used where a schema.ObjectKind is expected.
+func (obj *ObjectReference) GetObjectKind() schema.ObjectKind { return obj }
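+
+// A minimal usage sketch for the helpers above (the Pod kind is illustrative):
+//
+//	ref := &ObjectReference{}
+//	ref.SetGroupVersionKind(schema.GroupVersionKind{Version: "v1", Kind: "Pod"})
+//	// ref.APIVersion is now "v1" and ref.Kind is "Pod"; GroupVersionKind()
+//	// round-trips the same value, and GetObjectKind() returns ref itself.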
diff --git a/vendor/k8s.io/api/core/v1/register.go b/vendor/k8s.io/api/core/v1/register.go
new file mode 100644
index 0000000..1aac0cb
--- /dev/null
+++ b/vendor/k8s.io/api/core/v1/register.go
@@ -0,0 +1,99 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = ""
+
+// SchemeGroupVersion is the group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	// We only register manually written functions here. The registration of the
+	// generated functions takes place in the generated files. The separation
+	// makes the code compile even when the generated files are missing.
+	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+	AddToScheme   = SchemeBuilder.AddToScheme
+)
+
+// addKnownTypes adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&Pod{},
+		&PodList{},
+		&PodStatusResult{},
+		&PodTemplate{},
+		&PodTemplateList{},
+		&ReplicationController{},
+		&ReplicationControllerList{},
+		&Service{},
+		&ServiceProxyOptions{},
+		&ServiceList{},
+		&Endpoints{},
+		&EndpointsList{},
+		&Node{},
+		&NodeList{},
+		&NodeProxyOptions{},
+		&Binding{},
+		&Event{},
+		&EventList{},
+		&List{},
+		&LimitRange{},
+		&LimitRangeList{},
+		&ResourceQuota{},
+		&ResourceQuotaList{},
+		&Namespace{},
+		&NamespaceList{},
+		&Secret{},
+		&SecretList{},
+		&ServiceAccount{},
+		&ServiceAccountList{},
+		&PersistentVolume{},
+		&PersistentVolumeList{},
+		&PersistentVolumeClaim{},
+		&PersistentVolumeClaimList{},
+		&PodAttachOptions{},
+		&PodLogOptions{},
+		&PodExecOptions{},
+		&PodPortForwardOptions{},
+		&PodProxyOptions{},
+		&ComponentStatus{},
+		&ComponentStatusList{},
+		&SerializedReference{},
+		&RangeAllocation{},
+		&ConfigMap{},
+		&ConfigMapList{},
+	)
+
+	// Add common types
+	scheme.AddKnownTypes(SchemeGroupVersion, &metav1.Status{})
+
+	// Add the watch version that applies
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
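+
+// A minimal usage sketch, assuming a caller that builds its own scheme:
+//
+//	scheme := runtime.NewScheme()
+//	if err := AddToScheme(scheme); err != nil {
+//		panic(err)
+//	}
+//	// scheme now recognizes &Pod{}, &Service{}, etc. in group "", version "v1".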
diff --git a/vendor/k8s.io/api/core/v1/resource.go b/vendor/k8s.io/api/core/v1/resource.go
new file mode 100644
index 0000000..bb80412
--- /dev/null
+++ b/vendor/k8s.io/api/core/v1/resource.go
@@ -0,0 +1,56 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"k8s.io/apimachinery/pkg/api/resource"
+)
+
+// String returns the string version of ResourceName.
+func (self ResourceName) String() string {
+	return string(self)
+}
+
+// Cpu returns the CPU limit if specified.
+func (self *ResourceList) Cpu() *resource.Quantity {
+	if val, ok := (*self)[ResourceCPU]; ok {
+		return &val
+	}
+	return &resource.Quantity{Format: resource.DecimalSI}
+}
+
+// Memory returns the Memory limit if specified.
+func (self *ResourceList) Memory() *resource.Quantity {
+	if val, ok := (*self)[ResourceMemory]; ok {
+		return &val
+	}
+	return &resource.Quantity{Format: resource.BinarySI}
+}
+
+// Pods returns the pods limit if specified.
+func (self *ResourceList) Pods() *resource.Quantity {
+	if val, ok := (*self)[ResourcePods]; ok {
+		return &val
+	}
+	return &resource.Quantity{}
+}
+
+// StorageEphemeral returns the ephemeral storage limit if specified.
+func (self *ResourceList) StorageEphemeral() *resource.Quantity {
+	if val, ok := (*self)[ResourceEphemeralStorage]; ok {
+		return &val
+	}
+	return &resource.Quantity{}
+}
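+
+// A minimal usage sketch for the accessors above (the quantities are
+// illustrative; resource.MustParse is from k8s.io/apimachinery/pkg/api/resource):
+//
+//	limits := ResourceList{
+//		ResourceCPU:    resource.MustParse("500m"),
+//		ResourceMemory: resource.MustParse("128Mi"),
+//	}
+//	limits.Cpu().String()    // "500m"
+//	limits.Memory().String() // "128Mi"
+//	limits.Pods()            // zero-value Quantity, since ResourcePods is unset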
diff --git a/vendor/k8s.io/api/core/v1/taint.go b/vendor/k8s.io/api/core/v1/taint.go
new file mode 100644
index 0000000..7b606a3
--- /dev/null
+++ b/vendor/k8s.io/api/core/v1/taint.go
@@ -0,0 +1,33 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import "fmt"
+
+// MatchTaint checks if the taint matches taintToMatch. Taints are unique by
+// key:effect; if two taints have the same key:effect, they are regarded as matching.
+func (t *Taint) MatchTaint(taintToMatch *Taint) bool {
+	return t.Key == taintToMatch.Key && t.Effect == taintToMatch.Effect
+}
+
+// ToString converts the taint struct to a string in the format key=value:effect or key:effect.
+func (t *Taint) ToString() string {
+	if len(t.Value) == 0 {
+		return fmt.Sprintf("%v:%v", t.Key, t.Effect)
+	}
+	return fmt.Sprintf("%v=%v:%v", t.Key, t.Value, t.Effect)
+}
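+
+// A minimal usage sketch (the key and values are illustrative):
+//
+//	t := Taint{Key: "dedicated", Value: "gpu", Effect: TaintEffectNoSchedule}
+//	t.ToString() // "dedicated=gpu:NoSchedule"
+//	other := Taint{Key: "dedicated", Value: "infra", Effect: TaintEffectNoSchedule}
+//	t.MatchTaint(&other) // true: only key and effect are compared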
diff --git a/vendor/k8s.io/api/core/v1/toleration.go b/vendor/k8s.io/api/core/v1/toleration.go
new file mode 100644
index 0000000..b203d33
--- /dev/null
+++ b/vendor/k8s.io/api/core/v1/toleration.go
@@ -0,0 +1,56 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+// MatchToleration checks if the toleration matches tolerationToMatch. Tolerations are unique by <key,effect,operator,value>;
+// if two tolerations have the same <key,effect,operator,value> combination, they are regarded as matching.
+// TODO: uniqueness check for tolerations in api validations.
+func (t *Toleration) MatchToleration(tolerationToMatch *Toleration) bool {
+	return t.Key == tolerationToMatch.Key &&
+		t.Effect == tolerationToMatch.Effect &&
+		t.Operator == tolerationToMatch.Operator &&
+		t.Value == tolerationToMatch.Value
+}
+
+// ToleratesTaint checks if the toleration tolerates the taint.
+// The matching follows the rules below:
+// (1) Empty toleration.effect means to match all taint effects,
+//     otherwise the taint effect must equal toleration.effect.
+// (2) If toleration.operator is 'Exists', it means to match all taint values.
+// (3) Empty toleration.key means to match all taint keys.
+//     If toleration.key is empty, toleration.operator must be 'Exists';
+//     this combination means to match all taint values and all taint keys.
+func (t *Toleration) ToleratesTaint(taint *Taint) bool {
+	if len(t.Effect) > 0 && t.Effect != taint.Effect {
+		return false
+	}
+
+	if len(t.Key) > 0 && t.Key != taint.Key {
+		return false
+	}
+
+	// TODO: Use proper defaulting when Toleration becomes a field of PodSpec
+	switch t.Operator {
+	// empty operator means Equal
+	case "", TolerationOpEqual:
+		return t.Value == taint.Value
+	case TolerationOpExists:
+		return true
+	default:
+		return false
+	}
+}
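+
+// A minimal usage sketch (the key and value are illustrative):
+//
+//	taint := Taint{Key: "dedicated", Value: "gpu", Effect: TaintEffectNoSchedule}
+//	tol := Toleration{Key: "dedicated", Operator: TolerationOpExists}
+//	tol.ToleratesTaint(&taint) // true: empty effect matches all effects (rule 1)
+//	                           // and Exists matches all values (rule 2)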
diff --git a/vendor/k8s.io/api/core/v1/types.go b/vendor/k8s.io/api/core/v1/types.go
new file mode 100644
index 0000000..43e70b9
--- /dev/null
+++ b/vendor/k8s.io/api/core/v1/types.go
@@ -0,0 +1,5360 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/intstr"
+)
+
+const (
+	// NamespaceDefault means the object is in the default namespace which is applied when not specified by clients
+	NamespaceDefault string = "default"
+	// NamespaceAll is the default argument to specify on a context when you want to list or filter resources across all namespaces
+	NamespaceAll string = ""
+	// NamespaceNodeLease is the namespace where we place node lease objects (used for node heartbeats)
+	NamespaceNodeLease string = "kube-node-lease"
+)
+
+// Volume represents a named volume in a pod that may be accessed by any container in the pod.
+type Volume struct {
+	// Volume's name.
+	// Must be a DNS_LABEL and unique within the pod.
+	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+	// VolumeSource represents the location and type of the mounted volume.
+	// If not specified, the Volume is implied to be an EmptyDir.
+	// This implied behavior is deprecated and will be removed in a future version.
+	VolumeSource `json:",inline" protobuf:"bytes,2,opt,name=volumeSource"`
+}
+
+// Represents the source of a volume to mount.
+// Only one of its members may be specified.
+type VolumeSource struct {
+	// HostPath represents a pre-existing file or directory on the host
+	// machine that is directly exposed to the container. This is generally
+	// used for system agents or other privileged things that are allowed
+	// to see the host machine. Most containers will NOT need this.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+	// ---
+	// TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not
+	// mount host directories as read/write.
+	// +optional
+	HostPath *HostPathVolumeSource `json:"hostPath,omitempty" protobuf:"bytes,1,opt,name=hostPath"`
+	// EmptyDir represents a temporary directory that shares a pod's lifetime.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+	// +optional
+	EmptyDir *EmptyDirVolumeSource `json:"emptyDir,omitempty" protobuf:"bytes,2,opt,name=emptyDir"`
+	// GCEPersistentDisk represents a GCE Disk resource that is attached to a
+	// kubelet's host machine and then exposed to the pod.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+	// +optional
+	GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty" protobuf:"bytes,3,opt,name=gcePersistentDisk"`
+	// AWSElasticBlockStore represents an AWS Disk resource that is attached to a
+	// kubelet's host machine and then exposed to the pod.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+	// +optional
+	AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty" protobuf:"bytes,4,opt,name=awsElasticBlockStore"`
+	// GitRepo represents a git repository at a particular revision.
+	// DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an
+	// EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
+	// into the Pod's container.
+	// +optional
+	GitRepo *GitRepoVolumeSource `json:"gitRepo,omitempty" protobuf:"bytes,5,opt,name=gitRepo"`
+	// Secret represents a secret that should populate this volume.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
+	// +optional
+	Secret *SecretVolumeSource `json:"secret,omitempty" protobuf:"bytes,6,opt,name=secret"`
+	// NFS represents an NFS mount on the host that shares a pod's lifetime
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+	// +optional
+	NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,7,opt,name=nfs"`
+	// ISCSI represents an ISCSI Disk resource that is attached to a
+	// kubelet's host machine and then exposed to the pod.
+	// More info: https://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md
+	// +optional
+	ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,8,opt,name=iscsi"`
+	// Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
+	// More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md
+	// +optional
+	Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,9,opt,name=glusterfs"`
+	// PersistentVolumeClaimVolumeSource represents a reference to a
+	// PersistentVolumeClaim in the same namespace.
+	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
+	// +optional
+	PersistentVolumeClaim *PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaim"`
+	// RBD represents a Rados Block Device mount on the host that shares a pod's lifetime.
+	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md
+	// +optional
+	RBD *RBDVolumeSource `json:"rbd,omitempty" protobuf:"bytes,11,opt,name=rbd"`
+	// FlexVolume represents a generic volume resource that is
+	// provisioned/attached using an exec based plugin.
+	// +optional
+	FlexVolume *FlexVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"`
+	// Cinder represents a cinder volume attached and mounted on the kubelet's host machine
+	// More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
+	// +optional
+	Cinder *CinderVolumeSource `json:"cinder,omitempty" protobuf:"bytes,13,opt,name=cinder"`
+	// CephFS represents a Ceph FS mount on the host that shares a pod's lifetime
+	// +optional
+	CephFS *CephFSVolumeSource `json:"cephfs,omitempty" protobuf:"bytes,14,opt,name=cephfs"`
+	// Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running
+	// +optional
+	Flocker *FlockerVolumeSource `json:"flocker,omitempty" protobuf:"bytes,15,opt,name=flocker"`
+	// DownwardAPI represents downward API about the pod that should populate this volume
+	// +optional
+	DownwardAPI *DownwardAPIVolumeSource `json:"downwardAPI,omitempty" protobuf:"bytes,16,opt,name=downwardAPI"`
+	// FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.
+	// +optional
+	FC *FCVolumeSource `json:"fc,omitempty" protobuf:"bytes,17,opt,name=fc"`
+	// AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
+	// +optional
+	AzureFile *AzureFileVolumeSource `json:"azureFile,omitempty" protobuf:"bytes,18,opt,name=azureFile"`
+	// ConfigMap represents a configMap that should populate this volume
+	// +optional
+	ConfigMap *ConfigMapVolumeSource `json:"configMap,omitempty" protobuf:"bytes,19,opt,name=configMap"`
+	// VsphereVolume represents a vSphere volume attached and mounted on the kubelet's host machine
+	// +optional
+	VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty" protobuf:"bytes,20,opt,name=vsphereVolume"`
+	// Quobyte represents a Quobyte mount on the host that shares a pod's lifetime
+	// +optional
+	Quobyte *QuobyteVolumeSource `json:"quobyte,omitempty" protobuf:"bytes,21,opt,name=quobyte"`
+	// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
+	// +optional
+	AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,22,opt,name=azureDisk"`
+	// PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on the kubelet's host machine
+	PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty" protobuf:"bytes,23,opt,name=photonPersistentDisk"`
+	// Items for all-in-one resources: secrets, configmaps, and downward API
+	Projected *ProjectedVolumeSource `json:"projected,omitempty" protobuf:"bytes,26,opt,name=projected"`
+	// PortworxVolume represents a portworx volume attached and mounted on the kubelet's host machine
+	// +optional
+	PortworxVolume *PortworxVolumeSource `json:"portworxVolume,omitempty" protobuf:"bytes,24,opt,name=portworxVolume"`
+	// ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
+	// +optional
+	ScaleIO *ScaleIOVolumeSource `json:"scaleIO,omitempty" protobuf:"bytes,25,opt,name=scaleIO"`
+	// StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.
+	// +optional
+	StorageOS *StorageOSVolumeSource `json:"storageos,omitempty" protobuf:"bytes,27,opt,name=storageos"`
+}
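+
+// A minimal construction sketch (the volume name is illustrative): exactly one
+// member of VolumeSource should be set, e.g. an EmptyDir scratch volume:
+//
+//	vol := Volume{
+//		Name:         "scratch",
+//		VolumeSource: VolumeSource{EmptyDir: &EmptyDirVolumeSource{}},
+//	}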
+
+// PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace.
+// This volume finds the bound PV and mounts that volume for the pod. A
+// PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another
+// type of volume that is owned by someone else (the system).
+type PersistentVolumeClaimVolumeSource struct {
+	// ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume.
+	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
+	ClaimName string `json:"claimName" protobuf:"bytes,1,opt,name=claimName"`
+	// Will force the ReadOnly setting in VolumeMounts.
+	// Default false.
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"`
+}
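+
+// A minimal construction sketch (the claim name is illustrative): a pod volume
+// backed by an existing claim in the pod's namespace:
+//
+//	vol := Volume{
+//		Name: "data",
+//		VolumeSource: VolumeSource{
+//			PersistentVolumeClaim: &PersistentVolumeClaimVolumeSource{ClaimName: "my-claim"},
+//		},
+//	}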
+
+// PersistentVolumeSource is similar to VolumeSource but meant for the
+// administrator who creates PVs. Exactly one of its members must be set.
+type PersistentVolumeSource struct {
+	// GCEPersistentDisk represents a GCE Disk resource that is attached to a
+	// kubelet's host machine and then exposed to the pod. Provisioned by an admin.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+	// +optional
+	GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty" protobuf:"bytes,1,opt,name=gcePersistentDisk"`
+	// AWSElasticBlockStore represents an AWS Disk resource that is attached to a
+	// kubelet's host machine and then exposed to the pod.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+	// +optional
+	AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty" protobuf:"bytes,2,opt,name=awsElasticBlockStore"`
+	// HostPath represents a directory on the host.
+	// Provisioned by a developer or tester.
+	// This is useful for single-node development and testing only!
+	// On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+	// +optional
+	HostPath *HostPathVolumeSource `json:"hostPath,omitempty" protobuf:"bytes,3,opt,name=hostPath"`
+	// Glusterfs represents a Glusterfs volume that is attached to a host and
+	// exposed to the pod. Provisioned by an admin.
+	// More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md
+	// +optional
+	Glusterfs *GlusterfsPersistentVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,4,opt,name=glusterfs"`
+	// NFS represents an NFS mount on the host. Provisioned by an admin.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+	// +optional
+	NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,5,opt,name=nfs"`
+	// RBD represents a Rados Block Device mount on the host that shares a pod's lifetime.
+	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md
+	// +optional
+	RBD *RBDPersistentVolumeSource `json:"rbd,omitempty" protobuf:"bytes,6,opt,name=rbd"`
+	// ISCSI represents an ISCSI Disk resource that is attached to a
+	// kubelet's host machine and then exposed to the pod. Provisioned by an admin.
+	// +optional
+	ISCSI *ISCSIPersistentVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,7,opt,name=iscsi"`
+	// Cinder represents a cinder volume attached and mounted on the kubelet's host machine
+	// More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
+	// +optional
+	Cinder *CinderPersistentVolumeSource `json:"cinder,omitempty" protobuf:"bytes,8,opt,name=cinder"`
+	// CephFS represents a Ceph FS mount on the host that shares a pod's lifetime
+	// +optional
+	CephFS *CephFSPersistentVolumeSource `json:"cephfs,omitempty" protobuf:"bytes,9,opt,name=cephfs"`
+	// FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.
+	// +optional
+	FC *FCVolumeSource `json:"fc,omitempty" protobuf:"bytes,10,opt,name=fc"`
+	// Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running
+	// +optional
+	Flocker *FlockerVolumeSource `json:"flocker,omitempty" protobuf:"bytes,11,opt,name=flocker"`
+	// FlexVolume represents a generic volume resource that is
+	// provisioned/attached using an exec based plugin.
+	// +optional
+	FlexVolume *FlexPersistentVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"`
+	// AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
+	// +optional
+	AzureFile *AzureFilePersistentVolumeSource `json:"azureFile,omitempty" protobuf:"bytes,13,opt,name=azureFile"`
+	// VsphereVolume represents a vSphere volume attached and mounted on the kubelet's host machine
+	// +optional
+	VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty" protobuf:"bytes,14,opt,name=vsphereVolume"`
+	// Quobyte represents a Quobyte mount on the host that shares a pod's lifetime
+	// +optional
+	Quobyte *QuobyteVolumeSource `json:"quobyte,omitempty" protobuf:"bytes,15,opt,name=quobyte"`
+	// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
+	// +optional
+	AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,16,opt,name=azureDisk"`
+	// PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on the kubelet's host machine
+	PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty" protobuf:"bytes,17,opt,name=photonPersistentDisk"`
+	// PortworxVolume represents a portworx volume attached and mounted on the kubelet's host machine
+	// +optional
+	PortworxVolume *PortworxVolumeSource `json:"portworxVolume,omitempty" protobuf:"bytes,18,opt,name=portworxVolume"`
+	// ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
+	// +optional
+	ScaleIO *ScaleIOPersistentVolumeSource `json:"scaleIO,omitempty" protobuf:"bytes,19,opt,name=scaleIO"`
+	// Local represents directly-attached storage with node affinity
+	// +optional
+	Local *LocalVolumeSource `json:"local,omitempty" protobuf:"bytes,20,opt,name=local"`
+	// StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod
+	// More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md
+	// +optional
+	StorageOS *StorageOSPersistentVolumeSource `json:"storageos,omitempty" protobuf:"bytes,21,opt,name=storageos"`
+	// CSI represents storage that is handled by an external CSI driver (Beta feature).
+	// +optional
+	CSI *CSIPersistentVolumeSource `json:"csi,omitempty" protobuf:"bytes,22,opt,name=csi"`
+}
+
+const (
+	// BetaStorageClassAnnotation represents the beta/previous StorageClass annotation.
+	// It's currently still used and will be held for backwards compatibility
+	BetaStorageClassAnnotation = "volume.beta.kubernetes.io/storage-class"
+
+	// MountOptionAnnotation defines mount option annotation used in PVs
+	MountOptionAnnotation = "volume.beta.kubernetes.io/mount-options"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PersistentVolume (PV) is a storage resource provisioned by an administrator.
+// It is analogous to a node.
+// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes
+type PersistentVolume struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Spec defines a specification of a persistent volume owned by the cluster.
+	// Provisioned by an administrator.
+	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes
+	// +optional
+	Spec PersistentVolumeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+	// Status represents the current information/status for the persistent volume.
+	// Populated by the system.
+	// Read-only.
+	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes
+	// +optional
+	Status PersistentVolumeStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// PersistentVolumeSpec is the specification of a persistent volume.
+type PersistentVolumeSpec struct {
+	// A description of the persistent volume's resources and capacity.
+	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity
+	// +optional
+	Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"`
+	// The actual volume backing the persistent volume.
+	PersistentVolumeSource `json:",inline" protobuf:"bytes,2,opt,name=persistentVolumeSource"`
+	// AccessModes contains all ways the volume can be mounted.
+	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes
+	// +optional
+	AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,3,rep,name=accessModes,casttype=PersistentVolumeAccessMode"`
+	// ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim.
+	// Expected to be non-nil when bound.
+	// claim.VolumeName is the authoritative bind between PV and PVC.
+	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#binding
+	// +optional
+	ClaimRef *ObjectReference `json:"claimRef,omitempty" protobuf:"bytes,4,opt,name=claimRef"`
+	// What happens to a persistent volume when released from its claim.
+	// Valid options are Retain (default for manually created PersistentVolumes), Delete (default
+	// for dynamically provisioned PersistentVolumes), and Recycle (deprecated).
+	// Recycle must be supported by the volume plugin underlying this PersistentVolume.
+	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming
+	// +optional
+	PersistentVolumeReclaimPolicy PersistentVolumeReclaimPolicy `json:"persistentVolumeReclaimPolicy,omitempty" protobuf:"bytes,5,opt,name=persistentVolumeReclaimPolicy,casttype=PersistentVolumeReclaimPolicy"`
+	// Name of StorageClass to which this persistent volume belongs. Empty value
+	// means that this volume does not belong to any StorageClass.
+	// +optional
+	StorageClassName string `json:"storageClassName,omitempty" protobuf:"bytes,6,opt,name=storageClassName"`
+	// A list of mount options, e.g. ["ro", "soft"]. Not validated - mount will
+	// simply fail if one is invalid.
+	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options
+	// +optional
+	MountOptions []string `json:"mountOptions,omitempty" protobuf:"bytes,7,opt,name=mountOptions"`
+	// volumeMode defines if a volume is intended to be used with a formatted filesystem
+	// or to remain in raw block state. Value of Filesystem is implied when not included in spec.
+	// This is a beta feature.
+	// +optional
+	VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,8,opt,name=volumeMode,casttype=PersistentVolumeMode"`
+	// NodeAffinity defines constraints that limit what nodes this volume can be accessed from.
+	// This field influences the scheduling of pods that use this volume.
+	// +optional
+	NodeAffinity *VolumeNodeAffinity `json:"nodeAffinity,omitempty" protobuf:"bytes,9,opt,name=nodeAffinity"`
+}
+
+// VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from.
+type VolumeNodeAffinity struct {
+	// Required specifies hard node constraints that must be met.
+	Required *NodeSelector `json:"required,omitempty" protobuf:"bytes,1,opt,name=required"`
+}
+
+// PersistentVolumeReclaimPolicy describes a policy for end-of-life maintenance of persistent volumes.
+type PersistentVolumeReclaimPolicy string
+
+const (
+	// PersistentVolumeReclaimRecycle means the volume will be recycled back into the pool of unbound persistent volumes on release from its claim.
+	// The volume plugin must support Recycling.
+	PersistentVolumeReclaimRecycle PersistentVolumeReclaimPolicy = "Recycle"
+	// PersistentVolumeReclaimDelete means the volume will be deleted from Kubernetes on release from its claim.
+	// The volume plugin must support Deletion.
+	PersistentVolumeReclaimDelete PersistentVolumeReclaimPolicy = "Delete"
+	// PersistentVolumeReclaimRetain means the volume will be left in its current phase (Released) for manual reclamation by the administrator.
+	// The default policy is Retain.
+	PersistentVolumeReclaimRetain PersistentVolumeReclaimPolicy = "Retain"
+)
+
+// PersistentVolumeMode describes how a volume is intended to be consumed, either Block or Filesystem.
+type PersistentVolumeMode string
+
+const (
+	// PersistentVolumeBlock means the volume will not be formatted with a filesystem and will remain a raw block device.
+	PersistentVolumeBlock PersistentVolumeMode = "Block"
+	// PersistentVolumeFilesystem means the volume will be or is formatted with a filesystem.
+	PersistentVolumeFilesystem PersistentVolumeMode = "Filesystem"
+)
+
+// PersistentVolumeStatus is the current status of a persistent volume.
+type PersistentVolumeStatus struct {
+	// Phase indicates if a volume is available, bound to a claim, or released by a claim.
+	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase
+	// +optional
+	Phase PersistentVolumePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PersistentVolumePhase"`
+	// A human-readable message indicating details about why the volume is in this state.
+	// +optional
+	Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"`
+	// Reason is a brief CamelCase string that describes any failure and is meant
+	// for machine parsing and tidy display in the CLI.
+	// +optional
+	Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PersistentVolumeList is a list of PersistentVolume items.
+type PersistentVolumeList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+	// List of persistent volumes.
+	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes
+	Items []PersistentVolume `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PersistentVolumeClaim is a user's request for and claim to a persistent volume.
+type PersistentVolumeClaim struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Spec defines the desired characteristics of a volume requested by a pod author.
+	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
+	// +optional
+	Spec PersistentVolumeClaimSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+	// Status represents the current information/status of a persistent volume claim.
+	// Read-only.
+	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
+	// +optional
+	Status PersistentVolumeClaimStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PersistentVolumeClaimList is a list of PersistentVolumeClaim items.
+type PersistentVolumeClaimList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+	// A list of persistent volume claims.
+	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
+	Items []PersistentVolumeClaim `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// PersistentVolumeClaimSpec describes the common attributes of storage devices
+// and allows a Source for provider-specific attributes
+type PersistentVolumeClaimSpec struct {
+	// AccessModes contains the desired access modes the volume should have.
+	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+	// +optional
+	AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,1,rep,name=accessModes,casttype=PersistentVolumeAccessMode"`
+	// A label query over volumes to consider for binding.
+	// +optional
+	Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"`
+	// Resources represents the minimum resources the volume should have.
+	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
+	// +optional
+	Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,2,opt,name=resources"`
+	// VolumeName is the binding reference to the PersistentVolume backing this claim.
+	// +optional
+	VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,3,opt,name=volumeName"`
+	// Name of the StorageClass required by the claim.
+	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
+	// +optional
+	StorageClassName *string `json:"storageClassName,omitempty" protobuf:"bytes,5,opt,name=storageClassName"`
+	// volumeMode defines what type of volume is required by the claim.
+	// Value of Filesystem is implied when not included in claim spec.
+	// This is a beta feature.
+	// +optional
+	VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,6,opt,name=volumeMode,casttype=PersistentVolumeMode"`
+	// This field requires the VolumeSnapshotDataSource alpha feature gate to be
+	// enabled and currently VolumeSnapshot is the only supported data source.
+	// If the provisioner can support VolumeSnapshot data source, it will create
+	// a new volume and data will be restored to the volume at the same time.
+	// If the provisioner does not support VolumeSnapshot data source, volume will
+	// not be created and the failure will be reported as an event.
+	// In the future, we plan to support more data source types and the behavior
+	// of the provisioner may change.
+	// +optional
+	DataSource *TypedLocalObjectReference `json:"dataSource" protobuf:"bytes,7,opt,name=dataSource"`
+}
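+
+// Illustrative only: a minimal sketch of a matching PersistentVolumeClaimSpec
+// that requests 5Gi with ReadWriteOnce access from an assumed "standard"
+// StorageClass; Resources.Requests is what drives capacity matching.
+//
+//	class := "standard"
+//	claimSpec := PersistentVolumeClaimSpec{
+//		AccessModes: []PersistentVolumeAccessMode{ReadWriteOnce},
+//		Resources: ResourceRequirements{
+//			Requests: ResourceList{
+//				ResourceStorage: resource.MustParse("5Gi"),
+//			},
+//		},
+//		StorageClassName: &class,
+//	}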
+
+// PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type
+type PersistentVolumeClaimConditionType string
+
+const (
+	// PersistentVolumeClaimResizing - a user-triggered resize of the PVC has been started
+	PersistentVolumeClaimResizing PersistentVolumeClaimConditionType = "Resizing"
+	// PersistentVolumeClaimFileSystemResizePending - the controller resize is finished and a file system resize is pending on the node
+	PersistentVolumeClaimFileSystemResizePending PersistentVolumeClaimConditionType = "FileSystemResizePending"
+)
+
+// PersistentVolumeClaimCondition contains details about the state of a PVC
+type PersistentVolumeClaimCondition struct {
+	Type   PersistentVolumeClaimConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PersistentVolumeClaimConditionType"`
+	Status ConditionStatus                    `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
+	// Last time we probed the condition.
+	// +optional
+	LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"`
+	// Last time the condition transitioned from one status to another.
+	// +optional
+	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"`
+	// This should be a short, unique, machine-understandable string that gives the reason
+	// for the condition's last transition. If it reports "ResizeStarted", the underlying
+	// persistent volume is being resized.
+	// +optional
+	Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"`
+	// Human-readable message indicating details about last transition.
+	// +optional
+	Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
+}
+
+// PersistentVolumeClaimStatus is the current status of a persistent volume claim.
+type PersistentVolumeClaimStatus struct {
+	// Phase represents the current phase of PersistentVolumeClaim.
+	// +optional
+	Phase PersistentVolumeClaimPhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PersistentVolumeClaimPhase"`
+	// AccessModes contains the actual access modes the volume backing the PVC has.
+	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+	// +optional
+	AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,2,rep,name=accessModes,casttype=PersistentVolumeAccessMode"`
+	// Represents the actual resources of the underlying volume.
+	// +optional
+	Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,3,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"`
+	// Current conditions of the persistent volume claim. If the underlying persistent volume
+	// is being resized, the condition will be set to 'ResizeStarted'.
+	// +optional
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	Conditions []PersistentVolumeClaimCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,4,rep,name=conditions"`
+}
+
+type PersistentVolumeAccessMode string
+
+const (
+	// can be mounted in read/write mode by exactly one host
+	ReadWriteOnce PersistentVolumeAccessMode = "ReadWriteOnce"
+	// can be mounted in read-only mode by many hosts
+	ReadOnlyMany PersistentVolumeAccessMode = "ReadOnlyMany"
+	// can be mounted in read/write mode by many hosts
+	ReadWriteMany PersistentVolumeAccessMode = "ReadWriteMany"
+)
+
+type PersistentVolumePhase string
+
+const (
+	// used for PersistentVolumes that are not available
+	VolumePending PersistentVolumePhase = "Pending"
+	// used for PersistentVolumes that are not yet bound
+	// Available volumes are held by the binder and matched to PersistentVolumeClaims
+	VolumeAvailable PersistentVolumePhase = "Available"
+	// used for PersistentVolumes that are bound
+	VolumeBound PersistentVolumePhase = "Bound"
+	// used for PersistentVolumes where the bound PersistentVolumeClaim was deleted
+	// released volumes must be recycled before becoming available again
+	// this phase is used by the persistent volume claim binder to signal to another process to reclaim the resource
+	VolumeReleased PersistentVolumePhase = "Released"
+	// used for PersistentVolumes that failed to be correctly recycled or deleted after being released from a claim
+	VolumeFailed PersistentVolumePhase = "Failed"
+)
+
+type PersistentVolumeClaimPhase string
+
+const (
+	// used for PersistentVolumeClaims that are not yet bound
+	ClaimPending PersistentVolumeClaimPhase = "Pending"
+	// used for PersistentVolumeClaims that are bound
+	ClaimBound PersistentVolumeClaimPhase = "Bound"
+	// used for PersistentVolumeClaims that lost their underlying
+	// PersistentVolume. The claim was bound to a PersistentVolume and this
+	// volume does not exist any longer and all data on it was lost.
+	ClaimLost PersistentVolumeClaimPhase = "Lost"
+)
+
+type HostPathType string
+
+const (
+	// For backwards compatibility, leave it empty if unset
+	HostPathUnset HostPathType = ""
+	// If nothing exists at the given path, an empty directory will be created there
+	// as needed with file mode 0755, with the same group and ownership as the kubelet.
+	HostPathDirectoryOrCreate HostPathType = "DirectoryOrCreate"
+	// A directory must exist at the given path
+	HostPathDirectory HostPathType = "Directory"
+	// If nothing exists at the given path, an empty file will be created there
+	// as needed with file mode 0644, with the same group and ownership as the kubelet.
+	HostPathFileOrCreate HostPathType = "FileOrCreate"
+	// A file must exist at the given path
+	HostPathFile HostPathType = "File"
+	// A UNIX socket must exist at the given path
+	HostPathSocket HostPathType = "Socket"
+	// A character device must exist at the given path
+	HostPathCharDev HostPathType = "CharDevice"
+	// A block device must exist at the given path
+	HostPathBlockDev HostPathType = "BlockDevice"
+)
+
+// Represents a host path mapped into a pod.
+// Host path volumes do not support ownership management or SELinux relabeling.
+type HostPathVolumeSource struct {
+	// Path of the directory on the host.
+	// If the path is a symlink, it will follow the link to the real path.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+	Path string `json:"path" protobuf:"bytes,1,opt,name=path"`
+	// Type for HostPath Volume
+	// Defaults to ""
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+	// +optional
+	Type *HostPathType `json:"type,omitempty" protobuf:"bytes,2,opt,name=type"`
+}
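+
+// Illustrative only: a sketch of a pod Volume backed by a hostPath, using the
+// DirectoryOrCreate check so the path is created (mode 0755) if absent. The
+// volume name and path are assumptions for the example.
+//
+//	hostPathType := HostPathDirectoryOrCreate
+//	vol := Volume{
+//		Name: "host-logs",
+//		VolumeSource: VolumeSource{
+//			HostPath: &HostPathVolumeSource{
+//				Path: "/var/log",
+//				Type: &hostPathType,
+//			},
+//		},
+//	}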
+
+// Represents an empty directory for a pod.
+// Empty directory volumes support ownership management and SELinux relabeling.
+type EmptyDirVolumeSource struct {
+	// What type of storage medium should back this directory.
+	// The default is "" which means to use the node's default medium.
+	// Must be an empty string (default) or Memory.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+	// +optional
+	Medium StorageMedium `json:"medium,omitempty" protobuf:"bytes,1,opt,name=medium,casttype=StorageMedium"`
+	// Total amount of local storage required for this EmptyDir volume.
+	// The size limit is also applicable for memory medium.
+	// The maximum usage on memory medium EmptyDir would be the minimum value between
+	// the SizeLimit specified here and the sum of memory limits of all containers in a pod.
+	// The default is nil which means that the limit is undefined.
+	// More info: http://kubernetes.io/docs/user-guide/volumes#emptydir
+	// +optional
+	SizeLimit *resource.Quantity `json:"sizeLimit,omitempty" protobuf:"bytes,2,opt,name=sizeLimit"`
+}
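+
+// Illustrative only: a sketch of a memory-backed EmptyDir whose usage is
+// capped by SizeLimit, per the semantics described above; the 256Mi figure is
+// an assumption for the example.
+//
+//	limit := resource.MustParse("256Mi")
+//	cache := Volume{
+//		Name: "cache",
+//		VolumeSource: VolumeSource{
+//			EmptyDir: &EmptyDirVolumeSource{
+//				Medium:    StorageMediumMemory,
+//				SizeLimit: &limit,
+//			},
+//		},
+//	}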
+
+// Represents a Glusterfs mount that lasts the lifetime of a pod.
+// Glusterfs volumes do not support ownership management or SELinux relabeling.
+type GlusterfsVolumeSource struct {
+	// EndpointsName is the endpoint name that details Glusterfs topology.
+	// More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
+	EndpointsName string `json:"endpoints" protobuf:"bytes,1,opt,name=endpoints"`
+
+	// Path is the Glusterfs volume path.
+	// More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
+	Path string `json:"path" protobuf:"bytes,2,opt,name=path"`
+
+	// ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions.
+	// Defaults to false.
+	// More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
+}
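+
+// Illustrative only: a sketch of a read-only Glusterfs mount; the Endpoints
+// object name and volume path are assumptions for the example.
+//
+//	gluster := GlusterfsVolumeSource{
+//		EndpointsName: "glusterfs-cluster",
+//		Path:          "vol0",
+//		ReadOnly:      true,
+//	}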
+
+// Represents a Glusterfs mount that lasts the lifetime of a pod.
+// Glusterfs volumes do not support ownership management or SELinux relabeling.
+type GlusterfsPersistentVolumeSource struct {
+	// EndpointsName is the endpoint name that details Glusterfs topology.
+	// More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
+	EndpointsName string `json:"endpoints" protobuf:"bytes,1,opt,name=endpoints"`
+
+	// Path is the Glusterfs volume path.
+	// More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
+	Path string `json:"path" protobuf:"bytes,2,opt,name=path"`
+
+	// ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions.
+	// Defaults to false.
+	// More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
+
+	// EndpointsNamespace is the namespace that contains Glusterfs endpoint.
+	// If this field is empty, EndpointsNamespace defaults to the same namespace as the bound PVC.
+	// More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
+	// +optional
+	EndpointsNamespace *string `json:"endpointsNamespace,omitempty" protobuf:"bytes,4,opt,name=endpointsNamespace"`
+}
+
+// Represents a Rados Block Device mount that lasts the lifetime of a pod.
+// RBD volumes support ownership management and SELinux relabeling.
+type RBDVolumeSource struct {
+	// A collection of Ceph monitors.
+	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
+	CephMonitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"`
+	// The rados image name.
+	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
+	RBDImage string `json:"image" protobuf:"bytes,2,opt,name=image"`
+	// Filesystem type of the volume that you want to mount.
+	// Tip: Ensure that the filesystem type is supported by the host operating system.
+	// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
+	// TODO: how do we prevent errors in the filesystem from compromising the machine
+	// +optional
+	FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"`
+	// The rados pool name.
+	// Default is rbd.
+	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
+	// +optional
+	RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"`
+	// The rados user name.
+	// Default is admin.
+	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
+	// +optional
+	RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"`
+	// Keyring is the path to key ring for RBDUser.
+	// Default is /etc/ceph/keyring.
+	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
+	// +optional
+	Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"`
+	// SecretRef is the name of the authentication secret for RBDUser. If provided,
+	// it overrides Keyring.
+	// Default is nil.
+	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
+	// +optional
+	SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,7,opt,name=secretRef"`
+	// ReadOnly here will force the ReadOnly setting in VolumeMounts.
+	// Defaults to false.
+	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,8,opt,name=readOnly"`
+}
+
+// Represents a Rados Block Device mount that lasts the lifetime of a pod.
+// RBD volumes support ownership management and SELinux relabeling.
+type RBDPersistentVolumeSource struct {
+	// A collection of Ceph monitors.
+	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
+	CephMonitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"`
+	// The rados image name.
+	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
+	RBDImage string `json:"image" protobuf:"bytes,2,opt,name=image"`
+	// Filesystem type of the volume that you want to mount.
+	// Tip: Ensure that the filesystem type is supported by the host operating system.
+	// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
+	// TODO: how do we prevent errors in the filesystem from compromising the machine
+	// +optional
+	FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"`
+	// The rados pool name.
+	// Default is rbd.
+	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
+	// +optional
+	RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"`
+	// The rados user name.
+	// Default is admin.
+	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
+	// +optional
+	RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"`
+	// Keyring is the path to key ring for RBDUser.
+	// Default is /etc/ceph/keyring.
+	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
+	// +optional
+	Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"`
+	// SecretRef is the name of the authentication secret for RBDUser. If provided,
+	// it overrides Keyring.
+	// Default is nil.
+	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
+	// +optional
+	SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,7,opt,name=secretRef"`
+	// ReadOnly here will force the ReadOnly setting in VolumeMounts.
+	// Defaults to false.
+	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,8,opt,name=readOnly"`
+}
+
+// Represents a Cinder volume resource in OpenStack.
+// A Cinder volume must exist before mounting to a container.
+// The volume must also be in the same region as the kubelet.
+// Cinder volumes support ownership management and SELinux relabeling.
+type CinderVolumeSource struct {
+	// volume ID used to identify the volume in Cinder
+	// More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
+	VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"`
+	// Filesystem type to mount.
+	// Must be a filesystem type supported by the host operating system.
+	// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+	// More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
+	// +optional
+	FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
+	// Optional: Defaults to false (read/write). ReadOnly here will force
+	// the ReadOnly setting in VolumeMounts.
+	// More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
+	// Optional: points to a secret object containing parameters used to connect
+	// to OpenStack.
+	// +optional
+	SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,4,opt,name=secretRef"`
+}
+
+// Represents a Cinder volume resource in OpenStack.
+// A Cinder volume must exist before mounting to a container.
+// The volume must also be in the same region as the kubelet.
+// Cinder volumes support ownership management and SELinux relabeling.
+type CinderPersistentVolumeSource struct {
+	// volume ID used to identify the volume in Cinder
+	// More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
+	VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"`
+	// Filesystem type to mount.
+	// Must be a filesystem type supported by the host operating system.
+	// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+	// More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
+	// +optional
+	FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
+	// Optional: Defaults to false (read/write). ReadOnly here will force
+	// the ReadOnly setting in VolumeMounts.
+	// More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
+	// Optional: points to a secret object containing parameters used to connect
+	// to OpenStack.
+	// +optional
+	SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,4,opt,name=secretRef"`
+}
+
+// Represents a Ceph Filesystem mount that lasts the lifetime of a pod
+// Cephfs volumes do not support ownership management or SELinux relabeling.
+type CephFSVolumeSource struct {
+	// Required: Monitors is a collection of Ceph monitors
+	// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
+	Monitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"`
+	// Optional: Used as the mounted root, rather than the full Ceph tree, default is /
+	// +optional
+	Path string `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"`
+	// Optional: User is the rados user name, default is admin
+	// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
+	// +optional
+	User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"`
+	// Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret
+	// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
+	// +optional
+	SecretFile string `json:"secretFile,omitempty" protobuf:"bytes,4,opt,name=secretFile"`
+	// Optional: SecretRef is a reference to the authentication secret for User, default is empty.
+	// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
+	// +optional
+	SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"`
+	// Optional: Defaults to false (read/write). ReadOnly here will force
+	// the ReadOnly setting in VolumeMounts.
+	// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"`
+}
+
+// SecretReference represents a Secret Reference. It has enough information to retrieve a secret
+// in any namespace.
+type SecretReference struct {
+	// Name is unique within a namespace to reference a secret resource.
+	// +optional
+	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
+	// Namespace defines the space within which the secret name must be unique.
+	// +optional
+	Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"`
+}
+
+// Represents a Ceph Filesystem mount that lasts the lifetime of a pod
+// Cephfs volumes do not support ownership management or SELinux relabeling.
+type CephFSPersistentVolumeSource struct {
+	// Required: Monitors is a collection of Ceph monitors
+	// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
+	Monitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"`
+	// Optional: Used as the mounted root, rather than the full Ceph tree, default is /
+	// +optional
+	Path string `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"`
+	// Optional: User is the rados user name, default is admin
+	// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
+	// +optional
+	User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"`
+	// Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret
+	// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
+	// +optional
+	SecretFile string `json:"secretFile,omitempty" protobuf:"bytes,4,opt,name=secretFile"`
+	// Optional: SecretRef is a reference to the authentication secret for User, default is empty.
+	// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
+	// +optional
+	SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"`
+	// Optional: Defaults to false (read/write). ReadOnly here will force
+	// the ReadOnly setting in VolumeMounts.
+	// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"`
+}
+
+// Represents a Flocker volume mounted by the Flocker agent.
+// One and only one of datasetName and datasetUUID should be set.
+// Flocker volumes do not support ownership management or SELinux relabeling.
+type FlockerVolumeSource struct {
+	// Name of the dataset, stored as metadata -> name on the Flocker dataset;
+	// should be considered deprecated
+	// +optional
+	DatasetName string `json:"datasetName,omitempty" protobuf:"bytes,1,opt,name=datasetName"`
+	// UUID of the dataset. This is the unique identifier of a Flocker dataset
+	// +optional
+	DatasetUUID string `json:"datasetUUID,omitempty" protobuf:"bytes,2,opt,name=datasetUUID"`
+}
+
+// StorageMedium defines ways that storage can be allocated to a volume.
+type StorageMedium string
+
+const (
+	StorageMediumDefault   StorageMedium = ""          // use whatever the default is for the node, assume anything we don't explicitly handle is this
+	StorageMediumMemory    StorageMedium = "Memory"    // use memory (e.g. tmpfs on linux)
+	StorageMediumHugePages StorageMedium = "HugePages" // use hugepages
+)
+
+// Protocol defines network protocols supported for things like container ports.
+type Protocol string
+
+const (
+	// ProtocolTCP is the TCP protocol.
+	ProtocolTCP Protocol = "TCP"
+	// ProtocolUDP is the UDP protocol.
+	ProtocolUDP Protocol = "UDP"
+	// ProtocolSCTP is the SCTP protocol.
+	ProtocolSCTP Protocol = "SCTP"
+)
+
+// Represents a Persistent Disk resource in Google Compute Engine.
+//
+// A GCE PD must exist before mounting to a container. The disk must
+// also be in the same GCE project and zone as the kubelet. A GCE PD
+// can only be mounted as read/write once or read-only many times. GCE
+// PDs support ownership management and SELinux relabeling.
+type GCEPersistentDiskVolumeSource struct {
+	// Unique name of the PD resource in GCE. Used to identify the disk in GCE.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+	PDName string `json:"pdName" protobuf:"bytes,1,opt,name=pdName"`
+	// Filesystem type of the volume that you want to mount.
+	// Tip: Ensure that the filesystem type is supported by the host operating system.
+	// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+	// TODO: how do we prevent errors in the filesystem from compromising the machine
+	// +optional
+	FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
+	// The partition in the volume that you want to mount.
+	// If omitted, the default is to mount by volume name.
+	// Examples: For volume /dev/sda1, you specify the partition as "1".
+	// Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+	// +optional
+	Partition int32 `json:"partition,omitempty" protobuf:"varint,3,opt,name=partition"`
+	// ReadOnly here will force the ReadOnly setting in VolumeMounts.
+	// Defaults to false.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
+}
+
+// Represents a Quobyte mount that lasts the lifetime of a pod.
+// Quobyte volumes do not support ownership management or SELinux relabeling.
+type QuobyteVolumeSource struct {
+	// Registry represents a single or multiple Quobyte Registry services
+	// specified as a host:port string (multiple entries are separated with commas)
+	// which acts as the central registry for volumes
+	Registry string `json:"registry" protobuf:"bytes,1,opt,name=registry"`
+
+	// Volume is a string that references an already created Quobyte volume by name.
+	Volume string `json:"volume" protobuf:"bytes,2,opt,name=volume"`
+
+	// ReadOnly here will force the Quobyte volume to be mounted with read-only permissions.
+	// Defaults to false.
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
+
+	// User to map volume access to
+	// Defaults to the serviceaccount user
+	// +optional
+	User string `json:"user,omitempty" protobuf:"bytes,4,opt,name=user"`
+
+	// Group to map volume access to
+	// Default is no group
+	// +optional
+	Group string `json:"group,omitempty" protobuf:"bytes,5,opt,name=group"`
+}
+
+// FlexPersistentVolumeSource represents a generic persistent volume resource that is
+// provisioned/attached using an exec based plugin.
+type FlexPersistentVolumeSource struct {
+	// Driver is the name of the driver to use for this volume.
+	Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"`
+	// Filesystem type to mount.
+	// Must be a filesystem type supported by the host operating system.
+	// Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
+	// +optional
+	FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
+	// Optional: SecretRef is a reference to the secret object containing
+	// sensitive information to pass to the plugin scripts. This may be
+	// empty if no secret object is specified. If the secret object
+	// contains more than one secret, all secrets are passed to the plugin
+	// scripts.
+	// +optional
+	SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,3,opt,name=secretRef"`
+	// Optional: Defaults to false (read/write). ReadOnly here will force
+	// the ReadOnly setting in VolumeMounts.
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
+	// Optional: Extra command options if any.
+	// +optional
+	Options map[string]string `json:"options,omitempty" protobuf:"bytes,5,rep,name=options"`
+}
+
+// FlexVolumeSource represents a generic volume resource that is
+// provisioned/attached using an exec based plugin.
+type FlexVolumeSource struct {
+	// Driver is the name of the driver to use for this volume.
+	Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"`
+	// Filesystem type to mount.
+	// Must be a filesystem type supported by the host operating system.
+	// Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
+	// +optional
+	FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
+	// Optional: SecretRef is a reference to the secret object containing
+	// sensitive information to pass to the plugin scripts. This may be
+	// empty if no secret object is specified. If the secret object
+	// contains more than one secret, all secrets are passed to the plugin
+	// scripts.
+	// +optional
+	SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,3,opt,name=secretRef"`
+	// Optional: Defaults to false (read/write). ReadOnly here will force
+	// the ReadOnly setting in VolumeMounts.
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
+	// Optional: Extra command options if any.
+	// +optional
+	Options map[string]string `json:"options,omitempty" protobuf:"bytes,5,rep,name=options"`
+}
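+
+// Illustrative only: a sketch of a FlexVolumeSource where driver-specific
+// settings travel in the free-form Options map; the driver name and option
+// keys are assumptions, since they depend entirely on the FlexVolume script.
+//
+//	flex := FlexVolumeSource{
+//		Driver: "example.com/lvm",
+//		FSType: "ext4",
+//		Options: map[string]string{
+//			"volumeID": "vol1",
+//		},
+//	}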
+
+// Represents a Persistent Disk resource in AWS.
+//
+// An AWS EBS disk must exist before mounting to a container. The disk
+// must also be in the same AWS zone as the kubelet. An AWS EBS disk
+// can only be mounted as read/write once. AWS EBS volumes support
+// ownership management and SELinux relabeling.
+type AWSElasticBlockStoreVolumeSource struct {
+	// Unique ID of the persistent disk resource in AWS (Amazon EBS volume).
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+	VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"`
+	// Filesystem type of the volume that you want to mount.
+	// Tip: Ensure that the filesystem type is supported by the host operating system.
+	// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+	// TODO: how do we prevent errors in the filesystem from compromising the machine
+	// +optional
+	FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
+	// The partition in the volume that you want to mount.
+	// If omitted, the default is to mount by volume name.
+	// Examples: For volume /dev/sda1, you specify the partition as "1".
+	// Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
+	// +optional
+	Partition int32 `json:"partition,omitempty" protobuf:"varint,3,opt,name=partition"`
+	// Specify "true" to force and set the ReadOnly property in VolumeMounts to "true".
+	// If omitted, the default is "false".
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
+}
+
+// Represents a volume that is populated with the contents of a git repository.
+// Git repo volumes do not support ownership management.
+// Git repo volumes support SELinux relabeling.
+//
+// DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an
+// EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
+// into the Pod's container.
+type GitRepoVolumeSource struct {
+	// Repository URL
+	Repository string `json:"repository" protobuf:"bytes,1,opt,name=repository"`
+	// Commit hash for the specified revision.
+	// +optional
+	Revision string `json:"revision,omitempty" protobuf:"bytes,2,opt,name=revision"`
+	// Target directory name.
+	// Must not contain or start with '..'.  If '.' is supplied, the volume directory will be the
+	// git repository.  Otherwise, if specified, the volume will contain the git repository in
+	// the subdirectory with the given name.
+	// +optional
+	Directory string `json:"directory,omitempty" protobuf:"bytes,3,opt,name=directory"`
+}
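+
+// Illustrative only: a sketch of the replacement pattern named in the
+// deprecation note above - an EmptyDir volume cloned into by an init
+// container and then mounted by the main container. The image and repository
+// URL are assumptions for the example.
+//
+//	repoVol := Volume{
+//		Name:         "repo",
+//		VolumeSource: VolumeSource{EmptyDir: &EmptyDirVolumeSource{}},
+//	}
+//	clone := Container{
+//		Name:         "clone-repo",
+//		Image:        "alpine/git",
+//		Args:         []string{"clone", "https://example.com/repo.git", "/repo"},
+//		VolumeMounts: []VolumeMount{{Name: "repo", MountPath: "/repo"}},
+//	}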
+
+// Adapts a Secret into a volume.
+//
+// The contents of the target Secret's Data field will be presented in a volume
+// as files using the keys in the Data field as the file names.
+// Secret volumes support ownership management and SELinux relabeling.
+type SecretVolumeSource struct {
+	// Name of the secret in the pod's namespace to use.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
+	// +optional
+	SecretName string `json:"secretName,omitempty" protobuf:"bytes,1,opt,name=secretName"`
+	// If unspecified, each key-value pair in the Data field of the referenced
+	// Secret will be projected into the volume as a file whose name is the
+	// key and content is the value. If specified, the listed keys will be
+	// projected into the specified paths, and unlisted keys will not be
+	// present. If a key is specified which is not present in the Secret,
+	// the volume setup will error unless it is marked optional. Paths must be
+	// relative and may not contain the '..' path or start with '..'.
+	// +optional
+	Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
+	// Optional: mode bits to use on created files by default. Must be a
+	// value between 0 and 0777. Defaults to 0644.
+	// Directories within the path are not affected by this setting.
+	// This might be in conflict with other options that affect the file
+	// mode, like fsGroup, and the result can be other mode bits set.
+	// +optional
+	DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"bytes,3,opt,name=defaultMode"`
+	// Specify whether the Secret or its keys must be defined
+	// +optional
+	Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"`
+}
+
+const (
+	SecretVolumeSourceDefaultMode int32 = 0644
+)
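+
+// Illustrative only: a sketch of a Secret volume that projects a single key to
+// a chosen path and pins file modes to the package default; the secret name
+// and key are assumptions for the example.
+//
+//	mode := SecretVolumeSourceDefaultMode
+//	secretVol := Volume{
+//		Name: "tls",
+//		VolumeSource: VolumeSource{
+//			Secret: &SecretVolumeSource{
+//				SecretName:  "tls-secret",
+//				Items:       []KeyToPath{{Key: "tls.crt", Path: "certs/tls.crt"}},
+//				DefaultMode: &mode,
+//			},
+//		},
+//	}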
+
+// Adapts a secret into a projected volume.
+//
+// The contents of the target Secret's Data field will be presented in a
+// projected volume as files using the keys in the Data field as the file names.
+// Note that this is identical to a secret volume source without the default
+// mode.
+type SecretProjection struct {
+	LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
+	// If unspecified, each key-value pair in the Data field of the referenced
+	// Secret will be projected into the volume as a file whose name is the
+	// key and content is the value. If specified, the listed keys will be
+	// projected into the specified paths, and unlisted keys will not be
+	// present. If a key is specified which is not present in the Secret,
+	// the volume setup will error unless it is marked optional. Paths must be
+	// relative and may not contain the '..' path or start with '..'.
+	// +optional
+	Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
+	// Specify whether the Secret or its key must be defined
+	// +optional
+	Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"`
+}
+
+// Represents an NFS mount that lasts the lifetime of a pod.
+// NFS volumes do not support ownership management or SELinux relabeling.
+type NFSVolumeSource struct {
+	// Server is the hostname or IP address of the NFS server.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+	Server string `json:"server" protobuf:"bytes,1,opt,name=server"`
+
+	// Path that is exported by the NFS server.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+	Path string `json:"path" protobuf:"bytes,2,opt,name=path"`
+
+	// ReadOnly here will force
+	// the NFS export to be mounted with read-only permissions.
+	// Defaults to false.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
+}
+
+// Represents an ISCSI disk.
+// ISCSI volumes can only be mounted as read/write once.
+// ISCSI volumes support ownership management and SELinux relabeling.
+type ISCSIVolumeSource struct {
+	// iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port
+	// is other than default (typically TCP ports 860 and 3260).
+	TargetPortal string `json:"targetPortal" protobuf:"bytes,1,opt,name=targetPortal"`
+	// Target iSCSI Qualified Name.
+	IQN string `json:"iqn" protobuf:"bytes,2,opt,name=iqn"`
+	// iSCSI Target Lun number.
+	Lun int32 `json:"lun" protobuf:"varint,3,opt,name=lun"`
+	// iSCSI Interface Name that uses an iSCSI transport.
+	// Defaults to 'default' (tcp).
+	// +optional
+	ISCSIInterface string `json:"iscsiInterface,omitempty" protobuf:"bytes,4,opt,name=iscsiInterface"`
+	// Filesystem type of the volume that you want to mount.
+	// Tip: Ensure that the filesystem type is supported by the host operating system.
+	// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
+	// TODO: how do we prevent errors in the filesystem from compromising the machine
+	// +optional
+	FSType string `json:"fsType,omitempty" protobuf:"bytes,5,opt,name=fsType"`
+	// ReadOnly here will force the ReadOnly setting in VolumeMounts.
+	// Defaults to false.
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"`
+	// iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port
+	// is other than default (typically TCP ports 860 and 3260).
+	// +optional
+	Portals []string `json:"portals,omitempty" protobuf:"bytes,7,opt,name=portals"`
+	// whether to support iSCSI Discovery CHAP authentication
+	// +optional
+	DiscoveryCHAPAuth bool `json:"chapAuthDiscovery,omitempty" protobuf:"varint,8,opt,name=chapAuthDiscovery"`
+	// whether to support iSCSI Session CHAP authentication
+	// +optional
+	SessionCHAPAuth bool `json:"chapAuthSession,omitempty" protobuf:"varint,11,opt,name=chapAuthSession"`
+	// CHAP Secret for iSCSI target and initiator authentication
+	// +optional
+	SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,10,opt,name=secretRef"`
+	// Custom iSCSI Initiator Name.
+	// If initiatorName is specified with iscsiInterface simultaneously, a new iSCSI interface
+	// <target portal>:<volume name> will be created for the connection.
+	// +optional
+	InitiatorName *string `json:"initiatorName,omitempty" protobuf:"bytes,12,opt,name=initiatorName"`
+}
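+
+// Illustrative only: a sketch of a minimal ISCSIVolumeSource; the portal
+// address and IQN are assumptions for the example, with the default portal
+// port (3260) spelled out explicitly.
+//
+//	iscsi := ISCSIVolumeSource{
+//		TargetPortal: "10.0.0.10:3260",
+//		IQN:          "iqn.2001-04.com.example:storage.disk1",
+//		Lun:          0,
+//		FSType:       "ext4",
+//	}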
+
+// ISCSIPersistentVolumeSource represents an ISCSI disk.
+// ISCSI volumes can only be mounted as read/write once.
+// ISCSI volumes support ownership management and SELinux relabeling.
+type ISCSIPersistentVolumeSource struct {
+	// iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port
+	// is other than default (typically TCP ports 860 and 3260).
+	TargetPortal string `json:"targetPortal" protobuf:"bytes,1,opt,name=targetPortal"`
+	// Target iSCSI Qualified Name.
+	IQN string `json:"iqn" protobuf:"bytes,2,opt,name=iqn"`
+	// iSCSI Target Lun number.
+	Lun int32 `json:"lun" protobuf:"varint,3,opt,name=lun"`
+	// iSCSI Interface Name that uses an iSCSI transport.
+	// Defaults to 'default' (tcp).
+	// +optional
+	ISCSIInterface string `json:"iscsiInterface,omitempty" protobuf:"bytes,4,opt,name=iscsiInterface"`
+	// Filesystem type of the volume that you want to mount.
+	// Tip: Ensure that the filesystem type is supported by the host operating system.
+	// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
+	// TODO: how do we prevent errors in the filesystem from compromising the machine
+	// +optional
+	FSType string `json:"fsType,omitempty" protobuf:"bytes,5,opt,name=fsType"`
+	// ReadOnly here will force the ReadOnly setting in VolumeMounts.
+	// Defaults to false.
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"`
+	// iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port
+	// is other than default (typically TCP ports 860 and 3260).
+	// +optional
+	Portals []string `json:"portals,omitempty" protobuf:"bytes,7,opt,name=portals"`
+	// whether to support iSCSI Discovery CHAP authentication
+	// +optional
+	DiscoveryCHAPAuth bool `json:"chapAuthDiscovery,omitempty" protobuf:"varint,8,opt,name=chapAuthDiscovery"`
+	// whether to support iSCSI Session CHAP authentication
+	// +optional
+	SessionCHAPAuth bool `json:"chapAuthSession,omitempty" protobuf:"varint,11,opt,name=chapAuthSession"`
+	// CHAP Secret for iSCSI target and initiator authentication
+	// +optional
+	SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,10,opt,name=secretRef"`
+	// Custom iSCSI Initiator Name.
+	// If initiatorName is specified with iscsiInterface simultaneously, a new iSCSI interface
+	// <target portal>:<volume name> will be created for the connection.
+	// +optional
+	InitiatorName *string `json:"initiatorName,omitempty" protobuf:"bytes,12,opt,name=initiatorName"`
+}
+
+// Represents a Fibre Channel volume.
+// Fibre Channel volumes can only be mounted as read/write once.
+// Fibre Channel volumes support ownership management and SELinux relabeling.
+type FCVolumeSource struct {
+	// Optional: FC target worldwide names (WWNs)
+	// +optional
+	TargetWWNs []string `json:"targetWWNs,omitempty" protobuf:"bytes,1,rep,name=targetWWNs"`
+	// Optional: FC target lun number
+	// +optional
+	Lun *int32 `json:"lun,omitempty" protobuf:"varint,2,opt,name=lun"`
+	// Filesystem type to mount.
+	// Must be a filesystem type supported by the host operating system.
+	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+	// TODO: how do we prevent errors in the filesystem from compromising the machine
+	// +optional
+	FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"`
+	// Optional: Defaults to false (read/write). ReadOnly here will force
+	// the ReadOnly setting in VolumeMounts.
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
+	// Optional: FC volume world wide identifiers (wwids)
+	// Either wwids or a combination of targetWWNs and lun must be set, but not both simultaneously.
+	// +optional
+	WWIDs []string `json:"wwids,omitempty" protobuf:"bytes,5,rep,name=wwids"`
+}
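+
+// Illustrative only: a sketch of the targetWWNs+lun form of an FCVolumeSource
+// (the alternative form would set WWIDs instead, never both); the WWN value is
+// an assumption for the example.
+//
+//	fcLun := int32(0)
+//	fc := FCVolumeSource{
+//		TargetWWNs: []string{"500a0981891b8dc5"},
+//		Lun:        &fcLun,
+//		FSType:     "ext4",
+//	}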
+
+// AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
+type AzureFileVolumeSource struct {
+	// the name of secret that contains Azure Storage Account Name and Key
+	SecretName string `json:"secretName" protobuf:"bytes,1,opt,name=secretName"`
+	// Share Name
+	ShareName string `json:"shareName" protobuf:"bytes,2,opt,name=shareName"`
+	// Defaults to false (read/write). ReadOnly here will force
+	// the ReadOnly setting in VolumeMounts.
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
+}
+
+// AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
+type AzureFilePersistentVolumeSource struct {
+	// the name of secret that contains Azure Storage Account Name and Key
+	SecretName string `json:"secretName" protobuf:"bytes,1,opt,name=secretName"`
+	// Share Name
+	ShareName string `json:"shareName" protobuf:"bytes,2,opt,name=shareName"`
+	// Defaults to false (read/write). ReadOnly here will force
+	// the ReadOnly setting in VolumeMounts.
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
+	// the namespace of the secret that contains Azure Storage Account Name and Key
+	// default is the same as the Pod
+	// +optional
+	SecretNamespace *string `json:"secretNamespace" protobuf:"bytes,4,opt,name=secretNamespace"`
+}
+
+// Represents a vSphere volume resource.
+type VsphereVirtualDiskVolumeSource struct {
+	// Path that identifies vSphere volume vmdk
+	VolumePath string `json:"volumePath" protobuf:"bytes,1,opt,name=volumePath"`
+	// Filesystem type to mount.
+	// Must be a filesystem type supported by the host operating system.
+	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+	// +optional
+	FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
+	// Storage Policy Based Management (SPBM) profile name.
+	// +optional
+	StoragePolicyName string `json:"storagePolicyName,omitempty" protobuf:"bytes,3,opt,name=storagePolicyName"`
+	// Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.
+	// +optional
+	StoragePolicyID string `json:"storagePolicyID,omitempty" protobuf:"bytes,4,opt,name=storagePolicyID"`
+}
+
+// Represents a Photon Controller persistent disk resource.
+type PhotonPersistentDiskVolumeSource struct {
+	// ID that identifies Photon Controller persistent disk
+	PdID string `json:"pdID" protobuf:"bytes,1,opt,name=pdID"`
+	// Filesystem type to mount.
+	// Must be a filesystem type supported by the host operating system.
+	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+	FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
+}
+
+type AzureDataDiskCachingMode string
+type AzureDataDiskKind string
+
+const (
+	AzureDataDiskCachingNone      AzureDataDiskCachingMode = "None"
+	AzureDataDiskCachingReadOnly  AzureDataDiskCachingMode = "ReadOnly"
+	AzureDataDiskCachingReadWrite AzureDataDiskCachingMode = "ReadWrite"
+
+	AzureSharedBlobDisk    AzureDataDiskKind = "Shared"
+	AzureDedicatedBlobDisk AzureDataDiskKind = "Dedicated"
+	AzureManagedDisk       AzureDataDiskKind = "Managed"
+)
+
+// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
+type AzureDiskVolumeSource struct {
+	// The Name of the data disk in the blob storage
+	DiskName string `json:"diskName" protobuf:"bytes,1,opt,name=diskName"`
+	// The URI of the data disk in the blob storage
+	DataDiskURI string `json:"diskURI" protobuf:"bytes,2,opt,name=diskURI"`
+	// Host Caching mode: None, Read Only, Read Write.
+	// +optional
+	CachingMode *AzureDataDiskCachingMode `json:"cachingMode,omitempty" protobuf:"bytes,3,opt,name=cachingMode,casttype=AzureDataDiskCachingMode"`
+	// Filesystem type to mount.
+	// Must be a filesystem type supported by the host operating system.
+	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+	// +optional
+	FSType *string `json:"fsType,omitempty" protobuf:"bytes,4,opt,name=fsType"`
+	// Defaults to false (read/write). ReadOnly here will force
+	// the ReadOnly setting in VolumeMounts.
+	// +optional
+	ReadOnly *bool `json:"readOnly,omitempty" protobuf:"varint,5,opt,name=readOnly"`
+	// Expected values: Shared (multiple blob disks per storage account), Dedicated (single blob disk per storage account), Managed (Azure managed data disk, only in managed availability set). Defaults to Shared.
+	Kind *AzureDataDiskKind `json:"kind,omitempty" protobuf:"bytes,6,opt,name=kind,casttype=AzureDataDiskKind"`
+}
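+
+// Illustrative only: a sketch of an AzureDiskVolumeSource for a shared blob
+// disk; the optional fields are pointers, so values are bound to locals
+// first. The disk name and URI are assumptions for the example.
+//
+//	caching := AzureDataDiskCachingReadOnly
+//	kind := AzureSharedBlobDisk
+//	azure := AzureDiskVolumeSource{
+//		DiskName:    "datadisk0",
+//		DataDiskURI: "https://account.blob.core.windows.net/vhds/datadisk0.vhd",
+//		CachingMode: &caching,
+//		Kind:        &kind,
+//	}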
+
+// PortworxVolumeSource represents a Portworx volume resource.
+type PortworxVolumeSource struct {
+	// VolumeID uniquely identifies a Portworx volume
+	VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"`
+	// FSType represents the filesystem type to mount
+	// Must be a filesystem type supported by the host operating system.
+	// Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified.
+	FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
+	// Defaults to false (read/write). ReadOnly here will force
+	// the ReadOnly setting in VolumeMounts.
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
+}
+
+// ScaleIOVolumeSource represents a persistent ScaleIO volume
+type ScaleIOVolumeSource struct {
+	// The host address of the ScaleIO API Gateway.
+	Gateway string `json:"gateway" protobuf:"bytes,1,opt,name=gateway"`
+	// The name of the storage system as configured in ScaleIO.
+	System string `json:"system" protobuf:"bytes,2,opt,name=system"`
+	// SecretRef references the secret for the ScaleIO user and other
+	// sensitive information. If this is not provided, the Login operation will fail.
+	SecretRef *LocalObjectReference `json:"secretRef" protobuf:"bytes,3,opt,name=secretRef"`
+	// Flag to enable/disable SSL communication with Gateway, default false
+	// +optional
+	SSLEnabled bool `json:"sslEnabled,omitempty" protobuf:"varint,4,opt,name=sslEnabled"`
+	// The name of the ScaleIO Protection Domain for the configured storage.
+	// +optional
+	ProtectionDomain string `json:"protectionDomain,omitempty" protobuf:"bytes,5,opt,name=protectionDomain"`
+	// The ScaleIO Storage Pool associated with the protection domain.
+	// +optional
+	StoragePool string `json:"storagePool,omitempty" protobuf:"bytes,6,opt,name=storagePool"`
+	// Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
+	// Default is ThinProvisioned.
+	// +optional
+	StorageMode string `json:"storageMode,omitempty" protobuf:"bytes,7,opt,name=storageMode"`
+	// The name of a volume already created in the ScaleIO system
+	// that is associated with this volume source.
+	VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,8,opt,name=volumeName"`
+	// Filesystem type to mount.
+	// Must be a filesystem type supported by the host operating system.
+	// Ex. "ext4", "xfs", "ntfs".
+	// Default is "xfs".
+	// +optional
+	FSType string `json:"fsType,omitempty" protobuf:"bytes,9,opt,name=fsType"`
+	// Defaults to false (read/write). ReadOnly here will force
+	// the ReadOnly setting in VolumeMounts.
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,10,opt,name=readOnly"`
+}
+
+// ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume
+type ScaleIOPersistentVolumeSource struct {
+	// The host address of the ScaleIO API Gateway.
+	Gateway string `json:"gateway" protobuf:"bytes,1,opt,name=gateway"`
+	// The name of the storage system as configured in ScaleIO.
+	System string `json:"system" protobuf:"bytes,2,opt,name=system"`
+	// SecretRef references the secret for the ScaleIO user and other
+	// sensitive information. If this is not provided, the Login operation will fail.
+	SecretRef *SecretReference `json:"secretRef" protobuf:"bytes,3,opt,name=secretRef"`
+	// Flag to enable/disable SSL communication with Gateway, default false
+	// +optional
+	SSLEnabled bool `json:"sslEnabled,omitempty" protobuf:"varint,4,opt,name=sslEnabled"`
+	// The name of the ScaleIO Protection Domain for the configured storage.
+	// +optional
+	ProtectionDomain string `json:"protectionDomain,omitempty" protobuf:"bytes,5,opt,name=protectionDomain"`
+	// The ScaleIO Storage Pool associated with the protection domain.
+	// +optional
+	StoragePool string `json:"storagePool,omitempty" protobuf:"bytes,6,opt,name=storagePool"`
+	// Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
+	// Default is ThinProvisioned.
+	// +optional
+	StorageMode string `json:"storageMode,omitempty" protobuf:"bytes,7,opt,name=storageMode"`
+	// The name of a volume already created in the ScaleIO system
+	// that is associated with this volume source.
+	VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,8,opt,name=volumeName"`
+	// Filesystem type to mount.
+	// Must be a filesystem type supported by the host operating system.
+	// Ex. "ext4", "xfs", "ntfs".
+	// Default is "xfs"
+	// +optional
+	FSType string `json:"fsType,omitempty" protobuf:"bytes,9,opt,name=fsType"`
+	// Defaults to false (read/write). ReadOnly here will force
+	// the ReadOnly setting in VolumeMounts.
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,10,opt,name=readOnly"`
+}
+
+// Represents a StorageOS persistent volume resource.
+type StorageOSVolumeSource struct {
+	// VolumeName is the human-readable name of the StorageOS volume.  Volume
+	// names are only unique within a namespace.
+	VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,1,opt,name=volumeName"`
+	// VolumeNamespace specifies the scope of the volume within StorageOS.  If no
+	// namespace is specified then the Pod's namespace will be used.  This allows the
+	// Kubernetes name scoping to be mirrored within StorageOS for tighter integration.
+	// Set VolumeName to any name to override the default behaviour.
+	// Set to "default" if you are not using namespaces within StorageOS.
+	// Namespaces that do not pre-exist within StorageOS will be created.
+	// +optional
+	VolumeNamespace string `json:"volumeNamespace,omitempty" protobuf:"bytes,2,opt,name=volumeNamespace"`
+	// Filesystem type to mount.
+	// Must be a filesystem type supported by the host operating system.
+	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+	// +optional
+	FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"`
+	// Defaults to false (read/write). ReadOnly here will force
+	// the ReadOnly setting in VolumeMounts.
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
+	// SecretRef specifies the secret to use for obtaining the StorageOS API
+	// credentials.  If not specified, default values will be attempted.
+	// +optional
+	SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"`
+}
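+
+// Illustrative sketch (not part of the upstream file): a StorageOS volume
+// scoped to the "default" StorageOS namespace. The volume name is
+// hypothetical.
+//
+//	src := StorageOSVolumeSource{
+//		VolumeName:      "data",
+//		VolumeNamespace: "default",
+//		FSType:          "ext4",
+//	}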
+
+// Represents a StorageOS persistent volume resource.
+type StorageOSPersistentVolumeSource struct {
+	// VolumeName is the human-readable name of the StorageOS volume.  Volume
+	// names are only unique within a namespace.
+	VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,1,opt,name=volumeName"`
+	// VolumeNamespace specifies the scope of the volume within StorageOS.  If no
+	// namespace is specified then the Pod's namespace will be used.  This allows the
+	// Kubernetes name scoping to be mirrored within StorageOS for tighter integration.
+	// Set VolumeName to any name to override the default behaviour.
+	// Set to "default" if you are not using namespaces within StorageOS.
+	// Namespaces that do not pre-exist within StorageOS will be created.
+	// +optional
+	VolumeNamespace string `json:"volumeNamespace,omitempty" protobuf:"bytes,2,opt,name=volumeNamespace"`
+	// Filesystem type to mount.
+	// Must be a filesystem type supported by the host operating system.
+	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+	// +optional
+	FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"`
+	// Defaults to false (read/write). ReadOnly here will force
+	// the ReadOnly setting in VolumeMounts.
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
+	// SecretRef specifies the secret to use for obtaining the StorageOS API
+	// credentials.  If not specified, default values will be attempted.
+	// +optional
+	SecretRef *ObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"`
+}
+
+// Adapts a ConfigMap into a volume.
+//
+// The contents of the target ConfigMap's Data field will be presented in a
+// volume as files using the keys in the Data field as the file names, unless
+// the items element is populated with specific mappings of keys to paths.
+// ConfigMap volumes support ownership management and SELinux relabeling.
+type ConfigMapVolumeSource struct {
+	LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
+	// If unspecified, each key-value pair in the Data field of the referenced
+	// ConfigMap will be projected into the volume as a file whose name is the
+	// key and content is the value. If specified, the listed keys will be
+	// projected into the specified paths, and unlisted keys will not be
+	// present. If a key is specified which is not present in the ConfigMap,
+	// the volume setup will error unless it is marked optional. Paths must be
+	// relative and may not contain the '..' path or start with '..'.
+	// +optional
+	Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
+	// Optional: mode bits to use on created files by default. Must be a
+	// value between 0 and 0777. Defaults to 0644.
+	// Directories within the path are not affected by this setting.
+	// This might be in conflict with other options that affect the file
+	// mode, like fsGroup, and the result can be other mode bits set.
+	// +optional
+	DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,3,opt,name=defaultMode"`
+	// Specify whether the ConfigMap or its keys must be defined
+	// +optional
+	Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"`
+}
+
+const (
+	ConfigMapVolumeSourceDefaultMode int32 = 0644
+)
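+
+// Illustrative sketch (not part of the upstream file): projecting a single
+// ConfigMap key to a custom path with the default file mode. The ConfigMap
+// and key names are hypothetical.
+//
+//	mode := ConfigMapVolumeSourceDefaultMode
+//	src := ConfigMapVolumeSource{
+//		LocalObjectReference: LocalObjectReference{Name: "app-config"},
+//		Items: []KeyToPath{
+//			{Key: "config.yaml", Path: "app/config.yaml"},
+//		},
+//		DefaultMode: &mode,
+//	}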
+
+// Adapts a ConfigMap into a projected volume.
+//
+// The contents of the target ConfigMap's Data field will be presented in a
+// projected volume as files using the keys in the Data field as the file names,
+// unless the items element is populated with specific mappings of keys to paths.
+// Note that this is identical to a configmap volume source without the default
+// mode.
+type ConfigMapProjection struct {
+	LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
+	// If unspecified, each key-value pair in the Data field of the referenced
+	// ConfigMap will be projected into the volume as a file whose name is the
+	// key and content is the value. If specified, the listed keys will be
+	// projected into the specified paths, and unlisted keys will not be
+	// present. If a key is specified which is not present in the ConfigMap,
+	// the volume setup will error unless it is marked optional. Paths must be
+	// relative and may not contain the '..' path or start with '..'.
+	// +optional
+	Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
+	// Specify whether the ConfigMap or its keys must be defined
+	// +optional
+	Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"`
+}
+
+// ServiceAccountTokenProjection represents a projected service account token
+// volume. This projection can be used to insert a service account token into
+// the pods runtime filesystem for use against APIs (Kubernetes API Server or
+// otherwise).
+type ServiceAccountTokenProjection struct {
+	// Audience is the intended audience of the token. A recipient of a token
+	// must identify itself with an identifier specified in the audience of the
+	// token, and otherwise should reject the token. The audience defaults to the
+	// identifier of the apiserver.
+	// +optional
+	Audience string `json:"audience,omitempty" protobuf:"bytes,1,rep,name=audience"`
+	// ExpirationSeconds is the requested duration of validity of the service
+	// account token. As the token approaches expiration, the kubelet volume
+	// plugin will proactively rotate the service account token. The kubelet will
+	// start trying to rotate the token if the token is older than 80 percent of
+	// its time to live or if the token is older than 24 hours. Defaults to 1 hour
+	// and must be at least 10 minutes.
+	// +optional
+	ExpirationSeconds *int64 `json:"expirationSeconds,omitempty" protobuf:"varint,2,opt,name=expirationSeconds"`
+	// Path is the path relative to the mount point of the file to project the
+	// token into.
+	Path string `json:"path" protobuf:"bytes,3,opt,name=path"`
+}
+
+// Represents a projected volume source
+type ProjectedVolumeSource struct {
+	// list of volume projections
+	Sources []VolumeProjection `json:"sources" protobuf:"bytes,1,rep,name=sources"`
+	// Mode bits to use on created files by default. Must be a value between
+	// 0 and 0777.
+	// Directories within the path are not affected by this setting.
+	// This might be in conflict with other options that affect the file
+	// mode, like fsGroup, and the result can be other mode bits set.
+	// +optional
+	DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,2,opt,name=defaultMode"`
+}
+
+// Projection that may be projected along with other supported volume types
+type VolumeProjection struct {
+	// all types below are the supported types for projection into the same volume
+
+	// information about the secret data to project
+	// +optional
+	Secret *SecretProjection `json:"secret,omitempty" protobuf:"bytes,1,opt,name=secret"`
+	// information about the downwardAPI data to project
+	// +optional
+	DownwardAPI *DownwardAPIProjection `json:"downwardAPI,omitempty" protobuf:"bytes,2,opt,name=downwardAPI"`
+	// information about the configMap data to project
+	// +optional
+	ConfigMap *ConfigMapProjection `json:"configMap,omitempty" protobuf:"bytes,3,opt,name=configMap"`
+	// information about the serviceAccountToken data to project
+	// +optional
+	ServiceAccountToken *ServiceAccountTokenProjection `json:"serviceAccountToken,omitempty" protobuf:"bytes,4,opt,name=serviceAccountToken"`
+}
+
+const (
+	ProjectedVolumeSourceDefaultMode int32 = 0644
+)
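+
+// Illustrative sketch (not part of the upstream file): a projected volume
+// combining a ConfigMap projection with a service account token projection.
+// The audience, ConfigMap name, and expiry below are hypothetical.
+//
+//	expiry := int64(3600)
+//	src := ProjectedVolumeSource{
+//		Sources: []VolumeProjection{
+//			{ConfigMap: &ConfigMapProjection{
+//				LocalObjectReference: LocalObjectReference{Name: "app-config"},
+//			}},
+//			{ServiceAccountToken: &ServiceAccountTokenProjection{
+//				Audience:          "https://api.example.com",
+//				ExpirationSeconds: &expiry, // 1 hour; minimum is 10 minutes
+//				Path:              "token",
+//			}},
+//		},
+//	}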
+
+// Maps a string key to a path within a volume.
+type KeyToPath struct {
+	// The key to project.
+	Key string `json:"key" protobuf:"bytes,1,opt,name=key"`
+
+	// The relative path of the file to map the key to.
+	// May not be an absolute path.
+	// May not contain the path element '..'.
+	// May not start with the string '..'.
+	Path string `json:"path" protobuf:"bytes,2,opt,name=path"`
+	// Optional: mode bits to use on this file, must be a value between 0
+	// and 0777. If not specified, the volume defaultMode will be used.
+	// This might be in conflict with other options that affect the file
+	// mode, like fsGroup, and the result can be other mode bits set.
+	// +optional
+	Mode *int32 `json:"mode,omitempty" protobuf:"varint,3,opt,name=mode"`
+}
+
+// Local represents directly-attached storage with node affinity (Beta feature)
+type LocalVolumeSource struct {
+	// The full path to the volume on the node.
+	// It can be either a directory or block device (disk, partition, ...).
+	Path string `json:"path" protobuf:"bytes,1,opt,name=path"`
+
+	// Filesystem type to mount.
+	// It applies only when the Path is a block device.
+	// Must be a filesystem type supported by the host operating system.
+	// Ex. "ext4", "xfs", "ntfs". The default value is to auto-select a fileystem if unspecified.
+	// +optional
+	FSType *string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
+}
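+
+// Illustrative sketch (not part of the upstream file): a local volume backed
+// by a raw block device with an explicit filesystem. The device path is
+// hypothetical.
+//
+//	fsType := "ext4"
+//	lv := LocalVolumeSource{
+//		Path:   "/dev/sdb",
+//		FSType: &fsType,
+//	}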
+
+// Represents storage that is managed by an external CSI volume driver (Beta feature)
+type CSIPersistentVolumeSource struct {
+	// Driver is the name of the driver to use for this volume.
+	// Required.
+	Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"`
+
+	// VolumeHandle is the unique volume name returned by the CSI volume
+	// plugin's CreateVolume to refer to the volume on all subsequent calls.
+	// Required.
+	VolumeHandle string `json:"volumeHandle" protobuf:"bytes,2,opt,name=volumeHandle"`
+
+	// Optional: The value to pass to ControllerPublishVolumeRequest.
+	// Defaults to false (read/write).
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
+
+	// Filesystem type to mount.
+	// Must be a filesystem type supported by the host operating system.
+	// Ex. "ext4", "xfs", "ntfs".
+	// +optional
+	FSType string `json:"fsType,omitempty" protobuf:"bytes,4,opt,name=fsType"`
+
+	// Attributes of the volume to publish.
+	// +optional
+	VolumeAttributes map[string]string `json:"volumeAttributes,omitempty" protobuf:"bytes,5,rep,name=volumeAttributes"`
+
+	// ControllerPublishSecretRef is a reference to the secret object containing
+	// sensitive information to pass to the CSI driver to complete the CSI
+	// ControllerPublishVolume and ControllerUnpublishVolume calls.
+	// This field is optional, and may be empty if no secret is required. If the
+	// secret object contains more than one secret, all secrets are passed.
+	// +optional
+	ControllerPublishSecretRef *SecretReference `json:"controllerPublishSecretRef,omitempty" protobuf:"bytes,6,opt,name=controllerPublishSecretRef"`
+
+	// NodeStageSecretRef is a reference to the secret object containing sensitive
+	// information to pass to the CSI driver to complete the CSI NodeStageVolume
+	// and NodeUnstageVolume calls.
+	// This field is optional, and may be empty if no secret is required. If the
+	// secret object contains more than one secret, all secrets are passed.
+	// +optional
+	NodeStageSecretRef *SecretReference `json:"nodeStageSecretRef,omitempty" protobuf:"bytes,7,opt,name=nodeStageSecretRef"`
+
+	// NodePublishSecretRef is a reference to the secret object containing
+	// sensitive information to pass to the CSI driver to complete the CSI
+	// NodePublishVolume and NodeUnpublishVolume calls.
+	// This field is optional, and may be empty if no secret is required. If the
+	// secret object contains more than one secret, all secrets are passed.
+	// +optional
+	NodePublishSecretRef *SecretReference `json:"nodePublishSecretRef,omitempty" protobuf:"bytes,8,opt,name=nodePublishSecretRef"`
+}
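+
+// Illustrative sketch (not part of the upstream file): a CSI persistent
+// volume source. The driver name, volume handle, attributes, and secret are
+// hypothetical; SecretReference is the namespaced reference type defined
+// elsewhere in this package.
+//
+//	csi := CSIPersistentVolumeSource{
+//		Driver:           "csi.example.com",
+//		VolumeHandle:     "vol-0123456789",
+//		FSType:           "ext4",
+//		VolumeAttributes: map[string]string{"tier": "fast"},
+//		NodeStageSecretRef: &SecretReference{
+//			Name:      "csi-secret",
+//			Namespace: "kube-system",
+//		},
+//	}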
+
+// ContainerPort represents a network port in a single container.
+type ContainerPort struct {
+	// If specified, this must be an IANA_SVC_NAME and unique within the pod. Each
+	// named port in a pod must have a unique name. Name for the port that can be
+	// referred to by services.
+	// +optional
+	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
+	// Number of port to expose on the host.
+	// If specified, this must be a valid port number, 0 < x < 65536.
+	// If HostNetwork is specified, this must match ContainerPort.
+	// Most containers do not need this.
+	// +optional
+	HostPort int32 `json:"hostPort,omitempty" protobuf:"varint,2,opt,name=hostPort"`
+	// Number of port to expose on the pod's IP address.
+	// This must be a valid port number, 0 < x < 65536.
+	ContainerPort int32 `json:"containerPort" protobuf:"varint,3,opt,name=containerPort"`
+	// Protocol for port. Must be UDP, TCP, or SCTP.
+	// Defaults to "TCP".
+	// +optional
+	Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,4,opt,name=protocol,casttype=Protocol"`
+	// What host IP to bind the external port to.
+	// +optional
+	HostIP string `json:"hostIP,omitempty" protobuf:"bytes,5,opt,name=hostIP"`
+}
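+
+// Illustrative sketch (not part of the upstream file): a named container
+// port. ProtocolTCP is one of the Protocol constants defined elsewhere in
+// this package; the name and port number are hypothetical.
+//
+//	port := ContainerPort{
+//		Name:          "http",
+//		ContainerPort: 8080,
+//		Protocol:      ProtocolTCP,
+//	}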
+
+// VolumeMount describes a mounting of a Volume within a container.
+type VolumeMount struct {
+	// This must match the Name of a Volume.
+	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+	// Mounted read-only if true, read-write otherwise (false or unspecified).
+	// Defaults to false.
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"`
+	// Path within the container at which the volume should be mounted.  Must
+	// not contain ':'.
+	MountPath string `json:"mountPath" protobuf:"bytes,3,opt,name=mountPath"`
+	// Path within the volume from which the container's volume should be mounted.
+	// Defaults to "" (volume's root).
+	// +optional
+	SubPath string `json:"subPath,omitempty" protobuf:"bytes,4,opt,name=subPath"`
+	// mountPropagation determines how mounts are propagated from the host
+	// to container and the other way around.
+	// When not set, MountPropagationNone is used.
+	// This field is beta in 1.10.
+	// +optional
+	MountPropagation *MountPropagationMode `json:"mountPropagation,omitempty" protobuf:"bytes,5,opt,name=mountPropagation,casttype=MountPropagationMode"`
+}
+
+// MountPropagationMode describes mount propagation.
+type MountPropagationMode string
+
+const (
+	// MountPropagationNone means that the volume in a container will
+	// not receive new mounts from the host or other containers, and filesystems
+	// mounted inside the container won't be propagated to the host or other
+	// containers.
+	// Note that this mode corresponds to "private" in Linux terminology.
+	MountPropagationNone MountPropagationMode = "None"
+	// MountPropagationHostToContainer means that the volume in a container will
+	// receive new mounts from the host or other containers, but filesystems
+	// mounted inside the container won't be propagated to the host or other
+	// containers.
+	// Note that this mode is recursively applied to all mounts in the volume
+	// ("rslave" in Linux terminology).
+	MountPropagationHostToContainer MountPropagationMode = "HostToContainer"
+	// MountPropagationBidirectional means that the volume in a container will
+	// receive new mounts from the host or other containers, and its own mounts
+	// will be propagated from the container to the host or other containers.
+	// Note that this mode is recursively applied to all mounts in the volume
+	// ("rshared" in Linux terminology).
+	MountPropagationBidirectional MountPropagationMode = "Bidirectional"
+)
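+
+// Illustrative sketch (not part of the upstream file): a read-only mount of
+// a volume subpath that also receives new mounts from the host. The names
+// and paths are hypothetical.
+//
+//	prop := MountPropagationHostToContainer
+//	m := VolumeMount{
+//		Name:             "host-data",
+//		MountPath:        "/data",
+//		ReadOnly:         true,
+//		SubPath:          "app",
+//		MountPropagation: &prop,
+//	}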
+
+// volumeDevice describes a mapping of a raw block device within a container.
+type VolumeDevice struct {
+	// name must match the name of a persistentVolumeClaim in the pod
+	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+	// devicePath is the path inside of the container that the device will be mapped to.
+	DevicePath string `json:"devicePath" protobuf:"bytes,2,opt,name=devicePath"`
+}
+
+// EnvVar represents an environment variable present in a Container.
+type EnvVar struct {
+	// Name of the environment variable. Must be a C_IDENTIFIER.
+	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+
+	// Optional: no more than one of the following may be specified.
+
+	// Variable references $(VAR_NAME) are expanded
+	// using the previously defined environment variables in the container and
+	// any service environment variables. If a variable cannot be resolved,
+	// the reference in the input string will be unchanged. The $(VAR_NAME)
+	// syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped
+	// references will never be expanded, regardless of whether the variable
+	// exists or not.
+	// Defaults to "".
+	// +optional
+	Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"`
+	// Source for the environment variable's value. Cannot be used if value is not empty.
+	// +optional
+	ValueFrom *EnvVarSource `json:"valueFrom,omitempty" protobuf:"bytes,3,opt,name=valueFrom"`
+}
+
+// EnvVarSource represents a source for the value of an EnvVar.
+type EnvVarSource struct {
+	// Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations,
+	// spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP.
+	// +optional
+	FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty" protobuf:"bytes,1,opt,name=fieldRef"`
+	// Selects a resource of the container: only resource limits and requests
+	// (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
+	// +optional
+	ResourceFieldRef *ResourceFieldSelector `json:"resourceFieldRef,omitempty" protobuf:"bytes,2,opt,name=resourceFieldRef"`
+	// Selects a key of a ConfigMap.
+	// +optional
+	ConfigMapKeyRef *ConfigMapKeySelector `json:"configMapKeyRef,omitempty" protobuf:"bytes,3,opt,name=configMapKeyRef"`
+	// Selects a key of a secret in the pod's namespace
+	// +optional
+	SecretKeyRef *SecretKeySelector `json:"secretKeyRef,omitempty" protobuf:"bytes,4,opt,name=secretKeyRef"`
+}
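+
+// Illustrative sketch (not part of the upstream file): a downward-API
+// variable, a variable sourced from a Secret key, and a literal variable
+// that expands a previously defined one. The variable, secret, and key
+// names are hypothetical.
+//
+//	env := []EnvVar{
+//		{Name: "POD_NAME", ValueFrom: &EnvVarSource{
+//			FieldRef: &ObjectFieldSelector{FieldPath: "metadata.name"},
+//		}},
+//		{Name: "DB_PASSWORD", ValueFrom: &EnvVarSource{
+//			SecretKeyRef: &SecretKeySelector{
+//				LocalObjectReference: LocalObjectReference{Name: "db-secret"},
+//				Key:                  "password",
+//			},
+//		}},
+//		{Name: "GREETING", Value: "hello from $(POD_NAME)"},
+//	}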
+
+// ObjectFieldSelector selects an APIVersioned field of an object.
+type ObjectFieldSelector struct {
+	// Version of the schema the FieldPath is written in terms of, defaults to "v1".
+	// +optional
+	APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,1,opt,name=apiVersion"`
+	// Path of the field to select in the specified API version.
+	FieldPath string `json:"fieldPath" protobuf:"bytes,2,opt,name=fieldPath"`
+}
+
+// ResourceFieldSelector represents container resources (cpu, memory) and their output format
+type ResourceFieldSelector struct {
+	// Container name: required for volumes, optional for env vars
+	// +optional
+	ContainerName string `json:"containerName,omitempty" protobuf:"bytes,1,opt,name=containerName"`
+	// Required: resource to select
+	Resource string `json:"resource" protobuf:"bytes,2,opt,name=resource"`
+	// Specifies the output format of the exposed resources, defaults to "1"
+	// +optional
+	Divisor resource.Quantity `json:"divisor,omitempty" protobuf:"bytes,3,opt,name=divisor"`
+}
+
+// Selects a key from a ConfigMap.
+type ConfigMapKeySelector struct {
+	// The ConfigMap to select from.
+	LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
+	// The key to select.
+	Key string `json:"key" protobuf:"bytes,2,opt,name=key"`
+	// Specify whether the ConfigMap or its key must be defined
+	// +optional
+	Optional *bool `json:"optional,omitempty" protobuf:"varint,3,opt,name=optional"`
+}
+
+// SecretKeySelector selects a key of a Secret.
+type SecretKeySelector struct {
+	// The name of the secret in the pod's namespace to select from.
+	LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
+	// The key of the secret to select from.  Must be a valid secret key.
+	Key string `json:"key" protobuf:"bytes,2,opt,name=key"`
+	// Specify whether the Secret or its key must be defined
+	// +optional
+	Optional *bool `json:"optional,omitempty" protobuf:"varint,3,opt,name=optional"`
+}
+
+// EnvFromSource represents the source of a set of ConfigMaps
+type EnvFromSource struct {
+	// An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
+	// +optional
+	Prefix string `json:"prefix,omitempty" protobuf:"bytes,1,opt,name=prefix"`
+	// The ConfigMap to select from
+	// +optional
+	ConfigMapRef *ConfigMapEnvSource `json:"configMapRef,omitempty" protobuf:"bytes,2,opt,name=configMapRef"`
+	// The Secret to select from
+	// +optional
+	SecretRef *SecretEnvSource `json:"secretRef,omitempty" protobuf:"bytes,3,opt,name=secretRef"`
+}
+
+// ConfigMapEnvSource selects a ConfigMap to populate the environment
+// variables with.
+//
+// The contents of the target ConfigMap's Data field will represent the
+// key-value pairs as environment variables.
+type ConfigMapEnvSource struct {
+	// The ConfigMap to select from.
+	LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
+	// Specify whether the ConfigMap must be defined
+	// +optional
+	Optional *bool `json:"optional,omitempty" protobuf:"varint,2,opt,name=optional"`
+}
+
+// SecretEnvSource selects a Secret to populate the environment
+// variables with.
+//
+// The contents of the target Secret's Data field will represent the
+// key-value pairs as environment variables.
+type SecretEnvSource struct {
+	// The Secret to select from.
+	LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
+	// Specify whether the Secret must be defined
+	// +optional
+	Optional *bool `json:"optional,omitempty" protobuf:"varint,2,opt,name=optional"`
+}
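+
+// Illustrative sketch (not part of the upstream file): populating the
+// environment from every key of a ConfigMap, prefixing each key with "CFG_".
+// The ConfigMap name is hypothetical.
+//
+//	envFrom := []EnvFromSource{{
+//		Prefix: "CFG_",
+//		ConfigMapRef: &ConfigMapEnvSource{
+//			LocalObjectReference: LocalObjectReference{Name: "app-config"},
+//		},
+//	}}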
+
+// HTTPHeader describes a custom header to be used in HTTP probes
+type HTTPHeader struct {
+	// The header field name
+	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+	// The header field value
+	Value string `json:"value" protobuf:"bytes,2,opt,name=value"`
+}
+
+// HTTPGetAction describes an action based on HTTP Get requests.
+type HTTPGetAction struct {
+	// Path to access on the HTTP server.
+	// +optional
+	Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
+	// Name or number of the port to access on the container.
+	// Number must be in the range 1 to 65535.
+	// Name must be an IANA_SVC_NAME.
+	Port intstr.IntOrString `json:"port" protobuf:"bytes,2,opt,name=port"`
+	// Host name to connect to, defaults to the pod IP. You probably want to set
+	// "Host" in httpHeaders instead.
+	// +optional
+	Host string `json:"host,omitempty" protobuf:"bytes,3,opt,name=host"`
+	// Scheme to use for connecting to the host.
+	// Defaults to HTTP.
+	// +optional
+	Scheme URIScheme `json:"scheme,omitempty" protobuf:"bytes,4,opt,name=scheme,casttype=URIScheme"`
+	// Custom headers to set in the request. HTTP allows repeated headers.
+	// +optional
+	HTTPHeaders []HTTPHeader `json:"httpHeaders,omitempty" protobuf:"bytes,5,rep,name=httpHeaders"`
+}
+
+// URIScheme identifies the scheme used for connection to a host for Get actions
+type URIScheme string
+
+const (
+	// URISchemeHTTP means that the scheme used will be http://
+	URISchemeHTTP URIScheme = "HTTP"
+	// URISchemeHTTPS means that the scheme used will be https://
+	URISchemeHTTPS URIScheme = "HTTPS"
+)
+
+// TCPSocketAction describes an action based on opening a socket
+type TCPSocketAction struct {
+	// Number or name of the port to access on the container.
+	// Number must be in the range 1 to 65535.
+	// Name must be an IANA_SVC_NAME.
+	Port intstr.IntOrString `json:"port" protobuf:"bytes,1,opt,name=port"`
+	// Optional: Host name to connect to, defaults to the pod IP.
+	// +optional
+	Host string `json:"host,omitempty" protobuf:"bytes,2,opt,name=host"`
+}
+
+// ExecAction describes a "run in container" action.
+type ExecAction struct {
+	// Command is the command line to execute inside the container; the working directory for the
+	// command is root ('/') in the container's filesystem. The command is simply exec'd; it is
+	// not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+	// a shell, you need to explicitly call out to that shell.
+	// Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+	// +optional
+	Command []string `json:"command,omitempty" protobuf:"bytes,1,rep,name=command"`
+}
+
+// Probe describes a health check to be performed against a container to determine whether it is
+// alive or ready to receive traffic.
+type Probe struct {
+	// The action taken to determine the health of a container
+	Handler `json:",inline" protobuf:"bytes,1,opt,name=handler"`
+	// Number of seconds after the container has started before liveness probes are initiated.
+	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+	// +optional
+	InitialDelaySeconds int32 `json:"initialDelaySeconds,omitempty" protobuf:"varint,2,opt,name=initialDelaySeconds"`
+	// Number of seconds after which the probe times out.
+	// Defaults to 1 second. Minimum value is 1.
+	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+	// +optional
+	TimeoutSeconds int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,3,opt,name=timeoutSeconds"`
+	// How often (in seconds) to perform the probe.
+	// Default to 10 seconds. Minimum value is 1.
+	// +optional
+	PeriodSeconds int32 `json:"periodSeconds,omitempty" protobuf:"varint,4,opt,name=periodSeconds"`
+	// Minimum consecutive successes for the probe to be considered successful after having failed.
+	// Defaults to 1. Must be 1 for liveness. Minimum value is 1.
+	// +optional
+	SuccessThreshold int32 `json:"successThreshold,omitempty" protobuf:"varint,5,opt,name=successThreshold"`
+	// Minimum consecutive failures for the probe to be considered failed after having succeeded.
+	// Defaults to 3. Minimum value is 1.
+	// +optional
+	FailureThreshold int32 `json:"failureThreshold,omitempty" protobuf:"varint,6,opt,name=failureThreshold"`
+}
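+
+// Illustrative sketch (not part of the upstream file): an HTTP readiness
+// probe. The path, port, and thresholds are hypothetical; intstr is the
+// k8s.io/apimachinery/pkg/util/intstr package already used by this file.
+//
+//	readiness := &Probe{
+//		Handler: Handler{
+//			HTTPGet: &HTTPGetAction{
+//				Path: "/healthz",
+//				Port: intstr.FromInt(8080),
+//			},
+//		},
+//		InitialDelaySeconds: 5,
+//		PeriodSeconds:       10,
+//		FailureThreshold:    3,
+//	}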
+
+// PullPolicy describes a policy for if/when to pull a container image
+type PullPolicy string
+
+const (
+	// PullAlways means that kubelet always attempts to pull the latest image. Container will fail if the pull fails.
+	PullAlways PullPolicy = "Always"
+	// PullNever means that kubelet never pulls an image, but only uses a local image. Container will fail if the image isn't present.
+	PullNever PullPolicy = "Never"
+	// PullIfNotPresent means that kubelet pulls if the image isn't present on disk. Container will fail if the image isn't present and the pull fails.
+	PullIfNotPresent PullPolicy = "IfNotPresent"
+)
+
+// TerminationMessagePolicy describes how termination messages are retrieved from a container.
+type TerminationMessagePolicy string
+
+const (
+	// TerminationMessageReadFile is the default behavior and will set the container status message to
+	// the contents of the container's terminationMessagePath when the container exits.
+	TerminationMessageReadFile TerminationMessagePolicy = "File"
+	// TerminationMessageFallbackToLogsOnError will read the most recent contents of the container logs
+	// for the container status message when the container exits with an error and the
+	// terminationMessagePath has no contents.
+	TerminationMessageFallbackToLogsOnError TerminationMessagePolicy = "FallbackToLogsOnError"
+)
+
+// Capability represent POSIX capabilities type
+type Capability string
+
+// Adds and removes POSIX capabilities from running containers.
+type Capabilities struct {
+	// Added capabilities
+	// +optional
+	Add []Capability `json:"add,omitempty" protobuf:"bytes,1,rep,name=add,casttype=Capability"`
+	// Removed capabilities
+	// +optional
+	Drop []Capability `json:"drop,omitempty" protobuf:"bytes,2,rep,name=drop,casttype=Capability"`
+}
+
+// ResourceRequirements describes the compute resource requirements.
+type ResourceRequirements struct {
+	// Limits describes the maximum amount of compute resources allowed.
+	// More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
+	// +optional
+	Limits ResourceList `json:"limits,omitempty" protobuf:"bytes,1,rep,name=limits,casttype=ResourceList,castkey=ResourceName"`
+	// Requests describes the minimum amount of compute resources required.
+	// If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+	// otherwise to an implementation-defined value.
+	// More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
+	// +optional
+	Requests ResourceList `json:"requests,omitempty" protobuf:"bytes,2,rep,name=requests,casttype=ResourceList,castkey=ResourceName"`
+}
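+
+// Illustrative sketch (not part of the upstream file): requests below limits
+// for CPU and memory. ResourceList, ResourceCPU, and ResourceMemory are
+// defined elsewhere in this package; the quantities are hypothetical.
+//
+//	rr := ResourceRequirements{
+//		Requests: ResourceList{
+//			ResourceCPU:    resource.MustParse("250m"),
+//			ResourceMemory: resource.MustParse("64Mi"),
+//		},
+//		Limits: ResourceList{
+//			ResourceCPU:    resource.MustParse("500m"),
+//			ResourceMemory: resource.MustParse("128Mi"),
+//		},
+//	}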
+
+const (
+	// TerminationMessagePathDefault means the default path to capture the application termination message running in a container
+	TerminationMessagePathDefault string = "/dev/termination-log"
+)
+
+// A single application container that you want to run within a pod.
+type Container struct {
+	// Name of the container specified as a DNS_LABEL.
+	// Each container in a pod must have a unique name (DNS_LABEL).
+	// Cannot be updated.
+	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+	// Docker image name.
+	// More info: https://kubernetes.io/docs/concepts/containers/images
+	// This field is optional to allow higher level config management to default or override
+	// container images in workload controllers like Deployments and StatefulSets.
+	// +optional
+	Image string `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"`
+	// Entrypoint array. Not executed within a shell.
+	// The docker image's ENTRYPOINT is used if this is not provided.
+	// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+	// cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax
+	// can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded,
+	// regardless of whether the variable exists or not.
+	// Cannot be updated.
+	// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+	// +optional
+	Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"`
+	// Arguments to the entrypoint.
+	// The docker image's CMD is used if this is not provided.
+	// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+	// cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax
+	// can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded,
+	// regardless of whether the variable exists or not.
+	// Cannot be updated.
+	// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+	// +optional
+	Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"`
+	// Container's working directory.
+	// If not specified, the container runtime's default will be used, which
+	// might be configured in the container image.
+	// Cannot be updated.
+	// +optional
+	WorkingDir string `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"`
+	// List of ports to expose from the container. Exposing a port here gives
+	// the system additional information about the network connections a
+	// container uses, but is primarily informational. Not specifying a port here
+	// DOES NOT prevent that port from being exposed. Any port which is
+	// listening on the default "0.0.0.0" address inside a container will be
+	// accessible from the network.
+	// Cannot be updated.
+	// +optional
+	// +patchMergeKey=containerPort
+	// +patchStrategy=merge
+	// +listType=map
+	// +listMapKey=containerPort
+	// +listMapKey=protocol
+	Ports []ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"`
+	// List of sources to populate environment variables in the container.
+	// Each key defined within a source must be a C_IDENTIFIER. All invalid keys
+	// will be reported as an event when the container is starting. When a key exists in multiple
+	// sources, the value associated with the last source will take precedence.
+	// Values defined by an Env with a duplicate key will take precedence.
+	// Cannot be updated.
+	// +optional
+	EnvFrom []EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,19,rep,name=envFrom"`
+	// List of environment variables to set in the container.
+	// Cannot be updated.
+	// +optional
+	// +patchMergeKey=name
+	// +patchStrategy=merge
+	Env []EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"`
+	// Compute Resources required by this container.
+	// Cannot be updated.
+	// More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
+	// +optional
+	Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"`
+	// Pod volumes to mount into the container's filesystem.
+	// Cannot be updated.
+	// +optional
+	// +patchMergeKey=mountPath
+	// +patchStrategy=merge
+	VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"`
+	// volumeDevices is the list of block devices to be used by the container.
+	// This is a beta feature.
+	// +patchMergeKey=devicePath
+	// +patchStrategy=merge
+	// +optional
+	VolumeDevices []VolumeDevice `json:"volumeDevices,omitempty" patchStrategy:"merge" patchMergeKey:"devicePath" protobuf:"bytes,21,rep,name=volumeDevices"`
+	// Periodic probe of container liveness.
+	// Container will be restarted if the probe fails.
+	// Cannot be updated.
+	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+	// +optional
+	LivenessProbe *Probe `json:"livenessProbe,omitempty" protobuf:"bytes,10,opt,name=livenessProbe"`
+	// Periodic probe of container service readiness.
+	// Container will be removed from service endpoints if the probe fails.
+	// Cannot be updated.
+	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+	// +optional
+	ReadinessProbe *Probe `json:"readinessProbe,omitempty" protobuf:"bytes,11,opt,name=readinessProbe"`
+	// Actions that the management system should take in response to container lifecycle events.
+	// Cannot be updated.
+	// +optional
+	Lifecycle *Lifecycle `json:"lifecycle,omitempty" protobuf:"bytes,12,opt,name=lifecycle"`
+	// Optional: Path at which the file to which the container's termination message
+	// will be written is mounted into the container's filesystem.
+	// Message written is intended to be brief final status, such as an assertion failure message.
+	// Will be truncated by the node if greater than 4096 bytes. The total message length across
+	// all containers will be limited to 12kb.
+	// Defaults to /dev/termination-log.
+	// Cannot be updated.
+	// +optional
+	TerminationMessagePath string `json:"terminationMessagePath,omitempty" protobuf:"bytes,13,opt,name=terminationMessagePath"`
+	// Indicate how the termination message should be populated. File will use the contents of
+	// terminationMessagePath to populate the container status message on both success and failure.
+	// FallbackToLogsOnError will use the last chunk of container log output if the termination
+	// message file is empty and the container exited with an error.
+	// The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+	// Defaults to File.
+	// Cannot be updated.
+	// +optional
+	TerminationMessagePolicy TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty" protobuf:"bytes,20,opt,name=terminationMessagePolicy,casttype=TerminationMessagePolicy"`
+	// Image pull policy.
+	// One of Always, Never, IfNotPresent.
+	// Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
+	// Cannot be updated.
+	// More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
+	// +optional
+	ImagePullPolicy PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"`
+	// Security options the pod should run with.
+	// More info: https://kubernetes.io/docs/concepts/policy/security-context/
+	// More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+	// +optional
+	SecurityContext *SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"`
+
+	// Variables for interactive containers, these have very specialized use-cases (e.g. debugging)
+	// and shouldn't be used for general purpose containers.
+
+	// Whether this container should allocate a buffer for stdin in the container runtime. If this
+	// is not set, reads from stdin in the container will always result in EOF.
+	// Default is false.
+	// +optional
+	Stdin bool `json:"stdin,omitempty" protobuf:"varint,16,opt,name=stdin"`
+	// Whether the container runtime should close the stdin channel after it has been opened by
+	// a single attach. When stdin is true the stdin stream will remain open across multiple attach
+	// sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
+	// first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+	// at which time stdin is closed and remains closed until the container is restarted. If this
+	// flag is false, a container process that reads from stdin will never receive an EOF.
+	// Default is false.
+	// +optional
+	StdinOnce bool `json:"stdinOnce,omitempty" protobuf:"varint,17,opt,name=stdinOnce"`
+	// Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+	// Default is false.
+	// +optional
+	TTY bool `json:"tty,omitempty" protobuf:"varint,18,opt,name=tty"`
+}
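+
+// Illustrative sketch (not part of the upstream file): a minimal container
+// spec. The image, args, and port are hypothetical.
+//
+//	c := Container{
+//		Name:            "web",
+//		Image:           "registry.example.com/web:1.2.3",
+//		Args:            []string{"--listen=:8080"},
+//		Ports:           []ContainerPort{{Name: "http", ContainerPort: 8080}},
+//		ImagePullPolicy: PullIfNotPresent,
+//	}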
+
+// Handler defines a specific action that should be taken
+// TODO: pass structured data to these actions, and document that data here.
+type Handler struct {
+	// One and only one of the following should be specified.
+	// Exec specifies the action to take.
+	// +optional
+	Exec *ExecAction `json:"exec,omitempty" protobuf:"bytes,1,opt,name=exec"`
+	// HTTPGet specifies the http request to perform.
+	// +optional
+	HTTPGet *HTTPGetAction `json:"httpGet,omitempty" protobuf:"bytes,2,opt,name=httpGet"`
+	// TCPSocket specifies an action involving a TCP port.
+	// TCP hooks not yet supported
+	// TODO: implement a realistic TCP lifecycle hook
+	// +optional
+	TCPSocket *TCPSocketAction `json:"tcpSocket,omitempty" protobuf:"bytes,3,opt,name=tcpSocket"`
+}
+
+// Lifecycle describes actions that the management system should take in response to container lifecycle
+// events. For the PostStart and PreStop lifecycle handlers, management of the container blocks
+// until the action is complete, unless the container process fails, in which case the handler is aborted.
+type Lifecycle struct {
+	// PostStart is called immediately after a container is created. If the handler fails,
+	// the container is terminated and restarted according to its restart policy.
+	// Other management of the container blocks until the hook completes.
+	// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+	// +optional
+	PostStart *Handler `json:"postStart,omitempty" protobuf:"bytes,1,opt,name=postStart"`
+	// PreStop is called immediately before a container is terminated.
+	// The container is terminated after the handler completes.
+	// The reason for termination is passed to the handler.
+	// Regardless of the outcome of the handler, the container is eventually terminated.
+	// Other management of the container blocks until the hook completes.
+	// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+	// +optional
+	PreStop *Handler `json:"preStop,omitempty" protobuf:"bytes,2,opt,name=preStop"`
+}
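+
+// Illustrative sketch (not part of the upstream file): a PreStop hook that
+// delays termination so in-flight requests can drain. The sleep duration is
+// hypothetical.
+//
+//	lc := &Lifecycle{
+//		PreStop: &Handler{
+//			Exec: &ExecAction{
+//				Command: []string{"/bin/sh", "-c", "sleep 5"},
+//			},
+//		},
+//	}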
+
+type ConditionStatus string
+
+// These are valid condition statuses. "ConditionTrue" means a resource is in the condition.
+// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes
+// can't decide if a resource is in the condition or not. In the future, we could add other
+// intermediate conditions, e.g. ConditionDegraded.
+const (
+	ConditionTrue    ConditionStatus = "True"
+	ConditionFalse   ConditionStatus = "False"
+	ConditionUnknown ConditionStatus = "Unknown"
+)
+
+// ContainerStateWaiting is a waiting state of a container.
+type ContainerStateWaiting struct {
+	// (brief) reason the container is not yet running.
+	// +optional
+	Reason string `json:"reason,omitempty" protobuf:"bytes,1,opt,name=reason"`
+	// Message regarding why the container is not yet running.
+	// +optional
+	Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"`
+}
+
+// ContainerStateRunning is a running state of a container.
+type ContainerStateRunning struct {
+	// Time at which the container was last (re-)started
+	// +optional
+	StartedAt metav1.Time `json:"startedAt,omitempty" protobuf:"bytes,1,opt,name=startedAt"`
+}
+
+// ContainerStateTerminated is a terminated state of a container.
+type ContainerStateTerminated struct {
+	// Exit status from the last termination of the container
+	ExitCode int32 `json:"exitCode" protobuf:"varint,1,opt,name=exitCode"`
+	// Signal from the last termination of the container
+	// +optional
+	Signal int32 `json:"signal,omitempty" protobuf:"varint,2,opt,name=signal"`
+	// (brief) reason from the last termination of the container
+	// +optional
+	Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`
+	// Message regarding the last termination of the container
+	// +optional
+	Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"`
+	// Time at which previous execution of the container started
+	// +optional
+	StartedAt metav1.Time `json:"startedAt,omitempty" protobuf:"bytes,5,opt,name=startedAt"`
+	// Time at which the container last terminated
+	// +optional
+	FinishedAt metav1.Time `json:"finishedAt,omitempty" protobuf:"bytes,6,opt,name=finishedAt"`
+	// Container's ID in the format 'docker://<container_id>'
+	// +optional
+	ContainerID string `json:"containerID,omitempty" protobuf:"bytes,7,opt,name=containerID"`
+}
+
+// ContainerState holds a possible state of container.
+// Only one of its members may be specified.
+// If none of them is specified, the default one is ContainerStateWaiting.
+type ContainerState struct {
+	// Details about a waiting container
+	// +optional
+	Waiting *ContainerStateWaiting `json:"waiting,omitempty" protobuf:"bytes,1,opt,name=waiting"`
+	// Details about a running container
+	// +optional
+	Running *ContainerStateRunning `json:"running,omitempty" protobuf:"bytes,2,opt,name=running"`
+	// Details about a terminated container
+	// +optional
+	Terminated *ContainerStateTerminated `json:"terminated,omitempty" protobuf:"bytes,3,opt,name=terminated"`
+}
+
+// ContainerStatus contains details for the current status of this container.
+type ContainerStatus struct {
+	// This must be a DNS_LABEL. Each container in a pod must have a unique name.
+	// Cannot be updated.
+	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+	// Details about the container's current condition.
+	// +optional
+	State ContainerState `json:"state,omitempty" protobuf:"bytes,2,opt,name=state"`
+	// Details about the container's last termination condition.
+	// +optional
+	LastTerminationState ContainerState `json:"lastState,omitempty" protobuf:"bytes,3,opt,name=lastState"`
+	// Specifies whether the container has passed its readiness probe.
+	Ready bool `json:"ready" protobuf:"varint,4,opt,name=ready"`
+	// The number of times the container has been restarted, currently based on
+	// the number of dead containers that have not yet been removed.
+	// Note that this is calculated from dead containers; because those containers are subject to
+	// garbage collection, this value will be capped at 5 by the GC.
+	RestartCount int32 `json:"restartCount" protobuf:"varint,5,opt,name=restartCount"`
+	// The image the container is running.
+	// More info: https://kubernetes.io/docs/concepts/containers/images
+	// TODO(dchen1107): Which image the container is running with?
+	Image string `json:"image" protobuf:"bytes,6,opt,name=image"`
+	// ImageID of the container's image.
+	ImageID string `json:"imageID" protobuf:"bytes,7,opt,name=imageID"`
+	// Container's ID in the format 'docker://<container_id>'.
+	// +optional
+	ContainerID string `json:"containerID,omitempty" protobuf:"bytes,8,opt,name=containerID"`
+}
+
+// PodPhase is a label for the condition of a pod at the current time.
+type PodPhase string
+
+// These are the valid statuses of pods.
+const (
+	// PodPending means the pod has been accepted by the system, but one or more of the containers
+	// has not been started. This includes time before being bound to a node, as well as time spent
+	// pulling images onto the host.
+	PodPending PodPhase = "Pending"
+	// PodRunning means the pod has been bound to a node and all of the containers have been started.
+	// At least one container is still running or is in the process of being restarted.
+	PodRunning PodPhase = "Running"
+	// PodSucceeded means that all containers in the pod have voluntarily terminated
+	// with a container exit code of 0, and the system is not going to restart any of these containers.
+	PodSucceeded PodPhase = "Succeeded"
+	// PodFailed means that all containers in the pod have terminated, and at least one container has
+	// terminated in a failure (exited with a non-zero exit code or was stopped by the system).
+	PodFailed PodPhase = "Failed"
+	// PodUnknown means that for some reason the state of the pod could not be obtained, typically due
+	// to an error in communicating with the host of the pod.
+	PodUnknown PodPhase = "Unknown"
+)
+
+// PodConditionType is a valid value for PodCondition.Type
+type PodConditionType string
+
+// These are valid conditions of pod.
+const (
+	// PodScheduled represents status of the scheduling process for this pod.
+	PodScheduled PodConditionType = "PodScheduled"
+	// PodReady means the pod is able to service requests and should be added to the
+	// load balancing pools of all matching services.
+	PodReady PodConditionType = "Ready"
+	// PodInitialized means that all init containers in the pod have started successfully.
+	PodInitialized PodConditionType = "Initialized"
+	// PodReasonUnschedulable reason in PodScheduled PodCondition means that the scheduler
+	// can't schedule the pod right now, for example due to insufficient resources in the cluster.
+	PodReasonUnschedulable = "Unschedulable"
+	// ContainersReady indicates whether all containers in the pod are ready.
+	ContainersReady PodConditionType = "ContainersReady"
+)
+
+// PodCondition contains details for the current condition of this pod.
+type PodCondition struct {
+	// Type is the type of the condition.
+	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
+	Type PodConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PodConditionType"`
+	// Status is the status of the condition.
+	// Can be True, False, Unknown.
+	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
+	Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
+	// Last time we probed the condition.
+	// +optional
+	LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"`
+	// Last time the condition transitioned from one status to another.
+	// +optional
+	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"`
+	// Unique, one-word, CamelCase reason for the condition's last transition.
+	// +optional
+	Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"`
+	// Human-readable message indicating details about last transition.
+	// +optional
+	Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
+}
+
+// RestartPolicy describes how the container should be restarted.
+// Only one of the following restart policies may be specified.
+// If none of the following policies is specified, the default one
+// is RestartPolicyAlways.
+type RestartPolicy string
+
+const (
+	RestartPolicyAlways    RestartPolicy = "Always"
+	RestartPolicyOnFailure RestartPolicy = "OnFailure"
+	RestartPolicyNever     RestartPolicy = "Never"
+)
+
+// DNSPolicy defines how a pod's DNS will be configured.
+type DNSPolicy string
+
+const (
+	// DNSClusterFirstWithHostNet indicates that the pod should use cluster DNS
+	// first, if it is available, then fall back on the default
+	// (as determined by kubelet) DNS settings.
+	DNSClusterFirstWithHostNet DNSPolicy = "ClusterFirstWithHostNet"
+
+	// DNSClusterFirst indicates that the pod should use cluster DNS
+	// first unless hostNetwork is true; if cluster DNS is unavailable, then
+	// fall back on the default (as determined by kubelet) DNS settings.
+	DNSClusterFirst DNSPolicy = "ClusterFirst"
+
+	// DNSDefault indicates that the pod should use the default (as
+	// determined by kubelet) DNS settings.
+	DNSDefault DNSPolicy = "Default"
+
+	// DNSNone indicates that the pod should use empty DNS settings. DNS
+	// parameters such as nameservers and search paths should be defined via
+	// DNSConfig.
+	DNSNone DNSPolicy = "None"
+)
+
+const (
+	// DefaultTerminationGracePeriodSeconds indicates the default duration in
+	// seconds a pod needs to terminate gracefully.
+	DefaultTerminationGracePeriodSeconds = 30
+)
+
+// A node selector represents the union of the results of one or more label queries
+// over a set of nodes; that is, it represents the OR of the selectors represented
+// by the node selector terms.
+type NodeSelector struct {
+	// Required. A list of node selector terms. The terms are ORed.
+	NodeSelectorTerms []NodeSelectorTerm `json:"nodeSelectorTerms" protobuf:"bytes,1,rep,name=nodeSelectorTerms"`
+}
+
+// A null or empty node selector term matches no objects. The requirements
+// within a term are ANDed.
+// The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+type NodeSelectorTerm struct {
+	// A list of node selector requirements by node's labels.
+	// +optional
+	MatchExpressions []NodeSelectorRequirement `json:"matchExpressions,omitempty" protobuf:"bytes,1,rep,name=matchExpressions"`
+	// A list of node selector requirements by node's fields.
+	// +optional
+	MatchFields []NodeSelectorRequirement `json:"matchFields,omitempty" protobuf:"bytes,2,rep,name=matchFields"`
+}
+
+// A node selector requirement is a selector that contains values, a key, and an operator
+// that relates the key and values.
+type NodeSelectorRequirement struct {
+	// The label key that the selector applies to.
+	Key string `json:"key" protobuf:"bytes,1,opt,name=key"`
+	// Represents a key's relationship to a set of values.
+	// Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+	Operator NodeSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=NodeSelectorOperator"`
+	// An array of string values. If the operator is In or NotIn,
+	// the values array must be non-empty. If the operator is Exists or DoesNotExist,
+	// the values array must be empty. If the operator is Gt or Lt, the values
+	// array must have a single element, which will be interpreted as an integer.
+	// This array is replaced during a strategic merge patch.
+	// +optional
+	Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"`
+}
+
+// A node selector operator is the set of operators that can be used in
+// a node selector requirement.
+type NodeSelectorOperator string
+
+const (
+	NodeSelectorOpIn           NodeSelectorOperator = "In"
+	NodeSelectorOpNotIn        NodeSelectorOperator = "NotIn"
+	NodeSelectorOpExists       NodeSelectorOperator = "Exists"
+	NodeSelectorOpDoesNotExist NodeSelectorOperator = "DoesNotExist"
+	NodeSelectorOpGt           NodeSelectorOperator = "Gt"
+	NodeSelectorOpLt           NodeSelectorOperator = "Lt"
+)
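+
+// Illustrative sketch (not part of the upstream file): a selector matching
+// nodes whose architecture label is either amd64 or arm64 (terms are ORed,
+// requirements within a term are ANDed). The label key is the standard
+// kubernetes.io/arch label.
+//
+//	sel := NodeSelector{
+//		NodeSelectorTerms: []NodeSelectorTerm{{
+//			MatchExpressions: []NodeSelectorRequirement{{
+//				Key:      "kubernetes.io/arch",
+//				Operator: NodeSelectorOpIn,
+//				Values:   []string{"amd64", "arm64"},
+//			}},
+//		}},
+//	}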
+
+// A topology selector term represents the result of label queries.
+// A null or empty topology selector term matches no objects.
+// The requirements of them are ANDed.
+// It provides a subset of the functionality of NodeSelectorTerm.
+// This is an alpha feature and may change in the future.
+type TopologySelectorTerm struct {
+	// A list of topology selector requirements by labels.
+	// +optional
+	MatchLabelExpressions []TopologySelectorLabelRequirement `json:"matchLabelExpressions,omitempty" protobuf:"bytes,1,rep,name=matchLabelExpressions"`
+}
+
+// A topology selector requirement is a selector that matches given label.
+// This is an alpha feature and may change in the future.
+type TopologySelectorLabelRequirement struct {
+	// The label key that the selector applies to.
+	Key string `json:"key" protobuf:"bytes,1,opt,name=key"`
+	// An array of string values. One value must match the label to be selected.
+	// Each entry in Values is ORed.
+	Values []string `json:"values" protobuf:"bytes,2,rep,name=values"`
+}
+
+// Affinity is a group of affinity scheduling rules.
+type Affinity struct {
+	// Describes node affinity scheduling rules for the pod.
+	// +optional
+	NodeAffinity *NodeAffinity `json:"nodeAffinity,omitempty" protobuf:"bytes,1,opt,name=nodeAffinity"`
+	// Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
+	// +optional
+	PodAffinity *PodAffinity `json:"podAffinity,omitempty" protobuf:"bytes,2,opt,name=podAffinity"`
+	// Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
+	// +optional
+	PodAntiAffinity *PodAntiAffinity `json:"podAntiAffinity,omitempty" protobuf:"bytes,3,opt,name=podAntiAffinity"`
+}
+
+// Pod affinity is a group of inter-pod affinity scheduling rules.
+type PodAffinity struct {
+	// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
+	// If the affinity requirements specified by this field are not met at
+	// scheduling time, the pod will not be scheduled onto the node.
+	// If the affinity requirements specified by this field cease to be met
+	// at some point during pod execution (e.g. due to a pod label update), the
+	// system will try to eventually evict the pod from its node.
+	// When there are multiple elements, the lists of nodes corresponding to each
+	// podAffinityTerm are intersected, i.e. all terms must be satisfied.
+	// +optional
+	// RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm  `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"`
+
+	// If the affinity requirements specified by this field are not met at
+	// scheduling time, the pod will not be scheduled onto the node.
+	// If the affinity requirements specified by this field cease to be met
+	// at some point during pod execution (e.g. due to a pod label update), the
+	// system may or may not try to eventually evict the pod from its node.
+	// When there are multiple elements, the lists of nodes corresponding to each
+	// podAffinityTerm are intersected, i.e. all terms must be satisfied.
+	// +optional
+	RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,rep,name=requiredDuringSchedulingIgnoredDuringExecution"`
+	// The scheduler will prefer to schedule pods to nodes that satisfy
+	// the affinity expressions specified by this field, but it may choose
+	// a node that violates one or more of the expressions. The node that is
+	// most preferred is the one with the greatest sum of weights, i.e.
+	// for each node that meets all of the scheduling requirements (resource
+	// request, requiredDuringScheduling affinity expressions, etc.),
+	// compute a sum by iterating through the elements of this field and adding
+	// "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
+	// node(s) with the highest sum are the most preferred.
+	// +optional
+	PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"`
+}
+
+// Pod anti affinity is a group of inter pod anti affinity scheduling rules.
+type PodAntiAffinity struct {
+	// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
+	// If the anti-affinity requirements specified by this field are not met at
+	// scheduling time, the pod will not be scheduled onto the node.
+	// If the anti-affinity requirements specified by this field cease to be met
+	// at some point during pod execution (e.g. due to a pod label update), the
+	// system will try to eventually evict the pod from its node.
+	// When there are multiple elements, the lists of nodes corresponding to each
+	// podAffinityTerm are intersected, i.e. all terms must be satisfied.
+	// +optional
+	// RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm  `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"`
+
+	// If the anti-affinity requirements specified by this field are not met at
+	// scheduling time, the pod will not be scheduled onto the node.
+	// If the anti-affinity requirements specified by this field cease to be met
+	// at some point during pod execution (e.g. due to a pod label update), the
+	// system may or may not try to eventually evict the pod from its node.
+	// When there are multiple elements, the lists of nodes corresponding to each
+	// podAffinityTerm are intersected, i.e. all terms must be satisfied.
+	// +optional
+	RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,rep,name=requiredDuringSchedulingIgnoredDuringExecution"`
+	// The scheduler will prefer to schedule pods to nodes that satisfy
+	// the anti-affinity expressions specified by this field, but it may choose
+	// a node that violates one or more of the expressions. The node that is
+	// most preferred is the one with the greatest sum of weights, i.e.
+	// for each node that meets all of the scheduling requirements (resource
+	// request, requiredDuringScheduling anti-affinity expressions, etc.),
+	// compute a sum by iterating through the elements of this field and adding
+	// "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
+	// node(s) with the highest sum are the most preferred.
+	// +optional
+	PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"`
+}
+
+// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s).
+type WeightedPodAffinityTerm struct {
+	// weight associated with matching the corresponding podAffinityTerm,
+	// in the range 1-100.
+	Weight int32 `json:"weight" protobuf:"varint,1,opt,name=weight"`
+	// Required. A pod affinity term, associated with the corresponding weight.
+	PodAffinityTerm PodAffinityTerm `json:"podAffinityTerm" protobuf:"bytes,2,opt,name=podAffinityTerm"`
+}
+
+// Defines a set of pods (namely those matching the labelSelector
+// relative to the given namespace(s)) that this pod should be
+// co-located (affinity) or not co-located (anti-affinity) with,
+// where co-located is defined as running on a node whose value of
+// the label with key <topologyKey> matches that of any node on which
+// a pod of the set of pods is running.
+type PodAffinityTerm struct {
+	// A label query over a set of resources, in this case pods.
+	// +optional
+	LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty" protobuf:"bytes,1,opt,name=labelSelector"`
+	// namespaces specifies which namespaces the labelSelector applies to (matches against);
+	// null or empty list means "this pod's namespace"
+	// +optional
+	Namespaces []string `json:"namespaces,omitempty" protobuf:"bytes,2,rep,name=namespaces"`
+	// This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+	// the labelSelector in the specified namespaces, where co-located is defined as running on a node
+	// whose value of the label with key topologyKey matches that of any node on which any of the
+	// selected pods is running.
+	// Empty topologyKey is not allowed.
+	TopologyKey string `json:"topologyKey" protobuf:"bytes,3,opt,name=topologyKey"`
+}
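+
+// Illustrative sketch (editorial; the "app" label and the weight are
+// assumptions): a term co-locating with pods labeled app=db on the same
+// node, wrapped in a weighted term for use with preferred scheduling:
+//
+//	term := PodAffinityTerm{
+//		LabelSelector: &metav1.LabelSelector{
+//			MatchLabels: map[string]string{"app": "db"},
+//		},
+//		TopologyKey: "kubernetes.io/hostname",
+//	}
+//	weighted := WeightedPodAffinityTerm{Weight: 50, PodAffinityTerm: term}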
+
+// Node affinity is a group of node affinity scheduling rules.
+type NodeAffinity struct {
+	// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
+	// If the affinity requirements specified by this field are not met at
+	// scheduling time, the pod will not be scheduled onto the node.
+	// If the affinity requirements specified by this field cease to be met
+	// at some point during pod execution (e.g. due to an update), the system
+	// will try to eventually evict the pod from its node.
+	// +optional
+	// RequiredDuringSchedulingRequiredDuringExecution *NodeSelector `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"`
+
+	// If the affinity requirements specified by this field are not met at
+	// scheduling time, the pod will not be scheduled onto the node.
+	// If the affinity requirements specified by this field cease to be met
+	// at some point during pod execution (e.g. due to an update), the system
+	// may or may not try to eventually evict the pod from its node.
+	// +optional
+	RequiredDuringSchedulingIgnoredDuringExecution *NodeSelector `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,opt,name=requiredDuringSchedulingIgnoredDuringExecution"`
+	// The scheduler will prefer to schedule pods to nodes that satisfy
+	// the affinity expressions specified by this field, but it may choose
+	// a node that violates one or more of the expressions. The node that is
+	// most preferred is the one with the greatest sum of weights, i.e.
+	// for each node that meets all of the scheduling requirements (resource
+	// request, requiredDuringScheduling affinity expressions, etc.),
+	// compute a sum by iterating through the elements of this field and adding
+	// "weight" to the sum if the node matches the corresponding matchExpressions; the
+	// node(s) with the highest sum are the most preferred.
+	// +optional
+	PreferredDuringSchedulingIgnoredDuringExecution []PreferredSchedulingTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"`
+}
+
+// An empty preferred scheduling term matches all objects with implicit weight 0
+// (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+type PreferredSchedulingTerm struct {
+	// Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
+	Weight int32 `json:"weight" protobuf:"varint,1,opt,name=weight"`
+	// A node selector term, associated with the corresponding weight.
+	Preference NodeSelectorTerm `json:"preference" protobuf:"bytes,2,opt,name=preference"`
+}
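+
+// Illustrative sketch (editorial; the label key and weight are assumptions):
+// a weight-10 preference for nodes whose "disktype" label is "ssd":
+//
+//	pref := PreferredSchedulingTerm{
+//		Weight: 10,
+//		Preference: NodeSelectorTerm{
+//			MatchExpressions: []NodeSelectorRequirement{{
+//				Key:      "disktype",
+//				Operator: NodeSelectorOpIn,
+//				Values:   []string{"ssd"},
+//			}},
+//		},
+//	}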
+
+// The node this Taint is attached to has the "effect" on
+// any pod that does not tolerate the Taint.
+type Taint struct {
+	// Required. The taint key to be applied to a node.
+	Key string `json:"key" protobuf:"bytes,1,opt,name=key"`
+	// Required. The taint value corresponding to the taint key.
+	// +optional
+	Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"`
+	// Required. The effect of the taint on pods
+	// that do not tolerate the taint.
+	// Valid effects are NoSchedule, PreferNoSchedule and NoExecute.
+	Effect TaintEffect `json:"effect" protobuf:"bytes,3,opt,name=effect,casttype=TaintEffect"`
+	// TimeAdded represents the time at which the taint was added.
+	// It is only written for NoExecute taints.
+	// +optional
+	TimeAdded *metav1.Time `json:"timeAdded,omitempty" protobuf:"bytes,4,opt,name=timeAdded"`
+}
+
+type TaintEffect string
+
+const (
+	// Do not allow new pods to schedule onto the node unless they tolerate the taint,
+	// but allow all pods submitted to Kubelet without going through the scheduler
+	// to start, and allow all already-running pods to continue running.
+	// Enforced by the scheduler.
+	TaintEffectNoSchedule TaintEffect = "NoSchedule"
+	// Like TaintEffectNoSchedule, but the scheduler tries not to schedule
+	// new pods onto the node, rather than prohibiting new pods from scheduling
+	// onto the node entirely. Enforced by the scheduler.
+	TaintEffectPreferNoSchedule TaintEffect = "PreferNoSchedule"
+	// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
+	// Like TaintEffectNoSchedule, but additionally do not allow pods submitted to
+	// Kubelet without going through the scheduler to start.
+	// Enforced by Kubelet and the scheduler.
+	// TaintEffectNoScheduleNoAdmit TaintEffect = "NoScheduleNoAdmit"
+
+	// Evict any already-running pods that do not tolerate the taint.
+	// Currently enforced by NodeController.
+	TaintEffectNoExecute TaintEffect = "NoExecute"
+)
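+
+// Illustrative sketch (editorial; the key and value are assumptions): a
+// taint that keeps non-tolerating pods from being scheduled onto the node:
+//
+//	taint := Taint{
+//		Key:    "dedicated",
+//		Value:  "gpu",
+//		Effect: TaintEffectNoSchedule,
+//	}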
+
+// The pod this Toleration is attached to tolerates any taint that matches
+// the triple <key,value,effect> using the matching operator <operator>.
+type Toleration struct {
+	// Key is the taint key that the toleration applies to. Empty means match all taint keys.
+	// If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+	// +optional
+	Key string `json:"key,omitempty" protobuf:"bytes,1,opt,name=key"`
+	// Operator represents a key's relationship to the value.
+	// Valid operators are Exists and Equal. Defaults to Equal.
+	// Exists is equivalent to wildcard for value, so that a pod can
+	// tolerate all taints of a particular category.
+	// +optional
+	Operator TolerationOperator `json:"operator,omitempty" protobuf:"bytes,2,opt,name=operator,casttype=TolerationOperator"`
+	// Value is the taint value the toleration matches to.
+	// If the operator is Exists, the value should be empty, otherwise just a regular string.
+	// +optional
+	Value string `json:"value,omitempty" protobuf:"bytes,3,opt,name=value"`
+	// Effect indicates the taint effect to match. Empty means match all taint effects.
+	// When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+	// +optional
+	Effect TaintEffect `json:"effect,omitempty" protobuf:"bytes,4,opt,name=effect,casttype=TaintEffect"`
+	// TolerationSeconds represents the period of time the toleration (which must be
+	// of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+	// it is not set, which means tolerate the taint forever (do not evict). Zero and
+	// negative values will be treated as 0 (evict immediately) by the system.
+	// +optional
+	TolerationSeconds *int64 `json:"tolerationSeconds,omitempty" protobuf:"varint,5,opt,name=tolerationSeconds"`
+}
+
+// A toleration operator is the set of operators that can be used in a toleration.
+type TolerationOperator string
+
+const (
+	TolerationOpExists TolerationOperator = "Exists"
+	TolerationOpEqual  TolerationOperator = "Equal"
+)
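+
+// Illustrative sketch (editorial; the values are assumptions): a toleration
+// that matches the <"dedicated","gpu",NoSchedule> taint sketched above:
+//
+//	tol := Toleration{
+//		Key:      "dedicated",
+//		Operator: TolerationOpEqual,
+//		Value:    "gpu",
+//		Effect:   TaintEffectNoSchedule,
+//	}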
+
+// PodReadinessGate contains the reference to a pod condition
+type PodReadinessGate struct {
+	// ConditionType refers to a condition in the pod's condition list with matching type.
+	ConditionType PodConditionType `json:"conditionType" protobuf:"bytes,1,opt,name=conditionType,casttype=PodConditionType"`
+}
+
+// PodSpec is a description of a pod.
+type PodSpec struct {
+	// List of volumes that can be mounted by containers belonging to the pod.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes
+	// +optional
+	// +patchMergeKey=name
+	// +patchStrategy=merge,retainKeys
+	Volumes []Volume `json:"volumes,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,1,rep,name=volumes"`
+	// List of initialization containers belonging to the pod.
+	// Init containers are executed in order prior to containers being started. If any
+	// init container fails, the pod is considered to have failed and is handled according
+	// to its restartPolicy. The name for an init container or normal container must be
+	// unique among all containers.
+	// Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes.
+	// The resourceRequirements of an init container are taken into account during scheduling
+	// by finding the highest request/limit for each resource type, and then using the max of
+	// that value or the sum of the normal containers. Limits are applied to init containers
+	// in a similar fashion.
+	// Init containers cannot currently be added or removed.
+	// Cannot be updated.
+	// More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
+	// +patchMergeKey=name
+	// +patchStrategy=merge
+	InitContainers []Container `json:"initContainers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,20,rep,name=initContainers"`
+	// List of containers belonging to the pod.
+	// Containers cannot currently be added or removed.
+	// There must be at least one container in a Pod.
+	// Cannot be updated.
+	// +patchMergeKey=name
+	// +patchStrategy=merge
+	Containers []Container `json:"containers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=containers"`
+	// Restart policy for all containers within the pod.
+	// One of Always, OnFailure, Never.
+	// Default to Always.
+	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy
+	// +optional
+	RestartPolicy RestartPolicy `json:"restartPolicy,omitempty" protobuf:"bytes,3,opt,name=restartPolicy,casttype=RestartPolicy"`
+	// Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request.
+	// Value must be non-negative integer. The value zero indicates delete immediately.
+	// If this value is nil, the default grace period will be used instead.
+	// The grace period is the duration in seconds between the time the processes running in
+	// the pod are sent a termination signal and the time when the processes are forcibly
+	// halted with a kill signal.
+	// Set this value longer than the expected cleanup time for your process.
+	// Defaults to 30 seconds.
+	// +optional
+	TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty" protobuf:"varint,4,opt,name=terminationGracePeriodSeconds"`
+	// Optional duration in seconds the pod may be active on the node relative to
+	// StartTime before the system will actively try to mark it failed and kill associated containers.
+	// Value must be a positive integer.
+	// +optional
+	ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,5,opt,name=activeDeadlineSeconds"`
+	// Set DNS policy for the pod.
+	// Defaults to "ClusterFirst".
+	// Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'.
+	// DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy.
+	// To have DNS options set along with hostNetwork, you have to specify DNS policy
+	// explicitly to 'ClusterFirstWithHostNet'.
+	// +optional
+	DNSPolicy DNSPolicy `json:"dnsPolicy,omitempty" protobuf:"bytes,6,opt,name=dnsPolicy,casttype=DNSPolicy"`
+	// NodeSelector is a selector which must be true for the pod to fit on a node,
+	// i.e. it must match a node's labels for the pod to be scheduled on that node.
+	// More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+	// +optional
+	NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,7,rep,name=nodeSelector"`
+
+	// ServiceAccountName is the name of the ServiceAccount to use to run this pod.
+	// More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+	// +optional
+	ServiceAccountName string `json:"serviceAccountName,omitempty" protobuf:"bytes,8,opt,name=serviceAccountName"`
+	// DeprecatedServiceAccount is a deprecated alias for ServiceAccountName.
+	// Deprecated: Use serviceAccountName instead.
+	// +k8s:conversion-gen=false
+	// +optional
+	DeprecatedServiceAccount string `json:"serviceAccount,omitempty" protobuf:"bytes,9,opt,name=serviceAccount"`
+	// AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.
+	// +optional
+	AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty" protobuf:"varint,21,opt,name=automountServiceAccountToken"`
+
+	// NodeName is a request to schedule this pod onto a specific node. If it is non-empty,
+	// the scheduler simply schedules this pod onto that node, assuming that it fits resource
+	// requirements.
+	// +optional
+	NodeName string `json:"nodeName,omitempty" protobuf:"bytes,10,opt,name=nodeName"`
+	// Host networking requested for this pod. Use the host's network namespace.
+	// If this option is set, the ports that will be used must be specified.
+	// Default to false.
+	// +k8s:conversion-gen=false
+	// +optional
+	HostNetwork bool `json:"hostNetwork,omitempty" protobuf:"varint,11,opt,name=hostNetwork"`
+	// Use the host's pid namespace.
+	// Optional: Default to false.
+	// +k8s:conversion-gen=false
+	// +optional
+	HostPID bool `json:"hostPID,omitempty" protobuf:"varint,12,opt,name=hostPID"`
+	// Use the host's ipc namespace.
+	// Optional: Default to false.
+	// +k8s:conversion-gen=false
+	// +optional
+	HostIPC bool `json:"hostIPC,omitempty" protobuf:"varint,13,opt,name=hostIPC"`
+	// Share a single process namespace between all of the containers in a pod.
+	// When this is set containers will be able to view and signal processes from other containers
+	// in the same pod, and the first process in each container will not be assigned PID 1.
+	// HostPID and ShareProcessNamespace cannot both be set.
+	// Optional: Default to false.
+	// This field is beta-level and may be disabled with the PodShareProcessNamespace feature.
+	// +k8s:conversion-gen=false
+	// +optional
+	ShareProcessNamespace *bool `json:"shareProcessNamespace,omitempty" protobuf:"varint,27,opt,name=shareProcessNamespace"`
+	// SecurityContext holds pod-level security attributes and common container settings.
+	// Optional: Defaults to empty.  See type description for default values of each field.
+	// +optional
+	SecurityContext *PodSecurityContext `json:"securityContext,omitempty" protobuf:"bytes,14,opt,name=securityContext"`
+	// ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
+	// If specified, these secrets will be passed to individual puller implementations for them to use. For example,
+	// in the case of docker, only DockerConfig type secrets are honored.
+	// More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod
+	// +optional
+	// +patchMergeKey=name
+	// +patchStrategy=merge
+	ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,15,rep,name=imagePullSecrets"`
+	// Specifies the hostname of the Pod.
+	// If not specified, the pod's hostname will be set to a system-defined value.
+	// +optional
+	Hostname string `json:"hostname,omitempty" protobuf:"bytes,16,opt,name=hostname"`
+	// If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>".
+	// If not specified, the pod will not have a domain name at all.
+	// +optional
+	Subdomain string `json:"subdomain,omitempty" protobuf:"bytes,17,opt,name=subdomain"`
+	// If specified, the pod's scheduling constraints
+	// +optional
+	Affinity *Affinity `json:"affinity,omitempty" protobuf:"bytes,18,opt,name=affinity"`
+	// If specified, the pod will be dispatched by specified scheduler.
+	// If not specified, the pod will be dispatched by default scheduler.
+	// +optional
+	SchedulerName string `json:"schedulerName,omitempty" protobuf:"bytes,19,opt,name=schedulerName"`
+	// If specified, the pod's tolerations.
+	// +optional
+	Tolerations []Toleration `json:"tolerations,omitempty" protobuf:"bytes,22,opt,name=tolerations"`
+	// HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts
+	// file if specified. This is only valid for non-hostNetwork pods.
+	// +optional
+	// +patchMergeKey=ip
+	// +patchStrategy=merge
+	HostAliases []HostAlias `json:"hostAliases,omitempty" patchStrategy:"merge" patchMergeKey:"ip" protobuf:"bytes,23,rep,name=hostAliases"`
+	// If specified, indicates the pod's priority. "system-node-critical" and
+	// "system-cluster-critical" are two special keywords which indicate the
+	// highest priorities with the former being the highest priority. Any other
+	// name must be defined by creating a PriorityClass object with that name.
+	// If not specified, the pod priority will be default or zero if there is no
+	// default.
+	// +optional
+	PriorityClassName string `json:"priorityClassName,omitempty" protobuf:"bytes,24,opt,name=priorityClassName"`
+	// The priority value. Various system components use this field to find the
+	// priority of the pod. When Priority Admission Controller is enabled, it
+	// prevents users from setting this field. The admission controller populates
+	// this field from PriorityClassName.
+	// The higher the value, the higher the priority.
+	// +optional
+	Priority *int32 `json:"priority,omitempty" protobuf:"bytes,25,opt,name=priority"`
+	// Specifies the DNS parameters of a pod.
+	// Parameters specified here will be merged to the generated DNS
+	// configuration based on DNSPolicy.
+	// +optional
+	DNSConfig *PodDNSConfig `json:"dnsConfig,omitempty" protobuf:"bytes,26,opt,name=dnsConfig"`
+
+	// If specified, all readiness gates will be evaluated for pod readiness.
+	// A pod is ready when all its containers are ready AND
+	// all conditions specified in the readiness gates have status equal to "True".
+	// More info: https://github.com/kubernetes/community/blob/master/keps/sig-network/0007-pod-ready%2B%2B.md
+	// +optional
+	ReadinessGates []PodReadinessGate `json:"readinessGates,omitempty" protobuf:"bytes,28,opt,name=readinessGates"`
+	// RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used
+	// to run this pod.  If no RuntimeClass resource matches the named class, the pod will not be run.
+	// If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an
+	// empty definition that uses the default runtime handler.
+	// More info: https://github.com/kubernetes/community/blob/master/keps/sig-node/0014-runtime-class.md
+	// This is an alpha feature and may change in the future.
+	// +optional
+	RuntimeClassName *string `json:"runtimeClassName,omitempty" protobuf:"bytes,29,opt,name=runtimeClassName"`
+	// EnableServiceLinks indicates whether information about services should be injected into pod's
+	// environment variables, matching the syntax of Docker links.
+	// +optional
+	EnableServiceLinks *bool `json:"enableServiceLinks,omitempty" protobuf:"varint,30,opt,name=enableServiceLinks"`
+}
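+
+// Illustrative sketch (editorial; the image, labels, and values are
+// assumptions): a minimal spec pinning a single-container pod to nodes
+// labeled disktype=ssd:
+//
+//	spec := PodSpec{
+//		Containers:    []Container{{Name: "app", Image: "example/app:1.0"}},
+//		NodeSelector:  map[string]string{"disktype": "ssd"},
+//		RestartPolicy: RestartPolicyAlways,
+//	}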
+
+const (
+	// The default value for enableServiceLinks attribute.
+	DefaultEnableServiceLinks = true
+)
+
+// HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the
+// pod's hosts file.
+type HostAlias struct {
+	// IP address of the host file entry.
+	IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"`
+	// Hostnames for the above IP address.
+	Hostnames []string `json:"hostnames,omitempty" protobuf:"bytes,2,rep,name=hostnames"`
+}
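+
+// Illustrative sketch (editorial; the address and names are placeholders):
+// one IP mapped to two hostnames:
+//
+//	alias := HostAlias{
+//		IP:        "10.0.0.10",
+//		Hostnames: []string{"foo.local", "bar.local"},
+//	}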
+
+// PodSecurityContext holds pod-level security attributes and common container settings.
+// Some fields are also present in container.securityContext.  Field values of
+// container.securityContext take precedence over field values of PodSecurityContext.
+type PodSecurityContext struct {
+	// The SELinux context to be applied to all containers.
+	// If unspecified, the container runtime will allocate a random SELinux context for each
+	// container.  May also be set in SecurityContext.  If set in
+	// both SecurityContext and PodSecurityContext, the value specified in SecurityContext
+	// takes precedence for that container.
+	// +optional
+	SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,1,opt,name=seLinuxOptions"`
+	// The UID to run the entrypoint of the container process.
+	// Defaults to user specified in image metadata if unspecified.
+	// May also be set in SecurityContext.  If set in both SecurityContext and
+	// PodSecurityContext, the value specified in SecurityContext takes precedence
+	// for that container.
+	// +optional
+	RunAsUser *int64 `json:"runAsUser,omitempty" protobuf:"varint,2,opt,name=runAsUser"`
+	// The GID to run the entrypoint of the container process.
+	// Uses runtime default if unset.
+	// May also be set in SecurityContext.  If set in both SecurityContext and
+	// PodSecurityContext, the value specified in SecurityContext takes precedence
+	// for that container.
+	// +optional
+	RunAsGroup *int64 `json:"runAsGroup,omitempty" protobuf:"varint,6,opt,name=runAsGroup"`
+	// Indicates that the container must run as a non-root user.
+	// If true, the Kubelet will validate the image at runtime to ensure that it
+	// does not run as UID 0 (root) and fail to start the container if it does.
+	// If unset or false, no such validation will be performed.
+	// May also be set in SecurityContext.  If set in both SecurityContext and
+	// PodSecurityContext, the value specified in SecurityContext takes precedence.
+	// +optional
+	RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" protobuf:"varint,3,opt,name=runAsNonRoot"`
+	// A list of groups applied to the first process run in each container, in addition
+	// to the container's primary GID.  If unspecified, no groups will be added to
+	// any container.
+	// +optional
+	SupplementalGroups []int64 `json:"supplementalGroups,omitempty" protobuf:"varint,4,rep,name=supplementalGroups"`
+	// A special supplemental group that applies to all containers in a pod.
+	// Some volume types allow the Kubelet to change the ownership of that volume
+	// to be owned by the pod:
+	//
+	// 1. The owning GID will be the FSGroup
+	// 2. The setgid bit is set (new files created in the volume will be owned by FSGroup)
+	// 3. The permission bits are OR'd with rw-rw----
+	//
+	// If unset, the Kubelet will not modify the ownership and permissions of any volume.
+	// +optional
+	FSGroup *int64 `json:"fsGroup,omitempty" protobuf:"varint,5,opt,name=fsGroup"`
+	// Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported
+	// sysctls (by the container runtime) might fail to launch.
+	// +optional
+	Sysctls []Sysctl `json:"sysctls,omitempty" protobuf:"bytes,7,rep,name=sysctls"`
+}
+
+// PodQOSClass defines the supported qos classes of Pods.
+type PodQOSClass string
+
+const (
+	// PodQOSGuaranteed is the Guaranteed qos class.
+	PodQOSGuaranteed PodQOSClass = "Guaranteed"
+	// PodQOSBurstable is the Burstable qos class.
+	PodQOSBurstable PodQOSClass = "Burstable"
+	// PodQOSBestEffort is the BestEffort qos class.
+	PodQOSBestEffort PodQOSClass = "BestEffort"
+)
+
+// PodDNSConfig defines the DNS parameters of a pod in addition to
+// those generated from DNSPolicy.
+type PodDNSConfig struct {
+	// A list of DNS name server IP addresses.
+	// This will be appended to the base nameservers generated from DNSPolicy.
+	// Duplicated nameservers will be removed.
+	// +optional
+	Nameservers []string `json:"nameservers,omitempty" protobuf:"bytes,1,rep,name=nameservers"`
+	// A list of DNS search domains for host-name lookup.
+	// This will be appended to the base search paths generated from DNSPolicy.
+	// Duplicated search paths will be removed.
+	// +optional
+	Searches []string `json:"searches,omitempty" protobuf:"bytes,2,rep,name=searches"`
+	// A list of DNS resolver options.
+	// This will be merged with the base options generated from DNSPolicy.
+	// Duplicated entries will be removed. Resolution options given in Options
+	// will override those that appear in the base DNSPolicy.
+	// +optional
+	Options []PodDNSConfigOption `json:"options,omitempty" protobuf:"bytes,3,rep,name=options"`
+}
+
+// PodDNSConfigOption defines DNS resolver options of a pod.
+type PodDNSConfigOption struct {
+	// Required.
+	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
+	// +optional
+	Value *string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"`
+}
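+
+// Illustrative sketch (editorial; the address and option value are
+// placeholders): an extra nameserver plus an "ndots" resolver option:
+//
+//	ndots := "2"
+//	cfg := PodDNSConfig{
+//		Nameservers: []string{"10.0.0.53"},
+//		Options:     []PodDNSConfigOption{{Name: "ndots", Value: &ndots}},
+//	}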
+
+// PodStatus represents information about the status of a pod. Status may trail the actual
+// state of a system, especially if the node that hosts the pod cannot contact the control
+// plane.
+type PodStatus struct {
+	// The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle.
+	// The conditions array, the reason and message fields, and the individual container status
+	// arrays contain more detail about the pod's status.
+	// There are five possible phase values:
+	//
+	// Pending: The pod has been accepted by the Kubernetes system, but one or more of the
+	// container images has not been created. This includes time before being scheduled as
+	// well as time spent downloading images over the network, which could take a while.
+	// Running: The pod has been bound to a node, and all of the containers have been created.
+	// At least one container is still running, or is in the process of starting or restarting.
+	// Succeeded: All containers in the pod have terminated in success, and will not be restarted.
+	// Failed: All containers in the pod have terminated, and at least one container has
+	// terminated in failure. The container either exited with non-zero status or was terminated
+	// by the system.
+	// Unknown: For some reason the state of the pod could not be obtained, typically due to an
+	// error in communicating with the host of the pod.
+	//
+	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase
+	// +optional
+	Phase PodPhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PodPhase"`
+	// Current service state of pod.
+	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
+	// +optional
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	Conditions []PodCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"`
+	// A human readable message indicating details about why the pod is in this condition.
+	// +optional
+	Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"`
+	// A brief CamelCase message indicating details about why the pod is in this state.
+	// e.g. 'Evicted'
+	// +optional
+	Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
+	// nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be
+	// scheduled right away as preemption victims receive their graceful termination periods.
+	// This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide
+	// to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to
+	// give the resources on this node to a higher priority pod that is created after preemption.
+	// As a result, this field may be different than PodSpec.nodeName when the pod is
+	// scheduled.
+	// +optional
+	NominatedNodeName string `json:"nominatedNodeName,omitempty" protobuf:"bytes,11,opt,name=nominatedNodeName"`
+
+	// IP address of the host to which the pod is assigned. Empty if not yet scheduled.
+	// +optional
+	HostIP string `json:"hostIP,omitempty" protobuf:"bytes,5,opt,name=hostIP"`
+	// IP address allocated to the pod. Routable at least within the cluster.
+	// Empty if not yet allocated.
+	// +optional
+	PodIP string `json:"podIP,omitempty" protobuf:"bytes,6,opt,name=podIP"`
+
+	// RFC 3339 date and time at which the object was acknowledged by the Kubelet.
+	// This is before the Kubelet pulled the container image(s) for the pod.
+	// +optional
+	StartTime *metav1.Time `json:"startTime,omitempty" protobuf:"bytes,7,opt,name=startTime"`
+
+	// The list has one entry per init container in the manifest. The most recent successful
+	// init container will have ready = true; the most recently started container will have
+	// startTime set.
+	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status
+	InitContainerStatuses []ContainerStatus `json:"initContainerStatuses,omitempty" protobuf:"bytes,10,rep,name=initContainerStatuses"`
+
+	// The list has one entry per container in the manifest. Each entry is currently the output
+	// of `docker inspect`.
+	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status
+	// +optional
+	ContainerStatuses []ContainerStatus `json:"containerStatuses,omitempty" protobuf:"bytes,8,rep,name=containerStatuses"`
+	// The Quality of Service (QOS) classification assigned to the pod based on resource requirements.
+	// See PodQOSClass type for available QOS classes.
+	// More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md
+	// +optional
+	QOSClass PodQOSClass `json:"qosClass,omitempty" protobuf:"bytes,9,rep,name=qosClass"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encoded/decoded.
+type PodStatusResult struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+	// Most recently observed status of the pod.
+	// This data may not be up to date.
+	// Populated by the system.
+	// Read-only.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+	// +optional
+	Status PodStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Pod is a collection of containers that can run on a host. This resource is created
+// by clients and scheduled onto hosts.
+type Pod struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Specification of the desired behavior of the pod.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+	// +optional
+	Spec PodSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+	// Most recently observed status of the pod.
+	// This data may not be up to date.
+	// Populated by the system.
+	// Read-only.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+	// +optional
+	Status PodStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PodList is a list of Pods.
+type PodList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// List of pods.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md
+	Items []Pod `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// PodTemplateSpec describes the data a pod should have when created from a template
+type PodTemplateSpec struct {
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Specification of the desired behavior of the pod.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+	// +optional
+	Spec PodSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PodTemplate describes a template for creating copies of a predefined pod.
+type PodTemplate struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Template defines the pods that will be created from this pod template.
+	// https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+	// +optional
+	Template PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PodTemplateList is a list of PodTemplates.
+type PodTemplateList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// List of pod templates.
+	Items []PodTemplate `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// ReplicationControllerSpec is the specification of a replication controller.
+type ReplicationControllerSpec struct {
+	// Replicas is the number of desired replicas.
+	// This is a pointer to distinguish between explicit zero and unspecified.
+	// Defaults to 1.
+	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller
+	// +optional
+	Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
+
+	// Minimum number of seconds for which a newly created pod should be ready
+	// without any of its containers crashing, for it to be considered available.
+	// Defaults to 0 (pod will be considered available as soon as it is ready).
+	// +optional
+	MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"`
+
+	// Selector is a label query over pods that should match the Replicas count.
+	// Label keys and values must match in order for a pod to be controlled by this
+	// replication controller. If Selector is empty, it is defaulted to the labels
+	// present on the Pod template.
+	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+	// +optional
+	Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"`
+
+	// TemplateRef is a reference to an object that describes the pod that will be created if
+	// insufficient replicas are detected.
+	// Reference to an object that describes the pod that will be created if insufficient replicas are detected.
+	// +optional
+	// TemplateRef *ObjectReference `json:"templateRef,omitempty"`
+
+	// Template is the object that describes the pod that will be created if
+	// insufficient replicas are detected. This takes precedence over a TemplateRef.
+	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
+	// +optional
+	Template *PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"`
+}
+
+// ReplicationControllerStatus represents the current status of a replication
+// controller.
+type ReplicationControllerStatus struct {
+	// Replicas is the most recently observed number of replicas.
+	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller
+	Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"`
+
+	// The number of pods that have labels matching the labels of the pod template of the replication controller.
+	// +optional
+	FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty" protobuf:"varint,2,opt,name=fullyLabeledReplicas"`
+
+	// The number of ready replicas for this replication controller.
+	// +optional
+	ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,4,opt,name=readyReplicas"`
+
+	// The number of available replicas (ready for at least minReadySeconds) for this replication controller.
+	// +optional
+	AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,5,opt,name=availableReplicas"`
+
+	// ObservedGeneration reflects the generation of the most recently observed replication controller.
+	// +optional
+	ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"`
+
+	// Represents the latest available observations of a replication controller's current state.
+	// +optional
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	Conditions []ReplicationControllerCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"`
+}
+
+type ReplicationControllerConditionType string
+
+// These are valid conditions of a replication controller.
+const (
+	// ReplicationControllerReplicaFailure is added in a replication controller when one of its pods
+	// fails to be created due to insufficient quota, limit ranges, pod security policy, node selectors,
+	// etc., or is deleted due to the kubelet being down or finalizers failing.
+	ReplicationControllerReplicaFailure ReplicationControllerConditionType = "ReplicaFailure"
+)
+
+// ReplicationControllerCondition describes the state of a replication controller at a certain point.
+type ReplicationControllerCondition struct {
+	// Type of replication controller condition.
+	Type ReplicationControllerConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ReplicationControllerConditionType"`
+	// Status of the condition, one of True, False, Unknown.
+	Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
+	// The last time the condition transitioned from one status to another.
+	// +optional
+	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
+	// The reason for the condition's last transition.
+	// +optional
+	Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
+	// A human readable message indicating details about the transition.
+	// +optional
+	Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
+}
+
+// +genclient
+// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale
+// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ReplicationController represents the configuration of a replication controller.
+type ReplicationController struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// If the Labels of a ReplicationController are empty, they are defaulted to
+	// be the same as the Pod(s) that the replication controller manages.
+	// Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Spec defines the specification of the desired behavior of the replication controller.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+	// +optional
+	Spec ReplicationControllerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+	// Status is the most recently observed status of the replication controller.
+	// This data may be out of date by some window of time.
+	// Populated by the system.
+	// Read-only.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+	// +optional
+	Status ReplicationControllerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ReplicationControllerList is a collection of replication controllers.
+type ReplicationControllerList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// List of replication controllers.
+	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller
+	Items []ReplicationController `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// Session Affinity Type string
+type ServiceAffinity string
+
+const (
+	// ServiceAffinityClientIP is Client IP based session affinity.
+	ServiceAffinityClientIP ServiceAffinity = "ClientIP"
+
+	// ServiceAffinityNone - no session affinity.
+	ServiceAffinityNone ServiceAffinity = "None"
+)
+
+const DefaultClientIPServiceAffinitySeconds int32 = 10800
+
+// SessionAffinityConfig represents the configurations of session affinity.
+type SessionAffinityConfig struct {
+	// clientIP contains the configurations of Client IP based session affinity.
+	// +optional
+	ClientIP *ClientIPConfig `json:"clientIP,omitempty" protobuf:"bytes,1,opt,name=clientIP"`
+}
+
+// ClientIPConfig represents the configurations of Client IP based session affinity.
+type ClientIPConfig struct {
+	// timeoutSeconds specifies the seconds of ClientIP type session sticky time.
+	// The value must be > 0 && <= 86400 (for 1 day) if ServiceAffinity == "ClientIP".
+	// Default value is 10800 (for 3 hours).
+	// +optional
+	TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,1,opt,name=timeoutSeconds"`
+}
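+
+// Illustrative sketch (editorial): ClientIP affinity with a one-hour sticky
+// time instead of the 10800-second default:
+//
+//	timeout := int32(3600)
+//	cfg := SessionAffinityConfig{
+//		ClientIP: &ClientIPConfig{TimeoutSeconds: &timeout},
+//	}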
+
+// Service Type string describes ingress methods for a service
+type ServiceType string
+
+const (
+	// ServiceTypeClusterIP means a service will only be accessible inside the
+	// cluster, via the cluster IP.
+	ServiceTypeClusterIP ServiceType = "ClusterIP"
+
+	// ServiceTypeNodePort means a service will be exposed on one port of
+	// every node, in addition to 'ClusterIP' type.
+	ServiceTypeNodePort ServiceType = "NodePort"
+
+	// ServiceTypeLoadBalancer means a service will be exposed via an
+	// external load balancer (if the cloud provider supports it), in addition
+	// to 'NodePort' type.
+	ServiceTypeLoadBalancer ServiceType = "LoadBalancer"
+
+	// ServiceTypeExternalName means a service consists of only a reference to
+	// an external name that kubedns or equivalent will return as a CNAME
+	// record, with no exposing or proxying of any pods involved.
+	ServiceTypeExternalName ServiceType = "ExternalName"
+)
+
+// Service External Traffic Policy Type string
+type ServiceExternalTrafficPolicyType string
+
+const (
+	// ServiceExternalTrafficPolicyTypeLocal specifies node-local endpoints behavior.
+	ServiceExternalTrafficPolicyTypeLocal ServiceExternalTrafficPolicyType = "Local"
+	// ServiceExternalTrafficPolicyTypeCluster specifies node-global (legacy) behavior.
+	ServiceExternalTrafficPolicyTypeCluster ServiceExternalTrafficPolicyType = "Cluster"
+)
+
+// ServiceStatus represents the current status of a service.
+type ServiceStatus struct {
+	// LoadBalancer contains the current status of the load-balancer,
+	// if one is present.
+	// +optional
+	LoadBalancer LoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"`
+}
+
+// LoadBalancerStatus represents the status of a load-balancer.
+type LoadBalancerStatus struct {
+	// Ingress is a list containing ingress points for the load-balancer.
+	// Traffic intended for the service should be sent to these ingress points.
+	// +optional
+	Ingress []LoadBalancerIngress `json:"ingress,omitempty" protobuf:"bytes,1,rep,name=ingress"`
+}
+
+// LoadBalancerIngress represents the status of a load-balancer ingress point:
+// traffic intended for the service should be sent to an ingress point.
+type LoadBalancerIngress struct {
+	// IP is set for load-balancer ingress points that are IP based
+	// (typically GCE or OpenStack load-balancers)
+	// +optional
+	IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"`
+
+	// Hostname is set for load-balancer ingress points that are DNS based
+	// (typically AWS load-balancers)
+	// +optional
+	Hostname string `json:"hostname,omitempty" protobuf:"bytes,2,opt,name=hostname"`
+}
+
+// ServiceSpec describes the attributes that a user creates on a service.
+type ServiceSpec struct {
+	// The list of ports that are exposed by this service.
+	// More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+	// +patchMergeKey=port
+	// +patchStrategy=merge
+	Ports []ServicePort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"port" protobuf:"bytes,1,rep,name=ports"`
+
+	// Route service traffic to pods with label keys and values matching this
+	// selector. If empty or not present, the service is assumed to have an
+	// external process managing its endpoints, which Kubernetes will not
+	// modify. Only applies to types ClusterIP, NodePort, and LoadBalancer.
+	// Ignored if type is ExternalName.
+	// More info: https://kubernetes.io/docs/concepts/services-networking/service/
+	// +optional
+	Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"`
+
+	// clusterIP is the IP address of the service and is usually assigned
+	// randomly by the master. If an address is specified manually and is not in
+	// use by others, it will be allocated to the service; otherwise, creation
+	// of the service will fail. This field can not be changed through updates.
+	// Valid values are "None", empty string (""), or a valid IP address. "None"
+	// can be specified for headless services when proxying is not required.
+	// Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if
+	// type is ExternalName.
+	// More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+	// +optional
+	ClusterIP string `json:"clusterIP,omitempty" protobuf:"bytes,3,opt,name=clusterIP"`
+
+	// type determines how the Service is exposed. Defaults to ClusterIP. Valid
+	// options are ExternalName, ClusterIP, NodePort, and LoadBalancer.
+	// "ExternalName" maps to the specified externalName.
+	// "ClusterIP" allocates a cluster-internal IP address for load-balancing to
+	// endpoints. Endpoints are determined by the selector or if that is not
+	// specified, by manual construction of an Endpoints object. If clusterIP is
+	// "None", no virtual IP is allocated and the endpoints are published as a
+	// set of endpoints rather than a stable IP.
+	// "NodePort" builds on ClusterIP and allocates a port on every node which
+	// routes to the clusterIP.
+	// "LoadBalancer" builds on NodePort and creates an
+	// external load-balancer (if supported in the current cloud) which routes
+	// to the clusterIP.
+	// More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services---service-types
+	// +optional
+	Type ServiceType `json:"type,omitempty" protobuf:"bytes,4,opt,name=type,casttype=ServiceType"`
+
+	// externalIPs is a list of IP addresses for which nodes in the cluster
+	// will also accept traffic for this service.  These IPs are not managed by
+	// Kubernetes.  The user is responsible for ensuring that traffic arrives
+	// at a node with this IP.  A common example is external load-balancers
+	// that are not part of the Kubernetes system.
+	// +optional
+	ExternalIPs []string `json:"externalIPs,omitempty" protobuf:"bytes,5,rep,name=externalIPs"`
+
+	// Supports "ClientIP" and "None". Used to maintain session affinity.
+	// Enable client IP based session affinity.
+	// Must be ClientIP or None.
+	// Defaults to None.
+	// More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+	// +optional
+	SessionAffinity ServiceAffinity `json:"sessionAffinity,omitempty" protobuf:"bytes,7,opt,name=sessionAffinity,casttype=ServiceAffinity"`
+
+	// Only applies to Service Type: LoadBalancer.
+	// LoadBalancer will get created with the IP specified in this field.
+	// This feature depends on whether the underlying cloud-provider supports specifying
+	// the loadBalancerIP when a load balancer is created.
+	// This field will be ignored if the cloud-provider does not support the feature.
+	// +optional
+	LoadBalancerIP string `json:"loadBalancerIP,omitempty" protobuf:"bytes,8,opt,name=loadBalancerIP"`
+
+	// If specified and supported by the platform, traffic through the cloud-provider
+	// load-balancer will be restricted to the specified client IPs. This field will be
+	// ignored if the cloud-provider does not support the feature.
+	// More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/
+	// +optional
+	LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty" protobuf:"bytes,9,opt,name=loadBalancerSourceRanges"`
+
+	// externalName is the external reference that kubedns or equivalent will
+	// return as a CNAME record for this service. No proxying will be involved.
+	// Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123)
+	// and requires Type to be ExternalName.
+	// +optional
+	ExternalName string `json:"externalName,omitempty" protobuf:"bytes,10,opt,name=externalName"`
+
+	// externalTrafficPolicy denotes if this Service desires to route external
+	// traffic to node-local or cluster-wide endpoints. "Local" preserves the
+	// client source IP and avoids a second hop for LoadBalancer and NodePort
+	// type services, but risks potentially imbalanced traffic spreading.
+	// "Cluster" obscures the client source IP and may cause a second hop to
+	// another node, but should have good overall load-spreading.
+	// +optional
+	ExternalTrafficPolicy ServiceExternalTrafficPolicyType `json:"externalTrafficPolicy,omitempty" protobuf:"bytes,11,opt,name=externalTrafficPolicy"`
+
+	// healthCheckNodePort specifies the healthcheck nodePort for the service.
+	// If not specified, HealthCheckNodePort is created by the service api
+	// backend with the allocated nodePort. Will use user-specified nodePort value
+	// if specified by the client. Only takes effect when Type is set to LoadBalancer
+	// and ExternalTrafficPolicy is set to Local.
+	// +optional
+	HealthCheckNodePort int32 `json:"healthCheckNodePort,omitempty" protobuf:"bytes,12,opt,name=healthCheckNodePort"`
+
+	// publishNotReadyAddresses, when set to true, indicates that DNS implementations
+	// must publish the notReadyAddresses of subsets for the Endpoints associated with
+	// the Service. The default value is false.
+	// The primary use case for setting this field is to use a StatefulSet's Headless Service
+	// to propagate SRV records for its Pods without respect to their readiness, for the
+	// purpose of peer discovery.
+	// +optional
+	PublishNotReadyAddresses bool `json:"publishNotReadyAddresses,omitempty" protobuf:"varint,13,opt,name=publishNotReadyAddresses"`
+	// sessionAffinityConfig contains the configurations of session affinity.
+	// +optional
+	SessionAffinityConfig *SessionAffinityConfig `json:"sessionAffinityConfig,omitempty" protobuf:"bytes,14,opt,name=sessionAffinityConfig"`
+}
+
+// ServicePort contains information on a service's port.
+type ServicePort struct {
+	// The name of this port within the service. This must be a DNS_LABEL.
+	// All ports within a ServiceSpec must have unique names. This maps to
+	// the 'Name' field in EndpointPort objects.
+	// Optional if only one ServicePort is defined on this service.
+	// +optional
+	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
+
+	// The IP protocol for this port. Supports "TCP", "UDP", and "SCTP".
+	// Default is TCP.
+	// +optional
+	Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,2,opt,name=protocol,casttype=Protocol"`
+
+	// The port that will be exposed by this service.
+	Port int32 `json:"port" protobuf:"varint,3,opt,name=port"`
+
+	// Number or name of the port to access on the pods targeted by the service.
+	// Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
+	// If this is a string, it will be looked up as a named port in the
+	// target Pod's container ports. If this is not specified, the value
+	// of the 'port' field is used (an identity map).
+	// This field is ignored for services with clusterIP=None, and should be
+	// omitted or set equal to the 'port' field.
+	// More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service
+	// +optional
+	TargetPort intstr.IntOrString `json:"targetPort,omitempty" protobuf:"bytes,4,opt,name=targetPort"`
+
+	// The port on each node on which this service is exposed when type=NodePort or LoadBalancer.
+	// Usually assigned by the system. If specified, it will be allocated to the service
+	// if unused or else creation of the service will fail.
+	// Default is to auto-allocate a port if the ServiceType of this Service requires one.
+	// More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+	// +optional
+	NodePort int32 `json:"nodePort,omitempty" protobuf:"varint,5,opt,name=nodePort"`
+}
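+
+// An illustrative (non-normative) ServicePort, sketching how a named
+// targetPort resolves against a container port; names used here are
+// hypothetical:
+//
+//   port := ServicePort{
+//       Name:       "http",
+//       Protocol:   ProtocolTCP,
+//       Port:       80,                        // exposed by the service
+//       TargetPort: intstr.FromString("web"),  // looked up in the Pod's container ports
+//   }
+//
+// With TargetPort left unset, traffic to port 80 would be forwarded to
+// port 80 on the selected pods (the identity mapping described above).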
+
+// +genclient
+// +genclient:skipVerbs=deleteCollection
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Service is a named abstraction of software service (for example, mysql) consisting of local port
+// (for example 3306) that the proxy listens on, and the selector that determines which pods
+// will answer requests sent through the proxy.
+type Service struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Spec defines the behavior of a service.
+	// https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+	// +optional
+	Spec ServiceSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+	// Most recently observed status of the service.
+	// Populated by the system.
+	// Read-only.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+	// +optional
+	Status ServiceStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
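+
+// A minimal sketch of a Service object (illustrative only; the name and
+// selector are hypothetical):
+//
+//   svc := Service{
+//       ObjectMeta: metav1.ObjectMeta{Name: "mysvc"},
+//       Spec: ServiceSpec{
+//           Selector: map[string]string{"app": "mysvc"},
+//           Ports:    []ServicePort{{Port: 80, TargetPort: intstr.FromInt(8080)}},
+//       },
+//   }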
+
+const (
+	// ClusterIPNone - do not assign a cluster IP
+	// no proxying is required and no environment variables should be created for pods
+	ClusterIPNone = "None"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ServiceList holds a list of services.
+type ServiceList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// List of services
+	Items []Service `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ServiceAccount binds together:
+// * a name, understood by users, and perhaps by peripheral systems, for an identity
+// * a principal that can be authenticated and authorized
+// * a set of secrets
+type ServiceAccount struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount.
+	// More info: https://kubernetes.io/docs/concepts/configuration/secret
+	// +optional
+	// +patchMergeKey=name
+	// +patchStrategy=merge
+	Secrets []ObjectReference `json:"secrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=secrets"`
+
+	// ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images
+	// in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets
+	// can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet.
+	// More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
+	// +optional
+	ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty" protobuf:"bytes,3,rep,name=imagePullSecrets"`
+
+	// AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted.
+	// Can be overridden at the pod level.
+	// +optional
+	AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty" protobuf:"varint,4,opt,name=automountServiceAccountToken"`
+}
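+
+// A minimal sketch of a ServiceAccount that pins an image pull secret
+// (names are hypothetical):
+//
+//   sa := ServiceAccount{
+//       ObjectMeta:       metav1.ObjectMeta{Name: "builder", Namespace: "ci"},
+//       ImagePullSecrets: []LocalObjectReference{{Name: "registry-cred"}},
+//   }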
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ServiceAccountList is a list of ServiceAccount objects
+type ServiceAccountList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// List of ServiceAccounts.
+	// More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+	Items []ServiceAccount `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Endpoints is a collection of endpoints that implement the actual service. Example:
+//   Name: "mysvc",
+//   Subsets: [
+//     {
+//       Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}],
+//       Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}]
+//     },
+//     {
+//       Addresses: [{"ip": "10.10.3.3"}],
+//       Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}]
+//     },
+//  ]
+type Endpoints struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// The set of all endpoints is the union of all subsets. Addresses are placed into
+	// subsets according to the IPs they share. A single address with multiple ports,
+	// some of which are ready and some of which are not (because they come from
+	// different containers) will result in the address being displayed in different
+	// subsets for the different ports. No address will appear in both Addresses and
+	// NotReadyAddresses in the same subset.
+	// Sets of addresses and ports that comprise a service.
+	// +optional
+	Subsets []EndpointSubset `json:"subsets,omitempty" protobuf:"bytes,2,rep,name=subsets"`
+}
+
+// EndpointSubset is a group of addresses with a common set of ports. The
+// expanded set of endpoints is the Cartesian product of Addresses x Ports.
+// For example, given:
+//   {
+//     Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}],
+//     Ports:     [{"name": "a", "port": 8675}, {"name": "b", "port": 309}]
+//   }
+// The resulting set of endpoints can be viewed as:
+//     a: [ 10.10.1.1:8675, 10.10.2.2:8675 ],
+//     b: [ 10.10.1.1:309, 10.10.2.2:309 ]
+type EndpointSubset struct {
+	// IP addresses which offer the related ports that are marked as ready. These endpoints
+	// should be considered safe for load balancers and clients to utilize.
+	// +optional
+	Addresses []EndpointAddress `json:"addresses,omitempty" protobuf:"bytes,1,rep,name=addresses"`
+	// IP addresses which offer the related ports but are not currently marked as ready
+	// because they have not yet finished starting, have recently failed a readiness check,
+	// or have recently failed a liveness check.
+	// +optional
+	NotReadyAddresses []EndpointAddress `json:"notReadyAddresses,omitempty" protobuf:"bytes,2,rep,name=notReadyAddresses"`
+	// Port numbers available on the related IP addresses.
+	// +optional
+	Ports []EndpointPort `json:"ports,omitempty" protobuf:"bytes,3,rep,name=ports"`
+}
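+
+// An EndpointSubset literal in the spirit of the example above (illustrative
+// only), additionally showing a not-ready address excluded from the ready set:
+//
+//   subset := EndpointSubset{
+//       Addresses:         []EndpointAddress{{IP: "10.10.1.1"}, {IP: "10.10.2.2"}},
+//       NotReadyAddresses: []EndpointAddress{{IP: "10.10.3.3"}},
+//       Ports:             []EndpointPort{{Name: "a", Port: 8675}, {Name: "b", Port: 309}},
+//   }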
+
+// EndpointAddress is a tuple that describes a single IP address.
+type EndpointAddress struct {
+	// The IP of this endpoint.
+	// May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16),
+	// or link-local multicast (224.0.0.0/24).
+	// IPv6 is also accepted but not fully supported on all platforms. Also, certain
+	// kubernetes components, like kube-proxy, are not IPv6 ready.
+	// TODO: This should allow hostname or IP, See #4447.
+	IP string `json:"ip" protobuf:"bytes,1,opt,name=ip"`
+	// The Hostname of this endpoint
+	// +optional
+	Hostname string `json:"hostname,omitempty" protobuf:"bytes,3,opt,name=hostname"`
+	// Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node.
+	// +optional
+	NodeName *string `json:"nodeName,omitempty" protobuf:"bytes,4,opt,name=nodeName"`
+	// Reference to object providing the endpoint.
+	// +optional
+	TargetRef *ObjectReference `json:"targetRef,omitempty" protobuf:"bytes,2,opt,name=targetRef"`
+}
+
+// EndpointPort is a tuple that describes a single port.
+type EndpointPort struct {
+	// The name of this port (corresponds to ServicePort.Name).
+	// Must be a DNS_LABEL.
+	// Optional only if one port is defined.
+	// +optional
+	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
+
+	// The port number of the endpoint.
+	Port int32 `json:"port" protobuf:"varint,2,opt,name=port"`
+
+	// The IP protocol for this port.
+	// Must be UDP, TCP, or SCTP.
+	// Default is TCP.
+	// +optional
+	Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,3,opt,name=protocol,casttype=Protocol"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// EndpointsList is a list of endpoints.
+type EndpointsList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// List of endpoints.
+	Items []Endpoints `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// NodeSpec describes the attributes that a node is created with.
+type NodeSpec struct {
+	// PodCIDR represents the pod IP range assigned to the node.
+	// +optional
+	PodCIDR string `json:"podCIDR,omitempty" protobuf:"bytes,1,opt,name=podCIDR"`
+	// ID of the node assigned by the cloud provider in the format: <ProviderName>://<ProviderSpecificNodeID>
+	// +optional
+	ProviderID string `json:"providerID,omitempty" protobuf:"bytes,3,opt,name=providerID"`
+	// Unschedulable controls node schedulability of new pods. By default, a node is schedulable.
+	// More info: https://kubernetes.io/docs/concepts/nodes/node/#manual-node-administration
+	// +optional
+	Unschedulable bool `json:"unschedulable,omitempty" protobuf:"varint,4,opt,name=unschedulable"`
+	// If specified, the node's taints.
+	// +optional
+	Taints []Taint `json:"taints,omitempty" protobuf:"bytes,5,opt,name=taints"`
+	// If specified, the source to get node configuration from
+	// The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field
+	// +optional
+	ConfigSource *NodeConfigSource `json:"configSource,omitempty" protobuf:"bytes,6,opt,name=configSource"`
+
+	// Deprecated. Not all kubelets will set this field. Remove field after 1.13.
+	// see: https://issues.k8s.io/61966
+	// +optional
+	DoNotUse_ExternalID string `json:"externalID,omitempty" protobuf:"bytes,2,opt,name=externalID"`
+}
+
+// NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil.
+type NodeConfigSource struct {
+	// For historical context, regarding the below kind, apiVersion, and configMapRef deprecation tags:
+	// 1. kind/apiVersion were used by the kubelet to persist this struct to disk (they had no protobuf tags)
+	// 2. configMapRef and proto tag 1 were used by the API to refer to a configmap,
+	//    but used a generic ObjectReference type that didn't really have the fields we needed
+	// All uses/persistence of the NodeConfigSource struct prior to 1.11 were gated by alpha feature flags,
+	// so there was no persisted data for these fields that needed to be migrated/handled.
+
+	// +k8s:deprecated=kind
+	// +k8s:deprecated=apiVersion
+	// +k8s:deprecated=configMapRef,protobuf=1
+
+	// ConfigMap is a reference to a Node's ConfigMap
+	ConfigMap *ConfigMapNodeConfigSource `json:"configMap,omitempty" protobuf:"bytes,2,opt,name=configMap"`
+}
+
+// ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node.
+type ConfigMapNodeConfigSource struct {
+	// Namespace is the metadata.namespace of the referenced ConfigMap.
+	// This field is required in all cases.
+	Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"`
+
+	// Name is the metadata.name of the referenced ConfigMap.
+	// This field is required in all cases.
+	Name string `json:"name" protobuf:"bytes,2,opt,name=name"`
+
+	// UID is the metadata.UID of the referenced ConfigMap.
+	// This field is forbidden in Node.Spec, and required in Node.Status.
+	// +optional
+	UID types.UID `json:"uid,omitempty" protobuf:"bytes,3,opt,name=uid"`
+
+	// ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap.
+	// This field is forbidden in Node.Spec, and required in Node.Status.
+	// +optional
+	ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,4,opt,name=resourceVersion"`
+
+	// KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure
+	// This field is required in all cases.
+	KubeletConfigKey string `json:"kubeletConfigKey" protobuf:"bytes,5,opt,name=kubeletConfigKey"`
+}
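+
+// A sketch of a NodeConfigSource as it might appear in Node.Spec (names are
+// hypothetical). UID and ResourceVersion are omitted here: per the field
+// comments above, they are forbidden in Spec and only set in Status:
+//
+//   src := NodeConfigSource{
+//       ConfigMap: &ConfigMapNodeConfigSource{
+//           Namespace:        "kube-system",
+//           Name:             "my-node-config",
+//           KubeletConfigKey: "kubelet",
+//       },
+//   }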
+
+// DaemonEndpoint contains information about a single Daemon endpoint.
+type DaemonEndpoint struct {
+	/*
+		The port tag was not properly in quotes in earlier releases, so it must be
+		uppercased for backwards compat (since it was falling back to var name of
+		'Port').
+	*/
+
+	// Port number of the given endpoint.
+	Port int32 `json:"Port" protobuf:"varint,1,opt,name=Port"`
+}
+
+// NodeDaemonEndpoints lists ports opened by daemons running on the Node.
+type NodeDaemonEndpoints struct {
+	// Endpoint on which Kubelet is listening.
+	// +optional
+	KubeletEndpoint DaemonEndpoint `json:"kubeletEndpoint,omitempty" protobuf:"bytes,1,opt,name=kubeletEndpoint"`
+}
+
+// NodeSystemInfo is a set of ids/uuids to uniquely identify the node.
+type NodeSystemInfo struct {
+	// MachineID reported by the node. For unique machine identification
+	// in the cluster this field is preferred. Learn more from man(5)
+	// machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html
+	MachineID string `json:"machineID" protobuf:"bytes,1,opt,name=machineID"`
+	// SystemUUID reported by the node. For unique machine identification
+	// MachineID is preferred. This field is specific to Red Hat hosts
+	// https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html
+	SystemUUID string `json:"systemUUID" protobuf:"bytes,2,opt,name=systemUUID"`
+	// Boot ID reported by the node.
+	BootID string `json:"bootID" protobuf:"bytes,3,opt,name=bootID"`
+	// Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64).
+	KernelVersion string `json:"kernelVersion" protobuf:"bytes,4,opt,name=kernelVersion"`
+	// OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).
+	OSImage string `json:"osImage" protobuf:"bytes,5,opt,name=osImage"`
+	// ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0).
+	ContainerRuntimeVersion string `json:"containerRuntimeVersion" protobuf:"bytes,6,opt,name=containerRuntimeVersion"`
+	// Kubelet Version reported by the node.
+	KubeletVersion string `json:"kubeletVersion" protobuf:"bytes,7,opt,name=kubeletVersion"`
+	// KubeProxy Version reported by the node.
+	KubeProxyVersion string `json:"kubeProxyVersion" protobuf:"bytes,8,opt,name=kubeProxyVersion"`
+	// The Operating System reported by the node
+	OperatingSystem string `json:"operatingSystem" protobuf:"bytes,9,opt,name=operatingSystem"`
+	// The Architecture reported by the node
+	Architecture string `json:"architecture" protobuf:"bytes,10,opt,name=architecture"`
+}
+
+// NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource.
+type NodeConfigStatus struct {
+	// Assigned reports the checkpointed config the node will try to use.
+	// When Node.Spec.ConfigSource is updated, the node checkpoints the associated
+	// config payload to local disk, along with a record indicating intended
+	// config. The node refers to this record to choose its config checkpoint, and
+	// reports this record in Assigned. Assigned only updates in the status after
+	// the record has been checkpointed to disk. When the Kubelet is restarted,
+	// it tries to make the Assigned config the Active config by loading and
+	// validating the checkpointed payload identified by Assigned.
+	// +optional
+	Assigned *NodeConfigSource `json:"assigned,omitempty" protobuf:"bytes,1,opt,name=assigned"`
+	// Active reports the checkpointed config the node is actively using.
+	// Active will represent either the current version of the Assigned config,
+	// or the current LastKnownGood config, depending on whether attempting to use the
+	// Assigned config results in an error.
+	// +optional
+	Active *NodeConfigSource `json:"active,omitempty" protobuf:"bytes,2,opt,name=active"`
+	// LastKnownGood reports the checkpointed config the node will fall back to
+	// when it encounters an error attempting to use the Assigned config.
+	// The Assigned config becomes the LastKnownGood config when the node determines
+	// that the Assigned config is stable and correct.
+	// This is currently implemented as a 10-minute soak period starting when the local
+	// record of Assigned config is updated. If the Assigned config is Active at the end
+	// of this period, it becomes the LastKnownGood. Note that if Spec.ConfigSource is
+	// reset to nil (use local defaults), the LastKnownGood is also immediately reset to nil,
+	// because the local default config is always assumed good.
+	// You should not make assumptions about the node's method of determining config stability
+	// and correctness, as this may change or become configurable in the future.
+	// +optional
+	LastKnownGood *NodeConfigSource `json:"lastKnownGood,omitempty" protobuf:"bytes,3,opt,name=lastKnownGood"`
+	// Error describes any problems reconciling the Spec.ConfigSource to the Active config.
+	// Errors may occur, for example, when attempting to checkpoint Spec.ConfigSource to the local
+	// Assigned record, when checkpointing the payload associated with Spec.ConfigSource, or when
+	// loading or validating the Assigned config.
+	// Errors may occur at different points while syncing config. Earlier errors (e.g. download or
+	// checkpointing errors) will not result in a rollback to LastKnownGood, and may resolve across
+	// Kubelet retries. Later errors (e.g. loading or validating a checkpointed config) will result in
+	// a rollback to LastKnownGood. In the latter case, it is usually possible to resolve the error
+	// by fixing the config assigned in Spec.ConfigSource.
+	// You can find additional information for debugging by searching the error message in the Kubelet log.
+	// Error is a human-readable description of the error state; machines can check whether or not Error
+	// is empty, but should not rely on the stability of the Error text across Kubelet versions.
+	// +optional
+	Error string `json:"error,omitempty" protobuf:"bytes,4,opt,name=error"`
+}
+
+// NodeStatus is information about the current status of a node.
+type NodeStatus struct {
+	// Capacity represents the total resources of a node.
+	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity
+	// +optional
+	Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"`
+	// Allocatable represents the resources of a node that are available for scheduling.
+	// Defaults to Capacity.
+	// +optional
+	Allocatable ResourceList `json:"allocatable,omitempty" protobuf:"bytes,2,rep,name=allocatable,casttype=ResourceList,castkey=ResourceName"`
+	// NodePhase is the recently observed lifecycle phase of the node.
+	// More info: https://kubernetes.io/docs/concepts/nodes/node/#phase
+	// The field is never populated, and is now deprecated.
+	// +optional
+	Phase NodePhase `json:"phase,omitempty" protobuf:"bytes,3,opt,name=phase,casttype=NodePhase"`
+	// Conditions is an array of current observed node conditions.
+	// More info: https://kubernetes.io/docs/concepts/nodes/node/#condition
+	// +optional
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	Conditions []NodeCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,4,rep,name=conditions"`
+	// List of addresses reachable to the node.
+	// Queried from cloud provider, if available.
+	// More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses
+	// +optional
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	Addresses []NodeAddress `json:"addresses,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,5,rep,name=addresses"`
+	// Endpoints of daemons running on the Node.
+	// +optional
+	DaemonEndpoints NodeDaemonEndpoints `json:"daemonEndpoints,omitempty" protobuf:"bytes,6,opt,name=daemonEndpoints"`
+	// Set of ids/uuids to uniquely identify the node.
+	// More info: https://kubernetes.io/docs/concepts/nodes/node/#info
+	// +optional
+	NodeInfo NodeSystemInfo `json:"nodeInfo,omitempty" protobuf:"bytes,7,opt,name=nodeInfo"`
+	// List of container images on this node
+	// +optional
+	Images []ContainerImage `json:"images,omitempty" protobuf:"bytes,8,rep,name=images"`
+	// List of attachable volumes in use (mounted) by the node.
+	// +optional
+	VolumesInUse []UniqueVolumeName `json:"volumesInUse,omitempty" protobuf:"bytes,9,rep,name=volumesInUse"`
+	// List of volumes that are attached to the node.
+	// +optional
+	VolumesAttached []AttachedVolume `json:"volumesAttached,omitempty" protobuf:"bytes,10,rep,name=volumesAttached"`
+	// Status of the config assigned to the node via the dynamic Kubelet config feature.
+	// +optional
+	Config *NodeConfigStatus `json:"config,omitempty" protobuf:"bytes,11,opt,name=config"`
+}
+
+type UniqueVolumeName string
+
+// AttachedVolume describes a volume attached to a node
+type AttachedVolume struct {
+	// Name of the attached volume
+	Name UniqueVolumeName `json:"name" protobuf:"bytes,1,rep,name=name"`
+
+	// DevicePath represents the device path where the volume should be available
+	DevicePath string `json:"devicePath" protobuf:"bytes,2,rep,name=devicePath"`
+}
+
+// AvoidPods describes pods that should avoid this node. This is the value for a
+// Node annotation with key scheduler.alpha.kubernetes.io/preferAvoidPods and
+// will eventually become a field of NodeStatus.
+type AvoidPods struct {
+	// Bounded-size list of signatures of pods that should avoid this node, sorted
+	// in timestamp order from oldest to newest. The size of the slice is unspecified.
+	// +optional
+	PreferAvoidPods []PreferAvoidPodsEntry `json:"preferAvoidPods,omitempty" protobuf:"bytes,1,rep,name=preferAvoidPods"`
+}
+
+// Describes a class of pods that should avoid this node.
+type PreferAvoidPodsEntry struct {
+	// The class of pods.
+	PodSignature PodSignature `json:"podSignature" protobuf:"bytes,1,opt,name=podSignature"`
+	// Time at which this entry was added to the list.
+	// +optional
+	EvictionTime metav1.Time `json:"evictionTime,omitempty" protobuf:"bytes,2,opt,name=evictionTime"`
+	// (brief) reason why this entry was added to the list.
+	// +optional
+	Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`
+	// Human readable message indicating why this entry was added to the list.
+	// +optional
+	Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"`
+}
+
+// Describes the class of pods that should avoid this node.
+// Exactly one field should be set.
+type PodSignature struct {
+	// Reference to controller whose pods should avoid this node.
+	// +optional
+	PodController *metav1.OwnerReference `json:"podController,omitempty" protobuf:"bytes,1,opt,name=podController"`
+}
+
+// Describes a container image
+type ContainerImage struct {
+	// Names by which this image is known.
+	// e.g. ["k8s.gcr.io/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"]
+	Names []string `json:"names" protobuf:"bytes,1,rep,name=names"`
+	// The size of the image in bytes.
+	// +optional
+	SizeBytes int64 `json:"sizeBytes,omitempty" protobuf:"varint,2,opt,name=sizeBytes"`
+}
+
+type NodePhase string
+
+// These are the valid phases of a node.
+const (
+	// NodePending means the node has been created/added by the system, but not configured.
+	NodePending NodePhase = "Pending"
+	// NodeRunning means the node has been configured and has Kubernetes components running.
+	NodeRunning NodePhase = "Running"
+	// NodeTerminated means the node has been removed from the cluster.
+	NodeTerminated NodePhase = "Terminated"
+)
+
+type NodeConditionType string
+
+// These are valid conditions of a node. Currently, we don't have enough information to
+// decide node conditions. In the future, we will add more. The proposed set of conditions is:
+// NodeReachable, NodeLive, NodeReady, NodeSchedulable, NodeRunnable.
+const (
+	// NodeReady means kubelet is healthy and ready to accept pods.
+	NodeReady NodeConditionType = "Ready"
+	// NodeOutOfDisk means the kubelet will not accept new pods due to insufficient free disk
+	// space on the node.
+	NodeOutOfDisk NodeConditionType = "OutOfDisk"
+	// NodeMemoryPressure means the kubelet is under pressure due to insufficient available memory.
+	NodeMemoryPressure NodeConditionType = "MemoryPressure"
+	// NodeDiskPressure means the kubelet is under pressure due to insufficient available disk.
+	NodeDiskPressure NodeConditionType = "DiskPressure"
+	// NodePIDPressure means the kubelet is under pressure due to insufficient available PID.
+	NodePIDPressure NodeConditionType = "PIDPressure"
+	// NodeNetworkUnavailable means that network for the node is not correctly configured.
+	NodeNetworkUnavailable NodeConditionType = "NetworkUnavailable"
+)
+
+// NodeCondition contains condition information for a node.
+type NodeCondition struct {
+	// Type of node condition.
+	Type NodeConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=NodeConditionType"`
+	// Status of the condition, one of True, False, Unknown.
+	Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
+	// Last time we got an update on a given condition.
+	// +optional
+	LastHeartbeatTime metav1.Time `json:"lastHeartbeatTime,omitempty" protobuf:"bytes,3,opt,name=lastHeartbeatTime"`
+	// Last time the condition transitioned from one status to another.
+	// +optional
+	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"`
+	// (brief) reason for the condition's last transition.
+	// +optional
+	Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"`
+	// Human readable message indicating details about last transition.
+	// +optional
+	Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
+}
+
+type NodeAddressType string
+
+// These are the valid address types of a node.
+const (
+	NodeHostName    NodeAddressType = "Hostname"
+	NodeExternalIP  NodeAddressType = "ExternalIP"
+	NodeInternalIP  NodeAddressType = "InternalIP"
+	NodeExternalDNS NodeAddressType = "ExternalDNS"
+	NodeInternalDNS NodeAddressType = "InternalDNS"
+)
+
+// NodeAddress contains information for the node's address.
+type NodeAddress struct {
+	// Node address type, one of Hostname, ExternalIP, InternalIP, ExternalDNS or InternalDNS.
+	Type NodeAddressType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=NodeAddressType"`
+	// The node address.
+	Address string `json:"address" protobuf:"bytes,2,opt,name=address"`
+}
+
+// ResourceName is the name identifying various resources in a ResourceList.
+type ResourceName string
+
+// Resource names must be no more than 63 characters, consisting of upper- or lower-case alphanumeric characters,
+// with the -, _, and . characters allowed anywhere except the first or last character.
+// The default convention, matching that for annotations, is to use lower-case names with dashes rather than
+// camel case to separate compound words.
+// Fully-qualified resource typenames are constructed from a DNS-style subdomain, followed by a slash `/` and a name.
+const (
+	// CPU, in cores. (500m = .5 cores)
+	ResourceCPU ResourceName = "cpu"
+	// Memory, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
+	ResourceMemory ResourceName = "memory"
+	// Volume size, in bytes (e.g. 5Gi = 5GiB = 5 * 1024 * 1024 * 1024)
+	ResourceStorage ResourceName = "storage"
+	// Local ephemeral storage, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
+	// The resource name for ResourceEphemeralStorage is alpha and it can change across releases.
+	ResourceEphemeralStorage ResourceName = "ephemeral-storage"
+)
+
+const (
+	// Default namespace prefix.
+	ResourceDefaultNamespacePrefix = "kubernetes.io/"
+	// Name prefix for huge page resources (alpha).
+	ResourceHugePagesPrefix = "hugepages-"
+	// Name prefix for storage resource limits
+	ResourceAttachableVolumesPrefix = "attachable-volumes-"
+)
+
+// ResourceList is a set of (resource name, quantity) pairs.
+type ResourceList map[ResourceName]resource.Quantity
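+
+// A sketch of building a ResourceList with the quantity notation used in the
+// comments above (500m = half a core, 1Gi = 2^30 bytes); resource.MustParse
+// is from k8s.io/apimachinery/pkg/api/resource:
+//
+//   rl := ResourceList{
+//       ResourceCPU:    resource.MustParse("500m"),
+//       ResourceMemory: resource.MustParse("1Gi"),
+//   }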
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Node is a worker node in Kubernetes.
+// Each node will have a unique identifier in the cache (i.e. in etcd).
+type Node struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Spec defines the behavior of a node.
+	// https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+	// +optional
+	Spec NodeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+	// Most recently observed status of the node.
+	// Populated by the system.
+	// Read-only.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+	// +optional
+	Status NodeStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// NodeList is the whole list of all Nodes which have been registered with the master.
+type NodeList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// List of nodes
+	Items []Node `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// FinalizerName is the name identifying a finalizer during namespace lifecycle.
+type FinalizerName string
+
+// These are finalizer values internal to Kubernetes; finalizers must be qualified names unless
+// defined here or in metav1.
+const (
+	FinalizerKubernetes FinalizerName = "kubernetes"
+)
+
+// NamespaceSpec describes the attributes on a Namespace.
+type NamespaceSpec struct {
+	// Finalizers is an opaque list of values that must be empty to permanently remove object from storage.
+	// More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/
+	// +optional
+	Finalizers []FinalizerName `json:"finalizers,omitempty" protobuf:"bytes,1,rep,name=finalizers,casttype=FinalizerName"`
+}
+
+// NamespaceStatus is information about the current status of a Namespace.
+type NamespaceStatus struct {
+	// Phase is the current lifecycle phase of the namespace.
+	// More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/
+	// +optional
+	Phase NamespacePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=NamespacePhase"`
+}
+
+type NamespacePhase string
+
+// These are the valid phases of a namespace.
+const (
+	// NamespaceActive means the namespace is available for use in the system
+	NamespaceActive NamespacePhase = "Active"
+	// NamespaceTerminating means the namespace is undergoing graceful termination
+	NamespaceTerminating NamespacePhase = "Terminating"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +genclient:skipVerbs=deleteCollection
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Namespace provides a scope for Names.
+// Use of multiple namespaces is optional.
+type Namespace struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Spec defines the behavior of the Namespace.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+	// +optional
+	Spec NamespaceSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+	// Status describes the current status of a Namespace.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+	// +optional
+	Status NamespaceStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// NamespaceList is a list of Namespaces.
+type NamespaceList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is the list of Namespace objects in the list.
+	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
+	Items []Namespace `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Binding ties one object to another; for example, a pod is bound to a node by a scheduler.
+// Deprecated in 1.7, please use the bindings subresource of pods instead.
+type Binding struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// The target object that you want to bind to the standard object.
+	Target ObjectReference `json:"target" protobuf:"bytes,2,opt,name=target"`
+}
+
+// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.
+// +k8s:openapi-gen=false
+type Preconditions struct {
+	// Specifies the target UID.
+	// +optional
+	UID *types.UID `json:"uid,omitempty" protobuf:"bytes,1,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PodLogOptions is the query options for a Pod's logs REST call.
+type PodLogOptions struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// The container for which to stream logs. Defaults to the only container if there is one container in the pod.
+	// +optional
+	Container string `json:"container,omitempty" protobuf:"bytes,1,opt,name=container"`
+	// Follow the log stream of the pod. Defaults to false.
+	// +optional
+	Follow bool `json:"follow,omitempty" protobuf:"varint,2,opt,name=follow"`
+	// Return previous terminated container logs. Defaults to false.
+	// +optional
+	Previous bool `json:"previous,omitempty" protobuf:"varint,3,opt,name=previous"`
+	// A relative time in seconds before the current time from which to show logs. If this value
+	// precedes the time a pod was started, only logs since the pod start will be returned.
+	// If this value is in the future, no logs will be returned.
+	// Only one of sinceSeconds or sinceTime may be specified.
+	// +optional
+	SinceSeconds *int64 `json:"sinceSeconds,omitempty" protobuf:"varint,4,opt,name=sinceSeconds"`
+	// An RFC3339 timestamp from which to show logs. If this value
+	// precedes the time a pod was started, only logs since the pod start will be returned.
+	// If this value is in the future, no logs will be returned.
+	// Only one of sinceSeconds or sinceTime may be specified.
+	// +optional
+	SinceTime *metav1.Time `json:"sinceTime,omitempty" protobuf:"bytes,5,opt,name=sinceTime"`
+	// If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line
+	// of log output. Defaults to false.
+	// +optional
+	Timestamps bool `json:"timestamps,omitempty" protobuf:"varint,6,opt,name=timestamps"`
+	// If set, the number of lines from the end of the logs to show. If not specified,
+	// logs are shown from the creation of the container or sinceSeconds or sinceTime
+	// +optional
+	TailLines *int64 `json:"tailLines,omitempty" protobuf:"varint,7,opt,name=tailLines"`
+	// If set, the number of bytes to read from the server before terminating the
+	// log output. This may not display a complete final line of logging, and may return
+	// slightly more or slightly less than the specified limit.
+	// +optional
+	LimitBytes *int64 `json:"limitBytes,omitempty" protobuf:"varint,8,opt,name=limitBytes"`
+}
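+
+// A sketch of tailing the last 100 lines and following the stream
+// (tail is a local helper variable, not part of the API):
+//
+//   tail := int64(100)
+//   opts := PodLogOptions{Follow: true, Timestamps: true, TailLines: &tail}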
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PodAttachOptions is the query options to a Pod's remote attach call.
+// ---
+// TODO: merge w/ PodExecOptions below for stdin, stdout, etc
+// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stderr and TTY
+type PodAttachOptions struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// Stdin if true, redirects the standard input stream of the pod for this call.
+	// Defaults to false.
+	// +optional
+	Stdin bool `json:"stdin,omitempty" protobuf:"varint,1,opt,name=stdin"`
+
+	// Stdout if true indicates that stdout is to be redirected for the attach call.
+	// Defaults to true.
+	// +optional
+	Stdout bool `json:"stdout,omitempty" protobuf:"varint,2,opt,name=stdout"`
+
+	// Stderr if true indicates that stderr is to be redirected for the attach call.
+	// Defaults to true.
+	// +optional
+	Stderr bool `json:"stderr,omitempty" protobuf:"varint,3,opt,name=stderr"`
+
+	// TTY if true indicates that a tty will be allocated for the attach call.
+	// This is passed through the container runtime so the tty
+	// is allocated on the worker node by the container runtime.
+	// Defaults to false.
+	// +optional
+	TTY bool `json:"tty,omitempty" protobuf:"varint,4,opt,name=tty"`
+
+	// The container in which to execute the command.
+	// Defaults to the only container if there is only one container in the pod.
+	// +optional
+	Container string `json:"container,omitempty" protobuf:"bytes,5,opt,name=container"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PodExecOptions is the query options to a Pod's remote exec call.
+// ---
+// TODO: This is largely identical to PodAttachOptions above, make sure they stay in sync and see about merging
+// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stderr and TTY
+type PodExecOptions struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// Redirect the standard input stream of the pod for this call.
+	// Defaults to false.
+	// +optional
+	Stdin bool `json:"stdin,omitempty" protobuf:"varint,1,opt,name=stdin"`
+
+	// Redirect the standard output stream of the pod for this call.
+	// Defaults to true.
+	// +optional
+	Stdout bool `json:"stdout,omitempty" protobuf:"varint,2,opt,name=stdout"`
+
+	// Redirect the standard error stream of the pod for this call.
+	// Defaults to true.
+	// +optional
+	Stderr bool `json:"stderr,omitempty" protobuf:"varint,3,opt,name=stderr"`
+
+	// TTY if true indicates that a tty will be allocated for the exec call.
+	// Defaults to false.
+	// +optional
+	TTY bool `json:"tty,omitempty" protobuf:"varint,4,opt,name=tty"`
+
+	// Container in which to execute the command.
+	// Defaults to the only container if there is only one container in the pod.
+	// +optional
+	Container string `json:"container,omitempty" protobuf:"bytes,5,opt,name=container"`
+
+	// Command is the remote command to execute. argv array. Not executed within a shell.
+	Command []string `json:"command" protobuf:"bytes,6,rep,name=command"`
+}
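+
+// A sketch of an exec request; Command is an argv array and is not run
+// through a shell, so each argument is a separate element (here a shell is
+// invoked explicitly):
+//
+//   opts := PodExecOptions{
+//       Container: "app",
+//       Command:   []string{"sh", "-c", "echo hello"},
+//       Stdout:    true,
+//       Stderr:    true,
+//   }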
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PodPortForwardOptions is the query options to a Pod's port forward call
+// when using WebSockets.
+// The `port` query parameter must specify the port or
+// ports (comma separated) to forward over.
+// Port forwarding over SPDY does not use these options. It requires the port
+// to be passed in the `port` header as part of request.
+type PodPortForwardOptions struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// List of ports to forward
+	// Required when using WebSockets
+	// +optional
+	Ports []int32 `json:"ports,omitempty" protobuf:"varint,1,rep,name=ports"`
+}
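+
+// A sketch of a WebSocket port-forward request for two ports (port values
+// are illustrative):
+//
+//   opts := PodPortForwardOptions{Ports: []int32{8080, 8443}}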
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PodProxyOptions is the query options to a Pod's proxy call.
+type PodProxyOptions struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// Path is the URL path to use for the current proxy request to pod.
+	// +optional
+	Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// NodeProxyOptions is the query options to a Node's proxy call.
+type NodeProxyOptions struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// Path is the URL path to use for the current proxy request to node.
+	// +optional
+	Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ServiceProxyOptions is the query options to a Service's proxy call.
+type ServiceProxyOptions struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// Path is the part of URLs that include service endpoints, suffixes,
+	// and parameters to use for the current proxy request to service.
+	// For example, the whole request URL is
+	// http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy.
+	// Path is _search?q=user:kimchy.
+	// +optional
+	Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
+}
+
+// ObjectReference contains enough information to let you inspect or modify the referred object.
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type ObjectReference struct {
+	// Kind of the referent.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+	// +optional
+	Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"`
+	// Namespace of the referent.
+	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
+	// +optional
+	Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"`
+	// Name of the referent.
+	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+	// +optional
+	Name string `json:"name,omitempty" protobuf:"bytes,3,opt,name=name"`
+	// UID of the referent.
+	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
+	// +optional
+	UID types.UID `json:"uid,omitempty" protobuf:"bytes,4,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"`
+	// API version of the referent.
+	// +optional
+	APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,5,opt,name=apiVersion"`
+	// Specific resourceVersion to which this reference is made, if any.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency
+	// +optional
+	ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"`
+
+	// If referring to a piece of an object instead of an entire object, this string
+	// should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
+	// For example, if the object reference is to a container within a pod, this would take on a value like:
+	// "spec.containers{name}" (where "name" refers to the name of the container that triggered
+	// the event) or if no container name is specified "spec.containers[2]" (container with
+	// index 2 in this pod). This syntax is chosen only to have some well-defined way of
+	// referencing a part of an object.
+	// TODO: this design is not final and this field is subject to change in the future.
+	// +optional
+	FieldPath string `json:"fieldPath,omitempty" protobuf:"bytes,7,opt,name=fieldPath"`
+}
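+
+// A sketch of an ObjectReference that points at a container within a pod,
+// using the FieldPath syntax described above (names are hypothetical):
+//
+//   ref := ObjectReference{
+//       Kind:      "Pod",
+//       Namespace: "default",
+//       Name:      "web-0",
+//       FieldPath: "spec.containers{app}",
+//   }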
+
+// LocalObjectReference contains enough information to let you locate the
+// referenced object inside the same namespace.
+type LocalObjectReference struct {
+	// Name of the referent.
+	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+	// TODO: Add other useful fields. apiVersion, kind, uid?
+	// +optional
+	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
+}
+
+// TypedLocalObjectReference contains enough information to let you locate the
+// typed referenced object inside the same namespace.
+type TypedLocalObjectReference struct {
+	// APIGroup is the group for the resource being referenced.
+	// If APIGroup is not specified, the specified Kind must be in the core API group.
+	// For any other third-party types, APIGroup is required.
+	// +optional
+	APIGroup *string `json:"apiGroup" protobuf:"bytes,1,opt,name=apiGroup"`
+	// Kind is the type of resource being referenced
+	Kind string `json:"kind" protobuf:"bytes,2,opt,name=kind"`
+	// Name is the name of resource being referenced
+	Name string `json:"name" protobuf:"bytes,3,opt,name=name"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// SerializedReference is a reference to serialized object.
+type SerializedReference struct {
+	metav1.TypeMeta `json:",inline"`
+	// The reference to an object in the system.
+	// +optional
+	Reference ObjectReference `json:"reference,omitempty" protobuf:"bytes,1,opt,name=reference"`
+}
+
+// EventSource contains information for an event.
+type EventSource struct {
+	// Component from which the event is generated.
+	// +optional
+	Component string `json:"component,omitempty" protobuf:"bytes,1,opt,name=component"`
+	// Node name on which the event is generated.
+	// +optional
+	Host string `json:"host,omitempty" protobuf:"bytes,2,opt,name=host"`
+}
+
+// Valid values for event types (new types could be added in the future)
+const (
+	// Information only and will not cause any problems
+	EventTypeNormal string = "Normal"
+	// These events are to warn that something might go wrong
+	EventTypeWarning string = "Warning"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Event is a report of an event somewhere in the cluster.
+type Event struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"`
+
+	// The object that this event is about.
+	InvolvedObject ObjectReference `json:"involvedObject" protobuf:"bytes,2,opt,name=involvedObject"`
+
+	// This should be a short, machine understandable string that gives the reason
+	// for the transition into the object's current status.
+	// TODO: provide exact specification for format.
+	// +optional
+	Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`
+
+	// A human-readable description of the status of this operation.
+	// TODO: decide on maximum length.
+	// +optional
+	Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"`
+
+	// The component reporting this event. Should be a short machine understandable string.
+	// +optional
+	Source EventSource `json:"source,omitempty" protobuf:"bytes,5,opt,name=source"`
+
+	// The time at which the event was first recorded. (Time of server receipt is in TypeMeta.)
+	// +optional
+	FirstTimestamp metav1.Time `json:"firstTimestamp,omitempty" protobuf:"bytes,6,opt,name=firstTimestamp"`
+
+	// The time at which the most recent occurrence of this event was recorded.
+	// +optional
+	LastTimestamp metav1.Time `json:"lastTimestamp,omitempty" protobuf:"bytes,7,opt,name=lastTimestamp"`
+
+	// The number of times this event has occurred.
+	// +optional
+	Count int32 `json:"count,omitempty" protobuf:"varint,8,opt,name=count"`
+
+	// Type of this event (Normal, Warning), new types could be added in the future
+	// +optional
+	Type string `json:"type,omitempty" protobuf:"bytes,9,opt,name=type"`
+
+	// Time when this Event was first observed.
+	// +optional
+	EventTime metav1.MicroTime `json:"eventTime,omitempty" protobuf:"bytes,10,opt,name=eventTime"`
+
+	// Data about the Event series this event represents or nil if it's a singleton Event.
+	// +optional
+	Series *EventSeries `json:"series,omitempty" protobuf:"bytes,11,opt,name=series"`
+
+	// What action was taken/failed regarding the Regarding object.
+	// +optional
+	Action string `json:"action,omitempty" protobuf:"bytes,12,opt,name=action"`
+
+	// Optional secondary object for more complex actions.
+	// +optional
+	Related *ObjectReference `json:"related,omitempty" protobuf:"bytes,13,opt,name=related"`
+
+	// Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`.
+	// +optional
+	ReportingController string `json:"reportingComponent" protobuf:"bytes,14,opt,name=reportingComponent"`
+
+	// ID of the controller instance, e.g. `kubelet-xyzf`.
+	// +optional
+	ReportingInstance string `json:"reportingInstance" protobuf:"bytes,15,opt,name=reportingInstance"`
+}
+
+// EventSeries contains information on a series of events, i.e. something that was/is happening
+// continuously for some time.
+type EventSeries struct {
+	// Number of occurrences in this series up to the last heartbeat time
+	Count int32 `json:"count,omitempty" protobuf:"varint,1,name=count"`
+	// Time of the last occurrence observed
+	LastObservedTime metav1.MicroTime `json:"lastObservedTime,omitempty" protobuf:"bytes,2,name=lastObservedTime"`
+	// State of this Series: Ongoing or Finished
+	State EventSeriesState `json:"state,omitempty" protobuf:"bytes,3,name=state"`
+}
+
+type EventSeriesState string
+
+const (
+	EventSeriesStateOngoing  EventSeriesState = "Ongoing"
+	EventSeriesStateFinished EventSeriesState = "Finished"
+	EventSeriesStateUnknown  EventSeriesState = "Unknown"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// EventList is a list of events.
+type EventList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// List of events
+	Items []Event `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// List holds a list of objects, which may not be known by the server.
+type List metav1.List
+
+// LimitType is a type of object that is limited
+type LimitType string
+
+const (
+	// Limit that applies to all pods in a namespace
+	LimitTypePod LimitType = "Pod"
+	// Limit that applies to all containers in a namespace
+	LimitTypeContainer LimitType = "Container"
+	// Limit that applies to all persistent volume claims in a namespace
+	LimitTypePersistentVolumeClaim LimitType = "PersistentVolumeClaim"
+)
+
+// LimitRangeItem defines a min/max usage limit for any resource that matches on kind.
+type LimitRangeItem struct {
+	// Type of resource that this limit applies to.
+	// +optional
+	Type LimitType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=LimitType"`
+	// Max usage constraints on this kind by resource name.
+	// +optional
+	Max ResourceList `json:"max,omitempty" protobuf:"bytes,2,rep,name=max,casttype=ResourceList,castkey=ResourceName"`
+	// Min usage constraints on this kind by resource name.
+	// +optional
+	Min ResourceList `json:"min,omitempty" protobuf:"bytes,3,rep,name=min,casttype=ResourceList,castkey=ResourceName"`
+	// Default resource requirement limit value by resource name if resource limit is omitted.
+	// +optional
+	Default ResourceList `json:"default,omitempty" protobuf:"bytes,4,rep,name=default,casttype=ResourceList,castkey=ResourceName"`
+	// DefaultRequest is the default resource requirement request value by resource name if resource request is omitted.
+	// +optional
+	DefaultRequest ResourceList `json:"defaultRequest,omitempty" protobuf:"bytes,5,rep,name=defaultRequest,casttype=ResourceList,castkey=ResourceName"`
+	// MaxLimitRequestRatio if specified, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource.
+	// +optional
+	MaxLimitRequestRatio ResourceList `json:"maxLimitRequestRatio,omitempty" protobuf:"bytes,6,rep,name=maxLimitRequestRatio,casttype=ResourceList,castkey=ResourceName"`
+}
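+
+// A worked example of MaxLimitRequestRatio (values are illustrative): a
+// container requesting 250m CPU with a 500m limit has a limit/request ratio
+// of 2, so it is admitted only if maxLimitRequestRatio["cpu"] >= 2:
+//
+//   item := LimitRangeItem{
+//       Type:                 LimitTypeContainer,
+//       MaxLimitRequestRatio: ResourceList{ResourceCPU: resource.MustParse("2")},
+//   }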
+
+// LimitRangeSpec defines a min/max usage limit for resources that match on kind.
+type LimitRangeSpec struct {
+	// Limits is the list of LimitRangeItem objects that are enforced.
+	Limits []LimitRangeItem `json:"limits" protobuf:"bytes,1,rep,name=limits"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// LimitRange sets resource usage limits for each kind of resource in a Namespace.
+type LimitRange struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Spec defines the limits enforced.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+	// +optional
+	Spec LimitRangeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+}
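+
+// Illustrative sketch (not part of the API, added for clarity): a LimitRange
+// bounding per-container CPU in a namespace. Assumes resource.MustParse from
+// k8s.io/apimachinery/pkg/api/resource and the ResourceCPU ("cpu") resource
+// name defined elsewhere in this file.
+//
+//	lr := LimitRange{
+//		ObjectMeta: metav1.ObjectMeta{Name: "cpu-bounds", Namespace: "default"},
+//		Spec: LimitRangeSpec{
+//			Limits: []LimitRangeItem{{
+//				Type:    LimitTypeContainer,
+//				Min:     ResourceList{ResourceCPU: resource.MustParse("100m")},
+//				Max:     ResourceList{ResourceCPU: resource.MustParse("2")},
+//				Default: ResourceList{ResourceCPU: resource.MustParse("500m")},
+//			}},
+//		},
+//	}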
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// LimitRangeList is a list of LimitRange items.
+type LimitRangeList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is a list of LimitRange objects.
+	// More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
+	Items []LimitRange `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// The following identify resource constants for Kubernetes object types
+const (
+	// Pods, number
+	ResourcePods ResourceName = "pods"
+	// Services, number
+	ResourceServices ResourceName = "services"
+	// ReplicationControllers, number
+	ResourceReplicationControllers ResourceName = "replicationcontrollers"
+	// ResourceQuotas, number
+	ResourceQuotas ResourceName = "resourcequotas"
+	// ResourceSecrets, number
+	ResourceSecrets ResourceName = "secrets"
+	// ResourceConfigMaps, number
+	ResourceConfigMaps ResourceName = "configmaps"
+	// ResourcePersistentVolumeClaims, number
+	ResourcePersistentVolumeClaims ResourceName = "persistentvolumeclaims"
+	// ResourceServicesNodePorts, number
+	ResourceServicesNodePorts ResourceName = "services.nodeports"
+	// ResourceServicesLoadBalancers, number
+	ResourceServicesLoadBalancers ResourceName = "services.loadbalancers"
+	// CPU request, in cores. (500m = .5 cores)
+	ResourceRequestsCPU ResourceName = "requests.cpu"
+	// Memory request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
+	ResourceRequestsMemory ResourceName = "requests.memory"
+	// Storage request, in bytes
+	ResourceRequestsStorage ResourceName = "requests.storage"
+	// Local ephemeral storage request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
+	ResourceRequestsEphemeralStorage ResourceName = "requests.ephemeral-storage"
+	// CPU limit, in cores. (500m = .5 cores)
+	ResourceLimitsCPU ResourceName = "limits.cpu"
+	// Memory limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
+	ResourceLimitsMemory ResourceName = "limits.memory"
+	// Local ephemeral storage limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
+	ResourceLimitsEphemeralStorage ResourceName = "limits.ephemeral-storage"
+)
+
+// The following identify resource prefixes for Kubernetes object types
+const (
+	// HugePages request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
+	// As burst is not supported for HugePages, we only quota its request and ignore the limit.
+	ResourceRequestsHugePagesPrefix = "requests.hugepages-"
+	// Default resource requests prefix
+	DefaultResourceRequestsPrefix = "requests."
+)
+
+// A ResourceQuotaScope defines a filter that must match each object tracked by a quota
+type ResourceQuotaScope string
+
+const (
+	// Match all pod objects where spec.activeDeadlineSeconds is set
+	ResourceQuotaScopeTerminating ResourceQuotaScope = "Terminating"
+	// Match all pod objects where spec.activeDeadlineSeconds is not set
+	ResourceQuotaScopeNotTerminating ResourceQuotaScope = "NotTerminating"
+	// Match all pod objects that have best effort quality of service
+	ResourceQuotaScopeBestEffort ResourceQuotaScope = "BestEffort"
+	// Match all pod objects that do not have best effort quality of service
+	ResourceQuotaScopeNotBestEffort ResourceQuotaScope = "NotBestEffort"
+	// Match all pod objects that have a priority class specified
+	ResourceQuotaScopePriorityClass ResourceQuotaScope = "PriorityClass"
+)
+
+// ResourceQuotaSpec defines the desired hard limits to enforce for Quota.
+type ResourceQuotaSpec struct {
+	// hard is the set of desired hard limits for each named resource.
+	// More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/
+	// +optional
+	Hard ResourceList `json:"hard,omitempty" protobuf:"bytes,1,rep,name=hard,casttype=ResourceList,castkey=ResourceName"`
+	// A collection of filters that must match each object tracked by a quota.
+	// If not specified, the quota matches all objects.
+	// +optional
+	Scopes []ResourceQuotaScope `json:"scopes,omitempty" protobuf:"bytes,2,rep,name=scopes,casttype=ResourceQuotaScope"`
+	// scopeSelector is also a collection of filters, like scopes, that must match each object tracked by a quota,
+	// but is expressed using ScopeSelectorOperator in combination with possible values.
+	// For a resource to match, both scopes AND scopeSelector (if specified in spec) must be matched.
+	// +optional
+	ScopeSelector *ScopeSelector `json:"scopeSelector,omitempty" protobuf:"bytes,3,opt,name=scopeSelector"`
+}
+
+// A scope selector represents the AND of the selectors represented
+// by the scoped-resource selector requirements.
+type ScopeSelector struct {
+	// A list of scope selector requirements by scope of the resources.
+	// +optional
+	MatchExpressions []ScopedResourceSelectorRequirement `json:"matchExpressions,omitempty" protobuf:"bytes,1,rep,name=matchExpressions"`
+}
+
+// A scoped-resource selector requirement is a selector that contains values, a scope name, and an operator
+// that relates the scope name and values.
+type ScopedResourceSelectorRequirement struct {
+	// The name of the scope that the selector applies to.
+	ScopeName ResourceQuotaScope `json:"scopeName" protobuf:"bytes,1,opt,name=scopeName"`
+	// Represents a scope's relationship to a set of values.
+	// Valid operators are In, NotIn, Exists, DoesNotExist.
+	Operator ScopeSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=ScopedResourceSelectorOperator"`
+	// An array of string values. If the operator is In or NotIn,
+	// the values array must be non-empty. If the operator is Exists or DoesNotExist,
+	// the values array must be empty.
+	// This array is replaced during a strategic merge patch.
+	// +optional
+	Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"`
+}
+
+// A scope selector operator is the set of operators that can be used in
+// a scope selector requirement.
+type ScopeSelectorOperator string
+
+const (
+	ScopeSelectorOpIn           ScopeSelectorOperator = "In"
+	ScopeSelectorOpNotIn        ScopeSelectorOperator = "NotIn"
+	ScopeSelectorOpExists       ScopeSelectorOperator = "Exists"
+	ScopeSelectorOpDoesNotExist ScopeSelectorOperator = "DoesNotExist"
+)
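+
+// Illustrative sketch (not part of the API, added for clarity): a quota spec
+// capping pod count and CPU requests, scoped via a scope selector to a
+// hypothetical "high" PriorityClass. resource.MustParse is from
+// k8s.io/apimachinery/pkg/api/resource.
+//
+//	spec := ResourceQuotaSpec{
+//		Hard: ResourceList{
+//			ResourcePods:        resource.MustParse("10"),
+//			ResourceRequestsCPU: resource.MustParse("4"),
+//		},
+//		ScopeSelector: &ScopeSelector{
+//			MatchExpressions: []ScopedResourceSelectorRequirement{{
+//				ScopeName: ResourceQuotaScopePriorityClass,
+//				Operator:  ScopeSelectorOpIn,
+//				Values:    []string{"high"},
+//			}},
+//		},
+//	}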
+
+// ResourceQuotaStatus defines the enforced hard limits and observed use.
+type ResourceQuotaStatus struct {
+	// Hard is the set of enforced hard limits for each named resource.
+	// More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/
+	// +optional
+	Hard ResourceList `json:"hard,omitempty" protobuf:"bytes,1,rep,name=hard,casttype=ResourceList,castkey=ResourceName"`
+	// Used is the current observed total usage of the resource in the namespace.
+	// +optional
+	Used ResourceList `json:"used,omitempty" protobuf:"bytes,2,rep,name=used,casttype=ResourceList,castkey=ResourceName"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ResourceQuota sets aggregate quota restrictions enforced per namespace
+type ResourceQuota struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Spec defines the desired quota.
+	// https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+	// +optional
+	Spec ResourceQuotaSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+	// Status defines the actual enforced quota and its current usage.
+	// https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+	// +optional
+	Status ResourceQuotaStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ResourceQuotaList is a list of ResourceQuota items.
+type ResourceQuotaList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is a list of ResourceQuota objects.
+	// More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/
+	Items []ResourceQuota `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Secret holds secret data of a certain type. The total bytes of the values in
+// the Data field must be less than MaxSecretSize bytes.
+type Secret struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Data contains the secret data. Each key must consist of alphanumeric
+	// characters, '-', '_' or '.'. The serialized form of the secret data is a
+	// base64 encoded string, representing the arbitrary (possibly non-string)
+	// data value here. Described in https://tools.ietf.org/html/rfc4648#section-4
+	// +optional
+	Data map[string][]byte `json:"data,omitempty" protobuf:"bytes,2,rep,name=data"`
+
+	// stringData allows specifying non-binary secret data in string form.
+	// It is provided as a write-only convenience field.
+	// All keys and values are merged into the data field on write, overwriting any existing values.
+	// It is never output when reading from the API.
+	// +k8s:conversion-gen=false
+	// +optional
+	StringData map[string]string `json:"stringData,omitempty" protobuf:"bytes,4,rep,name=stringData"`
+
+	// Used to facilitate programmatic handling of secret data.
+	// +optional
+	Type SecretType `json:"type,omitempty" protobuf:"bytes,3,opt,name=type,casttype=SecretType"`
+}
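+
+// Illustrative sketch (not part of the API, added for clarity): stringData is
+// the write-only convenience form; on write the server merges it into data as
+// base64-encoded bytes.
+//
+//	s := Secret{
+//		ObjectMeta: metav1.ObjectMeta{Name: "db-creds", Namespace: "default"},
+//		Type:       SecretTypeOpaque,
+//		StringData: map[string]string{"username": "admin"},
+//		Data:       map[string][]byte{"password": []byte("s3cr3t")},
+//	}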
+
+const MaxSecretSize = 1 * 1024 * 1024
+
+type SecretType string
+
+const (
+	// SecretTypeOpaque is the default. Arbitrary user-defined data
+	SecretTypeOpaque SecretType = "Opaque"
+
+	// SecretTypeServiceAccountToken contains a token that identifies a service account to the API
+	//
+	// Required fields:
+	// - Secret.Annotations["kubernetes.io/service-account.name"] - the name of the ServiceAccount the token identifies
+	// - Secret.Annotations["kubernetes.io/service-account.uid"] - the UID of the ServiceAccount the token identifies
+	// - Secret.Data["token"] - a token that identifies the service account to the API
+	SecretTypeServiceAccountToken SecretType = "kubernetes.io/service-account-token"
+
+	// ServiceAccountNameKey is the key of the required annotation for SecretTypeServiceAccountToken secrets
+	ServiceAccountNameKey = "kubernetes.io/service-account.name"
+	// ServiceAccountUIDKey is the key of the required annotation for SecretTypeServiceAccountToken secrets
+	ServiceAccountUIDKey = "kubernetes.io/service-account.uid"
+	// ServiceAccountTokenKey is the key of the required data for SecretTypeServiceAccountToken secrets
+	ServiceAccountTokenKey = "token"
+	// ServiceAccountKubeconfigKey is the key of the optional kubeconfig data for SecretTypeServiceAccountToken secrets
+	ServiceAccountKubeconfigKey = "kubernetes.kubeconfig"
+	// ServiceAccountRootCAKey is the key of the optional root certificate authority for SecretTypeServiceAccountToken secrets
+	ServiceAccountRootCAKey = "ca.crt"
+	// ServiceAccountNamespaceKey is the key of the optional namespace to use as the default for namespaced API calls
+	ServiceAccountNamespaceKey = "namespace"
+
+	// SecretTypeDockercfg contains a dockercfg file that follows the same format rules as ~/.dockercfg
+	//
+	// Required fields:
+	// - Secret.Data[".dockercfg"] - a serialized ~/.dockercfg file
+	SecretTypeDockercfg SecretType = "kubernetes.io/dockercfg"
+
+	// DockerConfigKey is the key of the required data for SecretTypeDockercfg secrets
+	DockerConfigKey = ".dockercfg"
+
+	// SecretTypeDockerConfigJson contains a dockercfg file that follows the same format rules as ~/.docker/config.json
+	//
+	// Required fields:
+	// - Secret.Data[".dockerconfigjson"] - a serialized ~/.docker/config.json file
+	SecretTypeDockerConfigJson SecretType = "kubernetes.io/dockerconfigjson"
+
+	// DockerConfigJsonKey is the key of the required data for SecretTypeDockerConfigJson secrets
+	DockerConfigJsonKey = ".dockerconfigjson"
+
+	// SecretTypeBasicAuth contains data needed for basic authentication.
+	//
+	// At least one of the following fields is required:
+	// - Secret.Data["username"] - username used for authentication
+	// - Secret.Data["password"] - password or token needed for authentication
+	SecretTypeBasicAuth SecretType = "kubernetes.io/basic-auth"
+
+	// BasicAuthUsernameKey is the key of the username for SecretTypeBasicAuth secrets
+	BasicAuthUsernameKey = "username"
+	// BasicAuthPasswordKey is the key of the password or token for SecretTypeBasicAuth secrets
+	BasicAuthPasswordKey = "password"
+
+	// SecretTypeSSHAuth contains data needed for SSH authentication.
+	//
+	// Required field:
+	// - Secret.Data["ssh-privatekey"] - private SSH key needed for authentication
+	SecretTypeSSHAuth SecretType = "kubernetes.io/ssh-auth"
+
+	// SSHAuthPrivateKey is the key of the required SSH private key for SecretTypeSSHAuth secrets
+	SSHAuthPrivateKey = "ssh-privatekey"
+	// SecretTypeTLS contains information about a TLS client or server secret. It
+	// is primarily used with TLS termination of the Ingress resource, but may be
+	// used in other types.
+	//
+	// Required fields:
+	// - Secret.Data["tls.key"] - TLS private key.
+	// - Secret.Data["tls.crt"] - TLS certificate.
+	// TODO: Consider supporting different formats, specifying CA/destinationCA.
+	SecretTypeTLS SecretType = "kubernetes.io/tls"
+
+	// TLSCertKey is the key for TLS certificates in a TLS secret.
+	TLSCertKey = "tls.crt"
+	// TLSPrivateKeyKey is the key for the private key field in a TLS secret.
+	TLSPrivateKeyKey = "tls.key"
+	// SecretTypeBootstrapToken is used during the automated bootstrap process (first
+	// implemented by kubeadm). It stores tokens that are used to sign well known
+	// ConfigMaps. They are used for authn.
+	SecretTypeBootstrapToken SecretType = "bootstrap.kubernetes.io/token"
+)
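+
+// Illustrative sketch (not part of the API, added for clarity): a TLS secret
+// populated with the two required keys via the constants above. certPEM and
+// keyPEM are placeholder []byte values.
+//
+//	tls := Secret{
+//		ObjectMeta: metav1.ObjectMeta{Name: "ingress-tls"},
+//		Type:       SecretTypeTLS,
+//		Data: map[string][]byte{
+//			TLSCertKey:       certPEM, // PEM-encoded certificate chain
+//			TLSPrivateKeyKey: keyPEM,  // PEM-encoded private key
+//		},
+//	}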
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// SecretList is a list of Secret.
+type SecretList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is a list of secret objects.
+	// More info: https://kubernetes.io/docs/concepts/configuration/secret
+	Items []Secret `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ConfigMap holds configuration data for pods to consume.
+type ConfigMap struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Data contains the configuration data.
+	// Each key must consist of alphanumeric characters, '-', '_' or '.'.
+	// Values with non-UTF-8 byte sequences must use the BinaryData field.
+	// The keys stored in Data must not overlap with the keys in
+	// the BinaryData field; this is enforced during the validation process.
+	// +optional
+	Data map[string]string `json:"data,omitempty" protobuf:"bytes,2,rep,name=data"`
+
+	// BinaryData contains the binary data.
+	// Each key must consist of alphanumeric characters, '-', '_' or '.'.
+	// BinaryData can contain byte sequences that are not in the UTF-8 range.
+	// The keys stored in BinaryData must not overlap with the ones in
+	// the Data field; this is enforced during the validation process.
+	// Using this field will require 1.10+ apiserver and
+	// kubelet.
+	// +optional
+	BinaryData map[string][]byte `json:"binaryData,omitempty" protobuf:"bytes,3,rep,name=binaryData"`
+}
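+
+// Illustrative sketch (not part of the API, added for clarity): Data holds
+// UTF-8 values and BinaryData holds arbitrary bytes; a key may appear in only
+// one of the two maps.
+//
+//	cm := ConfigMap{
+//		ObjectMeta: metav1.ObjectMeta{Name: "app-config"},
+//		Data:       map[string]string{"log-level": "debug"},
+//		BinaryData: map[string][]byte{"seed.bin": {0x00, 0xff}},
+//	}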
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ConfigMapList is a resource containing a list of ConfigMap objects.
+type ConfigMapList struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is the list of ConfigMaps.
+	Items []ConfigMap `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// Type and constants for component health validation.
+type ComponentConditionType string
+
+// These are the valid conditions for the component.
+const (
+	ComponentHealthy ComponentConditionType = "Healthy"
+)
+
+// Information about the condition of a component.
+type ComponentCondition struct {
+	// Type of condition for a component.
+	// Valid value: "Healthy"
+	Type ComponentConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ComponentConditionType"`
+	// Status of the condition for a component.
+	// Valid values for "Healthy": "True", "False", or "Unknown".
+	Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
+	// Message about the condition for a component.
+	// For example, information about a health check.
+	// +optional
+	Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"`
+	// Condition error code for a component.
+	// For example, a health check error code.
+	// +optional
+	Error string `json:"error,omitempty" protobuf:"bytes,4,opt,name=error"`
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ComponentStatus (and ComponentStatusList) holds the cluster validation info.
+type ComponentStatus struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// List of component conditions observed
+	// +optional
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	Conditions []ComponentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Status of all the conditions for the component as a list of ComponentStatus objects.
+type ComponentStatusList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// List of ComponentStatus objects.
+	Items []ComponentStatus `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// DownwardAPIVolumeSource represents a volume containing downward API info.
+// Downward API volumes support ownership management and SELinux relabeling.
+type DownwardAPIVolumeSource struct {
+	// Items is a list of downward API volume file
+	// +optional
+	Items []DownwardAPIVolumeFile `json:"items,omitempty" protobuf:"bytes,1,rep,name=items"`
+	// Optional: mode bits to use on created files by default. Must be a
+	// value between 0 and 0777. Defaults to 0644.
+	// Directories within the path are not affected by this setting.
+	// This might be in conflict with other options that affect the file
+	// mode, like fsGroup, and the result can be other mode bits set.
+	// +optional
+	DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,2,opt,name=defaultMode"`
+}
+
+const (
+	DownwardAPIVolumeSourceDefaultMode int32 = 0644
+)
+
+// DownwardAPIVolumeFile represents information to create the file containing the pod field
+type DownwardAPIVolumeFile struct {
+	// Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be UTF-8 encoded. The first item of the relative path must not start with '..'
+	Path string `json:"path" protobuf:"bytes,1,opt,name=path"`
+	// Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.
+	// +optional
+	FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty" protobuf:"bytes,2,opt,name=fieldRef"`
+	// Selects a resource of the container: only resources limits and requests
+	// (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
+	// +optional
+	ResourceFieldRef *ResourceFieldSelector `json:"resourceFieldRef,omitempty" protobuf:"bytes,3,opt,name=resourceFieldRef"`
+	// Optional: mode bits to use on this file, must be a value between 0
+	// and 0777. If not specified, the volume defaultMode will be used.
+	// This might be in conflict with other options that affect the file
+	// mode, like fsGroup, and the result can be other mode bits set.
+	// +optional
+	Mode *int32 `json:"mode,omitempty" protobuf:"varint,4,opt,name=mode"`
+}
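+
+// Illustrative sketch (not part of the API, added for clarity): a downward API
+// volume exposing pod labels via fieldRef and a container's CPU limit via
+// resourceFieldRef. ObjectFieldSelector and ResourceFieldSelector are defined
+// elsewhere in this file; "app" names a hypothetical container.
+//
+//	mode := DownwardAPIVolumeSourceDefaultMode
+//	src := DownwardAPIVolumeSource{
+//		DefaultMode: &mode,
+//		Items: []DownwardAPIVolumeFile{
+//			{Path: "labels", FieldRef: &ObjectFieldSelector{FieldPath: "metadata.labels"}},
+//			{Path: "cpu_limit", ResourceFieldRef: &ResourceFieldSelector{
+//				ContainerName: "app",
+//				Resource:      "limits.cpu",
+//			}},
+//		},
+//	}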
+
+// Represents downward API info for projecting into a projected volume.
+// Note that this is identical to a downwardAPI volume source without the default
+// mode.
+type DownwardAPIProjection struct {
+	// Items is a list of DownwardAPIVolume file
+	// +optional
+	Items []DownwardAPIVolumeFile `json:"items,omitempty" protobuf:"bytes,1,rep,name=items"`
+}
+
+// SecurityContext holds security configuration that will be applied to a container.
+// Some fields are present in both SecurityContext and PodSecurityContext.  When both
+// are set, the values in SecurityContext take precedence.
+type SecurityContext struct {
+	// The capabilities to add/drop when running containers.
+	// Defaults to the default set of capabilities granted by the container runtime.
+	// +optional
+	Capabilities *Capabilities `json:"capabilities,omitempty" protobuf:"bytes,1,opt,name=capabilities"`
+	// Run container in privileged mode.
+	// Processes in privileged containers are essentially equivalent to root on the host.
+	// Defaults to false.
+	// +optional
+	Privileged *bool `json:"privileged,omitempty" protobuf:"varint,2,opt,name=privileged"`
+	// The SELinux context to be applied to the container.
+	// If unspecified, the container runtime will allocate a random SELinux context for each
+	// container.  May also be set in PodSecurityContext.  If set in both SecurityContext and
+	// PodSecurityContext, the value specified in SecurityContext takes precedence.
+	// +optional
+	SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,3,opt,name=seLinuxOptions"`
+	// The UID to run the entrypoint of the container process.
+	// Defaults to user specified in image metadata if unspecified.
+	// May also be set in PodSecurityContext.  If set in both SecurityContext and
+	// PodSecurityContext, the value specified in SecurityContext takes precedence.
+	// +optional
+	RunAsUser *int64 `json:"runAsUser,omitempty" protobuf:"varint,4,opt,name=runAsUser"`
+	// The GID to run the entrypoint of the container process.
+	// Uses runtime default if unset.
+	// May also be set in PodSecurityContext.  If set in both SecurityContext and
+	// PodSecurityContext, the value specified in SecurityContext takes precedence.
+	// +optional
+	RunAsGroup *int64 `json:"runAsGroup,omitempty" protobuf:"varint,8,opt,name=runAsGroup"`
+	// Indicates that the container must run as a non-root user.
+	// If true, the Kubelet will validate the image at runtime to ensure that it
+	// does not run as UID 0 (root) and fail to start the container if it does.
+	// If unset or false, no such validation will be performed.
+	// May also be set in PodSecurityContext.  If set in both SecurityContext and
+	// PodSecurityContext, the value specified in SecurityContext takes precedence.
+	// +optional
+	RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" protobuf:"varint,5,opt,name=runAsNonRoot"`
+	// Whether this container has a read-only root filesystem.
+	// Default is false.
+	// +optional
+	ReadOnlyRootFilesystem *bool `json:"readOnlyRootFilesystem,omitempty" protobuf:"varint,6,opt,name=readOnlyRootFilesystem"`
+	// AllowPrivilegeEscalation controls whether a process can gain more
+	// privileges than its parent process. This bool directly controls if
+	// the no_new_privs flag will be set on the container process.
+	// AllowPrivilegeEscalation is always true when the container:
+	// 1) is run as Privileged, or
+	// 2) has CAP_SYS_ADMIN.
+	// +optional
+	AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,7,opt,name=allowPrivilegeEscalation"`
+	// procMount denotes the type of proc mount to use for the containers.
+	// The default is DefaultProcMount which uses the container runtime defaults for
+	// readonly paths and masked paths.
+	// This requires the ProcMountType feature flag to be enabled.
+	// +optional
+	ProcMount *ProcMountType `json:"procMount,omitempty" protobuf:"bytes,9,opt,name=procMount"`
+}
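+
+// Illustrative sketch (not part of the API, added for clarity): a locked-down
+// container security context. Capability is the string type for POSIX
+// capabilities defined elsewhere in this file.
+//
+//	nonRoot, readOnly, noEscalation := true, true, false
+//	sc := SecurityContext{
+//		RunAsNonRoot:             &nonRoot,
+//		ReadOnlyRootFilesystem:   &readOnly,
+//		AllowPrivilegeEscalation: &noEscalation,
+//		Capabilities:             &Capabilities{Drop: []Capability{"ALL"}},
+//	}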
+
+type ProcMountType string
+
+const (
+	// DefaultProcMount uses the container runtime defaults for readonly and masked
+	// paths for /proc.  Most container runtimes mask certain paths in /proc to avoid
+	// accidental security exposure of special devices or information.
+	DefaultProcMount ProcMountType = "Default"
+
+	// UnmaskedProcMount bypasses the default masking behavior of the container
+	// runtime and ensures the newly created /proc for the container stays intact
+	// with no modifications.
+	UnmaskedProcMount ProcMountType = "Unmasked"
+)
+
+// SELinuxOptions are the labels to be applied to the container
+type SELinuxOptions struct {
+	// User is a SELinux user label that applies to the container.
+	// +optional
+	User string `json:"user,omitempty" protobuf:"bytes,1,opt,name=user"`
+	// Role is a SELinux role label that applies to the container.
+	// +optional
+	Role string `json:"role,omitempty" protobuf:"bytes,2,opt,name=role"`
+	// Type is a SELinux type label that applies to the container.
+	// +optional
+	Type string `json:"type,omitempty" protobuf:"bytes,3,opt,name=type"`
+	// Level is SELinux level label that applies to the container.
+	// +optional
+	Level string `json:"level,omitempty" protobuf:"bytes,4,opt,name=level"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// RangeAllocation is not a public type.
+type RangeAllocation struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Range is a string that identifies the range represented by 'data'.
+	Range string `json:"range" protobuf:"bytes,2,opt,name=range"`
+	// Data is a bit array containing all allocated addresses in the previous segment.
+	Data []byte `json:"data" protobuf:"bytes,3,opt,name=data"`
+}
+
+const (
+	// "default-scheduler" is the name of the default scheduler.
+	DefaultSchedulerName = "default-scheduler"
+
+	// RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule
+	// corresponding to every RequiredDuringScheduling affinity rule.
+	// When the --hard-pod-affinity-weight scheduler flag is not specified,
+	// DefaultHardPodAffinityWeight defines the weight of the implicit PreferredDuringScheduling affinity rule.
+	DefaultHardPodAffinitySymmetricWeight int32 = 1
+)
+
+// Sysctl defines a kernel parameter to be set
+type Sysctl struct {
+	// Name of a property to set
+	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+	// Value of a property to set
+	Value string `json:"value" protobuf:"bytes,2,opt,name=value"`
+}
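+
+// Illustrative sketch (not part of the API, added for clarity): setting a
+// kernel parameter on a pod.
+//
+//	s := Sysctl{Name: "kernel.shm_rmid_forced", Value: "1"}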
+
+// NodeResources is an object for conveying resource information about a node.
+// see http://releases.k8s.io/HEAD/docs/design/resources.md for more details.
+type NodeResources struct {
+	// Capacity represents the available resources of a node
+	Capacity ResourceList `protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"`
+}
+
+const (
+	// Enable stdin for remote command execution
+	ExecStdinParam = "input"
+	// Enable stdout for remote command execution
+	ExecStdoutParam = "output"
+	// Enable stderr for remote command execution
+	ExecStderrParam = "error"
+	// Enable TTY for remote command execution
+	ExecTTYParam = "tty"
+	// Command to run for remote command execution
+	ExecCommandParam = "command"
+
+	// Name of header that specifies stream type
+	StreamType = "streamType"
+	// Value for streamType header for stdin stream
+	StreamTypeStdin = "stdin"
+	// Value for streamType header for stdout stream
+	StreamTypeStdout = "stdout"
+	// Value for streamType header for stderr stream
+	StreamTypeStderr = "stderr"
+	// Value for streamType header for data stream
+	StreamTypeData = "data"
+	// Value for streamType header for error stream
+	StreamTypeError = "error"
+	// Value for streamType header for terminal resize stream
+	StreamTypeResize = "resize"
+
+	// Name of header that specifies the port being forwarded
+	PortHeader = "port"
+	// Name of header that specifies a request ID used to associate the error
+	// and data streams for a single forwarded connection
+	PortForwardRequestIDHeader = "requestID"
+)
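+
+// Illustrative sketch (not part of the API, added for clarity): how a client
+// might use these parameter names when assembling an exec request URL
+// (net/url assumed; the flag values here are placeholders).
+//
+//	params := url.Values{}
+//	params.Add(ExecCommandParam, "ls")
+//	params.Add(ExecStdoutParam, "1")
+//	params.Add(ExecTTYParam, "1")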
diff --git a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go
new file mode 100644
index 0000000..71f9068
--- /dev/null
+++ b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go
@@ -0,0 +1,2346 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
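+
+// Illustrative usage (not generated code, added for clarity): each generated
+// map is read through the type's SwaggerDoc method; the empty key holds the
+// type-level description.
+//
+//	docs := Affinity{}.SwaggerDoc()
+//	typeDesc := docs[""]  // "Affinity is a group of affinity scheduling rules."
+//	fieldDesc := docs["nodeAffinity"]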
+
+// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_AWSElasticBlockStoreVolumeSource = map[string]string{
+	"":          "Represents a Persistent Disk resource in AWS.\n\nAn AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling.",
+	"volumeID":  "Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore",
+	"fsType":    "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore",
+	"partition": "The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).",
+	"readOnly":  "Specify \"true\" to force and set the ReadOnly property in VolumeMounts to \"true\". If omitted, the default is \"false\". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore",
+}
+
+func (AWSElasticBlockStoreVolumeSource) SwaggerDoc() map[string]string {
+	return map_AWSElasticBlockStoreVolumeSource
+}
+
+var map_Affinity = map[string]string{
+	"":                "Affinity is a group of affinity scheduling rules.",
+	"nodeAffinity":    "Describes node affinity scheduling rules for the pod.",
+	"podAffinity":     "Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).",
+	"podAntiAffinity": "Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).",
+}
+
+func (Affinity) SwaggerDoc() map[string]string {
+	return map_Affinity
+}
+
+var map_AttachedVolume = map[string]string{
+	"":           "AttachedVolume describes a volume attached to a node",
+	"name":       "Name of the attached volume",
+	"devicePath": "DevicePath represents the device path where the volume should be available",
+}
+
+func (AttachedVolume) SwaggerDoc() map[string]string {
+	return map_AttachedVolume
+}
+
+var map_AvoidPods = map[string]string{
+	"":                "AvoidPods describes pods that should avoid this node. This is the value for a Node annotation with key scheduler.alpha.kubernetes.io/preferAvoidPods and will eventually become a field of NodeStatus.",
+	"preferAvoidPods": "Bounded-sized list of signatures of pods that should avoid this node, sorted in timestamp order from oldest to newest. Size of the slice is unspecified.",
+}
+
+func (AvoidPods) SwaggerDoc() map[string]string {
+	return map_AvoidPods
+}
+
+var map_AzureDiskVolumeSource = map[string]string{
+	"":            "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.",
+	"diskName":    "The Name of the data disk in the blob storage",
+	"diskURI":     "The URI of the data disk in the blob storage",
+	"cachingMode": "Host Caching mode: None, Read Only, Read Write.",
+	"fsType":      "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.",
+	"readOnly":    "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
+	"kind":        "Expected values: Shared (multiple blob disks per storage account), Dedicated (single blob disk per storage account), Managed (Azure managed data disk; only in managed availability set). Defaults to Shared.",
+}
+
+func (AzureDiskVolumeSource) SwaggerDoc() map[string]string {
+	return map_AzureDiskVolumeSource
+}
+
+var map_AzureFilePersistentVolumeSource = map[string]string{
+	"":                "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.",
+	"secretName":      "the name of the secret that contains Azure Storage Account Name and Key",
+	"shareName":       "Share Name",
+	"readOnly":        "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
+	"secretNamespace": "the namespace of the secret that contains Azure Storage Account Name and Key; default is the same namespace as the Pod",
+}
+
+func (AzureFilePersistentVolumeSource) SwaggerDoc() map[string]string {
+	return map_AzureFilePersistentVolumeSource
+}
+
+var map_AzureFileVolumeSource = map[string]string{
+	"":           "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.",
+	"secretName": "the name of the secret that contains Azure Storage Account Name and Key",
+	"shareName":  "Share Name",
+	"readOnly":   "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
+}
+
+func (AzureFileVolumeSource) SwaggerDoc() map[string]string {
+	return map_AzureFileVolumeSource
+}
+
+var map_Binding = map[string]string{
+	"":         "Binding ties one object to another; for example, a pod is bound to a node by a scheduler. Deprecated in 1.7, please use the bindings subresource of pods instead.",
+	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"target":   "The target object that you want to bind to the standard object.",
+}
+
+func (Binding) SwaggerDoc() map[string]string {
+	return map_Binding
+}
+
+var map_CSIPersistentVolumeSource = map[string]string{
+	"":                           "Represents storage that is managed by an external CSI volume driver (Beta feature)",
+	"driver":                     "Driver is the name of the driver to use for this volume. Required.",
+	"volumeHandle":               "VolumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. Required.",
+	"readOnly":                   "Optional: The value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write).",
+	"fsType":                     "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\".",
+	"volumeAttributes":           "Attributes of the volume to publish.",
+	"controllerPublishSecretRef": "ControllerPublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerPublishVolume and ControllerUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.",
+	"nodeStageSecretRef":         "NodeStageSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeStageVolume and NodeUnstageVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.",
+	"nodePublishSecretRef":       "NodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.",
+}
+
+func (CSIPersistentVolumeSource) SwaggerDoc() map[string]string {
+	return map_CSIPersistentVolumeSource
+}
+
+var map_Capabilities = map[string]string{
+	"":     "Adds and removes POSIX capabilities from running containers.",
+	"add":  "Added capabilities",
+	"drop": "Removed capabilities",
+}
+
+func (Capabilities) SwaggerDoc() map[string]string {
+	return map_Capabilities
+}
+
+var map_CephFSPersistentVolumeSource = map[string]string{
+	"":           "Represents a Ceph Filesystem mount that lasts the lifetime of a pod. Cephfs volumes do not support ownership management or SELinux relabeling.",
+	"monitors":   "Required: Monitors is a collection of Ceph monitors More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it",
+	"path":       "Optional: Used as the mounted root, rather than the full Ceph tree, default is /",
+	"user":       "Optional: User is the rados user name, default is admin More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it",
+	"secretFile": "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it",
+	"secretRef":  "Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it",
+	"readOnly":   "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it",
+}
+
+func (CephFSPersistentVolumeSource) SwaggerDoc() map[string]string {
+	return map_CephFSPersistentVolumeSource
+}
+
+var map_CephFSVolumeSource = map[string]string{
+	"":           "Represents a Ceph Filesystem mount that lasts the lifetime of a pod. Cephfs volumes do not support ownership management or SELinux relabeling.",
+	"monitors":   "Required: Monitors is a collection of Ceph monitors More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it",
+	"path":       "Optional: Used as the mounted root, rather than the full Ceph tree, default is /",
+	"user":       "Optional: User is the rados user name, default is admin More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it",
+	"secretFile": "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it",
+	"secretRef":  "Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it",
+	"readOnly":   "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it",
+}
+
+func (CephFSVolumeSource) SwaggerDoc() map[string]string {
+	return map_CephFSVolumeSource
+}
+
+var map_CinderPersistentVolumeSource = map[string]string{
+	"":          "Represents a Cinder volume resource in OpenStack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling.",
+	"volumeID":  "volume id used to identify the volume in cinder More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md",
+	"fsType":    "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md",
+	"readOnly":  "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md",
+	"secretRef": "Optional: points to a secret object containing parameters used to connect to OpenStack.",
+}
+
+func (CinderPersistentVolumeSource) SwaggerDoc() map[string]string {
+	return map_CinderPersistentVolumeSource
+}
+
+var map_CinderVolumeSource = map[string]string{
+	"":          "Represents a Cinder volume resource in OpenStack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling.",
+	"volumeID":  "volume id used to identify the volume in cinder More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md",
+	"fsType":    "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md",
+	"readOnly":  "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md",
+	"secretRef": "Optional: points to a secret object containing parameters used to connect to OpenStack.",
+}
+
+func (CinderVolumeSource) SwaggerDoc() map[string]string {
+	return map_CinderVolumeSource
+}
+
+var map_ClientIPConfig = map[string]string{
+	"":               "ClientIPConfig represents the configurations of Client IP based session affinity.",
+	"timeoutSeconds": "timeoutSeconds specifies the seconds of ClientIP type session sticky time. The value must be >0 && <=86400 (for 1 day) if ServiceAffinity == \"ClientIP\". Default value is 10800 (for 3 hours).",
+}
+
+func (ClientIPConfig) SwaggerDoc() map[string]string {
+	return map_ClientIPConfig
+}
+
+var map_ComponentCondition = map[string]string{
+	"":        "Information about the condition of a component.",
+	"type":    "Type of condition for a component. Valid value: \"Healthy\"",
+	"status":  "Status of the condition for a component. Valid values for \"Healthy\": \"True\", \"False\", or \"Unknown\".",
+	"message": "Message about the condition for a component. For example, information about a health check.",
+	"error":   "Condition error code for a component. For example, a health check error code.",
+}
+
+func (ComponentCondition) SwaggerDoc() map[string]string {
+	return map_ComponentCondition
+}
+
+var map_ComponentStatus = map[string]string{
+	"":           "ComponentStatus (and ComponentStatusList) holds the cluster validation info.",
+	"metadata":   "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"conditions": "List of component conditions observed",
+}
+
+func (ComponentStatus) SwaggerDoc() map[string]string {
+	return map_ComponentStatus
+}
+
+var map_ComponentStatusList = map[string]string{
+	"":         "Status of all the conditions for the component as a list of ComponentStatus objects.",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
+	"items":    "List of ComponentStatus objects.",
+}
+
+func (ComponentStatusList) SwaggerDoc() map[string]string {
+	return map_ComponentStatusList
+}
+
+var map_ConfigMap = map[string]string{
+	"":           "ConfigMap holds configuration data for pods to consume.",
+	"metadata":   "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"data":       "Data contains the configuration data. Each key must consist of alphanumeric characters, '-', '_' or '.'. Values with non-UTF-8 byte sequences must use the BinaryData field. The keys stored in Data must not overlap with the keys in the BinaryData field; this is enforced during the validation process.",
+	"binaryData": "BinaryData contains the binary data. Each key must consist of alphanumeric characters, '-', '_' or '.'. BinaryData can contain byte sequences that are not in the UTF-8 range. The keys stored in BinaryData must not overlap with the ones in the Data field; this is enforced during the validation process. Using this field will require 1.10+ apiserver and kubelet.",
+}
+
+func (ConfigMap) SwaggerDoc() map[string]string {
+	return map_ConfigMap
+}
+
+var map_ConfigMapEnvSource = map[string]string{
+	"":         "ConfigMapEnvSource selects a ConfigMap to populate the environment variables with.\n\nThe contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables.",
+	"optional": "Specify whether the ConfigMap must be defined",
+}
+
+func (ConfigMapEnvSource) SwaggerDoc() map[string]string {
+	return map_ConfigMapEnvSource
+}
+
+var map_ConfigMapKeySelector = map[string]string{
+	"":         "Selects a key from a ConfigMap.",
+	"key":      "The key to select.",
+	"optional": "Specify whether the ConfigMap or its key must be defined",
+}
+
+func (ConfigMapKeySelector) SwaggerDoc() map[string]string {
+	return map_ConfigMapKeySelector
+}
+
+var map_ConfigMapList = map[string]string{
+	"":         "ConfigMapList is a resource containing a list of ConfigMap objects.",
+	"metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"items":    "Items is the list of ConfigMaps.",
+}
+
+func (ConfigMapList) SwaggerDoc() map[string]string {
+	return map_ConfigMapList
+}
+
+var map_ConfigMapNodeConfigSource = map[string]string{
+	"":                 "ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node.",
+	"namespace":        "Namespace is the metadata.namespace of the referenced ConfigMap. This field is required in all cases.",
+	"name":             "Name is the metadata.name of the referenced ConfigMap. This field is required in all cases.",
+	"uid":              "UID is the metadata.UID of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status.",
+	"resourceVersion":  "ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status.",
+	"kubeletConfigKey": "KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure This field is required in all cases.",
+}
+
+func (ConfigMapNodeConfigSource) SwaggerDoc() map[string]string {
+	return map_ConfigMapNodeConfigSource
+}
+
+var map_ConfigMapProjection = map[string]string{
+	"":         "Adapts a ConfigMap into a projected volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a projected volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. Note that this is identical to a configmap volume source without the default mode.",
+	"items":    "If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.",
+	"optional": "Specify whether the ConfigMap or its keys must be defined",
+}
+
+func (ConfigMapProjection) SwaggerDoc() map[string]string {
+	return map_ConfigMapProjection
+}
+
+var map_ConfigMapVolumeSource = map[string]string{
+	"":            "Adapts a ConfigMap into a volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. ConfigMap volumes support ownership management and SELinux relabeling.",
+	"items":       "If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.",
+	"defaultMode": "Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.",
+	"optional":    "Specify whether the ConfigMap or its keys must be defined",
+}
+
+func (ConfigMapVolumeSource) SwaggerDoc() map[string]string {
+	return map_ConfigMapVolumeSource
+}
+
+var map_Container = map[string]string{
+	"":                         "A single application container that you want to run within a pod.",
+	"name":                     "Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.",
+	"image":                    "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.",
+	"command":                  "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
+	"args":                     "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
+	"workingDir":               "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.",
+	"ports":                    "List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.",
+	"envFrom":                  "List of sources to populate environment variables in the container. Each key defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.",
+	"env":                      "List of environment variables to set in the container. Cannot be updated.",
+	"resources":                "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/",
+	"volumeMounts":             "Pod volumes to mount into the container's filesystem. Cannot be updated.",
+	"volumeDevices":            "volumeDevices is the list of block devices to be used by the container. This is a beta feature.",
+	"livenessProbe":            "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
+	"readinessProbe":           "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
+	"lifecycle":                "Actions that the management system should take in response to container lifecycle events. Cannot be updated.",
+	"terminationMessagePath":   "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.",
+	"terminationMessagePolicy": "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.",
+	"imagePullPolicy":          "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images",
+	"securityContext":          "Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/",
+	"stdin":                    "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.",
+	"stdinOnce":                "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false",
+	"tty":                      "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.",
+}
+
+func (Container) SwaggerDoc() map[string]string {
+	return map_Container
+}
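+
+// Illustrative sketch (editor's addition, not generated code): how callers
+// typically consume the SwaggerDoc() convention above. The helper name
+// fieldDoc is hypothetical; it looks up a field's description by its JSON
+// name and falls back to the type-level description stored under the empty
+// key, e.g. fieldDoc(map_Container, "image").
+func fieldDoc(docs map[string]string, field string) string {
+	if d, ok := docs[field]; ok {
+		return d
+	}
+	return docs[""]
+}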
+
+var map_ContainerImage = map[string]string{
+	"":          "Describe a container image",
+	"names":     "Names by which this image is known. e.g. [\"k8s.gcr.io/hyperkube:v1.0.7\", \"dockerhub.io/google_containers/hyperkube:v1.0.7\"]",
+	"sizeBytes": "The size of the image in bytes.",
+}
+
+func (ContainerImage) SwaggerDoc() map[string]string {
+	return map_ContainerImage
+}
+
+var map_ContainerPort = map[string]string{
+	"":              "ContainerPort represents a network port in a single container.",
+	"name":          "If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services.",
+	"hostPort":      "Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this.",
+	"containerPort": "Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536.",
+	"protocol":      "Protocol for port. Must be UDP, TCP, or SCTP. Defaults to \"TCP\".",
+	"hostIP":        "What host IP to bind the external port to.",
+}
+
+func (ContainerPort) SwaggerDoc() map[string]string {
+	return map_ContainerPort
+}
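+
+// Illustrative sketch (editor's addition, hypothetical helper): the port
+// constraint described above for containerPort and hostPort, 0 < x < 65536.
+func validPortNumber(p int32) bool {
+	return p > 0 && p < 65536
+}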
+
+var map_ContainerState = map[string]string{
+	"":           "ContainerState holds a possible state of container. Only one of its members may be specified. If none of them is specified, the default one is ContainerStateWaiting.",
+	"waiting":    "Details about a waiting container",
+	"running":    "Details about a running container",
+	"terminated": "Details about a terminated container",
+}
+
+func (ContainerState) SwaggerDoc() map[string]string {
+	return map_ContainerState
+}
+
+var map_ContainerStateRunning = map[string]string{
+	"":          "ContainerStateRunning is a running state of a container.",
+	"startedAt": "Time at which the container was last (re-)started",
+}
+
+func (ContainerStateRunning) SwaggerDoc() map[string]string {
+	return map_ContainerStateRunning
+}
+
+var map_ContainerStateTerminated = map[string]string{
+	"":            "ContainerStateTerminated is a terminated state of a container.",
+	"exitCode":    "Exit status from the last termination of the container",
+	"signal":      "Signal from the last termination of the container",
+	"reason":      "(brief) reason from the last termination of the container",
+	"message":     "Message regarding the last termination of the container",
+	"startedAt":   "Time at which previous execution of the container started",
+	"finishedAt":  "Time at which the container last terminated",
+	"containerID": "Container's ID in the format 'docker://<container_id>'",
+}
+
+func (ContainerStateTerminated) SwaggerDoc() map[string]string {
+	return map_ContainerStateTerminated
+}
+
+var map_ContainerStateWaiting = map[string]string{
+	"":        "ContainerStateWaiting is a waiting state of a container.",
+	"reason":  "(brief) reason the container is not yet running.",
+	"message": "Message regarding why the container is not yet running.",
+}
+
+func (ContainerStateWaiting) SwaggerDoc() map[string]string {
+	return map_ContainerStateWaiting
+}
+
+var map_ContainerStatus = map[string]string{
+	"":             "ContainerStatus contains details for the current status of this container.",
+	"name":         "This must be a DNS_LABEL. Each container in a pod must have a unique name. Cannot be updated.",
+	"state":        "Details about the container's current condition.",
+	"lastState":    "Details about the container's last termination condition.",
+	"ready":        "Specifies whether the container has passed its readiness probe.",
+	"restartCount": "The number of times the container has been restarted, currently based on the number of dead containers that have not yet been removed. Note that this is calculated from dead containers. But those containers are subject to garbage collection. This value will get capped at 5 by GC.",
+	"image":        "The image the container is running. More info: https://kubernetes.io/docs/concepts/containers/images",
+	"imageID":      "ImageID of the container's image.",
+	"containerID":  "Container's ID in the format 'docker://<container_id>'.",
+}
+
+func (ContainerStatus) SwaggerDoc() map[string]string {
+	return map_ContainerStatus
+}
+
+var map_DaemonEndpoint = map[string]string{
+	"":     "DaemonEndpoint contains information about a single Daemon endpoint.",
+	"Port": "Port number of the given endpoint.",
+}
+
+func (DaemonEndpoint) SwaggerDoc() map[string]string {
+	return map_DaemonEndpoint
+}
+
+var map_DownwardAPIProjection = map[string]string{
+	"":      "Represents downward API info for projecting into a projected volume. Note that this is identical to a downwardAPI volume source without the default mode.",
+	"items": "Items is a list of DownwardAPIVolume file",
+}
+
+func (DownwardAPIProjection) SwaggerDoc() map[string]string {
+	return map_DownwardAPIProjection
+}
+
+var map_DownwardAPIVolumeFile = map[string]string{
+	"":                 "DownwardAPIVolumeFile represents information to create the file containing the pod field",
+	"path":             "Required: Path is  the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'",
+	"fieldRef":         "Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.",
+	"resourceFieldRef": "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.",
+	"mode":             "Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.",
+}
+
+func (DownwardAPIVolumeFile) SwaggerDoc() map[string]string {
+	return map_DownwardAPIVolumeFile
+}
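+
+// Illustrative sketch (editor's addition, hypothetical example value):
+// projecting the pod's labels into a file named "labels" via fieldRef, one
+// of the supported pod fields listed above.
+var examplePodLabelsFile = DownwardAPIVolumeFile{
+	Path:     "labels",
+	FieldRef: &ObjectFieldSelector{FieldPath: "metadata.labels"},
+}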
+
+var map_DownwardAPIVolumeSource = map[string]string{
+	"":            "DownwardAPIVolumeSource represents a volume containing downward API info. Downward API volumes support ownership management and SELinux relabeling.",
+	"items":       "Items is a list of downward API volume file",
+	"defaultMode": "Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.",
+}
+
+func (DownwardAPIVolumeSource) SwaggerDoc() map[string]string {
+	return map_DownwardAPIVolumeSource
+}
+
+var map_EmptyDirVolumeSource = map[string]string{
+	"":          "Represents an empty directory for a pod. Empty directory volumes support ownership management and SELinux relabeling.",
+	"medium":    "What type of storage medium should back this directory. The default is \"\" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir",
+	"sizeLimit": "Total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir",
+}
+
+func (EmptyDirVolumeSource) SwaggerDoc() map[string]string {
+	return map_EmptyDirVolumeSource
+}
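+
+// Illustrative sketch (editor's addition, hypothetical helper): per the
+// sizeLimit description above, the effective cap for a memory-medium
+// EmptyDir is the minimum of the sizeLimit and the sum of the memory limits
+// of all containers in the pod, expressed here as plain byte counts.
+func effectiveMemoryEmptyDirCap(sizeLimitBytes, podMemoryLimitBytes int64) int64 {
+	if sizeLimitBytes < podMemoryLimitBytes {
+		return sizeLimitBytes
+	}
+	return podMemoryLimitBytes
+}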
+
+var map_EndpointAddress = map[string]string{
+	"":          "EndpointAddress is a tuple that describes single IP address.",
+	"ip":        "The IP of this endpoint. May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), or link-local multicast ((224.0.0.0/24). IPv6 is also accepted but not fully supported on all platforms. Also, certain kubernetes components, like kube-proxy, are not IPv6 ready.",
+	"hostname":  "The Hostname of this endpoint",
+	"nodeName":  "Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node.",
+	"targetRef": "Reference to object providing the endpoint.",
+}
+
+func (EndpointAddress) SwaggerDoc() map[string]string {
+	return map_EndpointAddress
+}
+
+var map_EndpointPort = map[string]string{
+	"":         "EndpointPort is a tuple that describes a single port.",
+	"name":     "The name of this port (corresponds to ServicePort.Name). Must be a DNS_LABEL. Optional only if one port is defined.",
+	"port":     "The port number of the endpoint.",
+	"protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.",
+}
+
+func (EndpointPort) SwaggerDoc() map[string]string {
+	return map_EndpointPort
+}
+
+var map_EndpointSubset = map[string]string{
+	"":                  "EndpointSubset is a group of addresses with a common set of ports. The expanded set of endpoints is the Cartesian product of Addresses x Ports. For example, given:\n  {\n    Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n    Ports:     [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n  }\nThe resulting set of endpoints can be viewed as:\n    a: [ 10.10.1.1:8675, 10.10.2.2:8675 ],\n    b: [ 10.10.1.1:309, 10.10.2.2:309 ]",
+	"addresses":         "IP addresses which offer the related ports that are marked as ready. These endpoints should be considered safe for load balancers and clients to utilize.",
+	"notReadyAddresses": "IP addresses which offer the related ports but are not currently marked as ready because they have not yet finished starting, have recently failed a readiness check, or have recently failed a liveness check.",
+	"ports":             "Port numbers available on the related IP addresses.",
+}
+
+func (EndpointSubset) SwaggerDoc() map[string]string {
+	return map_EndpointSubset
+}
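+
+// Illustrative sketch (editor's addition; hostPortPair and expandSubset are
+// hypothetical helpers): the Cartesian product Addresses x Ports described
+// above. For the documented example it yields 10.10.1.1:8675,
+// 10.10.2.2:8675, 10.10.1.1:309 and 10.10.2.2:309.
+type hostPortPair struct {
+	IP   string
+	Port int32
+}
+
+func expandSubset(s EndpointSubset) []hostPortPair {
+	var pairs []hostPortPair
+	for _, addr := range s.Addresses { // ready addresses only
+		for _, port := range s.Ports {
+			pairs = append(pairs, hostPortPair{IP: addr.IP, Port: port.Port})
+		}
+	}
+	return pairs
+}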
+
+var map_Endpoints = map[string]string{
+	"":         "Endpoints is a collection of endpoints that implement the actual service. Example:\n  Name: \"mysvc\",\n  Subsets: [\n    {\n      Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n      Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n    },\n    {\n      Addresses: [{\"ip\": \"10.10.3.3\"}],\n      Ports: [{\"name\": \"a\", \"port\": 93}, {\"name\": \"b\", \"port\": 76}]\n    },\n ]",
+	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"subsets":  "The set of all endpoints is the union of all subsets. Addresses are placed into subsets according to the IPs they share. A single address with multiple ports, some of which are ready and some of which are not (because they come from different containers) will result in the address being displayed in different subsets for the different ports. No address will appear in both Addresses and NotReadyAddresses in the same subset. Sets of addresses and ports that comprise a service.",
+}
+
+func (Endpoints) SwaggerDoc() map[string]string {
+	return map_Endpoints
+}
+
+var map_EndpointsList = map[string]string{
+	"":         "EndpointsList is a list of endpoints.",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
+	"items":    "List of endpoints.",
+}
+
+func (EndpointsList) SwaggerDoc() map[string]string {
+	return map_EndpointsList
+}
+
+var map_EnvFromSource = map[string]string{
+	"":             "EnvFromSource represents the source of a set of ConfigMaps",
+	"prefix":       "An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.",
+	"configMapRef": "The ConfigMap to select from",
+	"secretRef":    "The Secret to select from",
+}
+
+func (EnvFromSource) SwaggerDoc() map[string]string {
+	return map_EnvFromSource
+}
+
+var map_EnvVar = map[string]string{
+	"":          "EnvVar represents an environment variable present in a Container.",
+	"name":      "Name of the environment variable. Must be a C_IDENTIFIER.",
+	"value":     "Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to \"\".",
+	"valueFrom": "Source for the environment variable's value. Cannot be used if value is not empty.",
+}
+
+func (EnvVar) SwaggerDoc() map[string]string {
+	return map_EnvVar
+}
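+
+// Illustrative sketch (editor's addition, hypothetical example values): the
+// $(VAR_NAME) expansion and $$ escaping semantics described above. URL
+// expands against the previously defined HOST; LITERAL is never expanded.
+var exampleEnv = []EnvVar{
+	{Name: "HOST", Value: "example.com"},
+	{Name: "URL", Value: "https://$(HOST)/api"},          // becomes https://example.com/api
+	{Name: "LITERAL", Value: "$$(HOST) is not expanded"}, // escaped reference
+}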
+
+var map_EnvVarSource = map[string]string{
+	"":                 "EnvVarSource represents a source for the value of an EnvVar.",
+	"fieldRef":         "Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP.",
+	"resourceFieldRef": "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.",
+	"configMapKeyRef":  "Selects a key of a ConfigMap.",
+	"secretKeyRef":     "Selects a key of a secret in the pod's namespace",
+}
+
+func (EnvVarSource) SwaggerDoc() map[string]string {
+	return map_EnvVarSource
+}
+
+var map_Event = map[string]string{
+	"":                   "Event is a report of an event somewhere in the cluster.",
+	"metadata":           "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"involvedObject":     "The object that this event is about.",
+	"reason":             "This should be a short, machine understandable string that gives the reason for the transition into the object's current status.",
+	"message":            "A human-readable description of the status of this operation.",
+	"source":             "The component reporting this event. Should be a short machine understandable string.",
+	"firstTimestamp":     "The time at which the event was first recorded. (Time of server receipt is in TypeMeta.)",
+	"lastTimestamp":      "The time at which the most recent occurrence of this event was recorded.",
+	"count":              "The number of times this event has occurred.",
+	"type":               "Type of this event (Normal, Warning), new types could be added in the future",
+	"eventTime":          "Time when this Event was first observed.",
+	"series":             "Data about the Event series this event represents or nil if it's a singleton Event.",
+	"action":             "What action was taken/failed regarding to the Regarding object.",
+	"related":            "Optional secondary object for more complex actions.",
+	"reportingComponent": "Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`.",
+	"reportingInstance":  "ID of the controller instance, e.g. `kubelet-xyzf`.",
+}
+
+func (Event) SwaggerDoc() map[string]string {
+	return map_Event
+}
+
+var map_EventList = map[string]string{
+	"":         "EventList is a list of events.",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
+	"items":    "List of events",
+}
+
+func (EventList) SwaggerDoc() map[string]string {
+	return map_EventList
+}
+
+var map_EventSeries = map[string]string{
+	"":                 "EventSeries contain information on series of events, i.e. thing that was/is happening continuously for some time.",
+	"count":            "Number of occurrences in this series up to the last heartbeat time",
+	"lastObservedTime": "Time of the last occurrence observed",
+	"state":            "State of this Series: Ongoing or Finished",
+}
+
+func (EventSeries) SwaggerDoc() map[string]string {
+	return map_EventSeries
+}
+
+var map_EventSource = map[string]string{
+	"":          "EventSource contains information for an event.",
+	"component": "Component from which the event is generated.",
+	"host":      "Node name on which the event is generated.",
+}
+
+func (EventSource) SwaggerDoc() map[string]string {
+	return map_EventSource
+}
+
+var map_ExecAction = map[string]string{
+	"":        "ExecAction describes a \"run in container\" action.",
+	"command": "Command is the command line to execute inside the container, the working directory for the command  is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.",
+}
+
+func (ExecAction) SwaggerDoc() map[string]string {
+	return map_ExecAction
+}
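+
+// Illustrative sketch (editor's addition, hypothetical example value):
+// because the command is exec'd directly rather than run in a shell, shell
+// syntax such as pipes must be wrapped in an explicit shell invocation, as
+// described above.
+var exampleShellExec = ExecAction{
+	Command: []string{"/bin/sh", "-c", "cat /tmp/status | grep -q ready"},
+}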
+
+var map_FCVolumeSource = map[string]string{
+	"":           "Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling.",
+	"targetWWNs": "Optional: FC target worldwide names (WWNs)",
+	"lun":        "Optional: FC target lun number",
+	"fsType":     "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.",
+	"readOnly":   "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
+	"wwids":      "Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.",
+}
+
+func (FCVolumeSource) SwaggerDoc() map[string]string {
+	return map_FCVolumeSource
+}
+
+var map_FlexPersistentVolumeSource = map[string]string{
+	"":          "FlexPersistentVolumeSource represents a generic persistent volume resource that is provisioned/attached using an exec based plugin.",
+	"driver":    "Driver is the name of the driver to use for this volume.",
+	"fsType":    "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script.",
+	"secretRef": "Optional: SecretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.",
+	"readOnly":  "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
+	"options":   "Optional: Extra command options if any.",
+}
+
+func (FlexPersistentVolumeSource) SwaggerDoc() map[string]string {
+	return map_FlexPersistentVolumeSource
+}
+
+var map_FlexVolumeSource = map[string]string{
+	"":          "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.",
+	"driver":    "Driver is the name of the driver to use for this volume.",
+	"fsType":    "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script.",
+	"secretRef": "Optional: SecretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.",
+	"readOnly":  "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
+	"options":   "Optional: Extra command options if any.",
+}
+
+func (FlexVolumeSource) SwaggerDoc() map[string]string {
+	return map_FlexVolumeSource
+}
+
+var map_FlockerVolumeSource = map[string]string{
+	"":            "Represents a Flocker volume mounted by the Flocker agent. One and only one of datasetName and datasetUUID should be set. Flocker volumes do not support ownership management or SELinux relabeling.",
+	"datasetName": "Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated",
+	"datasetUUID": "UUID of the dataset. This is unique identifier of a Flocker dataset",
+}
+
+func (FlockerVolumeSource) SwaggerDoc() map[string]string {
+	return map_FlockerVolumeSource
+}
+
+var map_GCEPersistentDiskVolumeSource = map[string]string{
+	"":          "Represents a Persistent Disk resource in Google Compute Engine.\n\nA GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling.",
+	"pdName":    "Unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk",
+	"fsType":    "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk",
+	"partition": "The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk",
+	"readOnly":  "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk",
+}
+
+func (GCEPersistentDiskVolumeSource) SwaggerDoc() map[string]string {
+	return map_GCEPersistentDiskVolumeSource
+}
+
+var map_GitRepoVolumeSource = map[string]string{
+	"":           "Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.\n\nDEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.",
+	"repository": "Repository URL",
+	"revision":   "Commit hash for the specified revision.",
+	"directory":  "Target directory name. Must not contain or start with '..'.  If '.' is supplied, the volume directory will be the git repository.  Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name.",
+}
+
+func (GitRepoVolumeSource) SwaggerDoc() map[string]string {
+	return map_GitRepoVolumeSource
+}
+
+var map_GlusterfsPersistentVolumeSource = map[string]string{
+	"":                   "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.",
+	"endpoints":          "EndpointsName is the endpoint name that details Glusterfs topology. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod",
+	"path":               "Path is the Glusterfs volume path. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod",
+	"readOnly":           "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod",
+	"endpointsNamespace": "EndpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod",
+}
+
+func (GlusterfsPersistentVolumeSource) SwaggerDoc() map[string]string {
+	return map_GlusterfsPersistentVolumeSource
+}
+
+var map_GlusterfsVolumeSource = map[string]string{
+	"":          "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.",
+	"endpoints": "EndpointsName is the endpoint name that details Glusterfs topology. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod",
+	"path":      "Path is the Glusterfs volume path. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod",
+	"readOnly":  "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod",
+}
+
+func (GlusterfsVolumeSource) SwaggerDoc() map[string]string {
+	return map_GlusterfsVolumeSource
+}
+
+var map_HTTPGetAction = map[string]string{
+	"":            "HTTPGetAction describes an action based on HTTP Get requests.",
+	"path":        "Path to access on the HTTP server.",
+	"port":        "Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.",
+	"host":        "Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead.",
+	"scheme":      "Scheme to use for connecting to the host. Defaults to HTTP.",
+	"httpHeaders": "Custom headers to set in the request. HTTP allows repeated headers.",
+}
+
+func (HTTPGetAction) SwaggerDoc() map[string]string {
+	return map_HTTPGetAction
+}
+
+var map_HTTPHeader = map[string]string{
+	"":      "HTTPHeader describes a custom header to be used in HTTP probes",
+	"name":  "The header field name",
+	"value": "The header field value",
+}
+
+func (HTTPHeader) SwaggerDoc() map[string]string {
+	return map_HTTPHeader
+}
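+
+// Illustrative sketch (editor's addition, hypothetical example value): an
+// HTTPGetAction with a custom header, per the two preceding descriptions.
+// Port is left at its zero value here; in practice it is an
+// intstr.IntOrString set via intstr.FromInt or intstr.FromString.
+var exampleHTTPGet = HTTPGetAction{
+	Path:   "/healthz",
+	Scheme: URISchemeHTTP,
+	HTTPHeaders: []HTTPHeader{
+		{Name: "X-Probe", Value: "liveness"},
+	},
+}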
+
+var map_Handler = map[string]string{
+	"":          "Handler defines a specific action that should be taken",
+	"exec":      "One and only one of the following should be specified. Exec specifies the action to take.",
+	"httpGet":   "HTTPGet specifies the http request to perform.",
+	"tcpSocket": "TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported",
+}
+
+func (Handler) SwaggerDoc() map[string]string {
+	return map_Handler
+}
+
+var map_HostAlias = map[string]string{
+	"":          "HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file.",
+	"ip":        "IP address of the host file entry.",
+	"hostnames": "Hostnames for the above IP address.",
+}
+
+func (HostAlias) SwaggerDoc() map[string]string {
+	return map_HostAlias
+}
+
+var map_HostPathVolumeSource = map[string]string{
+	"":     "Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling.",
+	"path": "Path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath",
+	"type": "Type for HostPath Volume Defaults to \"\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath",
+}
+
+func (HostPathVolumeSource) SwaggerDoc() map[string]string {
+	return map_HostPathVolumeSource
+}
+
+var map_ISCSIPersistentVolumeSource = map[string]string{
+	"":                  "ISCSIPersistentVolumeSource represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling.",
+	"targetPortal":      "iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).",
+	"iqn":               "Target iSCSI Qualified Name.",
+	"lun":               "iSCSI Target Lun number.",
+	"iscsiInterface":    "iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).",
+	"fsType":            "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi",
+	"readOnly":          "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.",
+	"portals":           "iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).",
+	"chapAuthDiscovery": "whether support iSCSI Discovery CHAP authentication",
+	"chapAuthSession":   "whether support iSCSI Session CHAP authentication",
+	"secretRef":         "CHAP Secret for iSCSI target and initiator authentication",
+	"initiatorName":     "Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface <target portal>:<volume name> will be created for the connection.",
+}
+
+func (ISCSIPersistentVolumeSource) SwaggerDoc() map[string]string {
+	return map_ISCSIPersistentVolumeSource
+}
+
+var map_ISCSIVolumeSource = map[string]string{
+	"":                  "Represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling.",
+	"targetPortal":      "iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).",
+	"iqn":               "Target iSCSI Qualified Name.",
+	"lun":               "iSCSI Target Lun number.",
+	"iscsiInterface":    "iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).",
+	"fsType":            "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi",
+	"readOnly":          "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.",
+	"portals":           "iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).",
+	"chapAuthDiscovery": "whether support iSCSI Discovery CHAP authentication",
+	"chapAuthSession":   "whether support iSCSI Session CHAP authentication",
+	"secretRef":         "CHAP Secret for iSCSI target and initiator authentication",
+	"initiatorName":     "Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface <target portal>:<volume name> will be created for the connection.",
+}
+
+func (ISCSIVolumeSource) SwaggerDoc() map[string]string {
+	return map_ISCSIVolumeSource
+}
+
+var map_KeyToPath = map[string]string{
+	"":     "Maps a string key to a path within a volume.",
+	"key":  "The key to project.",
+	"path": "The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.",
+	"mode": "Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.",
+}
+
+func (KeyToPath) SwaggerDoc() map[string]string {
+	return map_KeyToPath
+}
+
+var map_Lifecycle = map[string]string{
+	"":          "Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted.",
+	"postStart": "PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks",
+	"preStop":   "PreStop is called immediately before a container is terminated. The container is terminated after the handler completes. The reason for termination is passed to the handler. Regardless of the outcome of the handler, the container is eventually terminated. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks",
+}
+
+func (Lifecycle) SwaggerDoc() map[string]string {
+	return map_Lifecycle
+}
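+
+// Illustrative sketch (editor's addition, hypothetical example value): a
+// preStop hook giving the process time to drain before termination, per the
+// blocking semantics described above.
+var exampleLifecycle = Lifecycle{
+	PreStop: &Handler{
+		Exec: &ExecAction{Command: []string{"/bin/sh", "-c", "sleep 5"}},
+	},
+}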
+
+var map_LimitRange = map[string]string{
+	"":         "LimitRange sets resource usage limits for each kind of resource in a Namespace.",
+	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"spec":     "Spec defines the limits enforced. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+}
+
+func (LimitRange) SwaggerDoc() map[string]string {
+	return map_LimitRange
+}
+
+var map_LimitRangeItem = map[string]string{
+	"":                     "LimitRangeItem defines a min/max usage limit for any resource that matches on kind.",
+	"type":                 "Type of resource that this limit applies to.",
+	"max":                  "Max usage constraints on this kind by resource name.",
+	"min":                  "Min usage constraints on this kind by resource name.",
+	"default":              "Default resource requirement limit value by resource name if resource limit is omitted.",
+	"defaultRequest":       "DefaultRequest is the default resource requirement request value by resource name if resource request is omitted.",
+	"maxLimitRequestRatio": "MaxLimitRequestRatio if specified, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource.",
+}
+
+func (LimitRangeItem) SwaggerDoc() map[string]string {
+	return map_LimitRangeItem
+}
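+
+// Illustrative sketch (editor's addition, hypothetical helper using plain
+// milli-units instead of resource.Quantity): maxLimitRequestRatio requires a
+// non-zero request and limit with limit/request <= ratio. For example,
+// request=100m and limit=400m passes with ratio 4 but fails with ratio 2.
+func withinBurstRatio(requestMilli, limitMilli, maxRatio int64) bool {
+	if requestMilli <= 0 || limitMilli <= 0 {
+		return false // both must be non-zero
+	}
+	return limitMilli <= maxRatio*requestMilli
+}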
+
+var map_LimitRangeList = map[string]string{
+	"":         "LimitRangeList is a list of LimitRange items.",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
+	"items":    "Items is a list of LimitRange objects. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/",
+}
+
+func (LimitRangeList) SwaggerDoc() map[string]string {
+	return map_LimitRangeList
+}
+
+var map_LimitRangeSpec = map[string]string{
+	"":       "LimitRangeSpec defines a min/max usage limit for resources that match on kind.",
+	"limits": "Limits is the list of LimitRangeItem objects that are enforced.",
+}
+
+func (LimitRangeSpec) SwaggerDoc() map[string]string {
+	return map_LimitRangeSpec
+}
+
+var map_LoadBalancerIngress = map[string]string{
+	"":         "LoadBalancerIngress represents the status of a load-balancer ingress point: traffic intended for the service should be sent to an ingress point.",
+	"ip":       "IP is set for load-balancer ingress points that are IP based (typically GCE or OpenStack load-balancers)",
+	"hostname": "Hostname is set for load-balancer ingress points that are DNS based (typically AWS load-balancers)",
+}
+
+func (LoadBalancerIngress) SwaggerDoc() map[string]string {
+	return map_LoadBalancerIngress
+}
+
+var map_LoadBalancerStatus = map[string]string{
+	"":        "LoadBalancerStatus represents the status of a load-balancer.",
+	"ingress": "Ingress is a list containing ingress points for the load-balancer. Traffic intended for the service should be sent to these ingress points.",
+}
+
+func (LoadBalancerStatus) SwaggerDoc() map[string]string {
+	return map_LoadBalancerStatus
+}
+
+var map_LocalObjectReference = map[string]string{
+	"":     "LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.",
+	"name": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names",
+}
+
+func (LocalObjectReference) SwaggerDoc() map[string]string {
+	return map_LocalObjectReference
+}
+
+var map_LocalVolumeSource = map[string]string{
+	"":       "Local represents directly-attached storage with node affinity (Beta feature)",
+	"path":   "The full path to the volume on the node. It can be either a directory or block device (disk, partition, ...).",
+	"fsType": "Filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default value is to auto-select a fileystem if unspecified.",
+}
+
+func (LocalVolumeSource) SwaggerDoc() map[string]string {
+	return map_LocalVolumeSource
+}
+
+var map_NFSVolumeSource = map[string]string{
+	"":         "Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling.",
+	"server":   "Server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs",
+	"path":     "Path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs",
+	"readOnly": "ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs",
+}
+
+func (NFSVolumeSource) SwaggerDoc() map[string]string {
+	return map_NFSVolumeSource
+}
+
+var map_Namespace = map[string]string{
+	"":         "Namespace provides a scope for Names. Use of multiple namespaces is optional.",
+	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"spec":     "Spec defines the behavior of the Namespace. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+	"status":   "Status describes the current status of a Namespace. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+}
+
+func (Namespace) SwaggerDoc() map[string]string {
+	return map_Namespace
+}
+
+var map_NamespaceList = map[string]string{
+	"":         "NamespaceList is a list of Namespaces.",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
+	"items":    "Items is the list of Namespace objects in the list. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/",
+}
+
+func (NamespaceList) SwaggerDoc() map[string]string {
+	return map_NamespaceList
+}
+
+var map_NamespaceSpec = map[string]string{
+	"":           "NamespaceSpec describes the attributes on a Namespace.",
+	"finalizers": "Finalizers is an opaque list of values that must be empty to permanently remove object from storage. More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/",
+}
+
+func (NamespaceSpec) SwaggerDoc() map[string]string {
+	return map_NamespaceSpec
+}
+
+var map_NamespaceStatus = map[string]string{
+	"":      "NamespaceStatus is information about the current status of a Namespace.",
+	"phase": "Phase is the current lifecycle phase of the namespace. More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/",
+}
+
+func (NamespaceStatus) SwaggerDoc() map[string]string {
+	return map_NamespaceStatus
+}
+
+var map_Node = map[string]string{
+	"":         "Node is a worker node in Kubernetes. Each node will have a unique identifier in the cache (i.e. in etcd).",
+	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"spec":     "Spec defines the behavior of a node. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+	"status":   "Most recently observed status of the node. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+}
+
+func (Node) SwaggerDoc() map[string]string {
+	return map_Node
+}
+
+var map_NodeAddress = map[string]string{
+	"":        "NodeAddress contains information for the node's address.",
+	"type":    "Node address type, one of Hostname, ExternalIP or InternalIP.",
+	"address": "The node address.",
+}
+
+func (NodeAddress) SwaggerDoc() map[string]string {
+	return map_NodeAddress
+}
+
+var map_NodeAffinity = map[string]string{
+	"": "Node affinity is a group of node affinity scheduling rules.",
+	"requiredDuringSchedulingIgnoredDuringExecution":  "If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.",
+	"preferredDuringSchedulingIgnoredDuringExecution": "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.",
+}
+
+func (NodeAffinity) SwaggerDoc() map[string]string {
+	return map_NodeAffinity
+}
+
+var map_NodeCondition = map[string]string{
+	"":                   "NodeCondition contains condition information for a node.",
+	"type":               "Type of node condition.",
+	"status":             "Status of the condition, one of True, False, Unknown.",
+	"lastHeartbeatTime":  "Last time we got an update on a given condition.",
+	"lastTransitionTime": "Last time the condition transit from one status to another.",
+	"reason":             "(brief) reason for the condition's last transition.",
+	"message":            "Human readable message indicating details about last transition.",
+}
+
+func (NodeCondition) SwaggerDoc() map[string]string {
+	return map_NodeCondition
+}
+
+var map_NodeConfigSource = map[string]string{
+	"":          "NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil.",
+	"configMap": "ConfigMap is a reference to a Node's ConfigMap",
+}
+
+func (NodeConfigSource) SwaggerDoc() map[string]string {
+	return map_NodeConfigSource
+}
+
+var map_NodeConfigStatus = map[string]string{
+	"":              "NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource.",
+	"assigned":      "Assigned reports the checkpointed config the node will try to use. When Node.Spec.ConfigSource is updated, the node checkpoints the associated config payload to local disk, along with a record indicating intended config. The node refers to this record to choose its config checkpoint, and reports this record in Assigned. Assigned only updates in the status after the record has been checkpointed to disk. When the Kubelet is restarted, it tries to make the Assigned config the Active config by loading and validating the checkpointed payload identified by Assigned.",
+	"active":        "Active reports the checkpointed config the node is actively using. Active will represent either the current version of the Assigned config, or the current LastKnownGood config, depending on whether attempting to use the Assigned config results in an error.",
+	"lastKnownGood": "LastKnownGood reports the checkpointed config the node will fall back to when it encounters an error attempting to use the Assigned config. The Assigned config becomes the LastKnownGood config when the node determines that the Assigned config is stable and correct. This is currently implemented as a 10-minute soak period starting when the local record of Assigned config is updated. If the Assigned config is Active at the end of this period, it becomes the LastKnownGood. Note that if Spec.ConfigSource is reset to nil (use local defaults), the LastKnownGood is also immediately reset to nil, because the local default config is always assumed good. You should not make assumptions about the node's method of determining config stability and correctness, as this may change or become configurable in the future.",
+	"error":         "Error describes any problems reconciling the Spec.ConfigSource to the Active config. Errors may occur, for example, attempting to checkpoint Spec.ConfigSource to the local Assigned record, attempting to checkpoint the payload associated with Spec.ConfigSource, attempting to load or validate the Assigned config, etc. Errors may occur at different points while syncing config. Earlier errors (e.g. download or checkpointing errors) will not result in a rollback to LastKnownGood, and may resolve across Kubelet retries. Later errors (e.g. loading or validating a checkpointed config) will result in a rollback to LastKnownGood. In the latter case, it is usually possible to resolve the error by fixing the config assigned in Spec.ConfigSource. You can find additional information for debugging by searching the error message in the Kubelet log. Error is a human-readable description of the error state; machines can check whether or not Error is empty, but should not rely on the stability of the Error text across Kubelet versions.",
+}
+
+func (NodeConfigStatus) SwaggerDoc() map[string]string {
+	return map_NodeConfigStatus
+}
+
+var map_NodeDaemonEndpoints = map[string]string{
+	"":                "NodeDaemonEndpoints lists ports opened by daemons running on the Node.",
+	"kubeletEndpoint": "Endpoint on which Kubelet is listening.",
+}
+
+func (NodeDaemonEndpoints) SwaggerDoc() map[string]string {
+	return map_NodeDaemonEndpoints
+}
+
+var map_NodeList = map[string]string{
+	"":         "NodeList is the whole list of all Nodes which have been registered with master.",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
+	"items":    "List of nodes",
+}
+
+func (NodeList) SwaggerDoc() map[string]string {
+	return map_NodeList
+}
+
+var map_NodeProxyOptions = map[string]string{
+	"":     "NodeProxyOptions is the query options to a Node's proxy call.",
+	"path": "Path is the URL path to use for the current proxy request to node.",
+}
+
+func (NodeProxyOptions) SwaggerDoc() map[string]string {
+	return map_NodeProxyOptions
+}
+
+var map_NodeResources = map[string]string{
+	"":         "NodeResources is an object for conveying resource information about a node. see http://releases.k8s.io/HEAD/docs/design/resources.md for more details.",
+	"Capacity": "Capacity represents the available resources of a node",
+}
+
+func (NodeResources) SwaggerDoc() map[string]string {
+	return map_NodeResources
+}
+
+var map_NodeSelector = map[string]string{
+	"":                  "A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms.",
+	"nodeSelectorTerms": "Required. A list of node selector terms. The terms are ORed.",
+}
+
+func (NodeSelector) SwaggerDoc() map[string]string {
+	return map_NodeSelector
+}
+
+var map_NodeSelectorRequirement = map[string]string{
+	"":         "A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.",
+	"key":      "The label key that the selector applies to.",
+	"operator": "Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.",
+	"values":   "An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.",
+}
+
+func (NodeSelectorRequirement) SwaggerDoc() map[string]string {
+	return map_NodeSelectorRequirement
+}
+
+var map_NodeSelectorTerm = map[string]string{
+	"":                 "A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.",
+	"matchExpressions": "A list of node selector requirements by node's labels.",
+	"matchFields":      "A list of node selector requirements by node's fields.",
+}
+
+func (NodeSelectorTerm) SwaggerDoc() map[string]string {
+	return map_NodeSelectorTerm
+}
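+
+// Illustrative sketch (editor's addition; matchesNodeSelector, matchesTerm
+// and containsString are hypothetical helpers, not the scheduler's actual
+// implementation): the OR-of-terms / AND-of-requirements semantics described
+// above, evaluating only matchExpressions with the set-based operators
+// (Gt, Lt and matchFields are omitted).
+func matchesNodeSelector(sel NodeSelector, labels map[string]string) bool {
+	for _, term := range sel.NodeSelectorTerms { // terms are ORed
+		if matchesTerm(term, labels) {
+			return true
+		}
+	}
+	return false
+}
+
+func matchesTerm(term NodeSelectorTerm, labels map[string]string) bool {
+	if len(term.MatchExpressions) == 0 && len(term.MatchFields) == 0 {
+		return false // an empty term matches no objects
+	}
+	for _, req := range term.MatchExpressions { // requirements are ANDed
+		v, ok := labels[req.Key]
+		switch req.Operator {
+		case NodeSelectorOpExists:
+			if !ok {
+				return false
+			}
+		case NodeSelectorOpDoesNotExist:
+			if ok {
+				return false
+			}
+		case NodeSelectorOpIn:
+			if !ok || !containsString(req.Values, v) {
+				return false
+			}
+		case NodeSelectorOpNotIn:
+			if ok && containsString(req.Values, v) {
+				return false
+			}
+		default:
+			return false // Gt/Lt omitted in this sketch
+		}
+	}
+	return true
+}
+
+func containsString(vals []string, s string) bool {
+	for _, v := range vals {
+		if v == s {
+			return true
+		}
+	}
+	return false
+}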
+
+var map_NodeSpec = map[string]string{
+	"":              "NodeSpec describes the attributes that a node is created with.",
+	"podCIDR":       "PodCIDR represents the pod IP range assigned to the node.",
+	"providerID":    "ID of the node assigned by the cloud provider in the format: <ProviderName>://<ProviderSpecificNodeID>",
+	"unschedulable": "Unschedulable controls node schedulability of new pods. By default, node is schedulable. More info: https://kubernetes.io/docs/concepts/nodes/node/#manual-node-administration",
+	"taints":        "If specified, the node's taints.",
+	"configSource":  "If specified, the source to get node configuration from The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field",
+	"externalID":    "Deprecated. Not all kubelets will set this field. Remove field after 1.13. see: https://issues.k8s.io/61966",
+}
+
+func (NodeSpec) SwaggerDoc() map[string]string {
+	return map_NodeSpec
+}
+
+var map_NodeStatus = map[string]string{
+	"":                "NodeStatus is information about the current status of a node.",
+	"capacity":        "Capacity represents the total resources of a node. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity",
+	"allocatable":     "Allocatable represents the resources of a node that are available for scheduling. Defaults to Capacity.",
+	"phase":           "NodePhase is the recently observed lifecycle phase of the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#phase The field is never populated, and now is deprecated.",
+	"conditions":      "Conditions is an array of current observed node conditions. More info: https://kubernetes.io/docs/concepts/nodes/node/#condition",
+	"addresses":       "List of addresses reachable to the node. Queried from cloud provider, if available. More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses",
+	"daemonEndpoints": "Endpoints of daemons running on the Node.",
+	"nodeInfo":        "Set of ids/uuids to uniquely identify the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#info",
+	"images":          "List of container images on this node",
+	"volumesInUse":    "List of attachable volumes in use (mounted) by the node.",
+	"volumesAttached": "List of volumes that are attached to the node.",
+	"config":          "Status of the config assigned to the node via the dynamic Kubelet config feature.",
+}
+
+func (NodeStatus) SwaggerDoc() map[string]string {
+	return map_NodeStatus
+}
+
+var map_NodeSystemInfo = map[string]string{
+	"":                        "NodeSystemInfo is a set of ids/uuids to uniquely identify the node.",
+	"machineID":               "MachineID reported by the node. For unique machine identification in the cluster this field is preferred. Learn more from man(5) machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html",
+	"systemUUID":              "SystemUUID reported by the node. For unique machine identification MachineID is preferred. This field is specific to Red Hat hosts https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html",
+	"bootID":                  "Boot ID reported by the node.",
+	"kernelVersion":           "Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64).",
+	"osImage":                 "OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).",
+	"containerRuntimeVersion": "ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0).",
+	"kubeletVersion":          "Kubelet Version reported by the node.",
+	"kubeProxyVersion":        "KubeProxy Version reported by the node.",
+	"operatingSystem":         "The Operating System reported by the node",
+	"architecture":            "The Architecture reported by the node",
+}
+
+func (NodeSystemInfo) SwaggerDoc() map[string]string {
+	return map_NodeSystemInfo
+}
+
+var map_ObjectFieldSelector = map[string]string{
+	"":           "ObjectFieldSelector selects an APIVersioned field of an object.",
+	"apiVersion": "Version of the schema the FieldPath is written in terms of, defaults to \"v1\".",
+	"fieldPath":  "Path of the field to select in the specified API version.",
+}
+
+func (ObjectFieldSelector) SwaggerDoc() map[string]string {
+	return map_ObjectFieldSelector
+}
+
+var map_ObjectReference = map[string]string{
+	"":                "ObjectReference contains enough information to let you inspect or modify the referred object.",
+	"kind":            "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
+	"namespace":       "Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/",
+	"name":            "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names",
+	"uid":             "UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids",
+	"apiVersion":      "API version of the referent.",
+	"resourceVersion": "Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency",
+	"fieldPath":       "If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \"spec.containers{name}\" (where \"name\" refers to the name of the container that triggered the event) or if no container name is specified \"spec.containers[2]\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object.",
+}
+
+func (ObjectReference) SwaggerDoc() map[string]string {
+	return map_ObjectReference
+}
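+
+// Editorial sketch: an ObjectReference pointing at a pod, using only the
+// fields documented above (uid and resourceVersion omitted for brevity):
+func exampleObjectReference() ObjectReference {
+	return ObjectReference{
+		Kind:       "Pod",
+		Namespace:  "default",
+		Name:       "web-0",
+		APIVersion: "v1",
+	}
+}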
+
+var map_PersistentVolume = map[string]string{
+	"":         "PersistentVolume (PV) is a storage resource provisioned by an administrator. It is analogous to a node. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes",
+	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"spec":     "Spec defines a specification of a persistent volume owned by the cluster. Provisioned by an administrator. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes",
+	"status":   "Status represents the current information/status for the persistent volume. Populated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes",
+}
+
+func (PersistentVolume) SwaggerDoc() map[string]string {
+	return map_PersistentVolume
+}
+
+var map_PersistentVolumeClaim = map[string]string{
+	"":         "PersistentVolumeClaim is a user's request for and claim to a persistent volume",
+	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"spec":     "Spec defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims",
+	"status":   "Status represents the current information/status of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims",
+}
+
+func (PersistentVolumeClaim) SwaggerDoc() map[string]string {
+	return map_PersistentVolumeClaim
+}
+
+var map_PersistentVolumeClaimCondition = map[string]string{
+	"":                   "PersistentVolumeClaimCondition contains details about the state of a PVC.",
+	"lastProbeTime":      "Last time we probed the condition.",
+	"lastTransitionTime": "Last time the condition transitioned from one status to another.",
+	"reason":             "Unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports \"ResizeStarted\" that means the underlying persistent volume is being resized.",
+	"message":            "Human-readable message indicating details about last transition.",
+}
+
+func (PersistentVolumeClaimCondition) SwaggerDoc() map[string]string {
+	return map_PersistentVolumeClaimCondition
+}
+
+var map_PersistentVolumeClaimList = map[string]string{
+	"":         "PersistentVolumeClaimList is a list of PersistentVolumeClaim items.",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
+	"items":    "A list of persistent volume claims. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims",
+}
+
+func (PersistentVolumeClaimList) SwaggerDoc() map[string]string {
+	return map_PersistentVolumeClaimList
+}
+
+var map_PersistentVolumeClaimSpec = map[string]string{
+	"":                 "PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes",
+	"accessModes":      "AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1",
+	"selector":         "A label query over volumes to consider for binding.",
+	"resources":        "Resources represents the minimum resources the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources",
+	"volumeName":       "VolumeName is the binding reference to the PersistentVolume backing this claim.",
+	"storageClassName": "Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1",
+	"volumeMode":       "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. This is a beta feature.",
+	"dataSource":       "This field requires the VolumeSnapshotDataSource alpha feature gate to be enabled and currently VolumeSnapshot is the only supported data source. If the provisioner can support VolumeSnapshot data source, it will create a new volume and data will be restored to the volume at the same time. If the provisioner does not support VolumeSnapshot data source, volume will not be created and the failure will be reported as an event. In the future, we plan to support more data source types and the behavior of the provisioner may change.",
+}
+
+func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string {
+	return map_PersistentVolumeClaimSpec
+}
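+
+// Editorial sketch: a PersistentVolumeClaimSpec built from the fields
+// documented above. The storage request is omitted because resource.Quantity
+// lives outside this package; a real claim would also set Resources.Requests.
+func examplePVCSpec() PersistentVolumeClaimSpec {
+	class := "standard" // hypothetical StorageClass name
+	return PersistentVolumeClaimSpec{
+		AccessModes:      []PersistentVolumeAccessMode{ReadWriteOnce},
+		StorageClassName: &class,
+	}
+}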
+
+var map_PersistentVolumeClaimStatus = map[string]string{
+	"":            "PersistentVolumeClaimStatus is the current status of a persistent volume claim.",
+	"phase":       "Phase represents the current phase of PersistentVolumeClaim.",
+	"accessModes": "AccessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1",
+	"capacity":    "Represents the actual resources of the underlying volume.",
+	"conditions":  "Current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.",
+}
+
+func (PersistentVolumeClaimStatus) SwaggerDoc() map[string]string {
+	return map_PersistentVolumeClaimStatus
+}
+
+var map_PersistentVolumeClaimVolumeSource = map[string]string{
+	"":          "PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. This volume finds the bound PV and mounts that volume for the pod. A PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another type of volume that is owned by someone else (the system).",
+	"claimName": "ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims",
+	"readOnly":  "Will force the ReadOnly setting in VolumeMounts. Default false.",
+}
+
+func (PersistentVolumeClaimVolumeSource) SwaggerDoc() map[string]string {
+	return map_PersistentVolumeClaimVolumeSource
+}
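+
+// Editorial sketch: mounting the bound PV of a claim into a pod goes through
+// a Volume whose source is the PersistentVolumeClaimVolumeSource above:
+func exampleClaimVolume() Volume {
+	return Volume{
+		Name: "data", // hypothetical name, referenced by a container's VolumeMount
+		VolumeSource: VolumeSource{
+			PersistentVolumeClaim: &PersistentVolumeClaimVolumeSource{
+				ClaimName: "my-claim",
+			},
+		},
+	}
+}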
+
+var map_PersistentVolumeList = map[string]string{
+	"":         "PersistentVolumeList is a list of PersistentVolume items.",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
+	"items":    "List of persistent volumes. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes",
+}
+
+func (PersistentVolumeList) SwaggerDoc() map[string]string {
+	return map_PersistentVolumeList
+}
+
+var map_PersistentVolumeSource = map[string]string{
+	"":                     "PersistentVolumeSource is similar to VolumeSource but meant for the administrator who creates PVs. Exactly one of its members must be set.",
+	"gcePersistentDisk":    "GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk",
+	"awsElasticBlockStore": "AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore",
+	"hostPath":             "HostPath represents a directory on the host. Provisioned by a developer or tester. This is useful for single-node development and testing only! On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath",
+	"glusterfs":            "Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md",
+	"nfs":                  "NFS represents an NFS mount on the host. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs",
+	"rbd":                  "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md",
+	"iscsi":                "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin.",
+	"cinder":               "Cinder represents a cinder volume attached and mounted on the kubelet's host machine. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md",
+	"cephfs":               "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime",
+	"fc":                   "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.",
+	"flocker":              "Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service running.",
+	"flexVolume":           "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.",
+	"azureFile":            "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.",
+	"vsphereVolume":        "VsphereVolume represents a vSphere volume attached and mounted on the kubelet's host machine",
+	"quobyte":              "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime",
+	"azureDisk":            "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.",
+	"photonPersistentDisk": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on the kubelet's host machine",
+	"portworxVolume":       "PortworxVolume represents a Portworx volume attached and mounted on the kubelet's host machine",
+	"scaleIO":              "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.",
+	"local":                "Local represents directly-attached storage with node affinity",
+	"storageos":            "StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod. More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md",
+	"csi":                  "CSI represents storage that is handled by an external CSI driver (Beta feature).",
+}
+
+func (PersistentVolumeSource) SwaggerDoc() map[string]string {
+	return map_PersistentVolumeSource
+}
+
+var map_PersistentVolumeSpec = map[string]string{
+	"":                              "PersistentVolumeSpec is the specification of a persistent volume.",
+	"capacity":                      "A description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity",
+	"accessModes":                   "AccessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes",
+	"claimRef":                      "ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. Expected to be non-nil when bound. claim.VolumeName is the authoritative bind between PV and PVC. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#binding",
+	"persistentVolumeReclaimPolicy": "What happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming",
+	"storageClassName":              "Name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass.",
+	"mountOptions":                  "A list of mount options, e.g. [\"ro\", \"soft\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options",
+	"volumeMode":                    "volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec. This is a beta feature.",
+	"nodeAffinity":                  "NodeAffinity defines constraints that limit what nodes this volume can be accessed from. This field influences the scheduling of pods that use this volume.",
+}
+
+func (PersistentVolumeSpec) SwaggerDoc() map[string]string {
+	return map_PersistentVolumeSpec
+}
+
+var map_PersistentVolumeStatus = map[string]string{
+	"":        "PersistentVolumeStatus is the current status of a persistent volume.",
+	"phase":   "Phase indicates if a volume is available, bound to a claim, or released by a claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase",
+	"message": "A human-readable message indicating details about why the volume is in this state.",
+	"reason":  "Reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI.",
+}
+
+func (PersistentVolumeStatus) SwaggerDoc() map[string]string {
+	return map_PersistentVolumeStatus
+}
+
+var map_PhotonPersistentDiskVolumeSource = map[string]string{
+	"":       "Represents a Photon Controller persistent disk resource.",
+	"pdID":   "ID that identifies Photon Controller persistent disk",
+	"fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.",
+}
+
+func (PhotonPersistentDiskVolumeSource) SwaggerDoc() map[string]string {
+	return map_PhotonPersistentDiskVolumeSource
+}
+
+var map_Pod = map[string]string{
+	"":         "Pod is a collection of containers that can run on a host. This resource is created by clients and scheduled onto hosts.",
+	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"spec":     "Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+	"status":   "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+}
+
+func (Pod) SwaggerDoc() map[string]string {
+	return map_Pod
+}
+
+var map_PodAffinity = map[string]string{
+	"": "Pod affinity is a group of inter pod affinity scheduling rules.",
+	"requiredDuringSchedulingIgnoredDuringExecution":  "If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.",
+	"preferredDuringSchedulingIgnoredDuringExecution": "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which match the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.",
+}
+
+func (PodAffinity) SwaggerDoc() map[string]string {
+	return map_PodAffinity
+}
+
+var map_PodAffinityTerm = map[string]string{
+	"":              "Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running",
+	"labelSelector": "A label query over a set of resources, in this case pods.",
+	"namespaces":    "namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means \"this pod's namespace\"",
+	"topologyKey":   "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.",
+}
+
+func (PodAffinityTerm) SwaggerDoc() map[string]string {
+	return map_PodAffinityTerm
+}
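+
+// Editorial sketch: a PodAffinityTerm that co-locates pods per node. A real
+// term would also set LabelSelector (a metav1.LabelSelector, omitted here to
+// keep the sketch free of imports) to pick the pods to co-locate with:
+func examplePodAffinityTerm() PodAffinityTerm {
+	return PodAffinityTerm{
+		Namespaces:  []string{"default"},
+		TopologyKey: "kubernetes.io/hostname", // "same node" as the selected pods
+	}
+}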
+
+var map_PodAntiAffinity = map[string]string{
+	"": "Pod anti affinity is a group of inter pod anti affinity scheduling rules.",
+	"requiredDuringSchedulingIgnoredDuringExecution":  "If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.",
+	"preferredDuringSchedulingIgnoredDuringExecution": "The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which match the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.",
+}
+
+func (PodAntiAffinity) SwaggerDoc() map[string]string {
+	return map_PodAntiAffinity
+}
+
+var map_PodAttachOptions = map[string]string{
+	"":          "PodAttachOptions is the query options to a Pod's remote attach call.",
+	"stdin":     "Stdin if true, redirects the standard input stream of the pod for this call. Defaults to false.",
+	"stdout":    "Stdout if true indicates that stdout is to be redirected for the attach call. Defaults to true.",
+	"stderr":    "Stderr if true indicates that stderr is to be redirected for the attach call. Defaults to true.",
+	"tty":       "TTY if true indicates that a tty will be allocated for the attach call. This is passed through the container runtime so the tty is allocated on the worker node by the container runtime. Defaults to false.",
+	"container": "The container in which to execute the command. Defaults to only container if there is only one container in the pod.",
+}
+
+func (PodAttachOptions) SwaggerDoc() map[string]string {
+	return map_PodAttachOptions
+}
+
+var map_PodCondition = map[string]string{
+	"":                   "PodCondition contains details for the current condition of this pod.",
+	"type":               "Type is the type of the condition. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions",
+	"status":             "Status is the status of the condition. Can be True, False, Unknown. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions",
+	"lastProbeTime":      "Last time we probed the condition.",
+	"lastTransitionTime": "Last time the condition transitioned from one status to another.",
+	"reason":             "Unique, one-word, CamelCase reason for the condition's last transition.",
+	"message":            "Human-readable message indicating details about last transition.",
+}
+
+func (PodCondition) SwaggerDoc() map[string]string {
+	return map_PodCondition
+}
+
+var map_PodDNSConfig = map[string]string{
+	"":            "PodDNSConfig defines the DNS parameters of a pod in addition to those generated from DNSPolicy.",
+	"nameservers": "A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. Duplicated nameservers will be removed.",
+	"searches":    "A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed.",
+	"options":     "A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. Resolution options given in Options will override those that appear in the base DNSPolicy.",
+}
+
+func (PodDNSConfig) SwaggerDoc() map[string]string {
+	return map_PodDNSConfig
+}
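+
+// Editorial sketch: a PodDNSConfig adding one nameserver and an ndots
+// resolver option; pair it with dnsPolicy \"None\" to use only these values:
+func examplePodDNSConfig() PodDNSConfig {
+	ndots := "2"
+	return PodDNSConfig{
+		Nameservers: []string{"1.2.3.4"},
+		Options:     []PodDNSConfigOption{{Name: "ndots", Value: &ndots}},
+	}
+}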
+
+var map_PodDNSConfigOption = map[string]string{
+	"":     "PodDNSConfigOption defines DNS resolver options of a pod.",
+	"name": "Required.",
+}
+
+func (PodDNSConfigOption) SwaggerDoc() map[string]string {
+	return map_PodDNSConfigOption
+}
+
+var map_PodExecOptions = map[string]string{
+	"":          "PodExecOptions is the query options to a Pod's remote exec call.",
+	"stdin":     "Redirect the standard input stream of the pod for this call. Defaults to false.",
+	"stdout":    "Redirect the standard output stream of the pod for this call. Defaults to true.",
+	"stderr":    "Redirect the standard error stream of the pod for this call. Defaults to true.",
+	"tty":       "TTY if true indicates that a tty will be allocated for the exec call. Defaults to false.",
+	"container": "Container in which to execute the command. Defaults to only container if there is only one container in the pod.",
+	"command":   "Command is the remote command to execute. argv array. Not executed within a shell.",
+}
+
+func (PodExecOptions) SwaggerDoc() map[string]string {
+	return map_PodExecOptions
+}
+
+var map_PodList = map[string]string{
+	"":         "PodList is a list of Pods.",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
+	"items":    "List of pods. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md",
+}
+
+func (PodList) SwaggerDoc() map[string]string {
+	return map_PodList
+}
+
+var map_PodLogOptions = map[string]string{
+	"":             "PodLogOptions is the query options for a Pod's logs REST call.",
+	"container":    "The container for which to stream logs. Defaults to only container if there is one container in the pod.",
+	"follow":       "Follow the log stream of the pod. Defaults to false.",
+	"previous":     "Return previous terminated container logs. Defaults to false.",
+	"sinceSeconds": "A relative time in seconds before the current time from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.",
+	"sinceTime":    "An RFC3339 timestamp from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.",
+	"timestamps":   "If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false.",
+	"tailLines":    "If set, the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime",
+	"limitBytes":   "If set, the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit.",
+}
+
+func (PodLogOptions) SwaggerDoc() map[string]string {
+	return map_PodLogOptions
+}
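+
+// Editorial sketch: PodLogOptions for following the last 100 lines of a
+// container's log, i.e. the options behind `kubectl logs -f --tail=100`:
+func examplePodLogOptions() PodLogOptions {
+	tail := int64(100)
+	return PodLogOptions{
+		Container: "app", // hypothetical container name
+		Follow:    true,
+		TailLines: &tail,
+	}
+}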
+
+var map_PodPortForwardOptions = map[string]string{
+	"":      "PodPortForwardOptions is the query options to a Pod's port forward call when using WebSockets. The `port` query parameter must specify the port or ports (comma separated) to forward over. Port forwarding over SPDY does not use these options. It requires the port to be passed in the `port` header as part of the request.",
+	"ports": "List of ports to forward. Required when using WebSockets.",
+}
+
+func (PodPortForwardOptions) SwaggerDoc() map[string]string {
+	return map_PodPortForwardOptions
+}
+
+var map_PodProxyOptions = map[string]string{
+	"":     "PodProxyOptions is the query options to a Pod's proxy call.",
+	"path": "Path is the URL path to use for the current proxy request to pod.",
+}
+
+func (PodProxyOptions) SwaggerDoc() map[string]string {
+	return map_PodProxyOptions
+}
+
+var map_PodReadinessGate = map[string]string{
+	"":              "PodReadinessGate contains the reference to a pod condition",
+	"conditionType": "ConditionType refers to a condition in the pod's condition list with matching type.",
+}
+
+func (PodReadinessGate) SwaggerDoc() map[string]string {
+	return map_PodReadinessGate
+}
+
+var map_PodSecurityContext = map[string]string{
+	"":                   "PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext.  Field values of container.securityContext take precedence over field values of PodSecurityContext.",
+	"seLinuxOptions":     "The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container.  May also be set in SecurityContext.  If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.",
+	"runAsUser":          "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext.  If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.",
+	"runAsGroup":         "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext.  If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.",
+	"runAsNonRoot":       "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext.  If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
+	"supplementalGroups": "A list of groups applied to the first process run in each container, in addition to the container's primary GID.  If unspecified, no groups will be added to any container.",
+	"fsGroup":            "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\n\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----",
+	"sysctls":            "Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch.",
+}
+
+func (PodSecurityContext) SwaggerDoc() map[string]string {
+	return map_PodSecurityContext
+}
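+
+// Editorial sketch: a PodSecurityContext forcing all containers to run as a
+// non-root UID and giving volumes a shared fsGroup, per the docs above:
+func examplePodSecurityContext() PodSecurityContext {
+	uid, fsGroup := int64(1000), int64(2000) // hypothetical IDs
+	nonRoot := true
+	return PodSecurityContext{
+		RunAsUser:    &uid,
+		RunAsNonRoot: &nonRoot,
+		FSGroup:      &fsGroup,
+	}
+}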
+
+var map_PodSignature = map[string]string{
+	"":              "Describes the class of pods that should avoid this node. Exactly one field should be set.",
+	"podController": "Reference to controller whose pods should avoid this node.",
+}
+
+func (PodSignature) SwaggerDoc() map[string]string {
+	return map_PodSignature
+}
+
+var map_PodSpec = map[string]string{
+	"":                              "PodSpec is a description of a pod.",
+	"volumes":                       "List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes",
+	"initContainers":                "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/",
+	"containers":                    "List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated.",
+	"restartPolicy":                 "Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy",
+	"terminationGracePeriodSeconds": "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.",
+	"activeDeadlineSeconds":         "Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer.",
+	"dnsPolicy":                     "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.",
+	"nodeSelector":                  "NodeSelector is a selector which must be true for the pod to fit on a node, i.e. a selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/",
+	"serviceAccountName":            "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/",
+	"serviceAccount":                "DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.",
+	"automountServiceAccountToken":  "AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.",
+	"nodeName":                      "NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements.",
+	"hostNetwork":                   "Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.",
+	"hostPID":                       "Use the host's pid namespace. Optional: Default to false.",
+	"hostIPC":                       "Use the host's ipc namespace. Optional: Default to false.",
+	"shareProcessNamespace":         "Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false. This field is beta-level and may be disabled with the PodShareProcessNamespace feature.",
+	"securityContext":               "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty.  See type description for default values of each field.",
+	"imagePullSecrets":              "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod",
+	"hostname":                      "Specifies the hostname of the Pod. If not specified, the pod's hostname will be set to a system-defined value.",
+	"subdomain":                     "If specified, the fully qualified Pod hostname will be \"<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>\". If not specified, the pod will not have a domainname at all.",
+	"affinity":                      "If specified, the pod's scheduling constraints",
+	"schedulerName":                 "If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler.",
+	"tolerations":                   "If specified, the pod's tolerations.",
+	"hostAliases":                   "HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.",
+	"priorityClassName":             "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.",
+	"priority":                      "The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority.",
+	"dnsConfig":                     "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.",
+	"readinessGates":                "If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \"True\" More info: https://github.com/kubernetes/community/blob/master/keps/sig-network/0007-pod-ready%2B%2B.md",
+	"runtimeClassName":              "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod.  If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://github.com/kubernetes/community/blob/master/keps/sig-node/0014-runtime-class.md This is an alpha feature and may change in the future.",
+	"enableServiceLinks":            "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links.",
+}
+
+func (PodSpec) SwaggerDoc() map[string]string {
+	return map_PodSpec
+}
+
+var map_PodStatus = map[string]string{
+	"":                      "PodStatus represents information about the status of a pod. Status may trail the actual state of a system, especially if the node that hosts the pod cannot contact the control plane.",
+	"phase":                 "The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle. The conditions array, the reason and message fields, and the individual container status arrays contain more detail about the pod's status. There are five possible phase values:\n\nPending: The pod has been accepted by the Kubernetes system, but one or more of the container images has not been created. This includes time before being scheduled as well as time spent downloading images over the network, which could take a while. Running: The pod has been bound to a node, and all of the containers have been created. At least one container is still running, or is in the process of starting or restarting. Succeeded: All containers in the pod have terminated in success, and will not be restarted. Failed: All containers in the pod have terminated, and at least one container has terminated in failure. The container either exited with non-zero status or was terminated by the system. Unknown: For some reason the state of the pod could not be obtained, typically due to an error in communicating with the host of the pod.\n\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase",
+	"conditions":            "Current service state of pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions",
+	"message":               "A human readable message indicating details about why the pod is in this condition.",
+	"reason":                "A brief CamelCase message indicating details about why the pod is in this state. e.g. 'Evicted'",
+	"nominatedNodeName":     "nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be scheduled right away as preemption victims receive their graceful termination periods. This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to give the resources on this node to a higher priority pod that is created after preemption. As a result, this field may be different than PodSpec.nodeName when the pod is scheduled.",
+	"hostIP":                "IP address of the host to which the pod is assigned. Empty if not yet scheduled.",
+	"podIP":                 "IP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated.",
+	"startTime":             "RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod.",
+	"initContainerStatuses": "The list has one entry per init container in the manifest. The most recent successful init container will have ready = true, the most recently started container will have startTime set. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status",
+	"containerStatuses":     "The list has one entry per container in the manifest. Each entry is currently the output of `docker inspect`. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status",
+	"qosClass":              "The Quality of Service (QOS) classification assigned to the pod based on resource requirements. See PodQOSClass type for available QOS classes. More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md",
+}
+
+func (PodStatus) SwaggerDoc() map[string]string {
+	return map_PodStatus
+}
+
+var map_PodStatusResult = map[string]string{
+	"":         "PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encoded and decoded.",
+	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"status":   "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+}
+
+func (PodStatusResult) SwaggerDoc() map[string]string {
+	return map_PodStatusResult
+}
+
+var map_PodTemplate = map[string]string{
+	"":         "PodTemplate describes a template for creating copies of a predefined pod.",
+	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"template": "Template defines the pods that will be created from this pod template. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+}
+
+func (PodTemplate) SwaggerDoc() map[string]string {
+	return map_PodTemplate
+}
+
+var map_PodTemplateList = map[string]string{
+	"":         "PodTemplateList is a list of PodTemplates.",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
+	"items":    "List of pod templates",
+}
+
+func (PodTemplateList) SwaggerDoc() map[string]string {
+	return map_PodTemplateList
+}
+
+var map_PodTemplateSpec = map[string]string{
+	"":         "PodTemplateSpec describes the data a pod should have when created from a template",
+	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"spec":     "Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+}
+
+func (PodTemplateSpec) SwaggerDoc() map[string]string {
+	return map_PodTemplateSpec
+}
+
+var map_PortworxVolumeSource = map[string]string{
+	"":         "PortworxVolumeSource represents a Portworx volume resource.",
+	"volumeID": "VolumeID uniquely identifies a Portworx volume",
+	"fsType":   "FSType represents the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\". Implicitly inferred to be \"ext4\" if unspecified.",
+	"readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
+}
+
+func (PortworxVolumeSource) SwaggerDoc() map[string]string {
+	return map_PortworxVolumeSource
+}
+
+var map_Preconditions = map[string]string{
+	"":    "Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.",
+	"uid": "Specifies the target UID.",
+}
+
+func (Preconditions) SwaggerDoc() map[string]string {
+	return map_Preconditions
+}
+
+var map_PreferAvoidPodsEntry = map[string]string{
+	"":             "Describes a class of pods that should avoid this node.",
+	"podSignature": "The class of pods.",
+	"evictionTime": "Time at which this entry was added to the list.",
+	"reason":       "(brief) reason why this entry was added to the list.",
+	"message":      "Human readable message indicating why this entry was added to the list.",
+}
+
+func (PreferAvoidPodsEntry) SwaggerDoc() map[string]string {
+	return map_PreferAvoidPodsEntry
+}
+
+var map_PreferredSchedulingTerm = map[string]string{
+	"":           "An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).",
+	"weight":     "Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.",
+	"preference": "A node selector term, associated with the corresponding weight.",
+}
+
+func (PreferredSchedulingTerm) SwaggerDoc() map[string]string {
+	return map_PreferredSchedulingTerm
+}
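+
+// Editorial sketch: a PreferredSchedulingTerm that nudges (weight 50 of 100)
+// pods toward nodes labeled disktype=ssd without making it a hard requirement:
+func examplePreferredTerm() PreferredSchedulingTerm {
+	return PreferredSchedulingTerm{
+		Weight: 50,
+		Preference: NodeSelectorTerm{
+			MatchExpressions: []NodeSelectorRequirement{
+				{Key: "disktype", Operator: NodeSelectorOpIn, Values: []string{"ssd"}},
+			},
+		},
+	}
+}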
+
+var map_Probe = map[string]string{
+	"":                    "Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.",
+	"initialDelaySeconds": "Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
+	"timeoutSeconds":      "Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
+	"periodSeconds":       "How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.",
+	"successThreshold":    "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.",
+	"failureThreshold":    "Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.",
+}
+
+func (Probe) SwaggerDoc() map[string]string {
+	return map_Probe
+}
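+
+// Editorial sketch: an exec-based Probe using the thresholds documented
+// above (Handler is the action type embedded by Probe in this vendored API):
+func exampleProbe() Probe {
+	return Probe{
+		Handler:             Handler{Exec: &ExecAction{Command: []string{"cat", "/tmp/healthy"}}},
+		InitialDelaySeconds: 5,
+		PeriodSeconds:       10,
+		FailureThreshold:    3,
+	}
+}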
+
+var map_ProjectedVolumeSource = map[string]string{
+	"":            "Represents a projected volume source",
+	"sources":     "list of volume projections",
+	"defaultMode": "Mode bits to use on created files by default. Must be a value between 0 and 0777. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and as a result other mode bits could be set.",
+}
+
+func (ProjectedVolumeSource) SwaggerDoc() map[string]string {
+	return map_ProjectedVolumeSource
+}
+
+var map_QuobyteVolumeSource = map[string]string{
+	"":         "Represents a Quobyte mount that lasts the lifetime of a pod. Quobyte volumes do not support ownership management or SELinux relabeling.",
+	"registry": "Registry represents a single or multiple Quobyte Registry services specified as a string of host:port pairs (multiple entries are separated with commas) which acts as the central registry for volumes",
+	"volume":   "Volume is a string that references an already created Quobyte volume by name.",
+	"readOnly": "ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false.",
+	"user":     "User to map volume access to. Defaults to the service account user.",
+	"group":    "Group to map volume access to. Default is no group.",
+}
+
+func (QuobyteVolumeSource) SwaggerDoc() map[string]string {
+	return map_QuobyteVolumeSource
+}
+
+var map_RBDPersistentVolumeSource = map[string]string{
+	"":          "Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling.",
+	"monitors":  "A collection of Ceph monitors. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it",
+	"image":     "The rados image name. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it",
+	"fsType":    "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd",
+	"pool":      "The rados pool name. Default is rbd. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it",
+	"user":      "The rados user name. Default is admin. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it",
+	"keyring":   "Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it",
+	"secretRef": "SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it",
+	"readOnly":  "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it",
+}
+
+func (RBDPersistentVolumeSource) SwaggerDoc() map[string]string {
+	return map_RBDPersistentVolumeSource
+}
+
+var map_RBDVolumeSource = map[string]string{
+	"":          "Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling.",
+	"monitors":  "A collection of Ceph monitors. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it",
+	"image":     "The rados image name. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it",
+	"fsType":    "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd",
+	"pool":      "The rados pool name. Default is rbd. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it",
+	"user":      "The rados user name. Default is admin. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it",
+	"keyring":   "Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it",
+	"secretRef": "SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it",
+	"readOnly":  "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it",
+}
+
+func (RBDVolumeSource) SwaggerDoc() map[string]string {
+	return map_RBDVolumeSource
+}
+
+var map_RangeAllocation = map[string]string{
+	"":         "RangeAllocation is not a public type.",
+	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"range":    "Range is a string that identifies the range represented by 'data'.",
+	"data":     "Data is a bit array containing all allocated addresses in the previous segment.",
+}
+
+func (RangeAllocation) SwaggerDoc() map[string]string {
+	return map_RangeAllocation
+}
+
+var map_ReplicationController = map[string]string{
+	"":         "ReplicationController represents the configuration of a replication controller.",
+	"metadata": "If the Labels of a ReplicationController are empty, they are defaulted to be the same as the Pod(s) that the replication controller manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"spec":     "Spec defines the specification of the desired behavior of the replication controller. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+	"status":   "Status is the most recently observed status of the replication controller. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+}
+
+func (ReplicationController) SwaggerDoc() map[string]string {
+	return map_ReplicationController
+}
+
+var map_ReplicationControllerCondition = map[string]string{
+	"":                   "ReplicationControllerCondition describes the state of a replication controller at a certain point.",
+	"type":               "Type of replication controller condition.",
+	"status":             "Status of the condition, one of True, False, Unknown.",
+	"lastTransitionTime": "The last time the condition transitioned from one status to another.",
+	"reason":             "The reason for the condition's last transition.",
+	"message":            "A human readable message indicating details about the transition.",
+}
+
+func (ReplicationControllerCondition) SwaggerDoc() map[string]string {
+	return map_ReplicationControllerCondition
+}
+
+var map_ReplicationControllerList = map[string]string{
+	"":         "ReplicationControllerList is a collection of replication controllers.",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
+	"items":    "List of replication controllers. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller",
+}
+
+func (ReplicationControllerList) SwaggerDoc() map[string]string {
+	return map_ReplicationControllerList
+}
+
+var map_ReplicationControllerSpec = map[string]string{
+	"":                "ReplicationControllerSpec is the specification of a replication controller.",
+	"replicas":        "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller",
+	"minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its containers crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).",
+	"selector":        "Selector is a label query over pods that should match the Replicas count. If Selector is empty, it is defaulted to the labels present on the Pod template. Label keys and values that must match in order to be controlled by this replication controller, if empty defaulted to labels on Pod template. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
+	"template":        "Template is the object that describes the pod that will be created if insufficient replicas are detected. This takes precedence over a TemplateRef. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template",
+}
+
+func (ReplicationControllerSpec) SwaggerDoc() map[string]string {
+	return map_ReplicationControllerSpec
+}
+
+var map_ReplicationControllerStatus = map[string]string{
+	"":                     "ReplicationControllerStatus represents the current status of a replication controller.",
+	"replicas":             "Replicas is the most recently observed number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller",
+	"fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replication controller.",
+	"readyReplicas":        "The number of ready replicas for this replication controller.",
+	"availableReplicas":    "The number of available replicas (ready for at least minReadySeconds) for this replication controller.",
+	"observedGeneration":   "ObservedGeneration reflects the generation of the most recently observed replication controller.",
+	"conditions":           "Represents the latest available observations of a replication controller's current state.",
+}
+
+func (ReplicationControllerStatus) SwaggerDoc() map[string]string {
+	return map_ReplicationControllerStatus
+}
+
+var map_ResourceFieldSelector = map[string]string{
+	"":              "ResourceFieldSelector represents container resources (cpu, memory) and their output format",
+	"containerName": "Container name: required for volumes, optional for env vars",
+	"resource":      "Required: resource to select",
+	"divisor":       "Specifies the output format of the exposed resources, defaults to \"1\"",
+}
+
+func (ResourceFieldSelector) SwaggerDoc() map[string]string {
+	return map_ResourceFieldSelector
+}
+
+var map_ResourceQuota = map[string]string{
+	"":         "ResourceQuota sets aggregate quota restrictions enforced per namespace",
+	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"spec":     "Spec defines the desired quota. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+	"status":   "Status defines the actual enforced quota and its current usage. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+}
+
+func (ResourceQuota) SwaggerDoc() map[string]string {
+	return map_ResourceQuota
+}
+
+var map_ResourceQuotaList = map[string]string{
+	"":         "ResourceQuotaList is a list of ResourceQuota items.",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
+	"items":    "Items is a list of ResourceQuota objects. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/",
+}
+
+func (ResourceQuotaList) SwaggerDoc() map[string]string {
+	return map_ResourceQuotaList
+}
+
+var map_ResourceQuotaSpec = map[string]string{
+	"":              "ResourceQuotaSpec defines the desired hard limits to enforce for Quota.",
+	"hard":          "hard is the set of desired hard limits for each named resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/",
+	"scopes":        "A collection of filters that must match each object tracked by a quota. If not specified, the quota matches all objects.",
+	"scopeSelector": "scopeSelector is also a collection of filters like scopes that must match each object tracked by a quota but expressed using ScopeSelectorOperator in combination with possible values. For a resource to match, both scopes AND scopeSelector (if specified in spec), must be matched.",
+}
+
+func (ResourceQuotaSpec) SwaggerDoc() map[string]string {
+	return map_ResourceQuotaSpec
+}
+
+var map_ResourceQuotaStatus = map[string]string{
+	"":     "ResourceQuotaStatus defines the enforced hard limits and observed use.",
+	"hard": "Hard is the set of enforced hard limits for each named resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/",
+	"used": "Used is the current observed total usage of the resource in the namespace.",
+}
+
+func (ResourceQuotaStatus) SwaggerDoc() map[string]string {
+	return map_ResourceQuotaStatus
+}
+
+var map_ResourceRequirements = map[string]string{
+	"":         "ResourceRequirements describes the compute resource requirements.",
+	"limits":   "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/",
+	"requests": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/",
+}
+
+func (ResourceRequirements) SwaggerDoc() map[string]string {
+	return map_ResourceRequirements
+}
+
+var map_SELinuxOptions = map[string]string{
+	"":      "SELinuxOptions are the labels to be applied to the container",
+	"user":  "User is a SELinux user label that applies to the container.",
+	"role":  "Role is a SELinux role label that applies to the container.",
+	"type":  "Type is a SELinux type label that applies to the container.",
+	"level": "Level is SELinux level label that applies to the container.",
+}
+
+func (SELinuxOptions) SwaggerDoc() map[string]string {
+	return map_SELinuxOptions
+}
+
+var map_ScaleIOPersistentVolumeSource = map[string]string{
+	"":                 "ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume",
+	"gateway":          "The host address of the ScaleIO API Gateway.",
+	"system":           "The name of the storage system as configured in ScaleIO.",
+	"secretRef":        "SecretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail.",
+	"sslEnabled":       "Flag to enable/disable SSL communication with Gateway, default false",
+	"protectionDomain": "The name of the ScaleIO Protection Domain for the configured storage.",
+	"storagePool":      "The ScaleIO Storage Pool associated with the protection domain.",
+	"storageMode":      "Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned.",
+	"volumeName":       "The name of a volume already created in the ScaleIO system that is associated with this volume source.",
+	"fsType":           "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Default is \"xfs\"",
+	"readOnly":         "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
+}
+
+func (ScaleIOPersistentVolumeSource) SwaggerDoc() map[string]string {
+	return map_ScaleIOPersistentVolumeSource
+}
+
+var map_ScaleIOVolumeSource = map[string]string{
+	"":                 "ScaleIOVolumeSource represents a persistent ScaleIO volume",
+	"gateway":          "The host address of the ScaleIO API Gateway.",
+	"system":           "The name of the storage system as configured in ScaleIO.",
+	"secretRef":        "SecretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail.",
+	"sslEnabled":       "Flag to enable/disable SSL communication with Gateway, default false",
+	"protectionDomain": "The name of the ScaleIO Protection Domain for the configured storage.",
+	"storagePool":      "The ScaleIO Storage Pool associated with the protection domain.",
+	"storageMode":      "Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned.",
+	"volumeName":       "The name of a volume already created in the ScaleIO system that is associated with this volume source.",
+	"fsType":           "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Default is \"xfs\".",
+	"readOnly":         "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
+}
+
+func (ScaleIOVolumeSource) SwaggerDoc() map[string]string {
+	return map_ScaleIOVolumeSource
+}
+
+var map_ScopeSelector = map[string]string{
+	"":                 "A scope selector represents the AND of the selectors represented by the scoped-resource selector requirements.",
+	"matchExpressions": "A list of scope selector requirements by scope of the resources.",
+}
+
+func (ScopeSelector) SwaggerDoc() map[string]string {
+	return map_ScopeSelector
+}
+
+var map_ScopedResourceSelectorRequirement = map[string]string{
+	"":          "A scoped-resource selector requirement is a selector that contains values, a scope name, and an operator that relates the scope name and values.",
+	"scopeName": "The name of the scope that the selector applies to.",
+	"operator":  "Represents a scope's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist.",
+	"values":    "An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.",
+}
+
+func (ScopedResourceSelectorRequirement) SwaggerDoc() map[string]string {
+	return map_ScopedResourceSelectorRequirement
+}
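+
+// exampleScopeSelector is an illustrative sketch, not generated code and not
+// part of the vendored API. It shows how the scope-selector doc strings above
+// fit together: this selector matches only objects in the BestEffort quota
+// scope, and because the operator is Exists no values are required.
+func exampleScopeSelector() *ScopeSelector {
+	return &ScopeSelector{
+		MatchExpressions: []ScopedResourceSelectorRequirement{{
+			ScopeName: ResourceQuotaScopeBestEffort,
+			Operator:  ScopeSelectorOpExists,
+		}},
+	}
+}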
+
+var map_Secret = map[string]string{
+	"":           "Secret holds secret data of a certain type. The total bytes of the values in the Data field must be less than MaxSecretSize bytes.",
+	"metadata":   "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"data":       "Data contains the secret data. Each key must consist of alphanumeric characters, '-', '_' or '.'. The serialized form of the secret data is a base64 encoded string, representing the arbitrary (possibly non-string) data value here. Described in https://tools.ietf.org/html/rfc4648#section-4",
+	"stringData": "stringData allows specifying non-binary secret data in string form. It is provided as a write-only convenience method. All keys and values are merged into the data field on write, overwriting any existing values. It is never output when reading from the API.",
+	"type":       "Used to facilitate programmatic handling of secret data.",
+}
+
+func (Secret) SwaggerDoc() map[string]string {
+	return map_Secret
+}
+
+var map_SecretEnvSource = map[string]string{
+	"":         "SecretEnvSource selects a Secret to populate the environment variables with.\n\nThe contents of the target Secret's Data field will represent the key-value pairs as environment variables.",
+	"optional": "Specify whether the Secret must be defined",
+}
+
+func (SecretEnvSource) SwaggerDoc() map[string]string {
+	return map_SecretEnvSource
+}
+
+var map_SecretKeySelector = map[string]string{
+	"":         "SecretKeySelector selects a key of a Secret.",
+	"key":      "The key of the secret to select from.  Must be a valid secret key.",
+	"optional": "Specify whether the Secret or it's key must be defined",
+}
+
+func (SecretKeySelector) SwaggerDoc() map[string]string {
+	return map_SecretKeySelector
+}
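+
+// exampleSecretKeyRef is an illustrative sketch, not generated code. Per the
+// doc strings above, a SecretKeySelector names a Secret in the same namespace
+// and a key within it. The secret name "db-credentials" and key "password"
+// are hypothetical placeholders.
+func exampleSecretKeyRef() *SecretKeySelector {
+	return &SecretKeySelector{
+		LocalObjectReference: LocalObjectReference{Name: "db-credentials"},
+		Key:                  "password",
+	}
+}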
+
+var map_SecretList = map[string]string{
+	"":         "SecretList is a list of Secret.",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
+	"items":    "Items is a list of secret objects. More info: https://kubernetes.io/docs/concepts/configuration/secret",
+}
+
+func (SecretList) SwaggerDoc() map[string]string {
+	return map_SecretList
+}
+
+var map_SecretProjection = map[string]string{
+	"":         "Adapts a secret into a projected volume.\n\nThe contents of the target Secret's Data field will be presented in a projected volume as files using the keys in the Data field as the file names. Note that this is identical to a secret volume source without the default mode.",
+	"items":    "If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.",
+	"optional": "Specify whether the Secret or its key must be defined",
+}
+
+func (SecretProjection) SwaggerDoc() map[string]string {
+	return map_SecretProjection
+}
+
+var map_SecretReference = map[string]string{
+	"":          "SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace",
+	"name":      "Name is unique within a namespace to reference a secret resource.",
+	"namespace": "Namespace defines the space within which the secret name must be unique.",
+}
+
+func (SecretReference) SwaggerDoc() map[string]string {
+	return map_SecretReference
+}
+
+var map_SecretVolumeSource = map[string]string{
+	"":            "Adapts a Secret into a volume.\n\nThe contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. Secret volumes support ownership management and SELinux relabeling.",
+	"secretName":  "Name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret",
+	"items":       "If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.",
+	"defaultMode": "Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.",
+	"optional":    "Specify whether the Secret or it's keys must be defined",
+}
+
+func (SecretVolumeSource) SwaggerDoc() map[string]string {
+	return map_SecretVolumeSource
+}
+
+var map_SecurityContext = map[string]string{
+	"":                         "SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext.  When both are set, the values in SecurityContext take precedence.",
+	"capabilities":             "The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime.",
+	"privileged":               "Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false.",
+	"seLinuxOptions":           "The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container.  May also be set in PodSecurityContext.  If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
+	"runAsUser":                "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext.  If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
+	"runAsGroup":               "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext.  If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
+	"runAsNonRoot":             "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext.  If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
+	"readOnlyRootFilesystem":   "Whether this container has a read-only root filesystem. Default is false.",
+	"allowPrivilegeEscalation": "AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN",
+	"procMount":                "procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled.",
+}
+
+func (SecurityContext) SwaggerDoc() map[string]string {
+	return map_SecurityContext
+}
+
+var map_SerializedReference = map[string]string{
+	"":          "SerializedReference is a reference to serialized object.",
+	"reference": "The reference to an object in the system.",
+}
+
+func (SerializedReference) SwaggerDoc() map[string]string {
+	return map_SerializedReference
+}
+
+var map_Service = map[string]string{
+	"":         "Service is a named abstraction of software service (for example, mysql) consisting of local port (for example 3306) that the proxy listens on, and the selector that determines which pods will answer requests sent through the proxy.",
+	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"spec":     "Spec defines the behavior of a service. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+	"status":   "Most recently observed status of the service. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+}
+
+func (Service) SwaggerDoc() map[string]string {
+	return map_Service
+}
+
+var map_ServiceAccount = map[string]string{
+	"":                             "ServiceAccount binds together: * a name, understood by users, and perhaps by peripheral systems, for an identity * a principal that can be authenticated and authorized * a set of secrets",
+	"metadata":                     "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"secrets":                      "Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. More info: https://kubernetes.io/docs/concepts/configuration/secret",
+	"imagePullSecrets":             "ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod",
+	"automountServiceAccountToken": "AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. Can be overridden at the pod level.",
+}
+
+func (ServiceAccount) SwaggerDoc() map[string]string {
+	return map_ServiceAccount
+}
+
+var map_ServiceAccountList = map[string]string{
+	"":         "ServiceAccountList is a list of ServiceAccount objects",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
+	"items":    "List of ServiceAccounts. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/",
+}
+
+func (ServiceAccountList) SwaggerDoc() map[string]string {
+	return map_ServiceAccountList
+}
+
+var map_ServiceAccountTokenProjection = map[string]string{
+	"":                  "ServiceAccountTokenProjection represents a projected service account token volume. This projection can be used to insert a service account token into the pods runtime filesystem for use against APIs (Kubernetes API Server or otherwise).",
+	"audience":          "Audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver.",
+	"expirationSeconds": "ExpirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes.",
+	"path":              "Path is the path relative to the mount point of the file to project the token into.",
+}
+
+func (ServiceAccountTokenProjection) SwaggerDoc() map[string]string {
+	return map_ServiceAccountTokenProjection
+}
+
+var map_ServiceList = map[string]string{
+	"":         "ServiceList holds a list of services.",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
+	"items":    "List of services",
+}
+
+func (ServiceList) SwaggerDoc() map[string]string {
+	return map_ServiceList
+}
+
+var map_ServicePort = map[string]string{
+	"":           "ServicePort contains information on service's port.",
+	"name":       "The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. This maps to the 'Name' field in EndpointPort objects. Optional if only one ServicePort is defined on this service.",
+	"protocol":   "The IP protocol for this port. Supports \"TCP\", \"UDP\", and \"SCTP\". Default is TCP.",
+	"port":       "The port that will be exposed by this service.",
+	"targetPort": "Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service",
+	"nodePort":   "The port on each node on which this service is exposed when type=NodePort or LoadBalancer. Usually assigned by the system. If specified, it will be allocated to the service if unused or else creation of the service will fail. Default is to auto-allocate a port if the ServiceType of this Service requires one. More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport",
+}
+
+func (ServicePort) SwaggerDoc() map[string]string {
+	return map_ServicePort
+}
+
+var map_ServiceProxyOptions = map[string]string{
+	"":     "ServiceProxyOptions is the query options to a Service's proxy call.",
+	"path": "Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy.",
+}
+
+func (ServiceProxyOptions) SwaggerDoc() map[string]string {
+	return map_ServiceProxyOptions
+}
+
+var map_ServiceSpec = map[string]string{
+	"":                         "ServiceSpec describes the attributes that a user creates on a service.",
+	"ports":                    "The list of ports that are exposed by this service. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies",
+	"selector":                 "Route service traffic to pods with label keys and values matching this selector. If empty or not present, the service is assumed to have an external process managing its endpoints, which Kubernetes will not modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/",
+	"clusterIP":                "clusterIP is the IP address of the service and is usually assigned randomly by the master. If an address is specified manually and is not in use by others, it will be allocated to the service; otherwise, creation of the service will fail. This field can not be changed through updates. Valid values are \"None\", empty string (\"\"), or a valid IP address. \"None\" can be specified for headless services when proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies",
+	"type":                     "type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. \"ExternalName\" maps to the specified externalName. \"ClusterIP\" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object. If clusterIP is \"None\", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a stable IP. \"NodePort\" builds on ClusterIP and allocates a port on every node which routes to the clusterIP. \"LoadBalancer\" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the clusterIP. More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services ",
+	"externalIPs":              "externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service.  These IPs are not managed by Kubernetes.  The user is responsible for ensuring that traffic arrives at a node with this IP.  A common example is external load-balancers that are not part of the Kubernetes system.",
+	"sessionAffinity":          "Supports \"ClientIP\" and \"None\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies",
+	"loadBalancerIP":           "Only applies to Service Type: LoadBalancer LoadBalancer will get created with the IP specified in this field. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature.",
+	"loadBalancerSourceRanges": "If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature.\" More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/",
+	"externalName":             "externalName is the external reference that kubedns or equivalent will return as a CNAME record for this service. No proxying will be involved. Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) and requires Type to be ExternalName.",
+	"externalTrafficPolicy":    "externalTrafficPolicy denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints. \"Local\" preserves the client source IP and avoids a second hop for LoadBalancer and Nodeport type services, but risks potentially imbalanced traffic spreading. \"Cluster\" obscures the client source IP and may cause a second hop to another node, but should have good overall load-spreading.",
+	"healthCheckNodePort":      "healthCheckNodePort specifies the healthcheck nodePort for the service. If not specified, HealthCheckNodePort is created by the service api backend with the allocated nodePort. Will use user-specified nodePort value if specified by the client. Only effects when Type is set to LoadBalancer and ExternalTrafficPolicy is set to Local.",
+	"publishNotReadyAddresses": "publishNotReadyAddresses, when set to true, indicates that DNS implementations must publish the notReadyAddresses of subsets for the Endpoints associated with the Service. The default value is false. The primary use case for setting this field is to use a StatefulSet's Headless Service to propagate SRV records for its Pods without respect to their readiness for purpose of peer discovery.",
+	"sessionAffinityConfig":    "sessionAffinityConfig contains the configurations of session affinity.",
+}
+
+func (ServiceSpec) SwaggerDoc() map[string]string {
+	return map_ServiceSpec
+}
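+
+// exampleServiceSpec is an illustrative sketch, not generated code: a minimal
+// ClusterIP spec consistent with the doc strings above. Type defaults to
+// ClusterIP and sessionAffinity to None when unset; the selector routes
+// traffic to matching pods, and targetPort defaults to the port value.
+// The "app: example" label is a hypothetical placeholder.
+func exampleServiceSpec() ServiceSpec {
+	return ServiceSpec{
+		Type:     ServiceTypeClusterIP,
+		Selector: map[string]string{"app": "example"},
+		Ports:    []ServicePort{{Port: 80}},
+	}
+}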
+
+var map_ServiceStatus = map[string]string{
+	"":             "ServiceStatus represents the current status of a service.",
+	"loadBalancer": "LoadBalancer contains the current status of the load-balancer, if one is present.",
+}
+
+func (ServiceStatus) SwaggerDoc() map[string]string {
+	return map_ServiceStatus
+}
+
+var map_SessionAffinityConfig = map[string]string{
+	"":         "SessionAffinityConfig represents the configurations of session affinity.",
+	"clientIP": "clientIP contains the configurations of Client IP based session affinity.",
+}
+
+func (SessionAffinityConfig) SwaggerDoc() map[string]string {
+	return map_SessionAffinityConfig
+}
+
+var map_StorageOSPersistentVolumeSource = map[string]string{
+	"":                "Represents a StorageOS persistent volume resource.",
+	"volumeName":      "VolumeName is the human-readable name of the StorageOS volume.  Volume names are only unique within a namespace.",
+	"volumeNamespace": "VolumeNamespace specifies the scope of the volume within StorageOS.  If no namespace is specified then the Pod's namespace will be used.  This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \"default\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.",
+	"fsType":          "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.",
+	"readOnly":        "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
+	"secretRef":       "SecretRef specifies the secret to use for obtaining the StorageOS API credentials.  If not specified, default values will be attempted.",
+}
+
+func (StorageOSPersistentVolumeSource) SwaggerDoc() map[string]string {
+	return map_StorageOSPersistentVolumeSource
+}
+
+var map_StorageOSVolumeSource = map[string]string{
+	"":                "Represents a StorageOS persistent volume resource.",
+	"volumeName":      "VolumeName is the human-readable name of the StorageOS volume.  Volume names are only unique within a namespace.",
+	"volumeNamespace": "VolumeNamespace specifies the scope of the volume within StorageOS.  If no namespace is specified then the Pod's namespace will be used.  This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \"default\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.",
+	"fsType":          "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.",
+	"readOnly":        "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
+	"secretRef":       "SecretRef specifies the secret to use for obtaining the StorageOS API credentials.  If not specified, default values will be attempted.",
+}
+
+func (StorageOSVolumeSource) SwaggerDoc() map[string]string {
+	return map_StorageOSVolumeSource
+}
+
+var map_Sysctl = map[string]string{
+	"":      "Sysctl defines a kernel parameter to be set",
+	"name":  "Name of a property to set",
+	"value": "Value of a property to set",
+}
+
+func (Sysctl) SwaggerDoc() map[string]string {
+	return map_Sysctl
+}
+
+var map_TCPSocketAction = map[string]string{
+	"":     "TCPSocketAction describes an action based on opening a socket",
+	"port": "Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.",
+	"host": "Optional: Host name to connect to, defaults to the pod IP.",
+}
+
+func (TCPSocketAction) SwaggerDoc() map[string]string {
+	return map_TCPSocketAction
+}
+
+var map_Taint = map[string]string{
+	"":          "The node this Taint is attached to has the \"effect\" on any pod that does not tolerate the Taint.",
+	"key":       "Required. The taint key to be applied to a node.",
+	"value":     "Required. The taint value corresponding to the taint key.",
+	"effect":    "Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule and NoExecute.",
+	"timeAdded": "TimeAdded represents the time at which the taint was added. It is only written for NoExecute taints.",
+}
+
+func (Taint) SwaggerDoc() map[string]string {
+	return map_Taint
+}
+
+var map_Toleration = map[string]string{
+	"":                  "The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.",
+	"key":               "Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.",
+	"operator":          "Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.",
+	"value":             "Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.",
+	"effect":            "Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.",
+	"tolerationSeconds": "TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.",
+}
+
+func (Toleration) SwaggerDoc() map[string]string {
+	return map_Toleration
+}
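+
+// exampleToleration is an illustrative sketch, not generated code, of the
+// semantics described above: an empty key with operator Exists matches all
+// taint keys, and tolerationSeconds bounds how long a NoExecute taint is
+// tolerated before the pod is evicted.
+func exampleToleration(seconds int64) Toleration {
+	return Toleration{
+		Operator:          TolerationOpExists,
+		Effect:            TaintEffectNoExecute,
+		TolerationSeconds: &seconds,
+	}
+}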
+
+var map_TopologySelectorLabelRequirement = map[string]string{
+	"":       "A topology selector requirement is a selector that matches given label. This is an alpha feature and may change in the future.",
+	"key":    "The label key that the selector applies to.",
+	"values": "An array of string values. One value must match the label to be selected. Each entry in Values is ORed.",
+}
+
+func (TopologySelectorLabelRequirement) SwaggerDoc() map[string]string {
+	return map_TopologySelectorLabelRequirement
+}
+
+var map_TopologySelectorTerm = map[string]string{
+	"":                      "A topology selector term represents the result of label queries. A null or empty topology selector term matches no objects. The requirements of them are ANDed. It provides a subset of functionality as NodeSelectorTerm. This is an alpha feature and may change in the future.",
+	"matchLabelExpressions": "A list of topology selector requirements by labels.",
+}
+
+func (TopologySelectorTerm) SwaggerDoc() map[string]string {
+	return map_TopologySelectorTerm
+}
+
+var map_TypedLocalObjectReference = map[string]string{
+	"":         "TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace.",
+	"apiGroup": "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.",
+	"kind":     "Kind is the type of resource being referenced",
+	"name":     "Name is the name of resource being referenced",
+}
+
+func (TypedLocalObjectReference) SwaggerDoc() map[string]string {
+	return map_TypedLocalObjectReference
+}
+
+var map_Volume = map[string]string{
+	"":     "Volume represents a named volume in a pod that may be accessed by any container in the pod.",
+	"name": "Volume's name. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names",
+}
+
+func (Volume) SwaggerDoc() map[string]string {
+	return map_Volume
+}
+
+var map_VolumeDevice = map[string]string{
+	"":           "volumeDevice describes a mapping of a raw block device within a container.",
+	"name":       "name must match the name of a persistentVolumeClaim in the pod",
+	"devicePath": "devicePath is the path inside of the container that the device will be mapped to.",
+}
+
+func (VolumeDevice) SwaggerDoc() map[string]string {
+	return map_VolumeDevice
+}
+
+var map_VolumeMount = map[string]string{
+	"":                 "VolumeMount describes a mounting of a Volume within a container.",
+	"name":             "This must match the Name of a Volume.",
+	"readOnly":         "Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.",
+	"mountPath":        "Path within the container at which the volume should be mounted.  Must not contain ':'.",
+	"subPath":          "Path within the volume from which the container's volume should be mounted. Defaults to \"\" (volume's root).",
+	"mountPropagation": "mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.",
+}
+
+func (VolumeMount) SwaggerDoc() map[string]string {
+	return map_VolumeMount
+}
+
+var map_VolumeNodeAffinity = map[string]string{
+	"":         "VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from.",
+	"required": "Required specifies hard node constraints that must be met.",
+}
+
+func (VolumeNodeAffinity) SwaggerDoc() map[string]string {
+	return map_VolumeNodeAffinity
+}
+
+var map_VolumeProjection = map[string]string{
+	"":                    "Projection that may be projected along with other supported volume types",
+	"secret":              "information about the secret data to project",
+	"downwardAPI":         "information about the downwardAPI data to project",
+	"configMap":           "information about the configMap data to project",
+	"serviceAccountToken": "information about the serviceAccountToken data to project",
+}
+
+func (VolumeProjection) SwaggerDoc() map[string]string {
+	return map_VolumeProjection
+}
+
+var map_VolumeSource = map[string]string{
+	"":                      "Represents the source of a volume to mount. Only one of its members may be specified.",
+	"hostPath":              "HostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath",
+	"emptyDir":              "EmptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir",
+	"gcePersistentDisk":     "GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk",
+	"awsElasticBlockStore":  "AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore",
+	"gitRepo":               "GitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.",
+	"secret":                "Secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret",
+	"nfs":                   "NFS represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs",
+	"iscsi":                 "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md",
+	"glusterfs":             "Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md",
+	"persistentVolumeClaim": "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims",
+	"rbd":                   "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md",
+	"flexVolume":            "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.",
+	"cinder":                "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md",
+	"cephfs":                "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime",
+	"flocker":               "Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running",
+	"downwardAPI":           "DownwardAPI represents downward API about the pod that should populate this volume",
+	"fc":                    "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.",
+	"azureFile":             "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.",
+	"configMap":             "ConfigMap represents a configMap that should populate this volume",
+	"vsphereVolume":         "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine",
+	"quobyte":               "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime",
+	"azureDisk":             "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.",
+	"photonPersistentDisk":  "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine",
+	"projected":             "Items for all in one resources secrets, configmaps, and downward API",
+	"portworxVolume":        "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine",
+	"scaleIO":               "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.",
+	"storageos":             "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.",
+}
+
+func (VolumeSource) SwaggerDoc() map[string]string {
+	return map_VolumeSource
+}
+
+var map_VsphereVirtualDiskVolumeSource = map[string]string{
+	"":                  "Represents a vSphere volume resource.",
+	"volumePath":        "Path that identifies vSphere volume vmdk",
+	"fsType":            "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.",
+	"storagePolicyName": "Storage Policy Based Management (SPBM) profile name.",
+	"storagePolicyID":   "Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.",
+}
+
+func (VsphereVirtualDiskVolumeSource) SwaggerDoc() map[string]string {
+	return map_VsphereVirtualDiskVolumeSource
+}
+
+var map_WeightedPodAffinityTerm = map[string]string{
+	"":                "The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)",
+	"weight":          "weight associated with matching the corresponding podAffinityTerm, in the range 1-100.",
+	"podAffinityTerm": "Required. A pod affinity term, associated with the corresponding weight.",
+}
+
+func (WeightedPodAffinityTerm) SwaggerDoc() map[string]string {
+	return map_WeightedPodAffinityTerm
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
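+
+// exampleFieldDoc is an illustrative sketch, not generated code, of how the
+// SwaggerDoc maps above can be consumed: the empty key holds the type's own
+// description, and each field's JSON name maps to that field's doc string.
+func exampleFieldDoc() (typeDoc, fieldDoc string) {
+	docs := ServicePort{}.SwaggerDoc()
+	return docs[""], docs["nodePort"]
+}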
diff --git a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..4219c95
--- /dev/null
+++ b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go
@@ -0,0 +1,5430 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	types "k8s.io/apimachinery/pkg/types"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSElasticBlockStoreVolumeSource) DeepCopyInto(out *AWSElasticBlockStoreVolumeSource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSElasticBlockStoreVolumeSource.
+func (in *AWSElasticBlockStoreVolumeSource) DeepCopy() *AWSElasticBlockStoreVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(AWSElasticBlockStoreVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Affinity) DeepCopyInto(out *Affinity) {
+	*out = *in
+	if in.NodeAffinity != nil {
+		in, out := &in.NodeAffinity, &out.NodeAffinity
+		*out = new(NodeAffinity)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.PodAffinity != nil {
+		in, out := &in.PodAffinity, &out.PodAffinity
+		*out = new(PodAffinity)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.PodAntiAffinity != nil {
+		in, out := &in.PodAntiAffinity, &out.PodAntiAffinity
+		*out = new(PodAntiAffinity)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Affinity.
+func (in *Affinity) DeepCopy() *Affinity {
+	if in == nil {
+		return nil
+	}
+	out := new(Affinity)
+	in.DeepCopyInto(out)
+	return out
+}
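+
+// exampleAffinityCopy is an illustrative sketch, not generated code, of why
+// the generated DeepCopy helpers exist: the copy shares no pointers with the
+// original, so mutating the copy leaves the original untouched.
+func exampleAffinityCopy() bool {
+	orig := &Affinity{NodeAffinity: &NodeAffinity{}}
+	cp := orig.DeepCopy()
+	cp.NodeAffinity = nil
+	return orig.NodeAffinity != nil // true: the original still holds its pointer
+}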
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AttachedVolume) DeepCopyInto(out *AttachedVolume) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AttachedVolume.
+func (in *AttachedVolume) DeepCopy() *AttachedVolume {
+	if in == nil {
+		return nil
+	}
+	out := new(AttachedVolume)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AvoidPods) DeepCopyInto(out *AvoidPods) {
+	*out = *in
+	if in.PreferAvoidPods != nil {
+		in, out := &in.PreferAvoidPods, &out.PreferAvoidPods
+		*out = make([]PreferAvoidPodsEntry, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AvoidPods.
+func (in *AvoidPods) DeepCopy() *AvoidPods {
+	if in == nil {
+		return nil
+	}
+	out := new(AvoidPods)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AzureDiskVolumeSource) DeepCopyInto(out *AzureDiskVolumeSource) {
+	*out = *in
+	if in.CachingMode != nil {
+		in, out := &in.CachingMode, &out.CachingMode
+		*out = new(AzureDataDiskCachingMode)
+		**out = **in
+	}
+	if in.FSType != nil {
+		in, out := &in.FSType, &out.FSType
+		*out = new(string)
+		**out = **in
+	}
+	if in.ReadOnly != nil {
+		in, out := &in.ReadOnly, &out.ReadOnly
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Kind != nil {
+		in, out := &in.Kind, &out.Kind
+		*out = new(AzureDataDiskKind)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureDiskVolumeSource.
+func (in *AzureDiskVolumeSource) DeepCopy() *AzureDiskVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(AzureDiskVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AzureFilePersistentVolumeSource) DeepCopyInto(out *AzureFilePersistentVolumeSource) {
+	*out = *in
+	if in.SecretNamespace != nil {
+		in, out := &in.SecretNamespace, &out.SecretNamespace
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureFilePersistentVolumeSource.
+func (in *AzureFilePersistentVolumeSource) DeepCopy() *AzureFilePersistentVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(AzureFilePersistentVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AzureFileVolumeSource) DeepCopyInto(out *AzureFileVolumeSource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureFileVolumeSource.
+func (in *AzureFileVolumeSource) DeepCopy() *AzureFileVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(AzureFileVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Binding) DeepCopyInto(out *Binding) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	out.Target = in.Target
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Binding.
+func (in *Binding) DeepCopy() *Binding {
+	if in == nil {
+		return nil
+	}
+	out := new(Binding)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Binding) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
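+
+// exampleCopyAsObject is an illustrative sketch, not generated code: because
+// types like Binding implement DeepCopyObject, callers holding only a
+// runtime.Object can take a full deep copy without knowing the concrete type.
+func exampleCopyAsObject(obj runtime.Object) runtime.Object {
+	if obj == nil {
+		return nil
+	}
+	return obj.DeepCopyObject()
+}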
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CSIPersistentVolumeSource) DeepCopyInto(out *CSIPersistentVolumeSource) {
+	*out = *in
+	if in.VolumeAttributes != nil {
+		in, out := &in.VolumeAttributes, &out.VolumeAttributes
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.ControllerPublishSecretRef != nil {
+		in, out := &in.ControllerPublishSecretRef, &out.ControllerPublishSecretRef
+		*out = new(SecretReference)
+		**out = **in
+	}
+	if in.NodeStageSecretRef != nil {
+		in, out := &in.NodeStageSecretRef, &out.NodeStageSecretRef
+		*out = new(SecretReference)
+		**out = **in
+	}
+	if in.NodePublishSecretRef != nil {
+		in, out := &in.NodePublishSecretRef, &out.NodePublishSecretRef
+		*out = new(SecretReference)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIPersistentVolumeSource.
+func (in *CSIPersistentVolumeSource) DeepCopy() *CSIPersistentVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(CSIPersistentVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Capabilities) DeepCopyInto(out *Capabilities) {
+	*out = *in
+	if in.Add != nil {
+		in, out := &in.Add, &out.Add
+		*out = make([]Capability, len(*in))
+		copy(*out, *in)
+	}
+	if in.Drop != nil {
+		in, out := &in.Drop, &out.Drop
+		*out = make([]Capability, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Capabilities.
+func (in *Capabilities) DeepCopy() *Capabilities {
+	if in == nil {
+		return nil
+	}
+	out := new(Capabilities)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CephFSPersistentVolumeSource) DeepCopyInto(out *CephFSPersistentVolumeSource) {
+	*out = *in
+	if in.Monitors != nil {
+		in, out := &in.Monitors, &out.Monitors
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.SecretRef != nil {
+		in, out := &in.SecretRef, &out.SecretRef
+		*out = new(SecretReference)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephFSPersistentVolumeSource.
+func (in *CephFSPersistentVolumeSource) DeepCopy() *CephFSPersistentVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(CephFSPersistentVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CephFSVolumeSource) DeepCopyInto(out *CephFSVolumeSource) {
+	*out = *in
+	if in.Monitors != nil {
+		in, out := &in.Monitors, &out.Monitors
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.SecretRef != nil {
+		in, out := &in.SecretRef, &out.SecretRef
+		*out = new(LocalObjectReference)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephFSVolumeSource.
+func (in *CephFSVolumeSource) DeepCopy() *CephFSVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(CephFSVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CinderPersistentVolumeSource) DeepCopyInto(out *CinderPersistentVolumeSource) {
+	*out = *in
+	if in.SecretRef != nil {
+		in, out := &in.SecretRef, &out.SecretRef
+		*out = new(SecretReference)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CinderPersistentVolumeSource.
+func (in *CinderPersistentVolumeSource) DeepCopy() *CinderPersistentVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(CinderPersistentVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CinderVolumeSource) DeepCopyInto(out *CinderVolumeSource) {
+	*out = *in
+	if in.SecretRef != nil {
+		in, out := &in.SecretRef, &out.SecretRef
+		*out = new(LocalObjectReference)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CinderVolumeSource.
+func (in *CinderVolumeSource) DeepCopy() *CinderVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(CinderVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClientIPConfig) DeepCopyInto(out *ClientIPConfig) {
+	*out = *in
+	if in.TimeoutSeconds != nil {
+		in, out := &in.TimeoutSeconds, &out.TimeoutSeconds
+		*out = new(int32)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientIPConfig.
+func (in *ClientIPConfig) DeepCopy() *ClientIPConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(ClientIPConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ComponentCondition) DeepCopyInto(out *ComponentCondition) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentCondition.
+func (in *ComponentCondition) DeepCopy() *ComponentCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(ComponentCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ComponentStatus) DeepCopyInto(out *ComponentStatus) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]ComponentCondition, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentStatus.
+func (in *ComponentStatus) DeepCopy() *ComponentStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ComponentStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ComponentStatus) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
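+
+// Editorial note: DeepCopyObject wraps DeepCopy to satisfy the
+// k8s.io/apimachinery runtime.Object interface, which is what lets
+// generic machinery (schemes, informers, client-go caches) clone any
+// registered type without knowing its concrete Go type. A hedged sketch
+// of such a call site, with hypothetical variable names:
+//
+//	var obj runtime.Object = componentStatus // any registered API type
+//	clone := obj.DeepCopyObject()            // concrete type is preserved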
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ComponentStatusList) DeepCopyInto(out *ComponentStatusList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]ComponentStatus, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentStatusList.
+func (in *ComponentStatusList) DeepCopy() *ComponentStatusList {
+	if in == nil {
+		return nil
+	}
+	out := new(ComponentStatusList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ComponentStatusList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigMap) DeepCopyInto(out *ConfigMap) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	if in.Data != nil {
+		in, out := &in.Data, &out.Data
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.BinaryData != nil {
+		in, out := &in.BinaryData, &out.BinaryData
+		*out = make(map[string][]byte, len(*in))
+		for key, val := range *in {
+			var outVal []byte
+			if val != nil {
+				outVal = make([]byte, len(val))
+				copy(outVal, val)
+			}
+			(*out)[key] = outVal
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMap.
+func (in *ConfigMap) DeepCopy() *ConfigMap {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigMap)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ConfigMap) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
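+
+// Editorial note on the map handling above: Data (map[string]string) is
+// cloned by allocating a fresh map and assigning each entry, since Go
+// strings are immutable and safe to share; BinaryData (map[string][]byte)
+// additionally clones every non-nil value slice, preserving nil entries
+// as nil so the copy serializes identically to the original.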
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigMapEnvSource) DeepCopyInto(out *ConfigMapEnvSource) {
+	*out = *in
+	out.LocalObjectReference = in.LocalObjectReference
+	if in.Optional != nil {
+		in, out := &in.Optional, &out.Optional
+		*out = new(bool)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapEnvSource.
+func (in *ConfigMapEnvSource) DeepCopy() *ConfigMapEnvSource {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigMapEnvSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigMapKeySelector) DeepCopyInto(out *ConfigMapKeySelector) {
+	*out = *in
+	out.LocalObjectReference = in.LocalObjectReference
+	if in.Optional != nil {
+		in, out := &in.Optional, &out.Optional
+		*out = new(bool)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapKeySelector.
+func (in *ConfigMapKeySelector) DeepCopy() *ConfigMapKeySelector {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigMapKeySelector)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigMapList) DeepCopyInto(out *ConfigMapList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]ConfigMap, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapList.
+func (in *ConfigMapList) DeepCopy() *ConfigMapList {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigMapList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ConfigMapList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigMapNodeConfigSource) DeepCopyInto(out *ConfigMapNodeConfigSource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapNodeConfigSource.
+func (in *ConfigMapNodeConfigSource) DeepCopy() *ConfigMapNodeConfigSource {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigMapNodeConfigSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigMapProjection) DeepCopyInto(out *ConfigMapProjection) {
+	*out = *in
+	out.LocalObjectReference = in.LocalObjectReference
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]KeyToPath, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Optional != nil {
+		in, out := &in.Optional, &out.Optional
+		*out = new(bool)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapProjection.
+func (in *ConfigMapProjection) DeepCopy() *ConfigMapProjection {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigMapProjection)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigMapVolumeSource) DeepCopyInto(out *ConfigMapVolumeSource) {
+	*out = *in
+	out.LocalObjectReference = in.LocalObjectReference
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]KeyToPath, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.DefaultMode != nil {
+		in, out := &in.DefaultMode, &out.DefaultMode
+		*out = new(int32)
+		**out = **in
+	}
+	if in.Optional != nil {
+		in, out := &in.Optional, &out.Optional
+		*out = new(bool)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapVolumeSource.
+func (in *ConfigMapVolumeSource) DeepCopy() *ConfigMapVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigMapVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Container) DeepCopyInto(out *Container) {
+	*out = *in
+	if in.Command != nil {
+		in, out := &in.Command, &out.Command
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Args != nil {
+		in, out := &in.Args, &out.Args
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Ports != nil {
+		in, out := &in.Ports, &out.Ports
+		*out = make([]ContainerPort, len(*in))
+		copy(*out, *in)
+	}
+	if in.EnvFrom != nil {
+		in, out := &in.EnvFrom, &out.EnvFrom
+		*out = make([]EnvFromSource, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Env != nil {
+		in, out := &in.Env, &out.Env
+		*out = make([]EnvVar, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	in.Resources.DeepCopyInto(&out.Resources)
+	if in.VolumeMounts != nil {
+		in, out := &in.VolumeMounts, &out.VolumeMounts
+		*out = make([]VolumeMount, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.VolumeDevices != nil {
+		in, out := &in.VolumeDevices, &out.VolumeDevices
+		*out = make([]VolumeDevice, len(*in))
+		copy(*out, *in)
+	}
+	if in.LivenessProbe != nil {
+		in, out := &in.LivenessProbe, &out.LivenessProbe
+		*out = new(Probe)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ReadinessProbe != nil {
+		in, out := &in.ReadinessProbe, &out.ReadinessProbe
+		*out = new(Probe)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Lifecycle != nil {
+		in, out := &in.Lifecycle, &out.Lifecycle
+		*out = new(Lifecycle)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SecurityContext != nil {
+		in, out := &in.SecurityContext, &out.SecurityContext
+		*out = new(SecurityContext)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Container.
+func (in *Container) DeepCopy() *Container {
+	if in == nil {
+		return nil
+	}
+	out := new(Container)
+	in.DeepCopyInto(out)
+	return out
+}
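+
+// Editorial note: Container shows how the generator picks a strategy per
+// field. Slices of flat element types ([]string, []ContainerPort,
+// []VolumeDevice) are bulk-copied with copy(); slices whose elements hold
+// references ([]EnvVar, []VolumeMount) get an element-wise DeepCopyInto
+// loop; optional pointer fields (Probe, Lifecycle, SecurityContext) are
+// allocated with new and filled via the pointee's own DeepCopyInto.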
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ContainerImage) DeepCopyInto(out *ContainerImage) {
+	*out = *in
+	if in.Names != nil {
+		in, out := &in.Names, &out.Names
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerImage.
+func (in *ContainerImage) DeepCopy() *ContainerImage {
+	if in == nil {
+		return nil
+	}
+	out := new(ContainerImage)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ContainerPort) DeepCopyInto(out *ContainerPort) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerPort.
+func (in *ContainerPort) DeepCopy() *ContainerPort {
+	if in == nil {
+		return nil
+	}
+	out := new(ContainerPort)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ContainerState) DeepCopyInto(out *ContainerState) {
+	*out = *in
+	if in.Waiting != nil {
+		in, out := &in.Waiting, &out.Waiting
+		*out = new(ContainerStateWaiting)
+		**out = **in
+	}
+	if in.Running != nil {
+		in, out := &in.Running, &out.Running
+		*out = new(ContainerStateRunning)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Terminated != nil {
+		in, out := &in.Terminated, &out.Terminated
+		*out = new(ContainerStateTerminated)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerState.
+func (in *ContainerState) DeepCopy() *ContainerState {
+	if in == nil {
+		return nil
+	}
+	out := new(ContainerState)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ContainerStateRunning) DeepCopyInto(out *ContainerStateRunning) {
+	*out = *in
+	in.StartedAt.DeepCopyInto(&out.StartedAt)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStateRunning.
+func (in *ContainerStateRunning) DeepCopy() *ContainerStateRunning {
+	if in == nil {
+		return nil
+	}
+	out := new(ContainerStateRunning)
+	in.DeepCopyInto(out)
+	return out
+}
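+
+// Editorial note: StartedAt is a metav1.Time value, not a pointer, but it
+// defines its own DeepCopyInto, so the generator delegates to that method
+// rather than assuming plain assignment is safe. Any embedded type that
+// provides its own deepcopy methods is handled this way.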
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ContainerStateTerminated) DeepCopyInto(out *ContainerStateTerminated) {
+	*out = *in
+	in.StartedAt.DeepCopyInto(&out.StartedAt)
+	in.FinishedAt.DeepCopyInto(&out.FinishedAt)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStateTerminated.
+func (in *ContainerStateTerminated) DeepCopy() *ContainerStateTerminated {
+	if in == nil {
+		return nil
+	}
+	out := new(ContainerStateTerminated)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ContainerStateWaiting) DeepCopyInto(out *ContainerStateWaiting) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStateWaiting.
+func (in *ContainerStateWaiting) DeepCopy() *ContainerStateWaiting {
+	if in == nil {
+		return nil
+	}
+	out := new(ContainerStateWaiting)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ContainerStatus) DeepCopyInto(out *ContainerStatus) {
+	*out = *in
+	in.State.DeepCopyInto(&out.State)
+	in.LastTerminationState.DeepCopyInto(&out.LastTerminationState)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStatus.
+func (in *ContainerStatus) DeepCopy() *ContainerStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ContainerStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DaemonEndpoint) DeepCopyInto(out *DaemonEndpoint) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonEndpoint.
+func (in *DaemonEndpoint) DeepCopy() *DaemonEndpoint {
+	if in == nil {
+		return nil
+	}
+	out := new(DaemonEndpoint)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DownwardAPIProjection) DeepCopyInto(out *DownwardAPIProjection) {
+	*out = *in
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]DownwardAPIVolumeFile, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownwardAPIProjection.
+func (in *DownwardAPIProjection) DeepCopy() *DownwardAPIProjection {
+	if in == nil {
+		return nil
+	}
+	out := new(DownwardAPIProjection)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DownwardAPIVolumeFile) DeepCopyInto(out *DownwardAPIVolumeFile) {
+	*out = *in
+	if in.FieldRef != nil {
+		in, out := &in.FieldRef, &out.FieldRef
+		*out = new(ObjectFieldSelector)
+		**out = **in
+	}
+	if in.ResourceFieldRef != nil {
+		in, out := &in.ResourceFieldRef, &out.ResourceFieldRef
+		*out = new(ResourceFieldSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Mode != nil {
+		in, out := &in.Mode, &out.Mode
+		*out = new(int32)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownwardAPIVolumeFile.
+func (in *DownwardAPIVolumeFile) DeepCopy() *DownwardAPIVolumeFile {
+	if in == nil {
+		return nil
+	}
+	out := new(DownwardAPIVolumeFile)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DownwardAPIVolumeSource) DeepCopyInto(out *DownwardAPIVolumeSource) {
+	*out = *in
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]DownwardAPIVolumeFile, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.DefaultMode != nil {
+		in, out := &in.DefaultMode, &out.DefaultMode
+		*out = new(int32)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownwardAPIVolumeSource.
+func (in *DownwardAPIVolumeSource) DeepCopy() *DownwardAPIVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(DownwardAPIVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EmptyDirVolumeSource) DeepCopyInto(out *EmptyDirVolumeSource) {
+	*out = *in
+	if in.SizeLimit != nil {
+		in, out := &in.SizeLimit, &out.SizeLimit
+		x := (*in).DeepCopy()
+		*out = &x
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmptyDirVolumeSource.
+func (in *EmptyDirVolumeSource) DeepCopy() *EmptyDirVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(EmptyDirVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
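+
+// Editorial note: SizeLimit is a *resource.Quantity, and Quantity's
+// DeepCopy returns a value rather than writing through a pointer, hence
+// the `x := (*in).DeepCopy(); *out = &x` shape here instead of the usual
+// `*out = new(T); **out = **in`.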
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EndpointAddress) DeepCopyInto(out *EndpointAddress) {
+	*out = *in
+	if in.NodeName != nil {
+		in, out := &in.NodeName, &out.NodeName
+		*out = new(string)
+		**out = **in
+	}
+	if in.TargetRef != nil {
+		in, out := &in.TargetRef, &out.TargetRef
+		*out = new(ObjectReference)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointAddress.
+func (in *EndpointAddress) DeepCopy() *EndpointAddress {
+	if in == nil {
+		return nil
+	}
+	out := new(EndpointAddress)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EndpointPort) DeepCopyInto(out *EndpointPort) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointPort.
+func (in *EndpointPort) DeepCopy() *EndpointPort {
+	if in == nil {
+		return nil
+	}
+	out := new(EndpointPort)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EndpointSubset) DeepCopyInto(out *EndpointSubset) {
+	*out = *in
+	if in.Addresses != nil {
+		in, out := &in.Addresses, &out.Addresses
+		*out = make([]EndpointAddress, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.NotReadyAddresses != nil {
+		in, out := &in.NotReadyAddresses, &out.NotReadyAddresses
+		*out = make([]EndpointAddress, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Ports != nil {
+		in, out := &in.Ports, &out.Ports
+		*out = make([]EndpointPort, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointSubset.
+func (in *EndpointSubset) DeepCopy() *EndpointSubset {
+	if in == nil {
+		return nil
+	}
+	out := new(EndpointSubset)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Endpoints) DeepCopyInto(out *Endpoints) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	if in.Subsets != nil {
+		in, out := &in.Subsets, &out.Subsets
+		*out = make([]EndpointSubset, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoints.
+func (in *Endpoints) DeepCopy() *Endpoints {
+	if in == nil {
+		return nil
+	}
+	out := new(Endpoints)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Endpoints) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EndpointsList) DeepCopyInto(out *EndpointsList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Endpoints, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointsList.
+func (in *EndpointsList) DeepCopy() *EndpointsList {
+	if in == nil {
+		return nil
+	}
+	out := new(EndpointsList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *EndpointsList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EnvFromSource) DeepCopyInto(out *EnvFromSource) {
+	*out = *in
+	if in.ConfigMapRef != nil {
+		in, out := &in.ConfigMapRef, &out.ConfigMapRef
+		*out = new(ConfigMapEnvSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SecretRef != nil {
+		in, out := &in.SecretRef, &out.SecretRef
+		*out = new(SecretEnvSource)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvFromSource.
+func (in *EnvFromSource) DeepCopy() *EnvFromSource {
+	if in == nil {
+		return nil
+	}
+	out := new(EnvFromSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EnvVar) DeepCopyInto(out *EnvVar) {
+	*out = *in
+	if in.ValueFrom != nil {
+		in, out := &in.ValueFrom, &out.ValueFrom
+		*out = new(EnvVarSource)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvVar.
+func (in *EnvVar) DeepCopy() *EnvVar {
+	if in == nil {
+		return nil
+	}
+	out := new(EnvVar)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EnvVarSource) DeepCopyInto(out *EnvVarSource) {
+	*out = *in
+	if in.FieldRef != nil {
+		in, out := &in.FieldRef, &out.FieldRef
+		*out = new(ObjectFieldSelector)
+		**out = **in
+	}
+	if in.ResourceFieldRef != nil {
+		in, out := &in.ResourceFieldRef, &out.ResourceFieldRef
+		*out = new(ResourceFieldSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ConfigMapKeyRef != nil {
+		in, out := &in.ConfigMapKeyRef, &out.ConfigMapKeyRef
+		*out = new(ConfigMapKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SecretKeyRef != nil {
+		in, out := &in.SecretKeyRef, &out.SecretKeyRef
+		*out = new(SecretKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvVarSource.
+func (in *EnvVarSource) DeepCopy() *EnvVarSource {
+	if in == nil {
+		return nil
+	}
+	out := new(EnvVarSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Event) DeepCopyInto(out *Event) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	out.InvolvedObject = in.InvolvedObject
+	out.Source = in.Source
+	in.FirstTimestamp.DeepCopyInto(&out.FirstTimestamp)
+	in.LastTimestamp.DeepCopyInto(&out.LastTimestamp)
+	in.EventTime.DeepCopyInto(&out.EventTime)
+	if in.Series != nil {
+		in, out := &in.Series, &out.Series
+		*out = new(EventSeries)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Related != nil {
+		in, out := &in.Related, &out.Related
+		*out = new(ObjectReference)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Event.
+func (in *Event) DeepCopy() *Event {
+	if in == nil {
+		return nil
+	}
+	out := new(Event)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Event) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EventList) DeepCopyInto(out *EventList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Event, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventList.
+func (in *EventList) DeepCopy() *EventList {
+	if in == nil {
+		return nil
+	}
+	out := new(EventList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *EventList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EventSeries) DeepCopyInto(out *EventSeries) {
+	*out = *in
+	in.LastObservedTime.DeepCopyInto(&out.LastObservedTime)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSeries.
+func (in *EventSeries) DeepCopy() *EventSeries {
+	if in == nil {
+		return nil
+	}
+	out := new(EventSeries)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EventSource) DeepCopyInto(out *EventSource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSource.
+func (in *EventSource) DeepCopy() *EventSource {
+	if in == nil {
+		return nil
+	}
+	out := new(EventSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExecAction) DeepCopyInto(out *ExecAction) {
+	*out = *in
+	if in.Command != nil {
+		in, out := &in.Command, &out.Command
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecAction.
+func (in *ExecAction) DeepCopy() *ExecAction {
+	if in == nil {
+		return nil
+	}
+	out := new(ExecAction)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FCVolumeSource) DeepCopyInto(out *FCVolumeSource) {
+	*out = *in
+	if in.TargetWWNs != nil {
+		in, out := &in.TargetWWNs, &out.TargetWWNs
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Lun != nil {
+		in, out := &in.Lun, &out.Lun
+		*out = new(int32)
+		**out = **in
+	}
+	if in.WWIDs != nil {
+		in, out := &in.WWIDs, &out.WWIDs
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FCVolumeSource.
+func (in *FCVolumeSource) DeepCopy() *FCVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(FCVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FlexPersistentVolumeSource) DeepCopyInto(out *FlexPersistentVolumeSource) {
+	*out = *in
+	if in.SecretRef != nil {
+		in, out := &in.SecretRef, &out.SecretRef
+		*out = new(SecretReference)
+		**out = **in
+	}
+	if in.Options != nil {
+		in, out := &in.Options, &out.Options
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlexPersistentVolumeSource.
+func (in *FlexPersistentVolumeSource) DeepCopy() *FlexPersistentVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(FlexPersistentVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FlexVolumeSource) DeepCopyInto(out *FlexVolumeSource) {
+	*out = *in
+	if in.SecretRef != nil {
+		in, out := &in.SecretRef, &out.SecretRef
+		*out = new(LocalObjectReference)
+		**out = **in
+	}
+	if in.Options != nil {
+		in, out := &in.Options, &out.Options
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlexVolumeSource.
+func (in *FlexVolumeSource) DeepCopy() *FlexVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(FlexVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FlockerVolumeSource) DeepCopyInto(out *FlockerVolumeSource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlockerVolumeSource.
+func (in *FlockerVolumeSource) DeepCopy() *FlockerVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(FlockerVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GCEPersistentDiskVolumeSource) DeepCopyInto(out *GCEPersistentDiskVolumeSource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCEPersistentDiskVolumeSource.
+func (in *GCEPersistentDiskVolumeSource) DeepCopy() *GCEPersistentDiskVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(GCEPersistentDiskVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GitRepoVolumeSource) DeepCopyInto(out *GitRepoVolumeSource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepoVolumeSource.
+func (in *GitRepoVolumeSource) DeepCopy() *GitRepoVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(GitRepoVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GlusterfsPersistentVolumeSource) DeepCopyInto(out *GlusterfsPersistentVolumeSource) {
+	*out = *in
+	if in.EndpointsNamespace != nil {
+		in, out := &in.EndpointsNamespace, &out.EndpointsNamespace
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlusterfsPersistentVolumeSource.
+func (in *GlusterfsPersistentVolumeSource) DeepCopy() *GlusterfsPersistentVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(GlusterfsPersistentVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GlusterfsVolumeSource) DeepCopyInto(out *GlusterfsVolumeSource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlusterfsVolumeSource.
+func (in *GlusterfsVolumeSource) DeepCopy() *GlusterfsVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(GlusterfsVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HTTPGetAction) DeepCopyInto(out *HTTPGetAction) {
+	*out = *in
+	out.Port = in.Port
+	if in.HTTPHeaders != nil {
+		in, out := &in.HTTPHeaders, &out.HTTPHeaders
+		*out = make([]HTTPHeader, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPGetAction.
+func (in *HTTPGetAction) DeepCopy() *HTTPGetAction {
+	if in == nil {
+		return nil
+	}
+	out := new(HTTPGetAction)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HTTPHeader) DeepCopyInto(out *HTTPHeader) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPHeader.
+func (in *HTTPHeader) DeepCopy() *HTTPHeader {
+	if in == nil {
+		return nil
+	}
+	out := new(HTTPHeader)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Handler) DeepCopyInto(out *Handler) {
+	*out = *in
+	if in.Exec != nil {
+		in, out := &in.Exec, &out.Exec
+		*out = new(ExecAction)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.HTTPGet != nil {
+		in, out := &in.HTTPGet, &out.HTTPGet
+		*out = new(HTTPGetAction)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.TCPSocket != nil {
+		in, out := &in.TCPSocket, &out.TCPSocket
+		*out = new(TCPSocketAction)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Handler.
+func (in *Handler) DeepCopy() *Handler {
+	if in == nil {
+		return nil
+	}
+	out := new(Handler)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HostAlias) DeepCopyInto(out *HostAlias) {
+	*out = *in
+	if in.Hostnames != nil {
+		in, out := &in.Hostnames, &out.Hostnames
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostAlias.
+func (in *HostAlias) DeepCopy() *HostAlias {
+	if in == nil {
+		return nil
+	}
+	out := new(HostAlias)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HostPathVolumeSource) DeepCopyInto(out *HostPathVolumeSource) {
+	*out = *in
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(HostPathType)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPathVolumeSource.
+func (in *HostPathVolumeSource) DeepCopy() *HostPathVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(HostPathVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ISCSIPersistentVolumeSource) DeepCopyInto(out *ISCSIPersistentVolumeSource) {
+	*out = *in
+	if in.Portals != nil {
+		in, out := &in.Portals, &out.Portals
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.SecretRef != nil {
+		in, out := &in.SecretRef, &out.SecretRef
+		*out = new(SecretReference)
+		**out = **in
+	}
+	if in.InitiatorName != nil {
+		in, out := &in.InitiatorName, &out.InitiatorName
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ISCSIPersistentVolumeSource.
+func (in *ISCSIPersistentVolumeSource) DeepCopy() *ISCSIPersistentVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(ISCSIPersistentVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ISCSIVolumeSource) DeepCopyInto(out *ISCSIVolumeSource) {
+	*out = *in
+	if in.Portals != nil {
+		in, out := &in.Portals, &out.Portals
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.SecretRef != nil {
+		in, out := &in.SecretRef, &out.SecretRef
+		*out = new(LocalObjectReference)
+		**out = **in
+	}
+	if in.InitiatorName != nil {
+		in, out := &in.InitiatorName, &out.InitiatorName
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ISCSIVolumeSource.
+func (in *ISCSIVolumeSource) DeepCopy() *ISCSIVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(ISCSIVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KeyToPath) DeepCopyInto(out *KeyToPath) {
+	*out = *in
+	if in.Mode != nil {
+		in, out := &in.Mode, &out.Mode
+		*out = new(int32)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyToPath.
+func (in *KeyToPath) DeepCopy() *KeyToPath {
+	if in == nil {
+		return nil
+	}
+	out := new(KeyToPath)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Lifecycle) DeepCopyInto(out *Lifecycle) {
+	*out = *in
+	if in.PostStart != nil {
+		in, out := &in.PostStart, &out.PostStart
+		*out = new(Handler)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.PreStop != nil {
+		in, out := &in.PreStop, &out.PreStop
+		*out = new(Handler)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Lifecycle.
+func (in *Lifecycle) DeepCopy() *Lifecycle {
+	if in == nil {
+		return nil
+	}
+	out := new(Lifecycle)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LimitRange) DeepCopyInto(out *LimitRange) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitRange.
+func (in *LimitRange) DeepCopy() *LimitRange {
+	if in == nil {
+		return nil
+	}
+	out := new(LimitRange)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *LimitRange) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LimitRangeItem) DeepCopyInto(out *LimitRangeItem) {
+	*out = *in
+	if in.Max != nil {
+		in, out := &in.Max, &out.Max
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	if in.Min != nil {
+		in, out := &in.Min, &out.Min
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	if in.Default != nil {
+		in, out := &in.Default, &out.Default
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	if in.DefaultRequest != nil {
+		in, out := &in.DefaultRequest, &out.DefaultRequest
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	if in.MaxLimitRequestRatio != nil {
+		in, out := &in.MaxLimitRequestRatio, &out.MaxLimitRequestRatio
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitRangeItem.
+func (in *LimitRangeItem) DeepCopy() *LimitRangeItem {
+	if in == nil {
+		return nil
+	}
+	out := new(LimitRangeItem)
+	in.DeepCopyInto(out)
+	return out
+}
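+
+// Editorial note: ResourceList is map[ResourceName]resource.Quantity, and
+// Quantity carries internal pointer state, so each value is cloned with
+// val.DeepCopy() instead of plain assignment; a shared Quantity could
+// otherwise be mutated through either map.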
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LimitRangeList) DeepCopyInto(out *LimitRangeList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]LimitRange, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitRangeList.
+func (in *LimitRangeList) DeepCopy() *LimitRangeList {
+	if in == nil {
+		return nil
+	}
+	out := new(LimitRangeList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *LimitRangeList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LimitRangeSpec) DeepCopyInto(out *LimitRangeSpec) {
+	*out = *in
+	if in.Limits != nil {
+		in, out := &in.Limits, &out.Limits
+		*out = make([]LimitRangeItem, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitRangeSpec.
+func (in *LimitRangeSpec) DeepCopy() *LimitRangeSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(LimitRangeSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *List) DeepCopyInto(out *List) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]runtime.RawExtension, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new List.
+func (in *List) DeepCopy() *List {
+	if in == nil {
+		return nil
+	}
+	out := new(List)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *List) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LoadBalancerIngress) DeepCopyInto(out *LoadBalancerIngress) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerIngress.
+func (in *LoadBalancerIngress) DeepCopy() *LoadBalancerIngress {
+	if in == nil {
+		return nil
+	}
+	out := new(LoadBalancerIngress)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LoadBalancerStatus) DeepCopyInto(out *LoadBalancerStatus) {
+	*out = *in
+	if in.Ingress != nil {
+		in, out := &in.Ingress, &out.Ingress
+		*out = make([]LoadBalancerIngress, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerStatus.
+func (in *LoadBalancerStatus) DeepCopy() *LoadBalancerStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(LoadBalancerStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LocalObjectReference) DeepCopyInto(out *LocalObjectReference) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalObjectReference.
+func (in *LocalObjectReference) DeepCopy() *LocalObjectReference {
+	if in == nil {
+		return nil
+	}
+	out := new(LocalObjectReference)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LocalVolumeSource) DeepCopyInto(out *LocalVolumeSource) {
+	*out = *in
+	if in.FSType != nil {
+		in, out := &in.FSType, &out.FSType
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalVolumeSource.
+func (in *LocalVolumeSource) DeepCopy() *LocalVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(LocalVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NFSVolumeSource) DeepCopyInto(out *NFSVolumeSource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NFSVolumeSource.
+func (in *NFSVolumeSource) DeepCopy() *NFSVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(NFSVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Namespace) DeepCopyInto(out *Namespace) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	out.Status = in.Status
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Namespace.
+func (in *Namespace) DeepCopy() *Namespace {
+	if in == nil {
+		return nil
+	}
+	out := new(Namespace)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Namespace) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NamespaceList) DeepCopyInto(out *NamespaceList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Namespace, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceList.
+func (in *NamespaceList) DeepCopy() *NamespaceList {
+	if in == nil {
+		return nil
+	}
+	out := new(NamespaceList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *NamespaceList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NamespaceSpec) DeepCopyInto(out *NamespaceSpec) {
+	*out = *in
+	if in.Finalizers != nil {
+		in, out := &in.Finalizers, &out.Finalizers
+		*out = make([]FinalizerName, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceSpec.
+func (in *NamespaceSpec) DeepCopy() *NamespaceSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(NamespaceSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NamespaceStatus) DeepCopyInto(out *NamespaceStatus) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceStatus.
+func (in *NamespaceStatus) DeepCopy() *NamespaceStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(NamespaceStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Node) DeepCopyInto(out *Node) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Node.
+func (in *Node) DeepCopy() *Node {
+	if in == nil {
+		return nil
+	}
+	out := new(Node)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Node) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeAddress) DeepCopyInto(out *NodeAddress) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAddress.
+func (in *NodeAddress) DeepCopy() *NodeAddress {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeAddress)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeAffinity) DeepCopyInto(out *NodeAffinity) {
+	*out = *in
+	if in.RequiredDuringSchedulingIgnoredDuringExecution != nil {
+		in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution
+		*out = new(NodeSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.PreferredDuringSchedulingIgnoredDuringExecution != nil {
+		in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution
+		*out = make([]PreferredSchedulingTerm, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAffinity.
+func (in *NodeAffinity) DeepCopy() *NodeAffinity {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeAffinity)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeCondition) DeepCopyInto(out *NodeCondition) {
+	*out = *in
+	in.LastHeartbeatTime.DeepCopyInto(&out.LastHeartbeatTime)
+	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeCondition.
+func (in *NodeCondition) DeepCopy() *NodeCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeConfigSource) DeepCopyInto(out *NodeConfigSource) {
+	*out = *in
+	if in.ConfigMap != nil {
+		in, out := &in.ConfigMap, &out.ConfigMap
+		*out = new(ConfigMapNodeConfigSource)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeConfigSource.
+func (in *NodeConfigSource) DeepCopy() *NodeConfigSource {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeConfigSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeConfigStatus) DeepCopyInto(out *NodeConfigStatus) {
+	*out = *in
+	if in.Assigned != nil {
+		in, out := &in.Assigned, &out.Assigned
+		*out = new(NodeConfigSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Active != nil {
+		in, out := &in.Active, &out.Active
+		*out = new(NodeConfigSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.LastKnownGood != nil {
+		in, out := &in.LastKnownGood, &out.LastKnownGood
+		*out = new(NodeConfigSource)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeConfigStatus.
+func (in *NodeConfigStatus) DeepCopy() *NodeConfigStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeConfigStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeDaemonEndpoints) DeepCopyInto(out *NodeDaemonEndpoints) {
+	*out = *in
+	out.KubeletEndpoint = in.KubeletEndpoint
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeDaemonEndpoints.
+func (in *NodeDaemonEndpoints) DeepCopy() *NodeDaemonEndpoints {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeDaemonEndpoints)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeList) DeepCopyInto(out *NodeList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Node, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeList.
+func (in *NodeList) DeepCopy() *NodeList {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *NodeList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeProxyOptions) DeepCopyInto(out *NodeProxyOptions) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeProxyOptions.
+func (in *NodeProxyOptions) DeepCopy() *NodeProxyOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeProxyOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *NodeProxyOptions) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeResources) DeepCopyInto(out *NodeResources) {
+	*out = *in
+	if in.Capacity != nil {
+		in, out := &in.Capacity, &out.Capacity
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResources.
+func (in *NodeResources) DeepCopy() *NodeResources {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeResources)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeSelector) DeepCopyInto(out *NodeSelector) {
+	*out = *in
+	if in.NodeSelectorTerms != nil {
+		in, out := &in.NodeSelectorTerms, &out.NodeSelectorTerms
+		*out = make([]NodeSelectorTerm, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSelector.
+func (in *NodeSelector) DeepCopy() *NodeSelector {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeSelector)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeSelectorRequirement) DeepCopyInto(out *NodeSelectorRequirement) {
+	*out = *in
+	if in.Values != nil {
+		in, out := &in.Values, &out.Values
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSelectorRequirement.
+func (in *NodeSelectorRequirement) DeepCopy() *NodeSelectorRequirement {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeSelectorRequirement)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeSelectorTerm) DeepCopyInto(out *NodeSelectorTerm) {
+	*out = *in
+	if in.MatchExpressions != nil {
+		in, out := &in.MatchExpressions, &out.MatchExpressions
+		*out = make([]NodeSelectorRequirement, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.MatchFields != nil {
+		in, out := &in.MatchFields, &out.MatchFields
+		*out = make([]NodeSelectorRequirement, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSelectorTerm.
+func (in *NodeSelectorTerm) DeepCopy() *NodeSelectorTerm {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeSelectorTerm)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeSpec) DeepCopyInto(out *NodeSpec) {
+	*out = *in
+	if in.Taints != nil {
+		in, out := &in.Taints, &out.Taints
+		*out = make([]Taint, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ConfigSource != nil {
+		in, out := &in.ConfigSource, &out.ConfigSource
+		*out = new(NodeConfigSource)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSpec.
+func (in *NodeSpec) DeepCopy() *NodeSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeStatus) DeepCopyInto(out *NodeStatus) {
+	*out = *in
+	if in.Capacity != nil {
+		in, out := &in.Capacity, &out.Capacity
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	if in.Allocatable != nil {
+		in, out := &in.Allocatable, &out.Allocatable
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]NodeCondition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Addresses != nil {
+		in, out := &in.Addresses, &out.Addresses
+		*out = make([]NodeAddress, len(*in))
+		copy(*out, *in)
+	}
+	out.DaemonEndpoints = in.DaemonEndpoints
+	out.NodeInfo = in.NodeInfo
+	if in.Images != nil {
+		in, out := &in.Images, &out.Images
+		*out = make([]ContainerImage, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.VolumesInUse != nil {
+		in, out := &in.VolumesInUse, &out.VolumesInUse
+		*out = make([]UniqueVolumeName, len(*in))
+		copy(*out, *in)
+	}
+	if in.VolumesAttached != nil {
+		in, out := &in.VolumesAttached, &out.VolumesAttached
+		*out = make([]AttachedVolume, len(*in))
+		copy(*out, *in)
+	}
+	if in.Config != nil {
+		in, out := &in.Config, &out.Config
+		*out = new(NodeConfigStatus)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeStatus.
+func (in *NodeStatus) DeepCopy() *NodeStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeSystemInfo) DeepCopyInto(out *NodeSystemInfo) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSystemInfo.
+func (in *NodeSystemInfo) DeepCopy() *NodeSystemInfo {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeSystemInfo)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectFieldSelector) DeepCopyInto(out *ObjectFieldSelector) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectFieldSelector.
+func (in *ObjectFieldSelector) DeepCopy() *ObjectFieldSelector {
+	if in == nil {
+		return nil
+	}
+	out := new(ObjectFieldSelector)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectReference) DeepCopyInto(out *ObjectReference) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectReference.
+func (in *ObjectReference) DeepCopy() *ObjectReference {
+	if in == nil {
+		return nil
+	}
+	out := new(ObjectReference)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ObjectReference) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolume) DeepCopyInto(out *PersistentVolume) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	out.Status = in.Status
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolume.
+func (in *PersistentVolume) DeepCopy() *PersistentVolume {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolume)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PersistentVolume) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeClaim) DeepCopyInto(out *PersistentVolumeClaim) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaim.
+func (in *PersistentVolumeClaim) DeepCopy() *PersistentVolumeClaim {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeClaim)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PersistentVolumeClaim) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeClaimCondition) DeepCopyInto(out *PersistentVolumeClaimCondition) {
+	*out = *in
+	in.LastProbeTime.DeepCopyInto(&out.LastProbeTime)
+	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimCondition.
+func (in *PersistentVolumeClaimCondition) DeepCopy() *PersistentVolumeClaimCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeClaimCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeClaimList) DeepCopyInto(out *PersistentVolumeClaimList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]PersistentVolumeClaim, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimList.
+func (in *PersistentVolumeClaimList) DeepCopy() *PersistentVolumeClaimList {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeClaimList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PersistentVolumeClaimList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeClaimSpec) DeepCopyInto(out *PersistentVolumeClaimSpec) {
+	*out = *in
+	if in.AccessModes != nil {
+		in, out := &in.AccessModes, &out.AccessModes
+		*out = make([]PersistentVolumeAccessMode, len(*in))
+		copy(*out, *in)
+	}
+	if in.Selector != nil {
+		in, out := &in.Selector, &out.Selector
+		*out = new(metav1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	in.Resources.DeepCopyInto(&out.Resources)
+	if in.StorageClassName != nil {
+		in, out := &in.StorageClassName, &out.StorageClassName
+		*out = new(string)
+		**out = **in
+	}
+	if in.VolumeMode != nil {
+		in, out := &in.VolumeMode, &out.VolumeMode
+		*out = new(PersistentVolumeMode)
+		**out = **in
+	}
+	if in.DataSource != nil {
+		in, out := &in.DataSource, &out.DataSource
+		*out = new(TypedLocalObjectReference)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimSpec.
+func (in *PersistentVolumeClaimSpec) DeepCopy() *PersistentVolumeClaimSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeClaimSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeClaimStatus) DeepCopyInto(out *PersistentVolumeClaimStatus) {
+	*out = *in
+	if in.AccessModes != nil {
+		in, out := &in.AccessModes, &out.AccessModes
+		*out = make([]PersistentVolumeAccessMode, len(*in))
+		copy(*out, *in)
+	}
+	if in.Capacity != nil {
+		in, out := &in.Capacity, &out.Capacity
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]PersistentVolumeClaimCondition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimStatus.
+func (in *PersistentVolumeClaimStatus) DeepCopy() *PersistentVolumeClaimStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeClaimStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeClaimVolumeSource) DeepCopyInto(out *PersistentVolumeClaimVolumeSource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimVolumeSource.
+func (in *PersistentVolumeClaimVolumeSource) DeepCopy() *PersistentVolumeClaimVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeClaimVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeList) DeepCopyInto(out *PersistentVolumeList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]PersistentVolume, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeList.
+func (in *PersistentVolumeList) DeepCopy() *PersistentVolumeList {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PersistentVolumeList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeSource) DeepCopyInto(out *PersistentVolumeSource) {
+	*out = *in
+	if in.GCEPersistentDisk != nil {
+		in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk
+		*out = new(GCEPersistentDiskVolumeSource)
+		**out = **in
+	}
+	if in.AWSElasticBlockStore != nil {
+		in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore
+		*out = new(AWSElasticBlockStoreVolumeSource)
+		**out = **in
+	}
+	if in.HostPath != nil {
+		in, out := &in.HostPath, &out.HostPath
+		*out = new(HostPathVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Glusterfs != nil {
+		in, out := &in.Glusterfs, &out.Glusterfs
+		*out = new(GlusterfsPersistentVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.NFS != nil {
+		in, out := &in.NFS, &out.NFS
+		*out = new(NFSVolumeSource)
+		**out = **in
+	}
+	if in.RBD != nil {
+		in, out := &in.RBD, &out.RBD
+		*out = new(RBDPersistentVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ISCSI != nil {
+		in, out := &in.ISCSI, &out.ISCSI
+		*out = new(ISCSIPersistentVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Cinder != nil {
+		in, out := &in.Cinder, &out.Cinder
+		*out = new(CinderPersistentVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.CephFS != nil {
+		in, out := &in.CephFS, &out.CephFS
+		*out = new(CephFSPersistentVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FC != nil {
+		in, out := &in.FC, &out.FC
+		*out = new(FCVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Flocker != nil {
+		in, out := &in.Flocker, &out.Flocker
+		*out = new(FlockerVolumeSource)
+		**out = **in
+	}
+	if in.FlexVolume != nil {
+		in, out := &in.FlexVolume, &out.FlexVolume
+		*out = new(FlexPersistentVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.AzureFile != nil {
+		in, out := &in.AzureFile, &out.AzureFile
+		*out = new(AzureFilePersistentVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.VsphereVolume != nil {
+		in, out := &in.VsphereVolume, &out.VsphereVolume
+		*out = new(VsphereVirtualDiskVolumeSource)
+		**out = **in
+	}
+	if in.Quobyte != nil {
+		in, out := &in.Quobyte, &out.Quobyte
+		*out = new(QuobyteVolumeSource)
+		**out = **in
+	}
+	if in.AzureDisk != nil {
+		in, out := &in.AzureDisk, &out.AzureDisk
+		*out = new(AzureDiskVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.PhotonPersistentDisk != nil {
+		in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk
+		*out = new(PhotonPersistentDiskVolumeSource)
+		**out = **in
+	}
+	if in.PortworxVolume != nil {
+		in, out := &in.PortworxVolume, &out.PortworxVolume
+		*out = new(PortworxVolumeSource)
+		**out = **in
+	}
+	if in.ScaleIO != nil {
+		in, out := &in.ScaleIO, &out.ScaleIO
+		*out = new(ScaleIOPersistentVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Local != nil {
+		in, out := &in.Local, &out.Local
+		*out = new(LocalVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.StorageOS != nil {
+		in, out := &in.StorageOS, &out.StorageOS
+		*out = new(StorageOSPersistentVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.CSI != nil {
+		in, out := &in.CSI, &out.CSI
+		*out = new(CSIPersistentVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeSource.
+func (in *PersistentVolumeSource) DeepCopy() *PersistentVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeSpec) DeepCopyInto(out *PersistentVolumeSpec) {
+	*out = *in
+	if in.Capacity != nil {
+		in, out := &in.Capacity, &out.Capacity
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	in.PersistentVolumeSource.DeepCopyInto(&out.PersistentVolumeSource)
+	if in.AccessModes != nil {
+		in, out := &in.AccessModes, &out.AccessModes
+		*out = make([]PersistentVolumeAccessMode, len(*in))
+		copy(*out, *in)
+	}
+	if in.ClaimRef != nil {
+		in, out := &in.ClaimRef, &out.ClaimRef
+		*out = new(ObjectReference)
+		**out = **in
+	}
+	if in.MountOptions != nil {
+		in, out := &in.MountOptions, &out.MountOptions
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.VolumeMode != nil {
+		in, out := &in.VolumeMode, &out.VolumeMode
+		*out = new(PersistentVolumeMode)
+		**out = **in
+	}
+	if in.NodeAffinity != nil {
+		in, out := &in.NodeAffinity, &out.NodeAffinity
+		*out = new(VolumeNodeAffinity)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeSpec.
+func (in *PersistentVolumeSpec) DeepCopy() *PersistentVolumeSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeStatus) DeepCopyInto(out *PersistentVolumeStatus) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeStatus.
+func (in *PersistentVolumeStatus) DeepCopy() *PersistentVolumeStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PhotonPersistentDiskVolumeSource) DeepCopyInto(out *PhotonPersistentDiskVolumeSource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PhotonPersistentDiskVolumeSource.
+func (in *PhotonPersistentDiskVolumeSource) DeepCopy() *PhotonPersistentDiskVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(PhotonPersistentDiskVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Pod) DeepCopyInto(out *Pod) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Pod.
+func (in *Pod) DeepCopy() *Pod {
+	if in == nil {
+		return nil
+	}
+	out := new(Pod)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Pod) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodAffinity) DeepCopyInto(out *PodAffinity) {
+	*out = *in
+	if in.RequiredDuringSchedulingIgnoredDuringExecution != nil {
+		in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution
+		*out = make([]PodAffinityTerm, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.PreferredDuringSchedulingIgnoredDuringExecution != nil {
+		in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution
+		*out = make([]WeightedPodAffinityTerm, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAffinity.
+func (in *PodAffinity) DeepCopy() *PodAffinity {
+	if in == nil {
+		return nil
+	}
+	out := new(PodAffinity)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodAffinityTerm) DeepCopyInto(out *PodAffinityTerm) {
+	*out = *in
+	if in.LabelSelector != nil {
+		in, out := &in.LabelSelector, &out.LabelSelector
+		*out = new(metav1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Namespaces != nil {
+		in, out := &in.Namespaces, &out.Namespaces
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAffinityTerm.
+func (in *PodAffinityTerm) DeepCopy() *PodAffinityTerm {
+	if in == nil {
+		return nil
+	}
+	out := new(PodAffinityTerm)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodAntiAffinity) DeepCopyInto(out *PodAntiAffinity) {
+	*out = *in
+	if in.RequiredDuringSchedulingIgnoredDuringExecution != nil {
+		in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution
+		*out = make([]PodAffinityTerm, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.PreferredDuringSchedulingIgnoredDuringExecution != nil {
+		in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution
+		*out = make([]WeightedPodAffinityTerm, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAntiAffinity.
+func (in *PodAntiAffinity) DeepCopy() *PodAntiAffinity {
+	if in == nil {
+		return nil
+	}
+	out := new(PodAntiAffinity)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodAttachOptions) DeepCopyInto(out *PodAttachOptions) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAttachOptions.
+func (in *PodAttachOptions) DeepCopy() *PodAttachOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(PodAttachOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodAttachOptions) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodCondition) DeepCopyInto(out *PodCondition) {
+	*out = *in
+	in.LastProbeTime.DeepCopyInto(&out.LastProbeTime)
+	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCondition.
+func (in *PodCondition) DeepCopy() *PodCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(PodCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodDNSConfig) DeepCopyInto(out *PodDNSConfig) {
+	*out = *in
+	if in.Nameservers != nil {
+		in, out := &in.Nameservers, &out.Nameservers
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Searches != nil {
+		in, out := &in.Searches, &out.Searches
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Options != nil {
+		in, out := &in.Options, &out.Options
+		*out = make([]PodDNSConfigOption, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDNSConfig.
+func (in *PodDNSConfig) DeepCopy() *PodDNSConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(PodDNSConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodDNSConfigOption) DeepCopyInto(out *PodDNSConfigOption) {
+	*out = *in
+	if in.Value != nil {
+		in, out := &in.Value, &out.Value
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDNSConfigOption.
+func (in *PodDNSConfigOption) DeepCopy() *PodDNSConfigOption {
+	if in == nil {
+		return nil
+	}
+	out := new(PodDNSConfigOption)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodExecOptions) DeepCopyInto(out *PodExecOptions) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.Command != nil {
+		in, out := &in.Command, &out.Command
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodExecOptions.
+func (in *PodExecOptions) DeepCopy() *PodExecOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(PodExecOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodExecOptions) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodList) DeepCopyInto(out *PodList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Pod, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodList.
+func (in *PodList) DeepCopy() *PodList {
+	if in == nil {
+		return nil
+	}
+	out := new(PodList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodLogOptions) DeepCopyInto(out *PodLogOptions) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.SinceSeconds != nil {
+		in, out := &in.SinceSeconds, &out.SinceSeconds
+		*out = new(int64)
+		**out = **in
+	}
+	if in.SinceTime != nil {
+		in, out := &in.SinceTime, &out.SinceTime
+		*out = (*in).DeepCopy()
+	}
+	if in.TailLines != nil {
+		in, out := &in.TailLines, &out.TailLines
+		*out = new(int64)
+		**out = **in
+	}
+	if in.LimitBytes != nil {
+		in, out := &in.LimitBytes, &out.LimitBytes
+		*out = new(int64)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodLogOptions.
+func (in *PodLogOptions) DeepCopy() *PodLogOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(PodLogOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodLogOptions) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodPortForwardOptions) DeepCopyInto(out *PodPortForwardOptions) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.Ports != nil {
+		in, out := &in.Ports, &out.Ports
+		*out = make([]int32, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodPortForwardOptions.
+func (in *PodPortForwardOptions) DeepCopy() *PodPortForwardOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(PodPortForwardOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodPortForwardOptions) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodProxyOptions) DeepCopyInto(out *PodProxyOptions) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodProxyOptions.
+func (in *PodProxyOptions) DeepCopy() *PodProxyOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(PodProxyOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodProxyOptions) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodReadinessGate) DeepCopyInto(out *PodReadinessGate) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodReadinessGate.
+func (in *PodReadinessGate) DeepCopy() *PodReadinessGate {
+	if in == nil {
+		return nil
+	}
+	out := new(PodReadinessGate)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodSecurityContext) DeepCopyInto(out *PodSecurityContext) {
+	*out = *in
+	if in.SELinuxOptions != nil {
+		in, out := &in.SELinuxOptions, &out.SELinuxOptions
+		*out = new(SELinuxOptions)
+		**out = **in
+	}
+	if in.RunAsUser != nil {
+		in, out := &in.RunAsUser, &out.RunAsUser
+		*out = new(int64)
+		**out = **in
+	}
+	if in.RunAsGroup != nil {
+		in, out := &in.RunAsGroup, &out.RunAsGroup
+		*out = new(int64)
+		**out = **in
+	}
+	if in.RunAsNonRoot != nil {
+		in, out := &in.RunAsNonRoot, &out.RunAsNonRoot
+		*out = new(bool)
+		**out = **in
+	}
+	if in.SupplementalGroups != nil {
+		in, out := &in.SupplementalGroups, &out.SupplementalGroups
+		*out = make([]int64, len(*in))
+		copy(*out, *in)
+	}
+	if in.FSGroup != nil {
+		in, out := &in.FSGroup, &out.FSGroup
+		*out = new(int64)
+		**out = **in
+	}
+	if in.Sysctls != nil {
+		in, out := &in.Sysctls, &out.Sysctls
+		*out = make([]Sysctl, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityContext.
+func (in *PodSecurityContext) DeepCopy() *PodSecurityContext {
+	if in == nil {
+		return nil
+	}
+	out := new(PodSecurityContext)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodSignature) DeepCopyInto(out *PodSignature) {
+	*out = *in
+	if in.PodController != nil {
+		in, out := &in.PodController, &out.PodController
+		*out = new(metav1.OwnerReference)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSignature.
+func (in *PodSignature) DeepCopy() *PodSignature {
+	if in == nil {
+		return nil
+	}
+	out := new(PodSignature)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodSpec) DeepCopyInto(out *PodSpec) {
+	*out = *in
+	if in.Volumes != nil {
+		in, out := &in.Volumes, &out.Volumes
+		*out = make([]Volume, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.InitContainers != nil {
+		in, out := &in.InitContainers, &out.InitContainers
+		*out = make([]Container, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Containers != nil {
+		in, out := &in.Containers, &out.Containers
+		*out = make([]Container, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.TerminationGracePeriodSeconds != nil {
+		in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds
+		*out = new(int64)
+		**out = **in
+	}
+	if in.ActiveDeadlineSeconds != nil {
+		in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds
+		*out = new(int64)
+		**out = **in
+	}
+	if in.NodeSelector != nil {
+		in, out := &in.NodeSelector, &out.NodeSelector
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.AutomountServiceAccountToken != nil {
+		in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken
+		*out = new(bool)
+		**out = **in
+	}
+	if in.ShareProcessNamespace != nil {
+		in, out := &in.ShareProcessNamespace, &out.ShareProcessNamespace
+		*out = new(bool)
+		**out = **in
+	}
+	if in.SecurityContext != nil {
+		in, out := &in.SecurityContext, &out.SecurityContext
+		*out = new(PodSecurityContext)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ImagePullSecrets != nil {
+		in, out := &in.ImagePullSecrets, &out.ImagePullSecrets
+		*out = make([]LocalObjectReference, len(*in))
+		copy(*out, *in)
+	}
+	if in.Affinity != nil {
+		in, out := &in.Affinity, &out.Affinity
+		*out = new(Affinity)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Tolerations != nil {
+		in, out := &in.Tolerations, &out.Tolerations
+		*out = make([]Toleration, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.HostAliases != nil {
+		in, out := &in.HostAliases, &out.HostAliases
+		*out = make([]HostAlias, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Priority != nil {
+		in, out := &in.Priority, &out.Priority
+		*out = new(int32)
+		**out = **in
+	}
+	if in.DNSConfig != nil {
+		in, out := &in.DNSConfig, &out.DNSConfig
+		*out = new(PodDNSConfig)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ReadinessGates != nil {
+		in, out := &in.ReadinessGates, &out.ReadinessGates
+		*out = make([]PodReadinessGate, len(*in))
+		copy(*out, *in)
+	}
+	if in.RuntimeClassName != nil {
+		in, out := &in.RuntimeClassName, &out.RuntimeClassName
+		*out = new(string)
+		**out = **in
+	}
+	if in.EnableServiceLinks != nil {
+		in, out := &in.EnableServiceLinks, &out.EnableServiceLinks
+		*out = new(bool)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSpec.
+func (in *PodSpec) DeepCopy() *PodSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(PodSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodStatus) DeepCopyInto(out *PodStatus) {
+	*out = *in
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]PodCondition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.StartTime != nil {
+		in, out := &in.StartTime, &out.StartTime
+		*out = (*in).DeepCopy()
+	}
+	if in.InitContainerStatuses != nil {
+		in, out := &in.InitContainerStatuses, &out.InitContainerStatuses
+		*out = make([]ContainerStatus, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ContainerStatuses != nil {
+		in, out := &in.ContainerStatuses, &out.ContainerStatuses
+		*out = make([]ContainerStatus, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodStatus.
+func (in *PodStatus) DeepCopy() *PodStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(PodStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodStatusResult) DeepCopyInto(out *PodStatusResult) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodStatusResult.
+func (in *PodStatusResult) DeepCopy() *PodStatusResult {
+	if in == nil {
+		return nil
+	}
+	out := new(PodStatusResult)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodStatusResult) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodTemplate) DeepCopyInto(out *PodTemplate) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Template.DeepCopyInto(&out.Template)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTemplate.
+func (in *PodTemplate) DeepCopy() *PodTemplate {
+	if in == nil {
+		return nil
+	}
+	out := new(PodTemplate)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodTemplate) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodTemplateList) DeepCopyInto(out *PodTemplateList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]PodTemplate, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTemplateList.
+func (in *PodTemplateList) DeepCopy() *PodTemplateList {
+	if in == nil {
+		return nil
+	}
+	out := new(PodTemplateList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodTemplateList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodTemplateSpec) DeepCopyInto(out *PodTemplateSpec) {
+	*out = *in
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTemplateSpec.
+func (in *PodTemplateSpec) DeepCopy() *PodTemplateSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(PodTemplateSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PortworxVolumeSource) DeepCopyInto(out *PortworxVolumeSource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortworxVolumeSource.
+func (in *PortworxVolumeSource) DeepCopy() *PortworxVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(PortworxVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Preconditions) DeepCopyInto(out *Preconditions) {
+	*out = *in
+	if in.UID != nil {
+		in, out := &in.UID, &out.UID
+		*out = new(types.UID)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Preconditions.
+func (in *Preconditions) DeepCopy() *Preconditions {
+	if in == nil {
+		return nil
+	}
+	out := new(Preconditions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PreferAvoidPodsEntry) DeepCopyInto(out *PreferAvoidPodsEntry) {
+	*out = *in
+	in.PodSignature.DeepCopyInto(&out.PodSignature)
+	in.EvictionTime.DeepCopyInto(&out.EvictionTime)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreferAvoidPodsEntry.
+func (in *PreferAvoidPodsEntry) DeepCopy() *PreferAvoidPodsEntry {
+	if in == nil {
+		return nil
+	}
+	out := new(PreferAvoidPodsEntry)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PreferredSchedulingTerm) DeepCopyInto(out *PreferredSchedulingTerm) {
+	*out = *in
+	in.Preference.DeepCopyInto(&out.Preference)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreferredSchedulingTerm.
+func (in *PreferredSchedulingTerm) DeepCopy() *PreferredSchedulingTerm {
+	if in == nil {
+		return nil
+	}
+	out := new(PreferredSchedulingTerm)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Probe) DeepCopyInto(out *Probe) {
+	*out = *in
+	in.Handler.DeepCopyInto(&out.Handler)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Probe.
+func (in *Probe) DeepCopy() *Probe {
+	if in == nil {
+		return nil
+	}
+	out := new(Probe)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProjectedVolumeSource) DeepCopyInto(out *ProjectedVolumeSource) {
+	*out = *in
+	if in.Sources != nil {
+		in, out := &in.Sources, &out.Sources
+		*out = make([]VolumeProjection, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.DefaultMode != nil {
+		in, out := &in.DefaultMode, &out.DefaultMode
+		*out = new(int32)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectedVolumeSource.
+func (in *ProjectedVolumeSource) DeepCopy() *ProjectedVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(ProjectedVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *QuobyteVolumeSource) DeepCopyInto(out *QuobyteVolumeSource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuobyteVolumeSource.
+func (in *QuobyteVolumeSource) DeepCopy() *QuobyteVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(QuobyteVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RBDPersistentVolumeSource) DeepCopyInto(out *RBDPersistentVolumeSource) {
+	*out = *in
+	if in.CephMonitors != nil {
+		in, out := &in.CephMonitors, &out.CephMonitors
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.SecretRef != nil {
+		in, out := &in.SecretRef, &out.SecretRef
+		*out = new(SecretReference)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RBDPersistentVolumeSource.
+func (in *RBDPersistentVolumeSource) DeepCopy() *RBDPersistentVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(RBDPersistentVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RBDVolumeSource) DeepCopyInto(out *RBDVolumeSource) {
+	*out = *in
+	if in.CephMonitors != nil {
+		in, out := &in.CephMonitors, &out.CephMonitors
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.SecretRef != nil {
+		in, out := &in.SecretRef, &out.SecretRef
+		*out = new(LocalObjectReference)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RBDVolumeSource.
+func (in *RBDVolumeSource) DeepCopy() *RBDVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(RBDVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RangeAllocation) DeepCopyInto(out *RangeAllocation) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	if in.Data != nil {
+		in, out := &in.Data, &out.Data
+		*out = make([]byte, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RangeAllocation.
+func (in *RangeAllocation) DeepCopy() *RangeAllocation {
+	if in == nil {
+		return nil
+	}
+	out := new(RangeAllocation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *RangeAllocation) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplicationController) DeepCopyInto(out *ReplicationController) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationController.
+func (in *ReplicationController) DeepCopy() *ReplicationController {
+	if in == nil {
+		return nil
+	}
+	out := new(ReplicationController)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ReplicationController) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplicationControllerCondition) DeepCopyInto(out *ReplicationControllerCondition) {
+	*out = *in
+	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerCondition.
+func (in *ReplicationControllerCondition) DeepCopy() *ReplicationControllerCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(ReplicationControllerCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplicationControllerList) DeepCopyInto(out *ReplicationControllerList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]ReplicationController, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerList.
+func (in *ReplicationControllerList) DeepCopy() *ReplicationControllerList {
+	if in == nil {
+		return nil
+	}
+	out := new(ReplicationControllerList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ReplicationControllerList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplicationControllerSpec) DeepCopyInto(out *ReplicationControllerSpec) {
+	*out = *in
+	if in.Replicas != nil {
+		in, out := &in.Replicas, &out.Replicas
+		*out = new(int32)
+		**out = **in
+	}
+	if in.Selector != nil {
+		in, out := &in.Selector, &out.Selector
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.Template != nil {
+		in, out := &in.Template, &out.Template
+		*out = new(PodTemplateSpec)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerSpec.
+func (in *ReplicationControllerSpec) DeepCopy() *ReplicationControllerSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ReplicationControllerSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplicationControllerStatus) DeepCopyInto(out *ReplicationControllerStatus) {
+	*out = *in
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]ReplicationControllerCondition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerStatus.
+func (in *ReplicationControllerStatus) DeepCopy() *ReplicationControllerStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ReplicationControllerStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceFieldSelector) DeepCopyInto(out *ResourceFieldSelector) {
+	*out = *in
+	out.Divisor = in.Divisor.DeepCopy()
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceFieldSelector.
+func (in *ResourceFieldSelector) DeepCopy() *ResourceFieldSelector {
+	if in == nil {
+		return nil
+	}
+	out := new(ResourceFieldSelector)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in ResourceList) DeepCopyInto(out *ResourceList) {
+	{
+		in := &in
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+		return
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceList.
+func (in ResourceList) DeepCopy() ResourceList {
+	if in == nil {
+		return nil
+	}
+	out := new(ResourceList)
+	in.DeepCopyInto(out)
+	return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceQuota) DeepCopyInto(out *ResourceQuota) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuota.
+func (in *ResourceQuota) DeepCopy() *ResourceQuota {
+	if in == nil {
+		return nil
+	}
+	out := new(ResourceQuota)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ResourceQuota) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceQuotaList) DeepCopyInto(out *ResourceQuotaList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]ResourceQuota, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaList.
+func (in *ResourceQuotaList) DeepCopy() *ResourceQuotaList {
+	if in == nil {
+		return nil
+	}
+	out := new(ResourceQuotaList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ResourceQuotaList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceQuotaSpec) DeepCopyInto(out *ResourceQuotaSpec) {
+	*out = *in
+	if in.Hard != nil {
+		in, out := &in.Hard, &out.Hard
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	if in.Scopes != nil {
+		in, out := &in.Scopes, &out.Scopes
+		*out = make([]ResourceQuotaScope, len(*in))
+		copy(*out, *in)
+	}
+	if in.ScopeSelector != nil {
+		in, out := &in.ScopeSelector, &out.ScopeSelector
+		*out = new(ScopeSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaSpec.
+func (in *ResourceQuotaSpec) DeepCopy() *ResourceQuotaSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ResourceQuotaSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceQuotaStatus) DeepCopyInto(out *ResourceQuotaStatus) {
+	*out = *in
+	if in.Hard != nil {
+		in, out := &in.Hard, &out.Hard
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	if in.Used != nil {
+		in, out := &in.Used, &out.Used
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaStatus.
+func (in *ResourceQuotaStatus) DeepCopy() *ResourceQuotaStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ResourceQuotaStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceRequirements) DeepCopyInto(out *ResourceRequirements) {
+	*out = *in
+	if in.Limits != nil {
+		in, out := &in.Limits, &out.Limits
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	if in.Requests != nil {
+		in, out := &in.Requests, &out.Requests
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRequirements.
+func (in *ResourceRequirements) DeepCopy() *ResourceRequirements {
+	if in == nil {
+		return nil
+	}
+	out := new(ResourceRequirements)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SELinuxOptions) DeepCopyInto(out *SELinuxOptions) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SELinuxOptions.
+func (in *SELinuxOptions) DeepCopy() *SELinuxOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(SELinuxOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ScaleIOPersistentVolumeSource) DeepCopyInto(out *ScaleIOPersistentVolumeSource) {
+	*out = *in
+	if in.SecretRef != nil {
+		in, out := &in.SecretRef, &out.SecretRef
+		*out = new(SecretReference)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleIOPersistentVolumeSource.
+func (in *ScaleIOPersistentVolumeSource) DeepCopy() *ScaleIOPersistentVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(ScaleIOPersistentVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ScaleIOVolumeSource) DeepCopyInto(out *ScaleIOVolumeSource) {
+	*out = *in
+	if in.SecretRef != nil {
+		in, out := &in.SecretRef, &out.SecretRef
+		*out = new(LocalObjectReference)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleIOVolumeSource.
+func (in *ScaleIOVolumeSource) DeepCopy() *ScaleIOVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(ScaleIOVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ScopeSelector) DeepCopyInto(out *ScopeSelector) {
+	*out = *in
+	if in.MatchExpressions != nil {
+		in, out := &in.MatchExpressions, &out.MatchExpressions
+		*out = make([]ScopedResourceSelectorRequirement, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeSelector.
+func (in *ScopeSelector) DeepCopy() *ScopeSelector {
+	if in == nil {
+		return nil
+	}
+	out := new(ScopeSelector)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ScopedResourceSelectorRequirement) DeepCopyInto(out *ScopedResourceSelectorRequirement) {
+	*out = *in
+	if in.Values != nil {
+		in, out := &in.Values, &out.Values
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopedResourceSelectorRequirement.
+func (in *ScopedResourceSelectorRequirement) DeepCopy() *ScopedResourceSelectorRequirement {
+	if in == nil {
+		return nil
+	}
+	out := new(ScopedResourceSelectorRequirement)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Secret) DeepCopyInto(out *Secret) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	if in.Data != nil {
+		in, out := &in.Data, &out.Data
+		*out = make(map[string][]byte, len(*in))
+		for key, val := range *in {
+			var outVal []byte
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				in, out := &val, &outVal
+				*out = make([]byte, len(*in))
+				copy(*out, *in)
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.StringData != nil {
+		in, out := &in.StringData, &out.StringData
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Secret.
+func (in *Secret) DeepCopy() *Secret {
+	if in == nil {
+		return nil
+	}
+	out := new(Secret)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Secret) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecretEnvSource) DeepCopyInto(out *SecretEnvSource) {
+	*out = *in
+	out.LocalObjectReference = in.LocalObjectReference
+	if in.Optional != nil {
+		in, out := &in.Optional, &out.Optional
+		*out = new(bool)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretEnvSource.
+func (in *SecretEnvSource) DeepCopy() *SecretEnvSource {
+	if in == nil {
+		return nil
+	}
+	out := new(SecretEnvSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecretKeySelector) DeepCopyInto(out *SecretKeySelector) {
+	*out = *in
+	out.LocalObjectReference = in.LocalObjectReference
+	if in.Optional != nil {
+		in, out := &in.Optional, &out.Optional
+		*out = new(bool)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretKeySelector.
+func (in *SecretKeySelector) DeepCopy() *SecretKeySelector {
+	if in == nil {
+		return nil
+	}
+	out := new(SecretKeySelector)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecretList) DeepCopyInto(out *SecretList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Secret, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretList.
+func (in *SecretList) DeepCopy() *SecretList {
+	if in == nil {
+		return nil
+	}
+	out := new(SecretList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *SecretList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecretProjection) DeepCopyInto(out *SecretProjection) {
+	*out = *in
+	out.LocalObjectReference = in.LocalObjectReference
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]KeyToPath, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Optional != nil {
+		in, out := &in.Optional, &out.Optional
+		*out = new(bool)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretProjection.
+func (in *SecretProjection) DeepCopy() *SecretProjection {
+	if in == nil {
+		return nil
+	}
+	out := new(SecretProjection)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecretReference) DeepCopyInto(out *SecretReference) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretReference.
+func (in *SecretReference) DeepCopy() *SecretReference {
+	if in == nil {
+		return nil
+	}
+	out := new(SecretReference)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecretVolumeSource) DeepCopyInto(out *SecretVolumeSource) {
+	*out = *in
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]KeyToPath, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.DefaultMode != nil {
+		in, out := &in.DefaultMode, &out.DefaultMode
+		*out = new(int32)
+		**out = **in
+	}
+	if in.Optional != nil {
+		in, out := &in.Optional, &out.Optional
+		*out = new(bool)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretVolumeSource.
+func (in *SecretVolumeSource) DeepCopy() *SecretVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(SecretVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecurityContext) DeepCopyInto(out *SecurityContext) {
+	*out = *in
+	if in.Capabilities != nil {
+		in, out := &in.Capabilities, &out.Capabilities
+		*out = new(Capabilities)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Privileged != nil {
+		in, out := &in.Privileged, &out.Privileged
+		*out = new(bool)
+		**out = **in
+	}
+	if in.SELinuxOptions != nil {
+		in, out := &in.SELinuxOptions, &out.SELinuxOptions
+		*out = new(SELinuxOptions)
+		**out = **in
+	}
+	if in.RunAsUser != nil {
+		in, out := &in.RunAsUser, &out.RunAsUser
+		*out = new(int64)
+		**out = **in
+	}
+	if in.RunAsGroup != nil {
+		in, out := &in.RunAsGroup, &out.RunAsGroup
+		*out = new(int64)
+		**out = **in
+	}
+	if in.RunAsNonRoot != nil {
+		in, out := &in.RunAsNonRoot, &out.RunAsNonRoot
+		*out = new(bool)
+		**out = **in
+	}
+	if in.ReadOnlyRootFilesystem != nil {
+		in, out := &in.ReadOnlyRootFilesystem, &out.ReadOnlyRootFilesystem
+		*out = new(bool)
+		**out = **in
+	}
+	if in.AllowPrivilegeEscalation != nil {
+		in, out := &in.AllowPrivilegeEscalation, &out.AllowPrivilegeEscalation
+		*out = new(bool)
+		**out = **in
+	}
+	if in.ProcMount != nil {
+		in, out := &in.ProcMount, &out.ProcMount
+		*out = new(ProcMountType)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityContext.
+func (in *SecurityContext) DeepCopy() *SecurityContext {
+	if in == nil {
+		return nil
+	}
+	out := new(SecurityContext)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SerializedReference) DeepCopyInto(out *SerializedReference) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.Reference = in.Reference
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerializedReference.
+func (in *SerializedReference) DeepCopy() *SerializedReference {
+	if in == nil {
+		return nil
+	}
+	out := new(SerializedReference)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *SerializedReference) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Service) DeepCopyInto(out *Service) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Service.
+func (in *Service) DeepCopy() *Service {
+	if in == nil {
+		return nil
+	}
+	out := new(Service)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Service) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceAccount) DeepCopyInto(out *ServiceAccount) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	if in.Secrets != nil {
+		in, out := &in.Secrets, &out.Secrets
+		*out = make([]ObjectReference, len(*in))
+		copy(*out, *in)
+	}
+	if in.ImagePullSecrets != nil {
+		in, out := &in.ImagePullSecrets, &out.ImagePullSecrets
+		*out = make([]LocalObjectReference, len(*in))
+		copy(*out, *in)
+	}
+	if in.AutomountServiceAccountToken != nil {
+		in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken
+		*out = new(bool)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccount.
+func (in *ServiceAccount) DeepCopy() *ServiceAccount {
+	if in == nil {
+		return nil
+	}
+	out := new(ServiceAccount)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ServiceAccount) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceAccountList) DeepCopyInto(out *ServiceAccountList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]ServiceAccount, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountList.
+func (in *ServiceAccountList) DeepCopy() *ServiceAccountList {
+	if in == nil {
+		return nil
+	}
+	out := new(ServiceAccountList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ServiceAccountList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceAccountTokenProjection) DeepCopyInto(out *ServiceAccountTokenProjection) {
+	*out = *in
+	if in.ExpirationSeconds != nil {
+		in, out := &in.ExpirationSeconds, &out.ExpirationSeconds
+		*out = new(int64)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountTokenProjection.
+func (in *ServiceAccountTokenProjection) DeepCopy() *ServiceAccountTokenProjection {
+	if in == nil {
+		return nil
+	}
+	out := new(ServiceAccountTokenProjection)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceList) DeepCopyInto(out *ServiceList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Service, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceList.
+func (in *ServiceList) DeepCopy() *ServiceList {
+	if in == nil {
+		return nil
+	}
+	out := new(ServiceList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ServiceList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServicePort) DeepCopyInto(out *ServicePort) {
+	*out = *in
+	out.TargetPort = in.TargetPort
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicePort.
+func (in *ServicePort) DeepCopy() *ServicePort {
+	if in == nil {
+		return nil
+	}
+	out := new(ServicePort)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceProxyOptions) DeepCopyInto(out *ServiceProxyOptions) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceProxyOptions.
+func (in *ServiceProxyOptions) DeepCopy() *ServiceProxyOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(ServiceProxyOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ServiceProxyOptions) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) {
+	*out = *in
+	if in.Ports != nil {
+		in, out := &in.Ports, &out.Ports
+		*out = make([]ServicePort, len(*in))
+		copy(*out, *in)
+	}
+	if in.Selector != nil {
+		in, out := &in.Selector, &out.Selector
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.ExternalIPs != nil {
+		in, out := &in.ExternalIPs, &out.ExternalIPs
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.LoadBalancerSourceRanges != nil {
+		in, out := &in.LoadBalancerSourceRanges, &out.LoadBalancerSourceRanges
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.SessionAffinityConfig != nil {
+		in, out := &in.SessionAffinityConfig, &out.SessionAffinityConfig
+		*out = new(SessionAffinityConfig)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceSpec.
+func (in *ServiceSpec) DeepCopy() *ServiceSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ServiceSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceStatus) DeepCopyInto(out *ServiceStatus) {
+	*out = *in
+	in.LoadBalancer.DeepCopyInto(&out.LoadBalancer)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceStatus.
+func (in *ServiceStatus) DeepCopy() *ServiceStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ServiceStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SessionAffinityConfig) DeepCopyInto(out *SessionAffinityConfig) {
+	*out = *in
+	if in.ClientIP != nil {
+		in, out := &in.ClientIP, &out.ClientIP
+		*out = new(ClientIPConfig)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SessionAffinityConfig.
+func (in *SessionAffinityConfig) DeepCopy() *SessionAffinityConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(SessionAffinityConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StorageOSPersistentVolumeSource) DeepCopyInto(out *StorageOSPersistentVolumeSource) {
+	*out = *in
+	if in.SecretRef != nil {
+		in, out := &in.SecretRef, &out.SecretRef
+		*out = new(ObjectReference)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageOSPersistentVolumeSource.
+func (in *StorageOSPersistentVolumeSource) DeepCopy() *StorageOSPersistentVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(StorageOSPersistentVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StorageOSVolumeSource) DeepCopyInto(out *StorageOSVolumeSource) {
+	*out = *in
+	if in.SecretRef != nil {
+		in, out := &in.SecretRef, &out.SecretRef
+		*out = new(LocalObjectReference)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageOSVolumeSource.
+func (in *StorageOSVolumeSource) DeepCopy() *StorageOSVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(StorageOSVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Sysctl) DeepCopyInto(out *Sysctl) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Sysctl.
+func (in *Sysctl) DeepCopy() *Sysctl {
+	if in == nil {
+		return nil
+	}
+	out := new(Sysctl)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TCPSocketAction) DeepCopyInto(out *TCPSocketAction) {
+	*out = *in
+	out.Port = in.Port
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPSocketAction.
+func (in *TCPSocketAction) DeepCopy() *TCPSocketAction {
+	if in == nil {
+		return nil
+	}
+	out := new(TCPSocketAction)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Taint) DeepCopyInto(out *Taint) {
+	*out = *in
+	if in.TimeAdded != nil {
+		in, out := &in.TimeAdded, &out.TimeAdded
+		*out = (*in).DeepCopy()
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Taint.
+func (in *Taint) DeepCopy() *Taint {
+	if in == nil {
+		return nil
+	}
+	out := new(Taint)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Toleration) DeepCopyInto(out *Toleration) {
+	*out = *in
+	if in.TolerationSeconds != nil {
+		in, out := &in.TolerationSeconds, &out.TolerationSeconds
+		*out = new(int64)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Toleration.
+func (in *Toleration) DeepCopy() *Toleration {
+	if in == nil {
+		return nil
+	}
+	out := new(Toleration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TopologySelectorLabelRequirement) DeepCopyInto(out *TopologySelectorLabelRequirement) {
+	*out = *in
+	if in.Values != nil {
+		in, out := &in.Values, &out.Values
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopologySelectorLabelRequirement.
+func (in *TopologySelectorLabelRequirement) DeepCopy() *TopologySelectorLabelRequirement {
+	if in == nil {
+		return nil
+	}
+	out := new(TopologySelectorLabelRequirement)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TopologySelectorTerm) DeepCopyInto(out *TopologySelectorTerm) {
+	*out = *in
+	if in.MatchLabelExpressions != nil {
+		in, out := &in.MatchLabelExpressions, &out.MatchLabelExpressions
+		*out = make([]TopologySelectorLabelRequirement, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopologySelectorTerm.
+func (in *TopologySelectorTerm) DeepCopy() *TopologySelectorTerm {
+	if in == nil {
+		return nil
+	}
+	out := new(TopologySelectorTerm)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TypedLocalObjectReference) DeepCopyInto(out *TypedLocalObjectReference) {
+	*out = *in
+	if in.APIGroup != nil {
+		in, out := &in.APIGroup, &out.APIGroup
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TypedLocalObjectReference.
+func (in *TypedLocalObjectReference) DeepCopy() *TypedLocalObjectReference {
+	if in == nil {
+		return nil
+	}
+	out := new(TypedLocalObjectReference)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Volume) DeepCopyInto(out *Volume) {
+	*out = *in
+	in.VolumeSource.DeepCopyInto(&out.VolumeSource)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Volume.
+func (in *Volume) DeepCopy() *Volume {
+	if in == nil {
+		return nil
+	}
+	out := new(Volume)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VolumeDevice) DeepCopyInto(out *VolumeDevice) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeDevice.
+func (in *VolumeDevice) DeepCopy() *VolumeDevice {
+	if in == nil {
+		return nil
+	}
+	out := new(VolumeDevice)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VolumeMount) DeepCopyInto(out *VolumeMount) {
+	*out = *in
+	if in.MountPropagation != nil {
+		in, out := &in.MountPropagation, &out.MountPropagation
+		*out = new(MountPropagationMode)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeMount.
+func (in *VolumeMount) DeepCopy() *VolumeMount {
+	if in == nil {
+		return nil
+	}
+	out := new(VolumeMount)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VolumeNodeAffinity) DeepCopyInto(out *VolumeNodeAffinity) {
+	*out = *in
+	if in.Required != nil {
+		in, out := &in.Required, &out.Required
+		*out = new(NodeSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeNodeAffinity.
+func (in *VolumeNodeAffinity) DeepCopy() *VolumeNodeAffinity {
+	if in == nil {
+		return nil
+	}
+	out := new(VolumeNodeAffinity)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VolumeProjection) DeepCopyInto(out *VolumeProjection) {
+	*out = *in
+	if in.Secret != nil {
+		in, out := &in.Secret, &out.Secret
+		*out = new(SecretProjection)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.DownwardAPI != nil {
+		in, out := &in.DownwardAPI, &out.DownwardAPI
+		*out = new(DownwardAPIProjection)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ConfigMap != nil {
+		in, out := &in.ConfigMap, &out.ConfigMap
+		*out = new(ConfigMapProjection)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ServiceAccountToken != nil {
+		in, out := &in.ServiceAccountToken, &out.ServiceAccountToken
+		*out = new(ServiceAccountTokenProjection)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeProjection.
+func (in *VolumeProjection) DeepCopy() *VolumeProjection {
+	if in == nil {
+		return nil
+	}
+	out := new(VolumeProjection)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VolumeSource) DeepCopyInto(out *VolumeSource) {
+	*out = *in
+	if in.HostPath != nil {
+		in, out := &in.HostPath, &out.HostPath
+		*out = new(HostPathVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.EmptyDir != nil {
+		in, out := &in.EmptyDir, &out.EmptyDir
+		*out = new(EmptyDirVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.GCEPersistentDisk != nil {
+		in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk
+		*out = new(GCEPersistentDiskVolumeSource)
+		**out = **in
+	}
+	if in.AWSElasticBlockStore != nil {
+		in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore
+		*out = new(AWSElasticBlockStoreVolumeSource)
+		**out = **in
+	}
+	if in.GitRepo != nil {
+		in, out := &in.GitRepo, &out.GitRepo
+		*out = new(GitRepoVolumeSource)
+		**out = **in
+	}
+	if in.Secret != nil {
+		in, out := &in.Secret, &out.Secret
+		*out = new(SecretVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.NFS != nil {
+		in, out := &in.NFS, &out.NFS
+		*out = new(NFSVolumeSource)
+		**out = **in
+	}
+	if in.ISCSI != nil {
+		in, out := &in.ISCSI, &out.ISCSI
+		*out = new(ISCSIVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Glusterfs != nil {
+		in, out := &in.Glusterfs, &out.Glusterfs
+		*out = new(GlusterfsVolumeSource)
+		**out = **in
+	}
+	if in.PersistentVolumeClaim != nil {
+		in, out := &in.PersistentVolumeClaim, &out.PersistentVolumeClaim
+		*out = new(PersistentVolumeClaimVolumeSource)
+		**out = **in
+	}
+	if in.RBD != nil {
+		in, out := &in.RBD, &out.RBD
+		*out = new(RBDVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FlexVolume != nil {
+		in, out := &in.FlexVolume, &out.FlexVolume
+		*out = new(FlexVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Cinder != nil {
+		in, out := &in.Cinder, &out.Cinder
+		*out = new(CinderVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.CephFS != nil {
+		in, out := &in.CephFS, &out.CephFS
+		*out = new(CephFSVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Flocker != nil {
+		in, out := &in.Flocker, &out.Flocker
+		*out = new(FlockerVolumeSource)
+		**out = **in
+	}
+	if in.DownwardAPI != nil {
+		in, out := &in.DownwardAPI, &out.DownwardAPI
+		*out = new(DownwardAPIVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FC != nil {
+		in, out := &in.FC, &out.FC
+		*out = new(FCVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.AzureFile != nil {
+		in, out := &in.AzureFile, &out.AzureFile
+		*out = new(AzureFileVolumeSource)
+		**out = **in
+	}
+	if in.ConfigMap != nil {
+		in, out := &in.ConfigMap, &out.ConfigMap
+		*out = new(ConfigMapVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.VsphereVolume != nil {
+		in, out := &in.VsphereVolume, &out.VsphereVolume
+		*out = new(VsphereVirtualDiskVolumeSource)
+		**out = **in
+	}
+	if in.Quobyte != nil {
+		in, out := &in.Quobyte, &out.Quobyte
+		*out = new(QuobyteVolumeSource)
+		**out = **in
+	}
+	if in.AzureDisk != nil {
+		in, out := &in.AzureDisk, &out.AzureDisk
+		*out = new(AzureDiskVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.PhotonPersistentDisk != nil {
+		in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk
+		*out = new(PhotonPersistentDiskVolumeSource)
+		**out = **in
+	}
+	if in.Projected != nil {
+		in, out := &in.Projected, &out.Projected
+		*out = new(ProjectedVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.PortworxVolume != nil {
+		in, out := &in.PortworxVolume, &out.PortworxVolume
+		*out = new(PortworxVolumeSource)
+		**out = **in
+	}
+	if in.ScaleIO != nil {
+		in, out := &in.ScaleIO, &out.ScaleIO
+		*out = new(ScaleIOVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.StorageOS != nil {
+		in, out := &in.StorageOS, &out.StorageOS
+		*out = new(StorageOSVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSource.
+func (in *VolumeSource) DeepCopy() *VolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(VolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VsphereVirtualDiskVolumeSource) DeepCopyInto(out *VsphereVirtualDiskVolumeSource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VsphereVirtualDiskVolumeSource.
+func (in *VsphereVirtualDiskVolumeSource) DeepCopy() *VsphereVirtualDiskVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(VsphereVirtualDiskVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WeightedPodAffinityTerm) DeepCopyInto(out *WeightedPodAffinityTerm) {
+	*out = *in
+	in.PodAffinityTerm.DeepCopyInto(&out.PodAffinityTerm)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WeightedPodAffinityTerm.
+func (in *WeightedPodAffinityTerm) DeepCopy() *WeightedPodAffinityTerm {
+	if in == nil {
+		return nil
+	}
+	out := new(WeightedPodAffinityTerm)
+	in.DeepCopyInto(out)
+	return out
+}
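
Every helper in the file above follows the same generated pattern: DeepCopyInto performs the field-by-field copy into a caller-supplied value, DeepCopy allocates a fresh value and delegates to it, and DeepCopyObject exposes the copy through the runtime.Object interface. A minimal sketch of the guarantee consumers rely on, with purely illustrative values:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	orig := &v1.Secret{
		StringData: map[string]string{"user": "admin"},
	}

	// DeepCopy must return a fully independent value, so mutating
	// the copy's map does not leak back into the original.
	cp := orig.DeepCopy()
	cp.StringData["user"] = "guest"

	fmt.Println(orig.StringData["user"]) // admin
	fmt.Println(cp.StringData["user"])   // guest
}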
diff --git a/vendor/k8s.io/api/events/v1beta1/doc.go b/vendor/k8s.io/api/events/v1beta1/doc.go
new file mode 100644
index 0000000..bd269c6
--- /dev/null
+++ b/vendor/k8s.io/api/events/v1beta1/doc.go
@@ -0,0 +1,22 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:openapi-gen=true
+
+// +groupName=events.k8s.io
+
+package v1beta1 // import "k8s.io/api/events/v1beta1"
diff --git a/vendor/k8s.io/api/events/v1beta1/generated.proto b/vendor/k8s.io/api/events/v1beta1/generated.proto
new file mode 100644
index 0000000..b3e565e
--- /dev/null
+++ b/vendor/k8s.io/api/events/v1beta1/generated.proto
@@ -0,0 +1,121 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.api.events.v1beta1;
+
+import "k8s.io/api/core/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1beta1";
+
+// Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system.
+message Event {
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Required. Time when this Event was first observed.
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime eventTime = 2;
+
+  // Data about the Event series this event represents or nil if it's a singleton Event.
+  // +optional
+  optional EventSeries series = 3;
+
+  // Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`.
+  // +optional
+  optional string reportingController = 4;
+
+  // ID of the controller instance, e.g. `kubelet-xyzf`.
+  // +optional
+  optional string reportingInstance = 5;
+
+  // What action was taken/failed regarding the regarding object.
+  // +optional
+  optional string action = 6;
+
+  // Why the action was taken.
+  optional string reason = 7;
+
+  // The object this Event is about. In most cases it's an object that the reporting controller implements.
+  // E.g. ReplicaSetController implements ReplicaSets and this event is emitted because
+  // it acts on some changes in a ReplicaSet object.
+  // +optional
+  optional k8s.io.api.core.v1.ObjectReference regarding = 8;
+
+  // Optional secondary object for more complex actions. E.g. when the regarding object triggers
+  // the creation or deletion of a related object.
+  // +optional
+  optional k8s.io.api.core.v1.ObjectReference related = 9;
+
+  // Optional. A human-readable description of the status of this operation.
+  // Maximal length of the note is 1kB, but libraries should be prepared to
+  // handle values up to 64kB.
+  // +optional
+  optional string note = 10;
+
+  // Type of this event (Normal, Warning); new types could be added in the
+  // future.
+  // +optional
+  optional string type = 11;
+
+  // Deprecated field assuring backward compatibility with core.v1 Event type
+  // +optional
+  optional k8s.io.api.core.v1.EventSource deprecatedSource = 12;
+
+  // Deprecated field assuring backward compatibility with core.v1 Event type
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time deprecatedFirstTimestamp = 13;
+
+  // Deprecated field assuring backward compatibility with core.v1 Event type
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time deprecatedLastTimestamp = 14;
+
+  // Deprecated field assuring backward compatibility with core.v1 Event type
+  // +optional
+  optional int32 deprecatedCount = 15;
+}
+
+// EventList is a list of Event objects.
+message EventList {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is a list of schema objects.
+  repeated Event items = 2;
+}
+
+// EventSeries contains information on a series of events, i.e. a thing that was/is happening
+// continuously for some time.
+message EventSeries {
+  // Number of occurrences in this series up to the last heartbeat time.
+  optional int32 count = 1;
+
+  // Time when the last Event from the series was seen before the last heartbeat.
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime lastObservedTime = 2;
+
+  // Information on whether this series is ongoing or finished.
+  optional string state = 3;
+}
+
diff --git a/vendor/k8s.io/api/events/v1beta1/register.go b/vendor/k8s.io/api/events/v1beta1/register.go
new file mode 100644
index 0000000..4506782
--- /dev/null
+++ b/vendor/k8s.io/api/events/v1beta1/register.go
@@ -0,0 +1,53 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "events.k8s.io"
+
+// SchemeGroupVersion is the group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
+	// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+	SchemeBuilder      = runtime.NewSchemeBuilder(addKnownTypes)
+	localSchemeBuilder = &SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+)
+
+// addKnownTypes adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&Event{},
+		&EventList{},
+	)
+
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
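
A minimal sketch of how these registration helpers are typically consumed, assuming only the apimachinery runtime package:

package main

import (
	"fmt"

	eventsv1beta1 "k8s.io/api/events/v1beta1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()

	// AddToScheme registers Event and EventList under the
	// events.k8s.io/v1beta1 group version, so the scheme can
	// recognize and deep-copy these objects.
	if err := eventsv1beta1.AddToScheme(scheme); err != nil {
		panic(err)
	}

	gvk := eventsv1beta1.SchemeGroupVersion.WithKind("Event")
	fmt.Println(scheme.Recognizes(gvk)) // true
}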
diff --git a/vendor/k8s.io/api/events/v1beta1/types.go b/vendor/k8s.io/api/events/v1beta1/types.go
new file mode 100644
index 0000000..dc48ddb
--- /dev/null
+++ b/vendor/k8s.io/api/events/v1beta1/types.go
@@ -0,0 +1,122 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system.
+type Event struct {
+	metav1.TypeMeta `json:",inline"`
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Required. Time when this Event was first observed.
+	EventTime metav1.MicroTime `json:"eventTime" protobuf:"bytes,2,opt,name=eventTime"`
+
+	// Data about the Event series this event represents or nil if it's a singleton Event.
+	// +optional
+	Series *EventSeries `json:"series,omitempty" protobuf:"bytes,3,opt,name=series"`
+
+	// Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`.
+	// +optional
+	ReportingController string `json:"reportingController,omitempty" protobuf:"bytes,4,opt,name=reportingController"`
+
+	// ID of the controller instance, e.g. `kubelet-xyzf`.
+	// +optional
+	ReportingInstance string `json:"reportingInstance,omitempty" protobuf:"bytes,5,opt,name=reportingInstance"`
+
+	// What action was taken/failed regarding the regarding object.
+	// +optional
+	Action string `json:"action,omitempty" protobuf:"bytes,6,name=action"`
+
+	// Why the action was taken.
+	Reason string `json:"reason,omitempty" protobuf:"bytes,7,name=reason"`
+
+	// The object this Event is about. In most cases it's an object that the reporting controller implements.
+	// E.g. ReplicaSetController implements ReplicaSets and this event is emitted because
+	// it acts on some changes in a ReplicaSet object.
+	// +optional
+	Regarding corev1.ObjectReference `json:"regarding,omitempty" protobuf:"bytes,8,opt,name=regarding"`
+
+	// Optional secondary object for more complex actions. E.g. when the regarding object triggers
+	// the creation or deletion of a related object.
+	// +optional
+	Related *corev1.ObjectReference `json:"related,omitempty" protobuf:"bytes,9,opt,name=related"`
+
+	// Optional. A human-readable description of the status of this operation.
+	// Maximal length of the note is 1kB, but libraries should be prepared to
+	// handle values up to 64kB.
+	// +optional
+	Note string `json:"note,omitempty" protobuf:"bytes,10,opt,name=note"`
+
+	// Type of this event (Normal, Warning); new types could be added in the
+	// future.
+	// +optional
+	Type string `json:"type,omitempty" protobuf:"bytes,11,opt,name=type"`
+
+	// Deprecated field assuring backward compatibility with core.v1 Event type
+	// +optional
+	DeprecatedSource corev1.EventSource `json:"deprecatedSource,omitempty" protobuf:"bytes,12,opt,name=deprecatedSource"`
+	// Deprecated field assuring backward compatibility with core.v1 Event type
+	// +optional
+	DeprecatedFirstTimestamp metav1.Time `json:"deprecatedFirstTimestamp,omitempty" protobuf:"bytes,13,opt,name=deprecatedFirstTimestamp"`
+	// Deprecated field assuring backward compatibility with core.v1 Event type
+	// +optional
+	DeprecatedLastTimestamp metav1.Time `json:"deprecatedLastTimestamp,omitempty" protobuf:"bytes,14,opt,name=deprecatedLastTimestamp"`
+	// Deprecated field assuring backward compatibility with core.v1 Event type
+	// +optional
+	DeprecatedCount int32 `json:"deprecatedCount,omitempty" protobuf:"varint,15,opt,name=deprecatedCount"`
+}
+
+// EventSeries contains information on a series of events, i.e. a thing that was/is happening
+// continuously for some time.
+type EventSeries struct {
+	// Number of occurrences in this series up to the last heartbeat time.
+	Count int32 `json:"count" protobuf:"varint,1,opt,name=count"`
+	// Time when the last Event from the series was seen before the last heartbeat.
+	LastObservedTime metav1.MicroTime `json:"lastObservedTime" protobuf:"bytes,2,opt,name=lastObservedTime"`
+	// Information on whether this series is ongoing or finished.
+	State EventSeriesState `json:"state" protobuf:"bytes,3,opt,name=state"`
+}
+
+type EventSeriesState string
+
+const (
+	EventSeriesStateOngoing  EventSeriesState = "Ongoing"
+	EventSeriesStateFinished EventSeriesState = "Finished"
+	EventSeriesStateUnknown  EventSeriesState = "Unknown"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// EventList is a list of Event objects.
+type EventList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is a list of schema objects.
+	Items []Event `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
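
A short sketch of these types in use; every field value below is illustrative, including the controller names:

package main

import (
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	eventsv1beta1 "k8s.io/api/events/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// newScaleUpEvent builds a structured Event the way a controller might,
// using the new-style fields (Action, Regarding, ReportingController)
// rather than the deprecated core/v1 compatibility fields.
func newScaleUpEvent() *eventsv1beta1.Event {
	now := metav1.NewMicroTime(time.Now())
	return &eventsv1beta1.Event{
		ObjectMeta:          metav1.ObjectMeta{Name: "frontend.1", Namespace: "default"},
		EventTime:           now,
		ReportingController: "example.io/replicaset-controller", // illustrative name
		ReportingInstance:   "replicaset-controller-node-1",
		Action:              "ScaleUp",
		Reason:              "ReplicaShortfall",
		Regarding:           corev1.ObjectReference{Kind: "ReplicaSet", Namespace: "default", Name: "frontend"},
		Note:                "Scaled up replica set frontend to 3",
		Type:                "Normal",
		Series: &eventsv1beta1.EventSeries{
			Count:            1,
			LastObservedTime: now,
			State:            eventsv1beta1.EventSeriesStateOngoing,
		},
	}
}

func main() {
	fmt.Println(newScaleUpEvent().Reason)
}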
diff --git a/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go
new file mode 100644
index 0000000..a15672c
--- /dev/null
+++ b/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go
@@ -0,0 +1,73 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple lines or blocks that you want to ignore, use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_Event = map[string]string{
+	"":                         "Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system.",
+	"eventTime":                "Required. Time when this Event was first observed.",
+	"series":                   "Data about the Event series this event represents or nil if it's a singleton Event.",
+	"reportingController":      "Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`.",
+	"reportingInstance":        "ID of the controller instance, e.g. `kubelet-xyzf`.",
+	"action":                   "What action was taken/failed regarding to the regarding object.",
+	"reason":                   "Why the action was taken.",
+	"regarding":                "The object this Event is about. In most cases it's an Object reporting controller implements. E.g. ReplicaSetController implements ReplicaSets and this event is emitted because it acts on some changes in a ReplicaSet object.",
+	"related":                  "Optional secondary object for more complex actions. E.g. when regarding object triggers a creation or deletion of related object.",
+	"note":                     "Optional. A human-readable description of the status of this operation. Maximal length of the note is 1kB, but libraries should be prepared to handle values up to 64kB.",
+	"type":                     "Type of this event (Normal, Warning), new types could be added in the future.",
+	"deprecatedSource":         "Deprecated field assuring backward compatibility with core.v1 Event type",
+	"deprecatedFirstTimestamp": "Deprecated field assuring backward compatibility with core.v1 Event type",
+	"deprecatedLastTimestamp":  "Deprecated field assuring backward compatibility with core.v1 Event type",
+	"deprecatedCount":          "Deprecated field assuring backward compatibility with core.v1 Event type",
+}
+
+func (Event) SwaggerDoc() map[string]string {
+	return map_Event
+}
+
+var map_EventList = map[string]string{
+	"":         "EventList is a list of Event objects.",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"items":    "Items is a list of schema objects.",
+}
+
+func (EventList) SwaggerDoc() map[string]string {
+	return map_EventList
+}
+
+var map_EventSeries = map[string]string{
+	"":                 "EventSeries contain information on series of events, i.e. thing that was/is happening continuously for some time.",
+	"count":            "Number of occurrences in this series up to the last heartbeat time",
+	"lastObservedTime": "Time when last Event from the series was seen before last heartbeat.",
+	"state":            "Information whether this series is ongoing or finished.",
+}
+
+func (EventSeries) SwaggerDoc() map[string]string {
+	return map_EventSeries
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
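
The maps are keyed by JSON field name, with the empty key carrying the type-level description. A minimal sketch of reading them:

package main

import (
	"fmt"

	eventsv1beta1 "k8s.io/api/events/v1beta1"
)

func main() {
	docs := eventsv1beta1.Event{}.SwaggerDoc()
	fmt.Println(docs[""])          // type-level description
	fmt.Println(docs["eventTime"]) // per-field documentation
}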
diff --git a/vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..e52e142
--- /dev/null
+++ b/vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go
@@ -0,0 +1,117 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	v1 "k8s.io/api/core/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Event) DeepCopyInto(out *Event) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.EventTime.DeepCopyInto(&out.EventTime)
+	if in.Series != nil {
+		in, out := &in.Series, &out.Series
+		*out = new(EventSeries)
+		(*in).DeepCopyInto(*out)
+	}
+	out.Regarding = in.Regarding
+	if in.Related != nil {
+		in, out := &in.Related, &out.Related
+		*out = new(v1.ObjectReference)
+		**out = **in
+	}
+	out.DeprecatedSource = in.DeprecatedSource
+	in.DeprecatedFirstTimestamp.DeepCopyInto(&out.DeprecatedFirstTimestamp)
+	in.DeprecatedLastTimestamp.DeepCopyInto(&out.DeprecatedLastTimestamp)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Event.
+func (in *Event) DeepCopy() *Event {
+	if in == nil {
+		return nil
+	}
+	out := new(Event)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Event) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EventList) DeepCopyInto(out *EventList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Event, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventList.
+func (in *EventList) DeepCopy() *EventList {
+	if in == nil {
+		return nil
+	}
+	out := new(EventList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *EventList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EventSeries) DeepCopyInto(out *EventSeries) {
+	*out = *in
+	in.LastObservedTime.DeepCopyInto(&out.LastObservedTime)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSeries.
+func (in *EventSeries) DeepCopy() *EventSeries {
+	if in == nil {
+		return nil
+	}
+	out := new(EventSeries)
+	in.DeepCopyInto(out)
+	return out
+}
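
The DeepCopyObject variants exist so that code holding only the runtime.Object interface can still copy safely, without knowing the concrete type; a brief sketch:

package main

import (
	"fmt"

	eventsv1beta1 "k8s.io/api/events/v1beta1"
	"k8s.io/apimachinery/pkg/runtime"
)

// duplicate copies any API object through the runtime.Object interface.
func duplicate(obj runtime.Object) runtime.Object {
	return obj.DeepCopyObject()
}

func main() {
	var obj runtime.Object = &eventsv1beta1.Event{Reason: "Started"}
	fmt.Printf("%T\n", duplicate(obj)) // *v1beta1.Event
}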
diff --git a/vendor/k8s.io/api/extensions/v1beta1/doc.go b/vendor/k8s.io/api/extensions/v1beta1/doc.go
new file mode 100644
index 0000000..8ce1830
--- /dev/null
+++ b/vendor/k8s.io/api/extensions/v1beta1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:openapi-gen=true
+
+package v1beta1 // import "k8s.io/api/extensions/v1beta1"
diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.proto b/vendor/k8s.io/api/extensions/v1beta1/generated.proto
new file mode 100644
index 0000000..efcda7e
--- /dev/null
+++ b/vendor/k8s.io/api/extensions/v1beta1/generated.proto
@@ -0,0 +1,1160 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.api.extensions.v1beta1;
+
+import "k8s.io/api/core/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+import "k8s.io/apimachinery/pkg/util/intstr/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1beta1";
+
+// AllowedFlexVolume represents a single Flexvolume that is allowed to be used.
+// Deprecated: use AllowedFlexVolume from policy API Group instead.
+message AllowedFlexVolume {
+  // driver is the name of the Flexvolume driver.
+  optional string driver = 1;
+}
+
+// AllowedHostPath defines the host volume conditions that will be enabled by a policy
+// for pods to use. It requires the path prefix to be defined.
+// Deprecated: use AllowedHostPath from policy API Group instead.
+message AllowedHostPath {
+  // pathPrefix is the path prefix that the host volume must match.
+  // It does not support `*`.
+  // Trailing slashes are trimmed when validating the path prefix with a host path.
+  //
+  // Examples:
+  // `/foo` would allow `/foo`, `/foo/` and `/foo/bar`
+  // `/foo` would not allow `/food` or `/etc/foo`
+  optional string pathPrefix = 1;
+
+  // when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly.
+  // +optional
+  optional bool readOnly = 2;
+}
+
+// DEPRECATED - This group version of DaemonSet is deprecated by apps/v1beta2/DaemonSet. See the release notes for
+// more information.
+// DaemonSet represents the configuration of a daemon set.
+message DaemonSet {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // The desired behavior of this daemon set.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+  // +optional
+  optional DaemonSetSpec spec = 2;
+
+  // The current status of this daemon set. This data may be
+  // out of date by some window of time.
+  // Populated by the system.
+  // Read-only.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+  // +optional
+  optional DaemonSetStatus status = 3;
+}
+
+// DaemonSetCondition describes the state of a DaemonSet at a certain point.
+message DaemonSetCondition {
+  // Type of DaemonSet condition.
+  optional string type = 1;
+
+  // Status of the condition, one of True, False, Unknown.
+  optional string status = 2;
+
+  // Last time the condition transitioned from one status to another.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
+
+  // The reason for the condition's last transition.
+  // +optional
+  optional string reason = 4;
+
+  // A human readable message indicating details about the transition.
+  // +optional
+  optional string message = 5;
+}
+
+// DaemonSetList is a collection of daemon sets.
+message DaemonSetList {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // A list of daemon sets.
+  repeated DaemonSet items = 2;
+}
+
+// DaemonSetSpec is the specification of a daemon set.
+message DaemonSetSpec {
+  // A label query over pods that are managed by the daemon set.
+  // Must match in order to be controlled.
+  // If empty, defaulted to labels on Pod template.
+  // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1;
+
+  // An object that describes the pod that will be created.
+  // The DaemonSet will create exactly one copy of this pod on every node
+  // that matches the template's node selector (or on every node if no node
+  // selector is specified).
+  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
+  optional k8s.io.api.core.v1.PodTemplateSpec template = 2;
+
+  // An update strategy to replace existing DaemonSet pods with new pods.
+  // +optional
+  optional DaemonSetUpdateStrategy updateStrategy = 3;
+
+  // The minimum number of seconds for which a newly created DaemonSet pod should
+  // be ready without any of its container crashing, for it to be considered
+  // available. Defaults to 0 (pod will be considered available as soon as it
+  // is ready).
+  // +optional
+  optional int32 minReadySeconds = 4;
+
+  // DEPRECATED.
+  // A sequence number representing a specific generation of the template.
+  // Populated by the system. It can be set only during the creation.
+  // +optional
+  optional int64 templateGeneration = 5;
+
+  // The number of old history entries to retain to allow rollback.
+  // This is a pointer to distinguish between explicit zero and not specified.
+  // Defaults to 10.
+  // +optional
+  optional int32 revisionHistoryLimit = 6;
+}
+
+// DaemonSetStatus represents the current status of a daemon set.
+message DaemonSetStatus {
+  // The number of nodes that are running at least 1
+  // daemon pod and are supposed to run the daemon pod.
+  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
+  optional int32 currentNumberScheduled = 1;
+
+  // The number of nodes that are running the daemon pod, but are
+  // not supposed to run the daemon pod.
+  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
+  optional int32 numberMisscheduled = 2;
+
+  // The total number of nodes that should be running the daemon
+  // pod (including nodes correctly running the daemon pod).
+  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
+  optional int32 desiredNumberScheduled = 3;
+
+  // The number of nodes that should be running the daemon pod and have one
+  // or more of the daemon pod running and ready.
+  optional int32 numberReady = 4;
+
+  // The most recent generation observed by the daemon set controller.
+  // +optional
+  optional int64 observedGeneration = 5;
+
+  // The total number of nodes that are running updated daemon pod
+  // +optional
+  optional int32 updatedNumberScheduled = 6;
+
+  // The number of nodes that should be running the
+  // daemon pod and have one or more of the daemon pod running and
+  // available (ready for at least spec.minReadySeconds)
+  // +optional
+  optional int32 numberAvailable = 7;
+
+  // The number of nodes that should be running the
+  // daemon pod and have none of the daemon pod running and available
+  // (ready for at least spec.minReadySeconds)
+  // +optional
+  optional int32 numberUnavailable = 8;
+
+  // Count of hash collisions for the DaemonSet. The DaemonSet controller
+  // uses this field as a collision avoidance mechanism when it needs to
+  // create the name for the newest ControllerRevision.
+  // +optional
+  optional int32 collisionCount = 9;
+
+  // Represents the latest available observations of a DaemonSet's current state.
+  // +optional
+  // +patchMergeKey=type
+  // +patchStrategy=merge
+  repeated DaemonSetCondition conditions = 10;
+}
+
+message DaemonSetUpdateStrategy {
+  // Type of daemon set update. Can be "RollingUpdate" or "OnDelete".
+  // Default is OnDelete.
+  // +optional
+  optional string type = 1;
+
+  // Rolling update config params. Present only if type = "RollingUpdate".
+  // ---
+  // TODO: Update this to follow our convention for oneOf, whatever we decide it
+  // to be. Same as Deployment `strategy.rollingUpdate`.
+  // See https://github.com/kubernetes/kubernetes/issues/35345
+  // +optional
+  optional RollingUpdateDaemonSet rollingUpdate = 2;
+}
+
+// DEPRECATED - This group version of Deployment is deprecated by apps/v1beta2/Deployment. See the release notes for
+// more information.
+// Deployment enables declarative updates for Pods and ReplicaSets.
+message Deployment {
+  // Standard object metadata.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Specification of the desired behavior of the Deployment.
+  // +optional
+  optional DeploymentSpec spec = 2;
+
+  // Most recently observed status of the Deployment.
+  // +optional
+  optional DeploymentStatus status = 3;
+}
+
+// DeploymentCondition describes the state of a deployment at a certain point.
+message DeploymentCondition {
+  // Type of deployment condition.
+  optional string type = 1;
+
+  // Status of the condition, one of True, False, Unknown.
+  optional string status = 2;
+
+  // The last time this condition was updated.
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6;
+
+  // Last time the condition transitioned from one status to another.
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7;
+
+  // The reason for the condition's last transition.
+  optional string reason = 4;
+
+  // A human readable message indicating details about the transition.
+  optional string message = 5;
+}
+
+// DeploymentList is a list of Deployments.
+message DeploymentList {
+  // Standard list metadata.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is the list of Deployments.
+  repeated Deployment items = 2;
+}
+
+// DEPRECATED.
+// DeploymentRollback stores the information required to rollback a deployment.
+message DeploymentRollback {
+  // Required: This must match the Name of a deployment.
+  optional string name = 1;
+
+  // The annotations to be updated to a deployment
+  // +optional
+  map<string, string> updatedAnnotations = 2;
+
+  // The config of this deployment rollback.
+  optional RollbackConfig rollbackTo = 3;
+}
+
+// DeploymentSpec is the specification of the desired behavior of the Deployment.
+message DeploymentSpec {
+  // Number of desired pods. This is a pointer to distinguish between explicit
+  // zero and not specified. Defaults to 1.
+  // +optional
+  optional int32 replicas = 1;
+
+  // Label selector for pods. Existing ReplicaSets whose pods are
+  // selected by this will be the ones affected by this deployment.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
+
+  // Template describes the pods that will be created.
+  optional k8s.io.api.core.v1.PodTemplateSpec template = 3;
+
+  // The deployment strategy to use to replace existing pods with new ones.
+  // +optional
+  // +patchStrategy=retainKeys
+  optional DeploymentStrategy strategy = 4;
+
+  // Minimum number of seconds for which a newly created pod should be ready
+  // without any of its container crashing, for it to be considered available.
+  // Defaults to 0 (pod will be considered available as soon as it is ready)
+  // +optional
+  optional int32 minReadySeconds = 5;
+
+  // The number of old ReplicaSets to retain to allow rollback.
+  // This is a pointer to distinguish between explicit zero and not specified.
+  // This is set to the max value of int32 (i.e. 2147483647) by default, which
+  // means "retaining all old RelicaSets".
+  // +optional
+  optional int32 revisionHistoryLimit = 6;
+
+  // Indicates that the deployment is paused and will not be processed by the
+  // deployment controller.
+  // +optional
+  optional bool paused = 7;
+
+  // DEPRECATED.
+  // The config this deployment is rolling back to. Will be cleared after rollback is done.
+  // +optional
+  optional RollbackConfig rollbackTo = 8;
+
+  // The maximum time in seconds for a deployment to make progress before it
+  // is considered to be failed. The deployment controller will continue to
+  // process failed deployments and a condition with a ProgressDeadlineExceeded
+  // reason will be surfaced in the deployment status. Note that progress will
+  // not be estimated during the time a deployment is paused. This is set to
+  // the max value of int32 (i.e. 2147483647) by default, which means "no deadline".
+  // +optional
+  optional int32 progressDeadlineSeconds = 9;
+}
+
+// DeploymentStatus is the most recently observed status of the Deployment.
+message DeploymentStatus {
+  // The generation observed by the deployment controller.
+  // +optional
+  optional int64 observedGeneration = 1;
+
+  // Total number of non-terminated pods targeted by this deployment (their labels match the selector).
+  // +optional
+  optional int32 replicas = 2;
+
+  // Total number of non-terminated pods targeted by this deployment that have the desired template spec.
+  // +optional
+  optional int32 updatedReplicas = 3;
+
+  // Total number of ready pods targeted by this deployment.
+  // +optional
+  optional int32 readyReplicas = 7;
+
+  // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.
+  // +optional
+  optional int32 availableReplicas = 4;
+
+  // Total number of unavailable pods targeted by this deployment. This is the total number of
+  // pods that are still required for the deployment to have 100% available capacity. They may
+  // either be pods that are running but not yet available or pods that still have not been created.
+  // +optional
+  optional int32 unavailableReplicas = 5;
+
+  // Represents the latest available observations of a deployment's current state.
+  // +patchMergeKey=type
+  // +patchStrategy=merge
+  repeated DeploymentCondition conditions = 6;
+
+  // Count of hash collisions for the Deployment. The Deployment controller uses this
+  // field as a collision avoidance mechanism when it needs to create the name for the
+  // newest ReplicaSet.
+  // +optional
+  optional int32 collisionCount = 8;
+}
+
+// DeploymentStrategy describes how to replace existing pods with new ones.
+message DeploymentStrategy {
+  // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate.
+  // +optional
+  optional string type = 1;
+
+  // Rolling update config params. Present only if DeploymentStrategyType =
+  // RollingUpdate.
+  // ---
+  // TODO: Update this to follow our convention for oneOf, whatever we decide it
+  // to be.
+  // +optional
+  optional RollingUpdateDeployment rollingUpdate = 2;
+}
+
+// FSGroupStrategyOptions defines the strategy type and options used to create the strategy.
+// Deprecated: use FSGroupStrategyOptions from policy API Group instead.
+message FSGroupStrategyOptions {
+  // rule is the strategy that will dictate what FSGroup is used in the SecurityContext.
+  // +optional
+  optional string rule = 1;
+
+  // ranges are the allowed ranges of fs groups.  If you would like to force a single
+  // fs group then supply a single range with the same start and end. Required for MustRunAs.
+  // +optional
+  repeated IDRange ranges = 2;
+}
+
+// HTTPIngressPath associates a path regex with a backend. Incoming urls matching
+// the path are forwarded to the backend.
+message HTTPIngressPath {
+  // Path is an extended POSIX regex as defined by IEEE Std 1003.1,
+  // (i.e. this follows the egrep/unix syntax, not the perl syntax)
+  // matched against the path of an incoming request. Currently it can
+  // contain characters disallowed from the conventional "path"
+  // part of a URL as defined by RFC 3986. Paths must begin with
+  // a '/'. If unspecified, the path defaults to a catch all sending
+  // traffic to the backend.
+  // +optional
+  optional string path = 1;
+
+  // Backend defines the referenced service endpoint to which the traffic
+  // will be forwarded to.
+  optional IngressBackend backend = 2;
+}
+
+// HTTPIngressRuleValue is a list of http selectors pointing to backends.
+// In the example: http://<host>/<path>?<searchpart> -> backend, where
+// parts of the url correspond to RFC 3986, this resource will be used
+// to match against everything after the last '/' and before the first '?'
+// or '#'.
+message HTTPIngressRuleValue {
+  // A collection of paths that map requests to backends.
+  repeated HTTPIngressPath paths = 1;
+}
+
+// HostPortRange defines a range of host ports that will be enabled by a policy
+// for pods to use.  It requires both the start and end to be defined.
+// Deprecated: use HostPortRange from policy API Group instead.
+message HostPortRange {
+  // min is the start of the range, inclusive.
+  optional int32 min = 1;
+
+  // max is the end of the range, inclusive.
+  optional int32 max = 2;
+}
+
+// IDRange provides a min/max of an allowed range of IDs.
+// Deprecated: use IDRange from policy API Group instead.
+message IDRange {
+  // min is the start of the range, inclusive.
+  optional int64 min = 1;
+
+  // max is the end of the range, inclusive.
+  optional int64 max = 2;
+}
+
+// DEPRECATED 1.9 - This group version of IPBlock is deprecated by networking/v1/IPBlock.
+// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24") that is allowed to the pods
+// matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should
+// not be included within this rule.
+message IPBlock {
+  // CIDR is a string representing the IP Block
+  // Valid examples are "192.168.1.1/24"
+  optional string cidr = 1;
+
+  // Except is a slice of CIDRs that should not be included within an IP Block
+  // Valid examples are "192.168.1.1/24"
+  // Except values will be rejected if they are outside the CIDR range
+  // +optional
+  repeated string except = 2;
+}
+
+// Ingress is a collection of rules that allow inbound connections to reach the
+// endpoints defined by a backend. An Ingress can be configured to give services
+// externally-reachable urls, load balance traffic, terminate SSL, offer name
+// based virtual hosting etc.
+message Ingress {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Spec is the desired state of the Ingress.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+  // +optional
+  optional IngressSpec spec = 2;
+
+  // Status is the current state of the Ingress.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+  // +optional
+  optional IngressStatus status = 3;
+}
+
+// IngressBackend describes all endpoints for a given service and port.
+message IngressBackend {
+  // Specifies the name of the referenced service.
+  optional string serviceName = 1;
+
+  // Specifies the port of the referenced service.
+  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString servicePort = 2;
+}
+
+// IngressList is a collection of Ingress.
+message IngressList {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is the list of Ingress.
+  repeated Ingress items = 2;
+}
+
+// IngressRule represents the rules mapping the paths under a specified host to
+// the related backend services. Incoming requests are first evaluated for a host
+// match, then routed to the backend associated with the matching IngressRuleValue.
+message IngressRule {
+  // Host is the fully qualified domain name of a network host, as defined
+  // by RFC 3986. Note the following deviations from the "host" part of the
+  // URI as defined in the RFC:
+  // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to the
+  // 	  IP in the Spec of the parent Ingress.
+  // 2. The `:` delimiter is not respected because ports are not allowed.
+  // 	  Currently the port of an Ingress is implicitly :80 for http and
+  // 	  :443 for https.
+  // Both these may change in the future.
+  // Incoming requests are matched against the host before the IngressRuleValue.
+  // If the host is unspecified, the Ingress routes all traffic based on the
+  // specified IngressRuleValue.
+  // +optional
+  optional string host = 1;
+
+  // IngressRuleValue represents a rule to route requests for this IngressRule.
+  // If unspecified, the rule defaults to an http catch-all. Whether that sends
+  // just traffic matching the host to the default backend or all traffic to the
+  // default backend, is left to the controller fulfilling the Ingress. Http is
+  // currently the only supported IngressRuleValue.
+  // +optional
+  optional IngressRuleValue ingressRuleValue = 2;
+}
+
+// IngressRuleValue represents a rule to apply against incoming requests. If the
+// rule is satisfied, the request is routed to the specified backend. Currently
+// mixing different types of rules in a single Ingress is disallowed, so exactly
+// one of the following must be set.
+message IngressRuleValue {
+  // +optional
+  optional HTTPIngressRuleValue http = 1;
+}
+
+// IngressSpec describes the Ingress the user wishes to exist.
+message IngressSpec {
+  // A default backend capable of servicing requests that don't match any
+  // rule. At least one of 'backend' or 'rules' must be specified. This field
+  // is optional to allow the loadbalancer controller or defaulting logic to
+  // specify a global default.
+  // +optional
+  optional IngressBackend backend = 1;
+
+  // TLS configuration. Currently the Ingress only supports a single TLS
+  // port, 443. If multiple members of this list specify different hosts, they
+  // will be multiplexed on the same port according to the hostname specified
+  // through the SNI TLS extension, if the ingress controller fulfilling the
+  // ingress supports SNI.
+  // +optional
+  repeated IngressTLS tls = 2;
+
+  // A list of host rules used to configure the Ingress. If unspecified, or
+  // no rule matches, all traffic is sent to the default backend.
+  // +optional
+  repeated IngressRule rules = 3;
+}
+
+// IngressStatus describe the current state of the Ingress.
+message IngressStatus {
+  // LoadBalancer contains the current status of the load-balancer.
+  // +optional
+  optional k8s.io.api.core.v1.LoadBalancerStatus loadBalancer = 1;
+}
+
+// IngressTLS describes the transport layer security associated with an Ingress.
+message IngressTLS {
+  // Hosts are a list of hosts included in the TLS certificate. The values in
+  // this list must match the name/s used in the tlsSecret. Defaults to the
+  // wildcard host setting for the loadbalancer controller fulfilling this
+  // Ingress, if left unspecified.
+  // +optional
+  repeated string hosts = 1;
+
+  // SecretName is the name of the secret used to terminate SSL traffic on 443.
+  // Field is left optional to allow SSL routing based on SNI hostname alone.
+  // If the SNI host in a listener conflicts with the "Host" header field used
+  // by an IngressRule, the SNI host is used for termination and value of the
+  // Host header is used for routing.
+  // +optional
+  optional string secretName = 2;
+}
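
Taken together, the Ingress messages above describe host- and path-based HTTP routing with an optional default backend and TLS termination. A minimal sketch using the corresponding Go types from this package (the service names and ports are made up for illustration):

package main

import (
	"fmt"

	extv1beta1 "k8s.io/api/extensions/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// Route http://example.com/app to service "app:8080"; everything else
	// falls through to the default backend "fallback:80".
	ing := extv1beta1.Ingress{
		ObjectMeta: metav1.ObjectMeta{Name: "demo"},
		Spec: extv1beta1.IngressSpec{
			Backend: &extv1beta1.IngressBackend{
				ServiceName: "fallback",
				ServicePort: intstr.FromInt(80),
			},
			Rules: []extv1beta1.IngressRule{{
				Host: "example.com",
				IngressRuleValue: extv1beta1.IngressRuleValue{
					HTTP: &extv1beta1.HTTPIngressRuleValue{
						Paths: []extv1beta1.HTTPIngressPath{{
							Path: "/app",
							Backend: extv1beta1.IngressBackend{
								ServiceName: "app",
								ServicePort: intstr.FromInt(8080),
							},
						}},
					},
				},
			}},
		},
	}
	fmt.Println(ing.Spec.Rules[0].Host)
}
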
+
+// DEPRECATED 1.9 - This group version of NetworkPolicy is deprecated by networking/v1/NetworkPolicy.
+// NetworkPolicy describes what network traffic is allowed for a set of Pods
+message NetworkPolicy {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Specification of the desired behavior for this NetworkPolicy.
+  // +optional
+  optional NetworkPolicySpec spec = 2;
+}
+
+// DEPRECATED 1.9 - This group version of NetworkPolicyEgressRule is deprecated by networking/v1/NetworkPolicyEgressRule.
+// NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods
+// matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to.
+// This type is beta-level in 1.8
+message NetworkPolicyEgressRule {
+  // List of destination ports for outgoing traffic.
+  // Each item in this list is combined using a logical OR. If this field is
+  // empty or missing, this rule matches all ports (traffic not restricted by port).
+  // If this field is present and contains at least one item, then this rule allows
+  // traffic only if the traffic matches at least one port in the list.
+  // +optional
+  repeated NetworkPolicyPort ports = 1;
+
+  // List of destinations for outgoing traffic of pods selected for this rule.
+  // Items in this list are combined using a logical OR operation. If this field is
+  // empty or missing, this rule matches all destinations (traffic not restricted by
+  // destination). If this field is present and contains at least one item, this rule
+  // allows traffic only if the traffic matches at least one item in the to list.
+  // +optional
+  repeated NetworkPolicyPeer to = 2;
+}
+
+// DEPRECATED 1.9 - This group version of NetworkPolicyIngressRule is deprecated by networking/v1/NetworkPolicyIngressRule.
+// This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from.
+message NetworkPolicyIngressRule {
+  // List of ports which should be made accessible on the pods selected for this rule.
+  // Each item in this list is combined using a logical OR.
+  // If this field is empty or missing, this rule matches all ports (traffic not restricted by port).
+  // If this field is present and contains at least one item, then this rule allows traffic
+  // only if the traffic matches at least one port in the list.
+  // +optional
+  repeated NetworkPolicyPort ports = 1;
+
+  // List of sources which should be able to access the pods selected for this rule.
+  // Items in this list are combined using a logical OR operation.
+  // If this field is empty or missing, this rule matches all sources (traffic not restricted by source).
+  // If this field is present and contains at least one item, this rule allows traffic only if the
+  // traffic matches at least one item in the from list.
+  // +optional
+  repeated NetworkPolicyPeer from = 2;
+}
+
+// DEPRECATED 1.9 - This group version of NetworkPolicyList is deprecated by networking/v1/NetworkPolicyList.
+// Network Policy List is a list of NetworkPolicy objects.
+message NetworkPolicyList {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is a list of schema objects.
+  repeated NetworkPolicy items = 2;
+}
+
+// DEPRECATED 1.9 - This group version of NetworkPolicyPeer is deprecated by networking/v1/NetworkPolicyPeer.
+message NetworkPolicyPeer {
+  // This is a label selector which selects Pods. This field follows standard label
+  // selector semantics; if present but empty, it selects all pods.
+  //
+  // If NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects
+  // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector.
+  // Otherwise it selects the Pods matching PodSelector in the policy's own Namespace.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1;
+
+  // Selects Namespaces using cluster-scoped labels. This field follows standard label
+  // selector semantics; if present but empty, it selects all namespaces.
+  //
+  // If PodSelector is also set, then the NetworkPolicyPeer as a whole selects
+  // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector.
+  // Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 2;
+
+  // IPBlock defines policy on a particular IPBlock. If this field is set then
+  // neither of the other fields can be.
+  // +optional
+  optional IPBlock ipBlock = 3;
+}
+
+// DEPRECATED 1.9 - This group version of NetworkPolicyPort is deprecated by networking/v1/NetworkPolicyPort.
+message NetworkPolicyPort {
+  // Optional.  The protocol (TCP, UDP, or SCTP) which traffic must match.
+  // If not specified, this field defaults to TCP.
+  // +optional
+  optional string protocol = 1;
+
+  // If specified, the port on the given protocol.  This can
+  // either be a numerical or named port on a pod.  If this field is not provided,
+  // this matches all port names and numbers.
+  // If present, only traffic on the specified protocol AND port
+  // will be matched.
+  // +optional
+  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2;
+}
+
+// DEPRECATED 1.9 - This group version of NetworkPolicySpec is deprecated by networking/v1/NetworkPolicySpec.
+message NetworkPolicySpec {
+  // Selects the pods to which this NetworkPolicy object applies.  The array of ingress rules
+  // is applied to any pods selected by this field. Multiple network policies can select the
+  // same set of pods.  In this case, the ingress rules for each are combined additively.
+  // This field is NOT optional and follows standard label selector semantics.
+  // An empty podSelector matches all pods in this namespace.
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1;
+
+  // List of ingress rules to be applied to the selected pods.
+  // Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod
+  // OR if the traffic source is the pod's local node,
+  // OR if the traffic matches at least one ingress rule across all of the NetworkPolicy
+  // objects whose podSelector matches the pod.
+  // If this field is empty then this NetworkPolicy does not allow any traffic
+  // (and serves solely to ensure that the pods it selects are isolated by default).
+  // +optional
+  repeated NetworkPolicyIngressRule ingress = 2;
+
+  // List of egress rules to be applied to the selected pods. Outgoing traffic is
+  // allowed if there are no NetworkPolicies selecting the pod (and cluster policy
+  // otherwise allows the traffic), OR if the traffic matches at least one egress rule
+  // across all of the NetworkPolicy objects whose podSelector matches the pod. If
+  // this field is empty then this NetworkPolicy limits all outgoing traffic (and serves
+  // solely to ensure that the pods it selects are isolated by default).
+  // This field is beta-level in 1.8
+  // +optional
+  repeated NetworkPolicyEgressRule egress = 3;
+
+  // List of rule types that the NetworkPolicy relates to.
+  // Valid options are Ingress, Egress, or Ingress,Egress.
+  // If this field is not specified, it will default based on the existence of Ingress or Egress rules;
+  // policies that contain an Egress section are assumed to affect Egress, and all policies
+  // (whether or not they contain an Ingress section) are assumed to affect Ingress.
+  // If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ].
+  // Likewise, if you want to write a policy that specifies that no egress is allowed,
+  // you must specify a policyTypes value that includes "Egress" (since such a policy would not include
+  // an Egress section and would otherwise default to just [ "Ingress" ]).
+  // This field is beta-level in 1.8
+  // +optional
+  repeated string policyTypes = 4;
+}
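
The policyTypes defaulting described above is the subtle part: a policy that should only restrict egress must say so explicitly, otherwise it defaults to affecting ingress. A sketch using this package's Go types (the policy name is illustrative):

package main

import (
	"fmt"

	extv1beta1 "k8s.io/api/extensions/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// An egress-only "deny all" policy: without the explicit Egress entry in
	// policyTypes, the empty egress list would be ignored and the policy
	// would default to affecting Ingress instead.
	deny := extv1beta1.NetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "deny-all-egress"},
		Spec: extv1beta1.NetworkPolicySpec{
			PodSelector: metav1.LabelSelector{}, // empty: selects all pods
			PolicyTypes: []extv1beta1.PolicyType{extv1beta1.PolicyTypeEgress},
			// Egress left empty: no egress traffic is allowed.
		},
	}
	fmt.Println(deny.Name)
}
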
+
+// PodSecurityPolicy governs the ability to make requests that affect the Security Context
+// that will be applied to a pod and container.
+// Deprecated: use PodSecurityPolicy from policy API Group instead.
+message PodSecurityPolicy {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // spec defines the policy enforced.
+  // +optional
+  optional PodSecurityPolicySpec spec = 2;
+}
+
+// PodSecurityPolicyList is a list of PodSecurityPolicy objects.
+// Deprecated: use PodSecurityPolicyList from policy API Group instead.
+message PodSecurityPolicyList {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // items is a list of schema objects.
+  repeated PodSecurityPolicy items = 2;
+}
+
+// PodSecurityPolicySpec defines the policy enforced.
+// Deprecated: use PodSecurityPolicySpec from policy API Group instead.
+message PodSecurityPolicySpec {
+  // privileged determines if a pod can request to be run as privileged.
+  // +optional
+  optional bool privileged = 1;
+
+  // defaultAddCapabilities is the default set of capabilities that will be added to the container
+  // unless the pod spec specifically drops the capability.  You may not list a capability in both
+  // defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly
+  // allowed, and need not be included in the allowedCapabilities list.
+  // +optional
+  repeated string defaultAddCapabilities = 2;
+
+  // requiredDropCapabilities are the capabilities that will be dropped from the container.  These
+  // are required to be dropped and cannot be added.
+  // +optional
+  repeated string requiredDropCapabilities = 3;
+
+  // allowedCapabilities is a list of capabilities that can be requested to add to the container.
+  // Capabilities in this field may be added at the pod author's discretion.
+  // You must not list a capability in both allowedCapabilities and requiredDropCapabilities.
+  // +optional
+  repeated string allowedCapabilities = 4;
+
+  // volumes is a white list of allowed volume plugins. Empty indicates that
+  // no volumes may be used. To allow all volumes you may use '*'.
+  // +optional
+  repeated string volumes = 5;
+
+  // hostNetwork determines if the policy allows the use of HostNetwork in the pod spec.
+  // +optional
+  optional bool hostNetwork = 6;
+
+  // hostPorts determines which host port ranges are allowed to be exposed.
+  // +optional
+  repeated HostPortRange hostPorts = 7;
+
+  // hostPID determines if the policy allows the use of HostPID in the pod spec.
+  // +optional
+  optional bool hostPID = 8;
+
+  // hostIPC determines if the policy allows the use of HostIPC in the pod spec.
+  // +optional
+  optional bool hostIPC = 9;
+
+  // seLinux is the strategy that will dictate the allowable labels that may be set.
+  optional SELinuxStrategyOptions seLinux = 10;
+
+  // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.
+  optional RunAsUserStrategyOptions runAsUser = 11;
+
+  // RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set.
+  // If this field is omitted, the pod's RunAsGroup can take any value. This field requires the
+  // RunAsGroup feature gate to be enabled.
+  // +optional
+  optional RunAsGroupStrategyOptions runAsGroup = 22;
+
+  // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.
+  optional SupplementalGroupsStrategyOptions supplementalGroups = 12;
+
+  // fsGroup is the strategy that will dictate what fs group is used by the SecurityContext.
+  optional FSGroupStrategyOptions fsGroup = 13;
+
+  // readOnlyRootFilesystem when set to true will force containers to run with a read only root file
+  // system.  If the container specifically requests to run with a non-read only root file system
+  // the PSP should deny the pod.
+  // If set to false the container may run with a read only root file system if it wishes but it
+  // will not be forced to.
+  // +optional
+  optional bool readOnlyRootFilesystem = 14;
+
+  // defaultAllowPrivilegeEscalation controls the default setting for whether a
+  // process can gain more privileges than its parent process.
+  // +optional
+  optional bool defaultAllowPrivilegeEscalation = 15;
+
+  // allowPrivilegeEscalation determines if a pod can request to allow
+  // privilege escalation. If unspecified, defaults to true.
+  // +optional
+  optional bool allowPrivilegeEscalation = 16;
+
+  // allowedHostPaths is a white list of allowed host paths. Empty indicates
+  // that all host paths may be used.
+  // +optional
+  repeated AllowedHostPath allowedHostPaths = 17;
+
+  // allowedFlexVolumes is a whitelist of allowed Flexvolumes.  Empty or nil indicates that all
+  // Flexvolumes may be used.  This parameter is effective only when the usage of the Flexvolumes
+  // is allowed in the "volumes" field.
+  // +optional
+  repeated AllowedFlexVolume allowedFlexVolumes = 18;
+
+  // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none.
+  // Each entry is either a plain sysctl name or ends in "*" in which case it is considered
+  // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed.
+  // Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection.
+  //
+  // Examples:
+  // e.g. "foo/*" allows "foo/bar", "foo/baz", etc.
+  // e.g. "foo.*" allows "foo.bar", "foo.baz", etc.
+  // +optional
+  repeated string allowedUnsafeSysctls = 19;
+
+  // forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none.
+  // Each entry is either a plain sysctl name or ends in "*" in which case it is considered
+  // as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.
+  //
+  // Examples:
+  // e.g. "foo/*" forbids "foo/bar", "foo/baz", etc.
+  // e.g. "foo.*" forbids "foo.bar", "foo.baz", etc.
+  // +optional
+  repeated string forbiddenSysctls = 20;
+
+  // AllowedProcMountTypes is a whitelist of allowed ProcMountTypes.
+  // Empty or nil indicates that only the DefaultProcMountType may be used.
+  // This requires the ProcMountType feature flag to be enabled.
+  // +optional
+  repeated string allowedProcMountTypes = 21;
+}
+
+// DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. See the release notes for
+// more information.
+// ReplicaSet ensures that a specified number of pod replicas are running at any given time.
+message ReplicaSet {
+  // If the Labels of a ReplicaSet are empty, they are defaulted to
+  // be the same as the Pod(s) that the ReplicaSet manages.
+  // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Spec defines the specification of the desired behavior of the ReplicaSet.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+  // +optional
+  optional ReplicaSetSpec spec = 2;
+
+  // Status is the most recently observed status of the ReplicaSet.
+  // This data may be out of date by some window of time.
+  // Populated by the system.
+  // Read-only.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+  // +optional
+  optional ReplicaSetStatus status = 3;
+}
+
+// ReplicaSetCondition describes the state of a replica set at a certain point.
+message ReplicaSetCondition {
+  // Type of replica set condition.
+  optional string type = 1;
+
+  // Status of the condition, one of True, False, Unknown.
+  optional string status = 2;
+
+  // The last time the condition transitioned from one status to another.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
+
+  // The reason for the condition's last transition.
+  // +optional
+  optional string reason = 4;
+
+  // A human readable message indicating details about the transition.
+  // +optional
+  optional string message = 5;
+}
+
+// ReplicaSetList is a collection of ReplicaSets.
+message ReplicaSetList {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // List of ReplicaSets.
+  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller
+  repeated ReplicaSet items = 2;
+}
+
+// ReplicaSetSpec is the specification of a ReplicaSet.
+message ReplicaSetSpec {
+  // Replicas is the number of desired replicas.
+  // This is a pointer to distinguish between explicit zero and unspecified.
+  // Defaults to 1.
+  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
+  // +optional
+  optional int32 replicas = 1;
+
+  // Minimum number of seconds for which a newly created pod should be ready
+  // without any of its container crashing, for it to be considered available.
+  // Defaults to 0 (pod will be considered available as soon as it is ready)
+  // +optional
+  optional int32 minReadySeconds = 4;
+
+  // Selector is a label query over pods that should match the replica count.
+  // If the selector is empty, it is defaulted to the labels present on the pod template.
+  // Label keys and values that must match in order to be controlled by this replica set.
+  // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
+
+  // Template is the object that describes the pod that will be created if
+  // insufficient replicas are detected.
+  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
+  // +optional
+  optional k8s.io.api.core.v1.PodTemplateSpec template = 3;
+}
+
+// ReplicaSetStatus represents the current status of a ReplicaSet.
+message ReplicaSetStatus {
+  // Replicas is the most recently observed number of replicas.
+  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
+  optional int32 replicas = 1;
+
+  // The number of pods that have labels matching the labels of the pod template of the replicaset.
+  // +optional
+  optional int32 fullyLabeledReplicas = 2;
+
+  // The number of ready replicas for this replica set.
+  // +optional
+  optional int32 readyReplicas = 4;
+
+  // The number of available replicas (ready for at least minReadySeconds) for this replica set.
+  // +optional
+  optional int32 availableReplicas = 5;
+
+  // ObservedGeneration reflects the generation of the most recently observed ReplicaSet.
+  // +optional
+  optional int64 observedGeneration = 3;
+
+  // Represents the latest available observations of a replica set's current state.
+  // +optional
+  // +patchMergeKey=type
+  // +patchStrategy=merge
+  repeated ReplicaSetCondition conditions = 6;
+}
+
+// Dummy definition
+message ReplicationControllerDummy {
+}
+
+// DEPRECATED.
+message RollbackConfig {
+  // The revision to rollback to. If set to 0, rollback to the last revision.
+  // +optional
+  optional int64 revision = 1;
+}
+
+// Spec to control the desired behavior of daemon set rolling update.
+message RollingUpdateDaemonSet {
+  // The maximum number of DaemonSet pods that can be unavailable during the
+  // update. Value can be an absolute number (ex: 5) or a percentage of total
+  // number of DaemonSet pods at the start of the update (ex: 10%). Absolute
+  // number is calculated from percentage by rounding up.
+  // This cannot be 0.
+  // Default value is 1.
+  // Example: when this is set to 30%, at most 30% of the total number of nodes
+  // that should be running the daemon pod (i.e. status.desiredNumberScheduled)
+  // can have their pods stopped for an update at any given
+  // time. The update starts by stopping at most 30% of those DaemonSet pods
+  // and then brings up new DaemonSet pods in their place. Once the new pods
+  // are available, it then proceeds onto other DaemonSet pods, thus ensuring
+  // that at least 70% of original number of DaemonSet pods are available at
+  // all times during the update.
+  // +optional
+  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1;
+}
+
+// Spec to control the desired behavior of rolling update.
+message RollingUpdateDeployment {
+  // The maximum number of pods that can be unavailable during the update.
+  // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+  // Absolute number is calculated from percentage by rounding down.
+  // This can not be 0 if MaxSurge is 0.
+  // By default, a fixed value of 1 is used.
+  // Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods
+  // immediately when the rolling update starts. Once new pods are ready, old RC
+  // can be scaled down further, followed by scaling up the new RC, ensuring
+  // that the total number of pods available at all times during the update is at
+  // least 70% of desired pods.
+  // +optional
+  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1;
+
+  // The maximum number of pods that can be scheduled above the desired number of
+  // pods.
+  // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+  // This can not be 0 if MaxUnavailable is 0.
+  // Absolute number is calculated from percentage by rounding up.
+  // By default, a value of 1 is used.
+  // Example: when this is set to 30%, the new RC can be scaled up immediately when
+  // the rolling update starts, such that the total number of old and new pods do not exceed
+  // 130% of desired pods. Once old pods have been killed,
+  // new RC can be scaled up further, ensuring that total number of pods running
+  // at any time during the update is at most 130% of desired pods.
+  // +optional
+  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2;
+}
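
Both rolling-update messages resolve percentage values against a total with opposite rounding directions (maxUnavailable rounds down, maxSurge rounds up), which is what guarantees the update can always make progress. A sketch of that arithmetic, assuming the vendored intstr helper GetValueFromIntOrPercent behaves as described here:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	maxUnavailable := intstr.FromString("25%")
	maxSurge := intstr.FromString("25%")
	desired := 10 // desired replica count

	// Per the field docs above: unavailable rounds down, surge rounds up.
	down, _ := intstr.GetValueFromIntOrPercent(&maxUnavailable, desired, false)
	up, _ := intstr.GetValueFromIntOrPercent(&maxSurge, desired, true)

	fmt.Println(down, up) // prints: 2 3 (25% of 10 is 2.5)
}
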
+
+// RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy.
+// Deprecated: use RunAsGroupStrategyOptions from policy API Group instead.
+message RunAsGroupStrategyOptions {
+  // rule is the strategy that will dictate the allowable RunAsGroup values that may be set.
+  optional string rule = 1;
+
+  // ranges are the allowed ranges of gids that may be used. If you would like to force a single gid
+  // then supply a single range with the same start and end. Required for MustRunAs.
+  // +optional
+  repeated IDRange ranges = 2;
+}
+
+// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy.
+// Deprecated: use RunAsUserStrategyOptions from policy API Group instead.
+message RunAsUserStrategyOptions {
+  // rule is the strategy that will dictate the allowable RunAsUser values that may be set.
+  optional string rule = 1;
+
+  // ranges are the allowed ranges of uids that may be used. If you would like to force a single uid
+  // then supply a single range with the same start and end. Required for MustRunAs.
+  // +optional
+  repeated IDRange ranges = 2;
+}
+
+// SELinuxStrategyOptions defines the strategy type and any options used to create the strategy.
+// Deprecated: use SELinuxStrategyOptions from policy API Group instead.
+message SELinuxStrategyOptions {
+  // rule is the strategy that will dictate the allowable labels that may be set.
+  optional string rule = 1;
+
+  // seLinuxOptions required to run as; required for MustRunAs
+  // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+  // +optional
+  optional k8s.io.api.core.v1.SELinuxOptions seLinuxOptions = 2;
+}
+
+// represents a scaling request for a resource.
+message Scale {
+  // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.
+  // +optional
+  optional ScaleSpec spec = 2;
+
+  // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only.
+  // +optional
+  optional ScaleStatus status = 3;
+}
+
+// describes the attributes of a scale subresource
+message ScaleSpec {
+  // desired number of instances for the scaled object.
+  // +optional
+  optional int32 replicas = 1;
+}
+
+// represents the current status of a scale subresource.
+message ScaleStatus {
+  // actual number of observed instances of the scaled object.
+  optional int32 replicas = 1;
+
+  // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors
+  // +optional
+  map<string, string> selector = 2;
+
+  // label selector for pods that should match the replicas count. This is a serialized
+  // version of both map-based and more expressive set-based selectors. This is done to
+  // avoid introspection in the clients. The string will be in the same format as the
+  // query-param syntax. If the target type only supports map-based selectors, both this
+  // field and map-based selector field are populated.
+  // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+  // +optional
+  optional string targetSelector = 3;
+}
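
The targetSelector field above stores the selector in the same string form the API server accepts as a query parameter. That string can be produced from a structured LabelSelector with an apimachinery helper; a minimal sketch:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	sel := &metav1.LabelSelector{MatchLabels: map[string]string{"app": "web"}}

	// LabelSelectorAsSelector converts the structured form into a
	// labels.Selector whose String() is the query-param syntax.
	s, err := metav1.LabelSelectorAsSelector(sel)
	if err != nil {
		panic(err)
	}
	fmt.Println(s.String()) // prints: app=web
}
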
+
+// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy.
+// Deprecated: use SupplementalGroupsStrategyOptions from policy API Group instead.
+message SupplementalGroupsStrategyOptions {
+  // rule is the strategy that will dictate what supplemental groups are used in the SecurityContext.
+  // +optional
+  optional string rule = 1;
+
+  // ranges are the allowed ranges of supplemental groups.  If you would like to force a single
+  // supplemental group then supply a single range with the same start and end. Required for MustRunAs.
+  // +optional
+  repeated IDRange ranges = 2;
+}
+
diff --git a/vendor/k8s.io/api/extensions/v1beta1/register.go b/vendor/k8s.io/api/extensions/v1beta1/register.go
new file mode 100644
index 0000000..7625f67
--- /dev/null
+++ b/vendor/k8s.io/api/extensions/v1beta1/register.go
@@ -0,0 +1,66 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "extensions"
+
+// SchemeGroupVersion is the group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
+	// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+	SchemeBuilder      = runtime.NewSchemeBuilder(addKnownTypes)
+	localSchemeBuilder = &SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&Deployment{},
+		&DeploymentList{},
+		&DeploymentRollback{},
+		&ReplicationControllerDummy{},
+		&Scale{},
+		&DaemonSetList{},
+		&DaemonSet{},
+		&Ingress{},
+		&IngressList{},
+		&ReplicaSet{},
+		&ReplicaSetList{},
+		&PodSecurityPolicy{},
+		&PodSecurityPolicyList{},
+		&NetworkPolicy{},
+		&NetworkPolicyList{},
+	)
+	// Add the watch version that applies
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
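
register.go wires these types into apimachinery's runtime scheme. A short sketch of how a consumer would register the group and query the group/version/kind mapping back from a Go type:

package main

import (
	"fmt"

	extv1beta1 "k8s.io/api/extensions/v1beta1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	if err := extv1beta1.AddToScheme(scheme); err != nil {
		panic(err)
	}

	// The scheme now maps Go types to extensions/v1beta1 kinds and back.
	gvks, _, err := scheme.ObjectKinds(&extv1beta1.Deployment{})
	if err != nil {
		panic(err)
	}
	fmt.Println(gvks[0]) // prints: extensions/v1beta1, Kind=Deployment
}
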
diff --git a/vendor/k8s.io/api/extensions/v1beta1/types.go b/vendor/k8s.io/api/extensions/v1beta1/types.go
new file mode 100644
index 0000000..5ba6f95
--- /dev/null
+++ b/vendor/k8s.io/api/extensions/v1beta1/types.go
@@ -0,0 +1,1360 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	appsv1beta1 "k8s.io/api/apps/v1beta1"
+	"k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/intstr"
+)
+
+// describes the attributes of a scale subresource
+type ScaleSpec struct {
+	// desired number of instances for the scaled object.
+	// +optional
+	Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
+}
+
+// represents the current status of a scale subresource.
+type ScaleStatus struct {
+	// actual number of observed instances of the scaled object.
+	Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"`
+
+	// label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors
+	// +optional
+	Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"`
+
+	// label selector for pods that should match the replicas count. This is a serialized
+	// version of both map-based and more expressive set-based selectors. This is done to
+	// avoid introspection in the clients. The string will be in the same format as the
+	// query-param syntax. If the target type only supports map-based selectors, both this
+	// field and map-based selector field are populated.
+	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+	// +optional
+	TargetSelector string `json:"targetSelector,omitempty" protobuf:"bytes,3,opt,name=targetSelector"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// represents a scaling request for a resource.
+type Scale struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.
+	// +optional
+	Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+	// current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only.
+	// +optional
+	Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Dummy definition
+type ReplicationControllerDummy struct {
+	metav1.TypeMeta `json:",inline"`
+}
+
+// +genclient
+// +genclient:method=GetScale,verb=get,subresource=scale,result=Scale
+// +genclient:method=UpdateScale,verb=update,subresource=scale,input=Scale,result=Scale
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// DEPRECATED - This group version of Deployment is deprecated by apps/v1beta2/Deployment. See the release notes for
+// more information.
+// Deployment enables declarative updates for Pods and ReplicaSets.
+type Deployment struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object metadata.
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Specification of the desired behavior of the Deployment.
+	// +optional
+	Spec DeploymentSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+	// Most recently observed status of the Deployment.
+	// +optional
+	Status DeploymentStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// DeploymentSpec is the specification of the desired behavior of the Deployment.
+type DeploymentSpec struct {
+	// Number of desired pods. This is a pointer to distinguish between explicit
+	// zero and not specified. Defaults to 1.
+	// +optional
+	Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
+
+	// Label selector for pods. Existing ReplicaSets whose pods are
+	// selected by this will be the ones affected by this deployment.
+	// +optional
+	Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"`
+
+	// Template describes the pods that will be created.
+	Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"`
+
+	// The deployment strategy to use to replace existing pods with new ones.
+	// +optional
+	// +patchStrategy=retainKeys
+	Strategy DeploymentStrategy `json:"strategy,omitempty" patchStrategy:"retainKeys" protobuf:"bytes,4,opt,name=strategy"`
+
+	// Minimum number of seconds for which a newly created pod should be ready
+	// without any of its container crashing, for it to be considered available.
+	// Defaults to 0 (pod will be considered available as soon as it is ready)
+	// +optional
+	MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,5,opt,name=minReadySeconds"`
+
+	// The number of old ReplicaSets to retain to allow rollback.
+	// This is a pointer to distinguish between explicit zero and not specified.
+	// This is set to the max value of int32 (i.e. 2147483647) by default, which
+	// means "retaining all old RelicaSets".
+	// +optional
+	RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"`
+
+	// Indicates that the deployment is paused and will not be processed by the
+	// deployment controller.
+	// +optional
+	Paused bool `json:"paused,omitempty" protobuf:"varint,7,opt,name=paused"`
+
+	// DEPRECATED.
+	// The config this deployment is rolling back to. Will be cleared after rollback is done.
+	// +optional
+	RollbackTo *RollbackConfig `json:"rollbackTo,omitempty" protobuf:"bytes,8,opt,name=rollbackTo"`
+
+	// The maximum time in seconds for a deployment to make progress before it
+	// is considered to be failed. The deployment controller will continue to
+	// process failed deployments and a condition with a ProgressDeadlineExceeded
+	// reason will be surfaced in the deployment status. Note that progress will
+	// not be estimated during the time a deployment is paused. This is set to
+	// the max value of int32 (i.e. 2147483647) by default, which means "no deadline".
+	// +optional
+	ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty" protobuf:"varint,9,opt,name=progressDeadlineSeconds"`
+}
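+
+// exampleDeployment is an illustrative sketch (not part of the API in this
+// diff) showing how the DeploymentSpec fields above fit together; the names
+// and values are assumptions for demonstration only.
+func exampleDeployment() *Deployment {
+	replicas := int32(3)             // pointer distinguishes explicit 0 from unset
+	revisionHistoryLimit := int32(5) // keep five old ReplicaSets for rollback
+	return &Deployment{
+		ObjectMeta: metav1.ObjectMeta{Name: "example"},
+		Spec: DeploymentSpec{
+			Replicas: &replicas,
+			// The selector must select the labels set on the pod template.
+			Selector: &metav1.LabelSelector{
+				MatchLabels: map[string]string{"app": "example"},
+			},
+			Template: v1.PodTemplateSpec{
+				ObjectMeta: metav1.ObjectMeta{
+					Labels: map[string]string{"app": "example"},
+				},
+			},
+			RevisionHistoryLimit: &revisionHistoryLimit,
+		},
+	}
+}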
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// DEPRECATED.
+// DeploymentRollback stores the information required to rollback a deployment.
+type DeploymentRollback struct {
+	metav1.TypeMeta `json:",inline"`
+	// Required: This must match the Name of a deployment.
+	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+	// The annotations to be updated to a deployment
+	// +optional
+	UpdatedAnnotations map[string]string `json:"updatedAnnotations,omitempty" protobuf:"bytes,2,rep,name=updatedAnnotations"`
+	// The config of this deployment rollback.
+	RollbackTo RollbackConfig `json:"rollbackTo" protobuf:"bytes,3,opt,name=rollbackTo"`
+}
+
+// DEPRECATED.
+type RollbackConfig struct {
+	// The revision to rollback to. If set to 0, rollback to the last revision.
+	// +optional
+	Revision int64 `json:"revision,omitempty" protobuf:"varint,1,opt,name=revision"`
+}
+
+const (
+	// DefaultDeploymentUniqueLabelKey is the default key of the selector that is added
+	// to existing RCs (and the label key that is added to their pods) to prevent the
+	// existing RCs from selecting new pods (and the new RC from selecting old pods).
+	DefaultDeploymentUniqueLabelKey string = "pod-template-hash"
+)
+
+// DeploymentStrategy describes how to replace existing pods with new ones.
+type DeploymentStrategy struct {
+	// Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate.
+	// +optional
+	Type DeploymentStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=DeploymentStrategyType"`
+
+	// Rolling update config params. Present only if DeploymentStrategyType =
+	// RollingUpdate.
+	//---
+	// TODO: Update this to follow our convention for oneOf, whatever we decide it
+	// to be.
+	// +optional
+	RollingUpdate *RollingUpdateDeployment `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"`
+}
+
+type DeploymentStrategyType string
+
+const (
+	// Kill all existing pods before creating new ones.
+	RecreateDeploymentStrategyType DeploymentStrategyType = "Recreate"
+
+	// Replace the old RCs with a new one using a rolling update, i.e. gradually scale down the old RCs and scale up the new one.
+	RollingUpdateDeploymentStrategyType DeploymentStrategyType = "RollingUpdate"
+)
+
+// Spec to control the desired behavior of rolling update.
+type RollingUpdateDeployment struct {
+	// The maximum number of pods that can be unavailable during the update.
+	// Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+	// Absolute number is calculated from percentage by rounding down.
+	// This cannot be 0 if MaxSurge is 0.
+	// By default, a fixed value of 1 is used.
+	// Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods
+	// immediately when the rolling update starts. Once new pods are ready, old RC
+	// can be scaled down further, followed by scaling up the new RC, ensuring
+	// that the total number of pods available at all times during the update is at
+	// least 70% of desired pods.
+	// +optional
+	MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"`
+
+	// The maximum number of pods that can be scheduled above the desired number of
+	// pods.
+	// Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+	// This cannot be 0 if MaxUnavailable is 0.
+	// Absolute number is calculated from percentage by rounding up.
+	// By default, a value of 1 is used.
+	// Example: when this is set to 30%, the new RC can be scaled up immediately when
+	// the rolling update starts, such that the total number of old and new pods do not exceed
+	// 130% of desired pods. Once old pods have been killed,
+	// new RC can be scaled up further, ensuring that total number of pods running
+	// at any time during the update is at most 130% of desired pods.
+	// +optional
+	MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"`
+}
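+
+// exampleRollingUpdate is an illustrative sketch (not part of the API in
+// this diff): both fields accept an absolute number or a percentage via
+// intstr. With 10 desired pods, "25%" for maxUnavailable rounds down to 2,
+// while a percentage for maxSurge would round up. Values are assumptions.
+func exampleRollingUpdate() DeploymentStrategy {
+	maxUnavailable := intstr.FromString("25%")
+	maxSurge := intstr.FromInt(1) // an absolute number works as well
+	return DeploymentStrategy{
+		Type: RollingUpdateDeploymentStrategyType,
+		RollingUpdate: &RollingUpdateDeployment{
+			MaxUnavailable: &maxUnavailable,
+			MaxSurge:       &maxSurge,
+		},
+	}
+}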
+
+// DeploymentStatus is the most recently observed status of the Deployment.
+type DeploymentStatus struct {
+	// The generation observed by the deployment controller.
+	// +optional
+	ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"`
+
+	// Total number of non-terminated pods targeted by this deployment (their labels match the selector).
+	// +optional
+	Replicas int32 `json:"replicas,omitempty" protobuf:"varint,2,opt,name=replicas"`
+
+	// Total number of non-terminated pods targeted by this deployment that have the desired template spec.
+	// +optional
+	UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,3,opt,name=updatedReplicas"`
+
+	// Total number of ready pods targeted by this deployment.
+	// +optional
+	ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,7,opt,name=readyReplicas"`
+
+	// Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.
+	// +optional
+	AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,4,opt,name=availableReplicas"`
+
+	// Total number of unavailable pods targeted by this deployment. This is the total number of
+	// pods that are still required for the deployment to have 100% available capacity. They may
+	// either be pods that are running but not yet available or pods that still have not been created.
+	// +optional
+	UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,5,opt,name=unavailableReplicas"`
+
+	// Represents the latest available observations of a deployment's current state.
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	Conditions []DeploymentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"`
+
+	// Count of hash collisions for the Deployment. The Deployment controller uses this
+	// field as a collision avoidance mechanism when it needs to create the name for the
+	// newest ReplicaSet.
+	// +optional
+	CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,8,opt,name=collisionCount"`
+}
+
+type DeploymentConditionType string
+
+// These are valid conditions of a deployment.
+const (
+	// Available means the deployment is available, i.e. at least the minimum available
+	// replicas required are up and running for at least minReadySeconds.
+	DeploymentAvailable DeploymentConditionType = "Available"
+	// Progressing means the deployment is progressing. Progress for a deployment is
+	// considered when a new replica set is created or adopted, and when new pods scale
+	// up or old pods scale down. Progress is not estimated for paused deployments or
+	// when progressDeadlineSeconds is not specified.
+	DeploymentProgressing DeploymentConditionType = "Progressing"
+	// ReplicaFailure is added in a deployment when one of its pods fails to be created
+	// or deleted.
+	DeploymentReplicaFailure DeploymentConditionType = "ReplicaFailure"
+)
+
+// DeploymentCondition describes the state of a deployment at a certain point.
+type DeploymentCondition struct {
+	// Type of deployment condition.
+	Type DeploymentConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DeploymentConditionType"`
+	// Status of the condition, one of True, False, Unknown.
+	Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"`
+	// The last time this condition was updated.
+	LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,6,opt,name=lastUpdateTime"`
+	// Last time the condition transitioned from one status to another.
+	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,7,opt,name=lastTransitionTime"`
+	// The reason for the condition's last transition.
+	Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
+	// A human readable message indicating details about the transition.
+	Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
+}
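+
+// getDeploymentCondition is an illustrative helper (an assumption, not part
+// of the API in this diff) showing how the Conditions slice is typically
+// consumed: scan for a condition of the given type and report whether it is
+// currently True, along with its message.
+func getDeploymentCondition(status DeploymentStatus, condType DeploymentConditionType) (bool, string) {
+	for i := range status.Conditions {
+		c := status.Conditions[i]
+		if c.Type == condType {
+			return c.Status == v1.ConditionTrue, c.Message
+		}
+	}
+	return false, "" // condition not reported yet
+}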
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// DeploymentList is a list of Deployments.
+type DeploymentList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is the list of Deployments.
+	Items []Deployment `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+type DaemonSetUpdateStrategy struct {
+	// Type of daemon set update. Can be "RollingUpdate" or "OnDelete".
+	// Default is OnDelete.
+	// +optional
+	Type DaemonSetUpdateStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type"`
+
+	// Rolling update config params. Present only if type = "RollingUpdate".
+	//---
+	// TODO: Update this to follow our convention for oneOf, whatever we decide it
+	// to be. Same as Deployment `strategy.rollingUpdate`.
+	// See https://github.com/kubernetes/kubernetes/issues/35345
+	// +optional
+	RollingUpdate *RollingUpdateDaemonSet `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"`
+}
+
+type DaemonSetUpdateStrategyType string
+
+const (
+	// Replace the old daemons with new ones using a rolling update, i.e. replace them on each node one after the other.
+	RollingUpdateDaemonSetStrategyType DaemonSetUpdateStrategyType = "RollingUpdate"
+
+	// Replace the old daemon pods only when they are killed.
+	OnDeleteDaemonSetStrategyType DaemonSetUpdateStrategyType = "OnDelete"
+)
+
+// Spec to control the desired behavior of daemon set rolling update.
+type RollingUpdateDaemonSet struct {
+	// The maximum number of DaemonSet pods that can be unavailable during the
+	// update. Value can be an absolute number (ex: 5) or a percentage of total
+	// number of DaemonSet pods at the start of the update (ex: 10%). Absolute
+	// number is calculated from percentage by rounding up.
+	// This cannot be 0.
+	// Default value is 1.
+	// Example: when this is set to 30%, at most 30% of the total number of nodes
+	// that should be running the daemon pod (i.e. status.desiredNumberScheduled)
+	// can have their pods stopped for an update at any given
+	// time. The update starts by stopping at most 30% of those DaemonSet pods
+	// and then brings up new DaemonSet pods in their place. Once the new pods
+	// are available, it then proceeds onto other DaemonSet pods, thus ensuring
+	// that at least 70% of original number of DaemonSet pods are available at
+	// all times during the update.
+	// +optional
+	MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"`
+}
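+
+// exampleDaemonSetUpdateStrategy is an illustrative sketch (not part of the
+// API in this diff): opting a DaemonSet into rolling updates. With a
+// maxUnavailable of "30%", at most 30% of the nodes that should be running
+// the daemon pod may have it stopped for an update at any given time.
+func exampleDaemonSetUpdateStrategy() DaemonSetUpdateStrategy {
+	maxUnavailable := intstr.FromString("30%")
+	return DaemonSetUpdateStrategy{
+		Type: RollingUpdateDaemonSetStrategyType,
+		RollingUpdate: &RollingUpdateDaemonSet{
+			MaxUnavailable: &maxUnavailable,
+		},
+	}
+}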
+
+// DaemonSetSpec is the specification of a daemon set.
+type DaemonSetSpec struct {
+	// A label query over pods that are managed by the daemon set.
+	// It must match the pod template's labels in order for the pods to be controlled.
+	// If empty, it is defaulted to the labels on the pod template.
+	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+	// +optional
+	Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,1,opt,name=selector"`
+
+	// An object that describes the pod that will be created.
+	// The DaemonSet will create exactly one copy of this pod on every node
+	// that matches the template's node selector (or on every node if no node
+	// selector is specified).
+	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
+	Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,2,opt,name=template"`
+
+	// An update strategy to replace existing DaemonSet pods with new pods.
+	// +optional
+	UpdateStrategy DaemonSetUpdateStrategy `json:"updateStrategy,omitempty" protobuf:"bytes,3,opt,name=updateStrategy"`
+
+	// The minimum number of seconds for which a newly created DaemonSet pod should
+	// be ready without any of its container crashing, for it to be considered
+	// available. Defaults to 0 (pod will be considered available as soon as it
+	// is ready).
+	// +optional
+	MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"`
+
+	// DEPRECATED.
+	// A sequence number representing a specific generation of the template.
+	// Populated by the system. It can be set only during the creation.
+	// +optional
+	TemplateGeneration int64 `json:"templateGeneration,omitempty" protobuf:"varint,5,opt,name=templateGeneration"`
+
+	// The number of old history entries to retain to allow rollback.
+	// This is a pointer to distinguish between explicit zero and not specified.
+	// Defaults to 10.
+	// +optional
+	RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"`
+}
+
+// DaemonSetStatus represents the current status of a daemon set.
+type DaemonSetStatus struct {
+	// The number of nodes that are running at least 1
+	// daemon pod and are supposed to run the daemon pod.
+	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
+	CurrentNumberScheduled int32 `json:"currentNumberScheduled" protobuf:"varint,1,opt,name=currentNumberScheduled"`
+
+	// The number of nodes that are running the daemon pod, but are
+	// not supposed to run the daemon pod.
+	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
+	NumberMisscheduled int32 `json:"numberMisscheduled" protobuf:"varint,2,opt,name=numberMisscheduled"`
+
+	// The total number of nodes that should be running the daemon
+	// pod (including nodes correctly running the daemon pod).
+	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
+	DesiredNumberScheduled int32 `json:"desiredNumberScheduled" protobuf:"varint,3,opt,name=desiredNumberScheduled"`
+
+	// The number of nodes that should be running the daemon pod and have one
+	// or more of the daemon pod running and ready.
+	NumberReady int32 `json:"numberReady" protobuf:"varint,4,opt,name=numberReady"`
+
+	// The most recent generation observed by the daemon set controller.
+	// +optional
+	ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,5,opt,name=observedGeneration"`
+
+	// The total number of nodes that are running the updated daemon pod.
+	// +optional
+	UpdatedNumberScheduled int32 `json:"updatedNumberScheduled,omitempty" protobuf:"varint,6,opt,name=updatedNumberScheduled"`
+
+	// The number of nodes that should be running the
+	// daemon pod and have one or more of the daemon pod running and
+	// available (ready for at least spec.minReadySeconds)
+	// +optional
+	NumberAvailable int32 `json:"numberAvailable,omitempty" protobuf:"varint,7,opt,name=numberAvailable"`
+
+	// The number of nodes that should be running the
+	// daemon pod and have none of the daemon pod running and available
+	// (ready for at least spec.minReadySeconds)
+	// +optional
+	NumberUnavailable int32 `json:"numberUnavailable,omitempty" protobuf:"varint,8,opt,name=numberUnavailable"`
+
+	// Count of hash collisions for the DaemonSet. The DaemonSet controller
+	// uses this field as a collision avoidance mechanism when it needs to
+	// create the name for the newest ControllerRevision.
+	// +optional
+	CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,9,opt,name=collisionCount"`
+
+	// Represents the latest available observations of a DaemonSet's current state.
+	// +optional
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	Conditions []DaemonSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"`
+}
+
+type DaemonSetConditionType string
+
+// TODO: Add valid condition types of a DaemonSet.
+
+// DaemonSetCondition describes the state of a DaemonSet at a certain point.
+type DaemonSetCondition struct {
+	// Type of DaemonSet condition.
+	Type DaemonSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DaemonSetConditionType"`
+	// Status of the condition, one of True, False, Unknown.
+	Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"`
+	// Last time the condition transitioned from one status to another.
+	// +optional
+	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
+	// The reason for the condition's last transition.
+	// +optional
+	Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
+	// A human readable message indicating details about the transition.
+	// +optional
+	Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// DEPRECATED - This group version of DaemonSet is deprecated by apps/v1beta2/DaemonSet. See the release notes for
+// more information.
+// DaemonSet represents the configuration of a daemon set.
+type DaemonSet struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// The desired behavior of this daemon set.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+	// +optional
+	Spec DaemonSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+	// The current status of this daemon set. This data may be
+	// out of date by some window of time.
+	// Populated by the system.
+	// Read-only.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+	// +optional
+	Status DaemonSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+const (
+	// DEPRECATED: DefaultDaemonSetUniqueLabelKey is used instead.
+	// DaemonSetTemplateGenerationKey is the key of the labels that is added
+	// to daemon set pods to distinguish between old and new pod templates
+	// during DaemonSet template update.
+	DaemonSetTemplateGenerationKey string = "pod-template-generation"
+
+	// DefaultDaemonSetUniqueLabelKey is the default label key that is added
+	// to existing DaemonSet pods to distinguish between old and new
+	// DaemonSet pods during DaemonSet template updates.
+	DefaultDaemonSetUniqueLabelKey = appsv1beta1.ControllerRevisionHashLabelKey
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// DaemonSetList is a collection of daemon sets.
+type DaemonSetList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// A list of daemon sets.
+	Items []DaemonSet `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Ingress is a collection of rules that allow inbound connections to reach the
+// endpoints defined by a backend. An Ingress can be configured to give services
+// externally-reachable urls, load balance traffic, terminate SSL, offer name
+// based virtual hosting etc.
+type Ingress struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Spec is the desired state of the Ingress.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+	// +optional
+	Spec IngressSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+	// Status is the current state of the Ingress.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+	// +optional
+	Status IngressStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// IngressList is a collection of Ingress.
+type IngressList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is the list of Ingress.
+	Items []Ingress `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// IngressSpec describes the Ingress the user wishes to exist.
+type IngressSpec struct {
+	// A default backend capable of servicing requests that don't match any
+	// rule. At least one of 'backend' or 'rules' must be specified. This field
+	// is optional to allow the loadbalancer controller or defaulting logic to
+	// specify a global default.
+	// +optional
+	Backend *IngressBackend `json:"backend,omitempty" protobuf:"bytes,1,opt,name=backend"`
+
+	// TLS configuration. Currently the Ingress only supports a single TLS
+	// port, 443. If multiple members of this list specify different hosts, they
+	// will be multiplexed on the same port according to the hostname specified
+	// through the SNI TLS extension, if the ingress controller fulfilling the
+	// ingress supports SNI.
+	// +optional
+	TLS []IngressTLS `json:"tls,omitempty" protobuf:"bytes,2,rep,name=tls"`
+
+	// A list of host rules used to configure the Ingress. If unspecified, or
+	// no rule matches, all traffic is sent to the default backend.
+	// +optional
+	Rules []IngressRule `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"`
+	// TODO: Add the ability to specify load-balancer IP through claims
+}
+
+// IngressTLS describes the transport layer security associated with an Ingress.
+type IngressTLS struct {
+	// Hosts are a list of hosts included in the TLS certificate. The values in
+	// this list must match the name(s) used in the tlsSecret. Defaults to the
+	// wildcard host setting for the loadbalancer controller fulfilling this
+	// Ingress, if left unspecified.
+	// +optional
+	Hosts []string `json:"hosts,omitempty" protobuf:"bytes,1,rep,name=hosts"`
+	// SecretName is the name of the secret used to terminate SSL traffic on 443.
+	// Field is left optional to allow SSL routing based on SNI hostname alone.
+	// If the SNI host in a listener conflicts with the "Host" header field used
+	// by an IngressRule, the SNI host is used for termination and value of the
+	// Host header is used for routing.
+	// +optional
+	SecretName string `json:"secretName,omitempty" protobuf:"bytes,2,opt,name=secretName"`
+	// TODO: Consider specifying different modes of termination, protocols etc.
+}
+
+// IngressStatus describe the current state of the Ingress.
+type IngressStatus struct {
+	// LoadBalancer contains the current status of the load-balancer.
+	// +optional
+	LoadBalancer v1.LoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"`
+}
+
+// IngressRule represents the rules mapping the paths under a specified host to
+// the related backend services. Incoming requests are first evaluated for a host
+// match, then routed to the backend associated with the matching IngressRuleValue.
+type IngressRule struct {
+	// Host is the fully qualified domain name of a network host, as defined
+	// by RFC 3986. Note the following deviations from the "host" part of the
+	// URI as defined in the RFC:
+	// 1. IPs are not allowed. Currently an IngressRuleValue can only apply to the
+	//	  IP in the Spec of the parent Ingress.
+	// 2. The `:` delimiter is not respected because ports are not allowed.
+	//	  Currently the port of an Ingress is implicitly :80 for http and
+	//	  :443 for https.
+	// Both these may change in the future.
+	// Incoming requests are matched against the host before the IngressRuleValue.
+	// If the host is unspecified, the Ingress routes all traffic based on the
+	// specified IngressRuleValue.
+	// +optional
+	Host string `json:"host,omitempty" protobuf:"bytes,1,opt,name=host"`
+	// IngressRuleValue represents a rule to route requests for this IngressRule.
+	// If unspecified, the rule defaults to an http catch-all. Whether that sends
+	// just traffic matching the host to the default backend or all traffic to the
+	// default backend is left to the controller fulfilling the Ingress. HTTP is
+	// currently the only supported IngressRuleValue.
+	// +optional
+	IngressRuleValue `json:",inline,omitempty" protobuf:"bytes,2,opt,name=ingressRuleValue"`
+}
+
+// IngressRuleValue represents a rule to apply against incoming requests. If the
+// rule is satisfied, the request is routed to the specified backend. Currently
+// mixing different types of rules in a single Ingress is disallowed, so exactly
+// one of the following must be set.
+type IngressRuleValue struct {
+	//TODO:
+	// 1. Consider renaming this resource and the associated rules so they
+	// aren't tied to Ingress. They can be used to route intra-cluster traffic.
+	// 2. Consider adding fields for ingress-type specific global options
+	// usable by a loadbalancer, like http keep-alive.
+
+	// +optional
+	HTTP *HTTPIngressRuleValue `json:"http,omitempty" protobuf:"bytes,1,opt,name=http"`
+}
+
+// HTTPIngressRuleValue is a list of http selectors pointing to backends.
+// In the example: http://<host>/<path>?<searchpart> -> backend where
+// parts of the url correspond to RFC 3986, this resource will be used
+// to match against everything after the last '/' and before the first '?'
+// or '#'.
+type HTTPIngressRuleValue struct {
+	// A collection of paths that map requests to backends.
+	Paths []HTTPIngressPath `json:"paths" protobuf:"bytes,1,rep,name=paths"`
+	// TODO: Consider adding fields for ingress-type specific global
+	// options usable by a loadbalancer, like http keep-alive.
+}
+
+// HTTPIngressPath associates a path regex with a backend. Incoming urls matching
+// the path are forwarded to the backend.
+type HTTPIngressPath struct {
+	// Path is an extended POSIX regex as defined by IEEE Std 1003.1,
+	// (i.e. this follows the egrep/unix syntax, not the perl syntax)
+	// matched against the path of an incoming request. Currently it can
+	// contain characters disallowed from the conventional "path"
+	// part of a URL as defined by RFC 3986. Paths must begin with
+	// a '/'. If unspecified, the path defaults to a catch all sending
+	// traffic to the backend.
+	// +optional
+	Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
+
+	// Backend defines the referenced service endpoint to which the traffic
+	// will be forwarded.
+	Backend IngressBackend `json:"backend" protobuf:"bytes,2,opt,name=backend"`
+}
+
+// IngressBackend describes all endpoints for a given service and port.
+type IngressBackend struct {
+	// Specifies the name of the referenced service.
+	ServiceName string `json:"serviceName" protobuf:"bytes,1,opt,name=serviceName"`
+
+	// Specifies the port of the referenced service.
+	ServicePort intstr.IntOrString `json:"servicePort" protobuf:"bytes,2,opt,name=servicePort"`
+}
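+
+// exampleIngress is an illustrative sketch (not part of the API in this
+// diff); the host, path, service, and secret names are assumptions for
+// demonstration. It routes https://example.com/app to service "app" on port
+// 80, with TLS terminated using the certificate in secret "example-tls".
+func exampleIngress() *Ingress {
+	return &Ingress{
+		ObjectMeta: metav1.ObjectMeta{Name: "example"},
+		Spec: IngressSpec{
+			TLS: []IngressTLS{{
+				Hosts:      []string{"example.com"},
+				SecretName: "example-tls",
+			}},
+			Rules: []IngressRule{{
+				Host: "example.com",
+				IngressRuleValue: IngressRuleValue{
+					HTTP: &HTTPIngressRuleValue{
+						Paths: []HTTPIngressPath{{
+							Path: "/app",
+							Backend: IngressBackend{
+								ServiceName: "app",
+								ServicePort: intstr.FromInt(80),
+							},
+						}},
+					},
+				},
+			}},
+		},
+	}
+}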
+
+// +genclient
+// +genclient:method=GetScale,verb=get,subresource=scale,result=Scale
+// +genclient:method=UpdateScale,verb=update,subresource=scale,input=Scale,result=Scale
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. See the release notes for
+// more information.
+// ReplicaSet ensures that a specified number of pod replicas are running at any given time.
+type ReplicaSet struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// If the Labels of a ReplicaSet are empty, they are defaulted to
+	// be the same as the Pod(s) that the ReplicaSet manages.
+	// Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Spec defines the specification of the desired behavior of the ReplicaSet.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+	// +optional
+	Spec ReplicaSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+	// Status is the most recently observed status of the ReplicaSet.
+	// This data may be out of date by some window of time.
+	// Populated by the system.
+	// Read-only.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+	// +optional
+	Status ReplicaSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ReplicaSetList is a collection of ReplicaSets.
+type ReplicaSetList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// List of ReplicaSets.
+	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller
+	Items []ReplicaSet `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// ReplicaSetSpec is the specification of a ReplicaSet.
+type ReplicaSetSpec struct {
+	// Replicas is the number of desired replicas.
+	// This is a pointer to distinguish between explicit zero and unspecified.
+	// Defaults to 1.
+	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
+	// +optional
+	Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
+
+	// Minimum number of seconds for which a newly created pod should be ready
+	// without any of its container crashing, for it to be considered available.
+	// Defaults to 0 (pod will be considered available as soon as it is ready)
+	// +optional
+	MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"`
+
+	// Selector is a label query over pods that should match the replica count.
+	// If the selector is empty, it is defaulted to the labels present on the pod template.
+	// These are the label keys and values that pods must have in order to be controlled by this replica set.
+	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+	// +optional
+	Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"`
+
+	// Template is the object that describes the pod that will be created if
+	// insufficient replicas are detected.
+	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
+	// +optional
+	Template v1.PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"`
+}
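+
+// exampleReplicaSetSpec is an illustrative sketch (not part of the API in
+// this diff): a ReplicaSetSpec using a set-based selector; names and values
+// are assumptions. The selector must match the pod template's labels.
+func exampleReplicaSetSpec() ReplicaSetSpec {
+	replicas := int32(2)
+	return ReplicaSetSpec{
+		Replicas: &replicas,
+		Selector: &metav1.LabelSelector{
+			MatchExpressions: []metav1.LabelSelectorRequirement{{
+				Key:      "tier",
+				Operator: metav1.LabelSelectorOpIn,
+				Values:   []string{"frontend"},
+			}},
+		},
+		Template: v1.PodTemplateSpec{
+			ObjectMeta: metav1.ObjectMeta{
+				Labels: map[string]string{"tier": "frontend"},
+			},
+		},
+	}
+}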
+
+// ReplicaSetStatus represents the current status of a ReplicaSet.
+type ReplicaSetStatus struct {
+	// Replicas is the most recently observed number of replicas.
+	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
+	Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"`
+
+	// The number of pods that have labels matching the labels of the pod template of the replicaset.
+	// +optional
+	FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty" protobuf:"varint,2,opt,name=fullyLabeledReplicas"`
+
+	// The number of ready replicas for this replica set.
+	// +optional
+	ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,4,opt,name=readyReplicas"`
+
+	// The number of available replicas (ready for at least minReadySeconds) for this replica set.
+	// +optional
+	AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,5,opt,name=availableReplicas"`
+
+	// ObservedGeneration reflects the generation of the most recently observed ReplicaSet.
+	// +optional
+	ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"`
+
+	// Represents the latest available observations of a replica set's current state.
+	// +optional
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	Conditions []ReplicaSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"`
+}
+
+type ReplicaSetConditionType string
+
+// These are valid conditions of a replica set.
+const (
+	// ReplicaSetReplicaFailure is added in a replica set when one of its pods fails to be created
+	// due to insufficient quota, limit ranges, pod security policy, node selectors, etc., or fails
+	// to be deleted due to the kubelet being down or finalizers failing.
+	ReplicaSetReplicaFailure ReplicaSetConditionType = "ReplicaFailure"
+)
+
+// ReplicaSetCondition describes the state of a replica set at a certain point.
+type ReplicaSetCondition struct {
+	// Type of replica set condition.
+	Type ReplicaSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ReplicaSetConditionType"`
+	// Status of the condition, one of True, False, Unknown.
+	Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"`
+	// The last time the condition transitioned from one status to another.
+	// +optional
+	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
+	// The reason for the condition's last transition.
+	// +optional
+	Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
+	// A human readable message indicating details about the transition.
+	// +optional
+	Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PodSecurityPolicy governs the ability to make requests that affect the Security Context
+// that will be applied to a pod and container.
+// Deprecated: use PodSecurityPolicy from policy API Group instead.
+type PodSecurityPolicy struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// spec defines the policy enforced.
+	// +optional
+	Spec PodSecurityPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+}
+
+// PodSecurityPolicySpec defines the policy enforced.
+// Deprecated: use PodSecurityPolicySpec from policy API Group instead.
+type PodSecurityPolicySpec struct {
+	// privileged determines if a pod can request to be run as privileged.
+	// +optional
+	Privileged bool `json:"privileged,omitempty" protobuf:"varint,1,opt,name=privileged"`
+	// defaultAddCapabilities is the default set of capabilities that will be added to the container
+	// unless the pod spec specifically drops the capability.  You may not list a capability in both
+	// defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly
+	// allowed, and need not be included in the allowedCapabilities list.
+	// +optional
+	DefaultAddCapabilities []v1.Capability `json:"defaultAddCapabilities,omitempty" protobuf:"bytes,2,rep,name=defaultAddCapabilities,casttype=k8s.io/api/core/v1.Capability"`
+	// requiredDropCapabilities are the capabilities that will be dropped from the container.  These
+	// are required to be dropped and cannot be added.
+	// +optional
+	RequiredDropCapabilities []v1.Capability `json:"requiredDropCapabilities,omitempty" protobuf:"bytes,3,rep,name=requiredDropCapabilities,casttype=k8s.io/api/core/v1.Capability"`
+	// allowedCapabilities is a list of capabilities that can be requested to add to the container.
+	// Capabilities in this field may be added at the pod author's discretion.
+	// You must not list a capability in both allowedCapabilities and requiredDropCapabilities.
+	// +optional
+	AllowedCapabilities []v1.Capability `json:"allowedCapabilities,omitempty" protobuf:"bytes,4,rep,name=allowedCapabilities,casttype=k8s.io/api/core/v1.Capability"`
+	// volumes is a white list of allowed volume plugins. Empty indicates that
+	// no volumes may be used. To allow all volumes you may use '*'.
+	// +optional
+	Volumes []FSType `json:"volumes,omitempty" protobuf:"bytes,5,rep,name=volumes,casttype=FSType"`
+	// hostNetwork determines if the policy allows the use of HostNetwork in the pod spec.
+	// +optional
+	HostNetwork bool `json:"hostNetwork,omitempty" protobuf:"varint,6,opt,name=hostNetwork"`
+	// hostPorts determines which host port ranges are allowed to be exposed.
+	// +optional
+	HostPorts []HostPortRange `json:"hostPorts,omitempty" protobuf:"bytes,7,rep,name=hostPorts"`
+	// hostPID determines if the policy allows the use of HostPID in the pod spec.
+	// +optional
+	HostPID bool `json:"hostPID,omitempty" protobuf:"varint,8,opt,name=hostPID"`
+	// hostIPC determines if the policy allows the use of HostIPC in the pod spec.
+	// +optional
+	HostIPC bool `json:"hostIPC,omitempty" protobuf:"varint,9,opt,name=hostIPC"`
+	// seLinux is the strategy that will dictate the allowable labels that may be set.
+	SELinux SELinuxStrategyOptions `json:"seLinux" protobuf:"bytes,10,opt,name=seLinux"`
+	// runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.
+	RunAsUser RunAsUserStrategyOptions `json:"runAsUser" protobuf:"bytes,11,opt,name=runAsUser"`
+	// RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set.
+	// If this field is omitted, the pod's RunAsGroup can take any value. This field requires the
+	// RunAsGroup feature gate to be enabled.
+	// +optional
+	RunAsGroup *RunAsGroupStrategyOptions `json:"runAsGroup,omitempty" protobuf:"bytes,22,opt,name=runAsGroup"`
+	// supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.
+	SupplementalGroups SupplementalGroupsStrategyOptions `json:"supplementalGroups" protobuf:"bytes,12,opt,name=supplementalGroups"`
+	// fsGroup is the strategy that will dictate what fs group is used by the SecurityContext.
+	FSGroup FSGroupStrategyOptions `json:"fsGroup" protobuf:"bytes,13,opt,name=fsGroup"`
+	// readOnlyRootFilesystem when set to true will force containers to run with a read only root file
+	// system.  If the container specifically requests to run with a non-read only root file system
+	// the PSP should deny the pod.
+	// If set to false the container may run with a read only root file system if it wishes but it
+	// will not be forced to.
+	// +optional
+	ReadOnlyRootFilesystem bool `json:"readOnlyRootFilesystem,omitempty" protobuf:"varint,14,opt,name=readOnlyRootFilesystem"`
+	// defaultAllowPrivilegeEscalation controls the default setting for whether a
+	// process can gain more privileges than its parent process.
+	// +optional
+	DefaultAllowPrivilegeEscalation *bool `json:"defaultAllowPrivilegeEscalation,omitempty" protobuf:"varint,15,opt,name=defaultAllowPrivilegeEscalation"`
+	// allowPrivilegeEscalation determines if a pod can request to allow
+	// privilege escalation. If unspecified, defaults to true.
+	// +optional
+	AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,16,opt,name=allowPrivilegeEscalation"`
+	// allowedHostPaths is a white list of allowed host paths. Empty indicates
+	// that all host paths may be used.
+	// +optional
+	AllowedHostPaths []AllowedHostPath `json:"allowedHostPaths,omitempty" protobuf:"bytes,17,rep,name=allowedHostPaths"`
+	// allowedFlexVolumes is a whitelist of allowed Flexvolumes.  Empty or nil indicates that all
+	// Flexvolumes may be used.  This parameter is effective only when the usage of the Flexvolumes
+	// is allowed in the "volumes" field.
+	// +optional
+	AllowedFlexVolumes []AllowedFlexVolume `json:"allowedFlexVolumes,omitempty" protobuf:"bytes,18,rep,name=allowedFlexVolumes"`
+	// allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none.
+	// Each entry is either a plain sysctl name or ends in "*" in which case it is considered
+	// as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed.
+	// Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection.
+	//
+	// Examples:
+	// e.g. "foo/*" allows "foo/bar", "foo/baz", etc.
+	// e.g. "foo.*" allows "foo.bar", "foo.baz", etc.
+	// +optional
+	AllowedUnsafeSysctls []string `json:"allowedUnsafeSysctls,omitempty" protobuf:"bytes,19,rep,name=allowedUnsafeSysctls"`
+	// forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none.
+	// Each entry is either a plain sysctl name or ends in "*" in which case it is considered
+	// as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.
+	//
+	// Examples:
+	// e.g. "foo/*" forbids "foo/bar", "foo/baz", etc.
+	// e.g. "foo.*" forbids "foo.bar", "foo.baz", etc.
+	// +optional
+	ForbiddenSysctls []string `json:"forbiddenSysctls,omitempty" protobuf:"bytes,20,rep,name=forbiddenSysctls"`
+	// AllowedProcMountTypes is a whitelist of allowed ProcMountTypes.
+	// Empty or nil indicates that only the DefaultProcMountType may be used.
+	// This requires the ProcMountType feature flag to be enabled.
+	// +optional
+	AllowedProcMountTypes []v1.ProcMountType `json:"allowedProcMountTypes,omitempty" protobuf:"bytes,21,opt,name=allowedProcMountTypes"`
+}
+
+// AllowedHostPath defines the host volume conditions that will be enabled by a policy
+// for pods to use. It requires the path prefix to be defined.
+// Deprecated: use AllowedHostPath from policy API Group instead.
+type AllowedHostPath struct {
+	// pathPrefix is the path prefix that the host volume must match.
+	// It does not support `*`.
+	// Trailing slashes are trimmed when validating the path prefix with a host path.
+	//
+	// Examples:
+	// `/foo` would allow `/foo`, `/foo/` and `/foo/bar`
+	// `/foo` would not allow `/food` or `/etc/foo`
+	PathPrefix string `json:"pathPrefix,omitempty" protobuf:"bytes,1,rep,name=pathPrefix"`
+
+	// when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly.
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"`
+}
+
+// FSType gives strong typing to different file systems that are used by volumes.
+// Deprecated: use FSType from policy API Group instead.
+type FSType string
+
+var (
+	AzureFile             FSType = "azureFile"
+	Flocker               FSType = "flocker"
+	FlexVolume            FSType = "flexVolume"
+	HostPath              FSType = "hostPath"
+	EmptyDir              FSType = "emptyDir"
+	GCEPersistentDisk     FSType = "gcePersistentDisk"
+	AWSElasticBlockStore  FSType = "awsElasticBlockStore"
+	GitRepo               FSType = "gitRepo"
+	Secret                FSType = "secret"
+	NFS                   FSType = "nfs"
+	ISCSI                 FSType = "iscsi"
+	Glusterfs             FSType = "glusterfs"
+	PersistentVolumeClaim FSType = "persistentVolumeClaim"
+	RBD                   FSType = "rbd"
+	Cinder                FSType = "cinder"
+	CephFS                FSType = "cephFS"
+	DownwardAPI           FSType = "downwardAPI"
+	FC                    FSType = "fc"
+	ConfigMap             FSType = "configMap"
+	Quobyte               FSType = "quobyte"
+	AzureDisk             FSType = "azureDisk"
+	All                   FSType = "*"
+)
+
+// AllowedFlexVolume represents a single Flexvolume that is allowed to be used.
+// Deprecated: use AllowedFlexVolume from policy API Group instead.
+type AllowedFlexVolume struct {
+	// driver is the name of the Flexvolume driver.
+	Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"`
+}
+
+// HostPortRange defines a range of host ports that will be enabled by a policy
+// for pods to use.  It requires both the start and end to be defined.
+// Deprecated: use HostPortRange from policy API Group instead.
+type HostPortRange struct {
+	// min is the start of the range, inclusive.
+	Min int32 `json:"min" protobuf:"varint,1,opt,name=min"`
+	// max is the end of the range, inclusive.
+	Max int32 `json:"max" protobuf:"varint,2,opt,name=max"`
+}
+
+// SELinuxStrategyOptions defines the strategy type and any options used to create the strategy.
+// Deprecated: use SELinuxStrategyOptions from policy API Group instead.
+type SELinuxStrategyOptions struct {
+	// rule is the strategy that will dictate the allowable labels that may be set.
+	Rule SELinuxStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=SELinuxStrategy"`
+	// seLinuxOptions required to run as; required for MustRunAs
+	// More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+	// +optional
+	SELinuxOptions *v1.SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,2,opt,name=seLinuxOptions"`
+}
+
+// SELinuxStrategy denotes strategy types for generating SELinux options for a
+// Security Context.
+// Deprecated: use SELinuxStrategy from policy API Group instead.
+type SELinuxStrategy string
+
+const (
+	// SELinuxStrategyMustRunAs means that container must have SELinux labels of X applied.
+	// Deprecated: use SELinuxStrategyMustRunAs from policy API Group instead.
+	SELinuxStrategyMustRunAs SELinuxStrategy = "MustRunAs"
+	// SELinuxStrategyRunAsAny means that container may make requests for any SELinux context labels.
+	// Deprecated: use SELinuxStrategyRunAsAny from policy API Group instead.
+	SELinuxStrategyRunAsAny SELinuxStrategy = "RunAsAny"
+)
+
+// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy.
+// Deprecated: use RunAsUserStrategyOptions from policy API Group instead.
+type RunAsUserStrategyOptions struct {
+	// rule is the strategy that will dictate the allowable RunAsUser values that may be set.
+	Rule RunAsUserStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=RunAsUserStrategy"`
+	// ranges are the allowed ranges of uids that may be used. If you would like to force a single uid
+	// then supply a single range with the same start and end. Required for MustRunAs.
+	// +optional
+	Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"`
+}
+
+// RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy.
+// Deprecated: use RunAsGroupStrategyOptions from policy API Group instead.
+type RunAsGroupStrategyOptions struct {
+	// rule is the strategy that will dictate the allowable RunAsGroup values that may be set.
+	Rule RunAsGroupStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=RunAsGroupStrategy"`
+	// ranges are the allowed ranges of gids that may be used. If you would like to force a single gid
+	// then supply a single range with the same start and end. Required for MustRunAs.
+	// +optional
+	Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"`
+}
+
+// IDRange provides a min/max of an allowed range of IDs.
+// Deprecated: use IDRange from policy API Group instead.
+type IDRange struct {
+	// min is the start of the range, inclusive.
+	Min int64 `json:"min" protobuf:"varint,1,opt,name=min"`
+	// max is the end of the range, inclusive.
+	Max int64 `json:"max" protobuf:"varint,2,opt,name=max"`
+}
+
+// RunAsUserStrategy denotes strategy types for generating RunAsUser values for a
+// Security Context.
+// Deprecated: use RunAsUserStrategy from policy API Group instead.
+type RunAsUserStrategy string
+
+const (
+	// RunAsUserStrategyMustRunAs means that container must run as a particular uid.
+	// Deprecated: use RunAsUserStrategyMustRunAs from policy API Group instead.
+	RunAsUserStrategyMustRunAs RunAsUserStrategy = "MustRunAs"
+	// RunAsUserStrategyMustRunAsNonRoot means that container must run as a non-root uid.
+	// Deprecated: use RunAsUserStrategyMustRunAsNonRoot from policy API Group instead.
+	RunAsUserStrategyMustRunAsNonRoot RunAsUserStrategy = "MustRunAsNonRoot"
+	// RunAsUserStrategyRunAsAny means that container may make requests for any uid.
+	// Deprecated: use RunAsUserStrategyRunAsAny from policy API Group instead.
+	RunAsUserStrategyRunAsAny RunAsUserStrategy = "RunAsAny"
+)
+
+// RunAsGroupStrategy denotes strategy types for generating RunAsGroup values for a
+// Security Context.
+// Deprecated: use RunAsGroupStrategy from policy API Group instead.
+type RunAsGroupStrategy string
+
+const (
+	// RunAsGroupStrategyMayRunAs means that container does not need to run with a particular gid.
+	// However, when a RunAsGroup is specified, it has to fall in the defined range.
+	RunAsGroupStrategyMayRunAs RunAsGroupStrategy = "MayRunAs"
+	// RunAsGroupStrategyMustRunAs means that container must run as a particular gid.
+	// Deprecated: use RunAsGroupStrategyMustRunAs from policy API Group instead.
+	RunAsGroupStrategyMustRunAs RunAsGroupStrategy = "MustRunAs"
+	// RunAsGroupStrategyRunAsAny means that container may make requests for any gid.
+	// Deprecated: use RunAsGroupStrategyRunAsAny from policy API Group instead.
+	RunAsGroupStrategyRunAsAny RunAsGroupStrategy = "RunAsAny"
+)
+
+// FSGroupStrategyOptions defines the strategy type and options used to create the strategy.
+// Deprecated: use FSGroupStrategyOptions from policy API Group instead.
+type FSGroupStrategyOptions struct {
+	// rule is the strategy that will dictate what FSGroup is used in the SecurityContext.
+	// +optional
+	Rule FSGroupStrategyType `json:"rule,omitempty" protobuf:"bytes,1,opt,name=rule,casttype=FSGroupStrategyType"`
+	// ranges are the allowed ranges of fs groups.  If you would like to force a single
+	// fs group then supply a single range with the same start and end. Required for MustRunAs.
+	// +optional
+	Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"`
+}
+
+// FSGroupStrategyType denotes strategy types for generating FSGroup values for a
+// SecurityContext
+// Deprecated: use FSGroupStrategyType from policy API Group instead.
+type FSGroupStrategyType string
+
+const (
+	// FSGroupStrategyMustRunAs means that container must have FSGroup of X applied.
+	// Deprecated: use FSGroupStrategyMustRunAs from policy API Group instead.
+	FSGroupStrategyMustRunAs FSGroupStrategyType = "MustRunAs"
+	// FSGroupStrategyRunAsAny means that container may make requests for any FSGroup labels.
+	// Deprecated: use FSGroupStrategyRunAsAny from policy API Group instead.
+	FSGroupStrategyRunAsAny FSGroupStrategyType = "RunAsAny"
+)
+
+// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy.
+// Deprecated: use SupplementalGroupsStrategyOptions from policy API Group instead.
+type SupplementalGroupsStrategyOptions struct {
+	// rule is the strategy that will dictate what supplemental groups are used in the SecurityContext.
+	// +optional
+	Rule SupplementalGroupsStrategyType `json:"rule,omitempty" protobuf:"bytes,1,opt,name=rule,casttype=SupplementalGroupsStrategyType"`
+	// ranges are the allowed ranges of supplemental groups.  If you would like to force a single
+	// supplemental group then supply a single range with the same start and end. Required for MustRunAs.
+	// +optional
+	Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"`
+}
+
+// SupplementalGroupsStrategyType denotes strategy types for determining valid supplemental
+// groups for a SecurityContext.
+// Deprecated: use SupplementalGroupsStrategyType from policy API Group instead.
+type SupplementalGroupsStrategyType string
+
+const (
+	// SupplementalGroupsStrategyMustRunAs means that container must run as a particular gid.
+	// Deprecated: use SupplementalGroupsStrategyMustRunAs from policy API Group instead.
+	SupplementalGroupsStrategyMustRunAs SupplementalGroupsStrategyType = "MustRunAs"
+	// SupplementalGroupsStrategyRunAsAny means that container may make requests for any gid.
+	// Deprecated: use SupplementalGroupsStrategyRunAsAny from policy API Group instead.
+	SupplementalGroupsStrategyRunAsAny SupplementalGroupsStrategyType = "RunAsAny"
+)
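+
+// examplePodSecurityPolicySpec is an illustrative sketch (not part of the
+// API in this diff) combining the strategy types above: a restrictive policy
+// that forbids privileged pods, forces non-root users, and pins fsGroup and
+// supplemental groups to an assumed gid range.
+func examplePodSecurityPolicySpec() PodSecurityPolicySpec {
+	gids := []IDRange{{Min: 1000, Max: 2000}}
+	return PodSecurityPolicySpec{
+		Privileged: false,
+		Volumes:    []FSType{ConfigMap, Secret, EmptyDir, PersistentVolumeClaim},
+		SELinux:    SELinuxStrategyOptions{Rule: SELinuxStrategyRunAsAny},
+		RunAsUser:  RunAsUserStrategyOptions{Rule: RunAsUserStrategyMustRunAsNonRoot},
+		SupplementalGroups: SupplementalGroupsStrategyOptions{
+			Rule:   SupplementalGroupsStrategyMustRunAs,
+			Ranges: gids,
+		},
+		FSGroup: FSGroupStrategyOptions{
+			Rule:   FSGroupStrategyMustRunAs,
+			Ranges: gids,
+		},
+	}
+}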
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PodSecurityPolicyList is a list of PodSecurityPolicy objects.
+// Deprecated: use PodSecurityPolicyList from policy API Group instead.
+type PodSecurityPolicyList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// items is a list of schema objects.
+	Items []PodSecurityPolicy `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// DEPRECATED 1.9 - This group version of NetworkPolicy is deprecated by networking/v1/NetworkPolicy.
+// NetworkPolicy describes what network traffic is allowed for a set of Pods.
+type NetworkPolicy struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Specification of the desired behavior for this NetworkPolicy.
+	// +optional
+	Spec NetworkPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+}
+
+// DEPRECATED 1.9 - This group version of PolicyType is deprecated by networking/v1/PolicyType.
+// PolicyType string describes the NetworkPolicy type.
+// This type is beta-level in 1.8
+type PolicyType string
+
+const (
+	// PolicyTypeIngress is a NetworkPolicy that affects ingress traffic on selected pods
+	PolicyTypeIngress PolicyType = "Ingress"
+	// PolicyTypeEgress is a NetworkPolicy that affects egress traffic on selected pods
+	PolicyTypeEgress PolicyType = "Egress"
+)
+
+// DEPRECATED 1.9 - This group version of NetworkPolicySpec is deprecated by networking/v1/NetworkPolicySpec.
+type NetworkPolicySpec struct {
+	// Selects the pods to which this NetworkPolicy object applies.  The array of ingress rules
+	// is applied to any pods selected by this field. Multiple network policies can select the
+	// same set of pods.  In this case, the ingress rules for each are combined additively.
+	// This field is NOT optional and follows standard label selector semantics.
+	// An empty podSelector matches all pods in this namespace.
+	PodSelector metav1.LabelSelector `json:"podSelector" protobuf:"bytes,1,opt,name=podSelector"`
+
+	// List of ingress rules to be applied to the selected pods.
+	// Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod
+	// OR if the traffic source is the pod's local node,
+	// OR if the traffic matches at least one ingress rule across all of the NetworkPolicy
+	// objects whose podSelector matches the pod.
+	// If this field is empty then this NetworkPolicy does not allow any traffic
+	// (and serves solely to ensure that the pods it selects are isolated by default).
+	// +optional
+	Ingress []NetworkPolicyIngressRule `json:"ingress,omitempty" protobuf:"bytes,2,rep,name=ingress"`
+
+	// List of egress rules to be applied to the selected pods. Outgoing traffic is
+	// allowed if there are no NetworkPolicies selecting the pod (and cluster policy
+	// otherwise allows the traffic), OR if the traffic matches at least one egress rule
+	// across all of the NetworkPolicy objects whose podSelector matches the pod. If
+	// this field is empty then this NetworkPolicy limits all outgoing traffic (and serves
+	// solely to ensure that the pods it selects are isolated by default).
+	// This field is beta-level in 1.8
+	// +optional
+	Egress []NetworkPolicyEgressRule `json:"egress,omitempty" protobuf:"bytes,3,rep,name=egress"`
+
+	// List of rule types that the NetworkPolicy relates to.
+	// Valid options are Ingress, Egress, or Ingress,Egress.
+	// If this field is not specified, it will default based on the existence of Ingress or Egress rules;
+	// policies that contain an Egress section are assumed to affect Egress, and all policies
+	// (whether or not they contain an Ingress section) are assumed to affect Ingress.
+	// If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ].
+	// Likewise, if you want to write a policy that specifies that no egress is allowed,
+	// you must specify a policyTypes value that includes "Egress" (since such a policy would not include
+	// an Egress section and would otherwise default to just [ "Ingress" ]).
+	// This field is beta-level in 1.8
+	// +optional
+	PolicyTypes []PolicyType `json:"policyTypes,omitempty" protobuf:"bytes,4,rep,name=policyTypes,casttype=PolicyType"`
+}
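+
+// Illustrative only, not part of the upstream API: a minimal sketch of an
+// egress-only spec. PolicyTypeEgress must be named explicitly, because a
+// spec with no Ingress section would otherwise default to just [ "Egress" ]
+// being absent and [ "Ingress" ] assumed, per the policyTypes comment above.
+// The variable names and the port value 53 are assumptions for illustration.
+//
+//	dnsPort := intstr.FromInt(53)
+//	spec := NetworkPolicySpec{
+//		PodSelector: metav1.LabelSelector{MatchLabels: map[string]string{"app": "db"}},
+//		Egress: []NetworkPolicyEgressRule{{
+//			Ports: []NetworkPolicyPort{{Port: &dnsPort}},
+//		}},
+//		PolicyTypes: []PolicyType{PolicyTypeEgress},
+//	}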
+
+// DEPRECATED 1.9 - This group version of NetworkPolicyIngressRule is deprecated by networking/v1/NetworkPolicyIngressRule.
+// This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from.
+type NetworkPolicyIngressRule struct {
+	// List of ports which should be made accessible on the pods selected for this rule.
+	// Each item in this list is combined using a logical OR.
+	// If this field is empty or missing, this rule matches all ports (traffic not restricted by port).
+	// If this field is present and contains at least one item, then this rule allows traffic
+	// only if the traffic matches at least one port in the list.
+	// +optional
+	Ports []NetworkPolicyPort `json:"ports,omitempty" protobuf:"bytes,1,rep,name=ports"`
+
+	// List of sources which should be able to access the pods selected for this rule.
+	// Items in this list are combined using a logical OR operation.
+	// If this field is empty or missing, this rule matches all sources (traffic not restricted by source).
+	// If this field is present and contains at least one item, this rule allows traffic only if the
+	// traffic matches at least one item in the from list.
+	// +optional
+	From []NetworkPolicyPeer `json:"from,omitempty" protobuf:"bytes,2,rep,name=from"`
+}
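+
+// Illustrative only, not part of the upstream API: a minimal sketch of a
+// rule that admits traffic only when it matches a listed port AND a listed
+// source, reflecting the semantics above (OR within each list, AND across
+// ports and from). httpPort and the selector labels are assumptions.
+//
+//	httpPort := intstr.FromInt(8080)
+//	rule := NetworkPolicyIngressRule{
+//		Ports: []NetworkPolicyPort{{Port: &httpPort}},
+//		From: []NetworkPolicyPeer{{
+//			NamespaceSelector: &metav1.LabelSelector{
+//				MatchLabels: map[string]string{"team": "frontend"},
+//			},
+//		}},
+//	}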
+
+// DEPRECATED 1.9 - This group version of NetworkPolicyEgressRule is deprecated by networking/v1/NetworkPolicyEgressRule.
+// NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods
+// matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to.
+// This type is beta-level in 1.8
+type NetworkPolicyEgressRule struct {
+	// List of destination ports for outgoing traffic.
+	// Each item in this list is combined using a logical OR. If this field is
+	// empty or missing, this rule matches all ports (traffic not restricted by port).
+	// If this field is present and contains at least one item, then this rule allows
+	// traffic only if the traffic matches at least one port in the list.
+	// +optional
+	Ports []NetworkPolicyPort `json:"ports,omitempty" protobuf:"bytes,1,rep,name=ports"`
+
+	// List of destinations for outgoing traffic of pods selected for this rule.
+	// Items in this list are combined using a logical OR operation. If this field is
+	// empty or missing, this rule matches all destinations (traffic not restricted by
+	// destination). If this field is present and contains at least one item, this rule
+	// allows traffic only if the traffic matches at least one item in the to list.
+	// +optional
+	To []NetworkPolicyPeer `json:"to,omitempty" protobuf:"bytes,2,rep,name=to"`
+}
+
+// DEPRECATED 1.9 - This group version of NetworkPolicyPort is deprecated by networking/v1/NetworkPolicyPort.
+type NetworkPolicyPort struct {
+	// Optional.  The protocol (TCP, UDP, or SCTP) which traffic must match.
+	// If not specified, this field defaults to TCP.
+	// +optional
+	Protocol *v1.Protocol `json:"protocol,omitempty" protobuf:"bytes,1,opt,name=protocol,casttype=k8s.io/api/core/v1.Protocol"`
+
+	// If specified, the port on the given protocol.  This can
+	// either be a numerical or named port on a pod.  If this field is not provided,
+	// this matches all port names and numbers.
+	// If present, only traffic on the specified protocol AND port
+	// will be matched.
+	// +optional
+	Port *intstr.IntOrString `json:"port,omitempty" protobuf:"bytes,2,opt,name=port"`
+}
+
+// DEPRECATED 1.9 - This group version of IPBlock is deprecated by networking/v1/IPBlock.
+// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24") that is allowed to the pods
+// matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should
+// not be included within this rule.
+type IPBlock struct {
+	// CIDR is a string representing the IP Block
+	// Valid examples are "192.168.1.1/24"
+	CIDR string `json:"cidr" protobuf:"bytes,1,name=cidr"`
+	// Except is a slice of CIDRs that should not be included within an IP Block
+	// Valid examples are "192.168.1.1/24"
+	// Except values will be rejected if they are outside the CIDR range
+	// +optional
+	Except []string `json:"except,omitempty" protobuf:"bytes,2,rep,name=except"`
+}
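+
+// Illustrative only, not part of the upstream API: a minimal sketch of an
+// IPBlock that admits a /16 while carving out one inner /24; the Except
+// entry must fall inside the CIDR range, as noted above. The addresses are
+// assumptions for illustration.
+//
+//	block := IPBlock{
+//		CIDR:   "10.0.0.0/16",
+//		Except: []string{"10.0.1.0/24"},
+//	}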
+
+// DEPRECATED 1.9 - This group version of NetworkPolicyPeer is deprecated by networking/v1/NetworkPolicyPeer.
+type NetworkPolicyPeer struct {
+	// This is a label selector which selects Pods. This field follows standard label
+	// selector semantics; if present but empty, it selects all pods.
+	//
+	// If NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects
+	// the Pods matching PodSelector in the Namespaces selected by NamespaceSelector.
+	// Otherwise it selects the Pods matching PodSelector in the policy's own Namespace.
+	// +optional
+	PodSelector *metav1.LabelSelector `json:"podSelector,omitempty" protobuf:"bytes,1,opt,name=podSelector"`
+
+	// Selects Namespaces using cluster-scoped labels. This field follows standard label
+	// selector semantics; if present but empty, it selects all namespaces.
+	//
+	// If PodSelector is also set, then the NetworkPolicyPeer as a whole selects
+	// the Pods matching PodSelector in the Namespaces selected by NamespaceSelector.
+	// Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector.
+	// +optional
+	NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,2,opt,name=namespaceSelector"`
+
+	// IPBlock defines policy on a particular IPBlock. If this field is set then
+	// neither of the other fields can be.
+	// +optional
+	IPBlock *IPBlock `json:"ipBlock,omitempty" protobuf:"bytes,3,rep,name=ipBlock"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// DEPRECATED 1.9 - This group version of NetworkPolicyList is deprecated by networking/v1/NetworkPolicyList.
+// NetworkPolicyList is a list of NetworkPolicy objects.
+type NetworkPolicyList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is a list of schema objects.
+	Items []NetworkPolicy `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
diff --git a/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go
new file mode 100644
index 0000000..bce6036
--- /dev/null
+++ b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go
@@ -0,0 +1,640 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple lines or blocks that you want to ignore, use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_AllowedFlexVolume = map[string]string{
+	"":       "AllowedFlexVolume represents a single Flexvolume that is allowed to be used. Deprecated: use AllowedFlexVolume from policy API Group instead.",
+	"driver": "driver is the name of the Flexvolume driver.",
+}
+
+func (AllowedFlexVolume) SwaggerDoc() map[string]string {
+	return map_AllowedFlexVolume
+}
+
+var map_AllowedHostPath = map[string]string{
+	"":           "AllowedHostPath defines the host volume conditions that will be enabled by a policy for pods to use. It requires the path prefix to be defined. Deprecated: use AllowedHostPath from policy API Group instead.",
+	"pathPrefix": "pathPrefix is the path prefix that the host volume must match. It does not support `*`. Trailing slashes are trimmed when validating the path prefix with a host path.\n\nExamples: `/foo` would allow `/foo`, `/foo/` and `/foo/bar` `/foo` would not allow `/food` or `/etc/foo`",
+	"readOnly":   "when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly.",
+}
+
+func (AllowedHostPath) SwaggerDoc() map[string]string {
+	return map_AllowedHostPath
+}
+
+var map_DaemonSet = map[string]string{
+	"":         "DEPRECATED - This group version of DaemonSet is deprecated by apps/v1beta2/DaemonSet. See the release notes for more information. DaemonSet represents the configuration of a daemon set.",
+	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"spec":     "The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+	"status":   "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+}
+
+func (DaemonSet) SwaggerDoc() map[string]string {
+	return map_DaemonSet
+}
+
+var map_DaemonSetCondition = map[string]string{
+	"":                   "DaemonSetCondition describes the state of a DaemonSet at a certain point.",
+	"type":               "Type of DaemonSet condition.",
+	"status":             "Status of the condition, one of True, False, Unknown.",
+	"lastTransitionTime": "Last time the condition transitioned from one status to another.",
+	"reason":             "The reason for the condition's last transition.",
+	"message":            "A human readable message indicating details about the transition.",
+}
+
+func (DaemonSetCondition) SwaggerDoc() map[string]string {
+	return map_DaemonSetCondition
+}
+
+var map_DaemonSetList = map[string]string{
+	"":         "DaemonSetList is a collection of daemon sets.",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"items":    "A list of daemon sets.",
+}
+
+func (DaemonSetList) SwaggerDoc() map[string]string {
+	return map_DaemonSetList
+}
+
+var map_DaemonSetSpec = map[string]string{
+	"":                     "DaemonSetSpec is the specification of a daemon set.",
+	"selector":             "A label query over pods that are managed by the daemon set. Must match in order to be controlled. If empty, defaulted to labels on Pod template. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
+	"template":             "An object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template",
+	"updateStrategy":       "An update strategy to replace existing DaemonSet pods with new pods.",
+	"minReadySeconds":      "The minimum number of seconds for which a newly created DaemonSet pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).",
+	"templateGeneration":   "DEPRECATED. A sequence number representing a specific generation of the template. Populated by the system. It can be set only during the creation.",
+	"revisionHistoryLimit": "The number of old history to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.",
+}
+
+func (DaemonSetSpec) SwaggerDoc() map[string]string {
+	return map_DaemonSetSpec
+}
+
+var map_DaemonSetStatus = map[string]string{
+	"":                       "DaemonSetStatus represents the current status of a daemon set.",
+	"currentNumberScheduled": "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",
+	"numberMisscheduled":     "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",
+	"desiredNumberScheduled": "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",
+	"numberReady":            "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready.",
+	"observedGeneration":     "The most recent generation observed by the daemon set controller.",
+	"updatedNumberScheduled": "The total number of nodes that are running updated daemon pod",
+	"numberAvailable":        "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available (ready for at least spec.minReadySeconds)",
+	"numberUnavailable":      "The number of nodes that should be running the daemon pod and have none of the daemon pod running and available (ready for at least spec.minReadySeconds)",
+	"collisionCount":         "Count of hash collisions for the DaemonSet. The DaemonSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.",
+	"conditions":             "Represents the latest available observations of a DaemonSet's current state.",
+}
+
+func (DaemonSetStatus) SwaggerDoc() map[string]string {
+	return map_DaemonSetStatus
+}
+
+var map_DaemonSetUpdateStrategy = map[string]string{
+	"type":          "Type of daemon set update. Can be \"RollingUpdate\" or \"OnDelete\". Default is OnDelete.",
+	"rollingUpdate": "Rolling update config params. Present only if type = \"RollingUpdate\".",
+}
+
+func (DaemonSetUpdateStrategy) SwaggerDoc() map[string]string {
+	return map_DaemonSetUpdateStrategy
+}
+
+var map_Deployment = map[string]string{
+	"":         "DEPRECATED - This group version of Deployment is deprecated by apps/v1beta2/Deployment. See the release notes for more information. Deployment enables declarative updates for Pods and ReplicaSets.",
+	"metadata": "Standard object metadata.",
+	"spec":     "Specification of the desired behavior of the Deployment.",
+	"status":   "Most recently observed status of the Deployment.",
+}
+
+func (Deployment) SwaggerDoc() map[string]string {
+	return map_Deployment
+}
+
+var map_DeploymentCondition = map[string]string{
+	"":                   "DeploymentCondition describes the state of a deployment at a certain point.",
+	"type":               "Type of deployment condition.",
+	"status":             "Status of the condition, one of True, False, Unknown.",
+	"lastUpdateTime":     "The last time this condition was updated.",
+	"lastTransitionTime": "Last time the condition transitioned from one status to another.",
+	"reason":             "The reason for the condition's last transition.",
+	"message":            "A human readable message indicating details about the transition.",
+}
+
+func (DeploymentCondition) SwaggerDoc() map[string]string {
+	return map_DeploymentCondition
+}
+
+var map_DeploymentList = map[string]string{
+	"":         "DeploymentList is a list of Deployments.",
+	"metadata": "Standard list metadata.",
+	"items":    "Items is the list of Deployments.",
+}
+
+func (DeploymentList) SwaggerDoc() map[string]string {
+	return map_DeploymentList
+}
+
+var map_DeploymentRollback = map[string]string{
+	"":                   "DEPRECATED. DeploymentRollback stores the information required to rollback a deployment.",
+	"name":               "Required: This must match the Name of a deployment.",
+	"updatedAnnotations": "The annotations to be updated to a deployment",
+	"rollbackTo":         "The config of this deployment rollback.",
+}
+
+func (DeploymentRollback) SwaggerDoc() map[string]string {
+	return map_DeploymentRollback
+}
+
+var map_DeploymentSpec = map[string]string{
+	"":                        "DeploymentSpec is the specification of the desired behavior of the Deployment.",
+	"replicas":                "Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.",
+	"selector":                "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment.",
+	"template":                "Template describes the pods that will be created.",
+	"strategy":                "The deployment strategy to use to replace existing pods with new ones.",
+	"minReadySeconds":         "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)",
+	"revisionHistoryLimit":    "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. This is set to the max value of int32 (i.e. 2147483647) by default, which means \"retaining all old RelicaSets\".",
+	"paused":                  "Indicates that the deployment is paused and will not be processed by the deployment controller.",
+	"rollbackTo":              "DEPRECATED. The config this deployment is rolling back to. Will be cleared after rollback is done.",
+	"progressDeadlineSeconds": "The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. This is set to the max value of int32 (i.e. 2147483647) by default, which means \"no deadline\".",
+}
+
+func (DeploymentSpec) SwaggerDoc() map[string]string {
+	return map_DeploymentSpec
+}
+
+var map_DeploymentStatus = map[string]string{
+	"":                    "DeploymentStatus is the most recently observed status of the Deployment.",
+	"observedGeneration":  "The generation observed by the deployment controller.",
+	"replicas":            "Total number of non-terminated pods targeted by this deployment (their labels match the selector).",
+	"updatedReplicas":     "Total number of non-terminated pods targeted by this deployment that have the desired template spec.",
+	"readyReplicas":       "Total number of ready pods targeted by this deployment.",
+	"availableReplicas":   "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.",
+	"unavailableReplicas": "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.",
+	"conditions":          "Represents the latest available observations of a deployment's current state.",
+	"collisionCount":      "Count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.",
+}
+
+func (DeploymentStatus) SwaggerDoc() map[string]string {
+	return map_DeploymentStatus
+}
+
+var map_DeploymentStrategy = map[string]string{
+	"":              "DeploymentStrategy describes how to replace existing pods with new ones.",
+	"type":          "Type of deployment. Can be \"Recreate\" or \"RollingUpdate\". Default is RollingUpdate.",
+	"rollingUpdate": "Rolling update config params. Present only if DeploymentStrategyType = RollingUpdate.",
+}
+
+func (DeploymentStrategy) SwaggerDoc() map[string]string {
+	return map_DeploymentStrategy
+}
+
+var map_FSGroupStrategyOptions = map[string]string{
+	"":       "FSGroupStrategyOptions defines the strategy type and options used to create the strategy. Deprecated: use FSGroupStrategyOptions from policy API Group instead.",
+	"rule":   "rule is the strategy that will dictate what FSGroup is used in the SecurityContext.",
+	"ranges": "ranges are the allowed ranges of fs groups.  If you would like to force a single fs group then supply a single range with the same start and end. Required for MustRunAs.",
+}
+
+func (FSGroupStrategyOptions) SwaggerDoc() map[string]string {
+	return map_FSGroupStrategyOptions
+}
+
+var map_HTTPIngressPath = map[string]string{
+	"":        "HTTPIngressPath associates a path regex with a backend. Incoming urls matching the path are forwarded to the backend.",
+	"path":    "Path is an extended POSIX regex as defined by IEEE Std 1003.1, (i.e this follows the egrep/unix syntax, not the perl syntax) matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/'. If unspecified, the path defaults to a catch all sending traffic to the backend.",
+	"backend": "Backend defines the referenced service endpoint to which the traffic will be forwarded to.",
+}
+
+func (HTTPIngressPath) SwaggerDoc() map[string]string {
+	return map_HTTPIngressPath
+}
+
+var map_HTTPIngressRuleValue = map[string]string{
+	"":      "HTTPIngressRuleValue is a list of http selectors pointing to backends. In the example: http://<host>/<path>?<searchpart> -> backend where where parts of the url correspond to RFC 3986, this resource will be used to match against everything after the last '/' and before the first '?' or '#'.",
+	"paths": "A collection of paths that map requests to backends.",
+}
+
+func (HTTPIngressRuleValue) SwaggerDoc() map[string]string {
+	return map_HTTPIngressRuleValue
+}
+
+var map_HostPortRange = map[string]string{
+	"":    "HostPortRange defines a range of host ports that will be enabled by a policy for pods to use.  It requires both the start and end to be defined. Deprecated: use HostPortRange from policy API Group instead.",
+	"min": "min is the start of the range, inclusive.",
+	"max": "max is the end of the range, inclusive.",
+}
+
+func (HostPortRange) SwaggerDoc() map[string]string {
+	return map_HostPortRange
+}
+
+var map_IDRange = map[string]string{
+	"":    "IDRange provides a min/max of an allowed range of IDs. Deprecated: use IDRange from policy API Group instead.",
+	"min": "min is the start of the range, inclusive.",
+	"max": "max is the end of the range, inclusive.",
+}
+
+func (IDRange) SwaggerDoc() map[string]string {
+	return map_IDRange
+}
+
+var map_IPBlock = map[string]string{
+	"":       "DEPRECATED 1.9 - This group version of IPBlock is deprecated by networking/v1/IPBlock. IPBlock describes a particular CIDR (Ex. \"192.168.1.1/24\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should not be included within this rule.",
+	"cidr":   "CIDR is a string representing the IP Block Valid examples are \"192.168.1.1/24\"",
+	"except": "Except is a slice of CIDRs that should not be included within an IP Block Valid examples are \"192.168.1.1/24\" Except values will be rejected if they are outside the CIDR range",
+}
+
+func (IPBlock) SwaggerDoc() map[string]string {
+	return map_IPBlock
+}
+
+var map_Ingress = map[string]string{
+	"":         "Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend. An Ingress can be configured to give services externally-reachable urls, load balance traffic, terminate SSL, offer name based virtual hosting etc.",
+	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"spec":     "Spec is the desired state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+	"status":   "Status is the current state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+}
+
+func (Ingress) SwaggerDoc() map[string]string {
+	return map_Ingress
+}
+
+var map_IngressBackend = map[string]string{
+	"":            "IngressBackend describes all endpoints for a given service and port.",
+	"serviceName": "Specifies the name of the referenced service.",
+	"servicePort": "Specifies the port of the referenced service.",
+}
+
+func (IngressBackend) SwaggerDoc() map[string]string {
+	return map_IngressBackend
+}
+
+var map_IngressList = map[string]string{
+	"":         "IngressList is a collection of Ingress.",
+	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"items":    "Items is the list of Ingress.",
+}
+
+func (IngressList) SwaggerDoc() map[string]string {
+	return map_IngressList
+}
+
+var map_IngressRule = map[string]string{
+	"":     "IngressRule represents the rules mapping the paths under a specified host to the related backend services. Incoming requests are first evaluated for a host match, then routed to the backend associated with the matching IngressRuleValue.",
+	"host": "Host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \"host\" part of the URI as defined in the RFC: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to the\n\t  IP in the Spec of the parent Ingress.\n2. The `:` delimiter is not respected because ports are not allowed.\n\t  Currently the port of an Ingress is implicitly :80 for http and\n\t  :443 for https.\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.",
+}
+
+func (IngressRule) SwaggerDoc() map[string]string {
+	return map_IngressRule
+}
+
+var map_IngressRuleValue = map[string]string{
+	"": "IngressRuleValue represents a rule to apply against incoming requests. If the rule is satisfied, the request is routed to the specified backend. Currently mixing different types of rules in a single Ingress is disallowed, so exactly one of the following must be set.",
+}
+
+func (IngressRuleValue) SwaggerDoc() map[string]string {
+	return map_IngressRuleValue
+}
+
+var map_IngressSpec = map[string]string{
+	"":        "IngressSpec describes the Ingress the user wishes to exist.",
+	"backend": "A default backend capable of servicing requests that don't match any rule. At least one of 'backend' or 'rules' must be specified. This field is optional to allow the loadbalancer controller or defaulting logic to specify a global default.",
+	"tls":     "TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI.",
+	"rules":   "A list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend.",
+}
+
+func (IngressSpec) SwaggerDoc() map[string]string {
+	return map_IngressSpec
+}
+
+var map_IngressStatus = map[string]string{
+	"":             "IngressStatus describe the current state of the Ingress.",
+	"loadBalancer": "LoadBalancer contains the current status of the load-balancer.",
+}
+
+func (IngressStatus) SwaggerDoc() map[string]string {
+	return map_IngressStatus
+}
+
+var map_IngressTLS = map[string]string{
+	"":           "IngressTLS describes the transport layer security associated with an Ingress.",
+	"hosts":      "Hosts are a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified.",
+	"secretName": "SecretName is the name of the secret used to terminate SSL traffic on 443. Field is left optional to allow SSL routing based on SNI hostname alone. If the SNI host in a listener conflicts with the \"Host\" header field used by an IngressRule, the SNI host is used for termination and value of the Host header is used for routing.",
+}
+
+func (IngressTLS) SwaggerDoc() map[string]string {
+	return map_IngressTLS
+}
+
+var map_NetworkPolicy = map[string]string{
+	"":         "DEPRECATED 1.9 - This group version of NetworkPolicy is deprecated by networking/v1/NetworkPolicy. NetworkPolicy describes what network traffic is allowed for a set of Pods",
+	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"spec":     "Specification of the desired behavior for this NetworkPolicy.",
+}
+
+func (NetworkPolicy) SwaggerDoc() map[string]string {
+	return map_NetworkPolicy
+}
+
+var map_NetworkPolicyEgressRule = map[string]string{
+	"":      "DEPRECATED 1.9 - This group version of NetworkPolicyEgressRule is deprecated by networking/v1/NetworkPolicyEgressRule. NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. This type is beta-level in 1.8",
+	"ports": "List of destination ports for outgoing traffic. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.",
+	"to":    "List of destinations for outgoing traffic of pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all destinations (traffic not restricted by destination). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the to list.",
+}
+
+func (NetworkPolicyEgressRule) SwaggerDoc() map[string]string {
+	return map_NetworkPolicyEgressRule
+}
+
+var map_NetworkPolicyIngressRule = map[string]string{
+	"":      "DEPRECATED 1.9 - This group version of NetworkPolicyIngressRule is deprecated by networking/v1/NetworkPolicyIngressRule. This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from.",
+	"ports": "List of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.",
+	"from":  "List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). If this field is present and contains at least on item, this rule allows traffic only if the traffic matches at least one item in the from list.",
+}
+
+func (NetworkPolicyIngressRule) SwaggerDoc() map[string]string {
+	return map_NetworkPolicyIngressRule
+}
+
+var map_NetworkPolicyList = map[string]string{
+	"":         "DEPRECATED 1.9 - This group version of NetworkPolicyList is deprecated by networking/v1/NetworkPolicyList. Network Policy List is a list of NetworkPolicy objects.",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"items":    "Items is a list of schema objects.",
+}
+
+func (NetworkPolicyList) SwaggerDoc() map[string]string {
+	return map_NetworkPolicyList
+}
+
+var map_NetworkPolicyPeer = map[string]string{
+	"":                  "DEPRECATED 1.9 - This group version of NetworkPolicyPeer is deprecated by networking/v1/NetworkPolicyPeer.",
+	"podSelector":       "This is a label selector which selects Pods. This field follows standard label selector semantics; if present but empty, it selects all pods.\n\nIf NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. Otherwise it selects the Pods matching PodSelector in the policy's own Namespace.",
+	"namespaceSelector": "Selects Namespaces using cluster-scoped labels. This field follows standard label selector semantics; if present but empty, it selects all namespaces.\n\nIf PodSelector is also set, then the NetworkPolicyPeer as a whole selects the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector.",
+	"ipBlock":           "IPBlock defines policy on a particular IPBlock. If this field is set then neither of the other fields can be.",
+}
+
+func (NetworkPolicyPeer) SwaggerDoc() map[string]string {
+	return map_NetworkPolicyPeer
+}
+
+var map_NetworkPolicyPort = map[string]string{
+	"":         "DEPRECATED 1.9 - This group version of NetworkPolicyPort is deprecated by networking/v1/NetworkPolicyPort.",
+	"protocol": "Optional.  The protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this field defaults to TCP.",
+	"port":     "If specified, the port on the given protocol.  This can either be a numerical or named port on a pod.  If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.",
+}
+
+func (NetworkPolicyPort) SwaggerDoc() map[string]string {
+	return map_NetworkPolicyPort
+}
+
+var map_NetworkPolicySpec = map[string]string{
+	"":            "DEPRECATED 1.9 - This group version of NetworkPolicySpec is deprecated by networking/v1/NetworkPolicySpec.",
+	"podSelector": "Selects the pods to which this NetworkPolicy object applies.  The array of ingress rules is applied to any pods selected by this field. Multiple network policies can select the same set of pods.  In this case, the ingress rules for each are combined additively. This field is NOT optional and follows standard label selector semantics. An empty podSelector matches all pods in this namespace.",
+	"ingress":     "List of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default).",
+	"egress":      "List of egress rules to be applied to the selected pods. Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). This field is beta-level in 1.8",
+	"policyTypes": "List of rule types that the NetworkPolicy relates to. Valid options are Ingress, Egress, or Ingress,Egress. If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \"Egress\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \"Egress\" (since such a policy would not include an Egress section and would otherwise default to just [ \"Ingress\" ]). This field is beta-level in 1.8",
+}
+
+func (NetworkPolicySpec) SwaggerDoc() map[string]string {
+	return map_NetworkPolicySpec
+}
+
+var map_PodSecurityPolicy = map[string]string{
+	"":         "PodSecurityPolicy governs the ability to make requests that affect the Security Context that will be applied to a pod and container. Deprecated: use PodSecurityPolicy from policy API Group instead.",
+	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"spec":     "spec defines the policy enforced.",
+}
+
+func (PodSecurityPolicy) SwaggerDoc() map[string]string {
+	return map_PodSecurityPolicy
+}
+
+var map_PodSecurityPolicyList = map[string]string{
+	"":         "PodSecurityPolicyList is a list of PodSecurityPolicy objects. Deprecated: use PodSecurityPolicyList from policy API Group instead.",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"items":    "items is a list of schema objects.",
+}
+
+func (PodSecurityPolicyList) SwaggerDoc() map[string]string {
+	return map_PodSecurityPolicyList
+}
+
+var map_PodSecurityPolicySpec = map[string]string{
+	"":                                "PodSecurityPolicySpec defines the policy enforced. Deprecated: use PodSecurityPolicySpec from policy API Group instead.",
+	"privileged":                      "privileged determines if a pod can request to be run as privileged.",
+	"defaultAddCapabilities":          "defaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability.  You may not list a capability in both defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly allowed, and need not be included in the allowedCapabilities list.",
+	"requiredDropCapabilities":        "requiredDropCapabilities are the capabilities that will be dropped from the container.  These are required to be dropped and cannot be added.",
+	"allowedCapabilities":             "allowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field may be added at the pod author's discretion. You must not list a capability in both allowedCapabilities and requiredDropCapabilities.",
+	"volumes":                         "volumes is a white list of allowed volume plugins. Empty indicates that no volumes may be used. To allow all volumes you may use '*'.",
+	"hostNetwork":                     "hostNetwork determines if the policy allows the use of HostNetwork in the pod spec.",
+	"hostPorts":                       "hostPorts determines which host port ranges are allowed to be exposed.",
+	"hostPID":                         "hostPID determines if the policy allows the use of HostPID in the pod spec.",
+	"hostIPC":                         "hostIPC determines if the policy allows the use of HostIPC in the pod spec.",
+	"seLinux":                         "seLinux is the strategy that will dictate the allowable labels that may be set.",
+	"runAsUser":                       "runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.",
+	"runAsGroup":                      "RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. If this field is omitted, the pod's RunAsGroup can take any value. This field requires the RunAsGroup feature gate to be enabled.",
+	"supplementalGroups":              "supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.",
+	"fsGroup":                         "fsGroup is the strategy that will dictate what fs group is used by the SecurityContext.",
+	"readOnlyRootFilesystem":          "readOnlyRootFilesystem when set to true will force containers to run with a read only root file system.  If the container specifically requests to run with a non-read only root file system the PSP should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to.",
+	"defaultAllowPrivilegeEscalation": "defaultAllowPrivilegeEscalation controls the default setting for whether a process can gain more privileges than its parent process.",
+	"allowPrivilegeEscalation":        "allowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true.",
+	"allowedHostPaths":                "allowedHostPaths is a white list of allowed host paths. Empty indicates that all host paths may be used.",
+	"allowedFlexVolumes":              "allowedFlexVolumes is a whitelist of allowed Flexvolumes.  Empty or nil indicates that all Flexvolumes may be used.  This parameter is effective only when the usage of the Flexvolumes is allowed in the \"volumes\" field.",
+	"allowedUnsafeSysctls":            "allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection.\n\nExamples: e.g. \"foo/*\" allows \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" allows \"foo.bar\", \"foo.baz\", etc.",
+	"forbiddenSysctls":                "forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.\n\nExamples: e.g. \"foo/*\" forbids \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" forbids \"foo.bar\", \"foo.baz\", etc.",
+	"allowedProcMountTypes":           "AllowedProcMountTypes is a whitelist of allowed ProcMountTypes. Empty or nil indicates that only the DefaultProcMountType may be used. This requires the ProcMountType feature flag to be enabled.",
+}
+
+func (PodSecurityPolicySpec) SwaggerDoc() map[string]string {
+	return map_PodSecurityPolicySpec
+}
+
+var map_ReplicaSet = map[string]string{
+	"":         "DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. See the release notes for more information. ReplicaSet ensures that a specified number of pod replicas are running at any given time.",
+	"metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"spec":     "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+	"status":   "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+}
+
+func (ReplicaSet) SwaggerDoc() map[string]string {
+	return map_ReplicaSet
+}
+
+var map_ReplicaSetCondition = map[string]string{
+	"":                   "ReplicaSetCondition describes the state of a replica set at a certain point.",
+	"type":               "Type of replica set condition.",
+	"status":             "Status of the condition, one of True, False, Unknown.",
+	"lastTransitionTime": "The last time the condition transitioned from one status to another.",
+	"reason":             "The reason for the condition's last transition.",
+	"message":            "A human readable message indicating details about the transition.",
+}
+
+func (ReplicaSetCondition) SwaggerDoc() map[string]string {
+	return map_ReplicaSetCondition
+}
+
+var map_ReplicaSetList = map[string]string{
+	"":         "ReplicaSetList is a collection of ReplicaSets.",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
+	"items":    "List of ReplicaSets. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller",
+}
+
+func (ReplicaSetList) SwaggerDoc() map[string]string {
+	return map_ReplicaSetList
+}
+
+var map_ReplicaSetSpec = map[string]string{
+	"":                "ReplicaSetSpec is the specification of a ReplicaSet.",
+	"replicas":        "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller",
+	"minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)",
+	"selector":        "Selector is a label query over pods that should match the replica count. If the selector is empty, it is defaulted to the labels present on the pod template. Label keys and values that must match in order to be controlled by this replica set. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
+	"template":        "Template is the object that describes the pod that will be created if insufficient replicas are detected. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template",
+}
+
+func (ReplicaSetSpec) SwaggerDoc() map[string]string {
+	return map_ReplicaSetSpec
+}
+
+var map_ReplicaSetStatus = map[string]string{
+	"":                     "ReplicaSetStatus represents the current status of a ReplicaSet.",
+	"replicas":             "Replicas is the most recently oberved number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller",
+	"fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replicaset.",
+	"readyReplicas":        "The number of ready replicas for this replica set.",
+	"availableReplicas":    "The number of available replicas (ready for at least minReadySeconds) for this replica set.",
+	"observedGeneration":   "ObservedGeneration reflects the generation of the most recently observed ReplicaSet.",
+	"conditions":           "Represents the latest available observations of a replica set's current state.",
+}
+
+func (ReplicaSetStatus) SwaggerDoc() map[string]string {
+	return map_ReplicaSetStatus
+}
+
+var map_ReplicationControllerDummy = map[string]string{
+	"": "Dummy definition",
+}
+
+func (ReplicationControllerDummy) SwaggerDoc() map[string]string {
+	return map_ReplicationControllerDummy
+}
+
+var map_RollbackConfig = map[string]string{
+	"":         "DEPRECATED.",
+	"revision": "The revision to rollback to. If set to 0, rollback to the last revision.",
+}
+
+func (RollbackConfig) SwaggerDoc() map[string]string {
+	return map_RollbackConfig
+}
+
+var map_RollingUpdateDaemonSet = map[string]string{
+	"":               "Spec to control the desired behavior of daemon set rolling update.",
+	"maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0. Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.",
+}
+
+func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string {
+	return map_RollingUpdateDaemonSet
+}
+
+var map_RollingUpdateDeployment = map[string]string{
+	"":               "Spec to control the desired behavior of rolling update.",
+	"maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. By default, a fixed value of 1 is used. Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old RC can be scaled down further, followed by scaling up the new RC, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.",
+	"maxSurge":       "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. By default, a value of 1 is used. Example: when this is set to 30%, the new RC can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new RC can be scaled up further, ensuring that total number of pods running at any time during the update is atmost 130% of desired pods.",
+}
+
+func (RollingUpdateDeployment) SwaggerDoc() map[string]string {
+	return map_RollingUpdateDeployment
+}
+
+var map_RunAsGroupStrategyOptions = map[string]string{
+	"":       "RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. Deprecated: use RunAsGroupStrategyOptions from policy API Group instead.",
+	"rule":   "rule is the strategy that will dictate the allowable RunAsGroup values that may be set.",
+	"ranges": "ranges are the allowed ranges of gids that may be used. If you would like to force a single gid then supply a single range with the same start and end. Required for MustRunAs.",
+}
+
+func (RunAsGroupStrategyOptions) SwaggerDoc() map[string]string {
+	return map_RunAsGroupStrategyOptions
+}
+
+var map_RunAsUserStrategyOptions = map[string]string{
+	"":       "RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. Deprecated: use RunAsUserStrategyOptions from policy API Group instead.",
+	"rule":   "rule is the strategy that will dictate the allowable RunAsUser values that may be set.",
+	"ranges": "ranges are the allowed ranges of uids that may be used. If you would like to force a single uid then supply a single range with the same start and end. Required for MustRunAs.",
+}
+
+func (RunAsUserStrategyOptions) SwaggerDoc() map[string]string {
+	return map_RunAsUserStrategyOptions
+}
+
+var map_SELinuxStrategyOptions = map[string]string{
+	"":               "SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. Deprecated: use SELinuxStrategyOptions from policy API Group instead.",
+	"rule":           "rule is the strategy that will dictate the allowable labels that may be set.",
+	"seLinuxOptions": "seLinuxOptions required to run as; required for MustRunAs More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/",
+}
+
+func (SELinuxStrategyOptions) SwaggerDoc() map[string]string {
+	return map_SELinuxStrategyOptions
+}
+
+var map_Scale = map[string]string{
+	"":         "represents a scaling request for a resource.",
+	"metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.",
+	"spec":     "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.",
+	"status":   "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only.",
+}
+
+func (Scale) SwaggerDoc() map[string]string {
+	return map_Scale
+}
+
+var map_ScaleSpec = map[string]string{
+	"":         "describes the attributes of a scale subresource",
+	"replicas": "desired number of instances for the scaled object.",
+}
+
+func (ScaleSpec) SwaggerDoc() map[string]string {
+	return map_ScaleSpec
+}
+
+var map_ScaleStatus = map[string]string{
+	"":               "represents the current status of a scale subresource.",
+	"replicas":       "actual number of observed instances of the scaled object.",
+	"selector":       "label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors",
+	"targetSelector": "label selector for pods that should match the replicas count. This is a serializated version of both map-based and more expressive set-based selectors. This is done to avoid introspection in the clients. The string will be in the same format as the query-param syntax. If the target type only supports map-based selectors, both this field and map-based selector field are populated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
+}
+
+func (ScaleStatus) SwaggerDoc() map[string]string {
+	return map_ScaleStatus
+}
+
+var map_SupplementalGroupsStrategyOptions = map[string]string{
+	"":       "SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. Deprecated: use SupplementalGroupsStrategyOptions from policy API Group instead.",
+	"rule":   "rule is the strategy that will dictate what supplemental groups is used in the SecurityContext.",
+	"ranges": "ranges are the allowed ranges of supplemental groups.  If you would like to force a single supplemental group then supply a single range with the same start and end. Required for MustRunAs.",
+}
+
+func (SupplementalGroupsStrategyOptions) SwaggerDoc() map[string]string {
+	return map_SupplementalGroupsStrategyOptions
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
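
The SwaggerDoc maps above are keyed by JSON field name, with the empty key
holding the type's own description. A minimal sketch of how a documentation
tool might read them (the main package is illustrative, not part of the
vendored code):

    package main

    import (
    	"fmt"

    	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
    )

    func main() {
    	// SwaggerDoc returns field-name -> description; the "" key
    	// documents the ScaleStatus type itself.
    	docs := extensionsv1beta1.ScaleStatus{}.SwaggerDoc()
    	fmt.Println(docs[""])         // type-level description
    	fmt.Println(docs["replicas"]) // per-field description
    }
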
diff --git a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..8128c07
--- /dev/null
+++ b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go
@@ -0,0 +1,1445 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	corev1 "k8s.io/api/core/v1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	intstr "k8s.io/apimachinery/pkg/util/intstr"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AllowedFlexVolume) DeepCopyInto(out *AllowedFlexVolume) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedFlexVolume.
+func (in *AllowedFlexVolume) DeepCopy() *AllowedFlexVolume {
+	if in == nil {
+		return nil
+	}
+	out := new(AllowedFlexVolume)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AllowedHostPath) DeepCopyInto(out *AllowedHostPath) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedHostPath.
+func (in *AllowedHostPath) DeepCopy() *AllowedHostPath {
+	if in == nil {
+		return nil
+	}
+	out := new(AllowedHostPath)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DaemonSet) DeepCopyInto(out *DaemonSet) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSet.
+func (in *DaemonSet) DeepCopy() *DaemonSet {
+	if in == nil {
+		return nil
+	}
+	out := new(DaemonSet)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DaemonSet) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DaemonSetCondition) DeepCopyInto(out *DaemonSetCondition) {
+	*out = *in
+	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetCondition.
+func (in *DaemonSetCondition) DeepCopy() *DaemonSetCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(DaemonSetCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DaemonSetList) DeepCopyInto(out *DaemonSetList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]DaemonSet, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetList.
+func (in *DaemonSetList) DeepCopy() *DaemonSetList {
+	if in == nil {
+		return nil
+	}
+	out := new(DaemonSetList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DaemonSetList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DaemonSetSpec) DeepCopyInto(out *DaemonSetSpec) {
+	*out = *in
+	if in.Selector != nil {
+		in, out := &in.Selector, &out.Selector
+		*out = new(v1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	in.Template.DeepCopyInto(&out.Template)
+	in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy)
+	if in.RevisionHistoryLimit != nil {
+		in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit
+		*out = new(int32)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetSpec.
+func (in *DaemonSetSpec) DeepCopy() *DaemonSetSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(DaemonSetSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DaemonSetStatus) DeepCopyInto(out *DaemonSetStatus) {
+	*out = *in
+	if in.CollisionCount != nil {
+		in, out := &in.CollisionCount, &out.CollisionCount
+		*out = new(int32)
+		**out = **in
+	}
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]DaemonSetCondition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetStatus.
+func (in *DaemonSetStatus) DeepCopy() *DaemonSetStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(DaemonSetStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DaemonSetUpdateStrategy) DeepCopyInto(out *DaemonSetUpdateStrategy) {
+	*out = *in
+	if in.RollingUpdate != nil {
+		in, out := &in.RollingUpdate, &out.RollingUpdate
+		*out = new(RollingUpdateDaemonSet)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetUpdateStrategy.
+func (in *DaemonSetUpdateStrategy) DeepCopy() *DaemonSetUpdateStrategy {
+	if in == nil {
+		return nil
+	}
+	out := new(DaemonSetUpdateStrategy)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Deployment) DeepCopyInto(out *Deployment) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Deployment.
+func (in *Deployment) DeepCopy() *Deployment {
+	if in == nil {
+		return nil
+	}
+	out := new(Deployment)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Deployment) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeploymentCondition) DeepCopyInto(out *DeploymentCondition) {
+	*out = *in
+	in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime)
+	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentCondition.
+func (in *DeploymentCondition) DeepCopy() *DeploymentCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(DeploymentCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeploymentList) DeepCopyInto(out *DeploymentList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Deployment, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentList.
+func (in *DeploymentList) DeepCopy() *DeploymentList {
+	if in == nil {
+		return nil
+	}
+	out := new(DeploymentList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DeploymentList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeploymentRollback) DeepCopyInto(out *DeploymentRollback) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.UpdatedAnnotations != nil {
+		in, out := &in.UpdatedAnnotations, &out.UpdatedAnnotations
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	out.RollbackTo = in.RollbackTo
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentRollback.
+func (in *DeploymentRollback) DeepCopy() *DeploymentRollback {
+	if in == nil {
+		return nil
+	}
+	out := new(DeploymentRollback)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DeploymentRollback) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeploymentSpec) DeepCopyInto(out *DeploymentSpec) {
+	*out = *in
+	if in.Replicas != nil {
+		in, out := &in.Replicas, &out.Replicas
+		*out = new(int32)
+		**out = **in
+	}
+	if in.Selector != nil {
+		in, out := &in.Selector, &out.Selector
+		*out = new(v1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	in.Template.DeepCopyInto(&out.Template)
+	in.Strategy.DeepCopyInto(&out.Strategy)
+	if in.RevisionHistoryLimit != nil {
+		in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit
+		*out = new(int32)
+		**out = **in
+	}
+	if in.RollbackTo != nil {
+		in, out := &in.RollbackTo, &out.RollbackTo
+		*out = new(RollbackConfig)
+		**out = **in
+	}
+	if in.ProgressDeadlineSeconds != nil {
+		in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds
+		*out = new(int32)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentSpec.
+func (in *DeploymentSpec) DeepCopy() *DeploymentSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(DeploymentSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeploymentStatus) DeepCopyInto(out *DeploymentStatus) {
+	*out = *in
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]DeploymentCondition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.CollisionCount != nil {
+		in, out := &in.CollisionCount, &out.CollisionCount
+		*out = new(int32)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStatus.
+func (in *DeploymentStatus) DeepCopy() *DeploymentStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(DeploymentStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeploymentStrategy) DeepCopyInto(out *DeploymentStrategy) {
+	*out = *in
+	if in.RollingUpdate != nil {
+		in, out := &in.RollingUpdate, &out.RollingUpdate
+		*out = new(RollingUpdateDeployment)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStrategy.
+func (in *DeploymentStrategy) DeepCopy() *DeploymentStrategy {
+	if in == nil {
+		return nil
+	}
+	out := new(DeploymentStrategy)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FSGroupStrategyOptions) DeepCopyInto(out *FSGroupStrategyOptions) {
+	*out = *in
+	if in.Ranges != nil {
+		in, out := &in.Ranges, &out.Ranges
+		*out = make([]IDRange, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FSGroupStrategyOptions.
+func (in *FSGroupStrategyOptions) DeepCopy() *FSGroupStrategyOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(FSGroupStrategyOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HTTPIngressPath) DeepCopyInto(out *HTTPIngressPath) {
+	*out = *in
+	out.Backend = in.Backend
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPIngressPath.
+func (in *HTTPIngressPath) DeepCopy() *HTTPIngressPath {
+	if in == nil {
+		return nil
+	}
+	out := new(HTTPIngressPath)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HTTPIngressRuleValue) DeepCopyInto(out *HTTPIngressRuleValue) {
+	*out = *in
+	if in.Paths != nil {
+		in, out := &in.Paths, &out.Paths
+		*out = make([]HTTPIngressPath, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPIngressRuleValue.
+func (in *HTTPIngressRuleValue) DeepCopy() *HTTPIngressRuleValue {
+	if in == nil {
+		return nil
+	}
+	out := new(HTTPIngressRuleValue)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HostPortRange) DeepCopyInto(out *HostPortRange) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPortRange.
+func (in *HostPortRange) DeepCopy() *HostPortRange {
+	if in == nil {
+		return nil
+	}
+	out := new(HostPortRange)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IDRange) DeepCopyInto(out *IDRange) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IDRange.
+func (in *IDRange) DeepCopy() *IDRange {
+	if in == nil {
+		return nil
+	}
+	out := new(IDRange)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IPBlock) DeepCopyInto(out *IPBlock) {
+	*out = *in
+	if in.Except != nil {
+		in, out := &in.Except, &out.Except
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPBlock.
+func (in *IPBlock) DeepCopy() *IPBlock {
+	if in == nil {
+		return nil
+	}
+	out := new(IPBlock)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Ingress) DeepCopyInto(out *Ingress) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Ingress.
+func (in *Ingress) DeepCopy() *Ingress {
+	if in == nil {
+		return nil
+	}
+	out := new(Ingress)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Ingress) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressBackend) DeepCopyInto(out *IngressBackend) {
+	*out = *in
+	out.ServicePort = in.ServicePort
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressBackend.
+func (in *IngressBackend) DeepCopy() *IngressBackend {
+	if in == nil {
+		return nil
+	}
+	out := new(IngressBackend)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressList) DeepCopyInto(out *IngressList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Ingress, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressList.
+func (in *IngressList) DeepCopy() *IngressList {
+	if in == nil {
+		return nil
+	}
+	out := new(IngressList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *IngressList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressRule) DeepCopyInto(out *IngressRule) {
+	*out = *in
+	in.IngressRuleValue.DeepCopyInto(&out.IngressRuleValue)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressRule.
+func (in *IngressRule) DeepCopy() *IngressRule {
+	if in == nil {
+		return nil
+	}
+	out := new(IngressRule)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressRuleValue) DeepCopyInto(out *IngressRuleValue) {
+	*out = *in
+	if in.HTTP != nil {
+		in, out := &in.HTTP, &out.HTTP
+		*out = new(HTTPIngressRuleValue)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressRuleValue.
+func (in *IngressRuleValue) DeepCopy() *IngressRuleValue {
+	if in == nil {
+		return nil
+	}
+	out := new(IngressRuleValue)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressSpec) DeepCopyInto(out *IngressSpec) {
+	*out = *in
+	if in.Backend != nil {
+		in, out := &in.Backend, &out.Backend
+		*out = new(IngressBackend)
+		**out = **in
+	}
+	if in.TLS != nil {
+		in, out := &in.TLS, &out.TLS
+		*out = make([]IngressTLS, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Rules != nil {
+		in, out := &in.Rules, &out.Rules
+		*out = make([]IngressRule, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressSpec.
+func (in *IngressSpec) DeepCopy() *IngressSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(IngressSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressStatus) DeepCopyInto(out *IngressStatus) {
+	*out = *in
+	in.LoadBalancer.DeepCopyInto(&out.LoadBalancer)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressStatus.
+func (in *IngressStatus) DeepCopy() *IngressStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(IngressStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressTLS) DeepCopyInto(out *IngressTLS) {
+	*out = *in
+	if in.Hosts != nil {
+		in, out := &in.Hosts, &out.Hosts
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressTLS.
+func (in *IngressTLS) DeepCopy() *IngressTLS {
+	if in == nil {
+		return nil
+	}
+	out := new(IngressTLS)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetworkPolicy) DeepCopyInto(out *NetworkPolicy) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicy.
+func (in *NetworkPolicy) DeepCopy() *NetworkPolicy {
+	if in == nil {
+		return nil
+	}
+	out := new(NetworkPolicy)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *NetworkPolicy) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetworkPolicyEgressRule) DeepCopyInto(out *NetworkPolicyEgressRule) {
+	*out = *in
+	if in.Ports != nil {
+		in, out := &in.Ports, &out.Ports
+		*out = make([]NetworkPolicyPort, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.To != nil {
+		in, out := &in.To, &out.To
+		*out = make([]NetworkPolicyPeer, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyEgressRule.
+func (in *NetworkPolicyEgressRule) DeepCopy() *NetworkPolicyEgressRule {
+	if in == nil {
+		return nil
+	}
+	out := new(NetworkPolicyEgressRule)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetworkPolicyIngressRule) DeepCopyInto(out *NetworkPolicyIngressRule) {
+	*out = *in
+	if in.Ports != nil {
+		in, out := &in.Ports, &out.Ports
+		*out = make([]NetworkPolicyPort, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.From != nil {
+		in, out := &in.From, &out.From
+		*out = make([]NetworkPolicyPeer, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyIngressRule.
+func (in *NetworkPolicyIngressRule) DeepCopy() *NetworkPolicyIngressRule {
+	if in == nil {
+		return nil
+	}
+	out := new(NetworkPolicyIngressRule)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetworkPolicyList) DeepCopyInto(out *NetworkPolicyList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]NetworkPolicy, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyList.
+func (in *NetworkPolicyList) DeepCopy() *NetworkPolicyList {
+	if in == nil {
+		return nil
+	}
+	out := new(NetworkPolicyList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *NetworkPolicyList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetworkPolicyPeer) DeepCopyInto(out *NetworkPolicyPeer) {
+	*out = *in
+	if in.PodSelector != nil {
+		in, out := &in.PodSelector, &out.PodSelector
+		*out = new(v1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.NamespaceSelector != nil {
+		in, out := &in.NamespaceSelector, &out.NamespaceSelector
+		*out = new(v1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.IPBlock != nil {
+		in, out := &in.IPBlock, &out.IPBlock
+		*out = new(IPBlock)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyPeer.
+func (in *NetworkPolicyPeer) DeepCopy() *NetworkPolicyPeer {
+	if in == nil {
+		return nil
+	}
+	out := new(NetworkPolicyPeer)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetworkPolicyPort) DeepCopyInto(out *NetworkPolicyPort) {
+	*out = *in
+	if in.Protocol != nil {
+		in, out := &in.Protocol, &out.Protocol
+		*out = new(corev1.Protocol)
+		**out = **in
+	}
+	if in.Port != nil {
+		in, out := &in.Port, &out.Port
+		*out = new(intstr.IntOrString)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyPort.
+func (in *NetworkPolicyPort) DeepCopy() *NetworkPolicyPort {
+	if in == nil {
+		return nil
+	}
+	out := new(NetworkPolicyPort)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetworkPolicySpec) DeepCopyInto(out *NetworkPolicySpec) {
+	*out = *in
+	in.PodSelector.DeepCopyInto(&out.PodSelector)
+	if in.Ingress != nil {
+		in, out := &in.Ingress, &out.Ingress
+		*out = make([]NetworkPolicyIngressRule, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Egress != nil {
+		in, out := &in.Egress, &out.Egress
+		*out = make([]NetworkPolicyEgressRule, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.PolicyTypes != nil {
+		in, out := &in.PolicyTypes, &out.PolicyTypes
+		*out = make([]PolicyType, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicySpec.
+func (in *NetworkPolicySpec) DeepCopy() *NetworkPolicySpec {
+	if in == nil {
+		return nil
+	}
+	out := new(NetworkPolicySpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodSecurityPolicy) DeepCopyInto(out *PodSecurityPolicy) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicy.
+func (in *PodSecurityPolicy) DeepCopy() *PodSecurityPolicy {
+	if in == nil {
+		return nil
+	}
+	out := new(PodSecurityPolicy)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodSecurityPolicy) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodSecurityPolicyList) DeepCopyInto(out *PodSecurityPolicyList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]PodSecurityPolicy, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicyList.
+func (in *PodSecurityPolicyList) DeepCopy() *PodSecurityPolicyList {
+	if in == nil {
+		return nil
+	}
+	out := new(PodSecurityPolicyList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodSecurityPolicyList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) {
+	*out = *in
+	if in.DefaultAddCapabilities != nil {
+		in, out := &in.DefaultAddCapabilities, &out.DefaultAddCapabilities
+		*out = make([]corev1.Capability, len(*in))
+		copy(*out, *in)
+	}
+	if in.RequiredDropCapabilities != nil {
+		in, out := &in.RequiredDropCapabilities, &out.RequiredDropCapabilities
+		*out = make([]corev1.Capability, len(*in))
+		copy(*out, *in)
+	}
+	if in.AllowedCapabilities != nil {
+		in, out := &in.AllowedCapabilities, &out.AllowedCapabilities
+		*out = make([]corev1.Capability, len(*in))
+		copy(*out, *in)
+	}
+	if in.Volumes != nil {
+		in, out := &in.Volumes, &out.Volumes
+		*out = make([]FSType, len(*in))
+		copy(*out, *in)
+	}
+	if in.HostPorts != nil {
+		in, out := &in.HostPorts, &out.HostPorts
+		*out = make([]HostPortRange, len(*in))
+		copy(*out, *in)
+	}
+	in.SELinux.DeepCopyInto(&out.SELinux)
+	in.RunAsUser.DeepCopyInto(&out.RunAsUser)
+	if in.RunAsGroup != nil {
+		in, out := &in.RunAsGroup, &out.RunAsGroup
+		*out = new(RunAsGroupStrategyOptions)
+		(*in).DeepCopyInto(*out)
+	}
+	in.SupplementalGroups.DeepCopyInto(&out.SupplementalGroups)
+	in.FSGroup.DeepCopyInto(&out.FSGroup)
+	if in.DefaultAllowPrivilegeEscalation != nil {
+		in, out := &in.DefaultAllowPrivilegeEscalation, &out.DefaultAllowPrivilegeEscalation
+		*out = new(bool)
+		**out = **in
+	}
+	if in.AllowPrivilegeEscalation != nil {
+		in, out := &in.AllowPrivilegeEscalation, &out.AllowPrivilegeEscalation
+		*out = new(bool)
+		**out = **in
+	}
+	if in.AllowedHostPaths != nil {
+		in, out := &in.AllowedHostPaths, &out.AllowedHostPaths
+		*out = make([]AllowedHostPath, len(*in))
+		copy(*out, *in)
+	}
+	if in.AllowedFlexVolumes != nil {
+		in, out := &in.AllowedFlexVolumes, &out.AllowedFlexVolumes
+		*out = make([]AllowedFlexVolume, len(*in))
+		copy(*out, *in)
+	}
+	if in.AllowedUnsafeSysctls != nil {
+		in, out := &in.AllowedUnsafeSysctls, &out.AllowedUnsafeSysctls
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.ForbiddenSysctls != nil {
+		in, out := &in.ForbiddenSysctls, &out.ForbiddenSysctls
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.AllowedProcMountTypes != nil {
+		in, out := &in.AllowedProcMountTypes, &out.AllowedProcMountTypes
+		*out = make([]corev1.ProcMountType, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicySpec.
+func (in *PodSecurityPolicySpec) DeepCopy() *PodSecurityPolicySpec {
+	if in == nil {
+		return nil
+	}
+	out := new(PodSecurityPolicySpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplicaSet) DeepCopyInto(out *ReplicaSet) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSet.
+func (in *ReplicaSet) DeepCopy() *ReplicaSet {
+	if in == nil {
+		return nil
+	}
+	out := new(ReplicaSet)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ReplicaSet) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplicaSetCondition) DeepCopyInto(out *ReplicaSetCondition) {
+	*out = *in
+	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetCondition.
+func (in *ReplicaSetCondition) DeepCopy() *ReplicaSetCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(ReplicaSetCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplicaSetList) DeepCopyInto(out *ReplicaSetList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]ReplicaSet, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetList.
+func (in *ReplicaSetList) DeepCopy() *ReplicaSetList {
+	if in == nil {
+		return nil
+	}
+	out := new(ReplicaSetList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ReplicaSetList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplicaSetSpec) DeepCopyInto(out *ReplicaSetSpec) {
+	*out = *in
+	if in.Replicas != nil {
+		in, out := &in.Replicas, &out.Replicas
+		*out = new(int32)
+		**out = **in
+	}
+	if in.Selector != nil {
+		in, out := &in.Selector, &out.Selector
+		*out = new(v1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	in.Template.DeepCopyInto(&out.Template)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetSpec.
+func (in *ReplicaSetSpec) DeepCopy() *ReplicaSetSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ReplicaSetSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplicaSetStatus) DeepCopyInto(out *ReplicaSetStatus) {
+	*out = *in
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]ReplicaSetCondition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetStatus.
+func (in *ReplicaSetStatus) DeepCopy() *ReplicaSetStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ReplicaSetStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplicationControllerDummy) DeepCopyInto(out *ReplicationControllerDummy) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerDummy.
+func (in *ReplicationControllerDummy) DeepCopy() *ReplicationControllerDummy {
+	if in == nil {
+		return nil
+	}
+	out := new(ReplicationControllerDummy)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ReplicationControllerDummy) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RollbackConfig) DeepCopyInto(out *RollbackConfig) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollbackConfig.
+func (in *RollbackConfig) DeepCopy() *RollbackConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(RollbackConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RollingUpdateDaemonSet) DeepCopyInto(out *RollingUpdateDaemonSet) {
+	*out = *in
+	if in.MaxUnavailable != nil {
+		in, out := &in.MaxUnavailable, &out.MaxUnavailable
+		*out = new(intstr.IntOrString)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDaemonSet.
+func (in *RollingUpdateDaemonSet) DeepCopy() *RollingUpdateDaemonSet {
+	if in == nil {
+		return nil
+	}
+	out := new(RollingUpdateDaemonSet)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RollingUpdateDeployment) DeepCopyInto(out *RollingUpdateDeployment) {
+	*out = *in
+	if in.MaxUnavailable != nil {
+		in, out := &in.MaxUnavailable, &out.MaxUnavailable
+		*out = new(intstr.IntOrString)
+		**out = **in
+	}
+	if in.MaxSurge != nil {
+		in, out := &in.MaxSurge, &out.MaxSurge
+		*out = new(intstr.IntOrString)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDeployment.
+func (in *RollingUpdateDeployment) DeepCopy() *RollingUpdateDeployment {
+	if in == nil {
+		return nil
+	}
+	out := new(RollingUpdateDeployment)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RunAsGroupStrategyOptions) DeepCopyInto(out *RunAsGroupStrategyOptions) {
+	*out = *in
+	if in.Ranges != nil {
+		in, out := &in.Ranges, &out.Ranges
+		*out = make([]IDRange, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsGroupStrategyOptions.
+func (in *RunAsGroupStrategyOptions) DeepCopy() *RunAsGroupStrategyOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(RunAsGroupStrategyOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RunAsUserStrategyOptions) DeepCopyInto(out *RunAsUserStrategyOptions) {
+	*out = *in
+	if in.Ranges != nil {
+		in, out := &in.Ranges, &out.Ranges
+		*out = make([]IDRange, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsUserStrategyOptions.
+func (in *RunAsUserStrategyOptions) DeepCopy() *RunAsUserStrategyOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(RunAsUserStrategyOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SELinuxStrategyOptions) DeepCopyInto(out *SELinuxStrategyOptions) {
+	*out = *in
+	if in.SELinuxOptions != nil {
+		in, out := &in.SELinuxOptions, &out.SELinuxOptions
+		*out = new(corev1.SELinuxOptions)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SELinuxStrategyOptions.
+func (in *SELinuxStrategyOptions) DeepCopy() *SELinuxStrategyOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(SELinuxStrategyOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Scale) DeepCopyInto(out *Scale) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	out.Spec = in.Spec
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scale.
+func (in *Scale) DeepCopy() *Scale {
+	if in == nil {
+		return nil
+	}
+	out := new(Scale)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Scale) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ScaleSpec) DeepCopyInto(out *ScaleSpec) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleSpec.
+func (in *ScaleSpec) DeepCopy() *ScaleSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ScaleSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ScaleStatus) DeepCopyInto(out *ScaleStatus) {
+	*out = *in
+	if in.Selector != nil {
+		in, out := &in.Selector, &out.Selector
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleStatus.
+func (in *ScaleStatus) DeepCopy() *ScaleStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ScaleStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SupplementalGroupsStrategyOptions) DeepCopyInto(out *SupplementalGroupsStrategyOptions) {
+	*out = *in
+	if in.Ranges != nil {
+		in, out := &in.Ranges, &out.Ranges
+		*out = make([]IDRange, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SupplementalGroupsStrategyOptions.
+func (in *SupplementalGroupsStrategyOptions) DeepCopy() *SupplementalGroupsStrategyOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(SupplementalGroupsStrategyOptions)
+	in.DeepCopyInto(out)
+	return out
+}
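
The value of these generated helpers is aliasing safety: DeepCopyInto clones
nested pointers, slices, and maps instead of sharing them. A minimal sketch
(illustrative, not part of the vendored code):

    package main

    import (
    	"fmt"

    	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
    )

    func main() {
    	orig := &extensionsv1beta1.IPBlock{
    		CIDR:   "10.0.0.0/8",
    		Except: []string{"10.0.1.0/24"},
    	}
    	// DeepCopy clones the Except slice, so mutating the copy leaves
    	// the original untouched; plain struct assignment would share it.
    	cp := orig.DeepCopy()
    	cp.Except[0] = "10.0.2.0/24"
    	fmt.Println(orig.Except[0]) // still "10.0.1.0/24"
    }
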
diff --git a/vendor/k8s.io/api/networking/v1/doc.go b/vendor/k8s.io/api/networking/v1/doc.go
new file mode 100644
index 0000000..887c366
--- /dev/null
+++ b/vendor/k8s.io/api/networking/v1/doc.go
@@ -0,0 +1,21 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:openapi-gen=true
+// +groupName=networking.k8s.io
+
+package v1 // import "k8s.io/api/networking/v1"
diff --git a/vendor/k8s.io/api/networking/v1/generated.proto b/vendor/k8s.io/api/networking/v1/generated.proto
new file mode 100644
index 0000000..ab3731e
--- /dev/null
+++ b/vendor/k8s.io/api/networking/v1/generated.proto
@@ -0,0 +1,195 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.api.networking.v1;
+
+import "k8s.io/api/core/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+import "k8s.io/apimachinery/pkg/util/intstr/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1";
+
+// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24") that is allowed to the pods
+// matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should
+// not be included within this rule.
+message IPBlock {
+  // CIDR is a string representing the IP Block.
+  // A valid example is "192.168.1.1/24".
+  optional string cidr = 1;
+
+  // Except is a slice of CIDRs that should not be included within an IP Block.
+  // A valid example is "192.168.1.1/24".
+  // Except values will be rejected if they are outside the CIDR range.
+  // +optional
+  repeated string except = 2;
+}
+
+// NetworkPolicy describes what network traffic is allowed for a set of Pods
+message NetworkPolicy {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Specification of the desired behavior for this NetworkPolicy.
+  // +optional
+  optional NetworkPolicySpec spec = 2;
+}
+
+// NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods
+// matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to.
+// This type is beta-level in 1.8
+message NetworkPolicyEgressRule {
+  // List of destination ports for outgoing traffic.
+  // Each item in this list is combined using a logical OR. If this field is
+  // empty or missing, this rule matches all ports (traffic not restricted by port).
+  // If this field is present and contains at least one item, then this rule allows
+  // traffic only if the traffic matches at least one port in the list.
+  // +optional
+  repeated NetworkPolicyPort ports = 1;
+
+  // List of destinations for outgoing traffic of pods selected for this rule.
+  // Items in this list are combined using a logical OR operation. If this field is
+  // empty or missing, this rule matches all destinations (traffic not restricted by
+  // destination). If this field is present and contains at least one item, this rule
+  // allows traffic only if the traffic matches at least one item in the to list.
+  // +optional
+  repeated NetworkPolicyPeer to = 2;
+}
+
+// NetworkPolicyIngressRule describes a particular set of traffic that is allowed to the pods
+// matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and from.
+message NetworkPolicyIngressRule {
+  // List of ports which should be made accessible on the pods selected for this
+  // rule. Each item in this list is combined using a logical OR. If this field is
+  // empty or missing, this rule matches all ports (traffic not restricted by port).
+  // If this field is present and contains at least one item, then this rule allows
+  // traffic only if the traffic matches at least one port in the list.
+  // +optional
+  repeated NetworkPolicyPort ports = 1;
+
+  // List of sources which should be able to access the pods selected for this rule.
+  // Items in this list are combined using a logical OR operation. If this field is
+  // empty or missing, this rule matches all sources (traffic not restricted by
+  // source). If this field is present and contains at least one item, this rule
+  // allows traffic only if the traffic matches at least one item in the from list.
+  // +optional
+  repeated NetworkPolicyPeer from = 2;
+}
+
+// NetworkPolicyList is a list of NetworkPolicy objects.
+message NetworkPolicyList {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is a list of schema objects.
+  repeated NetworkPolicy items = 2;
+}
+
+// NetworkPolicyPeer describes a peer to allow traffic from. Only certain combinations of
+// fields are allowed.
+message NetworkPolicyPeer {
+  // This is a label selector which selects Pods. This field follows standard label
+  // selector semantics; if present but empty, it selects all pods.
+  //
+  // If NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects
+  // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector.
+  // Otherwise it selects the Pods matching PodSelector in the policy's own Namespace.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1;
+
+  // Selects Namespaces using cluster-scoped labels. This field follows standard label
+  // selector semantics; if present but empty, it selects all namespaces.
+  //
+  // If PodSelector is also set, then the NetworkPolicyPeer as a whole selects
+  // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector.
+  // Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 2;
+
+  // IPBlock defines policy on a particular IPBlock. If this field is set then
+  // neither of the other fields can be.
+  // +optional
+  optional IPBlock ipBlock = 3;
+}
+
+// NetworkPolicyPort describes a port to allow traffic on
+message NetworkPolicyPort {
+  // The protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this
+  // field defaults to TCP.
+  // +optional
+  optional string protocol = 1;
+
+  // The port on the given protocol. This can either be a numerical or named port on
+  // a pod. If this field is not provided, this matches all port names and numbers.
+  // +optional
+  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2;
+}
+
+// NetworkPolicySpec provides the specification of a NetworkPolicy
+message NetworkPolicySpec {
+  // Selects the pods to which this NetworkPolicy object applies. The array of
+  // ingress rules is applied to any pods selected by this field. Multiple network
+  // policies can select the same set of pods. In this case, the ingress rules for
+  // each are combined additively. This field is NOT optional and follows standard
+  // label selector semantics. An empty podSelector matches all pods in this
+  // namespace.
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1;
+
+  // List of ingress rules to be applied to the selected pods. Traffic is allowed to
+  // a pod if there are no NetworkPolicies selecting the pod
+  // (and cluster policy otherwise allows the traffic), OR if the traffic source is
+  // the pod's local node, OR if the traffic matches at least one ingress rule
+  // across all of the NetworkPolicy objects whose podSelector matches the pod. If
+  // this field is empty then this NetworkPolicy does not allow any traffic (and serves
+  // solely to ensure that the pods it selects are isolated by default)
+  // +optional
+  repeated NetworkPolicyIngressRule ingress = 2;
+
+  // List of egress rules to be applied to the selected pods. Outgoing traffic is
+  // allowed if there are no NetworkPolicies selecting the pod (and cluster policy
+  // otherwise allows the traffic), OR if the traffic matches at least one egress rule
+  // across all of the NetworkPolicy objects whose podSelector matches the pod. If
+  // this field is empty then this NetworkPolicy limits all outgoing traffic (and serves
+  // solely to ensure that the pods it selects are isolated by default).
+  // This field is beta-level in 1.8
+  // +optional
+  repeated NetworkPolicyEgressRule egress = 3;
+
+  // List of rule types that the NetworkPolicy relates to.
+  // Valid options are Ingress, Egress, or Ingress,Egress.
+  // If this field is not specified, it will default based on the existence of Ingress or Egress rules;
+  // policies that contain an Egress section are assumed to affect Egress, and all policies
+  // (whether or not they contain an Ingress section) are assumed to affect Ingress.
+  // If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ].
+  // Likewise, if you want to write a policy that specifies that no egress is allowed,
+  // you must specify a policyTypes value that includes "Egress" (since such a policy would not include
+  // an Egress section and would otherwise default to just [ "Ingress" ]).
+  // This field is beta-level in 1.8
+  // +optional
+  repeated string policyTypes = 4;
+}
+
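
The policyTypes defaulting described above is the subtle part: an egress-only
or deny-all-egress policy must name Egress explicitly, or the field defaults
to just Ingress. A minimal sketch using the Go types added by this change
(the helper name is illustrative):

    package main

    import (
    	networkingv1 "k8s.io/api/networking/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    // denyAllEgress selects every pod in the namespace and allows no
    // outgoing traffic. PolicyTypes must list Egress explicitly: with no
    // egress rules present it would otherwise default to ["Ingress"],
    // leaving egress unrestricted.
    func denyAllEgress(namespace string) *networkingv1.NetworkPolicy {
    	return &networkingv1.NetworkPolicy{
    		ObjectMeta: metav1.ObjectMeta{
    			Name:      "deny-all-egress",
    			Namespace: namespace,
    		},
    		Spec: networkingv1.NetworkPolicySpec{
    			PodSelector: metav1.LabelSelector{}, // empty: all pods
    			PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeEgress},
    			// Egress is left empty, so no egress traffic is allowed.
    		},
    	}
    }
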
diff --git a/vendor/k8s.io/api/networking/v1/register.go b/vendor/k8s.io/api/networking/v1/register.go
new file mode 100644
index 0000000..f47f22e
--- /dev/null
+++ b/vendor/k8s.io/api/networking/v1/register.go
@@ -0,0 +1,53 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "networking.k8s.io"
+
+// SchemeGroupVersion is the group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
+	// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+	SchemeBuilder      = runtime.NewSchemeBuilder(addKnownTypes)
+	localSchemeBuilder = &SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&NetworkPolicy{},
+		&NetworkPolicyList{},
+	)
+
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
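
register.go is what makes these types visible to the apimachinery runtime. A
minimal sketch of consuming it, as a client or controller would before
serializing these objects (illustrative):

    package main

    import (
    	networkingv1 "k8s.io/api/networking/v1"
    	"k8s.io/apimachinery/pkg/runtime"
    )

    func main() {
    	// Build a scheme and register the networking/v1 kinds with it.
    	scheme := runtime.NewScheme()
    	if err := networkingv1.AddToScheme(scheme); err != nil {
    		panic(err)
    	}
    	// Registered kinds can now be resolved through the scheme.
    	gvk := networkingv1.SchemeGroupVersion.WithKind("NetworkPolicy")
    	if _, err := scheme.New(gvk); err != nil {
    		panic(err)
    	}
    }
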
diff --git a/vendor/k8s.io/api/networking/v1/types.go b/vendor/k8s.io/api/networking/v1/types.go
new file mode 100644
index 0000000..ce70448
--- /dev/null
+++ b/vendor/k8s.io/api/networking/v1/types.go
@@ -0,0 +1,203 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/intstr"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// NetworkPolicy describes what network traffic is allowed for a set of Pods
+type NetworkPolicy struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Specification of the desired behavior for this NetworkPolicy.
+	// +optional
+	Spec NetworkPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+}
+
+// PolicyType string describes the NetworkPolicy type
+// This type is beta-level in 1.8
+type PolicyType string
+
+const (
+	// PolicyTypeIngress is a NetworkPolicy that affects ingress traffic on selected pods
+	PolicyTypeIngress PolicyType = "Ingress"
+	// PolicyTypeEgress is a NetworkPolicy that affects egress traffic on selected pods
+	PolicyTypeEgress PolicyType = "Egress"
+)
+
+// NetworkPolicySpec provides the specification of a NetworkPolicy
+type NetworkPolicySpec struct {
+	// Selects the pods to which this NetworkPolicy object applies. The array of
+	// ingress rules is applied to any pods selected by this field. Multiple network
+	// policies can select the same set of pods. In this case, the ingress rules for
+	// each are combined additively. This field is NOT optional and follows standard
+	// label selector semantics. An empty podSelector matches all pods in this
+	// namespace.
+	PodSelector metav1.LabelSelector `json:"podSelector" protobuf:"bytes,1,opt,name=podSelector"`
+
+	// List of ingress rules to be applied to the selected pods. Traffic is allowed to
+	// a pod if there are no NetworkPolicies selecting the pod
+	// (and cluster policy otherwise allows the traffic), OR if the traffic source is
+	// the pod's local node, OR if the traffic matches at least one ingress rule
+	// across all of the NetworkPolicy objects whose podSelector matches the pod. If
+	// this field is empty then this NetworkPolicy does not allow any traffic (and serves
+	// solely to ensure that the pods it selects are isolated by default)
+	// +optional
+	Ingress []NetworkPolicyIngressRule `json:"ingress,omitempty" protobuf:"bytes,2,rep,name=ingress"`
+
+	// List of egress rules to be applied to the selected pods. Outgoing traffic is
+	// allowed if there are no NetworkPolicies selecting the pod (and cluster policy
+	// otherwise allows the traffic), OR if the traffic matches at least one egress rule
+	// across all of the NetworkPolicy objects whose podSelector matches the pod. If
+	// this field is empty then this NetworkPolicy limits all outgoing traffic (and serves
+	// solely to ensure that the pods it selects are isolated by default).
+	// This field is beta-level in 1.8
+	// +optional
+	Egress []NetworkPolicyEgressRule `json:"egress,omitempty" protobuf:"bytes,3,rep,name=egress"`
+
+	// List of rule types that the NetworkPolicy relates to.
+	// Valid options are Ingress, Egress, or Ingress,Egress.
+	// If this field is not specified, it will default based on the existence of Ingress or Egress rules;
+	// policies that contain an Egress section are assumed to affect Egress, and all policies
+	// (whether or not they contain an Ingress section) are assumed to affect Ingress.
+	// If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ].
+	// Likewise, if you want to write a policy that specifies that no egress is allowed,
+	// you must specify a policyTypes value that includes "Egress" (since such a policy would not include
+	// an Egress section and would otherwise default to just [ "Ingress" ]).
+	// This field is beta-level in 1.8
+	// +optional
+	PolicyTypes []PolicyType `json:"policyTypes,omitempty" protobuf:"bytes,4,rep,name=policyTypes,casttype=PolicyType"`
+}
+
+// NetworkPolicyIngressRule describes a particular set of traffic that is allowed to the pods
+// matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and from.
+type NetworkPolicyIngressRule struct {
+	// List of ports which should be made accessible on the pods selected for this
+	// rule. Each item in this list is combined using a logical OR. If this field is
+	// empty or missing, this rule matches all ports (traffic not restricted by port).
+	// If this field is present and contains at least one item, then this rule allows
+	// traffic only if the traffic matches at least one port in the list.
+	// +optional
+	Ports []NetworkPolicyPort `json:"ports,omitempty" protobuf:"bytes,1,rep,name=ports"`
+
+	// List of sources which should be able to access the pods selected for this rule.
+	// Items in this list are combined using a logical OR operation. If this field is
+	// empty or missing, this rule matches all sources (traffic not restricted by
+	// source). If this field is present and contains at least one item, this rule
+	// allows traffic only if the traffic matches at least one item in the from list.
+	// +optional
+	From []NetworkPolicyPeer `json:"from,omitempty" protobuf:"bytes,2,rep,name=from"`
+}
+
+// NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods
+// matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to.
+// This type is beta-level in 1.8
+type NetworkPolicyEgressRule struct {
+	// List of destination ports for outgoing traffic.
+	// Each item in this list is combined using a logical OR. If this field is
+	// empty or missing, this rule matches all ports (traffic not restricted by port).
+	// If this field is present and contains at least one item, then this rule allows
+	// traffic only if the traffic matches at least one port in the list.
+	// +optional
+	Ports []NetworkPolicyPort `json:"ports,omitempty" protobuf:"bytes,1,rep,name=ports"`
+
+	// List of destinations for outgoing traffic of pods selected for this rule.
+	// Items in this list are combined using a logical OR operation. If this field is
+	// empty or missing, this rule matches all destinations (traffic not restricted by
+	// destination). If this field is present and contains at least one item, this rule
+	// allows traffic only if the traffic matches at least one item in the to list.
+	// +optional
+	To []NetworkPolicyPeer `json:"to,omitempty" protobuf:"bytes,2,rep,name=to"`
+}
+
+// NetworkPolicyPort describes a port to allow traffic on
+type NetworkPolicyPort struct {
+	// The protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this
+	// field defaults to TCP.
+	// +optional
+	Protocol *v1.Protocol `json:"protocol,omitempty" protobuf:"bytes,1,opt,name=protocol,casttype=k8s.io/api/core/v1.Protocol"`
+
+	// The port on the given protocol. This can either be a numerical or named port on
+	// a pod. If this field is not provided, this matches all port names and numbers.
+	// +optional
+	Port *intstr.IntOrString `json:"port,omitempty" protobuf:"bytes,2,opt,name=port"`
+}
+
+// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24") that is allowed to the pods
+// matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should
+// not be included within this rule.
+type IPBlock struct {
+	// CIDR is a string representing the IP Block.
+	// Valid examples are "192.168.1.1/24".
+	CIDR string `json:"cidr" protobuf:"bytes,1,name=cidr"`
+	// Except is a slice of CIDRs that should not be included within an IP Block.
+	// Valid examples are "192.168.1.1/24".
+	// Except values will be rejected if they are outside the CIDR range.
+	// +optional
+	Except []string `json:"except,omitempty" protobuf:"bytes,2,rep,name=except"`
+}
+
+// NetworkPolicyPeer describes a peer to allow traffic from. Only certain combinations of
+// fields are allowed
+type NetworkPolicyPeer struct {
+	// This is a label selector which selects Pods. This field follows standard label
+	// selector semantics; if present but empty, it selects all pods.
+	//
+	// If NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects
+	// the Pods matching PodSelector in the Namespaces selected by NamespaceSelector.
+	// Otherwise it selects the Pods matching PodSelector in the policy's own Namespace.
+	// +optional
+	PodSelector *metav1.LabelSelector `json:"podSelector,omitempty" protobuf:"bytes,1,opt,name=podSelector"`
+
+	// Selects Namespaces using cluster-scoped labels. This field follows standard label
+	// selector semantics; if present but empty, it selects all namespaces.
+	//
+	// If PodSelector is also set, then the NetworkPolicyPeer as a whole selects
+	// the Pods matching PodSelector in the Namespaces selected by NamespaceSelector.
+	// Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector.
+	// +optional
+	NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,2,opt,name=namespaceSelector"`
+
+	// IPBlock defines policy on a particular IPBlock. If this field is set then
+	// neither of the other fields can be.
+	// +optional
+	IPBlock *IPBlock `json:"ipBlock,omitempty" protobuf:"bytes,3,rep,name=ipBlock"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// NetworkPolicyList is a list of NetworkPolicy objects.
+type NetworkPolicyList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is a list of schema objects.
+	Items []NetworkPolicy `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
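
The policyTypes defaulting described in NetworkPolicySpec is easy to get wrong, so here is a hedged sketch of the deny-all-egress case the field comment calls out; without the explicit PolicyTypes entry such a policy would default to just ["Ingress"]:

package main

import (
	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// denyAllEgress builds a policy that selects every pod in the namespace
// (empty podSelector) and, because it carries the Egress policy type with
// no egress rules, blocks all outgoing traffic from those pods.
func denyAllEgress(namespace string) *networkingv1.NetworkPolicy {
	return &networkingv1.NetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "deny-all-egress", Namespace: namespace},
		Spec: networkingv1.NetworkPolicySpec{
			PodSelector: metav1.LabelSelector{},
			PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeEgress},
		},
	}
}

func main() { _ = denyAllEgress("default") }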
diff --git a/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go
new file mode 100644
index 0000000..f4363bc
--- /dev/null
+++ b/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go
@@ -0,0 +1,113 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_IPBlock = map[string]string{
+	"":       "IPBlock describes a particular CIDR (Ex. \"192.168.1.1/24\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should not be included within this rule.",
+	"cidr":   "CIDR is a string representing the IP Block Valid examples are \"192.168.1.1/24\"",
+	"except": "Except is a slice of CIDRs that should not be included within an IP Block Valid examples are \"192.168.1.1/24\" Except values will be rejected if they are outside the CIDR range",
+}
+
+func (IPBlock) SwaggerDoc() map[string]string {
+	return map_IPBlock
+}
+
+var map_NetworkPolicy = map[string]string{
+	"":         "NetworkPolicy describes what network traffic is allowed for a set of Pods",
+	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"spec":     "Specification of the desired behavior for this NetworkPolicy.",
+}
+
+func (NetworkPolicy) SwaggerDoc() map[string]string {
+	return map_NetworkPolicy
+}
+
+var map_NetworkPolicyEgressRule = map[string]string{
+	"":      "NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. This type is beta-level in 1.8",
+	"ports": "List of destination ports for outgoing traffic. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.",
+	"to":    "List of destinations for outgoing traffic of pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all destinations (traffic not restricted by destination). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the to list.",
+}
+
+func (NetworkPolicyEgressRule) SwaggerDoc() map[string]string {
+	return map_NetworkPolicyEgressRule
+}
+
+var map_NetworkPolicyIngressRule = map[string]string{
+	"":      "NetworkPolicyIngressRule describes a particular set of traffic that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and from.",
+	"ports": "List of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.",
+	"from":  "List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). If this field is present and contains at least on item, this rule allows traffic only if the traffic matches at least one item in the from list.",
+}
+
+func (NetworkPolicyIngressRule) SwaggerDoc() map[string]string {
+	return map_NetworkPolicyIngressRule
+}
+
+var map_NetworkPolicyList = map[string]string{
+	"":         "NetworkPolicyList is a list of NetworkPolicy objects.",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"items":    "Items is a list of schema objects.",
+}
+
+func (NetworkPolicyList) SwaggerDoc() map[string]string {
+	return map_NetworkPolicyList
+}
+
+var map_NetworkPolicyPeer = map[string]string{
+	"":                  "NetworkPolicyPeer describes a peer to allow traffic from. Only certain combinations of fields are allowed",
+	"podSelector":       "This is a label selector which selects Pods. This field follows standard label selector semantics; if present but empty, it selects all pods.\n\nIf NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. Otherwise it selects the Pods matching PodSelector in the policy's own Namespace.",
+	"namespaceSelector": "Selects Namespaces using cluster-scoped labels. This field follows standard label selector semantics; if present but empty, it selects all namespaces.\n\nIf PodSelector is also set, then the NetworkPolicyPeer as a whole selects the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector.",
+	"ipBlock":           "IPBlock defines policy on a particular IPBlock. If this field is set then neither of the other fields can be.",
+}
+
+func (NetworkPolicyPeer) SwaggerDoc() map[string]string {
+	return map_NetworkPolicyPeer
+}
+
+var map_NetworkPolicyPort = map[string]string{
+	"":         "NetworkPolicyPort describes a port to allow traffic on",
+	"protocol": "The protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this field defaults to TCP.",
+	"port":     "The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers.",
+}
+
+func (NetworkPolicyPort) SwaggerDoc() map[string]string {
+	return map_NetworkPolicyPort
+}
+
+var map_NetworkPolicySpec = map[string]string{
+	"":            "NetworkPolicySpec provides the specification of a NetworkPolicy",
+	"podSelector": "Selects the pods to which this NetworkPolicy object applies. The array of ingress rules is applied to any pods selected by this field. Multiple network policies can select the same set of pods. In this case, the ingress rules for each are combined additively. This field is NOT optional and follows standard label selector semantics. An empty podSelector matches all pods in this namespace.",
+	"ingress":     "List of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default)",
+	"egress":      "List of egress rules to be applied to the selected pods. Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). This field is beta-level in 1.8",
+	"policyTypes": "List of rule types that the NetworkPolicy relates to. Valid options are Ingress, Egress, or Ingress,Egress. If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \"Egress\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \"Egress\" (since such a policy would not include an Egress section and would otherwise default to just [ \"Ingress\" ]). This field is beta-level in 1.8",
+}
+
+func (NetworkPolicySpec) SwaggerDoc() map[string]string {
+	return map_NetworkPolicySpec
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
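
The generated maps are plain data and can be inspected directly; go-restful reads them through the SwaggerDoc method in the same way. A small sketch (assuming the vendored import path):

package main

import (
	"fmt"

	networkingv1 "k8s.io/api/networking/v1"
)

func main() {
	// SwaggerDoc is declared on the value type, so a zero value is enough;
	// keys are the JSON field names, and "" holds the type-level description.
	docs := networkingv1.NetworkPolicySpec{}.SwaggerDoc()
	fmt.Println(docs[""])
	fmt.Println(docs["policyTypes"])
}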
diff --git a/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..d1e4e88
--- /dev/null
+++ b/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go
@@ -0,0 +1,262 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	intstr "k8s.io/apimachinery/pkg/util/intstr"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IPBlock) DeepCopyInto(out *IPBlock) {
+	*out = *in
+	if in.Except != nil {
+		in, out := &in.Except, &out.Except
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPBlock.
+func (in *IPBlock) DeepCopy() *IPBlock {
+	if in == nil {
+		return nil
+	}
+	out := new(IPBlock)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetworkPolicy) DeepCopyInto(out *NetworkPolicy) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicy.
+func (in *NetworkPolicy) DeepCopy() *NetworkPolicy {
+	if in == nil {
+		return nil
+	}
+	out := new(NetworkPolicy)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *NetworkPolicy) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetworkPolicyEgressRule) DeepCopyInto(out *NetworkPolicyEgressRule) {
+	*out = *in
+	if in.Ports != nil {
+		in, out := &in.Ports, &out.Ports
+		*out = make([]NetworkPolicyPort, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.To != nil {
+		in, out := &in.To, &out.To
+		*out = make([]NetworkPolicyPeer, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyEgressRule.
+func (in *NetworkPolicyEgressRule) DeepCopy() *NetworkPolicyEgressRule {
+	if in == nil {
+		return nil
+	}
+	out := new(NetworkPolicyEgressRule)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetworkPolicyIngressRule) DeepCopyInto(out *NetworkPolicyIngressRule) {
+	*out = *in
+	if in.Ports != nil {
+		in, out := &in.Ports, &out.Ports
+		*out = make([]NetworkPolicyPort, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.From != nil {
+		in, out := &in.From, &out.From
+		*out = make([]NetworkPolicyPeer, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyIngressRule.
+func (in *NetworkPolicyIngressRule) DeepCopy() *NetworkPolicyIngressRule {
+	if in == nil {
+		return nil
+	}
+	out := new(NetworkPolicyIngressRule)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetworkPolicyList) DeepCopyInto(out *NetworkPolicyList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]NetworkPolicy, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyList.
+func (in *NetworkPolicyList) DeepCopy() *NetworkPolicyList {
+	if in == nil {
+		return nil
+	}
+	out := new(NetworkPolicyList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *NetworkPolicyList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetworkPolicyPeer) DeepCopyInto(out *NetworkPolicyPeer) {
+	*out = *in
+	if in.PodSelector != nil {
+		in, out := &in.PodSelector, &out.PodSelector
+		*out = new(metav1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.NamespaceSelector != nil {
+		in, out := &in.NamespaceSelector, &out.NamespaceSelector
+		*out = new(metav1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.IPBlock != nil {
+		in, out := &in.IPBlock, &out.IPBlock
+		*out = new(IPBlock)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyPeer.
+func (in *NetworkPolicyPeer) DeepCopy() *NetworkPolicyPeer {
+	if in == nil {
+		return nil
+	}
+	out := new(NetworkPolicyPeer)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetworkPolicyPort) DeepCopyInto(out *NetworkPolicyPort) {
+	*out = *in
+	if in.Protocol != nil {
+		in, out := &in.Protocol, &out.Protocol
+		*out = new(corev1.Protocol)
+		**out = **in
+	}
+	if in.Port != nil {
+		in, out := &in.Port, &out.Port
+		*out = new(intstr.IntOrString)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyPort.
+func (in *NetworkPolicyPort) DeepCopy() *NetworkPolicyPort {
+	if in == nil {
+		return nil
+	}
+	out := new(NetworkPolicyPort)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetworkPolicySpec) DeepCopyInto(out *NetworkPolicySpec) {
+	*out = *in
+	in.PodSelector.DeepCopyInto(&out.PodSelector)
+	if in.Ingress != nil {
+		in, out := &in.Ingress, &out.Ingress
+		*out = make([]NetworkPolicyIngressRule, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Egress != nil {
+		in, out := &in.Egress, &out.Egress
+		*out = make([]NetworkPolicyEgressRule, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.PolicyTypes != nil {
+		in, out := &in.PolicyTypes, &out.PolicyTypes
+		*out = make([]PolicyType, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicySpec.
+func (in *NetworkPolicySpec) DeepCopy() *NetworkPolicySpec {
+	if in == nil {
+		return nil
+	}
+	out := new(NetworkPolicySpec)
+	in.DeepCopyInto(out)
+	return out
+}
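
These helpers exist so callers can hand out copies without aliasing the original's slices and pointers. A minimal sketch of the intended use (assuming the vendored import path):

package main

import (
	"fmt"

	networkingv1 "k8s.io/api/networking/v1"
)

func main() {
	orig := &networkingv1.NetworkPolicySpec{
		PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress},
	}
	// DeepCopy duplicates the PolicyTypes slice, so growing the copy
	// leaves the original spec untouched.
	cp := orig.DeepCopy()
	cp.PolicyTypes = append(cp.PolicyTypes, networkingv1.PolicyTypeEgress)
	fmt.Println(len(orig.PolicyTypes), len(cp.PolicyTypes)) // 1 2
}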
diff --git a/vendor/k8s.io/api/policy/v1beta1/doc.go b/vendor/k8s.io/api/policy/v1beta1/doc.go
new file mode 100644
index 0000000..74611c6
--- /dev/null
+++ b/vendor/k8s.io/api/policy/v1beta1/doc.go
@@ -0,0 +1,23 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:openapi-gen=true
+
+// Package policy is for any kind of policy object.  Suitable examples, even if
+// they aren't all here, are PodDisruptionBudget, PodSecurityPolicy,
+// NetworkPolicy, etc.
+package v1beta1 // import "k8s.io/api/policy/v1beta1"
diff --git a/vendor/k8s.io/api/policy/v1beta1/generated.proto b/vendor/k8s.io/api/policy/v1beta1/generated.proto
new file mode 100644
index 0000000..e9df3c1
--- /dev/null
+++ b/vendor/k8s.io/api/policy/v1beta1/generated.proto
@@ -0,0 +1,367 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.api.policy.v1beta1;
+
+import "k8s.io/api/core/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+import "k8s.io/apimachinery/pkg/util/intstr/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1beta1";
+
+// AllowedFlexVolume represents a single Flexvolume that is allowed to be used.
+message AllowedFlexVolume {
+  // driver is the name of the Flexvolume driver.
+  optional string driver = 1;
+}
+
+// AllowedHostPath defines the host volume conditions that will be enabled by a policy
+// for pods to use. It requires the path prefix to be defined.
+message AllowedHostPath {
+  // pathPrefix is the path prefix that the host volume must match.
+  // It does not support `*`.
+  // Trailing slashes are trimmed when validating the path prefix with a host path.
+  //
+  // Examples:
+  // `/foo` would allow `/foo`, `/foo/` and `/foo/bar`
+  // `/foo` would not allow `/food` or `/etc/foo`
+  optional string pathPrefix = 1;
+
+  // when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly.
+  // +optional
+  optional bool readOnly = 2;
+}
+
+// Eviction evicts a pod from its node subject to certain policies and safety constraints.
+// This is a subresource of Pod.  A request to cause such an eviction is
+// created by POSTing to .../pods/<pod name>/evictions.
+message Eviction {
+  // ObjectMeta describes the pod that is being evicted.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // DeleteOptions may be provided
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions deleteOptions = 2;
+}
+
+// FSGroupStrategyOptions defines the strategy type and options used to create the strategy.
+message FSGroupStrategyOptions {
+  // rule is the strategy that will dictate what FSGroup is used in the SecurityContext.
+  // +optional
+  optional string rule = 1;
+
+  // ranges are the allowed ranges of fs groups.  If you would like to force a single
+  // fs group then supply a single range with the same start and end. Required for MustRunAs.
+  // +optional
+  repeated IDRange ranges = 2;
+}
+
+// HostPortRange defines a range of host ports that will be enabled by a policy
+// for pods to use.  It requires both the start and end to be defined.
+message HostPortRange {
+  // min is the start of the range, inclusive.
+  optional int32 min = 1;
+
+  // max is the end of the range, inclusive.
+  optional int32 max = 2;
+}
+
+// IDRange provides a min/max of an allowed range of IDs.
+message IDRange {
+  // min is the start of the range, inclusive.
+  optional int64 min = 1;
+
+  // max is the end of the range, inclusive.
+  optional int64 max = 2;
+}
+
+// PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods
+message PodDisruptionBudget {
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Specification of the desired behavior of the PodDisruptionBudget.
+  // +optional
+  optional PodDisruptionBudgetSpec spec = 2;
+
+  // Most recently observed status of the PodDisruptionBudget.
+  // +optional
+  optional PodDisruptionBudgetStatus status = 3;
+}
+
+// PodDisruptionBudgetList is a collection of PodDisruptionBudgets.
+message PodDisruptionBudgetList {
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  repeated PodDisruptionBudget items = 2;
+}
+
+// PodDisruptionBudgetSpec is a description of a PodDisruptionBudget.
+message PodDisruptionBudgetSpec {
+  // An eviction is allowed if at least "minAvailable" pods selected by
+  // "selector" will still be available after the eviction, i.e. even in the
+  // absence of the evicted pod.  So for example you can prevent all voluntary
+  // evictions by specifying "100%".
+  // +optional
+  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString minAvailable = 1;
+
+  // Label query over pods whose evictions are managed by the disruption
+  // budget.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
+
+  // An eviction is allowed if at most "maxUnavailable" pods selected by
+  // "selector" are unavailable after the eviction, i.e. even in absence of
+  // the evicted pod. For example, one can prevent all voluntary evictions
+  // by specifying 0. This is a mutually exclusive setting with "minAvailable".
+  // +optional
+  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 3;
+}
+
+// PodDisruptionBudgetStatus represents information about the status of a
+// PodDisruptionBudget. Status may trail the actual state of a system.
+message PodDisruptionBudgetStatus {
+  // Most recent generation observed when updating this PDB status. PodDisruptionsAllowed and other
+  // status information is valid only if observedGeneration equals the PDB's object generation.
+  // +optional
+  optional int64 observedGeneration = 1;
+
+  // DisruptedPods contains information about pods whose eviction was
+  // processed by the API server eviction subresource handler but has not
+  // yet been observed by the PodDisruptionBudget controller.
+  // A pod will be in this map from the time when the API server processed the
+  // eviction request to the time when the pod is seen by the PDB controller
+  // as having been marked for deletion (or after a timeout). The key in the map is the name of the pod
+  // and the value is the time when the API server processed the eviction request. If
+  // the deletion didn't occur and a pod is still there, it will be removed from
+  // the list automatically by the PodDisruptionBudget controller after some time.
+  // If everything goes smoothly, this map should be empty most of the time.
+  // A large number of entries in the map may indicate problems with pod deletions.
+  // +optional
+  map<string, k8s.io.apimachinery.pkg.apis.meta.v1.Time> disruptedPods = 2;
+
+  // Number of pod disruptions that are currently allowed.
+  optional int32 disruptionsAllowed = 3;
+
+  // current number of healthy pods
+  optional int32 currentHealthy = 4;
+
+  // minimum desired number of healthy pods
+  optional int32 desiredHealthy = 5;
+
+  // total number of pods counted by this disruption budget
+  optional int32 expectedPods = 6;
+}
+
+// PodSecurityPolicy governs the ability to make requests that affect the Security Context
+// that will be applied to a pod and container.
+message PodSecurityPolicy {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // spec defines the policy enforced.
+  // +optional
+  optional PodSecurityPolicySpec spec = 2;
+}
+
+// PodSecurityPolicyList is a list of PodSecurityPolicy objects.
+message PodSecurityPolicyList {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // items is a list of schema objects.
+  repeated PodSecurityPolicy items = 2;
+}
+
+// PodSecurityPolicySpec defines the policy enforced.
+message PodSecurityPolicySpec {
+  // privileged determines if a pod can request to be run as privileged.
+  // +optional
+  optional bool privileged = 1;
+
+  // defaultAddCapabilities is the default set of capabilities that will be added to the container
+  // unless the pod spec specifically drops the capability.  You may not list a capability in both
+  // defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly
+  // allowed, and need not be included in the allowedCapabilities list.
+  // +optional
+  repeated string defaultAddCapabilities = 2;
+
+  // requiredDropCapabilities are the capabilities that will be dropped from the container.  These
+  // are required to be dropped and cannot be added.
+  // +optional
+  repeated string requiredDropCapabilities = 3;
+
+  // allowedCapabilities is a list of capabilities that can be requested to add to the container.
+  // Capabilities in this field may be added at the pod author's discretion.
+  // You must not list a capability in both allowedCapabilities and requiredDropCapabilities.
+  // +optional
+  repeated string allowedCapabilities = 4;
+
+  // volumes is a whitelist of allowed volume plugins. Empty indicates that
+  // no volumes may be used. To allow all volumes you may use '*'.
+  // +optional
+  repeated string volumes = 5;
+
+  // hostNetwork determines if the policy allows the use of HostNetwork in the pod spec.
+  // +optional
+  optional bool hostNetwork = 6;
+
+  // hostPorts determines which host port ranges are allowed to be exposed.
+  // +optional
+  repeated HostPortRange hostPorts = 7;
+
+  // hostPID determines if the policy allows the use of HostPID in the pod spec.
+  // +optional
+  optional bool hostPID = 8;
+
+  // hostIPC determines if the policy allows the use of HostIPC in the pod spec.
+  // +optional
+  optional bool hostIPC = 9;
+
+  // seLinux is the strategy that will dictate the allowable labels that may be set.
+  optional SELinuxStrategyOptions seLinux = 10;
+
+  // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.
+  optional RunAsUserStrategyOptions runAsUser = 11;
+
+  // RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set.
+  // If this field is omitted, the pod's RunAsGroup can take any value. This field requires the
+  // RunAsGroup feature gate to be enabled.
+  // +optional
+  optional RunAsGroupStrategyOptions runAsGroup = 22;
+
+  // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.
+  optional SupplementalGroupsStrategyOptions supplementalGroups = 12;
+
+  // fsGroup is the strategy that will dictate what fs group is used by the SecurityContext.
+  optional FSGroupStrategyOptions fsGroup = 13;
+
+  // readOnlyRootFilesystem when set to true will force containers to run with a read only root file
+  // system.  If the container specifically requests to run with a non-read only root file system
+  // the PSP should deny the pod.
+  // If set to false the container may run with a read only root file system if it wishes but it
+  // will not be forced to.
+  // +optional
+  optional bool readOnlyRootFilesystem = 14;
+
+  // defaultAllowPrivilegeEscalation controls the default setting for whether a
+  // process can gain more privileges than its parent process.
+  // +optional
+  optional bool defaultAllowPrivilegeEscalation = 15;
+
+  // allowPrivilegeEscalation determines if a pod can request to allow
+  // privilege escalation. If unspecified, defaults to true.
+  // +optional
+  optional bool allowPrivilegeEscalation = 16;
+
+  // allowedHostPaths is a whitelist of allowed host paths. Empty indicates
+  // that all host paths may be used.
+  // +optional
+  repeated AllowedHostPath allowedHostPaths = 17;
+
+  // allowedFlexVolumes is a whitelist of allowed Flexvolumes.  Empty or nil indicates that all
+  // Flexvolumes may be used.  This parameter is effective only when the usage of the Flexvolumes
+  // is allowed in the "volumes" field.
+  // +optional
+  repeated AllowedFlexVolume allowedFlexVolumes = 18;
+
+  // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none.
+  // Each entry is either a plain sysctl name or ends in "*" in which case it is considered
+  // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed.
+  // Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection.
+  //
+  // Examples:
+  // e.g. "foo/*" allows "foo/bar", "foo/baz", etc.
+  // e.g. "foo.*" allows "foo.bar", "foo.baz", etc.
+  // +optional
+  repeated string allowedUnsafeSysctls = 19;
+
+  // forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none.
+  // Each entry is either a plain sysctl name or ends in "*" in which case it is considered
+  // as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.
+  //
+  // Examples:
+  // e.g. "foo/*" forbids "foo/bar", "foo/baz", etc.
+  // e.g. "foo.*" forbids "foo.bar", "foo.baz", etc.
+  // +optional
+  repeated string forbiddenSysctls = 20;
+
+  // AllowedProcMountTypes is a whitelist of allowed ProcMountTypes.
+  // Empty or nil indicates that only the DefaultProcMountType may be used.
+  // This requires the ProcMountType feature flag to be enabled.
+  // +optional
+  repeated string allowedProcMountTypes = 21;
+}
+
+// RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy.
+message RunAsGroupStrategyOptions {
+  // rule is the strategy that will dictate the allowable RunAsGroup values that may be set.
+  optional string rule = 1;
+
+  // ranges are the allowed ranges of gids that may be used. If you would like to force a single gid
+  // then supply a single range with the same start and end. Required for MustRunAs.
+  // +optional
+  repeated IDRange ranges = 2;
+}
+
+// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy.
+message RunAsUserStrategyOptions {
+  // rule is the strategy that will dictate the allowable RunAsUser values that may be set.
+  optional string rule = 1;
+
+  // ranges are the allowed ranges of uids that may be used. If you would like to force a single uid
+  // then supply a single range with the same start and end. Required for MustRunAs.
+  // +optional
+  repeated IDRange ranges = 2;
+}
+
+// SELinuxStrategyOptions defines the strategy type and any options used to create the strategy.
+message SELinuxStrategyOptions {
+  // rule is the strategy that will dictate the allowable labels that may be set.
+  optional string rule = 1;
+
+  // seLinuxOptions required to run as; required for MustRunAs
+  // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+  // +optional
+  optional k8s.io.api.core.v1.SELinuxOptions seLinuxOptions = 2;
+}
+
+// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy.
+message SupplementalGroupsStrategyOptions {
+  // rule is the strategy that will dictate what supplemental groups are used in the SecurityContext.
+  // +optional
+  optional string rule = 1;
+
+  // ranges are the allowed ranges of supplemental groups.  If you would like to force a single
+  // supplemental group then supply a single range with the same start and end. Required for MustRunAs.
+  // +optional
+  repeated IDRange ranges = 2;
+}
+
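
To ground the minAvailable semantics above ("you can prevent all voluntary evictions by specifying \"100%\""), a hedged Go sketch building such a budget with the intstr helper; the corresponding Go types appear in the types.go file added below:

package main

import (
	policyv1beta1 "k8s.io/api/policy/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// blockAllEvictions requires 100% of the selected pods to stay available,
// so every voluntary eviction against them is refused.
func blockAllEvictions(namespace string) *policyv1beta1.PodDisruptionBudget {
	minAvail := intstr.FromString("100%")
	return &policyv1beta1.PodDisruptionBudget{
		ObjectMeta: metav1.ObjectMeta{Name: "no-voluntary-evictions", Namespace: namespace},
		Spec: policyv1beta1.PodDisruptionBudgetSpec{
			MinAvailable: &minAvail,
			Selector:     &metav1.LabelSelector{MatchLabels: map[string]string{"app": "web"}},
		},
	}
}

func main() { _ = blockAllEvictions("default") }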
diff --git a/vendor/k8s.io/api/policy/v1beta1/register.go b/vendor/k8s.io/api/policy/v1beta1/register.go
new file mode 100644
index 0000000..b3efd63
--- /dev/null
+++ b/vendor/k8s.io/api/policy/v1beta1/register.go
@@ -0,0 +1,56 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "policy"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
+	// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+	SchemeBuilder      = runtime.NewSchemeBuilder(addKnownTypes)
+	localSchemeBuilder = &SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&PodDisruptionBudget{},
+		&PodDisruptionBudgetList{},
+		&PodSecurityPolicy{},
+		&PodSecurityPolicyList{},
+		&Eviction{},
+	)
+	// Add the watch version that applies
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
diff --git a/vendor/k8s.io/api/policy/v1beta1/types.go b/vendor/k8s.io/api/policy/v1beta1/types.go
new file mode 100644
index 0000000..91ea118
--- /dev/null
+++ b/vendor/k8s.io/api/policy/v1beta1/types.go
@@ -0,0 +1,454 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	"k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/intstr"
+)
+
+// PodDisruptionBudgetSpec is a description of a PodDisruptionBudget.
+type PodDisruptionBudgetSpec struct {
+	// An eviction is allowed if at least "minAvailable" pods selected by
+	// "selector" will still be available after the eviction, i.e. even in the
+	// absence of the evicted pod.  So for example you can prevent all voluntary
+	// evictions by specifying "100%".
+	// +optional
+	MinAvailable *intstr.IntOrString `json:"minAvailable,omitempty" protobuf:"bytes,1,opt,name=minAvailable"`
+
+	// Label query over pods whose evictions are managed by the disruption
+	// budget.
+	// +optional
+	Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"`
+
+	// An eviction is allowed if at most "maxUnavailable" pods selected by
+	// "selector" are unavailable after the eviction, i.e. even in absence of
+	// the evicted pod. For example, one can prevent all voluntary evictions
+	// by specifying 0. This is a mutually exclusive setting with "minAvailable".
+	// +optional
+	MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,3,opt,name=maxUnavailable"`
+}
+
+// PodDisruptionBudgetStatus represents information about the status of a
+// PodDisruptionBudget. Status may trail the actual state of a system.
+type PodDisruptionBudgetStatus struct {
+	// Most recent generation observed when updating this PDB status. PodDisruptionsAllowed and other
+	// status information is valid only if observedGeneration equals the PDB's object generation.
+	// +optional
+	ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"`
+
+	// DisruptedPods contains information about pods whose eviction was
+	// processed by the API server eviction subresource handler but has not
+	// yet been observed by the PodDisruptionBudget controller.
+	// A pod will be in this map from the time when the API server processed the
+	// eviction request to the time when the pod is seen by the PDB controller
+	// as having been marked for deletion (or after a timeout). The key in the map is the name of the pod
+	// and the value is the time when the API server processed the eviction request. If
+	// the deletion didn't occur and a pod is still there, it will be removed from
+	// the list automatically by the PodDisruptionBudget controller after some time.
+	// If everything goes smoothly, this map should be empty most of the time.
+	// A large number of entries in the map may indicate problems with pod deletions.
+	// +optional
+	DisruptedPods map[string]metav1.Time `json:"disruptedPods,omitempty" protobuf:"bytes,2,rep,name=disruptedPods"`
+
+	// Number of pod disruptions that are currently allowed.
+	PodDisruptionsAllowed int32 `json:"disruptionsAllowed" protobuf:"varint,3,opt,name=disruptionsAllowed"`
+
+	// current number of healthy pods
+	CurrentHealthy int32 `json:"currentHealthy" protobuf:"varint,4,opt,name=currentHealthy"`
+
+	// minimum desired number of healthy pods
+	DesiredHealthy int32 `json:"desiredHealthy" protobuf:"varint,5,opt,name=desiredHealthy"`
+
+	// total number of pods counted by this disruption budget
+	ExpectedPods int32 `json:"expectedPods" protobuf:"varint,6,opt,name=expectedPods"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods
+type PodDisruptionBudget struct {
+	metav1.TypeMeta `json:",inline"`
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Specification of the desired behavior of the PodDisruptionBudget.
+	// +optional
+	Spec PodDisruptionBudgetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+	// Most recently observed status of the PodDisruptionBudget.
+	// +optional
+	Status PodDisruptionBudgetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PodDisruptionBudgetList is a collection of PodDisruptionBudgets.
+type PodDisruptionBudgetList struct {
+	metav1.TypeMeta `json:",inline"`
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+	Items           []PodDisruptionBudget `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient
+// +genclient:noVerbs
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Eviction evicts a pod from its node subject to certain policies and safety constraints.
+// This is a subresource of Pod.  A request to cause such an eviction is
+// created by POSTing to .../pods/<pod name>/evictions.
+type Eviction struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// ObjectMeta describes the pod that is being evicted.
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// DeleteOptions may be provided
+	// +optional
+	DeleteOptions *metav1.DeleteOptions `json:"deleteOptions,omitempty" protobuf:"bytes,2,opt,name=deleteOptions"`
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PodSecurityPolicy governs the ability to make requests that affect the Security Context
+// that will be applied to a pod and container.
+type PodSecurityPolicy struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// spec defines the policy enforced.
+	// +optional
+	Spec PodSecurityPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+}
+
+// PodSecurityPolicySpec defines the policy enforced.
+type PodSecurityPolicySpec struct {
+	// privileged determines if a pod can request to be run as privileged.
+	// +optional
+	Privileged bool `json:"privileged,omitempty" protobuf:"varint,1,opt,name=privileged"`
+	// defaultAddCapabilities is the default set of capabilities that will be added to the container
+	// unless the pod spec specifically drops the capability.  You may not list a capability in both
+	// defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly
+	// allowed, and need not be included in the allowedCapabilities list.
+	// +optional
+	DefaultAddCapabilities []v1.Capability `json:"defaultAddCapabilities,omitempty" protobuf:"bytes,2,rep,name=defaultAddCapabilities,casttype=k8s.io/api/core/v1.Capability"`
+	// requiredDropCapabilities are the capabilities that will be dropped from the container.  These
+	// are required to be dropped and cannot be added.
+	// +optional
+	RequiredDropCapabilities []v1.Capability `json:"requiredDropCapabilities,omitempty" protobuf:"bytes,3,rep,name=requiredDropCapabilities,casttype=k8s.io/api/core/v1.Capability"`
+	// allowedCapabilities is a list of capabilities that can be requested to add to the container.
+	// Capabilities in this field may be added at the pod author's discretion.
+	// You must not list a capability in both allowedCapabilities and requiredDropCapabilities.
+	// +optional
+	AllowedCapabilities []v1.Capability `json:"allowedCapabilities,omitempty" protobuf:"bytes,4,rep,name=allowedCapabilities,casttype=k8s.io/api/core/v1.Capability"`
+	// volumes is a whitelist of allowed volume plugins. Empty indicates that
+	// no volumes may be used. To allow all volumes you may use '*'.
+	// +optional
+	Volumes []FSType `json:"volumes,omitempty" protobuf:"bytes,5,rep,name=volumes,casttype=FSType"`
+	// hostNetwork determines if the policy allows the use of HostNetwork in the pod spec.
+	// +optional
+	HostNetwork bool `json:"hostNetwork,omitempty" protobuf:"varint,6,opt,name=hostNetwork"`
+	// hostPorts determines which host port ranges are allowed to be exposed.
+	// +optional
+	HostPorts []HostPortRange `json:"hostPorts,omitempty" protobuf:"bytes,7,rep,name=hostPorts"`
+	// hostPID determines if the policy allows the use of HostPID in the pod spec.
+	// +optional
+	HostPID bool `json:"hostPID,omitempty" protobuf:"varint,8,opt,name=hostPID"`
+	// hostIPC determines if the policy allows the use of HostIPC in the pod spec.
+	// +optional
+	HostIPC bool `json:"hostIPC,omitempty" protobuf:"varint,9,opt,name=hostIPC"`
+	// seLinux is the strategy that will dictate the allowable labels that may be set.
+	SELinux SELinuxStrategyOptions `json:"seLinux" protobuf:"bytes,10,opt,name=seLinux"`
+	// runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.
+	RunAsUser RunAsUserStrategyOptions `json:"runAsUser" protobuf:"bytes,11,opt,name=runAsUser"`
+	// RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set.
+	// If this field is omitted, the pod's RunAsGroup can take any value. This field requires the
+	// RunAsGroup feature gate to be enabled.
+	// +optional
+	RunAsGroup *RunAsGroupStrategyOptions `json:"runAsGroup,omitempty" protobuf:"bytes,22,opt,name=runAsGroup"`
+	// supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.
+	SupplementalGroups SupplementalGroupsStrategyOptions `json:"supplementalGroups" protobuf:"bytes,12,opt,name=supplementalGroups"`
+	// fsGroup is the strategy that will dictate what fs group is used by the SecurityContext.
+	FSGroup FSGroupStrategyOptions `json:"fsGroup" protobuf:"bytes,13,opt,name=fsGroup"`
+	// readOnlyRootFilesystem when set to true will force containers to run with a read only root file
+	// system.  If the container specifically requests to run with a non-read only root file system
+	// the PSP should deny the pod.
+	// If set to false the container may run with a read only root file system if it wishes but it
+	// will not be forced to.
+	// +optional
+	ReadOnlyRootFilesystem bool `json:"readOnlyRootFilesystem,omitempty" protobuf:"varint,14,opt,name=readOnlyRootFilesystem"`
+	// defaultAllowPrivilegeEscalation controls the default setting for whether a
+	// process can gain more privileges than its parent process.
+	// +optional
+	DefaultAllowPrivilegeEscalation *bool `json:"defaultAllowPrivilegeEscalation,omitempty" protobuf:"varint,15,opt,name=defaultAllowPrivilegeEscalation"`
+	// allowPrivilegeEscalation determines if a pod can request to allow
+	// privilege escalation. If unspecified, defaults to true.
+	// +optional
+	AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,16,opt,name=allowPrivilegeEscalation"`
+	// allowedHostPaths is a white list of allowed host paths. Empty indicates
+	// that all host paths may be used.
+	// +optional
+	AllowedHostPaths []AllowedHostPath `json:"allowedHostPaths,omitempty" protobuf:"bytes,17,rep,name=allowedHostPaths"`
+	// allowedFlexVolumes is a whitelist of allowed Flexvolumes.  Empty or nil indicates that all
+	// Flexvolumes may be used.  This parameter is effective only when the usage of the Flexvolumes
+	// is allowed in the "volumes" field.
+	// +optional
+	AllowedFlexVolumes []AllowedFlexVolume `json:"allowedFlexVolumes,omitempty" protobuf:"bytes,18,rep,name=allowedFlexVolumes"`
+	// allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none.
+	// Each entry is either a plain sysctl name or ends in "*" in which case it is considered
+	// as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed.
+	// Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection.
+	//
+	// Examples:
+	// e.g. "foo/*" allows "foo/bar", "foo/baz", etc.
+	// e.g. "foo.*" allows "foo.bar", "foo.baz", etc.
+	// +optional
+	AllowedUnsafeSysctls []string `json:"allowedUnsafeSysctls,omitempty" protobuf:"bytes,19,rep,name=allowedUnsafeSysctls"`
+	// forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none.
+	// Each entry is either a plain sysctl name or ends in "*" in which case it is considered
+	// as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.
+	//
+	// Examples:
+	// e.g. "foo/*" forbids "foo/bar", "foo/baz", etc.
+	// e.g. "foo.*" forbids "foo.bar", "foo.baz", etc.
+	// +optional
+	ForbiddenSysctls []string `json:"forbiddenSysctls,omitempty" protobuf:"bytes,20,rep,name=forbiddenSysctls"`
+	// AllowedProcMountTypes is a whitelist of allowed ProcMountTypes.
+	// Empty or nil indicates that only the DefaultProcMountType may be used.
+	// This requires the ProcMountType feature flag to be enabled.
+	// +optional
+	AllowedProcMountTypes []v1.ProcMountType `json:"allowedProcMountTypes,omitempty" protobuf:"bytes,21,opt,name=allowedProcMountTypes"`
+}
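The fields above compose into a single policy object. Below is a minimal, illustrative sketch of a restrictive policy built from these types; the policy name, volume whitelist, and strategy choices are example values rather than API defaults, and the import aliases are assumptions.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	policyv1beta1 "k8s.io/api/policy/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// restrictedPSP sketches a locked-down policy: no privileged pods, all
// capabilities dropped, a small volume whitelist, non-root users, and a
// read-only root filesystem. Values are illustrative.
func restrictedPSP() *policyv1beta1.PodSecurityPolicy {
	return &policyv1beta1.PodSecurityPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "restricted"},
		Spec: policyv1beta1.PodSecurityPolicySpec{
			Privileged:               false,
			RequiredDropCapabilities: []corev1.Capability{"ALL"},
			Volumes: []policyv1beta1.FSType{
				policyv1beta1.ConfigMap,
				policyv1beta1.Secret,
				policyv1beta1.EmptyDir,
			},
			SELinux:                policyv1beta1.SELinuxStrategyOptions{Rule: policyv1beta1.SELinuxStrategyRunAsAny},
			RunAsUser:              policyv1beta1.RunAsUserStrategyOptions{Rule: policyv1beta1.RunAsUserStrategyMustRunAsNonRoot},
			SupplementalGroups:     policyv1beta1.SupplementalGroupsStrategyOptions{Rule: policyv1beta1.SupplementalGroupsStrategyRunAsAny},
			FSGroup:                policyv1beta1.FSGroupStrategyOptions{Rule: policyv1beta1.FSGroupStrategyRunAsAny},
			ReadOnlyRootFilesystem: true,
		},
	}
}

func main() {
	fmt.Println(restrictedPSP().Name)
}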
+
+// AllowedHostPath defines the host volume conditions that will be enabled by a policy
+// for pods to use. It requires the path prefix to be defined.
+type AllowedHostPath struct {
+	// pathPrefix is the path prefix that the host volume must match.
+	// It does not support `*`.
+	// Trailing slashes are trimmed when validating the path prefix with a host path.
+	//
+	// Examples:
+	// `/foo` would allow `/foo`, `/foo/` and `/foo/bar`
+	// `/foo` would not allow `/food` or `/etc/foo`
+	PathPrefix string `json:"pathPrefix,omitempty" protobuf:"bytes,1,rep,name=pathPrefix"`
+
+	// when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly.
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"`
+}
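The prefix matching described above amounts to a segment-aware prefix check after trailing slashes are trimmed. The helper below only illustrates the documented semantics (`/foo` matches `/foo`, `/foo/` and `/foo/bar` but not `/food`); it is not the validation code the API server actually runs, and it assumes the standard library strings package is imported.

// hostPathAllowed mirrors the documented pathPrefix semantics; illustrative only.
func hostPathAllowed(allowed AllowedHostPath, hostPath string) bool {
	prefix := strings.TrimSuffix(allowed.PathPrefix, "/")
	path := strings.TrimSuffix(hostPath, "/")
	// Exact match, or a match on a full path segment boundary.
	return path == prefix || strings.HasPrefix(path, prefix+"/")
}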
+
+// AllowAllCapabilities can be used as a value for the PodSecurityPolicySpec.AllowedCapabilities
+// field and means that any capabilities are allowed to be requested.
+var AllowAllCapabilities v1.Capability = "*"
+
+// FSType gives strong typing to different file systems that are used by volumes.
+type FSType string
+
+var (
+	AzureFile             FSType = "azureFile"
+	Flocker               FSType = "flocker"
+	FlexVolume            FSType = "flexVolume"
+	HostPath              FSType = "hostPath"
+	EmptyDir              FSType = "emptyDir"
+	GCEPersistentDisk     FSType = "gcePersistentDisk"
+	AWSElasticBlockStore  FSType = "awsElasticBlockStore"
+	GitRepo               FSType = "gitRepo"
+	Secret                FSType = "secret"
+	NFS                   FSType = "nfs"
+	ISCSI                 FSType = "iscsi"
+	Glusterfs             FSType = "glusterfs"
+	PersistentVolumeClaim FSType = "persistentVolumeClaim"
+	RBD                   FSType = "rbd"
+	Cinder                FSType = "cinder"
+	CephFS                FSType = "cephFS"
+	DownwardAPI           FSType = "downwardAPI"
+	FC                    FSType = "fc"
+	ConfigMap             FSType = "configMap"
+	VsphereVolume         FSType = "vsphereVolume"
+	Quobyte               FSType = "quobyte"
+	AzureDisk             FSType = "azureDisk"
+	PhotonPersistentDisk  FSType = "photonPersistentDisk"
+	StorageOS             FSType = "storageos"
+	Projected             FSType = "projected"
+	PortworxVolume        FSType = "portworxVolume"
+	ScaleIO               FSType = "scaleIO"
+	CSI                   FSType = "csi"
+	All                   FSType = "*"
+)
+
+// AllowedFlexVolume represents a single Flexvolume that is allowed to be used.
+type AllowedFlexVolume struct {
+	// driver is the name of the Flexvolume driver.
+	Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"`
+}
+
+// HostPortRange defines a range of host ports that will be enabled by a policy
+// for pods to use.  It requires both the start and end to be defined.
+type HostPortRange struct {
+	// min is the start of the range, inclusive.
+	Min int32 `json:"min" protobuf:"varint,1,opt,name=min"`
+	// max is the end of the range, inclusive.
+	Max int32 `json:"max" protobuf:"varint,2,opt,name=max"`
+}
+
+// SELinuxStrategyOptions defines the strategy type and any options used to create the strategy.
+type SELinuxStrategyOptions struct {
+	// rule is the strategy that will dictate the allowable labels that may be set.
+	Rule SELinuxStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=SELinuxStrategy"`
+	// seLinuxOptions required to run as; required for MustRunAs
+	// More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+	// +optional
+	SELinuxOptions *v1.SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,2,opt,name=seLinuxOptions"`
+}
+
+// SELinuxStrategy denotes strategy types for generating SELinux options for a
+// Security Context.
+type SELinuxStrategy string
+
+const (
+	// SELinuxStrategyMustRunAs means that container must have SELinux labels of X applied.
+	SELinuxStrategyMustRunAs SELinuxStrategy = "MustRunAs"
+	// SELinuxStrategyRunAsAny means that container may make requests for any SELinux context labels.
+	SELinuxStrategyRunAsAny SELinuxStrategy = "RunAsAny"
+)
+
+// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy.
+type RunAsUserStrategyOptions struct {
+	// rule is the strategy that will dictate the allowable RunAsUser values that may be set.
+	Rule RunAsUserStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=RunAsUserStrategy"`
+	// ranges are the allowed ranges of uids that may be used. If you would like to force a single uid
+	// then supply a single range with the same start and end. Required for MustRunAs.
+	// +optional
+	Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"`
+}
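As the ranges comment notes, pinning every container to one specific uid is done with a single-value range. A sketch, with 1000 as an arbitrary example uid:

uidStrategy := RunAsUserStrategyOptions{
	Rule:   RunAsUserStrategyMustRunAs,
	Ranges: []IDRange{{Min: 1000, Max: 1000}}, // start == end pins the uid
}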
+
+// RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy.
+type RunAsGroupStrategyOptions struct {
+	// rule is the strategy that will dictate the allowable RunAsGroup values that may be set.
+	Rule RunAsGroupStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=RunAsGroupStrategy"`
+	// ranges are the allowed ranges of gids that may be used. If you would like to force a single gid
+	// then supply a single range with the same start and end. Required for MustRunAs.
+	// +optional
+	Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"`
+}
+
+// IDRange provides a min/max of an allowed range of IDs.
+type IDRange struct {
+	// min is the start of the range, inclusive.
+	Min int64 `json:"min" protobuf:"varint,1,opt,name=min"`
+	// max is the end of the range, inclusive.
+	Max int64 `json:"max" protobuf:"varint,2,opt,name=max"`
+}
+
+// RunAsUserStrategy denotes strategy types for generating RunAsUser values for a
+// Security Context.
+type RunAsUserStrategy string
+
+const (
+	// RunAsUserStrategyMustRunAs means that container must run as a particular uid.
+	RunAsUserStrategyMustRunAs RunAsUserStrategy = "MustRunAs"
+	// RunAsUserStrategyMustRunAsNonRoot means that container must run as a non-root uid.
+	RunAsUserStrategyMustRunAsNonRoot RunAsUserStrategy = "MustRunAsNonRoot"
+	// RunAsUserStrategyRunAsAny means that container may make requests for any uid.
+	RunAsUserStrategyRunAsAny RunAsUserStrategy = "RunAsAny"
+)
+
+// RunAsGroupStrategy denotes strategy types for generating RunAsGroup values for a
+// Security Context.
+type RunAsGroupStrategy string
+
+const (
+	// RunAsGroupStrategyMayRunAs means that container does not need to run with a particular gid.
+	// However, when RunAsGroup are specified, they have to fall in the defined range.
+	RunAsGroupStrategyMayRunAs RunAsGroupStrategy = "MayRunAs"
+	// RunAsGroupStrategyMustRunAs means that container must run as a particular gid.
+	RunAsGroupStrategyMustRunAs RunAsGroupStrategy = "MustRunAs"
+	// RunAsGroupStrategyRunAsAny means that container may make requests for any gid.
+	RunAsGroupStrategyRunAsAny RunAsGroupStrategy = "RunAsAny"
+)
+
+// FSGroupStrategyOptions defines the strategy type and options used to create the strategy.
+type FSGroupStrategyOptions struct {
+	// rule is the strategy that will dictate what FSGroup is used in the SecurityContext.
+	// +optional
+	Rule FSGroupStrategyType `json:"rule,omitempty" protobuf:"bytes,1,opt,name=rule,casttype=FSGroupStrategyType"`
+	// ranges are the allowed ranges of fs groups.  If you would like to force a single
+	// fs group then supply a single range with the same start and end. Required for MustRunAs.
+	// +optional
+	Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"`
+}
+
+// FSGroupStrategyType denotes strategy types for generating FSGroup values for a
+// SecurityContext
+type FSGroupStrategyType string
+
+const (
+	// FSGroupStrategyMayRunAs means that container does not need to have FSGroup of X applied.
+	// However, when FSGroups are specified, they have to fall in the defined range.
+	FSGroupStrategyMayRunAs FSGroupStrategyType = "MayRunAs"
+	// FSGroupStrategyMustRunAs means that container must have FSGroup of X applied.
+	FSGroupStrategyMustRunAs FSGroupStrategyType = "MustRunAs"
+	// FSGroupStrategyRunAsAny means that container may make requests for any FSGroup labels.
+	FSGroupStrategyRunAsAny FSGroupStrategyType = "RunAsAny"
+)
+
+// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy.
+type SupplementalGroupsStrategyOptions struct {
+	// rule is the strategy that will dictate what supplemental groups is used in the SecurityContext.
+	// +optional
+	Rule SupplementalGroupsStrategyType `json:"rule,omitempty" protobuf:"bytes,1,opt,name=rule,casttype=SupplementalGroupsStrategyType"`
+	// ranges are the allowed ranges of supplemental groups.  If you would like to force a single
+	// supplemental group then supply a single range with the same start and end. Required for MustRunAs.
+	// +optional
+	Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"`
+}
+
+// SupplementalGroupsStrategyType denotes strategy types for determining valid supplemental
+// groups for a SecurityContext.
+type SupplementalGroupsStrategyType string
+
+const (
+	// SupplementalGroupsStrategyMayRunAs means that container does not need to run with a particular gid.
+	// However, when gids are specified, they have to fall in the defined range.
+	SupplementalGroupsStrategyMayRunAs SupplementalGroupsStrategyType = "MayRunAs"
+	// SupplementalGroupsStrategyMustRunAs means that container must run as a particular gid.
+	SupplementalGroupsStrategyMustRunAs SupplementalGroupsStrategyType = "MustRunAs"
+	// SupplementalGroupsStrategyRunAsAny means that container may make requests for any gid.
+	SupplementalGroupsStrategyRunAsAny SupplementalGroupsStrategyType = "RunAsAny"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PodSecurityPolicyList is a list of PodSecurityPolicy objects.
+type PodSecurityPolicyList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// items is a list of schema objects.
+	Items []PodSecurityPolicy `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
diff --git a/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go
new file mode 100644
index 0000000..547ef18
--- /dev/null
+++ b/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go
@@ -0,0 +1,222 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_AllowedFlexVolume = map[string]string{
+	"":       "AllowedFlexVolume represents a single Flexvolume that is allowed to be used.",
+	"driver": "driver is the name of the Flexvolume driver.",
+}
+
+func (AllowedFlexVolume) SwaggerDoc() map[string]string {
+	return map_AllowedFlexVolume
+}
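Each generated map keys field documentation by JSON field name, with the empty key holding the type-level description. A hypothetical lookup (assuming fmt is imported):

docs := (AllowedFlexVolume{}).SwaggerDoc()
fmt.Println(docs[""])       // type-level description of AllowedFlexVolume
fmt.Println(docs["driver"]) // doc string for the "driver" field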
+
+var map_AllowedHostPath = map[string]string{
+	"":           "AllowedHostPath defines the host volume conditions that will be enabled by a policy for pods to use. It requires the path prefix to be defined.",
+	"pathPrefix": "pathPrefix is the path prefix that the host volume must match. It does not support `*`. Trailing slashes are trimmed when validating the path prefix with a host path.\n\nExamples: `/foo` would allow `/foo`, `/foo/` and `/foo/bar` `/foo` would not allow `/food` or `/etc/foo`",
+	"readOnly":   "when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly.",
+}
+
+func (AllowedHostPath) SwaggerDoc() map[string]string {
+	return map_AllowedHostPath
+}
+
+var map_Eviction = map[string]string{
+	"":              "Eviction evicts a pod from its node subject to certain policies and safety constraints. This is a subresource of Pod.  A request to cause such an eviction is created by POSTing to .../pods/<pod name>/evictions.",
+	"metadata":      "ObjectMeta describes the pod that is being evicted.",
+	"deleteOptions": "DeleteOptions may be provided",
+}
+
+func (Eviction) SwaggerDoc() map[string]string {
+	return map_Eviction
+}
+
+var map_FSGroupStrategyOptions = map[string]string{
+	"":       "FSGroupStrategyOptions defines the strategy type and options used to create the strategy.",
+	"rule":   "rule is the strategy that will dictate what FSGroup is used in the SecurityContext.",
+	"ranges": "ranges are the allowed ranges of fs groups.  If you would like to force a single fs group then supply a single range with the same start and end. Required for MustRunAs.",
+}
+
+func (FSGroupStrategyOptions) SwaggerDoc() map[string]string {
+	return map_FSGroupStrategyOptions
+}
+
+var map_HostPortRange = map[string]string{
+	"":    "HostPortRange defines a range of host ports that will be enabled by a policy for pods to use.  It requires both the start and end to be defined.",
+	"min": "min is the start of the range, inclusive.",
+	"max": "max is the end of the range, inclusive.",
+}
+
+func (HostPortRange) SwaggerDoc() map[string]string {
+	return map_HostPortRange
+}
+
+var map_IDRange = map[string]string{
+	"":    "IDRange provides a min/max of an allowed range of IDs.",
+	"min": "min is the start of the range, inclusive.",
+	"max": "max is the end of the range, inclusive.",
+}
+
+func (IDRange) SwaggerDoc() map[string]string {
+	return map_IDRange
+}
+
+var map_PodDisruptionBudget = map[string]string{
+	"":       "PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods",
+	"spec":   "Specification of the desired behavior of the PodDisruptionBudget.",
+	"status": "Most recently observed status of the PodDisruptionBudget.",
+}
+
+func (PodDisruptionBudget) SwaggerDoc() map[string]string {
+	return map_PodDisruptionBudget
+}
+
+var map_PodDisruptionBudgetList = map[string]string{
+	"": "PodDisruptionBudgetList is a collection of PodDisruptionBudgets.",
+}
+
+func (PodDisruptionBudgetList) SwaggerDoc() map[string]string {
+	return map_PodDisruptionBudgetList
+}
+
+var map_PodDisruptionBudgetSpec = map[string]string{
+	"":               "PodDisruptionBudgetSpec is a description of a PodDisruptionBudget.",
+	"minAvailable":   "An eviction is allowed if at least \"minAvailable\" pods selected by \"selector\" will still be available after the eviction, i.e. even in the absence of the evicted pod.  So for example you can prevent all voluntary evictions by specifying \"100%\".",
+	"selector":       "Label query over pods whose evictions are managed by the disruption budget.",
+	"maxUnavailable": "An eviction is allowed if at most \"maxUnavailable\" pods selected by \"selector\" are unavailable after the eviction, i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions by specifying 0. This is a mutually exclusive setting with \"minAvailable\".",
+}
+
+func (PodDisruptionBudgetSpec) SwaggerDoc() map[string]string {
+	return map_PodDisruptionBudgetSpec
+}
+
+var map_PodDisruptionBudgetStatus = map[string]string{
+	"":                   "PodDisruptionBudgetStatus represents information about the status of a PodDisruptionBudget. Status may trail the actual state of a system.",
+	"observedGeneration": "Most recent generation observed when updating this PDB status. PodDisruptionsAllowed and other status informatio is valid only if observedGeneration equals to PDB's object generation.",
+	"disruptedPods":      "DisruptedPods contains information about pods whose eviction was processed by the API server eviction subresource handler but has not yet been observed by the PodDisruptionBudget controller. A pod will be in this map from the time when the API server processed the eviction request to the time when the pod is seen by PDB controller as having been marked for deletion (or after a timeout). The key in the map is the name of the pod and the value is the time when the API server processed the eviction request. If the deletion didn't occur and a pod is still there it will be removed from the list automatically by PodDisruptionBudget controller after some time. If everything goes smooth this map should be empty for the most of the time. Large number of entries in the map may indicate problems with pod deletions.",
+	"disruptionsAllowed": "Number of pod disruptions that are currently allowed.",
+	"currentHealthy":     "current number of healthy pods",
+	"desiredHealthy":     "minimum desired number of healthy pods",
+	"expectedPods":       "total number of pods counted by this disruption budget",
+}
+
+func (PodDisruptionBudgetStatus) SwaggerDoc() map[string]string {
+	return map_PodDisruptionBudgetStatus
+}
+
+var map_PodSecurityPolicy = map[string]string{
+	"":         "PodSecurityPolicy governs the ability to make requests that affect the Security Context that will be applied to a pod and container.",
+	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"spec":     "spec defines the policy enforced.",
+}
+
+func (PodSecurityPolicy) SwaggerDoc() map[string]string {
+	return map_PodSecurityPolicy
+}
+
+var map_PodSecurityPolicyList = map[string]string{
+	"":         "PodSecurityPolicyList is a list of PodSecurityPolicy objects.",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"items":    "items is a list of schema objects.",
+}
+
+func (PodSecurityPolicyList) SwaggerDoc() map[string]string {
+	return map_PodSecurityPolicyList
+}
+
+var map_PodSecurityPolicySpec = map[string]string{
+	"":                                "PodSecurityPolicySpec defines the policy enforced.",
+	"privileged":                      "privileged determines if a pod can request to be run as privileged.",
+	"defaultAddCapabilities":          "defaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability.  You may not list a capability in both defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly allowed, and need not be included in the allowedCapabilities list.",
+	"requiredDropCapabilities":        "requiredDropCapabilities are the capabilities that will be dropped from the container.  These are required to be dropped and cannot be added.",
+	"allowedCapabilities":             "allowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field may be added at the pod author's discretion. You must not list a capability in both allowedCapabilities and requiredDropCapabilities.",
+	"volumes":                         "volumes is a white list of allowed volume plugins. Empty indicates that no volumes may be used. To allow all volumes you may use '*'.",
+	"hostNetwork":                     "hostNetwork determines if the policy allows the use of HostNetwork in the pod spec.",
+	"hostPorts":                       "hostPorts determines which host port ranges are allowed to be exposed.",
+	"hostPID":                         "hostPID determines if the policy allows the use of HostPID in the pod spec.",
+	"hostIPC":                         "hostIPC determines if the policy allows the use of HostIPC in the pod spec.",
+	"seLinux":                         "seLinux is the strategy that will dictate the allowable labels that may be set.",
+	"runAsUser":                       "runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.",
+	"runAsGroup":                      "RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. If this field is omitted, the pod's RunAsGroup can take any value. This field requires the RunAsGroup feature gate to be enabled.",
+	"supplementalGroups":              "supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.",
+	"fsGroup":                         "fsGroup is the strategy that will dictate what fs group is used by the SecurityContext.",
+	"readOnlyRootFilesystem":          "readOnlyRootFilesystem when set to true will force containers to run with a read only root file system.  If the container specifically requests to run with a non-read only root file system the PSP should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to.",
+	"defaultAllowPrivilegeEscalation": "defaultAllowPrivilegeEscalation controls the default setting for whether a process can gain more privileges than its parent process.",
+	"allowPrivilegeEscalation":        "allowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true.",
+	"allowedHostPaths":                "allowedHostPaths is a white list of allowed host paths. Empty indicates that all host paths may be used.",
+	"allowedFlexVolumes":              "allowedFlexVolumes is a whitelist of allowed Flexvolumes.  Empty or nil indicates that all Flexvolumes may be used.  This parameter is effective only when the usage of the Flexvolumes is allowed in the \"volumes\" field.",
+	"allowedUnsafeSysctls":            "allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection.\n\nExamples: e.g. \"foo/*\" allows \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" allows \"foo.bar\", \"foo.baz\", etc.",
+	"forbiddenSysctls":                "forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.\n\nExamples: e.g. \"foo/*\" forbids \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" forbids \"foo.bar\", \"foo.baz\", etc.",
+	"allowedProcMountTypes":           "AllowedProcMountTypes is a whitelist of allowed ProcMountTypes. Empty or nil indicates that only the DefaultProcMountType may be used. This requires the ProcMountType feature flag to be enabled.",
+}
+
+func (PodSecurityPolicySpec) SwaggerDoc() map[string]string {
+	return map_PodSecurityPolicySpec
+}
+
+var map_RunAsGroupStrategyOptions = map[string]string{
+	"":       "RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy.",
+	"rule":   "rule is the strategy that will dictate the allowable RunAsGroup values that may be set.",
+	"ranges": "ranges are the allowed ranges of gids that may be used. If you would like to force a single gid then supply a single range with the same start and end. Required for MustRunAs.",
+}
+
+func (RunAsGroupStrategyOptions) SwaggerDoc() map[string]string {
+	return map_RunAsGroupStrategyOptions
+}
+
+var map_RunAsUserStrategyOptions = map[string]string{
+	"":       "RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy.",
+	"rule":   "rule is the strategy that will dictate the allowable RunAsUser values that may be set.",
+	"ranges": "ranges are the allowed ranges of uids that may be used. If you would like to force a single uid then supply a single range with the same start and end. Required for MustRunAs.",
+}
+
+func (RunAsUserStrategyOptions) SwaggerDoc() map[string]string {
+	return map_RunAsUserStrategyOptions
+}
+
+var map_SELinuxStrategyOptions = map[string]string{
+	"":               "SELinuxStrategyOptions defines the strategy type and any options used to create the strategy.",
+	"rule":           "rule is the strategy that will dictate the allowable labels that may be set.",
+	"seLinuxOptions": "seLinuxOptions required to run as; required for MustRunAs More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/",
+}
+
+func (SELinuxStrategyOptions) SwaggerDoc() map[string]string {
+	return map_SELinuxStrategyOptions
+}
+
+var map_SupplementalGroupsStrategyOptions = map[string]string{
+	"":       "SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy.",
+	"rule":   "rule is the strategy that will dictate what supplemental groups is used in the SecurityContext.",
+	"ranges": "ranges are the allowed ranges of supplemental groups.  If you would like to force a single supplemental group then supply a single range with the same start and end. Required for MustRunAs.",
+}
+
+func (SupplementalGroupsStrategyOptions) SwaggerDoc() map[string]string {
+	return map_SupplementalGroupsStrategyOptions
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..1a02ae6
--- /dev/null
+++ b/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go
@@ -0,0 +1,488 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	corev1 "k8s.io/api/core/v1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	intstr "k8s.io/apimachinery/pkg/util/intstr"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AllowedFlexVolume) DeepCopyInto(out *AllowedFlexVolume) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedFlexVolume.
+func (in *AllowedFlexVolume) DeepCopy() *AllowedFlexVolume {
+	if in == nil {
+		return nil
+	}
+	out := new(AllowedFlexVolume)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AllowedHostPath) DeepCopyInto(out *AllowedHostPath) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedHostPath.
+func (in *AllowedHostPath) DeepCopy() *AllowedHostPath {
+	if in == nil {
+		return nil
+	}
+	out := new(AllowedHostPath)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Eviction) DeepCopyInto(out *Eviction) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	if in.DeleteOptions != nil {
+		in, out := &in.DeleteOptions, &out.DeleteOptions
+		*out = new(v1.DeleteOptions)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Eviction.
+func (in *Eviction) DeepCopy() *Eviction {
+	if in == nil {
+		return nil
+	}
+	out := new(Eviction)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Eviction) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FSGroupStrategyOptions) DeepCopyInto(out *FSGroupStrategyOptions) {
+	*out = *in
+	if in.Ranges != nil {
+		in, out := &in.Ranges, &out.Ranges
+		*out = make([]IDRange, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FSGroupStrategyOptions.
+func (in *FSGroupStrategyOptions) DeepCopy() *FSGroupStrategyOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(FSGroupStrategyOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HostPortRange) DeepCopyInto(out *HostPortRange) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPortRange.
+func (in *HostPortRange) DeepCopy() *HostPortRange {
+	if in == nil {
+		return nil
+	}
+	out := new(HostPortRange)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IDRange) DeepCopyInto(out *IDRange) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IDRange.
+func (in *IDRange) DeepCopy() *IDRange {
+	if in == nil {
+		return nil
+	}
+	out := new(IDRange)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodDisruptionBudget) DeepCopyInto(out *PodDisruptionBudget) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDisruptionBudget.
+func (in *PodDisruptionBudget) DeepCopy() *PodDisruptionBudget {
+	if in == nil {
+		return nil
+	}
+	out := new(PodDisruptionBudget)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodDisruptionBudget) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodDisruptionBudgetList) DeepCopyInto(out *PodDisruptionBudgetList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]PodDisruptionBudget, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDisruptionBudgetList.
+func (in *PodDisruptionBudgetList) DeepCopy() *PodDisruptionBudgetList {
+	if in == nil {
+		return nil
+	}
+	out := new(PodDisruptionBudgetList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodDisruptionBudgetList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodDisruptionBudgetSpec) DeepCopyInto(out *PodDisruptionBudgetSpec) {
+	*out = *in
+	if in.MinAvailable != nil {
+		in, out := &in.MinAvailable, &out.MinAvailable
+		*out = new(intstr.IntOrString)
+		**out = **in
+	}
+	if in.Selector != nil {
+		in, out := &in.Selector, &out.Selector
+		*out = new(v1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.MaxUnavailable != nil {
+		in, out := &in.MaxUnavailable, &out.MaxUnavailable
+		*out = new(intstr.IntOrString)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDisruptionBudgetSpec.
+func (in *PodDisruptionBudgetSpec) DeepCopy() *PodDisruptionBudgetSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(PodDisruptionBudgetSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodDisruptionBudgetStatus) DeepCopyInto(out *PodDisruptionBudgetStatus) {
+	*out = *in
+	if in.DisruptedPods != nil {
+		in, out := &in.DisruptedPods, &out.DisruptedPods
+		*out = make(map[string]v1.Time, len(*in))
+		for key, val := range *in {
+			(*out)[key] = *val.DeepCopy()
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDisruptionBudgetStatus.
+func (in *PodDisruptionBudgetStatus) DeepCopy() *PodDisruptionBudgetStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(PodDisruptionBudgetStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodSecurityPolicy) DeepCopyInto(out *PodSecurityPolicy) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicy.
+func (in *PodSecurityPolicy) DeepCopy() *PodSecurityPolicy {
+	if in == nil {
+		return nil
+	}
+	out := new(PodSecurityPolicy)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodSecurityPolicy) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodSecurityPolicyList) DeepCopyInto(out *PodSecurityPolicyList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]PodSecurityPolicy, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicyList.
+func (in *PodSecurityPolicyList) DeepCopy() *PodSecurityPolicyList {
+	if in == nil {
+		return nil
+	}
+	out := new(PodSecurityPolicyList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodSecurityPolicyList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) {
+	*out = *in
+	if in.DefaultAddCapabilities != nil {
+		in, out := &in.DefaultAddCapabilities, &out.DefaultAddCapabilities
+		*out = make([]corev1.Capability, len(*in))
+		copy(*out, *in)
+	}
+	if in.RequiredDropCapabilities != nil {
+		in, out := &in.RequiredDropCapabilities, &out.RequiredDropCapabilities
+		*out = make([]corev1.Capability, len(*in))
+		copy(*out, *in)
+	}
+	if in.AllowedCapabilities != nil {
+		in, out := &in.AllowedCapabilities, &out.AllowedCapabilities
+		*out = make([]corev1.Capability, len(*in))
+		copy(*out, *in)
+	}
+	if in.Volumes != nil {
+		in, out := &in.Volumes, &out.Volumes
+		*out = make([]FSType, len(*in))
+		copy(*out, *in)
+	}
+	if in.HostPorts != nil {
+		in, out := &in.HostPorts, &out.HostPorts
+		*out = make([]HostPortRange, len(*in))
+		copy(*out, *in)
+	}
+	in.SELinux.DeepCopyInto(&out.SELinux)
+	in.RunAsUser.DeepCopyInto(&out.RunAsUser)
+	if in.RunAsGroup != nil {
+		in, out := &in.RunAsGroup, &out.RunAsGroup
+		*out = new(RunAsGroupStrategyOptions)
+		(*in).DeepCopyInto(*out)
+	}
+	in.SupplementalGroups.DeepCopyInto(&out.SupplementalGroups)
+	in.FSGroup.DeepCopyInto(&out.FSGroup)
+	if in.DefaultAllowPrivilegeEscalation != nil {
+		in, out := &in.DefaultAllowPrivilegeEscalation, &out.DefaultAllowPrivilegeEscalation
+		*out = new(bool)
+		**out = **in
+	}
+	if in.AllowPrivilegeEscalation != nil {
+		in, out := &in.AllowPrivilegeEscalation, &out.AllowPrivilegeEscalation
+		*out = new(bool)
+		**out = **in
+	}
+	if in.AllowedHostPaths != nil {
+		in, out := &in.AllowedHostPaths, &out.AllowedHostPaths
+		*out = make([]AllowedHostPath, len(*in))
+		copy(*out, *in)
+	}
+	if in.AllowedFlexVolumes != nil {
+		in, out := &in.AllowedFlexVolumes, &out.AllowedFlexVolumes
+		*out = make([]AllowedFlexVolume, len(*in))
+		copy(*out, *in)
+	}
+	if in.AllowedUnsafeSysctls != nil {
+		in, out := &in.AllowedUnsafeSysctls, &out.AllowedUnsafeSysctls
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.ForbiddenSysctls != nil {
+		in, out := &in.ForbiddenSysctls, &out.ForbiddenSysctls
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.AllowedProcMountTypes != nil {
+		in, out := &in.AllowedProcMountTypes, &out.AllowedProcMountTypes
+		*out = make([]corev1.ProcMountType, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicySpec.
+func (in *PodSecurityPolicySpec) DeepCopy() *PodSecurityPolicySpec {
+	if in == nil {
+		return nil
+	}
+	out := new(PodSecurityPolicySpec)
+	in.DeepCopyInto(out)
+	return out
+}
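Because every non-nil slice above is re-made and copied element by element, a copy is independent of its source. A small sketch of that property:

orig := &PodSecurityPolicySpec{AllowedUnsafeSysctls: []string{"kernel.msg*"}}
cp := orig.DeepCopy()
orig.AllowedUnsafeSysctls[0] = "net.*"
// cp.AllowedUnsafeSysctls[0] is still "kernel.msg*"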
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RunAsGroupStrategyOptions) DeepCopyInto(out *RunAsGroupStrategyOptions) {
+	*out = *in
+	if in.Ranges != nil {
+		in, out := &in.Ranges, &out.Ranges
+		*out = make([]IDRange, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsGroupStrategyOptions.
+func (in *RunAsGroupStrategyOptions) DeepCopy() *RunAsGroupStrategyOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(RunAsGroupStrategyOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RunAsUserStrategyOptions) DeepCopyInto(out *RunAsUserStrategyOptions) {
+	*out = *in
+	if in.Ranges != nil {
+		in, out := &in.Ranges, &out.Ranges
+		*out = make([]IDRange, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsUserStrategyOptions.
+func (in *RunAsUserStrategyOptions) DeepCopy() *RunAsUserStrategyOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(RunAsUserStrategyOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SELinuxStrategyOptions) DeepCopyInto(out *SELinuxStrategyOptions) {
+	*out = *in
+	if in.SELinuxOptions != nil {
+		in, out := &in.SELinuxOptions, &out.SELinuxOptions
+		*out = new(corev1.SELinuxOptions)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SELinuxStrategyOptions.
+func (in *SELinuxStrategyOptions) DeepCopy() *SELinuxStrategyOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(SELinuxStrategyOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SupplementalGroupsStrategyOptions) DeepCopyInto(out *SupplementalGroupsStrategyOptions) {
+	*out = *in
+	if in.Ranges != nil {
+		in, out := &in.Ranges, &out.Ranges
+		*out = make([]IDRange, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SupplementalGroupsStrategyOptions.
+func (in *SupplementalGroupsStrategyOptions) DeepCopy() *SupplementalGroupsStrategyOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(SupplementalGroupsStrategyOptions)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/vendor/k8s.io/api/rbac/v1/doc.go b/vendor/k8s.io/api/rbac/v1/doc.go
new file mode 100644
index 0000000..76899ef
--- /dev/null
+++ b/vendor/k8s.io/api/rbac/v1/doc.go
@@ -0,0 +1,22 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:openapi-gen=true
+
+// +groupName=rbac.authorization.k8s.io
+
+package v1 // import "k8s.io/api/rbac/v1"
diff --git a/vendor/k8s.io/api/rbac/v1/generated.proto b/vendor/k8s.io/api/rbac/v1/generated.proto
new file mode 100644
index 0000000..4b321a7
--- /dev/null
+++ b/vendor/k8s.io/api/rbac/v1/generated.proto
@@ -0,0 +1,197 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.api.rbac.v1;
+
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1";
+
+// AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole
+message AggregationRule {
+  // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules.
+  // If any of the selectors match, then the ClusterRole's permissions will be added
+  // +optional
+  repeated k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector clusterRoleSelectors = 1;
+}
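Expressed with the corresponding Go types, an aggregated ClusterRole might look like the sketch below. The label key is an invented example, and rbacv1/metav1 are assumed aliases for k8s.io/api/rbac/v1 and k8s.io/apimachinery/pkg/apis/meta/v1.

aggregated := rbacv1.ClusterRole{
	ObjectMeta: metav1.ObjectMeta{Name: "view"},
	AggregationRule: &rbacv1.AggregationRule{
		ClusterRoleSelectors: []metav1.LabelSelector{
			{MatchLabels: map[string]string{"rbac.example.com/aggregate-to-view": "true"}},
		},
	},
	// Rules are left empty: once aggregationRule is set they are
	// controller-managed, as the ClusterRole message below notes.
}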
+
+// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.
+message ClusterRole {
+  // Standard object's metadata.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Rules holds all the PolicyRules for this ClusterRole
+  repeated PolicyRule rules = 2;
+
+  // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole.
+  // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be
+  // stomped by the controller.
+  // +optional
+  optional AggregationRule aggregationRule = 3;
+}
+
+// ClusterRoleBinding references a ClusterRole, but does not contain it.  It can reference a ClusterRole in the global namespace,
+// and adds who information via Subject.
+message ClusterRoleBinding {
+  // Standard object's metadata.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Subjects holds references to the objects the role applies to.
+  // +optional
+  repeated Subject subjects = 2;
+
+  // RoleRef can only reference a ClusterRole in the global namespace.
+  // If the RoleRef cannot be resolved, the Authorizer must return an error.
+  optional RoleRef roleRef = 3;
+}
+
+// ClusterRoleBindingList is a collection of ClusterRoleBindings
+message ClusterRoleBindingList {
+  // Standard object's metadata.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is a list of ClusterRoleBindings
+  repeated ClusterRoleBinding items = 2;
+}
+
+// ClusterRoleList is a collection of ClusterRoles
+message ClusterRoleList {
+  // Standard object's metadata.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is a list of ClusterRoles
+  repeated ClusterRole items = 2;
+}
+
+// PolicyRule holds information that describes a policy rule, but does not contain information
+// about who the rule applies to or which namespace the rule applies to.
+message PolicyRule {
+  // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule.  VerbAll represents all kinds.
+  repeated string verbs = 1;
+
+  // APIGroups is the name of the APIGroup that contains the resources.  If multiple API groups are specified, any action requested against one of
+  // the enumerated resources in any API group will be allowed.
+  // +optional
+  repeated string apiGroups = 2;
+
+  // Resources is a list of resources this rule applies to.  ResourceAll represents all resources.
+  // +optional
+  repeated string resources = 3;
+
+  // ResourceNames is an optional white list of names that the rule applies to.  An empty set means that everything is allowed.
+  // +optional
+  repeated string resourceNames = 4;
+
+  // NonResourceURLs is a set of partial URLs that a user should have access to.  *s are allowed, but only as the full, final step in the path
+  // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding.
+  // Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"),  but not both.
+  // +optional
+  repeated string nonResourceURLs = 5;
+}
+
+// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.
+message Role {
+  // Standard object's metadata.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Rules holds all the PolicyRules for this Role
+  repeated PolicyRule rules = 2;
+}
+
+// RoleBinding references a role, but does not contain it.  It can reference a Role in the same namespace or a ClusterRole in the global namespace.
+// It adds who information via Subjects and namespace information by which namespace it exists in.  RoleBindings in a given
+// namespace only have effect in that namespace.
+message RoleBinding {
+  // Standard object's metadata.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Subjects holds references to the objects the role applies to.
+  // +optional
+  repeated Subject subjects = 2;
+
+  // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace.
+  // If the RoleRef cannot be resolved, the Authorizer must return an error.
+  optional RoleRef roleRef = 3;
+}
+
+// RoleBindingList is a collection of RoleBindings
+message RoleBindingList {
+  // Standard object's metadata.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is a list of RoleBindings
+  repeated RoleBinding items = 2;
+}
+
+// RoleList is a collection of Roles
+message RoleList {
+  // Standard object's metadata.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is a list of Roles
+  repeated Role items = 2;
+}
+
+// RoleRef contains information that points to the role being used
+message RoleRef {
+  // APIGroup is the group for the resource being referenced
+  optional string apiGroup = 1;
+
+  // Kind is the type of resource being referenced
+  optional string kind = 2;
+
+  // Name is the name of resource being referenced
+  optional string name = 3;
+}
+
+// Subject contains a reference to the object or user identities a role binding applies to.  This can either hold a direct API object reference,
+// or a value for non-objects such as user and group names.
+message Subject {
+  // Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount".
+  // If the Authorizer does not recognize the kind value, the Authorizer should report an error.
+  optional string kind = 1;
+
+  // APIGroup holds the API group of the referenced subject.
+  // Defaults to "" for ServiceAccount subjects.
+  // Defaults to "rbac.authorization.k8s.io" for User and Group subjects.
+  // +optional
+  optional string apiGroup = 2;
+
+  // Name of the object being referenced.
+  optional string name = 3;
+
+  // Namespace of the referenced object.  If the object kind is non-namespaced, such as "User" or "Group", and this value is not empty
+  // the Authorizer should report an error.
+  // +optional
+  optional string namespace = 4;
+}
+
diff --git a/vendor/k8s.io/api/rbac/v1/register.go b/vendor/k8s.io/api/rbac/v1/register.go
new file mode 100644
index 0000000..8f1fd46
--- /dev/null
+++ b/vendor/k8s.io/api/rbac/v1/register.go
@@ -0,0 +1,58 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+const GroupName = "rbac.authorization.k8s.io"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
+	// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+	SchemeBuilder      = runtime.NewSchemeBuilder(addKnownTypes)
+	localSchemeBuilder = &SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&Role{},
+		&RoleBinding{},
+		&RoleBindingList{},
+		&RoleList{},
+
+		&ClusterRole{},
+		&ClusterRoleBinding{},
+		&ClusterRoleBindingList{},
+		&ClusterRoleList{},
+	)
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
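A typical consumer registers these kinds with a scheme before encoding or decoding them. A minimal sketch, assuming this package is imported as rbacv1 and runtime is k8s.io/apimachinery/pkg/runtime:

scheme := runtime.NewScheme()
if err := rbacv1.AddToScheme(scheme); err != nil {
	panic(err) // fails only on conflicting registrations
}
// scheme now recognizes rbac.authorization.k8s.io/v1 kinds such as Role.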
diff --git a/vendor/k8s.io/api/rbac/v1/types.go b/vendor/k8s.io/api/rbac/v1/types.go
new file mode 100644
index 0000000..17163cb
--- /dev/null
+++ b/vendor/k8s.io/api/rbac/v1/types.go
@@ -0,0 +1,235 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// Authorization is calculated against
+// 1. evaluation of ClusterRoleBindings - short circuit on match
+// 2. evaluation of RoleBindings in the namespace requested - short circuit on match
+// 3. deny by default
+
+const (
+	APIGroupAll    = "*"
+	ResourceAll    = "*"
+	VerbAll        = "*"
+	NonResourceAll = "*"
+
+	GroupKind          = "Group"
+	ServiceAccountKind = "ServiceAccount"
+	UserKind           = "User"
+
+	// AutoUpdateAnnotationKey is the name of an annotation which prevents reconciliation if set to "false"
+	AutoUpdateAnnotationKey = "rbac.authorization.kubernetes.io/autoupdate"
+)
+
+// Authorization is calculated against
+// 1. evaluation of ClusterRoleBindings - short circuit on match
+// 2. evaluation of RoleBindings in the namespace requested - short circuit on match
+// 3. deny by default
+
+// PolicyRule holds information that describes a policy rule, but does not contain information
+// about who the rule applies to or which namespace the rule applies to.
+type PolicyRule struct {
+	// Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule.  VerbAll represents all kinds.
+	Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"`
+
+	// APIGroups is the name of the APIGroup that contains the resources.  If multiple API groups are specified, any action requested against one of
+	// the enumerated resources in any API group will be allowed.
+	// +optional
+	APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,2,rep,name=apiGroups"`
+	// Resources is a list of resources this rule applies to.  ResourceAll represents all resources.
+	// +optional
+	Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"`
+	// ResourceNames is an optional white list of names that the rule applies to.  An empty set means that everything is allowed.
+	// +optional
+	ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,4,rep,name=resourceNames"`
+
+	// NonResourceURLs is a set of partial URLs that a user should have access to.  *s are allowed, but only as the full, final step in the path.
+	// Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding.
+	// Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"),  but not both.
+	// +optional
+	NonResourceURLs []string `json:"nonResourceURLs,omitempty" protobuf:"bytes,5,rep,name=nonResourceURLs"`
+}
+
+// Subject contains a reference to the object or user identities a role binding applies to.  This can either hold a direct API object reference,
+// or a value for non-objects such as user and group names.
+type Subject struct {
+	// Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount".
+	// If the Authorizer does not recognize the kind value, the Authorizer should report an error.
+	Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"`
+	// APIGroup holds the API group of the referenced subject.
+	// Defaults to "" for ServiceAccount subjects.
+	// Defaults to "rbac.authorization.k8s.io" for User and Group subjects.
+	// +optional
+	APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,2,opt,name=apiGroup"`
+	// Name of the object being referenced.
+	Name string `json:"name" protobuf:"bytes,3,opt,name=name"`
+	// Namespace of the referenced object.  If the object kind is non-namespaced, such as "User" or "Group", and this value is not empty,
+	// the Authorizer should report an error.
+	// +optional
+	Namespace string `json:"namespace,omitempty" protobuf:"bytes,4,opt,name=namespace"`
+}
+
+// RoleRef contains information that points to the role being used
+type RoleRef struct {
+	// APIGroup is the group for the resource being referenced
+	APIGroup string `json:"apiGroup" protobuf:"bytes,1,opt,name=apiGroup"`
+	// Kind is the type of resource being referenced
+	Kind string `json:"kind" protobuf:"bytes,2,opt,name=kind"`
+	// Name is the name of resource being referenced
+	Name string `json:"name" protobuf:"bytes,3,opt,name=name"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.
+type Role struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Rules holds all the PolicyRules for this Role
+	Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// RoleBinding references a role, but does not contain it.  It can reference a Role in the same namespace or a ClusterRole in the global namespace.
+// It adds who information via Subjects and namespace information by which namespace it exists in.  RoleBindings in a given
+// namespace only have effect in that namespace.
+type RoleBinding struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Subjects holds references to the objects the role applies to.
+	// +optional
+	Subjects []Subject `json:"subjects,omitempty" protobuf:"bytes,2,rep,name=subjects"`
+
+	// RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace.
+	// If the RoleRef cannot be resolved, the Authorizer must return an error.
+	RoleRef RoleRef `json:"roleRef" protobuf:"bytes,3,opt,name=roleRef"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// RoleBindingList is a collection of RoleBindings
+type RoleBindingList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is a list of RoleBindings
+	Items []RoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// RoleList is a collection of Roles
+type RoleList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is a list of Roles
+	Items []Role `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.
+type ClusterRole struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Rules holds all the PolicyRules for this ClusterRole
+	Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"`
+
+	// AggregationRule is an optional field that describes how to build the Rules for this ClusterRole.
+	// If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be
+	// stomped by the controller.
+	// +optional
+	AggregationRule *AggregationRule `json:"aggregationRule,omitempty" protobuf:"bytes,3,opt,name=aggregationRule"`
+}
+
+// AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole
+type AggregationRule struct {
+	// ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules.
+	// If any of the selectors match, then the ClusterRole's permissions will be added
+	// +optional
+	ClusterRoleSelectors []metav1.LabelSelector `json:"clusterRoleSelectors,omitempty" protobuf:"bytes,1,rep,name=clusterRoleSelectors"`
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterRoleBinding references a ClusterRole, but does not contain it.  It can reference a ClusterRole in the global namespace,
+// and adds who information via Subject.
+type ClusterRoleBinding struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Subjects holds references to the objects the role applies to.
+	// +optional
+	Subjects []Subject `json:"subjects,omitempty" protobuf:"bytes,2,rep,name=subjects"`
+
+	// RoleRef can only reference a ClusterRole in the global namespace.
+	// If the RoleRef cannot be resolved, the Authorizer must return an error.
+	RoleRef RoleRef `json:"roleRef" protobuf:"bytes,3,opt,name=roleRef"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterRoleBindingList is a collection of ClusterRoleBindings
+type ClusterRoleBindingList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is a list of ClusterRoleBindings
+	Items []ClusterRoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterRoleList is a collection of ClusterRoles
+type ClusterRoleList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is a list of ClusterRoles
+	Items []ClusterRole `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
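
To make the relationship between these types concrete, here is a short, illustrative sketch of a Role granting read access to pods and a RoleBinding attaching it to a ServiceAccount; the names "pod-reader", "demo", "read-pods", and "builder" are hypothetical:

package main

import (
	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	role := rbacv1.Role{
		ObjectMeta: metav1.ObjectMeta{Name: "pod-reader", Namespace: "demo"},
		Rules: []rbacv1.PolicyRule{{
			APIGroups: []string{""}, // "" selects the core API group
			Resources: []string{"pods"},
			Verbs:     []string{"get", "list", "watch"},
		}},
	}

	binding := rbacv1.RoleBinding{
		ObjectMeta: metav1.ObjectMeta{Name: "read-pods", Namespace: "demo"},
		Subjects: []rbacv1.Subject{{
			Kind:      rbacv1.ServiceAccountKind,
			Name:      "builder",
			Namespace: "demo",
		}},
		// RoleRef must resolve; otherwise the Authorizer returns an error,
		// per the field comment above.
		RoleRef: rbacv1.RoleRef{
			APIGroup: rbacv1.GroupName,
			Kind:     "Role",
			Name:     role.Name,
		},
	}
	_ = binding
}
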
diff --git a/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go
new file mode 100644
index 0000000..83ce310
--- /dev/null
+++ b/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go
@@ -0,0 +1,158 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple lines or blocks that you want to ignore, use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_AggregationRule = map[string]string{
+	"":                     "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole",
+	"clusterRoleSelectors": "ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added",
+}
+
+func (AggregationRule) SwaggerDoc() map[string]string {
+	return map_AggregationRule
+}
+
+var map_ClusterRole = map[string]string{
+	"":                "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.",
+	"metadata":        "Standard object's metadata.",
+	"rules":           "Rules holds all the PolicyRules for this ClusterRole",
+	"aggregationRule": "AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller.",
+}
+
+func (ClusterRole) SwaggerDoc() map[string]string {
+	return map_ClusterRole
+}
+
+var map_ClusterRoleBinding = map[string]string{
+	"":         "ClusterRoleBinding references a ClusterRole, but not contain it.  It can reference a ClusterRole in the global namespace, and adds who information via Subject.",
+	"metadata": "Standard object's metadata.",
+	"subjects": "Subjects holds references to the objects the role applies to.",
+	"roleRef":  "RoleRef can only reference a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error.",
+}
+
+func (ClusterRoleBinding) SwaggerDoc() map[string]string {
+	return map_ClusterRoleBinding
+}
+
+var map_ClusterRoleBindingList = map[string]string{
+	"":         "ClusterRoleBindingList is a collection of ClusterRoleBindings",
+	"metadata": "Standard object's metadata.",
+	"items":    "Items is a list of ClusterRoleBindings",
+}
+
+func (ClusterRoleBindingList) SwaggerDoc() map[string]string {
+	return map_ClusterRoleBindingList
+}
+
+var map_ClusterRoleList = map[string]string{
+	"":         "ClusterRoleList is a collection of ClusterRoles",
+	"metadata": "Standard object's metadata.",
+	"items":    "Items is a list of ClusterRoles",
+}
+
+func (ClusterRoleList) SwaggerDoc() map[string]string {
+	return map_ClusterRoleList
+}
+
+var map_PolicyRule = map[string]string{
+	"":                "PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.",
+	"verbs":           "Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule.  VerbAll represents all kinds.",
+	"apiGroups":       "APIGroups is the name of the APIGroup that contains the resources.  If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed.",
+	"resources":       "Resources is a list of resources this rule applies to.  ResourceAll represents all resources.",
+	"resourceNames":   "ResourceNames is an optional white list of names that the rule applies to.  An empty set means that everything is allowed.",
+	"nonResourceURLs": "NonResourceURLs is a set of partial urls that a user should have access to.  *s are allowed, but only as the full, final step in the path Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. Rules can either apply to API resources (such as \"pods\" or \"secrets\") or non-resource URL paths (such as \"/api\"),  but not both.",
+}
+
+func (PolicyRule) SwaggerDoc() map[string]string {
+	return map_PolicyRule
+}
+
+var map_Role = map[string]string{
+	"":         "Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.",
+	"metadata": "Standard object's metadata.",
+	"rules":    "Rules holds all the PolicyRules for this Role",
+}
+
+func (Role) SwaggerDoc() map[string]string {
+	return map_Role
+}
+
+var map_RoleBinding = map[string]string{
+	"":         "RoleBinding references a role, but does not contain it.  It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in.  RoleBindings in a given namespace only have effect in that namespace.",
+	"metadata": "Standard object's metadata.",
+	"subjects": "Subjects holds references to the objects the role applies to.",
+	"roleRef":  "RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error.",
+}
+
+func (RoleBinding) SwaggerDoc() map[string]string {
+	return map_RoleBinding
+}
+
+var map_RoleBindingList = map[string]string{
+	"":         "RoleBindingList is a collection of RoleBindings",
+	"metadata": "Standard object's metadata.",
+	"items":    "Items is a list of RoleBindings",
+}
+
+func (RoleBindingList) SwaggerDoc() map[string]string {
+	return map_RoleBindingList
+}
+
+var map_RoleList = map[string]string{
+	"":         "RoleList is a collection of Roles",
+	"metadata": "Standard object's metadata.",
+	"items":    "Items is a list of Roles",
+}
+
+func (RoleList) SwaggerDoc() map[string]string {
+	return map_RoleList
+}
+
+var map_RoleRef = map[string]string{
+	"":         "RoleRef contains information that points to the role being used",
+	"apiGroup": "APIGroup is the group for the resource being referenced",
+	"kind":     "Kind is the type of resource being referenced",
+	"name":     "Name is the name of resource being referenced",
+}
+
+func (RoleRef) SwaggerDoc() map[string]string {
+	return map_RoleRef
+}
+
+var map_Subject = map[string]string{
+	"":          "Subject contains a reference to the object or user identities a role binding applies to.  This can either hold a direct API object reference, or a value for non-objects such as user and group names.",
+	"kind":      "Kind of object being referenced. Values defined by this API group are \"User\", \"Group\", and \"ServiceAccount\". If the Authorizer does not recognized the kind value, the Authorizer should report an error.",
+	"apiGroup":  "APIGroup holds the API group of the referenced subject. Defaults to \"\" for ServiceAccount subjects. Defaults to \"rbac.authorization.k8s.io\" for User and Group subjects.",
+	"name":      "Name of the object being referenced.",
+	"namespace": "Namespace of the referenced object.  If the object kind is non-namespace, such as \"User\" or \"Group\", and this value is not empty the Authorizer should report an error.",
+}
+
+func (Subject) SwaggerDoc() map[string]string {
+	return map_Subject
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
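
The generated maps are plain field-name-to-description lookups; the empty key carries the type-level description. An illustrative use (not part of the vendored files):

package main

import (
	"fmt"

	rbacv1 "k8s.io/api/rbac/v1"
)

func main() {
	docs := rbacv1.PolicyRule{}.SwaggerDoc()
	fmt.Println(docs[""])      // type-level description of PolicyRule
	fmt.Println(docs["verbs"]) // description of the "verbs" field
}
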
diff --git a/vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..07eb321
--- /dev/null
+++ b/vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go
@@ -0,0 +1,389 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AggregationRule) DeepCopyInto(out *AggregationRule) {
+	*out = *in
+	if in.ClusterRoleSelectors != nil {
+		in, out := &in.ClusterRoleSelectors, &out.ClusterRoleSelectors
+		*out = make([]metav1.LabelSelector, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AggregationRule.
+func (in *AggregationRule) DeepCopy() *AggregationRule {
+	if in == nil {
+		return nil
+	}
+	out := new(AggregationRule)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterRole) DeepCopyInto(out *ClusterRole) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	if in.Rules != nil {
+		in, out := &in.Rules, &out.Rules
+		*out = make([]PolicyRule, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.AggregationRule != nil {
+		in, out := &in.AggregationRule, &out.AggregationRule
+		*out = new(AggregationRule)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRole.
+func (in *ClusterRole) DeepCopy() *ClusterRole {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterRole)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterRole) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterRoleBinding) DeepCopyInto(out *ClusterRoleBinding) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	if in.Subjects != nil {
+		in, out := &in.Subjects, &out.Subjects
+		*out = make([]Subject, len(*in))
+		copy(*out, *in)
+	}
+	out.RoleRef = in.RoleRef
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleBinding.
+func (in *ClusterRoleBinding) DeepCopy() *ClusterRoleBinding {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterRoleBinding)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterRoleBinding) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterRoleBindingList) DeepCopyInto(out *ClusterRoleBindingList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]ClusterRoleBinding, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleBindingList.
+func (in *ClusterRoleBindingList) DeepCopy() *ClusterRoleBindingList {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterRoleBindingList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterRoleBindingList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterRoleList) DeepCopyInto(out *ClusterRoleList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]ClusterRole, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleList.
+func (in *ClusterRoleList) DeepCopy() *ClusterRoleList {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterRoleList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterRoleList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PolicyRule) DeepCopyInto(out *PolicyRule) {
+	*out = *in
+	if in.Verbs != nil {
+		in, out := &in.Verbs, &out.Verbs
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.APIGroups != nil {
+		in, out := &in.APIGroups, &out.APIGroups
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.ResourceNames != nil {
+		in, out := &in.ResourceNames, &out.ResourceNames
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.NonResourceURLs != nil {
+		in, out := &in.NonResourceURLs, &out.NonResourceURLs
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyRule.
+func (in *PolicyRule) DeepCopy() *PolicyRule {
+	if in == nil {
+		return nil
+	}
+	out := new(PolicyRule)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Role) DeepCopyInto(out *Role) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	if in.Rules != nil {
+		in, out := &in.Rules, &out.Rules
+		*out = make([]PolicyRule, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Role.
+func (in *Role) DeepCopy() *Role {
+	if in == nil {
+		return nil
+	}
+	out := new(Role)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Role) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RoleBinding) DeepCopyInto(out *RoleBinding) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	if in.Subjects != nil {
+		in, out := &in.Subjects, &out.Subjects
+		*out = make([]Subject, len(*in))
+		copy(*out, *in)
+	}
+	out.RoleRef = in.RoleRef
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBinding.
+func (in *RoleBinding) DeepCopy() *RoleBinding {
+	if in == nil {
+		return nil
+	}
+	out := new(RoleBinding)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *RoleBinding) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RoleBindingList) DeepCopyInto(out *RoleBindingList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]RoleBinding, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBindingList.
+func (in *RoleBindingList) DeepCopy() *RoleBindingList {
+	if in == nil {
+		return nil
+	}
+	out := new(RoleBindingList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *RoleBindingList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RoleList) DeepCopyInto(out *RoleList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Role, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleList.
+func (in *RoleList) DeepCopy() *RoleList {
+	if in == nil {
+		return nil
+	}
+	out := new(RoleList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *RoleList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RoleRef) DeepCopyInto(out *RoleRef) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleRef.
+func (in *RoleRef) DeepCopy() *RoleRef {
+	if in == nil {
+		return nil
+	}
+	out := new(RoleRef)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Subject) DeepCopyInto(out *Subject) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subject.
+func (in *Subject) DeepCopy() *Subject {
+	if in == nil {
+		return nil
+	}
+	out := new(Subject)
+	in.DeepCopyInto(out)
+	return out
+}
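
These generated copies matter because the RBAC types contain slices, which plain struct assignment would alias. A small illustrative check (not part of the vendored files):

package main

import (
	"fmt"

	rbacv1 "k8s.io/api/rbac/v1"
)

func main() {
	orig := &rbacv1.ClusterRole{
		Rules: []rbacv1.PolicyRule{{Verbs: []string{"get"}}},
	}
	cp := orig.DeepCopy()
	cp.Rules[0].Verbs[0] = "list" // mutate only the copy

	// Prints "get": DeepCopy duplicated the nested slices instead of aliasing them.
	fmt.Println(orig.Rules[0].Verbs[0])
}
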
diff --git a/vendor/k8s.io/api/rbac/v1alpha1/doc.go b/vendor/k8s.io/api/rbac/v1alpha1/doc.go
new file mode 100644
index 0000000..f2547a5
--- /dev/null
+++ b/vendor/k8s.io/api/rbac/v1alpha1/doc.go
@@ -0,0 +1,22 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:openapi-gen=true
+
+// +groupName=rbac.authorization.k8s.io
+
+package v1alpha1 // import "k8s.io/api/rbac/v1alpha1"
diff --git a/vendor/k8s.io/api/rbac/v1alpha1/generated.proto b/vendor/k8s.io/api/rbac/v1alpha1/generated.proto
new file mode 100644
index 0000000..cde3aaa
--- /dev/null
+++ b/vendor/k8s.io/api/rbac/v1alpha1/generated.proto
@@ -0,0 +1,199 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.api.rbac.v1alpha1;
+
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1alpha1";
+
+// AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole
+message AggregationRule {
+  // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules.
+  // If any of the selectors match, then the ClusterRole's permissions will be added
+  // +optional
+  repeated k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector clusterRoleSelectors = 1;
+}
+
+// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.
+message ClusterRole {
+  // Standard object's metadata.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Rules holds all the PolicyRules for this ClusterRole
+  repeated PolicyRule rules = 2;
+
+  // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole.
+  // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be
+  // stomped by the controller.
+  // +optional
+  optional AggregationRule aggregationRule = 3;
+}
+
+// ClusterRoleBinding references a ClusterRole, but does not contain it.  It can reference a ClusterRole in the global namespace,
+// and adds who information via Subject.
+message ClusterRoleBinding {
+  // Standard object's metadata.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Subjects holds references to the objects the role applies to.
+  // +optional
+  repeated Subject subjects = 2;
+
+  // RoleRef can only reference a ClusterRole in the global namespace.
+  // If the RoleRef cannot be resolved, the Authorizer must return an error.
+  optional RoleRef roleRef = 3;
+}
+
+// ClusterRoleBindingList is a collection of ClusterRoleBindings
+message ClusterRoleBindingList {
+  // Standard object's metadata.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is a list of ClusterRoleBindings
+  repeated ClusterRoleBinding items = 2;
+}
+
+// ClusterRoleList is a collection of ClusterRoles
+message ClusterRoleList {
+  // Standard object's metadata.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is a list of ClusterRoles
+  repeated ClusterRole items = 2;
+}
+
+// PolicyRule holds information that describes a policy rule, but does not contain information
+// about who the rule applies to or which namespace the rule applies to.
+message PolicyRule {
+  // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule.  VerbAll represents all kinds.
+  repeated string verbs = 1;
+
+  // APIGroups is the name of the APIGroup that contains the resources.  If multiple API groups are specified, any action requested against one of
+  // the enumerated resources in any API group will be allowed.
+  // +optional
+  repeated string apiGroups = 3;
+
+  // Resources is a list of resources this rule applies to.  ResourceAll represents all resources.
+  // +optional
+  repeated string resources = 4;
+
+  // ResourceNames is an optional white list of names that the rule applies to.  An empty set means that everything is allowed.
+  // +optional
+  repeated string resourceNames = 5;
+
+  // NonResourceURLs is a set of partial URLs that a user should have access to.  *s are allowed, but only as the full, final step in the path.
+  // This name is intentionally different than the internal type so that the DefaultConvert works nicely and because the ordering may be different.
+  // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding.
+  // Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"),  but not both.
+  // +optional
+  repeated string nonResourceURLs = 6;
+}
+
+// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.
+message Role {
+  // Standard object's metadata.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Rules holds all the PolicyRules for this Role
+  repeated PolicyRule rules = 2;
+}
+
+// RoleBinding references a role, but does not contain it.  It can reference a Role in the same namespace or a ClusterRole in the global namespace.
+// It adds who information via Subjects and namespace information by which namespace it exists in.  RoleBindings in a given
+// namespace only have effect in that namespace.
+message RoleBinding {
+  // Standard object's metadata.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Subjects holds references to the objects the role applies to.
+  // +optional
+  repeated Subject subjects = 2;
+
+  // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace.
+  // If the RoleRef cannot be resolved, the Authorizer must return an error.
+  optional RoleRef roleRef = 3;
+}
+
+// RoleBindingList is a collection of RoleBindings
+message RoleBindingList {
+  // Standard object's metadata.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is a list of RoleBindings
+  repeated RoleBinding items = 2;
+}
+
+// RoleList is a collection of Roles
+message RoleList {
+  // Standard object's metadata.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is a list of Roles
+  repeated Role items = 2;
+}
+
+// RoleRef contains information that points to the role being used
+message RoleRef {
+  // APIGroup is the group for the resource being referenced
+  optional string apiGroup = 1;
+
+  // Kind is the type of resource being referenced
+  optional string kind = 2;
+
+  // Name is the name of resource being referenced
+  optional string name = 3;
+}
+
+// Subject contains a reference to the object or user identities a role binding applies to.  This can either hold a direct API object reference,
+// or a value for non-objects such as user and group names.
+message Subject {
+  // Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount".
+  // If the Authorizer does not recognize the kind value, the Authorizer should report an error.
+  optional string kind = 1;
+
+  // APIVersion holds the API group and version of the referenced subject.
+  // Defaults to "v1" for ServiceAccount subjects.
+  // Defaults to "rbac.authorization.k8s.io/v1alpha1" for User and Group subjects.
+  // +k8s:conversion-gen=false
+  // +optional
+  optional string apiVersion = 2;
+
+  // Name of the object being referenced.
+  optional string name = 3;
+
+  // Namespace of the referenced object.  If the object kind is non-namespaced, such as "User" or "Group", and this value is not empty,
+  // the Authorizer should report an error.
+  // +optional
+  optional string namespace = 4;
+}
+
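
Note two differences from the v1 proto earlier in this change: Subject carries apiVersion (group and version) instead of apiGroup, and PolicyRule's field numbers skip 2. An illustrative sketch of a v1alpha1 subject using the matching Go types; the names "builder" and "ci" are hypothetical:

package main

import (
	"fmt"

	rbacv1alpha1 "k8s.io/api/rbac/v1alpha1"
)

func main() {
	subject := rbacv1alpha1.Subject{
		Kind:       rbacv1alpha1.ServiceAccountKind,
		APIVersion: "v1",      // documented default for ServiceAccount subjects
		Name:       "builder", // hypothetical service account
		Namespace:  "ci",      // hypothetical namespace
	}
	fmt.Printf("%+v\n", subject)
}
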
diff --git a/vendor/k8s.io/api/rbac/v1alpha1/register.go b/vendor/k8s.io/api/rbac/v1alpha1/register.go
new file mode 100644
index 0000000..0c69776
--- /dev/null
+++ b/vendor/k8s.io/api/rbac/v1alpha1/register.go
@@ -0,0 +1,58 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+const GroupName = "rbac.authorization.k8s.io"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
+	// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+	SchemeBuilder      = runtime.NewSchemeBuilder(addKnownTypes)
+	localSchemeBuilder = &SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&Role{},
+		&RoleBinding{},
+		&RoleBindingList{},
+		&RoleList{},
+
+		&ClusterRole{},
+		&ClusterRoleBinding{},
+		&ClusterRoleBindingList{},
+		&ClusterRoleList{},
+	)
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
diff --git a/vendor/k8s.io/api/rbac/v1alpha1/types.go b/vendor/k8s.io/api/rbac/v1alpha1/types.go
new file mode 100644
index 0000000..398d6a1
--- /dev/null
+++ b/vendor/k8s.io/api/rbac/v1alpha1/types.go
@@ -0,0 +1,237 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// Authorization is calculated against
+// 1. evaluation of ClusterRoleBindings - short circuit on match
+// 2. evaluation of RoleBindings in the namespace requested - short circuit on match
+// 3. deny by default
+
+const (
+	APIGroupAll    = "*"
+	ResourceAll    = "*"
+	VerbAll        = "*"
+	NonResourceAll = "*"
+
+	GroupKind          = "Group"
+	ServiceAccountKind = "ServiceAccount"
+	UserKind           = "User"
+
+	// AutoUpdateAnnotationKey is the name of an annotation which prevents reconciliation if set to "false"
+	AutoUpdateAnnotationKey = "rbac.authorization.kubernetes.io/autoupdate"
+)
+
+// Authorization is calculated against
+// 1. evaluation of ClusterRoleBindings - short circuit on match
+// 2. evaluation of RoleBindings in the namespace requested - short circuit on match
+// 3. deny by default
+
+// PolicyRule holds information that describes a policy rule, but does not contain information
+// about who the rule applies to or which namespace the rule applies to.
+type PolicyRule struct {
+	// Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule.  VerbAll represents all kinds.
+	Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"`
+
+	// APIGroups is the name of the APIGroup that contains the resources.  If multiple API groups are specified, any action requested against one of
+	// the enumerated resources in any API group will be allowed.
+	// +optional
+	APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,3,rep,name=apiGroups"`
+	// Resources is a list of resources this rule applies to.  ResourceAll represents all resources.
+	// +optional
+	Resources []string `json:"resources,omitempty" protobuf:"bytes,4,rep,name=resources"`
+	// ResourceNames is an optional white list of names that the rule applies to.  An empty set means that everything is allowed.
+	// +optional
+	ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,5,rep,name=resourceNames"`
+
+	// NonResourceURLs is a set of partial URLs that a user should have access to.  *s are allowed, but only as the full, final step in the path.
+	// This name is intentionally different than the internal type so that the DefaultConvert works nicely and because the ordering may be different.
+	// Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding.
+	// Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"),  but not both.
+	// +optional
+	NonResourceURLs []string `json:"nonResourceURLs,omitempty" protobuf:"bytes,6,rep,name=nonResourceURLs"`
+}
+
+// Subject contains a reference to the object or user identities a role binding applies to.  This can either hold a direct API object reference,
+// or a value for non-objects such as user and group names.
+type Subject struct {
+	// Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount".
+	// If the Authorizer does not recognize the kind value, the Authorizer should report an error.
+	Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"`
+	// APIVersion holds the API group and version of the referenced subject.
+	// Defaults to "v1" for ServiceAccount subjects.
+	// Defaults to "rbac.authorization.k8s.io/v1alpha1" for User and Group subjects.
+	// +k8s:conversion-gen=false
+	// +optional
+	APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,2,opt,name=apiVersion"`
+	// Name of the object being referenced.
+	Name string `json:"name" protobuf:"bytes,3,opt,name=name"`
+	// Namespace of the referenced object.  If the object kind is non-namespaced, such as "User" or "Group", and this value is not empty,
+	// the Authorizer should report an error.
+	// +optional
+	Namespace string `json:"namespace,omitempty" protobuf:"bytes,4,opt,name=namespace"`
+}
+
+// RoleRef contains information that points to the role being used
+type RoleRef struct {
+	// APIGroup is the group for the resource being referenced
+	APIGroup string `json:"apiGroup" protobuf:"bytes,1,opt,name=apiGroup"`
+	// Kind is the type of resource being referenced
+	Kind string `json:"kind" protobuf:"bytes,2,opt,name=kind"`
+	// Name is the name of resource being referenced
+	Name string `json:"name" protobuf:"bytes,3,opt,name=name"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.
+type Role struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Rules holds all the PolicyRules for this Role
+	Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// RoleBinding references a role, but does not contain it.  It can reference a Role in the same namespace or a ClusterRole in the global namespace.
+// It adds who information via Subjects and namespace information by which namespace it exists in.  RoleBindings in a given
+// namespace only have effect in that namespace.
+type RoleBinding struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Subjects holds references to the objects the role applies to.
+	// +optional
+	Subjects []Subject `json:"subjects,omitempty" protobuf:"bytes,2,rep,name=subjects"`
+
+	// RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace.
+	// If the RoleRef cannot be resolved, the Authorizer must return an error.
+	RoleRef RoleRef `json:"roleRef" protobuf:"bytes,3,opt,name=roleRef"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// RoleBindingList is a collection of RoleBindings
+type RoleBindingList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is a list of RoleBindings
+	Items []RoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// RoleList is a collection of Roles
+type RoleList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is a list of Roles
+	Items []Role `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.
+type ClusterRole struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Rules holds all the PolicyRules for this ClusterRole
+	Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"`
+
+	// AggregationRule is an optional field that describes how to build the Rules for this ClusterRole.
+	// If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be
+	// stomped by the controller.
+	// +optional
+	AggregationRule *AggregationRule `json:"aggregationRule,omitempty" protobuf:"bytes,3,opt,name=aggregationRule"`
+}
+
+// AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole
+type AggregationRule struct {
+	// ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules.
+	// If any of the selectors match, then the ClusterRole's permissions will be added
+	// +optional
+	ClusterRoleSelectors []metav1.LabelSelector `json:"clusterRoleSelectors,omitempty" protobuf:"bytes,1,rep,name=clusterRoleSelectors"`
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterRoleBinding references a ClusterRole, but does not contain it.  It can reference a ClusterRole in the global namespace,
+// and adds who information via Subject.
+type ClusterRoleBinding struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Subjects holds references to the objects the role applies to.
+	// +optional
+	Subjects []Subject `json:"subjects,omitempty" protobuf:"bytes,2,rep,name=subjects"`
+
+	// RoleRef can only reference a ClusterRole in the global namespace.
+	// If the RoleRef cannot be resolved, the Authorizer must return an error.
+	RoleRef RoleRef `json:"roleRef" protobuf:"bytes,3,opt,name=roleRef"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterRoleBindingList is a collection of ClusterRoleBindings
+type ClusterRoleBindingList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is a list of ClusterRoleBindings
+	Items []ClusterRoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterRoleList is a collection of ClusterRoles
+type ClusterRoleList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is a list of ClusterRoles
+	Items []ClusterRole `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
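
As an illustrative sketch of the AggregationRule defined above: an aggregated ClusterRole leaves Rules for the controller to fill from every ClusterRole matched by the selectors. The name "monitoring" and the aggregation label are hypothetical:

package main

import (
	rbacv1alpha1 "k8s.io/api/rbac/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	cr := rbacv1alpha1.ClusterRole{
		ObjectMeta: metav1.ObjectMeta{Name: "monitoring"}, // hypothetical name
		AggregationRule: &rbacv1alpha1.AggregationRule{
			ClusterRoleSelectors: []metav1.LabelSelector{{
				// hypothetical aggregation label
				MatchLabels: map[string]string{"example.com/aggregate-to-monitoring": "true"},
			}},
		},
		// Rules is left empty: when AggregationRule is set, the controller
		// manages Rules and overwrites direct edits, per the comment above.
	}
	_ = cr
}
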
diff --git a/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go
new file mode 100644
index 0000000..d7b194a
--- /dev/null
+++ b/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go
@@ -0,0 +1,158 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple lines or blocks that you want to ignore, use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_AggregationRule = map[string]string{
+	"":                     "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole",
+	"clusterRoleSelectors": "ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added",
+}
+
+func (AggregationRule) SwaggerDoc() map[string]string {
+	return map_AggregationRule
+}
+
+var map_ClusterRole = map[string]string{
+	"":                "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.",
+	"metadata":        "Standard object's metadata.",
+	"rules":           "Rules holds all the PolicyRules for this ClusterRole",
+	"aggregationRule": "AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller.",
+}
+
+func (ClusterRole) SwaggerDoc() map[string]string {
+	return map_ClusterRole
+}
+
+var map_ClusterRoleBinding = map[string]string{
+	"":         "ClusterRoleBinding references a ClusterRole, but not contain it.  It can reference a ClusterRole in the global namespace, and adds who information via Subject.",
+	"metadata": "Standard object's metadata.",
+	"subjects": "Subjects holds references to the objects the role applies to.",
+	"roleRef":  "RoleRef can only reference a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error.",
+}
+
+func (ClusterRoleBinding) SwaggerDoc() map[string]string {
+	return map_ClusterRoleBinding
+}
+
+var map_ClusterRoleBindingList = map[string]string{
+	"":         "ClusterRoleBindingList is a collection of ClusterRoleBindings",
+	"metadata": "Standard object's metadata.",
+	"items":    "Items is a list of ClusterRoleBindings",
+}
+
+func (ClusterRoleBindingList) SwaggerDoc() map[string]string {
+	return map_ClusterRoleBindingList
+}
+
+var map_ClusterRoleList = map[string]string{
+	"":         "ClusterRoleList is a collection of ClusterRoles",
+	"metadata": "Standard object's metadata.",
+	"items":    "Items is a list of ClusterRoles",
+}
+
+func (ClusterRoleList) SwaggerDoc() map[string]string {
+	return map_ClusterRoleList
+}
+
+var map_PolicyRule = map[string]string{
+	"":                "PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.",
+	"verbs":           "Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule.  VerbAll represents all kinds.",
+	"apiGroups":       "APIGroups is the name of the APIGroup that contains the resources.  If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed.",
+	"resources":       "Resources is a list of resources this rule applies to.  ResourceAll represents all resources.",
+	"resourceNames":   "ResourceNames is an optional white list of names that the rule applies to.  An empty set means that everything is allowed.",
+	"nonResourceURLs": "NonResourceURLs is a set of partial urls that a user should have access to.  *s are allowed, but only as the full, final step in the path This name is intentionally different than the internal type so that the DefaultConvert works nicely and because the ordering may be different. Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. Rules can either apply to API resources (such as \"pods\" or \"secrets\") or non-resource URL paths (such as \"/api\"),  but not both.",
+}
+
+func (PolicyRule) SwaggerDoc() map[string]string {
+	return map_PolicyRule
+}
+
+var map_Role = map[string]string{
+	"":         "Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.",
+	"metadata": "Standard object's metadata.",
+	"rules":    "Rules holds all the PolicyRules for this Role",
+}
+
+func (Role) SwaggerDoc() map[string]string {
+	return map_Role
+}
+
+var map_RoleBinding = map[string]string{
+	"":         "RoleBinding references a role, but does not contain it.  It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in.  RoleBindings in a given namespace only have effect in that namespace.",
+	"metadata": "Standard object's metadata.",
+	"subjects": "Subjects holds references to the objects the role applies to.",
+	"roleRef":  "RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error.",
+}
+
+func (RoleBinding) SwaggerDoc() map[string]string {
+	return map_RoleBinding
+}
+
+var map_RoleBindingList = map[string]string{
+	"":         "RoleBindingList is a collection of RoleBindings",
+	"metadata": "Standard object's metadata.",
+	"items":    "Items is a list of RoleBindings",
+}
+
+func (RoleBindingList) SwaggerDoc() map[string]string {
+	return map_RoleBindingList
+}
+
+var map_RoleList = map[string]string{
+	"":         "RoleList is a collection of Roles",
+	"metadata": "Standard object's metadata.",
+	"items":    "Items is a list of Roles",
+}
+
+func (RoleList) SwaggerDoc() map[string]string {
+	return map_RoleList
+}
+
+var map_RoleRef = map[string]string{
+	"":         "RoleRef contains information that points to the role being used",
+	"apiGroup": "APIGroup is the group for the resource being referenced",
+	"kind":     "Kind is the type of resource being referenced",
+	"name":     "Name is the name of resource being referenced",
+}
+
+func (RoleRef) SwaggerDoc() map[string]string {
+	return map_RoleRef
+}
+
+var map_Subject = map[string]string{
+	"":           "Subject contains a reference to the object or user identities a role binding applies to.  This can either hold a direct API object reference, or a value for non-objects such as user and group names.",
+	"kind":       "Kind of object being referenced. Values defined by this API group are \"User\", \"Group\", and \"ServiceAccount\". If the Authorizer does not recognized the kind value, the Authorizer should report an error.",
+	"apiVersion": "APIVersion holds the API group and version of the referenced subject. Defaults to \"v1\" for ServiceAccount subjects. Defaults to \"rbac.authorization.k8s.io/v1alpha1\" for User and Group subjects.",
+	"name":       "Name of the object being referenced.",
+	"namespace":  "Namespace of the referenced object.  If the object kind is non-namespace, such as \"User\" or \"Group\", and this value is not empty the Authorizer should report an error.",
+}
+
+func (Subject) SwaggerDoc() map[string]string {
+	return map_Subject
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
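For orientation, a minimal sketch of how these generated SwaggerDoc() maps are typically consumed: the empty-string key documents the type itself, and the remaining keys document individual fields. The consumer below is hypothetical, not part of the vendored package.

package main

import (
	"fmt"

	rbacv1alpha1 "k8s.io/api/rbac/v1alpha1"
)

func main() {
	// SwaggerDoc is defined on the value type, so a zero value suffices.
	docs := rbacv1alpha1.PolicyRule{}.SwaggerDoc()
	fmt.Println(docs[""])      // doc string for the PolicyRule type itself
	fmt.Println(docs["verbs"]) // doc string for the "verbs" field
}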
diff --git a/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..97f6333
--- /dev/null
+++ b/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,389 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AggregationRule) DeepCopyInto(out *AggregationRule) {
+	*out = *in
+	if in.ClusterRoleSelectors != nil {
+		in, out := &in.ClusterRoleSelectors, &out.ClusterRoleSelectors
+		*out = make([]v1.LabelSelector, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AggregationRule.
+func (in *AggregationRule) DeepCopy() *AggregationRule {
+	if in == nil {
+		return nil
+	}
+	out := new(AggregationRule)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterRole) DeepCopyInto(out *ClusterRole) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	if in.Rules != nil {
+		in, out := &in.Rules, &out.Rules
+		*out = make([]PolicyRule, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.AggregationRule != nil {
+		in, out := &in.AggregationRule, &out.AggregationRule
+		*out = new(AggregationRule)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRole.
+func (in *ClusterRole) DeepCopy() *ClusterRole {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterRole)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterRole) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterRoleBinding) DeepCopyInto(out *ClusterRoleBinding) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	if in.Subjects != nil {
+		in, out := &in.Subjects, &out.Subjects
+		*out = make([]Subject, len(*in))
+		copy(*out, *in)
+	}
+	out.RoleRef = in.RoleRef
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleBinding.
+func (in *ClusterRoleBinding) DeepCopy() *ClusterRoleBinding {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterRoleBinding)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterRoleBinding) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterRoleBindingList) DeepCopyInto(out *ClusterRoleBindingList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]ClusterRoleBinding, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleBindingList.
+func (in *ClusterRoleBindingList) DeepCopy() *ClusterRoleBindingList {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterRoleBindingList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterRoleBindingList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterRoleList) DeepCopyInto(out *ClusterRoleList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]ClusterRole, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleList.
+func (in *ClusterRoleList) DeepCopy() *ClusterRoleList {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterRoleList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterRoleList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PolicyRule) DeepCopyInto(out *PolicyRule) {
+	*out = *in
+	if in.Verbs != nil {
+		in, out := &in.Verbs, &out.Verbs
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.APIGroups != nil {
+		in, out := &in.APIGroups, &out.APIGroups
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.ResourceNames != nil {
+		in, out := &in.ResourceNames, &out.ResourceNames
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.NonResourceURLs != nil {
+		in, out := &in.NonResourceURLs, &out.NonResourceURLs
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyRule.
+func (in *PolicyRule) DeepCopy() *PolicyRule {
+	if in == nil {
+		return nil
+	}
+	out := new(PolicyRule)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Role) DeepCopyInto(out *Role) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	if in.Rules != nil {
+		in, out := &in.Rules, &out.Rules
+		*out = make([]PolicyRule, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Role.
+func (in *Role) DeepCopy() *Role {
+	if in == nil {
+		return nil
+	}
+	out := new(Role)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Role) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RoleBinding) DeepCopyInto(out *RoleBinding) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	if in.Subjects != nil {
+		in, out := &in.Subjects, &out.Subjects
+		*out = make([]Subject, len(*in))
+		copy(*out, *in)
+	}
+	out.RoleRef = in.RoleRef
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBinding.
+func (in *RoleBinding) DeepCopy() *RoleBinding {
+	if in == nil {
+		return nil
+	}
+	out := new(RoleBinding)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *RoleBinding) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RoleBindingList) DeepCopyInto(out *RoleBindingList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]RoleBinding, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBindingList.
+func (in *RoleBindingList) DeepCopy() *RoleBindingList {
+	if in == nil {
+		return nil
+	}
+	out := new(RoleBindingList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *RoleBindingList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RoleList) DeepCopyInto(out *RoleList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Role, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleList.
+func (in *RoleList) DeepCopy() *RoleList {
+	if in == nil {
+		return nil
+	}
+	out := new(RoleList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *RoleList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RoleRef) DeepCopyInto(out *RoleRef) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleRef.
+func (in *RoleRef) DeepCopy() *RoleRef {
+	if in == nil {
+		return nil
+	}
+	out := new(RoleRef)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Subject) DeepCopyInto(out *Subject) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subject.
+func (in *Subject) DeepCopy() *Subject {
+	if in == nil {
+		return nil
+	}
+	out := new(Subject)
+	in.DeepCopyInto(out)
+	return out
+}
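The generated deepcopy functions above exist so that copies never alias the original's slices and pointers; a plain struct assignment would share the underlying Rules and Verbs arrays. A minimal sketch with illustrative values:

package main

import (
	"fmt"

	rbacv1alpha1 "k8s.io/api/rbac/v1alpha1"
)

func main() {
	orig := &rbacv1alpha1.ClusterRole{
		Rules: []rbacv1alpha1.PolicyRule{{Verbs: []string{"get"}}},
	}
	cp := orig.DeepCopy()
	cp.Rules[0].Verbs[0] = "delete"     // mutate the copy only
	fmt.Println(orig.Rules[0].Verbs[0]) // prints "get": the original is untouched
}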
diff --git a/vendor/k8s.io/api/rbac/v1beta1/doc.go b/vendor/k8s.io/api/rbac/v1beta1/doc.go
new file mode 100644
index 0000000..516625e
--- /dev/null
+++ b/vendor/k8s.io/api/rbac/v1beta1/doc.go
@@ -0,0 +1,22 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:openapi-gen=true
+
+// +groupName=rbac.authorization.k8s.io
+
+package v1beta1 // import "k8s.io/api/rbac/v1beta1"
diff --git a/vendor/k8s.io/api/rbac/v1beta1/generated.proto b/vendor/k8s.io/api/rbac/v1beta1/generated.proto
new file mode 100644
index 0000000..27bd30c
--- /dev/null
+++ b/vendor/k8s.io/api/rbac/v1beta1/generated.proto
@@ -0,0 +1,198 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.api.rbac.v1beta1;
+
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1beta1";
+
+// AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole
+message AggregationRule {
+  // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules.
+  // If any of the selectors match, then the ClusterRole's permissions will be added
+  // +optional
+  repeated k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector clusterRoleSelectors = 1;
+}
+
+// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.
+message ClusterRole {
+  // Standard object's metadata.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Rules holds all the PolicyRules for this ClusterRole
+  repeated PolicyRule rules = 2;
+
+  // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole.
+  // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be
+  // stomped by the controller.
+  // +optional
+  optional AggregationRule aggregationRule = 3;
+}
+
+// ClusterRoleBinding references a ClusterRole, but does not contain it.  It can reference a ClusterRole in the global namespace,
+// and adds who information via Subject.
+message ClusterRoleBinding {
+  // Standard object's metadata.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Subjects holds references to the objects the role applies to.
+  // +optional
+  repeated Subject subjects = 2;
+
+  // RoleRef can only reference a ClusterRole in the global namespace.
+  // If the RoleRef cannot be resolved, the Authorizer must return an error.
+  optional RoleRef roleRef = 3;
+}
+
+// ClusterRoleBindingList is a collection of ClusterRoleBindings
+message ClusterRoleBindingList {
+  // Standard object's metadata.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is a list of ClusterRoleBindings
+  repeated ClusterRoleBinding items = 2;
+}
+
+// ClusterRoleList is a collection of ClusterRoles
+message ClusterRoleList {
+  // Standard object's metadata.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is a list of ClusterRoles
+  repeated ClusterRole items = 2;
+}
+
+// PolicyRule holds information that describes a policy rule, but does not contain information
+// about who the rule applies to or which namespace the rule applies to.
+message PolicyRule {
+  // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule.  VerbAll represents all kinds.
+  repeated string verbs = 1;
+
+  // APIGroups is the name of the APIGroup that contains the resources.  If multiple API groups are specified, any action requested against one of
+  // the enumerated resources in any API group will be allowed.
+  // +optional
+  repeated string apiGroups = 2;
+
+  // Resources is a list of resources this rule applies to.  '*' represents all resources in the specified apiGroups.
+  // '*/foo' represents the subresource 'foo' for all resources in the specified apiGroups.
+  // +optional
+  repeated string resources = 3;
+
+  // ResourceNames is an optional white list of names that the rule applies to.  An empty set means that everything is allowed.
+  // +optional
+  repeated string resourceNames = 4;
+
+  // NonResourceURLs is a set of partial URLs that a user should have access to.  *s are allowed, but only as the full, final step in the path.
+  // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding.
+  // Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"),  but not both.
+  // +optional
+  repeated string nonResourceURLs = 5;
+}
+
+// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.
+message Role {
+  // Standard object's metadata.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Rules holds all the PolicyRules for this Role
+  repeated PolicyRule rules = 2;
+}
+
+// RoleBinding references a role, but does not contain it.  It can reference a Role in the same namespace or a ClusterRole in the global namespace.
+// It adds who information via Subjects and namespace information by which namespace it exists in.  RoleBindings in a given
+// namespace only have effect in that namespace.
+message RoleBinding {
+  // Standard object's metadata.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Subjects holds references to the objects the role applies to.
+  // +optional
+  repeated Subject subjects = 2;
+
+  // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace.
+  // If the RoleRef cannot be resolved, the Authorizer must return an error.
+  optional RoleRef roleRef = 3;
+}
+
+// RoleBindingList is a collection of RoleBindings
+message RoleBindingList {
+  // Standard object's metadata.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is a list of RoleBindings
+  repeated RoleBinding items = 2;
+}
+
+// RoleList is a collection of Roles
+message RoleList {
+  // Standard object's metadata.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is a list of Roles
+  repeated Role items = 2;
+}
+
+// RoleRef contains information that points to the role being used
+message RoleRef {
+  // APIGroup is the group for the resource being referenced
+  optional string apiGroup = 1;
+
+  // Kind is the type of resource being referenced
+  optional string kind = 2;
+
+  // Name is the name of resource being referenced
+  optional string name = 3;
+}
+
+// Subject contains a reference to the object or user identities a role binding applies to.  This can either hold a direct API object reference,
+// or a value for non-objects such as user and group names.
+message Subject {
+  // Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount".
+  // If the Authorizer does not recognize the kind value, the Authorizer should report an error.
+  optional string kind = 1;
+
+  // APIGroup holds the API group of the referenced subject.
+  // Defaults to "" for ServiceAccount subjects.
+  // Defaults to "rbac.authorization.k8s.io" for User and Group subjects.
+  // +optional
+  optional string apiGroup = 2;
+
+  // Name of the object being referenced.
+  optional string name = 3;
+
+  // Namespace of the referenced object.  If the object kind is non-namespaced, such as "User" or "Group", and this value is not empty,
+  // the Authorizer should report an error.
+  // +optional
+  optional string namespace = 4;
+}
+
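The proto messages above mirror the Go types this package registers; for concreteness, a sketch of the PolicyRule shape in Go, granting read-only access to pods in the core ("") API group. The verbs, group, and resource are illustrative values, not anything mandated by the package.

package main

import (
	"fmt"

	rbacv1beta1 "k8s.io/api/rbac/v1beta1"
)

func main() {
	readPods := rbacv1beta1.PolicyRule{
		Verbs:     []string{"get", "list", "watch"},
		APIGroups: []string{""}, // "" is the core API group
		Resources: []string{"pods"},
	}
	fmt.Printf("%+v\n", readPods)
}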
diff --git a/vendor/k8s.io/api/rbac/v1beta1/register.go b/vendor/k8s.io/api/rbac/v1beta1/register.go
new file mode 100644
index 0000000..c8526a6
--- /dev/null
+++ b/vendor/k8s.io/api/rbac/v1beta1/register.go
@@ -0,0 +1,58 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+const GroupName = "rbac.authorization.k8s.io"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
+	// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+	SchemeBuilder      = runtime.NewSchemeBuilder(addKnownTypes)
+	localSchemeBuilder = &SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&Role{},
+		&RoleBinding{},
+		&RoleBindingList{},
+		&RoleList{},
+
+		&ClusterRole{},
+		&ClusterRoleBinding{},
+		&ClusterRoleBindingList{},
+		&ClusterRoleList{},
+	)
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
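A sketch of how this registration is typically used: callers build a runtime.Scheme and invoke AddToScheme so that codecs and clients can recognize the rbac/v1beta1 kinds.

package main

import (
	"k8s.io/apimachinery/pkg/runtime"

	rbacv1beta1 "k8s.io/api/rbac/v1beta1"
)

func main() {
	scheme := runtime.NewScheme()
	if err := rbacv1beta1.AddToScheme(scheme); err != nil {
		panic(err)
	}
	// scheme now maps the rbac.authorization.k8s.io/v1beta1 group-version
	// to Role, RoleBinding, ClusterRole, ClusterRoleBinding and their lists.
}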
diff --git a/vendor/k8s.io/api/rbac/v1beta1/types.go b/vendor/k8s.io/api/rbac/v1beta1/types.go
new file mode 100644
index 0000000..857b67a
--- /dev/null
+++ b/vendor/k8s.io/api/rbac/v1beta1/types.go
@@ -0,0 +1,235 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// Authorization is calculated against
+// 1. evaluation of ClusterRoleBindings - short circuit on match
+// 2. evaluation of RoleBindings in the namespace requested - short circuit on match
+// 3. deny by default
+
+const (
+	APIGroupAll    = "*"
+	ResourceAll    = "*"
+	VerbAll        = "*"
+	NonResourceAll = "*"
+
+	GroupKind          = "Group"
+	ServiceAccountKind = "ServiceAccount"
+	UserKind           = "User"
+
+	// AutoUpdateAnnotationKey is the name of an annotation which prevents reconciliation if set to "false"
+	AutoUpdateAnnotationKey = "rbac.authorization.kubernetes.io/autoupdate"
+)
+
+// PolicyRule holds information that describes a policy rule, but does not contain information
+// about who the rule applies to or which namespace the rule applies to.
+type PolicyRule struct {
+	// Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule.  VerbAll represents all kinds.
+	Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"`
+
+	// APIGroups is the name of the APIGroup that contains the resources.  If multiple API groups are specified, any action requested against one of
+	// the enumerated resources in any API group will be allowed.
+	// +optional
+	APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,2,rep,name=apiGroups"`
+	// Resources is a list of resources this rule applies to.  '*' represents all resources in the specified apiGroups.
+	// '*/foo' represents the subresource 'foo' for all resources in the specified apiGroups.
+	// +optional
+	Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"`
+	// ResourceNames is an optional white list of names that the rule applies to.  An empty set means that everything is allowed.
+	// +optional
+	ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,4,rep,name=resourceNames"`
+
+	// NonResourceURLs is a set of partial URLs that a user should have access to.  *s are allowed, but only as the full, final step in the path.
+	// Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding.
+	// Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"),  but not both.
+	// +optional
+	NonResourceURLs []string `json:"nonResourceURLs,omitempty" protobuf:"bytes,5,rep,name=nonResourceURLs"`
+}
+
+// Subject contains a reference to the object or user identities a role binding applies to.  This can either hold a direct API object reference,
+// or a value for non-objects such as user and group names.
+type Subject struct {
+	// Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount".
+	// If the Authorizer does not recognize the kind value, the Authorizer should report an error.
+	Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"`
+	// APIGroup holds the API group of the referenced subject.
+	// Defaults to "" for ServiceAccount subjects.
+	// Defaults to "rbac.authorization.k8s.io" for User and Group subjects.
+	// +optional
+	APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,2,opt,name=apiGroup"`
+	// Name of the object being referenced.
+	Name string `json:"name" protobuf:"bytes,3,opt,name=name"`
+	// Namespace of the referenced object.  If the object kind is non-namespaced, such as "User" or "Group", and this value is not empty,
+	// the Authorizer should report an error.
+	// +optional
+	Namespace string `json:"namespace,omitempty" protobuf:"bytes,4,opt,name=namespace"`
+}
+
+// RoleRef contains information that points to the role being used
+type RoleRef struct {
+	// APIGroup is the group for the resource being referenced
+	APIGroup string `json:"apiGroup" protobuf:"bytes,1,opt,name=apiGroup"`
+	// Kind is the type of resource being referenced
+	Kind string `json:"kind" protobuf:"bytes,2,opt,name=kind"`
+	// Name is the name of resource being referenced
+	Name string `json:"name" protobuf:"bytes,3,opt,name=name"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.
+type Role struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Rules holds all the PolicyRules for this Role
+	Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// RoleBinding references a role, but does not contain it.  It can reference a Role in the same namespace or a ClusterRole in the global namespace.
+// It adds who information via Subjects and namespace information by which namespace it exists in.  RoleBindings in a given
+// namespace only have effect in that namespace.
+type RoleBinding struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Subjects holds references to the objects the role applies to.
+	// +optional
+	Subjects []Subject `json:"subjects,omitempty" protobuf:"bytes,2,rep,name=subjects"`
+
+	// RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace.
+	// If the RoleRef cannot be resolved, the Authorizer must return an error.
+	RoleRef RoleRef `json:"roleRef" protobuf:"bytes,3,opt,name=roleRef"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// RoleBindingList is a collection of RoleBindings
+type RoleBindingList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is a list of RoleBindings
+	Items []RoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// RoleList is a collection of Roles
+type RoleList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is a list of Roles
+	Items []Role `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.
+type ClusterRole struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Rules holds all the PolicyRules for this ClusterRole
+	Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"`
+	// AggregationRule is an optional field that describes how to build the Rules for this ClusterRole.
+	// If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be
+	// stomped by the controller.
+	// +optional
+	AggregationRule *AggregationRule `json:"aggregationRule,omitempty" protobuf:"bytes,3,opt,name=aggregationRule"`
+}
+
+// AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole
+type AggregationRule struct {
+	// ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules.
+	// If any of the selectors match, then the ClusterRole's permissions will be added
+	// +optional
+	ClusterRoleSelectors []metav1.LabelSelector `json:"clusterRoleSelectors,omitempty" protobuf:"bytes,1,rep,name=clusterRoleSelectors"`
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterRoleBinding references a ClusterRole, but does not contain it.  It can reference a ClusterRole in the global namespace,
+// and adds who information via Subject.
+type ClusterRoleBinding struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Subjects holds references to the objects the role applies to.
+	// +optional
+	Subjects []Subject `json:"subjects,omitempty" protobuf:"bytes,2,rep,name=subjects"`
+
+	// RoleRef can only reference a ClusterRole in the global namespace.
+	// If the RoleRef cannot be resolved, the Authorizer must return an error.
+	RoleRef RoleRef `json:"roleRef" protobuf:"bytes,3,opt,name=roleRef"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterRoleBindingList is a collection of ClusterRoleBindings
+type ClusterRoleBindingList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is a list of ClusterRoleBindings
+	Items []ClusterRoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterRoleList is a collection of ClusterRoles
+type ClusterRoleList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is a list of ClusterRoles
+	Items []ClusterRole `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
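To make the AggregationRule mechanics concrete, a sketch of an aggregated ClusterRole: the controller rebuilds Rules from every ClusterRole whose labels match one of the selectors, so Rules is left empty here. The label key and role name are illustrative, not defined by this package.

package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	rbacv1beta1 "k8s.io/api/rbac/v1beta1"
)

func main() {
	monitoring := rbacv1beta1.ClusterRole{
		ObjectMeta: metav1.ObjectMeta{Name: "monitoring"},
		AggregationRule: &rbacv1beta1.AggregationRule{
			ClusterRoleSelectors: []metav1.LabelSelector{
				{MatchLabels: map[string]string{"example.com/aggregate-to-monitoring": "true"}},
			},
		},
		// Rules stays empty: it is controller managed, and direct edits
		// are overwritten, per the AggregationRule field comment above.
	}
	_ = monitoring
}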
diff --git a/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go
new file mode 100644
index 0000000..c803275
--- /dev/null
+++ b/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go
@@ -0,0 +1,158 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_AggregationRule = map[string]string{
+	"":                     "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole",
+	"clusterRoleSelectors": "ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added",
+}
+
+func (AggregationRule) SwaggerDoc() map[string]string {
+	return map_AggregationRule
+}
+
+var map_ClusterRole = map[string]string{
+	"":                "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.",
+	"metadata":        "Standard object's metadata.",
+	"rules":           "Rules holds all the PolicyRules for this ClusterRole",
+	"aggregationRule": "AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller.",
+}
+
+func (ClusterRole) SwaggerDoc() map[string]string {
+	return map_ClusterRole
+}
+
+var map_ClusterRoleBinding = map[string]string{
+	"":         "ClusterRoleBinding references a ClusterRole, but not contain it.  It can reference a ClusterRole in the global namespace, and adds who information via Subject.",
+	"metadata": "Standard object's metadata.",
+	"subjects": "Subjects holds references to the objects the role applies to.",
+	"roleRef":  "RoleRef can only reference a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error.",
+}
+
+func (ClusterRoleBinding) SwaggerDoc() map[string]string {
+	return map_ClusterRoleBinding
+}
+
+var map_ClusterRoleBindingList = map[string]string{
+	"":         "ClusterRoleBindingList is a collection of ClusterRoleBindings",
+	"metadata": "Standard object's metadata.",
+	"items":    "Items is a list of ClusterRoleBindings",
+}
+
+func (ClusterRoleBindingList) SwaggerDoc() map[string]string {
+	return map_ClusterRoleBindingList
+}
+
+var map_ClusterRoleList = map[string]string{
+	"":         "ClusterRoleList is a collection of ClusterRoles",
+	"metadata": "Standard object's metadata.",
+	"items":    "Items is a list of ClusterRoles",
+}
+
+func (ClusterRoleList) SwaggerDoc() map[string]string {
+	return map_ClusterRoleList
+}
+
+var map_PolicyRule = map[string]string{
+	"":                "PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.",
+	"verbs":           "Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule.  VerbAll represents all kinds.",
+	"apiGroups":       "APIGroups is the name of the APIGroup that contains the resources.  If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed.",
+	"resources":       "Resources is a list of resources this rule applies to.  '*' represents all resources in the specified apiGroups. '*/foo' represents the subresource 'foo' for all resources in the specified apiGroups.",
+	"resourceNames":   "ResourceNames is an optional white list of names that the rule applies to.  An empty set means that everything is allowed.",
+	"nonResourceURLs": "NonResourceURLs is a set of partial urls that a user should have access to.  *s are allowed, but only as the full, final step in the path Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. Rules can either apply to API resources (such as \"pods\" or \"secrets\") or non-resource URL paths (such as \"/api\"),  but not both.",
+}
+
+func (PolicyRule) SwaggerDoc() map[string]string {
+	return map_PolicyRule
+}
+
+var map_Role = map[string]string{
+	"":         "Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.",
+	"metadata": "Standard object's metadata.",
+	"rules":    "Rules holds all the PolicyRules for this Role",
+}
+
+func (Role) SwaggerDoc() map[string]string {
+	return map_Role
+}
+
+var map_RoleBinding = map[string]string{
+	"":         "RoleBinding references a role, but does not contain it.  It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in.  RoleBindings in a given namespace only have effect in that namespace.",
+	"metadata": "Standard object's metadata.",
+	"subjects": "Subjects holds references to the objects the role applies to.",
+	"roleRef":  "RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error.",
+}
+
+func (RoleBinding) SwaggerDoc() map[string]string {
+	return map_RoleBinding
+}
+
+var map_RoleBindingList = map[string]string{
+	"":         "RoleBindingList is a collection of RoleBindings",
+	"metadata": "Standard object's metadata.",
+	"items":    "Items is a list of RoleBindings",
+}
+
+func (RoleBindingList) SwaggerDoc() map[string]string {
+	return map_RoleBindingList
+}
+
+var map_RoleList = map[string]string{
+	"":         "RoleList is a collection of Roles",
+	"metadata": "Standard object's metadata.",
+	"items":    "Items is a list of Roles",
+}
+
+func (RoleList) SwaggerDoc() map[string]string {
+	return map_RoleList
+}
+
+var map_RoleRef = map[string]string{
+	"":         "RoleRef contains information that points to the role being used",
+	"apiGroup": "APIGroup is the group for the resource being referenced",
+	"kind":     "Kind is the type of resource being referenced",
+	"name":     "Name is the name of resource being referenced",
+}
+
+func (RoleRef) SwaggerDoc() map[string]string {
+	return map_RoleRef
+}
+
+var map_Subject = map[string]string{
+	"":          "Subject contains a reference to the object or user identities a role binding applies to.  This can either hold a direct API object reference, or a value for non-objects such as user and group names.",
+	"kind":      "Kind of object being referenced. Values defined by this API group are \"User\", \"Group\", and \"ServiceAccount\". If the Authorizer does not recognized the kind value, the Authorizer should report an error.",
+	"apiGroup":  "APIGroup holds the API group of the referenced subject. Defaults to \"\" for ServiceAccount subjects. Defaults to \"rbac.authorization.k8s.io\" for User and Group subjects.",
+	"name":      "Name of the object being referenced.",
+	"namespace": "Namespace of the referenced object.  If the object kind is non-namespace, such as \"User\" or \"Group\", and this value is not empty the Authorizer should report an error.",
+}
+
+func (Subject) SwaggerDoc() map[string]string {
+	return map_Subject
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..c085c90
--- /dev/null
+++ b/vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go
@@ -0,0 +1,389 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AggregationRule) DeepCopyInto(out *AggregationRule) {
+	*out = *in
+	if in.ClusterRoleSelectors != nil {
+		in, out := &in.ClusterRoleSelectors, &out.ClusterRoleSelectors
+		*out = make([]v1.LabelSelector, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AggregationRule.
+func (in *AggregationRule) DeepCopy() *AggregationRule {
+	if in == nil {
+		return nil
+	}
+	out := new(AggregationRule)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterRole) DeepCopyInto(out *ClusterRole) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	if in.Rules != nil {
+		in, out := &in.Rules, &out.Rules
+		*out = make([]PolicyRule, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.AggregationRule != nil {
+		in, out := &in.AggregationRule, &out.AggregationRule
+		*out = new(AggregationRule)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRole.
+func (in *ClusterRole) DeepCopy() *ClusterRole {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterRole)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterRole) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterRoleBinding) DeepCopyInto(out *ClusterRoleBinding) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	if in.Subjects != nil {
+		in, out := &in.Subjects, &out.Subjects
+		*out = make([]Subject, len(*in))
+		copy(*out, *in)
+	}
+	out.RoleRef = in.RoleRef
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleBinding.
+func (in *ClusterRoleBinding) DeepCopy() *ClusterRoleBinding {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterRoleBinding)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterRoleBinding) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterRoleBindingList) DeepCopyInto(out *ClusterRoleBindingList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]ClusterRoleBinding, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleBindingList.
+func (in *ClusterRoleBindingList) DeepCopy() *ClusterRoleBindingList {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterRoleBindingList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterRoleBindingList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterRoleList) DeepCopyInto(out *ClusterRoleList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]ClusterRole, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleList.
+func (in *ClusterRoleList) DeepCopy() *ClusterRoleList {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterRoleList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterRoleList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PolicyRule) DeepCopyInto(out *PolicyRule) {
+	*out = *in
+	if in.Verbs != nil {
+		in, out := &in.Verbs, &out.Verbs
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.APIGroups != nil {
+		in, out := &in.APIGroups, &out.APIGroups
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.ResourceNames != nil {
+		in, out := &in.ResourceNames, &out.ResourceNames
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.NonResourceURLs != nil {
+		in, out := &in.NonResourceURLs, &out.NonResourceURLs
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyRule.
+func (in *PolicyRule) DeepCopy() *PolicyRule {
+	if in == nil {
+		return nil
+	}
+	out := new(PolicyRule)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Role) DeepCopyInto(out *Role) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	if in.Rules != nil {
+		in, out := &in.Rules, &out.Rules
+		*out = make([]PolicyRule, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Role.
+func (in *Role) DeepCopy() *Role {
+	if in == nil {
+		return nil
+	}
+	out := new(Role)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Role) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RoleBinding) DeepCopyInto(out *RoleBinding) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	if in.Subjects != nil {
+		in, out := &in.Subjects, &out.Subjects
+		*out = make([]Subject, len(*in))
+		copy(*out, *in)
+	}
+	out.RoleRef = in.RoleRef
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBinding.
+func (in *RoleBinding) DeepCopy() *RoleBinding {
+	if in == nil {
+		return nil
+	}
+	out := new(RoleBinding)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *RoleBinding) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RoleBindingList) DeepCopyInto(out *RoleBindingList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]RoleBinding, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBindingList.
+func (in *RoleBindingList) DeepCopy() *RoleBindingList {
+	if in == nil {
+		return nil
+	}
+	out := new(RoleBindingList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *RoleBindingList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RoleList) DeepCopyInto(out *RoleList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Role, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleList.
+func (in *RoleList) DeepCopy() *RoleList {
+	if in == nil {
+		return nil
+	}
+	out := new(RoleList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *RoleList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RoleRef) DeepCopyInto(out *RoleRef) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleRef.
+func (in *RoleRef) DeepCopy() *RoleRef {
+	if in == nil {
+		return nil
+	}
+	out := new(RoleRef)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Subject) DeepCopyInto(out *Subject) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subject.
+func (in *Subject) DeepCopy() *Subject {
+	if in == nil {
+		return nil
+	}
+	out := new(Subject)
+	in.DeepCopyInto(out)
+	return out
+}
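Beyond plain copying, the generated DeepCopyObject methods are what let these types satisfy runtime.Object, so generic machinery can duplicate an object without knowing its concrete type. A minimal sketch:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"

	rbacv1beta1 "k8s.io/api/rbac/v1beta1"
)

// copyAny duplicates any API object through the runtime.Object interface.
func copyAny(obj runtime.Object) runtime.Object {
	return obj.DeepCopyObject()
}

func main() {
	var obj runtime.Object = &rbacv1beta1.ClusterRoleBinding{}
	fmt.Printf("%T\n", copyAny(obj)) // *v1beta1.ClusterRoleBinding
}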
diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/doc.go b/vendor/k8s.io/api/scheduling/v1alpha1/doc.go
new file mode 100644
index 0000000..05a454a
--- /dev/null
+++ b/vendor/k8s.io/api/scheduling/v1alpha1/doc.go
@@ -0,0 +1,22 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:openapi-gen=true
+
+// +groupName=scheduling.k8s.io
+
+package v1alpha1 // import "k8s.io/api/scheduling/v1alpha1"
diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto b/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto
new file mode 100644
index 0000000..5fb5472
--- /dev/null
+++ b/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto
@@ -0,0 +1,67 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.api.scheduling.v1alpha1;
+
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1alpha1";
+
+// PriorityClass defines a mapping from a priority class name to the priority
+// integer value. The value can be any valid integer.
+message PriorityClass {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // The value of this priority class. This is the actual priority that pods
+  // receive when they have the name of this class in their pod spec.
+  optional int32 value = 2;
+
+  // globalDefault specifies whether this PriorityClass should be considered as
+  // the default priority for pods that do not have any priority class.
+  // Only one PriorityClass can be marked as `globalDefault`. However, if more than
+  // one PriorityClass exists with its `globalDefault` field set to true,
+  // the smallest value of such global default PriorityClasses will be used as the default priority.
+  // +optional
+  optional bool globalDefault = 3;
+
+  // description is an arbitrary string that usually provides guidelines on
+  // when this priority class should be used.
+  // +optional
+  optional string description = 4;
+}
+
+// PriorityClassList is a collection of priority classes.
+message PriorityClassList {
+  // Standard list metadata
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // items is the list of PriorityClasses
+  repeated PriorityClass items = 2;
+}
+
diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/register.go b/vendor/k8s.io/api/scheduling/v1alpha1/register.go
new file mode 100644
index 0000000..24689f0
--- /dev/null
+++ b/vendor/k8s.io/api/scheduling/v1alpha1/register.go
@@ -0,0 +1,52 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "scheduling.k8s.io"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
+	// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+	SchemeBuilder      = runtime.NewSchemeBuilder(addKnownTypes)
+	localSchemeBuilder = &SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&PriorityClass{},
+		&PriorityClassList{},
+	)
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
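
The exported AddToScheme above is how consumers make this group/version known to a runtime.Scheme; addKnownTypes also registers the shared metav1 kinds for the group via AddToGroupVersion. A minimal wiring sketch using only what register.go exports plus standard apimachinery calls:

package main

import (
	"fmt"

	schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	if err := schedulingv1alpha1.AddToScheme(scheme); err != nil {
		panic(err)
	}

	// Once registered, the scheme can map Go types back to their
	// GroupVersionKind (scheduling.k8s.io/v1alpha1, Kind=PriorityClass).
	gvks, _, err := scheme.ObjectKinds(&schedulingv1alpha1.PriorityClass{})
	if err != nil {
		panic(err)
	}
	fmt.Println(gvks[0].String())
}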
diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/types.go b/vendor/k8s.io/api/scheduling/v1alpha1/types.go
new file mode 100644
index 0000000..21e3df0
--- /dev/null
+++ b/vendor/k8s.io/api/scheduling/v1alpha1/types.go
@@ -0,0 +1,66 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PriorityClass defines a mapping from a priority class name to the priority
+// integer value. The value can be any valid integer.
+type PriorityClass struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// The value of this priority class. This is the actual priority that pods
+	// receive when they have the name of this class in their pod spec.
+	Value int32 `json:"value" protobuf:"bytes,2,opt,name=value"`
+
+	// globalDefault specifies whether this PriorityClass should be considered as
+	// the default priority for pods that do not have any priority class.
+	// Only one PriorityClass can be marked as `globalDefault`. However, if more than
+	// one PriorityClass exists with its `globalDefault` field set to true,
+	// the smallest value of such global default PriorityClasses will be used as the default priority.
+	// +optional
+	GlobalDefault bool `json:"globalDefault,omitempty" protobuf:"bytes,3,opt,name=globalDefault"`
+
+	// description is an arbitrary string that usually provides guidelines on
+	// when this priority class should be used.
+	// +optional
+	Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PriorityClassList is a collection of priority classes.
+type PriorityClassList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// items is the list of PriorityClasses
+	Items []PriorityClass `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
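
Since PriorityClass is cluster-scoped (+genclient:nonNamespaced) and only Value is schema-required, building one in Go is plain struct-literal assembly. A short sketch with illustrative values:

package main

import (
	"fmt"

	schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	pc := schedulingv1alpha1.PriorityClass{
		ObjectMeta:    metav1.ObjectMeta{Name: "high-priority"}, // no Namespace: non-namespaced
		Value:         1000,
		GlobalDefault: false,
		Description:   "For latency-sensitive workloads.",
	}
	fmt.Printf("%s=%d\n", pc.Name, pc.Value)
}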
diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go
new file mode 100644
index 0000000..f406f44
--- /dev/null
+++ b/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go
@@ -0,0 +1,52 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored by the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multi-line blocks that you want to ignore, use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_PriorityClass = map[string]string{
+	"":              "PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.",
+	"metadata":      "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"value":         "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.",
+	"globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.",
+	"description":   "description is an arbitrary string that usually provides guidelines on when this priority class should be used.",
+}
+
+func (PriorityClass) SwaggerDoc() map[string]string {
+	return map_PriorityClass
+}
+
+var map_PriorityClassList = map[string]string{
+	"":         "PriorityClassList is a collection of priority classes.",
+	"metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"items":    "items is the list of PriorityClasses",
+}
+
+func (PriorityClassList) SwaggerDoc() map[string]string {
+	return map_PriorityClassList
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
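
The SwaggerDoc methods are plain value-receiver functions returning the field-to-description maps above; go-restful discovers them by name, but nothing stops direct calls. A tiny sketch (the import alias is ours):

package main

import (
	"fmt"

	schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1"
)

func main() {
	docs := (schedulingv1alpha1.PriorityClass{}).SwaggerDoc()
	fmt.Println(docs["value"]) // prints the doc string for the "value" field
}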
diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..fe0c860
--- /dev/null
+++ b/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,84 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PriorityClass) DeepCopyInto(out *PriorityClass) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityClass.
+func (in *PriorityClass) DeepCopy() *PriorityClass {
+	if in == nil {
+		return nil
+	}
+	out := new(PriorityClass)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PriorityClass) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PriorityClassList) DeepCopyInto(out *PriorityClassList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]PriorityClass, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityClassList.
+func (in *PriorityClassList) DeepCopy() *PriorityClassList {
+	if in == nil {
+		return nil
+	}
+	out := new(PriorityClassList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PriorityClassList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
diff --git a/vendor/k8s.io/api/scheduling/v1beta1/doc.go b/vendor/k8s.io/api/scheduling/v1beta1/doc.go
new file mode 100644
index 0000000..7cf1af2
--- /dev/null
+++ b/vendor/k8s.io/api/scheduling/v1beta1/doc.go
@@ -0,0 +1,22 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:openapi-gen=true
+
+// +groupName=scheduling.k8s.io
+
+package v1beta1 // import "k8s.io/api/scheduling/v1beta1"
diff --git a/vendor/k8s.io/api/scheduling/v1beta1/generated.proto b/vendor/k8s.io/api/scheduling/v1beta1/generated.proto
new file mode 100644
index 0000000..0a95755
--- /dev/null
+++ b/vendor/k8s.io/api/scheduling/v1beta1/generated.proto
@@ -0,0 +1,67 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.api.scheduling.v1beta1;
+
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1beta1";
+
+// PriorityClass defines a mapping from a priority class name to the priority
+// integer value. The value can be any valid integer.
+message PriorityClass {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // The value of this priority class. This is the actual priority that pods
+  // receive when they have the name of this class in their pod spec.
+  optional int32 value = 2;
+
+  // globalDefault specifies whether this PriorityClass should be considered as
+  // the default priority for pods that do not have any priority class.
+  // Only one PriorityClass can be marked as `globalDefault`. However, if more than
+  // one PriorityClass exists with its `globalDefault` field set to true,
+  // the smallest value of such global default PriorityClasses will be used as the default priority.
+  // +optional
+  optional bool globalDefault = 3;
+
+  // description is an arbitrary string that usually provides guidelines on
+  // when this priority class should be used.
+  // +optional
+  optional string description = 4;
+}
+
+// PriorityClassList is a collection of priority classes.
+message PriorityClassList {
+  // Standard list metadata
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // items is the list of PriorityClasses
+  repeated PriorityClass items = 2;
+}
+
diff --git a/vendor/k8s.io/api/scheduling/v1beta1/register.go b/vendor/k8s.io/api/scheduling/v1beta1/register.go
new file mode 100644
index 0000000..fb26557
--- /dev/null
+++ b/vendor/k8s.io/api/scheduling/v1beta1/register.go
@@ -0,0 +1,52 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "scheduling.k8s.io"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
+	// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+	SchemeBuilder      = runtime.NewSchemeBuilder(addKnownTypes)
+	localSchemeBuilder = &SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&PriorityClass{},
+		&PriorityClassList{},
+	)
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
diff --git a/vendor/k8s.io/api/scheduling/v1beta1/types.go b/vendor/k8s.io/api/scheduling/v1beta1/types.go
new file mode 100644
index 0000000..a9aaa86
--- /dev/null
+++ b/vendor/k8s.io/api/scheduling/v1beta1/types.go
@@ -0,0 +1,66 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PriorityClass defines a mapping from a priority class name to the priority
+// integer value. The value can be any valid integer.
+type PriorityClass struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// The value of this priority class. This is the actual priority that pods
+	// receive when they have the name of this class in their pod spec.
+	Value int32 `json:"value" protobuf:"bytes,2,opt,name=value"`
+
+	// globalDefault specifies whether this PriorityClass should be considered as
+	// the default priority for pods that do not have any priority class.
+	// Only one PriorityClass can be marked as `globalDefault`. However, if more than
+	// one PriorityClass exists with its `globalDefault` field set to true,
+	// the smallest value of such global default PriorityClasses will be used as the default priority.
+	// +optional
+	GlobalDefault bool `json:"globalDefault,omitempty" protobuf:"bytes,3,opt,name=globalDefault"`
+
+	// description is an arbitrary string that usually provides guidelines on
+	// when this priority class should be used.
+	// +optional
+	Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PriorityClassList is a collection of priority classes.
+type PriorityClassList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// items is the list of PriorityClasses
+	Items []PriorityClass `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
diff --git a/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go
new file mode 100644
index 0000000..c18f54a
--- /dev/null
+++ b/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go
@@ -0,0 +1,52 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored by the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multi-line blocks that you want to ignore, use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_PriorityClass = map[string]string{
+	"":              "PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.",
+	"metadata":      "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"value":         "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.",
+	"globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.",
+	"description":   "description is an arbitrary string that usually provides guidelines on when this priority class should be used.",
+}
+
+func (PriorityClass) SwaggerDoc() map[string]string {
+	return map_PriorityClass
+}
+
+var map_PriorityClassList = map[string]string{
+	"":         "PriorityClassList is a collection of priority classes.",
+	"metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"items":    "items is the list of PriorityClasses",
+}
+
+func (PriorityClassList) SwaggerDoc() map[string]string {
+	return map_PriorityClassList
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/k8s.io/api/scheduling/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/scheduling/v1beta1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..6f68e4a
--- /dev/null
+++ b/vendor/k8s.io/api/scheduling/v1beta1/zz_generated.deepcopy.go
@@ -0,0 +1,84 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PriorityClass) DeepCopyInto(out *PriorityClass) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityClass.
+func (in *PriorityClass) DeepCopy() *PriorityClass {
+	if in == nil {
+		return nil
+	}
+	out := new(PriorityClass)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PriorityClass) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PriorityClassList) DeepCopyInto(out *PriorityClassList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]PriorityClass, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityClassList.
+func (in *PriorityClassList) DeepCopy() *PriorityClassList {
+	if in == nil {
+		return nil
+	}
+	out := new(PriorityClassList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PriorityClassList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
diff --git a/vendor/k8s.io/api/settings/v1alpha1/doc.go b/vendor/k8s.io/api/settings/v1alpha1/doc.go
new file mode 100644
index 0000000..9126211
--- /dev/null
+++ b/vendor/k8s.io/api/settings/v1alpha1/doc.go
@@ -0,0 +1,22 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:openapi-gen=true
+
+// +groupName=settings.k8s.io
+
+package v1alpha1 // import "k8s.io/api/settings/v1alpha1"
diff --git a/vendor/k8s.io/api/settings/v1alpha1/generated.proto b/vendor/k8s.io/api/settings/v1alpha1/generated.proto
new file mode 100644
index 0000000..d5534c4
--- /dev/null
+++ b/vendor/k8s.io/api/settings/v1alpha1/generated.proto
@@ -0,0 +1,75 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.api.settings.v1alpha1;
+
+import "k8s.io/api/core/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1alpha1";
+
+// PodPreset is a policy resource that defines additional runtime
+// requirements for a Pod.
+message PodPreset {
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // +optional
+  optional PodPresetSpec spec = 2;
+}
+
+// PodPresetList is a list of PodPreset objects.
+message PodPresetList {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is a list of schema objects.
+  repeated PodPreset items = 2;
+}
+
+// PodPresetSpec is a description of a pod preset.
+message PodPresetSpec {
+  // Selector is a label query over a set of resources, in this case pods.
+  // Required.
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1;
+
+  // Env defines the collection of EnvVar to inject into containers.
+  // +optional
+  repeated k8s.io.api.core.v1.EnvVar env = 2;
+
+  // EnvFrom defines the collection of EnvFromSource to inject into containers.
+  // +optional
+  repeated k8s.io.api.core.v1.EnvFromSource envFrom = 3;
+
+  // Volumes defines the collection of Volume to inject into the pod.
+  // +optional
+  repeated k8s.io.api.core.v1.Volume volumes = 4;
+
+  // VolumeMounts defines the collection of VolumeMount to inject into containers.
+  // +optional
+  repeated k8s.io.api.core.v1.VolumeMount volumeMounts = 5;
+}
+
diff --git a/vendor/k8s.io/api/settings/v1alpha1/register.go b/vendor/k8s.io/api/settings/v1alpha1/register.go
new file mode 100644
index 0000000..eee278d
--- /dev/null
+++ b/vendor/k8s.io/api/settings/v1alpha1/register.go
@@ -0,0 +1,52 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "settings.k8s.io"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
+	// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+	SchemeBuilder      = runtime.NewSchemeBuilder(addKnownTypes)
+	localSchemeBuilder = &SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&PodPreset{},
+		&PodPresetList{},
+	)
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
diff --git a/vendor/k8s.io/api/settings/v1alpha1/types.go b/vendor/k8s.io/api/settings/v1alpha1/types.go
new file mode 100644
index 0000000..506aacf
--- /dev/null
+++ b/vendor/k8s.io/api/settings/v1alpha1/types.go
@@ -0,0 +1,70 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	"k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PodPreset is a policy resource that defines additional runtime
+// requirements for a Pod.
+type PodPreset struct {
+	metav1.TypeMeta `json:",inline"`
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// +optional
+	Spec PodPresetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+}
+
+// PodPresetSpec is a description of a pod preset.
+type PodPresetSpec struct {
+	// Selector is a label query over a set of resources, in this case pods.
+	// Required.
+	Selector metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,1,opt,name=selector"`
+
+	// Env defines the collection of EnvVar to inject into containers.
+	// +optional
+	Env []v1.EnvVar `json:"env,omitempty" protobuf:"bytes,2,rep,name=env"`
+	// EnvFrom defines the collection of EnvFromSource to inject into containers.
+	// +optional
+	EnvFrom []v1.EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,3,rep,name=envFrom"`
+	// Volumes defines the collection of Volume to inject into the pod.
+	// +optional
+	Volumes []v1.Volume `json:"volumes,omitempty" protobuf:"bytes,4,rep,name=volumes"`
+	// VolumeMounts defines the collection of VolumeMount to inject into containers.
+	// +optional
+	VolumeMounts []v1.VolumeMount `json:"volumeMounts,omitempty" protobuf:"bytes,5,rep,name=volumeMounts"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PodPresetList is a list of PodPreset objects.
+type PodPresetList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is a list of schema objects.
+	Items []PodPreset `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
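
PodPresetSpec is selector-driven injection: pods matching Selector have the listed Env/EnvFrom/Volumes/VolumeMounts merged into their containers at admission time. A sketch of assembling one (names and values are illustrative):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	settingsv1alpha1 "k8s.io/api/settings/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	pp := settingsv1alpha1.PodPreset{
		ObjectMeta: metav1.ObjectMeta{Name: "inject-db-env", Namespace: "default"},
		Spec: settingsv1alpha1.PodPresetSpec{
			// Required: which pods the preset applies to.
			Selector: metav1.LabelSelector{
				MatchLabels: map[string]string{"role": "frontend"},
			},
			// Injected into every container of matching pods.
			Env: []corev1.EnvVar{{Name: "DB_HOST", Value: "db.default.svc"}},
		},
	}
	fmt.Println(pp.Name, pp.Spec.Selector.MatchLabels)
}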
diff --git a/vendor/k8s.io/api/settings/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/settings/v1alpha1/types_swagger_doc_generated.go
new file mode 100644
index 0000000..508c452
--- /dev/null
+++ b/vendor/k8s.io/api/settings/v1alpha1/types_swagger_doc_generated.go
@@ -0,0 +1,61 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored by the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multi-line blocks that you want to ignore, use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_PodPreset = map[string]string{
+	"": "PodPreset is a policy resource that defines additional runtime requirements for a Pod.",
+}
+
+func (PodPreset) SwaggerDoc() map[string]string {
+	return map_PodPreset
+}
+
+var map_PodPresetList = map[string]string{
+	"":         "PodPresetList is a list of PodPreset objects.",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"items":    "Items is a list of schema objects.",
+}
+
+func (PodPresetList) SwaggerDoc() map[string]string {
+	return map_PodPresetList
+}
+
+var map_PodPresetSpec = map[string]string{
+	"":             "PodPresetSpec is a description of a pod preset.",
+	"selector":     "Selector is a label query over a set of resources, in this case pods. Required.",
+	"env":          "Env defines the collection of EnvVar to inject into containers.",
+	"envFrom":      "EnvFrom defines the collection of EnvFromSource to inject into containers.",
+	"volumes":      "Volumes defines the collection of Volume to inject into the pod.",
+	"volumeMounts": "VolumeMounts defines the collection of VolumeMount to inject into containers.",
+}
+
+func (PodPresetSpec) SwaggerDoc() map[string]string {
+	return map_PodPresetSpec
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..6397a88
--- /dev/null
+++ b/vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,131 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	v1 "k8s.io/api/core/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodPreset) DeepCopyInto(out *PodPreset) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodPreset.
+func (in *PodPreset) DeepCopy() *PodPreset {
+	if in == nil {
+		return nil
+	}
+	out := new(PodPreset)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodPreset) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodPresetList) DeepCopyInto(out *PodPresetList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]PodPreset, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodPresetList.
+func (in *PodPresetList) DeepCopy() *PodPresetList {
+	if in == nil {
+		return nil
+	}
+	out := new(PodPresetList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodPresetList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodPresetSpec) DeepCopyInto(out *PodPresetSpec) {
+	*out = *in
+	in.Selector.DeepCopyInto(&out.Selector)
+	if in.Env != nil {
+		in, out := &in.Env, &out.Env
+		*out = make([]v1.EnvVar, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.EnvFrom != nil {
+		in, out := &in.EnvFrom, &out.EnvFrom
+		*out = make([]v1.EnvFromSource, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Volumes != nil {
+		in, out := &in.Volumes, &out.Volumes
+		*out = make([]v1.Volume, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.VolumeMounts != nil {
+		in, out := &in.VolumeMounts, &out.VolumeMounts
+		*out = make([]v1.VolumeMount, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodPresetSpec.
+func (in *PodPresetSpec) DeepCopy() *PodPresetSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(PodPresetSpec)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/vendor/k8s.io/api/storage/v1/doc.go b/vendor/k8s.io/api/storage/v1/doc.go
new file mode 100644
index 0000000..ff8bb34
--- /dev/null
+++ b/vendor/k8s.io/api/storage/v1/doc.go
@@ -0,0 +1,21 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +groupName=storage.k8s.io
+// +k8s:openapi-gen=true
+
+package v1
diff --git a/vendor/k8s.io/api/storage/v1/generated.proto b/vendor/k8s.io/api/storage/v1/generated.proto
new file mode 100644
index 0000000..7ac6cb2
--- /dev/null
+++ b/vendor/k8s.io/api/storage/v1/generated.proto
@@ -0,0 +1,186 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.api.storage.v1;
+
+import "k8s.io/api/core/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1";
+
+// StorageClass describes the parameters for a class of storage for
+// which PersistentVolumes can be dynamically provisioned.
+//
+// StorageClasses are non-namespaced; the name of the storage class
+// according to etcd is in ObjectMeta.Name.
+message StorageClass {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Provisioner indicates the type of the provisioner.
+  optional string provisioner = 2;
+
+  // Parameters holds the parameters for the provisioner that should
+  // create volumes of this storage class.
+  // +optional
+  map<string, string> parameters = 3;
+
+  // Dynamically provisioned PersistentVolumes of this storage class are
+  // created with this reclaimPolicy. Defaults to Delete.
+  // +optional
+  optional string reclaimPolicy = 4;
+
+  // Dynamically provisioned PersistentVolumes of this storage class are
+  // created with these mountOptions, e.g. ["ro", "soft"]. Not validated -
+  // mount of the PVs will simply fail if one is invalid.
+  // +optional
+  repeated string mountOptions = 5;
+
+  // AllowVolumeExpansion shows whether the storage class allows volume expansion
+  // +optional
+  optional bool allowVolumeExpansion = 6;
+
+  // VolumeBindingMode indicates how PersistentVolumeClaims should be
+  // provisioned and bound.  When unset, VolumeBindingImmediate is used.
+  // This field is only honored by servers that enable the VolumeScheduling feature.
+  // +optional
+  optional string volumeBindingMode = 7;
+
+  // Restrict the node topologies where volumes can be dynamically provisioned.
+  // Each volume plugin defines its own supported topology specifications.
+  // An empty TopologySelectorTerm list means there is no topology restriction.
+  // This field is only honored by servers that enable the VolumeScheduling feature.
+  // +optional
+  repeated k8s.io.api.core.v1.TopologySelectorTerm allowedTopologies = 8;
+}
+
+// StorageClassList is a collection of storage classes.
+message StorageClassList {
+  // Standard list metadata
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is the list of StorageClasses
+  repeated StorageClass items = 2;
+}
+
+// VolumeAttachment captures the intent to attach or detach the specified volume
+// to/from the specified node.
+//
+// VolumeAttachment objects are non-namespaced.
+message VolumeAttachment {
+  // Standard object metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Specification of the desired attach/detach volume behavior.
+  // Populated by the Kubernetes system.
+  optional VolumeAttachmentSpec spec = 2;
+
+  // Status of the VolumeAttachment request.
+  // Populated by the entity completing the attach or detach
+  // operation, i.e. the external-attacher.
+  // +optional
+  optional VolumeAttachmentStatus status = 3;
+}
+
+// VolumeAttachmentList is a collection of VolumeAttachment objects.
+message VolumeAttachmentList {
+  // Standard list metadata
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is the list of VolumeAttachments
+  repeated VolumeAttachment items = 2;
+}
+
+// VolumeAttachmentSource represents a volume that should be attached.
+// Right now only PersistentVolumes can be attached via an external attacher;
+// in the future we may also allow inline volumes in pods.
+// Exactly one member can be set.
+message VolumeAttachmentSource {
+  // Name of the persistent volume to attach.
+  // +optional
+  optional string persistentVolumeName = 1;
+}
+
+// VolumeAttachmentSpec is the specification of a VolumeAttachment request.
+message VolumeAttachmentSpec {
+  // Attacher indicates the name of the volume driver that MUST handle this
+  // request. This is the name returned by GetPluginName().
+  optional string attacher = 1;
+
+  // Source represents the volume that should be attached.
+  optional VolumeAttachmentSource source = 2;
+
+  // The node that the volume should be attached to.
+  optional string nodeName = 3;
+}
+
+// VolumeAttachmentStatus is the status of a VolumeAttachment request.
+message VolumeAttachmentStatus {
+  // Indicates the volume is successfully attached.
+  // This field must only be set by the entity completing the attach
+  // operation, i.e. the external-attacher.
+  optional bool attached = 1;
+
+  // Upon successful attach, this field is populated with any
+  // information returned by the attach operation that must be passed
+  // into subsequent WaitForAttach or Mount calls.
+  // This field must only be set by the entity completing the attach
+  // operation, i.e. the external-attacher.
+  // +optional
+  map<string, string> attachmentMetadata = 2;
+
+  // The last error encountered during attach operation, if any.
+  // This field must only be set by the entity completing the attach
+  // operation, i.e. the external-attacher.
+  // +optional
+  optional VolumeError attachError = 3;
+
+  // The last error encountered during detach operation, if any.
+  // This field must only be set by the entity completing the detach
+  // operation, i.e. the external-attacher.
+  // +optional
+  optional VolumeError detachError = 4;
+}
+
+// VolumeError captures an error encountered during a volume operation.
+message VolumeError {
+  // Time the error was encountered.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1;
+
+  // String detailing the error encountered during Attach or Detach operation.
+  // This string may be logged, so it should not contain sensitive
+  // information.
+  // +optional
+  optional string message = 2;
+}
+
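
The VolumeAttachment contract splits responsibilities: Kubernetes populates spec (which driver, which volume, which node), while the entity performing the attach (e.g. the external-attacher) writes status. A sketch of the spec side, using the storage/v1 Go types vendored in this change (driver and object names are illustrative):

package main

import (
	"fmt"

	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	pvName := "pv-0001" // Source members are pointers: exactly one may be set

	va := storagev1.VolumeAttachment{
		ObjectMeta: metav1.ObjectMeta{Name: "csi-va-1"},
		Spec: storagev1.VolumeAttachmentSpec{
			Attacher: "csi.example.com", // volume driver that must handle this request
			NodeName: "node-1",
			Source: storagev1.VolumeAttachmentSource{
				PersistentVolumeName: &pvName,
			},
		},
	}
	fmt.Println(va.Name, va.Spec.Attacher, *va.Spec.Source.PersistentVolumeName)
}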
diff --git a/vendor/k8s.io/api/storage/v1/register.go b/vendor/k8s.io/api/storage/v1/register.go
new file mode 100644
index 0000000..473c687
--- /dev/null
+++ b/vendor/k8s.io/api/storage/v1/register.go
@@ -0,0 +1,56 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "storage.k8s.io"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
+	// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+	SchemeBuilder      = runtime.NewSchemeBuilder(addKnownTypes)
+	localSchemeBuilder = &SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&StorageClass{},
+		&StorageClassList{},
+
+		&VolumeAttachment{},
+		&VolumeAttachmentList{},
+	)
+
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
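
StorageClass's optional knobs are pointer-typed in the Go form that follows (ReclaimPolicy, AllowVolumeExpansion, VolumeBindingMode) so that "unset" stays distinguishable from a zero value. A sketch of setting them (the provisioner name is illustrative):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	reclaim := corev1.PersistentVolumeReclaimRetain
	expand := true
	binding := storagev1.VolumeBindingWaitForFirstConsumer

	sc := storagev1.StorageClass{
		ObjectMeta:           metav1.ObjectMeta{Name: "fast"},
		Provisioner:          "example.com/fast-ssd", // illustrative driver name
		Parameters:           map[string]string{"type": "ssd"},
		ReclaimPolicy:        &reclaim, // left nil, the server defaults this to Delete
		AllowVolumeExpansion: &expand,
		VolumeBindingMode:    &binding, // delay binding until the first consumer pod
	}
	fmt.Println(sc.Name, *sc.ReclaimPolicy, *sc.VolumeBindingMode)
}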
diff --git a/vendor/k8s.io/api/storage/v1/types.go b/vendor/k8s.io/api/storage/v1/types.go
new file mode 100644
index 0000000..bd60e10
--- /dev/null
+++ b/vendor/k8s.io/api/storage/v1/types.go
@@ -0,0 +1,211 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// StorageClass describes the parameters for a class of storage for
+// which PersistentVolumes can be dynamically provisioned.
+//
+// StorageClasses are non-namespaced; the name of the storage class
+// according to etcd is in ObjectMeta.Name.
+type StorageClass struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Provisioner indicates the type of the provisioner.
+	Provisioner string `json:"provisioner" protobuf:"bytes,2,opt,name=provisioner"`
+
+	// Parameters holds the parameters for the provisioner that should
+	// create volumes of this storage class.
+	// +optional
+	Parameters map[string]string `json:"parameters,omitempty" protobuf:"bytes,3,rep,name=parameters"`
+
+	// Dynamically provisioned PersistentVolumes of this storage class are
+	// created with this reclaimPolicy. Defaults to Delete.
+	// +optional
+	ReclaimPolicy *v1.PersistentVolumeReclaimPolicy `json:"reclaimPolicy,omitempty" protobuf:"bytes,4,opt,name=reclaimPolicy,casttype=k8s.io/api/core/v1.PersistentVolumeReclaimPolicy"`
+
+	// Dynamically provisioned PersistentVolumes of this storage class are
+	// created with these mountOptions, e.g. ["ro", "soft"]. Not validated -
+	// mount of the PVs will simply fail if one is invalid.
+	// +optional
+	MountOptions []string `json:"mountOptions,omitempty" protobuf:"bytes,5,opt,name=mountOptions"`
+
+	// AllowVolumeExpansion shows whether the storage class allows volume expansion
+	// +optional
+	AllowVolumeExpansion *bool `json:"allowVolumeExpansion,omitempty" protobuf:"varint,6,opt,name=allowVolumeExpansion"`
+
+	// VolumeBindingMode indicates how PersistentVolumeClaims should be
+	// provisioned and bound.  When unset, VolumeBindingImmediate is used.
+	// This field is only honored by servers that enable the VolumeScheduling feature.
+	// +optional
+	VolumeBindingMode *VolumeBindingMode `json:"volumeBindingMode,omitempty" protobuf:"bytes,7,opt,name=volumeBindingMode"`
+
+	// Restrict the node topologies where volumes can be dynamically provisioned.
+	// Each volume plugin defines its own supported topology specifications.
+	// An empty TopologySelectorTerm list means there is no topology restriction.
+	// This field is only honored by servers that enable the VolumeScheduling feature.
+	// +optional
+	AllowedTopologies []v1.TopologySelectorTerm `json:"allowedTopologies,omitempty" protobuf:"bytes,8,rep,name=allowedTopologies"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// StorageClassList is a collection of storage classes.
+type StorageClassList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is the list of StorageClasses
+	Items []StorageClass `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// VolumeBindingMode indicates how PersistentVolumeClaims should be bound.
+type VolumeBindingMode string
+
+const (
+	// VolumeBindingImmediate indicates that PersistentVolumeClaims should be
+	// immediately provisioned and bound.  This is the default mode.
+	VolumeBindingImmediate VolumeBindingMode = "Immediate"
+
+	// VolumeBindingWaitForFirstConsumer indicates that PersistentVolumeClaims
+	// should not be provisioned and bound until the first Pod is created that
+	// references the PersistentVolumeClaim.  The volume provisioning and
+	// binding will occur during Pod scheduling.
+	VolumeBindingWaitForFirstConsumer VolumeBindingMode = "WaitForFirstConsumer"
+)
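+
+// Illustrative construction (editor's sketch, not part of the upstream
+// file): a StorageClass that delays volume binding until the first
+// consumer Pod is scheduled and deletes dynamically provisioned volumes
+// on release. The provisioner name below is hypothetical.
+//
+//	mode := VolumeBindingWaitForFirstConsumer
+//	reclaim := v1.PersistentVolumeReclaimDelete
+//	sc := StorageClass{
+//		ObjectMeta:        metav1.ObjectMeta{Name: "fast-local"},
+//		Provisioner:       "example.com/local-ssd",
+//		ReclaimPolicy:     &reclaim,
+//		VolumeBindingMode: &mode,
+//	}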
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// VolumeAttachment captures the intent to attach or detach the specified volume
+// to/from the specified node.
+//
+// VolumeAttachment objects are non-namespaced.
+type VolumeAttachment struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// Standard object metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Specification of the desired attach/detach volume behavior.
+	// Populated by the Kubernetes system.
+	Spec VolumeAttachmentSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+
+	// Status of the VolumeAttachment request.
+	// Populated by the entity completing the attach or detach
+	// operation, i.e. the external-attacher.
+	// +optional
+	Status VolumeAttachmentStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// VolumeAttachmentList is a collection of VolumeAttachment objects.
+type VolumeAttachmentList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is the list of VolumeAttachments
+	Items []VolumeAttachment `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// VolumeAttachmentSpec is the specification of a VolumeAttachment request.
+type VolumeAttachmentSpec struct {
+	// Attacher indicates the name of the volume driver that MUST handle this
+	// request. This is the name returned by GetPluginName().
+	Attacher string `json:"attacher" protobuf:"bytes,1,opt,name=attacher"`
+
+	// Source represents the volume that should be attached.
+	Source VolumeAttachmentSource `json:"source" protobuf:"bytes,2,opt,name=source"`
+
+	// The node that the volume should be attached to.
+	NodeName string `json:"nodeName" protobuf:"bytes,3,opt,name=nodeName"`
+}
+
+// VolumeAttachmentSource represents a volume that should be attached.
+// Right now only PersistentVolumes can be attached via an external attacher;
+// in the future we may also allow inline volumes in pods.
+// Exactly one member can be set.
+type VolumeAttachmentSource struct {
+	// Name of the persistent volume to attach.
+	// +optional
+	PersistentVolumeName *string `json:"persistentVolumeName,omitempty" protobuf:"bytes,1,opt,name=persistentVolumeName"`
+
+	// Placeholder for *VolumeSource to accommodate inline volumes in pods.
+}
+
+// VolumeAttachmentStatus is the status of a VolumeAttachment request.
+type VolumeAttachmentStatus struct {
+	// Indicates the volume is successfully attached.
+	// This field must only be set by the entity completing the attach
+	// operation, i.e. the external-attacher.
+	Attached bool `json:"attached" protobuf:"varint,1,opt,name=attached"`
+
+	// Upon successful attach, this field is populated with any
+	// information returned by the attach operation that must be passed
+	// into subsequent WaitForAttach or Mount calls.
+	// This field must only be set by the entity completing the attach
+	// operation, i.e. the external-attacher.
+	// +optional
+	AttachmentMetadata map[string]string `json:"attachmentMetadata,omitempty" protobuf:"bytes,2,rep,name=attachmentMetadata"`
+
+	// The last error encountered during attach operation, if any.
+	// This field must only be set by the entity completing the attach
+	// operation, i.e. the external-attacher.
+	// +optional
+	AttachError *VolumeError `json:"attachError,omitempty" protobuf:"bytes,3,opt,name=attachError,casttype=VolumeError"`
+
+	// The last error encountered during detach operation, if any.
+	// This field must only be set by the entity completing the detach
+	// operation, i.e. the external-attacher.
+	// +optional
+	DetachError *VolumeError `json:"detachError,omitempty" protobuf:"bytes,4,opt,name=detachError,casttype=VolumeError"`
+}
+
+// VolumeError captures an error encountered during a volume operation.
+type VolumeError struct {
+	// Time the error was encountered.
+	// +optional
+	Time metav1.Time `json:"time,omitempty" protobuf:"bytes,1,opt,name=time"`
+
+	// String detailing the error encountered during Attach or Detach operation.
+	// This string may be logged, so it should not contain sensitive
+	// information.
+	// +optional
+	Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"`
+}
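+
+// Illustrative lifecycle (editor's sketch, not part of the upstream
+// file): the Kubernetes system creates the object with only Spec set;
+// the external-attacher later reports Status. Names are hypothetical.
+//
+//	pvName := "pvc-0a1b2c3d"
+//	va := VolumeAttachment{
+//		ObjectMeta: metav1.ObjectMeta{Name: "csi-va-example"},
+//		Spec: VolumeAttachmentSpec{
+//			Attacher: "csi.example.com",
+//			NodeName: "node-1",
+//			Source:   VolumeAttachmentSource{PersistentVolumeName: &pvName},
+//		},
+//	}
+//	// On a successful attach the external-attacher sets:
+//	va.Status = VolumeAttachmentStatus{Attached: true}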
diff --git a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go
new file mode 100644
index 0000000..e31dd7f
--- /dev/null
+++ b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go
@@ -0,0 +1,119 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored by the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multi-line blocks that you want to ignore, use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_StorageClass = map[string]string{
+	"":                     "StorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned.\n\nStorageClasses are non-namespaced; the name of the storage class according to etcd is in ObjectMeta.Name.",
+	"metadata":             "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"provisioner":          "Provisioner indicates the type of the provisioner.",
+	"parameters":           "Parameters holds the parameters for the provisioner that should create volumes of this storage class.",
+	"reclaimPolicy":        "Dynamically provisioned PersistentVolumes of this storage class are created with this reclaimPolicy. Defaults to Delete.",
+	"mountOptions":         "Dynamically provisioned PersistentVolumes of this storage class are created with these mountOptions, e.g. [\"ro\", \"soft\"]. Not validated - mount of the PVs will simply fail if one is invalid.",
+	"allowVolumeExpansion": "AllowVolumeExpansion shows whether the storage class allow volume expand",
+	"volumeBindingMode":    "VolumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound.  When unset, VolumeBindingImmediate is used. This field is only honored by servers that enable the VolumeScheduling feature.",
+	"allowedTopologies":    "Restrict the node topologies where volumes can be dynamically provisioned. Each volume plugin defines its own supported topology specifications. An empty TopologySelectorTerm list means there is no topology restriction. This field is only honored by servers that enable the VolumeScheduling feature.",
+}
+
+func (StorageClass) SwaggerDoc() map[string]string {
+	return map_StorageClass
+}
+
+var map_StorageClassList = map[string]string{
+	"":         "StorageClassList is a collection of storage classes.",
+	"metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"items":    "Items is the list of StorageClasses",
+}
+
+func (StorageClassList) SwaggerDoc() map[string]string {
+	return map_StorageClassList
+}
+
+var map_VolumeAttachment = map[string]string{
+	"":         "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.",
+	"metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"spec":     "Specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.",
+	"status":   "Status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.",
+}
+
+func (VolumeAttachment) SwaggerDoc() map[string]string {
+	return map_VolumeAttachment
+}
+
+var map_VolumeAttachmentList = map[string]string{
+	"":         "VolumeAttachmentList is a collection of VolumeAttachment objects.",
+	"metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"items":    "Items is the list of VolumeAttachments",
+}
+
+func (VolumeAttachmentList) SwaggerDoc() map[string]string {
+	return map_VolumeAttachmentList
+}
+
+var map_VolumeAttachmentSource = map[string]string{
+	"":                     "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.",
+	"persistentVolumeName": "Name of the persistent volume to attach.",
+}
+
+func (VolumeAttachmentSource) SwaggerDoc() map[string]string {
+	return map_VolumeAttachmentSource
+}
+
+var map_VolumeAttachmentSpec = map[string]string{
+	"":         "VolumeAttachmentSpec is the specification of a VolumeAttachment request.",
+	"attacher": "Attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName().",
+	"source":   "Source represents the volume that should be attached.",
+	"nodeName": "The node that the volume should be attached to.",
+}
+
+func (VolumeAttachmentSpec) SwaggerDoc() map[string]string {
+	return map_VolumeAttachmentSpec
+}
+
+var map_VolumeAttachmentStatus = map[string]string{
+	"":                   "VolumeAttachmentStatus is the status of a VolumeAttachment request.",
+	"attached":           "Indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.",
+	"attachmentMetadata": "Upon successful attach, this field is populated with any information returned by the attach operation that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.",
+	"attachError":        "The last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.",
+	"detachError":        "The last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. the external-attacher.",
+}
+
+func (VolumeAttachmentStatus) SwaggerDoc() map[string]string {
+	return map_VolumeAttachmentStatus
+}
+
+var map_VolumeError = map[string]string{
+	"":        "VolumeError captures an error encountered during a volume operation.",
+	"time":    "Time the error was encountered.",
+	"message": "String detailing the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information.",
+}
+
+func (VolumeError) SwaggerDoc() map[string]string {
+	return map_VolumeError
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
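+
+// Illustrative lookup (editor's sketch, not part of the generated
+// file): documentation tooling indexes the returned map by JSON field
+// name; the empty key holds the type-level description.
+//
+//	docs := StorageClass{}.SwaggerDoc()
+//	typeDoc := docs[""]             // "StorageClass describes ..."
+//	fieldDoc := docs["provisioner"] // per-field description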
diff --git a/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..3157ec6
--- /dev/null
+++ b/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go
@@ -0,0 +1,268 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	corev1 "k8s.io/api/core/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StorageClass) DeepCopyInto(out *StorageClass) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	if in.Parameters != nil {
+		in, out := &in.Parameters, &out.Parameters
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.ReclaimPolicy != nil {
+		in, out := &in.ReclaimPolicy, &out.ReclaimPolicy
+		*out = new(corev1.PersistentVolumeReclaimPolicy)
+		**out = **in
+	}
+	if in.MountOptions != nil {
+		in, out := &in.MountOptions, &out.MountOptions
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.AllowVolumeExpansion != nil {
+		in, out := &in.AllowVolumeExpansion, &out.AllowVolumeExpansion
+		*out = new(bool)
+		**out = **in
+	}
+	if in.VolumeBindingMode != nil {
+		in, out := &in.VolumeBindingMode, &out.VolumeBindingMode
+		*out = new(VolumeBindingMode)
+		**out = **in
+	}
+	if in.AllowedTopologies != nil {
+		in, out := &in.AllowedTopologies, &out.AllowedTopologies
+		*out = make([]corev1.TopologySelectorTerm, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageClass.
+func (in *StorageClass) DeepCopy() *StorageClass {
+	if in == nil {
+		return nil
+	}
+	out := new(StorageClass)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *StorageClass) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StorageClassList) DeepCopyInto(out *StorageClassList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]StorageClass, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageClassList.
+func (in *StorageClassList) DeepCopy() *StorageClassList {
+	if in == nil {
+		return nil
+	}
+	out := new(StorageClassList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *StorageClassList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VolumeAttachment) DeepCopyInto(out *VolumeAttachment) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachment.
+func (in *VolumeAttachment) DeepCopy() *VolumeAttachment {
+	if in == nil {
+		return nil
+	}
+	out := new(VolumeAttachment)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *VolumeAttachment) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VolumeAttachmentList) DeepCopyInto(out *VolumeAttachmentList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]VolumeAttachment, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentList.
+func (in *VolumeAttachmentList) DeepCopy() *VolumeAttachmentList {
+	if in == nil {
+		return nil
+	}
+	out := new(VolumeAttachmentList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *VolumeAttachmentList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VolumeAttachmentSource) DeepCopyInto(out *VolumeAttachmentSource) {
+	*out = *in
+	if in.PersistentVolumeName != nil {
+		in, out := &in.PersistentVolumeName, &out.PersistentVolumeName
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentSource.
+func (in *VolumeAttachmentSource) DeepCopy() *VolumeAttachmentSource {
+	if in == nil {
+		return nil
+	}
+	out := new(VolumeAttachmentSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VolumeAttachmentSpec) DeepCopyInto(out *VolumeAttachmentSpec) {
+	*out = *in
+	in.Source.DeepCopyInto(&out.Source)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentSpec.
+func (in *VolumeAttachmentSpec) DeepCopy() *VolumeAttachmentSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(VolumeAttachmentSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VolumeAttachmentStatus) DeepCopyInto(out *VolumeAttachmentStatus) {
+	*out = *in
+	if in.AttachmentMetadata != nil {
+		in, out := &in.AttachmentMetadata, &out.AttachmentMetadata
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.AttachError != nil {
+		in, out := &in.AttachError, &out.AttachError
+		*out = new(VolumeError)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.DetachError != nil {
+		in, out := &in.DetachError, &out.DetachError
+		*out = new(VolumeError)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentStatus.
+func (in *VolumeAttachmentStatus) DeepCopy() *VolumeAttachmentStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(VolumeAttachmentStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VolumeError) DeepCopyInto(out *VolumeError) {
+	*out = *in
+	in.Time.DeepCopyInto(&out.Time)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeError.
+func (in *VolumeError) DeepCopy() *VolumeError {
+	if in == nil {
+		return nil
+	}
+	out := new(VolumeError)
+	in.DeepCopyInto(out)
+	return out
+}
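+
+// Illustrative check (editor's sketch, not part of the generated
+// file): DeepCopy duplicates reference fields such as maps, so
+// mutating the copy never aliases the original.
+//
+//	orig := &StorageClass{Parameters: map[string]string{"type": "ssd"}}
+//	dup := orig.DeepCopy()
+//	dup.Parameters["type"] = "hdd"
+//	// orig.Parameters["type"] is still "ssd".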
diff --git a/vendor/k8s.io/api/storage/v1alpha1/doc.go b/vendor/k8s.io/api/storage/v1alpha1/doc.go
new file mode 100644
index 0000000..0056b00
--- /dev/null
+++ b/vendor/k8s.io/api/storage/v1alpha1/doc.go
@@ -0,0 +1,21 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package,register
+// +groupName=storage.k8s.io
+// +k8s:openapi-gen=true
+
+package v1alpha1 // import "k8s.io/api/storage/v1alpha1"
diff --git a/vendor/k8s.io/api/storage/v1alpha1/generated.proto b/vendor/k8s.io/api/storage/v1alpha1/generated.proto
new file mode 100644
index 0000000..fdc4ad2
--- /dev/null
+++ b/vendor/k8s.io/api/storage/v1alpha1/generated.proto
@@ -0,0 +1,126 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.api.storage.v1alpha1;
+
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1alpha1";
+
+// VolumeAttachment captures the intent to attach or detach the specified volume
+// to/from the specified node.
+//
+// VolumeAttachment objects are non-namespaced.
+message VolumeAttachment {
+  // Standard object metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Specification of the desired attach/detach volume behavior.
+  // Populated by the Kubernetes system.
+  optional VolumeAttachmentSpec spec = 2;
+
+  // Status of the VolumeAttachment request.
+  // Populated by the entity completing the attach or detach
+  // operation, i.e. the external-attacher.
+  // +optional
+  optional VolumeAttachmentStatus status = 3;
+}
+
+// VolumeAttachmentList is a collection of VolumeAttachment objects.
+message VolumeAttachmentList {
+  // Standard list metadata
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is the list of VolumeAttachments
+  repeated VolumeAttachment items = 2;
+}
+
+// VolumeAttachmentSource represents a volume that should be attached.
+// Right now only PersistentVolumes can be attached via an external attacher;
+// in the future we may also allow inline volumes in pods.
+// Exactly one member can be set.
+message VolumeAttachmentSource {
+  // Name of the persistent volume to attach.
+  // +optional
+  optional string persistentVolumeName = 1;
+}
+
+// VolumeAttachmentSpec is the specification of a VolumeAttachment request.
+message VolumeAttachmentSpec {
+  // Attacher indicates the name of the volume driver that MUST handle this
+  // request. This is the name returned by GetPluginName().
+  optional string attacher = 1;
+
+  // Source represents the volume that should be attached.
+  optional VolumeAttachmentSource source = 2;
+
+  // The node that the volume should be attached to.
+  optional string nodeName = 3;
+}
+
+// VolumeAttachmentStatus is the status of a VolumeAttachment request.
+message VolumeAttachmentStatus {
+  // Indicates the volume is successfully attached.
+  // This field must only be set by the entity completing the attach
+  // operation, i.e. the external-attacher.
+  optional bool attached = 1;
+
+  // Upon successful attach, this field is populated with any
+  // information returned by the attach operation that must be passed
+  // into subsequent WaitForAttach or Mount calls.
+  // This field must only be set by the entity completing the attach
+  // operation, i.e. the external-attacher.
+  // +optional
+  map<string, string> attachmentMetadata = 2;
+
+  // The last error encountered during attach operation, if any.
+  // This field must only be set by the entity completing the attach
+  // operation, i.e. the external-attacher.
+  // +optional
+  optional VolumeError attachError = 3;
+
+  // The last error encountered during detach operation, if any.
+  // This field must only be set by the entity completing the detach
+  // operation, i.e. the external-attacher.
+  // +optional
+  optional VolumeError detachError = 4;
+}
+
+// VolumeError captures an error encountered during a volume operation.
+message VolumeError {
+  // Time the error was encountered.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1;
+
+  // String detailing the error encountered during Attach or Detach operation.
+  // This string may be logged, so it should not contain sensitive
+  // information.
+  // +optional
+  optional string message = 2;
+}
+
diff --git a/vendor/k8s.io/api/storage/v1alpha1/register.go b/vendor/k8s.io/api/storage/v1alpha1/register.go
new file mode 100644
index 0000000..7b81ee4
--- /dev/null
+++ b/vendor/k8s.io/api/storage/v1alpha1/register.go
@@ -0,0 +1,50 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "storage.k8s.io"
+
+// SchemeGroupVersion is the group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
+
+// Resource takes an unqualified resource and returns a Group-qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+	AddToScheme   = SchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&VolumeAttachment{},
+		&VolumeAttachmentList{},
+	)
+
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
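+
+// Illustrative usage (editor's sketch, not part of the upstream file):
+// each API version exposes its own AddToScheme, so a client that needs
+// both the stable and the alpha storage types registers them side by
+// side on one scheme.
+//
+//	scheme := runtime.NewScheme()
+//	for _, add := range []func(*runtime.Scheme) error{
+//		storagev1.AddToScheme,       // k8s.io/api/storage/v1
+//		storagev1alpha1.AddToScheme, // k8s.io/api/storage/v1alpha1
+//	} {
+//		if err := add(scheme); err != nil {
+//			panic(err)
+//		}
+//	}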
diff --git a/vendor/k8s.io/api/storage/v1alpha1/types.go b/vendor/k8s.io/api/storage/v1alpha1/types.go
new file mode 100644
index 0000000..964bb5f
--- /dev/null
+++ b/vendor/k8s.io/api/storage/v1alpha1/types.go
@@ -0,0 +1,126 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// VolumeAttachment captures the intent to attach or detach the specified volume
+// to/from the specified node.
+//
+// VolumeAttachment objects are non-namespaced.
+type VolumeAttachment struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// Standard object metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Specification of the desired attach/detach volume behavior.
+	// Populated by the Kubernetes system.
+	Spec VolumeAttachmentSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+
+	// Status of the VolumeAttachment request.
+	// Populated by the entity completing the attach or detach
+	// operation, i.e. the external-attacher.
+	// +optional
+	Status VolumeAttachmentStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// VolumeAttachmentList is a collection of VolumeAttachment objects.
+type VolumeAttachmentList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is the list of VolumeAttachments
+	Items []VolumeAttachment `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// VolumeAttachmentSpec is the specification of a VolumeAttachment request.
+type VolumeAttachmentSpec struct {
+	// Attacher indicates the name of the volume driver that MUST handle this
+	// request. This is the name returned by GetPluginName().
+	Attacher string `json:"attacher" protobuf:"bytes,1,opt,name=attacher"`
+
+	// Source represents the volume that should be attached.
+	Source VolumeAttachmentSource `json:"source" protobuf:"bytes,2,opt,name=source"`
+
+	// The node that the volume should be attached to.
+	NodeName string `json:"nodeName" protobuf:"bytes,3,opt,name=nodeName"`
+}
+
+// VolumeAttachmentSource represents a volume that should be attached.
+// Right now only PersistentVolumes can be attached via an external attacher;
+// in the future we may also allow inline volumes in pods.
+// Exactly one member can be set.
+type VolumeAttachmentSource struct {
+	// Name of the persistent volume to attach.
+	// +optional
+	PersistentVolumeName *string `json:"persistentVolumeName,omitempty" protobuf:"bytes,1,opt,name=persistentVolumeName"`
+
+	// Placeholder for *VolumeSource to accommodate inline volumes in pods.
+}
+
+// VolumeAttachmentStatus is the status of a VolumeAttachment request.
+type VolumeAttachmentStatus struct {
+	// Indicates the volume is successfully attached.
+	// This field must only be set by the entity completing the attach
+	// operation, i.e. the external-attacher.
+	Attached bool `json:"attached" protobuf:"varint,1,opt,name=attached"`
+
+	// Upon successful attach, this field is populated with any
+	// information returned by the attach operation that must be passed
+	// into subsequent WaitForAttach or Mount calls.
+	// This field must only be set by the entity completing the attach
+	// operation, i.e. the external-attacher.
+	// +optional
+	AttachmentMetadata map[string]string `json:"attachmentMetadata,omitempty" protobuf:"bytes,2,rep,name=attachmentMetadata"`
+
+	// The last error encountered during attach operation, if any.
+	// This field must only be set by the entity completing the attach
+	// operation, i.e. the external-attacher.
+	// +optional
+	AttachError *VolumeError `json:"attachError,omitempty" protobuf:"bytes,3,opt,name=attachError,casttype=VolumeError"`
+
+	// The last error encountered during detach operation, if any.
+	// This field must only be set by the entity completing the detach
+	// operation, i.e. the external-attacher.
+	// +optional
+	DetachError *VolumeError `json:"detachError,omitempty" protobuf:"bytes,4,opt,name=detachError,casttype=VolumeError"`
+}
+
+// VolumeError captures an error encountered during a volume operation.
+type VolumeError struct {
+	// Time the error was encountered.
+	// +optional
+	Time metav1.Time `json:"time,omitempty" protobuf:"bytes,1,opt,name=time"`
+
+	// String detailing the error encountered during Attach or Detach operation.
+	// This string may be logged, so it should not contain sensitive
+	// information.
+	// +optional
+	Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"`
+}
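+
+// Illustrative validation (editor's sketch, not part of the upstream
+// file): with PersistentVolumeName as the only member today, the
+// "exactly one member can be set" rule on VolumeAttachmentSource
+// reduces to requiring a non-nil, non-empty name.
+//
+//	func validSource(src VolumeAttachmentSource) bool {
+//		return src.PersistentVolumeName != nil && *src.PersistentVolumeName != ""
+//	}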
diff --git a/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go
new file mode 100644
index 0000000..3701b08
--- /dev/null
+++ b/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go
@@ -0,0 +1,93 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored by the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multi-line blocks that you want to ignore, use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_VolumeAttachment = map[string]string{
+	"":         "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.",
+	"metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"spec":     "Specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.",
+	"status":   "Status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.",
+}
+
+func (VolumeAttachment) SwaggerDoc() map[string]string {
+	return map_VolumeAttachment
+}
+
+var map_VolumeAttachmentList = map[string]string{
+	"":         "VolumeAttachmentList is a collection of VolumeAttachment objects.",
+	"metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"items":    "Items is the list of VolumeAttachments",
+}
+
+func (VolumeAttachmentList) SwaggerDoc() map[string]string {
+	return map_VolumeAttachmentList
+}
+
+var map_VolumeAttachmentSource = map[string]string{
+	"":                     "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.",
+	"persistentVolumeName": "Name of the persistent volume to attach.",
+}
+
+func (VolumeAttachmentSource) SwaggerDoc() map[string]string {
+	return map_VolumeAttachmentSource
+}
+
+var map_VolumeAttachmentSpec = map[string]string{
+	"":         "VolumeAttachmentSpec is the specification of a VolumeAttachment request.",
+	"attacher": "Attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName().",
+	"source":   "Source represents the volume that should be attached.",
+	"nodeName": "The node that the volume should be attached to.",
+}
+
+func (VolumeAttachmentSpec) SwaggerDoc() map[string]string {
+	return map_VolumeAttachmentSpec
+}
+
+var map_VolumeAttachmentStatus = map[string]string{
+	"":                   "VolumeAttachmentStatus is the status of a VolumeAttachment request.",
+	"attached":           "Indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.",
+	"attachmentMetadata": "Upon successful attach, this field is populated with any information returned by the attach operation that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.",
+	"attachError":        "The last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.",
+	"detachError":        "The last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. the external-attacher.",
+}
+
+func (VolumeAttachmentStatus) SwaggerDoc() map[string]string {
+	return map_VolumeAttachmentStatus
+}
+
+var map_VolumeError = map[string]string{
+	"":        "VolumeError captures an error encountered during a volume operation.",
+	"time":    "Time the error was encountered.",
+	"message": "String detailing the error encountered during Attach or Detach operation. This string maybe logged, so it should not contain sensitive information.",
+}
+
+func (VolumeError) SwaggerDoc() map[string]string {
+	return map_VolumeError
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..e27c6ff
--- /dev/null
+++ b/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,174 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VolumeAttachment) DeepCopyInto(out *VolumeAttachment) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachment.
+func (in *VolumeAttachment) DeepCopy() *VolumeAttachment {
+	if in == nil {
+		return nil
+	}
+	out := new(VolumeAttachment)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *VolumeAttachment) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VolumeAttachmentList) DeepCopyInto(out *VolumeAttachmentList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]VolumeAttachment, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentList.
+func (in *VolumeAttachmentList) DeepCopy() *VolumeAttachmentList {
+	if in == nil {
+		return nil
+	}
+	out := new(VolumeAttachmentList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *VolumeAttachmentList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VolumeAttachmentSource) DeepCopyInto(out *VolumeAttachmentSource) {
+	*out = *in
+	if in.PersistentVolumeName != nil {
+		in, out := &in.PersistentVolumeName, &out.PersistentVolumeName
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentSource.
+func (in *VolumeAttachmentSource) DeepCopy() *VolumeAttachmentSource {
+	if in == nil {
+		return nil
+	}
+	out := new(VolumeAttachmentSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VolumeAttachmentSpec) DeepCopyInto(out *VolumeAttachmentSpec) {
+	*out = *in
+	in.Source.DeepCopyInto(&out.Source)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentSpec.
+func (in *VolumeAttachmentSpec) DeepCopy() *VolumeAttachmentSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(VolumeAttachmentSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VolumeAttachmentStatus) DeepCopyInto(out *VolumeAttachmentStatus) {
+	*out = *in
+	if in.AttachmentMetadata != nil {
+		in, out := &in.AttachmentMetadata, &out.AttachmentMetadata
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.AttachError != nil {
+		in, out := &in.AttachError, &out.AttachError
+		*out = new(VolumeError)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.DetachError != nil {
+		in, out := &in.DetachError, &out.DetachError
+		*out = new(VolumeError)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentStatus.
+func (in *VolumeAttachmentStatus) DeepCopy() *VolumeAttachmentStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(VolumeAttachmentStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VolumeError) DeepCopyInto(out *VolumeError) {
+	*out = *in
+	in.Time.DeepCopyInto(&out.Time)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeError.
+func (in *VolumeError) DeepCopy() *VolumeError {
+	if in == nil {
+		return nil
+	}
+	out := new(VolumeError)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/vendor/k8s.io/api/storage/v1beta1/doc.go b/vendor/k8s.io/api/storage/v1beta1/doc.go
new file mode 100644
index 0000000..ea7667d
--- /dev/null
+++ b/vendor/k8s.io/api/storage/v1beta1/doc.go
@@ -0,0 +1,21 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +groupName=storage.k8s.io
+// +k8s:openapi-gen=true
+
+package v1beta1 // import "k8s.io/api/storage/v1beta1"
diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.proto b/vendor/k8s.io/api/storage/v1beta1/generated.proto
new file mode 100644
index 0000000..4efe7d7
--- /dev/null
+++ b/vendor/k8s.io/api/storage/v1beta1/generated.proto
@@ -0,0 +1,186 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.api.storage.v1beta1;
+
+import "k8s.io/api/core/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1beta1";
+
+// StorageClass describes the parameters for a class of storage for
+// which PersistentVolumes can be dynamically provisioned.
+//
+// StorageClasses are non-namespaced; the name of the storage class
+// according to etcd is in ObjectMeta.Name.
+message StorageClass {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Provisioner indicates the type of the provisioner.
+  optional string provisioner = 2;
+
+  // Parameters holds the parameters for the provisioner that should
+  // create volumes of this storage class.
+  // +optional
+  map<string, string> parameters = 3;
+
+  // Dynamically provisioned PersistentVolumes of this storage class are
+  // created with this reclaimPolicy. Defaults to Delete.
+  // +optional
+  optional string reclaimPolicy = 4;
+
+  // Dynamically provisioned PersistentVolumes of this storage class are
+  // created with these mountOptions, e.g. ["ro", "soft"]. Not validated -
+  // mount of the PVs will simply fail if one is invalid.
+  // +optional
+  repeated string mountOptions = 5;
+
+  // AllowVolumeExpansion shows whether the storage class allows volume expansion
+  // +optional
+  optional bool allowVolumeExpansion = 6;
+
+  // VolumeBindingMode indicates how PersistentVolumeClaims should be
+  // provisioned and bound.  When unset, VolumeBindingImmediate is used.
+  // This field is only honored by servers that enable the VolumeScheduling feature.
+  // +optional
+  optional string volumeBindingMode = 7;
+
+  // Restrict the node topologies where volumes can be dynamically provisioned.
+  // Each volume plugin defines its own supported topology specifications.
+  // An empty TopologySelectorTerm list means there is no topology restriction.
+  // This field is only honored by servers that enable the VolumeScheduling feature.
+  // +optional
+  repeated k8s.io.api.core.v1.TopologySelectorTerm allowedTopologies = 8;
+}
+
+// StorageClassList is a collection of storage classes.
+message StorageClassList {
+  // Standard list metadata
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is the list of StorageClasses
+  repeated StorageClass items = 2;
+}
+
+// VolumeAttachment captures the intent to attach or detach the specified volume
+// to/from the specified node.
+//
+// VolumeAttachment objects are non-namespaced.
+message VolumeAttachment {
+  // Standard object metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Specification of the desired attach/detach volume behavior.
+  // Populated by the Kubernetes system.
+  optional VolumeAttachmentSpec spec = 2;
+
+  // Status of the VolumeAttachment request.
+  // Populated by the entity completing the attach or detach
+  // operation, i.e. the external-attacher.
+  // +optional
+  optional VolumeAttachmentStatus status = 3;
+}
+
+// VolumeAttachmentList is a collection of VolumeAttachment objects.
+message VolumeAttachmentList {
+  // Standard list metadata
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is the list of VolumeAttachments
+  repeated VolumeAttachment items = 2;
+}
+
+// VolumeAttachmentSource represents a volume that should be attached.
+// Right now only PersistentVolumes can be attached via an external attacher;
+// in the future we may also allow inline volumes in pods.
+// Exactly one member can be set.
+message VolumeAttachmentSource {
+  // Name of the persistent volume to attach.
+  // +optional
+  optional string persistentVolumeName = 1;
+}
+
+// VolumeAttachmentSpec is the specification of a VolumeAttachment request.
+message VolumeAttachmentSpec {
+  // Attacher indicates the name of the volume driver that MUST handle this
+  // request. This is the name returned by GetPluginName().
+  optional string attacher = 1;
+
+  // Source represents the volume that should be attached.
+  optional VolumeAttachmentSource source = 2;
+
+  // The node that the volume should be attached to.
+  optional string nodeName = 3;
+}
+
+// VolumeAttachmentStatus is the status of a VolumeAttachment request.
+message VolumeAttachmentStatus {
+  // Indicates the volume is successfully attached.
+  // This field must only be set by the entity completing the attach
+  // operation, i.e. the external-attacher.
+  optional bool attached = 1;
+
+  // Upon successful attach, this field is populated with any
+  // information returned by the attach operation that must be passed
+  // into subsequent WaitForAttach or Mount calls.
+  // This field must only be set by the entity completing the attach
+  // operation, i.e. the external-attacher.
+  // +optional
+  map<string, string> attachmentMetadata = 2;
+
+  // The last error encountered during attach operation, if any.
+  // This field must only be set by the entity completing the attach
+  // operation, i.e. the external-attacher.
+  // +optional
+  optional VolumeError attachError = 3;
+
+  // The last error encountered during detach operation, if any.
+  // This field must only be set by the entity completing the detach
+  // operation, i.e. the external-attacher.
+  // +optional
+  optional VolumeError detachError = 4;
+}
+
+// VolumeError captures an error encountered during a volume operation.
+message VolumeError {
+  // Time the error was encountered.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1;
+
+  // String detailing the error encountered during Attach or Detach operation.
+  // This string may be logged, so it should not contain sensitive
+  // information.
+  // +optional
+  optional string message = 2;
+}
+
diff --git a/vendor/k8s.io/api/storage/v1beta1/register.go b/vendor/k8s.io/api/storage/v1beta1/register.go
new file mode 100644
index 0000000..06b0f3d
--- /dev/null
+++ b/vendor/k8s.io/api/storage/v1beta1/register.go
@@ -0,0 +1,56 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "storage.k8s.io"
+
+// SchemeGroupVersion is the group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
+
+// Resource takes an unqualified resource and returns a Group-qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
+	// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+	SchemeBuilder      = runtime.NewSchemeBuilder(addKnownTypes)
+	localSchemeBuilder = &SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&StorageClass{},
+		&StorageClassList{},
+
+		&VolumeAttachment{},
+		&VolumeAttachmentList{},
+	)
+
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
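register.go is the glue that makes these kinds visible to apimachinery. A hedged sketch of how a consumer typically uses it; the scheme wiring is standard apimachinery usage, while the package alias and printed output are illustrative assumptions:

package main

import (
	"fmt"

	storagev1beta1 "k8s.io/api/storage/v1beta1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	// AddToScheme registers StorageClass(List) and VolumeAttachment(List)
	// under storage.k8s.io/v1beta1, plus the shared meta/v1 types.
	if err := storagev1beta1.AddToScheme(scheme); err != nil {
		panic(err)
	}
	// Once registered, kinds can be instantiated from their GroupVersionKind.
	gvk := storagev1beta1.SchemeGroupVersion.WithKind("VolumeAttachment")
	obj, err := scheme.New(gvk)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", obj) // *v1beta1.VolumeAttachment
}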
diff --git a/vendor/k8s.io/api/storage/v1beta1/types.go b/vendor/k8s.io/api/storage/v1beta1/types.go
new file mode 100644
index 0000000..a955542
--- /dev/null
+++ b/vendor/k8s.io/api/storage/v1beta1/types.go
@@ -0,0 +1,211 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	"k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// StorageClass describes the parameters for a class of storage for
+// which PersistentVolumes can be dynamically provisioned.
+//
+// StorageClasses are non-namespaced; the name of the storage class
+// according to etcd is in ObjectMeta.Name.
+type StorageClass struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Provisioner indicates the type of the provisioner.
+	Provisioner string `json:"provisioner" protobuf:"bytes,2,opt,name=provisioner"`
+
+	// Parameters holds the parameters for the provisioner that should
+	// create volumes of this storage class.
+	// +optional
+	Parameters map[string]string `json:"parameters,omitempty" protobuf:"bytes,3,rep,name=parameters"`
+
+	// Dynamically provisioned PersistentVolumes of this storage class are
+	// created with this reclaimPolicy. Defaults to Delete.
+	// +optional
+	ReclaimPolicy *v1.PersistentVolumeReclaimPolicy `json:"reclaimPolicy,omitempty" protobuf:"bytes,4,opt,name=reclaimPolicy,casttype=k8s.io/api/core/v1.PersistentVolumeReclaimPolicy"`
+
+	// Dynamically provisioned PersistentVolumes of this storage class are
+	// created with these mountOptions, e.g. ["ro", "soft"]. Not validated -
+	// mount of the PVs will simply fail if one is invalid.
+	// +optional
+	MountOptions []string `json:"mountOptions,omitempty" protobuf:"bytes,5,opt,name=mountOptions"`
+
+	// AllowVolumeExpansion shows whether the storage class allows volume expansion
+	// +optional
+	AllowVolumeExpansion *bool `json:"allowVolumeExpansion,omitempty" protobuf:"varint,6,opt,name=allowVolumeExpansion"`
+
+	// VolumeBindingMode indicates how PersistentVolumeClaims should be
+	// provisioned and bound.  When unset, VolumeBindingImmediate is used.
+	// This field is only honored by servers that enable the VolumeScheduling feature.
+	// +optional
+	VolumeBindingMode *VolumeBindingMode `json:"volumeBindingMode,omitempty" protobuf:"bytes,7,opt,name=volumeBindingMode"`
+
+	// Restrict the node topologies where volumes can be dynamically provisioned.
+	// Each volume plugin defines its own supported topology specifications.
+	// An empty TopologySelectorTerm list means there is no topology restriction.
+	// This field is only honored by servers that enable the VolumeScheduling feature.
+	// +optional
+	AllowedTopologies []v1.TopologySelectorTerm `json:"allowedTopologies,omitempty" protobuf:"bytes,8,rep,name=allowedTopologies"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// StorageClassList is a collection of storage classes.
+type StorageClassList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is the list of StorageClasses
+	Items []StorageClass `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// VolumeBindingMode indicates how PersistentVolumeClaims should be bound.
+type VolumeBindingMode string
+
+const (
+	// VolumeBindingImmediate indicates that PersistentVolumeClaims should be
+	// immediately provisioned and bound.  This is the default mode.
+	VolumeBindingImmediate VolumeBindingMode = "Immediate"
+
+	// VolumeBindingWaitForFirstConsumer indicates that PersistentVolumeClaims
+	// should not be provisioned and bound until the first Pod is created that
+	// references the PersistentVolumeClaim.  The volume provisioning and
+	// binding will occur during Pod scheduling.
+	VolumeBindingWaitForFirstConsumer VolumeBindingMode = "WaitForFirstConsumer"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// VolumeAttachment captures the intent to attach or detach the specified volume
+// to/from the specified node.
+//
+// VolumeAttachment objects are non-namespaced.
+type VolumeAttachment struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// Standard object metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Specification of the desired attach/detach volume behavior.
+	// Populated by the Kubernetes system.
+	Spec VolumeAttachmentSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+
+	// Status of the VolumeAttachment request.
+	// Populated by the entity completing the attach or detach
+	// operation, i.e. the external-attacher.
+	// +optional
+	Status VolumeAttachmentStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// VolumeAttachmentList is a collection of VolumeAttachment objects.
+type VolumeAttachmentList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is the list of VolumeAttachments
+	Items []VolumeAttachment `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// VolumeAttachmentSpec is the specification of a VolumeAttachment request.
+type VolumeAttachmentSpec struct {
+	// Attacher indicates the name of the volume driver that MUST handle this
+	// request. This is the name returned by GetPluginName().
+	Attacher string `json:"attacher" protobuf:"bytes,1,opt,name=attacher"`
+
+	// Source represents the volume that should be attached.
+	Source VolumeAttachmentSource `json:"source" protobuf:"bytes,2,opt,name=source"`
+
+	// The node that the volume should be attached to.
+	NodeName string `json:"nodeName" protobuf:"bytes,3,opt,name=nodeName"`
+}
+
+// VolumeAttachmentSource represents a volume that should be attached.
+// Right now only PersistentVolumes can be attached via external attacher;
+// in the future we may also allow inline volumes in pods.
+// Exactly one member can be set.
+type VolumeAttachmentSource struct {
+	// Name of the persistent volume to attach.
+	// +optional
+	PersistentVolumeName *string `json:"persistentVolumeName,omitempty" protobuf:"bytes,1,opt,name=persistentVolumeName"`
+
+	// Placeholder for *VolumeSource to accommodate inline volumes in pods.
+}
+
+// VolumeAttachmentStatus is the status of a VolumeAttachment request.
+type VolumeAttachmentStatus struct {
+	// Indicates the volume is successfully attached.
+	// This field must only be set by the entity completing the attach
+	// operation, i.e. the external-attacher.
+	Attached bool `json:"attached" protobuf:"varint,1,opt,name=attached"`
+
+	// Upon successful attach, this field is populated with any
+	// information returned by the attach operation that must be passed
+	// into subsequent WaitForAttach or Mount calls.
+	// This field must only be set by the entity completing the attach
+	// operation, i.e. the external-attacher.
+	// +optional
+	AttachmentMetadata map[string]string `json:"attachmentMetadata,omitempty" protobuf:"bytes,2,rep,name=attachmentMetadata"`
+
+	// The last error encountered during attach operation, if any.
+	// This field must only be set by the entity completing the attach
+	// operation, i.e. the external-attacher.
+	// +optional
+	AttachError *VolumeError `json:"attachError,omitempty" protobuf:"bytes,3,opt,name=attachError,casttype=VolumeError"`
+
+	// The last error encountered during detach operation, if any.
+	// This field must only be set by the entity completing the detach
+	// operation, i.e. the external-attacher.
+	// +optional
+	DetachError *VolumeError `json:"detachError,omitempty" protobuf:"bytes,4,opt,name=detachError,casttype=VolumeError"`
+}
+
+// VolumeError captures an error encountered during a volume operation.
+type VolumeError struct {
+	// Time the error was encountered.
+	// +optional
+	Time metav1.Time `json:"time,omitempty" protobuf:"bytes,1,opt,name=time"`
+
+	// String detailing the error encountered during Attach or Detach operation.
+	// This string may be logged, so it should not contain sensitive
+	// information.
+	// +optional
+	Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"`
+}
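types.go above is the Go mirror of the proto definitions earlier in this diff. As a quick orientation, here is a hedged sketch of building a topology-aware StorageClass; the provisioner name, parameters, and object name are invented for illustration:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	storagev1beta1 "k8s.io/api/storage/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	reclaim := v1.PersistentVolumeReclaimRetain
	mode := storagev1beta1.VolumeBindingWaitForFirstConsumer
	expand := true
	sc := storagev1beta1.StorageClass{
		ObjectMeta:           metav1.ObjectMeta{Name: "topology-aware-fast"},
		Provisioner:          "csi-driver.example.com", // illustrative driver name
		Parameters:           map[string]string{"type": "ssd"},
		ReclaimPolicy:        &reclaim,
		MountOptions:         []string{"ro", "soft"},
		AllowVolumeExpansion: &expand,
		VolumeBindingMode:    &mode, // delay binding until the first Pod is scheduled
	}
	fmt.Println(sc.Name, *sc.VolumeBindingMode)
}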
diff --git a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go
new file mode 100644
index 0000000..e41197b
--- /dev/null
+++ b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go
@@ -0,0 +1,119 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple lines or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_StorageClass = map[string]string{
+	"":                     "StorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned.\n\nStorageClasses are non-namespaced; the name of the storage class according to etcd is in ObjectMeta.Name.",
+	"metadata":             "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"provisioner":          "Provisioner indicates the type of the provisioner.",
+	"parameters":           "Parameters holds the parameters for the provisioner that should create volumes of this storage class.",
+	"reclaimPolicy":        "Dynamically provisioned PersistentVolumes of this storage class are created with this reclaimPolicy. Defaults to Delete.",
+	"mountOptions":         "Dynamically provisioned PersistentVolumes of this storage class are created with these mountOptions, e.g. [\"ro\", \"soft\"]. Not validated - mount of the PVs will simply fail if one is invalid.",
+	"allowVolumeExpansion": "AllowVolumeExpansion shows whether the storage class allows volume expansion",
+	"volumeBindingMode":    "VolumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound.  When unset, VolumeBindingImmediate is used. This field is only honored by servers that enable the VolumeScheduling feature.",
+	"allowedTopologies":    "Restrict the node topologies where volumes can be dynamically provisioned. Each volume plugin defines its own supported topology specifications. An empty TopologySelectorTerm list means there is no topology restriction. This field is only honored by servers that enable the VolumeScheduling feature.",
+}
+
+func (StorageClass) SwaggerDoc() map[string]string {
+	return map_StorageClass
+}
+
+var map_StorageClassList = map[string]string{
+	"":         "StorageClassList is a collection of storage classes.",
+	"metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"items":    "Items is the list of StorageClasses",
+}
+
+func (StorageClassList) SwaggerDoc() map[string]string {
+	return map_StorageClassList
+}
+
+var map_VolumeAttachment = map[string]string{
+	"":         "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.",
+	"metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"spec":     "Specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.",
+	"status":   "Status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.",
+}
+
+func (VolumeAttachment) SwaggerDoc() map[string]string {
+	return map_VolumeAttachment
+}
+
+var map_VolumeAttachmentList = map[string]string{
+	"":         "VolumeAttachmentList is a collection of VolumeAttachment objects.",
+	"metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"items":    "Items is the list of VolumeAttachments",
+}
+
+func (VolumeAttachmentList) SwaggerDoc() map[string]string {
+	return map_VolumeAttachmentList
+}
+
+var map_VolumeAttachmentSource = map[string]string{
+	"":                     "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistentVolumes can be attached via external attacher; in the future we may also allow inline volumes in pods. Exactly one member can be set.",
+	"persistentVolumeName": "Name of the persistent volume to attach.",
+}
+
+func (VolumeAttachmentSource) SwaggerDoc() map[string]string {
+	return map_VolumeAttachmentSource
+}
+
+var map_VolumeAttachmentSpec = map[string]string{
+	"":         "VolumeAttachmentSpec is the specification of a VolumeAttachment request.",
+	"attacher": "Attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName().",
+	"source":   "Source represents the volume that should be attached.",
+	"nodeName": "The node that the volume should be attached to.",
+}
+
+func (VolumeAttachmentSpec) SwaggerDoc() map[string]string {
+	return map_VolumeAttachmentSpec
+}
+
+var map_VolumeAttachmentStatus = map[string]string{
+	"":                   "VolumeAttachmentStatus is the status of a VolumeAttachment request.",
+	"attached":           "Indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.",
+	"attachmentMetadata": "Upon successful attach, this field is populated with any information returned by the attach operation that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.",
+	"attachError":        "The last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.",
+	"detachError":        "The last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. the external-attacher.",
+}
+
+func (VolumeAttachmentStatus) SwaggerDoc() map[string]string {
+	return map_VolumeAttachmentStatus
+}
+
+var map_VolumeError = map[string]string{
+	"":        "VolumeError captures an error encountered during a volume operation.",
+	"time":    "Time the error was encountered.",
+	"message": "String detailing the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information.",
+}
+
+func (VolumeError) SwaggerDoc() map[string]string {
+	return map_VolumeError
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
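Each generated map is keyed by JSON field name, with the empty key carrying the type-level description; go-restful-style tooling reads them through the SwaggerDoc methods on a zero value. A tiny illustrative read:

package main

import (
	"fmt"

	storagev1beta1 "k8s.io/api/storage/v1beta1"
)

func main() {
	docs := storagev1beta1.VolumeAttachmentSpec{}.SwaggerDoc()
	fmt.Println(docs[""])         // type-level description
	fmt.Println(docs["attacher"]) // per-field description
}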
diff --git a/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..8096dba
--- /dev/null
+++ b/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go
@@ -0,0 +1,268 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	v1 "k8s.io/api/core/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StorageClass) DeepCopyInto(out *StorageClass) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	if in.Parameters != nil {
+		in, out := &in.Parameters, &out.Parameters
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.ReclaimPolicy != nil {
+		in, out := &in.ReclaimPolicy, &out.ReclaimPolicy
+		*out = new(v1.PersistentVolumeReclaimPolicy)
+		**out = **in
+	}
+	if in.MountOptions != nil {
+		in, out := &in.MountOptions, &out.MountOptions
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.AllowVolumeExpansion != nil {
+		in, out := &in.AllowVolumeExpansion, &out.AllowVolumeExpansion
+		*out = new(bool)
+		**out = **in
+	}
+	if in.VolumeBindingMode != nil {
+		in, out := &in.VolumeBindingMode, &out.VolumeBindingMode
+		*out = new(VolumeBindingMode)
+		**out = **in
+	}
+	if in.AllowedTopologies != nil {
+		in, out := &in.AllowedTopologies, &out.AllowedTopologies
+		*out = make([]v1.TopologySelectorTerm, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageClass.
+func (in *StorageClass) DeepCopy() *StorageClass {
+	if in == nil {
+		return nil
+	}
+	out := new(StorageClass)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *StorageClass) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StorageClassList) DeepCopyInto(out *StorageClassList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]StorageClass, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageClassList.
+func (in *StorageClassList) DeepCopy() *StorageClassList {
+	if in == nil {
+		return nil
+	}
+	out := new(StorageClassList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *StorageClassList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VolumeAttachment) DeepCopyInto(out *VolumeAttachment) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachment.
+func (in *VolumeAttachment) DeepCopy() *VolumeAttachment {
+	if in == nil {
+		return nil
+	}
+	out := new(VolumeAttachment)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *VolumeAttachment) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VolumeAttachmentList) DeepCopyInto(out *VolumeAttachmentList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]VolumeAttachment, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentList.
+func (in *VolumeAttachmentList) DeepCopy() *VolumeAttachmentList {
+	if in == nil {
+		return nil
+	}
+	out := new(VolumeAttachmentList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *VolumeAttachmentList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VolumeAttachmentSource) DeepCopyInto(out *VolumeAttachmentSource) {
+	*out = *in
+	if in.PersistentVolumeName != nil {
+		in, out := &in.PersistentVolumeName, &out.PersistentVolumeName
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentSource.
+func (in *VolumeAttachmentSource) DeepCopy() *VolumeAttachmentSource {
+	if in == nil {
+		return nil
+	}
+	out := new(VolumeAttachmentSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VolumeAttachmentSpec) DeepCopyInto(out *VolumeAttachmentSpec) {
+	*out = *in
+	in.Source.DeepCopyInto(&out.Source)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentSpec.
+func (in *VolumeAttachmentSpec) DeepCopy() *VolumeAttachmentSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(VolumeAttachmentSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VolumeAttachmentStatus) DeepCopyInto(out *VolumeAttachmentStatus) {
+	*out = *in
+	if in.AttachmentMetadata != nil {
+		in, out := &in.AttachmentMetadata, &out.AttachmentMetadata
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.AttachError != nil {
+		in, out := &in.AttachError, &out.AttachError
+		*out = new(VolumeError)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.DetachError != nil {
+		in, out := &in.DetachError, &out.DetachError
+		*out = new(VolumeError)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentStatus.
+func (in *VolumeAttachmentStatus) DeepCopy() *VolumeAttachmentStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(VolumeAttachmentStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VolumeError) DeepCopyInto(out *VolumeError) {
+	*out = *in
+	in.Time.DeepCopyInto(&out.Time)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeError.
+func (in *VolumeError) DeepCopy() *VolumeError {
+	if in == nil {
+		return nil
+	}
+	out := new(VolumeError)
+	in.DeepCopyInto(out)
+	return out
+}
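The generated DeepCopy helpers matter because several fields here are reference types (maps, pointers, slices of structs); consumers reading from a shared cache must copy before mutating. A small sketch showing that DeepCopy severs the aliasing; the metadata values are illustrative:

package main

import (
	"fmt"

	storagev1beta1 "k8s.io/api/storage/v1beta1"
)

func main() {
	orig := &storagev1beta1.VolumeAttachmentStatus{
		AttachmentMetadata: map[string]string{"devicePath": "/dev/xvdf"},
	}
	cp := orig.DeepCopy()
	cp.AttachmentMetadata["devicePath"] = "/dev/xvdg" // mutate the copy only
	fmt.Println(orig.AttachmentMetadata["devicePath"]) // still /dev/xvdf
}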
diff --git a/vendor/k8s.io/apimachinery/LICENSE b/vendor/k8s.io/apimachinery/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS
new file mode 100755
index 0000000..dc6a4c7
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS
@@ -0,0 +1,24 @@
+reviewers:
+- thockin
+- lavalamp
+- smarterclayton
+- wojtek-t
+- deads2k
+- brendandburns
+- derekwaynecarr
+- caesarxuchao
+- mikedanese
+- liggitt
+- nikhiljindal
+- gmarek
+- erictune
+- saad-ali
+- janetkuo
+- tallclair
+- eparis
+- dims
+- hongchaodeng
+- krousey
+- cjcullen
+- david-mcmahon
+- goltermann
diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/doc.go b/vendor/k8s.io/apimachinery/pkg/api/errors/doc.go
new file mode 100644
index 0000000..167baf6
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/errors/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package errors provides detailed error types for api field validation.
+package errors // import "k8s.io/apimachinery/pkg/api/errors"
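The errors package vendored next pairs constructors (NewNotFound, NewConflict, and so on) that build *StatusError values with Is* predicates that classify them by StatusReason. A hedged sketch of the usual consumption pattern; the group, resource, and object name are illustrative:

package main

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	gr := schema.GroupResource{Group: "storage.k8s.io", Resource: "volumeattachments"}
	err := apierrors.NewNotFound(gr, "pv-0001-node-a")
	fmt.Println(apierrors.IsNotFound(err)) // true
	fmt.Println(err.Error())               // volumeattachments.storage.k8s.io "pv-0001-node-a" not found
}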
diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go
new file mode 100644
index 0000000..e736a98
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go
@@ -0,0 +1,581 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package errors
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"reflect"
+	"strings"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/util/validation/field"
+)
+
+const (
+	// StatusTooManyRequests means the server experienced too many requests within a
+	// given window and that the client must wait to perform the action again.
+	StatusTooManyRequests = 429
+)
+
+// StatusError is an error intended for consumption by a REST API server; it can also be
+// reconstructed by clients from a REST response. Public to allow easy type switches.
+type StatusError struct {
+	ErrStatus metav1.Status
+}
+
+// APIStatus is exposed by errors that can be converted to an api.Status object
+// for finer grained details.
+type APIStatus interface {
+	Status() metav1.Status
+}
+
+var _ error = &StatusError{}
+
+// Error implements the Error interface.
+func (e *StatusError) Error() string {
+	return e.ErrStatus.Message
+}
+
+// Status allows access to e's status without having to know the detailed workings
+// of StatusError.
+func (e *StatusError) Status() metav1.Status {
+	return e.ErrStatus
+}
+
+// DebugError reports extended info about the error to debug output.
+func (e *StatusError) DebugError() (string, []interface{}) {
+	if out, err := json.MarshalIndent(e.ErrStatus, "", "  "); err == nil {
+		return "server response object: %s", []interface{}{string(out)}
+	}
+	return "server response object: %#v", []interface{}{e.ErrStatus}
+}
+
+// UnexpectedObjectError can be returned by FromObject if it's passed a non-status object.
+type UnexpectedObjectError struct {
+	Object runtime.Object
+}
+
+// Error returns an error message describing 'u'.
+func (u *UnexpectedObjectError) Error() string {
+	return fmt.Sprintf("unexpected object: %v", u.Object)
+}
+
+// FromObject generates a StatusError from a metav1.Status, if that is the type of obj; otherwise,
+// returns an UnexpectedObjectError.
+func FromObject(obj runtime.Object) error {
+	switch t := obj.(type) {
+	case *metav1.Status:
+		return &StatusError{ErrStatus: *t}
+	case runtime.Unstructured:
+		var status metav1.Status
+		obj := t.UnstructuredContent()
+		if !reflect.DeepEqual(obj["kind"], "Status") {
+			break
+		}
+		if err := runtime.DefaultUnstructuredConverter.FromUnstructured(t.UnstructuredContent(), &status); err != nil {
+			return err
+		}
+		if status.APIVersion != "v1" && status.APIVersion != "meta.k8s.io/v1" {
+			break
+		}
+		return &StatusError{ErrStatus: status}
+	}
+	return &UnexpectedObjectError{obj}
+}
+
+// NewNotFound returns a new error which indicates that the resource of the kind and the name was not found.
+func NewNotFound(qualifiedResource schema.GroupResource, name string) *StatusError {
+	return &StatusError{metav1.Status{
+		Status: metav1.StatusFailure,
+		Code:   http.StatusNotFound,
+		Reason: metav1.StatusReasonNotFound,
+		Details: &metav1.StatusDetails{
+			Group: qualifiedResource.Group,
+			Kind:  qualifiedResource.Resource,
+			Name:  name,
+		},
+		Message: fmt.Sprintf("%s %q not found", qualifiedResource.String(), name),
+	}}
+}
+
+// NewAlreadyExists returns an error indicating the item requested exists by that identifier.
+func NewAlreadyExists(qualifiedResource schema.GroupResource, name string) *StatusError {
+	return &StatusError{metav1.Status{
+		Status: metav1.StatusFailure,
+		Code:   http.StatusConflict,
+		Reason: metav1.StatusReasonAlreadyExists,
+		Details: &metav1.StatusDetails{
+			Group: qualifiedResource.Group,
+			Kind:  qualifiedResource.Resource,
+			Name:  name,
+		},
+		Message: fmt.Sprintf("%s %q already exists", qualifiedResource.String(), name),
+	}}
+}
+
+// NewUnauthorized returns an error indicating the client is not authorized to perform the requested
+// action.
+func NewUnauthorized(reason string) *StatusError {
+	message := reason
+	if len(message) == 0 {
+		message = "not authorized"
+	}
+	return &StatusError{metav1.Status{
+		Status:  metav1.StatusFailure,
+		Code:    http.StatusUnauthorized,
+		Reason:  metav1.StatusReasonUnauthorized,
+		Message: message,
+	}}
+}
+
+// NewForbidden returns an error indicating the requested action was forbidden
+func NewForbidden(qualifiedResource schema.GroupResource, name string, err error) *StatusError {
+	var message string
+	if qualifiedResource.Empty() {
+		message = fmt.Sprintf("forbidden: %v", err)
+	} else if name == "" {
+		message = fmt.Sprintf("%s is forbidden: %v", qualifiedResource.String(), err)
+	} else {
+		message = fmt.Sprintf("%s %q is forbidden: %v", qualifiedResource.String(), name, err)
+	}
+	return &StatusError{metav1.Status{
+		Status: metav1.StatusFailure,
+		Code:   http.StatusForbidden,
+		Reason: metav1.StatusReasonForbidden,
+		Details: &metav1.StatusDetails{
+			Group: qualifiedResource.Group,
+			Kind:  qualifiedResource.Resource,
+			Name:  name,
+		},
+		Message: message,
+	}}
+}
+
+// NewConflict returns an error indicating the item can't be updated as provided.
+func NewConflict(qualifiedResource schema.GroupResource, name string, err error) *StatusError {
+	return &StatusError{metav1.Status{
+		Status: metav1.StatusFailure,
+		Code:   http.StatusConflict,
+		Reason: metav1.StatusReasonConflict,
+		Details: &metav1.StatusDetails{
+			Group: qualifiedResource.Group,
+			Kind:  qualifiedResource.Resource,
+			Name:  name,
+		},
+		Message: fmt.Sprintf("Operation cannot be fulfilled on %s %q: %v", qualifiedResource.String(), name, err),
+	}}
+}
+
+// NewGone returns an error indicating the item is no longer available at the server and that no forwarding address is known.
+func NewGone(message string) *StatusError {
+	return &StatusError{metav1.Status{
+		Status:  metav1.StatusFailure,
+		Code:    http.StatusGone,
+		Reason:  metav1.StatusReasonGone,
+		Message: message,
+	}}
+}
+
+// NewResourceExpired creates an error that indicates that the requested resource content has expired from
+// the server (usually due to a resourceVersion that is too old).
+func NewResourceExpired(message string) *StatusError {
+	return &StatusError{metav1.Status{
+		Status:  metav1.StatusFailure,
+		Code:    http.StatusGone,
+		Reason:  metav1.StatusReasonExpired,
+		Message: message,
+	}}
+}
+
+// NewInvalid returns an error indicating the item is invalid and cannot be processed.
+func NewInvalid(qualifiedKind schema.GroupKind, name string, errs field.ErrorList) *StatusError {
+	causes := make([]metav1.StatusCause, 0, len(errs))
+	for i := range errs {
+		err := errs[i]
+		causes = append(causes, metav1.StatusCause{
+			Type:    metav1.CauseType(err.Type),
+			Message: err.ErrorBody(),
+			Field:   err.Field,
+		})
+	}
+	return &StatusError{metav1.Status{
+		Status: metav1.StatusFailure,
+		Code:   http.StatusUnprocessableEntity,
+		Reason: metav1.StatusReasonInvalid,
+		Details: &metav1.StatusDetails{
+			Group:  qualifiedKind.Group,
+			Kind:   qualifiedKind.Kind,
+			Name:   name,
+			Causes: causes,
+		},
+		Message: fmt.Sprintf("%s %q is invalid: %v", qualifiedKind.String(), name, errs.ToAggregate()),
+	}}
+}
+
+// NewBadRequest creates an error that indicates that the request is invalid and can not be processed.
+func NewBadRequest(reason string) *StatusError {
+	return &StatusError{metav1.Status{
+		Status:  metav1.StatusFailure,
+		Code:    http.StatusBadRequest,
+		Reason:  metav1.StatusReasonBadRequest,
+		Message: reason,
+	}}
+}
+
+// NewTooManyRequests creates an error that indicates that the client must try again later because
+// the specified endpoint is not accepting requests. More specific details should be provided
+// if the client should know why the failure was limited.
+func NewTooManyRequests(message string, retryAfterSeconds int) *StatusError {
+	return &StatusError{metav1.Status{
+		Status:  metav1.StatusFailure,
+		Code:    http.StatusTooManyRequests,
+		Reason:  metav1.StatusReasonTooManyRequests,
+		Message: message,
+		Details: &metav1.StatusDetails{
+			RetryAfterSeconds: int32(retryAfterSeconds),
+		},
+	}}
+}
+
+// NewServiceUnavailable creates an error that indicates that the requested service is unavailable.
+func NewServiceUnavailable(reason string) *StatusError {
+	return &StatusError{metav1.Status{
+		Status:  metav1.StatusFailure,
+		Code:    http.StatusServiceUnavailable,
+		Reason:  metav1.StatusReasonServiceUnavailable,
+		Message: reason,
+	}}
+}
+
+// NewMethodNotSupported returns an error indicating the requested action is not supported on this kind.
+func NewMethodNotSupported(qualifiedResource schema.GroupResource, action string) *StatusError {
+	return &StatusError{metav1.Status{
+		Status: metav1.StatusFailure,
+		Code:   http.StatusMethodNotAllowed,
+		Reason: metav1.StatusReasonMethodNotAllowed,
+		Details: &metav1.StatusDetails{
+			Group: qualifiedResource.Group,
+			Kind:  qualifiedResource.Resource,
+		},
+		Message: fmt.Sprintf("%s is not supported on resources of kind %q", action, qualifiedResource.String()),
+	}}
+}
+
+// NewServerTimeout returns an error indicating the requested action could not be completed due to a
+// transient error, and the client should try again.
+func NewServerTimeout(qualifiedResource schema.GroupResource, operation string, retryAfterSeconds int) *StatusError {
+	return &StatusError{metav1.Status{
+		Status: metav1.StatusFailure,
+		Code:   http.StatusInternalServerError,
+		Reason: metav1.StatusReasonServerTimeout,
+		Details: &metav1.StatusDetails{
+			Group:             qualifiedResource.Group,
+			Kind:              qualifiedResource.Resource,
+			Name:              operation,
+			RetryAfterSeconds: int32(retryAfterSeconds),
+		},
+		Message: fmt.Sprintf("The %s operation against %s could not be completed at this time, please try again.", operation, qualifiedResource.String()),
+	}}
+}
+
+// NewServerTimeoutForKind should not exist.  Server timeouts happen when accessing resources; the Kind is just what we
+// happened to be looking at when the request failed.  This delegates to NewServerTimeout to keep the code sane, but we should work towards removing it.
+func NewServerTimeoutForKind(qualifiedKind schema.GroupKind, operation string, retryAfterSeconds int) *StatusError {
+	return NewServerTimeout(schema.GroupResource{Group: qualifiedKind.Group, Resource: qualifiedKind.Kind}, operation, retryAfterSeconds)
+}
+
+// NewInternalError returns an error indicating that an internal server error occurred while processing the request.
+func NewInternalError(err error) *StatusError {
+	return &StatusError{metav1.Status{
+		Status: metav1.StatusFailure,
+		Code:   http.StatusInternalServerError,
+		Reason: metav1.StatusReasonInternalError,
+		Details: &metav1.StatusDetails{
+			Causes: []metav1.StatusCause{{Message: err.Error()}},
+		},
+		Message: fmt.Sprintf("Internal error occurred: %v", err),
+	}}
+}
+
+// NewTimeoutError returns an error indicating that a timeout occurred before the request
+// could be completed.  Clients may retry, but the operation may still complete.
+func NewTimeoutError(message string, retryAfterSeconds int) *StatusError {
+	return &StatusError{metav1.Status{
+		Status:  metav1.StatusFailure,
+		Code:    http.StatusGatewayTimeout,
+		Reason:  metav1.StatusReasonTimeout,
+		Message: fmt.Sprintf("Timeout: %s", message),
+		Details: &metav1.StatusDetails{
+			RetryAfterSeconds: int32(retryAfterSeconds),
+		},
+	}}
+}
+
+// NewTooManyRequestsError returns an error indicating that the request was rejected because
+// the server has received too many requests. The client should wait and retry, but if the
+// request is perishable it should not be retried.
+func NewTooManyRequestsError(message string) *StatusError {
+	return &StatusError{metav1.Status{
+		Status:  metav1.StatusFailure,
+		Code:    StatusTooManyRequests,
+		Reason:  metav1.StatusReasonTooManyRequests,
+		Message: fmt.Sprintf("Too many requests: %s", message),
+	}}
+}
+
+// NewGenericServerResponse returns a new error for server responses that are not in a recognizable form.
+func NewGenericServerResponse(code int, verb string, qualifiedResource schema.GroupResource, name, serverMessage string, retryAfterSeconds int, isUnexpectedResponse bool) *StatusError {
+	reason := metav1.StatusReasonUnknown
+	message := fmt.Sprintf("the server responded with the status code %d but did not return more information", code)
+	switch code {
+	case http.StatusConflict:
+		if verb == "POST" {
+			reason = metav1.StatusReasonAlreadyExists
+		} else {
+			reason = metav1.StatusReasonConflict
+		}
+		message = "the server reported a conflict"
+	case http.StatusNotFound:
+		reason = metav1.StatusReasonNotFound
+		message = "the server could not find the requested resource"
+	case http.StatusBadRequest:
+		reason = metav1.StatusReasonBadRequest
+		message = "the server rejected our request for an unknown reason"
+	case http.StatusUnauthorized:
+		reason = metav1.StatusReasonUnauthorized
+		message = "the server has asked for the client to provide credentials"
+	case http.StatusForbidden:
+		reason = metav1.StatusReasonForbidden
+		// the server message has details about who is trying to perform what action.  Keep its message.
+		message = serverMessage
+	case http.StatusNotAcceptable:
+		reason = metav1.StatusReasonNotAcceptable
+		// the server message has details about what types are acceptable
+		message = serverMessage
+	case http.StatusUnsupportedMediaType:
+		reason = metav1.StatusReasonUnsupportedMediaType
+		// the server message has details about what types are acceptable
+		message = serverMessage
+	case http.StatusMethodNotAllowed:
+		reason = metav1.StatusReasonMethodNotAllowed
+		message = "the server does not allow this method on the requested resource"
+	case http.StatusUnprocessableEntity:
+		reason = metav1.StatusReasonInvalid
+		message = "the server rejected our request due to an error in our request"
+	case http.StatusServiceUnavailable:
+		reason = metav1.StatusReasonServiceUnavailable
+		message = "the server is currently unable to handle the request"
+	case http.StatusGatewayTimeout:
+		reason = metav1.StatusReasonTimeout
+		message = "the server was unable to return a response in the time allotted, but may still be processing the request"
+	case http.StatusTooManyRequests:
+		reason = metav1.StatusReasonTooManyRequests
+		message = "the server has received too many requests and has asked us to try again later"
+	default:
+		if code >= 500 {
+			reason = metav1.StatusReasonInternalError
+			message = fmt.Sprintf("an error on the server (%q) has prevented the request from succeeding", serverMessage)
+		}
+	}
+	switch {
+	case !qualifiedResource.Empty() && len(name) > 0:
+		message = fmt.Sprintf("%s (%s %s %s)", message, strings.ToLower(verb), qualifiedResource.String(), name)
+	case !qualifiedResource.Empty():
+		message = fmt.Sprintf("%s (%s %s)", message, strings.ToLower(verb), qualifiedResource.String())
+	}
+	var causes []metav1.StatusCause
+	if isUnexpectedResponse {
+		causes = []metav1.StatusCause{
+			{
+				Type:    metav1.CauseTypeUnexpectedServerResponse,
+				Message: serverMessage,
+			},
+		}
+	} else {
+		causes = nil
+	}
+	return &StatusError{metav1.Status{
+		Status: metav1.StatusFailure,
+		Code:   int32(code),
+		Reason: reason,
+		Details: &metav1.StatusDetails{
+			Group: qualifiedResource.Group,
+			Kind:  qualifiedResource.Resource,
+			Name:  name,
+
+			Causes:            causes,
+			RetryAfterSeconds: int32(retryAfterSeconds),
+		},
+		Message: message,
+	}}
+}
+
+// IsNotFound returns true if the specified error was created by NewNotFound.
+func IsNotFound(err error) bool {
+	return ReasonForError(err) == metav1.StatusReasonNotFound
+}
+
+// IsAlreadyExists determines if the err is an error which indicates that a specified resource already exists.
+func IsAlreadyExists(err error) bool {
+	return ReasonForError(err) == metav1.StatusReasonAlreadyExists
+}
+
+// IsConflict determines if the err is an error which indicates the provided update conflicts.
+func IsConflict(err error) bool {
+	return ReasonForError(err) == metav1.StatusReasonConflict
+}
+
+// IsInvalid determines if the err is an error which indicates the provided resource is not valid.
+func IsInvalid(err error) bool {
+	return ReasonForError(err) == metav1.StatusReasonInvalid
+}
+
+// IsGone is true if the error indicates the requested resource is no longer available.
+func IsGone(err error) bool {
+	return ReasonForError(err) == metav1.StatusReasonGone
+}
+
+// IsResourceExpired is true if the error indicates the resource has expired and the current action is
+// no longer possible.
+func IsResourceExpired(err error) bool {
+	return ReasonForError(err) == metav1.StatusReasonExpired
+}
+
+// IsNotAcceptable determines if err is an error which indicates that the request failed due to an invalid Accept header
+func IsNotAcceptable(err error) bool {
+	return ReasonForError(err) == metav1.StatusReasonNotAcceptable
+}
+
+// IsUnsupportedMediaType determines if err is an error which indicates that the request failed due to an invalid Content-Type header
+func IsUnsupportedMediaType(err error) bool {
+	return ReasonForError(err) == metav1.StatusReasonUnsupportedMediaType
+}
+
+// IsMethodNotSupported determines if the err is an error which indicates the provided action could not
+// be performed because it is not supported by the server.
+func IsMethodNotSupported(err error) bool {
+	return ReasonForError(err) == metav1.StatusReasonMethodNotAllowed
+}
+
+// IsServiceUnavailable is true if the error indicates the underlying service is no longer available.
+func IsServiceUnavailable(err error) bool {
+	return ReasonForError(err) == metav1.StatusReasonServiceUnavailable
+}
+
+// IsBadRequest determines if err is an error which indicates that the request is invalid.
+func IsBadRequest(err error) bool {
+	return ReasonForError(err) == metav1.StatusReasonBadRequest
+}
+
+// IsUnauthorized determines if err is an error which indicates that the request is unauthorized and
+// requires authentication by the user.
+func IsUnauthorized(err error) bool {
+	return ReasonForError(err) == metav1.StatusReasonUnauthorized
+}
+
+// IsForbidden determines if err is an error which indicates that the request is forbidden and cannot
+// be completed as requested.
+func IsForbidden(err error) bool {
+	return ReasonForError(err) == metav1.StatusReasonForbidden
+}
+
+// IsTimeout determines if err is an error which indicates that the request timed out due to long
+// processing.
+func IsTimeout(err error) bool {
+	return ReasonForError(err) == metav1.StatusReasonTimeout
+}
+
+// IsServerTimeout determines if err is an error which indicates that the request needs to be retried
+// by the client.
+func IsServerTimeout(err error) bool {
+	return ReasonForError(err) == metav1.StatusReasonServerTimeout
+}
+
+// IsInternalError determines if err is an error which indicates an internal server error.
+func IsInternalError(err error) bool {
+	return ReasonForError(err) == metav1.StatusReasonInternalError
+}
+
+// IsTooManyRequests determines if err is an error which indicates that the server received too
+// many requests and cannot handle this one right now.
+func IsTooManyRequests(err error) bool {
+	if ReasonForError(err) == metav1.StatusReasonTooManyRequests {
+		return true
+	}
+	switch t := err.(type) {
+	case APIStatus:
+		return t.Status().Code == http.StatusTooManyRequests
+	}
+	return false
+}
+
+// IsUnexpectedServerError returns true if the server response was not in the expected API format,
+// and may be the result of another HTTP actor.
+func IsUnexpectedServerError(err error) bool {
+	switch t := err.(type) {
+	case APIStatus:
+		if d := t.Status().Details; d != nil {
+			for _, cause := range d.Causes {
+				if cause.Type == metav1.CauseTypeUnexpectedServerResponse {
+					return true
+				}
+			}
+		}
+	}
+	return false
+}
+
+// IsUnexpectedObjectError determines if err is due to an unexpected object from the master.
+func IsUnexpectedObjectError(err error) bool {
+	_, ok := err.(*UnexpectedObjectError)
+	return err != nil && ok
+}
+
+// SuggestsClientDelay returns true if this error suggests a client delay as well as the
+// suggested seconds to wait, or false if the error does not imply a wait. It does not
+// address whether the error *should* be retried, since some errors (like a 3xx) may
+// request delay without retry.
+func SuggestsClientDelay(err error) (int, bool) {
+	switch t := err.(type) {
+	case APIStatus:
+		if t.Status().Details != nil {
+			switch t.Status().Reason {
+			// this StatusReason explicitly requests the caller to delay the action
+			case metav1.StatusReasonServerTimeout:
+				return int(t.Status().Details.RetryAfterSeconds), true
+			}
+			// If the client requests that we retry after a certain number of seconds
+			if t.Status().Details.RetryAfterSeconds > 0 {
+				return int(t.Status().Details.RetryAfterSeconds), true
+			}
+		}
+	}
+	return 0, false
+}
+
+// ReasonForError returns the HTTP status for a particular error.
+func ReasonForError(err error) metav1.StatusReason {
+	switch t := err.(type) {
+	case APIStatus:
+		return t.Status().Reason
+	}
+	return metav1.StatusReasonUnknown
+}
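
The predicate helpers above are the intended way for callers to branch on API error semantics instead of comparing raw HTTP status codes. A minimal sketch of that pattern, using only functions defined in this file (the GroupResource and resource name are illustrative only):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	// Fabricate an error the way a client would receive one from the apiserver.
	gr := schema.GroupResource{Group: "apps", Resource: "deployments"}
	err := errors.NewNotFound(gr, "affinity-router")

	switch {
	case errors.IsNotFound(err):
		fmt.Println("resource is missing; safe to create it")
	case errors.IsConflict(err):
		fmt.Println("update conflicted; re-read and retry")
	default:
		fmt.Println("unhandled error:", err)
	}

	// SuggestsClientDelay surfaces server-provided retry hints, if any.
	if seconds, ok := errors.SuggestsClientDelay(err); ok {
		fmt.Printf("server asked us to wait %d seconds\n", seconds)
	}
}
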
diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS
new file mode 100755
index 0000000..5f729ff
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS
@@ -0,0 +1,25 @@
+reviewers:
+- thockin
+- smarterclayton
+- wojtek-t
+- deads2k
+- brendandburns
+- derekwaynecarr
+- caesarxuchao
+- mikedanese
+- liggitt
+- nikhiljindal
+- gmarek
+- janetkuo
+- ncdc
+- eparis
+- dims
+- krousey
+- markturansky
+- fabioy
+- resouer
+- david-mcmahon
+- mfojtik
+- jianhuiz
+- feihujiang
+- ghodss
diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/doc.go b/vendor/k8s.io/apimachinery/pkg/api/meta/doc.go
new file mode 100644
index 0000000..b6d42ac
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/meta/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package meta provides functions for retrieving API metadata from objects
+// belonging to the Kubernetes API.
+package meta // import "k8s.io/apimachinery/pkg/api/meta"
diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/errors.go b/vendor/k8s.io/apimachinery/pkg/api/meta/errors.go
new file mode 100644
index 0000000..cbf5d02
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/meta/errors.go
@@ -0,0 +1,121 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package meta
+
+import (
+	"fmt"
+
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/util/sets"
+)
+
+// AmbiguousResourceError is returned if the RESTMapper finds multiple matches for a resource
+type AmbiguousResourceError struct {
+	PartialResource schema.GroupVersionResource
+
+	MatchingResources []schema.GroupVersionResource
+	MatchingKinds     []schema.GroupVersionKind
+}
+
+func (e *AmbiguousResourceError) Error() string {
+	switch {
+	case len(e.MatchingKinds) > 0 && len(e.MatchingResources) > 0:
+		return fmt.Sprintf("%v matches multiple resources %v and kinds %v", e.PartialResource, e.MatchingResources, e.MatchingKinds)
+	case len(e.MatchingKinds) > 0:
+		return fmt.Sprintf("%v matches multiple kinds %v", e.PartialResource, e.MatchingKinds)
+	case len(e.MatchingResources) > 0:
+		return fmt.Sprintf("%v matches multiple resources %v", e.PartialResource, e.MatchingResources)
+	}
+	return fmt.Sprintf("%v matches multiple resources or kinds", e.PartialResource)
+}
+
+// AmbiguousKindError is returned if the RESTMapper finds multiple matches for a kind
+type AmbiguousKindError struct {
+	PartialKind schema.GroupVersionKind
+
+	MatchingResources []schema.GroupVersionResource
+	MatchingKinds     []schema.GroupVersionKind
+}
+
+func (e *AmbiguousKindError) Error() string {
+	switch {
+	case len(e.MatchingKinds) > 0 && len(e.MatchingResources) > 0:
+		return fmt.Sprintf("%v matches multiple resources %v and kinds %v", e.PartialKind, e.MatchingResources, e.MatchingKinds)
+	case len(e.MatchingKinds) > 0:
+		return fmt.Sprintf("%v matches multiple kinds %v", e.PartialKind, e.MatchingKinds)
+	case len(e.MatchingResources) > 0:
+		return fmt.Sprintf("%v matches multiple resources %v", e.PartialKind, e.MatchingResources)
+	}
+	return fmt.Sprintf("%v matches multiple resources or kinds", e.PartialKind)
+}
+
+func IsAmbiguousError(err error) bool {
+	if err == nil {
+		return false
+	}
+	switch err.(type) {
+	case *AmbiguousResourceError, *AmbiguousKindError:
+		return true
+	default:
+		return false
+	}
+}
+
+// NoResourceMatchError is returned if the RESTMapper can't find any match for a resource
+type NoResourceMatchError struct {
+	PartialResource schema.GroupVersionResource
+}
+
+func (e *NoResourceMatchError) Error() string {
+	return fmt.Sprintf("no matches for %v", e.PartialResource)
+}
+
+// NoKindMatchError is returned if the RESTMapper can't find any match for a kind
+type NoKindMatchError struct {
+	// GroupKind is the API group and kind that was searched
+	GroupKind schema.GroupKind
+	// SearchedVersions is the optional list of versions the search was restricted to
+	SearchedVersions []string
+}
+
+func (e *NoKindMatchError) Error() string {
+	searchedVersions := sets.NewString()
+	for _, v := range e.SearchedVersions {
+		searchedVersions.Insert(schema.GroupVersion{Group: e.GroupKind.Group, Version: v}.String())
+	}
+
+	switch len(searchedVersions) {
+	case 0:
+		return fmt.Sprintf("no matches for kind %q in group %q", e.GroupKind.Kind, e.GroupKind.Group)
+	case 1:
+		return fmt.Sprintf("no matches for kind %q in version %q", e.GroupKind.Kind, searchedVersions.List()[0])
+	default:
+		return fmt.Sprintf("no matches for kind %q in versions %q", e.GroupKind.Kind, searchedVersions.List())
+	}
+}
+
+func IsNoMatchError(err error) bool {
+	if err == nil {
+		return false
+	}
+	switch err.(type) {
+	case *NoResourceMatchError, *NoKindMatchError:
+		return true
+	default:
+		return false
+	}
+}
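
These match-error types are usually consumed through the predicates: a "no match" typically means discovery data is stale (for example, a CRD was only just installed) and the lookup can be retried. A short sketch with an illustrative GroupKind:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	// Fabricate the error a RESTMapper would return for an unknown kind.
	var err error = &meta.NoKindMatchError{
		GroupKind:        schema.GroupKind{Group: "example.com", Kind: "Widget"},
		SearchedVersions: []string{"v1"},
	}

	if meta.IsNoMatchError(err) {
		fmt.Println("no mapping yet; refresh discovery and retry:", err)
	}
	if meta.IsAmbiguousError(err) {
		fmt.Println("ambiguous mapping:", err)
	}
}
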
diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/firsthit_restmapper.go b/vendor/k8s.io/apimachinery/pkg/api/meta/firsthit_restmapper.go
new file mode 100644
index 0000000..fd22100
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/meta/firsthit_restmapper.go
@@ -0,0 +1,97 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package meta
+
+import (
+	"fmt"
+
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
+)
+
+// FirstHitRESTMapper is a wrapper for multiple RESTMappers which returns the
+// first successful result for singular requests.
+type FirstHitRESTMapper struct {
+	MultiRESTMapper
+}
+
+func (m FirstHitRESTMapper) String() string {
+	return fmt.Sprintf("FirstHitRESTMapper{\n\t%v\n}", m.MultiRESTMapper)
+}
+
+func (m FirstHitRESTMapper) ResourceFor(resource schema.GroupVersionResource) (schema.GroupVersionResource, error) {
+	errors := []error{}
+	for _, t := range m.MultiRESTMapper {
+		ret, err := t.ResourceFor(resource)
+		if err == nil {
+			return ret, nil
+		}
+		errors = append(errors, err)
+	}
+
+	return schema.GroupVersionResource{}, collapseAggregateErrors(errors)
+}
+
+func (m FirstHitRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) {
+	errors := []error{}
+	for _, t := range m.MultiRESTMapper {
+		ret, err := t.KindFor(resource)
+		if err == nil {
+			return ret, nil
+		}
+		errors = append(errors, err)
+	}
+
+	return schema.GroupVersionKind{}, collapseAggregateErrors(errors)
+}
+
+// RESTMapping provides the REST mapping for the resource based on the
+// kind and version. This implementation supports multiple REST schemas and
+// returns the first match.
+func (m FirstHitRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*RESTMapping, error) {
+	errors := []error{}
+	for _, t := range m.MultiRESTMapper {
+		ret, err := t.RESTMapping(gk, versions...)
+		if err == nil {
+			return ret, nil
+		}
+		errors = append(errors, err)
+	}
+
+	return nil, collapseAggregateErrors(errors)
+}
+
+// collapseAggregateErrors returns the minimal set of errors. It handles an empty list as nil, a
+// one-item list by returning the item, and collapses all NoMatchErrors into a single one (since they should all be the same).
+func collapseAggregateErrors(errors []error) error {
+	if len(errors) == 0 {
+		return nil
+	}
+	if len(errors) == 1 {
+		return errors[0]
+	}
+
+	allNoMatchErrors := true
+	for _, err := range errors {
+		allNoMatchErrors = allNoMatchErrors && IsNoMatchError(err)
+	}
+	if allNoMatchErrors {
+		return errors[0]
+	}
+
+	return utilerrors.NewAggregate(errors)
+}
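
Composition is the point of this type: wrap several mappers and let the first one that answers a singular request win. A sketch, assuming the caller already holds two RESTMapper implementations:

package example

import (
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// lookupKind resolves a partial resource through a FirstHitRESTMapper.
// primary and fallback are assumed to be supplied by the caller.
func lookupKind(primary, fallback meta.RESTMapper) (schema.GroupVersionKind, error) {
	mapper := meta.FirstHitRESTMapper{
		MultiRESTMapper: meta.MultiRESTMapper{primary, fallback},
	}
	// If every child mapper misses, the collapsed NoMatch error comes back.
	return mapper.KindFor(schema.GroupVersionResource{Resource: "pods"})
}
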
diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/help.go b/vendor/k8s.io/apimachinery/pkg/api/meta/help.go
new file mode 100644
index 0000000..3425055
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/meta/help.go
@@ -0,0 +1,218 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package meta
+
+import (
+	"fmt"
+	"reflect"
+
+	"k8s.io/apimachinery/pkg/conversion"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+// IsListType returns true if the provided Object has a slice called Items
+func IsListType(obj runtime.Object) bool {
+	// if we're a runtime.Unstructured, check whether this is a list.
+	// TODO: refactor GetItemsPtr to use an interface that returns []runtime.Object
+	if unstructured, ok := obj.(runtime.Unstructured); ok {
+		return unstructured.IsList()
+	}
+
+	_, err := GetItemsPtr(obj)
+	return err == nil
+}
+
+// GetItemsPtr returns a pointer to the list object's Items member.
+// If 'list' doesn't have an Items member, it's not really a list type
+// and an error will be returned.
+// This function will either return a pointer to a slice, or an error, but not both.
+func GetItemsPtr(list runtime.Object) (interface{}, error) {
+	v, err := conversion.EnforcePtr(list)
+	if err != nil {
+		return nil, err
+	}
+
+	items := v.FieldByName("Items")
+	if !items.IsValid() {
+		return nil, fmt.Errorf("no Items field in %#v", list)
+	}
+	switch items.Kind() {
+	case reflect.Interface, reflect.Ptr:
+		target := reflect.TypeOf(items.Interface()).Elem()
+		if target.Kind() != reflect.Slice {
+			return nil, fmt.Errorf("items: Expected slice, got %s", target.Kind())
+		}
+		return items.Interface(), nil
+	case reflect.Slice:
+		return items.Addr().Interface(), nil
+	default:
+		return nil, fmt.Errorf("items: Expected slice, got %s", items.Kind())
+	}
+}
+
+// EachListItem invokes fn on each runtime.Object in the list. Any error immediately terminates
+// the loop.
+func EachListItem(obj runtime.Object, fn func(runtime.Object) error) error {
+	if unstructured, ok := obj.(runtime.Unstructured); ok {
+		return unstructured.EachListItem(fn)
+	}
+	// TODO: Change to an interface call?
+	itemsPtr, err := GetItemsPtr(obj)
+	if err != nil {
+		return err
+	}
+	items, err := conversion.EnforcePtr(itemsPtr)
+	if err != nil {
+		return err
+	}
+	length := items.Len()
+	if length == 0 {
+		return nil
+	}
+	takeAddr := false
+	if elemType := items.Type().Elem(); elemType.Kind() != reflect.Ptr && elemType.Kind() != reflect.Interface {
+		if !items.Index(0).CanAddr() {
+			return fmt.Errorf("unable to take address of items in %T for EachListItem", obj)
+		}
+		takeAddr = true
+	}
+
+	for i := 0; i < length; i++ {
+		raw := items.Index(i)
+		if takeAddr {
+			raw = raw.Addr()
+		}
+		switch item := raw.Interface().(type) {
+		case *runtime.RawExtension:
+			if err := fn(item.Object); err != nil {
+				return err
+			}
+		case runtime.Object:
+			if err := fn(item); err != nil {
+				return err
+			}
+		default:
+			obj, ok := item.(runtime.Object)
+			if !ok {
+				return fmt.Errorf("%v: item[%v]: Expected object, got %#v(%s)", obj, i, raw.Interface(), raw.Kind())
+			}
+			if err := fn(obj); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// ExtractList returns obj's Items element as an array of runtime.Objects.
+// Returns an error if obj is not a List type (does not have an Items member).
+func ExtractList(obj runtime.Object) ([]runtime.Object, error) {
+	itemsPtr, err := GetItemsPtr(obj)
+	if err != nil {
+		return nil, err
+	}
+	items, err := conversion.EnforcePtr(itemsPtr)
+	if err != nil {
+		return nil, err
+	}
+	list := make([]runtime.Object, items.Len())
+	for i := range list {
+		raw := items.Index(i)
+		switch item := raw.Interface().(type) {
+		case runtime.RawExtension:
+			switch {
+			case item.Object != nil:
+				list[i] = item.Object
+			case item.Raw != nil:
+				// TODO: Set ContentEncoding and ContentType correctly.
+				list[i] = &runtime.Unknown{Raw: item.Raw}
+			default:
+				list[i] = nil
+			}
+		case runtime.Object:
+			list[i] = item
+		default:
+			var found bool
+			if list[i], found = raw.Addr().Interface().(runtime.Object); !found {
+				return nil, fmt.Errorf("%v: item[%v]: Expected object, got %#v(%s)", obj, i, raw.Interface(), raw.Kind())
+			}
+		}
+	}
+	return list, nil
+}
+
+// objectSliceType is the type of a slice of Objects
+var objectSliceType = reflect.TypeOf([]runtime.Object{})
+
+// LenList returns the length of this list or 0 if it is not a list.
+func LenList(list runtime.Object) int {
+	itemsPtr, err := GetItemsPtr(list)
+	if err != nil {
+		return 0
+	}
+	items, err := conversion.EnforcePtr(itemsPtr)
+	if err != nil {
+		return 0
+	}
+	return items.Len()
+}
+
+// SetList sets the given list object's Items member to have the elements given in
+// objects.
+// Returns an error if list is not a List type (does not have an Items member),
+// or if any of the objects are not of the right type.
+func SetList(list runtime.Object, objects []runtime.Object) error {
+	itemsPtr, err := GetItemsPtr(list)
+	if err != nil {
+		return err
+	}
+	items, err := conversion.EnforcePtr(itemsPtr)
+	if err != nil {
+		return err
+	}
+	if items.Type() == objectSliceType {
+		items.Set(reflect.ValueOf(objects))
+		return nil
+	}
+	slice := reflect.MakeSlice(items.Type(), len(objects), len(objects))
+	for i := range objects {
+		dest := slice.Index(i)
+		if dest.Type() == reflect.TypeOf(runtime.RawExtension{}) {
+			dest = dest.FieldByName("Object")
+		}
+
+		// check to see if you're directly assignable
+		if reflect.TypeOf(objects[i]).AssignableTo(dest.Type()) {
+			dest.Set(reflect.ValueOf(objects[i]))
+			continue
+		}
+
+		src, err := conversion.EnforcePtr(objects[i])
+		if err != nil {
+			return err
+		}
+		if src.Type().AssignableTo(dest.Type()) {
+			dest.Set(src)
+		} else if src.Type().ConvertibleTo(dest.Type()) {
+			dest.Set(src.Convert(dest.Type()))
+		} else {
+			return fmt.Errorf("item[%d]: can't assign or convert %v into %v", i, src.Type(), dest.Type())
+		}
+	}
+	items.Set(slice)
+	return nil
+}
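
A minimal round trip through these helpers, using metav1.List (whose Items are RawExtensions) so the sketch stays self-contained; the element chosen is arbitrary, since any runtime.Object will do:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	list := &metav1.List{}

	// SetList wraps each object into the RawExtension slots of Items.
	objs := []runtime.Object{&metav1.List{}}
	if err := meta.SetList(list, objs); err != nil {
		panic(err)
	}

	fmt.Println("IsListType:", meta.IsListType(list), "LenList:", meta.LenList(list))

	// ExtractList unwraps the RawExtensions back into runtime.Objects.
	back, err := meta.ExtractList(list)
	if err != nil {
		panic(err)
	}
	fmt.Println("extracted", len(back), "items")
}
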
diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go b/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go
new file mode 100644
index 0000000..42eac3a
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go
@@ -0,0 +1,134 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package meta
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/types"
+)
+
+type ListMetaAccessor interface {
+	GetListMeta() List
+}
+
+// List lets you work with list metadata from any of the versioned or
+// internal API objects. Attempting to set or retrieve a field on an object that does
+// not support that field will be a no-op and return a default value.
+type List metav1.ListInterface
+
+// Type exposes the type and APIVersion of versioned or internal API objects.
+type Type metav1.Type
+
+// MetadataAccessor lets you work with object and list metadata from any of the versioned or
+// internal API objects. Attempting to set or retrieve a field on an object that does
+// not support that field (Name, UID, Namespace on lists) will be a no-op and return
+// a default value.
+//
+// MetadataAccessor exposes Interface in a way that can be used with multiple objects.
+type MetadataAccessor interface {
+	APIVersion(obj runtime.Object) (string, error)
+	SetAPIVersion(obj runtime.Object, version string) error
+
+	Kind(obj runtime.Object) (string, error)
+	SetKind(obj runtime.Object, kind string) error
+
+	Namespace(obj runtime.Object) (string, error)
+	SetNamespace(obj runtime.Object, namespace string) error
+
+	Name(obj runtime.Object) (string, error)
+	SetName(obj runtime.Object, name string) error
+
+	GenerateName(obj runtime.Object) (string, error)
+	SetGenerateName(obj runtime.Object, name string) error
+
+	UID(obj runtime.Object) (types.UID, error)
+	SetUID(obj runtime.Object, uid types.UID) error
+
+	SelfLink(obj runtime.Object) (string, error)
+	SetSelfLink(obj runtime.Object, selfLink string) error
+
+	Labels(obj runtime.Object) (map[string]string, error)
+	SetLabels(obj runtime.Object, labels map[string]string) error
+
+	Annotations(obj runtime.Object) (map[string]string, error)
+	SetAnnotations(obj runtime.Object, annotations map[string]string) error
+
+	Continue(obj runtime.Object) (string, error)
+	SetContinue(obj runtime.Object, c string) error
+
+	runtime.ResourceVersioner
+}
+
+type RESTScopeName string
+
+const (
+	RESTScopeNameNamespace RESTScopeName = "namespace"
+	RESTScopeNameRoot      RESTScopeName = "root"
+)
+
+// RESTScope contains the information needed to deal with REST resources that are in a resource hierarchy
+type RESTScope interface {
+	// Name of the scope
+	Name() RESTScopeName
+}
+
+// RESTMapping contains the information needed to deal with objects of a specific
+// resource and kind in a RESTful manner.
+type RESTMapping struct {
+	// Resource is the GroupVersionResource (location) for this endpoint
+	Resource schema.GroupVersionResource
+
+	// GroupVersionKind is the GroupVersionKind (data format) to submit to this endpoint
+	GroupVersionKind schema.GroupVersionKind
+
+	// Scope contains the information needed to deal with REST Resources that are in a resource hierarchy
+	Scope RESTScope
+}
+
+// RESTMapper allows clients to map resources to kind, and map kind and version
+// to interfaces for manipulating those objects. It is primarily intended for
+// consumers of Kubernetes compatible REST APIs as defined in docs/devel/api-conventions.md.
+//
+// The Kubernetes API provides versioned resources and object kinds which are scoped
+// to API groups. In other words, kinds and resources should not be assumed to be
+// unique across groups.
+//
+// TODO: split into sub-interfaces
+type RESTMapper interface {
+	// KindFor takes a partial resource and returns the single match.  Returns an error if there are multiple matches
+	KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error)
+
+	// KindsFor takes a partial resource and returns the list of potential kinds in priority order
+	KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error)
+
+	// ResourceFor takes a partial resource and returns the single match.  Returns an error if there are multiple matches
+	ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error)
+
+	// ResourcesFor takes a partial resource and returns the list of potential resource in priority order
+	ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error)
+
+	// RESTMapping identifies a preferred resource mapping for the provided group kind.
+	RESTMapping(gk schema.GroupKind, versions ...string) (*RESTMapping, error)
+	// RESTMappings returns all resource mappings for the provided group kind if no
+	// version search is provided. Otherwise identifies a preferred resource mapping for
+	// the provided version(s).
+	RESTMappings(gk schema.GroupKind, versions ...string) ([]*RESTMapping, error)
+
+	ResourceSingularizer(resource string) (singular string, err error)
+}
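
The common consumer pattern for this interface is to resolve a GroupKind into a RESTMapping, then read the resource and scope off the result when building request paths. A sketch, with the mapper assumed to be provided and apps/Deployment used purely as an example:

package example

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func describeMapping(mapper meta.RESTMapper) error {
	gk := schema.GroupKind{Group: "apps", Kind: "Deployment"}
	mapping, err := mapper.RESTMapping(gk, "v1")
	if err != nil {
		return err
	}
	// Scope tells us whether a namespace belongs in the request path.
	namespaced := mapping.Scope.Name() == meta.RESTScopeNameNamespace
	fmt.Printf("%v namespaced=%v\n", mapping.Resource, namespaced)
	return nil
}
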
diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go b/vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go
new file mode 100644
index 0000000..431a0a6
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go
@@ -0,0 +1,104 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package meta
+
+import (
+	"sync"
+
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// lazyObject defers loading the mapper and typer until necessary.
+type lazyObject struct {
+	loader func() (RESTMapper, error)
+
+	lock   sync.Mutex
+	loaded bool
+	err    error
+	mapper RESTMapper
+}
+
+// NewLazyRESTMapperLoader handles unrecoverable errors when creating a RESTMapper by
+// returning those initialization errors when the interface methods are invoked. This defers the
+// initialization and any server calls until a client actually needs to perform the action.
+func NewLazyRESTMapperLoader(fn func() (RESTMapper, error)) RESTMapper {
+	obj := &lazyObject{loader: fn}
+	return obj
+}
+
+// init lazily loads the mapper and typer, returning an error if initialization has failed.
+func (o *lazyObject) init() error {
+	o.lock.Lock()
+	defer o.lock.Unlock()
+	if o.loaded {
+		return o.err
+	}
+	o.mapper, o.err = o.loader()
+	o.loaded = true
+	return o.err
+}
+
+var _ RESTMapper = &lazyObject{}
+
+func (o *lazyObject) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) {
+	if err := o.init(); err != nil {
+		return schema.GroupVersionKind{}, err
+	}
+	return o.mapper.KindFor(resource)
+}
+
+func (o *lazyObject) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) {
+	if err := o.init(); err != nil {
+		return []schema.GroupVersionKind{}, err
+	}
+	return o.mapper.KindsFor(resource)
+}
+
+func (o *lazyObject) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) {
+	if err := o.init(); err != nil {
+		return schema.GroupVersionResource{}, err
+	}
+	return o.mapper.ResourceFor(input)
+}
+
+func (o *lazyObject) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) {
+	if err := o.init(); err != nil {
+		return []schema.GroupVersionResource{}, err
+	}
+	return o.mapper.ResourcesFor(input)
+}
+
+func (o *lazyObject) RESTMapping(gk schema.GroupKind, versions ...string) (*RESTMapping, error) {
+	if err := o.init(); err != nil {
+		return nil, err
+	}
+	return o.mapper.RESTMapping(gk, versions...)
+}
+
+func (o *lazyObject) RESTMappings(gk schema.GroupKind, versions ...string) ([]*RESTMapping, error) {
+	if err := o.init(); err != nil {
+		return nil, err
+	}
+	return o.mapper.RESTMappings(gk, versions...)
+}
+
+func (o *lazyObject) ResourceSingularizer(resource string) (singular string, err error) {
+	if err := o.init(); err != nil {
+		return "", err
+	}
+	return o.mapper.ResourceSingularizer(resource)
+}
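
The intended use is to wrap an expensive, possibly failing constructor (for example, one that performs API discovery) so that no work happens until the mapper is first exercised. The buildMapper function below is hypothetical:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// buildMapper stands in for a constructor that may hit the network.
func buildMapper() (meta.RESTMapper, error) {
	return meta.MultiRESTMapper{}, nil
}

func main() {
	// Nothing loads here; buildMapper runs on the first call below,
	// and its error (if any) is replayed on every subsequent call.
	mapper := meta.NewLazyRESTMapperLoader(buildMapper)

	_, err := mapper.KindFor(schema.GroupVersionResource{Resource: "pods"})
	fmt.Println("first call:", err)
}
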
diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go b/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go
new file mode 100644
index 0000000..6fe7458
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go
@@ -0,0 +1,650 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package meta
+
+import (
+	"fmt"
+	"reflect"
+
+	"k8s.io/klog"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1"
+	"k8s.io/apimachinery/pkg/conversion"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/types"
+)
+
+// errNotList is returned when an object implements the Object style interfaces but not the List style
+// interfaces.
+var errNotList = fmt.Errorf("object does not implement the List interfaces")
+
+var errNotCommon = fmt.Errorf("object does not implement the common interface for accessing the SelfLink")
+
+// CommonAccessor returns a Common interface for the provided object or an error if the object does
+// not provide the common metadata fields.
+func CommonAccessor(obj interface{}) (metav1.Common, error) {
+	switch t := obj.(type) {
+	case List:
+		return t, nil
+	case metav1.ListInterface:
+		return t, nil
+	case ListMetaAccessor:
+		if m := t.GetListMeta(); m != nil {
+			return m, nil
+		}
+		return nil, errNotCommon
+	case metav1.ListMetaAccessor:
+		if m := t.GetListMeta(); m != nil {
+			return m, nil
+		}
+		return nil, errNotCommon
+	case metav1.Object:
+		return t, nil
+	case metav1.ObjectMetaAccessor:
+		if m := t.GetObjectMeta(); m != nil {
+			return m, nil
+		}
+		return nil, errNotCommon
+	default:
+		return nil, errNotCommon
+	}
+}
+
+// ListAccessor returns a List interface for the provided object or an error if the object does
+// not provide List.
+// IMPORTANT: Objects are NOT a superset of lists. Do not use this check to determine whether an
+// object *is* a List.
+func ListAccessor(obj interface{}) (List, error) {
+	switch t := obj.(type) {
+	case List:
+		return t, nil
+	case metav1.ListInterface:
+		return t, nil
+	case ListMetaAccessor:
+		if m := t.GetListMeta(); m != nil {
+			return m, nil
+		}
+		return nil, errNotList
+	case metav1.ListMetaAccessor:
+		if m := t.GetListMeta(); m != nil {
+			return m, nil
+		}
+		return nil, errNotList
+	default:
+		return nil, errNotList
+	}
+}
+
+// errNotObject is returned when an object implements the List style interfaces but not the Object style
+// interfaces.
+var errNotObject = fmt.Errorf("object does not implement the Object interfaces")
+
+// Accessor takes an arbitrary object pointer and returns meta.Interface.
+// obj must be a pointer to an API type. An error is returned if the minimum
+// required fields are missing. Fields that are not required return the default
+// value and are a no-op if set.
+func Accessor(obj interface{}) (metav1.Object, error) {
+	switch t := obj.(type) {
+	case metav1.Object:
+		return t, nil
+	case metav1.ObjectMetaAccessor:
+		if m := t.GetObjectMeta(); m != nil {
+			return m, nil
+		}
+		return nil, errNotObject
+	default:
+		return nil, errNotObject
+	}
+}
+
+// AsPartialObjectMetadata takes the metav1 interface and returns a partial object.
+// TODO: consider making this solely a conversion action.
+func AsPartialObjectMetadata(m metav1.Object) *metav1beta1.PartialObjectMetadata {
+	switch t := m.(type) {
+	case *metav1.ObjectMeta:
+		return &metav1beta1.PartialObjectMetadata{ObjectMeta: *t}
+	default:
+		return &metav1beta1.PartialObjectMetadata{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:                       m.GetName(),
+				GenerateName:               m.GetGenerateName(),
+				Namespace:                  m.GetNamespace(),
+				SelfLink:                   m.GetSelfLink(),
+				UID:                        m.GetUID(),
+				ResourceVersion:            m.GetResourceVersion(),
+				Generation:                 m.GetGeneration(),
+				CreationTimestamp:          m.GetCreationTimestamp(),
+				DeletionTimestamp:          m.GetDeletionTimestamp(),
+				DeletionGracePeriodSeconds: m.GetDeletionGracePeriodSeconds(),
+				Labels:                     m.GetLabels(),
+				Annotations:                m.GetAnnotations(),
+				OwnerReferences:            m.GetOwnerReferences(),
+				Finalizers:                 m.GetFinalizers(),
+				ClusterName:                m.GetClusterName(),
+				Initializers:               m.GetInitializers(),
+			},
+		}
+	}
+}
+
+// TypeAccessor returns an interface that allows retrieving and modifying the APIVersion
+// and Kind of an in-memory internal object.
+// TODO: this interface is used to test code that does not have ObjectMeta or ListMeta
+// in round tripping (objects which can use apiVersion/kind, but do not fit the Kube
+// api conventions).
+func TypeAccessor(obj interface{}) (Type, error) {
+	if typed, ok := obj.(runtime.Object); ok {
+		return objectAccessor{typed}, nil
+	}
+	v, err := conversion.EnforcePtr(obj)
+	if err != nil {
+		return nil, err
+	}
+	t := v.Type()
+	if v.Kind() != reflect.Struct {
+		return nil, fmt.Errorf("expected struct, but got %v: %v (%#v)", v.Kind(), t, v.Interface())
+	}
+
+	typeMeta := v.FieldByName("TypeMeta")
+	if !typeMeta.IsValid() {
+		return nil, fmt.Errorf("struct %v lacks embedded TypeMeta type", t)
+	}
+	a := &genericAccessor{}
+	if err := extractFromTypeMeta(typeMeta, a); err != nil {
+		return nil, fmt.Errorf("unable to find type fields on %#v: %v", typeMeta, err)
+	}
+	return a, nil
+}
+
+type objectAccessor struct {
+	runtime.Object
+}
+
+func (obj objectAccessor) GetKind() string {
+	return obj.GetObjectKind().GroupVersionKind().Kind
+}
+
+func (obj objectAccessor) SetKind(kind string) {
+	gvk := obj.GetObjectKind().GroupVersionKind()
+	gvk.Kind = kind
+	obj.GetObjectKind().SetGroupVersionKind(gvk)
+}
+
+func (obj objectAccessor) GetAPIVersion() string {
+	return obj.GetObjectKind().GroupVersionKind().GroupVersion().String()
+}
+
+func (obj objectAccessor) SetAPIVersion(version string) {
+	gvk := obj.GetObjectKind().GroupVersionKind()
+	gv, err := schema.ParseGroupVersion(version)
+	if err != nil {
+		gv = schema.GroupVersion{Version: version}
+	}
+	gvk.Group, gvk.Version = gv.Group, gv.Version
+	obj.GetObjectKind().SetGroupVersionKind(gvk)
+}
+
+// NewAccessor returns a MetadataAccessor that can retrieve
+// or manipulate resource version on objects derived from core API
+// metadata concepts.
+func NewAccessor() MetadataAccessor {
+	return resourceAccessor{}
+}
+
+// resourceAccessor implements ResourceVersioner and SelfLinker.
+type resourceAccessor struct{}
+
+func (resourceAccessor) Kind(obj runtime.Object) (string, error) {
+	return objectAccessor{obj}.GetKind(), nil
+}
+
+func (resourceAccessor) SetKind(obj runtime.Object, kind string) error {
+	objectAccessor{obj}.SetKind(kind)
+	return nil
+}
+
+func (resourceAccessor) APIVersion(obj runtime.Object) (string, error) {
+	return objectAccessor{obj}.GetAPIVersion(), nil
+}
+
+func (resourceAccessor) SetAPIVersion(obj runtime.Object, version string) error {
+	objectAccessor{obj}.SetAPIVersion(version)
+	return nil
+}
+
+func (resourceAccessor) Namespace(obj runtime.Object) (string, error) {
+	accessor, err := Accessor(obj)
+	if err != nil {
+		return "", err
+	}
+	return accessor.GetNamespace(), nil
+}
+
+func (resourceAccessor) SetNamespace(obj runtime.Object, namespace string) error {
+	accessor, err := Accessor(obj)
+	if err != nil {
+		return err
+	}
+	accessor.SetNamespace(namespace)
+	return nil
+}
+
+func (resourceAccessor) Name(obj runtime.Object) (string, error) {
+	accessor, err := Accessor(obj)
+	if err != nil {
+		return "", err
+	}
+	return accessor.GetName(), nil
+}
+
+func (resourceAccessor) SetName(obj runtime.Object, name string) error {
+	accessor, err := Accessor(obj)
+	if err != nil {
+		return err
+	}
+	accessor.SetName(name)
+	return nil
+}
+
+func (resourceAccessor) GenerateName(obj runtime.Object) (string, error) {
+	accessor, err := Accessor(obj)
+	if err != nil {
+		return "", err
+	}
+	return accessor.GetGenerateName(), nil
+}
+
+func (resourceAccessor) SetGenerateName(obj runtime.Object, name string) error {
+	accessor, err := Accessor(obj)
+	if err != nil {
+		return err
+	}
+	accessor.SetGenerateName(name)
+	return nil
+}
+
+func (resourceAccessor) UID(obj runtime.Object) (types.UID, error) {
+	accessor, err := Accessor(obj)
+	if err != nil {
+		return "", err
+	}
+	return accessor.GetUID(), nil
+}
+
+func (resourceAccessor) SetUID(obj runtime.Object, uid types.UID) error {
+	accessor, err := Accessor(obj)
+	if err != nil {
+		return err
+	}
+	accessor.SetUID(uid)
+	return nil
+}
+
+func (resourceAccessor) SelfLink(obj runtime.Object) (string, error) {
+	accessor, err := CommonAccessor(obj)
+	if err != nil {
+		return "", err
+	}
+	return accessor.GetSelfLink(), nil
+}
+
+func (resourceAccessor) SetSelfLink(obj runtime.Object, selfLink string) error {
+	accessor, err := CommonAccessor(obj)
+	if err != nil {
+		return err
+	}
+	accessor.SetSelfLink(selfLink)
+	return nil
+}
+
+func (resourceAccessor) Labels(obj runtime.Object) (map[string]string, error) {
+	accessor, err := Accessor(obj)
+	if err != nil {
+		return nil, err
+	}
+	return accessor.GetLabels(), nil
+}
+
+func (resourceAccessor) SetLabels(obj runtime.Object, labels map[string]string) error {
+	accessor, err := Accessor(obj)
+	if err != nil {
+		return err
+	}
+	accessor.SetLabels(labels)
+	return nil
+}
+
+func (resourceAccessor) Annotations(obj runtime.Object) (map[string]string, error) {
+	accessor, err := Accessor(obj)
+	if err != nil {
+		return nil, err
+	}
+	return accessor.GetAnnotations(), nil
+}
+
+func (resourceAccessor) SetAnnotations(obj runtime.Object, annotations map[string]string) error {
+	accessor, err := Accessor(obj)
+	if err != nil {
+		return err
+	}
+	accessor.SetAnnotations(annotations)
+	return nil
+}
+
+func (resourceAccessor) ResourceVersion(obj runtime.Object) (string, error) {
+	accessor, err := CommonAccessor(obj)
+	if err != nil {
+		return "", err
+	}
+	return accessor.GetResourceVersion(), nil
+}
+
+func (resourceAccessor) SetResourceVersion(obj runtime.Object, version string) error {
+	accessor, err := CommonAccessor(obj)
+	if err != nil {
+		return err
+	}
+	accessor.SetResourceVersion(version)
+	return nil
+}
+
+func (resourceAccessor) Continue(obj runtime.Object) (string, error) {
+	accessor, err := ListAccessor(obj)
+	if err != nil {
+		return "", err
+	}
+	return accessor.GetContinue(), nil
+}
+
+func (resourceAccessor) SetContinue(obj runtime.Object, version string) error {
+	accessor, err := ListAccessor(obj)
+	if err != nil {
+		return err
+	}
+	accessor.SetContinue(version)
+	return nil
+}
+
+// extractFromOwnerReference extracts v to o. v is the OwnerReferences field of an object.
+func extractFromOwnerReference(v reflect.Value, o *metav1.OwnerReference) error {
+	if err := runtime.Field(v, "APIVersion", &o.APIVersion); err != nil {
+		return err
+	}
+	if err := runtime.Field(v, "Kind", &o.Kind); err != nil {
+		return err
+	}
+	if err := runtime.Field(v, "Name", &o.Name); err != nil {
+		return err
+	}
+	if err := runtime.Field(v, "UID", &o.UID); err != nil {
+		return err
+	}
+	var controllerPtr *bool
+	if err := runtime.Field(v, "Controller", &controllerPtr); err != nil {
+		return err
+	}
+	if controllerPtr != nil {
+		controller := *controllerPtr
+		o.Controller = &controller
+	}
+	var blockOwnerDeletionPtr *bool
+	if err := runtime.Field(v, "BlockOwnerDeletion", &blockOwnerDeletionPtr); err != nil {
+		return err
+	}
+	if blockOwnerDeletionPtr != nil {
+		block := *blockOwnerDeletionPtr
+		o.BlockOwnerDeletion = &block
+	}
+	return nil
+}
+
+// setOwnerReference sets v to o. v is the OwnerReferences field of an object.
+func setOwnerReference(v reflect.Value, o *metav1.OwnerReference) error {
+	if err := runtime.SetField(o.APIVersion, v, "APIVersion"); err != nil {
+		return err
+	}
+	if err := runtime.SetField(o.Kind, v, "Kind"); err != nil {
+		return err
+	}
+	if err := runtime.SetField(o.Name, v, "Name"); err != nil {
+		return err
+	}
+	if err := runtime.SetField(o.UID, v, "UID"); err != nil {
+		return err
+	}
+	if o.Controller != nil {
+		controller := *(o.Controller)
+		if err := runtime.SetField(&controller, v, "Controller"); err != nil {
+			return err
+		}
+	}
+	if o.BlockOwnerDeletion != nil {
+		block := *(o.BlockOwnerDeletion)
+		if err := runtime.SetField(&block, v, "BlockOwnerDeletion"); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// genericAccessor contains pointers into the metadata fields of an arbitrary
+// struct and implements the Accessor interface.
+type genericAccessor struct {
+	namespace         *string
+	name              *string
+	generateName      *string
+	uid               *types.UID
+	apiVersion        *string
+	kind              *string
+	resourceVersion   *string
+	selfLink          *string
+	creationTimestamp *metav1.Time
+	deletionTimestamp **metav1.Time
+	labels            *map[string]string
+	annotations       *map[string]string
+	ownerReferences   reflect.Value
+	finalizers        *[]string
+}
+
+func (a genericAccessor) GetNamespace() string {
+	if a.namespace == nil {
+		return ""
+	}
+	return *a.namespace
+}
+
+func (a genericAccessor) SetNamespace(namespace string) {
+	if a.namespace == nil {
+		return
+	}
+	*a.namespace = namespace
+}
+
+func (a genericAccessor) GetName() string {
+	if a.name == nil {
+		return ""
+	}
+	return *a.name
+}
+
+func (a genericAccessor) SetName(name string) {
+	if a.name == nil {
+		return
+	}
+	*a.name = name
+}
+
+func (a genericAccessor) GetGenerateName() string {
+	if a.generateName == nil {
+		return ""
+	}
+	return *a.generateName
+}
+
+func (a genericAccessor) SetGenerateName(generateName string) {
+	if a.generateName == nil {
+		return
+	}
+	*a.generateName = generateName
+}
+
+func (a genericAccessor) GetUID() types.UID {
+	if a.uid == nil {
+		return ""
+	}
+	return *a.uid
+}
+
+func (a genericAccessor) SetUID(uid types.UID) {
+	if a.uid == nil {
+		return
+	}
+	*a.uid = uid
+}
+
+func (a genericAccessor) GetAPIVersion() string {
+	return *a.apiVersion
+}
+
+func (a genericAccessor) SetAPIVersion(version string) {
+	*a.apiVersion = version
+}
+
+func (a genericAccessor) GetKind() string {
+	return *a.kind
+}
+
+func (a genericAccessor) SetKind(kind string) {
+	*a.kind = kind
+}
+
+func (a genericAccessor) GetResourceVersion() string {
+	return *a.resourceVersion
+}
+
+func (a genericAccessor) SetResourceVersion(version string) {
+	*a.resourceVersion = version
+}
+
+func (a genericAccessor) GetSelfLink() string {
+	return *a.selfLink
+}
+
+func (a genericAccessor) SetSelfLink(selfLink string) {
+	*a.selfLink = selfLink
+}
+
+func (a genericAccessor) GetCreationTimestamp() metav1.Time {
+	return *a.creationTimestamp
+}
+
+func (a genericAccessor) SetCreationTimestamp(timestamp metav1.Time) {
+	*a.creationTimestamp = timestamp
+}
+
+func (a genericAccessor) GetDeletionTimestamp() *metav1.Time {
+	return *a.deletionTimestamp
+}
+
+func (a genericAccessor) SetDeletionTimestamp(timestamp *metav1.Time) {
+	*a.deletionTimestamp = timestamp
+}
+
+func (a genericAccessor) GetLabels() map[string]string {
+	if a.labels == nil {
+		return nil
+	}
+	return *a.labels
+}
+
+func (a genericAccessor) SetLabels(labels map[string]string) {
+	*a.labels = labels
+}
+
+func (a genericAccessor) GetAnnotations() map[string]string {
+	if a.annotations == nil {
+		return nil
+	}
+	return *a.annotations
+}
+
+func (a genericAccessor) SetAnnotations(annotations map[string]string) {
+	if a.annotations == nil {
+		emptyAnnotations := make(map[string]string)
+		a.annotations = &emptyAnnotations
+	}
+	*a.annotations = annotations
+}
+
+func (a genericAccessor) GetFinalizers() []string {
+	if a.finalizers == nil {
+		return nil
+	}
+	return *a.finalizers
+}
+
+func (a genericAccessor) SetFinalizers(finalizers []string) {
+	*a.finalizers = finalizers
+}
+
+func (a genericAccessor) GetOwnerReferences() []metav1.OwnerReference {
+	var ret []metav1.OwnerReference
+	s := a.ownerReferences
+	if s.Kind() != reflect.Ptr || s.Elem().Kind() != reflect.Slice {
+		klog.Errorf("expect %v to be a pointer to slice", s)
+		return ret
+	}
+	s = s.Elem()
+	// Set the capacity to one element greater to avoid a copy if the caller later appends an element.
+	ret = make([]metav1.OwnerReference, s.Len(), s.Len()+1)
+	for i := 0; i < s.Len(); i++ {
+		if err := extractFromOwnerReference(s.Index(i), &ret[i]); err != nil {
+			klog.Errorf("extractFromOwnerReference failed: %v", err)
+			return ret
+		}
+	}
+	return ret
+}
+
+func (a genericAccessor) SetOwnerReferences(references []metav1.OwnerReference) {
+	s := a.ownerReferences
+	if s.Kind() != reflect.Ptr || s.Elem().Kind() != reflect.Slice {
+		klog.Errorf("expect %v to be a pointer to slice", s)
+	}
+	s = s.Elem()
+	newReferences := reflect.MakeSlice(s.Type(), len(references), len(references))
+	for i := 0; i < len(references); i++ {
+		if err := setOwnerReference(newReferences.Index(i), &references[i]); err != nil {
+			klog.Errorf("setOwnerReference failed: %v", err)
+			return
+		}
+	}
+	s.Set(newReferences)
+}
+
+// extractFromTypeMeta extracts pointers to version and kind fields from an object
+func extractFromTypeMeta(v reflect.Value, a *genericAccessor) error {
+	if err := runtime.FieldPtr(v, "APIVersion", &a.apiVersion); err != nil {
+		return err
+	}
+	if err := runtime.FieldPtr(v, "Kind", &a.kind); err != nil {
+		return err
+	}
+	return nil
+}
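
In practice most callers touch this file only through meta.Accessor, which lets generic code read and mutate metadata without knowing the concrete type. A minimal sketch (the names are illustrative):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// *metav1.ObjectMeta satisfies metav1.Object, so Accessor succeeds.
	obj := &metav1.ObjectMeta{Name: "afrouter", Namespace: "voltha"}

	accessor, err := meta.Accessor(obj)
	if err != nil {
		panic(err)
	}
	accessor.SetLabels(map[string]string{"app": "affinity-router"})
	fmt.Println(accessor.GetNamespace(), accessor.GetName(), accessor.GetLabels())
}
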
diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go b/vendor/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go
new file mode 100644
index 0000000..6b01bf1
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go
@@ -0,0 +1,210 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package meta
+
+import (
+	"fmt"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
+)
+
+// MultiRESTMapper is a wrapper for multiple RESTMappers.
+type MultiRESTMapper []RESTMapper
+
+func (m MultiRESTMapper) String() string {
+	nested := []string{}
+	for _, t := range m {
+		currString := fmt.Sprintf("%v", t)
+		splitStrings := strings.Split(currString, "\n")
+		nested = append(nested, strings.Join(splitStrings, "\n\t"))
+	}
+
+	return fmt.Sprintf("MultiRESTMapper{\n\t%s\n}", strings.Join(nested, "\n\t"))
+}
+
+// ResourceSingularizer converts a REST resource name from plural to singular (e.g., from pods to pod).
+// This implementation supports multiple REST schemas and returns the first match.
+func (m MultiRESTMapper) ResourceSingularizer(resource string) (singular string, err error) {
+	for _, t := range m {
+		singular, err = t.ResourceSingularizer(resource)
+		if err == nil {
+			return
+		}
+	}
+	return
+}
+
+func (m MultiRESTMapper) ResourcesFor(resource schema.GroupVersionResource) ([]schema.GroupVersionResource, error) {
+	allGVRs := []schema.GroupVersionResource{}
+	for _, t := range m {
+		gvrs, err := t.ResourcesFor(resource)
+		// ignore "no match" errors, but any other error percolates back up
+		if IsNoMatchError(err) {
+			continue
+		}
+		if err != nil {
+			return nil, err
+		}
+
+		// walk the existing values to de-dup
+		for _, curr := range gvrs {
+			found := false
+			for _, existing := range allGVRs {
+				if curr == existing {
+					found = true
+					break
+				}
+			}
+
+			if !found {
+				allGVRs = append(allGVRs, curr)
+			}
+		}
+	}
+
+	if len(allGVRs) == 0 {
+		return nil, &NoResourceMatchError{PartialResource: resource}
+	}
+
+	return allGVRs, nil
+}
+
+func (m MultiRESTMapper) KindsFor(resource schema.GroupVersionResource) (gvk []schema.GroupVersionKind, err error) {
+	allGVKs := []schema.GroupVersionKind{}
+	for _, t := range m {
+		gvks, err := t.KindsFor(resource)
+		// ignore "no match" errors, but any other error percolates back up
+		if IsNoMatchError(err) {
+			continue
+		}
+		if err != nil {
+			return nil, err
+		}
+
+		// walk the existing values to de-dup
+		for _, curr := range gvks {
+			found := false
+			for _, existing := range allGVKs {
+				if curr == existing {
+					found = true
+					break
+				}
+			}
+
+			if !found {
+				allGVKs = append(allGVKs, curr)
+			}
+		}
+	}
+
+	if len(allGVKs) == 0 {
+		return nil, &NoResourceMatchError{PartialResource: resource}
+	}
+
+	return allGVKs, nil
+}
+
+func (m MultiRESTMapper) ResourceFor(resource schema.GroupVersionResource) (schema.GroupVersionResource, error) {
+	resources, err := m.ResourcesFor(resource)
+	if err != nil {
+		return schema.GroupVersionResource{}, err
+	}
+	if len(resources) == 1 {
+		return resources[0], nil
+	}
+
+	return schema.GroupVersionResource{}, &AmbiguousResourceError{PartialResource: resource, MatchingResources: resources}
+}
+
+func (m MultiRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) {
+	kinds, err := m.KindsFor(resource)
+	if err != nil {
+		return schema.GroupVersionKind{}, err
+	}
+	if len(kinds) == 1 {
+		return kinds[0], nil
+	}
+
+	return schema.GroupVersionKind{}, &AmbiguousResourceError{PartialResource: resource, MatchingKinds: kinds}
+}
+
+// RESTMapping provides the REST mapping for the resource based on the
+// kind and version. This implementation supports multiple REST schemas and
+// returns the first match.
+func (m MultiRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*RESTMapping, error) {
+	allMappings := []*RESTMapping{}
+	errors := []error{}
+
+	for _, t := range m {
+		currMapping, err := t.RESTMapping(gk, versions...)
+		// ignore "no match" errors, but any other error percolates back up
+		if IsNoMatchError(err) {
+			continue
+		}
+		if err != nil {
+			errors = append(errors, err)
+			continue
+		}
+
+		allMappings = append(allMappings, currMapping)
+	}
+
+	// if we got exactly one mapping, then use it even if other requests failed
+	if len(allMappings) == 1 {
+		return allMappings[0], nil
+	}
+	if len(allMappings) > 1 {
+		var kinds []schema.GroupVersionKind
+		for _, m := range allMappings {
+			kinds = append(kinds, m.GroupVersionKind)
+		}
+		return nil, &AmbiguousKindError{PartialKind: gk.WithVersion(""), MatchingKinds: kinds}
+	}
+	if len(errors) > 0 {
+		return nil, utilerrors.NewAggregate(errors)
+	}
+	return nil, &NoKindMatchError{GroupKind: gk, SearchedVersions: versions}
+}
+
+// RESTMappings returns all possible RESTMappings for the provided group kind, or an error
+// if the type is not recognized.
+func (m MultiRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*RESTMapping, error) {
+	var allMappings []*RESTMapping
+	var errors []error
+
+	for _, t := range m {
+		currMappings, err := t.RESTMappings(gk, versions...)
+		// ignore "no match" errors, but any other error percolates back up
+		if IsNoMatchError(err) {
+			continue
+		}
+		if err != nil {
+			errors = append(errors, err)
+			continue
+		}
+		allMappings = append(allMappings, currMappings...)
+	}
+	if len(errors) > 0 {
+		return nil, utilerrors.NewAggregate(errors)
+	}
+	if len(allMappings) == 0 {
+		return nil, &NoKindMatchError{GroupKind: gk, SearchedVersions: versions}
+	}
+	return allMappings, nil
+}
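
Where FirstHitRESTMapper stops at the first success, MultiRESTMapper merges and de-duplicates answers across all of its children, which is what makes the ambiguity errors above possible. A sketch, with the child mappers assumed to exist:

package example

import (
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// kindsAcross returns the de-duplicated kinds for gvr across all mappers;
// a NoResourceMatchError means no child recognized the resource.
func kindsAcross(mappers []meta.RESTMapper, gvr schema.GroupVersionResource) ([]schema.GroupVersionKind, error) {
	return meta.MultiRESTMapper(mappers).KindsFor(gvr)
}
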
diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/priority.go b/vendor/k8s.io/apimachinery/pkg/api/meta/priority.go
new file mode 100644
index 0000000..fa11c58
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/meta/priority.go
@@ -0,0 +1,222 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package meta
+
+import (
+	"fmt"
+
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+const (
+	AnyGroup    = "*"
+	AnyVersion  = "*"
+	AnyResource = "*"
+	AnyKind     = "*"
+)
+
+// PriorityRESTMapper is a wrapper for automatically choosing a particular Resource or Kind
+// when multiple matches are possible
+type PriorityRESTMapper struct {
+	// Delegate is the RESTMapper to use to locate all the Kind and Resource matches
+	Delegate RESTMapper
+
+	// ResourcePriority is a list of priority patterns to apply to matching resources.
+	// The list of all matching resources is narrowed based on the patterns until only one remains.
+	// A pattern with no matches is skipped.  A pattern with more than one match uses its
+	// matches as the list to continue matching against.
+	ResourcePriority []schema.GroupVersionResource
+
+	// KindPriority is a list of priority patterns to apply to matching kinds.
+	// The list of all matching kinds is narrowed based on the patterns until only one remains.
+	// A pattern with no matches is skipped.  A pattern with more than one match uses its
+	// matches as the list to continue matching against.
+	KindPriority []schema.GroupVersionKind
+}
+
+func (m PriorityRESTMapper) String() string {
+	return fmt.Sprintf("PriorityRESTMapper{\n\t%v\n\t%v\n\t%v\n}", m.ResourcePriority, m.KindPriority, m.Delegate)
+}
+
+// ResourceFor finds all resources, then passes them through the ResourcePriority patterns to find a single matching hit.
+func (m PriorityRESTMapper) ResourceFor(partiallySpecifiedResource schema.GroupVersionResource) (schema.GroupVersionResource, error) {
+	originalGVRs, originalErr := m.Delegate.ResourcesFor(partiallySpecifiedResource)
+	if originalErr != nil && len(originalGVRs) == 0 {
+		return schema.GroupVersionResource{}, originalErr
+	}
+	if len(originalGVRs) == 1 {
+		return originalGVRs[0], originalErr
+	}
+
+	remainingGVRs := append([]schema.GroupVersionResource{}, originalGVRs...)
+	for _, pattern := range m.ResourcePriority {
+		matchedGVRs := []schema.GroupVersionResource{}
+		for _, gvr := range remainingGVRs {
+			if resourceMatches(pattern, gvr) {
+				matchedGVRs = append(matchedGVRs, gvr)
+			}
+		}
+
+		switch len(matchedGVRs) {
+		case 0:
+			// if you have no matches, then nothing matched this pattern; just move to the next
+			continue
+		case 1:
+			// one match, return
+			return matchedGVRs[0], originalErr
+		default:
+			// more than one match, use the matched hits as the list moving to the next pattern.
+			// this way you can have a series of selection criteria
+			remainingGVRs = matchedGVRs
+		}
+	}
+
+	return schema.GroupVersionResource{}, &AmbiguousResourceError{PartialResource: partiallySpecifiedResource, MatchingResources: originalGVRs}
+}
+
+// KindFor finds all kinds, then passes them through the KindPriority patterns to find a single matching hit.
+func (m PriorityRESTMapper) KindFor(partiallySpecifiedResource schema.GroupVersionResource) (schema.GroupVersionKind, error) {
+	originalGVKs, originalErr := m.Delegate.KindsFor(partiallySpecifiedResource)
+	if originalErr != nil && len(originalGVKs) == 0 {
+		return schema.GroupVersionKind{}, originalErr
+	}
+	if len(originalGVKs) == 1 {
+		return originalGVKs[0], originalErr
+	}
+
+	remainingGVKs := append([]schema.GroupVersionKind{}, originalGVKs...)
+	for _, pattern := range m.KindPriority {
+		matchedGVKs := []schema.GroupVersionKind{}
+		for _, gvk := range remainingGVKs {
+			if kindMatches(pattern, gvk) {
+				matchedGVKs = append(matchedGVKs, gvk)
+			}
+		}
+
+		switch len(matchedGVKs) {
+		case 0:
+			// if you have no matches, then nothing matched this pattern; just move to the next
+			continue
+		case 1:
+			// one match, return
+			return matchedGVKs[0], originalErr
+		default:
+			// more than one match, use the matched hits as the list when moving to the next pattern.
+			// This way you can apply a series of selection criteria.
+			remainingGVKs = matchedGVKs
+		}
+	}
+
+	return schema.GroupVersionKind{}, &AmbiguousResourceError{PartialResource: partiallySpecifiedResource, MatchingKinds: originalGVKs}
+}
+
+func resourceMatches(pattern schema.GroupVersionResource, resource schema.GroupVersionResource) bool {
+	if pattern.Group != AnyGroup && pattern.Group != resource.Group {
+		return false
+	}
+	if pattern.Version != AnyVersion && pattern.Version != resource.Version {
+		return false
+	}
+	if pattern.Resource != AnyResource && pattern.Resource != resource.Resource {
+		return false
+	}
+
+	return true
+}
+
+func kindMatches(pattern schema.GroupVersionKind, kind schema.GroupVersionKind) bool {
+	if pattern.Group != AnyGroup && pattern.Group != kind.Group {
+		return false
+	}
+	if pattern.Version != AnyVersion && pattern.Version != kind.Version {
+		return false
+	}
+	if pattern.Kind != AnyKind && pattern.Kind != kind.Kind {
+		return false
+	}
+
+	return true
+}
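+
+// For example (editor's note): the pattern {Group: AnyGroup, Version: "v1",
+// Kind: AnyKind} matches every v1 kind, while {Group: "apps", Version:
+// AnyVersion, Kind: AnyKind} matches every kind in the apps group.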
+
+func (m PriorityRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (mapping *RESTMapping, err error) {
+	mappings, originalErr := m.Delegate.RESTMappings(gk, versions...)
+	if originalErr != nil && len(mappings) == 0 {
+		return nil, originalErr
+	}
+
+	// any versions the user provides take priority
+	priorities := m.KindPriority
+	if len(versions) > 0 {
+		priorities = make([]schema.GroupVersionKind, 0, len(m.KindPriority)+len(versions))
+		for _, version := range versions {
+			gv := schema.GroupVersion{
+				Version: version,
+				Group:   gk.Group,
+			}
+			priorities = append(priorities, gv.WithKind(AnyKind))
+		}
+		priorities = append(priorities, m.KindPriority...)
+	}
+
+	remaining := append([]*RESTMapping{}, mappings...)
+	for _, pattern := range priorities {
+		var matching []*RESTMapping
+		for _, m := range remaining {
+			if kindMatches(pattern, m.GroupVersionKind) {
+				matching = append(matching, m)
+			}
+		}
+
+		switch len(matching) {
+		case 0:
+			// if you have no matches, then nothing matched this pattern; just move to the next
+			continue
+		case 1:
+			// one match, return
+			return matching[0], originalErr
+		default:
+			// more than one match, use the matched hits as the list when moving to the next pattern.
+			// This way you can apply a series of selection criteria.
+			remaining = matching
+		}
+	}
+	if len(remaining) == 1 {
+		return remaining[0], originalErr
+	}
+
+	var kinds []schema.GroupVersionKind
+	for _, m := range mappings {
+		kinds = append(kinds, m.GroupVersionKind)
+	}
+	return nil, &AmbiguousKindError{PartialKind: gk.WithVersion(""), MatchingKinds: kinds}
+}
+
+func (m PriorityRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*RESTMapping, error) {
+	return m.Delegate.RESTMappings(gk, versions...)
+}
+
+func (m PriorityRESTMapper) ResourceSingularizer(resource string) (singular string, err error) {
+	return m.Delegate.ResourceSingularizer(resource)
+}
+
+func (m PriorityRESTMapper) ResourcesFor(partiallySpecifiedResource schema.GroupVersionResource) ([]schema.GroupVersionResource, error) {
+	return m.Delegate.ResourcesFor(partiallySpecifiedResource)
+}
+
+func (m PriorityRESTMapper) KindsFor(partiallySpecifiedResource schema.GroupVersionResource) (gvk []schema.GroupVersionKind, err error) {
+	return m.Delegate.KindsFor(partiallySpecifiedResource)
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go b/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go
new file mode 100644
index 0000000..41b60d7
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go
@@ -0,0 +1,518 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// TODO: move everything in this file to pkg/api/rest
+package meta
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// restScope implements the RESTScope interface.
+type restScope struct {
+	name RESTScopeName
+}
+
+func (r *restScope) Name() RESTScopeName {
+	return r.name
+}
+
+var RESTScopeNamespace = &restScope{
+	name: RESTScopeNameNamespace,
+}
+
+var RESTScopeRoot = &restScope{
+	name: RESTScopeNameRoot,
+}
+
+// DefaultRESTMapper exposes mappings between the types defined in a
+// runtime.Scheme. It assumes that all types defined in the provided scheme
+// can be mapped with the provided MetadataAccessor and Codec interfaces.
+//
+// The resource name of a Kind is defined as the lowercase,
+// English-plural version of the Kind string.
+// When converting from resource to Kind, the singular version of the
+// resource name is also accepted for convenience.
+//
+// TODO: Only accept plural for some operations for increased control?
+// (`get pod bar` vs `get pods bar`)
+type DefaultRESTMapper struct {
+	defaultGroupVersions []schema.GroupVersion
+
+	resourceToKind       map[schema.GroupVersionResource]schema.GroupVersionKind
+	kindToPluralResource map[schema.GroupVersionKind]schema.GroupVersionResource
+	kindToScope          map[schema.GroupVersionKind]RESTScope
+	singularToPlural     map[schema.GroupVersionResource]schema.GroupVersionResource
+	pluralToSingular     map[schema.GroupVersionResource]schema.GroupVersionResource
+}
+
+func (m *DefaultRESTMapper) String() string {
+	return fmt.Sprintf("DefaultRESTMapper{kindToPluralResource=%v}", m.kindToPluralResource)
+}
+
+var _ RESTMapper = &DefaultRESTMapper{}
+
+// NewDefaultRESTMapper initializes a mapping between Kind and APIVersion
+// to a resource name and back based on the objects in a runtime.Scheme
+// and the Kubernetes API conventions. It takes a priority list of the group
+// versions to search when an object has no default version (set empty to
+// return an error in that case).
+func NewDefaultRESTMapper(defaultGroupVersions []schema.GroupVersion) *DefaultRESTMapper {
+	resourceToKind := make(map[schema.GroupVersionResource]schema.GroupVersionKind)
+	kindToPluralResource := make(map[schema.GroupVersionKind]schema.GroupVersionResource)
+	kindToScope := make(map[schema.GroupVersionKind]RESTScope)
+	singularToPlural := make(map[schema.GroupVersionResource]schema.GroupVersionResource)
+	pluralToSingular := make(map[schema.GroupVersionResource]schema.GroupVersionResource)
+	// TODO: verify name mappings work correctly when versions differ
+
+	return &DefaultRESTMapper{
+		resourceToKind:       resourceToKind,
+		kindToPluralResource: kindToPluralResource,
+		kindToScope:          kindToScope,
+		defaultGroupVersions: defaultGroupVersions,
+		singularToPlural:     singularToPlural,
+		pluralToSingular:     pluralToSingular,
+	}
+}
+
+func (m *DefaultRESTMapper) Add(kind schema.GroupVersionKind, scope RESTScope) {
+	plural, singular := UnsafeGuessKindToResource(kind)
+	m.AddSpecific(kind, plural, singular, scope)
+}
+
+func (m *DefaultRESTMapper) AddSpecific(kind schema.GroupVersionKind, plural, singular schema.GroupVersionResource, scope RESTScope) {
+	m.singularToPlural[singular] = plural
+	m.pluralToSingular[plural] = singular
+
+	m.resourceToKind[singular] = kind
+	m.resourceToKind[plural] = kind
+
+	m.kindToPluralResource[kind] = plural
+	m.kindToScope[kind] = scope
+}
+
+// unpluralizedSuffixes is a list of resource suffixes that are the same in plural and singular form.
+// This is only necessary because some bits of code are lazy and don't actually use the RESTMapper like they should.
+// TODO eliminate this so that different callers can correctly map to resources.  This probably means updating all
+// callers to use the RESTMapper they mean.
+var unpluralizedSuffixes = []string{
+	"endpoints",
+}
+
+// UnsafeGuessKindToResource converts Kind to a resource name.
+// Broken. This method only "sort of" works when used outside of this package.  It assumes that Kinds and Resources match
+// and they aren't guaranteed to do so.
+func UnsafeGuessKindToResource(kind schema.GroupVersionKind) ( /*plural*/ schema.GroupVersionResource /*singular*/, schema.GroupVersionResource) {
+	kindName := kind.Kind
+	if len(kindName) == 0 {
+		return schema.GroupVersionResource{}, schema.GroupVersionResource{}
+	}
+	singularName := strings.ToLower(kindName)
+	singular := kind.GroupVersion().WithResource(singularName)
+
+	for _, skip := range unpluralizedSuffixes {
+		if strings.HasSuffix(singularName, skip) {
+			return singular, singular
+		}
+	}
+
+	switch string(singularName[len(singularName)-1]) {
+	case "s":
+		return kind.GroupVersion().WithResource(singularName + "es"), singular
+	case "y":
+		return kind.GroupVersion().WithResource(strings.TrimSuffix(singularName, "y") + "ies"), singular
+	}
+
+	return kind.GroupVersion().WithResource(singularName + "s"), singular
+}
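+
+// For illustration (editor's note), the guessing rules above map:
+//
+//	Kind "Pod"       -> plural "pods",      singular "pod"
+//	Kind "Policy"    -> plural "policies",  singular "policy"
+//	Kind "Status"    -> plural "statuses",  singular "status"
+//	Kind "Endpoints" -> plural "endpoints", singular "endpoints" (unpluralized suffix)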
+
+// ResourceSingularizer implements RESTMapper
+// It converts a resource name from plural to singular (e.g., from pods to pod)
+func (m *DefaultRESTMapper) ResourceSingularizer(resourceType string) (string, error) {
+	partialResource := schema.GroupVersionResource{Resource: resourceType}
+	resources, err := m.ResourcesFor(partialResource)
+	if err != nil {
+		return resourceType, err
+	}
+
+	singular := schema.GroupVersionResource{}
+	for _, curr := range resources {
+		currSingular, ok := m.pluralToSingular[curr]
+		if !ok {
+			continue
+		}
+		if singular.Empty() {
+			singular = currSingular
+			continue
+		}
+
+		if currSingular.Resource != singular.Resource {
+			return resourceType, fmt.Errorf("multiple possible singular resources (%v) found for %v", resources, resourceType)
+		}
+	}
+
+	if singular.Empty() {
+		return resourceType, fmt.Errorf("no singular of resource %v has been defined", resourceType)
+	}
+
+	return singular.Resource, nil
+}
+
+// coerceResourceForMatching makes the resource lower case and converts internal versions to unspecified (legacy behavior)
+func coerceResourceForMatching(resource schema.GroupVersionResource) schema.GroupVersionResource {
+	resource.Resource = strings.ToLower(resource.Resource)
+	if resource.Version == runtime.APIVersionInternal {
+		resource.Version = ""
+	}
+
+	return resource
+}
+
+func (m *DefaultRESTMapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) {
+	resource := coerceResourceForMatching(input)
+
+	hasResource := len(resource.Resource) > 0
+	hasGroup := len(resource.Group) > 0
+	hasVersion := len(resource.Version) > 0
+
+	if !hasResource {
+		return nil, fmt.Errorf("a resource must be present, got: %v", resource)
+	}
+
+	ret := []schema.GroupVersionResource{}
+	switch {
+	case hasGroup && hasVersion:
+		// fully qualified.  Find the exact match
+		for plural, singular := range m.pluralToSingular {
+			if singular == resource {
+				ret = append(ret, plural)
+				break
+			}
+			if plural == resource {
+				ret = append(ret, plural)
+				break
+			}
+		}
+
+	case hasGroup:
+		// given a group, prefer an exact match.  If you don't find one, resort to a prefix match on group
+		foundExactMatch := false
+		requestedGroupResource := resource.GroupResource()
+		for plural, singular := range m.pluralToSingular {
+			if singular.GroupResource() == requestedGroupResource {
+				foundExactMatch = true
+				ret = append(ret, plural)
+			}
+			if plural.GroupResource() == requestedGroupResource {
+				foundExactMatch = true
+				ret = append(ret, plural)
+			}
+		}
+
+		// if you didn't find an exact match, match on group prefixing. This allows storageclass.storage to match
+		// storageclass.storage.k8s.io
+		if !foundExactMatch {
+			for plural, singular := range m.pluralToSingular {
+				if !strings.HasPrefix(plural.Group, requestedGroupResource.Group) {
+					continue
+				}
+				if singular.Resource == requestedGroupResource.Resource {
+					ret = append(ret, plural)
+				}
+				if plural.Resource == requestedGroupResource.Resource {
+					ret = append(ret, plural)
+				}
+			}
+		}
+
+	case hasVersion:
+		for plural, singular := range m.pluralToSingular {
+			if singular.Version == resource.Version && singular.Resource == resource.Resource {
+				ret = append(ret, plural)
+			}
+			if plural.Version == resource.Version && plural.Resource == resource.Resource {
+				ret = append(ret, plural)
+			}
+		}
+
+	default:
+		for plural, singular := range m.pluralToSingular {
+			if singular.Resource == resource.Resource {
+				ret = append(ret, plural)
+			}
+			if plural.Resource == resource.Resource {
+				ret = append(ret, plural)
+			}
+		}
+	}
+
+	if len(ret) == 0 {
+		return nil, &NoResourceMatchError{PartialResource: resource}
+	}
+
+	sort.Sort(resourceByPreferredGroupVersion{ret, m.defaultGroupVersions})
+	return ret, nil
+}
+
+func (m *DefaultRESTMapper) ResourceFor(resource schema.GroupVersionResource) (schema.GroupVersionResource, error) {
+	resources, err := m.ResourcesFor(resource)
+	if err != nil {
+		return schema.GroupVersionResource{}, err
+	}
+	if len(resources) == 1 {
+		return resources[0], nil
+	}
+
+	return schema.GroupVersionResource{}, &AmbiguousResourceError{PartialResource: resource, MatchingResources: resources}
+}
+
+func (m *DefaultRESTMapper) KindsFor(input schema.GroupVersionResource) ([]schema.GroupVersionKind, error) {
+	resource := coerceResourceForMatching(input)
+
+	hasResource := len(resource.Resource) > 0
+	hasGroup := len(resource.Group) > 0
+	hasVersion := len(resource.Version) > 0
+
+	if !hasResource {
+		return nil, fmt.Errorf("a resource must be present, got: %v", resource)
+	}
+
+	ret := []schema.GroupVersionKind{}
+	switch {
+	// fully qualified.  Find the exact match
+	case hasGroup && hasVersion:
+		kind, exists := m.resourceToKind[resource]
+		if exists {
+			ret = append(ret, kind)
+		}
+
+	case hasGroup:
+		foundExactMatch := false
+		requestedGroupResource := resource.GroupResource()
+		for currResource, currKind := range m.resourceToKind {
+			if currResource.GroupResource() == requestedGroupResource {
+				foundExactMatch = true
+				ret = append(ret, currKind)
+			}
+		}
+
+		// if you didn't find an exact match, match on group prefixing. This allows storageclass.storage to match
+		// storageclass.storage.k8s.io
+		if !foundExactMatch {
+			for currResource, currKind := range m.resourceToKind {
+				if !strings.HasPrefix(currResource.Group, requestedGroupResource.Group) {
+					continue
+				}
+				if currResource.Resource == requestedGroupResource.Resource {
+					ret = append(ret, currKind)
+				}
+			}
+		}
+
+	case hasVersion:
+		for currResource, currKind := range m.resourceToKind {
+			if currResource.Version == resource.Version && currResource.Resource == resource.Resource {
+				ret = append(ret, currKind)
+			}
+		}
+
+	default:
+		for currResource, currKind := range m.resourceToKind {
+			if currResource.Resource == resource.Resource {
+				ret = append(ret, currKind)
+			}
+		}
+	}
+
+	if len(ret) == 0 {
+		return nil, &NoResourceMatchError{PartialResource: input}
+	}
+
+	sort.Sort(kindByPreferredGroupVersion{ret, m.defaultGroupVersions})
+	return ret, nil
+}
+
+func (m *DefaultRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) {
+	kinds, err := m.KindsFor(resource)
+	if err != nil {
+		return schema.GroupVersionKind{}, err
+	}
+	if len(kinds) == 1 {
+		return kinds[0], nil
+	}
+
+	return schema.GroupVersionKind{}, &AmbiguousResourceError{PartialResource: resource, MatchingKinds: kinds}
+}
+
+type kindByPreferredGroupVersion struct {
+	list      []schema.GroupVersionKind
+	sortOrder []schema.GroupVersion
+}
+
+func (o kindByPreferredGroupVersion) Len() int      { return len(o.list) }
+func (o kindByPreferredGroupVersion) Swap(i, j int) { o.list[i], o.list[j] = o.list[j], o.list[i] }
+func (o kindByPreferredGroupVersion) Less(i, j int) bool {
+	lhs := o.list[i]
+	rhs := o.list[j]
+	if lhs == rhs {
+		return false
+	}
+
+	if lhs.GroupVersion() == rhs.GroupVersion() {
+		return lhs.Kind < rhs.Kind
+	}
+
+	// otherwise, the difference is in the GroupVersion, so we need to sort with respect to the preferred order
+	lhsIndex := -1
+	rhsIndex := -1
+
+	for i := range o.sortOrder {
+		if o.sortOrder[i] == lhs.GroupVersion() {
+			lhsIndex = i
+		}
+		if o.sortOrder[i] == rhs.GroupVersion() {
+			rhsIndex = i
+		}
+	}
+
+	if rhsIndex == -1 {
+		return true
+	}
+
+	return lhsIndex < rhsIndex
+}
+
+type resourceByPreferredGroupVersion struct {
+	list      []schema.GroupVersionResource
+	sortOrder []schema.GroupVersion
+}
+
+func (o resourceByPreferredGroupVersion) Len() int      { return len(o.list) }
+func (o resourceByPreferredGroupVersion) Swap(i, j int) { o.list[i], o.list[j] = o.list[j], o.list[i] }
+func (o resourceByPreferredGroupVersion) Less(i, j int) bool {
+	lhs := o.list[i]
+	rhs := o.list[j]
+	if lhs == rhs {
+		return false
+	}
+
+	if lhs.GroupVersion() == rhs.GroupVersion() {
+		return lhs.Resource < rhs.Resource
+	}
+
+	// otherwise, the difference is in the GroupVersion, so we need to sort with respect to the preferred order
+	lhsIndex := -1
+	rhsIndex := -1
+
+	for i := range o.sortOrder {
+		if o.sortOrder[i] == lhs.GroupVersion() {
+			lhsIndex = i
+		}
+		if o.sortOrder[i] == rhs.GroupVersion() {
+			rhsIndex = i
+		}
+	}
+
+	if rhsIndex == -1 {
+		return true
+	}
+
+	return lhsIndex < rhsIndex
+}
+
+// RESTMapping returns a struct representing the resource path and conversion interfaces a
+// RESTClient should use to operate on the provided group/kind in order of versions. If a version search
+// order is not provided, the search order provided to DefaultRESTMapper will be used to resolve which
+// version should be used to access the named group/kind.
+func (m *DefaultRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*RESTMapping, error) {
+	mappings, err := m.RESTMappings(gk, versions...)
+	if err != nil {
+		return nil, err
+	}
+	if len(mappings) == 0 {
+		return nil, &NoKindMatchError{GroupKind: gk, SearchedVersions: versions}
+	}
+	// Since we rely on the RESTMappings method, take the first match
+	// and return it to the caller, as this was the existing behavior.
+	return mappings[0], nil
+}
+
+// RESTMappings returns the RESTMappings for the provided group kind. If a version search order
+// is not provided, the search order provided to DefaultRESTMapper will be used.
+func (m *DefaultRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*RESTMapping, error) {
+	mappings := make([]*RESTMapping, 0)
+	potentialGVK := make([]schema.GroupVersionKind, 0)
+	hadVersion := false
+
+	// Pick an appropriate version
+	for _, version := range versions {
+		if len(version) == 0 || version == runtime.APIVersionInternal {
+			continue
+		}
+		currGVK := gk.WithVersion(version)
+		hadVersion = true
+		if _, ok := m.kindToPluralResource[currGVK]; ok {
+			potentialGVK = append(potentialGVK, currGVK)
+			break
+		}
+	}
+	// Use the default preferred versions
+	if !hadVersion && len(potentialGVK) == 0 {
+		for _, gv := range m.defaultGroupVersions {
+			if gv.Group != gk.Group {
+				continue
+			}
+			potentialGVK = append(potentialGVK, gk.WithVersion(gv.Version))
+		}
+	}
+
+	if len(potentialGVK) == 0 {
+		return nil, &NoKindMatchError{GroupKind: gk, SearchedVersions: versions}
+	}
+
+	for _, gvk := range potentialGVK {
+		// Ensure we have a REST mapping
+		res, ok := m.kindToPluralResource[gvk]
+		if !ok {
+			continue
+		}
+
+		// Ensure we have a REST scope
+		scope, ok := m.kindToScope[gvk]
+		if !ok {
+			return nil, fmt.Errorf("the provided version %q and kind %q cannot be mapped to a supported scope", gvk.GroupVersion(), gvk.Kind)
+		}
+
+		mappings = append(mappings, &RESTMapping{
+			Resource:         res,
+			GroupVersionKind: gvk,
+			Scope:            scope,
+		})
+	}
+
+	if len(mappings) == 0 {
+		return nil, &NoResourceMatchError{PartialResource: schema.GroupVersionResource{Group: gk.Group, Resource: gk.Kind}}
+	}
+	return mappings, nil
+}
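+
+// Illustrative usage (editor's sketch; the group and kind are only examples):
+//
+//	gv := schema.GroupVersion{Group: "apps", Version: "v1"}
+//	mapper := NewDefaultRESTMapper([]schema.GroupVersion{gv})
+//	mapper.Add(gv.WithKind("Deployment"), RESTScopeNamespace)
+//	mapping, _ := mapper.RESTMapping(schema.GroupKind{Group: "apps", Kind: "Deployment"})
+//	// mapping.Resource is the apps/v1 "deployments" resource, scoped to a namespace.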
diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS
new file mode 100755
index 0000000..c430067
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS
@@ -0,0 +1,16 @@
+reviewers:
+- thockin
+- lavalamp
+- smarterclayton
+- wojtek-t
+- derekwaynecarr
+- mikedanese
+- saad-ali
+- janetkuo
+- tallclair
+- eparis
+- jbeda
+- xiang90
+- mbohlool
+- david-mcmahon
+- goltermann
diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/amount.go b/vendor/k8s.io/apimachinery/pkg/api/resource/amount.go
new file mode 100644
index 0000000..a8866a4
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/resource/amount.go
@@ -0,0 +1,299 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resource
+
+import (
+	"math/big"
+	"strconv"
+
+	inf "gopkg.in/inf.v0"
+)
+
+// Scale is used for getting and setting the base-10 scaled value.
+// Base-2 scales are omitted for mathematical simplicity.
+// See Quantity.ScaledValue for more details.
+type Scale int32
+
+// infScale adapts a Scale value to an inf.Scale value.
+func (s Scale) infScale() inf.Scale {
+	return inf.Scale(-s) // inf.Scale is upside-down
+}
+
+const (
+	Nano  Scale = -9
+	Micro Scale = -6
+	Milli Scale = -3
+	Kilo  Scale = 3
+	Mega  Scale = 6
+	Giga  Scale = 9
+	Tera  Scale = 12
+	Peta  Scale = 15
+	Exa   Scale = 18
+)
+
+var (
+	Zero = int64Amount{}
+
+	// Used by quantity strings - treat as read only
+	zeroBytes = []byte("0")
+)
+
+// int64Amount represents a fixed precision numerator and arbitrary scale exponent. It is faster
+// than operations on inf.Dec for values that can be represented as int64.
+// +k8s:openapi-gen=true
+type int64Amount struct {
+	value int64
+	scale Scale
+}
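+
+// For example (editor's note): int64Amount{value: 15, scale: -1} represents 1.5,
+// and int64Amount{value: 2, scale: 3} represents 2000, i.e. value * 10^scale.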
+
+// Sign returns 0 if the value is zero, -1 if it is less than 0, or 1 if it is greater than 0.
+func (a int64Amount) Sign() int {
+	switch {
+	case a.value == 0:
+		return 0
+	case a.value > 0:
+		return 1
+	default:
+		return -1
+	}
+}
+
+// AsInt64 returns the current amount as an int64 at scale 0, or false if the value cannot be
+// represented in an int64 OR would result in a loss of precision. This method is intended as
+// an optimization to avoid calling AsDec.
+func (a int64Amount) AsInt64() (int64, bool) {
+	if a.scale == 0 {
+		return a.value, true
+	}
+	if a.scale < 0 {
+		// TODO: attempt to reduce factors, although it is assumed that factors are reduced prior
+		// to the int64Amount being created.
+		return 0, false
+	}
+	return positiveScaleInt64(a.value, a.scale)
+}
+
+// AsScaledInt64 returns an int64 representing the value of this amount at the specified scale,
+// rounding up, or false if that would result in overflow. (1e20).AsScaledInt64(1) would result
+// in overflow because 1e19 is not representable as an int64. Note that setting a scale larger
+// than the current scale may result in loss of precision - i.e. (1e-6).AsScaledInt64(0) would
+// return 1, because 0.000001 is rounded up to 1.
+func (a int64Amount) AsScaledInt64(scale Scale) (result int64, ok bool) {
+	if a.scale < scale {
+		result, _ = negativeScaleInt64(a.value, scale-a.scale)
+		return result, true
+	}
+	return positiveScaleInt64(a.value, a.scale-scale)
+}
+
+// AsDec returns an inf.Dec representation of this value.
+func (a int64Amount) AsDec() *inf.Dec {
+	var base inf.Dec
+	base.SetUnscaled(a.value)
+	base.SetScale(inf.Scale(-a.scale))
+	return &base
+}
+
+// Cmp returns 0 if a and b are equal, 1 if a is greater than b, or -1 if a is less than b.
+func (a int64Amount) Cmp(b int64Amount) int {
+	switch {
+	case a.scale == b.scale:
+		// compare only the unscaled portion
+	case a.scale > b.scale:
+		result, remainder, exact := divideByScaleInt64(b.value, a.scale-b.scale)
+		if !exact {
+			return a.AsDec().Cmp(b.AsDec())
+		}
+		if result == a.value {
+			switch {
+			case remainder == 0:
+				return 0
+			case remainder > 0:
+				return -1
+			default:
+				return 1
+			}
+		}
+		b.value = result
+	default:
+		result, remainder, exact := divideByScaleInt64(a.value, b.scale-a.scale)
+		if !exact {
+			return a.AsDec().Cmp(b.AsDec())
+		}
+		if result == b.value {
+			switch {
+			case remainder == 0:
+				return 0
+			case remainder > 0:
+				return 1
+			default:
+				return -1
+			}
+		}
+		a.value = result
+	}
+
+	switch {
+	case a.value == b.value:
+		return 0
+	case a.value < b.value:
+		return -1
+	default:
+		return 1
+	}
+}
+
+// Add adds two int64Amounts together, matching scales. It will return false and not mutate
+// a if overflow or underflow would result.
+func (a *int64Amount) Add(b int64Amount) bool {
+	switch {
+	case b.value == 0:
+		return true
+	case a.value == 0:
+		a.value = b.value
+		a.scale = b.scale
+		return true
+	case a.scale == b.scale:
+		c, ok := int64Add(a.value, b.value)
+		if !ok {
+			return false
+		}
+		a.value = c
+	case a.scale > b.scale:
+		c, ok := positiveScaleInt64(a.value, a.scale-b.scale)
+		if !ok {
+			return false
+		}
+		c, ok = int64Add(c, b.value)
+		if !ok {
+			return false
+		}
+		a.scale = b.scale
+		a.value = c
+	default:
+		c, ok := positiveScaleInt64(b.value, b.scale-a.scale)
+		if !ok {
+			return false
+		}
+		c, ok = int64Add(a.value, c)
+		if !ok {
+			return false
+		}
+		a.value = c
+	}
+	return true
+}
+
+// Sub removes the value of b from the current amount, or returns false if underflow would result.
+func (a *int64Amount) Sub(b int64Amount) bool {
+	return a.Add(int64Amount{value: -b.value, scale: b.scale})
+}
+
+// AsScale adjusts this amount to set a minimum scale, rounding up, and returns true iff no precision
+// was lost. (1.1e5).AsScale(5) would return 1.1e5, but (1.1e5).AsScale(6) would return 1e6.
+func (a int64Amount) AsScale(scale Scale) (int64Amount, bool) {
+	if a.scale >= scale {
+		return a, true
+	}
+	result, exact := negativeScaleInt64(a.value, scale-a.scale)
+	return int64Amount{value: result, scale: scale}, exact
+}
+
+// AsCanonicalBytes accepts a buffer to write the base-10 string value of this field to, and returns
+// either that buffer or a larger buffer and the current exponent of the value. The value is adjusted
+// until the exponent is a multiple of 3 - i.e. 1.1e5 would return "110", 3.
+func (a int64Amount) AsCanonicalBytes(out []byte) (result []byte, exponent int32) {
+	mantissa := a.value
+	exponent = int32(a.scale)
+
+	amount, times := removeInt64Factors(mantissa, 10)
+	exponent += int32(times)
+
+	// make sure exponent is a multiple of 3
+	var ok bool
+	switch exponent % 3 {
+	case 1, -2:
+		amount, ok = int64MultiplyScale10(amount)
+		if !ok {
+			return infDecAmount{a.AsDec()}.AsCanonicalBytes(out)
+		}
+		exponent = exponent - 1
+	case 2, -1:
+		amount, ok = int64MultiplyScale100(amount)
+		if !ok {
+			return infDecAmount{a.AsDec()}.AsCanonicalBytes(out)
+		}
+		exponent = exponent - 2
+	}
+	return strconv.AppendInt(out, amount, 10), exponent
+}
+
+// AsCanonicalBase1024Bytes accepts a buffer to write the base-1024 string value of this field to, and returns
+// either that buffer or a larger buffer and the current exponent of the value. 2048 is 2 * 1024 ^ 1 and would
+// return []byte("2048"), 1.
+func (a int64Amount) AsCanonicalBase1024Bytes(out []byte) (result []byte, exponent int32) {
+	value, ok := a.AsScaledInt64(0)
+	if !ok {
+		return infDecAmount{a.AsDec()}.AsCanonicalBase1024Bytes(out)
+	}
+	amount, exponent := removeInt64Factors(value, 1024)
+	return strconv.AppendInt(out, amount, 10), exponent
+}
+
+// infDecAmount implements common operations over an inf.Dec that are specific to the quantity
+// representation.
+type infDecAmount struct {
+	*inf.Dec
+}
+
+// AsScale adjusts this amount to set a minimum scale, rounding up, and returns true iff no precision
+// was lost. (1.1e5).AsScale(5) would return 1.1e5, but (1.1e5).AsScale(6) would return 1e6.
+func (a infDecAmount) AsScale(scale Scale) (infDecAmount, bool) {
+	tmp := &inf.Dec{}
+	tmp.Round(a.Dec, scale.infScale(), inf.RoundUp)
+	return infDecAmount{tmp}, tmp.Cmp(a.Dec) == 0
+}
+
+// AsCanonicalBytes accepts a buffer to write the base-10 string value of this field to, and returns
+// either that buffer or a larger buffer and the current exponent of the value. The value is adjusted
+// until the exponent is a multiple of 3 - i.e. 1.1e5 would return "110", 3.
+func (a infDecAmount) AsCanonicalBytes(out []byte) (result []byte, exponent int32) {
+	mantissa := a.Dec.UnscaledBig()
+	exponent = int32(-a.Dec.Scale())
+	amount := big.NewInt(0).Set(mantissa)
+	// move all factors of 10 into the exponent for easy reasoning
+	amount, times := removeBigIntFactors(amount, bigTen)
+	exponent += times
+
+	// make sure exponent is a multiple of 3
+	for exponent%3 != 0 {
+		amount.Mul(amount, bigTen)
+		exponent--
+	}
+
+	return append(out, amount.String()...), exponent
+}
+
+// AsCanonicalBase1024Bytes accepts a buffer to write the base-1024 string value of this field to, and returns
+// either that buffer or a larger buffer and the current exponent of the value. 2048 is 2 * 1024 ^ 1 and would
+// return []byte("2048"), 1.
+func (a infDecAmount) AsCanonicalBase1024Bytes(out []byte) (result []byte, exponent int32) {
+	tmp := &inf.Dec{}
+	tmp.Round(a.Dec, 0, inf.RoundUp)
+	amount, exponent := removeBigIntFactors(tmp.UnscaledBig(), big1024)
+	return append(out, amount.String()...), exponent
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto
new file mode 100644
index 0000000..acc9044
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto
@@ -0,0 +1,88 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.apimachinery.pkg.api.resource;
+
+// Package-wide variables from generator "generated".
+option go_package = "resource";
+
+// Quantity is a fixed-point representation of a number.
+// It provides convenient marshaling/unmarshaling in JSON and YAML,
+// in addition to String() and Int64() accessors.
+//
+// The serialization format is:
+//
+// <quantity>        ::= <signedNumber><suffix>
+//   (Note that <suffix> may be empty, from the "" case in <decimalSI>.)
+// <digit>           ::= 0 | 1 | ... | 9
+// <digits>          ::= <digit> | <digit><digits>
+// <number>          ::= <digits> | <digits>.<digits> | <digits>. | .<digits>
+// <sign>            ::= "+" | "-"
+// <signedNumber>    ::= <number> | <sign><number>
+// <suffix>          ::= <binarySI> | <decimalExponent> | <decimalSI>
+// <binarySI>        ::= Ki | Mi | Gi | Ti | Pi | Ei
+//   (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)
+// <decimalSI>       ::= m | "" | k | M | G | T | P | E
+//   (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)
+// <decimalExponent> ::= "e" <signedNumber> | "E" <signedNumber>
+//
+// No matter which of the three exponent forms is used, no quantity may represent
+// a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal
+// places. Numbers larger or more precise will be capped or rounded up.
+// (E.g.: 0.1m will be rounded up to 1m.)
+// This may be extended in the future if we require larger or smaller quantities.
+//
+// When a Quantity is parsed from a string, it will remember the type of suffix
+// it had, and will use the same type again when it is serialized.
+//
+// Before serializing, Quantity will be put in "canonical form".
+// This means that Exponent/suffix will be adjusted up or down (with a
+// corresponding increase or decrease in Mantissa) such that:
+//   a. No precision is lost
+//   b. No fractional digits will be emitted
+//   c. The exponent (or suffix) is as large as possible.
+// The sign will be omitted unless the number is negative.
+//
+// Examples:
+//   1.5 will be serialized as "1500m"
+//   1.5Gi will be serialized as "1536Mi"
+//
+// Note that the quantity will NEVER be internally represented by a
+// floating point number. That is the whole point of this exercise.
+//
+// Non-canonical values will still parse as long as they are well formed,
+// but will be re-emitted in their canonical form. (So always use canonical
+// form, or don't diff.)
+//
+// This format is intended to make it difficult to use these numbers without
+// writing some sort of special handling code in the hopes that that will
+// cause implementors to also use a fixed point implementation.
+//
+// +protobuf=true
+// +protobuf.embed=string
+// +protobuf.options.marshal=false
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+// +k8s:deepcopy-gen=true
+// +k8s:openapi-gen=true
+message Quantity {
+  optional string string = 1;
+}
+
diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/math.go b/vendor/k8s.io/apimachinery/pkg/api/resource/math.go
new file mode 100644
index 0000000..72d3880
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/resource/math.go
@@ -0,0 +1,314 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resource
+
+import (
+	"math/big"
+
+	inf "gopkg.in/inf.v0"
+)
+
+const (
+	// maxInt64Factors is the highest value that will be checked when removing factors of 10 from an int64.
+	// It is also the maximum decimal digits that can be represented with an int64.
+	maxInt64Factors = 18
+)
+
+var (
+	// Commonly needed big.Int values-- treat as read only!
+	bigTen      = big.NewInt(10)
+	bigZero     = big.NewInt(0)
+	bigOne      = big.NewInt(1)
+	bigThousand = big.NewInt(1000)
+	big1024     = big.NewInt(1024)
+
+	// Commonly needed inf.Dec values-- treat as read only!
+	decZero      = inf.NewDec(0, 0)
+	decOne       = inf.NewDec(1, 0)
+	decMinusOne  = inf.NewDec(-1, 0)
+	decThousand  = inf.NewDec(1000, 0)
+	dec1024      = inf.NewDec(1024, 0)
+	decMinus1024 = inf.NewDec(-1024, 0)
+
+	// Largest (in magnitude) number allowed.
+	maxAllowed = infDecAmount{inf.NewDec((1<<63)-1, 0)} // == max int64
+
+	// The maximum value we can represent milli-units for.
+	// Compare with the return value of Quantity.Value() to
+	// see if it's safe to use Quantity.MilliValue().
+	MaxMilliValue = int64(((1 << 63) - 1) / 1000)
+)
+
+const mostNegative = -(mostPositive + 1)
+const mostPositive = 1<<63 - 1
+
+// int64Add returns a+b, or false if that would overflow int64.
+func int64Add(a, b int64) (int64, bool) {
+	c := a + b
+	switch {
+	case a > 0 && b > 0:
+		if c < 0 {
+			return 0, false
+		}
+	case a < 0 && b < 0:
+		if c > 0 {
+			return 0, false
+		}
+		if a == mostNegative && b == mostNegative {
+			return 0, false
+		}
+	}
+	return c, true
+}
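+
+// For example (editor's note): int64Add(1, 2) returns (3, true), while
+// int64Add(mostPositive, 1) reports false because the sum wraps around.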
+
+// int64Multiply returns a*b, or false if that would overflow or underflow int64.
+func int64Multiply(a, b int64) (int64, bool) {
+	if a == 0 || b == 0 || a == 1 || b == 1 {
+		return a * b, true
+	}
+	if a == mostNegative || b == mostNegative {
+		return 0, false
+	}
+	c := a * b
+	return c, c/b == a
+}
+
+// int64MultiplyScale returns a*b, assuming b is greater than one, or false if that would overflow or underflow int64.
+// Use when b is known to be greater than one.
+func int64MultiplyScale(a int64, b int64) (int64, bool) {
+	if a == 0 || a == 1 {
+		return a * b, true
+	}
+	if a == mostNegative && b != 1 {
+		return 0, false
+	}
+	c := a * b
+	return c, c/b == a
+}
+
+// int64MultiplyScale10 multiplies a by 10, or returns false if that would overflow. This method is faster than
+// int64Multiply(a, 10) because the compiler can optimize constant factor multiplication.
+func int64MultiplyScale10(a int64) (int64, bool) {
+	if a == 0 || a == 1 {
+		return a * 10, true
+	}
+	if a == mostNegative {
+		return 0, false
+	}
+	c := a * 10
+	return c, c/10 == a
+}
+
+// int64MultiplyScale100 multiplies a by 100, or returns false if that would overflow. This method is faster than
+// int64Multiply(a, 100) because the compiler can optimize constant factor multiplication.
+func int64MultiplyScale100(a int64) (int64, bool) {
+	if a == 0 || a == 1 {
+		return a * 100, true
+	}
+	if a == mostNegative {
+		return 0, false
+	}
+	c := a * 100
+	return c, c/100 == a
+}
+
+// int64MultiplyScale1000 multiplies a by 1000, or returns false if that would overflow. This method is faster than
+// int64Multiply(a, 1000) because the compiler can optimize constant factor multiplication.
+func int64MultiplyScale1000(a int64) (int64, bool) {
+	if a == 0 || a == 1 {
+		return a * 1000, true
+	}
+	if a == mostNegative {
+		return 0, false
+	}
+	c := a * 1000
+	return c, c/1000 == a
+}
+
+// positiveScaleInt64 multiplies base by 10^scale, returning false if the
+// value overflows. Passing a negative scale is undefined.
+func positiveScaleInt64(base int64, scale Scale) (int64, bool) {
+	switch scale {
+	case 0:
+		return base, true
+	case 1:
+		return int64MultiplyScale10(base)
+	case 2:
+		return int64MultiplyScale100(base)
+	case 3:
+		return int64MultiplyScale1000(base)
+	case 6:
+		return int64MultiplyScale(base, 1000000)
+	case 9:
+		return int64MultiplyScale(base, 1000000000)
+	default:
+		value := base
+		var ok bool
+		for i := Scale(0); i < scale; i++ {
+			if value, ok = int64MultiplyScale(value, 10); !ok {
+				return 0, false
+			}
+		}
+		return value, true
+	}
+}
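+
+// For example (editor's note): positiveScaleInt64(25, 2) returns (2500, true),
+// while positiveScaleInt64(mostPositive, 1) reports ok == false because the
+// result would overflow int64.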
+
+// negativeScaleInt64 reduces base by the provided scale, rounding up, until the
+// value is zero or the scale is reached. Passing a negative scale is undefined.
+// The value returned, if not exact, is rounded away from zero.
+func negativeScaleInt64(base int64, scale Scale) (result int64, exact bool) {
+	if scale == 0 {
+		return base, true
+	}
+
+	value := base
+	var fraction bool
+	for i := Scale(0); i < scale; i++ {
+		if !fraction && value%10 != 0 {
+			fraction = true
+		}
+		value = value / 10
+		if value == 0 {
+			if fraction {
+				if base > 0 {
+					return 1, false
+				}
+				return -1, false
+			}
+			return 0, true
+		}
+	}
+	if fraction {
+		if base > 0 {
+			value += 1
+		} else {
+			value += -1
+		}
+	}
+	return value, !fraction
+}
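+
+// For example (editor's note): negativeScaleInt64(1001, 2) returns (11, false),
+// since 1001/100 rounds away from zero to 11, while negativeScaleInt64(1000, 3)
+// returns (1, true) because the division is exact.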
+
+func pow10Int64(b int64) int64 {
+	switch b {
+	case 0:
+		return 1
+	case 1:
+		return 10
+	case 2:
+		return 100
+	case 3:
+		return 1000
+	case 4:
+		return 10000
+	case 5:
+		return 100000
+	case 6:
+		return 1000000
+	case 7:
+		return 10000000
+	case 8:
+		return 100000000
+	case 9:
+		return 1000000000
+	case 10:
+		return 10000000000
+	case 11:
+		return 100000000000
+	case 12:
+		return 1000000000000
+	case 13:
+		return 10000000000000
+	case 14:
+		return 100000000000000
+	case 15:
+		return 1000000000000000
+	case 16:
+		return 10000000000000000
+	case 17:
+		return 100000000000000000
+	case 18:
+		return 1000000000000000000
+	default:
+		return 0
+	}
+}
+
+// divideByScaleInt64 returns the result of dividing base by 10^scale and the remainder, or
+// false if no such division is possible. Dividing by negative scales is undefined.
+func divideByScaleInt64(base int64, scale Scale) (result, remainder int64, exact bool) {
+	if scale == 0 {
+		return base, 0, true
+	}
+	// the max scale representable in base 10 in an int64 is 18 decimal places
+	if scale >= 18 {
+		return 0, base, false
+	}
+	divisor := pow10Int64(int64(scale))
+	return base / divisor, base % divisor, true
+}
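+
+// For example (editor's note): divideByScaleInt64(1234, 2) returns (12, 34, true),
+// since 1234 == 12*100 + 34.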
+
+// removeInt64Factors divides in a loop; the return values have the property that
+// value == result * base ^ times
+func removeInt64Factors(value int64, base int64) (result int64, times int32) {
+	times = 0
+	result = value
+	negative := result < 0
+	if negative {
+		result = -result
+	}
+	switch base {
+	// allow the compiler to optimize the common cases
+	case 10:
+		for result >= 10 && result%10 == 0 {
+			times++
+			result = result / 10
+		}
+	// allow the compiler to optimize the common cases
+	case 1024:
+		for result >= 1024 && result%1024 == 0 {
+			times++
+			result = result / 1024
+		}
+	default:
+		for result >= base && result%base == 0 {
+			times++
+			result = result / base
+		}
+	}
+	if negative {
+		result = -result
+	}
+	return result, times
+}
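+
+// For example (editor's note): removeInt64Factors(18000, 10) returns (18, 3),
+// since 18000 == 18 * 10^3.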
+
+// removeBigIntFactors divides in a loop; the return values have the property that
+// d == result * factor ^ times
+// d may be modified in place.
+// If d == 0, then the return values will be (0, 0)
+func removeBigIntFactors(d, factor *big.Int) (result *big.Int, times int32) {
+	q := big.NewInt(0)
+	m := big.NewInt(0)
+	for d.Cmp(bigZero) != 0 {
+		q.DivMod(d, factor, m)
+		if m.Cmp(bigZero) != 0 {
+			break
+		}
+		times++
+		d, q = q, d
+	}
+	return d, times
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go
new file mode 100644
index 0000000..b155a62
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go
@@ -0,0 +1,738 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resource
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"math/big"
+	"strconv"
+	"strings"
+
+	inf "gopkg.in/inf.v0"
+)
+
+// Quantity is a fixed-point representation of a number.
+// It provides convenient marshaling/unmarshaling in JSON and YAML,
+// in addition to String() and Int64() accessors.
+//
+// The serialization format is:
+//
+// <quantity>        ::= <signedNumber><suffix>
+//   (Note that <suffix> may be empty, from the "" case in <decimalSI>.)
+// <digit>           ::= 0 | 1 | ... | 9
+// <digits>          ::= <digit> | <digit><digits>
+// <number>          ::= <digits> | <digits>.<digits> | <digits>. | .<digits>
+// <sign>            ::= "+" | "-"
+// <signedNumber>    ::= <number> | <sign><number>
+// <suffix>          ::= <binarySI> | <decimalExponent> | <decimalSI>
+// <binarySI>        ::= Ki | Mi | Gi | Ti | Pi | Ei
+//   (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)
+// <decimalSI>       ::= m | "" | k | M | G | T | P | E
+//   (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)
+// <decimalExponent> ::= "e" <signedNumber> | "E" <signedNumber>
+//
+// No matter which of the three exponent forms is used, no quantity may represent
+// a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal
+// places. Numbers larger or more precise will be capped or rounded up.
+// (E.g.: 0.1m will be rounded up to 1m.)
+// This may be extended in the future if we require larger or smaller quantities.
+//
+// When a Quantity is parsed from a string, it will remember the type of suffix
+// it had, and will use the same type again when it is serialized.
+//
+// Before serializing, Quantity will be put in "canonical form".
+// This means that Exponent/suffix will be adjusted up or down (with a
+// corresponding increase or decrease in Mantissa) such that:
+//   a. No precision is lost
+//   b. No fractional digits will be emitted
+//   c. The exponent (or suffix) is as large as possible.
+// The sign will be omitted unless the number is negative.
+//
+// Examples:
+//   1.5 will be serialized as "1500m"
+//   1.5Gi will be serialized as "1536Mi"
+//
+// Note that the quantity will NEVER be internally represented by a
+// floating point number. That is the whole point of this exercise.
+//
+// Non-canonical values will still parse as long as they are well formed,
+// but will be re-emitted in their canonical form. (So always use canonical
+// form, or don't diff.)
+//
+// This format is intended to make it difficult to use these numbers without
+// writing some sort of special handling code in the hopes that that will
+// cause implementors to also use a fixed point implementation.
+//
+// +protobuf=true
+// +protobuf.embed=string
+// +protobuf.options.marshal=false
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+// +k8s:deepcopy-gen=true
+// +k8s:openapi-gen=true
+type Quantity struct {
+	// i is the quantity in int64 scaled form, if d.Dec == nil
+	i int64Amount
+	// d is the quantity in inf.Dec form if d.Dec != nil
+	d infDecAmount
+	// s is the generated value of this quantity to avoid recalculation
+	s string
+
+	// Change Format at will. See the comment for Canonicalize for
+	// more details.
+	Format
+}
+
+// CanonicalValue allows a quantity amount to be converted to a string.
+type CanonicalValue interface {
+	// AsCanonicalBytes returns a byte array representing the string representation
+	// of the value mantissa and an int32 representing its exponent in base-10. Callers may
+	// pass a byte slice to the method to avoid allocations.
+	AsCanonicalBytes(out []byte) ([]byte, int32)
+	// AsCanonicalBase1024Bytes returns a byte array representing the string representation
+	// of the value mantissa and an int32 representing its exponent in base-1024. Callers
+	// may pass a byte slice to the method to avoid allocations.
+	AsCanonicalBase1024Bytes(out []byte) ([]byte, int32)
+}
+
+// Format lists the three possible formattings of a quantity.
+type Format string
+
+const (
+	DecimalExponent = Format("DecimalExponent") // e.g., 12e6
+	BinarySI        = Format("BinarySI")        // e.g., 12Mi (12 * 2^20)
+	DecimalSI       = Format("DecimalSI")       // e.g., 12M  (12 * 10^6)
+)
+
+// MustParse turns the given string into a quantity or panics; for tests
+// or other cases where you know the string is valid.
+func MustParse(str string) Quantity {
+	q, err := ParseQuantity(str)
+	if err != nil {
+		panic(fmt.Errorf("cannot parse '%v': %v", str, err))
+	}
+	return q
+}
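+
+// Illustrative usage (editor's sketch):
+//
+//	q := MustParse("1500m") // 1.5 in DecimalSI form
+//	k := MustParse("1Ki")   // 1024 in BinarySI form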
+
+const (
+	// splitREString is used to separate a number from its suffix; as such,
+	// this is overly permissive, but that's OK-- it will be checked later.
+	splitREString = "^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$"
+)
+
+var (
+	// Errors that could happen while parsing a string.
+	ErrFormatWrong = errors.New("quantities must match the regular expression '" + splitREString + "'")
+	ErrNumeric     = errors.New("unable to parse numeric part of quantity")
+	ErrSuffix      = errors.New("unable to parse quantity's suffix")
+)
+
+// parseQuantityString is a fast scanner for quantity values.
+func parseQuantityString(str string) (positive bool, value, num, denom, suffix string, err error) {
+	positive = true
+	pos := 0
+	end := len(str)
+
+	// handle leading sign
+	if pos < end {
+		switch str[0] {
+		case '-':
+			positive = false
+			pos++
+		case '+':
+			pos++
+		}
+	}
+
+	// strip leading zeros
+Zeroes:
+	for i := pos; ; i++ {
+		if i >= end {
+			num = "0"
+			value = num
+			return
+		}
+		switch str[i] {
+		case '0':
+			pos++
+		default:
+			break Zeroes
+		}
+	}
+
+	// extract the numerator
+Num:
+	for i := pos; ; i++ {
+		if i >= end {
+			num = str[pos:end]
+			value = str[0:end]
+			return
+		}
+		switch str[i] {
+		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+		default:
+			num = str[pos:i]
+			pos = i
+			break Num
+		}
+	}
+
+	// if we stripped all numerator positions, always return 0
+	if len(num) == 0 {
+		num = "0"
+	}
+
+	// handle a denominator
+	if pos < end && str[pos] == '.' {
+		pos++
+	Denom:
+		for i := pos; ; i++ {
+			if i >= end {
+				denom = str[pos:end]
+				value = str[0:end]
+				return
+			}
+			switch str[i] {
+			case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+			default:
+				denom = str[pos:i]
+				pos = i
+				break Denom
+			}
+		}
+		// TODO: we currently allow 1.G, but we may not want to in the future.
+		// if len(denom) == 0 {
+		// 	err = ErrFormatWrong
+		// 	return
+		// }
+	}
+	value = str[0:pos]
+
+	// grab the elements of the suffix
+	suffixStart := pos
+	for i := pos; ; i++ {
+		if i >= end {
+			suffix = str[suffixStart:end]
+			return
+		}
+		if !strings.ContainsAny(str[i:i+1], "eEinumkKMGTP") {
+			pos = i
+			break
+		}
+	}
+	if pos < end {
+		switch str[pos] {
+		case '-', '+':
+			pos++
+		}
+	}
+Suffix:
+	for i := pos; ; i++ {
+		if i >= end {
+			suffix = str[suffixStart:end]
+			return
+		}
+		switch str[i] {
+		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+		default:
+			break Suffix
+		}
+	}
+	// we encountered a non-decimal character in the Suffix loop, but the last
+	// character was not a valid exponent
+	err = ErrFormatWrong
+	return
+}
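+
+// For example (editor's note): parseQuantityString("1.5Gi") returns
+// positive=true, value="1.5", num="1", denom="5", suffix="Gi".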
+
+// ParseQuantity turns str into a Quantity, or returns an error.
+func ParseQuantity(str string) (Quantity, error) {
+	if len(str) == 0 {
+		return Quantity{}, ErrFormatWrong
+	}
+	if str == "0" {
+		return Quantity{Format: DecimalSI, s: str}, nil
+	}
+
+	positive, value, num, denom, suf, err := parseQuantityString(str)
+	if err != nil {
+		return Quantity{}, err
+	}
+
+	base, exponent, format, ok := quantitySuffixer.interpret(suffix(suf))
+	if !ok {
+		return Quantity{}, ErrSuffix
+	}
+
+	precision := int32(0)
+	scale := int32(0)
+	mantissa := int64(1)
+	switch format {
+	case DecimalExponent, DecimalSI:
+		scale = exponent
+		precision = maxInt64Factors - int32(len(num)+len(denom))
+	case BinarySI:
+		scale = 0
+		switch {
+		case exponent >= 0 && len(denom) == 0:
+			// only handle positive binary numbers with the fast path
+			mantissa = mantissa << uint64(exponent)
+			// 1Mi (2^20) has ~6 digits of decimal precision, so exponent*3/10 -1 is roughly the precision
+			precision = 15 - int32(len(num)) - int32(float32(exponent)*3/10) - 1
+		default:
+			precision = -1
+		}
+	}
+
+	if precision >= 0 {
+		// if we have a denominator, shift the entire value to the left by the number of places in the
+		// denominator
+		scale -= int32(len(denom))
+		if scale >= int32(Nano) {
+			shifted := num + denom
+
+			var value int64
+			value, err := strconv.ParseInt(shifted, 10, 64)
+			if err != nil {
+				return Quantity{}, ErrNumeric
+			}
+			if result, ok := int64Multiply(value, int64(mantissa)); ok {
+				if !positive {
+					result = -result
+				}
+				// if the number is in canonical form, reuse the string
+				switch format {
+				case BinarySI:
+					if exponent%10 == 0 && (value&0x07 != 0) {
+						return Quantity{i: int64Amount{value: result, scale: Scale(scale)}, Format: format, s: str}, nil
+					}
+				default:
+					if scale%3 == 0 && !strings.HasSuffix(shifted, "000") && shifted[0] != '0' {
+						return Quantity{i: int64Amount{value: result, scale: Scale(scale)}, Format: format, s: str}, nil
+					}
+				}
+				return Quantity{i: int64Amount{value: result, scale: Scale(scale)}, Format: format}, nil
+			}
+		}
+	}
+
+	amount := new(inf.Dec)
+	if _, ok := amount.SetString(value); !ok {
+		return Quantity{}, ErrNumeric
+	}
+
+	// So that no one but us has to think about suffixes, remove it.
+	if base == 10 {
+		amount.SetScale(amount.Scale() + Scale(exponent).infScale())
+	} else if base == 2 {
+		// numericSuffix = 2 ** exponent
+		numericSuffix := big.NewInt(1).Lsh(bigOne, uint(exponent))
+		ub := amount.UnscaledBig()
+		amount.SetUnscaledBig(ub.Mul(ub, numericSuffix))
+	}
+
+	// Cap at min/max bounds.
+	sign := amount.Sign()
+	if sign == -1 {
+		amount.Neg(amount)
+	}
+
+	// This rounds non-zero values up to the minimum representable value, under the theory that
+	// if you want some resources, you should get some resources, even if you asked for way too small
+	// of an amount.  Arguably, this should be inf.RoundHalfUp (normal rounding), but that would have
+	// the side effect of rounding values < .5n to zero.
+	if v, ok := amount.Unscaled(); v != int64(0) || !ok {
+		amount.Round(amount, Nano.infScale(), inf.RoundUp)
+	}
+
+	// The max is just a simple cap.
+	// TODO: this prevents accumulating quantities greater than int64, for instance quota across a cluster
+	if format == BinarySI && amount.Cmp(maxAllowed.Dec) > 0 {
+		amount.Set(maxAllowed.Dec)
+	}
+
+	if format == BinarySI && amount.Cmp(decOne) < 0 && amount.Cmp(decZero) > 0 {
+		// This avoids rounding and hopefully confusion, too.
+		format = DecimalSI
+	}
+	if sign == -1 {
+		amount.Neg(amount)
+	}
+
+	return Quantity{d: infDecAmount{amount}, Format: format}, nil
+}
+
+// DeepCopy returns a deep-copy of the Quantity value.  Note that the method
+// receiver is a value, so we can mutate it in-place and return it.
+func (q Quantity) DeepCopy() Quantity {
+	if q.d.Dec != nil {
+		tmp := &inf.Dec{}
+		q.d.Dec = tmp.Set(q.d.Dec)
+	}
+	return q
+}
+
+// OpenAPISchemaType is used by the kube-openapi generator when constructing
+// the OpenAPI spec of this type.
+//
+// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators
+func (_ Quantity) OpenAPISchemaType() []string { return []string{"string"} }
+
+// OpenAPISchemaFormat is used by the kube-openapi generator when constructing
+// the OpenAPI spec of this type.
+func (_ Quantity) OpenAPISchemaFormat() string { return "" }
+
+// CanonicalizeBytes returns the canonical form of q and its suffix (see comment on Quantity).
+//
+// Note about BinarySI:
+// * If q.Format is set to BinarySI and q.Amount represents a non-zero value between
+//   -1 and +1, it will be emitted as if q.Format were DecimalSI.
+// * Otherwise, if q.Format is set to BinarySI, fractional parts of q.Amount will be
+//   rounded up. (1.1i becomes 2i.)
+func (q *Quantity) CanonicalizeBytes(out []byte) (result, suffix []byte) {
+	if q.IsZero() {
+		return zeroBytes, nil
+	}
+
+	var rounded CanonicalValue
+	format := q.Format
+	switch format {
+	case DecimalExponent, DecimalSI:
+	case BinarySI:
+		if q.CmpInt64(-1024) > 0 && q.CmpInt64(1024) < 0 {
+			// This avoids rounding and hopefully confusion, too.
+			format = DecimalSI
+		} else {
+			var exact bool
+			if rounded, exact = q.AsScale(0); !exact {
+				// Don't lose precision-- show as DecimalSI
+				format = DecimalSI
+			}
+		}
+	default:
+		format = DecimalExponent
+	}
+
+	// TODO: If BinarySI formatting is requested but would cause rounding, upgrade to
+	// one of the other formats.
+	switch format {
+	case DecimalExponent, DecimalSI:
+		number, exponent := q.AsCanonicalBytes(out)
+		suffix, _ := quantitySuffixer.constructBytes(10, exponent, format)
+		return number, suffix
+	default:
+		// format must be BinarySI
+		number, exponent := rounded.AsCanonicalBase1024Bytes(out)
+		suffix, _ := quantitySuffixer.constructBytes(2, exponent*10, format)
+		return number, suffix
+	}
+}
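+
+// editorCanonicalizeSketch is an editor's illustrative sketch, not upstream
+// code (the helper name is invented). It shows the two BinarySI rules above:
+// values strictly between -1024 and 1024 print in DecimalSI, while larger
+// values that are exact at scale 0 keep their binary suffix.
+func editorCanonicalizeSketch() (string, string) {
+	small := NewQuantity(500, BinarySI)
+	large := NewQuantity(2048, BinarySI)
+	return small.String(), large.String() // "500" and "2Ki"
+}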
+
+// AsInt64 returns a representation of the current value as an int64 if a fast conversion
+// is possible. If false is returned, callers must use the inf.Dec form of this quantity.
+func (q *Quantity) AsInt64() (int64, bool) {
+	if q.d.Dec != nil {
+		return 0, false
+	}
+	return q.i.AsInt64()
+}
+
+// ToDec promotes the quantity in place to use an inf.Dec representation and returns itself.
+func (q *Quantity) ToDec() *Quantity {
+	if q.d.Dec == nil {
+		q.d.Dec = q.i.AsDec()
+		q.i = int64Amount{}
+	}
+	return q
+}
+
+// AsDec returns the quantity as represented by a scaled inf.Dec.
+func (q *Quantity) AsDec() *inf.Dec {
+	if q.d.Dec != nil {
+		return q.d.Dec
+	}
+	q.d.Dec = q.i.AsDec()
+	q.i = int64Amount{}
+	return q.d.Dec
+}
+
+// AsCanonicalBytes returns the canonical byte representation of this quantity as a mantissa
+// and base 10 exponent. The out byte slice may be passed to the method to avoid an extra
+// allocation.
+func (q *Quantity) AsCanonicalBytes(out []byte) (result []byte, exponent int32) {
+	if q.d.Dec != nil {
+		return q.d.AsCanonicalBytes(out)
+	}
+	return q.i.AsCanonicalBytes(out)
+}
+
+// IsZero returns true if the quantity is equal to zero.
+func (q *Quantity) IsZero() bool {
+	if q.d.Dec != nil {
+		return q.d.Dec.Sign() == 0
+	}
+	return q.i.value == 0
+}
+
+// Sign returns 0 if the quantity is zero, -1 if the quantity is less than zero, or 1 if the
+// quantity is greater than zero.
+func (q *Quantity) Sign() int {
+	if q.d.Dec != nil {
+		return q.d.Dec.Sign()
+	}
+	return q.i.Sign()
+}
+
+// AsScale returns the current value rounded up to the provided scale; the second
+// return value is false if the rounding resulted in a loss of precision.
+func (q *Quantity) AsScale(scale Scale) (CanonicalValue, bool) {
+	if q.d.Dec != nil {
+		return q.d.AsScale(scale)
+	}
+	return q.i.AsScale(scale)
+}
+
+// RoundUp updates the quantity to the provided scale, ensuring that the value is at
+// least 1. False is returned if the rounding operation resulted in a loss of precision.
+// Negative numbers are rounded away from zero (-9 scale 1 rounds to -10).
+func (q *Quantity) RoundUp(scale Scale) bool {
+	if q.d.Dec != nil {
+		q.s = ""
+		d, exact := q.d.AsScale(scale)
+		q.d = d
+		return exact
+	}
+	// avoid clearing the string value if we have already calculated it
+	if q.i.scale >= scale {
+		return true
+	}
+	q.s = ""
+	i, exact := q.i.AsScale(scale)
+	q.i = i
+	return exact
+}
+
+// Add adds the provided y quantity to the current value. If the current value is zero,
+// the format of the quantity will be updated to the format of y.
+func (q *Quantity) Add(y Quantity) {
+	q.s = ""
+	if q.d.Dec == nil && y.d.Dec == nil {
+		if q.i.value == 0 {
+			q.Format = y.Format
+		}
+		if q.i.Add(y.i) {
+			return
+		}
+	} else if q.IsZero() {
+		q.Format = y.Format
+	}
+	q.ToDec().d.Dec.Add(q.d.Dec, y.AsDec())
+}
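+
+// editorAddSketch is an editor's illustrative sketch, not upstream code (the
+// helper name is invented). Adding into a zero quantity adopts the format of
+// y, and the int64 fast path is used while both operands fit.
+func editorAddSketch() string {
+	var total Quantity                      // zero value: 0 with an empty format
+	total.Add(*NewQuantity(1024, BinarySI)) // total adopts BinarySI
+	total.Add(*NewQuantity(1024, BinarySI))
+	return total.String() // "2Ki"
+}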
+
+// Sub subtracts the provided quantity from the current value in place. If the current
+// value is zero, the format of the quantity will be updated to the format of y.
+func (q *Quantity) Sub(y Quantity) {
+	q.s = ""
+	if q.IsZero() {
+		q.Format = y.Format
+	}
+	if q.d.Dec == nil && y.d.Dec == nil && q.i.Sub(y.i) {
+		return
+	}
+	q.ToDec().d.Dec.Sub(q.d.Dec, y.AsDec())
+}
+
+// Cmp returns 0 if the quantity is equal to y, -1 if the quantity is less than y, or 1 if the
+// quantity is greater than y.
+func (q *Quantity) Cmp(y Quantity) int {
+	if q.d.Dec == nil && y.d.Dec == nil {
+		return q.i.Cmp(y.i)
+	}
+	return q.AsDec().Cmp(y.AsDec())
+}
+
+// CmpInt64 returns 0 if the quantity is equal to y, -1 if the quantity is less than y, or 1 if the
+// quantity is greater than y.
+func (q *Quantity) CmpInt64(y int64) int {
+	if q.d.Dec != nil {
+		return q.d.Dec.Cmp(inf.NewDec(y, inf.Scale(0)))
+	}
+	return q.i.Cmp(int64Amount{value: y})
+}
+
+// Neg sets quantity to be the negative value of itself.
+func (q *Quantity) Neg() {
+	q.s = ""
+	if q.d.Dec == nil {
+		q.i.value = -q.i.value
+		return
+	}
+	q.d.Dec.Neg(q.d.Dec)
+}
+
+// int64QuantityExpectedBytes is the expected width in bytes of the canonical string representation
+// of most Quantity values.
+const int64QuantityExpectedBytes = 18
+
+// String formats the Quantity as a string, caching the result if not calculated.
+// String is an expensive operation and caching this result significantly reduces the cost of
+// normal parse / marshal operations on Quantity.
+func (q *Quantity) String() string {
+	if len(q.s) == 0 {
+		result := make([]byte, 0, int64QuantityExpectedBytes)
+		number, suffix := q.CanonicalizeBytes(result)
+		number = append(number, suffix...)
+		q.s = string(number)
+	}
+	return q.s
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (q Quantity) MarshalJSON() ([]byte, error) {
+	if len(q.s) > 0 {
+		out := make([]byte, len(q.s)+2)
+		out[0], out[len(out)-1] = '"', '"'
+		copy(out[1:], q.s)
+		return out, nil
+	}
+	result := make([]byte, int64QuantityExpectedBytes)
+	result[0] = '"'
+	number, suffix := q.CanonicalizeBytes(result[1:1])
+	// if the same slice was returned to us that we passed in, avoid another allocation by copying number into
+	// the source slice and returning that
+	if len(number) > 0 && &number[0] == &result[1] && (len(number)+len(suffix)+2) <= int64QuantityExpectedBytes {
+		number = append(number, suffix...)
+		number = append(number, '"')
+		return result[:1+len(number)], nil
+	}
+	// if CanonicalizeBytes needed more space than our slice provided, we may need to allocate again so use
+	// append
+	result = result[:1]
+	result = append(result, number...)
+	result = append(result, suffix...)
+	result = append(result, '"')
+	return result, nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+// TODO: Remove support for leading/trailing whitespace
+func (q *Quantity) UnmarshalJSON(value []byte) error {
+	l := len(value)
+	if l == 4 && bytes.Equal(value, []byte("null")) {
+		q.d.Dec = nil
+		q.i = int64Amount{}
+		return nil
+	}
+	if l >= 2 && value[0] == '"' && value[l-1] == '"' {
+		value = value[1 : l-1]
+	}
+
+	parsed, err := ParseQuantity(strings.TrimSpace(string(value)))
+	if err != nil {
+		return err
+	}
+
+	// This copy is safe because parsed will not be referred to again.
+	*q = parsed
+	return nil
+}
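+
+// editorJSONSketch is an editor's illustrative sketch, not upstream code (the
+// helper name is invented). It shows the quoted round trip implemented above:
+// a quantity marshals to its quoted canonical string and parses back through
+// ParseQuantity.
+func editorJSONSketch() (string, error) {
+	q := NewMilliQuantity(1500, DecimalSI)
+	b, err := q.MarshalJSON() // `"1500m"`
+	if err != nil {
+		return "", err
+	}
+	var back Quantity
+	if err := back.UnmarshalJSON(b); err != nil {
+		return "", err
+	}
+	return back.String(), nil // "1500m"
+}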
+
+// NewQuantity returns a new Quantity representing the given
+// value in the given format.
+func NewQuantity(value int64, format Format) *Quantity {
+	return &Quantity{
+		i:      int64Amount{value: value},
+		Format: format,
+	}
+}
+
+// NewMilliQuantity returns a new Quantity representing the given
+// value * 1/1000 in the given format. Note that BinarySI formatting
+// will round fractional values, and will be changed to DecimalSI for
+// values x where (-1 < x < 1) && (x != 0).
+func NewMilliQuantity(value int64, format Format) *Quantity {
+	return &Quantity{
+		i:      int64Amount{value: value, scale: -3},
+		Format: format,
+	}
+}
+
+// NewScaledQuantity returns a new Quantity representing the given
+// value * 10^scale in DecimalSI format.
+func NewScaledQuantity(value int64, scale Scale) *Quantity {
+	return &Quantity{
+		i:      int64Amount{value: value, scale: scale},
+		Format: DecimalSI,
+	}
+}
+
+// Value returns the value of q rounded up to the nearest integer away from zero (see ScaledValue).
+func (q *Quantity) Value() int64 {
+	return q.ScaledValue(0)
+}
+
+// MilliValue returns the value of ceil(q * 1000); this could overflow an int64;
+// if that's a concern, call Value() first to verify the number is small enough.
+func (q *Quantity) MilliValue() int64 {
+	return q.ScaledValue(Milli)
+}
+
+// ScaledValue returns the value of ceil(q * 10^scale); this could overflow an int64.
+// To detect overflow, call Value() first and verify the expected magnitude.
+func (q *Quantity) ScaledValue(scale Scale) int64 {
+	if q.d.Dec == nil {
+		i, _ := q.i.AsScaledInt64(scale)
+		return i
+	}
+	dec := q.d.Dec
+	return scaledValue(dec.UnscaledBig(), int(dec.Scale()), int(scale.infScale()))
+}
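+
+// editorScaledValueSketch is an editor's illustrative sketch, not upstream
+// code (the helper name is invented). It shows the ceiling behaviour
+// documented above: 2.5 scales to 3 at scale 0 and to 2500 at Milli.
+func editorScaledValueSketch() (int64, int64) {
+	q := NewMilliQuantity(2500, DecimalSI) // represents 2.5
+	return q.Value(), q.MilliValue()       // 3 and 2500
+}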
+
+// Set sets q's value to be value.
+func (q *Quantity) Set(value int64) {
+	q.SetScaled(value, 0)
+}
+
+// SetMilli sets q's value to be value * 1/1000.
+func (q *Quantity) SetMilli(value int64) {
+	q.SetScaled(value, Milli)
+}
+
+// SetScaled sets q's value to be value * 10^scale
+func (q *Quantity) SetScaled(value int64, scale Scale) {
+	q.s = ""
+	q.d.Dec = nil
+	q.i = int64Amount{value: value, scale: scale}
+}
+
+// Copy is a convenience function that makes a deep copy for you. Non-deep
+// copies of quantities share pointers and you will regret that.
+func (q *Quantity) Copy() *Quantity {
+	if q.d.Dec == nil {
+		return &Quantity{
+			s:      q.s,
+			i:      q.i,
+			Format: q.Format,
+		}
+	}
+	tmp := &inf.Dec{}
+	return &Quantity{
+		s:      q.s,
+		d:      infDecAmount{tmp.Set(q.d.Dec)},
+		Format: q.Format,
+	}
+}
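+
+// editorCopySketch is an editor's illustrative sketch, not upstream code (the
+// helper name is invented). It shows why Copy matters once a quantity holds
+// an inf.Dec: a plain struct copy would share the *inf.Dec pointer, so
+// mutating one quantity would silently mutate the other.
+func editorCopySketch() (string, string) {
+	q := NewQuantity(5, DecimalSI)
+	q.ToDec() // promote to the pointer-backed inf.Dec representation
+	safe := q.Copy()
+	q.Add(*NewQuantity(1, DecimalSI))
+	return q.String(), safe.String() // "6" and "5": the copy is unaffected
+}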
diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go
new file mode 100644
index 0000000..74dfb4e
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go
@@ -0,0 +1,284 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resource
+
+import (
+	"fmt"
+	"io"
+
+	"github.com/gogo/protobuf/proto"
+)
+
+var _ proto.Sizer = &Quantity{}
+
+func (m *Quantity) Marshal() (data []byte, err error) {
+	size := m.Size()
+	data = make([]byte, size)
+	n, err := m.MarshalTo(data)
+	if err != nil {
+		return nil, err
+	}
+	return data[:n], nil
+}
+
+// MarshalTo is a customized version of the generated Protobuf marshaler for a struct
+// with a single string field.
+func (m *Quantity) MarshalTo(data []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+
+	data[i] = 0xa
+	i++
+	// BEGIN CUSTOM MARSHAL
+	out := m.String()
+	i = encodeVarintGenerated(data, i, uint64(len(out)))
+	i += copy(data[i:], out)
+	// END CUSTOM MARSHAL
+
+	return i, nil
+}
+
+func encodeVarintGenerated(data []byte, offset int, v uint64) int {
+	for v >= 1<<7 {
+		data[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	data[offset] = uint8(v)
+	return offset + 1
+}
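+
+// editorVarintSketch is an editor's illustrative sketch, not upstream code
+// (the helper name is invented). encodeVarintGenerated emits 7 bits per byte,
+// least-significant group first, with the high bit set on every byte except
+// the last.
+func editorVarintSketch() []byte {
+	buf := make([]byte, 2)
+	n := encodeVarintGenerated(buf, 0, 300) // 300 -> 0xAC, 0x02
+	return buf[:n]                          // n == 2, the offset past the last byte
+}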
+
+func (m *Quantity) Size() (n int) {
+	var l int
+	_ = l
+
+	// BEGIN CUSTOM SIZE
+	l = len(m.String())
+	// END CUSTOM SIZE
+
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+// sovGenerated returns the number of bytes required to encode x as a varint.
+func sovGenerated(x uint64) (n int) {
+	for {
+		n++
+		x >>= 7
+		if x == 0 {
+			break
+		}
+	}
+	return n
+}
+
+// Unmarshal is a customized version of the generated Protobuf unmarshaler for a struct
+// with a single string field.
+func (m *Quantity) Unmarshal(data []byte) error {
+	l := len(data)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := data[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Quantity: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Quantity: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field String_", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := string(data[iNdEx:postIndex])
+
+			// BEGIN CUSTOM DECODE
+			p, err := ParseQuantity(s)
+			if err != nil {
+				return err
+			}
+			*m = p
+			// END CUSTOM DECODE
+
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(data[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
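+
+// editorProtoSketch is an editor's illustrative sketch, not upstream code
+// (the helper name is invented). The wire format above is field 1, wire type
+// 2 (length-delimited), carrying the canonical string, so a Marshal/Unmarshal
+// round trip preserves the canonical form.
+func editorProtoSketch() (string, error) {
+	q := NewQuantity(2048, BinarySI)
+	data, err := q.Marshal() // 0x0a, 0x03, '2', 'K', 'i'
+	if err != nil {
+		return "", err
+	}
+	var back Quantity
+	if err := back.Unmarshal(data); err != nil {
+		return "", err
+	}
+	return back.String(), nil // "2Ki"
+}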
+
+// skipGenerated returns the number of bytes occupied by the next field in
+// data, whatever its wire type, so that unknown fields can be skipped.
+func skipGenerated(data []byte) (n int, err error) {
+	l := len(data)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := data[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if data[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			iNdEx += length
+			if length < 0 {
+				return 0, ErrInvalidLengthGenerated
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := data[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipGenerated(data[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated   = fmt.Errorf("proto: integer overflow")
+)
diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/scale_int.go b/vendor/k8s.io/apimachinery/pkg/api/resource/scale_int.go
new file mode 100644
index 0000000..55e177b
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/resource/scale_int.go
@@ -0,0 +1,95 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resource
+
+import (
+	"math"
+	"math/big"
+	"sync"
+)
+
+var (
+	// A sync pool to reduce allocation.
+	intPool  sync.Pool
+	maxInt64 = big.NewInt(math.MaxInt64)
+)
+
+func init() {
+	intPool.New = func() interface{} {
+		return &big.Int{}
+	}
+}
+
+// scaledValue scales the given unscaled value from scale to newScale and returns
+// an int64. It ALWAYS rounds up the result when scaling down. The final result might
+// overflow.
+//
+// scale and newScale represent the scale of the unscaled decimal.
+// The mathematical value of the decimal is unscaled * 10**(-scale).
+func scaledValue(unscaled *big.Int, scale, newScale int) int64 {
+	dif := scale - newScale
+	if dif == 0 {
+		return unscaled.Int64()
+	}
+
+	// Handle scale up.
+	// This is the easy case: no rounding is needed, and if any intermediate
+	// operation overflows, the final result overflows anyway.
+	if dif < 0 {
+		return unscaled.Int64() * int64(math.Pow10(-dif))
+	}
+
+	// Handle scale down
+	// We have to be careful about the intermediate operations.
+
+	// fast path when unscaled < math.MaxInt64 and 10**dif < math.MaxInt64
+	const log10MaxInt64 = 19
+	if unscaled.Cmp(maxInt64) < 0 && dif < log10MaxInt64 {
+		divide := int64(math.Pow10(dif))
+		result := unscaled.Int64() / divide
+		mod := unscaled.Int64() % divide
+		if mod != 0 {
+			return result + 1
+		}
+		return result
+	}
+
+	// We should only convert back to int64 when getting the result.
+	divisor := intPool.Get().(*big.Int)
+	exp := intPool.Get().(*big.Int)
+	result := intPool.Get().(*big.Int)
+	defer func() {
+		intPool.Put(divisor)
+		intPool.Put(exp)
+		intPool.Put(result)
+	}()
+
+	// divisor = 10^(dif)
+	// TODO: create a lookup table if Exp costs too much.
+	divisor.Exp(bigTen, exp.SetInt64(int64(dif)), nil)
+	// reuse exp
+	remainder := exp
+
+	// result = unscaled / divisor
+	// remainder = unscaled % divisor
+	result.DivMod(unscaled, divisor, remainder)
+	if remainder.Sign() != 0 {
+		return result.Int64() + 1
+	}
+
+	return result.Int64()
+}
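+
+// editorScaleSketch is an editor's illustrative sketch, not upstream code
+// (the helper name is invented). Scaling 2500 * 10^-3 down to scale 0 takes
+// the fast path: divide by 10^3 and round the non-zero remainder up.
+func editorScaleSketch() int64 {
+	return scaledValue(big.NewInt(2500), 3, 0) // 3, i.e. ceil(2.5)
+}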
diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/suffix.go b/vendor/k8s.io/apimachinery/pkg/api/resource/suffix.go
new file mode 100644
index 0000000..5ed7abe
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/resource/suffix.go
@@ -0,0 +1,198 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resource
+
+import (
+	"strconv"
+)
+
+type suffix string
+
+// suffixer can interpret and construct suffixes.
+type suffixer interface {
+	interpret(suffix) (base, exponent int32, fmt Format, ok bool)
+	construct(base, exponent int32, fmt Format) (s suffix, ok bool)
+	constructBytes(base, exponent int32, fmt Format) (s []byte, ok bool)
+}
+
+// quantitySuffixer handles suffixes for all three formats that quantity
+// supports.
+var quantitySuffixer = newSuffixer()
+
+type bePair struct {
+	base, exponent int32
+}
+
+type listSuffixer struct {
+	suffixToBE      map[suffix]bePair
+	beToSuffix      map[bePair]suffix
+	beToSuffixBytes map[bePair][]byte
+}
+
+func (ls *listSuffixer) addSuffix(s suffix, pair bePair) {
+	if ls.suffixToBE == nil {
+		ls.suffixToBE = map[suffix]bePair{}
+	}
+	if ls.beToSuffix == nil {
+		ls.beToSuffix = map[bePair]suffix{}
+	}
+	if ls.beToSuffixBytes == nil {
+		ls.beToSuffixBytes = map[bePair][]byte{}
+	}
+	ls.suffixToBE[s] = pair
+	ls.beToSuffix[pair] = s
+	ls.beToSuffixBytes[pair] = []byte(s)
+}
+
+func (ls *listSuffixer) lookup(s suffix) (base, exponent int32, ok bool) {
+	pair, ok := ls.suffixToBE[s]
+	if !ok {
+		return 0, 0, false
+	}
+	return pair.base, pair.exponent, true
+}
+
+func (ls *listSuffixer) construct(base, exponent int32) (s suffix, ok bool) {
+	s, ok = ls.beToSuffix[bePair{base, exponent}]
+	return
+}
+
+func (ls *listSuffixer) constructBytes(base, exponent int32) (s []byte, ok bool) {
+	s, ok = ls.beToSuffixBytes[bePair{base, exponent}]
+	return
+}
+
+type suffixHandler struct {
+	decSuffixes listSuffixer
+	binSuffixes listSuffixer
+}
+
+type fastLookup struct {
+	*suffixHandler
+}
+
+func (l fastLookup) interpret(s suffix) (base, exponent int32, format Format, ok bool) {
+	switch s {
+	case "":
+		return 10, 0, DecimalSI, true
+	case "n":
+		return 10, -9, DecimalSI, true
+	case "u":
+		return 10, -6, DecimalSI, true
+	case "m":
+		return 10, -3, DecimalSI, true
+	case "k":
+		return 10, 3, DecimalSI, true
+	case "M":
+		return 10, 6, DecimalSI, true
+	case "G":
+		return 10, 9, DecimalSI, true
+	}
+	return l.suffixHandler.interpret(s)
+}
+
+func newSuffixer() suffixer {
+	sh := &suffixHandler{}
+
+	// IMPORTANT: if you change this section you must change fastLookup
+
+	sh.binSuffixes.addSuffix("Ki", bePair{2, 10})
+	sh.binSuffixes.addSuffix("Mi", bePair{2, 20})
+	sh.binSuffixes.addSuffix("Gi", bePair{2, 30})
+	sh.binSuffixes.addSuffix("Ti", bePair{2, 40})
+	sh.binSuffixes.addSuffix("Pi", bePair{2, 50})
+	sh.binSuffixes.addSuffix("Ei", bePair{2, 60})
+	// Don't emit an error when trying to produce
+	// a suffix for 2^0.
+	sh.decSuffixes.addSuffix("", bePair{2, 0})
+
+	sh.decSuffixes.addSuffix("n", bePair{10, -9})
+	sh.decSuffixes.addSuffix("u", bePair{10, -6})
+	sh.decSuffixes.addSuffix("m", bePair{10, -3})
+	sh.decSuffixes.addSuffix("", bePair{10, 0})
+	sh.decSuffixes.addSuffix("k", bePair{10, 3})
+	sh.decSuffixes.addSuffix("M", bePair{10, 6})
+	sh.decSuffixes.addSuffix("G", bePair{10, 9})
+	sh.decSuffixes.addSuffix("T", bePair{10, 12})
+	sh.decSuffixes.addSuffix("P", bePair{10, 15})
+	sh.decSuffixes.addSuffix("E", bePair{10, 18})
+
+	return fastLookup{sh}
+}
+
+func (sh *suffixHandler) construct(base, exponent int32, fmt Format) (s suffix, ok bool) {
+	switch fmt {
+	case DecimalSI:
+		return sh.decSuffixes.construct(base, exponent)
+	case BinarySI:
+		return sh.binSuffixes.construct(base, exponent)
+	case DecimalExponent:
+		if base != 10 {
+			return "", false
+		}
+		if exponent == 0 {
+			return "", true
+		}
+		return suffix("e" + strconv.FormatInt(int64(exponent), 10)), true
+	}
+	return "", false
+}
+
+func (sh *suffixHandler) constructBytes(base, exponent int32, format Format) (s []byte, ok bool) {
+	switch format {
+	case DecimalSI:
+		return sh.decSuffixes.constructBytes(base, exponent)
+	case BinarySI:
+		return sh.binSuffixes.constructBytes(base, exponent)
+	case DecimalExponent:
+		if base != 10 {
+			return nil, false
+		}
+		if exponent == 0 {
+			return nil, true
+		}
+		result := make([]byte, 8)
+		result[0] = 'e'
+		number := strconv.AppendInt(result[1:1], int64(exponent), 10)
+		if &result[1] == &number[0] {
+			return result[:1+len(number)], true
+		}
+		result = append(result[:1], number...)
+		return result, true
+	}
+	return nil, false
+}
+
+func (sh *suffixHandler) interpret(suffix suffix) (base, exponent int32, fmt Format, ok bool) {
+	// Try lookup tables first
+	if b, e, ok := sh.decSuffixes.lookup(suffix); ok {
+		return b, e, DecimalSI, true
+	}
+	if b, e, ok := sh.binSuffixes.lookup(suffix); ok {
+		return b, e, BinarySI, true
+	}
+
+	if len(suffix) > 1 && (suffix[0] == 'E' || suffix[0] == 'e') {
+		parsed, err := strconv.ParseInt(string(suffix[1:]), 10, 64)
+		if err != nil {
+			return 0, 0, DecimalExponent, false
+		}
+		return 10, int32(parsed), DecimalExponent, true
+	}
+
+	return 0, 0, DecimalExponent, false
+}
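+
+// editorSuffixSketch is an editor's illustrative sketch, not upstream code
+// (the helper name is invented). It shows the three suffix families handled
+// above: binary SI, decimal SI, and decimal exponent.
+func editorSuffixSketch() [][2]int32 {
+	kb, ke, _, _ := quantitySuffixer.interpret("Ki") // 2, 10, BinarySI
+	mb, me, _, _ := quantitySuffixer.interpret("m")  // 10, -3, DecimalSI
+	eb, ee, _, _ := quantitySuffixer.interpret("e3") // 10, 3, DecimalExponent
+	return [][2]int32{{kb, ke}, {mb, me}, {eb, ee}}
+}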
diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go
new file mode 100644
index 0000000..ab47407
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go
@@ -0,0 +1,27 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package resource
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Quantity) DeepCopyInto(out *Quantity) {
+	*out = in.DeepCopy()
+	return
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS
new file mode 100755
index 0000000..cdb125a
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS
@@ -0,0 +1,31 @@
+reviewers:
+- thockin
+- smarterclayton
+- wojtek-t
+- deads2k
+- brendandburns
+- caesarxuchao
+- liggitt
+- nikhiljindal
+- gmarek
+- erictune
+- davidopp
+- sttts
+- quinton-hoole
+- luxas
+- janetkuo
+- justinsb
+- ncdc
+- soltysh
+- dims
+- madhusudancs
+- hongchaodeng
+- krousey
+- mml
+- mbohlool
+- david-mcmahon
+- therc
+- mqliang
+- kevin-wangzefeng
+- jianhuiz
+- feihujiang
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go
new file mode 100644
index 0000000..042cd5b
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go
@@ -0,0 +1,54 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// IsControlledBy checks if the object has a controllerRef set to the given owner.
+func IsControlledBy(obj Object, owner Object) bool {
+	ref := GetControllerOf(obj)
+	if ref == nil {
+		return false
+	}
+	return ref.UID == owner.GetUID()
+}
+
+// GetControllerOf returns a pointer to a copy of the controllerRef if controllee has a controller
+func GetControllerOf(controllee Object) *OwnerReference {
+	for _, ref := range controllee.GetOwnerReferences() {
+		if ref.Controller != nil && *ref.Controller {
+			return &ref
+		}
+	}
+	return nil
+}
+
+// NewControllerRef creates an OwnerReference pointing to the given owner.
+func NewControllerRef(owner Object, gvk schema.GroupVersionKind) *OwnerReference {
+	blockOwnerDeletion := true
+	isController := true
+	return &OwnerReference{
+		APIVersion:         gvk.GroupVersion().String(),
+		Kind:               gvk.Kind,
+		Name:               owner.GetName(),
+		UID:                owner.GetUID(),
+		BlockOwnerDeletion: &blockOwnerDeletion,
+		Controller:         &isController,
+	}
+}
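+
+// editorControllerRefSketch is an editor's illustrative sketch, not upstream
+// code (the helper name and the apps/v1 Deployment kind are invented for the
+// example). A ref built by NewControllerRef is found again by GetControllerOf
+// and satisfies IsControlledBy once attached to the controllee.
+func editorControllerRefSketch(owner, controllee Object) bool {
+	gvk := schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"}
+	ref := NewControllerRef(owner, gvk)
+	controllee.SetOwnerReferences(append(controllee.GetOwnerReferences(), *ref))
+	return IsControlledBy(controllee, owner) // true: ref.UID == owner.GetUID()
+}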
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go
new file mode 100644
index 0000000..5c36f82
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go
@@ -0,0 +1,319 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/api/resource"
+	"k8s.io/apimachinery/pkg/conversion"
+	"k8s.io/apimachinery/pkg/fields"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/intstr"
+)
+
+func AddConversionFuncs(scheme *runtime.Scheme) error {
+	return scheme.AddConversionFuncs(
+		Convert_v1_TypeMeta_To_v1_TypeMeta,
+
+		Convert_v1_ListMeta_To_v1_ListMeta,
+
+		Convert_intstr_IntOrString_To_intstr_IntOrString,
+
+		Convert_Pointer_v1_Duration_To_v1_Duration,
+		Convert_v1_Duration_To_Pointer_v1_Duration,
+
+		Convert_Slice_string_To_v1_Time,
+
+		Convert_v1_Time_To_v1_Time,
+		Convert_v1_MicroTime_To_v1_MicroTime,
+
+		Convert_resource_Quantity_To_resource_Quantity,
+
+		Convert_string_To_labels_Selector,
+		Convert_labels_Selector_To_string,
+
+		Convert_string_To_fields_Selector,
+		Convert_fields_Selector_To_string,
+
+		Convert_Pointer_bool_To_bool,
+		Convert_bool_To_Pointer_bool,
+
+		Convert_Pointer_string_To_string,
+		Convert_string_To_Pointer_string,
+
+		Convert_Pointer_int64_To_int,
+		Convert_int_To_Pointer_int64,
+
+		Convert_Pointer_int32_To_int32,
+		Convert_int32_To_Pointer_int32,
+
+		Convert_Pointer_int64_To_int64,
+		Convert_int64_To_Pointer_int64,
+
+		Convert_Pointer_float64_To_float64,
+		Convert_float64_To_Pointer_float64,
+
+		Convert_Map_string_To_string_To_v1_LabelSelector,
+		Convert_v1_LabelSelector_To_Map_string_To_string,
+
+		Convert_Slice_string_To_Slice_int32,
+
+		Convert_Slice_string_To_v1_DeletionPropagation,
+	)
+}
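+
+// editorSchemeSketch is an editor's illustrative sketch, not upstream code
+// (the helper name is invented). The conversion funcs above are registered
+// once on a scheme; generated conversions then find them by signature.
+func editorSchemeSketch() error {
+	return AddConversionFuncs(runtime.NewScheme())
+}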
+
+func Convert_Pointer_float64_To_float64(in **float64, out *float64, s conversion.Scope) error {
+	if *in == nil {
+		*out = 0
+		return nil
+	}
+	*out = float64(**in)
+	return nil
+}
+
+func Convert_float64_To_Pointer_float64(in *float64, out **float64, s conversion.Scope) error {
+	temp := float64(*in)
+	*out = &temp
+	return nil
+}
+
+func Convert_Pointer_int32_To_int32(in **int32, out *int32, s conversion.Scope) error {
+	if *in == nil {
+		*out = 0
+		return nil
+	}
+	*out = int32(**in)
+	return nil
+}
+
+func Convert_int32_To_Pointer_int32(in *int32, out **int32, s conversion.Scope) error {
+	temp := int32(*in)
+	*out = &temp
+	return nil
+}
+
+func Convert_Pointer_int64_To_int64(in **int64, out *int64, s conversion.Scope) error {
+	if *in == nil {
+		*out = 0
+		return nil
+	}
+	*out = int64(**in)
+	return nil
+}
+
+func Convert_int64_To_Pointer_int64(in *int64, out **int64, s conversion.Scope) error {
+	temp := int64(*in)
+	*out = &temp
+	return nil
+}
+
+func Convert_Pointer_int64_To_int(in **int64, out *int, s conversion.Scope) error {
+	if *in == nil {
+		*out = 0
+		return nil
+	}
+	*out = int(**in)
+	return nil
+}
+
+func Convert_int_To_Pointer_int64(in *int, out **int64, s conversion.Scope) error {
+	temp := int64(*in)
+	*out = &temp
+	return nil
+}
+
+func Convert_Pointer_string_To_string(in **string, out *string, s conversion.Scope) error {
+	if *in == nil {
+		*out = ""
+		return nil
+	}
+	*out = **in
+	return nil
+}
+
+func Convert_string_To_Pointer_string(in *string, out **string, s conversion.Scope) error {
+	if in == nil {
+		stringVar := ""
+		*out = &stringVar
+		return nil
+	}
+	*out = in
+	return nil
+}
+
+func Convert_Pointer_bool_To_bool(in **bool, out *bool, s conversion.Scope) error {
+	if *in == nil {
+		*out = false
+		return nil
+	}
+	*out = **in
+	return nil
+}
+
+func Convert_bool_To_Pointer_bool(in *bool, out **bool, s conversion.Scope) error {
+	if in == nil {
+		boolVar := false
+		*out = &boolVar
+		return nil
+	}
+	*out = in
+	return nil
+}
+
+// +k8s:conversion-fn=drop
+func Convert_v1_TypeMeta_To_v1_TypeMeta(in, out *TypeMeta, s conversion.Scope) error {
+	// These values are explicitly not copied
+	//out.APIVersion = in.APIVersion
+	//out.Kind = in.Kind
+	return nil
+}
+
+// +k8s:conversion-fn=copy-only
+func Convert_v1_ListMeta_To_v1_ListMeta(in, out *ListMeta, s conversion.Scope) error {
+	*out = *in
+	return nil
+}
+
+// +k8s:conversion-fn=copy-only
+func Convert_intstr_IntOrString_To_intstr_IntOrString(in, out *intstr.IntOrString, s conversion.Scope) error {
+	*out = *in
+	return nil
+}
+
+// +k8s:conversion-fn=copy-only
+func Convert_v1_Time_To_v1_Time(in *Time, out *Time, s conversion.Scope) error {
+	// Cannot deep copy these, because time.Time has unexported fields.
+	*out = *in
+	return nil
+}
+
+// +k8s:conversion-fn=copy-only
+func Convert_v1_MicroTime_To_v1_MicroTime(in *MicroTime, out *MicroTime, s conversion.Scope) error {
+	// Cannot deep copy these, because time.Time has unexported fields.
+	*out = *in
+	return nil
+}
+
+func Convert_Pointer_v1_Duration_To_v1_Duration(in **Duration, out *Duration, s conversion.Scope) error {
+	if *in == nil {
+		*out = Duration{} // zero duration
+		return nil
+	}
+	*out = **in // copy
+	return nil
+}
+
+func Convert_v1_Duration_To_Pointer_v1_Duration(in *Duration, out **Duration, s conversion.Scope) error {
+	temp := *in // copy
+	*out = &temp
+	return nil
+}
+
+// Convert_Slice_string_To_v1_Time allows converting a URL query parameter value into a Time.
+func Convert_Slice_string_To_v1_Time(input *[]string, out *Time, s conversion.Scope) error {
+	str := ""
+	if len(*input) > 0 {
+		str = (*input)[0]
+	}
+	return out.UnmarshalQueryParameter(str)
+}
+
+func Convert_string_To_labels_Selector(in *string, out *labels.Selector, s conversion.Scope) error {
+	selector, err := labels.Parse(*in)
+	if err != nil {
+		return err
+	}
+	*out = selector
+	return nil
+}
+
+func Convert_string_To_fields_Selector(in *string, out *fields.Selector, s conversion.Scope) error {
+	selector, err := fields.ParseSelector(*in)
+	if err != nil {
+		return err
+	}
+	*out = selector
+	return nil
+}
+
+func Convert_labels_Selector_To_string(in *labels.Selector, out *string, s conversion.Scope) error {
+	if *in == nil {
+		return nil
+	}
+	*out = (*in).String()
+	return nil
+}
+
+func Convert_fields_Selector_To_string(in *fields.Selector, out *string, s conversion.Scope) error {
+	if *in == nil {
+		return nil
+	}
+	*out = (*in).String()
+	return nil
+}
+
+// +k8s:conversion-fn=copy-only
+func Convert_resource_Quantity_To_resource_Quantity(in *resource.Quantity, out *resource.Quantity, s conversion.Scope) error {
+	*out = *in
+	return nil
+}
+
+func Convert_Map_string_To_string_To_v1_LabelSelector(in *map[string]string, out *LabelSelector, s conversion.Scope) error {
+	if in == nil {
+		return nil
+	}
+	for labelKey, labelValue := range *in {
+		AddLabelToSelector(out, labelKey, labelValue)
+	}
+	return nil
+}
+
+func Convert_v1_LabelSelector_To_Map_string_To_string(in *LabelSelector, out *map[string]string, s conversion.Scope) error {
+	var err error
+	*out, err = LabelSelectorAsMap(in)
+	return err
+}
+
+// Convert_Slice_string_To_Slice_int32 converts multiple query parameters or
+// a single query parameter with a comma delimited value to multiple int32.
+// This is used for port forwarding which needs the ports as int32.
+func Convert_Slice_string_To_Slice_int32(in *[]string, out *[]int32, s conversion.Scope) error {
+	for _, s := range *in {
+		for _, v := range strings.Split(s, ",") {
+			x, err := strconv.ParseUint(v, 10, 16)
+			if err != nil {
+				return fmt.Errorf("cannot convert to []int32: %v", err)
+			}
+			*out = append(*out, int32(x))
+		}
+	}
+	return nil
+}
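+
+// editorPortsSketch is an editor's illustrative sketch, not upstream code
+// (the helper name is invented). Repeated query parameters and comma-joined
+// values flatten into a single []int32; the Scope argument is unused here.
+func editorPortsSketch() ([]int32, error) {
+	in := []string{"8080,8443", "9090"}
+	var out []int32
+	err := Convert_Slice_string_To_Slice_int32(&in, &out, nil)
+	return out, err // [8080 8443 9090], nil
+}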
+
+// Convert_Slice_string_To_v1_DeletionPropagation allows converting the URL query parameter propagationPolicy into a DeletionPropagation.
+func Convert_Slice_string_To_v1_DeletionPropagation(input *[]string, out *DeletionPropagation, s conversion.Scope) error {
+	if len(*input) > 0 {
+		*out = DeletionPropagation((*input)[0])
+	} else {
+		*out = ""
+	}
+	return nil
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go
new file mode 100644
index 0000000..dbaa87c
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go
@@ -0,0 +1,23 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:openapi-gen=true
+// +k8s:defaulter-gen=TypeMeta
+
+// +groupName=meta.k8s.io
+
+package v1 // import "k8s.io/apimachinery/pkg/apis/meta/v1"
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go
new file mode 100644
index 0000000..babe8a8
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go
@@ -0,0 +1,60 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"encoding/json"
+	"time"
+)
+
+// Duration is a wrapper around time.Duration which supports correct
+// marshaling to YAML and JSON. In particular, it marshals into strings, which
+// can be used as map keys in json.
+type Duration struct {
+	time.Duration `protobuf:"varint,1,opt,name=duration,casttype=time.Duration"`
+}
+
+// UnmarshalJSON implements the json.Unmarshaller interface.
+func (d *Duration) UnmarshalJSON(b []byte) error {
+	var str string
+	err := json.Unmarshal(b, &str)
+	if err != nil {
+		return err
+	}
+
+	pd, err := time.ParseDuration(str)
+	if err != nil {
+		return err
+	}
+	d.Duration = pd
+	return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (d Duration) MarshalJSON() ([]byte, error) {
+	return json.Marshal(d.Duration.String())
+}
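+
+// editorDurationSketch is an editor's illustrative sketch, not upstream code
+// (the helper name is invented). Duration marshals to a quoted Go duration
+// string and parses back with time.ParseDuration semantics.
+func editorDurationSketch() (Duration, error) {
+	d := Duration{Duration: 90 * time.Minute}
+	b, err := json.Marshal(d) // `"1h30m0s"`
+	if err != nil {
+		return Duration{}, err
+	}
+	var back Duration
+	err = json.Unmarshal(b, &back) // back.Duration == 90 * time.Minute
+	return back, err
+}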
+
+// OpenAPISchemaType is used by the kube-openapi generator when constructing
+// the OpenAPI spec of this type.
+//
+// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators
+func (_ Duration) OpenAPISchemaType() []string { return []string{"string"} }
+
+// OpenAPISchemaFormat is used by the kube-openapi generator when constructing
+// the OpenAPI spec of this type.
+func (_ Duration) OpenAPISchemaFormat() string { return "" }
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto
new file mode 100644
index 0000000..989f076
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto
@@ -0,0 +1,879 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.apimachinery.pkg.apis.meta.v1;
+
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1";
+
+// APIGroup contains the name, the supported versions, and the preferred version
+// of a group.
+message APIGroup {
+  // name is the name of the group.
+  optional string name = 1;
+
+  // versions are the versions supported in this group.
+  repeated GroupVersionForDiscovery versions = 2;
+
+  // preferredVersion is the version preferred by the API server, which
+  // probably is the storage version.
+  // +optional
+  optional GroupVersionForDiscovery preferredVersion = 3;
+
+  // a map of client CIDR to server address that is serving this group.
+  // This is to help clients reach servers in the most network-efficient way possible.
+  // Clients can use the appropriate server address as per the CIDR that they match.
+  // In case of multiple matches, clients should use the longest matching CIDR.
+  // The server returns only those CIDRs that it thinks that the client can match.
+  // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP.
+  // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.
+  // +optional
+  repeated ServerAddressByClientCIDR serverAddressByClientCIDRs = 4;
+}
+
+// APIGroupList is a list of APIGroup, to allow clients to discover the API at
+// /apis.
+message APIGroupList {
+  // groups is a list of APIGroup.
+  repeated APIGroup groups = 1;
+}
+
+// APIResource specifies the name of a resource and whether it is namespaced.
+message APIResource {
+  // name is the plural name of the resource.
+  optional string name = 1;
+
+  // singularName is the singular name of the resource.  This allows clients to handle plural and singular opaquely.
+  // The singularName is more correct for reporting status on a single item and both singular and plural are allowed
+  // from the kubectl CLI interface.
+  optional string singularName = 6;
+
+  // namespaced indicates if a resource is namespaced or not.
+  optional bool namespaced = 2;
+
+  // group is the preferred group of the resource.  Empty implies the group of the containing resource list.
+  // For subresources, this may have a different value, for example "Scale".
+  optional string group = 8;
+
+  // version is the preferred version of the resource.  Empty implies the version of the containing resource list
+  // For subresources, this may have a different value, for example "v1" (while inside a v1beta1 version of the core resource's group).
+  optional string version = 9;
+
+  // kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo')
+  optional string kind = 3;
+
+  // verbs is a list of supported kube verbs (this includes get, list, watch, create,
+  // update, patch, delete, deletecollection, and proxy)
+  optional Verbs verbs = 4;
+
+  // shortNames is a list of suggested short names of the resource.
+  repeated string shortNames = 5;
+
+  // categories is a list of the grouped resources this resource belongs to (e.g. 'all')
+  repeated string categories = 7;
+}
+
+// APIResourceList is a list of APIResource; it is used to expose the name of the
+// resources supported in a specific group and version, and if the resource
+// is namespaced.
+message APIResourceList {
+  // groupVersion is the group and version this APIResourceList is for.
+  optional string groupVersion = 1;
+
+  // resources contains the name of the resources and if they are namespaced.
+  repeated APIResource resources = 2;
+}
+
+// APIVersions lists the versions that are available, to allow clients to
+// discover the API at /api, which is the root path of the legacy v1 API.
+//
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+message APIVersions {
+  // versions are the api versions that are available.
+  repeated string versions = 1;
+
+  // a map of client CIDR to server address that is serving this group.
+  // This is to help clients reach servers in the most network-efficient way possible.
+  // Clients can use the appropriate server address as per the CIDR that they match.
+  // In case of multiple matches, clients should use the longest matching CIDR.
+  // The server returns only those CIDRs that it thinks that the client can match.
+  // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP.
+  // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.
+  repeated ServerAddressByClientCIDR serverAddressByClientCIDRs = 2;
+}
+
+// CreateOptions may be provided when creating an API object.
+message CreateOptions {
+  // When present, indicates that modifications should not be
+  // persisted. An invalid or unrecognized dryRun directive will
+  // result in an error response and no further processing of the
+  // request. Valid values are:
+  // - All: all dry run stages will be processed
+  // +optional
+  repeated string dryRun = 1;
+
+  // If IncludeUninitialized is specified, the object may be
+  // returned without completing initialization.
+  optional bool includeUninitialized = 2;
+}
+
+// DeleteOptions may be provided when deleting an API object.
+message DeleteOptions {
+  // The duration in seconds before the object should be deleted. Value must be a non-negative integer.
+  // The value zero indicates delete immediately. If this value is nil, the default grace period for the
+  // specified type will be used.
+  // Defaults to a per-object value if not specified. Zero means delete immediately.
+  // +optional
+  optional int64 gracePeriodSeconds = 1;
+
+  // Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be
+  // returned.
+  // +optional
+  optional Preconditions preconditions = 2;
+
+  // Deprecated: please use PropagationPolicy; this field will be deprecated in 1.7.
+  // Should the dependent objects be orphaned. If true/false, the "orphan"
+  // finalizer will be added to/removed from the object's finalizers list.
+  // Either this field or PropagationPolicy may be set, but not both.
+  // +optional
+  optional bool orphanDependents = 3;
+
+  // Whether and how garbage collection will be performed.
+  // Either this field or OrphanDependents may be set, but not both.
+  // The default policy is decided by the existing finalizer set in the
+  // metadata.finalizers and the resource-specific default policy.
+  // Acceptable values are: 'Orphan' - orphan the dependents; 'Background' -
+  // allow the garbage collector to delete the dependents in the background;
+  // 'Foreground' - a cascading policy that deletes all dependents in the
+  // foreground.
+  // +optional
+  optional string propagationPolicy = 4;
+
+  // When present, indicates that modifications should not be
+  // persisted. An invalid or unrecognized dryRun directive will
+  // result in an error response and no further processing of the
+  // request. Valid values are:
+  // - All: all dry run stages will be processed
+  // +optional
+  repeated string dryRun = 5;
+}
+
+// Duration is a wrapper around time.Duration which supports correct
+// marshaling to YAML and JSON. In particular, it marshals into strings, which
+// can be used as map keys in json.
+message Duration {
+  optional int64 duration = 1;
+}
+
+// ExportOptions is the query options to the standard REST get call.
+message ExportOptions {
+  // Should this value be exported? Export strips fields that a user cannot specify.
+  optional bool export = 1;
+
+  // Should the export be exact? Exact export maintains cluster-specific fields like 'Namespace'.
+  optional bool exact = 2;
+}
+
+// GetOptions is the standard query options to the standard REST get call.
+message GetOptions {
+  // When specified:
+  // - if unset, then the result is returned from remote storage based on quorum-read flag;
+  // - if it's 0, then we simply return what we currently have in cache, no guarantee;
+  // - if set to non zero, then the result is at least as fresh as given rv.
+  optional string resourceVersion = 1;
+
+  // If true, partially initialized resources are included in the response.
+  // +optional
+  optional bool includeUninitialized = 2;
+}
+
+// GroupKind specifies a Group and a Kind, but does not force a version.  This is useful for identifying
+// concepts during lookup stages without having partially valid types
+//
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+message GroupKind {
+  optional string group = 1;
+
+  optional string kind = 2;
+}
+
+// GroupResource specifies a Group and a Resource, but does not force a version.  This is useful for identifying
+// concepts during lookup stages without having partially valid types
+//
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+message GroupResource {
+  optional string group = 1;
+
+  optional string resource = 2;
+}
+
+// GroupVersion contains the "group" and the "version", which uniquely identifies the API.
+//
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+message GroupVersion {
+  optional string group = 1;
+
+  optional string version = 2;
+}
+
+// GroupVersionForDiscovery contains the "group/version" and "version" strings of a version.
+// It is made a struct to keep it extensible.
+message GroupVersionForDiscovery {
+  // groupVersion specifies the API group and version in the form "group/version"
+  optional string groupVersion = 1;
+
+  // version specifies the version in the form of "version". This is to save
+  // the clients the trouble of splitting the GroupVersion.
+  optional string version = 2;
+}
+
+// GroupVersionKind unambiguously identifies a kind.  It doesn't anonymously include GroupVersion
+// to avoid automatic coercion.  It doesn't use a GroupVersion to avoid custom marshalling
+//
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+message GroupVersionKind {
+  optional string group = 1;
+
+  optional string version = 2;
+
+  optional string kind = 3;
+}
+
+// GroupVersionResource unambiguously identifies a resource.  It doesn't anonymously include GroupVersion
+// to avoid automatic coercion.  It doesn't use a GroupVersion to avoid custom marshalling
+//
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+message GroupVersionResource {
+  optional string group = 1;
+
+  optional string version = 2;
+
+  optional string resource = 3;
+}
+
+// Initializer is information about an initializer that has not yet completed.
+message Initializer {
+  // name of the process that is responsible for initializing this object.
+  optional string name = 1;
+}
+
+// Initializers tracks the progress of initialization.
+message Initializers {
+  // Pending is a list of initializers that must execute in order before this object is visible.
+  // When the last pending initializer is removed, and no failing result is set, the initializers
+  // struct will be set to nil and the object is considered as initialized and visible to all
+  // clients.
+  // +patchMergeKey=name
+  // +patchStrategy=merge
+  repeated Initializer pending = 1;
+
+  // If result is set with the Failure field, the object will be persisted to storage and then deleted,
+  // ensuring that other clients can observe the deletion.
+  optional Status result = 2;
+}
+
+// A label selector is a label query over a set of resources. The results of matchLabels and
+// matchExpressions are ANDed. An empty label selector matches all objects. A null
+// label selector matches no objects.
+message LabelSelector {
+  // matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+  // map is equivalent to an element of matchExpressions, whose key field is "key", the
+  // operator is "In", and the values array contains only "value". The requirements are ANDed.
+  // +optional
+  map<string, string> matchLabels = 1;
+
+  // matchExpressions is a list of label selector requirements. The requirements are ANDed.
+  // +optional
+  repeated LabelSelectorRequirement matchExpressions = 2;
+}
+
+// A label selector requirement is a selector that contains values, a key, and an operator that
+// relates the key and values.
+message LabelSelectorRequirement {
+  // key is the label key that the selector applies to.
+  // +patchMergeKey=key
+  // +patchStrategy=merge
+  optional string key = 1;
+
+  // operator represents a key's relationship to a set of values.
+  // Valid operators are In, NotIn, Exists and DoesNotExist.
+  optional string operator = 2;
+
+  // values is an array of string values. If the operator is In or NotIn,
+  // the values array must be non-empty. If the operator is Exists or DoesNotExist,
+  // the values array must be empty. This array is replaced during a strategic
+  // merge patch.
+  // +optional
+  repeated string values = 3;
+}
+
+// List holds a list of objects, which may not be known by the server.
+message List {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+  // +optional
+  optional ListMeta metadata = 1;
+
+  // List of objects
+  repeated k8s.io.apimachinery.pkg.runtime.RawExtension items = 2;
+}
+
+// ListMeta describes metadata that synthetic resources must have, including lists and
+// various status objects. A resource may have only one of {ObjectMeta, ListMeta}.
+message ListMeta {
+  // selfLink is a URL representing this object.
+  // Populated by the system.
+  // Read-only.
+  // +optional
+  optional string selfLink = 1;
+
+  // String that identifies the server's internal version of this object that
+  // can be used by clients to determine when objects have changed.
+  // Value must be treated as opaque by clients and passed unmodified back to the server.
+  // Populated by the system.
+  // Read-only.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency
+  // +optional
+  optional string resourceVersion = 2;
+
+  // continue may be set if the user set a limit on the number of items returned, and indicates that
+  // the server has more data available. The value is opaque and may be used to issue another request
+  // to the endpoint that served this list to retrieve the next set of available objects. Continuing a
+  // consistent list may not be possible if the server configuration has changed or more than a few
+  // minutes have passed. The resourceVersion field returned when using this continue value will be
+  // identical to the value in the first response, unless you have received this token from an error
+  // message.
+  optional string continue = 3;
+}
+
+// ListOptions is the query options to a standard REST list call.
+message ListOptions {
+  // A selector to restrict the list of returned objects by their labels.
+  // Defaults to everything.
+  // +optional
+  optional string labelSelector = 1;
+
+  // A selector to restrict the list of returned objects by their fields.
+  // Defaults to everything.
+  // +optional
+  optional string fieldSelector = 2;
+
+  // If true, partially initialized resources are included in the response.
+  // +optional
+  optional bool includeUninitialized = 6;
+
+  // Watch for changes to the described resources and return them as a stream of
+  // add, update, and remove notifications. Specify resourceVersion.
+  // +optional
+  optional bool watch = 3;
+
+  // When specified with a watch call, shows changes that occur after that particular version of a resource.
+  // Defaults to changes from the beginning of history.
+  // When specified for list:
+  // - if unset, then the result is returned from remote storage based on quorum-read flag;
+  // - if it's 0, then we simply return what we currently have in cache, no guarantee;
+  // - if set to non zero, then the result is at least as fresh as given rv.
+  // +optional
+  optional string resourceVersion = 4;
+
+  // Timeout for the list/watch call.
+  // This limits the duration of the call, regardless of any activity or inactivity.
+  // +optional
+  optional int64 timeoutSeconds = 5;
+
+  // limit is a maximum number of responses to return for a list call. If more items exist, the
+  // server will set the `continue` field on the list metadata to a value that can be used with the
+  // same initial query to retrieve the next set of results. Setting a limit may return fewer than
+  // the requested amount of items (up to zero items) in the event all requested objects are
+  // filtered out and clients should only use the presence of the continue field to determine whether
+  // more results are available. Servers may choose not to support the limit argument and will return
+  // all of the available results. If limit is specified and the continue field is empty, clients may
+  // assume that no more results are available. This field is not supported if watch is true.
+  //
+  // The server guarantees that the objects returned when using continue will be identical to issuing
+  // a single list call without a limit - that is, no objects created, modified, or deleted after the
+  // first request is issued will be included in any subsequent continued requests. This is sometimes
+  // referred to as a consistent snapshot, and ensures that a client that is using limit to receive
+  // smaller chunks of a very large result can ensure they see all possible objects. If objects are
+  // updated during a chunked list the version of the object that was present at the time the first list
+  // result was calculated is returned.
+  optional int64 limit = 7;
+
+  // The continue option should be set when retrieving more results from the server. Since this value is
+  // server defined, clients may only use the continue value from a previous query result with identical
+  // query parameters (except for the value of continue) and the server may reject a continue value it
+  // does not recognize. If the specified continue value is no longer valid whether due to expiration
+  // (generally five to fifteen minutes) or a configuration change on the server, the server will
+  // respond with a 410 ResourceExpired error together with a continue token. If the client needs a
+  // consistent list, it must restart their list without the continue field. Otherwise, the client may
+  // send another list request with the token received with the 410 error, the server will respond with
+  // a list starting from the next key, but from the latest snapshot, which is inconsistent from the
+  // previous list results - objects that are created, modified, or deleted after the first list request
+  // will be included in the response, as long as their keys are after the "next key".
+  //
+  // This field is not supported when watch is true. Clients may start a watch from the last
+  // resourceVersion value returned by the server and not miss any modifications.
+  optional string continue = 8;
+}
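
The limit/continue contract above is easiest to see from the client side. Below is a minimal sketch of the pagination loop, assuming a client-go clientset from roughly the same era as this vendor tree (List still takes ListOptions directly, with no context argument); the helper name listAllPods is illustrative only:

package example

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// listAllPods pages through pods 50 at a time using limit/continue.
func listAllPods(cs *kubernetes.Clientset) error {
	opts := metav1.ListOptions{Limit: 50}
	for {
		pods, err := cs.CoreV1().Pods("default").List(opts)
		if err != nil {
			return err // a 410 Gone here means the continue token expired; restart without it
		}
		for _, p := range pods.Items {
			fmt.Println(p.Name)
		}
		if pods.Continue == "" {
			return nil // an empty continue field means no more results
		}
		opts.Continue = pods.Continue // same query parameters, next chunk
	}
}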
+
+// MicroTime is version of Time with microsecond level precision.
+//
+// +protobuf.options.marshal=false
+// +protobuf.as=Timestamp
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+message MicroTime {
+  // Represents seconds of UTC time since Unix epoch
+  // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
+  // 9999-12-31T23:59:59Z inclusive.
+  optional int64 seconds = 1;
+
+  // Non-negative fractions of a second at nanosecond resolution. Negative
+  // second values with fractions must still have non-negative nanos values
+  // that count forward in time. Must be from 0 to 999,999,999
+  // inclusive. This field may be limited in precision depending on context.
+  optional int32 nanos = 2;
+}
+
+// ObjectMeta is metadata that all persisted resources must have, which includes all objects
+// users must create.
+message ObjectMeta {
+  // Name must be unique within a namespace. Is required when creating resources, although
+  // some resources may allow a client to request the generation of an appropriate name
+  // automatically. Name is primarily intended for creation idempotence and configuration
+  // definition.
+  // Cannot be updated.
+  // More info: http://kubernetes.io/docs/user-guide/identifiers#names
+  // +optional
+  optional string name = 1;
+
+  // GenerateName is an optional prefix, used by the server, to generate a unique
+  // name ONLY IF the Name field has not been provided.
+  // If this field is used, the name returned to the client will be different
+  // than the name passed. This value will also be combined with a unique suffix.
+  // The provided value has the same validation rules as the Name field,
+  // and may be truncated by the length of the suffix required to make the value
+  // unique on the server.
+  //
+  // If this field is specified and the generated name exists, the server will
+  // NOT return a 409 - instead, it will either return 201 Created or 500 with Reason
+  // ServerTimeout indicating a unique name could not be found in the time allotted, and the client
+  // should retry (optionally after the time indicated in the Retry-After header).
+  //
+  // Applied only if Name is not specified.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency
+  // +optional
+  optional string generateName = 2;
+
+  // Namespace defines the space within each name must be unique. An empty namespace is
+  // equivalent to the "default" namespace, but "default" is the canonical representation.
+  // Not all objects are required to be scoped to a namespace - the value of this field for
+  // those objects will be empty.
+  //
+  // Must be a DNS_LABEL.
+  // Cannot be updated.
+  // More info: http://kubernetes.io/docs/user-guide/namespaces
+  // +optional
+  optional string namespace = 3;
+
+  // SelfLink is a URL representing this object.
+  // Populated by the system.
+  // Read-only.
+  // +optional
+  optional string selfLink = 4;
+
+  // UID is the unique in time and space value for this object. It is typically generated by
+  // the server on successful creation of a resource and is not allowed to change on PUT
+  // operations.
+  //
+  // Populated by the system.
+  // Read-only.
+  // More info: http://kubernetes.io/docs/user-guide/identifiers#uids
+  // +optional
+  optional string uid = 5;
+
+  // An opaque value that represents the internal version of this object that can
+  // be used by clients to determine when objects have changed. May be used for optimistic
+  // concurrency, change detection, and the watch operation on a resource or set of resources.
+  // Clients must treat these values as opaque and pass them unmodified back to the server.
+  // They may only be valid for a particular resource or set of resources.
+  //
+  // Populated by the system.
+  // Read-only.
+  // Value must be treated as opaque by clients and passed unmodified back to the server.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency
+  // +optional
+  optional string resourceVersion = 6;
+
+  // A sequence number representing a specific generation of the desired state.
+  // Populated by the system. Read-only.
+  // +optional
+  optional int64 generation = 7;
+
+  // CreationTimestamp is a timestamp representing the server time when this object was
+  // created. It is not guaranteed to be set in happens-before order across separate operations.
+  // Clients may not set this value. It is represented in RFC3339 form and is in UTC.
+  //
+  // Populated by the system.
+  // Read-only.
+  // Null for lists.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional Time creationTimestamp = 8;
+
+  // DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This
+  // field is set by the server when a graceful deletion is requested by the user, and is not
+  // directly settable by a client. The resource is expected to be deleted (no longer visible
+  // from resource lists, and not reachable by name) after the time in this field, once the
+  // finalizers list is empty. As long as the finalizers list contains items, deletion is blocked.
+  // Once the deletionTimestamp is set, this value may not be unset or be set further into the
+  // future, although it may be shortened or the resource may be deleted prior to this time.
+  // For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react
+  // by sending a graceful termination signal to the containers in the pod. After that 30 seconds,
+  // the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup,
+  // remove the pod from the API. In the presence of network partitions, this object may still
+  // exist after this timestamp, until an administrator or automated process can determine the
+  // resource is fully terminated.
+  // If not set, graceful deletion of the object has not been requested.
+  //
+  // Populated by the system when a graceful deletion is requested.
+  // Read-only.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional Time deletionTimestamp = 9;
+
+  // Number of seconds allowed for this object to gracefully terminate before
+  // it will be removed from the system. Only set when deletionTimestamp is also set.
+  // May only be shortened.
+  // Read-only.
+  // +optional
+  optional int64 deletionGracePeriodSeconds = 10;
+
+  // Map of string keys and values that can be used to organize and categorize
+  // (scope and select) objects. May match selectors of replication controllers
+  // and services.
+  // More info: http://kubernetes.io/docs/user-guide/labels
+  // +optional
+  map<string, string> labels = 11;
+
+  // Annotations is an unstructured key value map stored with a resource that may be
+  // set by external tools to store and retrieve arbitrary metadata. They are not
+  // queryable and should be preserved when modifying objects.
+  // More info: http://kubernetes.io/docs/user-guide/annotations
+  // +optional
+  map<string, string> annotations = 12;
+
+  // List of objects depended by this object. If ALL objects in the list have
+  // been deleted, this object will be garbage collected. If this object is managed by a controller,
+  // then an entry in this list will point to this controller, with the controller field set to true.
+  // There cannot be more than one managing controller.
+  // +optional
+  // +patchMergeKey=uid
+  // +patchStrategy=merge
+  repeated OwnerReference ownerReferences = 13;
+
+  // An initializer is a controller which enforces some system invariant at object creation time.
+  // This field is a list of initializers that have not yet acted on this object. If nil or empty,
+  // this object has been completely initialized. Otherwise, the object is considered uninitialized
+  // and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to
+  // observe uninitialized objects.
+  //
+  // When an object is created, the system will populate this list with the current set of initializers.
+  // Only privileged users may set or modify this list. Once it is empty, it may not be modified further
+  // by any user.
+  optional Initializers initializers = 16;
+
+  // Must be empty before the object is deleted from the registry. Each entry
+  // is an identifier for the responsible component that will remove the entry
+  // from the list. If the deletionTimestamp of the object is non-nil, entries
+  // in this list can only be removed.
+  // +optional
+  // +patchStrategy=merge
+  repeated string finalizers = 14;
+
+  // The name of the cluster which the object belongs to.
+  // This is used to distinguish resources with same name and namespace in different clusters.
+  // This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.
+  // +optional
+  optional string clusterName = 15;
+}
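
On the Go side these fields surface as metav1.ObjectMeta. A minimal sketch of populating only the client-settable subset, leaving the system-managed fields (uid, resourceVersion, timestamps) unset; the name and label values are hypothetical:

meta := metav1.ObjectMeta{
	Name:      "affinity-router", // required on create unless GenerateName is used
	Namespace: "default",
	Labels:    map[string]string{"app": "affinity-router"}, // queryable via label selectors
	Annotations: map[string]string{
		"example.org/notes": "not queryable; should be preserved on updates",
	},
}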
+
+// OwnerReference contains enough information to let you identify an owning
+// object. An owning object must be in the same namespace as the dependent, or
+// be cluster-scoped, so there is no namespace field.
+message OwnerReference {
+  // API version of the referent.
+  optional string apiVersion = 5;
+
+  // Kind of the referent.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+  optional string kind = 1;
+
+  // Name of the referent.
+  // More info: http://kubernetes.io/docs/user-guide/identifiers#names
+  optional string name = 3;
+
+  // UID of the referent.
+  // More info: http://kubernetes.io/docs/user-guide/identifiers#uids
+  optional string uid = 4;
+
+  // If true, this reference points to the managing controller.
+  // +optional
+  optional bool controller = 6;
+
+  // If true, AND if the owner has the "foregroundDeletion" finalizer, then
+  // the owner cannot be deleted from the key-value store until this
+  // reference is removed.
+  // Defaults to false.
+  // To set this field, a user needs "delete" permission of the owner,
+  // otherwise 422 (Unprocessable Entity) will be returned.
+  // +optional
+  optional bool blockOwnerDeletion = 7;
+}
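
A sketch of a controller-style reference as constrained above (same namespace or cluster-scoped, at most one controller), assuming metav1 is imported and ownerUID was read from the actual owner object; the owner name is hypothetical:

isController := true
block := true
ref := metav1.OwnerReference{
	APIVersion:         "apps/v1",
	Kind:               "Deployment",
	Name:               "affinity-routerd", // hypothetical owner
	UID:                ownerUID,           // from the owner; references track UID, not just name
	Controller:         &isController,      // only one reference in the list may set this
	BlockOwnerDeletion: &block,             // requires "delete" permission on the owner
}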
+
+// Patch is provided to give a concrete name and type to the Kubernetes PATCH request body.
+message Patch {
+}
+
+// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.
+message Preconditions {
+  // Specifies the target UID.
+  // +optional
+  optional string uid = 1;
+}
+
+// RootPaths lists the paths available at root.
+// For example: "/healthz", "/apis".
+message RootPaths {
+  // paths are the paths available at root.
+  repeated string paths = 1;
+}
+
+// ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match.
+message ServerAddressByClientCIDR {
+  // The CIDR with which clients can match their IP to figure out the server address that they should use.
+  optional string clientCIDR = 1;
+
+  // Address of this server, suitable for a client that matches the above CIDR.
+  // This can be a hostname, hostname:port, IP or IP:port.
+  optional string serverAddress = 2;
+}
+
+// Status is a return value for calls that don't return other objects.
+message Status {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+  // +optional
+  optional ListMeta metadata = 1;
+
+  // Status of the operation.
+  // One of: "Success" or "Failure".
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+  // +optional
+  optional string status = 2;
+
+  // A human-readable description of the status of this operation.
+  // +optional
+  optional string message = 3;
+
+  // A machine-readable description of why this operation is in the
+  // "Failure" status. If this value is empty there
+  // is no information available. A Reason clarifies an HTTP status
+  // code but does not override it.
+  // +optional
+  optional string reason = 4;
+
+  // Extended data associated with the reason.  Each reason may define its
+  // own extended details. This field is optional and the data returned
+  // is not guaranteed to conform to any schema except that defined by
+  // the reason type.
+  // +optional
+  optional StatusDetails details = 5;
+
+  // Suggested HTTP return code for this status, 0 if not set.
+  // +optional
+  optional int32 code = 6;
+}
+
+// StatusCause provides more information about an api.Status failure, including
+// cases when multiple errors are encountered.
+message StatusCause {
+  // A machine-readable description of the cause of the error. If this value is
+  // empty there is no information available.
+  // +optional
+  optional string reason = 1;
+
+  // A human-readable description of the cause of the error.  This field may be
+  // presented as-is to a reader.
+  // +optional
+  optional string message = 2;
+
+  // The field of the resource that has caused this error, as named by its JSON
+  // serialization. May include dot and postfix notation for nested attributes.
+  // Arrays are zero-indexed.  Fields may appear more than once in an array of
+  // causes due to fields having multiple errors.
+  // Optional.
+  //
+  // Examples:
+  //   "name" - the field "name" on the current resource
+  //   "items[0].name" - the field "name" on the first array entry in "items"
+  // +optional
+  optional string field = 3;
+}
+
+// StatusDetails is a set of additional properties that MAY be set by the
+// server to provide additional information about a response. The Reason
+// field of a Status object defines what attributes will be set. Clients
+// must ignore fields that do not match the defined type of each attribute,
+// and should assume that any attribute may be empty, invalid, or
+// under-defined.
+message StatusDetails {
+  // The name attribute of the resource associated with the status StatusReason
+  // (when there is a single name which can be described).
+  // +optional
+  optional string name = 1;
+
+  // The group attribute of the resource associated with the status StatusReason.
+  // +optional
+  optional string group = 2;
+
+  // The kind attribute of the resource associated with the status StatusReason.
+  // On some operations may differ from the requested resource Kind.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+  // +optional
+  optional string kind = 3;
+
+  // UID of the resource.
+  // (when there is a single resource which can be described).
+  // More info: http://kubernetes.io/docs/user-guide/identifiers#uids
+  // +optional
+  optional string uid = 6;
+
+  // The Causes array includes more details associated with the StatusReason
+  // failure. Not all StatusReasons may provide detailed causes.
+  // +optional
+  repeated StatusCause causes = 4;
+
+  // If specified, the time in seconds before the operation should be retried. Some errors may indicate
+  // the client must take an alternate action - for those errors this field may indicate how long to wait
+  // before taking the alternate action.
+  // +optional
+  optional int32 retryAfterSeconds = 5;
+}
+
+// Time is a wrapper around time.Time which supports correct
+// marshaling to YAML and JSON.  Wrappers are provided for many
+// of the factory methods that the time package offers.
+//
+// +protobuf.options.marshal=false
+// +protobuf.as=Timestamp
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+message Time {
+  // Represents seconds of UTC time since Unix epoch
+  // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
+  // 9999-12-31T23:59:59Z inclusive.
+  optional int64 seconds = 1;
+
+  // Non-negative fractions of a second at nanosecond resolution. Negative
+  // second values with fractions must still have non-negative nanos values
+  // that count forward in time. Must be from 0 to 999,999,999
+  // inclusive. This field may be limited in precision depending on context.
+  optional int32 nanos = 2;
+}
+
+// Timestamp is a struct that is equivalent to Time, but intended for
+// protobuf marshalling/unmarshalling. It is generated into a serialization
+// that matches Time. Do not use in Go structs.
+message Timestamp {
+  // Represents seconds of UTC time since Unix epoch
+  // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
+  // 9999-12-31T23:59:59Z inclusive.
+  optional int64 seconds = 1;
+
+  // Non-negative fractions of a second at nanosecond resolution. Negative
+  // second values with fractions must still have non-negative nanos values
+  // that count forward in time. Must be from 0 to 999,999,999
+  // inclusive. This field may be limited in precision depending on context.
+  optional int32 nanos = 2;
+}
+
+// TypeMeta describes an individual object in an API response or request
+// with strings representing the type of the object and its API schema version.
+// Structures that are versioned or persisted should inline TypeMeta.
+//
+// +k8s:deepcopy-gen=false
+message TypeMeta {
+  // Kind is a string value representing the REST resource this object represents.
+  // Servers may infer this from the endpoint the client submits requests to.
+  // Cannot be updated.
+  // In CamelCase.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+  // +optional
+  optional string kind = 1;
+
+  // APIVersion defines the versioned schema of this representation of an object.
+  // Servers should convert recognized schemas to the latest internal value, and
+  // may reject unrecognized values.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
+  // +optional
+  optional string apiVersion = 2;
+}
+
+// UpdateOptions may be provided when updating an API object.
+message UpdateOptions {
+  // When present, indicates that modifications should not be
+  // persisted. An invalid or unrecognized dryRun directive will
+  // result in an error response and no further processing of the
+  // request. Valid values are:
+  // - All: all dry run stages will be processed
+  // +optional
+  repeated string dryRun = 1;
+}
+
+// Verbs masks the value so protobuf can generate it as a nullable type.
+//
+// +protobuf.nullable=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+message Verbs {
+  // items, if empty, will result in an empty slice
+  repeated string items = 1;
+}
+
+// Event represents a single event to a watched resource.
+//
+// +protobuf=true
+// +k8s:deepcopy-gen=true
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+message WatchEvent {
+  optional string type = 1;
+
+  // Object is:
+  //  * If Type is Added or Modified: the new state of the object.
+  //  * If Type is Deleted: the state of the object immediately before deletion.
+  //  * If Type is Error: *Status is recommended; other types may make sense
+  //    depending on context.
+  optional k8s.io.apimachinery.pkg.runtime.RawExtension object = 2;
+}
+
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/group_version.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/group_version.go
new file mode 100644
index 0000000..bd4c6d9
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/group_version.go
@@ -0,0 +1,148 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupResource specifies a Group and a Resource, but does not force a version.  This is useful for identifying
+// concepts during lookup stages without having partially valid types
+//
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+type GroupResource struct {
+	Group    string `json:"group" protobuf:"bytes,1,opt,name=group"`
+	Resource string `json:"resource" protobuf:"bytes,2,opt,name=resource"`
+}
+
+func (gr *GroupResource) String() string {
+	if len(gr.Group) == 0 {
+		return gr.Resource
+	}
+	return gr.Resource + "." + gr.Group
+}
+
+// GroupVersionResource unambiguously identifies a resource.  It doesn't anonymously include GroupVersion
+// to avoid automatic coercion.  It doesn't use a GroupVersion to avoid custom marshalling
+//
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+type GroupVersionResource struct {
+	Group    string `json:"group" protobuf:"bytes,1,opt,name=group"`
+	Version  string `json:"version" protobuf:"bytes,2,opt,name=version"`
+	Resource string `json:"resource" protobuf:"bytes,3,opt,name=resource"`
+}
+
+func (gvr *GroupVersionResource) String() string {
+	return strings.Join([]string{gvr.Group, "/", gvr.Version, ", Resource=", gvr.Resource}, "")
+}
+
+// GroupKind specifies a Group and a Kind, but does not force a version.  This is useful for identifying
+// concepts during lookup stages without having partially valid types
+//
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+type GroupKind struct {
+	Group string `json:"group" protobuf:"bytes,1,opt,name=group"`
+	Kind  string `json:"kind" protobuf:"bytes,2,opt,name=kind"`
+}
+
+func (gk *GroupKind) String() string {
+	if len(gk.Group) == 0 {
+		return gk.Kind
+	}
+	return gk.Kind + "." + gk.Group
+}
+
+// GroupVersionKind unambiguously identifies a kind.  It doesn't anonymously include GroupVersion
+// to avoid automatic coercion.  It doesn't use a GroupVersion to avoid custom marshalling
+//
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+type GroupVersionKind struct {
+	Group   string `json:"group" protobuf:"bytes,1,opt,name=group"`
+	Version string `json:"version" protobuf:"bytes,2,opt,name=version"`
+	Kind    string `json:"kind" protobuf:"bytes,3,opt,name=kind"`
+}
+
+func (gvk GroupVersionKind) String() string {
+	return gvk.Group + "/" + gvk.Version + ", Kind=" + gvk.Kind
+}
+
+// GroupVersion contains the "group" and the "version", which uniquely identifies the API.
+//
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+type GroupVersion struct {
+	Group   string `json:"group" protobuf:"bytes,1,opt,name=group"`
+	Version string `json:"version" protobuf:"bytes,2,opt,name=version"`
+}
+
+// Empty returns true if group and version are empty
+func (gv GroupVersion) Empty() bool {
+	return len(gv.Group) == 0 && len(gv.Version) == 0
+}
+
+// String puts "group" and "version" into a single "group/version" string. For the legacy v1
+// it returns "v1".
+func (gv GroupVersion) String() string {
+	// special case the internal apiVersion for the legacy kube types
+	if gv.Empty() {
+		return ""
+	}
+
+	// special case of "v1" for backward compatibility
+	if len(gv.Group) == 0 && gv.Version == "v1" {
+		return gv.Version
+	}
+	if len(gv.Group) > 0 {
+		return gv.Group + "/" + gv.Version
+	}
+	return gv.Version
+}
+
+// MarshalJSON implements the json.Marshaller interface.
+func (gv GroupVersion) MarshalJSON() ([]byte, error) {
+	s := gv.String()
+	if strings.Count(s, "/") > 1 {
+		return []byte{}, fmt.Errorf("illegal GroupVersion %v: contains more than one /", s)
+	}
+	return json.Marshal(s)
+}
+
+func (gv *GroupVersion) unmarshal(value []byte) error {
+	var s string
+	if err := json.Unmarshal(value, &s); err != nil {
+		return err
+	}
+	parsed, err := schema.ParseGroupVersion(s)
+	if err != nil {
+		return err
+	}
+	gv.Group, gv.Version = parsed.Group, parsed.Version
+	return nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaller interface.
+func (gv *GroupVersion) UnmarshalJSON(value []byte) error {
+	return gv.unmarshal(value)
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface (as used by Ugorji's codec).
+func (gv *GroupVersion) UnmarshalText(value []byte) error {
+	return gv.unmarshal(value)
+}
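
The String and JSON methods above are what give apiVersion its familiar shape. A quick sketch of the round trip, including the legacy core-group special case (assumes encoding/json, fmt, and metav1 are imported):

gv := metav1.GroupVersion{Group: "apps", Version: "v1"}
fmt.Println(gv.String()) // apps/v1

legacy := metav1.GroupVersion{Version: "v1"}
fmt.Println(legacy.String()) // v1 (the empty group is the legacy core API)

var parsed metav1.GroupVersion
_ = json.Unmarshal([]byte(`"apps/v1"`), &parsed) // delegates to schema.ParseGroupVersion
fmt.Println(parsed.Group, parsed.Version) // apps v1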
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go
new file mode 100644
index 0000000..604129e
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go
@@ -0,0 +1,246 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"fmt"
+
+	"k8s.io/apimachinery/pkg/fields"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/selection"
+	"k8s.io/apimachinery/pkg/types"
+)
+
+// LabelSelectorAsSelector converts the LabelSelector api type into a struct that implements
+// labels.Selector
+// Note: This function should be kept in sync with the selector methods in pkg/labels/selector.go
+func LabelSelectorAsSelector(ps *LabelSelector) (labels.Selector, error) {
+	if ps == nil {
+		return labels.Nothing(), nil
+	}
+	if len(ps.MatchLabels)+len(ps.MatchExpressions) == 0 {
+		return labels.Everything(), nil
+	}
+	selector := labels.NewSelector()
+	for k, v := range ps.MatchLabels {
+		r, err := labels.NewRequirement(k, selection.Equals, []string{v})
+		if err != nil {
+			return nil, err
+		}
+		selector = selector.Add(*r)
+	}
+	for _, expr := range ps.MatchExpressions {
+		var op selection.Operator
+		switch expr.Operator {
+		case LabelSelectorOpIn:
+			op = selection.In
+		case LabelSelectorOpNotIn:
+			op = selection.NotIn
+		case LabelSelectorOpExists:
+			op = selection.Exists
+		case LabelSelectorOpDoesNotExist:
+			op = selection.DoesNotExist
+		default:
+			return nil, fmt.Errorf("%q is not a valid pod selector operator", expr.Operator)
+		}
+		r, err := labels.NewRequirement(expr.Key, op, append([]string(nil), expr.Values...))
+		if err != nil {
+			return nil, err
+		}
+		selector = selector.Add(*r)
+	}
+	return selector, nil
+}
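
A sketch of the typical use of this conversion: build the selector once, then match it against candidate label sets (assumes metav1 and k8s.io/apimachinery/pkg/labels are imported):

sel := &metav1.LabelSelector{
	MatchLabels: map[string]string{"app": "router"},
	MatchExpressions: []metav1.LabelSelectorRequirement{
		{Key: "tier", Operator: metav1.LabelSelectorOpIn, Values: []string{"core", "edge"}},
	},
}
s, err := metav1.LabelSelectorAsSelector(sel)
if err != nil {
	return err // invalid selector
}
fmt.Println(s.Matches(labels.Set{"app": "router", "tier": "core"})) // true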
+
+// LabelSelectorAsMap converts the LabelSelector API type into a map of strings, i.e. the
+// original structure of a label selector. Operators that cannot be converted into plain
+// labels (Exists, DoesNotExist, NotIn, and In with more than one value) will result in
+// an error.
+func LabelSelectorAsMap(ps *LabelSelector) (map[string]string, error) {
+	if ps == nil {
+		return nil, nil
+	}
+	selector := map[string]string{}
+	for k, v := range ps.MatchLabels {
+		selector[k] = v
+	}
+	for _, expr := range ps.MatchExpressions {
+		switch expr.Operator {
+		case LabelSelectorOpIn:
+			if len(expr.Values) != 1 {
+				return selector, fmt.Errorf("operator %q without a single value cannot be converted into the old label selector format", expr.Operator)
+			}
+			// Should we do anything in case this will override a previous key-value pair?
+			selector[expr.Key] = expr.Values[0]
+		case LabelSelectorOpNotIn, LabelSelectorOpExists, LabelSelectorOpDoesNotExist:
+			return selector, fmt.Errorf("operator %q cannot be converted into the old label selector format", expr.Operator)
+		default:
+			return selector, fmt.Errorf("%q is not a valid selector operator", expr.Operator)
+		}
+	}
+	return selector, nil
+}
+
+// ParseToLabelSelector parses a string representing a selector into a LabelSelector object.
+// Note: This function should be kept in sync with the parser in pkg/labels/selector.go
+func ParseToLabelSelector(selector string) (*LabelSelector, error) {
+	reqs, err := labels.ParseToRequirements(selector)
+	if err != nil {
+		return nil, fmt.Errorf("couldn't parse the selector string \"%s\": %v", selector, err)
+	}
+
+	labelSelector := &LabelSelector{
+		MatchLabels:      map[string]string{},
+		MatchExpressions: []LabelSelectorRequirement{},
+	}
+	for _, req := range reqs {
+		var op LabelSelectorOperator
+		switch req.Operator() {
+		case selection.Equals, selection.DoubleEquals:
+			vals := req.Values()
+			if vals.Len() != 1 {
+				return nil, fmt.Errorf("equals operator must have exactly one value")
+			}
+			val, ok := vals.PopAny()
+			if !ok {
+				return nil, fmt.Errorf("equals operator has exactly one value but it cannot be retrieved")
+			}
+			labelSelector.MatchLabels[req.Key()] = val
+			continue
+		case selection.In:
+			op = LabelSelectorOpIn
+		case selection.NotIn:
+			op = LabelSelectorOpNotIn
+		case selection.Exists:
+			op = LabelSelectorOpExists
+		case selection.DoesNotExist:
+			op = LabelSelectorOpDoesNotExist
+		case selection.GreaterThan, selection.LessThan:
+			// Adding a separate case for these operators to indicate that this is deliberate
+			return nil, fmt.Errorf("%q isn't supported in label selectors", req.Operator())
+		default:
+			return nil, fmt.Errorf("%q is not a valid label selector operator", req.Operator())
+		}
+		labelSelector.MatchExpressions = append(labelSelector.MatchExpressions, LabelSelectorRequirement{
+			Key:      req.Key(),
+			Operator: op,
+			Values:   req.Values().List(),
+		})
+	}
+	return labelSelector, nil
+}
+
+// SetAsLabelSelector converts the labels.Set object into a LabelSelector api object.
+func SetAsLabelSelector(ls labels.Set) *LabelSelector {
+	if ls == nil {
+		return nil
+	}
+
+	selector := &LabelSelector{
+		MatchLabels: make(map[string]string),
+	}
+	for label, value := range ls {
+		selector.MatchLabels[label] = value
+	}
+
+	return selector
+}
+
+// FormatLabelSelector converts a LabelSelector into a plain selector string.
+func FormatLabelSelector(labelSelector *LabelSelector) string {
+	selector, err := LabelSelectorAsSelector(labelSelector)
+	if err != nil {
+		return "<error>"
+	}
+
+	l := selector.String()
+	if len(l) == 0 {
+		l = "<none>"
+	}
+	return l
+}
+
+// ExtractGroupVersions flattens an APIGroupList into its list of "group/version" strings.
+func ExtractGroupVersions(l *APIGroupList) []string {
+	var groupVersions []string
+	for _, g := range l.Groups {
+		for _, gv := range g.Versions {
+			groupVersions = append(groupVersions, gv.GroupVersion)
+		}
+	}
+	return groupVersions
+}
+
+// HasAnnotation returns true if the passed-in annotation exists.
+func HasAnnotation(obj ObjectMeta, ann string) bool {
+	_, found := obj.Annotations[ann]
+	return found
+}
+
+// SetMetaDataAnnotation sets the annotation and value
+func SetMetaDataAnnotation(obj *ObjectMeta, ann string, value string) {
+	if obj.Annotations == nil {
+		obj.Annotations = make(map[string]string)
+	}
+	obj.Annotations[ann] = value
+}
+
+// SingleObject returns a ListOptions for watching a single object.
+func SingleObject(meta ObjectMeta) ListOptions {
+	return ListOptions{
+		FieldSelector:   fields.OneTermEqualSelector("metadata.name", meta.Name).String(),
+		ResourceVersion: meta.ResourceVersion,
+	}
+}
+
+// NewDeleteOptions returns a DeleteOptions indicating the resource should
+// be deleted within the specified grace period. Use zero to indicate
+// immediate deletion. If you would prefer to use the default grace period,
+// use &metav1.DeleteOptions{} directly.
+func NewDeleteOptions(grace int64) *DeleteOptions {
+	return &DeleteOptions{GracePeriodSeconds: &grace}
+}
+
+// NewPreconditionDeleteOptions returns a DeleteOptions with a UID precondition set.
+func NewPreconditionDeleteOptions(uid string) *DeleteOptions {
+	u := types.UID(uid)
+	p := Preconditions{UID: &u}
+	return &DeleteOptions{Preconditions: &p}
+}
+
+// NewUIDPreconditions returns a Preconditions with UID set.
+func NewUIDPreconditions(uid string) *Preconditions {
+	u := types.UID(uid)
+	return &Preconditions{UID: &u}
+}
+
+// HasObjectMetaSystemFieldValues returns true if fields that are managed by the system on ObjectMeta have values.
+func HasObjectMetaSystemFieldValues(meta Object) bool {
+	return !meta.GetCreationTimestamp().Time.IsZero() ||
+		len(meta.GetUID()) != 0
+}
+
+// ResetObjectMetaForStatus forces the meta fields for a status update to match the meta fields
+// for a pre-existing object. This is opt-in for new objects with Status subresource.
+func ResetObjectMetaForStatus(meta, existingMeta Object) {
+	meta.SetDeletionTimestamp(existingMeta.GetDeletionTimestamp())
+	meta.SetGeneration(existingMeta.GetGeneration())
+	meta.SetSelfLink(existingMeta.GetSelfLink())
+	meta.SetLabels(existingMeta.GetLabels())
+	meta.SetAnnotations(existingMeta.GetAnnotations())
+	meta.SetFinalizers(existingMeta.GetFinalizers())
+	meta.SetOwnerReferences(existingMeta.GetOwnerReferences())
+}
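
A sketch combining the delete helpers above: a graceful delete that only applies if the object is still the exact instance we observed (uid precondition); obj is any metav1.Object:

opts := metav1.NewDeleteOptions(30) // 30-second grace period
opts.Preconditions = metav1.NewUIDPreconditions(string(obj.GetUID()))
// Pass opts to the client's Delete call; if the UID no longer matches,
// the server rejects the delete instead of removing a replacement object.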
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/labels.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/labels.go
new file mode 100644
index 0000000..9b45145
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/labels.go
@@ -0,0 +1,55 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+// CloneSelectorAndAddLabel clones the given selector and returns a new selector with
+// the given key and value added. Returns the given selector unchanged if labelKey is empty.
+func CloneSelectorAndAddLabel(selector *LabelSelector, labelKey, labelValue string) *LabelSelector {
+	if labelKey == "" {
+		// Don't need to add a label.
+		return selector
+	}
+
+	// Clone.
+	newSelector := selector.DeepCopy()
+
+	if newSelector.MatchLabels == nil {
+		newSelector.MatchLabels = make(map[string]string)
+	}
+
+	newSelector.MatchLabels[labelKey] = labelValue
+
+	return newSelector
+}
+
+// AddLabelToSelector returns a selector with the given key and value added to the given selector's MatchLabels.
+func AddLabelToSelector(selector *LabelSelector, labelKey, labelValue string) *LabelSelector {
+	if labelKey == "" {
+		// Don't need to add a label.
+		return selector
+	}
+	if selector.MatchLabels == nil {
+		selector.MatchLabels = make(map[string]string)
+	}
+	selector.MatchLabels[labelKey] = labelValue
+	return selector
+}
+
+// SelectorHasLabel checks if the given selector contains the given label key in its MatchLabels
+func SelectorHasLabel(selector *LabelSelector, labelKey string) bool {
+	return len(selector.MatchLabels[labelKey]) > 0
+}
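
The practical difference between the two helpers above is mutation; a sketch:

base := &metav1.LabelSelector{MatchLabels: map[string]string{"app": "router"}}

cloned := metav1.CloneSelectorAndAddLabel(base, "track", "stable")
// base is untouched; cloned carries both labels.

same := metav1.AddLabelToSelector(base, "track", "canary")
// same is the same pointer as base; base now carries track=canary too.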
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go
new file mode 100644
index 0000000..ee14475
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go
@@ -0,0 +1,170 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/types"
+)
+
+// TODO: move this, Object, List, and Type to a different package
+type ObjectMetaAccessor interface {
+	GetObjectMeta() Object
+}
+
+// Object lets you work with object metadata from any of the versioned or
+// internal API objects. Attempting to set or retrieve a field on an object that does
+// not support that field (Name, UID, Namespace on lists) will be a no-op and return
+// a default value.
+type Object interface {
+	GetNamespace() string
+	SetNamespace(namespace string)
+	GetName() string
+	SetName(name string)
+	GetGenerateName() string
+	SetGenerateName(name string)
+	GetUID() types.UID
+	SetUID(uid types.UID)
+	GetResourceVersion() string
+	SetResourceVersion(version string)
+	GetGeneration() int64
+	SetGeneration(generation int64)
+	GetSelfLink() string
+	SetSelfLink(selfLink string)
+	GetCreationTimestamp() Time
+	SetCreationTimestamp(timestamp Time)
+	GetDeletionTimestamp() *Time
+	SetDeletionTimestamp(timestamp *Time)
+	GetDeletionGracePeriodSeconds() *int64
+	SetDeletionGracePeriodSeconds(*int64)
+	GetLabels() map[string]string
+	SetLabels(labels map[string]string)
+	GetAnnotations() map[string]string
+	SetAnnotations(annotations map[string]string)
+	GetInitializers() *Initializers
+	SetInitializers(initializers *Initializers)
+	GetFinalizers() []string
+	SetFinalizers(finalizers []string)
+	GetOwnerReferences() []OwnerReference
+	SetOwnerReferences([]OwnerReference)
+	GetClusterName() string
+	SetClusterName(clusterName string)
+}
+
+// ListMetaAccessor retrieves the list interface from an object
+type ListMetaAccessor interface {
+	GetListMeta() ListInterface
+}
+
+// Common lets you work with core metadata from any of the versioned or
+// internal API objects. Attempting to set or retrieve a field on an object that does
+// not support that field will be a no-op and return a default value.
+// TODO: move this, and TypeMeta and ListMeta, to a different package
+type Common interface {
+	GetResourceVersion() string
+	SetResourceVersion(version string)
+	GetSelfLink() string
+	SetSelfLink(selfLink string)
+}
+
+// ListInterface lets you work with list metadata from any of the versioned or
+// internal API objects. Attempting to set or retrieve a field on an object that does
+// not support that field will be a no-op and return a default value.
+// TODO: move this, and TypeMeta and ListMeta, to a different package
+type ListInterface interface {
+	GetResourceVersion() string
+	SetResourceVersion(version string)
+	GetSelfLink() string
+	SetSelfLink(selfLink string)
+	GetContinue() string
+	SetContinue(c string)
+}
+
+// Type exposes the type and APIVersion of versioned or internal API objects.
+// TODO: move this, and TypeMeta and ListMeta, to a different package
+type Type interface {
+	GetAPIVersion() string
+	SetAPIVersion(version string)
+	GetKind() string
+	SetKind(kind string)
+}
+
+func (meta *ListMeta) GetResourceVersion() string        { return meta.ResourceVersion }
+func (meta *ListMeta) SetResourceVersion(version string) { meta.ResourceVersion = version }
+func (meta *ListMeta) GetSelfLink() string               { return meta.SelfLink }
+func (meta *ListMeta) SetSelfLink(selfLink string)       { meta.SelfLink = selfLink }
+func (meta *ListMeta) GetContinue() string               { return meta.Continue }
+func (meta *ListMeta) SetContinue(c string)              { meta.Continue = c }
+
+func (obj *TypeMeta) GetObjectKind() schema.ObjectKind { return obj }
+
+// SetGroupVersionKind satisfies the ObjectKind interface for all objects that embed TypeMeta
+func (obj *TypeMeta) SetGroupVersionKind(gvk schema.GroupVersionKind) {
+	obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind()
+}
+
+// GroupVersionKind satisfies the ObjectKind interface for all objects that embed TypeMeta
+func (obj *TypeMeta) GroupVersionKind() schema.GroupVersionKind {
+	return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind)
+}
+
+func (obj *ListMeta) GetListMeta() ListInterface { return obj }
+
+func (obj *ObjectMeta) GetObjectMeta() Object { return obj }
+
+// The following accessors implement metav1.Object for any object with an ObjectMeta typed
+// field, allowing fast, direct access to metadata fields for API objects.
+func (meta *ObjectMeta) GetNamespace() string                { return meta.Namespace }
+func (meta *ObjectMeta) SetNamespace(namespace string)       { meta.Namespace = namespace }
+func (meta *ObjectMeta) GetName() string                     { return meta.Name }
+func (meta *ObjectMeta) SetName(name string)                 { meta.Name = name }
+func (meta *ObjectMeta) GetGenerateName() string             { return meta.GenerateName }
+func (meta *ObjectMeta) SetGenerateName(generateName string) { meta.GenerateName = generateName }
+func (meta *ObjectMeta) GetUID() types.UID                   { return meta.UID }
+func (meta *ObjectMeta) SetUID(uid types.UID)                { meta.UID = uid }
+func (meta *ObjectMeta) GetResourceVersion() string          { return meta.ResourceVersion }
+func (meta *ObjectMeta) SetResourceVersion(version string)   { meta.ResourceVersion = version }
+func (meta *ObjectMeta) GetGeneration() int64                { return meta.Generation }
+func (meta *ObjectMeta) SetGeneration(generation int64)      { meta.Generation = generation }
+func (meta *ObjectMeta) GetSelfLink() string                 { return meta.SelfLink }
+func (meta *ObjectMeta) SetSelfLink(selfLink string)         { meta.SelfLink = selfLink }
+func (meta *ObjectMeta) GetCreationTimestamp() Time          { return meta.CreationTimestamp }
+func (meta *ObjectMeta) SetCreationTimestamp(creationTimestamp Time) {
+	meta.CreationTimestamp = creationTimestamp
+}
+func (meta *ObjectMeta) GetDeletionTimestamp() *Time { return meta.DeletionTimestamp }
+func (meta *ObjectMeta) SetDeletionTimestamp(deletionTimestamp *Time) {
+	meta.DeletionTimestamp = deletionTimestamp
+}
+func (meta *ObjectMeta) GetDeletionGracePeriodSeconds() *int64 { return meta.DeletionGracePeriodSeconds }
+func (meta *ObjectMeta) SetDeletionGracePeriodSeconds(deletionGracePeriodSeconds *int64) {
+	meta.DeletionGracePeriodSeconds = deletionGracePeriodSeconds
+}
+func (meta *ObjectMeta) GetLabels() map[string]string                 { return meta.Labels }
+func (meta *ObjectMeta) SetLabels(labels map[string]string)           { meta.Labels = labels }
+func (meta *ObjectMeta) GetAnnotations() map[string]string            { return meta.Annotations }
+func (meta *ObjectMeta) SetAnnotations(annotations map[string]string) { meta.Annotations = annotations }
+func (meta *ObjectMeta) GetInitializers() *Initializers               { return meta.Initializers }
+func (meta *ObjectMeta) SetInitializers(initializers *Initializers)   { meta.Initializers = initializers }
+func (meta *ObjectMeta) GetFinalizers() []string                      { return meta.Finalizers }
+func (meta *ObjectMeta) SetFinalizers(finalizers []string)            { meta.Finalizers = finalizers }
+func (meta *ObjectMeta) GetOwnerReferences() []OwnerReference         { return meta.OwnerReferences }
+func (meta *ObjectMeta) SetOwnerReferences(references []OwnerReference) {
+	meta.OwnerReferences = references
+}
+func (meta *ObjectMeta) GetClusterName() string            { return meta.ClusterName }
+func (meta *ObjectMeta) SetClusterName(clusterName string) { meta.ClusterName = clusterName }
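
Because any type embedding ObjectMeta satisfies the Object interface above, generic code can manipulate metadata without knowing the concrete type. A minimal sketch, assuming metav1 is imported; the helper name is illustrative:

// ensureLabel sets key=value on any API object's labels via the metav1.Object interface.
func ensureLabel(obj metav1.Object, key, value string) {
	labels := obj.GetLabels()
	if labels == nil {
		labels = map[string]string{}
	}
	labels[key] = value
	obj.SetLabels(labels)
}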
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go
new file mode 100644
index 0000000..6f6c511
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go
@@ -0,0 +1,183 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"encoding/json"
+	"time"
+
+	"github.com/google/gofuzz"
+)
+
+const RFC3339Micro = "2006-01-02T15:04:05.000000Z07:00"
+
+// MicroTime is version of Time with microsecond level precision.
+//
+// +protobuf.options.marshal=false
+// +protobuf.as=Timestamp
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+type MicroTime struct {
+	time.Time `protobuf:"-"`
+}
+
+// DeepCopyInto writes a deep copy of the receiver into out.  The underlying time.Time
+// type is effectively immutable in the time API, so it is safe to
+// copy-by-assign, despite the presence of (unexported) Pointer fields.
+func (t *MicroTime) DeepCopyInto(out *MicroTime) {
+	*out = *t
+}
+
+// String returns the representation of the time.
+func (t MicroTime) String() string {
+	return t.Time.String()
+}
+
+// NewMicroTime returns a wrapped instance of the provided time
+func NewMicroTime(time time.Time) MicroTime {
+	return MicroTime{time}
+}
+
+// DateMicro returns the MicroTime corresponding to the supplied parameters
+// by wrapping time.Date.
+func DateMicro(year int, month time.Month, day, hour, min, sec, nsec int, loc *time.Location) MicroTime {
+	return MicroTime{time.Date(year, month, day, hour, min, sec, nsec, loc)}
+}
+
+// NowMicro returns the current local time.
+func NowMicro() MicroTime {
+	return MicroTime{time.Now()}
+}
+
+// IsZero returns true if the value is nil or time is zero.
+func (t *MicroTime) IsZero() bool {
+	if t == nil {
+		return true
+	}
+	return t.Time.IsZero()
+}
+
+// Before reports whether the time instant t is before u.
+func (t *MicroTime) Before(u *MicroTime) bool {
+	return t.Time.Before(u.Time)
+}
+
+// Equal reports whether the time instant t is equal to u.
+func (t *MicroTime) Equal(u *MicroTime) bool {
+	return t.Time.Equal(u.Time)
+}
+
+// BeforeTime reports whether the time instant t is before the second-level-precision time u.
+func (t *MicroTime) BeforeTime(u *Time) bool {
+	return t.Time.Before(u.Time)
+}
+
+// EqualTime reports whether the time instant t is equal to the second-level-precision time u.
+func (t *MicroTime) EqualTime(u *Time) bool {
+	return t.Time.Equal(u.Time)
+}
+
+// UnixMicro returns the local time corresponding to the given Unix time
+// by wrapping time.Unix.
+func UnixMicro(sec int64, nsec int64) MicroTime {
+	return MicroTime{time.Unix(sec, nsec)}
+}
+
+// UnmarshalJSON implements the json.Unmarshaller interface.
+func (t *MicroTime) UnmarshalJSON(b []byte) error {
+	if len(b) == 4 && string(b) == "null" {
+		t.Time = time.Time{}
+		return nil
+	}
+
+	var str string
+	err := json.Unmarshal(b, &str)
+	if err != nil {
+		return err
+	}
+
+	pt, err := time.Parse(RFC3339Micro, str)
+	if err != nil {
+		return err
+	}
+
+	t.Time = pt.Local()
+	return nil
+}
+
+// UnmarshalQueryParameter converts from a URL query parameter value to an object
+func (t *MicroTime) UnmarshalQueryParameter(str string) error {
+	if len(str) == 0 {
+		t.Time = time.Time{}
+		return nil
+	}
+	// Tolerate requests from older clients that used JSON serialization to build query params
+	if len(str) == 4 && str == "null" {
+		t.Time = time.Time{}
+		return nil
+	}
+
+	pt, err := time.Parse(RFC3339Micro, str)
+	if err != nil {
+		return err
+	}
+
+	t.Time = pt.Local()
+	return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (t MicroTime) MarshalJSON() ([]byte, error) {
+	if t.IsZero() {
+		// Encode unset/nil objects as JSON's "null".
+		return []byte("null"), nil
+	}
+
+	return json.Marshal(t.UTC().Format(RFC3339Micro))
+}
+
+// OpenAPISchemaType is used by the kube-openapi generator when constructing
+// the OpenAPI spec of this type.
+//
+// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators
+func (_ MicroTime) OpenAPISchemaType() []string { return []string{"string"} }
+
+// OpenAPISchemaFormat is used by the kube-openapi generator when constructing
+// the OpenAPI spec of this type.
+func (_ MicroTime) OpenAPISchemaFormat() string { return "date-time" }
+
+// MarshalQueryParameter converts to a URL query parameter value
+func (t MicroTime) MarshalQueryParameter() (string, error) {
+	if t.IsZero() {
+		// Encode unset/nil objects as an empty string
+		return "", nil
+	}
+
+	return t.UTC().Format(RFC3339Micro), nil
+}
+
+// Fuzz satisfies fuzz.Interface.
+func (t *MicroTime) Fuzz(c fuzz.Continue) {
+	if t == nil {
+		return
+	}
+	// Allow for about 1000 years of randomness. Accurate to a tenth of a
+	// microsecond. Leave off nanoseconds because JSON doesn't
+	// represent them, so they can't round-trip properly.
+	t.Time = time.Unix(c.Rand.Int63n(1000*365*24*60*60), 1000*c.Rand.Int63n(1000000))
+}
+
+var _ fuzz.Interface = &MicroTime{}
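
A sketch of the JSON behaviour defined above: zero values encode as null, anything else as RFC3339 with microsecond precision in UTC (assumes encoding/json, fmt, time, and metav1 are imported):

b, _ := json.Marshal(metav1.NewMicroTime(time.Date(2019, 1, 2, 3, 4, 5, 678000000, time.UTC)))
fmt.Println(string(b)) // "2019-01-02T03:04:05.678000Z"

var zero metav1.MicroTime
b, _ = json.Marshal(zero)
fmt.Println(string(b)) // null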
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_proto.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_proto.go
new file mode 100644
index 0000000..14841be
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_proto.go
@@ -0,0 +1,72 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"time"
+)
+
+// Timestamp is declared in time_proto.go
+
+// ProtoMicroTime returns the MicroTime as a new Timestamp value.
+func (m *MicroTime) ProtoMicroTime() *Timestamp {
+	if m == nil {
+		return &Timestamp{}
+	}
+	return &Timestamp{
+		Seconds: m.Time.Unix(),
+		Nanos:   int32(m.Time.Nanosecond()),
+	}
+}
+
+// Size implements the protobuf marshalling interface.
+func (m *MicroTime) Size() (n int) {
+	if m == nil || m.Time.IsZero() {
+		return 0
+	}
+	return m.ProtoMicroTime().Size()
+}
+
+// Unmarshal implements the protobuf marshalling interface.
+func (m *MicroTime) Unmarshal(data []byte) error {
+	if len(data) == 0 {
+		m.Time = time.Time{}
+		return nil
+	}
+	p := Timestamp{}
+	if err := p.Unmarshal(data); err != nil {
+		return err
+	}
+	m.Time = time.Unix(p.Seconds, int64(p.Nanos)).Local()
+	return nil
+}
+
+// Marshal implements the protobuf marshalling interface.
+func (m *MicroTime) Marshal() (data []byte, err error) {
+	if m == nil || m.Time.IsZero() {
+		return nil, nil
+	}
+	return m.ProtoMicroTime().Marshal()
+}
+
+// MarshalTo implements the protobuf marshalling interface.
+func (m *MicroTime) MarshalTo(data []byte) (int, error) {
+	if m == nil || m.Time.IsZero() {
+		return 0, nil
+	}
+	return m.ProtoMicroTime().MarshalTo(data)
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go
new file mode 100644
index 0000000..0827729
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go
@@ -0,0 +1,97 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+)
+
+// GroupName is the group name for this API.
+const GroupName = "meta.k8s.io"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
+
+// Unversioned is group version for unversioned API objects
+// TODO: this should be v1 probably
+var Unversioned = schema.GroupVersion{Group: "", Version: "v1"}
+
+// WatchEventKind is name reserved for serializing watch events.
+const WatchEventKind = "WatchEvent"
+
+// Kind takes an unqualified kind and returns a Group qualified GroupKind
+func Kind(kind string) schema.GroupKind {
+	return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// AddToGroupVersion registers common meta types into schemas.
+func AddToGroupVersion(scheme *runtime.Scheme, groupVersion schema.GroupVersion) {
+	scheme.AddKnownTypeWithName(groupVersion.WithKind(WatchEventKind), &WatchEvent{})
+	scheme.AddKnownTypeWithName(
+		schema.GroupVersion{Group: groupVersion.Group, Version: runtime.APIVersionInternal}.WithKind(WatchEventKind),
+		&InternalEvent{},
+	)
+	// Supports legacy code paths, most callers should use metav1.ParameterCodec for now
+	scheme.AddKnownTypes(groupVersion,
+		&ListOptions{},
+		&ExportOptions{},
+		&GetOptions{},
+		&DeleteOptions{},
+		&CreateOptions{},
+		&UpdateOptions{},
+	)
+	utilruntime.Must(scheme.AddConversionFuncs(
+		Convert_v1_WatchEvent_To_watch_Event,
+		Convert_v1_InternalEvent_To_v1_WatchEvent,
+		Convert_watch_Event_To_v1_WatchEvent,
+		Convert_v1_WatchEvent_To_v1_InternalEvent,
+	))
+	// Register Unversioned types under their own special group
+	scheme.AddUnversionedTypes(Unversioned,
+		&Status{},
+		&APIVersions{},
+		&APIGroupList{},
+		&APIGroup{},
+		&APIResourceList{},
+	)
+
+	// register manually. This usually goes through the SchemeBuilder, which we cannot use here.
+	utilruntime.Must(AddConversionFuncs(scheme))
+	utilruntime.Must(RegisterDefaults(scheme))
+}
+
+// scheme is the registry for the common types that adhere to the meta v1 API spec.
+var scheme = runtime.NewScheme()
+
+// ParameterCodec knows about query parameters used with the meta v1 API spec.
+var ParameterCodec = runtime.NewParameterCodec(scheme)
+
+func init() {
+	scheme.AddUnversionedTypes(SchemeGroupVersion,
+		&ListOptions{},
+		&ExportOptions{},
+		&GetOptions{},
+		&DeleteOptions{},
+		&CreateOptions{},
+		&UpdateOptions{},
+	)
+
+	// Register manually. This usually goes through the SchemeBuilder, which we cannot use here.
+	utilruntime.Must(RegisterDefaults(scheme))
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go
new file mode 100644
index 0000000..efff656
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go
@@ -0,0 +1,185 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"encoding/json"
+	"time"
+
+	"github.com/google/gofuzz"
+)
+
+// Time is a wrapper around time.Time which supports correct
+// marshaling to YAML and JSON.  Wrappers are provided for many
+// of the factory methods that the time package offers.
+//
+// +protobuf.options.marshal=false
+// +protobuf.as=Timestamp
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+type Time struct {
+	time.Time `protobuf:"-"`
+}
+
+// DeepCopyInto creates a deep-copy of the Time value.  The underlying time.Time
+// type is effectively immutable in the time API, so it is safe to
+// copy-by-assign, despite the presence of (unexported) Pointer fields.
+func (t *Time) DeepCopyInto(out *Time) {
+	*out = *t
+}
+
+// String returns the representation of the time.
+func (t Time) String() string {
+	return t.Time.String()
+}
+
+// NewTime returns a wrapped instance of the provided time.
+func NewTime(time time.Time) Time {
+	return Time{time}
+}
+
+// Date returns the Time corresponding to the supplied parameters
+// by wrapping time.Date.
+func Date(year int, month time.Month, day, hour, min, sec, nsec int, loc *time.Location) Time {
+	return Time{time.Date(year, month, day, hour, min, sec, nsec, loc)}
+}
+
+// Now returns the current local time.
+func Now() Time {
+	return Time{time.Now()}
+}
+
+// IsZero returns true if the value is nil or the time is zero.
+func (t *Time) IsZero() bool {
+	if t == nil {
+		return true
+	}
+	return t.Time.IsZero()
+}
+
+// Before reports whether the time instant t is before u.
+func (t *Time) Before(u *Time) bool {
+	return t.Time.Before(u.Time)
+}
+
+// Equal reports whether the time instant t is equal to u.
+func (t *Time) Equal(u *Time) bool {
+	if t == nil && u == nil {
+		return true
+	}
+	if t != nil && u != nil {
+		return t.Time.Equal(u.Time)
+	}
+	return false
+}
+
+// Unix returns the local time corresponding to the given Unix time
+// by wrapping time.Unix.
+func Unix(sec int64, nsec int64) Time {
+	return Time{time.Unix(sec, nsec)}
+}
+
+// Rfc3339Copy returns a copy of the Time at second-level precision.
+func (t Time) Rfc3339Copy() Time {
+	copied, _ := time.Parse(time.RFC3339, t.Format(time.RFC3339))
+	return Time{copied}
+}
+
+// UnmarshalJSON implements the json.Unmarshaller interface.
+func (t *Time) UnmarshalJSON(b []byte) error {
+	if len(b) == 4 && string(b) == "null" {
+		t.Time = time.Time{}
+		return nil
+	}
+
+	var str string
+	err := json.Unmarshal(b, &str)
+	if err != nil {
+		return err
+	}
+
+	pt, err := time.Parse(time.RFC3339, str)
+	if err != nil {
+		return err
+	}
+
+	t.Time = pt.Local()
+	return nil
+}
+
+// UnmarshalQueryParameter converts from a URL query parameter value to an object
+func (t *Time) UnmarshalQueryParameter(str string) error {
+	if len(str) == 0 {
+		t.Time = time.Time{}
+		return nil
+	}
+	// Tolerate requests from older clients that used JSON serialization to build query params
+	if len(str) == 4 && str == "null" {
+		t.Time = time.Time{}
+		return nil
+	}
+
+	pt, err := time.Parse(time.RFC3339, str)
+	if err != nil {
+		return err
+	}
+
+	t.Time = pt.Local()
+	return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (t Time) MarshalJSON() ([]byte, error) {
+	if t.IsZero() {
+		// Encode unset/nil objects as JSON's "null".
+		return []byte("null"), nil
+	}
+
+	return json.Marshal(t.UTC().Format(time.RFC3339))
+}
+
+// OpenAPISchemaType is used by the kube-openapi generator when constructing
+// the OpenAPI spec of this type.
+//
+// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators
+func (_ Time) OpenAPISchemaType() []string { return []string{"string"} }
+
+// OpenAPISchemaFormat is used by the kube-openapi generator when constructing
+// the OpenAPI spec of this type.
+func (_ Time) OpenAPISchemaFormat() string { return "date-time" }
+
+// MarshalQueryParameter converts to a URL query parameter value
+func (t Time) MarshalQueryParameter() (string, error) {
+	if t.IsZero() {
+		// Encode unset/nil objects as an empty string
+		return "", nil
+	}
+
+	return t.UTC().Format(time.RFC3339), nil
+}
+
+// Fuzz satisfies fuzz.Interface.
+func (t *Time) Fuzz(c fuzz.Continue) {
+	if t == nil {
+		return
+	}
+	// Allow for about 1000 years of randomness.  Leave off nanoseconds
+	// because JSON doesn't represent them so they can't round-trip
+	// properly.
+	t.Time = time.Unix(c.Rand.Int63n(1000*365*24*60*60), 0)
+}
+
+var _ fuzz.Interface = &Time{}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go
new file mode 100644
index 0000000..ed72186
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go
@@ -0,0 +1,92 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"time"
+)
+
+// Timestamp is a struct that is equivalent to Time, but intended for
+// protobuf marshalling/unmarshalling. It is generated into a serialization
+// that matches Time. Do not use in Go structs.
+type Timestamp struct {
+	// Represents seconds of UTC time since Unix epoch
+	// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
+	// 9999-12-31T23:59:59Z inclusive.
+	Seconds int64 `json:"seconds" protobuf:"varint,1,opt,name=seconds"`
+	// Non-negative fractions of a second at nanosecond resolution. Negative
+	// second values with fractions must still have non-negative nanos values
+	// that count forward in time. Must be from 0 to 999,999,999
+	// inclusive. This field may be limited in precision depending on context.
+	Nanos int32 `json:"nanos" protobuf:"varint,2,opt,name=nanos"`
+}
+
+// ProtoTime returns the Time as a new Timestamp value.
+func (m *Time) ProtoTime() *Timestamp {
+	if m == nil {
+		return &Timestamp{}
+	}
+	return &Timestamp{
+		Seconds: m.Time.Unix(),
+		// leaving this here for the record.  our JSON only handled seconds, so this results in writes by
+		// protobuf clients storing values that aren't read by json clients, which results in unexpected
+		// field mutation, which fails various validation and equality code.
+		// Nanos:   int32(m.Time.Nanosecond()),
+	}
+}
+
+// Size implements the protobuf marshalling interface.
+func (m *Time) Size() (n int) {
+	if m == nil || m.Time.IsZero() {
+		return 0
+	}
+	return m.ProtoTime().Size()
+}
+
+// Unmarshal implements the protobuf marshalling interface.
+func (m *Time) Unmarshal(data []byte) error {
+	if len(data) == 0 {
+		m.Time = time.Time{}
+		return nil
+	}
+	p := Timestamp{}
+	if err := p.Unmarshal(data); err != nil {
+		return err
+	}
+	// leaving this here for the record.  our JSON only handled seconds, so this results in writes by
+	// protobuf clients storing values that aren't read by json clients, which results in unexpected
+	// field mutation, which fails various validation and equality code.
+	// m.Time = time.Unix(p.Seconds, int64(p.Nanos)).Local()
+	m.Time = time.Unix(p.Seconds, int64(0)).Local()
+	return nil
+}
+
+// Marshal implements the protobuf marshalling interface.
+func (m *Time) Marshal() (data []byte, err error) {
+	if m == nil || m.Time.IsZero() {
+		return nil, nil
+	}
+	return m.ProtoTime().Marshal()
+}
+
+// MarshalTo implements the protobuf marshalling interface.
+func (m *Time) MarshalTo(data []byte) (int, error) {
+	if m == nil || m.Time.IsZero() {
+		return 0, nil
+	}
+	return m.ProtoTime().MarshalTo(data)
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go
new file mode 100644
index 0000000..8f488ba
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go
@@ -0,0 +1,1005 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package v1 contains API types that are common to all versions.
+//
+// The package contains two categories of types:
+// - external (serialized) types that lack their own version (e.g. TypeMeta)
+// - internal (never-serialized) types that are needed by several different
+//   api groups, and so live here, to avoid duplication and/or import loops
+//   (e.g. LabelSelector).
+// In the future, we will probably move these categories of objects into
+// separate packages.
+package v1
+
+import (
+	"fmt"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
+)
+
+// TypeMeta describes an individual object in an API response or request
+// with strings representing the type of the object and its API schema version.
+// Structures that are versioned or persisted should inline TypeMeta.
+//
+// +k8s:deepcopy-gen=false
+type TypeMeta struct {
+	// Kind is a string value representing the REST resource this object represents.
+	// Servers may infer this from the endpoint the client submits requests to.
+	// Cannot be updated.
+	// In CamelCase.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+	// +optional
+	Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"`
+
+	// APIVersion defines the versioned schema of this representation of an object.
+	// Servers should convert recognized schemas to the latest internal value, and
+	// may reject unrecognized values.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
+	// +optional
+	APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,2,opt,name=apiVersion"`
+}
+
+// ListMeta describes metadata that synthetic resources must have, including lists and
+// various status objects. A resource may have only one of {ObjectMeta, ListMeta}.
+type ListMeta struct {
+	// selfLink is a URL representing this object.
+	// Populated by the system.
+	// Read-only.
+	// +optional
+	SelfLink string `json:"selfLink,omitempty" protobuf:"bytes,1,opt,name=selfLink"`
+
+	// String that identifies the server's internal version of this object that
+	// can be used by clients to determine when objects have changed.
+	// Value must be treated as opaque by clients and passed unmodified back to the server.
+	// Populated by the system.
+	// Read-only.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency
+	// +optional
+	ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,2,opt,name=resourceVersion"`
+
+	// continue may be set if the user set a limit on the number of items returned, and indicates that
+	// the server has more data available. The value is opaque and may be used to issue another request
+	// to the endpoint that served this list to retrieve the next set of available objects. Continuing a
+	// consistent list may not be possible if the server configuration has changed or more than a few
+	// minutes have passed. The resourceVersion field returned when using this continue value will be
+	// identical to the value in the first response, unless you have received this token from an error
+	// message.
+	Continue string `json:"continue,omitempty" protobuf:"bytes,3,opt,name=continue"`
+}
+
+// These are internal finalizer values for Kubernetes-like APIs; they must be qualified names unless defined here.
+const (
+	FinalizerOrphanDependents string = "orphan"
+	FinalizerDeleteDependents string = "foregroundDeletion"
+)
+
+// ObjectMeta is metadata that all persisted resources must have, which includes all objects
+// users must create.
+type ObjectMeta struct {
+	// Name must be unique within a namespace. Is required when creating resources, although
+	// some resources may allow a client to request the generation of an appropriate name
+	// automatically. Name is primarily intended for creation idempotence and configuration
+	// definition.
+	// Cannot be updated.
+	// More info: http://kubernetes.io/docs/user-guide/identifiers#names
+	// +optional
+	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
+
+	// GenerateName is an optional prefix, used by the server, to generate a unique
+	// name ONLY IF the Name field has not been provided.
+	// If this field is used, the name returned to the client will be different
+	// than the name passed. This value will also be combined with a unique suffix.
+	// The provided value has the same validation rules as the Name field,
+	// and may be truncated by the length of the suffix required to make the value
+	// unique on the server.
+	//
+	// If this field is specified and the generated name exists, the server will
+	// NOT return a 409 - instead, it will either return 201 Created or 500 with Reason
+	// ServerTimeout indicating a unique name could not be found in the time allotted, and the client
+	// should retry (optionally after the time indicated in the Retry-After header).
+	//
+	// Applied only if Name is not specified.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency
+	// +optional
+	GenerateName string `json:"generateName,omitempty" protobuf:"bytes,2,opt,name=generateName"`
+
+	// Namespace defines the space within which each name must be unique. An empty namespace is
+	// equivalent to the "default" namespace, but "default" is the canonical representation.
+	// Not all objects are required to be scoped to a namespace - the value of this field for
+	// those objects will be empty.
+	//
+	// Must be a DNS_LABEL.
+	// Cannot be updated.
+	// More info: http://kubernetes.io/docs/user-guide/namespaces
+	// +optional
+	Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"`
+
+	// SelfLink is a URL representing this object.
+	// Populated by the system.
+	// Read-only.
+	// +optional
+	SelfLink string `json:"selfLink,omitempty" protobuf:"bytes,4,opt,name=selfLink"`
+
+	// UID is the unique in time and space value for this object. It is typically generated by
+	// the server on successful creation of a resource and is not allowed to change on PUT
+	// operations.
+	//
+	// Populated by the system.
+	// Read-only.
+	// More info: http://kubernetes.io/docs/user-guide/identifiers#uids
+	// +optional
+	UID types.UID `json:"uid,omitempty" protobuf:"bytes,5,opt,name=uid,casttype=k8s.io/kubernetes/pkg/types.UID"`
+
+	// An opaque value that represents the internal version of this object that can
+	// be used by clients to determine when objects have changed. May be used for optimistic
+	// concurrency, change detection, and the watch operation on a resource or set of resources.
+	// Clients must treat these values as opaque and pass them unmodified back to the server.
+	// They may only be valid for a particular resource or set of resources.
+	//
+	// Populated by the system.
+	// Read-only.
+	// Value must be treated as opaque by clients.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency
+	// +optional
+	ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"`
+
+	// A sequence number representing a specific generation of the desired state.
+	// Populated by the system. Read-only.
+	// +optional
+	Generation int64 `json:"generation,omitempty" protobuf:"varint,7,opt,name=generation"`
+
+	// CreationTimestamp is a timestamp representing the server time when this object was
+	// created. It is not guaranteed to be set in happens-before order across separate operations.
+	// Clients may not set this value. It is represented in RFC3339 form and is in UTC.
+	//
+	// Populated by the system.
+	// Read-only.
+	// Null for lists.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	CreationTimestamp Time `json:"creationTimestamp,omitempty" protobuf:"bytes,8,opt,name=creationTimestamp"`
+
+	// DeletionTimestamp is the RFC 3339 date and time at which this resource will be deleted. This
+	// field is set by the server when a graceful deletion is requested by the user, and is not
+	// directly settable by a client. The resource is expected to be deleted (no longer visible
+	// from resource lists, and not reachable by name) after the time in this field, once the
+	// finalizers list is empty. As long as the finalizers list contains items, deletion is blocked.
+	// Once the deletionTimestamp is set, this value may not be unset or be set further into the
+	// future, although it may be shortened or the resource may be deleted prior to this time.
+	// For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react
+	// by sending a graceful termination signal to the containers in the pod. After that 30 seconds,
+	// the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup,
+	// remove the pod from the API. In the presence of network partitions, this object may still
+	// exist after this timestamp, until an administrator or automated process can determine the
+	// resource is fully terminated.
+	// If not set, graceful deletion of the object has not been requested.
+	//
+	// Populated by the system when a graceful deletion is requested.
+	// Read-only.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	DeletionTimestamp *Time `json:"deletionTimestamp,omitempty" protobuf:"bytes,9,opt,name=deletionTimestamp"`
+
+	// Number of seconds allowed for this object to gracefully terminate before
+	// it will be removed from the system. Only set when deletionTimestamp is also set.
+	// May only be shortened.
+	// Read-only.
+	// +optional
+	DeletionGracePeriodSeconds *int64 `json:"deletionGracePeriodSeconds,omitempty" protobuf:"varint,10,opt,name=deletionGracePeriodSeconds"`
+
+	// Map of string keys and values that can be used to organize and categorize
+	// (scope and select) objects. May match selectors of replication controllers
+	// and services.
+	// More info: http://kubernetes.io/docs/user-guide/labels
+	// +optional
+	Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,11,rep,name=labels"`
+
+	// Annotations is an unstructured key value map stored with a resource that may be
+	// set by external tools to store and retrieve arbitrary metadata. They are not
+	// queryable and should be preserved when modifying objects.
+	// More info: http://kubernetes.io/docs/user-guide/annotations
+	// +optional
+	Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,12,rep,name=annotations"`
+
+	// List of objects depended on by this object. If ALL objects in the list have
+	// been deleted, this object will be garbage collected. If this object is managed by a controller,
+	// then an entry in this list will point to this controller, with the controller field set to true.
+	// There cannot be more than one managing controller.
+	// +optional
+	// +patchMergeKey=uid
+	// +patchStrategy=merge
+	OwnerReferences []OwnerReference `json:"ownerReferences,omitempty" patchStrategy:"merge" patchMergeKey:"uid" protobuf:"bytes,13,rep,name=ownerReferences"`
+
+	// An initializer is a controller which enforces some system invariant at object creation time.
+	// This field is a list of initializers that have not yet acted on this object. If nil or empty,
+	// this object has been completely initialized. Otherwise, the object is considered uninitialized
+	// and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to
+	// observe uninitialized objects.
+	//
+	// When an object is created, the system will populate this list with the current set of initializers.
+	// Only privileged users may set or modify this list. Once it is empty, it may not be modified further
+	// by any user.
+	Initializers *Initializers `json:"initializers,omitempty" protobuf:"bytes,16,opt,name=initializers"`
+
+	// Must be empty before the object is deleted from the registry. Each entry
+	// is an identifier for the responsible component that will remove the entry
+	// from the list. If the deletionTimestamp of the object is non-nil, entries
+	// in this list can only be removed.
+	// +optional
+	// +patchStrategy=merge
+	Finalizers []string `json:"finalizers,omitempty" patchStrategy:"merge" protobuf:"bytes,14,rep,name=finalizers"`
+
+	// The name of the cluster which the object belongs to.
+	// This is used to distinguish resources with same name and namespace in different clusters.
+	// This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.
+	// +optional
+	ClusterName string `json:"clusterName,omitempty" protobuf:"bytes,15,opt,name=clusterName"`
+}
+
+// Initializers tracks the progress of initialization.
+type Initializers struct {
+	// Pending is a list of initializers that must execute in order before this object is visible.
+	// When the last pending initializer is removed, and no failing result is set, the initializers
+	// struct will be set to nil and the object is considered as initialized and visible to all
+	// clients.
+	// +patchMergeKey=name
+	// +patchStrategy=merge
+	Pending []Initializer `json:"pending" protobuf:"bytes,1,rep,name=pending" patchStrategy:"merge" patchMergeKey:"name"`
+	// If result is set with the Failure field, the object will be persisted to storage and then deleted,
+	// ensuring that other clients can observe the deletion.
+	Result *Status `json:"result,omitempty" protobuf:"bytes,2,opt,name=result"`
+}
+
+// Initializer is information about an initializer that has not yet completed.
+type Initializer struct {
+	// name of the process that is responsible for initializing this object.
+	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+}
+
+const (
+	// NamespaceDefault means the object is in the default namespace which is applied when not specified by clients
+	NamespaceDefault string = "default"
+	// NamespaceAll is the default argument to specify on a context when you want to list or filter resources across all namespaces
+	NamespaceAll string = ""
+	// NamespaceNone is the argument for a context when there is no namespace.
+	NamespaceNone string = ""
+	// NamespaceSystem is the system namespace where we place system components.
+	NamespaceSystem string = "kube-system"
+	// NamespacePublic is the namespace where we place public info (ConfigMaps)
+	NamespacePublic string = "kube-public"
+)
+
+// OwnerReference contains enough information to let you identify an owning
+// object. An owning object must be in the same namespace as the dependent, or
+// be cluster-scoped, so there is no namespace field.
+type OwnerReference struct {
+	// API version of the referent.
+	APIVersion string `json:"apiVersion" protobuf:"bytes,5,opt,name=apiVersion"`
+	// Kind of the referent.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+	Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"`
+	// Name of the referent.
+	// More info: http://kubernetes.io/docs/user-guide/identifiers#names
+	Name string `json:"name" protobuf:"bytes,3,opt,name=name"`
+	// UID of the referent.
+	// More info: http://kubernetes.io/docs/user-guide/identifiers#uids
+	UID types.UID `json:"uid" protobuf:"bytes,4,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"`
+	// If true, this reference points to the managing controller.
+	// +optional
+	Controller *bool `json:"controller,omitempty" protobuf:"varint,6,opt,name=controller"`
+	// If true, AND if the owner has the "foregroundDeletion" finalizer, then
+	// the owner cannot be deleted from the key-value store until this
+	// reference is removed.
+	// Defaults to false.
+	// To set this field, a user needs "delete" permission of the owner,
+	// otherwise 422 (Unprocessable Entity) will be returned.
+	// +optional
+	BlockOwnerDeletion *bool `json:"blockOwnerDeletion,omitempty" protobuf:"varint,7,opt,name=blockOwnerDeletion"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ListOptions is the query options to a standard REST list call.
+type ListOptions struct {
+	TypeMeta `json:",inline"`
+
+	// A selector to restrict the list of returned objects by their labels.
+	// Defaults to everything.
+	// +optional
+	LabelSelector string `json:"labelSelector,omitempty" protobuf:"bytes,1,opt,name=labelSelector"`
+	// A selector to restrict the list of returned objects by their fields.
+	// Defaults to everything.
+	// +optional
+	FieldSelector string `json:"fieldSelector,omitempty" protobuf:"bytes,2,opt,name=fieldSelector"`
+	// If true, partially initialized resources are included in the response.
+	// +optional
+	IncludeUninitialized bool `json:"includeUninitialized,omitempty" protobuf:"varint,6,opt,name=includeUninitialized"`
+	// Watch for changes to the described resources and return them as a stream of
+	// add, update, and remove notifications. Specify resourceVersion.
+	// +optional
+	Watch bool `json:"watch,omitempty" protobuf:"varint,3,opt,name=watch"`
+	// When specified with a watch call, shows changes that occur after that particular version of a resource.
+	// Defaults to changes from the beginning of history.
+	// When specified for list:
+	// - if unset, then the result is returned from remote storage based on quorum-read flag;
+	// - if it's 0, then we simply return what we currently have in cache, no guarantee;
+	// - if set to non-zero, then the result is at least as fresh as the given rv.
+	// +optional
+	ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,4,opt,name=resourceVersion"`
+	// Timeout for the list/watch call.
+	// This limits the duration of the call, regardless of any activity or inactivity.
+	// +optional
+	TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty" protobuf:"varint,5,opt,name=timeoutSeconds"`
+
+	// limit is a maximum number of responses to return for a list call. If more items exist, the
+	// server will set the `continue` field on the list metadata to a value that can be used with the
+	// same initial query to retrieve the next set of results. Setting a limit may return fewer than
+	// the requested amount of items (up to zero items) in the event all requested objects are
+	// filtered out and clients should only use the presence of the continue field to determine whether
+	// more results are available. Servers may choose not to support the limit argument and will return
+	// all of the available results. If limit is specified and the continue field is empty, clients may
+	// assume that no more results are available. This field is not supported if watch is true.
+	//
+	// The server guarantees that the objects returned when using continue will be identical to issuing
+	// a single list call without a limit - that is, no objects created, modified, or deleted after the
+	// first request is issued will be included in any subsequent continued requests. This is sometimes
+	// referred to as a consistent snapshot, and ensures that a client that is using limit to receive
+	// smaller chunks of a very large result can ensure they see all possible objects. If objects are
+	// updated during a chunked list the version of the object that was present at the time the first list
+	// result was calculated is returned.
+	Limit int64 `json:"limit,omitempty" protobuf:"varint,7,opt,name=limit"`
+	// The continue option should be set when retrieving more results from the server. Since this value is
+	// server defined, clients may only use the continue value from a previous query result with identical
+	// query parameters (except for the value of continue) and the server may reject a continue value it
+	// does not recognize. If the specified continue value is no longer valid whether due to expiration
+	// (generally five to fifteen minutes) or a configuration change on the server, the server will
+	// respond with a 410 ResourceExpired error together with a continue token. If the client needs a
+	// consistent list, it must restart its list without the continue field. Otherwise, the client may
+	// send another list request with the token received with the 410 error, the server will respond with
+	// a list starting from the next key, but from the latest snapshot, which is inconsistent from the
+	// previous list results - objects that are created, modified, or deleted after the first list request
+	// will be included in the response, as long as their keys are after the "next key".
+	//
+	// This field is not supported when watch is true. Clients may start a watch from the last
+	// resourceVersion value returned by the server and not miss any modifications.
+	Continue string `json:"continue,omitempty" protobuf:"bytes,8,opt,name=continue"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ExportOptions is the query options to the standard REST get call.
+type ExportOptions struct {
+	TypeMeta `json:",inline"`
+	// Should this value be exported.  Export strips fields that a user cannot specify.
+	Export bool `json:"export" protobuf:"varint,1,opt,name=export"`
+	// Should the export be exact.  Exact export maintains cluster-specific fields like 'Namespace'.
+	Exact bool `json:"exact" protobuf:"varint,2,opt,name=exact"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// GetOptions is the standard query options to the standard REST get call.
+type GetOptions struct {
+	TypeMeta `json:",inline"`
+	// When specified:
+	// - if unset, then the result is returned from remote storage based on quorum-read flag;
+	// - if it's 0, then we simply return what we currently have in cache, no guarantee;
+	// - if set to non-zero, then the result is at least as fresh as the given rv.
+	ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,1,opt,name=resourceVersion"`
+	// If true, partially initialized resources are included in the response.
+	// +optional
+	IncludeUninitialized bool `json:"includeUninitialized,omitempty" protobuf:"varint,2,opt,name=includeUninitialized"`
+}
+
+// DeletionPropagation decides if a deletion will propagate to the dependents of
+// the object, and how the garbage collector will handle the propagation.
+type DeletionPropagation string
+
+const (
+	// Orphans the dependents.
+	DeletePropagationOrphan DeletionPropagation = "Orphan"
+	// Deletes the object from the key-value store, the garbage collector will
+	// delete the dependents in the background.
+	DeletePropagationBackground DeletionPropagation = "Background"
+	// The object exists in the key-value store until the garbage collector
+	// deletes all the dependents whose ownerReference.blockOwnerDeletion=true
+	// from the key-value store.  The API server will put the "foregroundDeletion"
+	// finalizer on the object and set its deletionTimestamp.  This policy is
+	// cascading, i.e., the dependents will be deleted with Foreground.
+	DeletePropagationForeground DeletionPropagation = "Foreground"
+)
+
+const (
+	// DryRunAll means to complete all processing stages without
+	// persisting changes to storage.
+	DryRunAll = "All"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// DeleteOptions may be provided when deleting an API object.
+type DeleteOptions struct {
+	TypeMeta `json:",inline"`
+
+	// The duration in seconds before the object should be deleted. The value must be a
+	// non-negative integer; zero indicates delete immediately. If this value is nil, the
+	// default grace period for the specified type will be used.
+	// +optional
+	GracePeriodSeconds *int64 `json:"gracePeriodSeconds,omitempty" protobuf:"varint,1,opt,name=gracePeriodSeconds"`
+
+	// Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be
+	// returned.
+	// +optional
+	Preconditions *Preconditions `json:"preconditions,omitempty" protobuf:"bytes,2,opt,name=preconditions"`
+
+	// Deprecated: please use PropagationPolicy; this field will be deprecated in 1.7.
+	// Should the dependent objects be orphaned. If true/false, the "orphan"
+	// finalizer will be added to/removed from the object's finalizers list.
+	// Either this field or PropagationPolicy may be set, but not both.
+	// +optional
+	OrphanDependents *bool `json:"orphanDependents,omitempty" protobuf:"varint,3,opt,name=orphanDependents"`
+
+	// Whether and how garbage collection will be performed.
+	// Either this field or OrphanDependents may be set, but not both.
+	// The default policy is decided by the existing finalizer set in the
+	// metadata.finalizers and the resource-specific default policy.
+	// Acceptable values are: 'Orphan' - orphan the dependents; 'Background' -
+	// allow the garbage collector to delete the dependents in the background;
+	// 'Foreground' - a cascading policy that deletes all dependents in the
+	// foreground.
+	// +optional
+	PropagationPolicy *DeletionPropagation `json:"propagationPolicy,omitempty" protobuf:"varint,4,opt,name=propagationPolicy"`
+
+	// When present, indicates that modifications should not be
+	// persisted. An invalid or unrecognized dryRun directive will
+	// result in an error response and no further processing of the
+	// request. Valid values are:
+	// - All: all dry run stages will be processed
+	// +optional
+	DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,5,rep,name=dryRun"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// CreateOptions may be provided when creating an API object.
+type CreateOptions struct {
+	TypeMeta `json:",inline"`
+
+	// When present, indicates that modifications should not be
+	// persisted. An invalid or unrecognized dryRun directive will
+	// result in an error response and no further processing of the
+	// request. Valid values are:
+	// - All: all dry run stages will be processed
+	// +optional
+	DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,1,rep,name=dryRun"`
+
+	// If IncludeUninitialized is specified, the object may be
+	// returned without completing initialization.
+	IncludeUninitialized bool `json:"includeUninitialized,omitempty" protobuf:"varint,2,opt,name=includeUninitialized"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// UpdateOptions may be provided when updating an API object.
+type UpdateOptions struct {
+	TypeMeta `json:",inline"`
+
+	// When present, indicates that modifications should not be
+	// persisted. An invalid or unrecognized dryRun directive will
+	// result in an error response and no further processing of the
+	// request. Valid values are:
+	// - All: all dry run stages will be processed
+	// +optional
+	DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,1,rep,name=dryRun"`
+}
+
+// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.
+type Preconditions struct {
+	// Specifies the target UID.
+	// +optional
+	UID *types.UID `json:"uid,omitempty" protobuf:"bytes,1,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Status is a return value for calls that don't return other objects.
+type Status struct {
+	TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+	// +optional
+	ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Status of the operation.
+	// One of: "Success" or "Failure".
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+	// +optional
+	Status string `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"`
+	// A human-readable description of the status of this operation.
+	// +optional
+	Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"`
+	// A machine-readable description of why this operation is in the
+	// "Failure" status. If this value is empty there
+	// is no information available. A Reason clarifies an HTTP status
+	// code but does not override it.
+	// +optional
+	Reason StatusReason `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason,casttype=StatusReason"`
+	// Extended data associated with the reason.  Each reason may define its
+	// own extended details. This field is optional and the data returned
+	// is not guaranteed to conform to any schema except that defined by
+	// the reason type.
+	// +optional
+	Details *StatusDetails `json:"details,omitempty" protobuf:"bytes,5,opt,name=details"`
+	// Suggested HTTP return code for this status, 0 if not set.
+	// +optional
+	Code int32 `json:"code,omitempty" protobuf:"varint,6,opt,name=code"`
+}
+
+// StatusDetails is a set of additional properties that MAY be set by the
+// server to provide additional information about a response. The Reason
+// field of a Status object defines what attributes will be set. Clients
+// must ignore fields that do not match the defined type of each attribute,
+// and should assume that any attribute may be empty, invalid, or
+// under-defined.
+type StatusDetails struct {
+	// The name attribute of the resource associated with the status StatusReason
+	// (when there is a single name which can be described).
+	// +optional
+	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
+	// The group attribute of the resource associated with the status StatusReason.
+	// +optional
+	Group string `json:"group,omitempty" protobuf:"bytes,2,opt,name=group"`
+	// The kind attribute of the resource associated with the status StatusReason.
+	// On some operations may differ from the requested resource Kind.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+	// +optional
+	Kind string `json:"kind,omitempty" protobuf:"bytes,3,opt,name=kind"`
+	// UID of the resource.
+	// (when there is a single resource which can be described).
+	// More info: http://kubernetes.io/docs/user-guide/identifiers#uids
+	// +optional
+	UID types.UID `json:"uid,omitempty" protobuf:"bytes,6,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"`
+	// The Causes array includes more details associated with the StatusReason
+	// failure. Not all StatusReasons may provide detailed causes.
+	// +optional
+	Causes []StatusCause `json:"causes,omitempty" protobuf:"bytes,4,rep,name=causes"`
+	// If specified, the time in seconds before the operation should be retried. Some errors may indicate
+	// the client must take an alternate action - for those errors this field may indicate how long to wait
+	// before taking the alternate action.
+	// +optional
+	RetryAfterSeconds int32 `json:"retryAfterSeconds,omitempty" protobuf:"varint,5,opt,name=retryAfterSeconds"`
+}
+
+// Values of Status.Status
+const (
+	StatusSuccess = "Success"
+	StatusFailure = "Failure"
+)
+
+// StatusReason is an enumeration of possible failure causes.  Each StatusReason
+// must map to a single HTTP status code, but multiple reasons may map
+// to the same HTTP status code.
+// TODO: move to apiserver
+type StatusReason string
+
+const (
+	// StatusReasonUnknown means the server has declined to indicate a specific reason.
+	// The details field may contain other information about this error.
+	// Status code 500.
+	StatusReasonUnknown StatusReason = ""
+
+	// StatusReasonUnauthorized means the server can be reached and understood the request, but requires
+	// the user to present appropriate authorization credentials (identified by the WWW-Authenticate header)
+	// in order for the action to be completed. If the user has specified credentials on the request, the
+	// server considers them insufficient.
+	// Status code 401
+	StatusReasonUnauthorized StatusReason = "Unauthorized"
+
+	// StatusReasonForbidden means the server can be reached and understood the request, but refuses
+	// to take any further action.  It is the result of the server being configured to deny the client
+	// access to the requested resource for some reason.
+	// Details (optional):
+	//   "kind" string - the kind attribute of the forbidden resource
+	//                   on some operations may differ from the requested
+	//                   resource.
+	//   "id"   string - the identifier of the forbidden resource
+	// Status code 403
+	StatusReasonForbidden StatusReason = "Forbidden"
+
+	// StatusReasonNotFound means one or more resources required for this operation
+	// could not be found.
+	// Details (optional):
+	//   "kind" string - the kind attribute of the missing resource
+	//                   on some operations may differ from the requested
+	//                   resource.
+	//   "id"   string - the identifier of the missing resource
+	// Status code 404
+	StatusReasonNotFound StatusReason = "NotFound"
+
+	// StatusReasonAlreadyExists means the resource you are creating already exists.
+	// Details (optional):
+	//   "kind" string - the kind attribute of the conflicting resource
+	//   "id"   string - the identifier of the conflicting resource
+	// Status code 409
+	StatusReasonAlreadyExists StatusReason = "AlreadyExists"
+
+	// StatusReasonConflict means the requested operation cannot be completed
+	// due to a conflict in the operation. The client may need to alter the
+	// request. Each resource may define custom details that indicate the
+	// nature of the conflict.
+	// Status code 409
+	StatusReasonConflict StatusReason = "Conflict"
+
+	// StatusReasonGone means the item is no longer available at the server and no
+	// forwarding address is known.
+	// Status code 410
+	StatusReasonGone StatusReason = "Gone"
+
+	// StatusReasonInvalid means the requested create or update operation cannot be
+	// completed due to invalid data provided as part of the request. The client may
+	// need to alter the request. When set, the client may use the StatusDetails
+	// message field as a summary of the issues encountered.
+	// Details (optional):
+	//   "kind" string - the kind attribute of the invalid resource
+	//   "id"   string - the identifier of the invalid resource
+	//   "causes"      - one or more StatusCause entries indicating the data in the
+	//                   provided resource that was invalid.  The code, message, and
+	//                   field attributes will be set.
+	// Status code 422
+	StatusReasonInvalid StatusReason = "Invalid"
+
+	// StatusReasonServerTimeout means the server can be reached and understood the request,
+	// but cannot complete the action in a reasonable time. The client should retry the request.
+	// This may be due to temporary server load or a transient communication issue with
+	// another server. Status code 500 is used because the HTTP spec provides no suitable
+	// server-requested client retry and the 5xx class represents actionable errors.
+	// Details (optional):
+	//   "kind" string - the kind attribute of the resource being acted on.
+	//   "id"   string - the operation that is being attempted.
+	//   "retryAfterSeconds" int32 - the number of seconds before the operation should be retried
+	// Status code 500
+	StatusReasonServerTimeout StatusReason = "ServerTimeout"
+
+	// StatusReasonTimeout means that the request could not be completed within the given time.
+	// Clients can get this response only when they specified a timeout param in the request,
+	// or if the server cannot complete the operation within a reasonable amount of time.
+	// The request might succeed with an increased value of timeout param. The client *should*
+	// wait at least the number of seconds specified by the retryAfterSeconds field.
+	// Details (optional):
+	//   "retryAfterSeconds" int32 - the number of seconds before the operation should be retried
+	// Status code 504
+	StatusReasonTimeout StatusReason = "Timeout"
+
+	// StatusReasonTooManyRequests means the server experienced too many requests within a
+	// given window and that the client must wait to perform the action again. A client may
+	// always retry the request that led to this error, although the client should wait at least
+	// the number of seconds specified by the retryAfterSeconds field.
+	// Details (optional):
+	//   "retryAfterSeconds" int32 - the number of seconds before the operation should be retried
+	// Status code 429
+	StatusReasonTooManyRequests StatusReason = "TooManyRequests"
+
+	// StatusReasonBadRequest means that the request itself was invalid, because the request
+	// doesn't make any sense, for example deleting a read-only object.  This is different from
+	// StatusReasonInvalid above which indicates that the API call could possibly succeed, but the
+	// data was invalid.  API calls that return BadRequest can never succeed.
+	StatusReasonBadRequest StatusReason = "BadRequest"
+
+	// StatusReasonMethodNotAllowed means that the action the client attempted to perform on the
+	// resource was not supported by the code - for instance, attempting to delete a resource that
+	// can only be created. API calls that return MethodNotAllowed can never succeed.
+	StatusReasonMethodNotAllowed StatusReason = "MethodNotAllowed"
+
+	// StatusReasonNotAcceptable means that the accept types indicated by the client were not acceptable
+	// to the server - for instance, attempting to receive protobuf for a resource that supports only json and yaml.
+	// API calls that return NotAcceptable can never succeed.
+	// Status code 406
+	StatusReasonNotAcceptable StatusReason = "NotAcceptable"
+
+	// StatusReasonUnsupportedMediaType means that the content type sent by the client is not acceptable
+	// to the server - for instance, attempting to send protobuf for a resource that supports only json and yaml.
+	// API calls that return UnsupportedMediaType can never succeed.
+	// Status code 415
+	StatusReasonUnsupportedMediaType StatusReason = "UnsupportedMediaType"
+
+	// StatusReasonInternalError indicates that an internal error occurred, it is unexpected
+	// and the outcome of the call is unknown.
+	// Details (optional):
+	//   "causes" - The original error
+	// Status code 500
+	StatusReasonInternalError StatusReason = "InternalError"
+
+	// StatusReasonExpired indicates that the request is invalid because the content you are requesting
+	// has expired and is no longer available. It is typically associated with watches that can't be
+	// serviced.
+	// Status code 410 (gone)
+	StatusReasonExpired StatusReason = "Expired"
+
+	// StatusReasonServiceUnavailable means that the request itself was valid,
+	// but the requested service is unavailable at this time.
+	// Retrying the request after some time might succeed.
+	// Status code 503
+	StatusReasonServiceUnavailable StatusReason = "ServiceUnavailable"
+)
+
+// StatusCause provides more information about an api.Status failure, including
+// cases when multiple errors are encountered.
+type StatusCause struct {
+	// A machine-readable description of the cause of the error. If this value is
+	// empty there is no information available.
+	// +optional
+	Type CauseType `json:"reason,omitempty" protobuf:"bytes,1,opt,name=reason,casttype=CauseType"`
+	// A human-readable description of the cause of the error.  This field may be
+	// presented as-is to a reader.
+	// +optional
+	Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"`
+	// The field of the resource that has caused this error, as named by its JSON
+	// serialization. May include dot and postfix notation for nested attributes.
+	// Arrays are zero-indexed.  Fields may appear more than once in an array of
+	// causes due to fields having multiple errors.
+	// Optional.
+	//
+	// Examples:
+	//   "name" - the field "name" on the current resource
+	//   "items[0].name" - the field "name" on the first array entry in "items"
+	// +optional
+	Field string `json:"field,omitempty" protobuf:"bytes,3,opt,name=field"`
+}
+
+// CauseType is a machine readable value providing more detail about what
+// occurred in a status response. An operation may have multiple causes for a
+// status (whether Failure or Success).
+type CauseType string
+
+const (
+	// CauseTypeFieldValueNotFound is used to report failure to find a requested value
+	// (e.g. looking up an ID).
+	CauseTypeFieldValueNotFound CauseType = "FieldValueNotFound"
+	// CauseTypeFieldValueRequired is used to report required values that are not
+	// provided (e.g. empty strings, null values, or empty arrays).
+	CauseTypeFieldValueRequired CauseType = "FieldValueRequired"
+	// CauseTypeFieldValueDuplicate is used to report collisions of values that must be
+	// unique (e.g. unique IDs).
+	CauseTypeFieldValueDuplicate CauseType = "FieldValueDuplicate"
+	// CauseTypeFieldValueInvalid is used to report malformed values (e.g. failed regex
+	// match).
+	CauseTypeFieldValueInvalid CauseType = "FieldValueInvalid"
+	// CauseTypeFieldValueNotSupported is used to report valid (as per formatting rules)
+	// values that cannot be handled (e.g. an enumerated string).
+	CauseTypeFieldValueNotSupported CauseType = "FieldValueNotSupported"
+	// CauseTypeUnexpectedServerResponse is used to report when the server responded to the client
+	// without the expected return type. The presence of this cause indicates the error may be
+	// due to an intervening proxy or the server software malfunctioning.
+	CauseTypeUnexpectedServerResponse CauseType = "UnexpectedServerResponse"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// List holds a list of objects, which may not be known by the server.
+type List struct {
+	TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+	// +optional
+	ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// List of objects
+	Items []runtime.RawExtension `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// APIVersions lists the versions that are available, to allow clients to
+// discover the API at /api, which is the root path of the legacy v1 API.
+//
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type APIVersions struct {
+	TypeMeta `json:",inline"`
+	// versions are the api versions that are available.
+	Versions []string `json:"versions" protobuf:"bytes,1,rep,name=versions"`
+	// a map of client CIDR to server address that is serving this group.
+	// This is to help clients reach servers in the most network-efficient way possible.
+	// Clients can use the appropriate server address as per the CIDR that they match.
+	// In case of multiple matches, clients should use the longest matching CIDR.
+	// The server returns only those CIDRs that it thinks that the client can match.
+	// For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP.
+	// Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.
+	ServerAddressByClientCIDRs []ServerAddressByClientCIDR `json:"serverAddressByClientCIDRs" protobuf:"bytes,2,rep,name=serverAddressByClientCIDRs"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// APIGroupList is a list of APIGroup, to allow clients to discover the API at
+// /apis.
+type APIGroupList struct {
+	TypeMeta `json:",inline"`
+	// groups is a list of APIGroup.
+	Groups []APIGroup `json:"groups" protobuf:"bytes,1,rep,name=groups"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// APIGroup contains the name, the supported versions, and the preferred version
+// of a group.
+type APIGroup struct {
+	TypeMeta `json:",inline"`
+	// name is the name of the group.
+	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+	// versions are the versions supported in this group.
+	Versions []GroupVersionForDiscovery `json:"versions" protobuf:"bytes,2,rep,name=versions"`
+	// preferredVersion is the version preferred by the API server, which
+	// probably is the storage version.
+	// +optional
+	PreferredVersion GroupVersionForDiscovery `json:"preferredVersion,omitempty" protobuf:"bytes,3,opt,name=preferredVersion"`
+	// a map of client CIDR to server address that is serving this group.
+	// This is to help clients reach servers in the most network-efficient way possible.
+	// Clients can use the appropriate server address as per the CIDR that they match.
+	// In case of multiple matches, clients should use the longest matching CIDR.
+	// The server returns only those CIDRs that it thinks that the client can match.
+	// For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP.
+	// Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.
+	// +optional
+	ServerAddressByClientCIDRs []ServerAddressByClientCIDR `json:"serverAddressByClientCIDRs,omitempty" protobuf:"bytes,4,rep,name=serverAddressByClientCIDRs"`
+}
+
+// ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match.
+type ServerAddressByClientCIDR struct {
+	// The CIDR with which clients can match their IP to figure out the server address that they should use.
+	ClientCIDR string `json:"clientCIDR" protobuf:"bytes,1,opt,name=clientCIDR"`
+	// Address of this server, suitable for a client that matches the above CIDR.
+	// This can be a hostname, hostname:port, IP or IP:port.
+	ServerAddress string `json:"serverAddress" protobuf:"bytes,2,opt,name=serverAddress"`
+}
+
+// GroupVersionForDiscovery contains the "group/version" and "version" string of a version.
+// It is made a struct to allow for future extensibility.
+type GroupVersionForDiscovery struct {
+	// groupVersion specifies the API group and version in the form "group/version"
+	GroupVersion string `json:"groupVersion" protobuf:"bytes,1,opt,name=groupVersion"`
+	// version specifies the version in the form of "version". This is to save
+	// the clients the trouble of splitting the GroupVersion.
+	Version string `json:"version" protobuf:"bytes,2,opt,name=version"`
+}
+
+// APIResource specifies the name of a resource and whether it is namespaced.
+type APIResource struct {
+	// name is the plural name of the resource.
+	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+	// singularName is the singular name of the resource.  This allows clients to handle plural and singular opaquely.
+	// The singularName is more correct for reporting status on a single item and both singular and plural are allowed
+	// from the kubectl CLI interface.
+	SingularName string `json:"singularName" protobuf:"bytes,6,opt,name=singularName"`
+	// namespaced indicates if a resource is namespaced or not.
+	Namespaced bool `json:"namespaced" protobuf:"varint,2,opt,name=namespaced"`
+	// group is the preferred group of the resource.  Empty implies the group of the containing resource list.
+	// For subresources, this may have a different value, for example: "Scale".
+	Group string `json:"group,omitempty" protobuf:"bytes,8,opt,name=group"`
+	// version is the preferred version of the resource.  Empty implies the version of the containing resource list.
+	// For subresources, this may have a different value, for example: "v1" (while inside a v1beta1 version of the core resource's group).
+	Version string `json:"version,omitempty" protobuf:"bytes,9,opt,name=version"`
+	// kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo')
+	Kind string `json:"kind" protobuf:"bytes,3,opt,name=kind"`
+	// verbs is a list of supported kube verbs (this includes get, list, watch, create,
+	// update, patch, delete, deletecollection, and proxy)
+	Verbs Verbs `json:"verbs" protobuf:"bytes,4,opt,name=verbs"`
+	// shortNames is a list of suggested short names of the resource.
+	ShortNames []string `json:"shortNames,omitempty" protobuf:"bytes,5,rep,name=shortNames"`
+	// categories is a list of the grouped resources this resource belongs to (e.g. 'all')
+	Categories []string `json:"categories,omitempty" protobuf:"bytes,7,rep,name=categories"`
+}
+
+// Verbs masks the []string value so that protobuf can generate a named, nullable type for it.
+//
+// +protobuf.nullable=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+type Verbs []string
+
+func (vs Verbs) String() string {
+	return fmt.Sprintf("%v", []string(vs))
+}
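+
+// Illustrative usage (an assumption about formatting, matching fmt's
+// default slice rendering):
+//
+//	Verbs{"get", "list", "watch"}.String() == "[get list watch]"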
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// APIResourceList is a list of APIResource; it is used to expose the name of the
+// resources supported in a specific group and version, and if the resource
+// is namespaced.
+type APIResourceList struct {
+	TypeMeta `json:",inline"`
+	// groupVersion is the group and version this APIResourceList is for.
+	GroupVersion string `json:"groupVersion" protobuf:"bytes,1,opt,name=groupVersion"`
+	// resources contains the name of the resources and if they are namespaced.
+	APIResources []APIResource `json:"resources" protobuf:"bytes,2,rep,name=resources"`
+}
+
+// RootPaths lists the paths available at root.
+// For example: "/healthz", "/apis".
+type RootPaths struct {
+	// paths are the paths available at root.
+	Paths []string `json:"paths" protobuf:"bytes,1,rep,name=paths"`
+}
+
+// TODO: remove me when watch is refactored
+func LabelSelectorQueryParam(version string) string {
+	return "labelSelector"
+}
+
+// TODO: remove me when watch is refactored
+func FieldSelectorQueryParam(version string) string {
+	return "fieldSelector"
+}
+
+// String returns available api versions as a human-friendly version string.
+func (apiVersions APIVersions) String() string {
+	return strings.Join(apiVersions.Versions, ",")
+}
+
+func (apiVersions APIVersions) GoString() string {
+	return apiVersions.String()
+}
+
+// Patch is provided to give a concrete name and type to the Kubernetes PATCH request body.
+type Patch struct{}
+
+// Note:
+// There are two different styles of label selectors used in versioned types:
+// an older style which is represented as just a string in versioned types, and a
+// newer style that is structured.  LabelSelector is an internal representation for the
+// latter style.
+
+// A label selector is a label query over a set of resources. The result of matchLabels and
+// matchExpressions are ANDed. An empty label selector matches all objects. A null
+// label selector matches no objects.
+type LabelSelector struct {
+	// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+	// map is equivalent to an element of matchExpressions, whose key field is "key", the
+	// operator is "In", and the values array contains only "value". The requirements are ANDed.
+	// +optional
+	MatchLabels map[string]string `json:"matchLabels,omitempty" protobuf:"bytes,1,rep,name=matchLabels"`
+	// matchExpressions is a list of label selector requirements. The requirements are ANDed.
+	// +optional
+	MatchExpressions []LabelSelectorRequirement `json:"matchExpressions,omitempty" protobuf:"bytes,2,rep,name=matchExpressions"`
+}
+
+// A label selector requirement is a selector that contains values, a key, and an operator that
+// relates the key and values.
+type LabelSelectorRequirement struct {
+	// key is the label key that the selector applies to.
+	// +patchMergeKey=key
+	// +patchStrategy=merge
+	Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"`
+	// operator represents a key's relationship to a set of values.
+	// Valid operators are In, NotIn, Exists and DoesNotExist.
+	Operator LabelSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=LabelSelectorOperator"`
+	// values is an array of string values. If the operator is In or NotIn,
+	// the values array must be non-empty. If the operator is Exists or DoesNotExist,
+	// the values array must be empty. This array is replaced during a strategic
+	// merge patch.
+	// +optional
+	Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"`
+}
+
+// A label selector operator is the set of operators that can be used in a selector requirement.
+type LabelSelectorOperator string
+
+const (
+	LabelSelectorOpIn           LabelSelectorOperator = "In"
+	LabelSelectorOpNotIn        LabelSelectorOperator = "NotIn"
+	LabelSelectorOpExists       LabelSelectorOperator = "Exists"
+	LabelSelectorOpDoesNotExist LabelSelectorOperator = "DoesNotExist"
+)
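+
+// exampleEquivalentSelectors is an illustrative sketch (not part of the
+// upstream apimachinery API) showing the documented equivalence: a
+// matchLabels entry behaves like a matchExpressions requirement whose
+// operator is In with a single value.
+func exampleEquivalentSelectors() (LabelSelector, LabelSelector) {
+	byLabels := LabelSelector{
+		MatchLabels: map[string]string{"app": "web"},
+	}
+	byExpression := LabelSelector{
+		MatchExpressions: []LabelSelectorRequirement{{
+			Key:      "app",
+			Operator: LabelSelectorOpIn,
+			Values:   []string{"web"},
+		}},
+	}
+	// Both selectors match exactly the objects labeled app=web.
+	return byLabels, byExpression
+}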
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go
new file mode 100644
index 0000000..679e709
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go
@@ -0,0 +1,350 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored by the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multi-line comments or blocks that you want to ignore, use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_APIGroup = map[string]string{
+	"":                           "APIGroup contains the name, the supported versions, and the preferred version of a group.",
+	"name":                       "name is the name of the group.",
+	"versions":                   "versions are the versions supported in this group.",
+	"preferredVersion":           "preferredVersion is the version preferred by the API server, which probably is the storage version.",
+	"serverAddressByClientCIDRs": "a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.",
+}
+
+func (APIGroup) SwaggerDoc() map[string]string {
+	return map_APIGroup
+}
+
+var map_APIGroupList = map[string]string{
+	"":       "APIGroupList is a list of APIGroup, to allow clients to discover the API at /apis.",
+	"groups": "groups is a list of APIGroup.",
+}
+
+func (APIGroupList) SwaggerDoc() map[string]string {
+	return map_APIGroupList
+}
+
+var map_APIResource = map[string]string{
+	"":             "APIResource specifies the name of a resource and whether it is namespaced.",
+	"name":         "name is the plural name of the resource.",
+	"singularName": "singularName is the singular name of the resource.  This allows clients to handle plural and singular opaquely. The singularName is more correct for reporting status on a single item and both singular and plural are allowed from the kubectl CLI interface.",
+	"namespaced":   "namespaced indicates if a resource is namespaced or not.",
+	"group":        "group is the preferred group of the resource.  Empty implies the group of the containing resource list. For subresources, this may have a different value, for example: Scale\".",
+	"version":      "version is the preferred version of the resource.  Empty implies the version of the containing resource list For subresources, this may have a different value, for example: v1 (while inside a v1beta1 version of the core resource's group)\".",
+	"kind":         "kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo')",
+	"verbs":        "verbs is a list of supported kube verbs (this includes get, list, watch, create, update, patch, delete, deletecollection, and proxy)",
+	"shortNames":   "shortNames is a list of suggested short names of the resource.",
+	"categories":   "categories is a list of the grouped resources this resource belongs to (e.g. 'all')",
+}
+
+func (APIResource) SwaggerDoc() map[string]string {
+	return map_APIResource
+}
+
+var map_APIResourceList = map[string]string{
+	"":             "APIResourceList is a list of APIResource, it is used to expose the name of the resources supported in a specific group and version, and if the resource is namespaced.",
+	"groupVersion": "groupVersion is the group and version this APIResourceList is for.",
+	"resources":    "resources contains the name of the resources and if they are namespaced.",
+}
+
+func (APIResourceList) SwaggerDoc() map[string]string {
+	return map_APIResourceList
+}
+
+var map_APIVersions = map[string]string{
+	"":                           "APIVersions lists the versions that are available, to allow clients to discover the API at /api, which is the root path of the legacy v1 API.",
+	"versions":                   "versions are the api versions that are available.",
+	"serverAddressByClientCIDRs": "a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.",
+}
+
+func (APIVersions) SwaggerDoc() map[string]string {
+	return map_APIVersions
+}
+
+var map_CreateOptions = map[string]string{
+	"":                     "CreateOptions may be provided when creating an API object.",
+	"dryRun":               "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+	"includeUninitialized": "If IncludeUninitialized is specified, the object may be returned without completing initialization.",
+}
+
+func (CreateOptions) SwaggerDoc() map[string]string {
+	return map_CreateOptions
+}
+
+var map_DeleteOptions = map[string]string{
+	"":                   "DeleteOptions may be provided when deleting an API object.",
+	"gracePeriodSeconds": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.",
+	"preconditions":      "Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be returned.",
+	"orphanDependents":   "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.",
+	"propagationPolicy":  "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.",
+	"dryRun":             "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+}
+
+func (DeleteOptions) SwaggerDoc() map[string]string {
+	return map_DeleteOptions
+}
+
+var map_ExportOptions = map[string]string{
+	"":       "ExportOptions is the query options to the standard REST get call.",
+	"export": "Should this value be exported.  Export strips fields that a user can not specify.",
+	"exact":  "Should the export be exact.  Exact export maintains cluster-specific fields like 'Namespace'.",
+}
+
+func (ExportOptions) SwaggerDoc() map[string]string {
+	return map_ExportOptions
+}
+
+var map_GetOptions = map[string]string{
+	"":                     "GetOptions is the standard query options to the standard REST get call.",
+	"resourceVersion":      "When specified: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
+	"includeUninitialized": "If true, partially initialized resources are included in the response.",
+}
+
+func (GetOptions) SwaggerDoc() map[string]string {
+	return map_GetOptions
+}
+
+var map_GroupVersionForDiscovery = map[string]string{
+	"":             "GroupVersion contains the \"group/version\" and \"version\" string of a version. It is made a struct to keep extensibility.",
+	"groupVersion": "groupVersion specifies the API group and version in the form \"group/version\"",
+	"version":      "version specifies the version in the form of \"version\". This is to save the clients the trouble of splitting the GroupVersion.",
+}
+
+func (GroupVersionForDiscovery) SwaggerDoc() map[string]string {
+	return map_GroupVersionForDiscovery
+}
+
+var map_Initializer = map[string]string{
+	"":     "Initializer is information about an initializer that has not yet completed.",
+	"name": "name of the process that is responsible for initializing this object.",
+}
+
+func (Initializer) SwaggerDoc() map[string]string {
+	return map_Initializer
+}
+
+var map_Initializers = map[string]string{
+	"":        "Initializers tracks the progress of initialization.",
+	"pending": "Pending is a list of initializers that must execute in order before this object is visible. When the last pending initializer is removed, and no failing result is set, the initializers struct will be set to nil and the object is considered as initialized and visible to all clients.",
+	"result":  "If result is set with the Failure field, the object will be persisted to storage and then deleted, ensuring that other clients can observe the deletion.",
+}
+
+func (Initializers) SwaggerDoc() map[string]string {
+	return map_Initializers
+}
+
+var map_LabelSelector = map[string]string{
+	"":                 "A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.",
+	"matchLabels":      "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.",
+	"matchExpressions": "matchExpressions is a list of label selector requirements. The requirements are ANDed.",
+}
+
+func (LabelSelector) SwaggerDoc() map[string]string {
+	return map_LabelSelector
+}
+
+var map_LabelSelectorRequirement = map[string]string{
+	"":         "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.",
+	"key":      "key is the label key that the selector applies to.",
+	"operator": "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.",
+	"values":   "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.",
+}
+
+func (LabelSelectorRequirement) SwaggerDoc() map[string]string {
+	return map_LabelSelectorRequirement
+}
+
+var map_List = map[string]string{
+	"":         "List holds a list of objects, which may not be known by the server.",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
+	"items":    "List of objects",
+}
+
+func (List) SwaggerDoc() map[string]string {
+	return map_List
+}
+
+var map_ListMeta = map[string]string{
+	"":                "ListMeta describes metadata that synthetic resources must have, including lists and various status objects. A resource may have only one of {ObjectMeta, ListMeta}.",
+	"selfLink":        "selfLink is a URL representing this object. Populated by the system. Read-only.",
+	"resourceVersion": "String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency",
+	"continue":        "continue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. Continuing a consistent list may not be possible if the server configuration has changed or more than a few minutes have passed. The resourceVersion field returned when using this continue value will be identical to the value in the first response, unless you have received this token from an error message.",
+}
+
+func (ListMeta) SwaggerDoc() map[string]string {
+	return map_ListMeta
+}
+
+var map_ListOptions = map[string]string{
+	"":                     "ListOptions is the query options to a standard REST list call.",
+	"labelSelector":        "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+	"fieldSelector":        "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+	"includeUninitialized": "If true, partially initialized resources are included in the response.",
+	"watch":                "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+	"resourceVersion":      "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
+	"timeoutSeconds":       "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+	"limit":                "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+	"continue":             "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+}
+
+func (ListOptions) SwaggerDoc() map[string]string {
+	return map_ListOptions
+}
+
+var map_ObjectMeta = map[string]string{
+	"":                           "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.",
+	"name":                       "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names",
+	"generateName":               "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency",
+	"namespace":                  "Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces",
+	"selfLink":                   "SelfLink is a URL representing this object. Populated by the system. Read-only.",
+	"uid":                        "UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids",
+	"resourceVersion":            "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency",
+	"generation":                 "A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.",
+	"creationTimestamp":          "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"deletionTimestamp":          "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"deletionGracePeriodSeconds": "Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.",
+	"labels":                     "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels",
+	"annotations":                "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations",
+	"ownerReferences":            "List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.",
+	"initializers":               "An initializer is a controller which enforces some system invariant at object creation time. This field is a list of initializers that have not yet acted on this object. If nil or empty, this object has been completely initialized. Otherwise, the object is considered uninitialized and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to observe uninitialized objects.\n\nWhen an object is created, the system will populate this list with the current set of initializers. Only privileged users may set or modify this list. Once it is empty, it may not be modified further by any user.",
+	"finalizers":                 "Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed.",
+	"clusterName":                "The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.",
+}
+
+func (ObjectMeta) SwaggerDoc() map[string]string {
+	return map_ObjectMeta
+}
+
+var map_OwnerReference = map[string]string{
+	"":                   "OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field.",
+	"apiVersion":         "API version of the referent.",
+	"kind":               "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
+	"name":               "Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names",
+	"uid":                "UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids",
+	"controller":         "If true, this reference points to the managing controller.",
+	"blockOwnerDeletion": "If true, AND if the owner has the \"foregroundDeletion\" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. Defaults to false. To set this field, a user needs \"delete\" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned.",
+}
+
+func (OwnerReference) SwaggerDoc() map[string]string {
+	return map_OwnerReference
+}
+
+var map_Patch = map[string]string{
+	"": "Patch is provided to give a concrete name and type to the Kubernetes PATCH request body.",
+}
+
+func (Patch) SwaggerDoc() map[string]string {
+	return map_Patch
+}
+
+var map_Preconditions = map[string]string{
+	"":    "Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.",
+	"uid": "Specifies the target UID.",
+}
+
+func (Preconditions) SwaggerDoc() map[string]string {
+	return map_Preconditions
+}
+
+var map_RootPaths = map[string]string{
+	"":      "RootPaths lists the paths available at root. For example: \"/healthz\", \"/apis\".",
+	"paths": "paths are the paths available at root.",
+}
+
+func (RootPaths) SwaggerDoc() map[string]string {
+	return map_RootPaths
+}
+
+var map_ServerAddressByClientCIDR = map[string]string{
+	"":              "ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match.",
+	"clientCIDR":    "The CIDR with which clients can match their IP to figure out the server address that they should use.",
+	"serverAddress": "Address of this server, suitable for a client that matches the above CIDR. This can be a hostname, hostname:port, IP or IP:port.",
+}
+
+func (ServerAddressByClientCIDR) SwaggerDoc() map[string]string {
+	return map_ServerAddressByClientCIDR
+}
+
+var map_Status = map[string]string{
+	"":         "Status is a return value for calls that don't return other objects.",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
+	"status":   "Status of the operation. One of: \"Success\" or \"Failure\". More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+	"message":  "A human-readable description of the status of this operation.",
+	"reason":   "A machine-readable description of why this operation is in the \"Failure\" status. If this value is empty there is no information available. A Reason clarifies an HTTP status code but does not override it.",
+	"details":  "Extended data associated with the reason.  Each reason may define its own extended details. This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type.",
+	"code":     "Suggested HTTP return code for this status, 0 if not set.",
+}
+
+func (Status) SwaggerDoc() map[string]string {
+	return map_Status
+}
+
+var map_StatusCause = map[string]string{
+	"":        "StatusCause provides more information about an api.Status failure, including cases when multiple errors are encountered.",
+	"reason":  "A machine-readable description of the cause of the error. If this value is empty there is no information available.",
+	"message": "A human-readable description of the cause of the error.  This field may be presented as-is to a reader.",
+	"field":   "The field of the resource that has caused this error, as named by its JSON serialization. May include dot and postfix notation for nested attributes. Arrays are zero-indexed.  Fields may appear more than once in an array of causes due to fields having multiple errors. Optional.\n\nExamples:\n  \"name\" - the field \"name\" on the current resource\n  \"items[0].name\" - the field \"name\" on the first array entry in \"items\"",
+}
+
+func (StatusCause) SwaggerDoc() map[string]string {
+	return map_StatusCause
+}
+
+var map_StatusDetails = map[string]string{
+	"":                  "StatusDetails is a set of additional properties that MAY be set by the server to provide additional information about a response. The Reason field of a Status object defines what attributes will be set. Clients must ignore fields that do not match the defined type of each attribute, and should assume that any attribute may be empty, invalid, or under defined.",
+	"name":              "The name attribute of the resource associated with the status StatusReason (when there is a single name which can be described).",
+	"group":             "The group attribute of the resource associated with the status StatusReason.",
+	"kind":              "The kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
+	"uid":               "UID of the resource. (when there is a single resource which can be described). More info: http://kubernetes.io/docs/user-guide/identifiers#uids",
+	"causes":            "The Causes array includes more details associated with the StatusReason failure. Not all StatusReasons may provide detailed causes.",
+	"retryAfterSeconds": "If specified, the time in seconds before the operation should be retried. Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action.",
+}
+
+func (StatusDetails) SwaggerDoc() map[string]string {
+	return map_StatusDetails
+}
+
+var map_TypeMeta = map[string]string{
+	"":           "TypeMeta describes an individual object in an API response or request with strings representing the type of the object and its API schema version. Structures that are versioned or persisted should inline TypeMeta.",
+	"kind":       "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
+	"apiVersion": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources",
+}
+
+func (TypeMeta) SwaggerDoc() map[string]string {
+	return map_TypeMeta
+}
+
+var map_UpdateOptions = map[string]string{
+	"":       "UpdateOptions may be provided when updating an API object.",
+	"dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+}
+
+func (UpdateOptions) SwaggerDoc() map[string]string {
+	return map_UpdateOptions
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go
new file mode 100644
index 0000000..75ac693
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go
@@ -0,0 +1,454 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unstructured
+
+import (
+	gojson "encoding/json"
+	"fmt"
+	"io"
+	"strings"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/json"
+)
+
+// NestedFieldCopy returns a deep copy of the value of a nested field.
+// Returns false if the value is missing.
+// No error is returned for a nil field.
+func NestedFieldCopy(obj map[string]interface{}, fields ...string) (interface{}, bool, error) {
+	val, found, err := NestedFieldNoCopy(obj, fields...)
+	if !found || err != nil {
+		return nil, found, err
+	}
+	return runtime.DeepCopyJSONValue(val), true, nil
+}
+
+// NestedFieldNoCopy returns a reference to a nested field.
+// Returns false if value is not found and an error if unable
+// to traverse obj.
+func NestedFieldNoCopy(obj map[string]interface{}, fields ...string) (interface{}, bool, error) {
+	var val interface{} = obj
+
+	for i, field := range fields {
+		if val == nil {
+			return nil, false, nil
+		}
+		if m, ok := val.(map[string]interface{}); ok {
+			val, ok = m[field]
+			if !ok {
+				return nil, false, nil
+			}
+		} else {
+			return nil, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected map[string]interface{}", jsonPath(fields[:i+1]), val, val)
+		}
+	}
+	return val, true, nil
+}
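+
+// exampleNestedAccess is an illustrative sketch (not upstream code) of how
+// the accessors above are typically used; the object shape is hypothetical.
+func exampleNestedAccess() {
+	obj := map[string]interface{}{
+		"metadata": map[string]interface{}{"name": "demo"},
+	}
+	// Walks obj["metadata"]["name"] without copying. found is true and err
+	// is nil because every intermediate level is a map[string]interface{}.
+	name, found, err := NestedFieldNoCopy(obj, "metadata", "name")
+	fmt.Println(name, found, err) // demo true <nil>
+}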
+
+// NestedString returns the string value of a nested field.
+// Returns false if value is not found and an error if not a string.
+func NestedString(obj map[string]interface{}, fields ...string) (string, bool, error) {
+	val, found, err := NestedFieldNoCopy(obj, fields...)
+	if !found || err != nil {
+		return "", found, err
+	}
+	s, ok := val.(string)
+	if !ok {
+		return "", false, fmt.Errorf("%v accessor error: %v is of the type %T, expected string", jsonPath(fields), val, val)
+	}
+	return s, true, nil
+}
+
+// NestedBool returns the bool value of a nested field.
+// Returns false if value is not found and an error if not a bool.
+func NestedBool(obj map[string]interface{}, fields ...string) (bool, bool, error) {
+	val, found, err := NestedFieldNoCopy(obj, fields...)
+	if !found || err != nil {
+		return false, found, err
+	}
+	b, ok := val.(bool)
+	if !ok {
+		return false, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected bool", jsonPath(fields), val, val)
+	}
+	return b, true, nil
+}
+
+// NestedFloat64 returns the float64 value of a nested field.
+// Returns false if value is not found and an error if not a float64.
+func NestedFloat64(obj map[string]interface{}, fields ...string) (float64, bool, error) {
+	val, found, err := NestedFieldNoCopy(obj, fields...)
+	if !found || err != nil {
+		return 0, found, err
+	}
+	f, ok := val.(float64)
+	if !ok {
+		return 0, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected float64", jsonPath(fields), val, val)
+	}
+	return f, true, nil
+}
+
+// NestedInt64 returns the int64 value of a nested field.
+// Returns false if value is not found and an error if not an int64.
+func NestedInt64(obj map[string]interface{}, fields ...string) (int64, bool, error) {
+	val, found, err := NestedFieldNoCopy(obj, fields...)
+	if !found || err != nil {
+		return 0, found, err
+	}
+	i, ok := val.(int64)
+	if !ok {
+		return 0, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected int64", jsonPath(fields), val, val)
+	}
+	return i, true, nil
+}
+
+// NestedStringSlice returns a copy of []string value of a nested field.
+// Returns false if value is not found and an error if not a []interface{} or contains non-string items in the slice.
+func NestedStringSlice(obj map[string]interface{}, fields ...string) ([]string, bool, error) {
+	val, found, err := NestedFieldNoCopy(obj, fields...)
+	if !found || err != nil {
+		return nil, found, err
+	}
+	m, ok := val.([]interface{})
+	if !ok {
+		return nil, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected []interface{}", jsonPath(fields), val, val)
+	}
+	strSlice := make([]string, 0, len(m))
+	for _, v := range m {
+		if str, ok := v.(string); ok {
+			strSlice = append(strSlice, str)
+		} else {
+			return nil, false, fmt.Errorf("%v accessor error: contains non-string key in the slice: %v is of the type %T, expected string", jsonPath(fields), v, v)
+		}
+	}
+	return strSlice, true, nil
+}
+
+// NestedSlice returns a deep copy of []interface{} value of a nested field.
+// Returns false if value is not found and an error if not a []interface{}.
+func NestedSlice(obj map[string]interface{}, fields ...string) ([]interface{}, bool, error) {
+	val, found, err := NestedFieldNoCopy(obj, fields...)
+	if !found || err != nil {
+		return nil, found, err
+	}
+	_, ok := val.([]interface{})
+	if !ok {
+		return nil, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected []interface{}", jsonPath(fields), val, val)
+	}
+	return runtime.DeepCopyJSONValue(val).([]interface{}), true, nil
+}
+
+// NestedStringMap returns a copy of map[string]string value of a nested field.
+// Returns false if value is not found and an error if not a map[string]interface{} or contains non-string values in the map.
+func NestedStringMap(obj map[string]interface{}, fields ...string) (map[string]string, bool, error) {
+	m, found, err := nestedMapNoCopy(obj, fields...)
+	if !found || err != nil {
+		return nil, found, err
+	}
+	strMap := make(map[string]string, len(m))
+	for k, v := range m {
+		if str, ok := v.(string); ok {
+			strMap[k] = str
+		} else {
+			return nil, false, fmt.Errorf("%v accessor error: contains non-string key in the map: %v is of the type %T, expected string", jsonPath(fields), v, v)
+		}
+	}
+	return strMap, true, nil
+}
+
+// NestedMap returns a deep copy of map[string]interface{} value of a nested field.
+// Returns false if value is not found and an error if not a map[string]interface{}.
+func NestedMap(obj map[string]interface{}, fields ...string) (map[string]interface{}, bool, error) {
+	m, found, err := nestedMapNoCopy(obj, fields...)
+	if !found || err != nil {
+		return nil, found, err
+	}
+	return runtime.DeepCopyJSON(m), true, nil
+}
+
+// nestedMapNoCopy returns a map[string]interface{} value of a nested field.
+// Returns false if value is not found and an error if not a map[string]interface{}.
+func nestedMapNoCopy(obj map[string]interface{}, fields ...string) (map[string]interface{}, bool, error) {
+	val, found, err := NestedFieldNoCopy(obj, fields...)
+	if !found || err != nil {
+		return nil, found, err
+	}
+	m, ok := val.(map[string]interface{})
+	if !ok {
+		return nil, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected map[string]interface{}", jsonPath(fields), val, val)
+	}
+	return m, true, nil
+}
+
+// SetNestedField sets the value of a nested field to a deep copy of the value provided.
+// Returns an error if value cannot be set because one of the nesting levels is not a map[string]interface{}.
+func SetNestedField(obj map[string]interface{}, value interface{}, fields ...string) error {
+	return setNestedFieldNoCopy(obj, runtime.DeepCopyJSONValue(value), fields...)
+}
+
+func setNestedFieldNoCopy(obj map[string]interface{}, value interface{}, fields ...string) error {
+	m := obj
+
+	for i, field := range fields[:len(fields)-1] {
+		if val, ok := m[field]; ok {
+			if valMap, ok := val.(map[string]interface{}); ok {
+				m = valMap
+			} else {
+				return fmt.Errorf("value cannot be set because %v is not a map[string]interface{}", jsonPath(fields[:i+1]))
+			}
+		} else {
+			newVal := make(map[string]interface{})
+			m[field] = newVal
+			m = newVal
+		}
+	}
+	m[fields[len(fields)-1]] = value
+	return nil
+}
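+
+// exampleSetAndRemove is an illustrative sketch (not upstream code) showing
+// that SetNestedField creates intermediate maps as needed and that
+// RemoveNestedField (defined below) deletes the leaf key.
+func exampleSetAndRemove() {
+	obj := map[string]interface{}{}
+	// Creates obj["spec"] as a map and sets obj["spec"]["replicas"] to 3.
+	if err := SetNestedField(obj, int64(3), "spec", "replicas"); err != nil {
+		fmt.Println(err)
+	}
+	// Deletes obj["spec"]["replicas"]; the now-empty "spec" map remains.
+	RemoveNestedField(obj, "spec", "replicas")
+}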
+
+// SetNestedStringSlice sets the string slice value of a nested field.
+// Returns an error if value cannot be set because one of the nesting levels is not a map[string]interface{}.
+func SetNestedStringSlice(obj map[string]interface{}, value []string, fields ...string) error {
+	m := make([]interface{}, 0, len(value)) // convert []string into []interface{}
+	for _, v := range value {
+		m = append(m, v)
+	}
+	return setNestedFieldNoCopy(obj, m, fields...)
+}
+
+// SetNestedSlice sets the slice value of a nested field.
+// Returns an error if value cannot be set because one of the nesting levels is not a map[string]interface{}.
+func SetNestedSlice(obj map[string]interface{}, value []interface{}, fields ...string) error {
+	return SetNestedField(obj, value, fields...)
+}
+
+// SetNestedStringMap sets the map[string]string value of a nested field.
+// Returns an error if value cannot be set because one of the nesting levels is not a map[string]interface{}.
+func SetNestedStringMap(obj map[string]interface{}, value map[string]string, fields ...string) error {
+	m := make(map[string]interface{}, len(value)) // convert map[string]string into map[string]interface{}
+	for k, v := range value {
+		m[k] = v
+	}
+	return setNestedFieldNoCopy(obj, m, fields...)
+}
+
+// SetNestedMap sets the map[string]interface{} value of a nested field.
+// Returns an error if value cannot be set because one of the nesting levels is not a map[string]interface{}.
+func SetNestedMap(obj map[string]interface{}, value map[string]interface{}, fields ...string) error {
+	return SetNestedField(obj, value, fields...)
+}
+
+// RemoveNestedField removes the nested field from the obj.
+func RemoveNestedField(obj map[string]interface{}, fields ...string) {
+	m := obj
+	for _, field := range fields[:len(fields)-1] {
+		if x, ok := m[field].(map[string]interface{}); ok {
+			m = x
+		} else {
+			return
+		}
+	}
+	delete(m, fields[len(fields)-1])
+}
+
+func getNestedString(obj map[string]interface{}, fields ...string) string {
+	val, found, err := NestedString(obj, fields...)
+	if !found || err != nil {
+		return ""
+	}
+	return val
+}
+
+func jsonPath(fields []string) string {
+	return "." + strings.Join(fields, ".")
+}
+
+func extractOwnerReference(v map[string]interface{}) metav1.OwnerReference {
+	// Though this field is a *bool, when it is decoded from JSON it is
+	// unmarshalled as a plain bool, so we take the address of the decoded
+	// value below.
+	var controllerPtr *bool
+	if controller, found, err := NestedBool(v, "controller"); err == nil && found {
+		controllerPtr = &controller
+	}
+	var blockOwnerDeletionPtr *bool
+	if blockOwnerDeletion, found, err := NestedBool(v, "blockOwnerDeletion"); err == nil && found {
+		blockOwnerDeletionPtr = &blockOwnerDeletion
+	}
+	return metav1.OwnerReference{
+		Kind:               getNestedString(v, "kind"),
+		Name:               getNestedString(v, "name"),
+		APIVersion:         getNestedString(v, "apiVersion"),
+		UID:                types.UID(getNestedString(v, "uid")),
+		Controller:         controllerPtr,
+		BlockOwnerDeletion: blockOwnerDeletionPtr,
+	}
+}
+
+// UnstructuredJSONScheme is capable of converting JSON data into the Unstructured
+// type, which can be used for generic access to objects without a predefined scheme.
+// TODO: move into serializer/json.
+var UnstructuredJSONScheme runtime.Codec = unstructuredJSONScheme{}
+
+type unstructuredJSONScheme struct{}
+
+func (s unstructuredJSONScheme) Decode(data []byte, _ *schema.GroupVersionKind, obj runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
+	var err error
+	if obj != nil {
+		err = s.decodeInto(data, obj)
+	} else {
+		obj, err = s.decode(data)
+	}
+
+	if err != nil {
+		return nil, nil, err
+	}
+
+	gvk := obj.GetObjectKind().GroupVersionKind()
+	if len(gvk.Kind) == 0 {
+		return nil, &gvk, runtime.NewMissingKindErr(string(data))
+	}
+
+	return obj, &gvk, nil
+}
+
+func (unstructuredJSONScheme) Encode(obj runtime.Object, w io.Writer) error {
+	switch t := obj.(type) {
+	case *Unstructured:
+		return json.NewEncoder(w).Encode(t.Object)
+	case *UnstructuredList:
+		items := make([]interface{}, 0, len(t.Items))
+		for _, i := range t.Items {
+			items = append(items, i.Object)
+		}
+		listObj := make(map[string]interface{}, len(t.Object)+1)
+		for k, v := range t.Object { // Make a shallow copy
+			listObj[k] = v
+		}
+		listObj["items"] = items
+		return json.NewEncoder(w).Encode(listObj)
+	case *runtime.Unknown:
+		// TODO: Unstructured needs to deal with ContentType.
+		_, err := w.Write(t.Raw)
+		return err
+	default:
+		return json.NewEncoder(w).Encode(t)
+	}
+}
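+
+// exampleEncode is an illustrative sketch (not upstream code): encoding an
+// Unstructured through UnstructuredJSONScheme writes its Object map as JSON.
+func exampleEncode() (string, error) {
+	u := &Unstructured{Object: map[string]interface{}{
+		"apiVersion": "v1",
+		"kind":       "ConfigMap",
+	}}
+	var sb strings.Builder
+	if err := UnstructuredJSONScheme.Encode(u, &sb); err != nil {
+		return "", err
+	}
+	// Yields {"apiVersion":"v1","kind":"ConfigMap"} plus a trailing newline.
+	return sb.String(), nil
+}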
+
+func (s unstructuredJSONScheme) decode(data []byte) (runtime.Object, error) {
+	type detector struct {
+		Items gojson.RawMessage
+	}
+	var det detector
+	if err := json.Unmarshal(data, &det); err != nil {
+		return nil, err
+	}
+
+	if det.Items != nil {
+		list := &UnstructuredList{}
+		err := s.decodeToList(data, list)
+		return list, err
+	}
+
+	// No Items field, so it wasn't a list.
+	unstruct := &Unstructured{}
+	err := s.decodeToUnstructured(data, unstruct)
+	return unstruct, err
+}
+
+func (s unstructuredJSONScheme) decodeInto(data []byte, obj runtime.Object) error {
+	switch x := obj.(type) {
+	case *Unstructured:
+		return s.decodeToUnstructured(data, x)
+	case *UnstructuredList:
+		return s.decodeToList(data, x)
+	case *runtime.VersionedObjects:
+		o, err := s.decode(data)
+		if err == nil {
+			x.Objects = []runtime.Object{o}
+		}
+		return err
+	default:
+		return json.Unmarshal(data, x)
+	}
+}
+
+func (unstructuredJSONScheme) decodeToUnstructured(data []byte, unstruct *Unstructured) error {
+	m := make(map[string]interface{})
+	if err := json.Unmarshal(data, &m); err != nil {
+		return err
+	}
+
+	unstruct.Object = m
+
+	return nil
+}
+
+func (s unstructuredJSONScheme) decodeToList(data []byte, list *UnstructuredList) error {
+	type decodeList struct {
+		Items []gojson.RawMessage
+	}
+
+	var dList decodeList
+	if err := json.Unmarshal(data, &dList); err != nil {
+		return err
+	}
+
+	if err := json.Unmarshal(data, &list.Object); err != nil {
+		return err
+	}
+
+	// For typed lists, e.g., a PodList, the API server doesn't set each
+	// item's APIVersion and Kind, so we need to set them here.
+	listAPIVersion := list.GetAPIVersion()
+	listKind := list.GetKind()
+	itemKind := strings.TrimSuffix(listKind, "List")
+
+	delete(list.Object, "items")
+	list.Items = make([]Unstructured, 0, len(dList.Items))
+	for _, i := range dList.Items {
+		unstruct := &Unstructured{}
+		if err := s.decodeToUnstructured([]byte(i), unstruct); err != nil {
+			return err
+		}
+		// This is hacky. Set the item's Kind and APIVersion to those inferred
+		// from the List.
+		if len(unstruct.GetKind()) == 0 && len(unstruct.GetAPIVersion()) == 0 {
+			unstruct.SetKind(itemKind)
+			unstruct.SetAPIVersion(listAPIVersion)
+		}
+		list.Items = append(list.Items, *unstruct)
+	}
+	return nil
+}
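+
+// exampleDecodeList is an illustrative sketch (not upstream code): decoding
+// list JSON with a nil target yields an *UnstructuredList, and each item
+// inherits the Kind ("PodList" -> "Pod") and APIVersion of the list.
+func exampleDecodeList() (*UnstructuredList, error) {
+	data := []byte(`{"apiVersion":"v1","kind":"PodList","items":[{"metadata":{"name":"a"}}]}`)
+	obj, _, err := UnstructuredJSONScheme.Decode(data, nil, nil)
+	if err != nil {
+		return nil, err
+	}
+	return obj.(*UnstructuredList), nil
+}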
+
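+// JSONFallbackEncoder delegates to the wrapped Encoder and, when that
+// encoder reports the object as not registered, falls back to
+// UnstructuredJSONScheme for Unstructured and UnstructuredList values.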
+type JSONFallbackEncoder struct {
+	runtime.Encoder
+}
+
+func (c JSONFallbackEncoder) Encode(obj runtime.Object, w io.Writer) error {
+	err := c.Encoder.Encode(obj, w)
+	if runtime.IsNotRegisteredError(err) {
+		switch obj.(type) {
+		case *Unstructured, *UnstructuredList:
+			return UnstructuredJSONScheme.Encode(obj, w)
+		}
+	}
+	return err
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go
new file mode 100644
index 0000000..781469e
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go
@@ -0,0 +1,452 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unstructured
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/types"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+)
+
+// Unstructured allows objects that do not have Golang structs registered to be manipulated
+// generically. This can be used to deal with the API objects from a plug-in. Unstructured
+// objects still have functioning TypeMeta features (kind, version, etc.).
+//
+// WARNING: This object has accessors for the v1 standard metadata. You *MUST NOT* use this
+// type if you are dealing with objects that are not in the server meta v1 schema.
+//
+// TODO: make the serialization part of this type distinct from the field accessors.
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:deepcopy-gen=true
+type Unstructured struct {
+	// Object is a JSON-compatible map with string, float, int, bool,
+	// []interface{}, or map[string]interface{} children.
+	Object map[string]interface{}
+}
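+
+// Example (a minimal sketch, not part of the upstream API surface): arbitrary
+// JSON can be decoded into an Unstructured and read back through the metadata
+// accessors defined below.
+//
+//	u := &Unstructured{}
+//	if err := u.UnmarshalJSON([]byte(`{"apiVersion":"v1","kind":"Pod","metadata":{"name":"web"}}`)); err != nil {
+//		panic(err)
+//	}
+//	// u.GetKind() == "Pod", u.GetName() == "web"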
+
+var _ metav1.Object = &Unstructured{}
+var _ runtime.Unstructured = &Unstructured{}
+
+func (obj *Unstructured) GetObjectKind() schema.ObjectKind { return obj }
+
+func (obj *Unstructured) IsList() bool {
+	field, ok := obj.Object["items"]
+	if !ok {
+		return false
+	}
+	_, ok = field.([]interface{})
+	return ok
+}
+
+func (obj *Unstructured) ToList() (*UnstructuredList, error) {
+	if !obj.IsList() {
+		// not a list: return a list with no items that shares the same content
+		return &UnstructuredList{Object: obj.Object}, nil
+	}
+
+	ret := &UnstructuredList{}
+	ret.Object = obj.Object
+
+	err := obj.EachListItem(func(item runtime.Object) error {
+		castItem := item.(*Unstructured)
+		ret.Items = append(ret.Items, *castItem)
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	return ret, nil
+}
+
+func (obj *Unstructured) EachListItem(fn func(runtime.Object) error) error {
+	field, ok := obj.Object["items"]
+	if !ok {
+		return errors.New("content is not a list")
+	}
+	items, ok := field.([]interface{})
+	if !ok {
+		return fmt.Errorf("content is not a list: %T", field)
+	}
+	for _, item := range items {
+		child, ok := item.(map[string]interface{})
+		if !ok {
+			return fmt.Errorf("items member is not an object: %T", child)
+		}
+		if err := fn(&Unstructured{Object: child}); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (obj *Unstructured) UnstructuredContent() map[string]interface{} {
+	if obj.Object == nil {
+		return make(map[string]interface{})
+	}
+	return obj.Object
+}
+
+func (obj *Unstructured) SetUnstructuredContent(content map[string]interface{}) {
+	obj.Object = content
+}
+
+// MarshalJSON ensures that the unstructured object produces proper
+// JSON when passed to Go's standard JSON library.
+func (u *Unstructured) MarshalJSON() ([]byte, error) {
+	var buf bytes.Buffer
+	err := UnstructuredJSONScheme.Encode(u, &buf)
+	return buf.Bytes(), err
+}
+
+// UnmarshalJSON ensures that the unstructured object properly decodes
+// JSON when passed to Go's standard JSON library.
+func (u *Unstructured) UnmarshalJSON(b []byte) error {
+	_, _, err := UnstructuredJSONScheme.Decode(b, nil, u)
+	return err
+}
+
+func (in *Unstructured) DeepCopy() *Unstructured {
+	if in == nil {
+		return nil
+	}
+	out := new(Unstructured)
+	*out = *in
+	out.Object = runtime.DeepCopyJSON(in.Object)
+	return out
+}
+
+func (u *Unstructured) setNestedField(value interface{}, fields ...string) {
+	if u.Object == nil {
+		u.Object = make(map[string]interface{})
+	}
+	SetNestedField(u.Object, value, fields...)
+}
+
+func (u *Unstructured) setNestedSlice(value []string, fields ...string) {
+	if u.Object == nil {
+		u.Object = make(map[string]interface{})
+	}
+	SetNestedStringSlice(u.Object, value, fields...)
+}
+
+func (u *Unstructured) setNestedMap(value map[string]string, fields ...string) {
+	if u.Object == nil {
+		u.Object = make(map[string]interface{})
+	}
+	SetNestedStringMap(u.Object, value, fields...)
+}
+
+func (u *Unstructured) GetOwnerReferences() []metav1.OwnerReference {
+	field, found, err := NestedFieldNoCopy(u.Object, "metadata", "ownerReferences")
+	if !found || err != nil {
+		return nil
+	}
+	original, ok := field.([]interface{})
+	if !ok {
+		return nil
+	}
+	ret := make([]metav1.OwnerReference, 0, len(original))
+	for _, obj := range original {
+		o, ok := obj.(map[string]interface{})
+		if !ok {
+			// expected map[string]interface{}, got something else
+			return nil
+		}
+		ret = append(ret, extractOwnerReference(o))
+	}
+	return ret
+}
+
+func (u *Unstructured) SetOwnerReferences(references []metav1.OwnerReference) {
+	if references == nil {
+		RemoveNestedField(u.Object, "metadata", "ownerReferences")
+		return
+	}
+
+	newReferences := make([]interface{}, 0, len(references))
+	for _, reference := range references {
+		out, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&reference)
+		if err != nil {
+			utilruntime.HandleError(fmt.Errorf("unable to convert Owner Reference: %v", err))
+			continue
+		}
+		newReferences = append(newReferences, out)
+	}
+	u.setNestedField(newReferences, "metadata", "ownerReferences")
+}
+
+func (u *Unstructured) GetAPIVersion() string {
+	return getNestedString(u.Object, "apiVersion")
+}
+
+func (u *Unstructured) SetAPIVersion(version string) {
+	u.setNestedField(version, "apiVersion")
+}
+
+func (u *Unstructured) GetKind() string {
+	return getNestedString(u.Object, "kind")
+}
+
+func (u *Unstructured) SetKind(kind string) {
+	u.setNestedField(kind, "kind")
+}
+
+func (u *Unstructured) GetNamespace() string {
+	return getNestedString(u.Object, "metadata", "namespace")
+}
+
+func (u *Unstructured) SetNamespace(namespace string) {
+	if len(namespace) == 0 {
+		RemoveNestedField(u.Object, "metadata", "namespace")
+		return
+	}
+	u.setNestedField(namespace, "metadata", "namespace")
+}
+
+func (u *Unstructured) GetName() string {
+	return getNestedString(u.Object, "metadata", "name")
+}
+
+func (u *Unstructured) SetName(name string) {
+	if len(name) == 0 {
+		RemoveNestedField(u.Object, "metadata", "name")
+		return
+	}
+	u.setNestedField(name, "metadata", "name")
+}
+
+func (u *Unstructured) GetGenerateName() string {
+	return getNestedString(u.Object, "metadata", "generateName")
+}
+
+func (u *Unstructured) SetGenerateName(generateName string) {
+	if len(generateName) == 0 {
+		RemoveNestedField(u.Object, "metadata", "generateName")
+		return
+	}
+	u.setNestedField(generateName, "metadata", "generateName")
+}
+
+func (u *Unstructured) GetUID() types.UID {
+	return types.UID(getNestedString(u.Object, "metadata", "uid"))
+}
+
+func (u *Unstructured) SetUID(uid types.UID) {
+	if len(string(uid)) == 0 {
+		RemoveNestedField(u.Object, "metadata", "uid")
+		return
+	}
+	u.setNestedField(string(uid), "metadata", "uid")
+}
+
+func (u *Unstructured) GetResourceVersion() string {
+	return getNestedString(u.Object, "metadata", "resourceVersion")
+}
+
+func (u *Unstructured) SetResourceVersion(resourceVersion string) {
+	if len(resourceVersion) == 0 {
+		RemoveNestedField(u.Object, "metadata", "resourceVersion")
+		return
+	}
+	u.setNestedField(resourceVersion, "metadata", "resourceVersion")
+}
+
+func (u *Unstructured) GetGeneration() int64 {
+	val, found, err := NestedInt64(u.Object, "metadata", "generation")
+	if !found || err != nil {
+		return 0
+	}
+	return val
+}
+
+func (u *Unstructured) SetGeneration(generation int64) {
+	if generation == 0 {
+		RemoveNestedField(u.Object, "metadata", "generation")
+		return
+	}
+	u.setNestedField(generation, "metadata", "generation")
+}
+
+func (u *Unstructured) GetSelfLink() string {
+	return getNestedString(u.Object, "metadata", "selfLink")
+}
+
+func (u *Unstructured) SetSelfLink(selfLink string) {
+	if len(selfLink) == 0 {
+		RemoveNestedField(u.Object, "metadata", "selfLink")
+		return
+	}
+	u.setNestedField(selfLink, "metadata", "selfLink")
+}
+
+func (u *Unstructured) GetContinue() string {
+	return getNestedString(u.Object, "metadata", "continue")
+}
+
+func (u *Unstructured) SetContinue(c string) {
+	if len(c) == 0 {
+		RemoveNestedField(u.Object, "metadata", "continue")
+		return
+	}
+	u.setNestedField(c, "metadata", "continue")
+}
+
+func (u *Unstructured) GetCreationTimestamp() metav1.Time {
+	var timestamp metav1.Time
+	timestamp.UnmarshalQueryParameter(getNestedString(u.Object, "metadata", "creationTimestamp"))
+	return timestamp
+}
+
+func (u *Unstructured) SetCreationTimestamp(timestamp metav1.Time) {
+	ts, _ := timestamp.MarshalQueryParameter()
+	if len(ts) == 0 || timestamp.Time.IsZero() {
+		RemoveNestedField(u.Object, "metadata", "creationTimestamp")
+		return
+	}
+	u.setNestedField(ts, "metadata", "creationTimestamp")
+}
+
+func (u *Unstructured) GetDeletionTimestamp() *metav1.Time {
+	var timestamp metav1.Time
+	timestamp.UnmarshalQueryParameter(getNestedString(u.Object, "metadata", "deletionTimestamp"))
+	if timestamp.IsZero() {
+		return nil
+	}
+	return &timestamp
+}
+
+func (u *Unstructured) SetDeletionTimestamp(timestamp *metav1.Time) {
+	if timestamp == nil {
+		RemoveNestedField(u.Object, "metadata", "deletionTimestamp")
+		return
+	}
+	ts, _ := timestamp.MarshalQueryParameter()
+	u.setNestedField(ts, "metadata", "deletionTimestamp")
+}
+
+func (u *Unstructured) GetDeletionGracePeriodSeconds() *int64 {
+	val, found, err := NestedInt64(u.Object, "metadata", "deletionGracePeriodSeconds")
+	if !found || err != nil {
+		return nil
+	}
+	return &val
+}
+
+func (u *Unstructured) SetDeletionGracePeriodSeconds(deletionGracePeriodSeconds *int64) {
+	if deletionGracePeriodSeconds == nil {
+		RemoveNestedField(u.Object, "metadata", "deletionGracePeriodSeconds")
+		return
+	}
+	u.setNestedField(*deletionGracePeriodSeconds, "metadata", "deletionGracePeriodSeconds")
+}
+
+func (u *Unstructured) GetLabels() map[string]string {
+	m, _, _ := NestedStringMap(u.Object, "metadata", "labels")
+	return m
+}
+
+func (u *Unstructured) SetLabels(labels map[string]string) {
+	if labels == nil {
+		RemoveNestedField(u.Object, "metadata", "labels")
+		return
+	}
+	u.setNestedMap(labels, "metadata", "labels")
+}
+
+func (u *Unstructured) GetAnnotations() map[string]string {
+	m, _, _ := NestedStringMap(u.Object, "metadata", "annotations")
+	return m
+}
+
+func (u *Unstructured) SetAnnotations(annotations map[string]string) {
+	if annotations == nil {
+		RemoveNestedField(u.Object, "metadata", "annotations")
+		return
+	}
+	u.setNestedMap(annotations, "metadata", "annotations")
+}
+
+func (u *Unstructured) SetGroupVersionKind(gvk schema.GroupVersionKind) {
+	u.SetAPIVersion(gvk.GroupVersion().String())
+	u.SetKind(gvk.Kind)
+}
+
+func (u *Unstructured) GroupVersionKind() schema.GroupVersionKind {
+	gv, err := schema.ParseGroupVersion(u.GetAPIVersion())
+	if err != nil {
+		return schema.GroupVersionKind{}
+	}
+	gvk := gv.WithKind(u.GetKind())
+	return gvk
+}
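+
+// For example (sketch): the two accessors above round-trip cleanly.
+//
+//	u.SetGroupVersionKind(schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"})
+//	// u.GetAPIVersion() == "apps/v1" && u.GetKind() == "Deployment"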
+
+func (u *Unstructured) GetInitializers() *metav1.Initializers {
+	m, found, err := nestedMapNoCopy(u.Object, "metadata", "initializers")
+	if !found || err != nil {
+		return nil
+	}
+	out := &metav1.Initializers{}
+	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(m, out); err != nil {
+		utilruntime.HandleError(fmt.Errorf("unable to retrieve initializers for object: %v", err))
+		return nil
+	}
+	return out
+}
+
+func (u *Unstructured) SetInitializers(initializers *metav1.Initializers) {
+	if initializers == nil {
+		RemoveNestedField(u.Object, "metadata", "initializers")
+		return
+	}
+	out, err := runtime.DefaultUnstructuredConverter.ToUnstructured(initializers)
+	if err != nil {
+		utilruntime.HandleError(fmt.Errorf("unable to retrieve initializers for object: %v", err))
+	}
+	u.setNestedField(out, "metadata", "initializers")
+}
+
+func (u *Unstructured) GetFinalizers() []string {
+	val, _, _ := NestedStringSlice(u.Object, "metadata", "finalizers")
+	return val
+}
+
+func (u *Unstructured) SetFinalizers(finalizers []string) {
+	if finalizers == nil {
+		RemoveNestedField(u.Object, "metadata", "finalizers")
+		return
+	}
+	u.setNestedSlice(finalizers, "metadata", "finalizers")
+}
+
+func (u *Unstructured) GetClusterName() string {
+	return getNestedString(u.Object, "metadata", "clusterName")
+}
+
+func (u *Unstructured) SetClusterName(clusterName string) {
+	if len(clusterName) == 0 {
+		RemoveNestedField(u.Object, "metadata", "clusterName")
+		return
+	}
+	u.setNestedField(clusterName, "metadata", "clusterName")
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go
new file mode 100644
index 0000000..bf3fd02
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go
@@ -0,0 +1,188 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unstructured
+
+import (
+	"bytes"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var _ runtime.Unstructured = &UnstructuredList{}
+var _ metav1.ListInterface = &UnstructuredList{}
+
+// UnstructuredList allows lists that do not have Go structs
+// registered to be manipulated generically. This can be used to deal
+// with the API lists from a plug-in.
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:deepcopy-gen=true
+type UnstructuredList struct {
+	Object map[string]interface{}
+
+	// Items is a list of unstructured objects.
+	Items []Unstructured `json:"items"`
+}
+
+func (u *UnstructuredList) GetObjectKind() schema.ObjectKind { return u }
+
+func (u *UnstructuredList) IsList() bool { return true }
+
+func (u *UnstructuredList) EachListItem(fn func(runtime.Object) error) error {
+	for i := range u.Items {
+		if err := fn(&u.Items[i]); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// UnstructuredContent returns a map containing an overlay of the Items field onto
+// the Object field. The Items field always overwrites the "items" key in Object.
+func (u *UnstructuredList) UnstructuredContent() map[string]interface{} {
+	out := make(map[string]interface{}, len(u.Object)+1)
+
+	// shallow copy every property
+	for k, v := range u.Object {
+		out[k] = v
+	}
+
+	items := make([]interface{}, len(u.Items))
+	for i, item := range u.Items {
+		items[i] = item.UnstructuredContent()
+	}
+	out["items"] = items
+	return out
+}
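+
+// For instance (sketch): the overlay behaves as follows.
+//
+//	list.Object = map[string]interface{}{"kind": "PodList", "items": "stale"}
+//	list.Items = []Unstructured{{Object: map[string]interface{}{"kind": "Pod"}}}
+//	content := list.UnstructuredContent()
+//	// content["kind"] == "PodList"; content["items"] is the one-element slice
+//	// built from Items, replacing the stale value.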
+
+// SetUnstructuredContent obeys the conventions of List and keeps Items and the
+// embedded "items" array in sync: any entry in the incoming "items" array that
+// is not an object is dropped from both.
+func (obj *UnstructuredList) SetUnstructuredContent(content map[string]interface{}) {
+	obj.Object = content
+	if content == nil {
+		obj.Items = nil
+		return
+	}
+	items, ok := obj.Object["items"].([]interface{})
+	if !ok || items == nil {
+		items = []interface{}{}
+	}
+	unstructuredItems := make([]Unstructured, 0, len(items))
+	newItems := make([]interface{}, 0, len(items))
+	for _, item := range items {
+		o, ok := item.(map[string]interface{})
+		if !ok {
+			continue
+		}
+		unstructuredItems = append(unstructuredItems, Unstructured{Object: o})
+		newItems = append(newItems, o)
+	}
+	obj.Items = unstructuredItems
+	obj.Object["items"] = newItems
+}
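+
+// Example (sketch): non-object entries in the incoming "items" array are
+// dropped from both Items and Object.
+//
+//	list := &UnstructuredList{}
+//	list.SetUnstructuredContent(map[string]interface{}{
+//		"items": []interface{}{map[string]interface{}{"kind": "Pod"}, "not-an-object"},
+//	})
+//	// len(list.Items) == 1 and list.Object["items"] holds only the Pod map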
+
+func (u *UnstructuredList) DeepCopy() *UnstructuredList {
+	if u == nil {
+		return nil
+	}
+	out := new(UnstructuredList)
+	*out = *u
+	out.Object = runtime.DeepCopyJSON(u.Object)
+	out.Items = make([]Unstructured, len(u.Items))
+	for i := range u.Items {
+		u.Items[i].DeepCopyInto(&out.Items[i])
+	}
+	return out
+}
+
+// MarshalJSON ensures that the unstructured list object produces proper
+// JSON when passed to Go's standard JSON library.
+func (u *UnstructuredList) MarshalJSON() ([]byte, error) {
+	var buf bytes.Buffer
+	err := UnstructuredJSONScheme.Encode(u, &buf)
+	return buf.Bytes(), err
+}
+
+// UnmarshalJSON ensures that the unstructured list object properly
+// decodes JSON when passed to Go's standard JSON library.
+func (u *UnstructuredList) UnmarshalJSON(b []byte) error {
+	_, _, err := UnstructuredJSONScheme.Decode(b, nil, u)
+	return err
+}
+
+func (u *UnstructuredList) GetAPIVersion() string {
+	return getNestedString(u.Object, "apiVersion")
+}
+
+func (u *UnstructuredList) SetAPIVersion(version string) {
+	u.setNestedField(version, "apiVersion")
+}
+
+func (u *UnstructuredList) GetKind() string {
+	return getNestedString(u.Object, "kind")
+}
+
+func (u *UnstructuredList) SetKind(kind string) {
+	u.setNestedField(kind, "kind")
+}
+
+func (u *UnstructuredList) GetResourceVersion() string {
+	return getNestedString(u.Object, "metadata", "resourceVersion")
+}
+
+func (u *UnstructuredList) SetResourceVersion(version string) {
+	u.setNestedField(version, "metadata", "resourceVersion")
+}
+
+func (u *UnstructuredList) GetSelfLink() string {
+	return getNestedString(u.Object, "metadata", "selfLink")
+}
+
+func (u *UnstructuredList) SetSelfLink(selfLink string) {
+	u.setNestedField(selfLink, "metadata", "selfLink")
+}
+
+func (u *UnstructuredList) GetContinue() string {
+	return getNestedString(u.Object, "metadata", "continue")
+}
+
+func (u *UnstructuredList) SetContinue(c string) {
+	u.setNestedField(c, "metadata", "continue")
+}
+
+func (u *UnstructuredList) SetGroupVersionKind(gvk schema.GroupVersionKind) {
+	u.SetAPIVersion(gvk.GroupVersion().String())
+	u.SetKind(gvk.Kind)
+}
+
+func (u *UnstructuredList) GroupVersionKind() schema.GroupVersionKind {
+	gv, err := schema.ParseGroupVersion(u.GetAPIVersion())
+	if err != nil {
+		return schema.GroupVersionKind{}
+	}
+	gvk := gv.WithKind(u.GetKind())
+	return gvk
+}
+
+func (u *UnstructuredList) setNestedField(value interface{}, fields ...string) {
+	if u.Object == nil {
+		u.Object = make(map[string]interface{})
+	}
+	SetNestedField(u.Object, value, fields...)
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go
new file mode 100644
index 0000000..9a9f25e
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go
@@ -0,0 +1,55 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package unstructured
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Unstructured) DeepCopyInto(out *Unstructured) {
+	clone := in.DeepCopy()
+	*out = *clone
+	return
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Unstructured) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UnstructuredList) DeepCopyInto(out *UnstructuredList) {
+	clone := in.DeepCopy()
+	*out = *clone
+	return
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *UnstructuredList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/watch.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/watch.go
new file mode 100644
index 0000000..58f0773
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/watch.go
@@ -0,0 +1,89 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"k8s.io/apimachinery/pkg/conversion"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/watch"
+)
+
+// Event represents a single event to a watched resource.
+//
+// +protobuf=true
+// +k8s:deepcopy-gen=true
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type WatchEvent struct {
+	Type string `json:"type" protobuf:"bytes,1,opt,name=type"`
+
+	// Object is:
+	//  * If Type is Added or Modified: the new state of the object.
+	//  * If Type is Deleted: the state of the object immediately before deletion.
+	//  * If Type is Error: *Status is recommended; other types may make sense
+	//    depending on context.
+	Object runtime.RawExtension `json:"object" protobuf:"bytes,2,opt,name=object"`
+}
+
+func Convert_watch_Event_To_v1_WatchEvent(in *watch.Event, out *WatchEvent, s conversion.Scope) error {
+	out.Type = string(in.Type)
+	switch t := in.Object.(type) {
+	case *runtime.Unknown:
+		// TODO: handle other fields on Unknown and detect type
+		out.Object.Raw = t.Raw
+	case nil:
+	default:
+		out.Object.Object = in.Object
+	}
+	return nil
+}
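+
+// Example (sketch; obj is any runtime.Object assumed at the call site, and the
+// conversion.Scope argument is unused by this function):
+//
+//	var we WatchEvent
+//	_ = Convert_watch_Event_To_v1_WatchEvent(&watch.Event{Type: watch.Added, Object: obj}, &we, nil)
+//	// we.Type == "ADDED" && we.Object.Object == obj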
+
+func Convert_v1_InternalEvent_To_v1_WatchEvent(in *InternalEvent, out *WatchEvent, s conversion.Scope) error {
+	return Convert_watch_Event_To_v1_WatchEvent((*watch.Event)(in), out, s)
+}
+
+func Convert_v1_WatchEvent_To_watch_Event(in *WatchEvent, out *watch.Event, s conversion.Scope) error {
+	out.Type = watch.EventType(in.Type)
+	if in.Object.Object != nil {
+		out.Object = in.Object.Object
+	} else if in.Object.Raw != nil {
+		// TODO: handle other fields on Unknown and detect type
+		out.Object = &runtime.Unknown{
+			Raw:         in.Object.Raw,
+			ContentType: runtime.ContentTypeJSON,
+		}
+	}
+	return nil
+}
+
+func Convert_v1_WatchEvent_To_v1_InternalEvent(in *WatchEvent, out *InternalEvent, s conversion.Scope) error {
+	return Convert_v1_WatchEvent_To_watch_Event(in, (*watch.Event)(out), s)
+}
+
+// InternalEvent makes watch.Event versioned
+// +protobuf=false
+type InternalEvent watch.Event
+
+func (e *InternalEvent) GetObjectKind() schema.ObjectKind { return schema.EmptyObjectKind }
+func (e *WatchEvent) GetObjectKind() schema.ObjectKind    { return schema.EmptyObjectKind }
+func (e *InternalEvent) DeepCopyObject() runtime.Object {
+	if c := e.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..1084599
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go
@@ -0,0 +1,961 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	types "k8s.io/apimachinery/pkg/types"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *APIGroup) DeepCopyInto(out *APIGroup) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.Versions != nil {
+		in, out := &in.Versions, &out.Versions
+		*out = make([]GroupVersionForDiscovery, len(*in))
+		copy(*out, *in)
+	}
+	out.PreferredVersion = in.PreferredVersion
+	if in.ServerAddressByClientCIDRs != nil {
+		in, out := &in.ServerAddressByClientCIDRs, &out.ServerAddressByClientCIDRs
+		*out = make([]ServerAddressByClientCIDR, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIGroup.
+func (in *APIGroup) DeepCopy() *APIGroup {
+	if in == nil {
+		return nil
+	}
+	out := new(APIGroup)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *APIGroup) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *APIGroupList) DeepCopyInto(out *APIGroupList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.Groups != nil {
+		in, out := &in.Groups, &out.Groups
+		*out = make([]APIGroup, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIGroupList.
+func (in *APIGroupList) DeepCopy() *APIGroupList {
+	if in == nil {
+		return nil
+	}
+	out := new(APIGroupList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *APIGroupList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *APIResource) DeepCopyInto(out *APIResource) {
+	*out = *in
+	if in.Verbs != nil {
+		in, out := &in.Verbs, &out.Verbs
+		*out = make(Verbs, len(*in))
+		copy(*out, *in)
+	}
+	if in.ShortNames != nil {
+		in, out := &in.ShortNames, &out.ShortNames
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Categories != nil {
+		in, out := &in.Categories, &out.Categories
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIResource.
+func (in *APIResource) DeepCopy() *APIResource {
+	if in == nil {
+		return nil
+	}
+	out := new(APIResource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *APIResourceList) DeepCopyInto(out *APIResourceList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.APIResources != nil {
+		in, out := &in.APIResources, &out.APIResources
+		*out = make([]APIResource, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIResourceList.
+func (in *APIResourceList) DeepCopy() *APIResourceList {
+	if in == nil {
+		return nil
+	}
+	out := new(APIResourceList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *APIResourceList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *APIVersions) DeepCopyInto(out *APIVersions) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.Versions != nil {
+		in, out := &in.Versions, &out.Versions
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.ServerAddressByClientCIDRs != nil {
+		in, out := &in.ServerAddressByClientCIDRs, &out.ServerAddressByClientCIDRs
+		*out = make([]ServerAddressByClientCIDR, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIVersions.
+func (in *APIVersions) DeepCopy() *APIVersions {
+	if in == nil {
+		return nil
+	}
+	out := new(APIVersions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *APIVersions) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CreateOptions) DeepCopyInto(out *CreateOptions) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.DryRun != nil {
+		in, out := &in.DryRun, &out.DryRun
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CreateOptions.
+func (in *CreateOptions) DeepCopy() *CreateOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(CreateOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CreateOptions) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeleteOptions) DeepCopyInto(out *DeleteOptions) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.GracePeriodSeconds != nil {
+		in, out := &in.GracePeriodSeconds, &out.GracePeriodSeconds
+		*out = new(int64)
+		**out = **in
+	}
+	if in.Preconditions != nil {
+		in, out := &in.Preconditions, &out.Preconditions
+		*out = new(Preconditions)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.OrphanDependents != nil {
+		in, out := &in.OrphanDependents, &out.OrphanDependents
+		*out = new(bool)
+		**out = **in
+	}
+	if in.PropagationPolicy != nil {
+		in, out := &in.PropagationPolicy, &out.PropagationPolicy
+		*out = new(DeletionPropagation)
+		**out = **in
+	}
+	if in.DryRun != nil {
+		in, out := &in.DryRun, &out.DryRun
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeleteOptions.
+func (in *DeleteOptions) DeepCopy() *DeleteOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(DeleteOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DeleteOptions) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Duration) DeepCopyInto(out *Duration) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Duration.
+func (in *Duration) DeepCopy() *Duration {
+	if in == nil {
+		return nil
+	}
+	out := new(Duration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExportOptions) DeepCopyInto(out *ExportOptions) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExportOptions.
+func (in *ExportOptions) DeepCopy() *ExportOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(ExportOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ExportOptions) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GetOptions) DeepCopyInto(out *GetOptions) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GetOptions.
+func (in *GetOptions) DeepCopy() *GetOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(GetOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *GetOptions) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GroupKind) DeepCopyInto(out *GroupKind) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupKind.
+func (in *GroupKind) DeepCopy() *GroupKind {
+	if in == nil {
+		return nil
+	}
+	out := new(GroupKind)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GroupResource) DeepCopyInto(out *GroupResource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupResource.
+func (in *GroupResource) DeepCopy() *GroupResource {
+	if in == nil {
+		return nil
+	}
+	out := new(GroupResource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GroupVersion) DeepCopyInto(out *GroupVersion) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupVersion.
+func (in *GroupVersion) DeepCopy() *GroupVersion {
+	if in == nil {
+		return nil
+	}
+	out := new(GroupVersion)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GroupVersionForDiscovery) DeepCopyInto(out *GroupVersionForDiscovery) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupVersionForDiscovery.
+func (in *GroupVersionForDiscovery) DeepCopy() *GroupVersionForDiscovery {
+	if in == nil {
+		return nil
+	}
+	out := new(GroupVersionForDiscovery)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GroupVersionKind) DeepCopyInto(out *GroupVersionKind) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupVersionKind.
+func (in *GroupVersionKind) DeepCopy() *GroupVersionKind {
+	if in == nil {
+		return nil
+	}
+	out := new(GroupVersionKind)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GroupVersionResource) DeepCopyInto(out *GroupVersionResource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupVersionResource.
+func (in *GroupVersionResource) DeepCopy() *GroupVersionResource {
+	if in == nil {
+		return nil
+	}
+	out := new(GroupVersionResource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Initializer) DeepCopyInto(out *Initializer) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Initializer.
+func (in *Initializer) DeepCopy() *Initializer {
+	if in == nil {
+		return nil
+	}
+	out := new(Initializer)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Initializers) DeepCopyInto(out *Initializers) {
+	*out = *in
+	if in.Pending != nil {
+		in, out := &in.Pending, &out.Pending
+		*out = make([]Initializer, len(*in))
+		copy(*out, *in)
+	}
+	if in.Result != nil {
+		in, out := &in.Result, &out.Result
+		*out = new(Status)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Initializers.
+func (in *Initializers) DeepCopy() *Initializers {
+	if in == nil {
+		return nil
+	}
+	out := new(Initializers)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InternalEvent) DeepCopyInto(out *InternalEvent) {
+	*out = *in
+	if in.Object != nil {
+		out.Object = in.Object.DeepCopyObject()
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InternalEvent.
+func (in *InternalEvent) DeepCopy() *InternalEvent {
+	if in == nil {
+		return nil
+	}
+	out := new(InternalEvent)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LabelSelector) DeepCopyInto(out *LabelSelector) {
+	*out = *in
+	if in.MatchLabels != nil {
+		in, out := &in.MatchLabels, &out.MatchLabels
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.MatchExpressions != nil {
+		in, out := &in.MatchExpressions, &out.MatchExpressions
+		*out = make([]LabelSelectorRequirement, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabelSelector.
+func (in *LabelSelector) DeepCopy() *LabelSelector {
+	if in == nil {
+		return nil
+	}
+	out := new(LabelSelector)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LabelSelectorRequirement) DeepCopyInto(out *LabelSelectorRequirement) {
+	*out = *in
+	if in.Values != nil {
+		in, out := &in.Values, &out.Values
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabelSelectorRequirement.
+func (in *LabelSelectorRequirement) DeepCopy() *LabelSelectorRequirement {
+	if in == nil {
+		return nil
+	}
+	out := new(LabelSelectorRequirement)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *List) DeepCopyInto(out *List) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]runtime.RawExtension, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new List.
+func (in *List) DeepCopy() *List {
+	if in == nil {
+		return nil
+	}
+	out := new(List)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *List) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ListMeta) DeepCopyInto(out *ListMeta) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListMeta.
+func (in *ListMeta) DeepCopy() *ListMeta {
+	if in == nil {
+		return nil
+	}
+	out := new(ListMeta)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ListOptions) DeepCopyInto(out *ListOptions) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.TimeoutSeconds != nil {
+		in, out := &in.TimeoutSeconds, &out.TimeoutSeconds
+		*out = new(int64)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListOptions.
+func (in *ListOptions) DeepCopy() *ListOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(ListOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ListOptions) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MicroTime.
+func (in *MicroTime) DeepCopy() *MicroTime {
+	if in == nil {
+		return nil
+	}
+	out := new(MicroTime)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectMeta) DeepCopyInto(out *ObjectMeta) {
+	*out = *in
+	in.CreationTimestamp.DeepCopyInto(&out.CreationTimestamp)
+	if in.DeletionTimestamp != nil {
+		in, out := &in.DeletionTimestamp, &out.DeletionTimestamp
+		*out = (*in).DeepCopy()
+	}
+	if in.DeletionGracePeriodSeconds != nil {
+		in, out := &in.DeletionGracePeriodSeconds, &out.DeletionGracePeriodSeconds
+		*out = new(int64)
+		**out = **in
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.Annotations != nil {
+		in, out := &in.Annotations, &out.Annotations
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.OwnerReferences != nil {
+		in, out := &in.OwnerReferences, &out.OwnerReferences
+		*out = make([]OwnerReference, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Initializers != nil {
+		in, out := &in.Initializers, &out.Initializers
+		*out = new(Initializers)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Finalizers != nil {
+		in, out := &in.Finalizers, &out.Finalizers
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMeta.
+func (in *ObjectMeta) DeepCopy() *ObjectMeta {
+	if in == nil {
+		return nil
+	}
+	out := new(ObjectMeta)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OwnerReference) DeepCopyInto(out *OwnerReference) {
+	*out = *in
+	if in.Controller != nil {
+		in, out := &in.Controller, &out.Controller
+		*out = new(bool)
+		**out = **in
+	}
+	if in.BlockOwnerDeletion != nil {
+		in, out := &in.BlockOwnerDeletion, &out.BlockOwnerDeletion
+		*out = new(bool)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OwnerReference.
+func (in *OwnerReference) DeepCopy() *OwnerReference {
+	if in == nil {
+		return nil
+	}
+	out := new(OwnerReference)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Patch) DeepCopyInto(out *Patch) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Patch.
+func (in *Patch) DeepCopy() *Patch {
+	if in == nil {
+		return nil
+	}
+	out := new(Patch)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Preconditions) DeepCopyInto(out *Preconditions) {
+	*out = *in
+	if in.UID != nil {
+		in, out := &in.UID, &out.UID
+		*out = new(types.UID)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Preconditions.
+func (in *Preconditions) DeepCopy() *Preconditions {
+	if in == nil {
+		return nil
+	}
+	out := new(Preconditions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RootPaths) DeepCopyInto(out *RootPaths) {
+	*out = *in
+	if in.Paths != nil {
+		in, out := &in.Paths, &out.Paths
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RootPaths.
+func (in *RootPaths) DeepCopy() *RootPaths {
+	if in == nil {
+		return nil
+	}
+	out := new(RootPaths)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServerAddressByClientCIDR) DeepCopyInto(out *ServerAddressByClientCIDR) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerAddressByClientCIDR.
+func (in *ServerAddressByClientCIDR) DeepCopy() *ServerAddressByClientCIDR {
+	if in == nil {
+		return nil
+	}
+	out := new(ServerAddressByClientCIDR)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Status) DeepCopyInto(out *Status) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Details != nil {
+		in, out := &in.Details, &out.Details
+		*out = new(StatusDetails)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Status.
+func (in *Status) DeepCopy() *Status {
+	if in == nil {
+		return nil
+	}
+	out := new(Status)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Status) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StatusCause) DeepCopyInto(out *StatusCause) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatusCause.
+func (in *StatusCause) DeepCopy() *StatusCause {
+	if in == nil {
+		return nil
+	}
+	out := new(StatusCause)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StatusDetails) DeepCopyInto(out *StatusDetails) {
+	*out = *in
+	if in.Causes != nil {
+		in, out := &in.Causes, &out.Causes
+		*out = make([]StatusCause, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatusDetails.
+func (in *StatusDetails) DeepCopy() *StatusDetails {
+	if in == nil {
+		return nil
+	}
+	out := new(StatusDetails)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Time.
+func (in *Time) DeepCopy() *Time {
+	if in == nil {
+		return nil
+	}
+	out := new(Time)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Timestamp) DeepCopyInto(out *Timestamp) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Timestamp.
+func (in *Timestamp) DeepCopy() *Timestamp {
+	if in == nil {
+		return nil
+	}
+	out := new(Timestamp)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UpdateOptions) DeepCopyInto(out *UpdateOptions) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.DryRun != nil {
+		in, out := &in.DryRun, &out.DryRun
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdateOptions.
+func (in *UpdateOptions) DeepCopy() *UpdateOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(UpdateOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *UpdateOptions) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in Verbs) DeepCopyInto(out *Verbs) {
+	{
+		in := &in
+		*out = make(Verbs, len(*in))
+		copy(*out, *in)
+		return
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Verbs.
+func (in Verbs) DeepCopy() Verbs {
+	if in == nil {
+		return nil
+	}
+	out := new(Verbs)
+	in.DeepCopyInto(out)
+	return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WatchEvent) DeepCopyInto(out *WatchEvent) {
+	*out = *in
+	in.Object.DeepCopyInto(&out.Object)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WatchEvent.
+func (in *WatchEvent) DeepCopy() *WatchEvent {
+	if in == nil {
+		return nil
+	}
+	out := new(WatchEvent)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *WatchEvent) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go
new file mode 100644
index 0000000..cce2e60
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go
@@ -0,0 +1,32 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by defaulter-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// RegisterDefaults adds defaulter functions to the given scheme.
+// Public to allow building arbitrary schemes.
+// All generated defaulters are covering - they call all nested defaulters.
+func RegisterDefaults(scheme *runtime.Scheme) error {
+	return nil
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/conversion.go
new file mode 100644
index 0000000..f3e5e4c
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/conversion.go
@@ -0,0 +1,27 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import "k8s.io/apimachinery/pkg/conversion"
+
+// Convert_Slice_string_To_v1beta1_IncludeObjectPolicy allows converting a URL query parameter
+// value into an IncludeObjectPolicy, taking the first element of the slice when present.
+func Convert_Slice_string_To_v1beta1_IncludeObjectPolicy(input *[]string, out *IncludeObjectPolicy, s conversion.Scope) error {
+	if len(*input) > 0 {
+		*out = IncludeObjectPolicy((*input)[0])
+	}
+	return nil
+}
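+
+// Example (sketch): a request query "?includeObject=None" reaches this
+// converter as []string{"None"}; the scope argument is unused here.
+//
+//	var policy IncludeObjectPolicy
+//	in := []string{"None"}
+//	_ = Convert_Slice_string_To_v1beta1_IncludeObjectPolicy(&in, &policy, nil)
+//	// policy == IncludeObjectPolicy("None")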
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/deepcopy.go
new file mode 100644
index 0000000..3b2bedd
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/deepcopy.go
@@ -0,0 +1,44 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import "k8s.io/apimachinery/pkg/runtime"
+
+func (in *TableRow) DeepCopy() *TableRow {
+	if in == nil {
+		return nil
+	}
+
+	out := new(TableRow)
+
+	if in.Cells != nil {
+		out.Cells = make([]interface{}, len(in.Cells))
+		for i := range in.Cells {
+			out.Cells[i] = runtime.DeepCopyJSONValue(in.Cells[i])
+		}
+	}
+
+	if in.Conditions != nil {
+		out.Conditions = make([]TableRowCondition, len(in.Conditions))
+		for i := range in.Conditions {
+			in.Conditions[i].DeepCopyInto(&out.Conditions[i])
+		}
+	}
+
+	in.Object.DeepCopyInto(&out.Object)
+	return out
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go
new file mode 100644
index 0000000..46b0e13
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go
@@ -0,0 +1,23 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:openapi-gen=true
+// +k8s:defaulter-gen=TypeMeta
+
+// +groupName=meta.k8s.io
+
+package v1beta1
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto
new file mode 100644
index 0000000..83be997
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto
@@ -0,0 +1,57 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.apimachinery.pkg.apis.meta.v1beta1;
+
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1beta1";
+
+// PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients
+// to get access to a particular ObjectMeta schema without knowing the details of the version.
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+message PartialObjectMetadata {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+}
+
+// PartialObjectMetadataList contains a list of objects containing only their metadata
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+message PartialObjectMetadataList {
+  // items contains each of the included items.
+  repeated PartialObjectMetadata items = 1;
+}
+
+// TableOptions are used when a Table is requested by the caller.
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+message TableOptions {
+  // includeObject decides whether to include each object along with its columnar information.
+  // Specifying "None" will return no object, specifying "Object" will return the full object contents, and
+  // specifying "Metadata" (the default) will return the object's metadata in the PartialObjectMetadata kind
+  // in version v1beta1 of the meta.k8s.io API group.
+  optional string includeObject = 1;
+}
+
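
The includeObject policy above only matters once a client has asked the server for a Table rendering at all, which happens through content negotiation. A sketch of the request a client might send; the server URL is a placeholder, and a real client would also attach credentials:

package main

import (
	"fmt"
	"net/http"
)

func main() {
	const apiServer = "https://127.0.0.1:6443" // placeholder
	req, err := http.NewRequest("GET", apiServer+"/api/v1/namespaces/default/pods", nil)
	if err != nil {
		panic(err)
	}
	// Ask the server to render the list as a meta.k8s.io/v1beta1 Table.
	req.Header.Set("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io")
	fmt.Println(req.Header.Get("Accept"))
}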
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/register.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/register.go
new file mode 100644
index 0000000..d13254b
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/register.go
@@ -0,0 +1,57 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name for this API.
+const GroupName = "meta.k8s.io"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
+
+// Kind takes an unqualified kind and returns a Group qualified GroupKind
+func Kind(kind string) schema.GroupKind {
+	return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// scheme is the registry for the common types that adhere to the meta v1beta1 API spec.
+var scheme = runtime.NewScheme()
+
+// ParameterCodec knows about query parameters used with the meta v1beta1 API spec.
+var ParameterCodec = runtime.NewParameterCodec(scheme)
+
+func init() {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&Table{},
+		&TableOptions{},
+		&PartialObjectMetadata{},
+		&PartialObjectMetadataList{},
+	)
+
+	if err := scheme.AddConversionFuncs(
+		Convert_Slice_string_To_v1beta1_IncludeObjectPolicy,
+	); err != nil {
+		panic(err)
+	}
+
+	// register manually. This usually goes through the SchemeBuilder, which we cannot use here.
+	//scheme.AddGeneratedDeepCopyFuncs(GetGeneratedDeepCopyFuncs()...)
+}
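
ParameterCodec above is what turns option structs such as TableOptions into URL query strings and back; the registered Convert_Slice_string_To_v1beta1_IncludeObjectPolicy covers the string-to-policy direction when decoding. A minimal encoding sketch, assuming the vendored package is importable as shown:

package main

import (
	"fmt"

	metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1"
)

func main() {
	opts := metav1beta1.TableOptions{IncludeObject: metav1beta1.IncludeMetadata}
	// EncodeParameters walks the json tags on TableOptions and produces
	// url.Values suitable for a request query string.
	values, err := metav1beta1.ParameterCodec.EncodeParameters(&opts, metav1beta1.SchemeGroupVersion)
	if err != nil {
		panic(err)
	}
	fmt.Println(values.Encode()) // includeObject=Metadata
}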
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go
new file mode 100644
index 0000000..344c533
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go
@@ -0,0 +1,161 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package v1beta1 contains the alpha objects from meta that will be introduced.
+package v1beta1
+
+import (
+	"k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+// TODO: Table does not generate to protobuf because of the interface{} - fix protobuf
+//   generation to support a meta type that can accept any valid JSON.
+
+// Table is a tabular representation of a set of API resources. The server transforms the
+// object into a set of preferred columns for quickly reviewing the objects.
+// +protobuf=false
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type Table struct {
+	v1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+	// +optional
+	v1.ListMeta `json:"metadata,omitempty"`
+
+	// columnDefinitions describes each column in the returned items array. The number of cells per row
+	// will always match the number of column definitions.
+	ColumnDefinitions []TableColumnDefinition `json:"columnDefinitions"`
+	// rows is the list of items in the table.
+	Rows []TableRow `json:"rows"`
+}
+
+// TableColumnDefinition contains information about a column returned in the Table.
+// +protobuf=false
+type TableColumnDefinition struct {
+	// name is a human readable name for the column.
+	Name string `json:"name"`
+	// type is an OpenAPI type definition for this column.
+	// See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more.
+	Type string `json:"type"`
+	// format is an optional OpenAPI type definition for this column. The 'name' format is applied
+	// to the primary identifier column to assist clients in identifying the column that is the resource name.
+	// See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more.
+	Format string `json:"format"`
+	// description is a human readable description of this column.
+	Description string `json:"description"`
+	// priority is an integer defining the relative importance of this column compared to others. Lower
+	// numbers are considered higher priority. Columns that may be omitted in limited space scenarios
+	// should be given a higher priority value.
+	Priority int32 `json:"priority"`
+}
+
+// TableRow is an individual row in a table.
+// +protobuf=false
+type TableRow struct {
+	// cells will be as wide as headers and may contain strings, numbers (float64 or int64), booleans, simple
+	// maps, lists, or null. See the type field of the column definition for a more detailed description.
+	Cells []interface{} `json:"cells"`
+	// conditions describe additional status information about a row that is relevant for a human user.
+	// +optional
+	Conditions []TableRowCondition `json:"conditions,omitempty"`
+	// This field contains the requested additional information about each object based on the includeObject
+	// policy when requesting the Table. If "None", this field is empty; if "Object", this will be the
+	// default serialization of the object for the current API version; and if "Metadata" (the default),
+	// this will contain the object metadata. Check the returned kind and apiVersion of the object before parsing.
+	// +optional
+	Object runtime.RawExtension `json:"object,omitempty"`
+}
+
+// TableRowCondition allows a row to be marked with additional information.
+// +protobuf=false
+type TableRowCondition struct {
+	// Type of row condition.
+	Type RowConditionType `json:"type"`
+	// Status of the condition, one of True, False, Unknown.
+	Status ConditionStatus `json:"status"`
+	// (brief) machine readable reason for the condition's last transition.
+	// +optional
+	Reason string `json:"reason,omitempty"`
+	// Human readable message indicating details about last transition.
+	// +optional
+	Message string `json:"message,omitempty"`
+}
+
+type RowConditionType string
+
+// These are valid conditions of a row. This list is not exhaustive and new conditions may be
+// included by other resources.
+const (
+	// RowCompleted means the underlying resource has reached completion and may be given less
+	// visual priority than other resources.
+	RowCompleted RowConditionType = "Completed"
+)
+
+type ConditionStatus string
+
+// These are valid condition statuses. "ConditionTrue" means a resource is in the condition.
+// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes
+// can't decide if a resource is in the condition or not. In the future, we could add other
+// intermediate conditions, e.g. ConditionDegraded.
+const (
+	ConditionTrue    ConditionStatus = "True"
+	ConditionFalse   ConditionStatus = "False"
+	ConditionUnknown ConditionStatus = "Unknown"
+)
+
+// IncludeObjectPolicy controls which portion of the object is returned with a Table.
+type IncludeObjectPolicy string
+
+const (
+	// IncludeNone returns no object.
+	IncludeNone IncludeObjectPolicy = "None"
+	// IncludeMetadata serializes the object containing only its metadata field.
+	IncludeMetadata IncludeObjectPolicy = "Metadata"
+	// IncludeObject contains the full object.
+	IncludeObject IncludeObjectPolicy = "Object"
+)
+
+// TableOptions are used when a Table is requested by the caller.
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type TableOptions struct {
+	v1.TypeMeta `json:",inline"`
+	// includeObject decides whether to include each object along with its columnar information.
+	// Specifying "None" will return no object, specifying "Object" will return the full object contents, and
+	// specifying "Metadata" (the default) will return the object's metadata in the PartialObjectMetadata kind
+	// in version v1beta1 of the meta.k8s.io API group.
+	IncludeObject IncludeObjectPolicy `json:"includeObject,omitempty" protobuf:"bytes,1,opt,name=includeObject,casttype=IncludeObjectPolicy"`
+}
+
+// PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients
+// to get access to a particular ObjectMeta schema without knowing the details of the version.
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type PartialObjectMetadata struct {
+	v1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+}
+
+// PartialObjectMetadataList contains a list of objects containing only their metadata
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type PartialObjectMetadataList struct {
+	v1.TypeMeta `json:",inline"`
+
+	// items contains each of the included items.
+	Items []*PartialObjectMetadata `json:"items" protobuf:"bytes,1,rep,name=items"`
+}
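
To make the shape of these types concrete, here is how a server-side printer might assemble a Table; the column set and cell values are invented for illustration:

package main

import (
	"fmt"

	metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1"
)

func main() {
	table := &metav1beta1.Table{
		ColumnDefinitions: []metav1beta1.TableColumnDefinition{
			{Name: "Name", Type: "string", Format: "name", Description: "resource name"},
			{Name: "Replicas", Type: "integer", Priority: 1},
		},
		Rows: []metav1beta1.TableRow{
			// Cells must hold JSON-compatible values: strings, int64/float64,
			// booleans, simple maps, lists, or nil.
			{Cells: []interface{}{"example", int64(3)}},
		},
	}
	fmt.Println(len(table.ColumnDefinitions), len(table.Rows)) // 2 1
}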
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go
new file mode 100644
index 0000000..7394535
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go
@@ -0,0 +1,104 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multi-line comments or blocks that you want to ignore, use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_PartialObjectMetadata = map[string]string{
+	"":         "PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients to get access to a particular ObjectMeta schema without knowing the details of the version.",
+	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+}
+
+func (PartialObjectMetadata) SwaggerDoc() map[string]string {
+	return map_PartialObjectMetadata
+}
+
+var map_PartialObjectMetadataList = map[string]string{
+	"":      "PartialObjectMetadataList contains a list of objects containing only their metadata",
+	"items": "items contains each of the included items.",
+}
+
+func (PartialObjectMetadataList) SwaggerDoc() map[string]string {
+	return map_PartialObjectMetadataList
+}
+
+var map_Table = map[string]string{
+	"":                  "Table is a tabular representation of a set of API resources. The server transforms the object into a set of preferred columns for quickly reviewing the objects.",
+	"metadata":          "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
+	"columnDefinitions": "columnDefinitions describes each column in the returned items array. The number of cells per row will always match the number of column definitions.",
+	"rows":              "rows is the list of items in the table.",
+}
+
+func (Table) SwaggerDoc() map[string]string {
+	return map_Table
+}
+
+var map_TableColumnDefinition = map[string]string{
+	"":            "TableColumnDefinition contains information about a column returned in the Table.",
+	"name":        "name is a human readable name for the column.",
+	"type":        "type is an OpenAPI type definition for this column. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more.",
+	"format":      "format is an optional OpenAPI type definition for this column. The 'name' format is applied to the primary identifier column to assist clients in identifying the column that is the resource name. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more.",
+	"description": "description is a human readable description of this column.",
+	"priority":    "priority is an integer defining the relative importance of this column compared to others. Lower numbers are considered higher priority. Columns that may be omitted in limited space scenarios should be given a higher priority value.",
+}
+
+func (TableColumnDefinition) SwaggerDoc() map[string]string {
+	return map_TableColumnDefinition
+}
+
+var map_TableOptions = map[string]string{
+	"":              "TableOptions are used when a Table is requested by the caller.",
+	"includeObject": "includeObject decides whether to include each object along with its columnar information. Specifying \"None\" will return no object, specifying \"Object\" will return the full object contents, and specifying \"Metadata\" (the default) will return the object's metadata in the PartialObjectMetadata kind in version v1beta1 of the meta.k8s.io API group.",
+}
+
+func (TableOptions) SwaggerDoc() map[string]string {
+	return map_TableOptions
+}
+
+var map_TableRow = map[string]string{
+	"":           "TableRow is an individual row in a table.",
+	"cells":      "cells will be as wide as headers and may contain strings, numbers (float64 or int64), booleans, simple maps, lists, or null. See the type field of the column definition for a more detailed description.",
+	"conditions": "conditions describe additional status information about a row that is relevant for a human user.",
+	"object":     "This field contains the requested additional information about each object based on the includeObject policy when requesting the Table. If \"None\", this field is empty; if \"Object\", this will be the default serialization of the object for the current API version; and if \"Metadata\" (the default), this will contain the object metadata. Check the returned kind and apiVersion of the object before parsing.",
+}
+
+func (TableRow) SwaggerDoc() map[string]string {
+	return map_TableRow
+}
+
+var map_TableRowCondition = map[string]string{
+	"":        "TableRowCondition allows a row to be marked with additional information.",
+	"type":    "Type of row condition.",
+	"status":  "Status of the condition, one of True, False, Unknown.",
+	"reason":  "(brief) machine readable reason for the condition's last transition.",
+	"message": "Human readable message indicating details about last transition.",
+}
+
+func (TableRowCondition) SwaggerDoc() map[string]string {
+	return map_TableRowCondition
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
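
Consumers such as go-restful look these docs up by json field name, with the empty key holding the type-level description. A short sketch:

package main

import (
	"fmt"

	metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1"
)

func main() {
	docs := metav1beta1.TableRow{}.SwaggerDoc()
	fmt.Println(docs[""])      // type-level description of TableRow
	fmt.Println(docs["cells"]) // doc string for the cells field
}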
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..b77db1b
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go
@@ -0,0 +1,189 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PartialObjectMetadata) DeepCopyInto(out *PartialObjectMetadata) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartialObjectMetadata.
+func (in *PartialObjectMetadata) DeepCopy() *PartialObjectMetadata {
+	if in == nil {
+		return nil
+	}
+	out := new(PartialObjectMetadata)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PartialObjectMetadata) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PartialObjectMetadataList) DeepCopyInto(out *PartialObjectMetadataList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]*PartialObjectMetadata, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(PartialObjectMetadata)
+				(*in).DeepCopyInto(*out)
+			}
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartialObjectMetadataList.
+func (in *PartialObjectMetadataList) DeepCopy() *PartialObjectMetadataList {
+	if in == nil {
+		return nil
+	}
+	out := new(PartialObjectMetadataList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PartialObjectMetadataList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Table) DeepCopyInto(out *Table) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.ColumnDefinitions != nil {
+		in, out := &in.ColumnDefinitions, &out.ColumnDefinitions
+		*out = make([]TableColumnDefinition, len(*in))
+		copy(*out, *in)
+	}
+	if in.Rows != nil {
+		in, out := &in.Rows, &out.Rows
+		*out = make([]TableRow, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Table.
+func (in *Table) DeepCopy() *Table {
+	if in == nil {
+		return nil
+	}
+	out := new(Table)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Table) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TableColumnDefinition) DeepCopyInto(out *TableColumnDefinition) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableColumnDefinition.
+func (in *TableColumnDefinition) DeepCopy() *TableColumnDefinition {
+	if in == nil {
+		return nil
+	}
+	out := new(TableColumnDefinition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TableOptions) DeepCopyInto(out *TableOptions) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableOptions.
+func (in *TableOptions) DeepCopy() *TableOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(TableOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *TableOptions) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TableRow) DeepCopyInto(out *TableRow) {
+	clone := in.DeepCopy()
+	*out = *clone
+	return
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TableRowCondition) DeepCopyInto(out *TableRowCondition) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableRowCondition.
+func (in *TableRowCondition) DeepCopy() *TableRowCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(TableRowCondition)
+	in.DeepCopyInto(out)
+	return out
+}
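
DeepCopyObject is what lets these types satisfy runtime.Object, so generic machinery can clone values it only holds behind the interface. A minimal sketch:

package main

import (
	"fmt"

	metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	var obj runtime.Object = &metav1beta1.PartialObjectMetadata{}
	clone := obj.DeepCopyObject()
	fmt.Printf("%T\n", clone) // *v1beta1.PartialObjectMetadata
}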
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.defaults.go
new file mode 100644
index 0000000..73e63fc
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.defaults.go
@@ -0,0 +1,32 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by defaulter-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// RegisterDefaults adds defaulters functions to the given scheme.
+// Public to allow building arbitrary schemes.
+// All generated defaulters are covering - they call all nested defaulters.
+func RegisterDefaults(scheme *runtime.Scheme) error {
+	return nil
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/converter.go b/vendor/k8s.io/apimachinery/pkg/conversion/converter.go
new file mode 100644
index 0000000..bc615dc
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/conversion/converter.go
@@ -0,0 +1,898 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package conversion
+
+import (
+	"fmt"
+	"reflect"
+)
+
+type typePair struct {
+	source reflect.Type
+	dest   reflect.Type
+}
+
+type typeNamePair struct {
+	fieldType reflect.Type
+	fieldName string
+}
+
+// DebugLogger allows you to get debugging messages if necessary.
+type DebugLogger interface {
+	Logf(format string, args ...interface{})
+}
+
+type NameFunc func(t reflect.Type) string
+
+var DefaultNameFunc = func(t reflect.Type) string { return t.Name() }
+
+// ConversionFunc converts the object a into the object b, reusing arrays or objects
+// or pointers if necessary. It should return an error if the object cannot be converted
+// or if some data is invalid. If you do not wish a and b to share fields or nested
+// objects, you must copy a before calling this function.
+type ConversionFunc func(a, b interface{}, scope Scope) error
+
+// Converter knows how to convert one type to another.
+type Converter struct {
+	// Map from the conversion pair to a function which can
+	// do the conversion.
+	conversionFuncs          ConversionFuncs
+	generatedConversionFuncs ConversionFuncs
+
+	// Set of conversions that should be treated as a no-op
+	ignoredConversions map[typePair]struct{}
+
+	// This is a map from a source field type and name, to a list of destination
+	// field type and name.
+	structFieldDests map[typeNamePair][]typeNamePair
+
+	// structFieldSources allows the opposite lookup of structFieldDests, so that the
+	// SourceFromDest copy flag also works. It maps a destination field name and type
+	// to the potential source field names and types to look for.
+	structFieldSources map[typeNamePair][]typeNamePair
+
+	// Map from an input type to a function which can apply a key name mapping
+	inputFieldMappingFuncs map[reflect.Type]FieldMappingFunc
+
+	// Map from an input type to a set of default conversion flags.
+	inputDefaultFlags map[reflect.Type]FieldMatchingFlags
+
+	// If non-nil, will be called to print helpful debugging info. Quite verbose.
+	Debug DebugLogger
+
+	// nameFunc is called to retrieve the name of a type; this name is used for the
+	// purpose of deciding whether two types match or not (i.e., will we attempt to
+	// do a conversion). The default returns the go type name.
+	nameFunc func(t reflect.Type) string
+}
+
+// NewConverter creates a new Converter object.
+func NewConverter(nameFn NameFunc) *Converter {
+	c := &Converter{
+		conversionFuncs:          NewConversionFuncs(),
+		generatedConversionFuncs: NewConversionFuncs(),
+		ignoredConversions:       make(map[typePair]struct{}),
+		nameFunc:                 nameFn,
+		structFieldDests:         make(map[typeNamePair][]typeNamePair),
+		structFieldSources:       make(map[typeNamePair][]typeNamePair),
+
+		inputFieldMappingFuncs: make(map[reflect.Type]FieldMappingFunc),
+		inputDefaultFlags:      make(map[reflect.Type]FieldMatchingFlags),
+	}
+	c.RegisterConversionFunc(Convert_Slice_byte_To_Slice_byte)
+	return c
+}
+
+// WithConversions returns a Converter that is a copy of c but with the additional
+// fns merged on top.
+func (c *Converter) WithConversions(fns ConversionFuncs) *Converter {
+	copied := *c
+	copied.conversionFuncs = c.conversionFuncs.Merge(fns)
+	return &copied
+}
+
+// DefaultMeta returns the conversion FieldMappingFunc and meta for a given type.
+func (c *Converter) DefaultMeta(t reflect.Type) (FieldMatchingFlags, *Meta) {
+	return c.inputDefaultFlags[t], &Meta{
+		KeyNameMapping: c.inputFieldMappingFuncs[t],
+	}
+}
+
+// Convert_Slice_byte_To_Slice_byte prevents recursing into every byte
+func Convert_Slice_byte_To_Slice_byte(in *[]byte, out *[]byte, s Scope) error {
+	if *in == nil {
+		*out = nil
+		return nil
+	}
+	*out = make([]byte, len(*in))
+	copy(*out, *in)
+	return nil
+}
+
+// Scope is passed to conversion funcs to allow them to continue an ongoing conversion.
+// If multiple converters exist in the system, Scope will allow you to use the correct one
+// from a conversion function--that is, the one your conversion function was called by.
+type Scope interface {
+	// Call Convert to convert sub-objects. Note that if you call it with your own exact
+	// parameters, you'll run out of stack space before anything useful happens.
+	Convert(src, dest interface{}, flags FieldMatchingFlags) error
+
+	// DefaultConvert performs the default conversion, without calling a conversion func
+	// on the current stack frame. This makes it safe to call from a conversion func.
+	DefaultConvert(src, dest interface{}, flags FieldMatchingFlags) error
+
+	// SrcTag and DestTag return the struct tags that src and dest had, respectively.
+	// If the enclosing object was not a struct, then these will contain no tags, of course.
+	SrcTag() reflect.StructTag
+	DestTag() reflect.StructTag
+
+	// Flags returns the flags with which the conversion was started.
+	Flags() FieldMatchingFlags
+
+	// Meta returns any information originally passed to Convert.
+	Meta() *Meta
+}
+
+// FieldMappingFunc can convert an input field value into different values, depending on
+// the value of the source or destination struct tags.
+type FieldMappingFunc func(key string, sourceTag, destTag reflect.StructTag) (source string, dest string)
+
+func NewConversionFuncs() ConversionFuncs {
+	return ConversionFuncs{
+		fns:     make(map[typePair]reflect.Value),
+		untyped: make(map[typePair]ConversionFunc),
+	}
+}
+
+type ConversionFuncs struct {
+	fns     map[typePair]reflect.Value
+	untyped map[typePair]ConversionFunc
+}
+
+// Add adds the provided conversion functions to the lookup table - they must have the signature
+// `func(type1, type2, Scope) error`. Functions are added in the order passed and will override
+// previously registered pairs.
+func (c ConversionFuncs) Add(fns ...interface{}) error {
+	for _, fn := range fns {
+		fv := reflect.ValueOf(fn)
+		ft := fv.Type()
+		if err := verifyConversionFunctionSignature(ft); err != nil {
+			return err
+		}
+		c.fns[typePair{ft.In(0).Elem(), ft.In(1).Elem()}] = fv
+	}
+	return nil
+}
+
+// AddUntyped adds the provided conversion function to the lookup table for the types that are
+// supplied as a and b. a and b must be pointers or an error is returned. This method overwrites
+// previously defined functions.
+func (c ConversionFuncs) AddUntyped(a, b interface{}, fn ConversionFunc) error {
+	tA, tB := reflect.TypeOf(a), reflect.TypeOf(b)
+	if tA.Kind() != reflect.Ptr {
+		return fmt.Errorf("the type %T must be a pointer to register as an untyped conversion", a)
+	}
+	if tB.Kind() != reflect.Ptr {
+		return fmt.Errorf("the type %T must be a pointer to register as an untyped conversion", b)
+	}
+	c.untyped[typePair{tA, tB}] = fn
+	return nil
+}
+
+// Merge returns a new ConversionFuncs that contains all conversions from
+// both other and c, with other conversions taking precedence.
+func (c ConversionFuncs) Merge(other ConversionFuncs) ConversionFuncs {
+	merged := NewConversionFuncs()
+	for k, v := range c.fns {
+		merged.fns[k] = v
+	}
+	for k, v := range other.fns {
+		merged.fns[k] = v
+	}
+	for k, v := range c.untyped {
+		merged.untyped[k] = v
+	}
+	for k, v := range other.untyped {
+		merged.untyped[k] = v
+	}
+	return merged
+}
+
+// Meta is supplied by Scheme, when it calls Convert.
+type Meta struct {
+	// KeyNameMapping is an optional function which may map the listed key (field name)
+	// into a source and destination value.
+	KeyNameMapping FieldMappingFunc
+	// Context is an optional field that callers may use to pass info to conversion functions.
+	Context interface{}
+}
+
+// scope contains information about an ongoing conversion.
+type scope struct {
+	converter *Converter
+	meta      *Meta
+	flags     FieldMatchingFlags
+
+	// srcStack & destStack are separate because they may not have a 1:1
+	// relationship.
+	srcStack  scopeStack
+	destStack scopeStack
+}
+
+type scopeStackElem struct {
+	tag   reflect.StructTag
+	value reflect.Value
+	key   string
+}
+
+type scopeStack []scopeStackElem
+
+func (s *scopeStack) pop() {
+	n := len(*s)
+	*s = (*s)[:n-1]
+}
+
+func (s *scopeStack) push(e scopeStackElem) {
+	*s = append(*s, e)
+}
+
+func (s *scopeStack) top() *scopeStackElem {
+	return &(*s)[len(*s)-1]
+}
+
+func (s scopeStack) describe() string {
+	desc := ""
+	if len(s) > 1 {
+		desc = "(" + s[1].value.Type().String() + ")"
+	}
+	for i, v := range s {
+		if i < 2 {
+			// First layer on stack is not real; second is handled specially above.
+			continue
+		}
+		if v.key == "" {
+			desc += fmt.Sprintf(".%v", v.value.Type())
+		} else {
+			desc += fmt.Sprintf(".%v", v.key)
+		}
+	}
+	return desc
+}
+
+// Formats src & dest as indices for printing.
+func (s *scope) setIndices(src, dest int) {
+	s.srcStack.top().key = fmt.Sprintf("[%v]", src)
+	s.destStack.top().key = fmt.Sprintf("[%v]", dest)
+}
+
+// Formats src & dest as map keys for printing.
+func (s *scope) setKeys(src, dest interface{}) {
+	s.srcStack.top().key = fmt.Sprintf(`["%v"]`, src)
+	s.destStack.top().key = fmt.Sprintf(`["%v"]`, dest)
+}
+
+// Convert continues a conversion.
+func (s *scope) Convert(src, dest interface{}, flags FieldMatchingFlags) error {
+	return s.converter.Convert(src, dest, flags, s.meta)
+}
+
+// DefaultConvert continues a conversion, performing a default conversion (no conversion func)
+// for the current stack frame.
+func (s *scope) DefaultConvert(src, dest interface{}, flags FieldMatchingFlags) error {
+	return s.converter.DefaultConvert(src, dest, flags, s.meta)
+}
+
+// SrcTag returns the tag of the struct containing the current source item, if any.
+func (s *scope) SrcTag() reflect.StructTag {
+	return s.srcStack.top().tag
+}
+
+// DestTag returns the tag of the struct containing the current dest item, if any.
+func (s *scope) DestTag() reflect.StructTag {
+	return s.destStack.top().tag
+}
+
+// Flags returns the flags with which the current conversion was started.
+func (s *scope) Flags() FieldMatchingFlags {
+	return s.flags
+}
+
+// Meta returns the meta object that was originally passed to Convert.
+func (s *scope) Meta() *Meta {
+	return s.meta
+}
+
+// describe prints the path to get to the current (source, dest) values.
+func (s *scope) describe() (src, dest string) {
+	return s.srcStack.describe(), s.destStack.describe()
+}
+
+// errorf makes an error that includes information about where we were in the objects
+// we were asked to convert.
+func (s *scope) errorf(message string, args ...interface{}) error {
+	srcPath, destPath := s.describe()
+	where := fmt.Sprintf("converting %v to %v: ", srcPath, destPath)
+	return fmt.Errorf(where+message, args...)
+}
+
+// Verifies whether a conversion function has a correct signature.
+func verifyConversionFunctionSignature(ft reflect.Type) error {
+	if ft.Kind() != reflect.Func {
+		return fmt.Errorf("expected func, got: %v", ft)
+	}
+	if ft.NumIn() != 3 {
+		return fmt.Errorf("expected three 'in' params, got: %v", ft)
+	}
+	if ft.NumOut() != 1 {
+		return fmt.Errorf("expected one 'out' param, got: %v", ft)
+	}
+	if ft.In(0).Kind() != reflect.Ptr {
+		return fmt.Errorf("expected pointer arg for 'in' param 0, got: %v", ft)
+	}
+	if ft.In(1).Kind() != reflect.Ptr {
+		return fmt.Errorf("expected pointer arg for 'in' param 1, got: %v", ft)
+	}
+	scopeType := Scope(nil)
+	if e, a := reflect.TypeOf(&scopeType).Elem(), ft.In(2); e != a {
+		return fmt.Errorf("expected '%v' arg for 'in' param 2, got '%v' (%v)", e, a, ft)
+	}
+	var forErrorType error
+	// This convolution is necessary, otherwise TypeOf picks up on the fact
+	// that forErrorType is nil.
+	errorType := reflect.TypeOf(&forErrorType).Elem()
+	if ft.Out(0) != errorType {
+		return fmt.Errorf("expected error return, got: %v", ft)
+	}
+	return nil
+}
+
+// RegisterConversionFunc registers a conversion func with the
+// Converter. conversionFunc must take three parameters: a pointer to the input
+// type, a pointer to the output type, and a conversion.Scope (which should be
+// used if recursive conversion calls are desired).  It must return an error.
+//
+// Example:
+// c.RegisterConversionFunc(
+//         func(in *Pod, out *v1.Pod, s Scope) error {
+//                 // conversion logic...
+//                 return nil
+//          })
+// DEPRECATED: Will be removed in favor of RegisterUntypedConversionFunc
+func (c *Converter) RegisterConversionFunc(conversionFunc interface{}) error {
+	return c.conversionFuncs.Add(conversionFunc)
+}
+
+// RegisterGeneratedConversionFunc is similar to RegisterConversionFunc, but registers
+// conversion functions that were automatically generated.
+// DEPRECATED: Will be removed in favor of RegisterGeneratedUntypedConversionFunc
+func (c *Converter) RegisterGeneratedConversionFunc(conversionFunc interface{}) error {
+	return c.generatedConversionFuncs.Add(conversionFunc)
+}
+
+// RegisterUntypedConversionFunc registers a function that converts between a and b by passing objects of those
+// types to the provided function. The function *must* accept objects of a and b - this machinery will not enforce
+// any other guarantee.
+func (c *Converter) RegisterUntypedConversionFunc(a, b interface{}, fn ConversionFunc) error {
+	return c.conversionFuncs.AddUntyped(a, b, fn)
+}
+
+// RegisterGeneratedUntypedConversionFunc registers a function that converts between a and b by passing objects of those
+// types to the provided function. The function *must* accept objects of a and b - this machinery will not enforce
+// any other guarantee.
+func (c *Converter) RegisterGeneratedUntypedConversionFunc(a, b interface{}, fn ConversionFunc) error {
+	return c.generatedConversionFuncs.AddUntyped(a, b, fn)
+}
+
+// RegisterIgnoredConversion registers a "no-op" for conversion, where any requested
+// conversion between from and to is ignored.
+func (c *Converter) RegisterIgnoredConversion(from, to interface{}) error {
+	typeFrom := reflect.TypeOf(from)
+	typeTo := reflect.TypeOf(to)
+	if typeFrom.Kind() != reflect.Ptr {
+		return fmt.Errorf("expected pointer arg for 'from' param 0, got: %v", typeFrom)
+	}
+	if typeTo.Kind() != reflect.Ptr {
+		return fmt.Errorf("expected pointer arg for 'to' param 1, got: %v", typeTo)
+	}
+	c.ignoredConversions[typePair{typeFrom.Elem(), typeTo.Elem()}] = struct{}{}
+	return nil
+}
+
+// RegisterInputDefaults registers a field name mapping function, used when converting
+// from maps to structs. Inputs to the conversion methods are checked for this type and a mapping
+// applied automatically if the input matches in. A set of default flags for the input conversion
+// may also be provided, which will be used when no explicit flags are requested.
+func (c *Converter) RegisterInputDefaults(in interface{}, fn FieldMappingFunc, defaultFlags FieldMatchingFlags) error {
+	fv := reflect.ValueOf(in)
+	ft := fv.Type()
+	if ft.Kind() != reflect.Ptr {
+		return fmt.Errorf("expected pointer 'in' argument, got: %v", ft)
+	}
+	c.inputFieldMappingFuncs[ft] = fn
+	c.inputDefaultFlags[ft] = defaultFlags
+	return nil
+}
+
+// FieldMatchingFlags contains a list of ways in which struct fields could be
+// copied. These constants may be | combined.
+type FieldMatchingFlags int
+
+const (
+	// Loop through destination fields, search for matching source
+	// field to copy it from. Source fields with no corresponding
+	// destination field will be ignored. If SourceToDest is
+	// specified, this flag is ignored. If neither is specified,
+	// or no flags are passed, this flag is the default.
+	DestFromSource FieldMatchingFlags = 0
+	// Loop through source fields, search for matching dest field
+	// to copy it into. Destination fields with no corresponding
+	// source field will be ignored.
+	SourceToDest FieldMatchingFlags = 1 << iota
+	// Don't treat it as an error if the corresponding source or
+	// dest field can't be found.
+	IgnoreMissingFields
+	// Don't require type names to match.
+	AllowDifferentFieldTypeNames
+)
+
+// IsSet returns true if the given flag or combination of flags is set.
+func (f FieldMatchingFlags) IsSet(flag FieldMatchingFlags) bool {
+	if flag == DestFromSource {
+		// The bit logic doesn't work on the default value.
+		return f&SourceToDest != SourceToDest
+	}
+	return f&flag == flag
+}
+
+// Convert will translate src to dest if it knows how. Both must be pointers.
+// If no conversion func is registered and the default copying mechanism
+// doesn't work on this type pair, an error will be returned.
+// Read the comments on the various FieldMatchingFlags constants to understand
+// what the 'flags' parameter does.
+// 'meta' is given to allow you to pass information to conversion functions,
+// it is not used by Convert() other than storing it in the scope.
+// Not safe for objects with cyclic references!
+func (c *Converter) Convert(src, dest interface{}, flags FieldMatchingFlags, meta *Meta) error {
+	return c.doConversion(src, dest, flags, meta, c.convert)
+}
+
+// DefaultConvert will translate src to dest if it knows how. Both must be pointers.
+// No conversion func is used. If the default copying mechanism
+// doesn't work on this type pair, an error will be returned.
+// Read the comments on the various FieldMatchingFlags constants to understand
+// what the 'flags' parameter does.
+// 'meta' is given to allow you to pass information to conversion functions,
+// it is not used by DefaultConvert() other than storing it in the scope.
+// Not safe for objects with cyclic references!
+func (c *Converter) DefaultConvert(src, dest interface{}, flags FieldMatchingFlags, meta *Meta) error {
+	return c.doConversion(src, dest, flags, meta, c.defaultConvert)
+}
+
+type conversionFunc func(sv, dv reflect.Value, scope *scope) error
+
+func (c *Converter) doConversion(src, dest interface{}, flags FieldMatchingFlags, meta *Meta, f conversionFunc) error {
+	pair := typePair{reflect.TypeOf(src), reflect.TypeOf(dest)}
+	scope := &scope{
+		converter: c,
+		flags:     flags,
+		meta:      meta,
+	}
+	if fn, ok := c.conversionFuncs.untyped[pair]; ok {
+		return fn(src, dest, scope)
+	}
+	if fn, ok := c.generatedConversionFuncs.untyped[pair]; ok {
+		return fn(src, dest, scope)
+	}
+	// TODO: consider everything past this point deprecated - we want to support only point to point top level
+	// conversions
+
+	dv, err := EnforcePtr(dest)
+	if err != nil {
+		return err
+	}
+	if !dv.CanAddr() && !dv.CanSet() {
+		return fmt.Errorf("can't write to dest")
+	}
+	sv, err := EnforcePtr(src)
+	if err != nil {
+		return err
+	}
+	// Leave something on the stack, so that calls to struct tag getters never fail.
+	scope.srcStack.push(scopeStackElem{})
+	scope.destStack.push(scopeStackElem{})
+	return f(sv, dv, scope)
+}
+
+// callCustom calls 'custom' with sv & dv. custom must be a conversion function.
+func (c *Converter) callCustom(sv, dv, custom reflect.Value, scope *scope) error {
+	if !sv.CanAddr() {
+		sv2 := reflect.New(sv.Type())
+		sv2.Elem().Set(sv)
+		sv = sv2
+	} else {
+		sv = sv.Addr()
+	}
+	if !dv.CanAddr() {
+		if !dv.CanSet() {
+			return scope.errorf("can't addr or set dest.")
+		}
+		dvOrig := dv
+		dv = reflect.New(dvOrig.Type())
+		// Copy the converted value back into the original, non-addressable dest.
+		defer func() { dvOrig.Set(dv.Elem()) }()
+	} else {
+		dv = dv.Addr()
+	}
+	args := []reflect.Value{sv, dv, reflect.ValueOf(scope)}
+	ret := custom.Call(args)[0].Interface()
+	// This convolution is necessary because nil interfaces won't convert
+	// to errors.
+	if ret == nil {
+		return nil
+	}
+	return ret.(error)
+}
+
+// convert recursively copies sv into dv, calling an appropriate conversion function if
+// one is registered.
+func (c *Converter) convert(sv, dv reflect.Value, scope *scope) error {
+	dt, st := dv.Type(), sv.Type()
+	pair := typePair{st, dt}
+
+	// ignore conversions of this type
+	if _, ok := c.ignoredConversions[pair]; ok {
+		if c.Debug != nil {
+			c.Debug.Logf("Ignoring conversion of '%v' to '%v'", st, dt)
+		}
+		return nil
+	}
+
+	// Convert sv to dv.
+	if fv, ok := c.conversionFuncs.fns[pair]; ok {
+		if c.Debug != nil {
+			c.Debug.Logf("Calling custom conversion of '%v' to '%v'", st, dt)
+		}
+		return c.callCustom(sv, dv, fv, scope)
+	}
+	if fv, ok := c.generatedConversionFuncs.fns[pair]; ok {
+		if c.Debug != nil {
+			c.Debug.Logf("Calling generated conversion of '%v' to '%v'", st, dt)
+		}
+		return c.callCustom(sv, dv, fv, scope)
+	}
+
+	return c.defaultConvert(sv, dv, scope)
+}
+
+// defaultConvert recursively copies sv into dv. No conversion function is called
+// for the current stack frame (but conversion functions may be called for nested objects).
+func (c *Converter) defaultConvert(sv, dv reflect.Value, scope *scope) error {
+	dt, st := dv.Type(), sv.Type()
+
+	if !dv.CanSet() {
+		return scope.errorf("Cannot set dest. (Tried to deep copy something with unexported fields?)")
+	}
+
+	if !scope.flags.IsSet(AllowDifferentFieldTypeNames) && c.nameFunc(dt) != c.nameFunc(st) {
+		return scope.errorf(
+			"type names don't match (%v, %v), and no conversion 'func (%v, %v) error' registered.",
+			c.nameFunc(st), c.nameFunc(dt), st, dt)
+	}
+
+	switch st.Kind() {
+	case reflect.Map, reflect.Ptr, reflect.Slice, reflect.Interface, reflect.Struct:
+		// Don't copy these via assignment/conversion!
+	default:
+		// This should handle all simple types.
+		if st.AssignableTo(dt) {
+			dv.Set(sv)
+			return nil
+		}
+		if st.ConvertibleTo(dt) {
+			dv.Set(sv.Convert(dt))
+			return nil
+		}
+	}
+
+	if c.Debug != nil {
+		c.Debug.Logf("Trying to convert '%v' to '%v'", st, dt)
+	}
+
+	scope.srcStack.push(scopeStackElem{value: sv})
+	scope.destStack.push(scopeStackElem{value: dv})
+	defer scope.srcStack.pop()
+	defer scope.destStack.pop()
+
+	switch dv.Kind() {
+	case reflect.Struct:
+		return c.convertKV(toKVValue(sv), toKVValue(dv), scope)
+	case reflect.Slice:
+		if sv.IsNil() {
+			// Don't make a zero-length slice.
+			dv.Set(reflect.Zero(dt))
+			return nil
+		}
+		dv.Set(reflect.MakeSlice(dt, sv.Len(), sv.Cap()))
+		for i := 0; i < sv.Len(); i++ {
+			scope.setIndices(i, i)
+			if err := c.convert(sv.Index(i), dv.Index(i), scope); err != nil {
+				return err
+			}
+		}
+	case reflect.Ptr:
+		if sv.IsNil() {
+			// Don't copy a nil ptr!
+			dv.Set(reflect.Zero(dt))
+			return nil
+		}
+		dv.Set(reflect.New(dt.Elem()))
+		switch st.Kind() {
+		case reflect.Ptr, reflect.Interface:
+			return c.convert(sv.Elem(), dv.Elem(), scope)
+		default:
+			return c.convert(sv, dv.Elem(), scope)
+		}
+	case reflect.Map:
+		if sv.IsNil() {
+			// Don't copy a nil map!
+			dv.Set(reflect.Zero(dt))
+			return nil
+		}
+		dv.Set(reflect.MakeMap(dt))
+		for _, sk := range sv.MapKeys() {
+			dk := reflect.New(dt.Key()).Elem()
+			if err := c.convert(sk, dk, scope); err != nil {
+				return err
+			}
+			dkv := reflect.New(dt.Elem()).Elem()
+			scope.setKeys(sk.Interface(), dk.Interface())
+			// TODO:  sv.MapIndex(sk) may return a value with CanAddr() == false,
+			// because a map[string]struct{} does not allow a pointer reference.
+			// Calling a custom conversion function defined for the map value
+			// will panic. Example is PodInfo map[string]ContainerStatus.
+			if err := c.convert(sv.MapIndex(sk), dkv, scope); err != nil {
+				return err
+			}
+			dv.SetMapIndex(dk, dkv)
+		}
+	case reflect.Interface:
+		if sv.IsNil() {
+			// Don't copy a nil interface!
+			dv.Set(reflect.Zero(dt))
+			return nil
+		}
+		tmpdv := reflect.New(sv.Elem().Type()).Elem()
+		if err := c.convert(sv.Elem(), tmpdv, scope); err != nil {
+			return err
+		}
+		dv.Set(reflect.ValueOf(tmpdv.Interface()))
+		return nil
+	default:
+		return scope.errorf("couldn't copy '%v' into '%v'; didn't understand types", st, dt)
+	}
+	return nil
+}
+
+var stringType = reflect.TypeOf("")
+
+func toKVValue(v reflect.Value) kvValue {
+	switch v.Kind() {
+	case reflect.Struct:
+		return structAdaptor(v)
+	case reflect.Map:
+		if v.Type().Key().AssignableTo(stringType) {
+			return stringMapAdaptor(v)
+		}
+	}
+
+	return nil
+}
+
+// kvValue lets us write the same conversion logic to work with both maps
+// and structs. Only maps with string keys make sense for this.
+type kvValue interface {
+	// returns all keys, as a []string.
+	keys() []string
+	// Will just return "" for maps.
+	tagOf(key string) reflect.StructTag
+	// Will return the zero Value if the key doesn't exist.
+	value(key string) reflect.Value
+	// Maps require explicit setting-- will do nothing for structs.
+	// Returns false on failure.
+	confirmSet(key string, v reflect.Value) bool
+}
+
+type stringMapAdaptor reflect.Value
+
+func (a stringMapAdaptor) len() int {
+	return reflect.Value(a).Len()
+}
+
+func (a stringMapAdaptor) keys() []string {
+	v := reflect.Value(a)
+	keys := make([]string, v.Len())
+	for i, k := range v.MapKeys() {
+		// Keys reaching this adaptor are assignable to string, so they are never
+		// nil; calling IsNil on a string-kind value would panic here.
+		switch t := k.Interface().(type) {
+		case string:
+			keys[i] = t
+		}
+	}
+	return keys
+}
+
+func (a stringMapAdaptor) tagOf(key string) reflect.StructTag {
+	return ""
+}
+
+func (a stringMapAdaptor) value(key string) reflect.Value {
+	return reflect.Value(a).MapIndex(reflect.ValueOf(key))
+}
+
+func (a stringMapAdaptor) confirmSet(key string, v reflect.Value) bool {
+	return true
+}
+
+type structAdaptor reflect.Value
+
+func (a structAdaptor) len() int {
+	v := reflect.Value(a)
+	return v.Type().NumField()
+}
+
+func (a structAdaptor) keys() []string {
+	v := reflect.Value(a)
+	t := v.Type()
+	keys := make([]string, t.NumField())
+	for i := range keys {
+		keys[i] = t.Field(i).Name
+	}
+	return keys
+}
+
+func (a structAdaptor) tagOf(key string) reflect.StructTag {
+	v := reflect.Value(a)
+	field, ok := v.Type().FieldByName(key)
+	if ok {
+		return field.Tag
+	}
+	return ""
+}
+
+func (a structAdaptor) value(key string) reflect.Value {
+	v := reflect.Value(a)
+	return v.FieldByName(key)
+}
+
+func (a structAdaptor) confirmSet(key string, v reflect.Value) bool {
+	return true
+}
+
+// convertKV can convert things that consist of key/value pairs, like structs
+// and some maps.
+func (c *Converter) convertKV(skv, dkv kvValue, scope *scope) error {
+	if skv == nil || dkv == nil {
+		// TODO: add keys to stack to support really understandable error messages.
+		return fmt.Errorf("Unable to convert %#v to %#v", skv, dkv)
+	}
+
+	lister := dkv
+	if scope.flags.IsSet(SourceToDest) {
+		lister = skv
+	}
+
+	var mapping FieldMappingFunc
+	if scope.meta != nil && scope.meta.KeyNameMapping != nil {
+		mapping = scope.meta.KeyNameMapping
+	}
+
+	for _, key := range lister.keys() {
+		if found, err := c.checkField(key, skv, dkv, scope); found {
+			if err != nil {
+				return err
+			}
+			continue
+		}
+		stag := skv.tagOf(key)
+		dtag := dkv.tagOf(key)
+		skey := key
+		dkey := key
+		if mapping != nil {
+			skey, dkey = scope.meta.KeyNameMapping(key, stag, dtag)
+		}
+
+		df := dkv.value(dkey)
+		sf := skv.value(skey)
+		if !df.IsValid() || !sf.IsValid() {
+			switch {
+			case scope.flags.IsSet(IgnoreMissingFields):
+				// No error.
+			case scope.flags.IsSet(SourceToDest):
+				return scope.errorf("%v not present in dest", dkey)
+			default:
+				return scope.errorf("%v not present in src", skey)
+			}
+			continue
+		}
+		scope.srcStack.top().key = skey
+		scope.srcStack.top().tag = stag
+		scope.destStack.top().key = dkey
+		scope.destStack.top().tag = dtag
+		if err := c.convert(sf, df, scope); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// checkField returns true if the field name matches any of the struct
+// field copying rules. The error should be ignored if it returns false.
+func (c *Converter) checkField(fieldName string, skv, dkv kvValue, scope *scope) (bool, error) {
+	replacementMade := false
+	if scope.flags.IsSet(DestFromSource) {
+		df := dkv.value(fieldName)
+		if !df.IsValid() {
+			return false, nil
+		}
+		destKey := typeNamePair{df.Type(), fieldName}
+		// Check each of the potential source (type, name) pairs to see if they're
+		// present in sv.
+		for _, potentialSourceKey := range c.structFieldSources[destKey] {
+			sf := skv.value(potentialSourceKey.fieldName)
+			if !sf.IsValid() {
+				continue
+			}
+			if sf.Type() == potentialSourceKey.fieldType {
+				// Both the source's name and type matched, so copy.
+				scope.srcStack.top().key = potentialSourceKey.fieldName
+				scope.destStack.top().key = fieldName
+				if err := c.convert(sf, df, scope); err != nil {
+					return true, err
+				}
+				dkv.confirmSet(fieldName, df)
+				replacementMade = true
+			}
+		}
+		return replacementMade, nil
+	}
+
+	sf := skv.value(fieldName)
+	if !sf.IsValid() {
+		return false, nil
+	}
+	srcKey := typeNamePair{sf.Type(), fieldName}
+	// Check each of the potential dest (type, name) pairs to see if they're
+	// present in dv.
+	for _, potentialDestKey := range c.structFieldDests[srcKey] {
+		df := dkv.value(potentialDestKey.fieldName)
+		if !df.IsValid() {
+			continue
+		}
+		if df.Type() == potentialDestKey.fieldType {
+			// Both the dest's name and type matched, so copy.
+			scope.srcStack.top().key = fieldName
+			scope.destStack.top().key = potentialDestKey.fieldName
+			if err := c.convert(sf, df, scope); err != nil {
+				return true, err
+			}
+			dkv.confirmSet(potentialDestKey.fieldName, df)
+			replacementMade = true
+		}
+	}
+	return replacementMade, nil
+}
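
Tying the Converter API together, a minimal sketch; the widget types below are invented for illustration, and the untyped registration path is the one the comments above recommend over the deprecated reflection-based one:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/conversion"
)

type internalWidget struct{ Name string }
type v1Widget struct{ Name string }

func main() {
	c := conversion.NewConverter(conversion.DefaultNameFunc)
	// The func receives *internalWidget and *v1Widget; the machinery does not
	// enforce anything beyond the registered type pair.
	err := c.RegisterUntypedConversionFunc(&internalWidget{}, &v1Widget{},
		func(a, b interface{}, s conversion.Scope) error {
			b.(*v1Widget).Name = a.(*internalWidget).Name
			return nil
		})
	if err != nil {
		panic(err)
	}
	out := &v1Widget{}
	if err := c.Convert(&internalWidget{Name: "w"}, out, 0, nil); err != nil {
		panic(err)
	}
	fmt.Println(out.Name) // w
}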
diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/deep_equal.go b/vendor/k8s.io/apimachinery/pkg/conversion/deep_equal.go
new file mode 100644
index 0000000..f21abe1
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/conversion/deep_equal.go
@@ -0,0 +1,36 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package conversion
+
+import (
+	"k8s.io/apimachinery/third_party/forked/golang/reflect"
+)
+
+// The code for this type must be located in third_party, since it forks from
+// the Go standard library. But for convenience, we expose the type here, too.
+type Equalities struct {
+	reflect.Equalities
+}
+
+// EqualitiesOrDie creates an Equalities from the given funcs, panicking on any error.
+func EqualitiesOrDie(funcs ...interface{}) Equalities {
+	e := Equalities{reflect.Equalities{}}
+	if err := e.AddFuncs(funcs...); err != nil {
+		panic(err)
+	}
+	return e
+}
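
A sketch of how Equalities is used, assuming the forked DeepEqual honors a registered func at any depth (the NaN rule here is just an example):

package main

import (
	"fmt"
	"math"

	"k8s.io/apimachinery/pkg/conversion"
)

func main() {
	// Register a custom float64 equality that treats NaN as equal to NaN.
	eq := conversion.EqualitiesOrDie(func(a, b float64) bool {
		return a == b || (math.IsNaN(a) && math.IsNaN(b))
	})
	fmt.Println(eq.DeepEqual([]float64{math.NaN()}, []float64{math.NaN()})) // true
}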
diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/doc.go b/vendor/k8s.io/apimachinery/pkg/conversion/doc.go
new file mode 100644
index 0000000..7415d81
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/conversion/doc.go
@@ -0,0 +1,24 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package conversion provides Go object versioning.
+//
+// Specifically, conversion provides a way for you to define multiple versions
+// of the same object. You may write functions which implement conversion logic,
+// but for the fields which did not change, copying is automated. This makes it
+// easy to modify the structures you use in memory without affecting the format
+// you store on disk or respond to in your external API calls.
+package conversion // import "k8s.io/apimachinery/pkg/conversion"
diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/helper.go b/vendor/k8s.io/apimachinery/pkg/conversion/helper.go
new file mode 100644
index 0000000..4ebc1eb
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/conversion/helper.go
@@ -0,0 +1,39 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package conversion
+
+import (
+	"fmt"
+	"reflect"
+)
+
+// EnforcePtr ensures that obj is a pointer of some sort. Returns a reflect.Value
+// of the dereferenced pointer, ensuring that it is settable/addressable.
+// Returns an error if this is not possible.
+func EnforcePtr(obj interface{}) (reflect.Value, error) {
+	v := reflect.ValueOf(obj)
+	if v.Kind() != reflect.Ptr {
+		if v.Kind() == reflect.Invalid {
+			return reflect.Value{}, fmt.Errorf("expected pointer, but got invalid kind")
+		}
+		return reflect.Value{}, fmt.Errorf("expected pointer, but got %v type", v.Type())
+	}
+	if v.IsNil() {
+		return reflect.Value{}, fmt.Errorf("expected pointer, but got nil")
+	}
+	return v.Elem(), nil
+}
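
A minimal usage sketch for EnforcePtr (variable names are illustrative, not from the vendored source); the returned reflect.Value is the settable element behind the pointer:

    x := int64(41)
    v, err := conversion.EnforcePtr(&x)
    if err != nil {
        panic(err)
    }
    v.SetInt(42) // x is now 42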
diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go
new file mode 100644
index 0000000..b3804aa
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go
@@ -0,0 +1,198 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package queryparams
+
+import (
+	"fmt"
+	"net/url"
+	"reflect"
+	"strings"
+)
+
+// Marshaler converts an object to a query parameter string representation
+type Marshaler interface {
+	MarshalQueryParameter() (string, error)
+}
+
+// Unmarshaler converts a string representation to an object
+type Unmarshaler interface {
+	UnmarshalQueryParameter(string) error
+}
+
+func jsonTag(field reflect.StructField) (string, bool) {
+	structTag := field.Tag.Get("json")
+	if len(structTag) == 0 {
+		return "", false
+	}
+	parts := strings.Split(structTag, ",")
+	tag := parts[0]
+	if tag == "-" {
+		tag = ""
+	}
+	omitempty := false
+	parts = parts[1:]
+	for _, part := range parts {
+		if part == "omitempty" {
+			omitempty = true
+			break
+		}
+	}
+	return tag, omitempty
+}
+
+func formatValue(value interface{}) string {
+	return fmt.Sprintf("%v", value)
+}
+
+func isPointerKind(kind reflect.Kind) bool {
+	return kind == reflect.Ptr
+}
+
+func isStructKind(kind reflect.Kind) bool {
+	return kind == reflect.Struct
+}
+
+func isValueKind(kind reflect.Kind) bool {
+	switch kind {
+	case reflect.String, reflect.Bool, reflect.Int, reflect.Int8, reflect.Int16,
+		reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8,
+		reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Float32,
+		reflect.Float64, reflect.Complex64, reflect.Complex128:
+		return true
+	default:
+		return false
+	}
+}
+
+func zeroValue(value reflect.Value) bool {
+	return reflect.DeepEqual(reflect.Zero(value.Type()).Interface(), value.Interface())
+}
+
+func customMarshalValue(value reflect.Value) (reflect.Value, bool) {
+	// Return early unless the value implements a custom query marshaler.
+	if !value.CanInterface() {
+		return reflect.Value{}, false
+	}
+
+	marshaler, ok := value.Interface().(Marshaler)
+	if !ok {
+		if !isPointerKind(value.Kind()) && value.CanAddr() {
+			marshaler, ok = value.Addr().Interface().(Marshaler)
+			if !ok {
+				return reflect.Value{}, false
+			}
+		} else {
+			return reflect.Value{}, false
+		}
+	}
+
+	// Don't invoke functions on nil pointers
+	// If the type implements MarshalQueryParameter, AND the tag is not omitempty, AND the value is a nil pointer, "" seems like a reasonable response
+	if isPointerKind(value.Kind()) && zeroValue(value) {
+		return reflect.ValueOf(""), true
+	}
+
+	// Get the custom marshalled value
+	v, err := marshaler.MarshalQueryParameter()
+	if err != nil {
+		return reflect.Value{}, false
+	}
+	return reflect.ValueOf(v), true
+}
+
+func addParam(values url.Values, tag string, omitempty bool, value reflect.Value) {
+	if omitempty && zeroValue(value) {
+		return
+	}
+	val := ""
+	iValue := fmt.Sprintf("%v", value.Interface())
+
+	if iValue != "<nil>" {
+		val = iValue
+	}
+	values.Add(tag, val)
+}
+
+func addListOfParams(values url.Values, tag string, omitempty bool, list reflect.Value) {
+	for i := 0; i < list.Len(); i++ {
+		addParam(values, tag, omitempty, list.Index(i))
+	}
+}
+
+// Convert takes an object and converts it to a url.Values object using JSON tags as
+// parameter names. Only top-level simple values, arrays, and slices are serialized.
+// Embedded structs, maps, etc. will not be serialized.
+func Convert(obj interface{}) (url.Values, error) {
+	result := url.Values{}
+	if obj == nil {
+		return result, nil
+	}
+	var sv reflect.Value
+	switch reflect.TypeOf(obj).Kind() {
+	case reflect.Ptr, reflect.Interface:
+		sv = reflect.ValueOf(obj).Elem()
+	default:
+		return nil, fmt.Errorf("expecting a pointer or interface")
+	}
+	st := sv.Type()
+	if !isStructKind(st.Kind()) {
+		return nil, fmt.Errorf("expecting a pointer to a struct")
+	}
+
+	// Check all object fields
+	convertStruct(result, st, sv)
+
+	return result, nil
+}
+
+func convertStruct(result url.Values, st reflect.Type, sv reflect.Value) {
+	for i := 0; i < st.NumField(); i++ {
+		field := sv.Field(i)
+		tag, omitempty := jsonTag(st.Field(i))
+		if len(tag) == 0 {
+			continue
+		}
+		ft := field.Type()
+
+		kind := ft.Kind()
+		if isPointerKind(kind) {
+			ft = ft.Elem()
+			kind = ft.Kind()
+			if !field.IsNil() {
+				field = reflect.Indirect(field)
+				// If the field is non-nil, it should be added to params
+				// and omitempty should be overridden to false.
+				omitempty = false
+			}
+		}
+
+		switch {
+		case isValueKind(kind):
+			addParam(result, tag, omitempty, field)
+		case kind == reflect.Array || kind == reflect.Slice:
+			if isValueKind(ft.Elem().Kind()) {
+				addListOfParams(result, tag, omitempty, field)
+			}
+		case isStructKind(kind) && !(zeroValue(field) && omitempty):
+			if marshalValue, ok := customMarshalValue(field); ok {
+				addParam(result, tag, omitempty, marshalValue)
+			} else {
+				convertStruct(result, ft, field)
+			}
+		}
+	}
+}
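
A hedged example of Convert in action; the Options struct below is hypothetical. Only tagged, top-level simple fields are emitted, and a "-" tag suppresses a field entirely:

    type Options struct {
        Watch   bool   `json:"watch,omitempty"`
        Limit   int64  `json:"limit,omitempty"`
        Ignored string `json:"-"`
    }
    vals, err := queryparams.Convert(&Options{Watch: true, Limit: 10})
    // err == nil; vals.Encode() == "limit=10&watch=true"
    // (zero-valued fields are skipped because of omitempty)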
diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/doc.go b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/doc.go
new file mode 100644
index 0000000..7b763de
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package queryparams provides conversion from versioned
+// runtime objects to URL query values
+package queryparams // import "k8s.io/apimachinery/pkg/conversion/queryparams"
diff --git a/vendor/k8s.io/apimachinery/pkg/fields/doc.go b/vendor/k8s.io/apimachinery/pkg/fields/doc.go
new file mode 100644
index 0000000..c39b803
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/fields/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package fields implements a simple field system, parsing and matching
+// selectors with sets of fields.
+package fields // import "k8s.io/apimachinery/pkg/fields"
diff --git a/vendor/k8s.io/apimachinery/pkg/fields/fields.go b/vendor/k8s.io/apimachinery/pkg/fields/fields.go
new file mode 100644
index 0000000..623b27e
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/fields/fields.go
@@ -0,0 +1,62 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fields
+
+import (
+	"sort"
+	"strings"
+)
+
+// Fields allows you to present fields independently from their storage.
+type Fields interface {
+	// Has returns whether the provided field exists.
+	Has(field string) (exists bool)
+
+	// Get returns the value for the provided field.
+	Get(field string) (value string)
+}
+
+// Set is a map of field:value. It implements Fields.
+type Set map[string]string
+
+// String returns all fields listed as a human readable string.
+// Conveniently, exactly the format that ParseSelector takes.
+func (ls Set) String() string {
+	selector := make([]string, 0, len(ls))
+	for key, value := range ls {
+		selector = append(selector, key+"="+value)
+	}
+	// Sort for determinism.
+	sort.StringSlice(selector).Sort()
+	return strings.Join(selector, ",")
+}
+
+// Has returns whether the provided field exists in the map.
+func (ls Set) Has(field string) bool {
+	_, exists := ls[field]
+	return exists
+}
+
+// Get returns the value in the map for the provided field.
+func (ls Set) Get(field string) string {
+	return ls[field]
+}
+
+// AsSelector converts fields into a Selector.
+func (ls Set) AsSelector() Selector {
+	return SelectorFromSet(ls)
+}
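
A short sketch of the Set type above (field names are illustrative): String produces exactly the form ParseSelector accepts, and AsSelector yields a selector that matches the set itself:

    s := fields.Set{"metadata.name": "pod-a", "status.phase": "Running"}
    _ = s.String()     // "metadata.name=pod-a,status.phase=Running" (sorted)
    sel := s.AsSelector()
    _ = sel.Matches(s) // true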
diff --git a/vendor/k8s.io/apimachinery/pkg/fields/requirements.go b/vendor/k8s.io/apimachinery/pkg/fields/requirements.go
new file mode 100644
index 0000000..70d94de
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/fields/requirements.go
@@ -0,0 +1,30 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fields
+
+import "k8s.io/apimachinery/pkg/selection"
+
+// Requirements is AND of all requirements.
+type Requirements []Requirement
+
+// Requirement contains a field, a value, and an operator that relates the field and value.
+// This is currently used to read the internal selection information of a field selector.
+type Requirement struct {
+	Operator selection.Operator
+	Field    string
+	Value    string
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/fields/selector.go b/vendor/k8s.io/apimachinery/pkg/fields/selector.go
new file mode 100644
index 0000000..e3e4453
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/fields/selector.go
@@ -0,0 +1,476 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fields
+
+import (
+	"bytes"
+	"fmt"
+	"sort"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/selection"
+)
+
+// Selector represents a field selector.
+type Selector interface {
+	// Matches returns true if this selector matches the given set of fields.
+	Matches(Fields) bool
+
+	// Empty returns true if this selector does not restrict the selection space.
+	Empty() bool
+
+	// RequiresExactMatch allows a caller to introspect whether a given selector
+	// requires a single specific field to be set, and if so returns the value it
+	// requires.
+	RequiresExactMatch(field string) (value string, found bool)
+
+	// Transform returns a new copy of the selector after TransformFunc has been
+	// applied to the entire selector, or an error if fn returns an error.
+	// If for a given requirement both field and value are transformed to empty
+	// string, the requirement is skipped.
+	Transform(fn TransformFunc) (Selector, error)
+
+	// Requirements converts this interface to Requirements to expose
+	// more detailed selection information.
+	Requirements() Requirements
+
+	// String returns a human readable string that represents this selector.
+	String() string
+
+	// Make a deep copy of the selector.
+	DeepCopySelector() Selector
+}
+
+type nothingSelector struct{}
+
+func (n nothingSelector) Matches(_ Fields) bool                                      { return false }
+func (n nothingSelector) Empty() bool                                                { return false }
+func (n nothingSelector) String() string                                             { return "" }
+func (n nothingSelector) Requirements() Requirements                                 { return nil }
+func (n nothingSelector) DeepCopySelector() Selector                                 { return n }
+func (n nothingSelector) RequiresExactMatch(field string) (value string, found bool) { return "", false }
+func (n nothingSelector) Transform(fn TransformFunc) (Selector, error)               { return n, nil }
+
+// Nothing returns a selector that matches no fields
+func Nothing() Selector {
+	return nothingSelector{}
+}
+
+// Everything returns a selector that matches all fields.
+func Everything() Selector {
+	return andTerm{}
+}
+
+type hasTerm struct {
+	field, value string
+}
+
+func (t *hasTerm) Matches(ls Fields) bool {
+	return ls.Get(t.field) == t.value
+}
+
+func (t *hasTerm) Empty() bool {
+	return false
+}
+
+func (t *hasTerm) RequiresExactMatch(field string) (value string, found bool) {
+	if t.field == field {
+		return t.value, true
+	}
+	return "", false
+}
+
+func (t *hasTerm) Transform(fn TransformFunc) (Selector, error) {
+	field, value, err := fn(t.field, t.value)
+	if err != nil {
+		return nil, err
+	}
+	if len(field) == 0 && len(value) == 0 {
+		return Everything(), nil
+	}
+	return &hasTerm{field, value}, nil
+}
+
+func (t *hasTerm) Requirements() Requirements {
+	return []Requirement{{
+		Field:    t.field,
+		Operator: selection.Equals,
+		Value:    t.value,
+	}}
+}
+
+func (t *hasTerm) String() string {
+	return fmt.Sprintf("%v=%v", t.field, EscapeValue(t.value))
+}
+
+func (t *hasTerm) DeepCopySelector() Selector {
+	if t == nil {
+		return nil
+	}
+	out := new(hasTerm)
+	*out = *t
+	return out
+}
+
+type notHasTerm struct {
+	field, value string
+}
+
+func (t *notHasTerm) Matches(ls Fields) bool {
+	return ls.Get(t.field) != t.value
+}
+
+func (t *notHasTerm) Empty() bool {
+	return false
+}
+
+func (t *notHasTerm) RequiresExactMatch(field string) (value string, found bool) {
+	return "", false
+}
+
+func (t *notHasTerm) Transform(fn TransformFunc) (Selector, error) {
+	field, value, err := fn(t.field, t.value)
+	if err != nil {
+		return nil, err
+	}
+	if len(field) == 0 && len(value) == 0 {
+		return Everything(), nil
+	}
+	return &notHasTerm{field, value}, nil
+}
+
+func (t *notHasTerm) Requirements() Requirements {
+	return []Requirement{{
+		Field:    t.field,
+		Operator: selection.NotEquals,
+		Value:    t.value,
+	}}
+}
+
+func (t *notHasTerm) String() string {
+	return fmt.Sprintf("%v!=%v", t.field, EscapeValue(t.value))
+}
+
+func (t *notHasTerm) DeepCopySelector() Selector {
+	if t == nil {
+		return nil
+	}
+	out := new(notHasTerm)
+	*out = *t
+	return out
+}
+
+type andTerm []Selector
+
+func (t andTerm) Matches(ls Fields) bool {
+	for _, q := range t {
+		if !q.Matches(ls) {
+			return false
+		}
+	}
+	return true
+}
+
+func (t andTerm) Empty() bool {
+	if t == nil {
+		return true
+	}
+	if len([]Selector(t)) == 0 {
+		return true
+	}
+	for i := range t {
+		if !t[i].Empty() {
+			return false
+		}
+	}
+	return true
+}
+
+func (t andTerm) RequiresExactMatch(field string) (string, bool) {
+	if t == nil || len([]Selector(t)) == 0 {
+		return "", false
+	}
+	for i := range t {
+		if value, found := t[i].RequiresExactMatch(field); found {
+			return value, found
+		}
+	}
+	return "", false
+}
+
+func (t andTerm) Transform(fn TransformFunc) (Selector, error) {
+	next := make([]Selector, 0, len([]Selector(t)))
+	for _, s := range []Selector(t) {
+		n, err := s.Transform(fn)
+		if err != nil {
+			return nil, err
+		}
+		if !n.Empty() {
+			next = append(next, n)
+		}
+	}
+	return andTerm(next), nil
+}
+
+func (t andTerm) Requirements() Requirements {
+	reqs := make([]Requirement, 0, len(t))
+	for _, s := range []Selector(t) {
+		rs := s.Requirements()
+		reqs = append(reqs, rs...)
+	}
+	return reqs
+}
+
+func (t andTerm) String() string {
+	var terms []string
+	for _, q := range t {
+		terms = append(terms, q.String())
+	}
+	return strings.Join(terms, ",")
+}
+
+func (t andTerm) DeepCopySelector() Selector {
+	if t == nil {
+		return nil
+	}
+	out := make([]Selector, len(t))
+	for i := range t {
+		out[i] = t[i].DeepCopySelector()
+	}
+	return andTerm(out)
+}
+
+// SelectorFromSet returns a Selector which will match exactly the given Set. A
+// nil Set is considered equivalent to Everything().
+func SelectorFromSet(ls Set) Selector {
+	if ls == nil {
+		return Everything()
+	}
+	items := make([]Selector, 0, len(ls))
+	for field, value := range ls {
+		items = append(items, &hasTerm{field: field, value: value})
+	}
+	if len(items) == 1 {
+		return items[0]
+	}
+	return andTerm(items)
+}
+
+// valueEscaper prefixes \,= characters with a backslash
+var valueEscaper = strings.NewReplacer(
+	// escape \ characters
+	`\`, `\\`,
+	// then escape , and = characters to allow unambiguous parsing of the value in a fieldSelector
+	`,`, `\,`,
+	`=`, `\=`,
+)
+
+// EscapeValue escapes an arbitrary literal string for use as a fieldSelector value
+func EscapeValue(s string) string {
+	return valueEscaper.Replace(s)
+}
+
+// InvalidEscapeSequence indicates an error occurred unescaping a field selector
+type InvalidEscapeSequence struct {
+	sequence string
+}
+
+func (i InvalidEscapeSequence) Error() string {
+	return fmt.Sprintf("invalid field selector: invalid escape sequence: %s", i.sequence)
+}
+
+// UnescapedRune indicates an error occurred unescaping a field selector
+type UnescapedRune struct {
+	r rune
+}
+
+func (i UnescapedRune) Error() string {
+	return fmt.Sprintf("invalid field selector: unescaped character in value: %v", i.r)
+}
+
+// UnescapeValue unescapes a fieldSelector value and returns the original literal value.
+// May return the original string if it contains no escaped or special characters.
+func UnescapeValue(s string) (string, error) {
+	// if there's no escaping or special characters, just return to avoid allocation
+	if !strings.ContainsAny(s, `\,=`) {
+		return s, nil
+	}
+
+	v := bytes.NewBuffer(make([]byte, 0, len(s)))
+	inSlash := false
+	for _, c := range s {
+		if inSlash {
+			switch c {
+			case '\\', ',', '=':
+				// omit the \ for recognized escape sequences
+				v.WriteRune(c)
+			default:
+				// error on unrecognized escape sequences
+				return "", InvalidEscapeSequence{sequence: string([]rune{'\\', c})}
+			}
+			inSlash = false
+			continue
+		}
+
+		switch c {
+		case '\\':
+			inSlash = true
+		case ',', '=':
+			// unescaped , and = characters are not allowed in field selector values
+			return "", UnescapedRune{r: c}
+		default:
+			v.WriteRune(c)
+		}
+	}
+
+	// Ending with a single backslash is an invalid sequence
+	if inSlash {
+		return "", InvalidEscapeSequence{sequence: "\\"}
+	}
+
+	return v.String(), nil
+}
+
+// ParseSelectorOrDie takes a string representing a selector and returns an
+// object suitable for matching, or panics when an error occurs.
+func ParseSelectorOrDie(s string) Selector {
+	selector, err := ParseSelector(s)
+	if err != nil {
+		panic(err)
+	}
+	return selector
+}
+
+// ParseSelector takes a string representing a selector and returns an
+// object suitable for matching, or an error.
+func ParseSelector(selector string) (Selector, error) {
+	return parseSelector(selector,
+		func(lhs, rhs string) (newLhs, newRhs string, err error) {
+			return lhs, rhs, nil
+		})
+}
+
+// ParseAndTransformSelector parses the selector and runs each term through the given TransformFunc.
+func ParseAndTransformSelector(selector string, fn TransformFunc) (Selector, error) {
+	return parseSelector(selector, fn)
+}
+
+// TransformFunc transforms selectors.
+type TransformFunc func(field, value string) (newField, newValue string, err error)
+
+// splitTerms returns the comma-separated terms contained in the given fieldSelector.
+// Backslash-escaped commas are treated as data instead of delimiters, and are included in the returned terms, with the leading backslash preserved.
+func splitTerms(fieldSelector string) []string {
+	if len(fieldSelector) == 0 {
+		return nil
+	}
+
+	terms := make([]string, 0, 1)
+	startIndex := 0
+	inSlash := false
+	for i, c := range fieldSelector {
+		switch {
+		case inSlash:
+			inSlash = false
+		case c == '\\':
+			inSlash = true
+		case c == ',':
+			terms = append(terms, fieldSelector[startIndex:i])
+			startIndex = i + 1
+		}
+	}
+
+	terms = append(terms, fieldSelector[startIndex:])
+
+	return terms
+}
+
+const (
+	notEqualOperator    = "!="
+	doubleEqualOperator = "=="
+	equalOperator       = "="
+)
+
+// termOperators holds the recognized operators supported in fieldSelectors.
+// doubleEqualOperator and equalOperator are equivalent, but doubleEqualOperator is checked first
+// to avoid leaving a leading = character on the rhs value.
+var termOperators = []string{notEqualOperator, doubleEqualOperator, equalOperator}
+
+// splitTerm returns the lhs, operator, and rhs parsed from the given term, along with an indicator of whether the parse was successful.
+// no escaping of special characters is supported in the lhs value, so the first occurrence of a recognized operator is used as the split point.
+// the literal rhs is returned, and the caller is responsible for applying any desired unescaping.
+func splitTerm(term string) (lhs, op, rhs string, ok bool) {
+	for i := range term {
+		remaining := term[i:]
+		for _, op := range termOperators {
+			if strings.HasPrefix(remaining, op) {
+				return term[0:i], op, term[i+len(op):], true
+			}
+		}
+	}
+	return "", "", "", false
+}
+
+func parseSelector(selector string, fn TransformFunc) (Selector, error) {
+	parts := splitTerms(selector)
+	sort.StringSlice(parts).Sort()
+	var items []Selector
+	for _, part := range parts {
+		if part == "" {
+			continue
+		}
+		lhs, op, rhs, ok := splitTerm(part)
+		if !ok {
+			return nil, fmt.Errorf("invalid selector: '%s'; can't understand '%s'", selector, part)
+		}
+		unescapedRHS, err := UnescapeValue(rhs)
+		if err != nil {
+			return nil, err
+		}
+		switch op {
+		case notEqualOperator:
+			items = append(items, &notHasTerm{field: lhs, value: unescapedRHS})
+		case doubleEqualOperator:
+			items = append(items, &hasTerm{field: lhs, value: unescapedRHS})
+		case equalOperator:
+			items = append(items, &hasTerm{field: lhs, value: unescapedRHS})
+		default:
+			return nil, fmt.Errorf("invalid selector: '%s'; can't understand '%s'", selector, part)
+		}
+	}
+	if len(items) == 1 {
+		return items[0].Transform(fn)
+	}
+	return andTerm(items).Transform(fn)
+}
+
+// OneTermEqualSelector returns an object that matches objects where the named field equals one value.
+// Cannot return an error.
+func OneTermEqualSelector(k, v string) Selector {
+	return &hasTerm{field: k, value: v}
+}
+
+// OneTermNotEqualSelector returns an object that matches objects where the named field does not equal one value.
+// Cannot return an error.
+func OneTermNotEqualSelector(k, v string) Selector {
+	return &notHasTerm{field: k, value: v}
+}
+
+// AndSelectors creates a selector that is the logical AND of all the given selectors
+func AndSelectors(selectors ...Selector) Selector {
+	return andTerm(selectors)
+}
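
For orientation, a minimal parsing sketch (the selector text and field names are illustrative); values containing '\', ',' or '=' must first pass through EscapeValue:

    sel, err := fields.ParseSelector("metadata.namespace=default,status.phase!=Failed")
    if err != nil {
        panic(err)
    }
    ok := sel.Matches(fields.Set{
        "metadata.namespace": "default",
        "status.phase":       "Running",
    })
    // ok == true: both terms of the AND are satisfied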
diff --git a/vendor/k8s.io/apimachinery/pkg/labels/doc.go b/vendor/k8s.io/apimachinery/pkg/labels/doc.go
new file mode 100644
index 0000000..82de005
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/labels/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package labels implements a simple label system, parsing and matching
+// selectors with sets of labels.
+package labels // import "k8s.io/apimachinery/pkg/labels"
diff --git a/vendor/k8s.io/apimachinery/pkg/labels/labels.go b/vendor/k8s.io/apimachinery/pkg/labels/labels.go
new file mode 100644
index 0000000..32db4d9
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/labels/labels.go
@@ -0,0 +1,181 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package labels
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+)
+
+// Labels allows you to present labels independently from their storage.
+type Labels interface {
+	// Has returns whether the provided label exists.
+	Has(label string) (exists bool)
+
+	// Get returns the value for the provided label.
+	Get(label string) (value string)
+}
+
+// Set is a map of label:value. It implements Labels.
+type Set map[string]string
+
+// String returns all labels listed as a human readable string.
+// Conveniently, exactly the format that ParseSelector takes.
+func (ls Set) String() string {
+	selector := make([]string, 0, len(ls))
+	for key, value := range ls {
+		selector = append(selector, key+"="+value)
+	}
+	// Sort for determinism.
+	sort.StringSlice(selector).Sort()
+	return strings.Join(selector, ",")
+}
+
+// Has returns whether the provided label exists in the map.
+func (ls Set) Has(label string) bool {
+	_, exists := ls[label]
+	return exists
+}
+
+// Get returns the value in the map for the provided label.
+func (ls Set) Get(label string) string {
+	return ls[label]
+}
+
+// AsSelector converts labels into a Selector.
+func (ls Set) AsSelector() Selector {
+	return SelectorFromSet(ls)
+}
+
+// AsSelectorPreValidated converts labels into a selector, but
+// assumes that the labels are already validated and thus doesn't
+// perform any validation.
+// According to our measurements this is significantly faster
+// in codepaths that matter at high scale.
+func (ls Set) AsSelectorPreValidated() Selector {
+	return SelectorFromValidatedSet(ls)
+}
+
+// FormatLabels converts a label map into a plain string.
+func FormatLabels(labelMap map[string]string) string {
+	l := Set(labelMap).String()
+	if l == "" {
+		l = "<none>"
+	}
+	return l
+}
+
+// Conflicts takes 2 maps and returns true if there is a key match between
+// the maps but the values don't match; it returns false in all other cases.
+func Conflicts(labels1, labels2 Set) bool {
+	small := labels1
+	big := labels2
+	if len(labels2) < len(labels1) {
+		small = labels2
+		big = labels1
+	}
+
+	for k, v := range small {
+		if val, match := big[k]; match {
+			if val != v {
+				return true
+			}
+		}
+	}
+
+	return false
+}
+
+// Merge combines the given maps and does not check for any conflicts
+// between them. In case of conflicts, the second map (labels2) wins.
+func Merge(labels1, labels2 Set) Set {
+	mergedMap := Set{}
+
+	for k, v := range labels1 {
+		mergedMap[k] = v
+	}
+	for k, v := range labels2 {
+		mergedMap[k] = v
+	}
+	return mergedMap
+}
+
+// Equals returns true if the given maps are equal
+func Equals(labels1, labels2 Set) bool {
+	if len(labels1) != len(labels2) {
+		return false
+	}
+
+	for k, v := range labels1 {
+		value, ok := labels2[k]
+		if !ok {
+			return false
+		}
+		if value != v {
+			return false
+		}
+	}
+	return true
+}
+
+// AreLabelsInWhiteList returns true if the provided label set
+// is contained in the provided whitelist, otherwise false.
+func AreLabelsInWhiteList(labels, whitelist Set) bool {
+	if len(whitelist) == 0 {
+		return true
+	}
+
+	for k, v := range labels {
+		value, ok := whitelist[k]
+		if !ok {
+			return false
+		}
+		if value != v {
+			return false
+		}
+	}
+	return true
+}
+
+// ConvertSelectorToLabelsMap converts a selector string to a labels map
+// and validates the keys and values.
+func ConvertSelectorToLabelsMap(selector string) (Set, error) {
+	labelsMap := Set{}
+
+	if len(selector) == 0 {
+		return labelsMap, nil
+	}
+
+	labels := strings.Split(selector, ",")
+	for _, label := range labels {
+		l := strings.Split(label, "=")
+		if len(l) != 2 {
+			return labelsMap, fmt.Errorf("invalid selector: %s", l)
+		}
+		key := strings.TrimSpace(l[0])
+		if err := validateLabelKey(key); err != nil {
+			return labelsMap, err
+		}
+		value := strings.TrimSpace(l[1])
+		if err := validateLabelValue(value); err != nil {
+			return labelsMap, err
+		}
+		labelsMap[key] = value
+	}
+	return labelsMap, nil
+}
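
A hedged sketch of the Set helpers above (label values are illustrative): Merge favors the second map on conflicting keys, which Conflicts detects:

    a := labels.Set{"app": "web", "tier": "fe"}
    b := labels.Set{"app": "api"}
    _ = labels.Conflicts(a, b) // true: both define "app" with different values
    m := labels.Merge(a, b)    // Set{"app": "api", "tier": "fe"}
    _ = labels.Equals(m, b)    // false: the maps differ in length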
diff --git a/vendor/k8s.io/apimachinery/pkg/labels/selector.go b/vendor/k8s.io/apimachinery/pkg/labels/selector.go
new file mode 100644
index 0000000..f5a0888
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/labels/selector.go
@@ -0,0 +1,891 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package labels
+
+import (
+	"bytes"
+	"fmt"
+	"sort"
+	"strconv"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/selection"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/apimachinery/pkg/util/validation"
+	"k8s.io/klog"
+)
+
+// Requirements is AND of all requirements.
+type Requirements []Requirement
+
+// Selector represents a label selector.
+type Selector interface {
+	// Matches returns true if this selector matches the given set of labels.
+	Matches(Labels) bool
+
+	// Empty returns true if this selector does not restrict the selection space.
+	Empty() bool
+
+	// String returns a human readable string that represents this selector.
+	String() string
+
+	// Add adds requirements to the Selector
+	Add(r ...Requirement) Selector
+
+	// Requirements converts this interface into Requirements to expose
+	// more detailed selection information.
+	// If there are querying parameters, it will return converted requirements and selectable=true.
+	// If this selector doesn't want to select anything, it will return selectable=false.
+	Requirements() (requirements Requirements, selectable bool)
+
+	// Make a deep copy of the selector.
+	DeepCopySelector() Selector
+}
+
+// Everything returns a selector that matches all labels.
+func Everything() Selector {
+	return internalSelector{}
+}
+
+type nothingSelector struct{}
+
+func (n nothingSelector) Matches(_ Labels) bool              { return false }
+func (n nothingSelector) Empty() bool                        { return false }
+func (n nothingSelector) String() string                     { return "" }
+func (n nothingSelector) Add(_ ...Requirement) Selector      { return n }
+func (n nothingSelector) Requirements() (Requirements, bool) { return nil, false }
+func (n nothingSelector) DeepCopySelector() Selector         { return n }
+
+// Nothing returns a selector that matches no labels
+func Nothing() Selector {
+	return nothingSelector{}
+}
+
+// NewSelector returns a nil selector
+func NewSelector() Selector {
+	return internalSelector(nil)
+}
+
+type internalSelector []Requirement
+
+func (s internalSelector) DeepCopy() internalSelector {
+	if s == nil {
+		return nil
+	}
+	result := make([]Requirement, len(s))
+	for i := range s {
+		s[i].DeepCopyInto(&result[i])
+	}
+	return result
+}
+
+func (s internalSelector) DeepCopySelector() Selector {
+	return s.DeepCopy()
+}
+
+// ByKey sorts requirements by key to obtain deterministic parsing results.
+type ByKey []Requirement
+
+func (a ByKey) Len() int { return len(a) }
+
+func (a ByKey) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+func (a ByKey) Less(i, j int) bool { return a[i].key < a[j].key }
+
+// Requirement contains values, a key, and an operator that relates the key and values.
+// The zero value of Requirement is invalid.
+// Requirement implements both set-based match and exact match.
+// Requirement should be initialized via NewRequirement constructor for creating a valid Requirement.
+// +k8s:deepcopy-gen=true
+type Requirement struct {
+	key      string
+	operator selection.Operator
+	// In the huge majority of cases we have at most one value here.
+	// It is generally faster to operate on a single-element slice
+	// than on a single-element map, so we have a slice here.
+	strValues []string
+}
+
+// NewRequirement is the constructor for a Requirement.
+// If any of these rules is violated, an error is returned:
+// (1) The operator can only be In, NotIn, Equals, DoubleEquals, NotEquals, Exists, or DoesNotExist.
+// (2) If the operator is In or NotIn, the values set must be non-empty.
+// (3) If the operator is Equals, DoubleEquals, or NotEquals, the values set must contain one value.
+// (4) If the operator is Exists or DoesNotExist, the value set must be empty.
+// (5) If the operator is Gt or Lt, the values set must contain only one value, which will be interpreted as an integer.
+// (6) The key must be valid with respect to its length and sequence
+//     of characters. See validateLabelKey for more details.
+//
+// The empty string is a valid value in the input values set.
+func NewRequirement(key string, op selection.Operator, vals []string) (*Requirement, error) {
+	if err := validateLabelKey(key); err != nil {
+		return nil, err
+	}
+	switch op {
+	case selection.In, selection.NotIn:
+		if len(vals) == 0 {
+			return nil, fmt.Errorf("for 'in', 'notin' operators, values set can't be empty")
+		}
+	case selection.Equals, selection.DoubleEquals, selection.NotEquals:
+		if len(vals) != 1 {
+			return nil, fmt.Errorf("exact-match compatibility requires one single value")
+		}
+	case selection.Exists, selection.DoesNotExist:
+		if len(vals) != 0 {
+			return nil, fmt.Errorf("values set must be empty for exists and does not exist")
+		}
+	case selection.GreaterThan, selection.LessThan:
+		if len(vals) != 1 {
+			return nil, fmt.Errorf("for 'Gt', 'Lt' operators, exactly one value is required")
+		}
+		for i := range vals {
+			if _, err := strconv.ParseInt(vals[i], 10, 64); err != nil {
+				return nil, fmt.Errorf("for 'Gt', 'Lt' operators, the value must be an integer")
+			}
+		}
+	default:
+		return nil, fmt.Errorf("operator '%v' is not recognized", op)
+	}
+
+	for i := range vals {
+		if err := validateLabelValue(vals[i]); err != nil {
+			return nil, err
+		}
+	}
+	return &Requirement{key: key, operator: op, strValues: vals}, nil
+}
+
+func (r *Requirement) hasValue(value string) bool {
+	for i := range r.strValues {
+		if r.strValues[i] == value {
+			return true
+		}
+	}
+	return false
+}
+
+// Matches returns true if the Requirement matches the input Labels.
+// There is a match in the following cases:
+// (1) The operator is Exists and Labels has the Requirement's key.
+// (2) The operator is In, Labels has the Requirement's key and Labels'
+//     value for that key is in Requirement's value set.
+// (3) The operator is NotIn, Labels has the Requirement's key and
+//     Labels' value for that key is not in Requirement's value set.
+// (4) The operator is DoesNotExist or NotIn and Labels does not have the
+//     Requirement's key.
+// (5) The operator is GreaterThanOperator or LessThanOperator, and Labels has
+//     the Requirement's key and the corresponding value satisfies mathematical inequality.
+func (r *Requirement) Matches(ls Labels) bool {
+	switch r.operator {
+	case selection.In, selection.Equals, selection.DoubleEquals:
+		if !ls.Has(r.key) {
+			return false
+		}
+		return r.hasValue(ls.Get(r.key))
+	case selection.NotIn, selection.NotEquals:
+		if !ls.Has(r.key) {
+			return true
+		}
+		return !r.hasValue(ls.Get(r.key))
+	case selection.Exists:
+		return ls.Has(r.key)
+	case selection.DoesNotExist:
+		return !ls.Has(r.key)
+	case selection.GreaterThan, selection.LessThan:
+		if !ls.Has(r.key) {
+			return false
+		}
+		lsValue, err := strconv.ParseInt(ls.Get(r.key), 10, 64)
+		if err != nil {
+			klog.V(10).Infof("ParseInt failed for value %+v in label %+v, %+v", ls.Get(r.key), ls, err)
+			return false
+		}
+
+		// There should be only one strValue in r.strValues, and it can be converted to an integer.
+		if len(r.strValues) != 1 {
+			klog.V(10).Infof("Invalid values count %+v of requirement %#v, for 'Gt', 'Lt' operators, exactly one value is required", len(r.strValues), r)
+			return false
+		}
+
+		var rValue int64
+		for i := range r.strValues {
+			rValue, err = strconv.ParseInt(r.strValues[i], 10, 64)
+			if err != nil {
+				klog.V(10).Infof("ParseInt failed for value %+v in requirement %#v, for 'Gt', 'Lt' operators, the value must be an integer", r.strValues[i], r)
+				return false
+			}
+		}
+		return (r.operator == selection.GreaterThan && lsValue > rValue) || (r.operator == selection.LessThan && lsValue < rValue)
+	default:
+		return false
+	}
+}
+
+// Key returns requirement key
+func (r *Requirement) Key() string {
+	return r.key
+}
+
+// Operator returns requirement operator
+func (r *Requirement) Operator() selection.Operator {
+	return r.operator
+}
+
+// Values returns requirement values
+func (r *Requirement) Values() sets.String {
+	ret := sets.String{}
+	for i := range r.strValues {
+		ret.Insert(r.strValues[i])
+	}
+	return ret
+}
+
+// Empty returns true if the internalSelector doesn't restrict selection space
+func (lsel internalSelector) Empty() bool {
+	if lsel == nil {
+		return true
+	}
+	return len(lsel) == 0
+}
+
+// String returns a human-readable string that represents this
+// Requirement. If called on an invalid Requirement, an error is
+// returned. See NewRequirement for creating a valid Requirement.
+func (r *Requirement) String() string {
+	var buffer bytes.Buffer
+	if r.operator == selection.DoesNotExist {
+		buffer.WriteString("!")
+	}
+	buffer.WriteString(r.key)
+
+	switch r.operator {
+	case selection.Equals:
+		buffer.WriteString("=")
+	case selection.DoubleEquals:
+		buffer.WriteString("==")
+	case selection.NotEquals:
+		buffer.WriteString("!=")
+	case selection.In:
+		buffer.WriteString(" in ")
+	case selection.NotIn:
+		buffer.WriteString(" notin ")
+	case selection.GreaterThan:
+		buffer.WriteString(">")
+	case selection.LessThan:
+		buffer.WriteString("<")
+	case selection.Exists, selection.DoesNotExist:
+		return buffer.String()
+	}
+
+	switch r.operator {
+	case selection.In, selection.NotIn:
+		buffer.WriteString("(")
+	}
+	if len(r.strValues) == 1 {
+		buffer.WriteString(r.strValues[0])
+	} else { // only > 1 since == 0 prohibited by NewRequirement
+		// normalizes value order on output, without mutating the in-memory selector representation
+		// also avoids normalization when it is not required, and ensures we do not mutate shared data
+		buffer.WriteString(strings.Join(safeSort(r.strValues), ","))
+	}
+
+	switch r.operator {
+	case selection.In, selection.NotIn:
+		buffer.WriteString(")")
+	}
+	return buffer.String()
+}
+
+// safeSort sorts the input strings without modifying the input slice.
+func safeSort(in []string) []string {
+	if sort.StringsAreSorted(in) {
+		return in
+	}
+	out := make([]string, len(in))
+	copy(out, in)
+	sort.Strings(out)
+	return out
+}
+
+// Add adds requirements to the selector. It copies the current selector, returning a new one.
+func (lsel internalSelector) Add(reqs ...Requirement) Selector {
+	var sel internalSelector
+	for ix := range lsel {
+		sel = append(sel, lsel[ix])
+	}
+	for _, r := range reqs {
+		sel = append(sel, r)
+	}
+	sort.Sort(ByKey(sel))
+	return sel
+}
+
+// Matches for an internalSelector returns true if all
+// its Requirements match the input Labels. If any
+// Requirement does not match, false is returned.
+func (lsel internalSelector) Matches(l Labels) bool {
+	for ix := range lsel {
+		if matches := lsel[ix].Matches(l); !matches {
+			return false
+		}
+	}
+	return true
+}
+
+func (lsel internalSelector) Requirements() (Requirements, bool) { return Requirements(lsel), true }
+
+// String returns a comma-separated string of all
+// the internalSelector Requirements' human-readable strings.
+func (lsel internalSelector) String() string {
+	var reqs []string
+	for ix := range lsel {
+		reqs = append(reqs, lsel[ix].String())
+	}
+	return strings.Join(reqs, ",")
+}
+
+// Token represents a constant definition for a lexer token.
+type Token int
+
+const (
+	// ErrorToken represents scan error
+	ErrorToken Token = iota
+	// EndOfStringToken represents end of string
+	EndOfStringToken
+	// ClosedParToken represents close parenthesis
+	ClosedParToken
+	// CommaToken represents the comma
+	CommaToken
+	// DoesNotExistToken represents logic not
+	DoesNotExistToken
+	// DoubleEqualsToken represents double equals
+	DoubleEqualsToken
+	// EqualsToken represents equal
+	EqualsToken
+	// GreaterThanToken represents greater than
+	GreaterThanToken
+	// IdentifierToken represents identifier, e.g. keys and values
+	IdentifierToken
+	// InToken represents in
+	InToken
+	// LessThanToken represents less than
+	LessThanToken
+	// NotEqualsToken represents not equal
+	NotEqualsToken
+	// NotInToken represents not in
+	NotInToken
+	// OpenParToken represents open parenthesis
+	OpenParToken
+)
+
+// string2token contains the mapping between lexer Token and token literal
+// (except IdentifierToken, EndOfStringToken and ErrorToken since it makes no sense)
+var string2token = map[string]Token{
+	")":     ClosedParToken,
+	",":     CommaToken,
+	"!":     DoesNotExistToken,
+	"==":    DoubleEqualsToken,
+	"=":     EqualsToken,
+	">":     GreaterThanToken,
+	"in":    InToken,
+	"<":     LessThanToken,
+	"!=":    NotEqualsToken,
+	"notin": NotInToken,
+	"(":     OpenParToken,
+}
+
+// ScannedItem contains the Token and the literal produced by the lexer.
+type ScannedItem struct {
+	tok     Token
+	literal string
+}
+
+// isWhitespace returns true if the byte is a space, tab, carriage return, or newline.
+func isWhitespace(ch byte) bool {
+	return ch == ' ' || ch == '\t' || ch == '\r' || ch == '\n'
+}
+
+// isSpecialSymbol detects whether the character ch can be part of an operator.
+func isSpecialSymbol(ch byte) bool {
+	switch ch {
+	case '=', '!', '(', ')', ',', '>', '<':
+		return true
+	}
+	return false
+}
+
+// Lexer represents the lexer for label selectors.
+// It contains the information necessary to tokenize the input string.
+type Lexer struct {
+	// s stores the string to be tokenized
+	s string
+	// pos is the position currently tokenized
+	pos int
+}
+
+// read returns the character currently being lexed, increments
+// the position, and guards against reading past the end of the input.
+func (l *Lexer) read() (b byte) {
+	b = 0
+	if l.pos < len(l.s) {
+		b = l.s[l.pos]
+		l.pos++
+	}
+	return b
+}
+
+// unread 'undoes' the last read character
+func (l *Lexer) unread() {
+	l.pos--
+}
+
+// scanIDOrKeyword scans the string to recognize a literal token (for example 'in') or an identifier.
+func (l *Lexer) scanIDOrKeyword() (tok Token, lit string) {
+	var buffer []byte
+IdentifierLoop:
+	for {
+		switch ch := l.read(); {
+		case ch == 0:
+			break IdentifierLoop
+		case isSpecialSymbol(ch) || isWhitespace(ch):
+			l.unread()
+			break IdentifierLoop
+		default:
+			buffer = append(buffer, ch)
+		}
+	}
+	s := string(buffer)
+	if val, ok := string2token[s]; ok { // is a literal token?
+		return val, s
+	}
+	return IdentifierToken, s // otherwise is an identifier
+}
+
+// scanSpecialSymbol scans a string starting with a special symbol.
+// Special symbols identify non-literal operators such as "!=", "==", and "=".
+func (l *Lexer) scanSpecialSymbol() (Token, string) {
+	lastScannedItem := ScannedItem{}
+	var buffer []byte
+SpecialSymbolLoop:
+	for {
+		switch ch := l.read(); {
+		case ch == 0:
+			break SpecialSymbolLoop
+		case isSpecialSymbol(ch):
+			buffer = append(buffer, ch)
+			if token, ok := string2token[string(buffer)]; ok {
+				lastScannedItem = ScannedItem{tok: token, literal: string(buffer)}
+			} else if lastScannedItem.tok != 0 {
+				l.unread()
+				break SpecialSymbolLoop
+			}
+		default:
+			l.unread()
+			break SpecialSymbolLoop
+		}
+	}
+	if lastScannedItem.tok == 0 {
+		return ErrorToken, fmt.Sprintf("error expected: keyword found '%s'", buffer)
+	}
+	return lastScannedItem.tok, lastScannedItem.literal
+}
+
+// skipWhiteSpaces consumes all blank characters,
+// returning the first non-blank character.
+func (l *Lexer) skipWhiteSpaces(ch byte) byte {
+	for {
+		if !isWhitespace(ch) {
+			return ch
+		}
+		ch = l.read()
+	}
+}
+
+// Lex returns a pair of Token and literal;
+// the literal is meaningful only for the IdentifierToken token.
+func (l *Lexer) Lex() (tok Token, lit string) {
+	switch ch := l.skipWhiteSpaces(l.read()); {
+	case ch == 0:
+		return EndOfStringToken, ""
+	case isSpecialSymbol(ch):
+		l.unread()
+		return l.scanSpecialSymbol()
+	default:
+		l.unread()
+		return l.scanIDOrKeyword()
+	}
+}
+
+// Parser holds the state of the label selector parser.
+type Parser struct {
+	l            *Lexer
+	scannedItems []ScannedItem
+	position     int
+}
+
+// ParserContext represents the context during parsing:
+// some literals, for example 'in' and 'notin', can be
+// recognized as an operator (as in 'x in (a)') but
+// also as a value (as in 'value in (in)').
+type ParserContext int
+
+const (
+	// KeyAndOperator represents key and operator
+	KeyAndOperator ParserContext = iota
+	// Values represents values
+	Values
+)
+
+// lookahead returns the current token and literal without advancing the current position.
+func (p *Parser) lookahead(context ParserContext) (Token, string) {
+	tok, lit := p.scannedItems[p.position].tok, p.scannedItems[p.position].literal
+	if context == Values {
+		switch tok {
+		case InToken, NotInToken:
+			tok = IdentifierToken
+		}
+	}
+	return tok, lit
+}
+
+// consume returns the current token and literal, then increments the position.
+func (p *Parser) consume(context ParserContext) (Token, string) {
+	p.position++
+	tok, lit := p.scannedItems[p.position-1].tok, p.scannedItems[p.position-1].literal
+	if context == Values {
+		switch tok {
+		case InToken, NotInToken:
+			tok = IdentifierToken
+		}
+	}
+	return tok, lit
+}
+
+// scan runs through the input string and stores each ScannedItem in an array
+// so that the Parser can look ahead at and consume the tokens.
+func (p *Parser) scan() {
+	for {
+		token, literal := p.l.Lex()
+		p.scannedItems = append(p.scannedItems, ScannedItem{token, literal})
+		if token == EndOfStringToken {
+			break
+		}
+	}
+}
+
+// parse runs a recursive-descent parse over the input string.
+// It returns a list of Requirement objects.
+func (p *Parser) parse() (internalSelector, error) {
+	p.scan() // init scannedItems
+
+	var requirements internalSelector
+	for {
+		tok, lit := p.lookahead(Values)
+		switch tok {
+		case IdentifierToken, DoesNotExistToken:
+			r, err := p.parseRequirement()
+			if err != nil {
+				return nil, fmt.Errorf("unable to parse requirement: %v", err)
+			}
+			requirements = append(requirements, *r)
+			t, l := p.consume(Values)
+			switch t {
+			case EndOfStringToken:
+				return requirements, nil
+			case CommaToken:
+				t2, l2 := p.lookahead(Values)
+				if t2 != IdentifierToken && t2 != DoesNotExistToken {
+					return nil, fmt.Errorf("found '%s', expected: identifier after ','", l2)
+				}
+			default:
+				return nil, fmt.Errorf("found '%s', expected: ',' or 'end of string'", l)
+			}
+		case EndOfStringToken:
+			return requirements, nil
+		default:
+			return nil, fmt.Errorf("found '%s', expected: !, identifier, or 'end of string'", lit)
+		}
+	}
+}
+
+func (p *Parser) parseRequirement() (*Requirement, error) {
+	key, operator, err := p.parseKeyAndInferOperator()
+	if err != nil {
+		return nil, err
+	}
+	if operator == selection.Exists || operator == selection.DoesNotExist { // operator found lookahead set checked
+		return NewRequirement(key, operator, []string{})
+	}
+	operator, err = p.parseOperator()
+	if err != nil {
+		return nil, err
+	}
+	var values sets.String
+	switch operator {
+	case selection.In, selection.NotIn:
+		values, err = p.parseValues()
+	case selection.Equals, selection.DoubleEquals, selection.NotEquals, selection.GreaterThan, selection.LessThan:
+		values, err = p.parseExactValue()
+	}
+	if err != nil {
+		return nil, err
+	}
+	return NewRequirement(key, operator, values.List())
+
+}
+
+// parseKeyAndInferOperator parses the key literal.
+// If none of the operators '!', 'in', 'notin', '==', '=', '!=' is found,
+// the 'exists' operator is inferred.
+func (p *Parser) parseKeyAndInferOperator() (string, selection.Operator, error) {
+	var operator selection.Operator
+	tok, literal := p.consume(Values)
+	if tok == DoesNotExistToken {
+		operator = selection.DoesNotExist
+		tok, literal = p.consume(Values)
+	}
+	if tok != IdentifierToken {
+		err := fmt.Errorf("found '%s', expected: identifier", literal)
+		return "", "", err
+	}
+	if err := validateLabelKey(literal); err != nil {
+		return "", "", err
+	}
+	if t, _ := p.lookahead(Values); t == EndOfStringToken || t == CommaToken {
+		if operator != selection.DoesNotExist {
+			operator = selection.Exists
+		}
+	}
+	return literal, operator, nil
+}
+
+// parseOperator returns the operator and, where applicable, the match type;
+// the match type can be exact.
+func (p *Parser) parseOperator() (op selection.Operator, err error) {
+	tok, lit := p.consume(KeyAndOperator)
+	switch tok {
+	// DoesNotExistToken shouldn't be here because it's a unary operator, not a binary operator
+	case InToken:
+		op = selection.In
+	case EqualsToken:
+		op = selection.Equals
+	case DoubleEqualsToken:
+		op = selection.DoubleEquals
+	case GreaterThanToken:
+		op = selection.GreaterThan
+	case LessThanToken:
+		op = selection.LessThan
+	case NotInToken:
+		op = selection.NotIn
+	case NotEqualsToken:
+		op = selection.NotEquals
+	default:
+		return "", fmt.Errorf("found '%s', expected: '=', '!=', '==', 'in', notin'", lit)
+	}
+	return op, nil
+}
+
+// parseValues parses the values for set based matching (x,y,z)
+func (p *Parser) parseValues() (sets.String, error) {
+	tok, lit := p.consume(Values)
+	if tok != OpenParToken {
+		return nil, fmt.Errorf("found '%s' expected: '('", lit)
+	}
+	tok, lit = p.lookahead(Values)
+	switch tok {
+	case IdentifierToken, CommaToken:
+		s, err := p.parseIdentifiersList() // handles general cases
+		if err != nil {
+			return s, err
+		}
+		if tok, _ = p.consume(Values); tok != ClosedParToken {
+			return nil, fmt.Errorf("found '%s', expected: ')'", lit)
+		}
+		return s, nil
+	case ClosedParToken: // handles "()"
+		p.consume(Values)
+		return sets.NewString(""), nil
+	default:
+		return nil, fmt.Errorf("found '%s', expected: ',', ')' or identifier", lit)
+	}
+}
+
+// parseIdentifiersList parses a (possibly empty) list of
+// comma-separated (possibly empty) identifiers.
+func (p *Parser) parseIdentifiersList() (sets.String, error) {
+	s := sets.NewString()
+	for {
+		tok, lit := p.consume(Values)
+		switch tok {
+		case IdentifierToken:
+			s.Insert(lit)
+			tok2, lit2 := p.lookahead(Values)
+			switch tok2 {
+			case CommaToken:
+				continue
+			case ClosedParToken:
+				return s, nil
+			default:
+				return nil, fmt.Errorf("found '%s', expected: ',' or ')'", lit2)
+			}
+		case CommaToken: // handled here since we can have "(,"
+			if s.Len() == 0 {
+				s.Insert("") // to handle (,
+			}
+			tok2, _ := p.lookahead(Values)
+			if tok2 == ClosedParToken {
+				s.Insert("") // to handle ,)  Double "" removed by StringSet
+				return s, nil
+			}
+			if tok2 == CommaToken {
+				p.consume(Values)
+				s.Insert("") // to handle ,, Double "" removed by StringSet
+			}
+		default: // it can be operator
+			return s, fmt.Errorf("found '%s', expected: ',', or identifier", lit)
+		}
+	}
+}
+
+// parseExactValue parses the only value for exact match style
+func (p *Parser) parseExactValue() (sets.String, error) {
+	s := sets.NewString()
+	tok, lit := p.lookahead(Values)
+	if tok == EndOfStringToken || tok == CommaToken {
+		s.Insert("")
+		return s, nil
+	}
+	tok, lit = p.consume(Values)
+	if tok == IdentifierToken {
+		s.Insert(lit)
+		return s, nil
+	}
+	return nil, fmt.Errorf("found '%s', expected: identifier", lit)
+}
+
+// Parse takes a string representing a selector and returns a selector
+// object, or an error. This parsing function differs from ParseSelector
+// as they parse different selectors with different syntaxes.
+// The input will cause an error if it does not follow this form:
+//
+//  <selector-syntax>         ::= <requirement> | <requirement> "," <selector-syntax>
+//  <requirement>             ::= [!] KEY [ <set-based-restriction> | <exact-match-restriction> ]
+//  <set-based-restriction>   ::= "" | <inclusion-exclusion> <value-set>
+//  <inclusion-exclusion>     ::= <inclusion> | <exclusion>
+//  <exclusion>               ::= "notin"
+//  <inclusion>               ::= "in"
+//  <value-set>               ::= "(" <values> ")"
+//  <values>                  ::= VALUE | VALUE "," <values>
+//  <exact-match-restriction> ::= ["="|"=="|"!="] VALUE
+//
+// KEY is a sequence of one or more characters following [ DNS_SUBDOMAIN "/" ] DNS_LABEL. Max length is 63 characters.
+// VALUE is a sequence of zero or more characters "([A-Za-z0-9_-\.])". Max length is 63 characters.
+// Delimiter is white space: (' ', '\t')
+// Example of valid syntax:
+//  "x in (foo,,baz),y,z notin ()"
+//
+// Note:
+//  (1) Inclusion - " in " - denotes that the KEY exists and is equal to any of the
+//      VALUEs in its requirement
+//  (2) Exclusion - " notin " - denotes that the KEY is not equal to any
+//      of the VALUEs in its requirement or does not exist
+//  (3) The empty string is a valid VALUE
+//  (4) A requirement with just a KEY - as in "y" above - denotes that
+//      the KEY exists and can be any VALUE.
+//  (5) A requirement with just !KEY requires that the KEY not exist.
+//
+func Parse(selector string) (Selector, error) {
+	parsedSelector, err := parse(selector)
+	if err == nil {
+		return parsedSelector, nil
+	}
+	return nil, err
+}
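+
+// Example (illustrative; Matches is part of the Selector interface defined
+// elsewhere in this package):
+//
+//	sel, err := Parse("env in (prod,staging),tier notin (frontend)")
+//	if err != nil {
+//		// malformed selector string
+//	}
+//	matched := sel.Matches(Set{"env": "prod", "tier": "backend"}) // true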
+
+// parse parses the string representation of the selector and returns the internalSelector struct.
+// The callers of this method can then decide how to return the internalSelector struct to their
+// callers. This function has two callers now, one returns a Selector interface and the other
+// returns a list of requirements.
+func parse(selector string) (internalSelector, error) {
+	p := &Parser{l: &Lexer{s: selector, pos: 0}}
+	items, err := p.parse()
+	if err != nil {
+		return nil, err
+	}
+	sort.Sort(ByKey(items)) // sort to guarantee deterministic parsing
+	return internalSelector(items), err
+}
+
+func validateLabelKey(k string) error {
+	if errs := validation.IsQualifiedName(k); len(errs) != 0 {
+		return fmt.Errorf("invalid label key %q: %s", k, strings.Join(errs, "; "))
+	}
+	return nil
+}
+
+func validateLabelValue(v string) error {
+	if errs := validation.IsValidLabelValue(v); len(errs) != 0 {
+		return fmt.Errorf("invalid label value: %q: %s", v, strings.Join(errs, "; "))
+	}
+	return nil
+}
+
+// SelectorFromSet returns a Selector which will match exactly the given Set.
+// Nil and empty Sets are considered equivalent to Everything().
+func SelectorFromSet(ls Set) Selector {
+	if ls == nil || len(ls) == 0 {
+		return internalSelector{}
+	}
+	var requirements internalSelector
+	for label, value := range ls {
+		r, err := NewRequirement(label, selection.Equals, []string{value})
+		if err == nil {
+			requirements = append(requirements, *r)
+		} else {
+			//TODO: double check errors when input comes from serialization?
+			return internalSelector{}
+		}
+	}
+	// sort to have deterministic string representation
+	sort.Sort(ByKey(requirements))
+	return requirements
+}
+
+// SelectorFromValidatedSet returns a Selector which will match exactly the given Set.
+// Nil and empty Sets are considered equivalent to Everything().
+// It assumes that Set is already validated and doesn't do any validation.
+func SelectorFromValidatedSet(ls Set) Selector {
+	if ls == nil || len(ls) == 0 {
+		return internalSelector{}
+	}
+	var requirements internalSelector
+	for label, value := range ls {
+		requirements = append(requirements, Requirement{key: label, operator: selection.Equals, strValues: []string{value}})
+	}
+	// sort to have deterministic string representation
+	sort.Sort(ByKey(requirements))
+	return requirements
+}
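+
+// For example (illustrative), SelectorFromSet(Set{"app": "db", "env": "prod"})
+// is equivalent to the parsed selector "app=db,env=prod"; both build the same
+// sorted equality requirements.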
+
+// ParseToRequirements takes a string representing a selector and returns a list of
+// requirements. This function is suitable for those callers that perform additional
+// processing on selector requirements.
+// See the documentation for Parse() function for more details.
+// TODO: Consider exporting the internalSelector type instead.
+func ParseToRequirements(selector string) ([]Requirement, error) {
+	return parse(selector)
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go
new file mode 100644
index 0000000..4d48294
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go
@@ -0,0 +1,42 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package labels
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Requirement) DeepCopyInto(out *Requirement) {
+	*out = *in
+	if in.strValues != nil {
+		in, out := &in.strValues, &out.strValues
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Requirement.
+func (in *Requirement) DeepCopy() *Requirement {
+	if in == nil {
+		return nil
+	}
+	out := new(Requirement)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/codec.go b/vendor/k8s.io/apimachinery/pkg/runtime/codec.go
new file mode 100644
index 0000000..6b859b2
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/codec.go
@@ -0,0 +1,312 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package runtime
+
+import (
+	"bytes"
+	"encoding/base64"
+	"fmt"
+	"io"
+	"net/url"
+	"reflect"
+
+	"k8s.io/apimachinery/pkg/conversion/queryparams"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// codec binds an encoder and decoder.
+type codec struct {
+	Encoder
+	Decoder
+}
+
+// NewCodec creates a Codec from an Encoder and Decoder.
+func NewCodec(e Encoder, d Decoder) Codec {
+	return codec{e, d}
+}
+
+// Encode is a convenience wrapper for encoding to a []byte from an Encoder
+func Encode(e Encoder, obj Object) ([]byte, error) {
+	// TODO: reuse buffer
+	buf := &bytes.Buffer{}
+	if err := e.Encode(obj, buf); err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
+
+// Decode is a convenience wrapper for decoding data into an Object.
+func Decode(d Decoder, data []byte) (Object, error) {
+	obj, _, err := d.Decode(data, nil, nil)
+	return obj, err
+}
+
+// DecodeInto performs a Decode into the provided object.
+func DecodeInto(d Decoder, data []byte, into Object) error {
+	out, gvk, err := d.Decode(data, nil, into)
+	if err != nil {
+		return err
+	}
+	if out != into {
+		return fmt.Errorf("unable to decode %s into %v", gvk, reflect.TypeOf(into))
+	}
+	return nil
+}
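+
+// Example (illustrative; assumes a concrete Codec obtained elsewhere, e.g.
+// from a serializer factory, and obj/into values implementing Object):
+//
+//	data, err := Encode(codec, obj)       // obj -> wire bytes
+//	out, err2 := Decode(codec, data)      // wire bytes -> new Object
+//	err3 := DecodeInto(codec, data, into) // wire bytes -> existing Object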
+
+// EncodeOrDie is a version of Encode which will panic instead of returning an error. For tests.
+func EncodeOrDie(e Encoder, obj Object) string {
+	bytes, err := Encode(e, obj)
+	if err != nil {
+		panic(err)
+	}
+	return string(bytes)
+}
+
+// UseOrCreateObject returns obj if the canonical ObjectKind returned by the provided typer matches gvk, or
+// invokes the ObjectCreater to instantiate a new gvk. Returns an error if the typer cannot find the object.
+func UseOrCreateObject(t ObjectTyper, c ObjectCreater, gvk schema.GroupVersionKind, obj Object) (Object, error) {
+	if obj != nil {
+		kinds, _, err := t.ObjectKinds(obj)
+		if err != nil {
+			return nil, err
+		}
+		for _, kind := range kinds {
+			if gvk == kind {
+				return obj, nil
+			}
+		}
+	}
+	return c.New(gvk)
+}
+
+// NoopEncoder converts a Decoder to a Serializer or Codec for code that expects them but only uses decoding.
+type NoopEncoder struct {
+	Decoder
+}
+
+var _ Serializer = NoopEncoder{}
+
+func (n NoopEncoder) Encode(obj Object, w io.Writer) error {
+	return fmt.Errorf("encoding is not allowed for this codec: %v", reflect.TypeOf(n.Decoder))
+}
+
+// NoopDecoder converts an Encoder to a Serializer or Codec for code that expects them but only uses encoding.
+type NoopDecoder struct {
+	Encoder
+}
+
+var _ Serializer = NoopDecoder{}
+
+func (n NoopDecoder) Decode(data []byte, gvk *schema.GroupVersionKind, into Object) (Object, *schema.GroupVersionKind, error) {
+	return nil, nil, fmt.Errorf("decoding is not allowed for this codec: %v", reflect.TypeOf(n.Encoder))
+}
+
+// NewParameterCodec creates a ParameterCodec capable of transforming url values into versioned objects and back.
+func NewParameterCodec(scheme *Scheme) ParameterCodec {
+	return &parameterCodec{
+		typer:     scheme,
+		convertor: scheme,
+		creator:   scheme,
+		defaulter: scheme,
+	}
+}
+
+// parameterCodec implements conversion to and from query parameters and objects.
+type parameterCodec struct {
+	typer     ObjectTyper
+	convertor ObjectConvertor
+	creator   ObjectCreater
+	defaulter ObjectDefaulter
+}
+
+var _ ParameterCodec = &parameterCodec{}
+
+// DecodeParameters converts the provided url.Values into an object of type From with the kind of into, and then
+// converts that object to into (if necessary). Returns an error if the operation cannot be completed.
+func (c *parameterCodec) DecodeParameters(parameters url.Values, from schema.GroupVersion, into Object) error {
+	if len(parameters) == 0 {
+		return nil
+	}
+	targetGVKs, _, err := c.typer.ObjectKinds(into)
+	if err != nil {
+		return err
+	}
+	for i := range targetGVKs {
+		if targetGVKs[i].GroupVersion() == from {
+			if err := c.convertor.Convert(&parameters, into, nil); err != nil {
+				return err
+			}
+			// in the case where we're converting into the same object we're receiving, default on the outbound object
+			if c.defaulter != nil {
+				c.defaulter.Default(into)
+			}
+			return nil
+		}
+	}
+
+	input, err := c.creator.New(from.WithKind(targetGVKs[0].Kind))
+	if err != nil {
+		return err
+	}
+	if err := c.convertor.Convert(&parameters, input, nil); err != nil {
+		return err
+	}
+	// if we have defaulter, default the input before converting to output
+	if c.defaulter != nil {
+		c.defaulter.Default(input)
+	}
+	return c.convertor.Convert(input, into, nil)
+}
+
+// EncodeParameters converts the provided object into the to version, then converts that object to url.Values.
+// Returns an error if conversion is not possible.
+func (c *parameterCodec) EncodeParameters(obj Object, to schema.GroupVersion) (url.Values, error) {
+	gvks, _, err := c.typer.ObjectKinds(obj)
+	if err != nil {
+		return nil, err
+	}
+	gvk := gvks[0]
+	if to != gvk.GroupVersion() {
+		out, err := c.convertor.ConvertToVersion(obj, to)
+		if err != nil {
+			return nil, err
+		}
+		obj = out
+	}
+	return queryparams.Convert(obj)
+}
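+
+// Example (illustrative; assumes a *Scheme with the relevant option types
+// and conversions registered):
+//
+//	pc := NewParameterCodec(scheme)
+//	values, err := pc.EncodeParameters(listOptions, schema.GroupVersion{Version: "v1"})
+//	// values is a url.Values ready to be appended to a request URL.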
+
+type base64Serializer struct {
+	Encoder
+	Decoder
+}
+
+func NewBase64Serializer(e Encoder, d Decoder) Serializer {
+	return &base64Serializer{e, d}
+}
+
+func (s base64Serializer) Encode(obj Object, stream io.Writer) error {
+	e := base64.NewEncoder(base64.StdEncoding, stream)
+	err := s.Encoder.Encode(obj, e)
+	e.Close()
+	return err
+}
+
+func (s base64Serializer) Decode(data []byte, defaults *schema.GroupVersionKind, into Object) (Object, *schema.GroupVersionKind, error) {
+	out := make([]byte, base64.StdEncoding.DecodedLen(len(data)))
+	n, err := base64.StdEncoding.Decode(out, data)
+	if err != nil {
+		return nil, nil, err
+	}
+	return s.Decoder.Decode(out[:n], defaults, into)
+}
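+
+// Example (illustrative; jsonSerializer stands for any existing Serializer):
+//
+//	s := NewBase64Serializer(jsonSerializer, jsonSerializer)
+//	err := s.Encode(obj, w) // writes base64(JSON(obj)) to w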
+
+// SerializerInfoForMediaType returns the first info in types that has a matching media type (which cannot
+// include media-type parameters), or the first info with an empty media type, or false if no type matches.
+func SerializerInfoForMediaType(types []SerializerInfo, mediaType string) (SerializerInfo, bool) {
+	for _, info := range types {
+		if info.MediaType == mediaType {
+			return info, true
+		}
+	}
+	for _, info := range types {
+		if len(info.MediaType) == 0 {
+			return info, true
+		}
+	}
+	return SerializerInfo{}, false
+}
+
+var (
+	// InternalGroupVersioner will always prefer the internal version for a given group version kind.
+	InternalGroupVersioner GroupVersioner = internalGroupVersioner{}
+	// DisabledGroupVersioner will reject all kinds passed to it.
+	DisabledGroupVersioner GroupVersioner = disabledGroupVersioner{}
+)
+
+type internalGroupVersioner struct{}
+
+// KindForGroupVersionKinds returns an internal Kind if one is found, or converts the first provided kind to the internal version.
+func (internalGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (schema.GroupVersionKind, bool) {
+	for _, kind := range kinds {
+		if kind.Version == APIVersionInternal {
+			return kind, true
+		}
+	}
+	for _, kind := range kinds {
+		return schema.GroupVersionKind{Group: kind.Group, Version: APIVersionInternal, Kind: kind.Kind}, true
+	}
+	return schema.GroupVersionKind{}, false
+}
+
+type disabledGroupVersioner struct{}
+
+// KindForGroupVersionKinds returns false for any input.
+func (disabledGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (schema.GroupVersionKind, bool) {
+	return schema.GroupVersionKind{}, false
+}
+
+// GroupVersioners implements GroupVersioner and resolves to the first exact match for any kind.
+type GroupVersioners []GroupVersioner
+
+// KindForGroupVersionKinds returns the first match of any of the group versioners, or false if no match occurred.
+func (gvs GroupVersioners) KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (schema.GroupVersionKind, bool) {
+	for _, gv := range gvs {
+		target, ok := gv.KindForGroupVersionKinds(kinds)
+		if !ok {
+			continue
+		}
+		return target, true
+	}
+	return schema.GroupVersionKind{}, false
+}
+
+// Assert that schema.GroupVersion and GroupVersions implement GroupVersioner
+var _ GroupVersioner = schema.GroupVersion{}
+var _ GroupVersioner = schema.GroupVersions{}
+var _ GroupVersioner = multiGroupVersioner{}
+
+type multiGroupVersioner struct {
+	target             schema.GroupVersion
+	acceptedGroupKinds []schema.GroupKind
+}
+
+// NewMultiGroupVersioner returns the provided group version for any kind that matches one of the provided group kinds.
+// Kind may be empty in the provided group kind, in which case any kind will match.
+func NewMultiGroupVersioner(gv schema.GroupVersion, groupKinds ...schema.GroupKind) GroupVersioner {
+	if len(groupKinds) == 0 || (len(groupKinds) == 1 && groupKinds[0].Group == gv.Group) {
+		return gv
+	}
+	return multiGroupVersioner{target: gv, acceptedGroupKinds: groupKinds}
+}
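+
+// Example (illustrative):
+//
+//	gv := schema.GroupVersion{Group: "apps", Version: "v1"}
+//	v := NewMultiGroupVersioner(gv,
+//		schema.GroupKind{Group: "apps"},
+//		schema.GroupKind{Group: "extensions"})
+//	// v resolves any "apps" or "extensions" kind, e.g. an
+//	// extensions/v1beta1 Deployment, to apps/v1 with the Kind preserved.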
+
+// KindForGroupVersionKinds returns the target group version if any kind matches any of the original group kinds. It will
+// use the originating kind where possible.
+func (v multiGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (schema.GroupVersionKind, bool) {
+	for _, src := range kinds {
+		for _, kind := range v.acceptedGroupKinds {
+			if kind.Group != src.Group {
+				continue
+			}
+			if len(kind.Kind) > 0 && kind.Kind != src.Kind {
+				continue
+			}
+			return v.target.WithKind(src.Kind), true
+		}
+	}
+	return schema.GroupVersionKind{}, false
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/codec_check.go b/vendor/k8s.io/apimachinery/pkg/runtime/codec_check.go
new file mode 100644
index 0000000..510444a
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/codec_check.go
@@ -0,0 +1,48 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package runtime
+
+import (
+	"fmt"
+	"reflect"
+
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// CheckCodec makes sure that the codec can encode objects like internalType,
+// decode all of the external types listed, and also decode them into the given
+// object. (Will modify internalType.) (Assumes JSON serialization.)
+// TODO: verify that the correct external version is chosen on encode...
+func CheckCodec(c Codec, internalType Object, externalTypes ...schema.GroupVersionKind) error {
+	if _, err := Encode(c, internalType); err != nil {
+		return fmt.Errorf("Internal type not encodable: %v", err)
+	}
+	for _, et := range externalTypes {
+		exBytes := []byte(fmt.Sprintf(`{"kind":"%v","apiVersion":"%v"}`, et.Kind, et.GroupVersion().String()))
+		obj, err := Decode(c, exBytes)
+		if err != nil {
+			return fmt.Errorf("external type %s not interpretable: %v", et, err)
+		}
+		if reflect.TypeOf(obj) != reflect.TypeOf(internalType) {
+			return fmt.Errorf("decode of external type %s produced: %#v", et, obj)
+		}
+		if err = DecodeInto(c, exBytes, internalType); err != nil {
+			return fmt.Errorf("external type %s not convertible to internal type: %v", et, err)
+		}
+	}
+	return nil
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go b/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go
new file mode 100644
index 0000000..08d2abf
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go
@@ -0,0 +1,113 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package runtime defines conversions between generic types and structs to map query strings
+// to struct objects.
+package runtime
+
+import (
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/conversion"
+)
+
+// DefaultMetaV1FieldSelectorConversion auto-accepts metav1 values for name and namespace.
+// For a cluster-scoped resource, specifying an empty namespace works fine, while specifying
+// a particular namespace will return no results, as expected.
+func DefaultMetaV1FieldSelectorConversion(label, value string) (string, string, error) {
+	switch label {
+	case "metadata.name":
+		return label, value, nil
+	case "metadata.namespace":
+		return label, value, nil
+	default:
+		return "", "", fmt.Errorf("%q is not a known field selector: only %q, %q", label, "metadata.name", "metadata.namespace")
+	}
+}
+
+// JSONKeyMapper uses the struct tags on a conversion to determine the key value for
+// the other side. Use when mapping from a map[string]* to a struct or vice versa.
+func JSONKeyMapper(key string, sourceTag, destTag reflect.StructTag) (string, string) {
+	if s := destTag.Get("json"); len(s) > 0 {
+		return strings.SplitN(s, ",", 2)[0], key
+	}
+	if s := sourceTag.Get("json"); len(s) > 0 {
+		return key, strings.SplitN(s, ",", 2)[0]
+	}
+	return key, key
+}
+
+// DefaultStringConversions are helpers for converting []string and string to real values.
+var DefaultStringConversions = []interface{}{
+	Convert_Slice_string_To_string,
+	Convert_Slice_string_To_int,
+	Convert_Slice_string_To_bool,
+	Convert_Slice_string_To_int64,
+}
+
+func Convert_Slice_string_To_string(input *[]string, out *string, s conversion.Scope) error {
+	if len(*input) == 0 {
+		*out = ""
+		return nil
+	}
+	*out = (*input)[0]
+	return nil
+}
+
+func Convert_Slice_string_To_int(input *[]string, out *int, s conversion.Scope) error {
+	if len(*input) == 0 {
+		*out = 0
+		return nil
+	}
+	str := (*input)[0]
+	i, err := strconv.Atoi(str)
+	if err != nil {
+		return err
+	}
+	*out = i
+	return nil
+}
+
+// Convert_Slice_string_To_bool will convert a string parameter to boolean.
+// Only the absence of a value, a value of "false", or a value of "0" resolve to false.
+// Any other value (including empty string) resolves to true.
+func Convert_Slice_string_To_bool(input *[]string, out *bool, s conversion.Scope) error {
+	if len(*input) == 0 {
+		*out = false
+		return nil
+	}
+	switch strings.ToLower((*input)[0]) {
+	case "false", "0":
+		*out = false
+	default:
+		*out = true
+	}
+	return nil
+}
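+
+// For example (illustrative; passing a nil conversion.Scope is fine here
+// because the scope is unused):
+//
+//	var b bool
+//	_ = Convert_Slice_string_To_bool(&[]string{"0"}, &b, nil) // b == false
+//	_ = Convert_Slice_string_To_bool(&[]string{""}, &b, nil)  // b == true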
+
+func Convert_Slice_string_To_int64(input *[]string, out *int64, s conversion.Scope) error {
+	if len(*input) == 0 {
+		*out = 0
+		return nil
+	}
+	str := (*input)[0]
+	i, err := strconv.ParseInt(str, 10, 64)
+	if err != nil {
+		return err
+	}
+	*out = i
+	return nil
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/converter.go b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go
new file mode 100644
index 0000000..dff56e0
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go
@@ -0,0 +1,805 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package runtime
+
+import (
+	"bytes"
+	encodingjson "encoding/json"
+	"fmt"
+	"math"
+	"os"
+	"reflect"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"k8s.io/apimachinery/pkg/conversion"
+	"k8s.io/apimachinery/pkg/util/json"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+
+	"k8s.io/klog"
+)
+
+// UnstructuredConverter is an interface for converting between interface{}
+// and map[string]interface{} representation.
+type UnstructuredConverter interface {
+	ToUnstructured(obj interface{}) (map[string]interface{}, error)
+	FromUnstructured(u map[string]interface{}, obj interface{}) error
+}
+
+type structField struct {
+	structType reflect.Type
+	field      int
+}
+
+type fieldInfo struct {
+	name      string
+	nameValue reflect.Value
+	omitempty bool
+}
+
+type fieldsCacheMap map[structField]*fieldInfo
+
+type fieldsCache struct {
+	sync.Mutex
+	value atomic.Value
+}
+
+func newFieldsCache() *fieldsCache {
+	cache := &fieldsCache{}
+	cache.value.Store(make(fieldsCacheMap))
+	return cache
+}
+
+var (
+	marshalerType          = reflect.TypeOf(new(encodingjson.Marshaler)).Elem()
+	unmarshalerType        = reflect.TypeOf(new(encodingjson.Unmarshaler)).Elem()
+	mapStringInterfaceType = reflect.TypeOf(map[string]interface{}{})
+	stringType             = reflect.TypeOf(string(""))
+	int64Type              = reflect.TypeOf(int64(0))
+	float64Type            = reflect.TypeOf(float64(0))
+	boolType               = reflect.TypeOf(bool(false))
+	fieldCache             = newFieldsCache()
+
+	// DefaultUnstructuredConverter performs unstructured to Go typed object conversions.
+	DefaultUnstructuredConverter = &unstructuredConverter{
+		mismatchDetection: parseBool(os.Getenv("KUBE_PATCH_CONVERSION_DETECTOR")),
+		comparison: conversion.EqualitiesOrDie(
+			func(a, b time.Time) bool {
+				return a.UTC() == b.UTC()
+			},
+		),
+	}
+)
+
+func parseBool(key string) bool {
+	if len(key) == 0 {
+		return false
+	}
+	value, err := strconv.ParseBool(key)
+	if err != nil {
+		utilruntime.HandleError(fmt.Errorf("Couldn't parse '%s' as bool for unstructured mismatch detection", key))
+	}
+	return value
+}
+
+// unstructuredConverter knows how to convert between interface{} and
+// Unstructured in both ways.
+type unstructuredConverter struct {
+	// If true, we will additionally run conversion via JSON
+	// to verify that the results match.
+	// This is supposed to be set only in tests.
+	mismatchDetection bool
+	// comparison is the default test logic used to compare
+	comparison conversion.Equalities
+}
+
+// NewTestUnstructuredConverter creates an UnstructuredConverter that accepts JSON typed maps and translates them
+// to Go types via reflection. It performs mismatch detection automatically and is intended for use by external
+// test tools. Use DefaultUnstructuredConverter if you do not explicitly need mismatch detection.
+func NewTestUnstructuredConverter(comparison conversion.Equalities) UnstructuredConverter {
+	return &unstructuredConverter{
+		mismatchDetection: true,
+		comparison:        comparison,
+	}
+}
+
+// FromUnstructured converts an object from map[string]interface{} representation into a concrete type.
+// It uses encoding/json.Unmarshaler if the object implements it, or reflection otherwise.
+func (c *unstructuredConverter) FromUnstructured(u map[string]interface{}, obj interface{}) error {
+	t := reflect.TypeOf(obj)
+	value := reflect.ValueOf(obj)
+	if t.Kind() != reflect.Ptr || value.IsNil() {
+		return fmt.Errorf("FromUnstructured requires a non-nil pointer to an object, got %v", t)
+	}
+	err := fromUnstructured(reflect.ValueOf(u), value.Elem())
+	if c.mismatchDetection {
+		newObj := reflect.New(t.Elem()).Interface()
+		newErr := fromUnstructuredViaJSON(u, newObj)
+		if (err != nil) != (newErr != nil) {
+			klog.Fatalf("FromUnstructured unexpected error for %v: error: %v", u, err)
+		}
+		if err == nil && !c.comparison.DeepEqual(obj, newObj) {
+			klog.Fatalf("FromUnstructured mismatch\nobj1: %#v\nobj2: %#v", obj, newObj)
+		}
+	}
+	return err
+}
+
+func fromUnstructuredViaJSON(u map[string]interface{}, obj interface{}) error {
+	data, err := json.Marshal(u)
+	if err != nil {
+		return err
+	}
+	return json.Unmarshal(data, obj)
+}
+
+func fromUnstructured(sv, dv reflect.Value) error {
+	sv = unwrapInterface(sv)
+	if !sv.IsValid() {
+		dv.Set(reflect.Zero(dv.Type()))
+		return nil
+	}
+	st, dt := sv.Type(), dv.Type()
+
+	switch dt.Kind() {
+	case reflect.Map, reflect.Slice, reflect.Ptr, reflect.Struct, reflect.Interface:
+		// Those require non-trivial conversion.
+	default:
+		// This should handle all simple types.
+		if st.AssignableTo(dt) {
+			dv.Set(sv)
+			return nil
+		}
+		// We cannot simply use "ConvertibleTo", as JSON doesn't support conversions
+		// between those four groups: bools, integers, floats and string. We need to
+		// do the same.
+		if st.ConvertibleTo(dt) {
+			switch st.Kind() {
+			case reflect.String:
+				switch dt.Kind() {
+				case reflect.String:
+					dv.Set(sv.Convert(dt))
+					return nil
+				}
+			case reflect.Bool:
+				switch dt.Kind() {
+				case reflect.Bool:
+					dv.Set(sv.Convert(dt))
+					return nil
+				}
+			case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+				reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+				switch dt.Kind() {
+				case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+					reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+					dv.Set(sv.Convert(dt))
+					return nil
+				}
+			case reflect.Float32, reflect.Float64:
+				switch dt.Kind() {
+				case reflect.Float32, reflect.Float64:
+					dv.Set(sv.Convert(dt))
+					return nil
+				}
+				if sv.Float() == math.Trunc(sv.Float()) {
+					dv.Set(sv.Convert(dt))
+					return nil
+				}
+			}
+			return fmt.Errorf("cannot convert %s to %s", st.String(), dt.String())
+		}
+	}
+
+	// Check if the object has a custom JSON marshaller/unmarshaller.
+	if reflect.PtrTo(dt).Implements(unmarshalerType) {
+		data, err := json.Marshal(sv.Interface())
+		if err != nil {
+			return fmt.Errorf("error encoding %s to json: %v", st.String(), err)
+		}
+		unmarshaler := dv.Addr().Interface().(encodingjson.Unmarshaler)
+		return unmarshaler.UnmarshalJSON(data)
+	}
+
+	switch dt.Kind() {
+	case reflect.Map:
+		return mapFromUnstructured(sv, dv)
+	case reflect.Slice:
+		return sliceFromUnstructured(sv, dv)
+	case reflect.Ptr:
+		return pointerFromUnstructured(sv, dv)
+	case reflect.Struct:
+		return structFromUnstructured(sv, dv)
+	case reflect.Interface:
+		return interfaceFromUnstructured(sv, dv)
+	default:
+		return fmt.Errorf("unrecognized type: %v", dt.Kind())
+	}
+}
+
+func fieldInfoFromField(structType reflect.Type, field int) *fieldInfo {
+	fieldCacheMap := fieldCache.value.Load().(fieldsCacheMap)
+	if info, ok := fieldCacheMap[structField{structType, field}]; ok {
+		return info
+	}
+
+	// Cache miss - we need to compute the field name.
+	info := &fieldInfo{}
+	typeField := structType.Field(field)
+	jsonTag := typeField.Tag.Get("json")
+	if len(jsonTag) == 0 {
+		// Make the first character lowercase.
+		if typeField.Name == "" {
+			info.name = typeField.Name
+		} else {
+			info.name = strings.ToLower(typeField.Name[:1]) + typeField.Name[1:]
+		}
+	} else {
+		items := strings.Split(jsonTag, ",")
+		info.name = items[0]
+		for i := range items {
+			if items[i] == "omitempty" {
+				info.omitempty = true
+			}
+		}
+	}
+	info.nameValue = reflect.ValueOf(info.name)
+
+	fieldCache.Lock()
+	defer fieldCache.Unlock()
+	fieldCacheMap = fieldCache.value.Load().(fieldsCacheMap)
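+	// Copy-on-write: build a fresh map and atomically swap it in, so readers
+	// that loaded the old map via atomic.Value never observe a mutation.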
+	newFieldCacheMap := make(fieldsCacheMap)
+	for k, v := range fieldCacheMap {
+		newFieldCacheMap[k] = v
+	}
+	newFieldCacheMap[structField{structType, field}] = info
+	fieldCache.value.Store(newFieldCacheMap)
+	return info
+}
+
+func unwrapInterface(v reflect.Value) reflect.Value {
+	for v.Kind() == reflect.Interface {
+		v = v.Elem()
+	}
+	return v
+}
+
+func mapFromUnstructured(sv, dv reflect.Value) error {
+	st, dt := sv.Type(), dv.Type()
+	if st.Kind() != reflect.Map {
+		return fmt.Errorf("cannot restore map from %v", st.Kind())
+	}
+
+	if !st.Key().AssignableTo(dt.Key()) && !st.Key().ConvertibleTo(dt.Key()) {
+		return fmt.Errorf("cannot copy map with non-assignable keys: %v %v", st.Key(), dt.Key())
+	}
+
+	if sv.IsNil() {
+		dv.Set(reflect.Zero(dt))
+		return nil
+	}
+	dv.Set(reflect.MakeMap(dt))
+	for _, key := range sv.MapKeys() {
+		value := reflect.New(dt.Elem()).Elem()
+		if val := unwrapInterface(sv.MapIndex(key)); val.IsValid() {
+			if err := fromUnstructured(val, value); err != nil {
+				return err
+			}
+		} else {
+			value.Set(reflect.Zero(dt.Elem()))
+		}
+		if st.Key().AssignableTo(dt.Key()) {
+			dv.SetMapIndex(key, value)
+		} else {
+			dv.SetMapIndex(key.Convert(dt.Key()), value)
+		}
+	}
+	return nil
+}
+
+func sliceFromUnstructured(sv, dv reflect.Value) error {
+	st, dt := sv.Type(), dv.Type()
+	if st.Kind() == reflect.String && dt.Elem().Kind() == reflect.Uint8 {
+		// We store original []byte representation as string.
+		// This conversion is allowed, but we need to be careful about
+		// marshaling data appropriately.
+		if len(sv.Interface().(string)) > 0 {
+			marshalled, err := json.Marshal(sv.Interface())
+			if err != nil {
+				return fmt.Errorf("error encoding %s to json: %v", st, err)
+			}
+			// TODO: Is this Unmarshal needed?
+			var data []byte
+			err = json.Unmarshal(marshalled, &data)
+			if err != nil {
+				return fmt.Errorf("error decoding from json: %v", err)
+			}
+			dv.SetBytes(data)
+		} else {
+			dv.Set(reflect.Zero(dt))
+		}
+		return nil
+	}
+	if st.Kind() != reflect.Slice {
+		return fmt.Errorf("cannot restore slice from %v", st.Kind())
+	}
+
+	if sv.IsNil() {
+		dv.Set(reflect.Zero(dt))
+		return nil
+	}
+	dv.Set(reflect.MakeSlice(dt, sv.Len(), sv.Cap()))
+	for i := 0; i < sv.Len(); i++ {
+		if err := fromUnstructured(sv.Index(i), dv.Index(i)); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func pointerFromUnstructured(sv, dv reflect.Value) error {
+	st, dt := sv.Type(), dv.Type()
+
+	if st.Kind() == reflect.Ptr && sv.IsNil() {
+		dv.Set(reflect.Zero(dt))
+		return nil
+	}
+	dv.Set(reflect.New(dt.Elem()))
+	switch st.Kind() {
+	case reflect.Ptr, reflect.Interface:
+		return fromUnstructured(sv.Elem(), dv.Elem())
+	default:
+		return fromUnstructured(sv, dv.Elem())
+	}
+}
+
+func structFromUnstructured(sv, dv reflect.Value) error {
+	st, dt := sv.Type(), dv.Type()
+	if st.Kind() != reflect.Map {
+		return fmt.Errorf("cannot restore struct from: %v", st.Kind())
+	}
+
+	for i := 0; i < dt.NumField(); i++ {
+		fieldInfo := fieldInfoFromField(dt, i)
+		fv := dv.Field(i)
+
+		if len(fieldInfo.name) == 0 {
+			// This field is inlined.
+			if err := fromUnstructured(sv, fv); err != nil {
+				return err
+			}
+		} else {
+			value := unwrapInterface(sv.MapIndex(fieldInfo.nameValue))
+			if value.IsValid() {
+				if err := fromUnstructured(value, fv); err != nil {
+					return err
+				}
+			} else {
+				fv.Set(reflect.Zero(fv.Type()))
+			}
+		}
+	}
+	return nil
+}
+
+func interfaceFromUnstructured(sv, dv reflect.Value) error {
+	// TODO: Is this conversion safe?
+	dv.Set(sv)
+	return nil
+}
+
+// ToUnstructured converts an object into map[string]interface{} representation.
+// It uses encoding/json.Marshaler if the object implements it, or reflection otherwise.
+func (c *unstructuredConverter) ToUnstructured(obj interface{}) (map[string]interface{}, error) {
+	var u map[string]interface{}
+	var err error
+	if unstr, ok := obj.(Unstructured); ok {
+		u = unstr.UnstructuredContent()
+	} else {
+		t := reflect.TypeOf(obj)
+		value := reflect.ValueOf(obj)
+		if t.Kind() != reflect.Ptr || value.IsNil() {
+			return nil, fmt.Errorf("ToUnstructured requires a non-nil pointer to an object, got %v", t)
+		}
+		u = map[string]interface{}{}
+		err = toUnstructured(value.Elem(), reflect.ValueOf(&u).Elem())
+	}
+	if c.mismatchDetection {
+		newUnstr := map[string]interface{}{}
+		newErr := toUnstructuredViaJSON(obj, &newUnstr)
+		if (err != nil) != (newErr != nil) {
+			klog.Fatalf("ToUnstructured unexpected error for %v: error: %v; newErr: %v", obj, err, newErr)
+		}
+		if err == nil && !c.comparison.DeepEqual(u, newUnstr) {
+			klog.Fatalf("ToUnstructured mismatch\nobj1: %#v\nobj2: %#v", u, newUnstr)
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+	return u, nil
+}
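+
+// Example (illustrative, with a hypothetical typed struct):
+//
+//	type Widget struct {
+//		Name string `json:"name"`
+//	}
+//
+//	u, err := DefaultUnstructuredConverter.ToUnstructured(&Widget{Name: "a"})
+//	// u == map[string]interface{}{"name": "a"}
+//	var w Widget
+//	err = DefaultUnstructuredConverter.FromUnstructured(u, &w)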
+
+// DeepCopyJSON deep copies the passed value, assuming it is a valid JSON representation,
+// i.e. it only contains types produced by json.Unmarshal() plus int64:
+// bool, int64, float64, string, []interface{}, map[string]interface{}, json.Number and nil.
+func DeepCopyJSON(x map[string]interface{}) map[string]interface{} {
+	return DeepCopyJSONValue(x).(map[string]interface{})
+}
+
+// DeepCopyJSONValue deep copies the passed value, assuming it is a valid JSON representation,
+// i.e. it only contains types produced by json.Unmarshal() plus int64:
+// bool, int64, float64, string, []interface{}, map[string]interface{}, json.Number and nil.
+func DeepCopyJSONValue(x interface{}) interface{} {
+	switch x := x.(type) {
+	case map[string]interface{}:
+		if x == nil {
+			// Typed nil - an interface{} that contains a type map[string]interface{} with a value of nil
+			return x
+		}
+		clone := make(map[string]interface{}, len(x))
+		for k, v := range x {
+			clone[k] = DeepCopyJSONValue(v)
+		}
+		return clone
+	case []interface{}:
+		if x == nil {
+			// Typed nil - an interface{} that contains a type []interface{} with a value of nil
+			return x
+		}
+		clone := make([]interface{}, len(x))
+		for i, v := range x {
+			clone[i] = DeepCopyJSONValue(v)
+		}
+		return clone
+	case string, int64, bool, float64, nil, encodingjson.Number:
+		return x
+	default:
+		panic(fmt.Errorf("cannot deep copy %T", x))
+	}
+}
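+
+// For example (illustrative):
+//
+//	orig := map[string]interface{}{"spec": []interface{}{int64(1), "a"}}
+//	clone := DeepCopyJSON(orig)
+//	// clone shares no memory with orig; mutating one leaves the other intact.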
+
+func toUnstructuredViaJSON(obj interface{}, u *map[string]interface{}) error {
+	data, err := json.Marshal(obj)
+	if err != nil {
+		return err
+	}
+	return json.Unmarshal(data, u)
+}
+
+var (
+	nullBytes  = []byte("null")
+	trueBytes  = []byte("true")
+	falseBytes = []byte("false")
+)
+
+func getMarshaler(v reflect.Value) (encodingjson.Marshaler, bool) {
+	// Check value receivers if v is not a pointer and pointer receivers if v is a pointer
+	if v.Type().Implements(marshalerType) {
+		return v.Interface().(encodingjson.Marshaler), true
+	}
+	// Check pointer receivers if v is not a pointer
+	if v.Kind() != reflect.Ptr && v.CanAddr() {
+		v = v.Addr()
+		if v.Type().Implements(marshalerType) {
+			return v.Interface().(encodingjson.Marshaler), true
+		}
+	}
+	return nil, false
+}
+
+func toUnstructured(sv, dv reflect.Value) error {
+	// Check if the object has a custom JSON marshaller/unmarshaller.
+	if marshaler, ok := getMarshaler(sv); ok {
+		if sv.Kind() == reflect.Ptr && sv.IsNil() {
+			// We're done - we don't need to store anything.
+			return nil
+		}
+
+		data, err := marshaler.MarshalJSON()
+		if err != nil {
+			return err
+		}
+		switch {
+		case len(data) == 0:
+			return fmt.Errorf("error decoding from json: empty value")
+
+		case bytes.Equal(data, nullBytes):
+			// We're done - we don't need to store anything.
+
+		case bytes.Equal(data, trueBytes):
+			dv.Set(reflect.ValueOf(true))
+
+		case bytes.Equal(data, falseBytes):
+			dv.Set(reflect.ValueOf(false))
+
+		case data[0] == '"':
+			var result string
+			err := json.Unmarshal(data, &result)
+			if err != nil {
+				return fmt.Errorf("error decoding string from json: %v", err)
+			}
+			dv.Set(reflect.ValueOf(result))
+
+		case data[0] == '{':
+			result := make(map[string]interface{})
+			err := json.Unmarshal(data, &result)
+			if err != nil {
+				return fmt.Errorf("error decoding object from json: %v", err)
+			}
+			dv.Set(reflect.ValueOf(result))
+
+		case data[0] == '[':
+			result := make([]interface{}, 0)
+			err := json.Unmarshal(data, &result)
+			if err != nil {
+				return fmt.Errorf("error decoding array from json: %v", err)
+			}
+			dv.Set(reflect.ValueOf(result))
+
+		default:
+			var (
+				resultInt   int64
+				resultFloat float64
+				err         error
+			)
+			if err = json.Unmarshal(data, &resultInt); err == nil {
+				dv.Set(reflect.ValueOf(resultInt))
+			} else if err = json.Unmarshal(data, &resultFloat); err == nil {
+				dv.Set(reflect.ValueOf(resultFloat))
+			} else {
+				return fmt.Errorf("error decoding number from json: %v", err)
+			}
+		}
+
+		return nil
+	}
+
+	st, dt := sv.Type(), dv.Type()
+	switch st.Kind() {
+	case reflect.String:
+		if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 {
+			dv.Set(reflect.New(stringType))
+		}
+		dv.Set(reflect.ValueOf(sv.String()))
+		return nil
+	case reflect.Bool:
+		if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 {
+			dv.Set(reflect.New(boolType))
+		}
+		dv.Set(reflect.ValueOf(sv.Bool()))
+		return nil
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 {
+			dv.Set(reflect.New(int64Type))
+		}
+		dv.Set(reflect.ValueOf(sv.Int()))
+		return nil
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		uVal := sv.Uint()
+		if uVal > math.MaxInt64 {
+			return fmt.Errorf("unsigned value %d does not fit into int64 (overflow)", uVal)
+		}
+		if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 {
+			dv.Set(reflect.New(int64Type))
+		}
+		dv.Set(reflect.ValueOf(int64(uVal)))
+		return nil
+	case reflect.Float32, reflect.Float64:
+		if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 {
+			dv.Set(reflect.New(float64Type))
+		}
+		dv.Set(reflect.ValueOf(sv.Float()))
+		return nil
+	case reflect.Map:
+		return mapToUnstructured(sv, dv)
+	case reflect.Slice:
+		return sliceToUnstructured(sv, dv)
+	case reflect.Ptr:
+		return pointerToUnstructured(sv, dv)
+	case reflect.Struct:
+		return structToUnstructured(sv, dv)
+	case reflect.Interface:
+		return interfaceToUnstructured(sv, dv)
+	default:
+		return fmt.Errorf("unrecognized type: %v", st.Kind())
+	}
+}
+
+func mapToUnstructured(sv, dv reflect.Value) error {
+	st, dt := sv.Type(), dv.Type()
+	if sv.IsNil() {
+		dv.Set(reflect.Zero(dt))
+		return nil
+	}
+	if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 {
+		if st.Key().Kind() == reflect.String {
+			switch st.Elem().Kind() {
+			// TODO It should be possible to reuse the slice for primitive types.
+			// However, it panics in the following form.
+			// case reflect.String, reflect.Bool,
+			// 	reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+			// 	reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+			// 	sv.Set(sv)
+			// 	return nil
+			default:
+				// We need to do a proper conversion.
+			}
+		}
+		dv.Set(reflect.MakeMap(mapStringInterfaceType))
+		dv = dv.Elem()
+		dt = dv.Type()
+	}
+	if dt.Kind() != reflect.Map {
+		return fmt.Errorf("cannot convert struct to: %v", dt.Kind())
+	}
+
+	if !st.Key().AssignableTo(dt.Key()) && !st.Key().ConvertibleTo(dt.Key()) {
+		return fmt.Errorf("cannot copy map with non-assignable keys: %v %v", st.Key(), dt.Key())
+	}
+
+	for _, key := range sv.MapKeys() {
+		value := reflect.New(dt.Elem()).Elem()
+		if err := toUnstructured(sv.MapIndex(key), value); err != nil {
+			return err
+		}
+		if st.Key().AssignableTo(dt.Key()) {
+			dv.SetMapIndex(key, value)
+		} else {
+			dv.SetMapIndex(key.Convert(dt.Key()), value)
+		}
+	}
+	return nil
+}
+
+func sliceToUnstructured(sv, dv reflect.Value) error {
+	st, dt := sv.Type(), dv.Type()
+	if sv.IsNil() {
+		dv.Set(reflect.Zero(dt))
+		return nil
+	}
+	if st.Elem().Kind() == reflect.Uint8 {
+		dv.Set(reflect.New(stringType))
+		data, err := json.Marshal(sv.Bytes())
+		if err != nil {
+			return err
+		}
+		var result string
+		if err = json.Unmarshal(data, &result); err != nil {
+			return err
+		}
+		dv.Set(reflect.ValueOf(result))
+		return nil
+	}
+	if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 {
+		switch st.Elem().Kind() {
+		// TODO It should be possible to reuse the slice for primitive types.
+		// However, it panics in the following form.
+		// case reflect.String, reflect.Bool,
+		// 	reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+		// 	reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		// 	sv.Set(sv)
+		// 	return nil
+		default:
+			// We need to do a proper conversion.
+			dv.Set(reflect.MakeSlice(reflect.SliceOf(dt), sv.Len(), sv.Cap()))
+			dv = dv.Elem()
+			dt = dv.Type()
+		}
+	}
+	if dt.Kind() != reflect.Slice {
+		return fmt.Errorf("cannot convert slice to: %v", dt.Kind())
+	}
+	for i := 0; i < sv.Len(); i++ {
+		if err := toUnstructured(sv.Index(i), dv.Index(i)); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func pointerToUnstructured(sv, dv reflect.Value) error {
+	if sv.IsNil() {
+		// We're done - we don't need to store anything.
+		return nil
+	}
+	return toUnstructured(sv.Elem(), dv)
+}
+
+func isZero(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Array, reflect.String:
+		return v.Len() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Map, reflect.Slice:
+		// TODO: it seems that 0-len maps are treated as zero here, so omitempty drops them.
+		return v.IsNil() || v.Len() == 0
+	case reflect.Ptr, reflect.Interface:
+		return v.IsNil()
+	}
+	return false
+}
+
+func structToUnstructured(sv, dv reflect.Value) error {
+	st, dt := sv.Type(), dv.Type()
+	if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 {
+		dv.Set(reflect.MakeMap(mapStringInterfaceType))
+		dv = dv.Elem()
+		dt = dv.Type()
+	}
+	if dt.Kind() != reflect.Map {
+		return fmt.Errorf("cannot convert struct to: %v", dt.Kind())
+	}
+	realMap := dv.Interface().(map[string]interface{})
+
+	for i := 0; i < st.NumField(); i++ {
+		fieldInfo := fieldInfoFromField(st, i)
+		fv := sv.Field(i)
+
+		if fieldInfo.name == "-" {
+			// This field should be skipped.
+			continue
+		}
+		if fieldInfo.omitempty && isZero(fv) {
+			// omitempty fields should be ignored.
+			continue
+		}
+		if len(fieldInfo.name) == 0 {
+			// This field is inlined.
+			if err := toUnstructured(fv, dv); err != nil {
+				return err
+			}
+			continue
+		}
+		switch fv.Type().Kind() {
+		case reflect.String:
+			realMap[fieldInfo.name] = fv.String()
+		case reflect.Bool:
+			realMap[fieldInfo.name] = fv.Bool()
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+			realMap[fieldInfo.name] = fv.Int()
+		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+			realMap[fieldInfo.name] = fv.Uint()
+		case reflect.Float32, reflect.Float64:
+			realMap[fieldInfo.name] = fv.Float()
+		default:
+			subv := reflect.New(dt.Elem()).Elem()
+			if err := toUnstructured(fv, subv); err != nil {
+				return err
+			}
+			dv.SetMapIndex(fieldInfo.nameValue, subv)
+		}
+	}
+	return nil
+}
+
+func interfaceToUnstructured(sv, dv reflect.Value) error {
+	if !sv.IsValid() || sv.IsNil() {
+		dv.Set(reflect.Zero(dv.Type()))
+		return nil
+	}
+	return toUnstructured(sv.Elem(), dv)
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/doc.go b/vendor/k8s.io/apimachinery/pkg/runtime/doc.go
new file mode 100644
index 0000000..89feb40
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/doc.go
@@ -0,0 +1,51 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package runtime includes helper functions for working with API objects
+// that follow the kubernetes API object conventions, which are:
+//
+// 0. Your API objects have a common metadata struct member, TypeMeta.
+//
+// 1. Your code refers to an internal set of API objects.
+//
+// 2. In a separate package, you have an external set of API objects.
+//
+// 3. The external set is considered to be versioned, and no breaking
+// changes are ever made to it (fields may be added but not changed
+// or removed).
+//
+// 4. As your api evolves, you'll make an additional versioned package
+// with every major change.
+//
+// 5. Versioned packages have conversion functions which convert to
+// and from the internal version.
+//
+// 6. You'll continue to support older versions according to your
+// deprecation policy, and you can easily provide a program/library
+// to update old versions into new versions because of 5.
+//
+// 7. All of your serializations and deserializations are handled in a
+// centralized place.
+//
+// Package runtime provides a conversion helper to make 5 easy, and the
+// Encode/Decode/DecodeInto trio to accomplish 7. You can also register
+// additional "codecs" which use a version of your choice. It's
+// recommended that you register your types with runtime in your
+// package's init function.
+//
+// As a bonus, a few common types useful from all api objects and versions
+// are provided in types.go.
+package runtime // import "k8s.io/apimachinery/pkg/runtime"
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/embedded.go b/vendor/k8s.io/apimachinery/pkg/runtime/embedded.go
new file mode 100644
index 0000000..db11eb8
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/embedded.go
@@ -0,0 +1,142 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package runtime
+
+import (
+	"errors"
+
+	"k8s.io/apimachinery/pkg/conversion"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+type encodable struct {
+	E        Encoder `json:"-"`
+	obj      Object
+	versions []schema.GroupVersion
+}
+
+func (e encodable) GetObjectKind() schema.ObjectKind { return e.obj.GetObjectKind() }
+func (e encodable) DeepCopyObject() Object {
+	out := e
+	out.obj = e.obj.DeepCopyObject()
+	out.versions = make([]schema.GroupVersion, len(e.versions))
+	copy(out.versions, e.versions)
+	return out
+}
+
+// NewEncodable creates an object that will be encoded with the provided codec on demand.
+// Provided as a convenience for test cases dealing with internal objects.
+func NewEncodable(e Encoder, obj Object, versions ...schema.GroupVersion) Object {
+	if _, ok := obj.(*Unknown); ok {
+		return obj
+	}
+	return encodable{e, obj, versions}
+}
+
+func (e encodable) UnmarshalJSON(in []byte) error {
+	return errors.New("runtime.encodable cannot be unmarshalled from JSON")
+}
+
+// Marshal may get called on pointers or values, so implement MarshalJSON on value.
+// http://stackoverflow.com/questions/21390979/custom-marshaljson-never-gets-called-in-go
+func (e encodable) MarshalJSON() ([]byte, error) {
+	return Encode(e.E, e.obj)
+}
+
+// NewEncodableList creates an object that will be encoded with the provided codec on demand.
+// Provided as a convenience for test cases dealing with internal objects.
+func NewEncodableList(e Encoder, objects []Object, versions ...schema.GroupVersion) []Object {
+	out := make([]Object, len(objects))
+	for i := range objects {
+		if _, ok := objects[i].(*Unknown); ok {
+			out[i] = objects[i]
+			continue
+		}
+		out[i] = NewEncodable(e, objects[i], versions...)
+	}
+	return out
+}
+
+func (e *Unknown) UnmarshalJSON(in []byte) error {
+	if e == nil {
+		return errors.New("runtime.Unknown: UnmarshalJSON on nil pointer")
+	}
+	e.TypeMeta = TypeMeta{}
+	e.Raw = append(e.Raw[0:0], in...)
+	e.ContentEncoding = ""
+	e.ContentType = ContentTypeJSON
+	return nil
+}
+
+// Marshal may get called on pointers or values, so implement MarshalJSON on value.
+// http://stackoverflow.com/questions/21390979/custom-marshaljson-never-gets-called-in-go
+func (e Unknown) MarshalJSON() ([]byte, error) {
+	// If ContentType is unset, we assume this is JSON.
+	if e.ContentType != "" && e.ContentType != ContentTypeJSON {
+		return nil, errors.New("runtime.Unknown: MarshalJSON on non-json data")
+	}
+	if e.Raw == nil {
+		return []byte("null"), nil
+	}
+	return e.Raw, nil
+}
+
+func Convert_runtime_Object_To_runtime_RawExtension(in *Object, out *RawExtension, s conversion.Scope) error {
+	if in == nil {
+		out.Raw = []byte("null")
+		return nil
+	}
+	obj := *in
+	if unk, ok := obj.(*Unknown); ok {
+		if unk.Raw != nil {
+			out.Raw = unk.Raw
+			return nil
+		}
+		obj = out.Object
+	}
+	if obj == nil {
+		out.Raw = nil
+		return nil
+	}
+	out.Object = obj
+	return nil
+}
+
+func Convert_runtime_RawExtension_To_runtime_Object(in *RawExtension, out *Object, s conversion.Scope) error {
+	if in.Object != nil {
+		*out = in.Object
+		return nil
+	}
+	data := in.Raw
+	if len(data) == 0 || (len(data) == 4 && string(data) == "null") {
+		*out = nil
+		return nil
+	}
+	*out = &Unknown{
+		Raw: data,
+		// TODO: Set ContentEncoding and ContentType appropriately.
+		// Currently we set ContentTypeJSON to make tests passing.
+		ContentType: ContentTypeJSON,
+	}
+	return nil
+}
+
+func DefaultEmbeddedConversions() []interface{} {
+	return []interface{}{
+		Convert_runtime_Object_To_runtime_RawExtension,
+		Convert_runtime_RawExtension_To_runtime_Object,
+	}
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/error.go b/vendor/k8s.io/apimachinery/pkg/runtime/error.go
new file mode 100644
index 0000000..322b031
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/error.go
@@ -0,0 +1,122 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package runtime
+
+import (
+	"fmt"
+	"reflect"
+
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+type notRegisteredErr struct {
+	schemeName string
+	gvk        schema.GroupVersionKind
+	target     GroupVersioner
+	t          reflect.Type
+}
+
+func NewNotRegisteredErrForKind(schemeName string, gvk schema.GroupVersionKind) error {
+	return &notRegisteredErr{schemeName: schemeName, gvk: gvk}
+}
+
+func NewNotRegisteredErrForType(schemeName string, t reflect.Type) error {
+	return &notRegisteredErr{schemeName: schemeName, t: t}
+}
+
+func NewNotRegisteredErrForTarget(schemeName string, t reflect.Type, target GroupVersioner) error {
+	return &notRegisteredErr{schemeName: schemeName, t: t, target: target}
+}
+
+func NewNotRegisteredGVKErrForTarget(schemeName string, gvk schema.GroupVersionKind, target GroupVersioner) error {
+	return &notRegisteredErr{schemeName: schemeName, gvk: gvk, target: target}
+}
+
+func (k *notRegisteredErr) Error() string {
+	if k.t != nil && k.target != nil {
+		return fmt.Sprintf("%v is not suitable for converting to %q in scheme %q", k.t, k.target, k.schemeName)
+	}
+	nullGVK := schema.GroupVersionKind{}
+	if k.gvk != nullGVK && k.target != nil {
+		return fmt.Sprintf("%q is not suitable for converting to %q in scheme %q", k.gvk.GroupVersion(), k.target, k.schemeName)
+	}
+	if k.t != nil {
+		return fmt.Sprintf("no kind is registered for the type %v in scheme %q", k.t, k.schemeName)
+	}
+	if len(k.gvk.Kind) == 0 {
+		return fmt.Sprintf("no version %q has been registered in scheme %q", k.gvk.GroupVersion(), k.schemeName)
+	}
+	if k.gvk.Version == APIVersionInternal {
+		return fmt.Sprintf("no kind %q is registered for the internal version of group %q in scheme %q", k.gvk.Kind, k.gvk.Group, k.schemeName)
+	}
+
+	return fmt.Sprintf("no kind %q is registered for version %q in scheme %q", k.gvk.Kind, k.gvk.GroupVersion(), k.schemeName)
+}
+
+// IsNotRegisteredError returns true if the error indicates the provided
+// object or input data is not registered.
+func IsNotRegisteredError(err error) bool {
+	if err == nil {
+		return false
+	}
+	_, ok := err.(*notRegisteredErr)
+	return ok
+}
+
+type missingKindErr struct {
+	data string
+}
+
+func NewMissingKindErr(data string) error {
+	return &missingKindErr{data}
+}
+
+func (k *missingKindErr) Error() string {
+	return fmt.Sprintf("Object 'Kind' is missing in '%s'", k.data)
+}
+
+// IsMissingKind returns true if the error indicates that the provided object
+// is missing a 'Kind' field.
+func IsMissingKind(err error) bool {
+	if err == nil {
+		return false
+	}
+	_, ok := err.(*missingKindErr)
+	return ok
+}
+
+type missingVersionErr struct {
+	data string
+}
+
+func NewMissingVersionErr(data string) error {
+	return &missingVersionErr{data}
+}
+
+func (k *missingVersionErr) Error() string {
+	return fmt.Sprintf("Object 'apiVersion' is missing in '%s'", k.data)
+}
+
+// IsMissingVersion returns true if the error indicates that the provided object
+// is missing a 'Version' field.
+func IsMissingVersion(err error) bool {
+	if err == nil {
+		return false
+	}
+	_, ok := err.(*missingVersionErr)
+	return ok
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/extension.go b/vendor/k8s.io/apimachinery/pkg/runtime/extension.go
new file mode 100644
index 0000000..9056397
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/extension.go
@@ -0,0 +1,51 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package runtime
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+)
+
+func (re *RawExtension) UnmarshalJSON(in []byte) error {
+	if re == nil {
+		return errors.New("runtime.RawExtension: UnmarshalJSON on nil pointer")
+	}
+	if !bytes.Equal(in, []byte("null")) {
+		re.Raw = append(re.Raw[0:0], in...)
+	}
+	return nil
+}
+
+// MarshalJSON may get called on pointers or values, so implement MarshalJSON on value.
+// http://stackoverflow.com/questions/21390979/custom-marshaljson-never-gets-called-in-go
+func (re RawExtension) MarshalJSON() ([]byte, error) {
+	if re.Raw == nil {
+		// TODO: this is to support legacy behavior of JSONPrinter and YAMLPrinter, which
+		// expect to call json.Marshal on arbitrary versioned objects (even those not in
+		// the scheme). pkg/kubectl/resource#AsVersionedObjects and its interaction with
+		// kubectl get on objects not in the scheme needs to be updated to ensure that the
+		// objects that are not part of the scheme are correctly put into the right form.
+		if re.Object != nil {
+			return json.Marshal(re.Object)
+		}
+		return []byte("null"), nil
+	}
+	// TODO: Check whether ContentType is actually JSON before returning it.
+	return re.Raw, nil
+}
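+
+// A small round-trip sketch of the behavior above: UnmarshalJSON stores the
+// raw bytes verbatim (unless the input is "null"), and MarshalJSON echoes
+// them back unchanged when no Object is set.
+//
+//	var re RawExtension
+//	_ = json.Unmarshal([]byte(`{"kind":"PluginA","aOption":"foo"}`), &re)
+//	out, _ := json.Marshal(re)
+//	// string(out) == `{"kind":"PluginA","aOption":"foo"}`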
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto b/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto
new file mode 100644
index 0000000..0e212ec
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto
@@ -0,0 +1,127 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.apimachinery.pkg.runtime;
+
+// Package-wide variables from generator "generated".
+option go_package = "runtime";
+
+// RawExtension is used to hold extensions in external versions.
+//
+// To use this, make a field which has RawExtension as its type in your external, versioned
+// struct, and Object in your internal struct. You also need to register your
+// various plugin types.
+//
+// // Internal package:
+// type MyAPIObject struct {
+// 	runtime.TypeMeta `json:",inline"`
+// 	MyPlugin runtime.Object `json:"myPlugin"`
+// }
+// type PluginA struct {
+// 	AOption string `json:"aOption"`
+// }
+//
+// // External package:
+// type MyAPIObject struct {
+// 	runtime.TypeMeta `json:",inline"`
+// 	MyPlugin runtime.RawExtension `json:"myPlugin"`
+// }
+// type PluginA struct {
+// 	AOption string `json:"aOption"`
+// }
+//
+// // On the wire, the JSON will look something like this:
+// {
+// 	"kind":"MyAPIObject",
+// 	"apiVersion":"v1",
+// 	"myPlugin": {
+// 		"kind":"PluginA",
+// 		"aOption":"foo",
+// 	},
+// }
+//
+// So what happens? Decode first uses json or yaml to unmarshal the serialized data into
+// your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked.
+// The next step is to copy (using pkg/conversion) into the internal struct. The runtime
+// package's DefaultScheme has conversion functions installed which will unpack the
+// JSON stored in RawExtension, turning it into the correct object type, and storing it
+// in the Object. (TODO: In the case where the object is of an unknown type, a
+// runtime.Unknown object will be created and stored.)
+//
+// +k8s:deepcopy-gen=true
+// +protobuf=true
+// +k8s:openapi-gen=true
+message RawExtension {
+  // Raw is the underlying serialization of this object.
+  //
+  // TODO: Determine how to detect ContentType and ContentEncoding of 'Raw' data.
+  optional bytes raw = 1;
+}
+
+// TypeMeta is shared by all top level objects. The proper way to use it is to inline it in your type,
+// like this:
+// type MyAwesomeAPIObject struct {
+//      runtime.TypeMeta    `json:",inline"`
+//      ... // other fields
+// }
+// func (obj *MyAwesomeAPIObject) SetGroupVersionKind(gvk *metav1.GroupVersionKind) { metav1.UpdateTypeMeta(obj, gvk) }
+// func (obj *MyAwesomeAPIObject) GroupVersionKind() *GroupVersionKind
+//
+// TypeMeta is provided here for convenience. You may use it directly from this package or define
+// your own with the same fields.
+//
+// +k8s:deepcopy-gen=false
+// +protobuf=true
+// +k8s:openapi-gen=true
+message TypeMeta {
+  // +optional
+  optional string apiVersion = 1;
+
+  // +optional
+  optional string kind = 2;
+}
+
+// Unknown allows api objects with unknown types to be passed through. This can be used
+// to deal with the API objects from a plug-in. Unknown objects still have functioning
+// TypeMeta features (kind, version, etc.).
+// TODO: Make this object have easy access to field based accessors and setters for
+// metadata and field mutation.
+//
+// +k8s:deepcopy-gen=true
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +protobuf=true
+// +k8s:openapi-gen=true
+message Unknown {
+  optional TypeMeta typeMeta = 1;
+
+  // Raw will hold the complete serialized object which couldn't be matched
+  // with a registered type. Most likely, nothing should be done with this
+  // except for passing it through the system.
+  optional bytes raw = 2;
+
+  // ContentEncoding is the encoding used to encode 'Raw' data.
+  // Unspecified means no encoding.
+  optional string contentEncoding = 3;
+
+  // ContentType is the serialization method used to serialize 'Raw'.
+  // Unspecified means ContentTypeJSON.
+  optional string contentType = 4;
+}
+
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/helper.go b/vendor/k8s.io/apimachinery/pkg/runtime/helper.go
new file mode 100644
index 0000000..33f11eb
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/helper.go
@@ -0,0 +1,212 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package runtime
+
+import (
+	"fmt"
+	"io"
+	"reflect"
+
+	"k8s.io/apimachinery/pkg/conversion"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/util/errors"
+)
+
+// unsafeObjectConvertor implements ObjectConvertor using the unsafe conversion path.
+type unsafeObjectConvertor struct {
+	*Scheme
+}
+
+var _ ObjectConvertor = unsafeObjectConvertor{}
+
+// ConvertToVersion converts in to the provided outVersion without copying the input first, which
+// is only safe if the output object is not mutated or reused.
+func (c unsafeObjectConvertor) ConvertToVersion(in Object, outVersion GroupVersioner) (Object, error) {
+	return c.Scheme.UnsafeConvertToVersion(in, outVersion)
+}
+
+// UnsafeObjectConvertor performs object conversion without copying the object structure,
+// for use when the converted object will not be reused or mutated. Primarily for use within
+// versioned codecs, which use the external object for serialization but do not return it.
+func UnsafeObjectConvertor(scheme *Scheme) ObjectConvertor {
+	return unsafeObjectConvertor{scheme}
+}
+
+// SetField puts the value of src into fieldName, which must be a member of v.
+// The value of src must be assignable to the field.
+func SetField(src interface{}, v reflect.Value, fieldName string) error {
+	field := v.FieldByName(fieldName)
+	if !field.IsValid() {
+		return fmt.Errorf("couldn't find %v field in %#v", fieldName, v.Interface())
+	}
+	srcValue := reflect.ValueOf(src)
+	if srcValue.Type().AssignableTo(field.Type()) {
+		field.Set(srcValue)
+		return nil
+	}
+	if srcValue.Type().ConvertibleTo(field.Type()) {
+		field.Set(srcValue.Convert(field.Type()))
+		return nil
+	}
+	return fmt.Errorf("couldn't assign/convert %v to %v", srcValue.Type(), field.Type())
+}
+
+// Field puts the value of fieldName, which must be a member of v, into dest,
+// which must be a variable to which this field's value can be assigned.
+func Field(v reflect.Value, fieldName string, dest interface{}) error {
+	field := v.FieldByName(fieldName)
+	if !field.IsValid() {
+		return fmt.Errorf("couldn't find %v field in %#v", fieldName, v.Interface())
+	}
+	destValue, err := conversion.EnforcePtr(dest)
+	if err != nil {
+		return err
+	}
+	if field.Type().AssignableTo(destValue.Type()) {
+		destValue.Set(field)
+		return nil
+	}
+	if field.Type().ConvertibleTo(destValue.Type()) {
+		destValue.Set(field.Convert(destValue.Type()))
+		return nil
+	}
+	return fmt.Errorf("couldn't assign/convert %v to %v", field.Type(), destValue.Type())
+}
+
+// FieldPtr puts the address of fieldName, which must be a member of v,
+// into dest, which must be an address of a variable to which this field's
+// address can be assigned.
+func FieldPtr(v reflect.Value, fieldName string, dest interface{}) error {
+	field := v.FieldByName(fieldName)
+	if !field.IsValid() {
+		return fmt.Errorf("couldn't find %v field in %#v", fieldName, v.Interface())
+	}
+	v, err := conversion.EnforcePtr(dest)
+	if err != nil {
+		return err
+	}
+	field = field.Addr()
+	if field.Type().AssignableTo(v.Type()) {
+		v.Set(field)
+		return nil
+	}
+	if field.Type().ConvertibleTo(v.Type()) {
+		v.Set(field.Convert(v.Type()))
+		return nil
+	}
+	return fmt.Errorf("couldn't assign/convert %v to %v", field.Type(), v.Type())
+}
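+
+// A short sketch of the helpers above with a throwaway struct (widget is
+// hypothetical, for illustration only): Field reads a named field into dest,
+// and SetField writes src into a named field of an addressable value.
+//
+//	type widget struct{ Name string }
+//	w := widget{Name: "a"}
+//	var got string
+//	_ = Field(reflect.ValueOf(w), "Name", &got)           // got == "a"
+//	_ = SetField("b", reflect.ValueOf(&w).Elem(), "Name") // w.Name == "b"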
+
+// EncodeList ensures that each object in the slice is converted to an Unknown{} in serialized form.
+// TODO: accept a content type.
+func EncodeList(e Encoder, objects []Object) error {
+	var errs []error
+	for i := range objects {
+		data, err := Encode(e, objects[i])
+		if err != nil {
+			errs = append(errs, err)
+			continue
+		}
+		// TODO: Set ContentEncoding and ContentType.
+		objects[i] = &Unknown{Raw: data}
+	}
+	return errors.NewAggregate(errs)
+}
+
+func decodeListItem(obj *Unknown, decoders []Decoder) (Object, error) {
+	for _, decoder := range decoders {
+		// TODO: Decode based on ContentType.
+		obj, err := Decode(decoder, obj.Raw)
+		if err != nil {
+			if IsNotRegisteredError(err) {
+				continue
+			}
+			return nil, err
+		}
+		return obj, nil
+	}
+	// could not decode, so leave the object as Unknown, but give the decoders the
+	// chance to set Unknown.TypeMeta if it is available.
+	for _, decoder := range decoders {
+		if err := DecodeInto(decoder, obj.Raw, obj); err == nil {
+			return obj, nil
+		}
+	}
+	return obj, nil
+}
+
+// DecodeList alters the list in place, attempting to decode any objects found in
+// the list that have the Unknown type. Any errors that occur are returned
+// after the entire list is processed. Decoders are tried in order.
+func DecodeList(objects []Object, decoders ...Decoder) []error {
+	errs := []error(nil)
+	for i, obj := range objects {
+		switch t := obj.(type) {
+		case *Unknown:
+			decoded, err := decodeListItem(t, decoders)
+			if err != nil {
+				errs = append(errs, err)
+				break
+			}
+			objects[i] = decoded
+		}
+	}
+	return errs
+}
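+
+// A sketch of the list flow above (encoder and decoder stand in for any
+// Encoder/Decoder obtained elsewhere): EncodeList wraps each object as
+// *Unknown, and DecodeList later replaces, in place, each *Unknown that a
+// decoder recognizes; unrecognized items are left as *Unknown without error.
+//
+//	items := []Object{objA, objB}
+//	_ = EncodeList(encoder, items) // items now hold *Unknown values
+//	errs := DecodeList(items, decoder)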
+
+// MultiObjectTyper returns the types of objects across multiple schemes in order.
+type MultiObjectTyper []ObjectTyper
+
+var _ ObjectTyper = MultiObjectTyper{}
+
+func (m MultiObjectTyper) ObjectKinds(obj Object) (gvks []schema.GroupVersionKind, unversionedType bool, err error) {
+	for _, t := range m {
+		gvks, unversionedType, err = t.ObjectKinds(obj)
+		if err == nil {
+			return
+		}
+	}
+	return
+}
+
+func (m MultiObjectTyper) Recognizes(gvk schema.GroupVersionKind) bool {
+	for _, t := range m {
+		if t.Recognizes(gvk) {
+			return true
+		}
+	}
+	return false
+}
+
+// SetZeroValue sets the object pointed to by objPtr to the zero value of its type.
+func SetZeroValue(objPtr Object) error {
+	v, err := conversion.EnforcePtr(objPtr)
+	if err != nil {
+		return err
+	}
+	v.Set(reflect.Zero(v.Type()))
+	return nil
+}
+
+// DefaultFramer is valid for any stream that can read objects serially without
+// any separation in the stream.
+var DefaultFramer = defaultFramer{}
+
+type defaultFramer struct{}
+
+func (defaultFramer) NewFrameReader(r io.ReadCloser) io.ReadCloser { return r }
+func (defaultFramer) NewFrameWriter(w io.Writer) io.Writer         { return w }
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go
new file mode 100644
index 0000000..699ff13
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go
@@ -0,0 +1,252 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package runtime
+
+import (
+	"io"
+	"net/url"
+
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+const (
+	// APIVersionInternal may be used if you are registering a type that should not
+	// be considered stable or serialized - it is a convention only and has no
+	// special behavior in this package.
+	APIVersionInternal = "__internal"
+)
+
+// GroupVersioner refines a set of possible conversion targets into a single option.
+type GroupVersioner interface {
+	// KindForGroupVersionKinds returns a desired target group version kind for the given input, or returns ok false if no
+	// target is known. In general, if the return target is not in the input list, the caller is expected to invoke
+	// Scheme.New(target) and then perform a conversion between the current Go type and the destination Go type.
+	// Sophisticated implementations may use additional information about the input kinds to pick a destination kind.
+	KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (target schema.GroupVersionKind, ok bool)
+}
+
+// Encoder writes objects to a serialized form
+type Encoder interface {
+	// Encode writes an object to a stream. Implementations may return errors if the versions are
+	// incompatible, or if no conversion is defined.
+	Encode(obj Object, w io.Writer) error
+}
+
+// Decoder attempts to load an object from data.
+type Decoder interface {
+	// Decode attempts to deserialize the provided data using either the innate typing of the scheme or the
+	// default kind, group, and version provided. It returns a decoded object as well as the kind, group, and
+	// version from the serialized data, or an error. If into is non-nil, it will be used as the target type
+	// and implementations may choose to use it rather than reallocating an object. However, the object is not
+	// guaranteed to be populated. The returned object is not guaranteed to match into. If defaults are
+	// provided, they are applied to the data by default. If no defaults or partial defaults are provided, the
+	// type of the into may be used to guide conversion decisions.
+	Decode(data []byte, defaults *schema.GroupVersionKind, into Object) (Object, *schema.GroupVersionKind, error)
+}
+
+// Serializer is the core interface for transforming objects into a serialized format and back.
+// Implementations may choose to perform conversion of the object, but no assumptions should be made.
+type Serializer interface {
+	Encoder
+	Decoder
+}
+
+// Codec is a Serializer that deals with the details of versioning objects. It offers the same
+// interface as Serializer, so this is a marker to consumers that care about the version of the objects
+// they receive.
+type Codec Serializer
+
+// ParameterCodec defines methods for serializing and deserializing API objects to url.Values and
+// performing any necessary conversion. Unlike the normal Codec, query parameters are not self describing
+// and the desired version must be specified.
+type ParameterCodec interface {
+	// DecodeParameters takes the given url.Values in the specified group version and decodes them
+	// into the provided object, or returns an error.
+	DecodeParameters(parameters url.Values, from schema.GroupVersion, into Object) error
+	// EncodeParameters encodes the provided object as query parameters or returns an error.
+	EncodeParameters(obj Object, to schema.GroupVersion) (url.Values, error)
+}
+
+// Framer is a factory for creating readers and writers that obey a particular framing pattern.
+type Framer interface {
+	NewFrameReader(r io.ReadCloser) io.ReadCloser
+	NewFrameWriter(w io.Writer) io.Writer
+}
+
+// SerializerInfo contains information about a specific serialization format
+type SerializerInfo struct {
+	// MediaType is the value that represents this serializer over the wire.
+	MediaType string
+	// EncodesAsText indicates this serializer can be encoded to UTF-8 safely.
+	EncodesAsText bool
+	// Serializer is the individual object serializer for this media type.
+	Serializer Serializer
+	// PrettySerializer, if set, can serialize this object in a form biased towards
+	// readability.
+	PrettySerializer Serializer
+	// StreamSerializer, if set, describes the streaming serialization format
+	// for this media type.
+	StreamSerializer *StreamSerializerInfo
+}
+
+// StreamSerializerInfo contains information about a specific stream serialization format
+type StreamSerializerInfo struct {
+	// EncodesAsText indicates this serializer can be encoded to UTF-8 safely.
+	EncodesAsText bool
+	// Serializer is the top level object serializer for this type when streaming
+	Serializer
+	// Framer is the factory for retrieving streams that separate objects on the wire
+	Framer
+}
+
+// NegotiatedSerializer is an interface used for obtaining encoders, decoders, and serializers
+// for multiple supported media types. This would commonly be accepted by a server component
+// that performs HTTP content negotiation to accept multiple formats.
+type NegotiatedSerializer interface {
+	// SupportedMediaTypes is the media types supported for reading and writing single objects.
+	SupportedMediaTypes() []SerializerInfo
+
+	// EncoderForVersion returns an encoder that ensures objects being written to the provided
+	// serializer are in the provided group version.
+	EncoderForVersion(serializer Encoder, gv GroupVersioner) Encoder
+	// DecoderToVersion returns a decoder that ensures objects being read by the provided
+	// serializer are in the provided group version by default.
+	DecoderToVersion(serializer Decoder, gv GroupVersioner) Decoder
+}
+
+// StorageSerializer is an interface used for obtaining encoders, decoders, and serializers
+// that can read and write data at rest. This would commonly be used by client tools that must
+// read files, or server side storage interfaces that persist restful objects.
+type StorageSerializer interface {
+	// SupportedMediaTypes are the media types supported for reading and writing objects.
+	SupportedMediaTypes() []SerializerInfo
+
+	// UniversalDeserializer returns a Serializer that can read objects in multiple supported formats
+	// by introspecting the data at rest.
+	UniversalDeserializer() Decoder
+
+	// EncoderForVersion returns an encoder that ensures objects being written to the provided
+	// serializer are in the provided group version.
+	EncoderForVersion(serializer Encoder, gv GroupVersioner) Encoder
+	// DecoderToVersion returns a decoder that ensures objects being read by the provided
+	// serializer are in the provided group version by default.
+	DecoderToVersion(serializer Decoder, gv GroupVersioner) Decoder
+}
+
+// NestedObjectEncoder is an optional interface that objects may implement to be given
+// an opportunity to encode any nested Objects / RawExtensions during serialization.
+type NestedObjectEncoder interface {
+	EncodeNestedObjects(e Encoder) error
+}
+
+// NestedObjectDecoder is an optional interface that objects may implement to be given
+// an opportunity to decode any nested Objects / RawExtensions during deserialization.
+type NestedObjectDecoder interface {
+	DecodeNestedObjects(d Decoder) error
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Non-codec interfaces
+
+type ObjectDefaulter interface {
+	// Default takes an object (must be a pointer) and applies any default values.
+	// Defaulters may not error.
+	Default(in Object)
+}
+
+type ObjectVersioner interface {
+	ConvertToVersion(in Object, gv GroupVersioner) (out Object, err error)
+}
+
+// ObjectConvertor converts an object to a different version.
+type ObjectConvertor interface {
+	// Convert attempts to convert one object into another, or returns an error. This
+	// method does not mutate the in object, but the in and out object might share data structures,
+	// i.e. the out object cannot be mutated without mutating the in object as well.
+	// The context argument will be passed to all nested conversions.
+	Convert(in, out, context interface{}) error
+	// ConvertToVersion takes the provided object and converts it the provided version. This
+	// method does not mutate the in object, but the in and out object might share data structures,
+	// i.e. the out object cannot be mutated without mutating the in object as well.
+	// This method is similar to Convert() but handles specific details of choosing the correct
+	// output version.
+	ConvertToVersion(in Object, gv GroupVersioner) (out Object, err error)
+	ConvertFieldLabel(gvk schema.GroupVersionKind, label, value string) (string, string, error)
+}
+
+// ObjectTyper contains methods for extracting the APIVersion and Kind
+// of objects.
+type ObjectTyper interface {
+	// ObjectKinds returns all possible group,version,kind combinations for the provided object, true if
+	// the object is unversioned, or an error if the object is not recognized
+	// (IsNotRegisteredError will return true).
+	ObjectKinds(Object) ([]schema.GroupVersionKind, bool, error)
+	// Recognizes returns true if the scheme is able to handle the provided version and kind,
+	// or more precisely that the provided version is a possible conversion or decoding
+	// target.
+	Recognizes(gvk schema.GroupVersionKind) bool
+}
+
+// ObjectCreater contains methods for instantiating an object by kind and version.
+type ObjectCreater interface {
+	New(kind schema.GroupVersionKind) (out Object, err error)
+}
+
+// ResourceVersioner provides methods for setting and retrieving
+// the resource version from an API object.
+type ResourceVersioner interface {
+	SetResourceVersion(obj Object, version string) error
+	ResourceVersion(obj Object) (string, error)
+}
+
+// SelfLinker provides methods for setting and retrieving the SelfLink field of an API object.
+type SelfLinker interface {
+	SetSelfLink(obj Object, selfLink string) error
+	SelfLink(obj Object) (string, error)
+
+	// Knowing Name is sometimes necessary to use a SelfLinker.
+	Name(obj Object) (string, error)
+	// Knowing Namespace is sometimes necessary to use a SelfLinker
+	Namespace(obj Object) (string, error)
+}
+
+// Object interface must be supported by all API types registered with Scheme. Since objects in a scheme are
+// expected to be serialized to the wire, the interface an Object must provide to the Scheme allows
+// serializers to set the kind, version, and group the object is represented as. An Object may choose
+// to return a no-op ObjectKindAccessor in cases where it is not expected to be serialized.
+type Object interface {
+	GetObjectKind() schema.ObjectKind
+	DeepCopyObject() Object
+}
+
+// Unstructured objects store values as map[string]interface{}, with only values that can be serialized
+// to JSON allowed.
+type Unstructured interface {
+	Object
+	// UnstructuredContent returns a non-nil map with this object's contents. Values may be
+	// []interface{}, map[string]interface{}, or any primitive type. Contents are typically serialized to
+	// and from JSON. SetUnstructuredContent should be used to mutate the contents.
+	UnstructuredContent() map[string]interface{}
+	// SetUnstructuredContent updates the object content to match the provided map.
+	SetUnstructuredContent(map[string]interface{})
+	// IsList returns true if this type is a list or matches the list convention - has an array called "items".
+	IsList() bool
+	// EachListItem should pass a single item out of the list as an Object to the provided function. Any
+	// error should terminate the iteration. If IsList() returns false, this method should return an error
+	// instead of calling the provided function.
+	EachListItem(func(Object) error) error
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/register.go b/vendor/k8s.io/apimachinery/pkg/runtime/register.go
new file mode 100644
index 0000000..eeb380c
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/register.go
@@ -0,0 +1,61 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package runtime
+
+import "k8s.io/apimachinery/pkg/runtime/schema"
+
+// SetGroupVersionKind satisfies the ObjectKind interface for all objects that embed TypeMeta
+func (obj *TypeMeta) SetGroupVersionKind(gvk schema.GroupVersionKind) {
+	obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind()
+}
+
+// GroupVersionKind satisfies the ObjectKind interface for all objects that embed TypeMeta
+func (obj *TypeMeta) GroupVersionKind() schema.GroupVersionKind {
+	return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind)
+}
+
+func (obj *TypeMeta) GetObjectKind() schema.ObjectKind { return obj }
+
+// GetObjectKind implements Object for VersionedObjects, returning an empty ObjectKind
+// interface if no objects are provided, or the ObjectKind interface of the object in the
+// highest array position.
+func (obj *VersionedObjects) GetObjectKind() schema.ObjectKind {
+	last := obj.Last()
+	if last == nil {
+		return schema.EmptyObjectKind
+	}
+	return last.GetObjectKind()
+}
+
+// First returns the leftmost object in the VersionedObjects array, which is usually the
+// object as serialized on the wire.
+func (obj *VersionedObjects) First() Object {
+	if len(obj.Objects) == 0 {
+		return nil
+	}
+	return obj.Objects[0]
+}
+
+// Last is the rightmost object in the VersionedObjects array, which is the object after
+// all transformations have been applied. This is the same object that would be returned
+// by Decode in a normal invocation (without VersionedObjects in the into argument).
+func (obj *VersionedObjects) Last() Object {
+	if len(obj.Objects) == 0 {
+		return nil
+	}
+	return obj.Objects[len(obj.Objects)-1]
+}
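+
+// A sketch of the First/Last convention (wireObj and finalObj are
+// placeholders): decoders append to Objects as they transform, so the wire
+// form sits first and the fully converted form sits last.
+//
+//	vo := &VersionedObjects{Objects: []Object{wireObj, finalObj}}
+//	_ = vo.First() // the object as serialized on the wire
+//	_ = vo.Last()  // the object after all transformations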
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto
new file mode 100644
index 0000000..5aeeaa1
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto
@@ -0,0 +1,26 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.apimachinery.pkg.runtime.schema;
+
+// Package-wide variables from generator "generated".
+option go_package = "schema";
+
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go
new file mode 100644
index 0000000..4c67ed5
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go
@@ -0,0 +1,300 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package schema
+
+import (
+	"fmt"
+	"strings"
+)
+
+// ParseResourceArg takes the common style of string which may be either `resource.group.com` or `resource.version.group.com`
+// and parses it out into both possibilities.  This code takes no responsibility for knowing which representation was intended
+// but with a knowledge of all GroupVersions, calling code can take a very good guess.  If there are only two segments, then
+// `*GroupVersionResource` is nil.
+// `resource.group.com` -> `group=com, version=group, resource=resource` and `group=group.com, resource=resource`
+func ParseResourceArg(arg string) (*GroupVersionResource, GroupResource) {
+	var gvr *GroupVersionResource
+	if strings.Count(arg, ".") >= 2 {
+		s := strings.SplitN(arg, ".", 3)
+		gvr = &GroupVersionResource{Group: s[2], Version: s[1], Resource: s[0]}
+	}
+
+	return gvr, ParseGroupResource(arg)
+}
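+
+// For example, both interpretations are returned and the caller disambiguates
+// using its knowledge of registered GroupVersions:
+//
+//	gvr, gr := ParseResourceArg("deployments.v1.apps")
+//	// gvr == &GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}
+//	// gr  == GroupResource{Group: "v1.apps", Resource: "deployments"}
+//
+//	gvr, gr = ParseResourceArg("deployments.apps")
+//	// gvr == nil (only two segments)
+//	// gr  == GroupResource{Group: "apps", Resource: "deployments"}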
+
+// ParseKindArg takes the common style of string which may be either `Kind.group.com` or `Kind.version.group.com`
+// and parses it out into both possibilities. This code takes no responsibility for knowing which representation was intended
+// but with a knowledge of all GroupKinds, calling code can take a very good guess. If there are only two segments, then
+// `*GroupVersionKind` is nil.
+// `Kind.group.com` -> `group=com, version=group, kind=Kind` and `group=group.com, kind=Kind`
+func ParseKindArg(arg string) (*GroupVersionKind, GroupKind) {
+	var gvk *GroupVersionKind
+	if strings.Count(arg, ".") >= 2 {
+		s := strings.SplitN(arg, ".", 3)
+		gvk = &GroupVersionKind{Group: s[2], Version: s[1], Kind: s[0]}
+	}
+
+	return gvk, ParseGroupKind(arg)
+}
+
+// GroupResource specifies a Group and a Resource, but does not force a version.  This is useful for identifying
+// concepts during lookup stages without having partially valid types
+type GroupResource struct {
+	Group    string
+	Resource string
+}
+
+func (gr GroupResource) WithVersion(version string) GroupVersionResource {
+	return GroupVersionResource{Group: gr.Group, Version: version, Resource: gr.Resource}
+}
+
+func (gr GroupResource) Empty() bool {
+	return len(gr.Group) == 0 && len(gr.Resource) == 0
+}
+
+func (gr GroupResource) String() string {
+	if len(gr.Group) == 0 {
+		return gr.Resource
+	}
+	return gr.Resource + "." + gr.Group
+}
+
+func ParseGroupKind(gk string) GroupKind {
+	i := strings.Index(gk, ".")
+	if i == -1 {
+		return GroupKind{Kind: gk}
+	}
+
+	return GroupKind{Group: gk[i+1:], Kind: gk[:i]}
+}
+
+// ParseGroupResource turns "resource.group" string into a GroupResource struct.  Empty strings are allowed
+// for each field.
+func ParseGroupResource(gr string) GroupResource {
+	if i := strings.Index(gr, "."); i >= 0 {
+		return GroupResource{Group: gr[i+1:], Resource: gr[:i]}
+	}
+	return GroupResource{Resource: gr}
+}
+
+// GroupVersionResource unambiguously identifies a resource.  It doesn't anonymously include GroupVersion
+// to avoid automatic coercion.  It doesn't use a GroupVersion to avoid custom marshalling
+type GroupVersionResource struct {
+	Group    string
+	Version  string
+	Resource string
+}
+
+func (gvr GroupVersionResource) Empty() bool {
+	return len(gvr.Group) == 0 && len(gvr.Version) == 0 && len(gvr.Resource) == 0
+}
+
+func (gvr GroupVersionResource) GroupResource() GroupResource {
+	return GroupResource{Group: gvr.Group, Resource: gvr.Resource}
+}
+
+func (gvr GroupVersionResource) GroupVersion() GroupVersion {
+	return GroupVersion{Group: gvr.Group, Version: gvr.Version}
+}
+
+func (gvr GroupVersionResource) String() string {
+	return strings.Join([]string{gvr.Group, "/", gvr.Version, ", Resource=", gvr.Resource}, "")
+}
+
+// GroupKind specifies a Group and a Kind, but does not force a version.  This is useful for identifying
+// concepts during lookup stages without having partially valid types
+type GroupKind struct {
+	Group string
+	Kind  string
+}
+
+func (gk GroupKind) Empty() bool {
+	return len(gk.Group) == 0 && len(gk.Kind) == 0
+}
+
+func (gk GroupKind) WithVersion(version string) GroupVersionKind {
+	return GroupVersionKind{Group: gk.Group, Version: version, Kind: gk.Kind}
+}
+
+func (gk GroupKind) String() string {
+	if len(gk.Group) == 0 {
+		return gk.Kind
+	}
+	return gk.Kind + "." + gk.Group
+}
+
+// GroupVersionKind unambiguously identifies a kind.  It doesn't anonymously include GroupVersion
+// to avoid automatic coercion.  It doesn't use a GroupVersion to avoid custom marshalling
+type GroupVersionKind struct {
+	Group   string
+	Version string
+	Kind    string
+}
+
+// Empty returns true if group, version, and kind are empty
+func (gvk GroupVersionKind) Empty() bool {
+	return len(gvk.Group) == 0 && len(gvk.Version) == 0 && len(gvk.Kind) == 0
+}
+
+func (gvk GroupVersionKind) GroupKind() GroupKind {
+	return GroupKind{Group: gvk.Group, Kind: gvk.Kind}
+}
+
+func (gvk GroupVersionKind) GroupVersion() GroupVersion {
+	return GroupVersion{Group: gvk.Group, Version: gvk.Version}
+}
+
+func (gvk GroupVersionKind) String() string {
+	return gvk.Group + "/" + gvk.Version + ", Kind=" + gvk.Kind
+}
+
+// GroupVersion contains the "group" and the "version", which uniquely identifies the API.
+type GroupVersion struct {
+	Group   string
+	Version string
+}
+
+// Empty returns true if group and version are empty
+func (gv GroupVersion) Empty() bool {
+	return len(gv.Group) == 0 && len(gv.Version) == 0
+}
+
+// String puts "group" and "version" into a single "group/version" string. For the legacy v1
+// it returns "v1".
+func (gv GroupVersion) String() string {
+	// special case the internal apiVersion for the legacy kube types
+	if gv.Empty() {
+		return ""
+	}
+
+	// special case of "v1" for backward compatibility
+	if len(gv.Group) == 0 && gv.Version == "v1" {
+		return gv.Version
+	}
+	if len(gv.Group) > 0 {
+		return gv.Group + "/" + gv.Version
+	}
+	return gv.Version
+}
+
+// KindForGroupVersionKinds identifies the preferred GroupVersionKind out of a list. It returns ok false
+// if none of the options match the group. It prefers a match to group and version over just group.
+// TODO: Move GroupVersion to a package under pkg/runtime, since it's used by scheme.
+// TODO: Introduce an adapter type between GroupVersion and runtime.GroupVersioner, and use LegacyCodec(GroupVersion)
+//   in fewer places.
+func (gv GroupVersion) KindForGroupVersionKinds(kinds []GroupVersionKind) (target GroupVersionKind, ok bool) {
+	for _, gvk := range kinds {
+		if gvk.Group == gv.Group && gvk.Version == gv.Version {
+			return gvk, true
+		}
+	}
+	for _, gvk := range kinds {
+		if gvk.Group == gv.Group {
+			return gv.WithKind(gvk.Kind), true
+		}
+	}
+	return GroupVersionKind{}, false
+}
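+
+// For example, when only the group matches, the version is coerced to the
+// receiver's version:
+//
+//	gv := GroupVersion{Group: "apps", Version: "v1"}
+//	kinds := []GroupVersionKind{{Group: "apps", Version: "v1beta1", Kind: "Deployment"}}
+//	target, ok := gv.KindForGroupVersionKinds(kinds)
+//	// ok == true
+//	// target == GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"}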
+
+// ParseGroupVersion turns a "group/version" string into a GroupVersion struct. It reports an
+// error if it cannot parse the string.
+func ParseGroupVersion(gv string) (GroupVersion, error) {
+	// this can be the internal version for the legacy kube types
+	// TODO once we've cleared the last uses as strings, this special case should be removed.
+	if (len(gv) == 0) || (gv == "/") {
+		return GroupVersion{}, nil
+	}
+
+	switch strings.Count(gv, "/") {
+	case 0:
+		return GroupVersion{"", gv}, nil
+	case 1:
+		i := strings.Index(gv, "/")
+		return GroupVersion{gv[:i], gv[i+1:]}, nil
+	default:
+		return GroupVersion{}, fmt.Errorf("unexpected GroupVersion string: %v", gv)
+	}
+}
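+
+// For example:
+//
+//	gv, _ := ParseGroupVersion("apps/v1") // GroupVersion{Group: "apps", Version: "v1"}
+//	gv, _ = ParseGroupVersion("v1")       // GroupVersion{Group: "", Version: "v1"}
+//	_, err := ParseGroupVersion("a/b/c")  // err != nil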
+
+// WithKind creates a GroupVersionKind based on the method receiver's GroupVersion and the passed Kind.
+func (gv GroupVersion) WithKind(kind string) GroupVersionKind {
+	return GroupVersionKind{Group: gv.Group, Version: gv.Version, Kind: kind}
+}
+
+// WithResource creates a GroupVersionResource based on the method receiver's GroupVersion and the passed Resource.
+func (gv GroupVersion) WithResource(resource string) GroupVersionResource {
+	return GroupVersionResource{Group: gv.Group, Version: gv.Version, Resource: resource}
+}
+
+// GroupVersions can be used to represent a set of desired group versions.
+// TODO: Move GroupVersions to a package under pkg/runtime, since it's used by scheme.
+// TODO: Introduce an adapter type between GroupVersions and runtime.GroupVersioner, and use LegacyCodec(GroupVersion)
+//   in fewer places.
+type GroupVersions []GroupVersion
+
+// KindForGroupVersionKinds identifies the preferred GroupVersionKind out of a list. It returns ok false
+// if none of the options match the group.
+func (gvs GroupVersions) KindForGroupVersionKinds(kinds []GroupVersionKind) (GroupVersionKind, bool) {
+	var targets []GroupVersionKind
+	for _, gv := range gvs {
+		target, ok := gv.KindForGroupVersionKinds(kinds)
+		if !ok {
+			continue
+		}
+		targets = append(targets, target)
+	}
+	if len(targets) == 1 {
+		return targets[0], true
+	}
+	if len(targets) > 1 {
+		return bestMatch(kinds, targets), true
+	}
+	return GroupVersionKind{}, false
+}
+
+// bestMatch tries to pick best matching GroupVersionKind and falls back to the first
+// found if no exact match exists.
+func bestMatch(kinds []GroupVersionKind, targets []GroupVersionKind) GroupVersionKind {
+	for _, gvk := range targets {
+		for _, k := range kinds {
+			if k == gvk {
+				return k
+			}
+		}
+	}
+	return targets[0]
+}
+
+// ToAPIVersionAndKind is a convenience method for satisfying runtime.Object on types that
+// do not use TypeMeta.
+func (gvk GroupVersionKind) ToAPIVersionAndKind() (string, string) {
+	if gvk.Empty() {
+		return "", ""
+	}
+	return gvk.GroupVersion().String(), gvk.Kind
+}
+
+// FromAPIVersionAndKind returns a GVK representing the provided fields for types that
+// do not use TypeMeta. This method exists to support test types and legacy serializations
+// that have a distinct group and kind.
+// TODO: further reduce usage of this method.
+func FromAPIVersionAndKind(apiVersion, kind string) GroupVersionKind {
+	if gv, err := ParseGroupVersion(apiVersion); err == nil {
+		return GroupVersionKind{Group: gv.Group, Version: gv.Version, Kind: kind}
+	}
+	return GroupVersionKind{Kind: kind}
+}
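+
+// The two helpers round-trip each other. For example:
+//
+//	gvk := FromAPIVersionAndKind("apps/v1", "Deployment")
+//	apiVersion, kind := gvk.ToAPIVersionAndKind()
+//	// apiVersion == "apps/v1", kind == "Deployment"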
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/interfaces.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/interfaces.go
new file mode 100644
index 0000000..b570668
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/interfaces.go
@@ -0,0 +1,40 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package schema
+
+// All objects that are serialized from a Scheme encode their type information. This interface is used
+// by serialization to set type information from the Scheme onto the serialized version of an object.
+// For objects that cannot be serialized or have unique requirements, this interface may be a no-op.
+type ObjectKind interface {
+	// SetGroupVersionKind sets or clears the intended serialized kind of an object. Passing kind nil
+	// should clear the current setting.
+	SetGroupVersionKind(kind GroupVersionKind)
+	// GroupVersionKind returns the stored group, version, and kind of an object, or nil if the object does
+	// not expose or provide these fields.
+	GroupVersionKind() GroupVersionKind
+}
+
+// EmptyObjectKind implements the ObjectKind interface as a noop
+var EmptyObjectKind = emptyObjectKind{}
+
+type emptyObjectKind struct{}
+
+// SetGroupVersionKind implements the ObjectKind interface
+func (emptyObjectKind) SetGroupVersionKind(gvk GroupVersionKind) {}
+
+// GroupVersionKind implements the ObjectKind interface
+func (emptyObjectKind) GroupVersionKind() GroupVersionKind { return GroupVersionKind{} }
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go
new file mode 100644
index 0000000..fd37e29
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go
@@ -0,0 +1,754 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package runtime
+
+import (
+	"fmt"
+	"net/url"
+	"reflect"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/conversion"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/util/naming"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apimachinery/pkg/util/sets"
+)
+
+// Scheme defines methods for serializing and deserializing API objects, a type
+// registry for converting group, version, and kind information to and from Go
+// schemas, and mappings between Go schemas of different versions. A scheme is the
+// foundation for a versioned API and versioned configuration over time.
+//
+// In a Scheme, a Type is a particular Go struct, a Version is a point-in-time
+// identifier for a particular representation of that Type (typically backwards
+// compatible), a Kind is the unique name for that Type within the Version, and a
+// Group identifies a set of Versions, Kinds, and Types that evolve over time. An
+// Unversioned Type is one that is not yet formally bound to a version and is promised
+// to be backwards compatible (effectively a "v1" of a Type that does not expect
+// to break in the future).
+//
+// Schemes are not expected to change at runtime and are only threadsafe after
+// registration is complete.
+type Scheme struct {
+	// gvkToType allows one to figure out the go type of an object with
+	// the given version and name.
+	gvkToType map[schema.GroupVersionKind]reflect.Type
+
+	// typeToGVK allows one to find metadata for a given go object.
+	// The reflect.Type we index by should *not* be a pointer.
+	typeToGVK map[reflect.Type][]schema.GroupVersionKind
+
+	// unversionedTypes are transformed without conversion in ConvertToVersion.
+	unversionedTypes map[reflect.Type]schema.GroupVersionKind
+
+	// unversionedKinds are the names of kinds that can be created in the context of any group
+	// or version
+	// TODO: resolve the status of unversioned types.
+	unversionedKinds map[string]reflect.Type
+
+	// Map from version and resource to the corresponding func to convert
+	// resource field labels in that version to internal version.
+	fieldLabelConversionFuncs map[schema.GroupVersionKind]FieldLabelConversionFunc
+
+	// defaulterFuncs is a map from type to a function that is called with an object
+	// to provide defaulting; the provided object must be a pointer.
+	defaulterFuncs map[reflect.Type]func(interface{})
+
+	// converter stores all registered conversion functions. It also has
+	// default converting behavior.
+	converter *conversion.Converter
+
+	// versionPriority is a map of groups to ordered lists of versions for those groups indicating the
+	// default priorities of these versions as registered in the scheme
+	versionPriority map[string][]string
+
+	// observedVersions keeps track of the order we've seen versions during type registration
+	observedVersions []schema.GroupVersion
+
+	// schemeName is the name of this scheme.  If you don't specify a name, the stack of the NewScheme caller will be used.
+	// This is useful for error reporting to indicate the origin of the scheme.
+	schemeName string
+}
+
+// FieldLabelConversionFunc converts a field selector to internal representation.
+type FieldLabelConversionFunc func(label, value string) (internalLabel, internalValue string, err error)
+
+// NewScheme creates a new Scheme. This scheme is pluggable by default.
+func NewScheme() *Scheme {
+	s := &Scheme{
+		gvkToType:                 map[schema.GroupVersionKind]reflect.Type{},
+		typeToGVK:                 map[reflect.Type][]schema.GroupVersionKind{},
+		unversionedTypes:          map[reflect.Type]schema.GroupVersionKind{},
+		unversionedKinds:          map[string]reflect.Type{},
+		fieldLabelConversionFuncs: map[schema.GroupVersionKind]FieldLabelConversionFunc{},
+		defaulterFuncs:            map[reflect.Type]func(interface{}){},
+		versionPriority:           map[string][]string{},
+		schemeName:                naming.GetNameFromCallsite(internalPackages...),
+	}
+	s.converter = conversion.NewConverter(s.nameFunc)
+
+	utilruntime.Must(s.AddConversionFuncs(DefaultEmbeddedConversions()...))
+
+	// Enable map[string][]string conversions by default
+	utilruntime.Must(s.AddConversionFuncs(DefaultStringConversions...))
+	utilruntime.Must(s.RegisterInputDefaults(&map[string][]string{}, JSONKeyMapper, conversion.AllowDifferentFieldTypeNames|conversion.IgnoreMissingFields))
+	utilruntime.Must(s.RegisterInputDefaults(&url.Values{}, JSONKeyMapper, conversion.AllowDifferentFieldTypeNames|conversion.IgnoreMissingFields))
+	return s
+}
+
+// nameFunc returns the name of the type that we wish to use to determine when two types attempt
+// a conversion. Defaults to the go name of the type if the type is not registered.
+func (s *Scheme) nameFunc(t reflect.Type) string {
+	// find the preferred names for this type
+	gvks, ok := s.typeToGVK[t]
+	if !ok {
+		return t.Name()
+	}
+
+	for _, gvk := range gvks {
+		internalGV := gvk.GroupVersion()
+		internalGV.Version = APIVersionInternal // this is hacky and maybe should be passed in
+		internalGVK := internalGV.WithKind(gvk.Kind)
+
+		if internalType, exists := s.gvkToType[internalGVK]; exists {
+			return s.typeToGVK[internalType][0].Kind
+		}
+	}
+
+	return gvks[0].Kind
+}
+
+// fromScope returns the Scheme to use for a conversion.Scope; today this is
+// always the receiver itself.
+func (s *Scheme) fromScope(scope conversion.Scope) *Scheme {
+	return s
+}
+
+// Converter allows access to the converter for the scheme
+func (s *Scheme) Converter() *conversion.Converter {
+	return s.converter
+}
+
+// AddUnversionedTypes registers the provided types as "unversioned", which means that they follow special rules.
+// Whenever an object of this type is serialized, it is serialized with the provided group version and is not
+// converted. Thus unversioned objects are expected to remain backwards compatible forever, as if they were in an
+// API group and version that would never be updated.
+//
+// TODO: there is discussion about removing unversioned and replacing it with objects that are manifest into
+//   every version with particular schemas. Resolve this method at that point.
+func (s *Scheme) AddUnversionedTypes(version schema.GroupVersion, types ...Object) {
+	s.addObservedVersion(version)
+	s.AddKnownTypes(version, types...)
+	for _, obj := range types {
+		t := reflect.TypeOf(obj).Elem()
+		gvk := version.WithKind(t.Name())
+		s.unversionedTypes[t] = gvk
+		if old, ok := s.unversionedKinds[gvk.Kind]; ok && t != old {
+			panic(fmt.Sprintf("%v.%v has already been registered as unversioned kind %q - kind name must be unique in scheme %q", old.PkgPath(), old.Name(), gvk, s.schemeName))
+		}
+		s.unversionedKinds[gvk.Kind] = t
+	}
+}
+
+// AddKnownTypes registers all types passed in 'types' as being members of version 'version'.
+// All objects passed to types should be pointers to structs. The name that go reports for
+// the struct becomes the "kind" field when encoding. Version may not be empty - use the
+// APIVersionInternal constant if you have a type that does not have a formal version.
+func (s *Scheme) AddKnownTypes(gv schema.GroupVersion, types ...Object) {
+	s.addObservedVersion(gv)
+	for _, obj := range types {
+		t := reflect.TypeOf(obj)
+		if t.Kind() != reflect.Ptr {
+			panic("All types must be pointers to structs.")
+		}
+		t = t.Elem()
+		s.AddKnownTypeWithName(gv.WithKind(t.Name()), obj)
+	}
+}
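+
+// A registration sketch (Widget is a hypothetical type implementing Object):
+//
+//	gv := schema.GroupVersion{Group: "example.dev", Version: "v1"}
+//	s := NewScheme()
+//	s.AddKnownTypes(gv, &Widget{})
+//	obj, _ := s.New(gv.WithKind("Widget")) // obj is a new *Widget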
+
+// AddKnownTypeWithName is like AddKnownTypes, but it lets you specify what this type should
+// be encoded as. Useful for testing when you don't want to make multiple packages to define
+// your structs. Version may not be empty - use the APIVersionInternal constant if you have a
+// type that does not have a formal version.
+func (s *Scheme) AddKnownTypeWithName(gvk schema.GroupVersionKind, obj Object) {
+	s.addObservedVersion(gvk.GroupVersion())
+	t := reflect.TypeOf(obj)
+	if len(gvk.Version) == 0 {
+		panic(fmt.Sprintf("version is required on all types: %s %v", gvk, t))
+	}
+	if t.Kind() != reflect.Ptr {
+		panic("All types must be pointers to structs.")
+	}
+	t = t.Elem()
+	if t.Kind() != reflect.Struct {
+		panic("All types must be pointers to structs.")
+	}
+
+	if oldT, found := s.gvkToType[gvk]; found && oldT != t {
+		panic(fmt.Sprintf("Double registration of different types for %v: old=%v.%v, new=%v.%v in scheme %q", gvk, oldT.PkgPath(), oldT.Name(), t.PkgPath(), t.Name(), s.schemeName))
+	}
+
+	s.gvkToType[gvk] = t
+
+	for _, existingGvk := range s.typeToGVK[t] {
+		if existingGvk == gvk {
+			return
+		}
+	}
+	s.typeToGVK[t] = append(s.typeToGVK[t], gvk)
+}
+
+// KnownTypes returns the types known for the given version.
+func (s *Scheme) KnownTypes(gv schema.GroupVersion) map[string]reflect.Type {
+	types := make(map[string]reflect.Type)
+	for gvk, t := range s.gvkToType {
+		if gv != gvk.GroupVersion() {
+			continue
+		}
+
+		types[gvk.Kind] = t
+	}
+	return types
+}
+
+// AllKnownTypes returns all of the known types.
+func (s *Scheme) AllKnownTypes() map[schema.GroupVersionKind]reflect.Type {
+	return s.gvkToType
+}
+
+// ObjectKinds returns all possible group,version,kind combinations for the go object, true if the
+// object is considered unversioned, or an error if it's not a pointer or is unregistered.
+func (s *Scheme) ObjectKinds(obj Object) ([]schema.GroupVersionKind, bool, error) {
+	// Unstructured objects are always considered to have their declared GVK
+	if _, ok := obj.(Unstructured); ok {
+		// we require that the GVK be populated in order to recognize the object
+		gvk := obj.GetObjectKind().GroupVersionKind()
+		if len(gvk.Kind) == 0 {
+			return nil, false, NewMissingKindErr("unstructured object has no kind")
+		}
+		if len(gvk.Version) == 0 {
+			return nil, false, NewMissingVersionErr("unstructured object has no version")
+		}
+		return []schema.GroupVersionKind{gvk}, false, nil
+	}
+
+	v, err := conversion.EnforcePtr(obj)
+	if err != nil {
+		return nil, false, err
+	}
+	t := v.Type()
+
+	gvks, ok := s.typeToGVK[t]
+	if !ok {
+		return nil, false, NewNotRegisteredErrForType(s.schemeName, t)
+	}
+	_, unversionedType := s.unversionedTypes[t]
+
+	return gvks, unversionedType, nil
+}
+
+// Recognizes returns true if the scheme is able to handle the provided group,version,kind
+// of an object.
+func (s *Scheme) Recognizes(gvk schema.GroupVersionKind) bool {
+	_, exists := s.gvkToType[gvk]
+	return exists
+}
+
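+// IsUnversioned returns whether the provided object's type is registered as
+// unversioned, and whether the type is known to the scheme at all; the first
+// result is meaningful only when the second is true.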
+func (s *Scheme) IsUnversioned(obj Object) (bool, bool) {
+	v, err := conversion.EnforcePtr(obj)
+	if err != nil {
+		return false, false
+	}
+	t := v.Type()
+
+	if _, ok := s.typeToGVK[t]; !ok {
+		return false, false
+	}
+	_, ok := s.unversionedTypes[t]
+	return ok, true
+}
+
+// New returns a new API object of the given version and name, or an error if it hasn't
+// been registered. The version and kind fields must be specified.
+func (s *Scheme) New(kind schema.GroupVersionKind) (Object, error) {
+	if t, exists := s.gvkToType[kind]; exists {
+		return reflect.New(t).Interface().(Object), nil
+	}
+
+	if t, exists := s.unversionedKinds[kind.Kind]; exists {
+		return reflect.New(t).Interface().(Object), nil
+	}
+	return nil, NewNotRegisteredErrForKind(s.schemeName, kind)
+}
+
+// Log sets a logger on the scheme. For test purposes only.
+func (s *Scheme) Log(l conversion.DebugLogger) {
+	s.converter.Debug = l
+}
+
+// AddIgnoredConversionType identifies a pair of types that should be skipped by
+// conversion (because the data inside them is explicitly dropped during
+// conversion).
+func (s *Scheme) AddIgnoredConversionType(from, to interface{}) error {
+	return s.converter.RegisterIgnoredConversion(from, to)
+}
+
+// AddConversionFuncs adds functions to the list of conversion functions. The given
+// functions should know how to convert between two of your API objects, or their
+// sub-objects. We deduce how to call these functions from the types of their two
+// parameters; see the comment for Converter.Register.
+//
+// Note that, if you need to copy sub-objects that didn't change, you can use the
+// conversion.Scope object that will be passed to your conversion function.
+// Additionally, all conversions started by Scheme will set the SrcVersion and
+// DestVersion fields on the Meta object. Example:
+//
+// s.AddConversionFuncs(
+//	func(in *InternalObject, out *ExternalObject, scope conversion.Scope) error {
+//		// You can depend on Meta() being non-nil, and this being set to
+//		// the source version, e.g., ""
+//		s.Meta().SrcVersion
+//		// You can depend on this being set to the destination version,
+//		// e.g., "v1".
+//		s.Meta().DestVersion
+//		// Call scope.Convert to copy sub-fields.
+//		s.Convert(&in.SubFieldThatMoved, &out.NewLocation.NewName, 0)
+//		return nil
+//	},
+// )
+//
+// (For more detail about conversion functions, see Converter.Register's comment.)
+//
+// Also note that the default behavior, if you don't add a conversion function, is to
+// sanely copy fields that have the same names and same type names. It's OK if the
+// destination type has extra fields, but it must not remove any. So you only need to
+// add conversion functions for things with changed/removed fields.
+func (s *Scheme) AddConversionFuncs(conversionFuncs ...interface{}) error {
+	for _, f := range conversionFuncs {
+		if err := s.converter.RegisterConversionFunc(f); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// AddConversionFunc registers a function that converts between a and b by passing objects of those
+// types to the provided function. The function *must* accept objects of a and b - this machinery will not enforce
+// any other guarantee.
+func (s *Scheme) AddConversionFunc(a, b interface{}, fn conversion.ConversionFunc) error {
+	return s.converter.RegisterUntypedConversionFunc(a, b, fn)
+}
+
+// AddGeneratedConversionFunc registers a function that converts between a and b by passing objects of those
+// types to the provided function. The function *must* accept objects of a and b - this machinery will not enforce
+// any other guarantee.
+func (s *Scheme) AddGeneratedConversionFunc(a, b interface{}, fn conversion.ConversionFunc) error {
+	return s.converter.RegisterGeneratedUntypedConversionFunc(a, b, fn)
+}
+
+// AddFieldLabelConversionFunc adds a conversion function to convert field selectors
+// of the given kind from the given version to internal version representation.
+func (s *Scheme) AddFieldLabelConversionFunc(gvk schema.GroupVersionKind, conversionFunc FieldLabelConversionFunc) error {
+	s.fieldLabelConversionFuncs[gvk] = conversionFunc
+	return nil
+}
+
+// RegisterInputDefaults sets the provided field mapping function and field matching
+// as the defaults for the provided input type.  The fn may be nil, in which case no
+// mapping will happen by default. Use this method to register a mechanism for handling
+// a specific input type in conversion, such as a map[string]string to structs.
+func (s *Scheme) RegisterInputDefaults(in interface{}, fn conversion.FieldMappingFunc, defaultFlags conversion.FieldMatchingFlags) error {
+	return s.converter.RegisterInputDefaults(in, fn, defaultFlags)
+}
+
+// AddTypeDefaultingFunc registers a function that is passed a pointer to an
+// object and can default fields on the object. These functions will be invoked
+// when Default() is called. The function will never be called unless the
+// defaulted object matches srcType. If this function is invoked twice with the
+// same srcType, the fn passed to the later call will be used instead.
+func (s *Scheme) AddTypeDefaultingFunc(srcType Object, fn func(interface{})) {
+	s.defaulterFuncs[reflect.TypeOf(srcType)] = fn
+}
+
+// Default sets defaults on the provided Object.
+func (s *Scheme) Default(src Object) {
+	if fn, ok := s.defaulterFuncs[reflect.TypeOf(src)]; ok {
+		fn(src)
+	}
+}
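+
+// Illustrative sketch (not part of the upstream file): registering a
+// defaulting function and invoking it. Widget and its Replicas field are
+// hypothetical.
+//
+// s.AddTypeDefaultingFunc(&Widget{}, func(obj interface{}) {
+//	w := obj.(*Widget)
+//	if w.Replicas == 0 {
+//		w.Replicas = 1
+//	}
+// })
+// s.Default(&Widget{}) // runs the function registered for *Widget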
+
+// Convert will attempt to convert in into out. Both must be pointers. It is
+// intended to make testing conversion functions easy, and returns an error if
+// the conversion isn't possible. You can call this with types that haven't been
+// registered (for example, to test conversion of types that are nested within
+// registered types). The context interface is passed to the converter. Convert
+// also supports Unstructured types and will convert them intelligently.
+func (s *Scheme) Convert(in, out interface{}, context interface{}) error {
+	unstructuredIn, okIn := in.(Unstructured)
+	unstructuredOut, okOut := out.(Unstructured)
+	switch {
+	case okIn && okOut:
+		// converting unstructured input to an unstructured output is a straight copy - unstructured
+		// is a "smart holder" and the contents are passed by reference between the two objects
+		unstructuredOut.SetUnstructuredContent(unstructuredIn.UnstructuredContent())
+		return nil
+
+	case okOut:
+		// if the output is an unstructured object, use the standard Go type to unstructured
+		// conversion. The object must not be internal.
+		obj, ok := in.(Object)
+		if !ok {
+			return fmt.Errorf("unable to convert object type %T to Unstructured, must be a runtime.Object", in)
+		}
+		gvks, unversioned, err := s.ObjectKinds(obj)
+		if err != nil {
+			return err
+		}
+		gvk := gvks[0]
+
+		// if no conversion is necessary, convert immediately
+		if unversioned || gvk.Version != APIVersionInternal {
+			content, err := DefaultUnstructuredConverter.ToUnstructured(in)
+			if err != nil {
+				return err
+			}
+			unstructuredOut.SetUnstructuredContent(content)
+			unstructuredOut.GetObjectKind().SetGroupVersionKind(gvk)
+			return nil
+		}
+
+		// attempt to convert the object to an external version first.
+		target, ok := context.(GroupVersioner)
+		if !ok {
+			return fmt.Errorf("unable to convert the internal object type %T to Unstructured without providing a preferred version to convert to", in)
+		}
+		// Convert is implicitly unsafe, so we don't need to perform a safe conversion
+		versioned, err := s.UnsafeConvertToVersion(obj, target)
+		if err != nil {
+			return err
+		}
+		content, err := DefaultUnstructuredConverter.ToUnstructured(versioned)
+		if err != nil {
+			return err
+		}
+		unstructuredOut.SetUnstructuredContent(content)
+		return nil
+
+	case okIn:
+		// converting an unstructured object to any type is modeled by first converting
+		// the input to a versioned type, then running standard conversions
+		typed, err := s.unstructuredToTyped(unstructuredIn)
+		if err != nil {
+			return err
+		}
+		in = typed
+	}
+
+	flags, meta := s.generateConvertMeta(in)
+	meta.Context = context
+	if flags == 0 {
+		flags = conversion.AllowDifferentFieldTypeNames
+	}
+	return s.converter.Convert(in, out, flags, meta)
+}
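+
+// Illustrative sketch (not part of the upstream file): using Convert to test a
+// conversion between two hypothetical nested types that are not themselves
+// registered with the scheme.
+//
+// var in v1.WidgetSpec
+// var out internal.WidgetSpec
+// if err := s.Convert(&in, &out, nil); err != nil {
+//	// handle conversion failure
+// }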
+
+// ConvertFieldLabel alters the given field label and value for a kind's field
+// selector from the versioned representation to an unversioned one, or returns an error.
+func (s *Scheme) ConvertFieldLabel(gvk schema.GroupVersionKind, label, value string) (string, string, error) {
+	conversionFunc, ok := s.fieldLabelConversionFuncs[gvk]
+	if !ok {
+		return DefaultMetaV1FieldSelectorConversion(label, value)
+	}
+	return conversionFunc(label, value)
+}
+
+// ConvertToVersion attempts to convert an input object to its matching Kind in another
+// version within this scheme. Will return an error if the provided version does not
+// contain the inKind (or a mapping by name defined with AddKnownTypeWithName). Will also
+// return an error if the conversion does not result in a valid Object being
+// returned. Passes target down to the conversion methods as the Context on the scope.
+func (s *Scheme) ConvertToVersion(in Object, target GroupVersioner) (Object, error) {
+	return s.convertToVersion(true, in, target)
+}
+
+// UnsafeConvertToVersion will convert in to the provided target if such a conversion is possible,
+// but does not guarantee the output object does not share fields with the input object. It attempts to be as
+// efficient as possible when doing conversion.
+func (s *Scheme) UnsafeConvertToVersion(in Object, target GroupVersioner) (Object, error) {
+	return s.convertToVersion(false, in, target)
+}
+
+// convertToVersion handles conversion with an optional copy.
+func (s *Scheme) convertToVersion(copy bool, in Object, target GroupVersioner) (Object, error) {
+	var t reflect.Type
+
+	if u, ok := in.(Unstructured); ok {
+		typed, err := s.unstructuredToTyped(u)
+		if err != nil {
+			return nil, err
+		}
+
+		in = typed
+		// unstructuredToTyped returns an Object, which must be a pointer to a struct.
+		t = reflect.TypeOf(in).Elem()
+
+	} else {
+		// determine the incoming kinds with as few allocations as possible.
+		t = reflect.TypeOf(in)
+		if t.Kind() != reflect.Ptr {
+			return nil, fmt.Errorf("only pointer types may be converted: %v", t)
+		}
+		t = t.Elem()
+		if t.Kind() != reflect.Struct {
+			return nil, fmt.Errorf("only pointers to struct types may be converted: %v", t)
+		}
+	}
+
+	kinds, ok := s.typeToGVK[t]
+	if !ok || len(kinds) == 0 {
+		return nil, NewNotRegisteredErrForType(s.schemeName, t)
+	}
+
+	gvk, ok := target.KindForGroupVersionKinds(kinds)
+	if !ok {
+		// try to see if this type is listed as unversioned (for legacy support)
+		// TODO: when we move to server API versions, we should completely remove the unversioned concept
+		if unversionedKind, ok := s.unversionedTypes[t]; ok {
+			if gvk, ok := target.KindForGroupVersionKinds([]schema.GroupVersionKind{unversionedKind}); ok {
+				return copyAndSetTargetKind(copy, in, gvk)
+			}
+			return copyAndSetTargetKind(copy, in, unversionedKind)
+		}
+		return nil, NewNotRegisteredErrForTarget(s.schemeName, t, target)
+	}
+
+	// target wants to use the existing type, set kind and return (no conversion necessary)
+	for _, kind := range kinds {
+		if gvk == kind {
+			return copyAndSetTargetKind(copy, in, gvk)
+		}
+	}
+
+	// type is unversioned, no conversion necessary
+	if unversionedKind, ok := s.unversionedTypes[t]; ok {
+		if gvk, ok := target.KindForGroupVersionKinds([]schema.GroupVersionKind{unversionedKind}); ok {
+			return copyAndSetTargetKind(copy, in, gvk)
+		}
+		return copyAndSetTargetKind(copy, in, unversionedKind)
+	}
+
+	out, err := s.New(gvk)
+	if err != nil {
+		return nil, err
+	}
+
+	if copy {
+		in = in.DeepCopyObject()
+	}
+
+	flags, meta := s.generateConvertMeta(in)
+	meta.Context = target
+	if err := s.converter.Convert(in, out, flags, meta); err != nil {
+		return nil, err
+	}
+
+	setTargetKind(out, gvk)
+	return out, nil
+}
+
+// unstructuredToTyped attempts to transform an unstructured object into a typed
+// object if possible. It returns the versioned Go form of the object, or an
+// error if conversion is not possible. Note that this conversion will lose fields.
+func (s *Scheme) unstructuredToTyped(in Unstructured) (Object, error) {
+	// the type must be something we recognize
+	gvks, _, err := s.ObjectKinds(in)
+	if err != nil {
+		return nil, err
+	}
+	typed, err := s.New(gvks[0])
+	if err != nil {
+		return nil, err
+	}
+	if err := DefaultUnstructuredConverter.FromUnstructured(in.UnstructuredContent(), typed); err != nil {
+		return nil, fmt.Errorf("unable to convert unstructured object to %v: %v", gvks[0], err)
+	}
+	return typed, nil
+}
+
+// generateConvertMeta constructs the meta value we pass to Convert.
+func (s *Scheme) generateConvertMeta(in interface{}) (conversion.FieldMatchingFlags, *conversion.Meta) {
+	return s.converter.DefaultMeta(reflect.TypeOf(in))
+}
+
+// copyAndSetTargetKind optionally deep-copies the object, sets the target kind, and returns it.
+func copyAndSetTargetKind(copy bool, obj Object, kind schema.GroupVersionKind) (Object, error) {
+	if copy {
+		obj = obj.DeepCopyObject()
+	}
+	setTargetKind(obj, kind)
+	return obj, nil
+}
+
+// setTargetKind sets the kind on an object, taking into account whether the target kind is the internal version.
+func setTargetKind(obj Object, kind schema.GroupVersionKind) {
+	if kind.Version == APIVersionInternal {
+		// internal is a special case
+		// TODO: look at removing the need to special case this
+		obj.GetObjectKind().SetGroupVersionKind(schema.GroupVersionKind{})
+		return
+	}
+	obj.GetObjectKind().SetGroupVersionKind(kind)
+}
+
+// SetVersionPriority allows specifying a precise order of priority. All specified versions must be in the same group,
+// and the specified order overwrites any previously specified order for this group
+func (s *Scheme) SetVersionPriority(versions ...schema.GroupVersion) error {
+	groups := sets.String{}
+	order := []string{}
+	for _, version := range versions {
+		if len(version.Version) == 0 || version.Version == APIVersionInternal {
+			return fmt.Errorf("internal versions cannot be prioritized: %v", version)
+		}
+
+		groups.Insert(version.Group)
+		order = append(order, version.Version)
+	}
+	if len(groups) != 1 {
+		return fmt.Errorf("must register versions for exactly one group: %v", strings.Join(groups.List(), ", "))
+	}
+
+	s.versionPriority[groups.List()[0]] = order
+	return nil
+}
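+
+// Illustrative sketch (not part of the upstream file): prioritizing versions
+// within one hypothetical group.
+//
+// _ = s.SetVersionPriority(
+//	schema.GroupVersion{Group: "example.io", Version: "v1"},
+//	schema.GroupVersion{Group: "example.io", Version: "v1beta1"},
+// )
+// // PrioritizedVersionsForGroup("example.io") now lists v1 before v1beta1.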
+
+// PrioritizedVersionsForGroup returns versions for a single group in priority order
+func (s *Scheme) PrioritizedVersionsForGroup(group string) []schema.GroupVersion {
+	ret := []schema.GroupVersion{}
+	for _, version := range s.versionPriority[group] {
+		ret = append(ret, schema.GroupVersion{Group: group, Version: version})
+	}
+	for _, observedVersion := range s.observedVersions {
+		if observedVersion.Group != group {
+			continue
+		}
+		found := false
+		for _, existing := range ret {
+			if existing == observedVersion {
+				found = true
+				break
+			}
+		}
+		if !found {
+			ret = append(ret, observedVersion)
+		}
+	}
+
+	return ret
+}
+
+// PrioritizedVersionsAllGroups returns all known versions in their priority order. Group
+// ordering is random, but versions within a single group are prioritized.
+func (s *Scheme) PrioritizedVersionsAllGroups() []schema.GroupVersion {
+	ret := []schema.GroupVersion{}
+	for group, versions := range s.versionPriority {
+		for _, version := range versions {
+			ret = append(ret, schema.GroupVersion{Group: group, Version: version})
+		}
+	}
+	for _, observedVersion := range s.observedVersions {
+		found := false
+		for _, existing := range ret {
+			if existing == observedVersion {
+				found = true
+				break
+			}
+		}
+		if !found {
+			ret = append(ret, observedVersion)
+		}
+	}
+	return ret
+}
+
+// PreferredVersionAllGroups returns the most preferred version for every group.
+// Group ordering is random.
+func (s *Scheme) PreferredVersionAllGroups() []schema.GroupVersion {
+	ret := []schema.GroupVersion{}
+	for group, versions := range s.versionPriority {
+		for _, version := range versions {
+			ret = append(ret, schema.GroupVersion{Group: group, Version: version})
+			break
+		}
+	}
+	for _, observedVersion := range s.observedVersions {
+		found := false
+		for _, existing := range ret {
+			if existing.Group == observedVersion.Group {
+				found = true
+				break
+			}
+		}
+		if !found {
+			ret = append(ret, observedVersion)
+		}
+	}
+
+	return ret
+}
+
+// IsGroupRegistered returns true if types for the group have been registered with the scheme
+func (s *Scheme) IsGroupRegistered(group string) bool {
+	for _, observedVersion := range s.observedVersions {
+		if observedVersion.Group == group {
+			return true
+		}
+	}
+	return false
+}
+
+// IsVersionRegistered returns true if types for the version have been registered with the scheme
+func (s *Scheme) IsVersionRegistered(version schema.GroupVersion) bool {
+	for _, observedVersion := range s.observedVersions {
+		if observedVersion == version {
+			return true
+		}
+	}
+
+	return false
+}
+
+func (s *Scheme) addObservedVersion(version schema.GroupVersion) {
+	if len(version.Version) == 0 || version.Version == APIVersionInternal {
+		return
+	}
+	for _, observedVersion := range s.observedVersions {
+		if observedVersion == version {
+			return
+		}
+	}
+
+	s.observedVersions = append(s.observedVersions, version)
+}
+
+// Name returns the name of this scheme.
+func (s *Scheme) Name() string {
+	return s.schemeName
+}
+
+// internalPackages are packages that are ignored when creating a default reflector name. These
+// packages are in the common call chains to NewReflector, so they'd be low-entropy names for reflectors.
+var internalPackages = []string{"k8s.io/apimachinery/pkg/runtime/scheme.go"}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/scheme_builder.go b/vendor/k8s.io/apimachinery/pkg/runtime/scheme_builder.go
new file mode 100644
index 0000000..944db48
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/scheme_builder.go
@@ -0,0 +1,48 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package runtime
+
+// SchemeBuilder collects functions that add things to a scheme. It exists to allow
+// code to compile without explicitly referencing generated types. You should
+// declare one in each package that will have generated deep copy or conversion
+// functions.
+type SchemeBuilder []func(*Scheme) error
+
+// AddToScheme applies all the stored functions to the scheme. A non-nil error
+// indicates that one function failed and the attempt was abandoned.
+func (sb *SchemeBuilder) AddToScheme(s *Scheme) error {
+	for _, f := range *sb {
+		if err := f(s); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Register adds a scheme setup function to the list.
+func (sb *SchemeBuilder) Register(funcs ...func(*Scheme) error) {
+	for _, f := range funcs {
+		*sb = append(*sb, f)
+	}
+}
+
+// NewSchemeBuilder calls Register for you.
+func NewSchemeBuilder(funcs ...func(*Scheme) error) SchemeBuilder {
+	var sb SchemeBuilder
+	sb.Register(funcs...)
+	return sb
+}
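+
+// Illustrative sketch of the common package-level usage pattern (the
+// addKnownTypes function is hypothetical):
+//
+// var (
+//	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+//	AddToScheme   = SchemeBuilder.AddToScheme
+// )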
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go
new file mode 100644
index 0000000..65f4511
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go
@@ -0,0 +1,237 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package serializer
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/runtime/serializer/json"
+	"k8s.io/apimachinery/pkg/runtime/serializer/recognizer"
+	"k8s.io/apimachinery/pkg/runtime/serializer/versioning"
+)
+
+// serializerExtensions are for serializers that are conditionally compiled in
+var serializerExtensions = []func(*runtime.Scheme) (serializerType, bool){}
+
+type serializerType struct {
+	AcceptContentTypes []string
+	ContentType        string
+	FileExtensions     []string
+	// EncodesAsText should be true if this content type can be represented safely in UTF-8
+	EncodesAsText bool
+
+	Serializer       runtime.Serializer
+	PrettySerializer runtime.Serializer
+
+	AcceptStreamContentTypes []string
+	StreamContentType        string
+
+	Framer           runtime.Framer
+	StreamSerializer runtime.Serializer
+}
+
+func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory) []serializerType {
+	jsonSerializer := json.NewSerializer(mf, scheme, scheme, false)
+	jsonPrettySerializer := json.NewSerializer(mf, scheme, scheme, true)
+	yamlSerializer := json.NewYAMLSerializer(mf, scheme, scheme)
+
+	serializers := []serializerType{
+		{
+			AcceptContentTypes: []string{"application/json"},
+			ContentType:        "application/json",
+			FileExtensions:     []string{"json"},
+			EncodesAsText:      true,
+			Serializer:         jsonSerializer,
+			PrettySerializer:   jsonPrettySerializer,
+
+			Framer:           json.Framer,
+			StreamSerializer: jsonSerializer,
+		},
+		{
+			AcceptContentTypes: []string{"application/yaml"},
+			ContentType:        "application/yaml",
+			FileExtensions:     []string{"yaml"},
+			EncodesAsText:      true,
+			Serializer:         yamlSerializer,
+		},
+	}
+
+	for _, fn := range serializerExtensions {
+		if serializer, ok := fn(scheme); ok {
+			serializers = append(serializers, serializer)
+		}
+	}
+	return serializers
+}
+
+// CodecFactory provides methods for retrieving codecs and serializers for specific
+// versions and content types.
+type CodecFactory struct {
+	scheme      *runtime.Scheme
+	serializers []serializerType
+	universal   runtime.Decoder
+	accepts     []runtime.SerializerInfo
+
+	legacySerializer runtime.Serializer
+}
+
+// NewCodecFactory provides methods for retrieving serializers for the supported wire formats
+// and conversion wrappers to define preferred internal and external versions. In the future,
+// as the internal version is used less, callers may instead use a defaulting serializer and
+// only convert objects which are shared internally (Status, common API machinery).
+// TODO: allow other codecs to be compiled in?
+// TODO: accept a scheme interface
+func NewCodecFactory(scheme *runtime.Scheme) CodecFactory {
+	serializers := newSerializersForScheme(scheme, json.DefaultMetaFactory)
+	return newCodecFactory(scheme, serializers)
+}
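+
+// Illustrative usage sketch (not part of the upstream file):
+//
+// scheme := runtime.NewScheme()
+// codecs := NewCodecFactory(scheme)
+// decoder := codecs.UniversalDeserializer() // decode only, no conversion
+// codec := codecs.LegacyCodec(schema.GroupVersion{Version: "v1"})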
+
+// newCodecFactory is a helper for testing that allows a different metafactory to be specified.
+func newCodecFactory(scheme *runtime.Scheme, serializers []serializerType) CodecFactory {
+	decoders := make([]runtime.Decoder, 0, len(serializers))
+	var accepts []runtime.SerializerInfo
+	alreadyAccepted := make(map[string]struct{})
+
+	var legacySerializer runtime.Serializer
+	for _, d := range serializers {
+		decoders = append(decoders, d.Serializer)
+		for _, mediaType := range d.AcceptContentTypes {
+			if _, ok := alreadyAccepted[mediaType]; ok {
+				continue
+			}
+			alreadyAccepted[mediaType] = struct{}{}
+			info := runtime.SerializerInfo{
+				MediaType:        d.ContentType,
+				EncodesAsText:    d.EncodesAsText,
+				Serializer:       d.Serializer,
+				PrettySerializer: d.PrettySerializer,
+			}
+			if d.StreamSerializer != nil {
+				info.StreamSerializer = &runtime.StreamSerializerInfo{
+					Serializer:    d.StreamSerializer,
+					EncodesAsText: d.EncodesAsText,
+					Framer:        d.Framer,
+				}
+			}
+			accepts = append(accepts, info)
+			if mediaType == runtime.ContentTypeJSON {
+				legacySerializer = d.Serializer
+			}
+		}
+	}
+	if legacySerializer == nil {
+		legacySerializer = serializers[0].Serializer
+	}
+
+	return CodecFactory{
+		scheme:      scheme,
+		serializers: serializers,
+		universal:   recognizer.NewDecoder(decoders...),
+
+		accepts: accepts,
+
+		legacySerializer: legacySerializer,
+	}
+}
+
+// SupportedMediaTypes returns the RFC2046 media types that this factory has serializers for.
+func (f CodecFactory) SupportedMediaTypes() []runtime.SerializerInfo {
+	return f.accepts
+}
+
+// LegacyCodec encodes output to the given API versions, and decodes output into the internal form from
+// any recognized source. The returned codec will always encode output to JSON. If a type is not
+// found in the list of versions an error will be returned.
+//
+// This method is deprecated - clients and servers should negotiate a serializer by mime-type and
+// invoke CodecForVersions. Callers that need only to read data should use UniversalDecoder().
+//
+// TODO: make this call exist only in pkg/api, and initialize it with the set of default versions.
+//   All other callers will be forced to request a Codec directly.
+func (f CodecFactory) LegacyCodec(version ...schema.GroupVersion) runtime.Codec {
+	return versioning.NewDefaultingCodecForScheme(f.scheme, f.legacySerializer, f.universal, schema.GroupVersions(version), runtime.InternalGroupVersioner)
+}
+
+// UniversalDeserializer can convert any stored data recognized by this factory into a Go object that satisfies
+// runtime.Object. It does not perform conversion. It does not perform defaulting.
+func (f CodecFactory) UniversalDeserializer() runtime.Decoder {
+	return f.universal
+}
+
+// UniversalDecoder returns a runtime.Decoder capable of decoding all known API objects in all known formats. Used
+// by clients that do not need to encode objects but want to deserialize API objects stored on disk. Only decodes
+// objects in groups registered with the scheme. The GroupVersions passed may be used to select alternate
+// versions of objects to return - by default, runtime.APIVersionInternal is used. If any versions are specified,
+// unrecognized groups will be returned in the version they are encoded as (no conversion). This decoder performs
+// defaulting.
+//
+// TODO: the decoder will eventually be removed in favor of dealing with objects in their versioned form
+// TODO: only accept a group versioner
+func (f CodecFactory) UniversalDecoder(versions ...schema.GroupVersion) runtime.Decoder {
+	var versioner runtime.GroupVersioner
+	if len(versions) == 0 {
+		versioner = runtime.InternalGroupVersioner
+	} else {
+		versioner = schema.GroupVersions(versions)
+	}
+	return f.CodecForVersions(nil, f.universal, nil, versioner)
+}
+
+// CodecForVersions creates a codec with the provided serializer. If an object is decoded and its group is not in the list,
+// it will default to runtime.APIVersionInternal. If encode is not specified for an object's group, the object is not
+// converted. If encode or decode are nil, no conversion is performed.
+func (f CodecFactory) CodecForVersions(encoder runtime.Encoder, decoder runtime.Decoder, encode runtime.GroupVersioner, decode runtime.GroupVersioner) runtime.Codec {
+	// TODO: these are for backcompat, remove them in the future
+	if encode == nil {
+		encode = runtime.DisabledGroupVersioner
+	}
+	if decode == nil {
+		decode = runtime.InternalGroupVersioner
+	}
+	return versioning.NewDefaultingCodecForScheme(f.scheme, encoder, decoder, encode, decode)
+}
+
+// DecoderToVersion returns a decoder that targets the provided group version.
+func (f CodecFactory) DecoderToVersion(decoder runtime.Decoder, gv runtime.GroupVersioner) runtime.Decoder {
+	return f.CodecForVersions(nil, decoder, nil, gv)
+}
+
+// EncoderForVersion returns an encoder that targets the provided group version.
+func (f CodecFactory) EncoderForVersion(encoder runtime.Encoder, gv runtime.GroupVersioner) runtime.Encoder {
+	return f.CodecForVersions(encoder, nil, gv, nil)
+}
+
+// DirectCodecFactory provides methods for retrieving "DirectCodec"s, which do not do conversion.
+type DirectCodecFactory struct {
+	CodecFactory
+}
+
+// EncoderForVersion returns an encoder that does not do conversion.
+func (f DirectCodecFactory) EncoderForVersion(serializer runtime.Encoder, version runtime.GroupVersioner) runtime.Encoder {
+	return versioning.DirectEncoder{
+		Version:     version,
+		Encoder:     serializer,
+		ObjectTyper: f.CodecFactory.scheme,
+	}
+}
+
+// DecoderToVersion returns a decoder that does not do conversion. gv is ignored.
+func (f DirectCodecFactory) DecoderToVersion(serializer runtime.Decoder, _ runtime.GroupVersioner) runtime.Decoder {
+	return versioning.DirectDecoder{
+		Decoder: serializer,
+	}
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go
new file mode 100644
index 0000000..8987e74
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go
@@ -0,0 +1,303 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package json
+
+import (
+	"encoding/json"
+	"io"
+	"strconv"
+	"unsafe"
+
+	jsoniter "github.com/json-iterator/go"
+	"github.com/modern-go/reflect2"
+	"sigs.k8s.io/yaml"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/runtime/serializer/recognizer"
+	"k8s.io/apimachinery/pkg/util/framer"
+	utilyaml "k8s.io/apimachinery/pkg/util/yaml"
+)
+
+// NewSerializer creates a JSON serializer that handles encoding versioned objects into the proper JSON form. If typer
+// is not nil, the object has the group, version, and kind fields set.
+func NewSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper, pretty bool) *Serializer {
+	return &Serializer{
+		meta:    meta,
+		creater: creater,
+		typer:   typer,
+		yaml:    false,
+		pretty:  pretty,
+	}
+}
+
+// NewYAMLSerializer creates a YAML serializer that handles encoding versioned objects into the proper YAML form. If typer
+// is not nil, the object has the group, version, and kind fields set. This serializer supports only the subset of YAML that
+// matches JSON, and will error if constructs are used that do not serialize to JSON.
+func NewYAMLSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper) *Serializer {
+	return &Serializer{
+		meta:    meta,
+		creater: creater,
+		typer:   typer,
+		yaml:    true,
+	}
+}
+
+// Serializer handles encoding versioned objects into JSON or YAML wire form.
+type Serializer struct {
+	meta    MetaFactory
+	creater runtime.ObjectCreater
+	typer   runtime.ObjectTyper
+	yaml    bool
+	pretty  bool
+}
+
+// Serializer implements runtime.Serializer and recognizer.RecognizingDecoder.
+var _ runtime.Serializer = &Serializer{}
+var _ recognizer.RecognizingDecoder = &Serializer{}
+
+type customNumberExtension struct {
+	jsoniter.DummyExtension
+}
+
+func (cne *customNumberExtension) CreateDecoder(typ reflect2.Type) jsoniter.ValDecoder {
+	if typ.String() == "interface {}" {
+		return customNumberDecoder{}
+	}
+	return nil
+}
+
+type customNumberDecoder struct{}
+
+func (customNumberDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) {
+	switch iter.WhatIsNext() {
+	case jsoniter.NumberValue:
+		var number jsoniter.Number
+		iter.ReadVal(&number)
+		i64, err := strconv.ParseInt(string(number), 10, 64)
+		if err == nil {
+			*(*interface{})(ptr) = i64
+			return
+		}
+		f64, err := strconv.ParseFloat(string(number), 64)
+		if err == nil {
+			*(*interface{})(ptr) = f64
+			return
+		}
+		iter.ReportError("DecodeNumber", err.Error())
+	default:
+		*(*interface{})(ptr) = iter.Read()
+	}
+}
+
+// CaseSensitiveJsonIterator returns a jsoniterator API that's configured to be
+// case-sensitive when unmarshalling, and otherwise compatible with
+// the encoding/json standard library.
+func CaseSensitiveJsonIterator() jsoniter.API {
+	config := jsoniter.Config{
+		EscapeHTML:             true,
+		SortMapKeys:            true,
+		ValidateJsonRawMessage: true,
+		CaseSensitive:          true,
+	}.Froze()
+	// Force jsoniter to decode number to interface{} via int64/float64, if possible.
+	config.RegisterExtension(&customNumberExtension{})
+	return config
+}
+
+// Private copy of jsoniter to try to shield against possible mutations
+// from outside. Still does not protect from package level jsoniter.Register*() functions - someone calling them
+// in some other library will mess with every usage of the jsoniter library in the whole program.
+// See https://github.com/json-iterator/go/issues/265
+var caseSensitiveJsonIterator = CaseSensitiveJsonIterator()
+
+// gvkWithDefaults returns actual with empty group, version, and kind fields
+// defaulted from defaultGVK.
+func gvkWithDefaults(actual, defaultGVK schema.GroupVersionKind) schema.GroupVersionKind {
+	if len(actual.Kind) == 0 {
+		actual.Kind = defaultGVK.Kind
+	}
+	if len(actual.Version) == 0 && len(actual.Group) == 0 {
+		actual.Group = defaultGVK.Group
+		actual.Version = defaultGVK.Version
+	}
+	if len(actual.Version) == 0 && actual.Group == defaultGVK.Group {
+		actual.Version = defaultGVK.Version
+	}
+	return actual
+}
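+
+// Worked example (illustrative): with defaultGVK = apps/v1 Deployment, an
+// actual of {Kind: "Deployment"} becomes apps/v1 Deployment; an actual of
+// {Group: "apps", Kind: "Deployment"} only gains Version "v1"; an actual in a
+// different group keeps its empty Version.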
+
+// Decode attempts to convert the provided data into YAML or JSON, extract the stored schema kind, apply the provided default gvk, and then
+// load that data into an object matching the desired schema kind or the provided into.
+// If into is *runtime.Unknown, the raw data will be extracted and no decoding will be performed.
+// If into is not registered with the typer, then the object will be straight decoded using normal JSON/YAML unmarshalling.
+// If into is provided and the original data is not fully qualified with kind/version/group, the type of the into will be used to alter the returned gvk.
+// If into is nil, or data's gvk differs from into's gvk, a new Object of the desired kind is created with ObjectCreater.New(gvk).
+// On success or most errors, the method will return the calculated schema kind.
+// The gvk is calculated with the precedence originalData > default gvk > into.
+func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
+	if versioned, ok := into.(*runtime.VersionedObjects); ok {
+		into = versioned.Last()
+		obj, actual, err := s.Decode(originalData, gvk, into)
+		if err != nil {
+			return nil, actual, err
+		}
+		versioned.Objects = []runtime.Object{obj}
+		return versioned, actual, nil
+	}
+
+	data := originalData
+	if s.yaml {
+		altered, err := yaml.YAMLToJSON(data)
+		if err != nil {
+			return nil, nil, err
+		}
+		data = altered
+	}
+
+	actual, err := s.meta.Interpret(data)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if gvk != nil {
+		*actual = gvkWithDefaults(*actual, *gvk)
+	}
+
+	if unk, ok := into.(*runtime.Unknown); ok && unk != nil {
+		unk.Raw = originalData
+		unk.ContentType = runtime.ContentTypeJSON
+		unk.GetObjectKind().SetGroupVersionKind(*actual)
+		return unk, actual, nil
+	}
+
+	if into != nil {
+		_, isUnstructured := into.(runtime.Unstructured)
+		types, _, err := s.typer.ObjectKinds(into)
+		switch {
+		case runtime.IsNotRegisteredError(err), isUnstructured:
+			if err := caseSensitiveJsonIterator.Unmarshal(data, into); err != nil {
+				return nil, actual, err
+			}
+			return into, actual, nil
+		case err != nil:
+			return nil, actual, err
+		default:
+			*actual = gvkWithDefaults(*actual, types[0])
+		}
+	}
+
+	if len(actual.Kind) == 0 {
+		return nil, actual, runtime.NewMissingKindErr(string(originalData))
+	}
+	if len(actual.Version) == 0 {
+		return nil, actual, runtime.NewMissingVersionErr(string(originalData))
+	}
+
+	// use the target if necessary
+	obj, err := runtime.UseOrCreateObject(s.typer, s.creater, *actual, into)
+	if err != nil {
+		return nil, actual, err
+	}
+
+	if err := caseSensitiveJsonIterator.Unmarshal(data, obj); err != nil {
+		return nil, actual, err
+	}
+	return obj, actual, nil
+}
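+
+// Illustrative decode sketch (not part of the upstream file; the scheme and
+// data values are assumed to exist):
+//
+// s := NewSerializer(DefaultMetaFactory, scheme, scheme, false)
+// obj, gvk, err := s.Decode(data, &schema.GroupVersionKind{Version: "v1", Kind: "Pod"}, nil)
+// // gvk reflects the precedence above: the data's apiVersion/kind wins,
+// // then the supplied default, then (when provided) into's registered kind.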
+
+// Encode serializes the provided object to the given writer.
+func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error {
+	if s.yaml {
+		json, err := caseSensitiveJsonIterator.Marshal(obj)
+		if err != nil {
+			return err
+		}
+		data, err := yaml.JSONToYAML(json)
+		if err != nil {
+			return err
+		}
+		_, err = w.Write(data)
+		return err
+	}
+
+	if s.pretty {
+		data, err := caseSensitiveJsonIterator.MarshalIndent(obj, "", "  ")
+		if err != nil {
+			return err
+		}
+		_, err = w.Write(data)
+		return err
+	}
+	encoder := json.NewEncoder(w)
+	return encoder.Encode(obj)
+}
+
+// RecognizesData implements the RecognizingDecoder interface.
+func (s *Serializer) RecognizesData(peek io.Reader) (ok, unknown bool, err error) {
+	if s.yaml {
+		// we could potentially look for '---'
+		return false, true, nil
+	}
+	_, _, ok = utilyaml.GuessJSONStream(peek, 2048)
+	return ok, false, nil
+}
+
+// Framer is the default JSON framing behavior, with newlines delimiting individual objects.
+var Framer = jsonFramer{}
+
+type jsonFramer struct{}
+
+// NewFrameWriter implements stream framing for this serializer
+func (jsonFramer) NewFrameWriter(w io.Writer) io.Writer {
+	// we can write JSON objects directly to the writer, because they are self-framing
+	return w
+}
+
+// NewFrameReader implements stream framing for this serializer
+func (jsonFramer) NewFrameReader(r io.ReadCloser) io.ReadCloser {
+	// we need to extract the JSON chunks of data to pass to Decode()
+	return framer.NewJSONFramedReader(r)
+}
+
+// YAMLFramer is the default YAML framing behavior, with `---` separators delimiting individual documents.
+var YAMLFramer = yamlFramer{}
+
+type yamlFramer struct{}
+
+// NewFrameWriter implements stream framing for this serializer
+func (yamlFramer) NewFrameWriter(w io.Writer) io.Writer {
+	return yamlFrameWriter{w}
+}
+
+// NewFrameReader implements stream framing for this serializer
+func (yamlFramer) NewFrameReader(r io.ReadCloser) io.ReadCloser {
+	// extract the YAML document chunks directly
+	return utilyaml.NewDocumentDecoder(r)
+}
+
+type yamlFrameWriter struct {
+	w io.Writer
+}
+
+// Write separates each document with the YAML document separator (`---` followed by line
+// break). Writers must write well formed YAML documents (include a final line break).
+func (w yamlFrameWriter) Write(data []byte) (n int, err error) {
+	if _, err := w.w.Write([]byte("---\n")); err != nil {
+		return 0, err
+	}
+	return w.w.Write(data)
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/meta.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/meta.go
new file mode 100644
index 0000000..df3f5f9
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/meta.go
@@ -0,0 +1,63 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package json
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// MetaFactory is used to store and retrieve the version and kind
+// information for JSON objects in a serializer.
+type MetaFactory interface {
+	// Interpret should return the version and kind of the wire-format of
+	// the object.
+	Interpret(data []byte) (*schema.GroupVersionKind, error)
+}
+
+// DefaultMetaFactory is a default factory for versioning objects in JSON. The object
+// in memory and in the default JSON serialization will use the "kind" and "apiVersion"
+// fields.
+var DefaultMetaFactory = SimpleMetaFactory{}
+
+// SimpleMetaFactory provides default methods for retrieving the type and version of objects
+// that are identified by "apiVersion" and "kind" fields in their JSON
+// serialization. It may be parameterized with the names of the fields in memory, or an
+// optional list of base structs to search for those fields in memory.
+type SimpleMetaFactory struct {
+}
+
+// Interpret will return the APIVersion and Kind of the JSON wire-format
+// encoding of an object, or an error.
+func (SimpleMetaFactory) Interpret(data []byte) (*schema.GroupVersionKind, error) {
+	findKind := struct {
+		// +optional
+		APIVersion string `json:"apiVersion,omitempty"`
+		// +optional
+		Kind string `json:"kind,omitempty"`
+	}{}
+	if err := json.Unmarshal(data, &findKind); err != nil {
+		return nil, fmt.Errorf("couldn't get version/kind; json parse error: %v", err)
+	}
+	gv, err := schema.ParseGroupVersion(findKind.APIVersion)
+	if err != nil {
+		return nil, err
+	}
+	return &schema.GroupVersionKind{Group: gv.Group, Version: gv.Version, Kind: findKind.Kind}, nil
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/negotiated_codec.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/negotiated_codec.go
new file mode 100644
index 0000000..a42b4a4
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/negotiated_codec.go
@@ -0,0 +1,43 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package serializer
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+// TODO: We should split negotiated serializers that we can change versions on from those we can change
+// serialization formats on
+type negotiatedSerializerWrapper struct {
+	info runtime.SerializerInfo
+}
+
+// NegotiatedSerializerWrapper returns a NegotiatedSerializer backed by the
+// single provided SerializerInfo; its encoders and decoders perform no
+// version conversion.
+func NegotiatedSerializerWrapper(info runtime.SerializerInfo) runtime.NegotiatedSerializer {
+	return &negotiatedSerializerWrapper{info}
+}
+
+func (n *negotiatedSerializerWrapper) SupportedMediaTypes() []runtime.SerializerInfo {
+	return []runtime.SerializerInfo{n.info}
+}
+
+func (n *negotiatedSerializerWrapper) EncoderForVersion(e runtime.Encoder, _ runtime.GroupVersioner) runtime.Encoder {
+	return e
+}
+
+func (n *negotiatedSerializerWrapper) DecoderToVersion(d runtime.Decoder, _ runtime.GroupVersioner) runtime.Decoder {
+	return d
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/doc.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/doc.go
new file mode 100644
index 0000000..72d0ac7
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package protobuf provides a Kubernetes serializer for the protobuf format.
+package protobuf // import "k8s.io/apimachinery/pkg/runtime/serializer/protobuf"
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go
new file mode 100644
index 0000000..b99ba25
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go
@@ -0,0 +1,459 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package protobuf
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"net/http"
+	"reflect"
+
+	"github.com/gogo/protobuf/proto"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/runtime/serializer/recognizer"
+	"k8s.io/apimachinery/pkg/util/framer"
+)
+
+var (
+	// protoEncodingPrefix serves as a magic number for an encoded protobuf message on this serializer. All
+	// proto messages serialized by this schema will be preceded by the bytes 0x6b 0x38 0x73, with the fourth
+	// byte being reserved for the encoding style. The only encoding style defined is 0x00, which means that
+	// the rest of the byte stream is a message of type k8s.io.kubernetes.pkg.runtime.Unknown (proto2).
+	//
+	// See k8s.io/apimachinery/pkg/runtime/generated.proto for details of the runtime.Unknown message.
+	//
+	// This encoding scheme is experimental, and is subject to change at any time.
+	protoEncodingPrefix = []byte{0x6b, 0x38, 0x73, 0x00}
+)
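+
+// Illustrative wire layout (derived from the comment above): an encoded
+// object is the four prefix bytes 0x6b 0x38 0x73 ("k8s") plus the encoding
+// style byte 0x00, followed by a proto-encoded runtime.Unknown message that
+// carries the object's own bytes in its Raw field.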
+
+type errNotMarshalable struct {
+	t reflect.Type
+}
+
+func (e errNotMarshalable) Error() string {
+	return fmt.Sprintf("object %v does not implement the protobuf marshalling interface and cannot be encoded to a protobuf message", e.t)
+}
+
+func (e errNotMarshalable) Status() metav1.Status {
+	return metav1.Status{
+		Status:  metav1.StatusFailure,
+		Code:    http.StatusNotAcceptable,
+		Reason:  metav1.StatusReason("NotAcceptable"),
+		Message: e.Error(),
+	}
+}
+
+// IsNotMarshalable returns true if the error indicates that an object could
+// not be marshalled to a protobuf message.
+func IsNotMarshalable(err error) bool {
+	_, ok := err.(errNotMarshalable)
+	return err != nil && ok
+}
+
+// NewSerializer creates a Protobuf serializer that handles encoding versioned objects into the proper wire form. If a typer
+// is passed, the encoded object will have group, version, and kind fields set. If typer is nil, the objects will be written
+// as-is (any type info passed with the object will be used).
+//
+// This encoding scheme is experimental, and is subject to change at any time.
+func NewSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper, defaultContentType string) *Serializer {
+	return &Serializer{
+		prefix:      protoEncodingPrefix,
+		creater:     creater,
+		typer:       typer,
+		contentType: defaultContentType,
+	}
+}
+
+// Serializer handles encoding versioned objects into the proper protobuf wire form.
+type Serializer struct {
+	prefix      []byte
+	creater     runtime.ObjectCreater
+	typer       runtime.ObjectTyper
+	contentType string
+}
+
+var _ runtime.Serializer = &Serializer{}
+var _ recognizer.RecognizingDecoder = &Serializer{}
+
+// Decode attempts to convert the provided data into a protobuf message, extract the stored schema kind, apply the provided default
+// gvk, and then load that data into an object matching the desired schema kind or the provided into. If into is *runtime.Unknown,
+// the raw data will be extracted and no decoding will be performed. If into is not registered with the typer, then the object will
+// be straight decoded using normal protobuf unmarshalling (via proto.Unmarshal). If into is provided and the original data is
+// not fully qualified with kind/version/group, the type of the into will be used to alter the returned gvk. On success or most
+// errors, the method will return the calculated schema kind.
+func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
+	if versioned, ok := into.(*runtime.VersionedObjects); ok {
+		into = versioned.Last()
+		obj, actual, err := s.Decode(originalData, gvk, into)
+		if err != nil {
+			return nil, actual, err
+		}
+		// the last item in versioned becomes into, so if versioned was not originally empty we reset the object
+		// array so the first position is the decoded object and the second position is the outermost object.
+		// if there were no objects in the versioned list passed to us, only add ourselves.
+		if into != nil && into != obj {
+			versioned.Objects = []runtime.Object{obj, into}
+		} else {
+			versioned.Objects = []runtime.Object{obj}
+		}
+		return versioned, actual, err
+	}
+
+	prefixLen := len(s.prefix)
+	switch {
+	case len(originalData) == 0:
+		// TODO: treat like decoding {} from JSON with defaulting
+		return nil, nil, fmt.Errorf("empty data")
+	case len(originalData) < prefixLen || !bytes.Equal(s.prefix, originalData[:prefixLen]):
+		return nil, nil, fmt.Errorf("provided data does not appear to be a protobuf message, expected prefix %v", s.prefix)
+	case len(originalData) == prefixLen:
+		// TODO: treat like decoding {} from JSON with defaulting
+		return nil, nil, fmt.Errorf("empty body")
+	}
+
+	data := originalData[prefixLen:]
+	unk := runtime.Unknown{}
+	if err := unk.Unmarshal(data); err != nil {
+		return nil, nil, err
+	}
+
+	actual := unk.GroupVersionKind()
+	copyKindDefaults(&actual, gvk)
+
+	if intoUnknown, ok := into.(*runtime.Unknown); ok && intoUnknown != nil {
+		*intoUnknown = unk
+		if ok, _, _ := s.RecognizesData(bytes.NewBuffer(unk.Raw)); ok {
+			intoUnknown.ContentType = s.contentType
+		}
+		return intoUnknown, &actual, nil
+	}
+
+	if into != nil {
+		types, _, err := s.typer.ObjectKinds(into)
+		switch {
+		case runtime.IsNotRegisteredError(err):
+			pb, ok := into.(proto.Message)
+			if !ok {
+				return nil, &actual, errNotMarshalable{reflect.TypeOf(into)}
+			}
+			if err := proto.Unmarshal(unk.Raw, pb); err != nil {
+				return nil, &actual, err
+			}
+			return into, &actual, nil
+		case err != nil:
+			return nil, &actual, err
+		default:
+			copyKindDefaults(&actual, &types[0])
+			// if the result of defaulting did not set a version or group, ensure that at least group is set
+			// (copyKindDefaults will not assign Group if version is already set). This guarantees that the group
+			// of into is set if there is no better information from the caller or object.
+			if len(actual.Version) == 0 && len(actual.Group) == 0 {
+				actual.Group = types[0].Group
+			}
+		}
+	}
+
+	if len(actual.Kind) == 0 {
+		return nil, &actual, runtime.NewMissingKindErr(fmt.Sprintf("%#v", unk.TypeMeta))
+	}
+	if len(actual.Version) == 0 {
+		return nil, &actual, runtime.NewMissingVersionErr(fmt.Sprintf("%#v", unk.TypeMeta))
+	}
+
+	return unmarshalToObject(s.typer, s.creater, &actual, into, unk.Raw)
+}
+
+// Encode serializes the provided object to the given writer.
+func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error {
+	prefixSize := uint64(len(s.prefix))
+
+	var unk runtime.Unknown
+	switch t := obj.(type) {
+	case *runtime.Unknown:
+		estimatedSize := prefixSize + uint64(t.Size())
+		data := make([]byte, estimatedSize)
+		i, err := t.MarshalTo(data[prefixSize:])
+		if err != nil {
+			return err
+		}
+		copy(data, s.prefix)
+		_, err = w.Write(data[:prefixSize+uint64(i)])
+		return err
+	default:
+		kind := obj.GetObjectKind().GroupVersionKind()
+		unk = runtime.Unknown{
+			TypeMeta: runtime.TypeMeta{
+				Kind:       kind.Kind,
+				APIVersion: kind.GroupVersion().String(),
+			},
+		}
+	}
+
+	switch t := obj.(type) {
+	case bufferedMarshaller:
+		// this path performs a single allocation during write but requires the caller to implement
+		// the more efficient Size and MarshalTo methods
+		encodedSize := uint64(t.Size())
+		estimatedSize := prefixSize + estimateUnknownSize(&unk, encodedSize)
+		data := make([]byte, estimatedSize)
+
+		i, err := unk.NestedMarshalTo(data[prefixSize:], t, encodedSize)
+		if err != nil {
+			return err
+		}
+
+		copy(data, s.prefix)
+
+		_, err = w.Write(data[:prefixSize+uint64(i)])
+		return err
+
+	case proto.Marshaler:
+		// this path performs extra allocations
+		data, err := t.Marshal()
+		if err != nil {
+			return err
+		}
+		unk.Raw = data
+
+		estimatedSize := prefixSize + uint64(unk.Size())
+		data = make([]byte, estimatedSize)
+
+		i, err := unk.MarshalTo(data[prefixSize:])
+		if err != nil {
+			return err
+		}
+
+		copy(data, s.prefix)
+
+		_, err = w.Write(data[:prefixSize+uint64(i)])
+		return err
+
+	default:
+		// TODO: marshal with a different content type and serializer (JSON for third party objects)
+		return errNotMarshalable{reflect.TypeOf(obj)}
+	}
+}
+
+// RecognizesData implements the RecognizingDecoder interface.
+func (s *Serializer) RecognizesData(peek io.Reader) (bool, bool, error) {
+	prefix := make([]byte, 4)
+	n, err := peek.Read(prefix)
+	if err != nil {
+		if err == io.EOF {
+			return false, false, nil
+		}
+		return false, false, err
+	}
+	if n != 4 {
+		return false, false, nil
+	}
+	return bytes.Equal(s.prefix, prefix), false, nil
+}
+
+// copyKindDefaults defaults dst to the value in src if dst does not have a value set.
+func copyKindDefaults(dst, src *schema.GroupVersionKind) {
+	if src == nil {
+		return
+	}
+	// apply kind and version defaulting from provided default
+	if len(dst.Kind) == 0 {
+		dst.Kind = src.Kind
+	}
+	if len(dst.Version) == 0 && len(src.Version) > 0 {
+		dst.Group = src.Group
+		dst.Version = src.Version
+	}
+}
+
+// bufferedMarshaller describes a more efficient marshalling interface that can avoid allocating multiple
+// byte buffers by pre-calculating the size of the final buffer needed.
+type bufferedMarshaller interface {
+	proto.Sizer
+	runtime.ProtobufMarshaller
+}
+
+// estimateUnknownSize returns the expected bytes consumed by a given runtime.Unknown
+// object with a nil RawJSON struct and the expected size of the provided buffer. The
+// returned size will not be correct if RawJSON is set on unk.
+func estimateUnknownSize(unk *runtime.Unknown, byteSize uint64) uint64 {
+	size := uint64(unk.Size())
+	// protobuf uses 1 byte for the tag, a varint for the length of the array (at most 8 bytes - uint64 - here),
+	// and the size of the array.
+	size += 1 + 8 + byteSize
+	return size
+}
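+
+// Worked example (illustrative): for an Unknown whose own fields marshal to
+// 20 bytes and a payload of 1000 bytes, the estimate is 20 + 1 + 8 + 1000 =
+// 1029 bytes, a safe upper bound for the buffer.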
+
+// NewRawSerializer creates a Protobuf serializer that handles encoding versioned objects into the proper wire form. If typer
+// is not nil, the object has the group, version, and kind fields set. This serializer does not provide type information for the
+// encoded object, and thus is not self describing (callers must know what type is being described in order to decode).
+//
+// This encoding scheme is experimental, and is subject to change at any time.
+func NewRawSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper, defaultContentType string) *RawSerializer {
+	return &RawSerializer{
+		creater:     creater,
+		typer:       typer,
+		contentType: defaultContentType,
+	}
+}
+
+// RawSerializer encodes and decodes objects without adding a runtime.Unknown wrapper (objects are encoded without identifying
+// type).
+type RawSerializer struct {
+	creater     runtime.ObjectCreater
+	typer       runtime.ObjectTyper
+	contentType string
+}
+
+var _ runtime.Serializer = &RawSerializer{}
+
+// Decode attempts to convert the provided data into a protobuf message, extract the stored schema kind, apply the provided default
+// gvk, and then load that data into an object matching the desired schema kind or the provided into. If into is *runtime.Unknown,
+// the raw data will be extracted and no decoding will be performed. If into is not registered with the typer, then the object will
+// be straight decoded using normal protobuf unmarshalling (via proto.Unmarshal). If into is provided and the original data is
+// not fully qualified with kind/version/group, the type of the into will be used to alter the returned gvk. On success or most
+// errors, the method will return the calculated schema kind.
+func (s *RawSerializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
+	if into == nil {
+		return nil, nil, fmt.Errorf("this serializer requires an object to decode into: %#v", s)
+	}
+
+	if versioned, ok := into.(*runtime.VersionedObjects); ok {
+		into = versioned.Last()
+		obj, actual, err := s.Decode(originalData, gvk, into)
+		if err != nil {
+			return nil, actual, err
+		}
+		if into != nil && into != obj {
+			versioned.Objects = []runtime.Object{obj, into}
+		} else {
+			versioned.Objects = []runtime.Object{obj}
+		}
+		return versioned, actual, err
+	}
+
+	if len(originalData) == 0 {
+		// TODO: treat like decoding {} from JSON with defaulting
+		return nil, nil, fmt.Errorf("empty data")
+	}
+	data := originalData
+
+	actual := &schema.GroupVersionKind{}
+	copyKindDefaults(actual, gvk)
+
+	if intoUnknown, ok := into.(*runtime.Unknown); ok && intoUnknown != nil {
+		intoUnknown.Raw = data
+		intoUnknown.ContentEncoding = ""
+		intoUnknown.ContentType = s.contentType
+		intoUnknown.SetGroupVersionKind(*actual)
+		return intoUnknown, actual, nil
+	}
+
+	types, _, err := s.typer.ObjectKinds(into)
+	switch {
+	case runtime.IsNotRegisteredError(err):
+		pb, ok := into.(proto.Message)
+		if !ok {
+			return nil, actual, errNotMarshalable{reflect.TypeOf(into)}
+		}
+		if err := proto.Unmarshal(data, pb); err != nil {
+			return nil, actual, err
+		}
+		return into, actual, nil
+	case err != nil:
+		return nil, actual, err
+	default:
+		copyKindDefaults(actual, &types[0])
+		// if the result of defaulting did not set a version or group, ensure that at least group is set
+		// (copyKindDefaults will not assign Group if version is already set). This guarantees that the group
+		// of into is set if there is no better information from the caller or object.
+		if len(actual.Version) == 0 && len(actual.Group) == 0 {
+			actual.Group = types[0].Group
+		}
+	}
+
+	if len(actual.Kind) == 0 {
+		return nil, actual, runtime.NewMissingKindErr("<protobuf encoded body - must provide default type>")
+	}
+	if len(actual.Version) == 0 {
+		return nil, actual, runtime.NewMissingVersionErr("<protobuf encoded body - must provide default type>")
+	}
+
+	return unmarshalToObject(s.typer, s.creater, actual, into, data)
+}
+
+// unmarshalToObject is the common decoding code shared by the raw and normal serializers.
+func unmarshalToObject(typer runtime.ObjectTyper, creater runtime.ObjectCreater, actual *schema.GroupVersionKind, into runtime.Object, data []byte) (runtime.Object, *schema.GroupVersionKind, error) {
+	// use the target if necessary
+	obj, err := runtime.UseOrCreateObject(typer, creater, *actual, into)
+	if err != nil {
+		return nil, actual, err
+	}
+
+	pb, ok := obj.(proto.Message)
+	if !ok {
+		return nil, actual, errNotMarshalable{reflect.TypeOf(obj)}
+	}
+	if err := proto.Unmarshal(data, pb); err != nil {
+		return nil, actual, err
+	}
+	return obj, actual, nil
+}
+
+// Encode serializes the provided object to the given writer.
+func (s *RawSerializer) Encode(obj runtime.Object, w io.Writer) error {
+	switch t := obj.(type) {
+	case bufferedMarshaller:
+		// this path performs a single allocation during write but requires the caller to implement
+		// the more efficient Size and MarshalTo methods
+		encodedSize := uint64(t.Size())
+		data := make([]byte, encodedSize)
+
+		n, err := t.MarshalTo(data)
+		if err != nil {
+			return err
+		}
+		_, err = w.Write(data[:n])
+		return err
+
+	case proto.Marshaler:
+		// this path performs extra allocations
+		data, err := t.Marshal()
+		if err != nil {
+			return err
+		}
+		_, err = w.Write(data)
+		return err
+
+	default:
+		return errNotMarshalable{reflect.TypeOf(obj)}
+	}
+}
+
+var LengthDelimitedFramer = lengthDelimitedFramer{}
+
+type lengthDelimitedFramer struct{}
+
+// NewFrameWriter implements stream framing for this serializer
+func (lengthDelimitedFramer) NewFrameWriter(w io.Writer) io.Writer {
+	return framer.NewLengthDelimitedFrameWriter(w)
+}
+
+// NewFrameReader implements stream framing for this serializer
+func (lengthDelimitedFramer) NewFrameReader(r io.ReadCloser) io.ReadCloser {
+	return framer.NewLengthDelimitedFrameReader(r)
+}
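+
+// exampleFramedWrite is an illustrative sketch added by the editor, not part
+// of the upstream file. It shows one way the raw serializer and the
+// length-delimited framer compose: each Encode call becomes one frame on the
+// wire. The scheme, out and obj arguments are caller-supplied assumptions.
+func exampleFramedWrite(scheme *runtime.Scheme, out io.Writer, obj runtime.Object) error {
+	raw := NewRawSerializer(scheme, scheme, "application/vnd.kubernetes.protobuf")
+	// The framer prefixes every write with its length so a reader can split
+	// the stream back into object-sized chunks.
+	fw := LengthDelimitedFramer.NewFrameWriter(out)
+	return raw.Encode(obj, fw)
+}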
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf_extension.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf_extension.go
new file mode 100644
index 0000000..545cf78
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf_extension.go
@@ -0,0 +1,48 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package serializer
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/serializer/protobuf"
+)
+
+const (
+	// contentTypeProtobuf is the protobuf type exposed for Kubernetes. It is private to prevent others from
+	// depending on it unintentionally.
+	// TODO: potentially move to pkg/api (since it's part of the Kube public API) and pass it in to the
+	//   CodecFactory on initialization.
+	contentTypeProtobuf = "application/vnd.kubernetes.protobuf"
+)
+
+func protobufSerializer(scheme *runtime.Scheme) (serializerType, bool) {
+	serializer := protobuf.NewSerializer(scheme, scheme, contentTypeProtobuf)
+	raw := protobuf.NewRawSerializer(scheme, scheme, contentTypeProtobuf)
+	return serializerType{
+		AcceptContentTypes: []string{contentTypeProtobuf},
+		ContentType:        contentTypeProtobuf,
+		FileExtensions:     []string{"pb"},
+		Serializer:         serializer,
+
+		Framer:           protobuf.LengthDelimitedFramer,
+		StreamSerializer: raw,
+	}, true
+}
+
+func init() {
+	serializerExtensions = append(serializerExtensions, protobufSerializer)
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/recognizer.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/recognizer.go
new file mode 100644
index 0000000..38497ab
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/recognizer.go
@@ -0,0 +1,127 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package recognizer
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+type RecognizingDecoder interface {
+	runtime.Decoder
+	// RecognizesData should return true if the data in the provided reader
+	// belongs to this decoder, or an error if the data could not be read or is ambiguous.
+	// Unknown is true if the data could not be determined to match the decoder type.
+	// Decoders should assume that they can read as much of peek as they need (as much as
+	// the caller provides) and may return unknown if the data provided is not sufficient
+	// to make a determination. When peek returns EOF, that may mean the end of the input
+	// or the end of buffered input - recognizers should return their best guess at that time.
+	RecognizesData(peek io.Reader) (ok, unknown bool, err error)
+}
+
+// NewDecoder creates a decoder that will attempt multiple decoders in an order defined
+// by:
+//
+// 1. The decoder implements RecognizingDecoder and identifies the data
+// 2. All other decoders, and any decoder that returned true for unknown.
+//
+// The order passed to the constructor is preserved within those priorities.
+func NewDecoder(decoders ...runtime.Decoder) runtime.Decoder {
+	return &decoder{
+		decoders: decoders,
+	}
+}
+
+type decoder struct {
+	decoders []runtime.Decoder
+}
+
+var _ RecognizingDecoder = &decoder{}
+
+func (d *decoder) RecognizesData(peek io.Reader) (bool, bool, error) {
+	var (
+		lastErr    error
+		anyUnknown bool
+	)
+	data, _ := bufio.NewReaderSize(peek, 1024).Peek(1024)
+	for _, r := range d.decoders {
+		switch t := r.(type) {
+		case RecognizingDecoder:
+			ok, unknown, err := t.RecognizesData(bytes.NewBuffer(data))
+			if err != nil {
+				lastErr = err
+				continue
+			}
+			anyUnknown = anyUnknown || unknown
+			if !ok {
+				continue
+			}
+			return true, false, nil
+		}
+	}
+	return false, anyUnknown, lastErr
+}
+
+func (d *decoder) Decode(data []byte, gvk *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
+	var (
+		lastErr error
+		skipped []runtime.Decoder
+	)
+
+	// try recognizers, record any decoders we need to give a chance later
+	for _, r := range d.decoders {
+		switch t := r.(type) {
+		case RecognizingDecoder:
+			buf := bytes.NewBuffer(data)
+			ok, unknown, err := t.RecognizesData(buf)
+			if err != nil {
+				lastErr = err
+				continue
+			}
+			if unknown {
+				skipped = append(skipped, t)
+				continue
+			}
+			if !ok {
+				continue
+			}
+			return r.Decode(data, gvk, into)
+		default:
+			skipped = append(skipped, t)
+		}
+	}
+
+	// try decoders that returned unknown or that do not implement RecognizingDecoder
+	for _, r := range skipped {
+		out, actual, err := r.Decode(data, gvk, into)
+		if err != nil {
+			lastErr = err
+			continue
+		}
+		return out, actual, nil
+	}
+
+	if lastErr == nil {
+		lastErr = fmt.Errorf("no serialization format matched the provided data")
+	}
+	return nil, nil, lastErr
+}
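+
+// exampleCompose is an illustrative sketch added by the editor, not part of
+// the upstream file. jsonDec and protoDec are hypothetical decoders: any that
+// implement RecognizingDecoder get first claim on the data, and the rest
+// (plus any that answered "unknown") are tried in order afterwards.
+func exampleCompose(jsonDec, protoDec runtime.Decoder) runtime.Decoder {
+	return NewDecoder(jsonDec, protoDec)
+}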
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go
new file mode 100644
index 0000000..a60a7c0
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go
@@ -0,0 +1,137 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package streaming implements encoder and decoder for streams
+// of runtime.Objects over io.Writer/Readers.
+package streaming
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// Encoder is a runtime.Encoder on a stream.
+type Encoder interface {
+	// Encode will write the provided object to the stream or return an error. It obeys the same
+	// contract as runtime.VersionedEncoder.
+	Encode(obj runtime.Object) error
+}
+
+// Decoder is a runtime.Decoder from a stream.
+type Decoder interface {
+	// Decode will return io.EOF when no more objects are available.
+	Decode(defaults *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error)
+	// Close closes the underlying stream.
+	Close() error
+}
+
+// Serializer is a factory for creating encoders and decoders that work over streams.
+type Serializer interface {
+	NewEncoder(w io.Writer) Encoder
+	NewDecoder(r io.ReadCloser) Decoder
+}
+
+type decoder struct {
+	reader    io.ReadCloser
+	decoder   runtime.Decoder
+	buf       []byte
+	maxBytes  int
+	resetRead bool
+}
+
+// NewDecoder creates a streaming decoder that reads object chunks from r and decodes them with d.
+// The reader is expected to return io.ErrShortBuffer if the provided buffer is not large enough to read
+// an entire object.
+func NewDecoder(r io.ReadCloser, d runtime.Decoder) Decoder {
+	return &decoder{
+		reader:   r,
+		decoder:  d,
+		buf:      make([]byte, 1024),
+		maxBytes: 16 * 1024 * 1024,
+	}
+}
+
+var ErrObjectTooLarge = fmt.Errorf("object to decode was longer than maximum allowed size")
+
+// Decode reads the next object from the stream and decodes it.
+func (d *decoder) Decode(defaults *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
+	base := 0
+	for {
+		n, err := d.reader.Read(d.buf[base:])
+		if err == io.ErrShortBuffer {
+			if n == 0 {
+				return nil, nil, fmt.Errorf("got short buffer with n=0, base=%d, cap=%d", base, cap(d.buf))
+			}
+			if d.resetRead {
+				continue
+			}
+			// double the buffer size up to maxBytes
+			if len(d.buf) < d.maxBytes {
+				base += n
+				d.buf = append(d.buf, make([]byte, len(d.buf))...)
+				continue
+			}
+			// must read the rest of the frame (until we stop getting ErrShortBuffer)
+			d.resetRead = true
+			base = 0
+			return nil, nil, ErrObjectTooLarge
+		}
+		if err != nil {
+			return nil, nil, err
+		}
+		if d.resetRead {
+			// now that we have drained the large read, continue
+			d.resetRead = false
+			continue
+		}
+		base += n
+		break
+	}
+	return d.decoder.Decode(d.buf[:base], defaults, into)
+}
+
+func (d *decoder) Close() error {
+	return d.reader.Close()
+}
+
+type encoder struct {
+	writer  io.Writer
+	encoder runtime.Encoder
+	buf     *bytes.Buffer
+}
+
+// NewEncoder returns a new streaming encoder.
+func NewEncoder(w io.Writer, e runtime.Encoder) Encoder {
+	return &encoder{
+		writer:  w,
+		encoder: e,
+		buf:     &bytes.Buffer{},
+	}
+}
+
+// Encode writes the provided object to the nested writer.
+func (e *encoder) Encode(obj runtime.Object) error {
+	if err := e.encoder.Encode(obj, e.buf); err != nil {
+		return err
+	}
+	_, err := e.writer.Write(e.buf.Bytes())
+	e.buf.Reset()
+	return err
+}
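+
+// exampleDrain is an illustrative sketch added by the editor, not part of the
+// upstream file. It reads objects until the stream is exhausted; frameReader
+// and dec are caller-supplied assumptions, e.g. a length-delimited frame
+// reader over a watch response and a protobuf decoder.
+func exampleDrain(frameReader io.ReadCloser, dec runtime.Decoder) error {
+	d := NewDecoder(frameReader, dec)
+	defer d.Close()
+	for {
+		// Decode returns io.EOF once no more objects are available.
+		if _, _, err := d.Decode(nil, nil); err != nil {
+			if err == io.EOF {
+				return nil
+			}
+			return err
+		}
+	}
+}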
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go
new file mode 100644
index 0000000..0018471
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go
@@ -0,0 +1,282 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package versioning
+
+import (
+	"io"
+	"reflect"
+
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// NewDefaultingCodecForScheme is a convenience method for callers that are using a scheme.
+func NewDefaultingCodecForScheme(
+	// TODO: I should be a scheme interface?
+	scheme *runtime.Scheme,
+	encoder runtime.Encoder,
+	decoder runtime.Decoder,
+	encodeVersion runtime.GroupVersioner,
+	decodeVersion runtime.GroupVersioner,
+) runtime.Codec {
+	return NewCodec(encoder, decoder, runtime.UnsafeObjectConvertor(scheme), scheme, scheme, scheme, encodeVersion, decodeVersion, scheme.Name())
+}
+
+// NewCodec takes objects in their internal versions and converts them to external versions before
+// serializing them. It assumes the serializer provided to it only deals with external versions.
+// This class is also a serializer, but is generally used with a specific version.
+func NewCodec(
+	encoder runtime.Encoder,
+	decoder runtime.Decoder,
+	convertor runtime.ObjectConvertor,
+	creater runtime.ObjectCreater,
+	typer runtime.ObjectTyper,
+	defaulter runtime.ObjectDefaulter,
+	encodeVersion runtime.GroupVersioner,
+	decodeVersion runtime.GroupVersioner,
+	originalSchemeName string,
+) runtime.Codec {
+	internal := &codec{
+		encoder:   encoder,
+		decoder:   decoder,
+		convertor: convertor,
+		creater:   creater,
+		typer:     typer,
+		defaulter: defaulter,
+
+		encodeVersion: encodeVersion,
+		decodeVersion: decodeVersion,
+
+		originalSchemeName: originalSchemeName,
+	}
+	return internal
+}
+
+type codec struct {
+	encoder   runtime.Encoder
+	decoder   runtime.Decoder
+	convertor runtime.ObjectConvertor
+	creater   runtime.ObjectCreater
+	typer     runtime.ObjectTyper
+	defaulter runtime.ObjectDefaulter
+
+	encodeVersion runtime.GroupVersioner
+	decodeVersion runtime.GroupVersioner
+
+	// originalSchemeName is optional, but when filled in it holds the name of the scheme from which this codec originates
+	originalSchemeName string
+}
+
+// Decode attempts a decode of the object, then tries to convert it to the internal version. If into is provided and the decoding is
+// successful, the returned runtime.Object will be the value passed as into. Note that this may bypass conversion if you pass an
+// into that matches the serialized version.
+func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
+	versioned, isVersioned := into.(*runtime.VersionedObjects)
+	if isVersioned {
+		into = versioned.Last()
+	}
+
+	// If the into object is unstructured and expresses an opinion about its group/version,
+	// create a new instance of the type so we always exercise the conversion path (skips short-circuiting on `into == obj`)
+	decodeInto := into
+	if into != nil {
+		if _, ok := into.(runtime.Unstructured); ok && !into.GetObjectKind().GroupVersionKind().GroupVersion().Empty() {
+			decodeInto = reflect.New(reflect.TypeOf(into).Elem()).Interface().(runtime.Object)
+		}
+	}
+
+	obj, gvk, err := c.decoder.Decode(data, defaultGVK, decodeInto)
+	if err != nil {
+		return nil, gvk, err
+	}
+
+	if d, ok := obj.(runtime.NestedObjectDecoder); ok {
+		if err := d.DecodeNestedObjects(DirectDecoder{c.decoder}); err != nil {
+			return nil, gvk, err
+		}
+	}
+
+	// if we specify a target, use generic conversion.
+	if into != nil {
+		if into == obj {
+			if isVersioned {
+				return versioned, gvk, nil
+			}
+			return into, gvk, nil
+		}
+
+		// perform defaulting if requested
+		if c.defaulter != nil {
+			// create a copy to ensure defaulting is not applied to the original versioned objects
+			if isVersioned {
+				versioned.Objects = []runtime.Object{obj.DeepCopyObject()}
+			}
+			c.defaulter.Default(obj)
+		} else {
+			if isVersioned {
+				versioned.Objects = []runtime.Object{obj}
+			}
+		}
+
+		if err := c.convertor.Convert(obj, into, c.decodeVersion); err != nil {
+			return nil, gvk, err
+		}
+
+		if isVersioned {
+			versioned.Objects = append(versioned.Objects, into)
+			return versioned, gvk, nil
+		}
+		return into, gvk, nil
+	}
+
+	// Convert if needed.
+	if isVersioned {
+		// create a copy, because ConvertToVersion does not guarantee non-mutation of objects
+		versioned.Objects = []runtime.Object{obj.DeepCopyObject()}
+	}
+
+	// perform defaulting if requested
+	if c.defaulter != nil {
+		c.defaulter.Default(obj)
+	}
+
+	out, err := c.convertor.ConvertToVersion(obj, c.decodeVersion)
+	if err != nil {
+		return nil, gvk, err
+	}
+	if isVersioned {
+		if versioned.Last() != out {
+			versioned.Objects = append(versioned.Objects, out)
+		}
+		return versioned, gvk, nil
+	}
+	return out, gvk, nil
+}
+
+// Encode ensures the provided object is output in the appropriate group and version, invoking
+// conversion if necessary. Unversioned objects (according to the ObjectTyper) are output as is.
+func (c *codec) Encode(obj runtime.Object, w io.Writer) error {
+	switch obj := obj.(type) {
+	case *runtime.Unknown:
+		return c.encoder.Encode(obj, w)
+	case runtime.Unstructured:
+		// An unstructured list can contain objects of multiple group version kinds. Don't short-circuit just
+		// because the top-level type matches our desired destination type; actually send the object to the converter
+		// to give it a chance to convert the list items if needed.
+		if _, ok := obj.(*unstructured.UnstructuredList); !ok {
+			// avoid a conversion roundtrip if the GVK is already the right one or is empty (yes, this is a hack, but it is the old behaviour kubectl relies on)
+			objGVK := obj.GetObjectKind().GroupVersionKind()
+			if len(objGVK.Version) == 0 {
+				return c.encoder.Encode(obj, w)
+			}
+			targetGVK, ok := c.encodeVersion.KindForGroupVersionKinds([]schema.GroupVersionKind{objGVK})
+			if !ok {
+				return runtime.NewNotRegisteredGVKErrForTarget(c.originalSchemeName, objGVK, c.encodeVersion)
+			}
+			if targetGVK == objGVK {
+				return c.encoder.Encode(obj, w)
+			}
+		}
+	}
+
+	gvks, isUnversioned, err := c.typer.ObjectKinds(obj)
+	if err != nil {
+		return err
+	}
+
+	if c.encodeVersion == nil || isUnversioned {
+		if e, ok := obj.(runtime.NestedObjectEncoder); ok {
+			if err := e.EncodeNestedObjects(DirectEncoder{Encoder: c.encoder, ObjectTyper: c.typer}); err != nil {
+				return err
+			}
+		}
+		objectKind := obj.GetObjectKind()
+		old := objectKind.GroupVersionKind()
+		objectKind.SetGroupVersionKind(gvks[0])
+		err = c.encoder.Encode(obj, w)
+		objectKind.SetGroupVersionKind(old)
+		return err
+	}
+
+	// Perform a conversion if necessary
+	objectKind := obj.GetObjectKind()
+	old := objectKind.GroupVersionKind()
+	out, err := c.convertor.ConvertToVersion(obj, c.encodeVersion)
+	if err != nil {
+		return err
+	}
+
+	if e, ok := out.(runtime.NestedObjectEncoder); ok {
+		if err := e.EncodeNestedObjects(DirectEncoder{Version: c.encodeVersion, Encoder: c.encoder, ObjectTyper: c.typer}); err != nil {
+			return err
+		}
+	}
+
+	// Conversion is responsible for setting the proper group, version, and kind onto the outgoing object
+	err = c.encoder.Encode(out, w)
+	// restore the old GVK, in case conversion returned the same object
+	objectKind.SetGroupVersionKind(old)
+	return err
+}
+
+// DirectEncoder serializes an object and ensures the GVK is set.
+type DirectEncoder struct {
+	Version runtime.GroupVersioner
+	runtime.Encoder
+	runtime.ObjectTyper
+}
+
+// Encode does not do conversion. It sets the gvk during serialization.
+func (e DirectEncoder) Encode(obj runtime.Object, stream io.Writer) error {
+	gvks, _, err := e.ObjectTyper.ObjectKinds(obj)
+	if err != nil {
+		if runtime.IsNotRegisteredError(err) {
+			return e.Encoder.Encode(obj, stream)
+		}
+		return err
+	}
+	kind := obj.GetObjectKind()
+	oldGVK := kind.GroupVersionKind()
+	gvk := gvks[0]
+	if e.Version != nil {
+		preferredGVK, ok := e.Version.KindForGroupVersionKinds(gvks)
+		if ok {
+			gvk = preferredGVK
+		}
+	}
+	kind.SetGroupVersionKind(gvk)
+	err = e.Encoder.Encode(obj, stream)
+	kind.SetGroupVersionKind(oldGVK)
+	return err
+}
+
+// DirectDecoder clears the group version kind of a deserialized object.
+type DirectDecoder struct {
+	runtime.Decoder
+}
+
+// Decode does not do conversion. It removes the gvk during deserialization.
+func (d DirectDecoder) Decode(data []byte, defaults *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
+	obj, gvk, err := d.Decoder.Decode(data, defaults, into)
+	if obj != nil {
+		kind := obj.GetObjectKind()
+		// clearing the gvk is just a convention of a codec
+		kind.SetGroupVersionKind(schema.GroupVersionKind{})
+	}
+	return obj, gvk, err
+}
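+
+// exampleCodec is an illustrative sketch added by the editor, not part of the
+// upstream file. scheme, base, external and internal are caller-supplied
+// assumptions: base is a serializer for external versions (e.g. JSON),
+// external is the GroupVersioner to encode to, internal the one to decode to.
+func exampleCodec(scheme *runtime.Scheme, base runtime.Serializer, external, internal runtime.GroupVersioner) runtime.Codec {
+	// Encode converts objects to `external` before serializing; Decode
+	// converts decoded objects to `internal` afterwards.
+	return NewDefaultingCodecForScheme(scheme, base, base, external, internal)
+}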
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/swagger_doc_generator.go b/vendor/k8s.io/apimachinery/pkg/runtime/swagger_doc_generator.go
new file mode 100644
index 0000000..5bc642b
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/swagger_doc_generator.go
@@ -0,0 +1,262 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package runtime
+
+import (
+	"bytes"
+	"fmt"
+	"go/ast"
+	"go/doc"
+	"go/parser"
+	"go/token"
+	"io"
+	"reflect"
+	"strings"
+)
+
+// Pair of strings. We keep the name of fields and the doc.
+type Pair struct {
+	Name, Doc string
+}
+
+// KubeTypes is an array to represent all available types in a parsed file. [0] is for the type itself
+type KubeTypes []Pair
+
+func astFrom(filePath string) *doc.Package {
+	fset := token.NewFileSet()
+	m := make(map[string]*ast.File)
+
+	f, err := parser.ParseFile(fset, filePath, nil, parser.ParseComments)
+	if err != nil {
+		fmt.Println(err)
+		return nil
+	}
+
+	m[filePath] = f
+	apkg, _ := ast.NewPackage(fset, m, nil, nil)
+
+	return doc.New(apkg, "", 0)
+}
+
+func fmtRawDoc(rawDoc string) string {
+	var buffer bytes.Buffer
+	delPrevChar := func() {
+		if buffer.Len() > 0 {
+			buffer.Truncate(buffer.Len() - 1) // Delete the last " " or "\n"
+		}
+	}
+
+	// Ignore all lines after ---
+	rawDoc = strings.Split(rawDoc, "---")[0]
+
+	for _, line := range strings.Split(rawDoc, "\n") {
+		line = strings.TrimRight(line, " ")
+		leading := strings.TrimLeft(line, " ")
+		switch {
+		case len(line) == 0: // Keep paragraphs
+			delPrevChar()
+			buffer.WriteString("\n\n")
+		case strings.HasPrefix(leading, "TODO"): // Ignore one line TODOs
+		case strings.HasPrefix(leading, "+"): // Ignore instructions to the generators
+		default:
+			if strings.HasPrefix(line, " ") || strings.HasPrefix(line, "\t") {
+				delPrevChar()
+				line = "\n" + line + "\n" // Replace it with a newline. This is useful when we have a line like: "Example:\n\tJSON-something..."
+			} else {
+				line += " "
+			}
+			buffer.WriteString(line)
+		}
+	}
+
+	postDoc := strings.TrimRight(buffer.String(), "\n")
+	postDoc = strings.Replace(postDoc, "\\\"", "\"", -1) // replace user's \" to "
+	postDoc = strings.Replace(postDoc, "\"", "\\\"", -1) // Escape "
+	postDoc = strings.Replace(postDoc, "\n", "\\n", -1)
+	postDoc = strings.Replace(postDoc, "\t", "\\t", -1)
+
+	return postDoc
+}
+
+// fieldName returns the name of the field as it should appear in JSON format
+// "-" indicates that this field is not part of the JSON representation
+func fieldName(field *ast.Field) string {
+	jsonTag := ""
+	if field.Tag != nil {
+		jsonTag = reflect.StructTag(field.Tag.Value[1 : len(field.Tag.Value)-1]).Get("json") // Strip the first and last quotation marks
+		if strings.Contains(jsonTag, "inline") {
+			return "-"
+		}
+	}
+
+	jsonTag = strings.Split(jsonTag, ",")[0] // This can return "-"
+	if jsonTag == "" {
+		if field.Names != nil {
+			return field.Names[0].Name
+		}
+		return field.Type.(*ast.Ident).Name
+	}
+	return jsonTag
+}
+
+// A buffer of lines that will be written.
+type bufferedLine struct {
+	line        string
+	indentation int
+}
+
+type buffer struct {
+	lines []bufferedLine
+}
+
+func newBuffer() *buffer {
+	return &buffer{
+		lines: make([]bufferedLine, 0),
+	}
+}
+
+func (b *buffer) addLine(line string, indent int) {
+	b.lines = append(b.lines, bufferedLine{line, indent})
+}
+
+func (b *buffer) flushLines(w io.Writer) error {
+	for _, line := range b.lines {
+		indentation := strings.Repeat("\t", line.indentation)
+		fullLine := fmt.Sprintf("%s%s", indentation, line.line)
+		if _, err := io.WriteString(w, fullLine); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func writeFuncHeader(b *buffer, structName string, indent int) {
+	s := fmt.Sprintf("var map_%s = map[string]string {\n", structName)
+	b.addLine(s, indent)
+}
+
+func writeFuncFooter(b *buffer, structName string, indent int) {
+	b.addLine("}\n", indent) // Closes the map definition
+
+	s := fmt.Sprintf("func (%s) SwaggerDoc() map[string]string {\n", structName)
+	b.addLine(s, indent)
+	s = fmt.Sprintf("return map_%s\n", structName)
+	b.addLine(s, indent+1)
+	b.addLine("}\n", indent) // Closes the function definition
+}
+
+func writeMapBody(b *buffer, kubeType []Pair, indent int) {
+	format := "\"%s\": \"%s\",\n"
+	for _, pair := range kubeType {
+		s := fmt.Sprintf(format, pair.Name, pair.Doc)
+		b.addLine(s, indent+2)
+	}
+}
+
+// ParseDocumentationFrom gets all types' documentation and returns them as an
+// array. Each type is again represented as an array (we have to use arrays as we
+// need to be sure of the order of the fields). This function returns fields and
+// struct definitions that have no documentation as {name, ""}.
+func ParseDocumentationFrom(src string) []KubeTypes {
+	var docForTypes []KubeTypes
+
+	pkg := astFrom(src)
+
+	for _, kubType := range pkg.Types {
+		if structType, ok := kubType.Decl.Specs[0].(*ast.TypeSpec).Type.(*ast.StructType); ok {
+			var ks KubeTypes
+			ks = append(ks, Pair{kubType.Name, fmtRawDoc(kubType.Doc)})
+
+			for _, field := range structType.Fields.List {
+				if n := fieldName(field); n != "-" {
+					fieldDoc := fmtRawDoc(field.Doc.Text())
+					ks = append(ks, Pair{n, fieldDoc})
+				}
+			}
+			docForTypes = append(docForTypes, ks)
+		}
+	}
+
+	return docForTypes
+}
+
+// WriteSwaggerDocFunc writes a declaration of a function as a string. This function is used in
+// Swagger as a documentation source for structs and their fields.
+func WriteSwaggerDocFunc(kubeTypes []KubeTypes, w io.Writer) error {
+	for _, kubeType := range kubeTypes {
+		structName := kubeType[0].Name
+		kubeType[0].Name = ""
+
+		// Ignore empty documentation
+		docfulTypes := make(KubeTypes, 0, len(kubeType))
+		for _, pair := range kubeType {
+			if pair.Doc != "" {
+				docfulTypes = append(docfulTypes, pair)
+			}
+		}
+
+		if len(docfulTypes) == 0 {
+		continue // If neither the struct nor any of its fields have documentation, skip the function definition
+		}
+
+		indent := 0
+		buffer := newBuffer()
+
+		writeFuncHeader(buffer, structName, indent)
+		writeMapBody(buffer, docfulTypes, indent)
+		writeFuncFooter(buffer, structName, indent)
+		buffer.addLine("\n", 0)
+
+		if err := buffer.flushLines(w); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// VerifySwaggerDocsExist writes to an io.Writer a list of structs and fields that
+// are missing documentation.
+func VerifySwaggerDocsExist(kubeTypes []KubeTypes, w io.Writer) (int, error) {
+	missingDocs := 0
+	buffer := newBuffer()
+
+	for _, kubeType := range kubeTypes {
+		structName := kubeType[0].Name
+		if kubeType[0].Doc == "" {
+			format := "Missing documentation for the struct itself: %s\n"
+			s := fmt.Sprintf(format, structName)
+			buffer.addLine(s, 0)
+			missingDocs++
+		}
+		kubeType = kubeType[1:] // Skip struct definition
+
+		for _, pair := range kubeType { // Iterate only the fields
+			if pair.Doc == "" {
+				format := "In struct: %s, field documentation is missing: %s\n"
+				s := fmt.Sprintf(format, structName, pair.Name)
+				buffer.addLine(s, 0)
+				missingDocs++
+			}
+		}
+	}
+
+	if err := buffer.flushLines(w); err != nil {
+		return -1, err
+	}
+	return missingDocs, nil
+}
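+
+// exampleGenerate is an illustrative sketch added by the editor, not part of
+// the upstream file. The "types.go" path is a placeholder: every documented
+// struct in that file becomes a map_<Name> table plus a SwaggerDoc()
+// accessor, written to w.
+func exampleGenerate(w io.Writer) error {
+	kubeTypes := ParseDocumentationFrom("types.go")
+	return WriteSwaggerDocFunc(kubeTypes, w)
+}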
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/types.go b/vendor/k8s.io/apimachinery/pkg/runtime/types.go
new file mode 100644
index 0000000..e4515d8
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/types.go
@@ -0,0 +1,137 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package runtime
+
+// Note that the types provided in this file are not versioned and are intended to be
+// safe to use from within all versions of every API object.
+
+// TypeMeta is shared by all top level objects. The proper way to use it is to inline it in your type,
+// like this:
+// type MyAwesomeAPIObject struct {
+//      runtime.TypeMeta    `json:",inline"`
+//      ... // other fields
+// }
+// func (obj *MyAwesomeAPIObject) SetGroupVersionKind(gvk *metav1.GroupVersionKind) { metav1.UpdateTypeMeta(obj,gvk) }; GroupVersionKind() *GroupVersionKind
+//
+// TypeMeta is provided here for convenience. You may use it directly from this package or define
+// your own with the same fields.
+//
+// +k8s:deepcopy-gen=false
+// +protobuf=true
+// +k8s:openapi-gen=true
+type TypeMeta struct {
+	// +optional
+	APIVersion string `json:"apiVersion,omitempty" yaml:"apiVersion,omitempty" protobuf:"bytes,1,opt,name=apiVersion"`
+	// +optional
+	Kind string `json:"kind,omitempty" yaml:"kind,omitempty" protobuf:"bytes,2,opt,name=kind"`
+}
+
+const (
+	ContentTypeJSON string = "application/json"
+)
+
+// RawExtension is used to hold extensions in external versions.
+//
+// To use this, make a field which has RawExtension as its type in your external, versioned
+// struct, and Object in your internal struct. You also need to register your
+// various plugin types.
+//
+// // Internal package:
+// type MyAPIObject struct {
+// 	runtime.TypeMeta `json:",inline"`
+//	MyPlugin runtime.Object `json:"myPlugin"`
+// }
+// type PluginA struct {
+//	AOption string `json:"aOption"`
+// }
+//
+// // External package:
+// type MyAPIObject struct {
+// 	runtime.TypeMeta `json:",inline"`
+//	MyPlugin runtime.RawExtension `json:"myPlugin"`
+// }
+// type PluginA struct {
+//	AOption string `json:"aOption"`
+// }
+//
+// // On the wire, the JSON will look something like this:
+// {
+//	"kind":"MyAPIObject",
+//	"apiVersion":"v1",
+//	"myPlugin": {
+//		"kind":"PluginA",
+//		"aOption":"foo",
+//	},
+// }
+//
+// So what happens? Decode first uses json or yaml to unmarshal the serialized data into
+// your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked.
+// The next step is to copy (using pkg/conversion) into the internal struct. The runtime
+// package's DefaultScheme has conversion functions installed which will unpack the
+// JSON stored in RawExtension, turning it into the correct object type, and storing it
+// in the Object. (TODO: In the case where the object is of an unknown type, a
+// runtime.Unknown object will be created and stored.)
+//
+// +k8s:deepcopy-gen=true
+// +protobuf=true
+// +k8s:openapi-gen=true
+type RawExtension struct {
+	// Raw is the underlying serialization of this object.
+	//
+	// TODO: Determine how to detect ContentType and ContentEncoding of 'Raw' data.
+	Raw []byte `protobuf:"bytes,1,opt,name=raw"`
+	// Object can hold a representation of this extension - useful for working with versioned
+	// structs.
+	Object Object `json:"-"`
+}
+
+// Unknown allows api objects with unknown types to be passed-through. This can be used
+// to deal with the API objects from a plug-in. Unknown objects still have functioning
+// TypeMeta features-- kind, version, etc.
+// TODO: Make this object have easy access to field based accessors and setters for
+// metadata and field mutation.
+//
+// +k8s:deepcopy-gen=true
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +protobuf=true
+// +k8s:openapi-gen=true
+type Unknown struct {
+	TypeMeta `json:",inline" protobuf:"bytes,1,opt,name=typeMeta"`
+	// Raw will hold the complete serialized object which couldn't be matched
+	// with a registered type. Most likely, nothing should be done with this
+	// except for passing it through the system.
+	Raw []byte `protobuf:"bytes,2,opt,name=raw"`
+	// ContentEncoding is the encoding used to encode 'Raw' data.
+	// Unspecified means no encoding.
+	ContentEncoding string `protobuf:"bytes,3,opt,name=contentEncoding"`
+	// ContentType is the serialization method used to serialize 'Raw'.
+	// Unspecified means ContentTypeJSON.
+	ContentType string `protobuf:"bytes,4,opt,name=contentType"`
+}
+
+// VersionedObjects is used by Decoders to give callers a way to access all versions
+// of an object during the decoding process.
+//
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:deepcopy-gen=true
+type VersionedObjects struct {
+	// Objects is the set of objects retrieved during decoding, in order of conversion.
+	// The 0 index is the object as serialized on the wire. If conversion has occurred,
+	// other objects may be present. The rightmost object is the same as would be returned
+	// by a normal Decode call.
+	Objects []Object
+}
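+
+// exampleRawExtension is an illustrative sketch added by the editor, not part
+// of the upstream file. The JSON literal is a minimal assumed payload
+// matching the PluginA example above.
+func exampleRawExtension() RawExtension {
+	// Raw holds the serialized bytes; Object stays nil until a conversion
+	// unpacks it into a typed value.
+	return RawExtension{Raw: []byte(`{"kind":"PluginA","aOption":"foo"}`)}
+}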
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go b/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go
new file mode 100644
index 0000000..ead96ee
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go
@@ -0,0 +1,69 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package runtime
+
+import (
+	"fmt"
+)
+
+type ProtobufMarshaller interface {
+	MarshalTo(data []byte) (int, error)
+}
+
+// NestedMarshalTo allows a caller to avoid extra allocations during serialization of an Unknown
+// that will contain an object that implements ProtobufMarshaller.
+func (m *Unknown) NestedMarshalTo(data []byte, b ProtobufMarshaller, size uint64) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
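+	// Editorial note (not upstream): each literal byte below is a protobuf
+	// wire tag, (field_number << 3) | wire_type, with wire type 2
+	// (length-delimited): 0x0a is field 1 (TypeMeta), 0x12 field 2 (Raw),
+	// 0x1a field 3 (ContentEncoding), 0x22 field 4 (ContentType).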
+	data[i] = 0xa
+	i++
+	i = encodeVarintGenerated(data, i, uint64(m.TypeMeta.Size()))
+	n1, err := m.TypeMeta.MarshalTo(data[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n1
+
+	if b != nil {
+		data[i] = 0x12
+		i++
+		i = encodeVarintGenerated(data, i, size)
+		n2, err := b.MarshalTo(data[i:])
+		if err != nil {
+			return 0, err
+		}
+		if uint64(n2) != size {
+			// programmer error: the Size() method for protobuf does not match the results of MarshalTo, which means the proto
+			// struct returned would be wrong.
+			return 0, fmt.Errorf("the Size() value of %T was %d, but NestedMarshalTo wrote %d bytes to data", b, size, n2)
+		}
+		i += n2
+	}
+
+	data[i] = 0x1a
+	i++
+	i = encodeVarintGenerated(data, i, uint64(len(m.ContentEncoding)))
+	i += copy(data[i:], m.ContentEncoding)
+
+	data[i] = 0x22
+	i++
+	i = encodeVarintGenerated(data, i, uint64(len(m.ContentType)))
+	i += copy(data[i:], m.ContentType)
+	return i, nil
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go
new file mode 100644
index 0000000..8b9182f
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go
@@ -0,0 +1,108 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package runtime
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RawExtension) DeepCopyInto(out *RawExtension) {
+	*out = *in
+	if in.Raw != nil {
+		in, out := &in.Raw, &out.Raw
+		*out = make([]byte, len(*in))
+		copy(*out, *in)
+	}
+	if in.Object != nil {
+		out.Object = in.Object.DeepCopyObject()
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RawExtension.
+func (in *RawExtension) DeepCopy() *RawExtension {
+	if in == nil {
+		return nil
+	}
+	out := new(RawExtension)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Unknown) DeepCopyInto(out *Unknown) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.Raw != nil {
+		in, out := &in.Raw, &out.Raw
+		*out = make([]byte, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Unknown.
+func (in *Unknown) DeepCopy() *Unknown {
+	if in == nil {
+		return nil
+	}
+	out := new(Unknown)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new Object.
+func (in *Unknown) DeepCopyObject() Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VersionedObjects) DeepCopyInto(out *VersionedObjects) {
+	*out = *in
+	if in.Objects != nil {
+		in, out := &in.Objects, &out.Objects
+		*out = make([]Object, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				(*out)[i] = (*in)[i].DeepCopyObject()
+			}
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VersionedObjects.
+func (in *VersionedObjects) DeepCopy() *VersionedObjects {
+	if in == nil {
+		return nil
+	}
+	out := new(VersionedObjects)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new Object.
+func (in *VersionedObjects) DeepCopyObject() Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/selection/operator.go b/vendor/k8s.io/apimachinery/pkg/selection/operator.go
new file mode 100644
index 0000000..298f798
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/selection/operator.go
@@ -0,0 +1,33 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package selection
+
+// Operator represents a key/field's relationship to value(s).
+// See labels.Requirement and fields.Requirement for more details.
+type Operator string
+
+const (
+	DoesNotExist Operator = "!"
+	Equals       Operator = "="
+	DoubleEquals Operator = "=="
+	In           Operator = "in"
+	NotEquals    Operator = "!="
+	NotIn        Operator = "notin"
+	Exists       Operator = "exists"
+	GreaterThan  Operator = "gt"
+	LessThan     Operator = "lt"
+)
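+
+// exampleIsSetBased is an illustrative sketch added by the editor, not part
+// of the upstream file. Operator is a plain string type, so selector parsers
+// can switch on the constants directly; which operators count as set-based is
+// the editor's assumption here.
+func exampleIsSetBased(op Operator) bool {
+	switch op {
+	case In, NotIn, Exists, DoesNotExist:
+		return true
+	}
+	return false
+}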
diff --git a/vendor/k8s.io/apimachinery/pkg/types/doc.go b/vendor/k8s.io/apimachinery/pkg/types/doc.go
new file mode 100644
index 0000000..5667fa9
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/types/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package types implements various generic types used throughout kubernetes.
+package types // import "k8s.io/apimachinery/pkg/types"
diff --git a/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go b/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go
new file mode 100644
index 0000000..88f0de3
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go
@@ -0,0 +1,43 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package types
+
+import (
+	"fmt"
+)
+
+// NamespacedName comprises a resource name, with a mandatory namespace,
+// rendered as "<namespace>/<name>".  Being a type captures intent and
+// helps make sure that UIDs, namespaced names and non-namespaced names
+// do not get conflated in code.  For most use cases, namespace and name
+// will already have been format validated at the API entry point, so we
+// don't do that here.  Where that's not the case (e.g. in testing),
+// consider using NamespacedNameOrDie() in testing.go in this package.
+type NamespacedName struct {
+	Namespace string
+	Name      string
+}
+
+const (
+	Separator = '/'
+)
+
+// String returns the general purpose string representation
+func (n NamespacedName) String() string {
+	return fmt.Sprintf("%s%c%s", n.Namespace, Separator, n.Name)
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/types/nodename.go b/vendor/k8s.io/apimachinery/pkg/types/nodename.go
new file mode 100644
index 0000000..fee348d
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/types/nodename.go
@@ -0,0 +1,43 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package types
+
+// NodeName is a type that holds an api.Node's Name identifier.
+// Being a type captures intent and helps make sure that the node name
+// is not confused with similar concepts (the hostname, the cloud provider id,
+// the cloud provider name etc)
+//
+// To clarify the various types:
+//
+// * Node.Name is the Name field of the Node in the API.  This should be stored in a NodeName.
+//   Unfortunately, because Name is part of ObjectMeta, we can't store it as a NodeName at the API level.
+//
+// * Hostname is the hostname of the local machine (from uname -n).
+//   However, some components allow the user to pass in a --hostname-override flag,
+//   which will override this in most places. In the absence of anything more meaningful,
+//   kubelet will use Hostname as the Node.Name when it creates the Node.
+//
+// * The cloud providers have their own names: GCE has InstanceName, AWS has InstanceId.
+//
+//   For GCE, InstanceName is the Name of an Instance object in the GCE API.  On GCE, Instance.Name becomes the
+//   Hostname, and thus it makes sense also to use it as the Node.Name.  But that is GCE specific, and it is up
+//   to the cloudprovider how to do this mapping.
+//
+//   For AWS, the InstanceID is not yet suitable for use as a Node.Name, so we actually use the
+//   PrivateDnsName for the Node.Name.  And this is _not_ always the same as the hostname: if
+//   we are using a custom DHCP domain it won't be.
+type NodeName string
diff --git a/vendor/k8s.io/apimachinery/pkg/types/patch.go b/vendor/k8s.io/apimachinery/pkg/types/patch.go
new file mode 100644
index 0000000..d522d1d
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/types/patch.go
@@ -0,0 +1,28 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package types
+
+// PatchType holds the content-type constants supporting HTTP PATCH, utilized
+// by both the client and server, that didn't make sense for a whole package
+// to be dedicated to.
+type PatchType string
+
+const (
+	JSONPatchType           PatchType = "application/json-patch+json"
+	MergePatchType          PatchType = "application/merge-patch+json"
+	StrategicMergePatchType PatchType = "application/strategic-merge-patch+json"
+)
diff --git a/vendor/k8s.io/apimachinery/pkg/types/uid.go b/vendor/k8s.io/apimachinery/pkg/types/uid.go
new file mode 100644
index 0000000..8693392
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/types/uid.go
@@ -0,0 +1,22 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package types
+
+// UID is a type that holds unique ID values, including UUIDs.  Because we
+// don't ONLY use UUIDs, this is an alias to string.  Being a type captures
+// intent and helps make sure that UIDs and names do not get conflated.
+type UID string
diff --git a/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go b/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go
new file mode 100644
index 0000000..9567f90
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go
@@ -0,0 +1,348 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package clock
+
+import (
+	"sync"
+	"time"
+)
+
+// Clock allows for injecting fake or real clocks into code that
+// needs to do arbitrary things based on time.
+type Clock interface {
+	Now() time.Time
+	Since(time.Time) time.Duration
+	After(time.Duration) <-chan time.Time
+	NewTimer(time.Duration) Timer
+	Sleep(time.Duration)
+	NewTicker(time.Duration) Ticker
+}
+
+// RealClock really calls time.Now()
+type RealClock struct{}
+
+// Now returns the current time.
+func (RealClock) Now() time.Time {
+	return time.Now()
+}
+
+// Since returns time since the specified timestamp.
+func (RealClock) Since(ts time.Time) time.Duration {
+	return time.Since(ts)
+}
+
+// Same as time.After(d).
+func (RealClock) After(d time.Duration) <-chan time.Time {
+	return time.After(d)
+}
+
+func (RealClock) NewTimer(d time.Duration) Timer {
+	return &realTimer{
+		timer: time.NewTimer(d),
+	}
+}
+
+func (RealClock) NewTicker(d time.Duration) Ticker {
+	return &realTicker{
+		ticker: time.NewTicker(d),
+	}
+}
+
+func (RealClock) Sleep(d time.Duration) {
+	time.Sleep(d)
+}
+
+// FakeClock implements Clock, but returns an arbitrary time.
+type FakeClock struct {
+	lock sync.RWMutex
+	time time.Time
+
+	// waiters are waiting for the fake time to pass their specified time
+	waiters []fakeClockWaiter
+}
+
+type fakeClockWaiter struct {
+	targetTime    time.Time
+	stepInterval  time.Duration
+	skipIfBlocked bool
+	destChan      chan time.Time
+	fired         bool
+}
+
+func NewFakeClock(t time.Time) *FakeClock {
+	return &FakeClock{
+		time: t,
+	}
+}
+
+// Now returns f's time.
+func (f *FakeClock) Now() time.Time {
+	f.lock.RLock()
+	defer f.lock.RUnlock()
+	return f.time
+}
+
+// Since returns time since the time in f.
+func (f *FakeClock) Since(ts time.Time) time.Duration {
+	f.lock.RLock()
+	defer f.lock.RUnlock()
+	return f.time.Sub(ts)
+}
+
+// Fake version of time.After(d).
+func (f *FakeClock) After(d time.Duration) <-chan time.Time {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+	stopTime := f.time.Add(d)
+	ch := make(chan time.Time, 1) // Don't block!
+	f.waiters = append(f.waiters, fakeClockWaiter{
+		targetTime: stopTime,
+		destChan:   ch,
+	})
+	return ch
+}
+
+// Fake version of time.NewTimer(d).
+func (f *FakeClock) NewTimer(d time.Duration) Timer {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+	stopTime := f.time.Add(d)
+	ch := make(chan time.Time, 1) // Don't block!
+	timer := &fakeTimer{
+		fakeClock: f,
+		waiter: fakeClockWaiter{
+			targetTime: stopTime,
+			destChan:   ch,
+		},
+	}
+	f.waiters = append(f.waiters, timer.waiter)
+	return timer
+}
+
+func (f *FakeClock) NewTicker(d time.Duration) Ticker {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+	tickTime := f.time.Add(d)
+	ch := make(chan time.Time, 1) // hold one tick
+	f.waiters = append(f.waiters, fakeClockWaiter{
+		targetTime:    tickTime,
+		stepInterval:  d,
+		skipIfBlocked: true,
+		destChan:      ch,
+	})
+
+	return &fakeTicker{
+		c: ch,
+	}
+}
+
+// Step moves the clock forward by d and notifies anyone waiting via After, NewTimer, or NewTicker.
+func (f *FakeClock) Step(d time.Duration) {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+	f.setTimeLocked(f.time.Add(d))
+}
+
+// SetTime sets the time and fires any waiters whose target time has passed.
+func (f *FakeClock) SetTime(t time.Time) {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+	f.setTimeLocked(t)
+}
+
+// Actually changes the time and checks any waiters. f must be write-locked.
+func (f *FakeClock) setTimeLocked(t time.Time) {
+	f.time = t
+	newWaiters := make([]fakeClockWaiter, 0, len(f.waiters))
+	for i := range f.waiters {
+		w := &f.waiters[i]
+		if !w.targetTime.After(t) {
+
+			if w.skipIfBlocked {
+				select {
+				case w.destChan <- t:
+					w.fired = true
+				default:
+				}
+			} else {
+				w.destChan <- t
+				w.fired = true
+			}
+
+			if w.stepInterval > 0 {
+				for !w.targetTime.After(t) {
+					w.targetTime = w.targetTime.Add(w.stepInterval)
+				}
+				newWaiters = append(newWaiters, *w)
+			}
+
+		} else {
+			newWaiters = append(newWaiters, f.waiters[i])
+		}
+	}
+	f.waiters = newWaiters
+}
+
+// HasWaiters returns true if After has been called on f but not yet satisfied (so you
+// can write race-free tests).
+func (f *FakeClock) HasWaiters() bool {
+	f.lock.RLock()
+	defer f.lock.RUnlock()
+	return len(f.waiters) > 0
+}
+
+func (f *FakeClock) Sleep(d time.Duration) {
+	f.Step(d)
+}
+
+// IntervalClock implements Clock, but each invocation of Now steps the clock forward the specified duration
+type IntervalClock struct {
+	Time     time.Time
+	Duration time.Duration
+}
+
+// Now returns i's time.
+func (i *IntervalClock) Now() time.Time {
+	i.Time = i.Time.Add(i.Duration)
+	return i.Time
+}
+
+// Since returns time since the time in i.
+func (i *IntervalClock) Since(ts time.Time) time.Duration {
+	return i.Time.Sub(ts)
+}
+
+// Unimplemented, will panic.
+// TODO: make interval clock use FakeClock so this can be implemented.
+func (*IntervalClock) After(d time.Duration) <-chan time.Time {
+	panic("IntervalClock doesn't implement After")
+}
+
+// Unimplemented, will panic.
+// TODO: make interval clock use FakeClock so this can be implemented.
+func (*IntervalClock) NewTimer(d time.Duration) Timer {
+	panic("IntervalClock doesn't implement NewTimer")
+}
+
+// Unimplemented, will panic.
+// TODO: make interval clock use FakeClock so this can be implemented.
+func (*IntervalClock) NewTicker(d time.Duration) Ticker {
+	panic("IntervalClock doesn't implement NewTicker")
+}
+
+func (*IntervalClock) Sleep(d time.Duration) {
+	panic("IntervalClock doesn't implement Sleep")
+}
+
+// Timer allows for injecting fake or real timers into code that
+// needs to do arbitrary things based on time.
+type Timer interface {
+	C() <-chan time.Time
+	Stop() bool
+	Reset(d time.Duration) bool
+}
+
+// realTimer is backed by an actual time.Timer.
+type realTimer struct {
+	timer *time.Timer
+}
+
+// C returns the underlying timer's channel.
+func (r *realTimer) C() <-chan time.Time {
+	return r.timer.C
+}
+
+// Stop calls Stop() on the underlying timer.
+func (r *realTimer) Stop() bool {
+	return r.timer.Stop()
+}
+
+// Reset calls Reset() on the underlying timer.
+func (r *realTimer) Reset(d time.Duration) bool {
+	return r.timer.Reset(d)
+}
+
+// fakeTimer implements Timer based on a FakeClock.
+type fakeTimer struct {
+	fakeClock *FakeClock
+	waiter    fakeClockWaiter
+}
+
+// C returns the channel that notifies when this timer has fired.
+func (f *fakeTimer) C() <-chan time.Time {
+	return f.waiter.destChan
+}
+
+// Stop stops the timer and returns true if the timer has not yet fired, or false otherwise.
+func (f *fakeTimer) Stop() bool {
+	f.fakeClock.lock.Lock()
+	defer f.fakeClock.lock.Unlock()
+
+	newWaiters := make([]fakeClockWaiter, 0, len(f.fakeClock.waiters))
+	for i := range f.fakeClock.waiters {
+		w := &f.fakeClock.waiters[i]
+		if w != &f.waiter {
+			newWaiters = append(newWaiters, *w)
+		}
+	}
+
+	f.fakeClock.waiters = newWaiters
+
+	return !f.waiter.fired
+}
+
+// Reset resets the timer to the fake clock's "now" + d. It returns true if the timer has not yet
+// fired, or false otherwise.
+func (f *fakeTimer) Reset(d time.Duration) bool {
+	f.fakeClock.lock.Lock()
+	defer f.fakeClock.lock.Unlock()
+
+	active := !f.waiter.fired
+
+	f.waiter.fired = false
+	f.waiter.targetTime = f.fakeClock.time.Add(d)
+
+	return active
+}
+
+type Ticker interface {
+	C() <-chan time.Time
+	Stop()
+}
+
+type realTicker struct {
+	ticker *time.Ticker
+}
+
+func (t *realTicker) C() <-chan time.Time {
+	return t.ticker.C
+}
+
+func (t *realTicker) Stop() {
+	t.ticker.Stop()
+}
+
+type fakeTicker struct {
+	c <-chan time.Time
+}
+
+func (t *fakeTicker) C() <-chan time.Time {
+	return t.c
+}
+
+func (t *fakeTicker) Stop() {
+}
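+
+// exampleFakeAfter is an illustrative sketch added by the editor, not part of
+// the upstream file. It shows how a test can drive time deterministically:
+// nothing fires until Step moves the fake clock past the waiter's deadline.
+func exampleFakeAfter() bool {
+	c := NewFakeClock(time.Now())
+	ch := c.After(5 * time.Second)
+	c.Step(10 * time.Second) // fires the waiter registered by After
+	select {
+	case <-ch:
+		return true
+	default:
+		return false
+	}
+}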
diff --git a/vendor/k8s.io/apimachinery/pkg/util/errors/doc.go b/vendor/k8s.io/apimachinery/pkg/util/errors/doc.go
new file mode 100644
index 0000000..5d4d625
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/errors/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package errors implements various utility functions and types around errors.
+package errors // import "k8s.io/apimachinery/pkg/util/errors"
diff --git a/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go
new file mode 100644
index 0000000..88e9376
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go
@@ -0,0 +1,201 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package errors
+
+import (
+	"errors"
+	"fmt"
+)
+
+// MessageCountMap contains occurrence for each error message.
+type MessageCountMap map[string]int
+
+// Aggregate represents an object that contains multiple errors, but does not
+// necessarily have singular semantic meaning.
+type Aggregate interface {
+	error
+	Errors() []error
+}
+
+// NewAggregate converts a slice of errors into an Aggregate interface, which
+// is itself an implementation of the error interface.  If the slice is empty,
+// this returns nil.
+// It checks whether any element of the input error list is nil, to avoid a
+// nil pointer panic when Error() is called.
+func NewAggregate(errlist []error) Aggregate {
+	if len(errlist) == 0 {
+		return nil
+	}
+	// Drop any nil elements from the input error list
+	var errs []error
+	for _, e := range errlist {
+		if e != nil {
+			errs = append(errs, e)
+		}
+	}
+	if len(errs) == 0 {
+		return nil
+	}
+	return aggregate(errs)
+}
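+
+// Illustrative sketch (not part of upstream): collecting several errors into
+// one error value; nil entries are dropped:
+//
+//	agg := NewAggregate([]error{errors.New("a"), nil, errors.New("b")})
+//	fmt.Println(agg.Error()) // prints "[a, b]"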
+
+// This helper implements the error and Aggregate interfaces.  Keeping it private
+// prevents people from making an aggregate of 0 errors, which is not
+// an error, but does satisfy the error interface.
+type aggregate []error
+
+// Error is part of the error interface.
+func (agg aggregate) Error() string {
+	if len(agg) == 0 {
+		// This should never happen, really.
+		return ""
+	}
+	if len(agg) == 1 {
+		return agg[0].Error()
+	}
+	result := fmt.Sprintf("[%s", agg[0].Error())
+	for i := 1; i < len(agg); i++ {
+		result += fmt.Sprintf(", %s", agg[i].Error())
+	}
+	result += "]"
+	return result
+}
+
+// Errors is part of the Aggregate interface.
+func (agg aggregate) Errors() []error {
+	return []error(agg)
+}
+
+// Matcher is used to match errors.  Returns true if the error matches.
+type Matcher func(error) bool
+
+// FilterOut removes all errors that match any of the matchers from the input
+// error.  If the input is a singular error, only that error is tested.  If the
+// input implements the Aggregate interface, the list of errors will be
+// processed recursively.
+//
+// This can be used, for example, to remove known-OK errors (such as io.EOF or
+// os.ErrNotExist) from a list of errors.
+func FilterOut(err error, fns ...Matcher) error {
+	if err == nil {
+		return nil
+	}
+	if agg, ok := err.(Aggregate); ok {
+		return NewAggregate(filterErrors(agg.Errors(), fns...))
+	}
+	if !matchesError(err, fns...) {
+		return err
+	}
+	return nil
+}
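+
+// Illustrative sketch (not part of upstream), assuming the io package for the
+// sentinel error being filtered:
+//
+//	isEOF := func(err error) bool { return err == io.EOF }
+//	remaining := FilterOut(err, isEOF)
+//	// remaining is nil when err (and every nested error) was io.EOF.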
+
+// matchesError returns true if any Matcher returns true
+func matchesError(err error, fns ...Matcher) bool {
+	for _, fn := range fns {
+		if fn(err) {
+			return true
+		}
+	}
+	return false
+}
+
+// filterErrors returns any errors (or nested errors, if the list contains
+// nested Errors) for which all fns return false. If no errors
+// remain, an empty list is returned. The resulting slice has all
+// nested slices flattened as a side effect.
+func filterErrors(list []error, fns ...Matcher) []error {
+	result := []error{}
+	for _, err := range list {
+		r := FilterOut(err, fns...)
+		if r != nil {
+			result = append(result, r)
+		}
+	}
+	return result
+}
+
+// Flatten takes an Aggregate, which may hold other Aggregates in arbitrary
+// nesting, and flattens them all into a single Aggregate, recursively.
+func Flatten(agg Aggregate) Aggregate {
+	result := []error{}
+	if agg == nil {
+		return nil
+	}
+	for _, err := range agg.Errors() {
+		if a, ok := err.(Aggregate); ok {
+			r := Flatten(a)
+			if r != nil {
+				result = append(result, r.Errors()...)
+			}
+		} else {
+			if err != nil {
+				result = append(result, err)
+			}
+		}
+	}
+	return NewAggregate(result)
+}
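+
+// Illustrative sketch (not part of upstream): nested aggregates collapse into
+// a single flat list:
+//
+//	inner := NewAggregate([]error{errors.New("a"), errors.New("b")})
+//	outer := NewAggregate([]error{inner, errors.New("c")})
+//	flat := Flatten(outer)
+//	// flat.Errors() holds the three leaf errors a, b, and c.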
+
+// CreateAggregateFromMessageCountMap converts a MessageCountMap to an Aggregate.
+func CreateAggregateFromMessageCountMap(m MessageCountMap) Aggregate {
+	if m == nil {
+		return nil
+	}
+	result := make([]error, 0, len(m))
+	for errStr, count := range m {
+		var countStr string
+		if count > 1 {
+			countStr = fmt.Sprintf(" (repeated %v times)", count)
+		}
+		result = append(result, fmt.Errorf("%v%v", errStr, countStr))
+	}
+	return NewAggregate(result)
+}
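+
+// Illustrative sketch (not part of upstream):
+//
+//	m := MessageCountMap{"timed out": 3, "not found": 1}
+//	agg := CreateAggregateFromMessageCountMap(m)
+//	// agg contains "timed out (repeated 3 times)" and "not found".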
+
+// Reduce will return err unless err is an Aggregate: an aggregate of exactly
+// one item reduces to that item, and an empty aggregate reduces to nil.
+func Reduce(err error) error {
+	if agg, ok := err.(Aggregate); ok && err != nil {
+		switch len(agg.Errors()) {
+		case 1:
+			return agg.Errors()[0]
+		case 0:
+			return nil
+		}
+	}
+	return err
+}
+
+// AggregateGoroutines runs the provided functions in parallel, stuffing all
+// non-nil errors into the returned Aggregate.
+// Returns nil if all the functions complete successfully.
+func AggregateGoroutines(funcs ...func() error) Aggregate {
+	errChan := make(chan error, len(funcs))
+	for _, f := range funcs {
+		go func(f func() error) { errChan <- f() }(f)
+	}
+	errs := make([]error, 0)
+	for i := 0; i < cap(errChan); i++ {
+		if err := <-errChan; err != nil {
+			errs = append(errs, err)
+		}
+	}
+	return NewAggregate(errs)
+}
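+
+// Illustrative sketch (not part of upstream); checkA and checkB are
+// hypothetical func() error values:
+//
+//	if err := AggregateGoroutines(checkA, checkB); err != nil {
+//		// err.Errors() holds every non-nil result, in completion order.
+//	}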
+
+// ErrPreconditionViolated is returned when the precondition is violated
+var ErrPreconditionViolated = errors.New("precondition is violated")
diff --git a/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go b/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go
new file mode 100644
index 0000000..066680f
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go
@@ -0,0 +1,167 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package framer implements simple frame decoding techniques for an io.ReadCloser
+package framer
+
+import (
+	"encoding/binary"
+	"encoding/json"
+	"io"
+)
+
+type lengthDelimitedFrameWriter struct {
+	w io.Writer
+	h [4]byte
+}
+
+func NewLengthDelimitedFrameWriter(w io.Writer) io.Writer {
+	return &lengthDelimitedFrameWriter{w: w}
+}
+
+// Write writes a single frame to the nested writer, prepending it with the
+// length in bytes of data (as a 4-byte, big-endian uint32).
+func (w *lengthDelimitedFrameWriter) Write(data []byte) (int, error) {
+	binary.BigEndian.PutUint32(w.h[:], uint32(len(data)))
+	n, err := w.w.Write(w.h[:])
+	if err != nil {
+		return 0, err
+	}
+	if n != len(w.h) {
+		return 0, io.ErrShortWrite
+	}
+	return w.w.Write(data)
+}
+
+type lengthDelimitedFrameReader struct {
+	r         io.ReadCloser
+	remaining int
+}
+
+// NewLengthDelimitedFrameReader returns an io.Reader that will decode length-prefixed
+// frames off of a stream.
+//
+// The protocol is:
+//
+//   stream: message ...
+//   message: prefix body
+//   prefix: 4 byte uint32 in BigEndian order, denotes length of body
+//   body: bytes (0..prefix)
+//
+// If the buffer passed to Read is not long enough to contain an entire frame, io.ErrShortBuffer
+// will be returned along with the number of bytes read.
+func NewLengthDelimitedFrameReader(r io.ReadCloser) io.ReadCloser {
+	return &lengthDelimitedFrameReader{r: r}
+}
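+
+// Illustrative round-trip sketch (not part of upstream), assuming the bytes
+// and io/ioutil packages:
+//
+//	var buf bytes.Buffer
+//	w := NewLengthDelimitedFrameWriter(&buf)
+//	w.Write([]byte("hello")) // emits 0x00000005 followed by "hello"
+//	r := NewLengthDelimitedFrameReader(ioutil.NopCloser(&buf))
+//	frame := make([]byte, 16)
+//	n, _ := r.Read(frame) // n == 5, frame[:n] == "hello"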
+
+// Read attempts to read an entire frame into data. If that is not possible, io.ErrShortBuffer
+// is returned and subsequent calls will attempt to read the last frame. A frame is complete when
+// err is nil.
+func (r *lengthDelimitedFrameReader) Read(data []byte) (int, error) {
+	if r.remaining <= 0 {
+		header := [4]byte{}
+		n, err := io.ReadAtLeast(r.r, header[:4], 4)
+		if err != nil {
+			return 0, err
+		}
+		if n != 4 {
+			return 0, io.ErrUnexpectedEOF
+		}
+		frameLength := int(binary.BigEndian.Uint32(header[:]))
+		r.remaining = frameLength
+	}
+
+	expect := r.remaining
+	max := expect
+	if max > len(data) {
+		max = len(data)
+	}
+	n, err := io.ReadAtLeast(r.r, data[:max], int(max))
+	r.remaining -= n
+	if err == io.ErrShortBuffer || r.remaining > 0 {
+		return n, io.ErrShortBuffer
+	}
+	if err != nil {
+		return n, err
+	}
+	if n != expect {
+		return n, io.ErrUnexpectedEOF
+	}
+
+	return n, nil
+}
+
+func (r *lengthDelimitedFrameReader) Close() error {
+	return r.r.Close()
+}
+
+type jsonFrameReader struct {
+	r         io.ReadCloser
+	decoder   *json.Decoder
+	remaining []byte
+}
+
+// NewJSONFramedReader returns an io.Reader that will decode individual JSON objects off
+// of a wire.
+//
+// The boundaries between each frame are valid JSON objects. A JSON parsing error will terminate
+// the read.
+func NewJSONFramedReader(r io.ReadCloser) io.ReadCloser {
+	return &jsonFrameReader{
+		r:       r,
+		decoder: json.NewDecoder(r),
+	}
+}
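+
+// Illustrative sketch (not part of upstream), assuming the strings and
+// io/ioutil packages: consecutive objects come back one frame at a time:
+//
+//	r := NewJSONFramedReader(ioutil.NopCloser(strings.NewReader(`{"a":1}{"b":2}`)))
+//	buf := make([]byte, 32)
+//	n, _ := r.Read(buf) // buf[:n] == `{"a":1}`
+//	n, _ = r.Read(buf)  // buf[:n] == `{"b":2}`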
+
+// Read decodes the next JSON object in the stream into data, or returns an error. If data is
+// too small to hold the object, io.ErrShortBuffer is returned and the remainder is returned by
+// subsequent calls.
+func (r *jsonFrameReader) Read(data []byte) (int, error) {
+	// Return whatever remaining data exists from an in progress frame
+	if n := len(r.remaining); n > 0 {
+		if n <= len(data) {
+			data = append(data[0:0], r.remaining...)
+			r.remaining = nil
+			return n, nil
+		}
+
+		n = len(data)
+		data = append(data[0:0], r.remaining[:n]...)
+		r.remaining = r.remaining[n:]
+		return n, io.ErrShortBuffer
+	}
+
+	// Decoding into a json.RawMessage appends to the slice: we reset it to zero length so the
+	// decoder either writes into data's backing array, or, if the object is larger, allocates
+	// a new, larger array.
+	n := len(data)
+	m := json.RawMessage(data[:0])
+	if err := r.decoder.Decode(&m); err != nil {
+		return 0, err
+	}
+
+	// If capacity of data is less than length of the message, decoder will allocate a new slice
+	// and set m to it, which means we need to copy the partial result back into data and preserve
+	// the remaining result for subsequent reads.
+	if len(m) > n {
+		data = append(data[0:0], m[:n]...)
+		r.remaining = m[n:]
+		return n, io.ErrShortBuffer
+	}
+	return len(m), nil
+}
+
+func (r *jsonFrameReader) Close() error {
+	return r.r.Close()
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto
new file mode 100644
index 0000000..e79fb9e
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto
@@ -0,0 +1,43 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.apimachinery.pkg.util.intstr;
+
+// Package-wide variables from generator "generated".
+option go_package = "intstr";
+
+// IntOrString is a type that can hold an int32 or a string.  When used in
+// JSON or YAML marshalling and unmarshalling, it produces or consumes the
+// inner type.  This allows you to have, for example, a JSON field that can
+// accept a name or number.
+// TODO: Rename to Int32OrString
+//
+// +protobuf=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+// +k8s:openapi-gen=true
+message IntOrString {
+  optional int64 type = 1;
+
+  optional int32 intVal = 2;
+
+  optional string strVal = 3;
+}
+
diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go
new file mode 100644
index 0000000..5b26ed2
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go
@@ -0,0 +1,184 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package intstr
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"math"
+	"runtime/debug"
+	"strconv"
+	"strings"
+
+	"github.com/google/gofuzz"
+	"k8s.io/klog"
+)
+
+// IntOrString is a type that can hold an int32 or a string.  When used in
+// JSON or YAML marshalling and unmarshalling, it produces or consumes the
+// inner type.  This allows you to have, for example, a JSON field that can
+// accept a name or number.
+// TODO: Rename to Int32OrString
+//
+// +protobuf=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+// +k8s:openapi-gen=true
+type IntOrString struct {
+	Type   Type   `protobuf:"varint,1,opt,name=type,casttype=Type"`
+	IntVal int32  `protobuf:"varint,2,opt,name=intVal"`
+	StrVal string `protobuf:"bytes,3,opt,name=strVal"`
+}
+
+// Type represents the stored type of IntOrString.
+type Type int
+
+const (
+	Int    Type = iota // The IntOrString holds an int.
+	String             // The IntOrString holds a string.
+)
+
+// FromInt creates an IntOrString object with an int32 value. It is
+// your responsibility not to call this method with a value that does
+// not fit in an int32.
+// TODO: convert to (val int32)
+func FromInt(val int) IntOrString {
+	if val > math.MaxInt32 || val < math.MinInt32 {
+		klog.Errorf("value: %d overflows int32\n%s\n", val, debug.Stack())
+	}
+	return IntOrString{Type: Int, IntVal: int32(val)}
+}
+
+// FromString creates an IntOrString object with a string value.
+func FromString(val string) IntOrString {
+	return IntOrString{Type: String, StrVal: val}
+}
+
+// Parse returns the given string as an IntOrString, converting it to an
+// integer if possible and falling back to a string value otherwise.
+func Parse(val string) IntOrString {
+	i, err := strconv.Atoi(val)
+	if err != nil {
+		return FromString(val)
+	}
+	return FromInt(i)
+}
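+
+// Illustrative sketch (not part of upstream): the three constructors and the
+// JSON each value produces:
+//
+//	FromInt(8080)      // marshals as 8080
+//	FromString("http") // marshals as "http"
+//	Parse("9090")      // numeric string: equivalent to FromInt(9090)
+//	Parse("web")       // non-numeric: equivalent to FromString("web")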
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (intstr *IntOrString) UnmarshalJSON(value []byte) error {
+	if value[0] == '"' {
+		intstr.Type = String
+		return json.Unmarshal(value, &intstr.StrVal)
+	}
+	intstr.Type = Int
+	return json.Unmarshal(value, &intstr.IntVal)
+}
+
+// String returns the string value, or the Itoa of the int value.
+func (intstr *IntOrString) String() string {
+	if intstr.Type == String {
+		return intstr.StrVal
+	}
+	return strconv.Itoa(intstr.IntValue())
+}
+
+// IntValue returns the IntVal if type Int, or if
+// it is a String, will attempt a conversion to int.
+func (intstr *IntOrString) IntValue() int {
+	if intstr.Type == String {
+		i, _ := strconv.Atoi(intstr.StrVal)
+		return i
+	}
+	return int(intstr.IntVal)
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (intstr IntOrString) MarshalJSON() ([]byte, error) {
+	switch intstr.Type {
+	case Int:
+		return json.Marshal(intstr.IntVal)
+	case String:
+		return json.Marshal(intstr.StrVal)
+	default:
+		return []byte{}, fmt.Errorf("impossible IntOrString.Type")
+	}
+}
+
+// OpenAPISchemaType is used by the kube-openapi generator when constructing
+// the OpenAPI spec of this type.
+//
+// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators
+func (_ IntOrString) OpenAPISchemaType() []string { return []string{"string"} }
+
+// OpenAPISchemaFormat is used by the kube-openapi generator when constructing
+// the OpenAPI spec of this type.
+func (_ IntOrString) OpenAPISchemaFormat() string { return "int-or-string" }
+
+func (intstr *IntOrString) Fuzz(c fuzz.Continue) {
+	if intstr == nil {
+		return
+	}
+	if c.RandBool() {
+		intstr.Type = Int
+		c.Fuzz(&intstr.IntVal)
+		intstr.StrVal = ""
+	} else {
+		intstr.Type = String
+		intstr.IntVal = 0
+		c.Fuzz(&intstr.StrVal)
+	}
+}
+
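+// ValueOrDefault returns intOrPercent if it is non-nil, otherwise a pointer to defaultValue.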
+func ValueOrDefault(intOrPercent *IntOrString, defaultValue IntOrString) *IntOrString {
+	if intOrPercent == nil {
+		return &defaultValue
+	}
+	return intOrPercent
+}
+
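+// GetValueFromIntOrPercent resolves intOrPercent against total: a plain int is
+// returned as-is, and an "N%" string is resolved to N percent of total,
+// rounding up or down according to roundUp.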
+func GetValueFromIntOrPercent(intOrPercent *IntOrString, total int, roundUp bool) (int, error) {
+	if intOrPercent == nil {
+		return 0, errors.New("nil value for IntOrString")
+	}
+	value, isPercent, err := getIntOrPercentValue(intOrPercent)
+	if err != nil {
+		return 0, fmt.Errorf("invalid value for IntOrString: %v", err)
+	}
+	if isPercent {
+		if roundUp {
+			value = int(math.Ceil(float64(value) * (float64(total)) / 100))
+		} else {
+			value = int(math.Floor(float64(value) * (float64(total)) / 100))
+		}
+	}
+	return value, nil
+}
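+
+// Illustrative sketch (not part of upstream): resolving a percentage against
+// a total, e.g. for a rolling-update style "maxUnavailable" field:
+//
+//	v := FromString("25%")
+//	n, _ := GetValueFromIntOrPercent(&v, 10, true)  // ceil(10 * 25%) == 3
+//	m, _ := GetValueFromIntOrPercent(&v, 10, false) // floor(10 * 25%) == 2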
+
+func getIntOrPercentValue(intOrStr *IntOrString) (int, bool, error) {
+	switch intOrStr.Type {
+	case Int:
+		return intOrStr.IntValue(), false, nil
+	case String:
+		s := strings.Replace(intOrStr.StrVal, "%", "", -1)
+		v, err := strconv.Atoi(s)
+		if err != nil {
+			return 0, false, fmt.Errorf("invalid value %q: %v", intOrStr.StrVal, err)
+		}
+		return int(v), true, nil
+	}
+	return 0, false, fmt.Errorf("invalid type: neither int nor percentage")
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/json/json.go b/vendor/k8s.io/apimachinery/pkg/util/json/json.go
new file mode 100644
index 0000000..10c8cb8
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/json/json.go
@@ -0,0 +1,119 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package json
+
+import (
+	"bytes"
+	"encoding/json"
+	"io"
+)
+
+// NewEncoder delegates to json.NewEncoder
+// It is only here so this package can be a drop-in for common encoding/json uses
+func NewEncoder(w io.Writer) *json.Encoder {
+	return json.NewEncoder(w)
+}
+
+// Marshal delegates to json.Marshal
+// It is only here so this package can be a drop-in for common encoding/json uses
+func Marshal(v interface{}) ([]byte, error) {
+	return json.Marshal(v)
+}
+
+// Unmarshal unmarshals the given data.
+// If v is a *map[string]interface{} or a *[]interface{}, numbers are
+// converted to int64 or float64 rather than always to float64.
+func Unmarshal(data []byte, v interface{}) error {
+	switch v := v.(type) {
+	case *map[string]interface{}:
+		// Build a decoder from the given data
+		decoder := json.NewDecoder(bytes.NewBuffer(data))
+		// Preserve numbers, rather than casting to float64 automatically
+		decoder.UseNumber()
+		// Run the decode
+		if err := decoder.Decode(v); err != nil {
+			return err
+		}
+		// If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64
+		return convertMapNumbers(*v)
+
+	case *[]interface{}:
+		// Build a decoder from the given data
+		decoder := json.NewDecoder(bytes.NewBuffer(data))
+		// Preserve numbers, rather than casting to float64 automatically
+		decoder.UseNumber()
+		// Run the decode
+		if err := decoder.Decode(v); err != nil {
+			return err
+		}
+		// If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64
+		return convertSliceNumbers(*v)
+
+	default:
+		return json.Unmarshal(data, v)
+	}
+}
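+
+// Illustrative sketch (not part of upstream): integral numbers survive as
+// int64 when decoding into a generic map:
+//
+//	var m map[string]interface{}
+//	_ = Unmarshal([]byte(`{"n": 3, "f": 1.5}`), &m)
+//	// m["n"] == int64(3) and m["f"] == float64(1.5); plain encoding/json
+//	// would decode both as float64.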
+
+// convertMapNumbers traverses the map, converting any json.Number values to int64 or float64.
+// values which are map[string]interface{} or []interface{} are recursively visited
+func convertMapNumbers(m map[string]interface{}) error {
+	var err error
+	for k, v := range m {
+		switch v := v.(type) {
+		case json.Number:
+			m[k], err = convertNumber(v)
+		case map[string]interface{}:
+			err = convertMapNumbers(v)
+		case []interface{}:
+			err = convertSliceNumbers(v)
+		}
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// convertSliceNumbers traverses the slice, converting any json.Number values to int64 or float64.
+// values which are map[string]interface{} or []interface{} are recursively visited
+func convertSliceNumbers(s []interface{}) error {
+	var err error
+	for i, v := range s {
+		switch v := v.(type) {
+		case json.Number:
+			s[i], err = convertNumber(v)
+		case map[string]interface{}:
+			err = convertMapNumbers(v)
+		case []interface{}:
+			err = convertSliceNumbers(v)
+		}
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// convertNumber converts a json.Number to an int64 or float64, or returns an error
+func convertNumber(n json.Number) (interface{}, error) {
+	// Attempt to convert to an int64 first
+	if i, err := n.Int64(); err == nil {
+		return i, nil
+	}
+	// Return a float64 (default json.Decode() behavior)
+	// An overflow will return an error
+	return n.Float64()
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/naming/from_stack.go b/vendor/k8s.io/apimachinery/pkg/util/naming/from_stack.go
new file mode 100644
index 0000000..2965d5a
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/naming/from_stack.go
@@ -0,0 +1,93 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package naming
+
+import (
+	"fmt"
+	"regexp"
+	goruntime "runtime"
+	"runtime/debug"
+	"strconv"
+	"strings"
+)
+
+// GetNameFromCallsite walks back through the call stack until it finds a caller from outside of
+// the ignoredPackages. It returns a shortpath/filename:line string to aid in identifying the
+// caller, for example when a reflector starts logging.
+func GetNameFromCallsite(ignoredPackages ...string) string {
+	name := "????"
+	const maxStack = 10
+	for i := 1; i < maxStack; i++ {
+		_, file, line, ok := goruntime.Caller(i)
+		if !ok {
+			file, line, ok = extractStackCreator()
+			if !ok {
+				break
+			}
+			i += maxStack
+		}
+		if hasPackage(file, append(ignoredPackages, "/runtime/asm_")) {
+			continue
+		}
+
+		file = trimPackagePrefix(file)
+		name = fmt.Sprintf("%s:%d", file, line)
+		break
+	}
+	return name
+}
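+
+// Illustrative sketch (not part of upstream); the package path and the result
+// shown are hypothetical:
+//
+//	name := GetNameFromCallsite("k8s.io/client-go/tools/cache")
+//	// name is e.g. "pkg/controller/controller.go:42", the first stack frame
+//	// found outside the ignored package.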
+
+// hasPackage returns true if the file is in one of the ignored packages.
+func hasPackage(file string, ignoredPackages []string) bool {
+	for _, ignoredPackage := range ignoredPackages {
+		if strings.Contains(file, ignoredPackage) {
+			return true
+		}
+	}
+	return false
+}
+
+// trimPackagePrefix trims the leading portion of a file path up to a
+// well-known marker (/vendor/, /src/, or pkg/).
+func trimPackagePrefix(file string) string {
+	if l := strings.LastIndex(file, "/vendor/"); l >= 0 {
+		return file[l+len("/vendor/"):]
+	}
+	if l := strings.LastIndex(file, "/src/"); l >= 0 {
+		return file[l+5:]
+	}
+	if l := strings.LastIndex(file, "/pkg/"); l >= 0 {
+		return file[l+1:]
+	}
+	return file
+}
+
+var stackCreator = regexp.MustCompile(`(?m)^created by (.*)\n\s+(.*):(\d+) \+0x[[:xdigit:]]+$`)
+
+// extractStackCreator retrieves the goroutine file and line that launched this stack. Returns false
+// if the creator cannot be located.
+// TODO: Go does not expose this via runtime https://github.com/golang/go/issues/11440
+func extractStackCreator() (string, int, bool) {
+	stack := debug.Stack()
+	matches := stackCreator.FindStringSubmatch(string(stack))
+	if matches == nil || len(matches) != 4 {
+		return "", 0, false
+	}
+	line, err := strconv.Atoi(matches[3])
+	if err != nil {
+		return "", 0, false
+	}
+	return matches[2], line, true
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/http.go b/vendor/k8s.io/apimachinery/pkg/util/net/http.go
new file mode 100644
index 0000000..155667c
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/net/http.go
@@ -0,0 +1,442 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package net
+
+import (
+	"bufio"
+	"bytes"
+	"context"
+	"crypto/tls"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"net/url"
+	"os"
+	"path"
+	"strconv"
+	"strings"
+
+	"golang.org/x/net/http2"
+	"k8s.io/klog"
+)
+
+// JoinPreservingTrailingSlash does a path.Join of the specified elements,
+// preserving any trailing slash on the last non-empty segment
+func JoinPreservingTrailingSlash(elem ...string) string {
+	// do the basic path join
+	result := path.Join(elem...)
+
+	// find the last non-empty segment
+	for i := len(elem) - 1; i >= 0; i-- {
+		if len(elem[i]) > 0 {
+			// if the last segment ended in a slash, ensure our result does as well
+			if strings.HasSuffix(elem[i], "/") && !strings.HasSuffix(result, "/") {
+				result += "/"
+			}
+			break
+		}
+	}
+
+	return result
+}
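+
+// Illustrative sketch (not part of upstream):
+//
+//	JoinPreservingTrailingSlash("a", "b")  // "a/b"
+//	JoinPreservingTrailingSlash("a", "b/") // "a/b/" (plain path.Join would yield "a/b")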
+
+// IsProbableEOF returns true if the given error resembles a connection termination
+// scenario that would justify assuming that the watch is empty.
+// These errors are what the Go http stack returns to us for general connection
+// closures (strongly correlated with a normal disconnect); callers that need to
+// distinguish a probable "this is disconnected" condition from other errors
+// should use this method.
+func IsProbableEOF(err error) bool {
+	if err == nil {
+		return false
+	}
+	if uerr, ok := err.(*url.Error); ok {
+		err = uerr.Err
+	}
+	switch {
+	case err == io.EOF:
+		return true
+	case err.Error() == "http: can't write HTTP request on broken connection":
+		return true
+	case strings.Contains(err.Error(), "connection reset by peer"):
+		return true
+	case strings.Contains(strings.ToLower(err.Error()), "use of closed network connection"):
+		return true
+	}
+	return false
+}
+
+var defaultTransport = http.DefaultTransport.(*http.Transport)
+
+// SetOldTransportDefaults applies the defaults from http.DefaultTransport
+// for the Proxy, Dial, and TLSHandshakeTimeout fields if unset
+func SetOldTransportDefaults(t *http.Transport) *http.Transport {
+	if t.Proxy == nil || isDefault(t.Proxy) {
+		// http.ProxyFromEnvironment doesn't respect CIDRs and that makes it impossible to exclude things like pod and service IPs from proxy settings
+		// ProxierWithNoProxyCIDR allows CIDR rules in NO_PROXY
+		t.Proxy = NewProxierWithNoProxyCIDR(http.ProxyFromEnvironment)
+	}
+	// If no custom dialer is set, use the default context dialer
+	if t.DialContext == nil && t.Dial == nil {
+		t.DialContext = defaultTransport.DialContext
+	}
+	if t.TLSHandshakeTimeout == 0 {
+		t.TLSHandshakeTimeout = defaultTransport.TLSHandshakeTimeout
+	}
+	return t
+}
+
+// SetTransportDefaults applies the defaults from http.DefaultTransport
+// for the Proxy, Dial, and TLSHandshakeTimeout fields if unset
+func SetTransportDefaults(t *http.Transport) *http.Transport {
+	t = SetOldTransportDefaults(t)
+	// Allow clients to disable http2 if needed.
+	if s := os.Getenv("DISABLE_HTTP2"); len(s) > 0 {
+		klog.Infof("HTTP2 has been explicitly disabled")
+	} else {
+		if err := http2.ConfigureTransport(t); err != nil {
+			klog.Warningf("Transport failed http2 configuration: %v", err)
+		}
+	}
+	return t
+}
+
+type RoundTripperWrapper interface {
+	http.RoundTripper
+	WrappedRoundTripper() http.RoundTripper
+}
+
+type DialFunc func(ctx context.Context, net, addr string) (net.Conn, error)
+
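+// DialerFor returns the DialFunc used by the given transport, unwrapping any
+// RoundTripperWrapper layers. It returns nil if no custom dialer is
+// configured, and an error for unknown transport types.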
+func DialerFor(transport http.RoundTripper) (DialFunc, error) {
+	if transport == nil {
+		return nil, nil
+	}
+
+	switch transport := transport.(type) {
+	case *http.Transport:
+		// transport.DialContext takes precedence over transport.Dial
+		if transport.DialContext != nil {
+			return transport.DialContext, nil
+		}
+		// adapt transport.Dial to the DialWithContext signature
+		if transport.Dial != nil {
+			return func(ctx context.Context, net, addr string) (net.Conn, error) {
+				return transport.Dial(net, addr)
+			}, nil
+		}
+		// otherwise return nil
+		return nil, nil
+	case RoundTripperWrapper:
+		return DialerFor(transport.WrappedRoundTripper())
+	default:
+		return nil, fmt.Errorf("unknown transport type: %T", transport)
+	}
+}
+
+type TLSClientConfigHolder interface {
+	TLSClientConfig() *tls.Config
+}
+
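+// TLSClientConfig returns the TLS configuration of the given transport,
+// unwrapping any RoundTripperWrapper layers. It returns an error for unknown
+// transport types.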
+func TLSClientConfig(transport http.RoundTripper) (*tls.Config, error) {
+	if transport == nil {
+		return nil, nil
+	}
+
+	switch transport := transport.(type) {
+	case *http.Transport:
+		return transport.TLSClientConfig, nil
+	case TLSClientConfigHolder:
+		return transport.TLSClientConfig(), nil
+	case RoundTripperWrapper:
+		return TLSClientConfig(transport.WrappedRoundTripper())
+	default:
+		return nil, fmt.Errorf("unknown transport type: %T", transport)
+	}
+}
+
+func FormatURL(scheme string, host string, port int, path string) *url.URL {
+	return &url.URL{
+		Scheme: scheme,
+		Host:   net.JoinHostPort(host, strconv.Itoa(port)),
+		Path:   path,
+	}
+}
+
+func GetHTTPClient(req *http.Request) string {
+	if ua := req.UserAgent(); len(ua) != 0 {
+		return ua
+	}
+	return "unknown"
+}
+
+// SourceIPs splits the comma separated X-Forwarded-For header or returns the X-Real-Ip header or req.RemoteAddr,
+// in that order, ignoring invalid IPs. It returns nil if all of these are empty or invalid.
+func SourceIPs(req *http.Request) []net.IP {
+	hdr := req.Header
+	// First check the X-Forwarded-For header for requests via proxy.
+	hdrForwardedFor := hdr.Get("X-Forwarded-For")
+	forwardedForIPs := []net.IP{}
+	if hdrForwardedFor != "" {
+		// X-Forwarded-For can be a csv of IPs in case of multiple proxies.
+		// Use the first valid one.
+		parts := strings.Split(hdrForwardedFor, ",")
+		for _, part := range parts {
+			ip := net.ParseIP(strings.TrimSpace(part))
+			if ip != nil {
+				forwardedForIPs = append(forwardedForIPs, ip)
+			}
+		}
+	}
+	if len(forwardedForIPs) > 0 {
+		return forwardedForIPs
+	}
+
+	// Try the X-Real-Ip header.
+	hdrRealIp := hdr.Get("X-Real-Ip")
+	if hdrRealIp != "" {
+		ip := net.ParseIP(hdrRealIp)
+		if ip != nil {
+			return []net.IP{ip}
+		}
+	}
+
+	// Fallback to Remote Address in request, which will give the correct client IP when there is no proxy.
+	// Remote Address in Go's HTTP server is in the form host:port so we need to split that first.
+	host, _, err := net.SplitHostPort(req.RemoteAddr)
+	if err == nil {
+		if remoteIP := net.ParseIP(host); remoteIP != nil {
+			return []net.IP{remoteIP}
+		}
+	}
+
+	// Fallback if Remote Address was just IP.
+	if remoteIP := net.ParseIP(req.RemoteAddr); remoteIP != nil {
+		return []net.IP{remoteIP}
+	}
+
+	return nil
+}
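+
+// Illustrative sketch (not part of upstream): header precedence, assuming a
+// *http.Request named req:
+//
+//	req.Header.Set("X-Forwarded-For", "10.0.0.1, 10.0.0.2")
+//	req.Header.Set("X-Real-Ip", "10.0.0.3")
+//	ips := SourceIPs(req) // [10.0.0.1 10.0.0.2]; X-Real-Ip is only a fallback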
+
+// GetClientIP extracts and returns the client's IP from the given request.
+// It looks at the X-Forwarded-For header, the X-Real-Ip header, and request.RemoteAddr, in that order.
+// Returns nil if none of them yields a valid IP.
+func GetClientIP(req *http.Request) net.IP {
+	ips := SourceIPs(req)
+	if len(ips) == 0 {
+		return nil
+	}
+	return ips[0]
+}
+
+// AppendForwardedForHeader prepares the X-Forwarded-For header for another forwarding hop by
+// appending the previous sender's IP address to the X-Forwarded-For chain.
+func AppendForwardedForHeader(req *http.Request) {
+	// Copied from net/http/httputil/reverseproxy.go:
+	if clientIP, _, err := net.SplitHostPort(req.RemoteAddr); err == nil {
+		// If we aren't the first proxy retain prior
+		// X-Forwarded-For information as a comma+space
+		// separated list and fold multiple headers into one.
+		if prior, ok := req.Header["X-Forwarded-For"]; ok {
+			clientIP = strings.Join(prior, ", ") + ", " + clientIP
+		}
+		req.Header.Set("X-Forwarded-For", clientIP)
+	}
+}
+
+var defaultProxyFuncPointer = fmt.Sprintf("%p", http.ProxyFromEnvironment)
+
+// isDefault checks to see if the transportProxierFunc is pointing to the default one
+func isDefault(transportProxier func(*http.Request) (*url.URL, error)) bool {
+	transportProxierPointer := fmt.Sprintf("%p", transportProxier)
+	return transportProxierPointer == defaultProxyFuncPointer
+}
+
+// NewProxierWithNoProxyCIDR constructs a Proxier function that respects CIDRs in NO_PROXY and delegates if
+// no matching CIDRs are found
+func NewProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error)) func(req *http.Request) (*url.URL, error) {
+	// we wrap the default method, so we only need to perform our check if the NO_PROXY (or no_proxy) envvar has a CIDR in it
+	noProxyEnv := os.Getenv("NO_PROXY")
+	if noProxyEnv == "" {
+		noProxyEnv = os.Getenv("no_proxy")
+	}
+	noProxyRules := strings.Split(noProxyEnv, ",")
+
+	cidrs := []*net.IPNet{}
+	for _, noProxyRule := range noProxyRules {
+		_, cidr, _ := net.ParseCIDR(noProxyRule)
+		if cidr != nil {
+			cidrs = append(cidrs, cidr)
+		}
+	}
+
+	if len(cidrs) == 0 {
+		return delegate
+	}
+
+	return func(req *http.Request) (*url.URL, error) {
+		ip := net.ParseIP(req.URL.Hostname())
+		if ip == nil {
+			return delegate(req)
+		}
+
+		for _, cidr := range cidrs {
+			if cidr.Contains(ip) {
+				return nil, nil
+			}
+		}
+
+		return delegate(req)
+	}
+}
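+
+// Illustrative sketch (not part of upstream): with NO_PROXY=10.0.0.0/8 set in
+// the environment,
+//
+//	proxier := NewProxierWithNoProxyCIDR(http.ProxyFromEnvironment)
+//	// a request to http://10.1.2.3/ resolves to (nil, nil), i.e. no proxy;
+//	// any other host falls through to http.ProxyFromEnvironment.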
+
+// DialerFunc implements Dialer for the provided function.
+type DialerFunc func(req *http.Request) (net.Conn, error)
+
+func (fn DialerFunc) Dial(req *http.Request) (net.Conn, error) {
+	return fn(req)
+}
+
+// Dialer dials a host and writes a request to it.
+type Dialer interface {
+	// Dial connects to the host specified by req's URL, writes the request to the connection, and
+	// returns the opened net.Conn.
+	Dial(req *http.Request) (net.Conn, error)
+}
+
+// ConnectWithRedirects uses dialer to send req, following up to 10 redirects (relative to
+// originalLocation). It returns the opened net.Conn and the raw response bytes.
+// If requireSameHostRedirects is true, only redirects to the same host are permitted.
+func ConnectWithRedirects(originalMethod string, originalLocation *url.URL, header http.Header, originalBody io.Reader, dialer Dialer, requireSameHostRedirects bool) (net.Conn, []byte, error) {
+	const (
+		maxRedirects    = 9     // Fail on the 10th redirect
+		maxResponseSize = 16384 // play it safe to allow the potential for lots of / large headers
+	)
+
+	var (
+		location         = originalLocation
+		method           = originalMethod
+		intermediateConn net.Conn
+		rawResponse      = bytes.NewBuffer(make([]byte, 0, 256))
+		body             = originalBody
+	)
+
+	defer func() {
+		if intermediateConn != nil {
+			intermediateConn.Close()
+		}
+	}()
+
+redirectLoop:
+	for redirects := 0; ; redirects++ {
+		if redirects > maxRedirects {
+			return nil, nil, fmt.Errorf("too many redirects (%d)", redirects)
+		}
+
+		req, err := http.NewRequest(method, location.String(), body)
+		if err != nil {
+			return nil, nil, err
+		}
+
+		req.Header = header
+
+		intermediateConn, err = dialer.Dial(req)
+		if err != nil {
+			return nil, nil, err
+		}
+
+		// Peek at the backend response.
+		rawResponse.Reset()
+		respReader := bufio.NewReader(io.TeeReader(
+			io.LimitReader(intermediateConn, maxResponseSize), // Don't read more than maxResponseSize bytes.
+			rawResponse)) // Save the raw response.
+		resp, err := http.ReadResponse(respReader, nil)
+		if err != nil {
+			// Unable to read the backend response; let the client handle it.
+			klog.Warningf("Error reading backend response: %v", err)
+			break redirectLoop
+		}
+
+		switch resp.StatusCode {
+		case http.StatusFound:
+			// Redirect, continue.
+		default:
+			// Don't redirect.
+			break redirectLoop
+		}
+
+		// Redirected requests switch to "GET" according to the HTTP spec:
+		// https://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3
+		method = "GET"
+		// don't send a body when following redirects
+		body = nil
+
+		resp.Body.Close() // not used
+
+		// Prepare to follow the redirect.
+		redirectStr := resp.Header.Get("Location")
+		if redirectStr == "" {
+			return nil, nil, fmt.Errorf("%d response missing Location header", resp.StatusCode)
+		}
+		// We have to parse relative to the current location, NOT originalLocation. For example,
+		// if we request http://foo.com/a and get back "http://bar.com/b", the result should be
+		// http://bar.com/b. If we then make that request and get back a redirect to "/c", the result
+		// should be http://bar.com/c, not http://foo.com/c.
+		location, err = location.Parse(redirectStr)
+		if err != nil {
+			return nil, nil, fmt.Errorf("malformed Location header: %v", err)
+		}
+
+		// Only follow redirects to the same host. Otherwise, propagate the redirect response back.
+		if requireSameHostRedirects && location.Hostname() != originalLocation.Hostname() {
+			break redirectLoop
+		}
+
+		// Reset the connection.
+		intermediateConn.Close()
+		intermediateConn = nil
+	}
+
+	connToReturn := intermediateConn
+	intermediateConn = nil // Don't close the connection when we return it.
+	return connToReturn, rawResponse.Bytes(), nil
+}
+
+// CloneRequest creates a shallow copy of the request along with a deep copy of the Headers.
+func CloneRequest(req *http.Request) *http.Request {
+	r := new(http.Request)
+
+	// shallow clone
+	*r = *req
+
+	// deep copy headers
+	r.Header = CloneHeader(req.Header)
+
+	return r
+}
+
+// CloneHeader creates a deep copy of an http.Header.
+func CloneHeader(in http.Header) http.Header {
+	out := make(http.Header, len(in))
+	for key, values := range in {
+		newValues := make([]string, len(values))
+		copy(newValues, values)
+		out[key] = newValues
+	}
+	return out
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/interface.go b/vendor/k8s.io/apimachinery/pkg/util/net/interface.go
new file mode 100644
index 0000000..daf5d24
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/net/interface.go
@@ -0,0 +1,416 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package net
+
+import (
+	"bufio"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"net"
+	"os"
+
+	"strings"
+
+	"k8s.io/klog"
+)
+
+type AddressFamily uint
+
+const (
+	familyIPv4 AddressFamily = 4
+	familyIPv6 AddressFamily = 6
+)
+
+const (
+	ipv4RouteFile = "/proc/net/route"
+	ipv6RouteFile = "/proc/net/ipv6_route"
+)
+
+type Route struct {
+	Interface   string
+	Destination net.IP
+	Gateway     net.IP
+	Family      AddressFamily
+}
+
+type RouteFile struct {
+	name  string
+	parse func(input io.Reader) ([]Route, error)
+}
+
+// noRoutesError can be returned by ChooseBindAddress() in case of no routes
+type noRoutesError struct {
+	message string
+}
+
+func (e noRoutesError) Error() string {
+	return e.message
+}
+
+// IsNoRoutesError checks if an error is of type noRoutesError
+func IsNoRoutesError(err error) bool {
+	if err == nil {
+		return false
+	}
+	switch err.(type) {
+	case noRoutesError:
+		return true
+	default:
+		return false
+	}
+}
+
+var (
+	v4File = RouteFile{name: ipv4RouteFile, parse: getIPv4DefaultRoutes}
+	v6File = RouteFile{name: ipv6RouteFile, parse: getIPv6DefaultRoutes}
+)
+
+func (rf RouteFile) extract() ([]Route, error) {
+	file, err := os.Open(rf.name)
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+	return rf.parse(file)
+}
+
+// getIPv4DefaultRoutes obtains the IPv4 routes, and filters out non-default routes.
+func getIPv4DefaultRoutes(input io.Reader) ([]Route, error) {
+	routes := []Route{}
+	scanner := bufio.NewReader(input)
+	for {
+		line, err := scanner.ReadString('\n')
+		if err == io.EOF {
+			break
+		}
+		// Ignore the headers in the route info
+		if strings.HasPrefix(line, "Iface") {
+			continue
+		}
+		fields := strings.Fields(line)
+		// Interested in fields:
+		//  0 - interface name
+		//  1 - destination address
+		//  2 - gateway
+		dest, err := parseIP(fields[1], familyIPv4)
+		if err != nil {
+			return nil, err
+		}
+		gw, err := parseIP(fields[2], familyIPv4)
+		if err != nil {
+			return nil, err
+		}
+		if !dest.Equal(net.IPv4zero) {
+			continue
+		}
+		routes = append(routes, Route{
+			Interface:   fields[0],
+			Destination: dest,
+			Gateway:     gw,
+			Family:      familyIPv4,
+		})
+	}
+	return routes, nil
+}
+
+func getIPv6DefaultRoutes(input io.Reader) ([]Route, error) {
+	routes := []Route{}
+	scanner := bufio.NewReader(input)
+	for {
+		line, err := scanner.ReadString('\n')
+		if err == io.EOF {
+			break
+		}
+		fields := strings.Fields(line)
+		// Interested in fields:
+		//  0 - destination address
+		//  4 - gateway
+		//  9 - interface name
+		dest, err := parseIP(fields[0], familyIPv6)
+		if err != nil {
+			return nil, err
+		}
+		gw, err := parseIP(fields[4], familyIPv6)
+		if err != nil {
+			return nil, err
+		}
+		if !dest.Equal(net.IPv6zero) {
+			continue
+		}
+		if gw.Equal(net.IPv6zero) {
+			continue // loopback
+		}
+		routes = append(routes, Route{
+			Interface:   fields[9],
+			Destination: dest,
+			Gateway:     gw,
+			Family:      familyIPv6,
+		})
+	}
+	return routes, nil
+}
+
+// parseIP takes the hex IP address string from route file and converts it
+// to a net.IP address. For IPv4, the value must be converted to big endian.
+func parseIP(str string, family AddressFamily) (net.IP, error) {
+	if str == "" {
+		return nil, fmt.Errorf("input is nil")
+	}
+	bytes, err := hex.DecodeString(str)
+	if err != nil {
+		return nil, err
+	}
+	if family == familyIPv4 {
+		if len(bytes) != net.IPv4len {
+			return nil, fmt.Errorf("invalid IPv4 address in route")
+		}
+		return net.IP([]byte{bytes[3], bytes[2], bytes[1], bytes[0]}), nil
+	}
+	// Must be IPv6
+	if len(bytes) != net.IPv6len {
+		return nil, fmt.Errorf("invalid IPv6 address in route")
+	}
+	return net.IP(bytes), nil
+}
+
+func isInterfaceUp(intf *net.Interface) bool {
+	if intf == nil {
+		return false
+	}
+	if intf.Flags&net.FlagUp != 0 {
+		klog.V(4).Infof("Interface %v is up", intf.Name)
+		return true
+	}
+	return false
+}
+
+func isLoopbackOrPointToPoint(intf *net.Interface) bool {
+	return intf.Flags&(net.FlagLoopback|net.FlagPointToPoint) != 0
+}
+
+// getMatchingGlobalIP returns the first valid global unicast address of the given
+// 'family' from the list of 'addrs'.
+func getMatchingGlobalIP(addrs []net.Addr, family AddressFamily) (net.IP, error) {
+	if len(addrs) > 0 {
+		for i := range addrs {
+			klog.V(4).Infof("Checking addr  %s.", addrs[i].String())
+			ip, _, err := net.ParseCIDR(addrs[i].String())
+			if err != nil {
+				return nil, err
+			}
+			if memberOf(ip, family) {
+				if ip.IsGlobalUnicast() {
+					klog.V(4).Infof("IP found %v", ip)
+					return ip, nil
+				} else {
+					klog.V(4).Infof("Non-global unicast address found %v", ip)
+				}
+			} else {
+				klog.V(4).Infof("%v is not an IPv%d address", ip, int(family))
+			}
+
+		}
+	}
+	return nil, nil
+}
+
+// getIPFromInterface gets the IPs on an interface and returns a global unicast address, if any. The
+// interface must be up, the IP must in the family requested, and the IP must be a global unicast address.
+func getIPFromInterface(intfName string, forFamily AddressFamily, nw networkInterfacer) (net.IP, error) {
+	intf, err := nw.InterfaceByName(intfName)
+	if err != nil {
+		return nil, err
+	}
+	if isInterfaceUp(intf) {
+		addrs, err := nw.Addrs(intf)
+		if err != nil {
+			return nil, err
+		}
+		klog.V(4).Infof("Interface %q has %d addresses :%v.", intfName, len(addrs), addrs)
+		matchingIP, err := getMatchingGlobalIP(addrs, forFamily)
+		if err != nil {
+			return nil, err
+		}
+		if matchingIP != nil {
+			klog.V(4).Infof("Found valid IPv%d address %v for interface %q.", int(forFamily), matchingIP, intfName)
+			return matchingIP, nil
+		}
+	}
+	return nil, nil
+}
+
+// memberOf tells whether the IP is of the desired family. Used for checking interface addresses.
+func memberOf(ip net.IP, family AddressFamily) bool {
+	if ip.To4() != nil {
+		return family == familyIPv4
+	} else {
+		return family == familyIPv6
+	}
+}
+
+// chooseIPFromHostInterfaces looks at all system interfaces, trying to find one that is up that
+// has a global unicast address (non-loopback, non-link local, non-point2point), and returns the IP.
+// Searches for IPv4 addresses, and then IPv6 addresses.
+func chooseIPFromHostInterfaces(nw networkInterfacer) (net.IP, error) {
+	intfs, err := nw.Interfaces()
+	if err != nil {
+		return nil, err
+	}
+	if len(intfs) == 0 {
+		return nil, fmt.Errorf("no interfaces found on host.")
+	}
+	for _, family := range []AddressFamily{familyIPv4, familyIPv6} {
+		klog.V(4).Infof("Looking for system interface with a global IPv%d address", uint(family))
+		for _, intf := range intfs {
+			if !isInterfaceUp(&intf) {
+				klog.V(4).Infof("Skipping: down interface %q", intf.Name)
+				continue
+			}
+			if isLoopbackOrPointToPoint(&intf) {
+				klog.V(4).Infof("Skipping: LB or P2P interface %q", intf.Name)
+				continue
+			}
+			addrs, err := nw.Addrs(&intf)
+			if err != nil {
+				return nil, err
+			}
+			if len(addrs) == 0 {
+				klog.V(4).Infof("Skipping: no addresses on interface %q", intf.Name)
+				continue
+			}
+			for _, addr := range addrs {
+				ip, _, err := net.ParseCIDR(addr.String())
+				if err != nil {
+					return nil, fmt.Errorf("Unable to parse CIDR for interface %q: %s", intf.Name, err)
+				}
+				if !memberOf(ip, family) {
+					klog.V(4).Infof("Skipping: no address family match for %q on interface %q.", ip, intf.Name)
+					continue
+				}
+				// TODO: Decide if should open up to allow IPv6 LLAs in future.
+				if !ip.IsGlobalUnicast() {
+					klog.V(4).Infof("Skipping: non-global address %q on interface %q.", ip, intf.Name)
+					continue
+				}
+				klog.V(4).Infof("Found global unicast address %q on interface %q.", ip, intf.Name)
+				return ip, nil
+			}
+		}
+	}
+	return nil, fmt.Errorf("no acceptable interface with global unicast address found on host")
+}
+
+// ChooseHostInterface is a method used to fetch an IP for a daemon.
+// If there is no routing info file, it will choose a global IP from the system
+// interfaces. Otherwise, it will use IPv4 and IPv6 route information to return the
+// IP of the interface with a gateway on it (with priority given to IPv4). For a node
+// with no internet connection, it returns an error.
+func ChooseHostInterface() (net.IP, error) {
+	var nw networkInterfacer = networkInterface{}
+	if _, err := os.Stat(ipv4RouteFile); os.IsNotExist(err) {
+		return chooseIPFromHostInterfaces(nw)
+	}
+	routes, err := getAllDefaultRoutes()
+	if err != nil {
+		return nil, err
+	}
+	return chooseHostInterfaceFromRoute(routes, nw)
+}
+
+// networkInterfacer defines an interface for several net library functions. Production
+// code will forward to net library functions, and unit tests will override the methods
+// for testing purposes.
+type networkInterfacer interface {
+	InterfaceByName(intfName string) (*net.Interface, error)
+	Addrs(intf *net.Interface) ([]net.Addr, error)
+	Interfaces() ([]net.Interface, error)
+}
+
+// networkInterface implements the networkInterfacer interface for production code, just
+// wrapping the underlying net library function calls.
+type networkInterface struct{}
+
+func (_ networkInterface) InterfaceByName(intfName string) (*net.Interface, error) {
+	return net.InterfaceByName(intfName)
+}
+
+func (_ networkInterface) Addrs(intf *net.Interface) ([]net.Addr, error) {
+	return intf.Addrs()
+}
+
+func (_ networkInterface) Interfaces() ([]net.Interface, error) {
+	return net.Interfaces()
+}
+
+// getAllDefaultRoutes obtains IPv4 and IPv6 default routes on the node. If unable
+// to read the IPv4 routing info file, we return an error. If unable to read the IPv6
+// routing info file (which is optional), we'll just use the IPv4 route information.
+// Using all the routing info, if no default routes are found, an error is returned.
+func getAllDefaultRoutes() ([]Route, error) {
+	routes, err := v4File.extract()
+	if err != nil {
+		return nil, err
+	}
+	v6Routes, _ := v6File.extract()
+	routes = append(routes, v6Routes...)
+	if len(routes) == 0 {
+		return nil, noRoutesError{
+			message: fmt.Sprintf("no default routes found in %q or %q", v4File.name, v6File.name),
+		}
+	}
+	return routes, nil
+}
+
+// chooseHostInterfaceFromRoute cycles through each default route provided, looking for a
+// global IP address from the interface for the route. It first looks at each IPv4 route for
+// an IPv4 IP, and then at each IPv6 route for an IPv6 IP.
+func chooseHostInterfaceFromRoute(routes []Route, nw networkInterfacer) (net.IP, error) {
+	for _, family := range []AddressFamily{familyIPv4, familyIPv6} {
+		klog.V(4).Infof("Looking for default routes with IPv%d addresses", uint(family))
+		for _, route := range routes {
+			if route.Family != family {
+				continue
+			}
+			klog.V(4).Infof("Default route transits interface %q", route.Interface)
+			finalIP, err := getIPFromInterface(route.Interface, family, nw)
+			if err != nil {
+				return nil, err
+			}
+			if finalIP != nil {
+				klog.V(4).Infof("Found active IP %v ", finalIP)
+				return finalIP, nil
+			}
+		}
+	}
+	klog.V(4).Infof("No active IP found by looking at default routes")
+	return nil, fmt.Errorf("unable to select an IP from default routes.")
+}
+
+// ChooseBindAddress returns bind-address directly if it is usable.
+// If bind-address is not usable (unset, 0.0.0.0, or loopback), the IP of the
+// host's default interface is used instead.
+func ChooseBindAddress(bindAddress net.IP) (net.IP, error) {
+	if bindAddress == nil || bindAddress.IsUnspecified() || bindAddress.IsLoopback() {
+		hostIP, err := ChooseHostInterface()
+		if err != nil {
+			return nil, err
+		}
+		bindAddress = hostIP
+	}
+	return bindAddress, nil
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/port_range.go b/vendor/k8s.io/apimachinery/pkg/util/net/port_range.go
new file mode 100644
index 0000000..7b6eca8
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/net/port_range.go
@@ -0,0 +1,149 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package net
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// PortRange represents a range of TCP/UDP ports.  To represent a single port,
+// set Size to 1.
+type PortRange struct {
+	Base int
+	Size int
+}
+
+// Contains tests whether a given port falls within the PortRange.
+func (pr *PortRange) Contains(p int) bool {
+	return (p >= pr.Base) && ((p - pr.Base) < pr.Size)
+}
+
+// String converts the PortRange to a string representation, which can be
+// parsed by PortRange.Set or ParsePortRange.
+func (pr PortRange) String() string {
+	if pr.Size == 0 {
+		return ""
+	}
+	return fmt.Sprintf("%d-%d", pr.Base, pr.Base+pr.Size-1)
+}
+
+// Set parses a string of the form "value", "min-max", or "min+offset", inclusive at both ends, and
+// sets the PortRange from it.  This is part of the flag.Value and pflag.Value
+// interfaces.
+func (pr *PortRange) Set(value string) error {
+	const (
+		SinglePortNotation = 1 << iota
+		HyphenNotation
+		PlusNotation
+	)
+
+	value = strings.TrimSpace(value)
+	hyphenIndex := strings.Index(value, "-")
+	plusIndex := strings.Index(value, "+")
+
+	if value == "" {
+		pr.Base = 0
+		pr.Size = 0
+		return nil
+	}
+
+	var err error
+	var low, high int
+	var notation int
+
+	if plusIndex == -1 && hyphenIndex == -1 {
+		notation |= SinglePortNotation
+	}
+	if hyphenIndex != -1 {
+		notation |= HyphenNotation
+	}
+	if plusIndex != -1 {
+		notation |= PlusNotation
+	}
+
+	switch notation {
+	case SinglePortNotation:
+		var port int
+		port, err = strconv.Atoi(value)
+		if err != nil {
+			return err
+		}
+		low = port
+		high = port
+	case HyphenNotation:
+		low, err = strconv.Atoi(value[:hyphenIndex])
+		if err != nil {
+			return err
+		}
+		high, err = strconv.Atoi(value[hyphenIndex+1:])
+		if err != nil {
+			return err
+		}
+	case PlusNotation:
+		var offset int
+		low, err = strconv.Atoi(value[:plusIndex])
+		if err != nil {
+			return err
+		}
+		offset, err = strconv.Atoi(value[plusIndex+1:])
+		if err != nil {
+			return err
+		}
+		high = low + offset
+	default:
+		return fmt.Errorf("unable to parse port range: %s", value)
+	}
+
+	if low > 65535 || high > 65535 {
+		return fmt.Errorf("the port range cannot be greater than 65535: %s", value)
+	}
+
+	if high < low {
+		return fmt.Errorf("end port cannot be less than start port: %s", value)
+	}
+
+	pr.Base = low
+	pr.Size = 1 + high - low
+	return nil
+}
+
+// Type returns a descriptive string about this type.  This is part of the
+// pflag.Value interface.
+func (*PortRange) Type() string {
+	return "portRange"
+}
+
+// ParsePortRange parses a string of the form "min-max", inclusive at both
+// ends, and initializes a new PortRange from it.
+func ParsePortRange(value string) (*PortRange, error) {
+	pr := &PortRange{}
+	err := pr.Set(value)
+	if err != nil {
+		return nil, err
+	}
+	return pr, nil
+}
+
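+// ParsePortRangeOrDie is like ParsePortRange but panics if the value cannot be parsed.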
+func ParsePortRangeOrDie(value string) *PortRange {
+	pr, err := ParsePortRange(value)
+	if err != nil {
+		panic(fmt.Sprintf("couldn't parse port range %q: %v", value, err))
+	}
+	return pr
+}
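+
+// Illustrative sketch (not part of upstream): the three accepted notations:
+//
+//	ParsePortRangeOrDie("8080")      // Base 8080, Size 1
+//	ParsePortRangeOrDie("8000-8009") // Base 8000, Size 10
+//	ParsePortRangeOrDie("8000+9")    // Base 8000, Size 10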
diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/port_split.go b/vendor/k8s.io/apimachinery/pkg/util/net/port_split.go
new file mode 100644
index 0000000..c0fd4e2
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/net/port_split.go
@@ -0,0 +1,77 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package net
+
+import (
+	"strings"
+
+	"k8s.io/apimachinery/pkg/util/sets"
+)
+
+var validSchemes = sets.NewString("http", "https", "")
+
+// SplitSchemeNamePort takes a string of the following forms:
+//  * "<name>",                 returns "",        "<name>","",      true
+//  * "<name>:<port>",          returns "",        "<name>","<port>",true
+//  * "<scheme>:<name>:<port>", returns "<scheme>","<name>","<port>",true
+//
+// Name must be non-empty, or valid is returned as false.
+// Scheme must be "http" or "https" if specified.
+// Port is returned as a string, and it is not required to be numeric (could be
+// used for a named port, for example).
+func SplitSchemeNamePort(id string) (scheme, name, port string, valid bool) {
+	parts := strings.Split(id, ":")
+	switch len(parts) {
+	case 1:
+		name = parts[0]
+	case 2:
+		name = parts[0]
+		port = parts[1]
+	case 3:
+		scheme = parts[0]
+		name = parts[1]
+		port = parts[2]
+	default:
+		return "", "", "", false
+	}
+
+	if len(name) > 0 && validSchemes.Has(scheme) {
+		return scheme, name, port, true
+	} else {
+		return "", "", "", false
+	}
+}
+
+// JoinSchemeNamePort returns a string that specifies the scheme, name, and port:
+//  * "<name>"
+//  * "<name>:<port>"
+//  * "<scheme>:<name>:<port>"
+// None of the parameters may contain a ':' character.
+// Name is required.
+// Scheme must be "", "http", or "https".
+func JoinSchemeNamePort(scheme, name, port string) string {
+	if len(scheme) > 0 {
+		// Must include three segments to specify scheme
+		return scheme + ":" + name + ":" + port
+	}
+	if len(port) > 0 {
+		// Must include two segments to specify port
+		return name + ":" + port
+	}
+	// Return name alone
+	return name
+}
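+
+// Illustrative round-trip sketch (added for clarity, not part of the
+// upstream file; the values are assumptions for the example only):
+//
+//	scheme, name, port, valid := SplitSchemeNamePort("https:etcd:2379")
+//	// scheme="https", name="etcd", port="2379", valid=true
+//	JoinSchemeNamePort(scheme, name, port) // "https:etcd:2379"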
diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/util.go b/vendor/k8s.io/apimachinery/pkg/util/net/util.go
new file mode 100644
index 0000000..8344d10
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/net/util.go
@@ -0,0 +1,56 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package net
+
+import (
+	"net"
+	"net/url"
+	"os"
+	"reflect"
+	"syscall"
+)
+
+// IPNetEqual checks if the two input IPNets are representing the same subnet.
+// For example,
+//	10.0.0.1/24 and 10.0.0.0/24 are the same subnet.
+//	10.0.0.1/24 and 10.0.0.0/25 are not the same subnet.
+func IPNetEqual(ipnet1, ipnet2 *net.IPNet) bool {
+	if ipnet1 == nil || ipnet2 == nil {
+		return false
+	}
+	return reflect.DeepEqual(ipnet1.Mask, ipnet2.Mask) && ipnet1.Contains(ipnet2.IP) && ipnet2.Contains(ipnet1.IP)
+}
+
+// IsConnectionReset returns true if the given err is a "connection reset by
+// peer" error.
+func IsConnectionReset(err error) bool {
+	if urlErr, ok := err.(*url.Error); ok {
+		err = urlErr.Err
+	}
+	if opErr, ok := err.(*net.OpError); ok {
+		err = opErr.Err
+	}
+	if osErr, ok := err.(*os.SyscallError); ok {
+		err = osErr.Err
+	}
+	if errno, ok := err.(syscall.Errno); ok && errno == syscall.ECONNRESET {
+		return true
+	}
+	return false
+}
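+
+// Illustrative sketch (added for clarity, not part of the upstream file):
+// IsConnectionReset unwraps url.Error, net.OpError and os.SyscallError
+// layers before checking for ECONNRESET, so a wrapped errno is detected:
+//
+//	err := &net.OpError{Err: syscall.ECONNRESET}
+//	IsConnectionReset(err) // true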
diff --git a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go
new file mode 100644
index 0000000..8e34f92
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go
@@ -0,0 +1,173 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package runtime
+
+import (
+	"fmt"
+	"runtime"
+	"sync"
+	"time"
+
+	"k8s.io/klog"
+)
+
+var (
+	// ReallyCrash controls the behavior of HandleCrash and now defaults
+	// to true. It remains exposed so components can optionally set it to
+	// false to restore the prior behavior.
+	ReallyCrash = true
+)
+
+// PanicHandlers is a list of functions which will be invoked when a panic happens.
+var PanicHandlers = []func(interface{}){logPanic}
+
+// HandleCrash simply catches a crash and logs an error. Meant to be called via
+// defer.  Additional context-specific handlers can be provided, and will be
+// called in case of panic.  HandleCrash actually crashes, after calling the
+// handlers and logging the panic message.
+//
+// TODO: remove this function. We are switching to a world where it's safe for
+// apiserver to panic, since it will be restarted by kubelet. At the beginning
+// of the Kubernetes project, nothing was going to restart apiserver and so
+// catching panics was important. But it's actually much simpler for monitoring
+// software if we just exit when an unexpected panic happens.
+func HandleCrash(additionalHandlers ...func(interface{})) {
+	if r := recover(); r != nil {
+		for _, fn := range PanicHandlers {
+			fn(r)
+		}
+		for _, fn := range additionalHandlers {
+			fn(r)
+		}
+		if ReallyCrash {
+			// Actually proceed to panic.
+			panic(r)
+		}
+	}
+}
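+
+// Illustrative usage sketch (added for clarity, not part of the upstream
+// file): HandleCrash is meant to be deferred at the top of a goroutine so
+// panics are logged (and, with ReallyCrash=true, re-raised):
+//
+//	go func() {
+//		defer HandleCrash()
+//		doWork() // doWork is a hypothetical worker function
+//	}()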
+
+// logPanic logs the caller tree when a panic occurs.
+func logPanic(r interface{}) {
+	callers := getCallers(r)
+	if _, ok := r.(string); ok {
+		klog.Errorf("Observed a panic: %s\n%v", r, callers)
+	} else {
+		klog.Errorf("Observed a panic: %#v (%v)\n%v", r, r, callers)
+	}
+}
+
+func getCallers(r interface{}) string {
+	callers := ""
+	for i := 0; true; i++ {
+		_, file, line, ok := runtime.Caller(i)
+		if !ok {
+			break
+		}
+		callers = callers + fmt.Sprintf("%v:%v\n", file, line)
+	}
+
+	return callers
+}
+
+// ErrorHandlers is a list of functions which will be invoked when an unreturnable
+// error occurs.
+// TODO(lavalamp): for testability, this and the below HandleError function
+// should be packaged up into a testable and reusable object.
+var ErrorHandlers = []func(error){
+	logError,
+	(&rudimentaryErrorBackoff{
+		lastErrorTime: time.Now(),
+		// 1ms was the number folks were able to stomach as a global rate limit.
+		// If you need to log errors more than 1000 times a second you
+		// should probably consider fixing your code instead. :)
+		minPeriod: time.Millisecond,
+	}).OnError,
+}
+
+// HandleError is a method to invoke when a non-user-facing piece of code cannot
+// return an error and needs to indicate it has been ignored. Invoking this method
+// is preferable to logging the error directly - the default behavior is to log,
+// but the errors may also be sent to a remote server for analysis.
+func HandleError(err error) {
+	// this is sometimes called with a nil error.  We probably shouldn't fail and should do nothing instead
+	if err == nil {
+		return
+	}
+
+	for _, fn := range ErrorHandlers {
+		fn(err)
+	}
+}
+
+// logError prints an error with the call stack of the location it was reported
+func logError(err error) {
+	klog.ErrorDepth(2, err)
+}
+
+type rudimentaryErrorBackoff struct {
+	minPeriod time.Duration // immutable
+	// TODO(lavalamp): use the clock for testability. Need to move that
+	// package for that to be accessible here.
+	lastErrorTimeLock sync.Mutex
+	lastErrorTime     time.Time
+}
+
+// OnError will block if it is called more often than the embedded period time.
+// This will prevent overly tight hot error loops.
+func (r *rudimentaryErrorBackoff) OnError(error) {
+	r.lastErrorTimeLock.Lock()
+	defer r.lastErrorTimeLock.Unlock()
+	d := time.Since(r.lastErrorTime)
+	if d < r.minPeriod {
+		// Block until the minimum period since the last error has elapsed.
+		time.Sleep(r.minPeriod - d)
+	}
+	r.lastErrorTime = time.Now()
+}
+
+// GetCaller returns the caller of the function that calls it.
+func GetCaller() string {
+	var pc [1]uintptr
+	runtime.Callers(3, pc[:])
+	f := runtime.FuncForPC(pc[0])
+	if f == nil {
+		return fmt.Sprintf("Unable to find caller")
+	}
+	return f.Name()
+}
+
+// RecoverFromPanic replaces the specified error with an error containing the
+// original error and the call tree when a panic occurs. This enables error
+// handlers to handle errors and panics the same way.
+func RecoverFromPanic(err *error) {
+	if r := recover(); r != nil {
+		callers := getCallers(r)
+
+		*err = fmt.Errorf(
+			"recovered from panic %q. (err=%v) Call stack:\n%v",
+			r,
+			*err,
+			callers)
+	}
+}
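+
+// Illustrative usage sketch (added for clarity, not part of the upstream
+// file): deferring RecoverFromPanic converts a panic into a returned error:
+//
+//	func risky() (err error) {
+//		defer RecoverFromPanic(&err)
+//		panic("boom") // err becomes `recovered from panic "boom". ...`
+//	}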
+
+// Must panics on non-nil errors.  Useful for handling programmer-level errors.
+func Must(err error) {
+	if err != nil {
+		panic(err)
+	}
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go b/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go
new file mode 100644
index 0000000..766f450
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go
@@ -0,0 +1,203 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by set-gen. DO NOT EDIT.
+
+package sets
+
+import (
+	"reflect"
+	"sort"
+)
+
+// sets.Byte is a set of bytes, implemented via map[byte]struct{} for minimal memory consumption.
+type Byte map[byte]Empty
+
+// NewByte creates a Byte from a list of values.
+func NewByte(items ...byte) Byte {
+	ss := Byte{}
+	ss.Insert(items...)
+	return ss
+}
+
+// ByteKeySet creates a Byte from the keys of a map[byte](? extends interface{}).
+// If the value passed in is not actually a map, this will panic.
+func ByteKeySet(theMap interface{}) Byte {
+	v := reflect.ValueOf(theMap)
+	ret := Byte{}
+
+	for _, keyValue := range v.MapKeys() {
+		ret.Insert(keyValue.Interface().(byte))
+	}
+	return ret
+}
+
+// Insert adds items to the set.
+func (s Byte) Insert(items ...byte) {
+	for _, item := range items {
+		s[item] = Empty{}
+	}
+}
+
+// Delete removes the given items from the set.
+func (s Byte) Delete(items ...byte) {
+	for _, item := range items {
+		delete(s, item)
+	}
+}
+
+// Has returns true if and only if item is contained in the set.
+func (s Byte) Has(item byte) bool {
+	_, contained := s[item]
+	return contained
+}
+
+// HasAll returns true if and only if all items are contained in the set.
+func (s Byte) HasAll(items ...byte) bool {
+	for _, item := range items {
+		if !s.Has(item) {
+			return false
+		}
+	}
+	return true
+}
+
+// HasAny returns true if any items are contained in the set.
+func (s Byte) HasAny(items ...byte) bool {
+	for _, item := range items {
+		if s.Has(item) {
+			return true
+		}
+	}
+	return false
+}
+
+// Difference returns a set of objects that are not in s2
+// For example:
+// s1 = {a1, a2, a3}
+// s2 = {a1, a2, a4, a5}
+// s1.Difference(s2) = {a3}
+// s2.Difference(s1) = {a4, a5}
+func (s Byte) Difference(s2 Byte) Byte {
+	result := NewByte()
+	for key := range s {
+		if !s2.Has(key) {
+			result.Insert(key)
+		}
+	}
+	return result
+}
+
+// Union returns a new set which includes items in either s1 or s2.
+// For example:
+// s1 = {a1, a2}
+// s2 = {a3, a4}
+// s1.Union(s2) = {a1, a2, a3, a4}
+// s2.Union(s1) = {a1, a2, a3, a4}
+func (s1 Byte) Union(s2 Byte) Byte {
+	result := NewByte()
+	for key := range s1 {
+		result.Insert(key)
+	}
+	for key := range s2 {
+		result.Insert(key)
+	}
+	return result
+}
+
+// Intersection returns a new set which includes the item in BOTH s1 and s2
+// For example:
+// s1 = {a1, a2}
+// s2 = {a2, a3}
+// s1.Intersection(s2) = {a2}
+func (s1 Byte) Intersection(s2 Byte) Byte {
+	var walk, other Byte
+	result := NewByte()
+	if s1.Len() < s2.Len() {
+		walk = s1
+		other = s2
+	} else {
+		walk = s2
+		other = s1
+	}
+	for key := range walk {
+		if other.Has(key) {
+			result.Insert(key)
+		}
+	}
+	return result
+}
+
+// IsSuperset returns true if and only if s1 is a superset of s2.
+func (s1 Byte) IsSuperset(s2 Byte) bool {
+	for item := range s2 {
+		if !s1.Has(item) {
+			return false
+		}
+	}
+	return true
+}
+
+// Equal returns true if and only if s1 is equal (as a set) to s2.
+// Two sets are equal if their membership is identical.
+// (In practice, this means same elements, order doesn't matter)
+func (s1 Byte) Equal(s2 Byte) bool {
+	return len(s1) == len(s2) && s1.IsSuperset(s2)
+}
+
+type sortableSliceOfByte []byte
+
+func (s sortableSliceOfByte) Len() int           { return len(s) }
+func (s sortableSliceOfByte) Less(i, j int) bool { return lessByte(s[i], s[j]) }
+func (s sortableSliceOfByte) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+
+// List returns the contents as a sorted byte slice.
+func (s Byte) List() []byte {
+	res := make(sortableSliceOfByte, 0, len(s))
+	for key := range s {
+		res = append(res, key)
+	}
+	sort.Sort(res)
+	return []byte(res)
+}
+
+// UnsortedList returns the slice with contents in random order.
+func (s Byte) UnsortedList() []byte {
+	res := make([]byte, 0, len(s))
+	for key := range s {
+		res = append(res, key)
+	}
+	return res
+}
+
+// PopAny removes and returns a single element from the set.
+func (s Byte) PopAny() (byte, bool) {
+	for key := range s {
+		s.Delete(key)
+		return key, true
+	}
+	var zeroValue byte
+	return zeroValue, false
+}
+
+// Len returns the size of the set.
+func (s Byte) Len() int {
+	return len(s)
+}
+
+func lessByte(lhs, rhs byte) bool {
+	return lhs < rhs
+}
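+
+// Illustrative usage sketch (added for clarity, not part of the upstream
+// file). The same API applies to the generated Int, Int64 and String
+// variants below:
+//
+//	s1 := NewByte('a', 'b')
+//	s2 := NewByte('b', 'c')
+//	s1.Union(s2).List()          // ['a', 'b', 'c'], sorted
+//	s1.Intersection(s2).Has('b') // true
+//	s1.Difference(s2).List()     // ['a']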
diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go b/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go
new file mode 100644
index 0000000..b152a0b
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by set-gen. DO NOT EDIT.
+
+// Package sets has auto-generated set types.
+package sets
diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/empty.go b/vendor/k8s.io/apimachinery/pkg/util/sets/empty.go
new file mode 100644
index 0000000..e11e622
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/sets/empty.go
@@ -0,0 +1,23 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by set-gen. DO NOT EDIT.
+
+package sets
+
+// Empty is public since it is used by some internal API objects for conversions between external
+// string arrays and internal sets, and conversion logic requires public types today.
+type Empty struct{}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/int.go b/vendor/k8s.io/apimachinery/pkg/util/sets/int.go
new file mode 100644
index 0000000..a0a513c
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/sets/int.go
@@ -0,0 +1,203 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by set-gen. DO NOT EDIT.
+
+package sets
+
+import (
+	"reflect"
+	"sort"
+)
+
+// sets.Int is a set of ints, implemented via map[int]struct{} for minimal memory consumption.
+type Int map[int]Empty
+
+// NewInt creates an Int from a list of values.
+func NewInt(items ...int) Int {
+	ss := Int{}
+	ss.Insert(items...)
+	return ss
+}
+
+// IntKeySet creates an Int from the keys of a map[int](? extends interface{}).
+// If the value passed in is not actually a map, this will panic.
+func IntKeySet(theMap interface{}) Int {
+	v := reflect.ValueOf(theMap)
+	ret := Int{}
+
+	for _, keyValue := range v.MapKeys() {
+		ret.Insert(keyValue.Interface().(int))
+	}
+	return ret
+}
+
+// Insert adds items to the set.
+func (s Int) Insert(items ...int) {
+	for _, item := range items {
+		s[item] = Empty{}
+	}
+}
+
+// Delete removes the given items from the set.
+func (s Int) Delete(items ...int) {
+	for _, item := range items {
+		delete(s, item)
+	}
+}
+
+// Has returns true if and only if item is contained in the set.
+func (s Int) Has(item int) bool {
+	_, contained := s[item]
+	return contained
+}
+
+// HasAll returns true if and only if all items are contained in the set.
+func (s Int) HasAll(items ...int) bool {
+	for _, item := range items {
+		if !s.Has(item) {
+			return false
+		}
+	}
+	return true
+}
+
+// HasAny returns true if any items are contained in the set.
+func (s Int) HasAny(items ...int) bool {
+	for _, item := range items {
+		if s.Has(item) {
+			return true
+		}
+	}
+	return false
+}
+
+// Difference returns a set of objects that are not in s2
+// For example:
+// s1 = {a1, a2, a3}
+// s2 = {a1, a2, a4, a5}
+// s1.Difference(s2) = {a3}
+// s2.Difference(s1) = {a4, a5}
+func (s Int) Difference(s2 Int) Int {
+	result := NewInt()
+	for key := range s {
+		if !s2.Has(key) {
+			result.Insert(key)
+		}
+	}
+	return result
+}
+
+// Union returns a new set which includes items in either s1 or s2.
+// For example:
+// s1 = {a1, a2}
+// s2 = {a3, a4}
+// s1.Union(s2) = {a1, a2, a3, a4}
+// s2.Union(s1) = {a1, a2, a3, a4}
+func (s1 Int) Union(s2 Int) Int {
+	result := NewInt()
+	for key := range s1 {
+		result.Insert(key)
+	}
+	for key := range s2 {
+		result.Insert(key)
+	}
+	return result
+}
+
+// Intersection returns a new set which includes the item in BOTH s1 and s2
+// For example:
+// s1 = {a1, a2}
+// s2 = {a2, a3}
+// s1.Intersection(s2) = {a2}
+func (s1 Int) Intersection(s2 Int) Int {
+	var walk, other Int
+	result := NewInt()
+	if s1.Len() < s2.Len() {
+		walk = s1
+		other = s2
+	} else {
+		walk = s2
+		other = s1
+	}
+	for key := range walk {
+		if other.Has(key) {
+			result.Insert(key)
+		}
+	}
+	return result
+}
+
+// IsSuperset returns true if and only if s1 is a superset of s2.
+func (s1 Int) IsSuperset(s2 Int) bool {
+	for item := range s2 {
+		if !s1.Has(item) {
+			return false
+		}
+	}
+	return true
+}
+
+// Equal returns true if and only if s1 is equal (as a set) to s2.
+// Two sets are equal if their membership is identical.
+// (In practice, this means same elements, order doesn't matter)
+func (s1 Int) Equal(s2 Int) bool {
+	return len(s1) == len(s2) && s1.IsSuperset(s2)
+}
+
+type sortableSliceOfInt []int
+
+func (s sortableSliceOfInt) Len() int           { return len(s) }
+func (s sortableSliceOfInt) Less(i, j int) bool { return lessInt(s[i], s[j]) }
+func (s sortableSliceOfInt) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+
+// List returns the contents as a sorted int slice.
+func (s Int) List() []int {
+	res := make(sortableSliceOfInt, 0, len(s))
+	for key := range s {
+		res = append(res, key)
+	}
+	sort.Sort(res)
+	return []int(res)
+}
+
+// UnsortedList returns the slice with contents in random order.
+func (s Int) UnsortedList() []int {
+	res := make([]int, 0, len(s))
+	for key := range s {
+		res = append(res, key)
+	}
+	return res
+}
+
+// PopAny removes and returns a single element from the set.
+func (s Int) PopAny() (int, bool) {
+	for key := range s {
+		s.Delete(key)
+		return key, true
+	}
+	var zeroValue int
+	return zeroValue, false
+}
+
+// Len returns the size of the set.
+func (s Int) Len() int {
+	return len(s)
+}
+
+func lessInt(lhs, rhs int) bool {
+	return lhs < rhs
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go b/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go
new file mode 100644
index 0000000..9ca9af0
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go
@@ -0,0 +1,203 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by set-gen. DO NOT EDIT.
+
+package sets
+
+import (
+	"reflect"
+	"sort"
+)
+
+// sets.Int64 is a set of int64s, implemented via map[int64]struct{} for minimal memory consumption.
+type Int64 map[int64]Empty
+
+// NewInt64 creates an Int64 from a list of values.
+func NewInt64(items ...int64) Int64 {
+	ss := Int64{}
+	ss.Insert(items...)
+	return ss
+}
+
+// Int64KeySet creates an Int64 from the keys of a map[int64](? extends interface{}).
+// If the value passed in is not actually a map, this will panic.
+func Int64KeySet(theMap interface{}) Int64 {
+	v := reflect.ValueOf(theMap)
+	ret := Int64{}
+
+	for _, keyValue := range v.MapKeys() {
+		ret.Insert(keyValue.Interface().(int64))
+	}
+	return ret
+}
+
+// Insert adds items to the set.
+func (s Int64) Insert(items ...int64) {
+	for _, item := range items {
+		s[item] = Empty{}
+	}
+}
+
+// Delete removes the given items from the set.
+func (s Int64) Delete(items ...int64) {
+	for _, item := range items {
+		delete(s, item)
+	}
+}
+
+// Has returns true if and only if item is contained in the set.
+func (s Int64) Has(item int64) bool {
+	_, contained := s[item]
+	return contained
+}
+
+// HasAll returns true if and only if all items are contained in the set.
+func (s Int64) HasAll(items ...int64) bool {
+	for _, item := range items {
+		if !s.Has(item) {
+			return false
+		}
+	}
+	return true
+}
+
+// HasAny returns true if any items are contained in the set.
+func (s Int64) HasAny(items ...int64) bool {
+	for _, item := range items {
+		if s.Has(item) {
+			return true
+		}
+	}
+	return false
+}
+
+// Difference returns a set of objects that are not in s2
+// For example:
+// s1 = {a1, a2, a3}
+// s2 = {a1, a2, a4, a5}
+// s1.Difference(s2) = {a3}
+// s2.Difference(s1) = {a4, a5}
+func (s Int64) Difference(s2 Int64) Int64 {
+	result := NewInt64()
+	for key := range s {
+		if !s2.Has(key) {
+			result.Insert(key)
+		}
+	}
+	return result
+}
+
+// Union returns a new set which includes items in either s1 or s2.
+// For example:
+// s1 = {a1, a2}
+// s2 = {a3, a4}
+// s1.Union(s2) = {a1, a2, a3, a4}
+// s2.Union(s1) = {a1, a2, a3, a4}
+func (s1 Int64) Union(s2 Int64) Int64 {
+	result := NewInt64()
+	for key := range s1 {
+		result.Insert(key)
+	}
+	for key := range s2 {
+		result.Insert(key)
+	}
+	return result
+}
+
+// Intersection returns a new set which includes the item in BOTH s1 and s2
+// For example:
+// s1 = {a1, a2}
+// s2 = {a2, a3}
+// s1.Intersection(s2) = {a2}
+func (s1 Int64) Intersection(s2 Int64) Int64 {
+	var walk, other Int64
+	result := NewInt64()
+	if s1.Len() < s2.Len() {
+		walk = s1
+		other = s2
+	} else {
+		walk = s2
+		other = s1
+	}
+	for key := range walk {
+		if other.Has(key) {
+			result.Insert(key)
+		}
+	}
+	return result
+}
+
+// IsSuperset returns true if and only if s1 is a superset of s2.
+func (s1 Int64) IsSuperset(s2 Int64) bool {
+	for item := range s2 {
+		if !s1.Has(item) {
+			return false
+		}
+	}
+	return true
+}
+
+// Equal returns true if and only if s1 is equal (as a set) to s2.
+// Two sets are equal if their membership is identical.
+// (In practice, this means same elements, order doesn't matter)
+func (s1 Int64) Equal(s2 Int64) bool {
+	return len(s1) == len(s2) && s1.IsSuperset(s2)
+}
+
+type sortableSliceOfInt64 []int64
+
+func (s sortableSliceOfInt64) Len() int           { return len(s) }
+func (s sortableSliceOfInt64) Less(i, j int) bool { return lessInt64(s[i], s[j]) }
+func (s sortableSliceOfInt64) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+
+// List returns the contents as a sorted int64 slice.
+func (s Int64) List() []int64 {
+	res := make(sortableSliceOfInt64, 0, len(s))
+	for key := range s {
+		res = append(res, key)
+	}
+	sort.Sort(res)
+	return []int64(res)
+}
+
+// UnsortedList returns the slice with contents in random order.
+func (s Int64) UnsortedList() []int64 {
+	res := make([]int64, 0, len(s))
+	for key := range s {
+		res = append(res, key)
+	}
+	return res
+}
+
+// PopAny removes and returns a single element from the set.
+func (s Int64) PopAny() (int64, bool) {
+	for key := range s {
+		s.Delete(key)
+		return key, true
+	}
+	var zeroValue int64
+	return zeroValue, false
+}
+
+// Len returns the size of the set.
+func (s Int64) Len() int {
+	return len(s)
+}
+
+func lessInt64(lhs, rhs int64) bool {
+	return lhs < rhs
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/string.go b/vendor/k8s.io/apimachinery/pkg/util/sets/string.go
new file mode 100644
index 0000000..ba00ad7
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/sets/string.go
@@ -0,0 +1,203 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by set-gen. DO NOT EDIT.
+
+package sets
+
+import (
+	"reflect"
+	"sort"
+)
+
+// sets.String is a set of strings, implemented via map[string]struct{} for minimal memory consumption.
+type String map[string]Empty
+
+// NewString creates a String from a list of values.
+func NewString(items ...string) String {
+	ss := String{}
+	ss.Insert(items...)
+	return ss
+}
+
+// StringKeySet creates a String from the keys of a map[string](? extends interface{}).
+// If the value passed in is not actually a map, this will panic.
+func StringKeySet(theMap interface{}) String {
+	v := reflect.ValueOf(theMap)
+	ret := String{}
+
+	for _, keyValue := range v.MapKeys() {
+		ret.Insert(keyValue.Interface().(string))
+	}
+	return ret
+}
+
+// Insert adds items to the set.
+func (s String) Insert(items ...string) {
+	for _, item := range items {
+		s[item] = Empty{}
+	}
+}
+
+// Delete removes the given items from the set.
+func (s String) Delete(items ...string) {
+	for _, item := range items {
+		delete(s, item)
+	}
+}
+
+// Has returns true if and only if item is contained in the set.
+func (s String) Has(item string) bool {
+	_, contained := s[item]
+	return contained
+}
+
+// HasAll returns true if and only if all items are contained in the set.
+func (s String) HasAll(items ...string) bool {
+	for _, item := range items {
+		if !s.Has(item) {
+			return false
+		}
+	}
+	return true
+}
+
+// HasAny returns true if any items are contained in the set.
+func (s String) HasAny(items ...string) bool {
+	for _, item := range items {
+		if s.Has(item) {
+			return true
+		}
+	}
+	return false
+}
+
+// Difference returns a set of objects that are not in s2
+// For example:
+// s1 = {a1, a2, a3}
+// s2 = {a1, a2, a4, a5}
+// s1.Difference(s2) = {a3}
+// s2.Difference(s1) = {a4, a5}
+func (s String) Difference(s2 String) String {
+	result := NewString()
+	for key := range s {
+		if !s2.Has(key) {
+			result.Insert(key)
+		}
+	}
+	return result
+}
+
+// Union returns a new set which includes items in either s1 or s2.
+// For example:
+// s1 = {a1, a2}
+// s2 = {a3, a4}
+// s1.Union(s2) = {a1, a2, a3, a4}
+// s2.Union(s1) = {a1, a2, a3, a4}
+func (s1 String) Union(s2 String) String {
+	result := NewString()
+	for key := range s1 {
+		result.Insert(key)
+	}
+	for key := range s2 {
+		result.Insert(key)
+	}
+	return result
+}
+
+// Intersection returns a new set which includes the item in BOTH s1 and s2
+// For example:
+// s1 = {a1, a2}
+// s2 = {a2, a3}
+// s1.Intersection(s2) = {a2}
+func (s1 String) Intersection(s2 String) String {
+	var walk, other String
+	result := NewString()
+	if s1.Len() < s2.Len() {
+		walk = s1
+		other = s2
+	} else {
+		walk = s2
+		other = s1
+	}
+	for key := range walk {
+		if other.Has(key) {
+			result.Insert(key)
+		}
+	}
+	return result
+}
+
+// IsSuperset returns true if and only if s1 is a superset of s2.
+func (s1 String) IsSuperset(s2 String) bool {
+	for item := range s2 {
+		if !s1.Has(item) {
+			return false
+		}
+	}
+	return true
+}
+
+// Equal returns true if and only if s1 is equal (as a set) to s2.
+// Two sets are equal if their membership is identical.
+// (In practice, this means same elements, order doesn't matter)
+func (s1 String) Equal(s2 String) bool {
+	return len(s1) == len(s2) && s1.IsSuperset(s2)
+}
+
+type sortableSliceOfString []string
+
+func (s sortableSliceOfString) Len() int           { return len(s) }
+func (s sortableSliceOfString) Less(i, j int) bool { return lessString(s[i], s[j]) }
+func (s sortableSliceOfString) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+
+// List returns the contents as a sorted string slice.
+func (s String) List() []string {
+	res := make(sortableSliceOfString, 0, len(s))
+	for key := range s {
+		res = append(res, key)
+	}
+	sort.Sort(res)
+	return []string(res)
+}
+
+// UnsortedList returns the slice with contents in random order.
+func (s String) UnsortedList() []string {
+	res := make([]string, 0, len(s))
+	for key := range s {
+		res = append(res, key)
+	}
+	return res
+}
+
+// PopAny removes and returns a single element from the set.
+func (s String) PopAny() (string, bool) {
+	for key := range s {
+		s.Delete(key)
+		return key, true
+	}
+	var zeroValue string
+	return zeroValue, false
+}
+
+// Len returns the size of the set.
+func (s String) Len() int {
+	return len(s)
+}
+
+func lessString(lhs, rhs string) bool {
+	return lhs < rhs
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go
new file mode 100644
index 0000000..4767fd1
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go
@@ -0,0 +1,259 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package field
+
+import (
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/apimachinery/pkg/util/sets"
+)
+
+// Error is an implementation of the 'error' interface, which represents a
+// field-level validation error.
+type Error struct {
+	Type     ErrorType
+	Field    string
+	BadValue interface{}
+	Detail   string
+}
+
+var _ error = &Error{}
+
+// Error implements the error interface.
+func (v *Error) Error() string {
+	return fmt.Sprintf("%s: %s", v.Field, v.ErrorBody())
+}
+
+// ErrorBody returns the error message without the field name.  This is useful
+// for building nice-looking higher-level error reporting.
+func (v *Error) ErrorBody() string {
+	var s string
+	switch v.Type {
+	case ErrorTypeRequired, ErrorTypeForbidden, ErrorTypeTooLong, ErrorTypeInternal:
+		s = v.Type.String()
+	default:
+		value := v.BadValue
+		valueType := reflect.TypeOf(value)
+		if value == nil || valueType == nil {
+			value = "null"
+		} else if valueType.Kind() == reflect.Ptr {
+			if reflectValue := reflect.ValueOf(value); reflectValue.IsNil() {
+				value = "null"
+			} else {
+				value = reflectValue.Elem().Interface()
+			}
+		}
+		switch t := value.(type) {
+		case int64, int32, float64, float32, bool:
+			// use simple printer for simple types
+			s = fmt.Sprintf("%s: %v", v.Type, value)
+		case string:
+			s = fmt.Sprintf("%s: %q", v.Type, t)
+		case fmt.Stringer:
+			// anything that defines String() is better than raw struct
+			s = fmt.Sprintf("%s: %s", v.Type, t.String())
+		default:
+			// fallback to raw struct
+			// TODO: internal types have panic guards against json.Marshalling to prevent
+			// accidental use of internal types in external serialized form.  For now, use
+			// %#v, although it would be better to show a more expressive output in the future
+			s = fmt.Sprintf("%s: %#v", v.Type, value)
+		}
+	}
+	if len(v.Detail) != 0 {
+		s += fmt.Sprintf(": %s", v.Detail)
+	}
+	return s
+}
+
+// ErrorType is a machine-readable value providing more detail about why
+// a field is invalid.  These values are expected to match 1-1 with
+// CauseType in api/types.go.
+type ErrorType string
+
+// TODO: These values are duplicated in api/types.go, but there's a circular dep.  Fix it.
+const (
+	// ErrorTypeNotFound is used to report failure to find a requested value
+	// (e.g. looking up an ID).  See NotFound().
+	ErrorTypeNotFound ErrorType = "FieldValueNotFound"
+	// ErrorTypeRequired is used to report required values that are not
+	// provided (e.g. empty strings, null values, or empty arrays).  See
+	// Required().
+	ErrorTypeRequired ErrorType = "FieldValueRequired"
+	// ErrorTypeDuplicate is used to report collisions of values that must be
+	// unique (e.g. unique IDs).  See Duplicate().
+	ErrorTypeDuplicate ErrorType = "FieldValueDuplicate"
+	// ErrorTypeInvalid is used to report malformed values (e.g. failed regex
+	// match, too long, out of bounds).  See Invalid().
+	ErrorTypeInvalid ErrorType = "FieldValueInvalid"
+	// ErrorTypeNotSupported is used to report unknown values for enumerated
+	// fields (e.g. a list of valid values).  See NotSupported().
+	ErrorTypeNotSupported ErrorType = "FieldValueNotSupported"
+	// ErrorTypeForbidden is used to report valid (as per formatting rules)
+	// values which would be accepted under some conditions, but which are not
+	// permitted by the current conditions (such as security policy).  See
+	// Forbidden().
+	ErrorTypeForbidden ErrorType = "FieldValueForbidden"
+	// ErrorTypeTooLong is used to report that the given value is too long.
+	// This is similar to ErrorTypeInvalid, but the error will not include the
+	// too-long value.  See TooLong().
+	ErrorTypeTooLong ErrorType = "FieldValueTooLong"
+	// ErrorTypeInternal is used to report other errors that are not related
+	// to user input.  See InternalError().
+	ErrorTypeInternal ErrorType = "InternalError"
+)
+
+// String converts an ErrorType into its corresponding canonical error message.
+func (t ErrorType) String() string {
+	switch t {
+	case ErrorTypeNotFound:
+		return "Not found"
+	case ErrorTypeRequired:
+		return "Required value"
+	case ErrorTypeDuplicate:
+		return "Duplicate value"
+	case ErrorTypeInvalid:
+		return "Invalid value"
+	case ErrorTypeNotSupported:
+		return "Unsupported value"
+	case ErrorTypeForbidden:
+		return "Forbidden"
+	case ErrorTypeTooLong:
+		return "Too long"
+	case ErrorTypeInternal:
+		return "Internal error"
+	default:
+		panic(fmt.Sprintf("unrecognized validation error: %q", string(t)))
+	}
+}
+
+// NotFound returns a *Error indicating "value not found".  This is
+// used to report failure to find a requested value (e.g. looking up an ID).
+func NotFound(field *Path, value interface{}) *Error {
+	return &Error{ErrorTypeNotFound, field.String(), value, ""}
+}
+
+// Required returns a *Error indicating "value required".  This is used
+// to report required values that are not provided (e.g. empty strings, null
+// values, or empty arrays).
+func Required(field *Path, detail string) *Error {
+	return &Error{ErrorTypeRequired, field.String(), "", detail}
+}
+
+// Duplicate returns a *Error indicating "duplicate value".  This is
+// used to report collisions of values that must be unique (e.g. names or IDs).
+func Duplicate(field *Path, value interface{}) *Error {
+	return &Error{ErrorTypeDuplicate, field.String(), value, ""}
+}
+
+// Invalid returns a *Error indicating "invalid value".  This is used
+// to report malformed values (e.g. failed regex match, too long, out of bounds).
+func Invalid(field *Path, value interface{}, detail string) *Error {
+	return &Error{ErrorTypeInvalid, field.String(), value, detail}
+}
+
+// NotSupported returns a *Error indicating "unsupported value".
+// This is used to report unknown values for enumerated fields (e.g. a list of
+// valid values).
+func NotSupported(field *Path, value interface{}, validValues []string) *Error {
+	detail := ""
+	if len(validValues) > 0 {
+		quotedValues := make([]string, len(validValues))
+		for i, v := range validValues {
+			quotedValues[i] = strconv.Quote(v)
+		}
+		detail = "supported values: " + strings.Join(quotedValues, ", ")
+	}
+	return &Error{ErrorTypeNotSupported, field.String(), value, detail}
+}
+
+// Forbidden returns a *Error indicating "forbidden".  This is used to
+// report valid (as per formatting rules) values which would be accepted under
+// some conditions, but which are not permitted by current conditions (e.g.
+// security policy).
+func Forbidden(field *Path, detail string) *Error {
+	return &Error{ErrorTypeForbidden, field.String(), "", detail}
+}
+
+// TooLong returns a *Error indicating "too long".  This is used to
+// report that the given value is too long.  This is similar to
+// Invalid, but the returned error will not include the too-long
+// value.
+func TooLong(field *Path, value interface{}, maxLength int) *Error {
+	return &Error{ErrorTypeTooLong, field.String(), value, fmt.Sprintf("must have at most %d characters", maxLength)}
+}
+
+// InternalError returns a *Error indicating "internal error".  This is used
+// to signal that an error was found that was not directly related to user
+// input.  The err argument must be non-nil.
+func InternalError(field *Path, err error) *Error {
+	return &Error{ErrorTypeInternal, field.String(), nil, err.Error()}
+}
+
+// ErrorList holds a set of Errors.  It is plausible that we might one day have
+// non-field errors in this same umbrella package, but for now we don't, so
+// we can keep it simple and leave ErrorList here.
+type ErrorList []*Error
+
+// NewErrorTypeMatcher returns an errors.Matcher that returns true
+// if the provided error is a Error and has the provided ErrorType.
+func NewErrorTypeMatcher(t ErrorType) utilerrors.Matcher {
+	return func(err error) bool {
+		if e, ok := err.(*Error); ok {
+			return e.Type == t
+		}
+		return false
+	}
+}
+
+// ToAggregate converts the ErrorList into an errors.Aggregate.
+func (list ErrorList) ToAggregate() utilerrors.Aggregate {
+	errs := make([]error, 0, len(list))
+	errorMsgs := sets.NewString()
+	for _, err := range list {
+		msg := fmt.Sprintf("%v", err)
+		if errorMsgs.Has(msg) {
+			continue
+		}
+		errorMsgs.Insert(msg)
+		errs = append(errs, err)
+	}
+	return utilerrors.NewAggregate(errs)
+}
+
+func fromAggregate(agg utilerrors.Aggregate) ErrorList {
+	errs := agg.Errors()
+	list := make(ErrorList, len(errs))
+	for i := range errs {
+		list[i] = errs[i].(*Error)
+	}
+	return list
+}
+
+// Filter removes items from the ErrorList that match the provided fns.
+func (list ErrorList) Filter(fns ...utilerrors.Matcher) ErrorList {
+	err := utilerrors.FilterOut(list.ToAggregate(), fns...)
+	if err == nil {
+		return nil
+	}
+	// FilterOut takes an Aggregate and returns an Aggregate
+	return fromAggregate(err.(utilerrors.Aggregate))
+}
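+
+// Illustrative usage sketch (added for clarity, not part of the upstream
+// file; the field names and values are assumptions for the example only):
+//
+//	var errs ErrorList
+//	errs = append(errs, Required(NewPath("spec", "name"), ""))
+//	errs = append(errs, Invalid(NewPath("spec", "replicas"), -1, "must be non-negative"))
+//	agg := errs.ToAggregate() // de-duplicated aggregate of both messages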
diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/field/path.go b/vendor/k8s.io/apimachinery/pkg/util/validation/field/path.go
new file mode 100644
index 0000000..2efc8ee
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/validation/field/path.go
@@ -0,0 +1,91 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package field
+
+import (
+	"bytes"
+	"fmt"
+	"strconv"
+)
+
+// Path represents the path from some root to a particular field.
+type Path struct {
+	name   string // the name of this field or "" if this is an index
+	index  string // if name == "", this is a subscript (index or map key) of the previous element
+	parent *Path  // nil if this is the root element
+}
+
+// NewPath creates a root Path object.
+func NewPath(name string, moreNames ...string) *Path {
+	r := &Path{name: name, parent: nil}
+	for _, anotherName := range moreNames {
+		r = &Path{name: anotherName, parent: r}
+	}
+	return r
+}
+
+// Root returns the root element of this Path.
+func (p *Path) Root() *Path {
+	for ; p.parent != nil; p = p.parent {
+		// Do nothing.
+	}
+	return p
+}
+
+// Child creates a new Path that is a child of the method receiver.
+func (p *Path) Child(name string, moreNames ...string) *Path {
+	r := NewPath(name, moreNames...)
+	r.Root().parent = p
+	return r
+}
+
+// Index indicates that the previous Path is to be subscripted by an int.
+// This sets the same underlying value as Key.
+func (p *Path) Index(index int) *Path {
+	return &Path{index: strconv.Itoa(index), parent: p}
+}
+
+// Key indicates that the previous Path is to be subscripted by a string.
+// This sets the same underlying value as Index.
+func (p *Path) Key(key string) *Path {
+	return &Path{index: key, parent: p}
+}
+
+// String produces a string representation of the Path.
+func (p *Path) String() string {
+	// make a slice to iterate
+	elems := []*Path{}
+	for ; p != nil; p = p.parent {
+		elems = append(elems, p)
+	}
+
+	// iterate, but it has to be backwards
+	buf := bytes.NewBuffer(nil)
+	for i := range elems {
+		p := elems[len(elems)-1-i]
+		if p.parent != nil && len(p.name) > 0 {
+			// Not the root element and not a subscript, so add a separator.
+			buf.WriteString(".")
+		}
+		if len(p.name) > 0 {
+			buf.WriteString(p.name)
+		} else {
+			fmt.Fprintf(buf, "[%s]", p.index)
+		}
+	}
+	return buf.String()
+}
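+
+// Illustrative usage sketch (added for clarity, not part of the upstream
+// file; the field names are assumptions for the example only):
+//
+//	p := NewPath("spec").Child("containers").Index(0).Child("image")
+//	p.String() // "spec.containers[0].image"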
diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go
new file mode 100644
index 0000000..2dd9999
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go
@@ -0,0 +1,416 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+	"fmt"
+	"math"
+	"net"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/util/validation/field"
+)
+
+const qnameCharFmt string = "[A-Za-z0-9]"
+const qnameExtCharFmt string = "[-A-Za-z0-9_.]"
+const qualifiedNameFmt string = "(" + qnameCharFmt + qnameExtCharFmt + "*)?" + qnameCharFmt
+const qualifiedNameErrMsg string = "must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character"
+const qualifiedNameMaxLength int = 63
+
+var qualifiedNameRegexp = regexp.MustCompile("^" + qualifiedNameFmt + "$")
+
+// IsQualifiedName tests whether the value passed is what Kubernetes calls a
+// "qualified name".  This is a format used in various places throughout the
+// system.  If the value is not valid, a list of error strings is returned.
+// Otherwise an empty list (or nil) is returned.
+func IsQualifiedName(value string) []string {
+	var errs []string
+	parts := strings.Split(value, "/")
+	var name string
+	switch len(parts) {
+	case 1:
+		name = parts[0]
+	case 2:
+		var prefix string
+		prefix, name = parts[0], parts[1]
+		if len(prefix) == 0 {
+			errs = append(errs, "prefix part "+EmptyError())
+		} else if msgs := IsDNS1123Subdomain(prefix); len(msgs) != 0 {
+			errs = append(errs, prefixEach(msgs, "prefix part ")...)
+		}
+	default:
+		return append(errs, "a qualified name "+RegexError(qualifiedNameErrMsg, qualifiedNameFmt, "MyName", "my.name", "123-abc")+
+			" with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName')")
+	}
+
+	if len(name) == 0 {
+		errs = append(errs, "name part "+EmptyError())
+	} else if len(name) > qualifiedNameMaxLength {
+		errs = append(errs, "name part "+MaxLenError(qualifiedNameMaxLength))
+	}
+	if !qualifiedNameRegexp.MatchString(name) {
+		errs = append(errs, "name part "+RegexError(qualifiedNameErrMsg, qualifiedNameFmt, "MyName", "my.name", "123-abc"))
+	}
+	return errs
+}
+
+// IsFullyQualifiedName checks if the name is fully qualified.
+func IsFullyQualifiedName(fldPath *field.Path, name string) field.ErrorList {
+	var allErrors field.ErrorList
+	if len(name) == 0 {
+		return append(allErrors, field.Required(fldPath, ""))
+	}
+	if errs := IsDNS1123Subdomain(name); len(errs) > 0 {
+		return append(allErrors, field.Invalid(fldPath, name, strings.Join(errs, ",")))
+	}
+	if len(strings.Split(name, ".")) < 3 {
+		return append(allErrors, field.Invalid(fldPath, name, "should be a domain with at least three segments separated by dots"))
+	}
+	return allErrors
+}
+
+const labelValueFmt string = "(" + qualifiedNameFmt + ")?"
+const labelValueErrMsg string = "a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character"
+
+// LabelValueMaxLength is a label's max length
+const LabelValueMaxLength int = 63
+
+var labelValueRegexp = regexp.MustCompile("^" + labelValueFmt + "$")
+
+// IsValidLabelValue tests whether the value passed is a valid label value.  If
+// the value is not valid, a list of error strings is returned.  Otherwise an
+// empty list (or nil) is returned.
+func IsValidLabelValue(value string) []string {
+	var errs []string
+	if len(value) > LabelValueMaxLength {
+		errs = append(errs, MaxLenError(LabelValueMaxLength))
+	}
+	if !labelValueRegexp.MatchString(value) {
+		errs = append(errs, RegexError(labelValueErrMsg, labelValueFmt, "MyValue", "my_value", "12345"))
+	}
+	return errs
+}
+
+const dns1123LabelFmt string = "[a-z0-9]([-a-z0-9]*[a-z0-9])?"
+const dns1123LabelErrMsg string = "a DNS-1123 label must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character"
+
+// DNS1123LabelMaxLength is a label's max length in DNS (RFC 1123)
+const DNS1123LabelMaxLength int = 63
+
+var dns1123LabelRegexp = regexp.MustCompile("^" + dns1123LabelFmt + "$")
+
+// IsDNS1123Label tests for a string that conforms to the definition of a label in
+// DNS (RFC 1123).
+func IsDNS1123Label(value string) []string {
+	var errs []string
+	if len(value) > DNS1123LabelMaxLength {
+		errs = append(errs, MaxLenError(DNS1123LabelMaxLength))
+	}
+	if !dns1123LabelRegexp.MatchString(value) {
+		errs = append(errs, RegexError(dns1123LabelErrMsg, dns1123LabelFmt, "my-name", "123-abc"))
+	}
+	return errs
+}
+
+const dns1123SubdomainFmt string = dns1123LabelFmt + "(\\." + dns1123LabelFmt + ")*"
+const dns1123SubdomainErrorMsg string = "a DNS-1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character"
+
+// DNS1123SubdomainMaxLength is a subdomain's max length in DNS (RFC 1123)
+const DNS1123SubdomainMaxLength int = 253
+
+var dns1123SubdomainRegexp = regexp.MustCompile("^" + dns1123SubdomainFmt + "$")
+
+// IsDNS1123Subdomain tests for a string that conforms to the definition of a
+// subdomain in DNS (RFC 1123).
+func IsDNS1123Subdomain(value string) []string {
+	var errs []string
+	if len(value) > DNS1123SubdomainMaxLength {
+		errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength))
+	}
+	if !dns1123SubdomainRegexp.MatchString(value) {
+		errs = append(errs, RegexError(dns1123SubdomainErrorMsg, dns1123SubdomainFmt, "example.com"))
+	}
+	return errs
+}
+
+const dns1035LabelFmt string = "[a-z]([-a-z0-9]*[a-z0-9])?"
+const dns1035LabelErrMsg string = "a DNS-1035 label must consist of lower case alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character"
+
+// DNS1035LabelMaxLength is a label's max length in DNS (RFC 1035)
+const DNS1035LabelMaxLength int = 63
+
+var dns1035LabelRegexp = regexp.MustCompile("^" + dns1035LabelFmt + "$")
+
+// IsDNS1035Label tests for a string that conforms to the definition of a label in
+// DNS (RFC 1035).
+func IsDNS1035Label(value string) []string {
+	var errs []string
+	if len(value) > DNS1035LabelMaxLength {
+		errs = append(errs, MaxLenError(DNS1035LabelMaxLength))
+	}
+	if !dns1035LabelRegexp.MatchString(value) {
+		errs = append(errs, RegexError(dns1035LabelErrMsg, dns1035LabelFmt, "my-name", "abc-123"))
+	}
+	return errs
+}
+
+// wildcard definition - RFC 1034 section 4.3.3.
+// examples:
+// - valid: *.bar.com, *.foo.bar.com
+// - invalid: *.*.bar.com, *.foo.*.com, *bar.com, f*.bar.com, *
+const wildcardDNS1123SubdomainFmt = "\\*\\." + dns1123SubdomainFmt
+const wildcardDNS1123SubdomainErrMsg = "a wildcard DNS-1123 subdomain must start with '*.', followed by a valid DNS subdomain, which must consist of lower case alphanumeric characters, '-' or '.' and end with an alphanumeric character"
+
+var wildcardDNS1123SubdomainRegexp = regexp.MustCompile("^" + wildcardDNS1123SubdomainFmt + "$")
+
+// IsWildcardDNS1123Subdomain tests for a string that conforms to the definition of a
+// wildcard subdomain in DNS (RFC 1034 section 4.3.3).
+func IsWildcardDNS1123Subdomain(value string) []string {
+	var errs []string
+	if len(value) > DNS1123SubdomainMaxLength {
+		errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength))
+	}
+	if !wildcardDNS1123SubdomainRegexp.MatchString(value) {
+		errs = append(errs, RegexError(wildcardDNS1123SubdomainErrMsg, wildcardDNS1123SubdomainFmt, "*.example.com"))
+	}
+	return errs
+}
+
+const cIdentifierFmt string = "[A-Za-z_][A-Za-z0-9_]*"
+const identifierErrMsg string = "a valid C identifier must start with alphabetic character or '_', followed by a string of alphanumeric characters or '_'"
+
+var cIdentifierRegexp = regexp.MustCompile("^" + cIdentifierFmt + "$")
+
+// IsCIdentifier tests for a string that conforms to the definition of an identifier
+// in C. This checks the format, but not the length.
+func IsCIdentifier(value string) []string {
+	if !cIdentifierRegexp.MatchString(value) {
+		return []string{RegexError(identifierErrMsg, cIdentifierFmt, "my_name", "MY_NAME", "MyName")}
+	}
+	return nil
+}
+
+// IsValidPortNum tests that the argument is a valid, non-zero port number.
+func IsValidPortNum(port int) []string {
+	if 1 <= port && port <= 65535 {
+		return nil
+	}
+	return []string{InclusiveRangeError(1, 65535)}
+}
+
+// IsInRange tests that the argument is in an inclusive range.
+func IsInRange(value int, min int, max int) []string {
+	if value >= min && value <= max {
+		return nil
+	}
+	return []string{InclusiveRangeError(min, max)}
+}
+
+// In libcontainer, the UID/GID limits are currently 0 to 1<<31 - 1.
+// TODO: once we have a type for UID/GID we should make these that type.
+const (
+	minUserID  = 0
+	maxUserID  = math.MaxInt32
+	minGroupID = 0
+	maxGroupID = math.MaxInt32
+)
+
+// IsValidGroupID tests that the argument is a valid Unix GID.
+func IsValidGroupID(gid int64) []string {
+	if minGroupID <= gid && gid <= maxGroupID {
+		return nil
+	}
+	return []string{InclusiveRangeError(minGroupID, maxGroupID)}
+}
+
+// IsValidUserID tests that the argument is a valid Unix UID.
+func IsValidUserID(uid int64) []string {
+	if minUserID <= uid && uid <= maxUserID {
+		return nil
+	}
+	return []string{InclusiveRangeError(minUserID, maxUserID)}
+}
+
+var portNameCharsetRegex = regexp.MustCompile("^[-a-z0-9]+$")
+var portNameOneLetterRegexp = regexp.MustCompile("[a-z]")
+
+// IsValidPortName checks that the argument has valid syntax. It must be
+// non-empty and no more than 15 characters long. It may contain only [-a-z0-9]
+// and must contain at least one letter [a-z]. It must not start or end with a
+// hyphen, nor contain adjacent hyphens.
+//
+// Note: We only allow lower-case characters, even though RFC 6335 is case
+// insensitive.
+func IsValidPortName(port string) []string {
+	var errs []string
+	if len(port) > 15 {
+		errs = append(errs, MaxLenError(15))
+	}
+	if !portNameCharsetRegex.MatchString(port) {
+		errs = append(errs, "must contain only alpha-numeric characters (a-z, 0-9), and hyphens (-)")
+	}
+	if !portNameOneLetterRegexp.MatchString(port) {
+		errs = append(errs, "must contain at least one letter or number (a-z, 0-9)")
+	}
+	if strings.Contains(port, "--") {
+		errs = append(errs, "must not contain consecutive hyphens")
+	}
+	if len(port) > 0 && (port[0] == '-' || port[len(port)-1] == '-') {
+		errs = append(errs, "must not begin or end with a hyphen")
+	}
+	return errs
+}
+
+// IsValidIP tests that the argument is a valid IP address.
+func IsValidIP(value string) []string {
+	if net.ParseIP(value) == nil {
+		return []string{"must be a valid IP address, (e.g. 10.9.8.7)"}
+	}
+	return nil
+}
+
+const percentFmt string = "[0-9]+%"
+const percentErrMsg string = "a valid percent string must be a numeric string followed by an ending '%'"
+
+var percentRegexp = regexp.MustCompile("^" + percentFmt + "$")
+
+// IsValidPercent checks that the string is in the form of a percentage.
+func IsValidPercent(percent string) []string {
+	if !percentRegexp.MatchString(percent) {
+		return []string{RegexError(percentErrMsg, percentFmt, "1%", "93%")}
+	}
+	return nil
+}
+
+const httpHeaderNameFmt string = "[-A-Za-z0-9]+"
+const httpHeaderNameErrMsg string = "a valid HTTP header must consist of alphanumeric characters or '-'"
+
+var httpHeaderNameRegexp = regexp.MustCompile("^" + httpHeaderNameFmt + "$")
+
+// IsHTTPHeaderName checks that a string conforms to the Go HTTP library's
+// definition of a valid header field name (a stricter subset than RFC7230).
+func IsHTTPHeaderName(value string) []string {
+	if !httpHeaderNameRegexp.MatchString(value) {
+		return []string{RegexError(httpHeaderNameErrMsg, httpHeaderNameFmt, "X-Header-Name")}
+	}
+	return nil
+}
+
+const envVarNameFmt = "[-._a-zA-Z][-._a-zA-Z0-9]*"
+const envVarNameFmtErrMsg string = "a valid environment variable name must consist of alphabetic characters, digits, '_', '-', or '.', and must not start with a digit"
+
+var envVarNameRegexp = regexp.MustCompile("^" + envVarNameFmt + "$")
+
+// IsEnvVarName tests if a string is a valid environment variable name.
+func IsEnvVarName(value string) []string {
+	var errs []string
+	if !envVarNameRegexp.MatchString(value) {
+		errs = append(errs, RegexError(envVarNameFmtErrMsg, envVarNameFmt, "my.env-name", "MY_ENV.NAME", "MyEnvName1"))
+	}
+
+	errs = append(errs, hasChDirPrefix(value)...)
+	return errs
+}
+
+const configMapKeyFmt = `[-._a-zA-Z0-9]+`
+const configMapKeyErrMsg string = "a valid config key must consist of alphanumeric characters, '-', '_' or '.'"
+
+var configMapKeyRegexp = regexp.MustCompile("^" + configMapKeyFmt + "$")
+
+// IsConfigMapKey tests for a string that is a valid key for a ConfigMap or Secret
+func IsConfigMapKey(value string) []string {
+	var errs []string
+	if len(value) > DNS1123SubdomainMaxLength {
+		errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength))
+	}
+	if !configMapKeyRegexp.MatchString(value) {
+		errs = append(errs, RegexError(configMapKeyErrMsg, configMapKeyFmt, "key.name", "KEY_NAME", "key-name"))
+	}
+	errs = append(errs, hasChDirPrefix(value)...)
+	return errs
+}
+
+// MaxLenError returns a string explanation of a "string too long" validation
+// failure.
+func MaxLenError(length int) string {
+	return fmt.Sprintf("must be no more than %d characters", length)
+}
+
+// RegexError returns a string explanation of a regex validation failure.
+func RegexError(msg string, fmt string, examples ...string) string {
+	if len(examples) == 0 {
+		return msg + " (regex used for validation is '" + fmt + "')"
+	}
+	msg += " (e.g. "
+	for i := range examples {
+		if i > 0 {
+			msg += " or "
+		}
+		msg += "'" + examples[i] + "', "
+	}
+	msg += "regex used for validation is '" + fmt + "')"
+	return msg
+}
+
+// EmptyError returns a string explanation of a "must not be empty" validation
+// failure.
+func EmptyError() string {
+	return "must be non-empty"
+}
+
+func prefixEach(msgs []string, prefix string) []string {
+	for i := range msgs {
+		msgs[i] = prefix + msgs[i]
+	}
+	return msgs
+}
+
+// InclusiveRangeError returns a string explanation of a numeric "must be
+// between" validation failure.
+func InclusiveRangeError(lo, hi int) string {
+	return fmt.Sprintf(`must be between %d and %d, inclusive`, lo, hi)
+}
+
+func hasChDirPrefix(value string) []string {
+	var errs []string
+	switch {
+	case value == ".":
+		errs = append(errs, `must not be '.'`)
+	case value == "..":
+		errs = append(errs, `must not be '..'`)
+	case strings.HasPrefix(value, ".."):
+		errs = append(errs, `must not start with '..'`)
+	}
+	return errs
+}
+
+// IsValidSocketAddr checks that the string represents a valid host:port
+// socket address (e.g. 0.0.0.0:10254 or [::]:10254).
+func IsValidSocketAddr(value string) []string {
+	var errs []string
+	ip, port, err := net.SplitHostPort(value)
+	if err != nil {
+		errs = append(errs, "must be a valid socket address format, (e.g. 0.0.0.0:10254 or [::]:10254)")
+		return errs
+	}
+	portInt, _ := strconv.Atoi(port)
+	errs = append(errs, IsValidPortNum(portInt)...)
+	errs = append(errs, IsValidIP(ip)...)
+	return errs
+}
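
A minimal caller sketch (illustrative, not part of the vendored file): IsValidSocketAddr composes net.SplitHostPort with the port and IP validators, so malformed host:port pairs fail fast and the remaining errors are per-component:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation"
)

func main() {
	for _, addr := range []string{"0.0.0.0:10254", "[::]:10254", "localhost", "1.2.3.4:99999"} {
		fmt.Println(addr, validation.IsValidSocketAddr(addr)) // the first two print an empty error list
	}
}
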
diff --git a/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go b/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go
new file mode 100644
index 0000000..63d735a
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go
@@ -0,0 +1,346 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package yaml
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"strings"
+	"unicode"
+
+	"k8s.io/klog"
+	"sigs.k8s.io/yaml"
+)
+
+// ToJSON converts a single YAML document into a JSON document
+// or returns an error. If the document appears to be JSON the
+// YAML decoding path is not used (so that error messages are
+// JSON specific).
+func ToJSON(data []byte) ([]byte, error) {
+	if hasJSONPrefix(data) {
+		return data, nil
+	}
+	return yaml.YAMLToJSON(data)
+}
+
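A quick illustration of the pass-through behavior (a sketch, not part of the vendored file; key order in the output follows sigs.k8s.io/yaml's sorted marshaling):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/yaml"
)

func main() {
	out, err := yaml.ToJSON([]byte("name: demo\nport: 80\n"))
	fmt.Println(string(out), err) // {"name":"demo","port":80} <nil>

	out, _ = yaml.ToJSON([]byte(`{"name":"demo"}`)) // JSON input is returned untouched
	fmt.Println(string(out))
}
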
+// YAMLToJSONDecoder decodes YAML documents from an io.Reader by
+// separating individual documents. It first converts the YAML
+// body to JSON, then unmarshals the JSON.
+type YAMLToJSONDecoder struct {
+	reader Reader
+}
+
+// NewYAMLToJSONDecoder decodes YAML documents from the provided
+// stream in chunks by converting each document (as defined by
+// the YAML spec) into its own chunk, converting it to JSON via
+// yaml.YAMLToJSON, and then passing it to json.Decoder.
+func NewYAMLToJSONDecoder(r io.Reader) *YAMLToJSONDecoder {
+	reader := bufio.NewReader(r)
+	return &YAMLToJSONDecoder{
+		reader: NewYAMLReader(reader),
+	}
+}
+
+// Decode reads a YAML document as JSON from the stream or returns
+// an error. The decoding rules match json.Unmarshal, not
+// yaml.Unmarshal.
+func (d *YAMLToJSONDecoder) Decode(into interface{}) error {
+	bytes, err := d.reader.Read()
+	if err != nil && err != io.EOF {
+		return err
+	}
+
+	if len(bytes) != 0 {
+		err := yaml.Unmarshal(bytes, into)
+		if err != nil {
+			return YAMLSyntaxError{err}
+		}
+	}
+	return err
+}
+
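A typical multi-document decode loop (sketch only, not part of the vendored file). The final document arrives with a nil error; the subsequent call returns io.EOF with nothing decoded:

package main

import (
	"fmt"
	"io"
	"log"
	"strings"

	"k8s.io/apimachinery/pkg/util/yaml"
)

func main() {
	d := yaml.NewYAMLToJSONDecoder(strings.NewReader("a: 1\n---\nb: 2\n"))
	for {
		var doc map[string]interface{}
		err := d.Decode(&doc)
		if len(doc) > 0 {
			fmt.Println(doc) // map[a:1], then map[b:2]
		}
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
	}
}
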
+// YAMLDecoder reads chunks of objects and returns ErrShortBuffer if
+// the data is not sufficient.
+type YAMLDecoder struct {
+	r         io.ReadCloser
+	scanner   *bufio.Scanner
+	remaining []byte
+}
+
+// NewDocumentDecoder decodes YAML documents from the provided
+// stream in chunks by converting each document (as defined by
+// the YAML spec) into its own chunk. io.ErrShortBuffer will be
+// returned if the entire buffer could not be read to assist
+// the caller in framing the chunk.
+func NewDocumentDecoder(r io.ReadCloser) io.ReadCloser {
+	scanner := bufio.NewScanner(r)
+	scanner.Split(splitYAMLDocument)
+	return &YAMLDecoder{
+		r:       r,
+		scanner: scanner,
+	}
+}
+
+// Read reads the previous slice into the buffer, or attempts to read
+// the next chunk.
+// TODO: switch to readline approach.
+func (d *YAMLDecoder) Read(data []byte) (n int, err error) {
+	left := len(d.remaining)
+	if left == 0 {
+		// return the next chunk from the stream
+		if !d.scanner.Scan() {
+			err := d.scanner.Err()
+			if err == nil {
+				err = io.EOF
+			}
+			return 0, err
+		}
+		out := d.scanner.Bytes()
+		d.remaining = out
+		left = len(out)
+	}
+
+	// fits within data
+	if left <= len(data) {
+		copy(data, d.remaining)
+		d.remaining = nil
+		return left, nil
+	}
+
+	// caller will need to reread
+	copy(data, d.remaining[:len(data)])
+	d.remaining = d.remaining[len(data):]
+	return len(data), io.ErrShortBuffer
+}
+
+func (d *YAMLDecoder) Close() error {
+	return d.r.Close()
+}
+
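The io.ErrShortBuffer contract above is easiest to see with a deliberately tiny buffer (a sketch under assumed inputs, not part of the vendored file):

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"strings"

	"k8s.io/apimachinery/pkg/util/yaml"
)

func main() {
	dec := yaml.NewDocumentDecoder(ioutil.NopCloser(strings.NewReader("a: 1\n---\nb: 2\n")))
	defer dec.Close()

	buf := make([]byte, 4) // tiny on purpose, to force io.ErrShortBuffer
	var doc []byte
	for {
		n, err := dec.Read(buf)
		doc = append(doc, buf[:n]...)
		switch err {
		case nil:
			fmt.Printf("document: %q\n", doc)
			doc = doc[:0]
		case io.ErrShortBuffer:
			// same document continues; keep reading into the same accumulator
		case io.EOF:
			return
		default:
			log.Fatal(err)
		}
	}
}
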
+const yamlSeparator = "\n---"
+const separator = "---"
+
+// splitYAMLDocument is a bufio.SplitFunc for splitting YAML streams into individual documents.
+func splitYAMLDocument(data []byte, atEOF bool) (advance int, token []byte, err error) {
+	if atEOF && len(data) == 0 {
+		return 0, nil, nil
+	}
+	sep := len([]byte(yamlSeparator))
+	if i := bytes.Index(data, []byte(yamlSeparator)); i >= 0 {
+		// We have a potential document terminator
+		i += sep
+		after := data[i:]
+		if len(after) == 0 {
+			// we can't read any more characters
+			if atEOF {
+				return len(data), data[:len(data)-sep], nil
+			}
+			return 0, nil, nil
+		}
+		if j := bytes.IndexByte(after, '\n'); j >= 0 {
+			return i + j + 1, data[0 : i-sep], nil
+		}
+		return 0, nil, nil
+	}
+	// If we're at EOF, we have a final, non-terminated line. Return it.
+	if atEOF {
+		return len(data), data, nil
+	}
+	// Request more data.
+	return 0, nil, nil
+}
+
+// decoder is a convenience interface for Decode.
+type decoder interface {
+	Decode(into interface{}) error
+}
+
+// YAMLOrJSONDecoder attempts to decode a stream of JSON documents or
+// YAML documents by sniffing for a leading { character.
+type YAMLOrJSONDecoder struct {
+	r          io.Reader
+	bufferSize int
+
+	decoder decoder
+	rawData []byte
+}
+
+type JSONSyntaxError struct {
+	Line int
+	Err  error
+}
+
+func (e JSONSyntaxError) Error() string {
+	return fmt.Sprintf("json: line %d: %s", e.Line, e.Err.Error())
+}
+
+type YAMLSyntaxError struct {
+	err error
+}
+
+func (e YAMLSyntaxError) Error() string {
+	return e.err.Error()
+}
+
+// NewYAMLOrJSONDecoder returns a decoder that will process YAML documents
+// or JSON documents from the given reader as a stream. bufferSize determines
+// how far into the stream the decoder will look to figure out whether this
+// is a JSON stream (optional whitespace followed by an open brace).
+func NewYAMLOrJSONDecoder(r io.Reader, bufferSize int) *YAMLOrJSONDecoder {
+	return &YAMLOrJSONDecoder{
+		r:          r,
+		bufferSize: bufferSize,
+	}
+}
+
+// Decode unmarshals the next object from the underlying stream into the
+// provide object, or returns an error.
+func (d *YAMLOrJSONDecoder) Decode(into interface{}) error {
+	if d.decoder == nil {
+		buffer, origData, isJSON := GuessJSONStream(d.r, d.bufferSize)
+		if isJSON {
+			klog.V(4).Infof("decoding stream as JSON")
+			d.decoder = json.NewDecoder(buffer)
+			d.rawData = origData
+		} else {
+			klog.V(4).Infof("decoding stream as YAML")
+			d.decoder = NewYAMLToJSONDecoder(buffer)
+		}
+	}
+	err := d.decoder.Decode(into)
+	if jsonDecoder, ok := d.decoder.(*json.Decoder); ok {
+		if syntax, ok := err.(*json.SyntaxError); ok {
+			data, readErr := ioutil.ReadAll(jsonDecoder.Buffered())
+			if readErr != nil {
+				klog.V(4).Infof("reading stream failed: %v", readErr)
+			}
+			js := string(data)
+
+			// if contents from io.Reader are not complete,
+			// use the original raw data to prevent panic
+			if int64(len(js)) <= syntax.Offset {
+				js = string(d.rawData)
+			}
+
+			start := strings.LastIndex(js[:syntax.Offset], "\n") + 1
+			line := strings.Count(js[:start], "\n")
+			return JSONSyntaxError{
+				Line: line,
+				Err:  syntax,
+			}
+		}
+	}
+	return err
+}
+
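A sketch of the sniffing behavior (illustrative only, not part of the vendored file): a leading '{' selects the JSON path, anything else falls back to YAML:

package main

import (
	"fmt"
	"log"
	"strings"

	"k8s.io/apimachinery/pkg/util/yaml"
)

func main() {
	var obj map[string]interface{}

	j := yaml.NewYAMLOrJSONDecoder(strings.NewReader(`{"kind":"ConfigMap"}`), 4096)
	if err := j.Decode(&obj); err != nil {
		log.Fatal(err)
	}
	fmt.Println(obj["kind"]) // ConfigMap

	y := yaml.NewYAMLOrJSONDecoder(strings.NewReader("kind: Secret\n"), 4096)
	if err := y.Decode(&obj); err != nil {
		log.Fatal(err)
	}
	fmt.Println(obj["kind"]) // Secret
}
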
+type Reader interface {
+	Read() ([]byte, error)
+}
+
+type YAMLReader struct {
+	reader Reader
+}
+
+func NewYAMLReader(r *bufio.Reader) *YAMLReader {
+	return &YAMLReader{
+		reader: &LineReader{reader: r},
+	}
+}
+
+// Read returns a full YAML document.
+func (r *YAMLReader) Read() ([]byte, error) {
+	var buffer bytes.Buffer
+	for {
+		line, err := r.reader.Read()
+		if err != nil && err != io.EOF {
+			return nil, err
+		}
+
+		sep := len([]byte(separator))
+		if i := bytes.Index(line, []byte(separator)); i == 0 {
+			// We have a potential document terminator
+			i += sep
+			after := line[i:]
+			if len(strings.TrimRightFunc(string(after), unicode.IsSpace)) == 0 {
+				if buffer.Len() != 0 {
+					return buffer.Bytes(), nil
+				}
+				if err == io.EOF {
+					return nil, err
+				}
+			}
+		}
+		if err == io.EOF {
+			if buffer.Len() != 0 {
+				// If we're at EOF, we have a final, non-terminated line. Return it.
+				return buffer.Bytes(), nil
+			}
+			return nil, err
+		}
+		buffer.Write(line)
+	}
+}
+
+type LineReader struct {
+	reader *bufio.Reader
+}
+
+// Read returns a single line (terminated by '\n') from the underlying reader.
+// An error is returned iff there is an error with the underlying reader.
+func (r *LineReader) Read() ([]byte, error) {
+	var (
+		isPrefix bool  = true
+		err      error = nil
+		line     []byte
+		buffer   bytes.Buffer
+	)
+
+	for isPrefix && err == nil {
+		line, isPrefix, err = r.reader.ReadLine()
+		buffer.Write(line)
+	}
+	buffer.WriteByte('\n')
+	return buffer.Bytes(), err
+}
+
+// GuessJSONStream scans the provided reader up to size, looking
+// for an open brace indicating this is JSON. It will return the
+// bufio.Reader it creates for the consumer.
+func GuessJSONStream(r io.Reader, size int) (io.Reader, []byte, bool) {
+	buffer := bufio.NewReaderSize(r, size)
+	b, _ := buffer.Peek(size)
+	return buffer, b, hasJSONPrefix(b)
+}
+
+var jsonPrefix = []byte("{")
+
+// hasJSONPrefix returns true if the provided buffer appears to start with
+// a JSON open brace.
+func hasJSONPrefix(buf []byte) bool {
+	return hasPrefix(buf, jsonPrefix)
+}
+
+// hasPrefix returns true if the first non-whitespace bytes in buf match
+// prefix.
+func hasPrefix(buf []byte, prefix []byte) bool {
+	trim := bytes.TrimLeftFunc(buf, unicode.IsSpace)
+	return bytes.HasPrefix(trim, prefix)
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/version/doc.go b/vendor/k8s.io/apimachinery/pkg/version/doc.go
new file mode 100644
index 0000000..29574fd
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/version/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:openapi-gen=true
+
+// Package version supplies the type for version information collected at build time.
+package version // import "k8s.io/apimachinery/pkg/version"
diff --git a/vendor/k8s.io/apimachinery/pkg/version/helpers.go b/vendor/k8s.io/apimachinery/pkg/version/helpers.go
new file mode 100644
index 0000000..5e041d6
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/version/helpers.go
@@ -0,0 +1,88 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package version
+
+import (
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+type versionType int
+
+const (
+	// The bigger the version type number, the higher its priority.
+	versionTypeAlpha versionType = iota
+	versionTypeBeta
+	versionTypeGA
+)
+
+var kubeVersionRegex = regexp.MustCompile("^v([\\d]+)(?:(alpha|beta)([\\d]+))?$")
+
+func parseKubeVersion(v string) (majorVersion int, vType versionType, minorVersion int, ok bool) {
+	var err error
+	submatches := kubeVersionRegex.FindStringSubmatch(v)
+	if len(submatches) != 4 {
+		return 0, 0, 0, false
+	}
+	switch submatches[2] {
+	case "alpha":
+		vType = versionTypeAlpha
+	case "beta":
+		vType = versionTypeBeta
+	case "":
+		vType = versionTypeGA
+	default:
+		return 0, 0, 0, false
+	}
+	if majorVersion, err = strconv.Atoi(submatches[1]); err != nil {
+		return 0, 0, 0, false
+	}
+	if vType != versionTypeGA {
+		if minorVersion, err = strconv.Atoi(submatches[3]); err != nil {
+			return 0, 0, 0, false
+		}
+	}
+	return majorVersion, vType, minorVersion, true
+}
+
+// CompareKubeAwareVersionStrings compares two kube-like version strings.
+// Kube-like version strings start with a "v", followed by a major version number and, optionally,
+// an "alpha" or "beta" qualifier followed by a minor version number (e.g. v1, v2beta1). Versions sort
+// on GA/beta/alpha first and then on major and minor versions, e.g. v2, v1, v1beta2, v1beta1, v1alpha1.
+func CompareKubeAwareVersionStrings(v1, v2 string) int {
+	if v1 == v2 {
+		return 0
+	}
+	v1major, v1type, v1minor, ok1 := parseKubeVersion(v1)
+	v2major, v2type, v2minor, ok2 := parseKubeVersion(v2)
+	switch {
+	case !ok1 && !ok2:
+		return strings.Compare(v2, v1)
+	case !ok1 && ok2:
+		return -1
+	case ok1 && !ok2:
+		return 1
+	}
+	if v1type != v2type {
+		return int(v1type) - int(v2type)
+	}
+	if v1major != v2major {
+		return v1major - v2major
+	}
+	return v1minor - v2minor
+}
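
The comparator plugs directly into sort.Slice; a sketch (not part of the vendored file) reproducing the ordering from the doc comment:

package main

import (
	"fmt"
	"sort"

	"k8s.io/apimachinery/pkg/version"
)

func main() {
	vs := []string{"v1alpha1", "v2", "v1beta2", "v1", "v1beta1"}
	sort.Slice(vs, func(i, j int) bool {
		return version.CompareKubeAwareVersionStrings(vs[i], vs[j]) > 0 // higher priority first
	})
	fmt.Println(vs) // [v2 v1 v1beta2 v1beta1 v1alpha1]
}
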
diff --git a/vendor/k8s.io/apimachinery/pkg/version/types.go b/vendor/k8s.io/apimachinery/pkg/version/types.go
new file mode 100644
index 0000000..72727b5
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/version/types.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package version
+
+// Info contains versioning information.
+// TODO: Add []string of api versions supported? It's still unclear
+// how we'll want to distribute that information.
+type Info struct {
+	Major        string `json:"major"`
+	Minor        string `json:"minor"`
+	GitVersion   string `json:"gitVersion"`
+	GitCommit    string `json:"gitCommit"`
+	GitTreeState string `json:"gitTreeState"`
+	BuildDate    string `json:"buildDate"`
+	GoVersion    string `json:"goVersion"`
+	Compiler     string `json:"compiler"`
+	Platform     string `json:"platform"`
+}
+
+// String returns info as a human-friendly version string.
+func (info Info) String() string {
+	return info.GitVersion
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/watch/doc.go b/vendor/k8s.io/apimachinery/pkg/watch/doc.go
new file mode 100644
index 0000000..7e6bf3f
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/watch/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package watch contains a generic watchable interface, and a fake for
+// testing code that uses the watch interface.
+package watch // import "k8s.io/apimachinery/pkg/watch"
diff --git a/vendor/k8s.io/apimachinery/pkg/watch/filter.go b/vendor/k8s.io/apimachinery/pkg/watch/filter.go
new file mode 100644
index 0000000..22c9449
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/watch/filter.go
@@ -0,0 +1,105 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package watch
+
+import (
+	"sync"
+)
+
+// FilterFunc should take an event, possibly modify it in some way, and return
+// the modified event. If the event should be ignored, then return keep=false.
+type FilterFunc func(in Event) (out Event, keep bool)
+
+// Filter passes all events through f before allowing them to pass on.
+// Putting a filter on a watch, as an unavoidable side effect of the way Go
+// channels work, effectively increases the watch's event channel queue
+// length by one.
+//
+// WARNING: Filter has a fatal flaw: it can't properly update the Type
+// field (Added/Modified/Deleted) to reflect items that begin to pass the
+// filter when they previously didn't.
+func Filter(w Interface, f FilterFunc) Interface {
+	fw := &filteredWatch{
+		incoming: w,
+		result:   make(chan Event),
+		f:        f,
+	}
+	go fw.loop()
+	return fw
+}
+
+type filteredWatch struct {
+	incoming Interface
+	result   chan Event
+	f        FilterFunc
+}
+
+// ResultChan returns a channel which will receive filtered events.
+func (fw *filteredWatch) ResultChan() <-chan Event {
+	return fw.result
+}
+
+// Stop stops the upstream watch, which will eventually stop this watch.
+func (fw *filteredWatch) Stop() {
+	fw.incoming.Stop()
+}
+
+// loop waits for new values, filters them, and resends them.
+func (fw *filteredWatch) loop() {
+	defer close(fw.result)
+	for event := range fw.incoming.ResultChan() {
+		filtered, keep := fw.f(event)
+		if keep {
+			fw.result <- filtered
+		}
+	}
+}
+
+// Recorder records all events that are sent from the watch until it is closed.
+type Recorder struct {
+	Interface
+
+	lock   sync.Mutex
+	events []Event
+}
+
+var _ Interface = &Recorder{}
+
+// NewRecorder wraps an Interface and records any changes sent across it.
+func NewRecorder(w Interface) *Recorder {
+	r := &Recorder{}
+	r.Interface = Filter(w, r.record)
+	return r
+}
+
+// record is a FilterFunc and tracks each received event.
+func (r *Recorder) record(in Event) (Event, bool) {
+	r.lock.Lock()
+	defer r.lock.Unlock()
+	r.events = append(r.events, in)
+	return in, true
+}
+
+// Events returns a copy of the events sent across this recorder.
+func (r *Recorder) Events() []Event {
+	r.lock.Lock()
+	defer r.lock.Unlock()
+	copied := make([]Event, len(r.events))
+	copy(copied, r.events)
+	return copied
+}
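
A sketch of Filter and FakeWatcher together (illustrative only, not part of the vendored file; nil objects are used purely for brevity, since Event.Object may be nil):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/watch"
)

func main() {
	source := watch.NewFake()
	// Drop DELETED events; pass everything else through unchanged.
	filtered := watch.Filter(source, func(in watch.Event) (watch.Event, bool) {
		return in, in.Type != watch.Deleted
	})

	go func() {
		source.Add(nil)
		source.Delete(nil) // filtered out
		source.Modify(nil)
		source.Stop()
	}()

	for ev := range filtered.ResultChan() {
		fmt.Println(ev.Type) // ADDED, then MODIFIED
	}
}
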
diff --git a/vendor/k8s.io/apimachinery/pkg/watch/mux.go b/vendor/k8s.io/apimachinery/pkg/watch/mux.go
new file mode 100644
index 0000000..0ac8dc4
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/watch/mux.go
@@ -0,0 +1,260 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package watch
+
+import (
+	"sync"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// FullChannelBehavior controls how the Broadcaster reacts if a watcher's watch
+// channel is full.
+type FullChannelBehavior int
+
+const (
+	WaitIfChannelFull FullChannelBehavior = iota
+	DropIfChannelFull
+)
+
+// Buffer the incoming queue a little bit even though it should rarely ever accumulate
+// anything, just in case a few events are received in such a short window that
+// Broadcaster can't move them onto the watchers' queues fast enough.
+const incomingQueueLength = 25
+
+// Broadcaster distributes event notifications among any number of watchers. Every event
+// is delivered to every watcher.
+type Broadcaster struct {
+	// TODO: see if this lock is needed now that new watchers go through
+	// the incoming channel.
+	lock sync.Mutex
+
+	watchers     map[int64]*broadcasterWatcher
+	nextWatcher  int64
+	distributing sync.WaitGroup
+
+	incoming chan Event
+
+	// How large to make watcher's channel.
+	watchQueueLength int
+	// If one of the watch channels is full, don't wait for it to become empty.
+	// Instead just deliver it to the watchers that do have space in their
+	// channels and move on to the next event.
+	// It's more fair to do this on a per-watcher basis than to do it on the
+	// "incoming" channel, which would allow one slow watcher to prevent all
+	// other watchers from getting new events.
+	fullChannelBehavior FullChannelBehavior
+}
+
+// NewBroadcaster creates a new Broadcaster. queueLength is the maximum number of events to queue per watcher.
+// It is guaranteed that events will be distributed in the order in which they occur,
+// but the order in which a single event is distributed among all of the watchers is unspecified.
+func NewBroadcaster(queueLength int, fullChannelBehavior FullChannelBehavior) *Broadcaster {
+	m := &Broadcaster{
+		watchers:            map[int64]*broadcasterWatcher{},
+		incoming:            make(chan Event, incomingQueueLength),
+		watchQueueLength:    queueLength,
+		fullChannelBehavior: fullChannelBehavior,
+	}
+	m.distributing.Add(1)
+	go m.loop()
+	return m
+}
+
+const internalRunFunctionMarker = "internal-do-function"
+
+// a function type we can shoehorn into the queue.
+type functionFakeRuntimeObject func()
+
+func (obj functionFakeRuntimeObject) GetObjectKind() schema.ObjectKind {
+	return schema.EmptyObjectKind
+}
+func (obj functionFakeRuntimeObject) DeepCopyObject() runtime.Object {
+	if obj == nil {
+		return nil
+	}
+	// funcs are immutable. Hence, just return the original func.
+	return obj
+}
+
+// Execute f, blocking the incoming queue (and waiting for it to drain first).
+// The purpose of this terrible hack is so that watchers added after an event
+// won't ever see that event, and will always see any event after they are
+// added.
+func (b *Broadcaster) blockQueue(f func()) {
+	var wg sync.WaitGroup
+	wg.Add(1)
+	b.incoming <- Event{
+		Type: internalRunFunctionMarker,
+		Object: functionFakeRuntimeObject(func() {
+			defer wg.Done()
+			f()
+		}),
+	}
+	wg.Wait()
+}
+
+// Watch adds a new watcher to the list and returns an Interface for it.
+// Note: new watchers will only receive new events. They won't get an entire history
+// of previous events.
+func (m *Broadcaster) Watch() Interface {
+	var w *broadcasterWatcher
+	m.blockQueue(func() {
+		m.lock.Lock()
+		defer m.lock.Unlock()
+		id := m.nextWatcher
+		m.nextWatcher++
+		w = &broadcasterWatcher{
+			result:  make(chan Event, m.watchQueueLength),
+			stopped: make(chan struct{}),
+			id:      id,
+			m:       m,
+		}
+		m.watchers[id] = w
+	})
+	return w
+}
+
+// WatchWithPrefix adds a new watcher to the list and returns an Interface for it. It sends
+// queuedEvents down the new watch before beginning to send ordinary events from Broadcaster.
+// The returned watch will have a queue length that is at least large enough to accommodate
+// all of the items in queuedEvents.
+func (m *Broadcaster) WatchWithPrefix(queuedEvents []Event) Interface {
+	var w *broadcasterWatcher
+	m.blockQueue(func() {
+		m.lock.Lock()
+		defer m.lock.Unlock()
+		id := m.nextWatcher
+		m.nextWatcher++
+		length := m.watchQueueLength
+		if n := len(queuedEvents) + 1; n > length {
+			length = n
+		}
+		w = &broadcasterWatcher{
+			result:  make(chan Event, length),
+			stopped: make(chan struct{}),
+			id:      id,
+			m:       m,
+		}
+		m.watchers[id] = w
+		for _, e := range queuedEvents {
+			w.result <- e
+		}
+	})
+	return w
+}
+
+// stopWatching stops the given watcher and removes it from the list.
+func (m *Broadcaster) stopWatching(id int64) {
+	m.lock.Lock()
+	defer m.lock.Unlock()
+	w, ok := m.watchers[id]
+	if !ok {
+		// No need to do anything, it's already been removed from the list.
+		return
+	}
+	delete(m.watchers, id)
+	close(w.result)
+}
+
+// closeAll disconnects all watchers (presumably in response to a Shutdown call).
+func (m *Broadcaster) closeAll() {
+	m.lock.Lock()
+	defer m.lock.Unlock()
+	for _, w := range m.watchers {
+		close(w.result)
+	}
+	// Delete everything from the map, since presence/absence in the map is used
+	// by stopWatching to avoid double-closing the channel.
+	m.watchers = map[int64]*broadcasterWatcher{}
+}
+
+// Action distributes the given event among all watchers.
+func (m *Broadcaster) Action(action EventType, obj runtime.Object) {
+	m.incoming <- Event{action, obj}
+}
+
+// Shutdown disconnects all watchers (but any queued events will still be distributed).
+// You must not call Action or Watch* after calling Shutdown. This call blocks
+// until all events have been distributed through the outbound channels. Note
+// that since those channels can be buffered, the watchers might not have
+// received the data yet; it can still be sitting in a buffered channel.
+func (m *Broadcaster) Shutdown() {
+	close(m.incoming)
+	m.distributing.Wait()
+}
+
+// loop receives from m.incoming and distributes to all watchers.
+func (m *Broadcaster) loop() {
+	// Deliberately not catching crashes here. Yes, bring down the process if there's a
+	// bug in watch.Broadcaster.
+	for event := range m.incoming {
+		if event.Type == internalRunFunctionMarker {
+			event.Object.(functionFakeRuntimeObject)()
+			continue
+		}
+		m.distribute(event)
+	}
+	m.closeAll()
+	m.distributing.Done()
+}
+
+// distribute sends event to all watchers. Blocking.
+func (m *Broadcaster) distribute(event Event) {
+	m.lock.Lock()
+	defer m.lock.Unlock()
+	if m.fullChannelBehavior == DropIfChannelFull {
+		for _, w := range m.watchers {
+			select {
+			case w.result <- event:
+			case <-w.stopped:
+			default: // Don't block if the event can't be queued.
+			}
+		}
+	} else {
+		for _, w := range m.watchers {
+			select {
+			case w.result <- event:
+			case <-w.stopped:
+			}
+		}
+	}
+}
+
+// broadcasterWatcher handles a single watcher of a broadcaster
+type broadcasterWatcher struct {
+	result  chan Event
+	stopped chan struct{}
+	stop    sync.Once
+	id      int64
+	m       *Broadcaster
+}
+
+// ResultChan returns a channel to use for waiting on events.
+func (mw *broadcasterWatcher) ResultChan() <-chan Event {
+	return mw.result
+}
+
+// Stop stops watching and removes mw from its list.
+func (mw *broadcasterWatcher) Stop() {
+	mw.stop.Do(func() {
+		close(mw.stopped)
+		mw.m.stopWatching(mw.id)
+	})
+}
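
A minimal fan-out sketch (not part of the vendored file; the nil event object is for illustration only):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/watch"
)

func main() {
	b := watch.NewBroadcaster(10, watch.WaitIfChannelFull)
	w1 := b.Watch()
	w2 := b.Watch()

	b.Action(watch.Added, nil)            // every watcher receives every event
	fmt.Println((<-w1.ResultChan()).Type) // ADDED
	fmt.Println((<-w2.ResultChan()).Type) // ADDED

	b.Shutdown() // blocks until queued events have been distributed
}
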
diff --git a/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go b/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go
new file mode 100644
index 0000000..d61cf5a
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go
@@ -0,0 +1,119 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package watch
+
+import (
+	"io"
+	"sync"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/net"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/klog"
+)
+
+// Decoder allows StreamWatcher to watch any stream for which a Decoder can be written.
+type Decoder interface {
+	// Decode should return the type of event and the decoded object, or an error.
+	// An error will cause StreamWatcher to call Close(). Decode should block until
+	// it has data or an error occurs.
+	Decode() (action EventType, object runtime.Object, err error)
+
+	// Close should close the underlying io.Reader, signalling to the source of
+	// the stream that it is no longer being watched. Close() must cause any
+	// outstanding call to Decode() to return with an error of some sort.
+	Close()
+}
+
+// StreamWatcher turns any stream for which you can write a Decoder interface
+// into a watch.Interface.
+type StreamWatcher struct {
+	sync.Mutex
+	source  Decoder
+	result  chan Event
+	stopped bool
+}
+
+// NewStreamWatcher creates a StreamWatcher from the given decoder.
+func NewStreamWatcher(d Decoder) *StreamWatcher {
+	sw := &StreamWatcher{
+		source: d,
+		// It's easy for a consumer to add buffering via an extra
+		// goroutine/channel, but impossible for them to remove it,
+		// so nonbuffered is better.
+		result: make(chan Event),
+	}
+	go sw.receive()
+	return sw
+}
+
+// ResultChan implements Interface.
+func (sw *StreamWatcher) ResultChan() <-chan Event {
+	return sw.result
+}
+
+// Stop implements Interface.
+func (sw *StreamWatcher) Stop() {
+	// Call Close() exactly once by locking and setting a flag.
+	sw.Lock()
+	defer sw.Unlock()
+	if !sw.stopped {
+		sw.stopped = true
+		sw.source.Close()
+	}
+}
+
+// stopping returns true if Stop() was called previously.
+func (sw *StreamWatcher) stopping() bool {
+	sw.Lock()
+	defer sw.Unlock()
+	return sw.stopped
+}
+
+// receive reads result from the decoder in a loop and sends down the result channel.
+func (sw *StreamWatcher) receive() {
+	defer close(sw.result)
+	defer sw.Stop()
+	defer utilruntime.HandleCrash()
+	for {
+		action, obj, err := sw.source.Decode()
+		if err != nil {
+			// Ignore expected error.
+			if sw.stopping() {
+				return
+			}
+			switch err {
+			case io.EOF:
+				// watch closed normally
+			case io.ErrUnexpectedEOF:
+				klog.V(1).Infof("Unexpected EOF during watch stream event decoding: %v", err)
+			default:
+				msg := "Unable to decode an event from the watch stream: %v"
+				if net.IsProbableEOF(err) {
+					klog.V(5).Infof(msg, err)
+				} else {
+					klog.Errorf(msg, err)
+				}
+			}
+			return
+		}
+		sw.result <- Event{
+			Type:   action,
+			Object: obj,
+		}
+	}
+}
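
One way to satisfy the Decoder interface is a newline-delimited JSON stream; this is a hypothetical sketch (jsonEventDecoder is not part of the vendored code, and converting the raw payload into a runtime.Object is deliberately elided):

package main

import (
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"strings"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
)

type jsonEventDecoder struct {
	dec *json.Decoder
	c   io.Closer
}

func (d *jsonEventDecoder) Decode() (watch.EventType, runtime.Object, error) {
	var raw struct {
		Type   string          `json:"type"`
		Object json.RawMessage `json:"object"`
	}
	if err := d.dec.Decode(&raw); err != nil {
		return "", nil, err // io.EOF ends the watch cleanly
	}
	return watch.EventType(raw.Type), nil /* a real decoder converts raw.Object here */, nil
}

func (d *jsonEventDecoder) Close() { d.c.Close() }

func main() {
	body := ioutil.NopCloser(strings.NewReader(`{"type":"ADDED","object":{}}`))
	w := watch.NewStreamWatcher(&jsonEventDecoder{dec: json.NewDecoder(body), c: body})
	for ev := range w.ResultChan() {
		fmt.Println(ev.Type) // ADDED
	}
}
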
diff --git a/vendor/k8s.io/apimachinery/pkg/watch/watch.go b/vendor/k8s.io/apimachinery/pkg/watch/watch.go
new file mode 100644
index 0000000..be9c90c
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/watch/watch.go
@@ -0,0 +1,317 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package watch
+
+import (
+	"fmt"
+	"sync"
+
+	"k8s.io/klog"
+
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+// Interface can be implemented by anything that knows how to watch and report changes.
+type Interface interface {
+	// Stops watching. Will close the channel returned by ResultChan(). Releases
+	// any resources used by the watch.
+	Stop()
+
+	// Returns a chan which will receive all the events. If an error occurs
+	// or Stop() is called, this channel will be closed, in which case the
+	// watch should be completely cleaned up.
+	ResultChan() <-chan Event
+}
+
+// EventType defines the possible types of events.
+type EventType string
+
+const (
+	Added    EventType = "ADDED"
+	Modified EventType = "MODIFIED"
+	Deleted  EventType = "DELETED"
+	Error    EventType = "ERROR"
+
+	DefaultChanSize int32 = 100
+)
+
+// Event represents a single event to a watched resource.
+// +k8s:deepcopy-gen=true
+type Event struct {
+	Type EventType
+
+	// Object is:
+	//  * If Type is Added or Modified: the new state of the object.
+	//  * If Type is Deleted: the state of the object immediately before deletion.
+	//  * If Type is Error: *api.Status is recommended; other types may make sense
+	//    depending on context.
+	Object runtime.Object
+}
+
+type emptyWatch chan Event
+
+// NewEmptyWatch returns a watch interface that returns no results and is closed.
+// May be used in certain error conditions where no information is available but
+// an error is not warranted.
+func NewEmptyWatch() Interface {
+	ch := make(chan Event)
+	close(ch)
+	return emptyWatch(ch)
+}
+
+// Stop implements Interface
+func (w emptyWatch) Stop() {
+}
+
+// ResultChan implements Interface
+func (w emptyWatch) ResultChan() <-chan Event {
+	return chan Event(w)
+}
+
+// FakeWatcher lets you test anything that consumes a watch.Interface; threadsafe.
+type FakeWatcher struct {
+	result  chan Event
+	Stopped bool
+	sync.Mutex
+}
+
+func NewFake() *FakeWatcher {
+	return &FakeWatcher{
+		result: make(chan Event),
+	}
+}
+
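+// NewFakeWithChanSize returns a FakeWatcher whose result channel is buffered
+// to hold size events. Note: the blocking parameter is currently unused.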
+func NewFakeWithChanSize(size int, blocking bool) *FakeWatcher {
+	return &FakeWatcher{
+		result: make(chan Event, size),
+	}
+}
+
+// Stop implements Interface.Stop().
+func (f *FakeWatcher) Stop() {
+	f.Lock()
+	defer f.Unlock()
+	if !f.Stopped {
+		klog.V(4).Infof("Stopping fake watcher.")
+		close(f.result)
+		f.Stopped = true
+	}
+}
+
+func (f *FakeWatcher) IsStopped() bool {
+	f.Lock()
+	defer f.Unlock()
+	return f.Stopped
+}
+
+// Reset prepares the watcher to be reused.
+func (f *FakeWatcher) Reset() {
+	f.Lock()
+	defer f.Unlock()
+	f.Stopped = false
+	f.result = make(chan Event)
+}
+
+func (f *FakeWatcher) ResultChan() <-chan Event {
+	return f.result
+}
+
+// Add sends an add event.
+func (f *FakeWatcher) Add(obj runtime.Object) {
+	f.result <- Event{Added, obj}
+}
+
+// Modify sends a modify event.
+func (f *FakeWatcher) Modify(obj runtime.Object) {
+	f.result <- Event{Modified, obj}
+}
+
+// Delete sends a delete event.
+func (f *FakeWatcher) Delete(lastValue runtime.Object) {
+	f.result <- Event{Deleted, lastValue}
+}
+
+// Error sends an Error event.
+func (f *FakeWatcher) Error(errValue runtime.Object) {
+	f.result <- Event{Error, errValue}
+}
+
+// Action sends an event of the requested type, for table-based testing.
+func (f *FakeWatcher) Action(action EventType, obj runtime.Object) {
+	f.result <- Event{action, obj}
+}
+
+// RaceFreeFakeWatcher lets you test anything that consumes a watch.Interface; threadsafe.
+type RaceFreeFakeWatcher struct {
+	result  chan Event
+	Stopped bool
+	sync.Mutex
+}
+
+func NewRaceFreeFake() *RaceFreeFakeWatcher {
+	return &RaceFreeFakeWatcher{
+		result: make(chan Event, DefaultChanSize),
+	}
+}
+
+// Stop implements Interface.Stop().
+func (f *RaceFreeFakeWatcher) Stop() {
+	f.Lock()
+	defer f.Unlock()
+	if !f.Stopped {
+		klog.V(4).Infof("Stopping fake watcher.")
+		close(f.result)
+		f.Stopped = true
+	}
+}
+
+func (f *RaceFreeFakeWatcher) IsStopped() bool {
+	f.Lock()
+	defer f.Unlock()
+	return f.Stopped
+}
+
+// Reset prepares the watcher to be reused.
+func (f *RaceFreeFakeWatcher) Reset() {
+	f.Lock()
+	defer f.Unlock()
+	f.Stopped = false
+	f.result = make(chan Event, DefaultChanSize)
+}
+
+func (f *RaceFreeFakeWatcher) ResultChan() <-chan Event {
+	f.Lock()
+	defer f.Unlock()
+	return f.result
+}
+
+// Add sends an add event.
+func (f *RaceFreeFakeWatcher) Add(obj runtime.Object) {
+	f.Lock()
+	defer f.Unlock()
+	if !f.Stopped {
+		select {
+		case f.result <- Event{Added, obj}:
+			return
+		default:
+			panic(fmt.Errorf("channel full"))
+		}
+	}
+}
+
+// Modify sends a modify event.
+func (f *RaceFreeFakeWatcher) Modify(obj runtime.Object) {
+	f.Lock()
+	defer f.Unlock()
+	if !f.Stopped {
+		select {
+		case f.result <- Event{Modified, obj}:
+			return
+		default:
+			panic(fmt.Errorf("channel full"))
+		}
+	}
+}
+
+// Delete sends a delete event.
+func (f *RaceFreeFakeWatcher) Delete(lastValue runtime.Object) {
+	f.Lock()
+	defer f.Unlock()
+	if !f.Stopped {
+		select {
+		case f.result <- Event{Deleted, lastValue}:
+			return
+		default:
+			panic(fmt.Errorf("channel full"))
+		}
+	}
+}
+
+// Error sends an Error event.
+func (f *RaceFreeFakeWatcher) Error(errValue runtime.Object) {
+	f.Lock()
+	defer f.Unlock()
+	if !f.Stopped {
+		select {
+		case f.result <- Event{Error, errValue}:
+			return
+		default:
+			panic(fmt.Errorf("channel full"))
+		}
+	}
+}
+
+// Action sends an event of the requested type, for table-based testing.
+func (f *RaceFreeFakeWatcher) Action(action EventType, obj runtime.Object) {
+	f.Lock()
+	defer f.Unlock()
+	if !f.Stopped {
+		select {
+		case f.result <- Event{action, obj}:
+			return
+		default:
+			panic(fmt.Errorf("channel full"))
+		}
+	}
+}
+
+// ProxyWatcher lets you wrap your channel in watch Interface. Threadsafe.
+type ProxyWatcher struct {
+	result chan Event
+	stopCh chan struct{}
+
+	mutex   sync.Mutex
+	stopped bool
+}
+
+var _ Interface = &ProxyWatcher{}
+
+// NewProxyWatcher creates new ProxyWatcher by wrapping a channel
+func NewProxyWatcher(ch chan Event) *ProxyWatcher {
+	return &ProxyWatcher{
+		result:  ch,
+		stopCh:  make(chan struct{}),
+		stopped: false,
+	}
+}
+
+// Stop implements Interface
+func (pw *ProxyWatcher) Stop() {
+	pw.mutex.Lock()
+	defer pw.mutex.Unlock()
+	if !pw.stopped {
+		pw.stopped = true
+		close(pw.stopCh)
+	}
+}
+
+// Stopping returns true if Stop() has been called
+func (pw *ProxyWatcher) Stopping() bool {
+	pw.mutex.Lock()
+	defer pw.mutex.Unlock()
+	return pw.stopped
+}
+
+// ResultChan implements Interface
+func (pw *ProxyWatcher) ResultChan() <-chan Event {
+	return pw.result
+}
+
+// StopChan returns stop channel
+func (pw *ProxyWatcher) StopChan() <-chan struct{} {
+	return pw.stopCh
+}
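
A short sketch of wrapping an existing channel (illustrative only, not part of the vendored file):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/watch"
)

func main() {
	ch := make(chan watch.Event, 1)
	pw := watch.NewProxyWatcher(ch)

	ch <- watch.Event{Type: watch.Modified}
	fmt.Println((<-pw.ResultChan()).Type) // MODIFIED

	pw.Stop()
	<-pw.StopChan()            // closed once Stop has been called
	fmt.Println(pw.Stopping()) // true
}
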
diff --git a/vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go
new file mode 100644
index 0000000..71ef4da
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go
@@ -0,0 +1,40 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package watch
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Event) DeepCopyInto(out *Event) {
+	*out = *in
+	if in.Object != nil {
+		out.Object = in.Object.DeepCopyObject()
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Event.
+func (in *Event) DeepCopy() *Event {
+	if in == nil {
+		return nil
+	}
+	out := new(Event)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go b/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go
new file mode 100644
index 0000000..7ed1d1c
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go
@@ -0,0 +1,388 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package reflect is a fork of go's standard library reflection package, which
+// allows for deep equal with equality functions defined.
+package reflect
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+)
+
+// Equalities is a map from type to a function comparing two values of
+// that type.
+type Equalities map[reflect.Type]reflect.Value
+
+// EqualitiesOrDie is a convenience wrapper around AddFuncs that panics on errors.
+func EqualitiesOrDie(funcs ...interface{}) Equalities {
+	e := Equalities{}
+	if err := e.AddFuncs(funcs...); err != nil {
+		panic(err)
+	}
+	return e
+}
+
+// AddFuncs is a shortcut for multiple calls to AddFunc.
+func (e Equalities) AddFuncs(funcs ...interface{}) error {
+	for _, f := range funcs {
+		if err := e.AddFunc(f); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// AddFunc uses eqFunc as an equality function: it must take two parameters
+// of the same type and return a boolean.
+func (e Equalities) AddFunc(eqFunc interface{}) error {
+	fv := reflect.ValueOf(eqFunc)
+	ft := fv.Type()
+	if ft.Kind() != reflect.Func {
+		return fmt.Errorf("expected func, got: %v", ft)
+	}
+	if ft.NumIn() != 2 {
+		return fmt.Errorf("expected two 'in' params, got: %v", ft)
+	}
+	if ft.NumOut() != 1 {
+		return fmt.Errorf("expected one 'out' param, got: %v", ft)
+	}
+	if ft.In(0) != ft.In(1) {
+		return fmt.Errorf("expected arg 1 and 2 to have same type, but got %v", ft)
+	}
+	var forReturnType bool
+	boolType := reflect.TypeOf(forReturnType)
+	if ft.Out(0) != boolType {
+		return fmt.Errorf("expected bool return, got: %v", ft)
+	}
+	e[ft.In(0)] = fv
+	return nil
+}
+
+// Below here is forked from go's reflect/deepequal.go
+
+// During deepValueEqual, must keep track of checks that are
+// in progress.  The comparison algorithm assumes that all
+// checks in progress are true when it reencounters them.
+// Visited comparisons are stored in a map indexed by visit.
+type visit struct {
+	a1  uintptr
+	a2  uintptr
+	typ reflect.Type
+}
+
+// unexportedTypePanic is thrown when you use this DeepEqual on something that has an
+// unexported type. It indicates a programmer error, so should not occur at runtime,
+// which is why it's not public and thus impossible to catch.
+type unexportedTypePanic []reflect.Type
+
+func (u unexportedTypePanic) Error() string { return u.String() }
+func (u unexportedTypePanic) String() string {
+	strs := make([]string, len(u))
+	for i, t := range u {
+		strs[i] = fmt.Sprintf("%v", t)
+	}
+	return "an unexported field was encountered, nested like this: " + strings.Join(strs, " -> ")
+}
+
+func makeUsefulPanic(v reflect.Value) {
+	if x := recover(); x != nil {
+		if u, ok := x.(unexportedTypePanic); ok {
+			u = append(unexportedTypePanic{v.Type()}, u...)
+			x = u
+		}
+		panic(x)
+	}
+}
+
+// Tests for deep equality using reflected types. The map argument tracks
+// comparisons that have already been seen, which allows short circuiting on
+// recursive types.
+func (e Equalities) deepValueEqual(v1, v2 reflect.Value, visited map[visit]bool, depth int) bool {
+	defer makeUsefulPanic(v1)
+
+	if !v1.IsValid() || !v2.IsValid() {
+		return v1.IsValid() == v2.IsValid()
+	}
+	if v1.Type() != v2.Type() {
+		return false
+	}
+	if fv, ok := e[v1.Type()]; ok {
+		return fv.Call([]reflect.Value{v1, v2})[0].Bool()
+	}
+
+	hard := func(k reflect.Kind) bool {
+		switch k {
+		case reflect.Array, reflect.Map, reflect.Slice, reflect.Struct:
+			return true
+		}
+		return false
+	}
+
+	if v1.CanAddr() && v2.CanAddr() && hard(v1.Kind()) {
+		addr1 := v1.UnsafeAddr()
+		addr2 := v2.UnsafeAddr()
+		if addr1 > addr2 {
+			// Canonicalize order to reduce number of entries in visited.
+			addr1, addr2 = addr2, addr1
+		}
+
+		// Short circuit if references are identical ...
+		if addr1 == addr2 {
+			return true
+		}
+
+		// ... or already seen
+		typ := v1.Type()
+		v := visit{addr1, addr2, typ}
+		if visited[v] {
+			return true
+		}
+
+		// Remember for later.
+		visited[v] = true
+	}
+
+	switch v1.Kind() {
+	case reflect.Array:
+		// We don't need to check length here because length is part of
+		// an array's type, which has already been filtered for.
+		for i := 0; i < v1.Len(); i++ {
+			if !e.deepValueEqual(v1.Index(i), v2.Index(i), visited, depth+1) {
+				return false
+			}
+		}
+		return true
+	case reflect.Slice:
+		if (v1.IsNil() || v1.Len() == 0) != (v2.IsNil() || v2.Len() == 0) {
+			return false
+		}
+		if v1.IsNil() || v1.Len() == 0 {
+			return true
+		}
+		if v1.Len() != v2.Len() {
+			return false
+		}
+		if v1.Pointer() == v2.Pointer() {
+			return true
+		}
+		for i := 0; i < v1.Len(); i++ {
+			if !e.deepValueEqual(v1.Index(i), v2.Index(i), visited, depth+1) {
+				return false
+			}
+		}
+		return true
+	case reflect.Interface:
+		if v1.IsNil() || v2.IsNil() {
+			return v1.IsNil() == v2.IsNil()
+		}
+		return e.deepValueEqual(v1.Elem(), v2.Elem(), visited, depth+1)
+	case reflect.Ptr:
+		return e.deepValueEqual(v1.Elem(), v2.Elem(), visited, depth+1)
+	case reflect.Struct:
+		for i, n := 0, v1.NumField(); i < n; i++ {
+			if !e.deepValueEqual(v1.Field(i), v2.Field(i), visited, depth+1) {
+				return false
+			}
+		}
+		return true
+	case reflect.Map:
+		if (v1.IsNil() || v1.Len() == 0) != (v2.IsNil() || v2.Len() == 0) {
+			return false
+		}
+		if v1.IsNil() || v1.Len() == 0 {
+			return true
+		}
+		if v1.Len() != v2.Len() {
+			return false
+		}
+		if v1.Pointer() == v2.Pointer() {
+			return true
+		}
+		for _, k := range v1.MapKeys() {
+			if !e.deepValueEqual(v1.MapIndex(k), v2.MapIndex(k), visited, depth+1) {
+				return false
+			}
+		}
+		return true
+	case reflect.Func:
+		if v1.IsNil() && v2.IsNil() {
+			return true
+		}
+		// Can't do better than this:
+		return false
+	default:
+		// Normal equality suffices
+		if !v1.CanInterface() || !v2.CanInterface() {
+			panic(unexportedTypePanic{})
+		}
+		return v1.Interface() == v2.Interface()
+	}
+}
+
+// DeepEqual is like reflect.DeepEqual, but focused on semantic equality
+// instead of memory equality.
+//
+// It will use e's equality functions if it finds types that match.
+//
+// An empty slice *is* equal to a nil slice for our purposes; same for maps.
+//
+// Unexported field members cannot be compared and will cause an informative panic; you must add an Equality
+// function for these types.
+func (e Equalities) DeepEqual(a1, a2 interface{}) bool {
+	if a1 == nil || a2 == nil {
+		return a1 == a2
+	}
+	v1 := reflect.ValueOf(a1)
+	v2 := reflect.ValueOf(a2)
+	if v1.Type() != v2.Type() {
+		return false
+	}
+	return e.deepValueEqual(v1, v2, make(map[visit]bool), 0)
+}
+
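A sketch of the semantics described above (not part of the vendored file; the forked package is aliased because its name collides with the standard library's reflect):

package main

import (
	"fmt"
	"math"

	forked "k8s.io/apimachinery/third_party/forked/golang/reflect"
)

func main() {
	e := forked.EqualitiesOrDie(func(a, b float64) bool {
		return math.Abs(a-b) < 1e-9 // tolerant float comparison
	})

	fmt.Println(e.DeepEqual([]float64{1.0}, []float64{1.0 + 1e-12})) // true, via the custom func

	var nilSlice []int
	fmt.Println(e.DeepEqual(nilSlice, []int{})) // true: nil and empty are semantically equal here
}
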
+func (e Equalities) deepValueDerive(v1, v2 reflect.Value, visited map[visit]bool, depth int) bool {
+	defer makeUsefulPanic(v1)
+
+	if !v1.IsValid() || !v2.IsValid() {
+		return v1.IsValid() == v2.IsValid()
+	}
+	if v1.Type() != v2.Type() {
+		return false
+	}
+	if fv, ok := e[v1.Type()]; ok {
+		return fv.Call([]reflect.Value{v1, v2})[0].Bool()
+	}
+
+	hard := func(k reflect.Kind) bool {
+		switch k {
+		case reflect.Array, reflect.Map, reflect.Slice, reflect.Struct:
+			return true
+		}
+		return false
+	}
+
+	if v1.CanAddr() && v2.CanAddr() && hard(v1.Kind()) {
+		addr1 := v1.UnsafeAddr()
+		addr2 := v2.UnsafeAddr()
+		if addr1 > addr2 {
+			// Canonicalize order to reduce number of entries in visited.
+			addr1, addr2 = addr2, addr1
+		}
+
+		// Short circuit if references are identical ...
+		if addr1 == addr2 {
+			return true
+		}
+
+		// ... or already seen
+		typ := v1.Type()
+		v := visit{addr1, addr2, typ}
+		if visited[v] {
+			return true
+		}
+
+		// Remember for later.
+		visited[v] = true
+	}
+
+	switch v1.Kind() {
+	case reflect.Array:
+		// We don't need to check length here because length is part of
+		// an array's type, which has already been filtered for.
+		for i := 0; i < v1.Len(); i++ {
+			if !e.deepValueDerive(v1.Index(i), v2.Index(i), visited, depth+1) {
+				return false
+			}
+		}
+		return true
+	case reflect.Slice:
+		if v1.IsNil() || v1.Len() == 0 {
+			return true
+		}
+		if v1.Len() > v2.Len() {
+			return false
+		}
+		if v1.Pointer() == v2.Pointer() {
+			return true
+		}
+		for i := 0; i < v1.Len(); i++ {
+			if !e.deepValueDerive(v1.Index(i), v2.Index(i), visited, depth+1) {
+				return false
+			}
+		}
+		return true
+	case reflect.String:
+		if v1.Len() == 0 {
+			return true
+		}
+		if v1.Len() > v2.Len() {
+			return false
+		}
+		return v1.String() == v2.String()
+	case reflect.Interface:
+		if v1.IsNil() {
+			return true
+		}
+		return e.deepValueDerive(v1.Elem(), v2.Elem(), visited, depth+1)
+	case reflect.Ptr:
+		if v1.IsNil() {
+			return true
+		}
+		return e.deepValueDerive(v1.Elem(), v2.Elem(), visited, depth+1)
+	case reflect.Struct:
+		for i, n := 0, v1.NumField(); i < n; i++ {
+			if !e.deepValueDerive(v1.Field(i), v2.Field(i), visited, depth+1) {
+				return false
+			}
+		}
+		return true
+	case reflect.Map:
+		if v1.IsNil() || v1.Len() == 0 {
+			return true
+		}
+		if v1.Len() > v2.Len() {
+			return false
+		}
+		if v1.Pointer() == v2.Pointer() {
+			return true
+		}
+		for _, k := range v1.MapKeys() {
+			if !e.deepValueDerive(v1.MapIndex(k), v2.MapIndex(k), visited, depth+1) {
+				return false
+			}
+		}
+		return true
+	case reflect.Func:
+		if v1.IsNil() && v2.IsNil() {
+			return true
+		}
+		// Can't do better than this:
+		return false
+	default:
+		// Normal equality suffices
+		if !v1.CanInterface() || !v2.CanInterface() {
+			panic(unexportedTypePanic{})
+		}
+		return v1.Interface() == v2.Interface()
+	}
+}
+
+// DeepDerivative is similar to DeepEqual except that unset fields in a1 are
+// ignored (not compared). This allows us to focus on the fields that matter to
+// the semantic comparison.
+//
+// The unset fields include a nil pointer and an empty string.
+func (e Equalities) DeepDerivative(a1, a2 interface{}) bool {
+	if a1 == nil {
+		return true
+	}
+	v1 := reflect.ValueOf(a1)
+	v2 := reflect.ValueOf(a2)
+	if v1.Type() != v2.Type() {
+		return false
+	}
+	return e.deepValueDerive(v1, v2, make(map[visit]bool), 0)
+}
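
And a sketch of the derivative comparison (illustrative only; the spec type is hypothetical and not part of the vendored code):

package main

import (
	"fmt"

	forked "k8s.io/apimachinery/third_party/forked/golang/reflect"
)

type spec struct {
	Replicas *int
	Image    string
}

func main() {
	e := forked.Equalities{}
	three := 3
	want := spec{Image: "nginx"} // nil pointer fields and empty strings count as unset
	got := spec{Replicas: &three, Image: "nginx"}

	fmt.Println(e.DeepDerivative(want, got)) // true: unset fields in want are ignored
	fmt.Println(e.DeepEqual(want, got))      // false: DeepEqual compares every field
}
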
diff --git a/vendor/k8s.io/client-go/LICENSE b/vendor/k8s.io/client-go/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/k8s.io/client-go/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/k8s.io/client-go/discovery/cached_discovery.go b/vendor/k8s.io/client-go/discovery/cached_discovery.go
new file mode 100644
index 0000000..df69d6a
--- /dev/null
+++ b/vendor/k8s.io/client-go/discovery/cached_discovery.go
@@ -0,0 +1,295 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package discovery
+
+import (
+	"errors"
+	"io/ioutil"
+	"net/http"
+	"os"
+	"path/filepath"
+	"sync"
+	"time"
+
+	"github.com/googleapis/gnostic/OpenAPIv2"
+	"k8s.io/klog"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/version"
+	"k8s.io/client-go/kubernetes/scheme"
+	restclient "k8s.io/client-go/rest"
+)
+
+// CachedDiscoveryClient implements the functions that discover server-supported API groups,
+// versions and resources.
+type CachedDiscoveryClient struct {
+	delegate DiscoveryInterface
+
+	// cacheDirectory is the directory where discovery docs are held. It must be unique per host:port combination to work well.
+	cacheDirectory string
+
+	// ttl is how long the cache should be considered valid
+	ttl time.Duration
+
+	// mutex protects the variables below
+	mutex sync.Mutex
+
+	// ourFiles are all filenames of cache files created by this process
+	ourFiles map[string]struct{}
+	// invalidated is true if cache files not created by this process should be ignored (e.g. after Invalidate() was called)
+	invalidated bool
+	// fresh is true if all used cache files were ours
+	fresh bool
+}
+
+var _ CachedDiscoveryInterface = &CachedDiscoveryClient{}
+
+// ServerResourcesForGroupVersion returns the supported resources for a group and version.
+func (d *CachedDiscoveryClient) ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) {
+	filename := filepath.Join(d.cacheDirectory, groupVersion, "serverresources.json")
+	cachedBytes, err := d.getCachedFile(filename)
+	// don't fail on errors; either we don't have a cache file or we can't use it. Either way we can fall back to live discovery.
+	if err == nil {
+		cachedResources := &metav1.APIResourceList{}
+		if err := runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), cachedBytes, cachedResources); err == nil {
+			klog.V(10).Infof("returning cached discovery info from %v", filename)
+			return cachedResources, nil
+		}
+	}
+
+	liveResources, err := d.delegate.ServerResourcesForGroupVersion(groupVersion)
+	if err != nil {
+		klog.V(3).Infof("skipped caching discovery info due to %v", err)
+		return liveResources, err
+	}
+	if liveResources == nil || len(liveResources.APIResources) == 0 {
+		klog.V(3).Infof("skipped caching discovery info, no resources found")
+		return liveResources, err
+	}
+
+	if err := d.writeCachedFile(filename, liveResources); err != nil {
+		klog.V(1).Infof("failed to write cache to %v due to %v", filename, err)
+	}
+
+	return liveResources, nil
+}
+
+// ServerResources returns the supported resources for all groups and versions.
+func (d *CachedDiscoveryClient) ServerResources() ([]*metav1.APIResourceList, error) {
+	return ServerResources(d)
+}
+
+// ServerGroups returns the supported groups, with information like supported versions and the
+// preferred version.
+func (d *CachedDiscoveryClient) ServerGroups() (*metav1.APIGroupList, error) {
+	filename := filepath.Join(d.cacheDirectory, "servergroups.json")
+	cachedBytes, err := d.getCachedFile(filename)
+	// don't fail on errors; either we don't have a cache file or we can't use it. Either way we can fall back to live discovery.
+	if err == nil {
+		cachedGroups := &metav1.APIGroupList{}
+		if err := runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), cachedBytes, cachedGroups); err == nil {
+			klog.V(10).Infof("returning cached discovery info from %v", filename)
+			return cachedGroups, nil
+		}
+	}
+
+	liveGroups, err := d.delegate.ServerGroups()
+	if err != nil {
+		klog.V(3).Infof("skipped caching discovery info due to %v", err)
+		return liveGroups, err
+	}
+	if liveGroups == nil || len(liveGroups.Groups) == 0 {
+		klog.V(3).Infof("skipped caching discovery info, no groups found")
+		return liveGroups, err
+	}
+
+	if err := d.writeCachedFile(filename, liveGroups); err != nil {
+		klog.V(1).Infof("failed to write cache to %v due to %v", filename, err)
+	}
+
+	return liveGroups, nil
+}
+
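+// getCachedFile returns the cached bytes for filename. After Invalidate it refuses
+// files not written by this process, and it fails if the file is missing or older than ttl.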
+func (d *CachedDiscoveryClient) getCachedFile(filename string) ([]byte, error) {
+	// after invalidation ignore cache files not created by this process
+	d.mutex.Lock()
+	_, ourFile := d.ourFiles[filename]
+	if d.invalidated && !ourFile {
+		d.mutex.Unlock()
+		return nil, errors.New("cache invalidated")
+	}
+	d.mutex.Unlock()
+
+	file, err := os.Open(filename)
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+
+	fileInfo, err := file.Stat()
+	if err != nil {
+		return nil, err
+	}
+
+	if time.Now().After(fileInfo.ModTime().Add(d.ttl)) {
+		return nil, errors.New("cache expired")
+	}
+
+	// the cache is present and it's valid. Try to read and use it.
+	cachedBytes, err := ioutil.ReadAll(file)
+	if err != nil {
+		return nil, err
+	}
+
+	d.mutex.Lock()
+	defer d.mutex.Unlock()
+	d.fresh = d.fresh && ourFile
+
+	return cachedBytes, nil
+}
+
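+// writeCachedFile encodes obj with the legacy codec and writes it to filename
+// atomically via a temp file and rename, recording the file as written by this process.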
+func (d *CachedDiscoveryClient) writeCachedFile(filename string, obj runtime.Object) error {
+	if err := os.MkdirAll(filepath.Dir(filename), 0755); err != nil {
+		return err
+	}
+
+	bytes, err := runtime.Encode(scheme.Codecs.LegacyCodec(), obj)
+	if err != nil {
+		return err
+	}
+
+	f, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename)+".")
+	if err != nil {
+		return err
+	}
+	defer os.Remove(f.Name())
+	_, err = f.Write(bytes)
+	if err != nil {
+		return err
+	}
+
+	err = os.Chmod(f.Name(), 0755)
+	if err != nil {
+		return err
+	}
+
+	name := f.Name()
+	err = f.Close()
+	if err != nil {
+		return err
+	}
+
+	// atomic rename
+	d.mutex.Lock()
+	defer d.mutex.Unlock()
+	err = os.Rename(name, filename)
+	if err == nil {
+		d.ourFiles[filename] = struct{}{}
+	}
+	return err
+}
+
+// RESTClient returns a RESTClient that is used to communicate with the API server
+// by this client implementation.
+func (d *CachedDiscoveryClient) RESTClient() restclient.Interface {
+	return d.delegate.RESTClient()
+}
+
+// ServerPreferredResources returns the supported resources with the version preferred by the
+// server.
+func (d *CachedDiscoveryClient) ServerPreferredResources() ([]*metav1.APIResourceList, error) {
+	return ServerPreferredResources(d)
+}
+
+// ServerPreferredNamespacedResources returns the supported namespaced resources with the
+// version preferred by the server.
+func (d *CachedDiscoveryClient) ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) {
+	return ServerPreferredNamespacedResources(d)
+}
+
+// ServerVersion retrieves and parses the server's version (git version).
+func (d *CachedDiscoveryClient) ServerVersion() (*version.Info, error) {
+	return d.delegate.ServerVersion()
+}
+
+// OpenAPISchema retrieves and parses the swagger API schema the server supports.
+func (d *CachedDiscoveryClient) OpenAPISchema() (*openapi_v2.Document, error) {
+	return d.delegate.OpenAPISchema()
+}
+
+// Fresh is supposed to tell the caller whether or not to retry if the cache
+// fails to find something (false = retry, true = no need to retry).
+func (d *CachedDiscoveryClient) Fresh() bool {
+	d.mutex.Lock()
+	defer d.mutex.Unlock()
+
+	return d.fresh
+}
+
+// Invalidate enforces that no cached data is used in the future that is older than the current time.
+func (d *CachedDiscoveryClient) Invalidate() {
+	d.mutex.Lock()
+	defer d.mutex.Unlock()
+
+	d.ourFiles = map[string]struct{}{}
+	d.fresh = true
+	d.invalidated = true
+}
+
+// NewCachedDiscoveryClientForConfig creates a new DiscoveryClient for the given config, and wraps
+// the created client in a CachedDiscoveryClient. The provided configuration is updated with a
+// custom transport that understands cache responses.
+// We receive two distinct cache directories for now, in order to preserve old behavior
+// which makes use of the --cache-dir flag value for storing cache data from the CacheRoundTripper,
+// and makes use of the hardcoded destination (~/.kube/cache/discovery/...) for storing
+// CachedDiscoveryClient cache data. If httpCacheDir is empty, the restconfig's transport will not
+// be updated with a roundtripper that understands cache responses.
+// If discoveryCacheDir is empty, cached server resource data will be looked up in the current directory.
+// TODO(juanvallejo): the value of "--cache-dir" should be honored. Consolidate discoveryCacheDir with httpCacheDir
+// so that server resources and http-cache data are stored in the same location, provided via config flags.
+func NewCachedDiscoveryClientForConfig(config *restclient.Config, discoveryCacheDir, httpCacheDir string, ttl time.Duration) (*CachedDiscoveryClient, error) {
+	if len(httpCacheDir) > 0 {
+		// update the given restconfig with a custom roundtripper that
+		// understands how to handle cache responses.
+		wt := config.WrapTransport
+		config.WrapTransport = func(rt http.RoundTripper) http.RoundTripper {
+			if wt != nil {
+				rt = wt(rt)
+			}
+			return newCacheRoundTripper(httpCacheDir, rt)
+		}
+	}
+
+	discoveryClient, err := NewDiscoveryClientForConfig(config)
+	if err != nil {
+		return nil, err
+	}
+
+	return newCachedDiscoveryClient(discoveryClient, discoveryCacheDir, ttl), nil
+}
+
+// newCachedDiscoveryClient creates a new CachedDiscoveryClient. cacheDirectory is the directory where discovery docs are held. It must be unique per host:port combination to work well.
+func newCachedDiscoveryClient(delegate DiscoveryInterface, cacheDirectory string, ttl time.Duration) *CachedDiscoveryClient {
+	return &CachedDiscoveryClient{
+		delegate:       delegate,
+		cacheDirectory: cacheDirectory,
+		ttl:            ttl,
+		ourFiles:       map[string]struct{}{},
+		fresh:          true,
+	}
+}
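
For reference, a minimal sketch of how a consumer such as the affinity routing daemon might construct the cached client above. The kubeconfig path, cache directories, and TTL are illustrative assumptions, not values taken from this change:

    package main

    import (
    	"fmt"
    	"time"

    	"k8s.io/client-go/discovery"
    	"k8s.io/client-go/tools/clientcmd"
    )

    func main() {
    	// Hypothetical kubeconfig location; any source of a *rest.Config works.
    	config, err := clientcmd.BuildConfigFromFlags("", "/etc/kubernetes/kubeconfig")
    	if err != nil {
    		panic(err)
    	}
    	// Discovery documents are cached on disk and considered valid for 10 minutes;
    	// the first directory holds discovery docs, the second the HTTP ETag cache.
    	cached, err := discovery.NewCachedDiscoveryClientForConfig(
    		config, "/var/cache/k8s/discovery", "/var/cache/k8s/http", 10*time.Minute)
    	if err != nil {
    		panic(err)
    	}
    	groups, err := cached.ServerGroups()
    	if err != nil {
    		panic(err)
    	}
    	fmt.Printf("%d API groups (served from our own cache files: %v)\n",
    		len(groups.Groups), cached.Fresh())
    }
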
diff --git a/vendor/k8s.io/client-go/discovery/discovery_client.go b/vendor/k8s.io/client-go/discovery/discovery_client.go
new file mode 100644
index 0000000..17b39de
--- /dev/null
+++ b/vendor/k8s.io/client-go/discovery/discovery_client.go
@@ -0,0 +1,472 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package discovery
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/googleapis/gnostic/OpenAPIv2"
+
+	"k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/runtime/serializer"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apimachinery/pkg/version"
+	"k8s.io/client-go/kubernetes/scheme"
+	restclient "k8s.io/client-go/rest"
+)
+
+const (
+	// defaultRetries is the number of times a resource discovery is repeated if an API group disappears on the fly (e.g. ThirdPartyResources).
+	defaultRetries = 2
+	// mimePb is the protobuf MIME type used to request the OpenAPI v2 spec.
+	mimePb = "application/com.github.proto-openapi.spec.v2@v1.0+protobuf"
+	// defaultTimeout is the maximum amount of time per request when no timeout has been set on a RESTClient.
+	// Defaults to 32s in order to have a distinguishable length of time, relative to other timeouts that exist.
+	defaultTimeout = 32 * time.Second
+)
+
+// DiscoveryInterface holds the methods that discover server-supported API groups,
+// versions and resources.
+type DiscoveryInterface interface {
+	RESTClient() restclient.Interface
+	ServerGroupsInterface
+	ServerResourcesInterface
+	ServerVersionInterface
+	OpenAPISchemaInterface
+}
+
+// CachedDiscoveryInterface is a DiscoveryInterface with cache invalidation and freshness.
+type CachedDiscoveryInterface interface {
+	DiscoveryInterface
+	// Fresh is supposed to tell the caller whether or not to retry if the cache
+	// fails to find something (false = retry, true = no need to retry).
+	//
+	// TODO: this needs to be revisited, this interface can't be locked properly
+	// and doesn't make a lot of sense.
+	Fresh() bool
+	// Invalidate enforces that no cached data is used in the future that is older than the current time.
+	Invalidate()
+}
+
+// ServerGroupsInterface has methods for obtaining supported groups on the API server
+type ServerGroupsInterface interface {
+	// ServerGroups returns the supported groups, with information like supported versions and the
+	// preferred version.
+	ServerGroups() (*metav1.APIGroupList, error)
+}
+
+// ServerResourcesInterface has methods for obtaining supported resources on the API server
+type ServerResourcesInterface interface {
+	// ServerResourcesForGroupVersion returns the supported resources for a group and version.
+	ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error)
+	// ServerResources returns the supported resources for all groups and versions.
+	ServerResources() ([]*metav1.APIResourceList, error)
+	// ServerPreferredResources returns the supported resources with the version preferred by the
+	// server.
+	ServerPreferredResources() ([]*metav1.APIResourceList, error)
+	// ServerPreferredNamespacedResources returns the supported namespaced resources with the
+	// version preferred by the server.
+	ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error)
+}
+
+// ServerVersionInterface has a method for retrieving the server's version.
+type ServerVersionInterface interface {
+	// ServerVersion retrieves and parses the server's version (git version).
+	ServerVersion() (*version.Info, error)
+}
+
+// OpenAPISchemaInterface has a method to retrieve the open API schema.
+type OpenAPISchemaInterface interface {
+	// OpenAPISchema retrieves and parses the swagger API schema the server supports.
+	OpenAPISchema() (*openapi_v2.Document, error)
+}
+
+// DiscoveryClient implements the functions that discover server-supported API groups,
+// versions and resources.
+type DiscoveryClient struct {
+	restClient restclient.Interface
+
+	LegacyPrefix string
+}
+
+// apiVersionsToAPIGroup converts metav1.APIVersions to metav1.APIGroup. APIVersions is
+// used by the legacy v1 API, so the group is "".
+func apiVersionsToAPIGroup(apiVersions *metav1.APIVersions) (apiGroup metav1.APIGroup) {
+	groupVersions := []metav1.GroupVersionForDiscovery{}
+	for _, version := range apiVersions.Versions {
+		groupVersion := metav1.GroupVersionForDiscovery{
+			GroupVersion: version,
+			Version:      version,
+		}
+		groupVersions = append(groupVersions, groupVersion)
+	}
+	apiGroup.Versions = groupVersions
+	// There should be only one groupVersion returned at /api
+	apiGroup.PreferredVersion = groupVersions[0]
+	return
+}
+
+// ServerGroups returns the supported groups, with information like supported versions and the
+// preferred version.
+func (d *DiscoveryClient) ServerGroups() (apiGroupList *metav1.APIGroupList, err error) {
+	// Get the groupVersions exposed at /api
+	v := &metav1.APIVersions{}
+	err = d.restClient.Get().AbsPath(d.LegacyPrefix).Do().Into(v)
+	apiGroup := metav1.APIGroup{}
+	if err == nil && len(v.Versions) != 0 {
+		apiGroup = apiVersionsToAPIGroup(v)
+	}
+	if err != nil && !errors.IsNotFound(err) && !errors.IsForbidden(err) {
+		return nil, err
+	}
+
+	// Get the groupVersions exposed at /apis
+	apiGroupList = &metav1.APIGroupList{}
+	err = d.restClient.Get().AbsPath("/apis").Do().Into(apiGroupList)
+	if err != nil && !errors.IsNotFound(err) && !errors.IsForbidden(err) {
+		return nil, err
+	}
+	// to be compatible with a v1.0 server, if it's a 403 or 404, ignore and return whatever we got from /api
+	if err != nil && (errors.IsNotFound(err) || errors.IsForbidden(err)) {
+		apiGroupList = &metav1.APIGroupList{}
+	}
+
+	// prepend the group retrieved from /api to the list if not empty
+	if len(v.Versions) != 0 {
+		apiGroupList.Groups = append([]metav1.APIGroup{apiGroup}, apiGroupList.Groups...)
+	}
+	return apiGroupList, nil
+}
+
+// ServerResourcesForGroupVersion returns the supported resources for a group and version.
+func (d *DiscoveryClient) ServerResourcesForGroupVersion(groupVersion string) (resources *metav1.APIResourceList, err error) {
+	url := url.URL{}
+	if len(groupVersion) == 0 {
+		return nil, fmt.Errorf("groupVersion shouldn't be empty")
+	}
+	if len(d.LegacyPrefix) > 0 && groupVersion == "v1" {
+		url.Path = d.LegacyPrefix + "/" + groupVersion
+	} else {
+		url.Path = "/apis/" + groupVersion
+	}
+	resources = &metav1.APIResourceList{
+		GroupVersion: groupVersion,
+	}
+	err = d.restClient.Get().AbsPath(url.String()).Do().Into(resources)
+	if err != nil {
+		// ignore 403 or 404 errors to be compatible with a v1.0 server.
+		if groupVersion == "v1" && (errors.IsNotFound(err) || errors.IsForbidden(err)) {
+			return resources, nil
+		}
+		return nil, err
+	}
+	return resources, nil
+}
+
+// serverResources returns the supported resources for all groups and versions.
+func (d *DiscoveryClient) serverResources() ([]*metav1.APIResourceList, error) {
+	return ServerResources(d)
+}
+
+// ServerResources returns the supported resources for all groups and versions.
+func (d *DiscoveryClient) ServerResources() ([]*metav1.APIResourceList, error) {
+	return withRetries(defaultRetries, d.serverResources)
+}
+
+// ErrGroupDiscoveryFailed is returned if one or more API groups fail to load.
+type ErrGroupDiscoveryFailed struct {
+	// Groups is a list of the groups that failed to load and the error cause
+	Groups map[schema.GroupVersion]error
+}
+
+// Error implements the error interface
+func (e *ErrGroupDiscoveryFailed) Error() string {
+	var groups []string
+	for k, v := range e.Groups {
+		groups = append(groups, fmt.Sprintf("%s: %v", k, v))
+	}
+	sort.Strings(groups)
+	return fmt.Sprintf("unable to retrieve the complete list of server APIs: %s", strings.Join(groups, ", "))
+}
+
+// IsGroupDiscoveryFailedError returns true if the provided error indicates the server was unable to discover
+// a complete list of APIs for the client to use.
+func IsGroupDiscoveryFailedError(err error) bool {
+	_, ok := err.(*ErrGroupDiscoveryFailed)
+	return err != nil && ok
+}
+
+// serverPreferredResources returns the supported resources with the version preferred by the server.
+func (d *DiscoveryClient) serverPreferredResources() ([]*metav1.APIResourceList, error) {
+	return ServerPreferredResources(d)
+}
+
+// ServerResources uses the provided discovery interface to look up supported resources for all groups and versions.
+func ServerResources(d DiscoveryInterface) ([]*metav1.APIResourceList, error) {
+	apiGroups, err := d.ServerGroups()
+	if err != nil {
+		return nil, err
+	}
+
+	groupVersionResources, failedGroups := fetchGroupVersionResources(d, apiGroups)
+
+	// order results by group/version discovery order
+	result := []*metav1.APIResourceList{}
+	for _, apiGroup := range apiGroups.Groups {
+		for _, version := range apiGroup.Versions {
+			gv := schema.GroupVersion{Group: apiGroup.Name, Version: version.Version}
+			if resources, ok := groupVersionResources[gv]; ok {
+				result = append(result, resources)
+			}
+		}
+	}
+
+	if len(failedGroups) == 0 {
+		return result, nil
+	}
+
+	return result, &ErrGroupDiscoveryFailed{Groups: failedGroups}
+}
+
+// ServerPreferredResources uses the provided discovery interface to look up preferred resources
+func ServerPreferredResources(d DiscoveryInterface) ([]*metav1.APIResourceList, error) {
+	serverGroupList, err := d.ServerGroups()
+	if err != nil {
+		return nil, err
+	}
+
+	groupVersionResources, failedGroups := fetchGroupVersionResources(d, serverGroupList)
+
+	result := []*metav1.APIResourceList{}
+	grVersions := map[schema.GroupResource]string{}                         // selected version of a GroupResource
+	grAPIResources := map[schema.GroupResource]*metav1.APIResource{}        // selected APIResource for a GroupResource
+	gvAPIResourceLists := map[schema.GroupVersion]*metav1.APIResourceList{} // blueprint for a APIResourceList for later grouping
+
+	for _, apiGroup := range serverGroupList.Groups {
+		for _, version := range apiGroup.Versions {
+			groupVersion := schema.GroupVersion{Group: apiGroup.Name, Version: version.Version}
+
+			apiResourceList, ok := groupVersionResources[groupVersion]
+			if !ok {
+				continue
+			}
+
+			// create empty list which is filled later in another loop
+			emptyAPIResourceList := metav1.APIResourceList{
+				GroupVersion: version.GroupVersion,
+			}
+			gvAPIResourceLists[groupVersion] = &emptyAPIResourceList
+			result = append(result, &emptyAPIResourceList)
+
+			for i := range apiResourceList.APIResources {
+				apiResource := &apiResourceList.APIResources[i]
+				if strings.Contains(apiResource.Name, "/") {
+					continue
+				}
+				gv := schema.GroupResource{Group: apiGroup.Name, Resource: apiResource.Name}
+				if _, ok := grAPIResources[gv]; ok && version.Version != apiGroup.PreferredVersion.Version {
+					// only override with preferred version
+					continue
+				}
+				grVersions[gv] = version.Version
+				grAPIResources[gv] = apiResource
+			}
+		}
+	}
+
+	// group selected APIResources according to GroupVersion into APIResourceLists
+	for groupResource, apiResource := range grAPIResources {
+		version := grVersions[groupResource]
+		groupVersion := schema.GroupVersion{Group: groupResource.Group, Version: version}
+		apiResourceList := gvAPIResourceLists[groupVersion]
+		apiResourceList.APIResources = append(apiResourceList.APIResources, *apiResource)
+	}
+
+	if len(failedGroups) == 0 {
+		return result, nil
+	}
+
+	return result, &ErrGroupDiscoveryFailed{Groups: failedGroups}
+}
+
+// fetchGroupVersionResources uses the discovery client to fetch the resources for the specified groups and versions in parallel.
+func fetchGroupVersionResources(d DiscoveryInterface, apiGroups *metav1.APIGroupList) (map[schema.GroupVersion]*metav1.APIResourceList, map[schema.GroupVersion]error) {
+	groupVersionResources := make(map[schema.GroupVersion]*metav1.APIResourceList)
+	failedGroups := make(map[schema.GroupVersion]error)
+
+	wg := &sync.WaitGroup{}
+	resultLock := &sync.Mutex{}
+	for _, apiGroup := range apiGroups.Groups {
+		for _, version := range apiGroup.Versions {
+			groupVersion := schema.GroupVersion{Group: apiGroup.Name, Version: version.Version}
+			wg.Add(1)
+			go func() {
+				defer wg.Done()
+				defer utilruntime.HandleCrash()
+
+				apiResourceList, err := d.ServerResourcesForGroupVersion(groupVersion.String())
+
+				// lock to record results
+				resultLock.Lock()
+				defer resultLock.Unlock()
+
+				if err != nil {
+					// TODO: maybe restrict this to NotFound errors
+					failedGroups[groupVersion] = err
+				} else {
+					groupVersionResources[groupVersion] = apiResourceList
+				}
+			}()
+		}
+	}
+	wg.Wait()
+
+	return groupVersionResources, failedGroups
+}
+
+// ServerPreferredResources returns the supported resources with the version preferred by the
+// server.
+func (d *DiscoveryClient) ServerPreferredResources() ([]*metav1.APIResourceList, error) {
+	return withRetries(defaultRetries, d.serverPreferredResources)
+}
+
+// ServerPreferredNamespacedResources returns the supported namespaced resources with the
+// version preferred by the server.
+func (d *DiscoveryClient) ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) {
+	return ServerPreferredNamespacedResources(d)
+}
+
+// ServerPreferredNamespacedResources uses the provided discovery interface to look up preferred namespaced resources
+func ServerPreferredNamespacedResources(d DiscoveryInterface) ([]*metav1.APIResourceList, error) {
+	all, err := ServerPreferredResources(d)
+	return FilteredBy(ResourcePredicateFunc(func(groupVersion string, r *metav1.APIResource) bool {
+		return r.Namespaced
+	}), all), err
+}
+
+// ServerVersion retrieves and parses the server's version (git version).
+func (d *DiscoveryClient) ServerVersion() (*version.Info, error) {
+	body, err := d.restClient.Get().AbsPath("/version").Do().Raw()
+	if err != nil {
+		return nil, err
+	}
+	var info version.Info
+	err = json.Unmarshal(body, &info)
+	if err != nil {
+		return nil, fmt.Errorf("got '%s': %v", string(body), err)
+	}
+	return &info, nil
+}
+
+// OpenAPISchema fetches the OpenAPI schema using the REST client and parses the protobuf document.
+func (d *DiscoveryClient) OpenAPISchema() (*openapi_v2.Document, error) {
+	data, err := d.restClient.Get().AbsPath("/openapi/v2").SetHeader("Accept", mimePb).Do().Raw()
+	if err != nil {
+		if errors.IsForbidden(err) || errors.IsNotFound(err) || errors.IsNotAcceptable(err) {
+			// single endpoint not found/registered in old server, try to fetch old endpoint
+			// TODO(roycaihw): remove this in 1.11
+			data, err = d.restClient.Get().AbsPath("/swagger-2.0.0.pb-v1").Do().Raw()
+			if err != nil {
+				return nil, err
+			}
+		} else {
+			return nil, err
+		}
+	}
+	document := &openapi_v2.Document{}
+	err = proto.Unmarshal(data, document)
+	if err != nil {
+		return nil, err
+	}
+	return document, nil
+}
+
+// withRetries retries the given function in case the set of groups supported by the server changes after ServerGroups() returns.
+func withRetries(maxRetries int, f func() ([]*metav1.APIResourceList, error)) ([]*metav1.APIResourceList, error) {
+	var result []*metav1.APIResourceList
+	var err error
+	for i := 0; i < maxRetries; i++ {
+		result, err = f()
+		if err == nil {
+			return result, nil
+		}
+		if _, ok := err.(*ErrGroupDiscoveryFailed); !ok {
+			return nil, err
+		}
+	}
+	return result, err
+}
+
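+// setDiscoveryDefaults prepares a rest config for discovery-only use: no API path or
+// group version, a default timeout, a decode-only serializer, and a default user agent.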
+func setDiscoveryDefaults(config *restclient.Config) error {
+	config.APIPath = ""
+	config.GroupVersion = nil
+	if config.Timeout == 0 {
+		config.Timeout = defaultTimeout
+	}
+	codec := runtime.NoopEncoder{Decoder: scheme.Codecs.UniversalDecoder()}
+	config.NegotiatedSerializer = serializer.NegotiatedSerializerWrapper(runtime.SerializerInfo{Serializer: codec})
+	if len(config.UserAgent) == 0 {
+		config.UserAgent = restclient.DefaultKubernetesUserAgent()
+	}
+	return nil
+}
+
+// NewDiscoveryClientForConfig creates a new DiscoveryClient for the given config. This client
+// can be used to discover supported resources in the API server.
+func NewDiscoveryClientForConfig(c *restclient.Config) (*DiscoveryClient, error) {
+	config := *c
+	if err := setDiscoveryDefaults(&config); err != nil {
+		return nil, err
+	}
+	client, err := restclient.UnversionedRESTClientFor(&config)
+	return &DiscoveryClient{restClient: client, LegacyPrefix: "/api"}, err
+}
+
+// NewDiscoveryClientForConfigOrDie creates a new DiscoveryClient for the given config. If
+// there is an error, it panics.
+func NewDiscoveryClientForConfigOrDie(c *restclient.Config) *DiscoveryClient {
+	client, err := NewDiscoveryClientForConfig(c)
+	if err != nil {
+		panic(err)
+	}
+	return client
+}
+
+// NewDiscoveryClient returns a new DiscoveryClient for the given RESTClient.
+func NewDiscoveryClient(c restclient.Interface) *DiscoveryClient {
+	return &DiscoveryClient{restClient: c, LegacyPrefix: "/api"}
+}
+
+// RESTClient returns a RESTClient that is used to communicate with the API server
+// by this client implementation.
+func (d *DiscoveryClient) RESTClient() restclient.Interface {
+	if d == nil {
+		return nil
+	}
+	return d.restClient
+}
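
A minimal usage sketch for the plain DiscoveryClient above; in-cluster configuration is an assumption, any *rest.Config works:

    package main

    import (
    	"fmt"

    	"k8s.io/client-go/discovery"
    	"k8s.io/client-go/rest"
    )

    func main() {
    	// Assumes the program runs inside a cluster.
    	config, err := rest.InClusterConfig()
    	if err != nil {
    		panic(err)
    	}
    	dc, err := discovery.NewDiscoveryClientForConfig(config)
    	if err != nil {
    		panic(err)
    	}
    	lists, err := dc.ServerPreferredResources()
    	// Partial failures are reported via ErrGroupDiscoveryFailed alongside
    	// the lists that did resolve, so don't discard the result on that error.
    	if err != nil && !discovery.IsGroupDiscoveryFailedError(err) {
    		panic(err)
    	}
    	for _, rl := range lists {
    		fmt.Println(rl.GroupVersion, len(rl.APIResources))
    	}
    }
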
diff --git a/vendor/k8s.io/client-go/discovery/doc.go b/vendor/k8s.io/client-go/discovery/doc.go
new file mode 100644
index 0000000..7649558
--- /dev/null
+++ b/vendor/k8s.io/client-go/discovery/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package discovery provides ways to discover server-supported
+// API groups, versions and resources.
+package discovery
diff --git a/vendor/k8s.io/client-go/discovery/helper.go b/vendor/k8s.io/client-go/discovery/helper.go
new file mode 100644
index 0000000..3bfe514
--- /dev/null
+++ b/vendor/k8s.io/client-go/discovery/helper.go
@@ -0,0 +1,125 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package discovery
+
+import (
+	"fmt"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/util/sets"
+	apimachineryversion "k8s.io/apimachinery/pkg/version"
+)
+
+// MatchesServerVersion queries the server to compare the build version
+// (git hash) of the client with the server's build version. It returns an error
+// if it fails to contact the server or if the versions are not an exact match.
+func MatchesServerVersion(clientVersion apimachineryversion.Info, client DiscoveryInterface) error {
+	sVer, err := client.ServerVersion()
+	if err != nil {
+		return fmt.Errorf("couldn't read version from server: %v", err)
+	}
+	// GitVersion includes GitCommit and GitTreeState, but best to be safe?
+	if clientVersion.GitVersion != sVer.GitVersion || clientVersion.GitCommit != sVer.GitCommit || clientVersion.GitTreeState != sVer.GitTreeState {
+		return fmt.Errorf("server version (%#v) differs from client version (%#v)", sVer, clientVersion)
+	}
+
+	return nil
+}
+
+// ServerSupportsVersion returns an error if the server doesn't have the required version
+func ServerSupportsVersion(client DiscoveryInterface, requiredGV schema.GroupVersion) error {
+	groups, err := client.ServerGroups()
+	if err != nil {
+		// This is almost always a connection error, and higher level code should treat this as a generic error,
+		// not a negotiation specific error.
+		return err
+	}
+	versions := metav1.ExtractGroupVersions(groups)
+	serverVersions := sets.String{}
+	for _, v := range versions {
+		serverVersions.Insert(v)
+	}
+
+	if serverVersions.Has(requiredGV.String()) {
+		return nil
+	}
+
+	// If the server reports no versions at all, pretend it supports the required
+	// version; old servers can behave this way when discovery fails with 403 Forbidden.
+	if len(serverVersions) == 0 {
+		return nil
+	}
+
+	return fmt.Errorf("server does not support API version %q", requiredGV)
+}
+
+// GroupVersionResources converts APIResourceLists to the GroupVersionResources.
+func GroupVersionResources(rls []*metav1.APIResourceList) (map[schema.GroupVersionResource]struct{}, error) {
+	gvrs := map[schema.GroupVersionResource]struct{}{}
+	for _, rl := range rls {
+		gv, err := schema.ParseGroupVersion(rl.GroupVersion)
+		if err != nil {
+			return nil, err
+		}
+		for i := range rl.APIResources {
+			gvrs[schema.GroupVersionResource{Group: gv.Group, Version: gv.Version, Resource: rl.APIResources[i].Name}] = struct{}{}
+		}
+	}
+	return gvrs, nil
+}
+
+// FilteredBy filters by the given predicate. Empty APIResourceLists are dropped.
+func FilteredBy(pred ResourcePredicate, rls []*metav1.APIResourceList) []*metav1.APIResourceList {
+	result := []*metav1.APIResourceList{}
+	for _, rl := range rls {
+		filtered := *rl
+		filtered.APIResources = nil
+		for i := range rl.APIResources {
+			if pred.Match(rl.GroupVersion, &rl.APIResources[i]) {
+				filtered.APIResources = append(filtered.APIResources, rl.APIResources[i])
+			}
+		}
+		if filtered.APIResources != nil {
+			result = append(result, &filtered)
+		}
+	}
+	return result
+}
+
+// ResourcePredicate has a method to check if a resource matches a given condition.
+type ResourcePredicate interface {
+	Match(groupVersion string, r *metav1.APIResource) bool
+}
+
+// ResourcePredicateFunc is a function-based ResourcePredicate; it reports whether a resource matches a custom condition.
+type ResourcePredicateFunc func(groupVersion string, r *metav1.APIResource) bool
+
+// Match is a wrapper around ResourcePredicateFunc.
+func (fn ResourcePredicateFunc) Match(groupVersion string, r *metav1.APIResource) bool {
+	return fn(groupVersion, r)
+}
+
+// SupportsAllVerbs is a predicate matching a resource iff all given verbs are supported.
+type SupportsAllVerbs struct {
+	Verbs []string
+}
+
+// Match checks if a resource contains all the given verbs.
+func (p SupportsAllVerbs) Match(groupVersion string, r *metav1.APIResource) bool {
+	return sets.NewString([]string(r.Verbs)...).HasAll(p.Verbs...)
+}
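
The predicates above compose with the discovery results. A sketch under assumed names (the package and function are hypothetical) that keeps only resources usable by informers:

    package discoveryutil

    import (
    	"fmt"

    	"k8s.io/client-go/discovery"
    )

    // printWatchable lists the group/version/resources that support both
    // list and watch. d is any DiscoveryInterface, e.g. the cached client.
    func printWatchable(d discovery.DiscoveryInterface) error {
    	lists, err := discovery.ServerPreferredResources(d)
    	if err != nil && !discovery.IsGroupDiscoveryFailedError(err) {
    		return err
    	}
    	filtered := discovery.FilteredBy(
    		discovery.SupportsAllVerbs{Verbs: []string{"list", "watch"}}, lists)
    	gvrs, err := discovery.GroupVersionResources(filtered)
    	if err != nil {
    		return err
    	}
    	for gvr := range gvrs {
    		fmt.Println(gvr.String())
    	}
    	return nil
    }
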
diff --git a/vendor/k8s.io/client-go/discovery/round_tripper.go b/vendor/k8s.io/client-go/discovery/round_tripper.go
new file mode 100644
index 0000000..4e2bc24
--- /dev/null
+++ b/vendor/k8s.io/client-go/discovery/round_tripper.go
@@ -0,0 +1,62 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package discovery
+
+import (
+	"net/http"
+	"path/filepath"
+
+	"github.com/gregjones/httpcache"
+	"github.com/gregjones/httpcache/diskcache"
+	"github.com/peterbourgon/diskv"
+	"k8s.io/klog"
+)
+
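+// cacheRoundTripper delegates to an httpcache.Transport that caches responses on disk.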
+type cacheRoundTripper struct {
+	rt *httpcache.Transport
+}
+
+// newCacheRoundTripper creates a roundtripper that reads the ETag on
+// response headers and sends the If-None-Match header on subsequent
+// corresponding requests.
+func newCacheRoundTripper(cacheDir string, rt http.RoundTripper) http.RoundTripper {
+	d := diskv.New(diskv.Options{
+		BasePath: cacheDir,
+		TempDir:  filepath.Join(cacheDir, ".diskv-temp"),
+	})
+	t := httpcache.NewTransport(diskcache.NewWithDiskv(d))
+	t.Transport = rt
+
+	return &cacheRoundTripper{rt: t}
+}
+
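+// RoundTrip delegates the request to the caching transport.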
+func (rt *cacheRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+	return rt.rt.RoundTrip(req)
+}
+
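+// CancelRequest forwards cancellation to the underlying transport when supported.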
+func (rt *cacheRoundTripper) CancelRequest(req *http.Request) {
+	type canceler interface {
+		CancelRequest(*http.Request)
+	}
+	if cr, ok := rt.rt.Transport.(canceler); ok {
+		cr.CancelRequest(req)
+	} else {
+		klog.Errorf("CancelRequest not implemented by %T", rt.rt.Transport)
+	}
+}
+
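+// WrappedRoundTripper returns the transport that the cache wraps.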
+func (rt *cacheRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt.Transport }
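
newCacheRoundTripper is unexported; callers enable it through the httpCacheDir argument of NewCachedDiscoveryClientForConfig. The snippet below shows the same httpcache/diskv stack standalone (the cache directory and URL are illustrative):

    package main

    import (
    	"fmt"
    	"net/http"

    	"github.com/gregjones/httpcache"
    	"github.com/gregjones/httpcache/diskcache"
    	"github.com/peterbourgon/diskv"
    )

    func main() {
    	d := diskv.New(diskv.Options{BasePath: "/tmp/http-cache"})
    	client := &http.Client{
    		Transport: httpcache.NewTransport(diskcache.NewWithDiskv(d)),
    	}
    	resp, err := client.Get("https://example.com/")
    	if err != nil {
    		panic(err)
    	}
    	resp.Body.Close()
    	// On repeat requests httpcache revalidates with If-None-Match when the
    	// first response carried an ETag; X-From-Cache marks cached responses.
    	fmt.Println(resp.Status, resp.Header.Get(httpcache.XFromCache))
    }
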
diff --git a/vendor/k8s.io/client-go/kubernetes/clientset.go b/vendor/k8s.io/client-go/kubernetes/clientset.go
new file mode 100644
index 0000000..6ad01d6
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/clientset.go
@@ -0,0 +1,668 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package kubernetes
+
+import (
+	discovery "k8s.io/client-go/discovery"
+	admissionregistrationv1alpha1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1"
+	admissionregistrationv1beta1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1"
+	appsv1 "k8s.io/client-go/kubernetes/typed/apps/v1"
+	appsv1beta1 "k8s.io/client-go/kubernetes/typed/apps/v1beta1"
+	appsv1beta2 "k8s.io/client-go/kubernetes/typed/apps/v1beta2"
+	auditregistrationv1alpha1 "k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1"
+	authenticationv1 "k8s.io/client-go/kubernetes/typed/authentication/v1"
+	authenticationv1beta1 "k8s.io/client-go/kubernetes/typed/authentication/v1beta1"
+	authorizationv1 "k8s.io/client-go/kubernetes/typed/authorization/v1"
+	authorizationv1beta1 "k8s.io/client-go/kubernetes/typed/authorization/v1beta1"
+	autoscalingv1 "k8s.io/client-go/kubernetes/typed/autoscaling/v1"
+	autoscalingv2beta1 "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1"
+	autoscalingv2beta2 "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2"
+	batchv1 "k8s.io/client-go/kubernetes/typed/batch/v1"
+	batchv1beta1 "k8s.io/client-go/kubernetes/typed/batch/v1beta1"
+	batchv2alpha1 "k8s.io/client-go/kubernetes/typed/batch/v2alpha1"
+	certificatesv1beta1 "k8s.io/client-go/kubernetes/typed/certificates/v1beta1"
+	coordinationv1beta1 "k8s.io/client-go/kubernetes/typed/coordination/v1beta1"
+	corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
+	eventsv1beta1 "k8s.io/client-go/kubernetes/typed/events/v1beta1"
+	extensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1"
+	networkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1"
+	policyv1beta1 "k8s.io/client-go/kubernetes/typed/policy/v1beta1"
+	rbacv1 "k8s.io/client-go/kubernetes/typed/rbac/v1"
+	rbacv1alpha1 "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1"
+	rbacv1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1"
+	schedulingv1alpha1 "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1"
+	schedulingv1beta1 "k8s.io/client-go/kubernetes/typed/scheduling/v1beta1"
+	settingsv1alpha1 "k8s.io/client-go/kubernetes/typed/settings/v1alpha1"
+	storagev1 "k8s.io/client-go/kubernetes/typed/storage/v1"
+	storagev1alpha1 "k8s.io/client-go/kubernetes/typed/storage/v1alpha1"
+	storagev1beta1 "k8s.io/client-go/kubernetes/typed/storage/v1beta1"
+	rest "k8s.io/client-go/rest"
+	flowcontrol "k8s.io/client-go/util/flowcontrol"
+)
+
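+// Interface is the set of versioned, typed clients exposed by a Clientset, plus discovery.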
+type Interface interface {
+	Discovery() discovery.DiscoveryInterface
+	AdmissionregistrationV1alpha1() admissionregistrationv1alpha1.AdmissionregistrationV1alpha1Interface
+	AdmissionregistrationV1beta1() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface
+	// Deprecated: please explicitly pick a version if possible.
+	Admissionregistration() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface
+	AppsV1beta1() appsv1beta1.AppsV1beta1Interface
+	AppsV1beta2() appsv1beta2.AppsV1beta2Interface
+	AppsV1() appsv1.AppsV1Interface
+	// Deprecated: please explicitly pick a version if possible.
+	Apps() appsv1.AppsV1Interface
+	AuditregistrationV1alpha1() auditregistrationv1alpha1.AuditregistrationV1alpha1Interface
+	// Deprecated: please explicitly pick a version if possible.
+	Auditregistration() auditregistrationv1alpha1.AuditregistrationV1alpha1Interface
+	AuthenticationV1() authenticationv1.AuthenticationV1Interface
+	// Deprecated: please explicitly pick a version if possible.
+	Authentication() authenticationv1.AuthenticationV1Interface
+	AuthenticationV1beta1() authenticationv1beta1.AuthenticationV1beta1Interface
+	AuthorizationV1() authorizationv1.AuthorizationV1Interface
+	// Deprecated: please explicitly pick a version if possible.
+	Authorization() authorizationv1.AuthorizationV1Interface
+	AuthorizationV1beta1() authorizationv1beta1.AuthorizationV1beta1Interface
+	AutoscalingV1() autoscalingv1.AutoscalingV1Interface
+	// Deprecated: please explicitly pick a version if possible.
+	Autoscaling() autoscalingv1.AutoscalingV1Interface
+	AutoscalingV2beta1() autoscalingv2beta1.AutoscalingV2beta1Interface
+	AutoscalingV2beta2() autoscalingv2beta2.AutoscalingV2beta2Interface
+	BatchV1() batchv1.BatchV1Interface
+	// Deprecated: please explicitly pick a version if possible.
+	Batch() batchv1.BatchV1Interface
+	BatchV1beta1() batchv1beta1.BatchV1beta1Interface
+	BatchV2alpha1() batchv2alpha1.BatchV2alpha1Interface
+	CertificatesV1beta1() certificatesv1beta1.CertificatesV1beta1Interface
+	// Deprecated: please explicitly pick a version if possible.
+	Certificates() certificatesv1beta1.CertificatesV1beta1Interface
+	CoordinationV1beta1() coordinationv1beta1.CoordinationV1beta1Interface
+	// Deprecated: please explicitly pick a version if possible.
+	Coordination() coordinationv1beta1.CoordinationV1beta1Interface
+	CoreV1() corev1.CoreV1Interface
+	// Deprecated: please explicitly pick a version if possible.
+	Core() corev1.CoreV1Interface
+	EventsV1beta1() eventsv1beta1.EventsV1beta1Interface
+	// Deprecated: please explicitly pick a version if possible.
+	Events() eventsv1beta1.EventsV1beta1Interface
+	ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Interface
+	// Deprecated: please explicitly pick a version if possible.
+	Extensions() extensionsv1beta1.ExtensionsV1beta1Interface
+	NetworkingV1() networkingv1.NetworkingV1Interface
+	// Deprecated: please explicitly pick a version if possible.
+	Networking() networkingv1.NetworkingV1Interface
+	PolicyV1beta1() policyv1beta1.PolicyV1beta1Interface
+	// Deprecated: please explicitly pick a version if possible.
+	Policy() policyv1beta1.PolicyV1beta1Interface
+	RbacV1() rbacv1.RbacV1Interface
+	// Deprecated: please explicitly pick a version if possible.
+	Rbac() rbacv1.RbacV1Interface
+	RbacV1beta1() rbacv1beta1.RbacV1beta1Interface
+	RbacV1alpha1() rbacv1alpha1.RbacV1alpha1Interface
+	SchedulingV1alpha1() schedulingv1alpha1.SchedulingV1alpha1Interface
+	SchedulingV1beta1() schedulingv1beta1.SchedulingV1beta1Interface
+	// Deprecated: please explicitly pick a version if possible.
+	Scheduling() schedulingv1beta1.SchedulingV1beta1Interface
+	SettingsV1alpha1() settingsv1alpha1.SettingsV1alpha1Interface
+	// Deprecated: please explicitly pick a version if possible.
+	Settings() settingsv1alpha1.SettingsV1alpha1Interface
+	StorageV1beta1() storagev1beta1.StorageV1beta1Interface
+	StorageV1() storagev1.StorageV1Interface
+	// Deprecated: please explicitly pick a version if possible.
+	Storage() storagev1.StorageV1Interface
+	StorageV1alpha1() storagev1alpha1.StorageV1alpha1Interface
+}
+
+// Clientset contains the clients for groups. Each group has exactly one
+// version included in a Clientset.
+type Clientset struct {
+	*discovery.DiscoveryClient
+	admissionregistrationV1alpha1 *admissionregistrationv1alpha1.AdmissionregistrationV1alpha1Client
+	admissionregistrationV1beta1  *admissionregistrationv1beta1.AdmissionregistrationV1beta1Client
+	appsV1beta1                   *appsv1beta1.AppsV1beta1Client
+	appsV1beta2                   *appsv1beta2.AppsV1beta2Client
+	appsV1                        *appsv1.AppsV1Client
+	auditregistrationV1alpha1     *auditregistrationv1alpha1.AuditregistrationV1alpha1Client
+	authenticationV1              *authenticationv1.AuthenticationV1Client
+	authenticationV1beta1         *authenticationv1beta1.AuthenticationV1beta1Client
+	authorizationV1               *authorizationv1.AuthorizationV1Client
+	authorizationV1beta1          *authorizationv1beta1.AuthorizationV1beta1Client
+	autoscalingV1                 *autoscalingv1.AutoscalingV1Client
+	autoscalingV2beta1            *autoscalingv2beta1.AutoscalingV2beta1Client
+	autoscalingV2beta2            *autoscalingv2beta2.AutoscalingV2beta2Client
+	batchV1                       *batchv1.BatchV1Client
+	batchV1beta1                  *batchv1beta1.BatchV1beta1Client
+	batchV2alpha1                 *batchv2alpha1.BatchV2alpha1Client
+	certificatesV1beta1           *certificatesv1beta1.CertificatesV1beta1Client
+	coordinationV1beta1           *coordinationv1beta1.CoordinationV1beta1Client
+	coreV1                        *corev1.CoreV1Client
+	eventsV1beta1                 *eventsv1beta1.EventsV1beta1Client
+	extensionsV1beta1             *extensionsv1beta1.ExtensionsV1beta1Client
+	networkingV1                  *networkingv1.NetworkingV1Client
+	policyV1beta1                 *policyv1beta1.PolicyV1beta1Client
+	rbacV1                        *rbacv1.RbacV1Client
+	rbacV1beta1                   *rbacv1beta1.RbacV1beta1Client
+	rbacV1alpha1                  *rbacv1alpha1.RbacV1alpha1Client
+	schedulingV1alpha1            *schedulingv1alpha1.SchedulingV1alpha1Client
+	schedulingV1beta1             *schedulingv1beta1.SchedulingV1beta1Client
+	settingsV1alpha1              *settingsv1alpha1.SettingsV1alpha1Client
+	storageV1beta1                *storagev1beta1.StorageV1beta1Client
+	storageV1                     *storagev1.StorageV1Client
+	storageV1alpha1               *storagev1alpha1.StorageV1alpha1Client
+}
+
+// AdmissionregistrationV1alpha1 retrieves the AdmissionregistrationV1alpha1Client
+func (c *Clientset) AdmissionregistrationV1alpha1() admissionregistrationv1alpha1.AdmissionregistrationV1alpha1Interface {
+	return c.admissionregistrationV1alpha1
+}
+
+// AdmissionregistrationV1beta1 retrieves the AdmissionregistrationV1beta1Client
+func (c *Clientset) AdmissionregistrationV1beta1() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface {
+	return c.admissionregistrationV1beta1
+}
+
+// Deprecated: Admissionregistration retrieves the default version of AdmissionregistrationClient.
+// Please explicitly pick a version.
+func (c *Clientset) Admissionregistration() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface {
+	return c.admissionregistrationV1beta1
+}
+
+// AppsV1beta1 retrieves the AppsV1beta1Client
+func (c *Clientset) AppsV1beta1() appsv1beta1.AppsV1beta1Interface {
+	return c.appsV1beta1
+}
+
+// AppsV1beta2 retrieves the AppsV1beta2Client
+func (c *Clientset) AppsV1beta2() appsv1beta2.AppsV1beta2Interface {
+	return c.appsV1beta2
+}
+
+// AppsV1 retrieves the AppsV1Client
+func (c *Clientset) AppsV1() appsv1.AppsV1Interface {
+	return c.appsV1
+}
+
+// Deprecated: Apps retrieves the default version of AppsClient.
+// Please explicitly pick a version.
+func (c *Clientset) Apps() appsv1.AppsV1Interface {
+	return c.appsV1
+}
+
+// AuditregistrationV1alpha1 retrieves the AuditregistrationV1alpha1Client
+func (c *Clientset) AuditregistrationV1alpha1() auditregistrationv1alpha1.AuditregistrationV1alpha1Interface {
+	return c.auditregistrationV1alpha1
+}
+
+// Deprecated: Auditregistration retrieves the default version of AuditregistrationClient.
+// Please explicitly pick a version.
+func (c *Clientset) Auditregistration() auditregistrationv1alpha1.AuditregistrationV1alpha1Interface {
+	return c.auditregistrationV1alpha1
+}
+
+// AuthenticationV1 retrieves the AuthenticationV1Client
+func (c *Clientset) AuthenticationV1() authenticationv1.AuthenticationV1Interface {
+	return c.authenticationV1
+}
+
+// Deprecated: Authentication retrieves the default version of AuthenticationClient.
+// Please explicitly pick a version.
+func (c *Clientset) Authentication() authenticationv1.AuthenticationV1Interface {
+	return c.authenticationV1
+}
+
+// AuthenticationV1beta1 retrieves the AuthenticationV1beta1Client
+func (c *Clientset) AuthenticationV1beta1() authenticationv1beta1.AuthenticationV1beta1Interface {
+	return c.authenticationV1beta1
+}
+
+// AuthorizationV1 retrieves the AuthorizationV1Client
+func (c *Clientset) AuthorizationV1() authorizationv1.AuthorizationV1Interface {
+	return c.authorizationV1
+}
+
+// Deprecated: Authorization retrieves the default version of AuthorizationClient.
+// Please explicitly pick a version.
+func (c *Clientset) Authorization() authorizationv1.AuthorizationV1Interface {
+	return c.authorizationV1
+}
+
+// AuthorizationV1beta1 retrieves the AuthorizationV1beta1Client
+func (c *Clientset) AuthorizationV1beta1() authorizationv1beta1.AuthorizationV1beta1Interface {
+	return c.authorizationV1beta1
+}
+
+// AutoscalingV1 retrieves the AutoscalingV1Client
+func (c *Clientset) AutoscalingV1() autoscalingv1.AutoscalingV1Interface {
+	return c.autoscalingV1
+}
+
+// Deprecated: Autoscaling retrieves the default version of AutoscalingClient.
+// Please explicitly pick a version.
+func (c *Clientset) Autoscaling() autoscalingv1.AutoscalingV1Interface {
+	return c.autoscalingV1
+}
+
+// AutoscalingV2beta1 retrieves the AutoscalingV2beta1Client
+func (c *Clientset) AutoscalingV2beta1() autoscalingv2beta1.AutoscalingV2beta1Interface {
+	return c.autoscalingV2beta1
+}
+
+// AutoscalingV2beta2 retrieves the AutoscalingV2beta2Client
+func (c *Clientset) AutoscalingV2beta2() autoscalingv2beta2.AutoscalingV2beta2Interface {
+	return c.autoscalingV2beta2
+}
+
+// BatchV1 retrieves the BatchV1Client
+func (c *Clientset) BatchV1() batchv1.BatchV1Interface {
+	return c.batchV1
+}
+
+// Deprecated: Batch retrieves the default version of BatchClient.
+// Please explicitly pick a version.
+func (c *Clientset) Batch() batchv1.BatchV1Interface {
+	return c.batchV1
+}
+
+// BatchV1beta1 retrieves the BatchV1beta1Client
+func (c *Clientset) BatchV1beta1() batchv1beta1.BatchV1beta1Interface {
+	return c.batchV1beta1
+}
+
+// BatchV2alpha1 retrieves the BatchV2alpha1Client
+func (c *Clientset) BatchV2alpha1() batchv2alpha1.BatchV2alpha1Interface {
+	return c.batchV2alpha1
+}
+
+// CertificatesV1beta1 retrieves the CertificatesV1beta1Client
+func (c *Clientset) CertificatesV1beta1() certificatesv1beta1.CertificatesV1beta1Interface {
+	return c.certificatesV1beta1
+}
+
+// Deprecated: Certificates retrieves the default version of CertificatesClient.
+// Please explicitly pick a version.
+func (c *Clientset) Certificates() certificatesv1beta1.CertificatesV1beta1Interface {
+	return c.certificatesV1beta1
+}
+
+// CoordinationV1beta1 retrieves the CoordinationV1beta1Client
+func (c *Clientset) CoordinationV1beta1() coordinationv1beta1.CoordinationV1beta1Interface {
+	return c.coordinationV1beta1
+}
+
+// Deprecated: Coordination retrieves the default version of CoordinationClient.
+// Please explicitly pick a version.
+func (c *Clientset) Coordination() coordinationv1beta1.CoordinationV1beta1Interface {
+	return c.coordinationV1beta1
+}
+
+// CoreV1 retrieves the CoreV1Client
+func (c *Clientset) CoreV1() corev1.CoreV1Interface {
+	return c.coreV1
+}
+
+// Deprecated: Core retrieves the default version of CoreClient.
+// Please explicitly pick a version.
+func (c *Clientset) Core() corev1.CoreV1Interface {
+	return c.coreV1
+}
+
+// EventsV1beta1 retrieves the EventsV1beta1Client
+func (c *Clientset) EventsV1beta1() eventsv1beta1.EventsV1beta1Interface {
+	return c.eventsV1beta1
+}
+
+// Deprecated: Events retrieves the default version of EventsClient.
+// Please explicitly pick a version.
+func (c *Clientset) Events() eventsv1beta1.EventsV1beta1Interface {
+	return c.eventsV1beta1
+}
+
+// ExtensionsV1beta1 retrieves the ExtensionsV1beta1Client
+func (c *Clientset) ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Interface {
+	return c.extensionsV1beta1
+}
+
+// Deprecated: Extensions retrieves the default version of ExtensionsClient.
+// Please explicitly pick a version.
+func (c *Clientset) Extensions() extensionsv1beta1.ExtensionsV1beta1Interface {
+	return c.extensionsV1beta1
+}
+
+// NetworkingV1 retrieves the NetworkingV1Client
+func (c *Clientset) NetworkingV1() networkingv1.NetworkingV1Interface {
+	return c.networkingV1
+}
+
+// Deprecated: Networking retrieves the default version of NetworkingClient.
+// Please explicitly pick a version.
+func (c *Clientset) Networking() networkingv1.NetworkingV1Interface {
+	return c.networkingV1
+}
+
+// PolicyV1beta1 retrieves the PolicyV1beta1Client
+func (c *Clientset) PolicyV1beta1() policyv1beta1.PolicyV1beta1Interface {
+	return c.policyV1beta1
+}
+
+// Deprecated: Policy retrieves the default version of PolicyClient.
+// Please explicitly pick a version.
+func (c *Clientset) Policy() policyv1beta1.PolicyV1beta1Interface {
+	return c.policyV1beta1
+}
+
+// RbacV1 retrieves the RbacV1Client
+func (c *Clientset) RbacV1() rbacv1.RbacV1Interface {
+	return c.rbacV1
+}
+
+// Deprecated: Rbac retrieves the default version of RbacClient.
+// Please explicitly pick a version.
+func (c *Clientset) Rbac() rbacv1.RbacV1Interface {
+	return c.rbacV1
+}
+
+// RbacV1beta1 retrieves the RbacV1beta1Client
+func (c *Clientset) RbacV1beta1() rbacv1beta1.RbacV1beta1Interface {
+	return c.rbacV1beta1
+}
+
+// RbacV1alpha1 retrieves the RbacV1alpha1Client
+func (c *Clientset) RbacV1alpha1() rbacv1alpha1.RbacV1alpha1Interface {
+	return c.rbacV1alpha1
+}
+
+// SchedulingV1alpha1 retrieves the SchedulingV1alpha1Client
+func (c *Clientset) SchedulingV1alpha1() schedulingv1alpha1.SchedulingV1alpha1Interface {
+	return c.schedulingV1alpha1
+}
+
+// SchedulingV1beta1 retrieves the SchedulingV1beta1Client
+func (c *Clientset) SchedulingV1beta1() schedulingv1beta1.SchedulingV1beta1Interface {
+	return c.schedulingV1beta1
+}
+
+// Deprecated: Scheduling retrieves the default version of SchedulingClient.
+// Please explicitly pick a version.
+func (c *Clientset) Scheduling() schedulingv1beta1.SchedulingV1beta1Interface {
+	return c.schedulingV1beta1
+}
+
+// SettingsV1alpha1 retrieves the SettingsV1alpha1Client
+func (c *Clientset) SettingsV1alpha1() settingsv1alpha1.SettingsV1alpha1Interface {
+	return c.settingsV1alpha1
+}
+
+// Deprecated: Settings retrieves the default version of SettingsClient.
+// Please explicitly pick a version.
+func (c *Clientset) Settings() settingsv1alpha1.SettingsV1alpha1Interface {
+	return c.settingsV1alpha1
+}
+
+// StorageV1beta1 retrieves the StorageV1beta1Client
+func (c *Clientset) StorageV1beta1() storagev1beta1.StorageV1beta1Interface {
+	return c.storageV1beta1
+}
+
+// StorageV1 retrieves the StorageV1Client
+func (c *Clientset) StorageV1() storagev1.StorageV1Interface {
+	return c.storageV1
+}
+
+// Deprecated: Storage retrieves the default version of StorageClient.
+// Please explicitly pick a version.
+func (c *Clientset) Storage() storagev1.StorageV1Interface {
+	return c.storageV1
+}
+
+// StorageV1alpha1 retrieves the StorageV1alpha1Client
+func (c *Clientset) StorageV1alpha1() storagev1alpha1.StorageV1alpha1Interface {
+	return c.storageV1alpha1
+}
+
+// Discovery retrieves the DiscoveryClient
+func (c *Clientset) Discovery() discovery.DiscoveryInterface {
+	if c == nil {
+		return nil
+	}
+	return c.DiscoveryClient
+}
+
+// NewForConfig creates a new Clientset for the given config.
+func NewForConfig(c *rest.Config) (*Clientset, error) {
+	configShallowCopy := *c
+	if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 {
+		configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst)
+	}
+	var cs Clientset
+	var err error
+	cs.admissionregistrationV1alpha1, err = admissionregistrationv1alpha1.NewForConfig(&configShallowCopy)
+	if err != nil {
+		return nil, err
+	}
+	cs.admissionregistrationV1beta1, err = admissionregistrationv1beta1.NewForConfig(&configShallowCopy)
+	if err != nil {
+		return nil, err
+	}
+	cs.appsV1beta1, err = appsv1beta1.NewForConfig(&configShallowCopy)
+	if err != nil {
+		return nil, err
+	}
+	cs.appsV1beta2, err = appsv1beta2.NewForConfig(&configShallowCopy)
+	if err != nil {
+		return nil, err
+	}
+	cs.appsV1, err = appsv1.NewForConfig(&configShallowCopy)
+	if err != nil {
+		return nil, err
+	}
+	cs.auditregistrationV1alpha1, err = auditregistrationv1alpha1.NewForConfig(&configShallowCopy)
+	if err != nil {
+		return nil, err
+	}
+	cs.authenticationV1, err = authenticationv1.NewForConfig(&configShallowCopy)
+	if err != nil {
+		return nil, err
+	}
+	cs.authenticationV1beta1, err = authenticationv1beta1.NewForConfig(&configShallowCopy)
+	if err != nil {
+		return nil, err
+	}
+	cs.authorizationV1, err = authorizationv1.NewForConfig(&configShallowCopy)
+	if err != nil {
+		return nil, err
+	}
+	cs.authorizationV1beta1, err = authorizationv1beta1.NewForConfig(&configShallowCopy)
+	if err != nil {
+		return nil, err
+	}
+	cs.autoscalingV1, err = autoscalingv1.NewForConfig(&configShallowCopy)
+	if err != nil {
+		return nil, err
+	}
+	cs.autoscalingV2beta1, err = autoscalingv2beta1.NewForConfig(&configShallowCopy)
+	if err != nil {
+		return nil, err
+	}
+	cs.autoscalingV2beta2, err = autoscalingv2beta2.NewForConfig(&configShallowCopy)
+	if err != nil {
+		return nil, err
+	}
+	cs.batchV1, err = batchv1.NewForConfig(&configShallowCopy)
+	if err != nil {
+		return nil, err
+	}
+	cs.batchV1beta1, err = batchv1beta1.NewForConfig(&configShallowCopy)
+	if err != nil {
+		return nil, err
+	}
+	cs.batchV2alpha1, err = batchv2alpha1.NewForConfig(&configShallowCopy)
+	if err != nil {
+		return nil, err
+	}
+	cs.certificatesV1beta1, err = certificatesv1beta1.NewForConfig(&configShallowCopy)
+	if err != nil {
+		return nil, err
+	}
+	cs.coordinationV1beta1, err = coordinationv1beta1.NewForConfig(&configShallowCopy)
+	if err != nil {
+		return nil, err
+	}
+	cs.coreV1, err = corev1.NewForConfig(&configShallowCopy)
+	if err != nil {
+		return nil, err
+	}
+	cs.eventsV1beta1, err = eventsv1beta1.NewForConfig(&configShallowCopy)
+	if err != nil {
+		return nil, err
+	}
+	cs.extensionsV1beta1, err = extensionsv1beta1.NewForConfig(&configShallowCopy)
+	if err != nil {
+		return nil, err
+	}
+	cs.networkingV1, err = networkingv1.NewForConfig(&configShallowCopy)
+	if err != nil {
+		return nil, err
+	}
+	cs.policyV1beta1, err = policyv1beta1.NewForConfig(&configShallowCopy)
+	if err != nil {
+		return nil, err
+	}
+	cs.rbacV1, err = rbacv1.NewForConfig(&configShallowCopy)
+	if err != nil {
+		return nil, err
+	}
+	cs.rbacV1beta1, err = rbacv1beta1.NewForConfig(&configShallowCopy)
+	if err != nil {
+		return nil, err
+	}
+	cs.rbacV1alpha1, err = rbacv1alpha1.NewForConfig(&configShallowCopy)
+	if err != nil {
+		return nil, err
+	}
+	cs.schedulingV1alpha1, err = schedulingv1alpha1.NewForConfig(&configShallowCopy)
+	if err != nil {
+		return nil, err
+	}
+	cs.schedulingV1beta1, err = schedulingv1beta1.NewForConfig(&configShallowCopy)
+	if err != nil {
+		return nil, err
+	}
+	cs.settingsV1alpha1, err = settingsv1alpha1.NewForConfig(&configShallowCopy)
+	if err != nil {
+		return nil, err
+	}
+	cs.storageV1beta1, err = storagev1beta1.NewForConfig(&configShallowCopy)
+	if err != nil {
+		return nil, err
+	}
+	cs.storageV1, err = storagev1.NewForConfig(&configShallowCopy)
+	if err != nil {
+		return nil, err
+	}
+	cs.storageV1alpha1, err = storagev1alpha1.NewForConfig(&configShallowCopy)
+	if err != nil {
+		return nil, err
+	}
+
+	cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy)
+	if err != nil {
+		return nil, err
+	}
+	return &cs, nil
+}
+
+// NewForConfigOrDie creates a new Clientset for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *Clientset {
+	var cs Clientset
+	cs.admissionregistrationV1alpha1 = admissionregistrationv1alpha1.NewForConfigOrDie(c)
+	cs.admissionregistrationV1beta1 = admissionregistrationv1beta1.NewForConfigOrDie(c)
+	cs.appsV1beta1 = appsv1beta1.NewForConfigOrDie(c)
+	cs.appsV1beta2 = appsv1beta2.NewForConfigOrDie(c)
+	cs.appsV1 = appsv1.NewForConfigOrDie(c)
+	cs.auditregistrationV1alpha1 = auditregistrationv1alpha1.NewForConfigOrDie(c)
+	cs.authenticationV1 = authenticationv1.NewForConfigOrDie(c)
+	cs.authenticationV1beta1 = authenticationv1beta1.NewForConfigOrDie(c)
+	cs.authorizationV1 = authorizationv1.NewForConfigOrDie(c)
+	cs.authorizationV1beta1 = authorizationv1beta1.NewForConfigOrDie(c)
+	cs.autoscalingV1 = autoscalingv1.NewForConfigOrDie(c)
+	cs.autoscalingV2beta1 = autoscalingv2beta1.NewForConfigOrDie(c)
+	cs.autoscalingV2beta2 = autoscalingv2beta2.NewForConfigOrDie(c)
+	cs.batchV1 = batchv1.NewForConfigOrDie(c)
+	cs.batchV1beta1 = batchv1beta1.NewForConfigOrDie(c)
+	cs.batchV2alpha1 = batchv2alpha1.NewForConfigOrDie(c)
+	cs.certificatesV1beta1 = certificatesv1beta1.NewForConfigOrDie(c)
+	cs.coordinationV1beta1 = coordinationv1beta1.NewForConfigOrDie(c)
+	cs.coreV1 = corev1.NewForConfigOrDie(c)
+	cs.eventsV1beta1 = eventsv1beta1.NewForConfigOrDie(c)
+	cs.extensionsV1beta1 = extensionsv1beta1.NewForConfigOrDie(c)
+	cs.networkingV1 = networkingv1.NewForConfigOrDie(c)
+	cs.policyV1beta1 = policyv1beta1.NewForConfigOrDie(c)
+	cs.rbacV1 = rbacv1.NewForConfigOrDie(c)
+	cs.rbacV1beta1 = rbacv1beta1.NewForConfigOrDie(c)
+	cs.rbacV1alpha1 = rbacv1alpha1.NewForConfigOrDie(c)
+	cs.schedulingV1alpha1 = schedulingv1alpha1.NewForConfigOrDie(c)
+	cs.schedulingV1beta1 = schedulingv1beta1.NewForConfigOrDie(c)
+	cs.settingsV1alpha1 = settingsv1alpha1.NewForConfigOrDie(c)
+	cs.storageV1beta1 = storagev1beta1.NewForConfigOrDie(c)
+	cs.storageV1 = storagev1.NewForConfigOrDie(c)
+	cs.storageV1alpha1 = storagev1alpha1.NewForConfigOrDie(c)
+
+	cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c)
+	return &cs
+}
+
+// New creates a new Clientset for the given RESTClient.
+func New(c rest.Interface) *Clientset {
+	var cs Clientset
+	cs.admissionregistrationV1alpha1 = admissionregistrationv1alpha1.New(c)
+	cs.admissionregistrationV1beta1 = admissionregistrationv1beta1.New(c)
+	cs.appsV1beta1 = appsv1beta1.New(c)
+	cs.appsV1beta2 = appsv1beta2.New(c)
+	cs.appsV1 = appsv1.New(c)
+	cs.auditregistrationV1alpha1 = auditregistrationv1alpha1.New(c)
+	cs.authenticationV1 = authenticationv1.New(c)
+	cs.authenticationV1beta1 = authenticationv1beta1.New(c)
+	cs.authorizationV1 = authorizationv1.New(c)
+	cs.authorizationV1beta1 = authorizationv1beta1.New(c)
+	cs.autoscalingV1 = autoscalingv1.New(c)
+	cs.autoscalingV2beta1 = autoscalingv2beta1.New(c)
+	cs.autoscalingV2beta2 = autoscalingv2beta2.New(c)
+	cs.batchV1 = batchv1.New(c)
+	cs.batchV1beta1 = batchv1beta1.New(c)
+	cs.batchV2alpha1 = batchv2alpha1.New(c)
+	cs.certificatesV1beta1 = certificatesv1beta1.New(c)
+	cs.coordinationV1beta1 = coordinationv1beta1.New(c)
+	cs.coreV1 = corev1.New(c)
+	cs.eventsV1beta1 = eventsv1beta1.New(c)
+	cs.extensionsV1beta1 = extensionsv1beta1.New(c)
+	cs.networkingV1 = networkingv1.New(c)
+	cs.policyV1beta1 = policyv1beta1.New(c)
+	cs.rbacV1 = rbacv1.New(c)
+	cs.rbacV1beta1 = rbacv1beta1.New(c)
+	cs.rbacV1alpha1 = rbacv1alpha1.New(c)
+	cs.schedulingV1alpha1 = schedulingv1alpha1.New(c)
+	cs.schedulingV1beta1 = schedulingv1beta1.New(c)
+	cs.settingsV1alpha1 = settingsv1alpha1.New(c)
+	cs.storageV1beta1 = storagev1beta1.New(c)
+	cs.storageV1 = storagev1.New(c)
+	cs.storageV1alpha1 = storagev1alpha1.New(c)
+
+	cs.DiscoveryClient = discovery.NewDiscoveryClient(c)
+	return &cs
+}
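
For orientation, here is a minimal sketch of how this vendored clientset is typically constructed and used; the kubeconfig path and namespace are illustrative assumptions, not part of the vendored sources:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a rest.Config from a kubeconfig file (path is illustrative).
	config, err := clientcmd.BuildConfigFromFlags("", "/etc/kubernetes/kubeconfig")
	if err != nil {
		panic(err)
	}
	// NewForConfig instantiates every versioned group client listed above.
	cs, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}
	// Prefer the versioned accessors; the unversioned ones (Core, Apps, ...)
	// are deprecated, as their doc comments note.
	pods, err := cs.CoreV1().Pods("default").List(metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("found %d pods\n", len(pods.Items))
}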
diff --git a/vendor/k8s.io/client-go/kubernetes/doc.go b/vendor/k8s.io/client-go/kubernetes/doc.go
new file mode 100644
index 0000000..b272334
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated clientset.
+package kubernetes
diff --git a/vendor/k8s.io/client-go/kubernetes/import.go b/vendor/k8s.io/client-go/kubernetes/import.go
new file mode 100644
index 0000000..c4f9a91
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/import.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file exists to enforce this clientset's vanity import path.
+
+package kubernetes // import "k8s.io/client-go/kubernetes"
diff --git a/vendor/k8s.io/client-go/kubernetes/scheme/doc.go b/vendor/k8s.io/client-go/kubernetes/scheme/doc.go
new file mode 100644
index 0000000..7dc3756
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/scheme/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package contains the scheme of the automatically generated clientset.
+package scheme
diff --git a/vendor/k8s.io/client-go/kubernetes/scheme/register.go b/vendor/k8s.io/client-go/kubernetes/scheme/register.go
new file mode 100644
index 0000000..e336eb9
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/scheme/register.go
@@ -0,0 +1,118 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package scheme
+
+import (
+	admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
+	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
+	appsv1 "k8s.io/api/apps/v1"
+	appsv1beta1 "k8s.io/api/apps/v1beta1"
+	appsv1beta2 "k8s.io/api/apps/v1beta2"
+	auditregistrationv1alpha1 "k8s.io/api/auditregistration/v1alpha1"
+	authenticationv1 "k8s.io/api/authentication/v1"
+	authenticationv1beta1 "k8s.io/api/authentication/v1beta1"
+	authorizationv1 "k8s.io/api/authorization/v1"
+	authorizationv1beta1 "k8s.io/api/authorization/v1beta1"
+	autoscalingv1 "k8s.io/api/autoscaling/v1"
+	autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1"
+	autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
+	batchv1 "k8s.io/api/batch/v1"
+	batchv1beta1 "k8s.io/api/batch/v1beta1"
+	batchv2alpha1 "k8s.io/api/batch/v2alpha1"
+	certificatesv1beta1 "k8s.io/api/certificates/v1beta1"
+	coordinationv1beta1 "k8s.io/api/coordination/v1beta1"
+	corev1 "k8s.io/api/core/v1"
+	eventsv1beta1 "k8s.io/api/events/v1beta1"
+	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
+	networkingv1 "k8s.io/api/networking/v1"
+	policyv1beta1 "k8s.io/api/policy/v1beta1"
+	rbacv1 "k8s.io/api/rbac/v1"
+	rbacv1alpha1 "k8s.io/api/rbac/v1alpha1"
+	rbacv1beta1 "k8s.io/api/rbac/v1beta1"
+	schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1"
+	schedulingv1beta1 "k8s.io/api/scheduling/v1beta1"
+	settingsv1alpha1 "k8s.io/api/settings/v1alpha1"
+	storagev1 "k8s.io/api/storage/v1"
+	storagev1alpha1 "k8s.io/api/storage/v1alpha1"
+	storagev1beta1 "k8s.io/api/storage/v1beta1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	schema "k8s.io/apimachinery/pkg/runtime/schema"
+	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+)
+
+var Scheme = runtime.NewScheme()
+var Codecs = serializer.NewCodecFactory(Scheme)
+var ParameterCodec = runtime.NewParameterCodec(Scheme)
+var localSchemeBuilder = runtime.SchemeBuilder{
+	admissionregistrationv1alpha1.AddToScheme,
+	admissionregistrationv1beta1.AddToScheme,
+	appsv1beta1.AddToScheme,
+	appsv1beta2.AddToScheme,
+	appsv1.AddToScheme,
+	auditregistrationv1alpha1.AddToScheme,
+	authenticationv1.AddToScheme,
+	authenticationv1beta1.AddToScheme,
+	authorizationv1.AddToScheme,
+	authorizationv1beta1.AddToScheme,
+	autoscalingv1.AddToScheme,
+	autoscalingv2beta1.AddToScheme,
+	autoscalingv2beta2.AddToScheme,
+	batchv1.AddToScheme,
+	batchv1beta1.AddToScheme,
+	batchv2alpha1.AddToScheme,
+	certificatesv1beta1.AddToScheme,
+	coordinationv1beta1.AddToScheme,
+	corev1.AddToScheme,
+	eventsv1beta1.AddToScheme,
+	extensionsv1beta1.AddToScheme,
+	networkingv1.AddToScheme,
+	policyv1beta1.AddToScheme,
+	rbacv1.AddToScheme,
+	rbacv1beta1.AddToScheme,
+	rbacv1alpha1.AddToScheme,
+	schedulingv1alpha1.AddToScheme,
+	schedulingv1beta1.AddToScheme,
+	settingsv1alpha1.AddToScheme,
+	storagev1beta1.AddToScheme,
+	storagev1.AddToScheme,
+	storagev1alpha1.AddToScheme,
+}
+
+// AddToScheme adds all types of this clientset into the given scheme. This allows composition
+// of clientsets, like in:
+//
+//   import (
+//     "k8s.io/client-go/kubernetes"
+//     clientsetscheme "k8s.io/client-go/kubernetes/scheme"
+//     aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
+//   )
+//
+//   kclientset, _ := kubernetes.NewForConfig(c)
+//   _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
+//
+// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
+// correctly.
+var AddToScheme = localSchemeBuilder.AddToScheme
+
+func init() {
+	v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
+	utilruntime.Must(AddToScheme(Scheme))
+}
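
Beyond client construction, the Codecs factory registered here is commonly used to decode raw manifests into the typed objects that init() adds to Scheme; a brief sketch (the YAML literal is a made-up example):

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/client-go/kubernetes/scheme"
)

func main() {
	manifest := []byte("apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: example\n")
	// UniversalDeserializer recognizes JSON and YAML and decodes into any
	// type that the localSchemeBuilder above registered in scheme.Scheme.
	obj, gvk, err := scheme.Codecs.UniversalDeserializer().Decode(manifest, nil, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(gvk) // apps/v1, Kind=Deployment
	if d, ok := obj.(*appsv1.Deployment); ok {
		fmt.Println(d.Name) // example
	}
}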
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go
new file mode 100644
index 0000000..5e02f72
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go
@@ -0,0 +1,90 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	v1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
+	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+type AdmissionregistrationV1alpha1Interface interface {
+	RESTClient() rest.Interface
+	InitializerConfigurationsGetter
+}
+
+// AdmissionregistrationV1alpha1Client is used to interact with features provided by the admissionregistration.k8s.io group.
+type AdmissionregistrationV1alpha1Client struct {
+	restClient rest.Interface
+}
+
+func (c *AdmissionregistrationV1alpha1Client) InitializerConfigurations() InitializerConfigurationInterface {
+	return newInitializerConfigurations(c)
+}
+
+// NewForConfig creates a new AdmissionregistrationV1alpha1Client for the given config.
+func NewForConfig(c *rest.Config) (*AdmissionregistrationV1alpha1Client, error) {
+	config := *c
+	if err := setConfigDefaults(&config); err != nil {
+		return nil, err
+	}
+	client, err := rest.RESTClientFor(&config)
+	if err != nil {
+		return nil, err
+	}
+	return &AdmissionregistrationV1alpha1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new AdmissionregistrationV1alpha1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *AdmissionregistrationV1alpha1Client {
+	client, err := NewForConfig(c)
+	if err != nil {
+		panic(err)
+	}
+	return client
+}
+
+// New creates a new AdmissionregistrationV1alpha1Client for the given RESTClient.
+func New(c rest.Interface) *AdmissionregistrationV1alpha1Client {
+	return &AdmissionregistrationV1alpha1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+	gv := v1alpha1.SchemeGroupVersion
+	config.GroupVersion = &gv
+	config.APIPath = "/apis"
+	config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
+
+	if config.UserAgent == "" {
+		config.UserAgent = rest.DefaultKubernetesUserAgent()
+	}
+
+	return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with the API server by this client implementation.
+func (c *AdmissionregistrationV1alpha1Client) RESTClient() rest.Interface {
+	if c == nil {
+		return nil
+	}
+	return c.restClient
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/doc.go
new file mode 100644
index 0000000..df51baa
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1alpha1
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go
new file mode 100644
index 0000000..1e29b96
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go
@@ -0,0 +1,21 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+type InitializerConfigurationExpansion interface{}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/initializerconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/initializerconfiguration.go
new file mode 100644
index 0000000..7b8acec
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/initializerconfiguration.go
@@ -0,0 +1,164 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	"time"
+
+	v1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// InitializerConfigurationsGetter has a method to return an InitializerConfigurationInterface.
+// A group's client should implement this interface.
+type InitializerConfigurationsGetter interface {
+	InitializerConfigurations() InitializerConfigurationInterface
+}
+
+// InitializerConfigurationInterface has methods to work with InitializerConfiguration resources.
+type InitializerConfigurationInterface interface {
+	Create(*v1alpha1.InitializerConfiguration) (*v1alpha1.InitializerConfiguration, error)
+	Update(*v1alpha1.InitializerConfiguration) (*v1alpha1.InitializerConfiguration, error)
+	Delete(name string, options *v1.DeleteOptions) error
+	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
+	Get(name string, options v1.GetOptions) (*v1alpha1.InitializerConfiguration, error)
+	List(opts v1.ListOptions) (*v1alpha1.InitializerConfigurationList, error)
+	Watch(opts v1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.InitializerConfiguration, err error)
+	InitializerConfigurationExpansion
+}
+
+// initializerConfigurations implements InitializerConfigurationInterface
+type initializerConfigurations struct {
+	client rest.Interface
+}
+
+// newInitializerConfigurations returns an InitializerConfigurations
+func newInitializerConfigurations(c *AdmissionregistrationV1alpha1Client) *initializerConfigurations {
+	return &initializerConfigurations{
+		client: c.RESTClient(),
+	}
+}
+
+// Get takes the name of the initializerConfiguration and returns the corresponding initializerConfiguration object, and an error if there is any.
+func (c *initializerConfigurations) Get(name string, options v1.GetOptions) (result *v1alpha1.InitializerConfiguration, err error) {
+	result = &v1alpha1.InitializerConfiguration{}
+	err = c.client.Get().
+		Resource("initializerconfigurations").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of InitializerConfigurations that match those selectors.
+func (c *initializerConfigurations) List(opts v1.ListOptions) (result *v1alpha1.InitializerConfigurationList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1alpha1.InitializerConfigurationList{}
+	err = c.client.Get().
+		Resource("initializerconfigurations").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested initializerConfigurations.
+func (c *initializerConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Resource("initializerconfigurations").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of an initializerConfiguration and creates it.  Returns the server's representation of the initializerConfiguration, and an error, if there is any.
+func (c *initializerConfigurations) Create(initializerConfiguration *v1alpha1.InitializerConfiguration) (result *v1alpha1.InitializerConfiguration, err error) {
+	result = &v1alpha1.InitializerConfiguration{}
+	err = c.client.Post().
+		Resource("initializerconfigurations").
+		Body(initializerConfiguration).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of an initializerConfiguration and updates it. Returns the server's representation of the initializerConfiguration, and an error, if there is any.
+func (c *initializerConfigurations) Update(initializerConfiguration *v1alpha1.InitializerConfiguration) (result *v1alpha1.InitializerConfiguration, err error) {
+	result = &v1alpha1.InitializerConfiguration{}
+	err = c.client.Put().
+		Resource("initializerconfigurations").
+		Name(initializerConfiguration.Name).
+		Body(initializerConfiguration).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes the name of the initializerConfiguration and deletes it. Returns an error if one occurs.
+func (c *initializerConfigurations) Delete(name string, options *v1.DeleteOptions) error {
+	return c.client.Delete().
+		Resource("initializerconfigurations").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *initializerConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Resource("initializerconfigurations").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched initializerConfiguration.
+func (c *initializerConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.InitializerConfiguration, err error) {
+	result = &v1alpha1.InitializerConfiguration{}
+	err = c.client.Patch(pt).
+		Resource("initializerconfigurations").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
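
Every typed client in this vendor tree follows the shape above; as a hedged illustration (in-cluster config assumed, and the alpha API must be enabled server-side), listing these resources through the clientset looks like:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	rest "k8s.io/client-go/rest"
)

func main() {
	// InClusterConfig works when running inside a pod with a service account.
	config, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}
	list, err := cs.AdmissionregistrationV1alpha1().InitializerConfigurations().List(metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, ic := range list.Items {
		fmt.Println(ic.Name)
	}
}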
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go
new file mode 100644
index 0000000..b13ea79
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go
@@ -0,0 +1,95 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	v1beta1 "k8s.io/api/admissionregistration/v1beta1"
+	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+type AdmissionregistrationV1beta1Interface interface {
+	RESTClient() rest.Interface
+	MutatingWebhookConfigurationsGetter
+	ValidatingWebhookConfigurationsGetter
+}
+
+// AdmissionregistrationV1beta1Client is used to interact with features provided by the admissionregistration.k8s.io group.
+type AdmissionregistrationV1beta1Client struct {
+	restClient rest.Interface
+}
+
+func (c *AdmissionregistrationV1beta1Client) MutatingWebhookConfigurations() MutatingWebhookConfigurationInterface {
+	return newMutatingWebhookConfigurations(c)
+}
+
+func (c *AdmissionregistrationV1beta1Client) ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInterface {
+	return newValidatingWebhookConfigurations(c)
+}
+
+// NewForConfig creates a new AdmissionregistrationV1beta1Client for the given config.
+func NewForConfig(c *rest.Config) (*AdmissionregistrationV1beta1Client, error) {
+	config := *c
+	if err := setConfigDefaults(&config); err != nil {
+		return nil, err
+	}
+	client, err := rest.RESTClientFor(&config)
+	if err != nil {
+		return nil, err
+	}
+	return &AdmissionregistrationV1beta1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new AdmissionregistrationV1beta1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *AdmissionregistrationV1beta1Client {
+	client, err := NewForConfig(c)
+	if err != nil {
+		panic(err)
+	}
+	return client
+}
+
+// New creates a new AdmissionregistrationV1beta1Client for the given RESTClient.
+func New(c rest.Interface) *AdmissionregistrationV1beta1Client {
+	return &AdmissionregistrationV1beta1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+	gv := v1beta1.SchemeGroupVersion
+	config.GroupVersion = &gv
+	config.APIPath = "/apis"
+	config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
+
+	if config.UserAgent == "" {
+		config.UserAgent = rest.DefaultKubernetesUserAgent()
+	}
+
+	return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with the API server by this client implementation.
+func (c *AdmissionregistrationV1beta1Client) RESTClient() rest.Interface {
+	if c == nil {
+		return nil
+	}
+	return c.restClient
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/doc.go
new file mode 100644
index 0000000..7711019
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1beta1
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/generated_expansion.go
new file mode 100644
index 0000000..2aeb9c9
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/generated_expansion.go
@@ -0,0 +1,23 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+type MutatingWebhookConfigurationExpansion interface{}
+
+type ValidatingWebhookConfigurationExpansion interface{}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go
new file mode 100644
index 0000000..4524896
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go
@@ -0,0 +1,164 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	"time"
+
+	v1beta1 "k8s.io/api/admissionregistration/v1beta1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// MutatingWebhookConfigurationsGetter has a method to return a MutatingWebhookConfigurationInterface.
+// A group's client should implement this interface.
+type MutatingWebhookConfigurationsGetter interface {
+	MutatingWebhookConfigurations() MutatingWebhookConfigurationInterface
+}
+
+// MutatingWebhookConfigurationInterface has methods to work with MutatingWebhookConfiguration resources.
+type MutatingWebhookConfigurationInterface interface {
+	Create(*v1beta1.MutatingWebhookConfiguration) (*v1beta1.MutatingWebhookConfiguration, error)
+	Update(*v1beta1.MutatingWebhookConfiguration) (*v1beta1.MutatingWebhookConfiguration, error)
+	Delete(name string, options *v1.DeleteOptions) error
+	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
+	Get(name string, options v1.GetOptions) (*v1beta1.MutatingWebhookConfiguration, error)
+	List(opts v1.ListOptions) (*v1beta1.MutatingWebhookConfigurationList, error)
+	Watch(opts v1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.MutatingWebhookConfiguration, err error)
+	MutatingWebhookConfigurationExpansion
+}
+
+// mutatingWebhookConfigurations implements MutatingWebhookConfigurationInterface
+type mutatingWebhookConfigurations struct {
+	client rest.Interface
+}
+
+// newMutatingWebhookConfigurations returns a MutatingWebhookConfigurations
+func newMutatingWebhookConfigurations(c *AdmissionregistrationV1beta1Client) *mutatingWebhookConfigurations {
+	return &mutatingWebhookConfigurations{
+		client: c.RESTClient(),
+	}
+}
+
+// Get takes the name of the mutatingWebhookConfiguration and returns the corresponding mutatingWebhookConfiguration object, and an error if there is any.
+func (c *mutatingWebhookConfigurations) Get(name string, options v1.GetOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) {
+	result = &v1beta1.MutatingWebhookConfiguration{}
+	err = c.client.Get().
+		Resource("mutatingwebhookconfigurations").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of MutatingWebhookConfigurations that match those selectors.
+func (c *mutatingWebhookConfigurations) List(opts v1.ListOptions) (result *v1beta1.MutatingWebhookConfigurationList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1beta1.MutatingWebhookConfigurationList{}
+	err = c.client.Get().
+		Resource("mutatingwebhookconfigurations").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested mutatingWebhookConfigurations.
+func (c *mutatingWebhookConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Resource("mutatingwebhookconfigurations").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a mutatingWebhookConfiguration and creates it.  Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any.
+func (c *mutatingWebhookConfigurations) Create(mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration) (result *v1beta1.MutatingWebhookConfiguration, err error) {
+	result = &v1beta1.MutatingWebhookConfiguration{}
+	err = c.client.Post().
+		Resource("mutatingwebhookconfigurations").
+		Body(mutatingWebhookConfiguration).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a mutatingWebhookConfiguration and updates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any.
+func (c *mutatingWebhookConfigurations) Update(mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration) (result *v1beta1.MutatingWebhookConfiguration, err error) {
+	result = &v1beta1.MutatingWebhookConfiguration{}
+	err = c.client.Put().
+		Resource("mutatingwebhookconfigurations").
+		Name(mutatingWebhookConfiguration.Name).
+		Body(mutatingWebhookConfiguration).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes the name of the mutatingWebhookConfiguration and deletes it. Returns an error if one occurs.
+func (c *mutatingWebhookConfigurations) Delete(name string, options *v1.DeleteOptions) error {
+	return c.client.Delete().
+		Resource("mutatingwebhookconfigurations").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *mutatingWebhookConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Resource("mutatingwebhookconfigurations").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched mutatingWebhookConfiguration.
+func (c *mutatingWebhookConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.MutatingWebhookConfiguration, err error) {
+	result = &v1beta1.MutatingWebhookConfiguration{}
+	err = c.client.Patch(pt).
+		Resource("mutatingwebhookconfigurations").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go
new file mode 100644
index 0000000..7e711b3
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go
@@ -0,0 +1,164 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	"time"
+
+	v1beta1 "k8s.io/api/admissionregistration/v1beta1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// ValidatingWebhookConfigurationsGetter has a method to return a ValidatingWebhookConfigurationInterface.
+// A group's client should implement this interface.
+type ValidatingWebhookConfigurationsGetter interface {
+	ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInterface
+}
+
+// ValidatingWebhookConfigurationInterface has methods to work with ValidatingWebhookConfiguration resources.
+type ValidatingWebhookConfigurationInterface interface {
+	Create(*v1beta1.ValidatingWebhookConfiguration) (*v1beta1.ValidatingWebhookConfiguration, error)
+	Update(*v1beta1.ValidatingWebhookConfiguration) (*v1beta1.ValidatingWebhookConfiguration, error)
+	Delete(name string, options *v1.DeleteOptions) error
+	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
+	Get(name string, options v1.GetOptions) (*v1beta1.ValidatingWebhookConfiguration, error)
+	List(opts v1.ListOptions) (*v1beta1.ValidatingWebhookConfigurationList, error)
+	Watch(opts v1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ValidatingWebhookConfiguration, err error)
+	ValidatingWebhookConfigurationExpansion
+}
+
+// validatingWebhookConfigurations implements ValidatingWebhookConfigurationInterface
+type validatingWebhookConfigurations struct {
+	client rest.Interface
+}
+
+// newValidatingWebhookConfigurations returns a ValidatingWebhookConfigurations
+func newValidatingWebhookConfigurations(c *AdmissionregistrationV1beta1Client) *validatingWebhookConfigurations {
+	return &validatingWebhookConfigurations{
+		client: c.RESTClient(),
+	}
+}
+
+// Get takes the name of the validatingWebhookConfiguration and returns the corresponding validatingWebhookConfiguration object, and an error if there is any.
+func (c *validatingWebhookConfigurations) Get(name string, options v1.GetOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) {
+	result = &v1beta1.ValidatingWebhookConfiguration{}
+	err = c.client.Get().
+		Resource("validatingwebhookconfigurations").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of ValidatingWebhookConfigurations that match those selectors.
+func (c *validatingWebhookConfigurations) List(opts v1.ListOptions) (result *v1beta1.ValidatingWebhookConfigurationList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1beta1.ValidatingWebhookConfigurationList{}
+	err = c.client.Get().
+		Resource("validatingwebhookconfigurations").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested validatingWebhookConfigurations.
+func (c *validatingWebhookConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Resource("validatingwebhookconfigurations").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a validatingWebhookConfiguration and creates it.  Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any.
+func (c *validatingWebhookConfigurations) Create(validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration) (result *v1beta1.ValidatingWebhookConfiguration, err error) {
+	result = &v1beta1.ValidatingWebhookConfiguration{}
+	err = c.client.Post().
+		Resource("validatingwebhookconfigurations").
+		Body(validatingWebhookConfiguration).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a validatingWebhookConfiguration and updates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any.
+func (c *validatingWebhookConfigurations) Update(validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration) (result *v1beta1.ValidatingWebhookConfiguration, err error) {
+	result = &v1beta1.ValidatingWebhookConfiguration{}
+	err = c.client.Put().
+		Resource("validatingwebhookconfigurations").
+		Name(validatingWebhookConfiguration.Name).
+		Body(validatingWebhookConfiguration).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes the name of the validatingWebhookConfiguration and deletes it. Returns an error if one occurs.
+func (c *validatingWebhookConfigurations) Delete(name string, options *v1.DeleteOptions) error {
+	return c.client.Delete().
+		Resource("validatingwebhookconfigurations").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *validatingWebhookConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Resource("validatingwebhookconfigurations").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched validatingWebhookConfiguration.
+func (c *validatingWebhookConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ValidatingWebhookConfiguration, err error) {
+	result = &v1beta1.ValidatingWebhookConfiguration{}
+	err = c.client.Patch(pt).
+		Resource("validatingwebhookconfigurations").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
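
The generated Patch method takes a raw patch body and a patch type; below is a hypothetical sketch of flipping one webhook's failurePolicy via a strategic merge patch (the configuration and webhook names are invented, and the same pattern applies to the mutating variant above):

package webhookpatch

import (
	types "k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
)

// setFailurePolicyIgnore patches a single webhook entry, addressed by its
// strategic-merge key ("name"), without sending the whole object.
func setFailurePolicyIgnore(cs *kubernetes.Clientset) error {
	patch := []byte(`{"webhooks":[{"name":"example.webhook.io","failurePolicy":"Ignore"}]}`)
	_, err := cs.AdmissionregistrationV1beta1().
		ValidatingWebhookConfigurations().
		Patch("example-config", types.StrategicMergePatchType, patch)
	return err
}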
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go
new file mode 100644
index 0000000..da19c75
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go
@@ -0,0 +1,110 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	v1 "k8s.io/api/apps/v1"
+	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+type AppsV1Interface interface {
+	RESTClient() rest.Interface
+	ControllerRevisionsGetter
+	DaemonSetsGetter
+	DeploymentsGetter
+	ReplicaSetsGetter
+	StatefulSetsGetter
+}
+
+// AppsV1Client is used to interact with features provided by the apps group.
+type AppsV1Client struct {
+	restClient rest.Interface
+}
+
+func (c *AppsV1Client) ControllerRevisions(namespace string) ControllerRevisionInterface {
+	return newControllerRevisions(c, namespace)
+}
+
+func (c *AppsV1Client) DaemonSets(namespace string) DaemonSetInterface {
+	return newDaemonSets(c, namespace)
+}
+
+func (c *AppsV1Client) Deployments(namespace string) DeploymentInterface {
+	return newDeployments(c, namespace)
+}
+
+func (c *AppsV1Client) ReplicaSets(namespace string) ReplicaSetInterface {
+	return newReplicaSets(c, namespace)
+}
+
+func (c *AppsV1Client) StatefulSets(namespace string) StatefulSetInterface {
+	return newStatefulSets(c, namespace)
+}
+
+// NewForConfig creates a new AppsV1Client for the given config.
+func NewForConfig(c *rest.Config) (*AppsV1Client, error) {
+	config := *c
+	if err := setConfigDefaults(&config); err != nil {
+		return nil, err
+	}
+	client, err := rest.RESTClientFor(&config)
+	if err != nil {
+		return nil, err
+	}
+	return &AppsV1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new AppsV1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *AppsV1Client {
+	client, err := NewForConfig(c)
+	if err != nil {
+		panic(err)
+	}
+	return client
+}
+
+// New creates a new AppsV1Client for the given RESTClient.
+func New(c rest.Interface) *AppsV1Client {
+	return &AppsV1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+	gv := v1.SchemeGroupVersion
+	config.GroupVersion = &gv
+	config.APIPath = "/apis"
+	config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
+
+	if config.UserAgent == "" {
+		config.UserAgent = rest.DefaultKubernetesUserAgent()
+	}
+
+	return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with the API server by this client implementation.
+func (c *AppsV1Client) RESTClient() rest.Interface {
+	if c == nil {
+		return nil
+	}
+	return c.restClient
+}
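
A minimal sketch of constructing this typed client directly, assuming an in-cluster process; NewForConfig copies the config and fills in the apps/v1 defaults (group version, the /apis path, and the negotiated serializer) via setConfigDefaults before building the REST client.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	appsv1 "k8s.io/client-go/kubernetes/typed/apps/v1"
	"k8s.io/client-go/rest"
)

func main() {
	// Assumes the process runs inside a pod with a service account mounted.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client, err := appsv1.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	deps, err := client.Deployments("default").List(metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("found %d deployments\n", len(deps.Items))
}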
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go
new file mode 100644
index 0000000..e28e4d2
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go
@@ -0,0 +1,174 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	"time"
+
+	v1 "k8s.io/api/apps/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// ControllerRevisionsGetter has a method to return a ControllerRevisionInterface.
+// A group's client should implement this interface.
+type ControllerRevisionsGetter interface {
+	ControllerRevisions(namespace string) ControllerRevisionInterface
+}
+
+// ControllerRevisionInterface has methods to work with ControllerRevision resources.
+type ControllerRevisionInterface interface {
+	Create(*v1.ControllerRevision) (*v1.ControllerRevision, error)
+	Update(*v1.ControllerRevision) (*v1.ControllerRevision, error)
+	Delete(name string, options *metav1.DeleteOptions) error
+	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
+	Get(name string, options metav1.GetOptions) (*v1.ControllerRevision, error)
+	List(opts metav1.ListOptions) (*v1.ControllerRevisionList, error)
+	Watch(opts metav1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ControllerRevision, err error)
+	ControllerRevisionExpansion
+}
+
+// controllerRevisions implements ControllerRevisionInterface
+type controllerRevisions struct {
+	client rest.Interface
+	ns     string
+}
+
+// newControllerRevisions returns a ControllerRevisions
+func newControllerRevisions(c *AppsV1Client, namespace string) *controllerRevisions {
+	return &controllerRevisions{
+		client: c.RESTClient(),
+		ns:     namespace,
+	}
+}
+
+// Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any.
+func (c *controllerRevisions) Get(name string, options metav1.GetOptions) (result *v1.ControllerRevision, err error) {
+	result = &v1.ControllerRevision{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("controllerrevisions").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors.
+func (c *controllerRevisions) List(opts metav1.ListOptions) (result *v1.ControllerRevisionList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1.ControllerRevisionList{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("controllerrevisions").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested controllerRevisions.
+func (c *controllerRevisions) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Namespace(c.ns).
+		Resource("controllerrevisions").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a controllerRevision and creates it.  Returns the server's representation of the controllerRevision, and an error, if there is any.
+func (c *controllerRevisions) Create(controllerRevision *v1.ControllerRevision) (result *v1.ControllerRevision, err error) {
+	result = &v1.ControllerRevision{}
+	err = c.client.Post().
+		Namespace(c.ns).
+		Resource("controllerrevisions").
+		Body(controllerRevision).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any.
+func (c *controllerRevisions) Update(controllerRevision *v1.ControllerRevision) (result *v1.ControllerRevision, err error) {
+	result = &v1.ControllerRevision{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("controllerrevisions").
+		Name(controllerRevision.Name).
+		Body(controllerRevision).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes name of the controllerRevision and deletes it. Returns an error if one occurs.
+func (c *controllerRevisions) Delete(name string, options *metav1.DeleteOptions) error {
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("controllerrevisions").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *controllerRevisions) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("controllerrevisions").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched controllerRevision.
+func (c *controllerRevisions) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ControllerRevision, err error) {
+	result = &v1.ControllerRevision{}
+	err = c.client.Patch(pt).
+		Namespace(c.ns).
+		Resource("controllerrevisions").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
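
A usage sketch for the List method above; the namespace argument and the label selector are placeholders.

package example

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	appsv1 "k8s.io/client-go/kubernetes/typed/apps/v1"
)

// PrintRevisions lists ControllerRevisions matching a placeholder label
// selector and prints each name with its revision number.
func PrintRevisions(client *appsv1.AppsV1Client, namespace string) error {
	revs, err := client.ControllerRevisions(namespace).List(metav1.ListOptions{
		LabelSelector: "app=example", // hypothetical label
	})
	if err != nil {
		return err
	}
	for _, r := range revs.Items {
		fmt.Println(r.Name, r.Revision)
	}
	return nil
}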
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go
new file mode 100644
index 0000000..a535cda
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go
@@ -0,0 +1,191 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	"time"
+
+	v1 "k8s.io/api/apps/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// DaemonSetsGetter has a method to return a DaemonSetInterface.
+// A group's client should implement this interface.
+type DaemonSetsGetter interface {
+	DaemonSets(namespace string) DaemonSetInterface
+}
+
+// DaemonSetInterface has methods to work with DaemonSet resources.
+type DaemonSetInterface interface {
+	Create(*v1.DaemonSet) (*v1.DaemonSet, error)
+	Update(*v1.DaemonSet) (*v1.DaemonSet, error)
+	UpdateStatus(*v1.DaemonSet) (*v1.DaemonSet, error)
+	Delete(name string, options *metav1.DeleteOptions) error
+	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
+	Get(name string, options metav1.GetOptions) (*v1.DaemonSet, error)
+	List(opts metav1.ListOptions) (*v1.DaemonSetList, error)
+	Watch(opts metav1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.DaemonSet, err error)
+	DaemonSetExpansion
+}
+
+// daemonSets implements DaemonSetInterface
+type daemonSets struct {
+	client rest.Interface
+	ns     string
+}
+
+// newDaemonSets returns a DaemonSets
+func newDaemonSets(c *AppsV1Client, namespace string) *daemonSets {
+	return &daemonSets{
+		client: c.RESTClient(),
+		ns:     namespace,
+	}
+}
+
+// Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any.
+func (c *daemonSets) Get(name string, options metav1.GetOptions) (result *v1.DaemonSet, err error) {
+	result = &v1.DaemonSet{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("daemonsets").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of DaemonSets that match those selectors.
+func (c *daemonSets) List(opts metav1.ListOptions) (result *v1.DaemonSetList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1.DaemonSetList{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("daemonsets").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested daemonSets.
+func (c *daemonSets) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Namespace(c.ns).
+		Resource("daemonsets").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a daemonSet and creates it.  Returns the server's representation of the daemonSet, and an error, if there is any.
+func (c *daemonSets) Create(daemonSet *v1.DaemonSet) (result *v1.DaemonSet, err error) {
+	result = &v1.DaemonSet{}
+	err = c.client.Post().
+		Namespace(c.ns).
+		Resource("daemonsets").
+		Body(daemonSet).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any.
+func (c *daemonSets) Update(daemonSet *v1.DaemonSet) (result *v1.DaemonSet, err error) {
+	result = &v1.DaemonSet{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("daemonsets").
+		Name(daemonSet.Name).
+		Body(daemonSet).
+		Do().
+		Into(result)
+	return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+
+func (c *daemonSets) UpdateStatus(daemonSet *v1.DaemonSet) (result *v1.DaemonSet, err error) {
+	result = &v1.DaemonSet{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("daemonsets").
+		Name(daemonSet.Name).
+		SubResource("status").
+		Body(daemonSet).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes name of the daemonSet and deletes it. Returns an error if one occurs.
+func (c *daemonSets) Delete(name string, options *metav1.DeleteOptions) error {
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("daemonsets").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *daemonSets) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("daemonsets").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched daemonSet.
+func (c *daemonSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.DaemonSet, err error) {
+	result = &v1.DaemonSet{}
+	err = c.client.Patch(pt).
+		Namespace(c.ns).
+		Resource("daemonsets").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
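
A sketch of the Watch method above; note that TimeoutSeconds in ListOptions is applied both server-side and, by the generated Watch(), as a client-side request timeout.

package example

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	appsv1 "k8s.io/client-go/kubernetes/typed/apps/v1"
)

// WatchDaemonSets streams DaemonSet events until the watch is closed,
// here bounded by a sixty-second timeout.
func WatchDaemonSets(client *appsv1.AppsV1Client, namespace string) error {
	timeout := int64(60)
	w, err := client.DaemonSets(namespace).Watch(metav1.ListOptions{
		TimeoutSeconds: &timeout,
	})
	if err != nil {
		return err
	}
	defer w.Stop()
	for ev := range w.ResultChan() {
		fmt.Println(ev.Type)
	}
	return nil
}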
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go
new file mode 100644
index 0000000..f9799a4
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go
@@ -0,0 +1,223 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	"time"
+
+	v1 "k8s.io/api/apps/v1"
+	autoscalingv1 "k8s.io/api/autoscaling/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// DeploymentsGetter has a method to return a DeploymentInterface.
+// A group's client should implement this interface.
+type DeploymentsGetter interface {
+	Deployments(namespace string) DeploymentInterface
+}
+
+// DeploymentInterface has methods to work with Deployment resources.
+type DeploymentInterface interface {
+	Create(*v1.Deployment) (*v1.Deployment, error)
+	Update(*v1.Deployment) (*v1.Deployment, error)
+	UpdateStatus(*v1.Deployment) (*v1.Deployment, error)
+	Delete(name string, options *metav1.DeleteOptions) error
+	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
+	Get(name string, options metav1.GetOptions) (*v1.Deployment, error)
+	List(opts metav1.ListOptions) (*v1.DeploymentList, error)
+	Watch(opts metav1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Deployment, err error)
+	GetScale(deploymentName string, options metav1.GetOptions) (*autoscalingv1.Scale, error)
+	UpdateScale(deploymentName string, scale *autoscalingv1.Scale) (*autoscalingv1.Scale, error)
+
+	DeploymentExpansion
+}
+
+// deployments implements DeploymentInterface
+type deployments struct {
+	client rest.Interface
+	ns     string
+}
+
+// newDeployments returns a Deployments
+func newDeployments(c *AppsV1Client, namespace string) *deployments {
+	return &deployments{
+		client: c.RESTClient(),
+		ns:     namespace,
+	}
+}
+
+// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any.
+func (c *deployments) Get(name string, options metav1.GetOptions) (result *v1.Deployment, err error) {
+	result = &v1.Deployment{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("deployments").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of Deployments that match those selectors.
+func (c *deployments) List(opts metav1.ListOptions) (result *v1.DeploymentList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1.DeploymentList{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("deployments").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested deployments.
+func (c *deployments) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Namespace(c.ns).
+		Resource("deployments").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a deployment and creates it.  Returns the server's representation of the deployment, and an error, if there is any.
+func (c *deployments) Create(deployment *v1.Deployment) (result *v1.Deployment, err error) {
+	result = &v1.Deployment{}
+	err = c.client.Post().
+		Namespace(c.ns).
+		Resource("deployments").
+		Body(deployment).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any.
+func (c *deployments) Update(deployment *v1.Deployment) (result *v1.Deployment, err error) {
+	result = &v1.Deployment{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("deployments").
+		Name(deployment.Name).
+		Body(deployment).
+		Do().
+		Into(result)
+	return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+
+func (c *deployments) UpdateStatus(deployment *v1.Deployment) (result *v1.Deployment, err error) {
+	result = &v1.Deployment{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("deployments").
+		Name(deployment.Name).
+		SubResource("status").
+		Body(deployment).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes name of the deployment and deletes it. Returns an error if one occurs.
+func (c *deployments) Delete(name string, options *metav1.DeleteOptions) error {
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("deployments").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *deployments) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("deployments").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched deployment.
+func (c *deployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Deployment, err error) {
+	result = &v1.Deployment{}
+	err = c.client.Patch(pt).
+		Namespace(c.ns).
+		Resource("deployments").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
+
+// GetScale takes name of the deployment, and returns the corresponding autoscalingv1.Scale object, and an error if there is any.
+func (c *deployments) GetScale(deploymentName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) {
+	result = &autoscalingv1.Scale{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("deployments").
+		Name(deploymentName).
+		SubResource("scale").
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any.
+func (c *deployments) UpdateScale(deploymentName string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) {
+	result = &autoscalingv1.Scale{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("deployments").
+		Name(deploymentName).
+		SubResource("scale").
+		Body(scale).
+		Do().
+		Into(result)
+	return
+}
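
A sketch of the scale subresource round trip exposed by GetScale and UpdateScale above, the same read-modify-write that kubectl scale performs.

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	appsv1 "k8s.io/client-go/kubernetes/typed/apps/v1"
)

// ScaleDeployment reads the scale subresource, adjusts the replica
// count, and writes it back; the deployment spec itself is untouched.
func ScaleDeployment(client *appsv1.AppsV1Client, namespace, name string, replicas int32) error {
	scale, err := client.Deployments(namespace).GetScale(name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	scale.Spec.Replicas = replicas
	_, err = client.Deployments(namespace).UpdateScale(name, scale)
	return err
}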
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/doc.go
new file mode 100644
index 0000000..3af5d05
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/generated_expansion.go
new file mode 100644
index 0000000..88cfe4e
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/generated_expansion.go
@@ -0,0 +1,29 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+type ControllerRevisionExpansion interface{}
+
+type DaemonSetExpansion interface{}
+
+type DeploymentExpansion interface{}
+
+type ReplicaSetExpansion interface{}
+
+type StatefulSetExpansion interface{}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go
new file mode 100644
index 0000000..ff3504e
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go
@@ -0,0 +1,223 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	"time"
+
+	v1 "k8s.io/api/apps/v1"
+	autoscalingv1 "k8s.io/api/autoscaling/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// ReplicaSetsGetter has a method to return a ReplicaSetInterface.
+// A group's client should implement this interface.
+type ReplicaSetsGetter interface {
+	ReplicaSets(namespace string) ReplicaSetInterface
+}
+
+// ReplicaSetInterface has methods to work with ReplicaSet resources.
+type ReplicaSetInterface interface {
+	Create(*v1.ReplicaSet) (*v1.ReplicaSet, error)
+	Update(*v1.ReplicaSet) (*v1.ReplicaSet, error)
+	UpdateStatus(*v1.ReplicaSet) (*v1.ReplicaSet, error)
+	Delete(name string, options *metav1.DeleteOptions) error
+	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
+	Get(name string, options metav1.GetOptions) (*v1.ReplicaSet, error)
+	List(opts metav1.ListOptions) (*v1.ReplicaSetList, error)
+	Watch(opts metav1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ReplicaSet, err error)
+	GetScale(replicaSetName string, options metav1.GetOptions) (*autoscalingv1.Scale, error)
+	UpdateScale(replicaSetName string, scale *autoscalingv1.Scale) (*autoscalingv1.Scale, error)
+
+	ReplicaSetExpansion
+}
+
+// replicaSets implements ReplicaSetInterface
+type replicaSets struct {
+	client rest.Interface
+	ns     string
+}
+
+// newReplicaSets returns a ReplicaSets
+func newReplicaSets(c *AppsV1Client, namespace string) *replicaSets {
+	return &replicaSets{
+		client: c.RESTClient(),
+		ns:     namespace,
+	}
+}
+
+// Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any.
+func (c *replicaSets) Get(name string, options metav1.GetOptions) (result *v1.ReplicaSet, err error) {
+	result = &v1.ReplicaSet{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("replicasets").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of ReplicaSets that match those selectors.
+func (c *replicaSets) List(opts metav1.ListOptions) (result *v1.ReplicaSetList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1.ReplicaSetList{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("replicasets").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested replicaSets.
+func (c *replicaSets) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Namespace(c.ns).
+		Resource("replicasets").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a replicaSet and creates it.  Returns the server's representation of the replicaSet, and an error, if there is any.
+func (c *replicaSets) Create(replicaSet *v1.ReplicaSet) (result *v1.ReplicaSet, err error) {
+	result = &v1.ReplicaSet{}
+	err = c.client.Post().
+		Namespace(c.ns).
+		Resource("replicasets").
+		Body(replicaSet).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any.
+func (c *replicaSets) Update(replicaSet *v1.ReplicaSet) (result *v1.ReplicaSet, err error) {
+	result = &v1.ReplicaSet{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("replicasets").
+		Name(replicaSet.Name).
+		Body(replicaSet).
+		Do().
+		Into(result)
+	return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+
+func (c *replicaSets) UpdateStatus(replicaSet *v1.ReplicaSet) (result *v1.ReplicaSet, err error) {
+	result = &v1.ReplicaSet{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("replicasets").
+		Name(replicaSet.Name).
+		SubResource("status").
+		Body(replicaSet).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes name of the replicaSet and deletes it. Returns an error if one occurs.
+func (c *replicaSets) Delete(name string, options *metav1.DeleteOptions) error {
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("replicasets").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *replicaSets) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("replicasets").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched replicaSet.
+func (c *replicaSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ReplicaSet, err error) {
+	result = &v1.ReplicaSet{}
+	err = c.client.Patch(pt).
+		Namespace(c.ns).
+		Resource("replicasets").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
+
+// GetScale takes name of the replicaSet, and returns the corresponding autoscalingv1.Scale object, and an error if there is any.
+func (c *replicaSets) GetScale(replicaSetName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) {
+	result = &autoscalingv1.Scale{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("replicasets").
+		Name(replicaSetName).
+		SubResource("scale").
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any.
+func (c *replicaSets) UpdateScale(replicaSetName string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) {
+	result = &autoscalingv1.Scale{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("replicasets").
+		Name(replicaSetName).
+		SubResource("scale").
+		Body(scale).
+		Do().
+		Into(result)
+	return
+}
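
A sketch of the Patch method above using a strategic merge patch, the default patch type kubectl uses for built-in types; the label key and value are placeholders.

package example

import (
	"k8s.io/apimachinery/pkg/types"
	appsv1 "k8s.io/client-go/kubernetes/typed/apps/v1"
)

// LabelReplicaSet adds a placeholder label via a strategic merge patch.
func LabelReplicaSet(client *appsv1.AppsV1Client, namespace, name string) error {
	patch := []byte(`{"metadata":{"labels":{"patched":"true"}}}`)
	_, err := client.ReplicaSets(namespace).Patch(name, types.StrategicMergePatchType, patch)
	return err
}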
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go
new file mode 100644
index 0000000..c12c470
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go
@@ -0,0 +1,223 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	"time"
+
+	v1 "k8s.io/api/apps/v1"
+	autoscalingv1 "k8s.io/api/autoscaling/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// StatefulSetsGetter has a method to return a StatefulSetInterface.
+// A group's client should implement this interface.
+type StatefulSetsGetter interface {
+	StatefulSets(namespace string) StatefulSetInterface
+}
+
+// StatefulSetInterface has methods to work with StatefulSet resources.
+type StatefulSetInterface interface {
+	Create(*v1.StatefulSet) (*v1.StatefulSet, error)
+	Update(*v1.StatefulSet) (*v1.StatefulSet, error)
+	UpdateStatus(*v1.StatefulSet) (*v1.StatefulSet, error)
+	Delete(name string, options *metav1.DeleteOptions) error
+	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
+	Get(name string, options metav1.GetOptions) (*v1.StatefulSet, error)
+	List(opts metav1.ListOptions) (*v1.StatefulSetList, error)
+	Watch(opts metav1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.StatefulSet, err error)
+	GetScale(statefulSetName string, options metav1.GetOptions) (*autoscalingv1.Scale, error)
+	UpdateScale(statefulSetName string, scale *autoscalingv1.Scale) (*autoscalingv1.Scale, error)
+
+	StatefulSetExpansion
+}
+
+// statefulSets implements StatefulSetInterface
+type statefulSets struct {
+	client rest.Interface
+	ns     string
+}
+
+// newStatefulSets returns a StatefulSets
+func newStatefulSets(c *AppsV1Client, namespace string) *statefulSets {
+	return &statefulSets{
+		client: c.RESTClient(),
+		ns:     namespace,
+	}
+}
+
+// Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any.
+func (c *statefulSets) Get(name string, options metav1.GetOptions) (result *v1.StatefulSet, err error) {
+	result = &v1.StatefulSet{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("statefulsets").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of StatefulSets that match those selectors.
+func (c *statefulSets) List(opts metav1.ListOptions) (result *v1.StatefulSetList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1.StatefulSetList{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("statefulsets").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested statefulSets.
+func (c *statefulSets) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Namespace(c.ns).
+		Resource("statefulsets").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a statefulSet and creates it.  Returns the server's representation of the statefulSet, and an error, if there is any.
+func (c *statefulSets) Create(statefulSet *v1.StatefulSet) (result *v1.StatefulSet, err error) {
+	result = &v1.StatefulSet{}
+	err = c.client.Post().
+		Namespace(c.ns).
+		Resource("statefulsets").
+		Body(statefulSet).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any.
+func (c *statefulSets) Update(statefulSet *v1.StatefulSet) (result *v1.StatefulSet, err error) {
+	result = &v1.StatefulSet{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("statefulsets").
+		Name(statefulSet.Name).
+		Body(statefulSet).
+		Do().
+		Into(result)
+	return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+
+func (c *statefulSets) UpdateStatus(statefulSet *v1.StatefulSet) (result *v1.StatefulSet, err error) {
+	result = &v1.StatefulSet{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("statefulsets").
+		Name(statefulSet.Name).
+		SubResource("status").
+		Body(statefulSet).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes name of the statefulSet and deletes it. Returns an error if one occurs.
+func (c *statefulSets) Delete(name string, options *metav1.DeleteOptions) error {
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("statefulsets").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *statefulSets) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("statefulsets").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched statefulSet.
+func (c *statefulSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.StatefulSet, err error) {
+	result = &v1.StatefulSet{}
+	err = c.client.Patch(pt).
+		Namespace(c.ns).
+		Resource("statefulsets").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
+
+// GetScale takes name of the statefulSet, and returns the corresponding autoscalingv1.Scale object, and an error if there is any.
+func (c *statefulSets) GetScale(statefulSetName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) {
+	result = &autoscalingv1.Scale{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("statefulsets").
+		Name(statefulSetName).
+		SubResource("scale").
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any.
+func (c *statefulSets) UpdateScale(statefulSetName string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) {
+	result = &autoscalingv1.Scale{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("statefulsets").
+		Name(statefulSetName).
+		SubResource("scale").
+		Body(scale).
+		Do().
+		Into(result)
+	return
+}
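
A sketch of DeleteCollection above; the selector is a placeholder. DeleteOptions travels as the request body while ListOptions becomes query parameters, so both ride on the same DELETE request.

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	appsv1 "k8s.io/client-go/kubernetes/typed/apps/v1"
)

// PruneStatefulSets deletes every StatefulSet matching a placeholder
// label selector, with foreground cascading deletion.
func PruneStatefulSets(client *appsv1.AppsV1Client, namespace string) error {
	policy := metav1.DeletePropagationForeground
	return client.StatefulSets(namespace).DeleteCollection(
		&metav1.DeleteOptions{PropagationPolicy: &policy},
		metav1.ListOptions{LabelSelector: "app=example"}, // hypothetical selector
	)
}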
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go
new file mode 100644
index 0000000..2c9db88
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go
@@ -0,0 +1,100 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	v1beta1 "k8s.io/api/apps/v1beta1"
+	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+type AppsV1beta1Interface interface {
+	RESTClient() rest.Interface
+	ControllerRevisionsGetter
+	DeploymentsGetter
+	StatefulSetsGetter
+}
+
+// AppsV1beta1Client is used to interact with features provided by the apps group.
+type AppsV1beta1Client struct {
+	restClient rest.Interface
+}
+
+func (c *AppsV1beta1Client) ControllerRevisions(namespace string) ControllerRevisionInterface {
+	return newControllerRevisions(c, namespace)
+}
+
+func (c *AppsV1beta1Client) Deployments(namespace string) DeploymentInterface {
+	return newDeployments(c, namespace)
+}
+
+func (c *AppsV1beta1Client) StatefulSets(namespace string) StatefulSetInterface {
+	return newStatefulSets(c, namespace)
+}
+
+// NewForConfig creates a new AppsV1beta1Client for the given config.
+func NewForConfig(c *rest.Config) (*AppsV1beta1Client, error) {
+	config := *c
+	if err := setConfigDefaults(&config); err != nil {
+		return nil, err
+	}
+	client, err := rest.RESTClientFor(&config)
+	if err != nil {
+		return nil, err
+	}
+	return &AppsV1beta1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new AppsV1beta1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *AppsV1beta1Client {
+	client, err := NewForConfig(c)
+	if err != nil {
+		panic(err)
+	}
+	return client
+}
+
+// New creates a new AppsV1beta1Client for the given RESTClient.
+func New(c rest.Interface) *AppsV1beta1Client {
+	return &AppsV1beta1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+	gv := v1beta1.SchemeGroupVersion
+	config.GroupVersion = &gv
+	config.APIPath = "/apis"
+	config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
+
+	if config.UserAgent == "" {
+		config.UserAgent = rest.DefaultKubernetesUserAgent()
+	}
+
+	return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with the API server by this client implementation.
+func (c *AppsV1beta1Client) RESTClient() rest.Interface {
+	if c == nil {
+		return nil
+	}
+	return c.restClient
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go
new file mode 100644
index 0000000..45ddb91
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go
@@ -0,0 +1,174 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	"time"
+
+	v1beta1 "k8s.io/api/apps/v1beta1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// ControllerRevisionsGetter has a method to return a ControllerRevisionInterface.
+// A group's client should implement this interface.
+type ControllerRevisionsGetter interface {
+	ControllerRevisions(namespace string) ControllerRevisionInterface
+}
+
+// ControllerRevisionInterface has methods to work with ControllerRevision resources.
+type ControllerRevisionInterface interface {
+	Create(*v1beta1.ControllerRevision) (*v1beta1.ControllerRevision, error)
+	Update(*v1beta1.ControllerRevision) (*v1beta1.ControllerRevision, error)
+	Delete(name string, options *v1.DeleteOptions) error
+	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
+	Get(name string, options v1.GetOptions) (*v1beta1.ControllerRevision, error)
+	List(opts v1.ListOptions) (*v1beta1.ControllerRevisionList, error)
+	Watch(opts v1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ControllerRevision, err error)
+	ControllerRevisionExpansion
+}
+
+// controllerRevisions implements ControllerRevisionInterface
+type controllerRevisions struct {
+	client rest.Interface
+	ns     string
+}
+
+// newControllerRevisions returns a ControllerRevisions
+func newControllerRevisions(c *AppsV1beta1Client, namespace string) *controllerRevisions {
+	return &controllerRevisions{
+		client: c.RESTClient(),
+		ns:     namespace,
+	}
+}
+
+// Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any.
+func (c *controllerRevisions) Get(name string, options v1.GetOptions) (result *v1beta1.ControllerRevision, err error) {
+	result = &v1beta1.ControllerRevision{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("controllerrevisions").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors.
+func (c *controllerRevisions) List(opts v1.ListOptions) (result *v1beta1.ControllerRevisionList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1beta1.ControllerRevisionList{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("controllerrevisions").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested controllerRevisions.
+func (c *controllerRevisions) Watch(opts v1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Namespace(c.ns).
+		Resource("controllerrevisions").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a controllerRevision and creates it.  Returns the server's representation of the controllerRevision, and an error, if there is any.
+func (c *controllerRevisions) Create(controllerRevision *v1beta1.ControllerRevision) (result *v1beta1.ControllerRevision, err error) {
+	result = &v1beta1.ControllerRevision{}
+	err = c.client.Post().
+		Namespace(c.ns).
+		Resource("controllerrevisions").
+		Body(controllerRevision).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any.
+func (c *controllerRevisions) Update(controllerRevision *v1beta1.ControllerRevision) (result *v1beta1.ControllerRevision, err error) {
+	result = &v1beta1.ControllerRevision{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("controllerrevisions").
+		Name(controllerRevision.Name).
+		Body(controllerRevision).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes name of the controllerRevision and deletes it. Returns an error if one occurs.
+func (c *controllerRevisions) Delete(name string, options *v1.DeleteOptions) error {
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("controllerrevisions").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *controllerRevisions) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("controllerrevisions").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched controllerRevision.
+func (c *controllerRevisions) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ControllerRevision, err error) {
+	result = &v1beta1.ControllerRevision{}
+	err = c.client.Patch(pt).
+		Namespace(c.ns).
+		Resource("controllerrevisions").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go
new file mode 100644
index 0000000..05fdcb7
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go
@@ -0,0 +1,191 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	"time"
+
+	v1beta1 "k8s.io/api/apps/v1beta1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// DeploymentsGetter has a method to return a DeploymentInterface.
+// A group's client should implement this interface.
+type DeploymentsGetter interface {
+	Deployments(namespace string) DeploymentInterface
+}
+
+// DeploymentInterface has methods to work with Deployment resources.
+type DeploymentInterface interface {
+	Create(*v1beta1.Deployment) (*v1beta1.Deployment, error)
+	Update(*v1beta1.Deployment) (*v1beta1.Deployment, error)
+	UpdateStatus(*v1beta1.Deployment) (*v1beta1.Deployment, error)
+	Delete(name string, options *v1.DeleteOptions) error
+	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
+	Get(name string, options v1.GetOptions) (*v1beta1.Deployment, error)
+	List(opts v1.ListOptions) (*v1beta1.DeploymentList, error)
+	Watch(opts v1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error)
+	DeploymentExpansion
+}
+
+// deployments implements DeploymentInterface
+type deployments struct {
+	client rest.Interface
+	ns     string
+}
+
+// newDeployments returns a Deployments
+func newDeployments(c *AppsV1beta1Client, namespace string) *deployments {
+	return &deployments{
+		client: c.RESTClient(),
+		ns:     namespace,
+	}
+}
+
+// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any.
+func (c *deployments) Get(name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) {
+	result = &v1beta1.Deployment{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("deployments").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of Deployments that match those selectors.
+func (c *deployments) List(opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1beta1.DeploymentList{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("deployments").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested deployments.
+func (c *deployments) Watch(opts v1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Namespace(c.ns).
+		Resource("deployments").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a deployment and creates it.  Returns the server's representation of the deployment, and an error, if there is any.
+func (c *deployments) Create(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) {
+	result = &v1beta1.Deployment{}
+	err = c.client.Post().
+		Namespace(c.ns).
+		Resource("deployments").
+		Body(deployment).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any.
+func (c *deployments) Update(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) {
+	result = &v1beta1.Deployment{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("deployments").
+		Name(deployment.Name).
+		Body(deployment).
+		Do().
+		Into(result)
+	return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+
+func (c *deployments) UpdateStatus(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) {
+	result = &v1beta1.Deployment{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("deployments").
+		Name(deployment.Name).
+		SubResource("status").
+		Body(deployment).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes name of the deployment and deletes it. Returns an error if one occurs.
+func (c *deployments) Delete(name string, options *v1.DeleteOptions) error {
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("deployments").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *deployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("deployments").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched deployment.
+func (c *deployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error) {
+	result = &v1beta1.Deployment{}
+	err = c.client.Patch(pt).
+		Namespace(c.ns).
+		Resource("deployments").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
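
A sketch of the UpdateStatus method above through the v1beta1 client; only the status subresource is written, so the deployment's spec is left untouched by the server. The observed-generation update shown is illustrative.

package example

import (
	appsv1beta1 "k8s.io/api/apps/v1beta1"
	typedv1beta1 "k8s.io/client-go/kubernetes/typed/apps/v1beta1"
)

// MarkObserved records that the current generation has been observed,
// writing through the status subresource only.
func MarkObserved(client *typedv1beta1.AppsV1beta1Client, d *appsv1beta1.Deployment) error {
	d.Status.ObservedGeneration = d.Generation
	_, err := client.Deployments(d.Namespace).UpdateStatus(d)
	return err
}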
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/doc.go
new file mode 100644
index 0000000..7711019
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1beta1
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/generated_expansion.go
new file mode 100644
index 0000000..113455d
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/generated_expansion.go
@@ -0,0 +1,25 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+type ControllerRevisionExpansion interface{}
+
+type DeploymentExpansion interface{}
+
+type StatefulSetExpansion interface{}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go
new file mode 100644
index 0000000..c4b35b4
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go
@@ -0,0 +1,191 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	"time"
+
+	v1beta1 "k8s.io/api/apps/v1beta1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// StatefulSetsGetter has a method to return a StatefulSetInterface.
+// A group's client should implement this interface.
+type StatefulSetsGetter interface {
+	StatefulSets(namespace string) StatefulSetInterface
+}
+
+// StatefulSetInterface has methods to work with StatefulSet resources.
+type StatefulSetInterface interface {
+	Create(*v1beta1.StatefulSet) (*v1beta1.StatefulSet, error)
+	Update(*v1beta1.StatefulSet) (*v1beta1.StatefulSet, error)
+	UpdateStatus(*v1beta1.StatefulSet) (*v1beta1.StatefulSet, error)
+	Delete(name string, options *v1.DeleteOptions) error
+	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
+	Get(name string, options v1.GetOptions) (*v1beta1.StatefulSet, error)
+	List(opts v1.ListOptions) (*v1beta1.StatefulSetList, error)
+	Watch(opts v1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.StatefulSet, err error)
+	StatefulSetExpansion
+}
+
+// statefulSets implements StatefulSetInterface
+type statefulSets struct {
+	client rest.Interface
+	ns     string
+}
+
+// newStatefulSets returns a StatefulSets
+func newStatefulSets(c *AppsV1beta1Client, namespace string) *statefulSets {
+	return &statefulSets{
+		client: c.RESTClient(),
+		ns:     namespace,
+	}
+}
+
+// Get takes the name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any.
+func (c *statefulSets) Get(name string, options v1.GetOptions) (result *v1beta1.StatefulSet, err error) {
+	result = &v1beta1.StatefulSet{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("statefulsets").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of StatefulSets that match those selectors.
+func (c *statefulSets) List(opts v1.ListOptions) (result *v1beta1.StatefulSetList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1beta1.StatefulSetList{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("statefulsets").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested statefulSets.
+func (c *statefulSets) Watch(opts v1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Namespace(c.ns).
+		Resource("statefulsets").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any.
+func (c *statefulSets) Create(statefulSet *v1beta1.StatefulSet) (result *v1beta1.StatefulSet, err error) {
+	result = &v1beta1.StatefulSet{}
+	err = c.client.Post().
+		Namespace(c.ns).
+		Resource("statefulsets").
+		Body(statefulSet).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any.
+func (c *statefulSets) Update(statefulSet *v1beta1.StatefulSet) (result *v1beta1.StatefulSet, err error) {
+	result = &v1beta1.StatefulSet{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("statefulsets").
+		Name(statefulSet.Name).
+		Body(statefulSet).
+		Do().
+		Into(result)
+	return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+
+func (c *statefulSets) UpdateStatus(statefulSet *v1beta1.StatefulSet) (result *v1beta1.StatefulSet, err error) {
+	result = &v1beta1.StatefulSet{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("statefulsets").
+		Name(statefulSet.Name).
+		SubResource("status").
+		Body(statefulSet).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes the name of the statefulSet and deletes it. Returns an error if one occurs.
+func (c *statefulSets) Delete(name string, options *v1.DeleteOptions) error {
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("statefulsets").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *statefulSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("statefulsets").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched statefulSet.
+func (c *statefulSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.StatefulSet, err error) {
+	result = &v1beta1.StatefulSet{}
+	err = c.client.Patch(pt).
+		Namespace(c.ns).
+		Resource("statefulsets").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
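
A short, hypothetical sketch of the read-modify-write cycle these Get/Update methods support; the namespace and name arguments are placeholders:

// Sketch: bump the replica count of a v1beta1 StatefulSet.
package main

import (
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func bumpReplicas(cs *kubernetes.Clientset, ns, name string, replicas int32) error {
	ss, err := cs.AppsV1beta1().StatefulSets(ns).Get(name, v1.GetOptions{})
	if err != nil {
		return err
	}
	ss.Spec.Replicas = &replicas // pointer field; nil means server-side default
	_, err = cs.AppsV1beta1().StatefulSets(ns).Update(ss)
	return err
}

Update issues a PUT and can fail with a conflict if the object changed after Get; callers typically retry, for example with client-go's retry.RetryOnConflict helper.
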
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go
new file mode 100644
index 0000000..99d677f
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go
@@ -0,0 +1,110 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta2
+
+import (
+	v1beta2 "k8s.io/api/apps/v1beta2"
+	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+type AppsV1beta2Interface interface {
+	RESTClient() rest.Interface
+	ControllerRevisionsGetter
+	DaemonSetsGetter
+	DeploymentsGetter
+	ReplicaSetsGetter
+	StatefulSetsGetter
+}
+
+// AppsV1beta2Client is used to interact with features provided by the apps group.
+type AppsV1beta2Client struct {
+	restClient rest.Interface
+}
+
+func (c *AppsV1beta2Client) ControllerRevisions(namespace string) ControllerRevisionInterface {
+	return newControllerRevisions(c, namespace)
+}
+
+func (c *AppsV1beta2Client) DaemonSets(namespace string) DaemonSetInterface {
+	return newDaemonSets(c, namespace)
+}
+
+func (c *AppsV1beta2Client) Deployments(namespace string) DeploymentInterface {
+	return newDeployments(c, namespace)
+}
+
+func (c *AppsV1beta2Client) ReplicaSets(namespace string) ReplicaSetInterface {
+	return newReplicaSets(c, namespace)
+}
+
+func (c *AppsV1beta2Client) StatefulSets(namespace string) StatefulSetInterface {
+	return newStatefulSets(c, namespace)
+}
+
+// NewForConfig creates a new AppsV1beta2Client for the given config.
+func NewForConfig(c *rest.Config) (*AppsV1beta2Client, error) {
+	config := *c
+	if err := setConfigDefaults(&config); err != nil {
+		return nil, err
+	}
+	client, err := rest.RESTClientFor(&config)
+	if err != nil {
+		return nil, err
+	}
+	return &AppsV1beta2Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new AppsV1beta2Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *AppsV1beta2Client {
+	client, err := NewForConfig(c)
+	if err != nil {
+		panic(err)
+	}
+	return client
+}
+
+// New creates a new AppsV1beta2Client for the given RESTClient.
+func New(c rest.Interface) *AppsV1beta2Client {
+	return &AppsV1beta2Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+	gv := v1beta2.SchemeGroupVersion
+	config.GroupVersion = &gv
+	config.APIPath = "/apis"
+	config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
+
+	if config.UserAgent == "" {
+		config.UserAgent = rest.DefaultKubernetesUserAgent()
+	}
+
+	return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with the API server by this client implementation.
+func (c *AppsV1beta2Client) RESTClient() rest.Interface {
+	if c == nil {
+		return nil
+	}
+	return c.restClient
+}
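
Note that NewForConfig copies the supplied rest.Config before defaulting it, so the caller's config is never mutated. A minimal sketch of constructing this client directly, assuming a kubeconfig path supplied by the caller:

// Sketch: build an AppsV1beta2Client from a kubeconfig path
// (the path parameter is illustrative, not part of this change).
package main

import (
	appsv1beta2 "k8s.io/client-go/kubernetes/typed/apps/v1beta2"
	"k8s.io/client-go/tools/clientcmd"
)

func newAppsClient(kubeconfig string) (*appsv1beta2.AppsV1beta2Client, error) {
	config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		return nil, err
	}
	return appsv1beta2.NewForConfig(config)
}
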
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go
new file mode 100644
index 0000000..e1d6025
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go
@@ -0,0 +1,174 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta2
+
+import (
+	"time"
+
+	v1beta2 "k8s.io/api/apps/v1beta2"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// ControllerRevisionsGetter has a method to return a ControllerRevisionInterface.
+// A group's client should implement this interface.
+type ControllerRevisionsGetter interface {
+	ControllerRevisions(namespace string) ControllerRevisionInterface
+}
+
+// ControllerRevisionInterface has methods to work with ControllerRevision resources.
+type ControllerRevisionInterface interface {
+	Create(*v1beta2.ControllerRevision) (*v1beta2.ControllerRevision, error)
+	Update(*v1beta2.ControllerRevision) (*v1beta2.ControllerRevision, error)
+	Delete(name string, options *v1.DeleteOptions) error
+	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
+	Get(name string, options v1.GetOptions) (*v1beta2.ControllerRevision, error)
+	List(opts v1.ListOptions) (*v1beta2.ControllerRevisionList, error)
+	Watch(opts v1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.ControllerRevision, err error)
+	ControllerRevisionExpansion
+}
+
+// controllerRevisions implements ControllerRevisionInterface
+type controllerRevisions struct {
+	client rest.Interface
+	ns     string
+}
+
+// newControllerRevisions returns a ControllerRevisions
+func newControllerRevisions(c *AppsV1beta2Client, namespace string) *controllerRevisions {
+	return &controllerRevisions{
+		client: c.RESTClient(),
+		ns:     namespace,
+	}
+}
+
+// Get takes the name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any.
+func (c *controllerRevisions) Get(name string, options v1.GetOptions) (result *v1beta2.ControllerRevision, err error) {
+	result = &v1beta2.ControllerRevision{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("controllerrevisions").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors.
+func (c *controllerRevisions) List(opts v1.ListOptions) (result *v1beta2.ControllerRevisionList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1beta2.ControllerRevisionList{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("controllerrevisions").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested controllerRevisions.
+func (c *controllerRevisions) Watch(opts v1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Namespace(c.ns).
+		Resource("controllerrevisions").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a controllerRevision and creates it. Returns the server's representation of the controllerRevision, and an error, if there is any.
+func (c *controllerRevisions) Create(controllerRevision *v1beta2.ControllerRevision) (result *v1beta2.ControllerRevision, err error) {
+	result = &v1beta2.ControllerRevision{}
+	err = c.client.Post().
+		Namespace(c.ns).
+		Resource("controllerrevisions").
+		Body(controllerRevision).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any.
+func (c *controllerRevisions) Update(controllerRevision *v1beta2.ControllerRevision) (result *v1beta2.ControllerRevision, err error) {
+	result = &v1beta2.ControllerRevision{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("controllerrevisions").
+		Name(controllerRevision.Name).
+		Body(controllerRevision).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes the name of the controllerRevision and deletes it. Returns an error if one occurs.
+func (c *controllerRevisions) Delete(name string, options *v1.DeleteOptions) error {
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("controllerrevisions").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *controllerRevisions) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("controllerrevisions").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched controllerRevision.
+func (c *controllerRevisions) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.ControllerRevision, err error) {
+	result = &v1beta2.ControllerRevision{}
+	err = c.client.Patch(pt).
+		Namespace(c.ns).
+		Resource("controllerrevisions").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
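
The ListOptions passed to List are encoded onto the request URL by VersionedParams; a hypothetical selector-driven listing (the label value is a placeholder):

// Sketch: list ControllerRevisions matching a label selector.
package main

import (
	"fmt"

	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	appsv1beta2 "k8s.io/client-go/kubernetes/typed/apps/v1beta2"
)

func listRevisions(c *appsv1beta2.AppsV1beta2Client, ns string) error {
	revs, err := c.ControllerRevisions(ns).List(v1.ListOptions{
		LabelSelector: "app=example",
	})
	if err != nil {
		return err
	}
	for _, r := range revs.Items {
		fmt.Println(r.Name, r.Revision)
	}
	return nil
}
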
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go
new file mode 100644
index 0000000..f8b7ac2
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go
@@ -0,0 +1,191 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta2
+
+import (
+	"time"
+
+	v1beta2 "k8s.io/api/apps/v1beta2"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// DaemonSetsGetter has a method to return a DaemonSetInterface.
+// A group's client should implement this interface.
+type DaemonSetsGetter interface {
+	DaemonSets(namespace string) DaemonSetInterface
+}
+
+// DaemonSetInterface has methods to work with DaemonSet resources.
+type DaemonSetInterface interface {
+	Create(*v1beta2.DaemonSet) (*v1beta2.DaemonSet, error)
+	Update(*v1beta2.DaemonSet) (*v1beta2.DaemonSet, error)
+	UpdateStatus(*v1beta2.DaemonSet) (*v1beta2.DaemonSet, error)
+	Delete(name string, options *v1.DeleteOptions) error
+	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
+	Get(name string, options v1.GetOptions) (*v1beta2.DaemonSet, error)
+	List(opts v1.ListOptions) (*v1beta2.DaemonSetList, error)
+	Watch(opts v1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.DaemonSet, err error)
+	DaemonSetExpansion
+}
+
+// daemonSets implements DaemonSetInterface
+type daemonSets struct {
+	client rest.Interface
+	ns     string
+}
+
+// newDaemonSets returns a DaemonSets
+func newDaemonSets(c *AppsV1beta2Client, namespace string) *daemonSets {
+	return &daemonSets{
+		client: c.RESTClient(),
+		ns:     namespace,
+	}
+}
+
+// Get takes the name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any.
+func (c *daemonSets) Get(name string, options v1.GetOptions) (result *v1beta2.DaemonSet, err error) {
+	result = &v1beta2.DaemonSet{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("daemonsets").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of DaemonSets that match those selectors.
+func (c *daemonSets) List(opts v1.ListOptions) (result *v1beta2.DaemonSetList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1beta2.DaemonSetList{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("daemonsets").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested daemonSets.
+func (c *daemonSets) Watch(opts v1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Namespace(c.ns).
+		Resource("daemonsets").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any.
+func (c *daemonSets) Create(daemonSet *v1beta2.DaemonSet) (result *v1beta2.DaemonSet, err error) {
+	result = &v1beta2.DaemonSet{}
+	err = c.client.Post().
+		Namespace(c.ns).
+		Resource("daemonsets").
+		Body(daemonSet).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any.
+func (c *daemonSets) Update(daemonSet *v1beta2.DaemonSet) (result *v1beta2.DaemonSet, err error) {
+	result = &v1beta2.DaemonSet{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("daemonsets").
+		Name(daemonSet.Name).
+		Body(daemonSet).
+		Do().
+		Into(result)
+	return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+
+func (c *daemonSets) UpdateStatus(daemonSet *v1beta2.DaemonSet) (result *v1beta2.DaemonSet, err error) {
+	result = &v1beta2.DaemonSet{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("daemonsets").
+		Name(daemonSet.Name).
+		SubResource("status").
+		Body(daemonSet).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes the name of the daemonSet and deletes it. Returns an error if one occurs.
+func (c *daemonSets) Delete(name string, options *v1.DeleteOptions) error {
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("daemonsets").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *daemonSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("daemonsets").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched daemonSet.
+func (c *daemonSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.DaemonSet, err error) {
+	result = &v1beta2.DaemonSet{}
+	err = c.client.Patch(pt).
+		Namespace(c.ns).
+		Resource("daemonsets").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
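
Watch sets opts.Watch before issuing the GET, and any TimeoutSeconds in the options bounds the server-side watch. A sketch of draining the resulting event channel (the 60-second timeout is an arbitrary choice):

// Sketch: stream DaemonSet events until the watch expires or is stopped.
package main

import (
	"fmt"

	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	appsv1beta2 "k8s.io/client-go/kubernetes/typed/apps/v1beta2"
)

func watchDaemonSets(c *appsv1beta2.AppsV1beta2Client, ns string) error {
	timeout := int64(60)
	w, err := c.DaemonSets(ns).Watch(v1.ListOptions{TimeoutSeconds: &timeout})
	if err != nil {
		return err
	}
	defer w.Stop()
	// ResultChan closes when the watch ends; each event carries the object.
	for event := range w.ResultChan() {
		fmt.Printf("%s %v\n", event.Type, event.Object.GetObjectKind().GroupVersionKind())
	}
	return nil
}
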
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go
new file mode 100644
index 0000000..510250b
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go
@@ -0,0 +1,191 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta2
+
+import (
+	"time"
+
+	v1beta2 "k8s.io/api/apps/v1beta2"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// DeploymentsGetter has a method to return a DeploymentInterface.
+// A group's client should implement this interface.
+type DeploymentsGetter interface {
+	Deployments(namespace string) DeploymentInterface
+}
+
+// DeploymentInterface has methods to work with Deployment resources.
+type DeploymentInterface interface {
+	Create(*v1beta2.Deployment) (*v1beta2.Deployment, error)
+	Update(*v1beta2.Deployment) (*v1beta2.Deployment, error)
+	UpdateStatus(*v1beta2.Deployment) (*v1beta2.Deployment, error)
+	Delete(name string, options *v1.DeleteOptions) error
+	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
+	Get(name string, options v1.GetOptions) (*v1beta2.Deployment, error)
+	List(opts v1.ListOptions) (*v1beta2.DeploymentList, error)
+	Watch(opts v1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.Deployment, err error)
+	DeploymentExpansion
+}
+
+// deployments implements DeploymentInterface
+type deployments struct {
+	client rest.Interface
+	ns     string
+}
+
+// newDeployments returns a Deployments
+func newDeployments(c *AppsV1beta2Client, namespace string) *deployments {
+	return &deployments{
+		client: c.RESTClient(),
+		ns:     namespace,
+	}
+}
+
+// Get takes the name of the deployment, and returns the corresponding deployment object, and an error if there is any.
+func (c *deployments) Get(name string, options v1.GetOptions) (result *v1beta2.Deployment, err error) {
+	result = &v1beta2.Deployment{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("deployments").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of Deployments that match those selectors.
+func (c *deployments) List(opts v1.ListOptions) (result *v1beta2.DeploymentList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1beta2.DeploymentList{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("deployments").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested deployments.
+func (c *deployments) Watch(opts v1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Namespace(c.ns).
+		Resource("deployments").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any.
+func (c *deployments) Create(deployment *v1beta2.Deployment) (result *v1beta2.Deployment, err error) {
+	result = &v1beta2.Deployment{}
+	err = c.client.Post().
+		Namespace(c.ns).
+		Resource("deployments").
+		Body(deployment).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any.
+func (c *deployments) Update(deployment *v1beta2.Deployment) (result *v1beta2.Deployment, err error) {
+	result = &v1beta2.Deployment{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("deployments").
+		Name(deployment.Name).
+		Body(deployment).
+		Do().
+		Into(result)
+	return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+
+func (c *deployments) UpdateStatus(deployment *v1beta2.Deployment) (result *v1beta2.Deployment, err error) {
+	result = &v1beta2.Deployment{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("deployments").
+		Name(deployment.Name).
+		SubResource("status").
+		Body(deployment).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes the name of the deployment and deletes it. Returns an error if one occurs.
+func (c *deployments) Delete(name string, options *v1.DeleteOptions) error {
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("deployments").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *deployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("deployments").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched deployment.
+func (c *deployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.Deployment, err error) {
+	result = &v1beta2.Deployment{}
+	err = c.client.Patch(pt).
+		Namespace(c.ns).
+		Resource("deployments").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
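
DeleteCollection combines DeleteOptions for the deletion itself with ListOptions selecting which objects to delete. A hypothetical bulk delete by label (the selector is a placeholder):

// Sketch: delete every matching v1beta2 Deployment in one call.
package main

import (
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	appsv1beta2 "k8s.io/client-go/kubernetes/typed/apps/v1beta2"
)

func deleteDeployments(c *appsv1beta2.AppsV1beta2Client, ns string) error {
	return c.Deployments(ns).DeleteCollection(
		&v1.DeleteOptions{},
		v1.ListOptions{LabelSelector: "app=example"},
	)
}
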
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/doc.go
new file mode 100644
index 0000000..56518ef
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1beta2
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/generated_expansion.go
new file mode 100644
index 0000000..6a21749
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/generated_expansion.go
@@ -0,0 +1,29 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta2
+
+type ControllerRevisionExpansion interface{}
+
+type DaemonSetExpansion interface{}
+
+type DeploymentExpansion interface{}
+
+type ReplicaSetExpansion interface{}
+
+type StatefulSetExpansion interface{}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go
new file mode 100644
index 0000000..7b73877
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go
@@ -0,0 +1,191 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta2
+
+import (
+	"time"
+
+	v1beta2 "k8s.io/api/apps/v1beta2"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// ReplicaSetsGetter has a method to return a ReplicaSetInterface.
+// A group's client should implement this interface.
+type ReplicaSetsGetter interface {
+	ReplicaSets(namespace string) ReplicaSetInterface
+}
+
+// ReplicaSetInterface has methods to work with ReplicaSet resources.
+type ReplicaSetInterface interface {
+	Create(*v1beta2.ReplicaSet) (*v1beta2.ReplicaSet, error)
+	Update(*v1beta2.ReplicaSet) (*v1beta2.ReplicaSet, error)
+	UpdateStatus(*v1beta2.ReplicaSet) (*v1beta2.ReplicaSet, error)
+	Delete(name string, options *v1.DeleteOptions) error
+	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
+	Get(name string, options v1.GetOptions) (*v1beta2.ReplicaSet, error)
+	List(opts v1.ListOptions) (*v1beta2.ReplicaSetList, error)
+	Watch(opts v1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.ReplicaSet, err error)
+	ReplicaSetExpansion
+}
+
+// replicaSets implements ReplicaSetInterface
+type replicaSets struct {
+	client rest.Interface
+	ns     string
+}
+
+// newReplicaSets returns a ReplicaSets
+func newReplicaSets(c *AppsV1beta2Client, namespace string) *replicaSets {
+	return &replicaSets{
+		client: c.RESTClient(),
+		ns:     namespace,
+	}
+}
+
+// Get takes the name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any.
+func (c *replicaSets) Get(name string, options v1.GetOptions) (result *v1beta2.ReplicaSet, err error) {
+	result = &v1beta2.ReplicaSet{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("replicasets").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of ReplicaSets that match those selectors.
+func (c *replicaSets) List(opts v1.ListOptions) (result *v1beta2.ReplicaSetList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1beta2.ReplicaSetList{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("replicasets").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested replicaSets.
+func (c *replicaSets) Watch(opts v1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Namespace(c.ns).
+		Resource("replicasets").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any.
+func (c *replicaSets) Create(replicaSet *v1beta2.ReplicaSet) (result *v1beta2.ReplicaSet, err error) {
+	result = &v1beta2.ReplicaSet{}
+	err = c.client.Post().
+		Namespace(c.ns).
+		Resource("replicasets").
+		Body(replicaSet).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any.
+func (c *replicaSets) Update(replicaSet *v1beta2.ReplicaSet) (result *v1beta2.ReplicaSet, err error) {
+	result = &v1beta2.ReplicaSet{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("replicasets").
+		Name(replicaSet.Name).
+		Body(replicaSet).
+		Do().
+		Into(result)
+	return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+
+func (c *replicaSets) UpdateStatus(replicaSet *v1beta2.ReplicaSet) (result *v1beta2.ReplicaSet, err error) {
+	result = &v1beta2.ReplicaSet{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("replicasets").
+		Name(replicaSet.Name).
+		SubResource("status").
+		Body(replicaSet).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes the name of the replicaSet and deletes it. Returns an error if one occurs.
+func (c *replicaSets) Delete(name string, options *v1.DeleteOptions) error {
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("replicasets").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *replicaSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("replicasets").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched replicaSet.
+func (c *replicaSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.ReplicaSet, err error) {
+	result = &v1beta2.ReplicaSet{}
+	err = c.client.Patch(pt).
+		Namespace(c.ns).
+		Resource("replicasets").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
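
Delete forwards the caller's DeleteOptions as the request body, which is how cascading behavior is selected. A sketch using foreground propagation (namespace and name are placeholders):

// Sketch: foreground-cascading delete of a ReplicaSet.
package main

import (
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	appsv1beta2 "k8s.io/client-go/kubernetes/typed/apps/v1beta2"
)

func deleteReplicaSet(c *appsv1beta2.AppsV1beta2Client, ns, name string) error {
	policy := v1.DeletePropagationForeground
	return c.ReplicaSets(ns).Delete(name, &v1.DeleteOptions{
		PropagationPolicy: &policy, // wait for dependents before removing the owner
	})
}
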
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go
new file mode 100644
index 0000000..de7c3db
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go
@@ -0,0 +1,222 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta2
+
+import (
+	"time"
+
+	v1beta2 "k8s.io/api/apps/v1beta2"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// StatefulSetsGetter has a method to return a StatefulSetInterface.
+// A group's client should implement this interface.
+type StatefulSetsGetter interface {
+	StatefulSets(namespace string) StatefulSetInterface
+}
+
+// StatefulSetInterface has methods to work with StatefulSet resources.
+type StatefulSetInterface interface {
+	Create(*v1beta2.StatefulSet) (*v1beta2.StatefulSet, error)
+	Update(*v1beta2.StatefulSet) (*v1beta2.StatefulSet, error)
+	UpdateStatus(*v1beta2.StatefulSet) (*v1beta2.StatefulSet, error)
+	Delete(name string, options *v1.DeleteOptions) error
+	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
+	Get(name string, options v1.GetOptions) (*v1beta2.StatefulSet, error)
+	List(opts v1.ListOptions) (*v1beta2.StatefulSetList, error)
+	Watch(opts v1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.StatefulSet, err error)
+	GetScale(statefulSetName string, options v1.GetOptions) (*v1beta2.Scale, error)
+	UpdateScale(statefulSetName string, scale *v1beta2.Scale) (*v1beta2.Scale, error)
+
+	StatefulSetExpansion
+}
+
+// statefulSets implements StatefulSetInterface
+type statefulSets struct {
+	client rest.Interface
+	ns     string
+}
+
+// newStatefulSets returns a StatefulSets
+func newStatefulSets(c *AppsV1beta2Client, namespace string) *statefulSets {
+	return &statefulSets{
+		client: c.RESTClient(),
+		ns:     namespace,
+	}
+}
+
+// Get takes the name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any.
+func (c *statefulSets) Get(name string, options v1.GetOptions) (result *v1beta2.StatefulSet, err error) {
+	result = &v1beta2.StatefulSet{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("statefulsets").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of StatefulSets that match those selectors.
+func (c *statefulSets) List(opts v1.ListOptions) (result *v1beta2.StatefulSetList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1beta2.StatefulSetList{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("statefulsets").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested statefulSets.
+func (c *statefulSets) Watch(opts v1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Namespace(c.ns).
+		Resource("statefulsets").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any.
+func (c *statefulSets) Create(statefulSet *v1beta2.StatefulSet) (result *v1beta2.StatefulSet, err error) {
+	result = &v1beta2.StatefulSet{}
+	err = c.client.Post().
+		Namespace(c.ns).
+		Resource("statefulsets").
+		Body(statefulSet).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any.
+func (c *statefulSets) Update(statefulSet *v1beta2.StatefulSet) (result *v1beta2.StatefulSet, err error) {
+	result = &v1beta2.StatefulSet{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("statefulsets").
+		Name(statefulSet.Name).
+		Body(statefulSet).
+		Do().
+		Into(result)
+	return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+
+func (c *statefulSets) UpdateStatus(statefulSet *v1beta2.StatefulSet) (result *v1beta2.StatefulSet, err error) {
+	result = &v1beta2.StatefulSet{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("statefulsets").
+		Name(statefulSet.Name).
+		SubResource("status").
+		Body(statefulSet).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes the name of the statefulSet and deletes it. Returns an error if one occurs.
+func (c *statefulSets) Delete(name string, options *v1.DeleteOptions) error {
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("statefulsets").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *statefulSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("statefulsets").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched statefulSet.
+func (c *statefulSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.StatefulSet, err error) {
+	result = &v1beta2.StatefulSet{}
+	err = c.client.Patch(pt).
+		Namespace(c.ns).
+		Resource("statefulsets").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
+
+// GetScale takes the name of the statefulSet, and returns the corresponding v1beta2.Scale object, and an error if there is any.
+func (c *statefulSets) GetScale(statefulSetName string, options v1.GetOptions) (result *v1beta2.Scale, err error) {
+	result = &v1beta2.Scale{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("statefulsets").
+		Name(statefulSetName).
+		SubResource("scale").
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any.
+func (c *statefulSets) UpdateScale(statefulSetName string, scale *v1beta2.Scale) (result *v1beta2.Scale, err error) {
+	result = &v1beta2.Scale{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("statefulsets").
+		Name(statefulSetName).
+		SubResource("scale").
+		Body(scale).
+		Do().
+		Into(result)
+	return
+}
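
Unlike the v1beta1 client earlier in this change, this interface also exposes the scale subresource. A sketch of a read-modify-write against it (namespace and name are placeholders):

// Sketch: set a StatefulSet's replica count through the scale subresource.
package main

import (
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	appsv1beta2 "k8s.io/client-go/kubernetes/typed/apps/v1beta2"
)

func setScale(c *appsv1beta2.AppsV1beta2Client, ns, name string, replicas int32) error {
	scale, err := c.StatefulSets(ns).GetScale(name, v1.GetOptions{})
	if err != nil {
		return err
	}
	scale.Spec.Replicas = replicas
	_, err = c.StatefulSets(ns).UpdateScale(name, scale)
	return err
}

Scaling this way submits only the Scale object rather than the full StatefulSet spec, so it cannot clobber concurrent changes to unrelated fields.
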
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditregistration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditregistration_client.go
new file mode 100644
index 0000000..f007b05
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditregistration_client.go
@@ -0,0 +1,90 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	v1alpha1 "k8s.io/api/auditregistration/v1alpha1"
+	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+type AuditregistrationV1alpha1Interface interface {
+	RESTClient() rest.Interface
+	AuditSinksGetter
+}
+
+// AuditregistrationV1alpha1Client is used to interact with features provided by the auditregistration.k8s.io group.
+type AuditregistrationV1alpha1Client struct {
+	restClient rest.Interface
+}
+
+func (c *AuditregistrationV1alpha1Client) AuditSinks() AuditSinkInterface {
+	return newAuditSinks(c)
+}
+
+// NewForConfig creates a new AuditregistrationV1alpha1Client for the given config.
+func NewForConfig(c *rest.Config) (*AuditregistrationV1alpha1Client, error) {
+	config := *c
+	if err := setConfigDefaults(&config); err != nil {
+		return nil, err
+	}
+	client, err := rest.RESTClientFor(&config)
+	if err != nil {
+		return nil, err
+	}
+	return &AuditregistrationV1alpha1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new AuditregistrationV1alpha1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *AuditregistrationV1alpha1Client {
+	client, err := NewForConfig(c)
+	if err != nil {
+		panic(err)
+	}
+	return client
+}
+
+// New creates a new AuditregistrationV1alpha1Client for the given RESTClient.
+func New(c rest.Interface) *AuditregistrationV1alpha1Client {
+	return &AuditregistrationV1alpha1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+	gv := v1alpha1.SchemeGroupVersion
+	config.GroupVersion = &gv
+	config.APIPath = "/apis"
+	config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
+
+	if config.UserAgent == "" {
+		config.UserAgent = rest.DefaultKubernetesUserAgent()
+	}
+
+	return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with the API server by this client implementation.
+func (c *AuditregistrationV1alpha1Client) RESTClient() rest.Interface {
+	if c == nil {
+		return nil
+	}
+	return c.restClient
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditsink.go b/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditsink.go
new file mode 100644
index 0000000..414d480
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditsink.go
@@ -0,0 +1,164 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	"time"
+
+	v1alpha1 "k8s.io/api/auditregistration/v1alpha1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// AuditSinksGetter has a method to return an AuditSinkInterface.
+// A group's client should implement this interface.
+type AuditSinksGetter interface {
+	AuditSinks() AuditSinkInterface
+}
+
+// AuditSinkInterface has methods to work with AuditSink resources.
+type AuditSinkInterface interface {
+	Create(*v1alpha1.AuditSink) (*v1alpha1.AuditSink, error)
+	Update(*v1alpha1.AuditSink) (*v1alpha1.AuditSink, error)
+	Delete(name string, options *v1.DeleteOptions) error
+	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
+	Get(name string, options v1.GetOptions) (*v1alpha1.AuditSink, error)
+	List(opts v1.ListOptions) (*v1alpha1.AuditSinkList, error)
+	Watch(opts v1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.AuditSink, err error)
+	AuditSinkExpansion
+}
+
+// auditSinks implements AuditSinkInterface
+type auditSinks struct {
+	client rest.Interface
+}
+
+// newAuditSinks returns an AuditSinks
+func newAuditSinks(c *AuditregistrationV1alpha1Client) *auditSinks {
+	return &auditSinks{
+		client: c.RESTClient(),
+	}
+}
+
+// Get takes the name of the auditSink, and returns the corresponding auditSink object, and an error if there is any.
+func (c *auditSinks) Get(name string, options v1.GetOptions) (result *v1alpha1.AuditSink, err error) {
+	result = &v1alpha1.AuditSink{}
+	err = c.client.Get().
+		Resource("auditsinks").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of AuditSinks that match those selectors.
+func (c *auditSinks) List(opts v1.ListOptions) (result *v1alpha1.AuditSinkList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1alpha1.AuditSinkList{}
+	err = c.client.Get().
+		Resource("auditsinks").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested auditSinks.
+func (c *auditSinks) Watch(opts v1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Resource("auditsinks").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of an auditSink and creates it. Returns the server's representation of the auditSink, and an error, if there is any.
+func (c *auditSinks) Create(auditSink *v1alpha1.AuditSink) (result *v1alpha1.AuditSink, err error) {
+	result = &v1alpha1.AuditSink{}
+	err = c.client.Post().
+		Resource("auditsinks").
+		Body(auditSink).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of an auditSink and updates it. Returns the server's representation of the auditSink, and an error, if there is any.
+func (c *auditSinks) Update(auditSink *v1alpha1.AuditSink) (result *v1alpha1.AuditSink, err error) {
+	result = &v1alpha1.AuditSink{}
+	err = c.client.Put().
+		Resource("auditsinks").
+		Name(auditSink.Name).
+		Body(auditSink).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes the name of the auditSink and deletes it. Returns an error if one occurs.
+func (c *auditSinks) Delete(name string, options *v1.DeleteOptions) error {
+	return c.client.Delete().
+		Resource("auditsinks").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *auditSinks) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Resource("auditsinks").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched auditSink.
+func (c *auditSinks) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.AuditSink, err error) {
+	result = &v1alpha1.AuditSink{}
+	err = c.client.Patch(pt).
+		Resource("auditsinks").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
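
AuditSink is cluster-scoped, so unlike the namespaced clients above there is no Namespace(...) segment in the request path and the getter takes no namespace argument. A minimal sketch:

// Sketch: list all AuditSinks in the cluster.
package main

import (
	"fmt"

	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	auditv1alpha1 "k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1"
)

func listAuditSinks(c *auditv1alpha1.AuditregistrationV1alpha1Client) error {
	sinks, err := c.AuditSinks().List(v1.ListOptions{})
	if err != nil {
		return err
	}
	for _, s := range sinks.Items {
		fmt.Println(s.Name)
	}
	return nil
}
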
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/doc.go
new file mode 100644
index 0000000..df51baa
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1alpha1
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/generated_expansion.go
new file mode 100644
index 0000000..f0f5117
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/generated_expansion.go
@@ -0,0 +1,21 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+type AuditSinkExpansion interface{}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go
new file mode 100644
index 0000000..3bdcee5
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go
@@ -0,0 +1,90 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	v1 "k8s.io/api/authentication/v1"
+	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+type AuthenticationV1Interface interface {
+	RESTClient() rest.Interface
+	TokenReviewsGetter
+}
+
+// AuthenticationV1Client is used to interact with features provided by the authentication.k8s.io group.
+type AuthenticationV1Client struct {
+	restClient rest.Interface
+}
+
+func (c *AuthenticationV1Client) TokenReviews() TokenReviewInterface {
+	return newTokenReviews(c)
+}
+
+// NewForConfig creates a new AuthenticationV1Client for the given config.
+func NewForConfig(c *rest.Config) (*AuthenticationV1Client, error) {
+	config := *c
+	if err := setConfigDefaults(&config); err != nil {
+		return nil, err
+	}
+	client, err := rest.RESTClientFor(&config)
+	if err != nil {
+		return nil, err
+	}
+	return &AuthenticationV1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new AuthenticationV1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *AuthenticationV1Client {
+	client, err := NewForConfig(c)
+	if err != nil {
+		panic(err)
+	}
+	return client
+}
+
+// New creates a new AuthenticationV1Client for the given RESTClient.
+func New(c rest.Interface) *AuthenticationV1Client {
+	return &AuthenticationV1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+	gv := v1.SchemeGroupVersion
+	config.GroupVersion = &gv
+	config.APIPath = "/apis"
+	config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
+
+	if config.UserAgent == "" {
+		config.UserAgent = rest.DefaultKubernetesUserAgent()
+	}
+
+	return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with the API server by this client implementation.
+func (c *AuthenticationV1Client) RESTClient() rest.Interface {
+	if c == nil {
+		return nil
+	}
+	return c.restClient
+}
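As a usage note, NewForConfig and NewForConfigOrDie above can also be called on the typed group package directly, without going through the aggregate clientset; a minimal sketch, assuming a kubeconfig path:

package main

import (
	"fmt"

	authnv1 "k8s.io/client-go/kubernetes/typed/authentication/v1"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig") // assumed path
	if err != nil {
		panic(err)
	}
	// NewForConfig applies setConfigDefaults (group version, /apis path,
	// negotiated serializer) before building the underlying REST client.
	authClient, err := authnv1.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	fmt.Println("token review client ready:", authClient.TokenReviews() != nil)
}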
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/doc.go
new file mode 100644
index 0000000..3af5d05
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go
new file mode 100644
index 0000000..177209e
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go
@@ -0,0 +1,19 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go
new file mode 100644
index 0000000..25a8d6a
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go
@@ -0,0 +1,46 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	rest "k8s.io/client-go/rest"
+)
+
+// TokenReviewsGetter has a method to return a TokenReviewInterface.
+// A group's client should implement this interface.
+type TokenReviewsGetter interface {
+	TokenReviews() TokenReviewInterface
+}
+
+// TokenReviewInterface has methods to work with TokenReview resources.
+type TokenReviewInterface interface {
+	TokenReviewExpansion
+}
+
+// tokenReviews implements TokenReviewInterface
+type tokenReviews struct {
+	client rest.Interface
+}
+
+// newTokenReviews returns a TokenReviews
+func newTokenReviews(c *AuthenticationV1Client) *tokenReviews {
+	return &tokenReviews{
+		client: c.RESTClient(),
+	}
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview_expansion.go
new file mode 100644
index 0000000..ea21f1b
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview_expansion.go
@@ -0,0 +1,35 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	authenticationapi "k8s.io/api/authentication/v1"
+)
+
+type TokenReviewExpansion interface {
+	Create(tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error)
+}
+
+func (c *tokenReviews) Create(tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) {
+	result = &authenticationapi.TokenReview{}
+	err = c.client.Post().
+		Resource("tokenreviews").
+		Body(tokenReview).
+		Do().
+		Into(result)
+	return
+}
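A short sketch of this expansion method in use: posting a TokenReview so the API server validates a bearer token. The helper package is a hypothetical wrapper, not part of this change.

package authutil // hypothetical helper package

import (
	authnapi "k8s.io/api/authentication/v1"
	"k8s.io/client-go/kubernetes"
)

// ValidateToken posts a TokenReview and reports whether the API server
// accepted the bearer token. Create is the only verb this resource
// supports; the server fills in Status on the response.
func ValidateToken(cs kubernetes.Interface, token string) (bool, error) {
	review := &authnapi.TokenReview{
		Spec: authnapi.TokenReviewSpec{Token: token},
	}
	result, err := cs.AuthenticationV1().TokenReviews().Create(review)
	if err != nil {
		return false, err
	}
	return result.Status.Authenticated, nil
}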
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go
new file mode 100644
index 0000000..7f3334a
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go
@@ -0,0 +1,90 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	v1beta1 "k8s.io/api/authentication/v1beta1"
+	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+type AuthenticationV1beta1Interface interface {
+	RESTClient() rest.Interface
+	TokenReviewsGetter
+}
+
+// AuthenticationV1beta1Client is used to interact with features provided by the authentication.k8s.io group.
+type AuthenticationV1beta1Client struct {
+	restClient rest.Interface
+}
+
+func (c *AuthenticationV1beta1Client) TokenReviews() TokenReviewInterface {
+	return newTokenReviews(c)
+}
+
+// NewForConfig creates a new AuthenticationV1beta1Client for the given config.
+func NewForConfig(c *rest.Config) (*AuthenticationV1beta1Client, error) {
+	config := *c
+	if err := setConfigDefaults(&config); err != nil {
+		return nil, err
+	}
+	client, err := rest.RESTClientFor(&config)
+	if err != nil {
+		return nil, err
+	}
+	return &AuthenticationV1beta1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new AuthenticationV1beta1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *AuthenticationV1beta1Client {
+	client, err := NewForConfig(c)
+	if err != nil {
+		panic(err)
+	}
+	return client
+}
+
+// New creates a new AuthenticationV1beta1Client for the given RESTClient.
+func New(c rest.Interface) *AuthenticationV1beta1Client {
+	return &AuthenticationV1beta1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+	gv := v1beta1.SchemeGroupVersion
+	config.GroupVersion = &gv
+	config.APIPath = "/apis"
+	config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
+
+	if config.UserAgent == "" {
+		config.UserAgent = rest.DefaultKubernetesUserAgent()
+	}
+
+	return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with the API server by this client implementation.
+func (c *AuthenticationV1beta1Client) RESTClient() rest.Interface {
+	if c == nil {
+		return nil
+	}
+	return c.restClient
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/doc.go
new file mode 100644
index 0000000..7711019
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1beta1
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go
new file mode 100644
index 0000000..f6df769
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go
@@ -0,0 +1,19 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go
new file mode 100644
index 0000000..0ac3561
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go
@@ -0,0 +1,46 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	rest "k8s.io/client-go/rest"
+)
+
+// TokenReviewsGetter has a method to return a TokenReviewInterface.
+// A group's client should implement this interface.
+type TokenReviewsGetter interface {
+	TokenReviews() TokenReviewInterface
+}
+
+// TokenReviewInterface has methods to work with TokenReview resources.
+type TokenReviewInterface interface {
+	TokenReviewExpansion
+}
+
+// tokenReviews implements TokenReviewInterface
+type tokenReviews struct {
+	client rest.Interface
+}
+
+// newTokenReviews returns a TokenReviews
+func newTokenReviews(c *AuthenticationV1beta1Client) *tokenReviews {
+	return &tokenReviews{
+		client: c.RESTClient(),
+	}
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview_expansion.go
new file mode 100644
index 0000000..8f186fa
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview_expansion.go
@@ -0,0 +1,35 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	authenticationapi "k8s.io/api/authentication/v1beta1"
+)
+
+type TokenReviewExpansion interface {
+	Create(tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error)
+}
+
+func (c *tokenReviews) Create(tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) {
+	result = &authenticationapi.TokenReview{}
+	err = c.client.Post().
+		Resource("tokenreviews").
+		Body(tokenReview).
+		Do().
+		Into(result)
+	return
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go
new file mode 100644
index 0000000..e84b900
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go
@@ -0,0 +1,105 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	v1 "k8s.io/api/authorization/v1"
+	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+type AuthorizationV1Interface interface {
+	RESTClient() rest.Interface
+	LocalSubjectAccessReviewsGetter
+	SelfSubjectAccessReviewsGetter
+	SelfSubjectRulesReviewsGetter
+	SubjectAccessReviewsGetter
+}
+
+// AuthorizationV1Client is used to interact with features provided by the authorization.k8s.io group.
+type AuthorizationV1Client struct {
+	restClient rest.Interface
+}
+
+func (c *AuthorizationV1Client) LocalSubjectAccessReviews(namespace string) LocalSubjectAccessReviewInterface {
+	return newLocalSubjectAccessReviews(c, namespace)
+}
+
+func (c *AuthorizationV1Client) SelfSubjectAccessReviews() SelfSubjectAccessReviewInterface {
+	return newSelfSubjectAccessReviews(c)
+}
+
+func (c *AuthorizationV1Client) SelfSubjectRulesReviews() SelfSubjectRulesReviewInterface {
+	return newSelfSubjectRulesReviews(c)
+}
+
+func (c *AuthorizationV1Client) SubjectAccessReviews() SubjectAccessReviewInterface {
+	return newSubjectAccessReviews(c)
+}
+
+// NewForConfig creates a new AuthorizationV1Client for the given config.
+func NewForConfig(c *rest.Config) (*AuthorizationV1Client, error) {
+	config := *c
+	if err := setConfigDefaults(&config); err != nil {
+		return nil, err
+	}
+	client, err := rest.RESTClientFor(&config)
+	if err != nil {
+		return nil, err
+	}
+	return &AuthorizationV1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new AuthorizationV1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *AuthorizationV1Client {
+	client, err := NewForConfig(c)
+	if err != nil {
+		panic(err)
+	}
+	return client
+}
+
+// New creates a new AuthorizationV1Client for the given RESTClient.
+func New(c rest.Interface) *AuthorizationV1Client {
+	return &AuthorizationV1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+	gv := v1.SchemeGroupVersion
+	config.GroupVersion = &gv
+	config.APIPath = "/apis"
+	config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
+
+	if config.UserAgent == "" {
+		config.UserAgent = rest.DefaultKubernetesUserAgent()
+	}
+
+	return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with the API server by this client implementation.
+func (c *AuthorizationV1Client) RESTClient() rest.Interface {
+	if c == nil {
+		return nil
+	}
+	return c.restClient
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/doc.go
new file mode 100644
index 0000000..3af5d05
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/generated_expansion.go
new file mode 100644
index 0000000..177209e
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/generated_expansion.go
@@ -0,0 +1,19 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go
new file mode 100644
index 0000000..0292c78
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go
@@ -0,0 +1,48 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	rest "k8s.io/client-go/rest"
+)
+
+// LocalSubjectAccessReviewsGetter has a method to return a LocalSubjectAccessReviewInterface.
+// A group's client should implement this interface.
+type LocalSubjectAccessReviewsGetter interface {
+	LocalSubjectAccessReviews(namespace string) LocalSubjectAccessReviewInterface
+}
+
+// LocalSubjectAccessReviewInterface has methods to work with LocalSubjectAccessReview resources.
+type LocalSubjectAccessReviewInterface interface {
+	LocalSubjectAccessReviewExpansion
+}
+
+// localSubjectAccessReviews implements LocalSubjectAccessReviewInterface
+type localSubjectAccessReviews struct {
+	client rest.Interface
+	ns     string
+}
+
+// newLocalSubjectAccessReviews returns a LocalSubjectAccessReviews
+func newLocalSubjectAccessReviews(c *AuthorizationV1Client, namespace string) *localSubjectAccessReviews {
+	return &localSubjectAccessReviews{
+		client: c.RESTClient(),
+		ns:     namespace,
+	}
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview_expansion.go
new file mode 100644
index 0000000..0c123b0
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview_expansion.go
@@ -0,0 +1,36 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	authorizationapi "k8s.io/api/authorization/v1"
+)
+
+type LocalSubjectAccessReviewExpansion interface {
+	Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error)
+}
+
+func (c *localSubjectAccessReviews) Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) {
+	result = &authorizationapi.LocalSubjectAccessReview{}
+	err = c.client.Post().
+		Namespace(c.ns).
+		Resource("localsubjectaccessreviews").
+		Body(sar).
+		Do().
+		Into(result)
+	return
+}
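A sketch of the namespaced review in use: asking whether a given user may act on a resource within one namespace. The helper package and parameter names are hypothetical.

package authzutil // hypothetical helper package

import (
	authzapi "k8s.io/api/authorization/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// CanUserInNamespace asks the API server whether user may perform verb on
// resource inside ns. The review object's namespace must match the
// namespace set in its ResourceAttributes.
func CanUserInNamespace(cs kubernetes.Interface, user, ns, verb, resource string) (bool, error) {
	lsar := &authzapi.LocalSubjectAccessReview{
		ObjectMeta: metav1.ObjectMeta{Namespace: ns},
		Spec: authzapi.SubjectAccessReviewSpec{
			User: user,
			ResourceAttributes: &authzapi.ResourceAttributes{
				Namespace: ns,
				Verb:      verb,
				Resource:  resource,
			},
		},
	}
	res, err := cs.AuthorizationV1().LocalSubjectAccessReviews(ns).Create(lsar)
	if err != nil {
		return false, err
	}
	return res.Status.Allowed, nil
}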
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go
new file mode 100644
index 0000000..1e3a458
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go
@@ -0,0 +1,46 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	rest "k8s.io/client-go/rest"
+)
+
+// SelfSubjectAccessReviewsGetter has a method to return a SelfSubjectAccessReviewInterface.
+// A group's client should implement this interface.
+type SelfSubjectAccessReviewsGetter interface {
+	SelfSubjectAccessReviews() SelfSubjectAccessReviewInterface
+}
+
+// SelfSubjectAccessReviewInterface has methods to work with SelfSubjectAccessReview resources.
+type SelfSubjectAccessReviewInterface interface {
+	SelfSubjectAccessReviewExpansion
+}
+
+// selfSubjectAccessReviews implements SelfSubjectAccessReviewInterface
+type selfSubjectAccessReviews struct {
+	client rest.Interface
+}
+
+// newSelfSubjectAccessReviews returns a SelfSubjectAccessReviews
+func newSelfSubjectAccessReviews(c *AuthorizationV1Client) *selfSubjectAccessReviews {
+	return &selfSubjectAccessReviews{
+		client: c.RESTClient(),
+	}
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview_expansion.go
new file mode 100644
index 0000000..5b70a27
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview_expansion.go
@@ -0,0 +1,35 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	authorizationapi "k8s.io/api/authorization/v1"
+)
+
+type SelfSubjectAccessReviewExpansion interface {
+	Create(sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error)
+}
+
+func (c *selfSubjectAccessReviews) Create(sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) {
+	result = &authorizationapi.SelfSubjectAccessReview{}
+	err = c.client.Post().
+		Resource("selfsubjectaccessreviews").
+		Body(sar).
+		Do().
+		Into(result)
+	return
+}
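A sketch of the "can I?" pattern this client enables, the same check that kubectl auth can-i performs; the helper package name is hypothetical.

package authzutil // hypothetical helper package

import (
	authzapi "k8s.io/api/authorization/v1"
	"k8s.io/client-go/kubernetes"
)

// CanI checks whether the credentials behind cs may perform verb on
// resource in ns; the subject is implicit, so the spec carries no User field.
func CanI(cs kubernetes.Interface, ns, verb, resource string) (bool, error) {
	ssar := &authzapi.SelfSubjectAccessReview{
		Spec: authzapi.SelfSubjectAccessReviewSpec{
			ResourceAttributes: &authzapi.ResourceAttributes{
				Namespace: ns,
				Verb:      verb,
				Resource:  resource,
			},
		},
	}
	res, err := cs.AuthorizationV1().SelfSubjectAccessReviews().Create(ssar)
	if err != nil {
		return false, err
	}
	return res.Status.Allowed, nil
}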
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go
new file mode 100644
index 0000000..50a0233
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go
@@ -0,0 +1,46 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	rest "k8s.io/client-go/rest"
+)
+
+// SelfSubjectRulesReviewsGetter has a method to return a SelfSubjectRulesReviewInterface.
+// A group's client should implement this interface.
+type SelfSubjectRulesReviewsGetter interface {
+	SelfSubjectRulesReviews() SelfSubjectRulesReviewInterface
+}
+
+// SelfSubjectRulesReviewInterface has methods to work with SelfSubjectRulesReview resources.
+type SelfSubjectRulesReviewInterface interface {
+	SelfSubjectRulesReviewExpansion
+}
+
+// selfSubjectRulesReviews implements SelfSubjectRulesReviewInterface
+type selfSubjectRulesReviews struct {
+	client rest.Interface
+}
+
+// newSelfSubjectRulesReviews returns a SelfSubjectRulesReviews
+func newSelfSubjectRulesReviews(c *AuthorizationV1Client) *selfSubjectRulesReviews {
+	return &selfSubjectRulesReviews{
+		client: c.RESTClient(),
+	}
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview_expansion.go
new file mode 100644
index 0000000..e2cad88
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview_expansion.go
@@ -0,0 +1,35 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	authorizationapi "k8s.io/api/authorization/v1"
+)
+
+type SelfSubjectRulesReviewExpansion interface {
+	Create(srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error)
+}
+
+func (c *selfSubjectRulesReviews) Create(srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error) {
+	result = &authorizationapi.SelfSubjectRulesReview{}
+	err = c.client.Post().
+		Resource("selfsubjectrulesreviews").
+		Body(srr).
+		Do().
+		Into(result)
+	return
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go
new file mode 100644
index 0000000..9c09008
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go
@@ -0,0 +1,46 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	rest "k8s.io/client-go/rest"
+)
+
+// SubjectAccessReviewsGetter has a method to return a SubjectAccessReviewInterface.
+// A group's client should implement this interface.
+type SubjectAccessReviewsGetter interface {
+	SubjectAccessReviews() SubjectAccessReviewInterface
+}
+
+// SubjectAccessReviewInterface has methods to work with SubjectAccessReview resources.
+type SubjectAccessReviewInterface interface {
+	SubjectAccessReviewExpansion
+}
+
+// subjectAccessReviews implements SubjectAccessReviewInterface
+type subjectAccessReviews struct {
+	client rest.Interface
+}
+
+// newSubjectAccessReviews returns a SubjectAccessReviews
+func newSubjectAccessReviews(c *AuthorizationV1Client) *subjectAccessReviews {
+	return &subjectAccessReviews{
+		client: c.RESTClient(),
+	}
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview_expansion.go
new file mode 100644
index 0000000..b5ed87d
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview_expansion.go
@@ -0,0 +1,36 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	authorizationapi "k8s.io/api/authorization/v1"
+)
+
+// The SubjectAccessReviewExpansion interface allows manually adding extra methods to the AuthorizationInterface.
+type SubjectAccessReviewExpansion interface {
+	Create(sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error)
+}
+
+func (c *subjectAccessReviews) Create(sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) {
+	result = &authorizationapi.SubjectAccessReview{}
+	err = c.client.Post().
+		Resource("subjectaccessreviews").
+		Body(sar).
+		Do().
+		Into(result)
+	return
+}
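For contrast with the local variant above, a sketch of the cluster-scoped review: it omits the namespace, so it also covers cluster-scoped resources such as nodes. Helper package and names are hypothetical.

package authzutil // hypothetical helper package

import (
	authzapi "k8s.io/api/authorization/v1"
	"k8s.io/client-go/kubernetes"
)

// CanUserClusterWide runs the cluster-scoped review; leaving the namespace
// empty makes the check apply across the cluster.
func CanUserClusterWide(cs kubernetes.Interface, user, verb, resource string) (bool, string, error) {
	sar := &authzapi.SubjectAccessReview{
		Spec: authzapi.SubjectAccessReviewSpec{
			User: user,
			ResourceAttributes: &authzapi.ResourceAttributes{
				Verb:     verb,
				Resource: resource,
			},
		},
	}
	res, err := cs.AuthorizationV1().SubjectAccessReviews().Create(sar)
	if err != nil {
		return false, "", err
	}
	return res.Status.Allowed, res.Status.Reason, nil
}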
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go
new file mode 100644
index 0000000..7f236f6
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go
@@ -0,0 +1,105 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	v1beta1 "k8s.io/api/authorization/v1beta1"
+	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+type AuthorizationV1beta1Interface interface {
+	RESTClient() rest.Interface
+	LocalSubjectAccessReviewsGetter
+	SelfSubjectAccessReviewsGetter
+	SelfSubjectRulesReviewsGetter
+	SubjectAccessReviewsGetter
+}
+
+// AuthorizationV1beta1Client is used to interact with features provided by the authorization.k8s.io group.
+type AuthorizationV1beta1Client struct {
+	restClient rest.Interface
+}
+
+func (c *AuthorizationV1beta1Client) LocalSubjectAccessReviews(namespace string) LocalSubjectAccessReviewInterface {
+	return newLocalSubjectAccessReviews(c, namespace)
+}
+
+func (c *AuthorizationV1beta1Client) SelfSubjectAccessReviews() SelfSubjectAccessReviewInterface {
+	return newSelfSubjectAccessReviews(c)
+}
+
+func (c *AuthorizationV1beta1Client) SelfSubjectRulesReviews() SelfSubjectRulesReviewInterface {
+	return newSelfSubjectRulesReviews(c)
+}
+
+func (c *AuthorizationV1beta1Client) SubjectAccessReviews() SubjectAccessReviewInterface {
+	return newSubjectAccessReviews(c)
+}
+
+// NewForConfig creates a new AuthorizationV1beta1Client for the given config.
+func NewForConfig(c *rest.Config) (*AuthorizationV1beta1Client, error) {
+	config := *c
+	if err := setConfigDefaults(&config); err != nil {
+		return nil, err
+	}
+	client, err := rest.RESTClientFor(&config)
+	if err != nil {
+		return nil, err
+	}
+	return &AuthorizationV1beta1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new AuthorizationV1beta1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *AuthorizationV1beta1Client {
+	client, err := NewForConfig(c)
+	if err != nil {
+		panic(err)
+	}
+	return client
+}
+
+// New creates a new AuthorizationV1beta1Client for the given RESTClient.
+func New(c rest.Interface) *AuthorizationV1beta1Client {
+	return &AuthorizationV1beta1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+	gv := v1beta1.SchemeGroupVersion
+	config.GroupVersion = &gv
+	config.APIPath = "/apis"
+	config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
+
+	if config.UserAgent == "" {
+		config.UserAgent = rest.DefaultKubernetesUserAgent()
+	}
+
+	return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with the API server by this client implementation.
+func (c *AuthorizationV1beta1Client) RESTClient() rest.Interface {
+	if c == nil {
+		return nil
+	}
+	return c.restClient
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/doc.go
new file mode 100644
index 0000000..7711019
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1beta1
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/generated_expansion.go
new file mode 100644
index 0000000..f6df769
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/generated_expansion.go
@@ -0,0 +1,19 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go
new file mode 100644
index 0000000..f5e86a7
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go
@@ -0,0 +1,48 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	rest "k8s.io/client-go/rest"
+)
+
+// LocalSubjectAccessReviewsGetter has a method to return a LocalSubjectAccessReviewInterface.
+// A group's client should implement this interface.
+type LocalSubjectAccessReviewsGetter interface {
+	LocalSubjectAccessReviews(namespace string) LocalSubjectAccessReviewInterface
+}
+
+// LocalSubjectAccessReviewInterface has methods to work with LocalSubjectAccessReview resources.
+type LocalSubjectAccessReviewInterface interface {
+	LocalSubjectAccessReviewExpansion
+}
+
+// localSubjectAccessReviews implements LocalSubjectAccessReviewInterface
+type localSubjectAccessReviews struct {
+	client rest.Interface
+	ns     string
+}
+
+// newLocalSubjectAccessReviews returns a LocalSubjectAccessReviews
+func newLocalSubjectAccessReviews(c *AuthorizationV1beta1Client, namespace string) *localSubjectAccessReviews {
+	return &localSubjectAccessReviews{
+		client: c.RESTClient(),
+		ns:     namespace,
+	}
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview_expansion.go
new file mode 100644
index 0000000..bf1b8a5
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview_expansion.go
@@ -0,0 +1,36 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	authorizationapi "k8s.io/api/authorization/v1beta1"
+)
+
+type LocalSubjectAccessReviewExpansion interface {
+	Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error)
+}
+
+func (c *localSubjectAccessReviews) Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) {
+	result = &authorizationapi.LocalSubjectAccessReview{}
+	err = c.client.Post().
+		Namespace(c.ns).
+		Resource("localsubjectaccessreviews").
+		Body(sar).
+		Do().
+		Into(result)
+	return
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go
new file mode 100644
index 0000000..906712c
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go
@@ -0,0 +1,46 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	rest "k8s.io/client-go/rest"
+)
+
+// SelfSubjectAccessReviewsGetter has a method to return a SelfSubjectAccessReviewInterface.
+// A group's client should implement this interface.
+type SelfSubjectAccessReviewsGetter interface {
+	SelfSubjectAccessReviews() SelfSubjectAccessReviewInterface
+}
+
+// SelfSubjectAccessReviewInterface has methods to work with SelfSubjectAccessReview resources.
+type SelfSubjectAccessReviewInterface interface {
+	SelfSubjectAccessReviewExpansion
+}
+
+// selfSubjectAccessReviews implements SelfSubjectAccessReviewInterface
+type selfSubjectAccessReviews struct {
+	client rest.Interface
+}
+
+// newSelfSubjectAccessReviews returns a SelfSubjectAccessReviews
+func newSelfSubjectAccessReviews(c *AuthorizationV1beta1Client) *selfSubjectAccessReviews {
+	return &selfSubjectAccessReviews{
+		client: c.RESTClient(),
+	}
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview_expansion.go
new file mode 100644
index 0000000..58fecfd
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview_expansion.go
@@ -0,0 +1,35 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	authorizationapi "k8s.io/api/authorization/v1beta1"
+)
+
+type SelfSubjectAccessReviewExpansion interface {
+	Create(sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error)
+}
+
+func (c *selfSubjectAccessReviews) Create(sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) {
+	result = &authorizationapi.SelfSubjectAccessReview{}
+	err = c.client.Post().
+		Resource("selfsubjectaccessreviews").
+		Body(sar).
+		Do().
+		Into(result)
+	return
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go
new file mode 100644
index 0000000..56c0f99
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go
@@ -0,0 +1,46 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	rest "k8s.io/client-go/rest"
+)
+
+// SelfSubjectRulesReviewsGetter has a method to return a SelfSubjectRulesReviewInterface.
+// A group's client should implement this interface.
+type SelfSubjectRulesReviewsGetter interface {
+	SelfSubjectRulesReviews() SelfSubjectRulesReviewInterface
+}
+
+// SelfSubjectRulesReviewInterface has methods to work with SelfSubjectRulesReview resources.
+type SelfSubjectRulesReviewInterface interface {
+	SelfSubjectRulesReviewExpansion
+}
+
+// selfSubjectRulesReviews implements SelfSubjectRulesReviewInterface
+type selfSubjectRulesReviews struct {
+	client rest.Interface
+}
+
+// newSelfSubjectRulesReviews returns a SelfSubjectRulesReviews
+func newSelfSubjectRulesReviews(c *AuthorizationV1beta1Client) *selfSubjectRulesReviews {
+	return &selfSubjectRulesReviews{
+		client: c.RESTClient(),
+	}
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview_expansion.go
new file mode 100644
index 0000000..5f1f37e
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview_expansion.go
@@ -0,0 +1,35 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	authorizationapi "k8s.io/api/authorization/v1beta1"
+)
+
+type SelfSubjectRulesReviewExpansion interface {
+	Create(srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error)
+}
+
+func (c *selfSubjectRulesReviews) Create(srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error) {
+	result = &authorizationapi.SelfSubjectRulesReview{}
+	err = c.client.Post().
+		Resource("selfsubjectrulesreviews").
+		Body(srr).
+		Do().
+		Into(result)
+	return
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go
new file mode 100644
index 0000000..79f1ec5
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go
@@ -0,0 +1,46 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	rest "k8s.io/client-go/rest"
+)
+
+// SubjectAccessReviewsGetter has a method to return a SubjectAccessReviewInterface.
+// A group's client should implement this interface.
+type SubjectAccessReviewsGetter interface {
+	SubjectAccessReviews() SubjectAccessReviewInterface
+}
+
+// SubjectAccessReviewInterface has methods to work with SubjectAccessReview resources.
+type SubjectAccessReviewInterface interface {
+	SubjectAccessReviewExpansion
+}
+
+// subjectAccessReviews implements SubjectAccessReviewInterface
+type subjectAccessReviews struct {
+	client rest.Interface
+}
+
+// newSubjectAccessReviews returns a SubjectAccessReviews
+func newSubjectAccessReviews(c *AuthorizationV1beta1Client) *subjectAccessReviews {
+	return &subjectAccessReviews{
+		client: c.RESTClient(),
+	}
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview_expansion.go
new file mode 100644
index 0000000..4f93689
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview_expansion.go
@@ -0,0 +1,36 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	authorizationapi "k8s.io/api/authorization/v1beta1"
+)
+
+// The SubjectAccessReviewExpansion interface allows manually adding extra methods to the AuthorizationInterface.
+type SubjectAccessReviewExpansion interface {
+	Create(sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error)
+}
+
+func (c *subjectAccessReviews) Create(sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) {
+	result = &authorizationapi.SubjectAccessReview{}
+	err = c.client.Post().
+		Resource("subjectaccessreviews").
+		Body(sar).
+		Do().
+		Into(result)
+	return
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go
new file mode 100644
index 0000000..2bd49e2
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go
@@ -0,0 +1,90 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	v1 "k8s.io/api/autoscaling/v1"
+	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+type AutoscalingV1Interface interface {
+	RESTClient() rest.Interface
+	HorizontalPodAutoscalersGetter
+}
+
+// AutoscalingV1Client is used to interact with features provided by the autoscaling group.
+type AutoscalingV1Client struct {
+	restClient rest.Interface
+}
+
+func (c *AutoscalingV1Client) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface {
+	return newHorizontalPodAutoscalers(c, namespace)
+}
+
+// NewForConfig creates a new AutoscalingV1Client for the given config.
+func NewForConfig(c *rest.Config) (*AutoscalingV1Client, error) {
+	config := *c
+	if err := setConfigDefaults(&config); err != nil {
+		return nil, err
+	}
+	client, err := rest.RESTClientFor(&config)
+	if err != nil {
+		return nil, err
+	}
+	return &AutoscalingV1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new AutoscalingV1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *AutoscalingV1Client {
+	client, err := NewForConfig(c)
+	if err != nil {
+		panic(err)
+	}
+	return client
+}
+
+// New creates a new AutoscalingV1Client for the given RESTClient.
+func New(c rest.Interface) *AutoscalingV1Client {
+	return &AutoscalingV1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+	gv := v1.SchemeGroupVersion
+	config.GroupVersion = &gv
+	config.APIPath = "/apis"
+	config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
+
+	if config.UserAgent == "" {
+		config.UserAgent = rest.DefaultKubernetesUserAgent()
+	}
+
+	return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with the API server by this client implementation.
+func (c *AutoscalingV1Client) RESTClient() rest.Interface {
+	if c == nil {
+		return nil
+	}
+	return c.restClient
+}
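
As a usage sketch (again outside the patch itself): the group client above is usually reached through the aggregate kubernetes.Clientset, but it can also be built directly with NewForConfig. The in-cluster config below assumes the code runs inside a pod with a service account:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	autoscalingv1 "k8s.io/client-go/kubernetes/typed/autoscaling/v1"
	"k8s.io/client-go/rest"
)

func main() {
	// In-cluster config; works when run inside a pod with a service account.
	config, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client, err := autoscalingv1.NewForConfig(config)
	if err != nil {
		panic(err)
	}
	hpas, err := client.HorizontalPodAutoscalers(metav1.NamespaceDefault).List(metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("found %d autoscalers\n", len(hpas.Items))
}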
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/doc.go
new file mode 100644
index 0000000..3af5d05
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/generated_expansion.go
new file mode 100644
index 0000000..c60028b
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/generated_expansion.go
@@ -0,0 +1,21 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+type HorizontalPodAutoscalerExpansion interface{}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go
new file mode 100644
index 0000000..0e0839f
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go
@@ -0,0 +1,191 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	"time"
+
+	v1 "k8s.io/api/autoscaling/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// HorizontalPodAutoscalersGetter has a method to return a HorizontalPodAutoscalerInterface.
+// A group's client should implement this interface.
+type HorizontalPodAutoscalersGetter interface {
+	HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface
+}
+
+// HorizontalPodAutoscalerInterface has methods to work with HorizontalPodAutoscaler resources.
+type HorizontalPodAutoscalerInterface interface {
+	Create(*v1.HorizontalPodAutoscaler) (*v1.HorizontalPodAutoscaler, error)
+	Update(*v1.HorizontalPodAutoscaler) (*v1.HorizontalPodAutoscaler, error)
+	UpdateStatus(*v1.HorizontalPodAutoscaler) (*v1.HorizontalPodAutoscaler, error)
+	Delete(name string, options *metav1.DeleteOptions) error
+	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
+	Get(name string, options metav1.GetOptions) (*v1.HorizontalPodAutoscaler, error)
+	List(opts metav1.ListOptions) (*v1.HorizontalPodAutoscalerList, error)
+	Watch(opts metav1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.HorizontalPodAutoscaler, err error)
+	HorizontalPodAutoscalerExpansion
+}
+
+// horizontalPodAutoscalers implements HorizontalPodAutoscalerInterface
+type horizontalPodAutoscalers struct {
+	client rest.Interface
+	ns     string
+}
+
+// newHorizontalPodAutoscalers returns a HorizontalPodAutoscalers
+func newHorizontalPodAutoscalers(c *AutoscalingV1Client, namespace string) *horizontalPodAutoscalers {
+	return &horizontalPodAutoscalers{
+		client: c.RESTClient(),
+		ns:     namespace,
+	}
+}
+
+// Get takes the name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any.
+func (c *horizontalPodAutoscalers) Get(name string, options metav1.GetOptions) (result *v1.HorizontalPodAutoscaler, err error) {
+	result = &v1.HorizontalPodAutoscaler{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("horizontalpodautoscalers").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors.
+func (c *horizontalPodAutoscalers) List(opts metav1.ListOptions) (result *v1.HorizontalPodAutoscalerList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1.HorizontalPodAutoscalerList{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("horizontalpodautoscalers").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers.
+func (c *horizontalPodAutoscalers) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Namespace(c.ns).
+		Resource("horizontalpodautoscalers").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any.
+func (c *horizontalPodAutoscalers) Create(horizontalPodAutoscaler *v1.HorizontalPodAutoscaler) (result *v1.HorizontalPodAutoscaler, err error) {
+	result = &v1.HorizontalPodAutoscaler{}
+	err = c.client.Post().
+		Namespace(c.ns).
+		Resource("horizontalpodautoscalers").
+		Body(horizontalPodAutoscaler).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any.
+func (c *horizontalPodAutoscalers) Update(horizontalPodAutoscaler *v1.HorizontalPodAutoscaler) (result *v1.HorizontalPodAutoscaler, err error) {
+	result = &v1.HorizontalPodAutoscaler{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("horizontalpodautoscalers").
+		Name(horizontalPodAutoscaler.Name).
+		Body(horizontalPodAutoscaler).
+		Do().
+		Into(result)
+	return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+
+func (c *horizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *v1.HorizontalPodAutoscaler) (result *v1.HorizontalPodAutoscaler, err error) {
+	result = &v1.HorizontalPodAutoscaler{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("horizontalpodautoscalers").
+		Name(horizontalPodAutoscaler.Name).
+		SubResource("status").
+		Body(horizontalPodAutoscaler).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes the name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs.
+func (c *horizontalPodAutoscalers) Delete(name string, options *metav1.DeleteOptions) error {
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("horizontalpodautoscalers").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *horizontalPodAutoscalers) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("horizontalpodautoscalers").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched horizontalPodAutoscaler.
+func (c *horizontalPodAutoscalers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.HorizontalPodAutoscaler, err error) {
+	result = &v1.HorizontalPodAutoscaler{}
+	err = c.client.Patch(pt).
+		Namespace(c.ns).
+		Resource("horizontalpodautoscalers").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
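
A hedged example of the Create method above, targeting a hypothetical Deployment named "web" (not part of the patch); the replica bounds are arbitrary and the clientset is assumed to have been built elsewhere:

package example

import (
	autoscalingv1 "k8s.io/api/autoscaling/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// createWebHPA creates a v1 HorizontalPodAutoscaler for a hypothetical
// Deployment named "web", scaling between 2 and 5 replicas.
func createWebHPA(clientset *kubernetes.Clientset) (*autoscalingv1.HorizontalPodAutoscaler, error) {
	minReplicas := int32(2)
	hpa := &autoscalingv1.HorizontalPodAutoscaler{
		ObjectMeta: metav1.ObjectMeta{Name: "web"},
		Spec: autoscalingv1.HorizontalPodAutoscalerSpec{
			ScaleTargetRef: autoscalingv1.CrossVersionObjectReference{
				Kind:       "Deployment",
				Name:       "web",
				APIVersion: "apps/v1",
			},
			MinReplicas: &minReplicas,
			MaxReplicas: 5,
		},
	}
	return clientset.AutoscalingV1().HorizontalPodAutoscalers("default").Create(hpa)
}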
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go
new file mode 100644
index 0000000..3a49b26
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go
@@ -0,0 +1,90 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v2beta1
+
+import (
+	v2beta1 "k8s.io/api/autoscaling/v2beta1"
+	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+type AutoscalingV2beta1Interface interface {
+	RESTClient() rest.Interface
+	HorizontalPodAutoscalersGetter
+}
+
+// AutoscalingV2beta1Client is used to interact with features provided by the autoscaling group.
+type AutoscalingV2beta1Client struct {
+	restClient rest.Interface
+}
+
+func (c *AutoscalingV2beta1Client) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface {
+	return newHorizontalPodAutoscalers(c, namespace)
+}
+
+// NewForConfig creates a new AutoscalingV2beta1Client for the given config.
+func NewForConfig(c *rest.Config) (*AutoscalingV2beta1Client, error) {
+	config := *c
+	if err := setConfigDefaults(&config); err != nil {
+		return nil, err
+	}
+	client, err := rest.RESTClientFor(&config)
+	if err != nil {
+		return nil, err
+	}
+	return &AutoscalingV2beta1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new AutoscalingV2beta1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *AutoscalingV2beta1Client {
+	client, err := NewForConfig(c)
+	if err != nil {
+		panic(err)
+	}
+	return client
+}
+
+// New creates a new AutoscalingV2beta1Client for the given RESTClient.
+func New(c rest.Interface) *AutoscalingV2beta1Client {
+	return &AutoscalingV2beta1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+	gv := v2beta1.SchemeGroupVersion
+	config.GroupVersion = &gv
+	config.APIPath = "/apis"
+	config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
+
+	if config.UserAgent == "" {
+		config.UserAgent = rest.DefaultKubernetesUserAgent()
+	}
+
+	return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with the API server by this client implementation.
+func (c *AutoscalingV2beta1Client) RESTClient() rest.Interface {
+	if c == nil {
+		return nil
+	}
+	return c.restClient
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/doc.go
new file mode 100644
index 0000000..06fd344
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v2beta1
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/generated_expansion.go
new file mode 100644
index 0000000..6f1704f
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/generated_expansion.go
@@ -0,0 +1,21 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v2beta1
+
+type HorizontalPodAutoscalerExpansion interface{}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go
new file mode 100644
index 0000000..02d5cfb
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go
@@ -0,0 +1,191 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v2beta1
+
+import (
+	"time"
+
+	v2beta1 "k8s.io/api/autoscaling/v2beta1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// HorizontalPodAutoscalersGetter has a method to return a HorizontalPodAutoscalerInterface.
+// A group's client should implement this interface.
+type HorizontalPodAutoscalersGetter interface {
+	HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface
+}
+
+// HorizontalPodAutoscalerInterface has methods to work with HorizontalPodAutoscaler resources.
+type HorizontalPodAutoscalerInterface interface {
+	Create(*v2beta1.HorizontalPodAutoscaler) (*v2beta1.HorizontalPodAutoscaler, error)
+	Update(*v2beta1.HorizontalPodAutoscaler) (*v2beta1.HorizontalPodAutoscaler, error)
+	UpdateStatus(*v2beta1.HorizontalPodAutoscaler) (*v2beta1.HorizontalPodAutoscaler, error)
+	Delete(name string, options *v1.DeleteOptions) error
+	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
+	Get(name string, options v1.GetOptions) (*v2beta1.HorizontalPodAutoscaler, error)
+	List(opts v1.ListOptions) (*v2beta1.HorizontalPodAutoscalerList, error)
+	Watch(opts v1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2beta1.HorizontalPodAutoscaler, err error)
+	HorizontalPodAutoscalerExpansion
+}
+
+// horizontalPodAutoscalers implements HorizontalPodAutoscalerInterface
+type horizontalPodAutoscalers struct {
+	client rest.Interface
+	ns     string
+}
+
+// newHorizontalPodAutoscalers returns a HorizontalPodAutoscalers
+func newHorizontalPodAutoscalers(c *AutoscalingV2beta1Client, namespace string) *horizontalPodAutoscalers {
+	return &horizontalPodAutoscalers{
+		client: c.RESTClient(),
+		ns:     namespace,
+	}
+}
+
+// Get takes the name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any.
+func (c *horizontalPodAutoscalers) Get(name string, options v1.GetOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) {
+	result = &v2beta1.HorizontalPodAutoscaler{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("horizontalpodautoscalers").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors.
+func (c *horizontalPodAutoscalers) List(opts v1.ListOptions) (result *v2beta1.HorizontalPodAutoscalerList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v2beta1.HorizontalPodAutoscalerList{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("horizontalpodautoscalers").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers.
+func (c *horizontalPodAutoscalers) Watch(opts v1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Namespace(c.ns).
+		Resource("horizontalpodautoscalers").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any.
+func (c *horizontalPodAutoscalers) Create(horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler) (result *v2beta1.HorizontalPodAutoscaler, err error) {
+	result = &v2beta1.HorizontalPodAutoscaler{}
+	err = c.client.Post().
+		Namespace(c.ns).
+		Resource("horizontalpodautoscalers").
+		Body(horizontalPodAutoscaler).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any.
+func (c *horizontalPodAutoscalers) Update(horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler) (result *v2beta1.HorizontalPodAutoscaler, err error) {
+	result = &v2beta1.HorizontalPodAutoscaler{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("horizontalpodautoscalers").
+		Name(horizontalPodAutoscaler.Name).
+		Body(horizontalPodAutoscaler).
+		Do().
+		Into(result)
+	return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+
+func (c *horizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler) (result *v2beta1.HorizontalPodAutoscaler, err error) {
+	result = &v2beta1.HorizontalPodAutoscaler{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("horizontalpodautoscalers").
+		Name(horizontalPodAutoscaler.Name).
+		SubResource("status").
+		Body(horizontalPodAutoscaler).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes the name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs.
+func (c *horizontalPodAutoscalers) Delete(name string, options *v1.DeleteOptions) error {
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("horizontalpodautoscalers").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *horizontalPodAutoscalers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("horizontalpodautoscalers").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched horizontalPodAutoscaler.
+func (c *horizontalPodAutoscalers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2beta1.HorizontalPodAutoscaler, err error) {
+	result = &v2beta1.HorizontalPodAutoscaler{}
+	err = c.client.Patch(pt).
+		Namespace(c.ns).
+		Resource("horizontalpodautoscalers").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
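
A sketch of the Watch method above, again not part of the vendored change: TimeoutSeconds in ListOptions is honored by the request builder, so the server closes the stream after the deadline. The 60-second timeout and the pre-built clientset are illustrative assumptions:

package example

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// watchHPAs streams HorizontalPodAutoscaler events from the "default"
// namespace until the server closes the watch after the timeout.
func watchHPAs(clientset *kubernetes.Clientset) error {
	timeout := int64(60)
	w, err := clientset.AutoscalingV2beta1().
		HorizontalPodAutoscalers(metav1.NamespaceDefault).
		Watch(metav1.ListOptions{TimeoutSeconds: &timeout})
	if err != nil {
		return err
	}
	defer w.Stop()
	for event := range w.ResultChan() {
		fmt.Printf("%s: %T\n", event.Type, event.Object)
	}
	return nil
}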
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/autoscaling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/autoscaling_client.go
new file mode 100644
index 0000000..03fe25e
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/autoscaling_client.go
@@ -0,0 +1,90 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v2beta2
+
+import (
+	v2beta2 "k8s.io/api/autoscaling/v2beta2"
+	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+type AutoscalingV2beta2Interface interface {
+	RESTClient() rest.Interface
+	HorizontalPodAutoscalersGetter
+}
+
+// AutoscalingV2beta2Client is used to interact with features provided by the autoscaling group.
+type AutoscalingV2beta2Client struct {
+	restClient rest.Interface
+}
+
+func (c *AutoscalingV2beta2Client) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface {
+	return newHorizontalPodAutoscalers(c, namespace)
+}
+
+// NewForConfig creates a new AutoscalingV2beta2Client for the given config.
+func NewForConfig(c *rest.Config) (*AutoscalingV2beta2Client, error) {
+	config := *c
+	if err := setConfigDefaults(&config); err != nil {
+		return nil, err
+	}
+	client, err := rest.RESTClientFor(&config)
+	if err != nil {
+		return nil, err
+	}
+	return &AutoscalingV2beta2Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new AutoscalingV2beta2Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *AutoscalingV2beta2Client {
+	client, err := NewForConfig(c)
+	if err != nil {
+		panic(err)
+	}
+	return client
+}
+
+// New creates a new AutoscalingV2beta2Client for the given RESTClient.
+func New(c rest.Interface) *AutoscalingV2beta2Client {
+	return &AutoscalingV2beta2Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+	gv := v2beta2.SchemeGroupVersion
+	config.GroupVersion = &gv
+	config.APIPath = "/apis"
+	config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
+
+	if config.UserAgent == "" {
+		config.UserAgent = rest.DefaultKubernetesUserAgent()
+	}
+
+	return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with the API server by this client implementation.
+func (c *AutoscalingV2beta2Client) RESTClient() rest.Interface {
+	if c == nil {
+		return nil
+	}
+	return c.restClient
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/doc.go
new file mode 100644
index 0000000..c600965
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v2beta2
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/generated_expansion.go
new file mode 100644
index 0000000..822e062
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/generated_expansion.go
@@ -0,0 +1,21 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v2beta2
+
+type HorizontalPodAutoscalerExpansion interface{}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go
new file mode 100644
index 0000000..91a0fa6
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go
@@ -0,0 +1,191 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v2beta2
+
+import (
+	"time"
+
+	v2beta2 "k8s.io/api/autoscaling/v2beta2"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// HorizontalPodAutoscalersGetter has a method to return a HorizontalPodAutoscalerInterface.
+// A group's client should implement this interface.
+type HorizontalPodAutoscalersGetter interface {
+	HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface
+}
+
+// HorizontalPodAutoscalerInterface has methods to work with HorizontalPodAutoscaler resources.
+type HorizontalPodAutoscalerInterface interface {
+	Create(*v2beta2.HorizontalPodAutoscaler) (*v2beta2.HorizontalPodAutoscaler, error)
+	Update(*v2beta2.HorizontalPodAutoscaler) (*v2beta2.HorizontalPodAutoscaler, error)
+	UpdateStatus(*v2beta2.HorizontalPodAutoscaler) (*v2beta2.HorizontalPodAutoscaler, error)
+	Delete(name string, options *v1.DeleteOptions) error
+	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
+	Get(name string, options v1.GetOptions) (*v2beta2.HorizontalPodAutoscaler, error)
+	List(opts v1.ListOptions) (*v2beta2.HorizontalPodAutoscalerList, error)
+	Watch(opts v1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2beta2.HorizontalPodAutoscaler, err error)
+	HorizontalPodAutoscalerExpansion
+}
+
+// horizontalPodAutoscalers implements HorizontalPodAutoscalerInterface
+type horizontalPodAutoscalers struct {
+	client rest.Interface
+	ns     string
+}
+
+// newHorizontalPodAutoscalers returns a HorizontalPodAutoscalers
+func newHorizontalPodAutoscalers(c *AutoscalingV2beta2Client, namespace string) *horizontalPodAutoscalers {
+	return &horizontalPodAutoscalers{
+		client: c.RESTClient(),
+		ns:     namespace,
+	}
+}
+
+// Get takes the name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any.
+func (c *horizontalPodAutoscalers) Get(name string, options v1.GetOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) {
+	result = &v2beta2.HorizontalPodAutoscaler{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("horizontalpodautoscalers").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors.
+func (c *horizontalPodAutoscalers) List(opts v1.ListOptions) (result *v2beta2.HorizontalPodAutoscalerList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v2beta2.HorizontalPodAutoscalerList{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("horizontalpodautoscalers").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers.
+func (c *horizontalPodAutoscalers) Watch(opts v1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Namespace(c.ns).
+		Resource("horizontalpodautoscalers").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any.
+func (c *horizontalPodAutoscalers) Create(horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler) (result *v2beta2.HorizontalPodAutoscaler, err error) {
+	result = &v2beta2.HorizontalPodAutoscaler{}
+	err = c.client.Post().
+		Namespace(c.ns).
+		Resource("horizontalpodautoscalers").
+		Body(horizontalPodAutoscaler).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any.
+func (c *horizontalPodAutoscalers) Update(horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler) (result *v2beta2.HorizontalPodAutoscaler, err error) {
+	result = &v2beta2.HorizontalPodAutoscaler{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("horizontalpodautoscalers").
+		Name(horizontalPodAutoscaler.Name).
+		Body(horizontalPodAutoscaler).
+		Do().
+		Into(result)
+	return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+
+func (c *horizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler) (result *v2beta2.HorizontalPodAutoscaler, err error) {
+	result = &v2beta2.HorizontalPodAutoscaler{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("horizontalpodautoscalers").
+		Name(horizontalPodAutoscaler.Name).
+		SubResource("status").
+		Body(horizontalPodAutoscaler).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes the name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs.
+func (c *horizontalPodAutoscalers) Delete(name string, options *v1.DeleteOptions) error {
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("horizontalpodautoscalers").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *horizontalPodAutoscalers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("horizontalpodautoscalers").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched horizontalPodAutoscaler.
+func (c *horizontalPodAutoscalers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2beta2.HorizontalPodAutoscaler, err error) {
+	result = &v2beta2.HorizontalPodAutoscaler{}
+	err = c.client.Patch(pt).
+		Namespace(c.ns).
+		Resource("horizontalpodautoscalers").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
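
To illustrate the Patch method above (outside the patch itself): a JSON merge patch touches only the fields it names, leaving the rest of the object untouched by the server. The autoscaler name "web" and the new maxReplicas value are placeholders:

package example

import (
	"k8s.io/api/autoscaling/v2beta2"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
)

// raiseMaxReplicas merge-patches the hypothetical autoscaler "web" in the
// "default" namespace, changing only spec.maxReplicas.
func raiseMaxReplicas(clientset *kubernetes.Clientset) (*v2beta2.HorizontalPodAutoscaler, error) {
	patch := []byte(`{"spec":{"maxReplicas":10}}`)
	return clientset.AutoscalingV2beta2().
		HorizontalPodAutoscalers("default").
		Patch("web", types.MergePatchType, patch)
}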
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go
new file mode 100644
index 0000000..d5e35e6
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go
@@ -0,0 +1,90 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	v1 "k8s.io/api/batch/v1"
+	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+type BatchV1Interface interface {
+	RESTClient() rest.Interface
+	JobsGetter
+}
+
+// BatchV1Client is used to interact with features provided by the batch group.
+type BatchV1Client struct {
+	restClient rest.Interface
+}
+
+func (c *BatchV1Client) Jobs(namespace string) JobInterface {
+	return newJobs(c, namespace)
+}
+
+// NewForConfig creates a new BatchV1Client for the given config.
+func NewForConfig(c *rest.Config) (*BatchV1Client, error) {
+	config := *c
+	if err := setConfigDefaults(&config); err != nil {
+		return nil, err
+	}
+	client, err := rest.RESTClientFor(&config)
+	if err != nil {
+		return nil, err
+	}
+	return &BatchV1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new BatchV1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *BatchV1Client {
+	client, err := NewForConfig(c)
+	if err != nil {
+		panic(err)
+	}
+	return client
+}
+
+// New creates a new BatchV1Client for the given RESTClient.
+func New(c rest.Interface) *BatchV1Client {
+	return &BatchV1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+	gv := v1.SchemeGroupVersion
+	config.GroupVersion = &gv
+	config.APIPath = "/apis"
+	config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
+
+	if config.UserAgent == "" {
+		config.UserAgent = rest.DefaultKubernetesUserAgent()
+	}
+
+	return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with the API server by this client implementation.
+func (c *BatchV1Client) RESTClient() rest.Interface {
+	if c == nil {
+		return nil
+	}
+	return c.restClient
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/doc.go
new file mode 100644
index 0000000..3af5d05
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/generated_expansion.go
new file mode 100644
index 0000000..dc41429
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/generated_expansion.go
@@ -0,0 +1,21 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+type JobExpansion interface{}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go
new file mode 100644
index 0000000..b55c602
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go
@@ -0,0 +1,191 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	"time"
+
+	v1 "k8s.io/api/batch/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// JobsGetter has a method to return a JobInterface.
+// A group's client should implement this interface.
+type JobsGetter interface {
+	Jobs(namespace string) JobInterface
+}
+
+// JobInterface has methods to work with Job resources.
+type JobInterface interface {
+	Create(*v1.Job) (*v1.Job, error)
+	Update(*v1.Job) (*v1.Job, error)
+	UpdateStatus(*v1.Job) (*v1.Job, error)
+	Delete(name string, options *metav1.DeleteOptions) error
+	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
+	Get(name string, options metav1.GetOptions) (*v1.Job, error)
+	List(opts metav1.ListOptions) (*v1.JobList, error)
+	Watch(opts metav1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Job, err error)
+	JobExpansion
+}
+
+// jobs implements JobInterface
+type jobs struct {
+	client rest.Interface
+	ns     string
+}
+
+// newJobs returns a Jobs
+func newJobs(c *BatchV1Client, namespace string) *jobs {
+	return &jobs{
+		client: c.RESTClient(),
+		ns:     namespace,
+	}
+}
+
+// Get takes the name of the job, and returns the corresponding job object, and an error if there is any.
+func (c *jobs) Get(name string, options metav1.GetOptions) (result *v1.Job, err error) {
+	result = &v1.Job{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("jobs").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of Jobs that match those selectors.
+func (c *jobs) List(opts metav1.ListOptions) (result *v1.JobList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1.JobList{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("jobs").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested jobs.
+func (c *jobs) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Namespace(c.ns).
+		Resource("jobs").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a job and creates it. Returns the server's representation of the job, and an error, if there is any.
+func (c *jobs) Create(job *v1.Job) (result *v1.Job, err error) {
+	result = &v1.Job{}
+	err = c.client.Post().
+		Namespace(c.ns).
+		Resource("jobs").
+		Body(job).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a job and updates it. Returns the server's representation of the job, and an error, if there is any.
+func (c *jobs) Update(job *v1.Job) (result *v1.Job, err error) {
+	result = &v1.Job{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("jobs").
+		Name(job.Name).
+		Body(job).
+		Do().
+		Into(result)
+	return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+
+func (c *jobs) UpdateStatus(job *v1.Job) (result *v1.Job, err error) {
+	result = &v1.Job{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("jobs").
+		Name(job.Name).
+		SubResource("status").
+		Body(job).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes the name of the job and deletes it. Returns an error if one occurs.
+func (c *jobs) Delete(name string, options *metav1.DeleteOptions) error {
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("jobs").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *jobs) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("jobs").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched job.
+func (c *jobs) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Job, err error) {
+	result = &v1.Job{}
+	err = c.client.Patch(pt).
+		Namespace(c.ns).
+		Resource("jobs").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
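
One more usage sketch, not part of the vendored change: submitting a one-shot Job through the Create method above. The job name, image, and command are placeholders, and the clientset is assumed to exist:

package example

import (
	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// runEchoJob submits a one-shot Job running a hypothetical busybox echo.
// RestartPolicyNever is required for Job pod templates.
func runEchoJob(clientset *kubernetes.Clientset) (*batchv1.Job, error) {
	job := &batchv1.Job{
		ObjectMeta: metav1.ObjectMeta{Name: "example-echo"},
		Spec: batchv1.JobSpec{
			Template: corev1.PodTemplateSpec{
				Spec: corev1.PodSpec{
					RestartPolicy: corev1.RestartPolicyNever,
					Containers: []corev1.Container{{
						Name:    "main",
						Image:   "busybox",
						Command: []string{"echo", "hello"},
					}},
				},
			},
		},
	}
	return clientset.BatchV1().Jobs("default").Create(job)
}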
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go
new file mode 100644
index 0000000..aa71ca8
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go
@@ -0,0 +1,90 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	v1beta1 "k8s.io/api/batch/v1beta1"
+	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+type BatchV1beta1Interface interface {
+	RESTClient() rest.Interface
+	CronJobsGetter
+}
+
+// BatchV1beta1Client is used to interact with features provided by the batch group.
+type BatchV1beta1Client struct {
+	restClient rest.Interface
+}
+
+func (c *BatchV1beta1Client) CronJobs(namespace string) CronJobInterface {
+	return newCronJobs(c, namespace)
+}
+
+// NewForConfig creates a new BatchV1beta1Client for the given config.
+func NewForConfig(c *rest.Config) (*BatchV1beta1Client, error) {
+	config := *c
+	if err := setConfigDefaults(&config); err != nil {
+		return nil, err
+	}
+	client, err := rest.RESTClientFor(&config)
+	if err != nil {
+		return nil, err
+	}
+	return &BatchV1beta1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new BatchV1beta1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *BatchV1beta1Client {
+	client, err := NewForConfig(c)
+	if err != nil {
+		panic(err)
+	}
+	return client
+}
+
+// New creates a new BatchV1beta1Client for the given RESTClient.
+func New(c rest.Interface) *BatchV1beta1Client {
+	return &BatchV1beta1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+	gv := v1beta1.SchemeGroupVersion
+	config.GroupVersion = &gv
+	config.APIPath = "/apis"
+	config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
+
+	if config.UserAgent == "" {
+		config.UserAgent = rest.DefaultKubernetesUserAgent()
+	}
+
+	return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with the API server by this client implementation.
+func (c *BatchV1beta1Client) RESTClient() rest.Interface {
+	if c == nil {
+		return nil
+	}
+	return c.restClient
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go
new file mode 100644
index 0000000..d89d2fa
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go
@@ -0,0 +1,191 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	"time"
+
+	v1beta1 "k8s.io/api/batch/v1beta1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// CronJobsGetter has a method to return a CronJobInterface.
+// A group's client should implement this interface.
+type CronJobsGetter interface {
+	CronJobs(namespace string) CronJobInterface
+}
+
+// CronJobInterface has methods to work with CronJob resources.
+type CronJobInterface interface {
+	Create(*v1beta1.CronJob) (*v1beta1.CronJob, error)
+	Update(*v1beta1.CronJob) (*v1beta1.CronJob, error)
+	UpdateStatus(*v1beta1.CronJob) (*v1beta1.CronJob, error)
+	Delete(name string, options *v1.DeleteOptions) error
+	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
+	Get(name string, options v1.GetOptions) (*v1beta1.CronJob, error)
+	List(opts v1.ListOptions) (*v1beta1.CronJobList, error)
+	Watch(opts v1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CronJob, err error)
+	CronJobExpansion
+}
+
+// cronJobs implements CronJobInterface
+type cronJobs struct {
+	client rest.Interface
+	ns     string
+}
+
+// newCronJobs returns a CronJobs
+func newCronJobs(c *BatchV1beta1Client, namespace string) *cronJobs {
+	return &cronJobs{
+		client: c.RESTClient(),
+		ns:     namespace,
+	}
+}
+
+// Get takes the name of the cronJob, and returns the corresponding cronJob object, and an error if there is any.
+func (c *cronJobs) Get(name string, options v1.GetOptions) (result *v1beta1.CronJob, err error) {
+	result = &v1beta1.CronJob{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("cronjobs").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of CronJobs that match those selectors.
+func (c *cronJobs) List(opts v1.ListOptions) (result *v1beta1.CronJobList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1beta1.CronJobList{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("cronjobs").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested cronJobs.
+func (c *cronJobs) Watch(opts v1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Namespace(c.ns).
+		Resource("cronjobs").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a cronJob and creates it. Returns the server's representation of the cronJob, and an error, if there is any.
+func (c *cronJobs) Create(cronJob *v1beta1.CronJob) (result *v1beta1.CronJob, err error) {
+	result = &v1beta1.CronJob{}
+	err = c.client.Post().
+		Namespace(c.ns).
+		Resource("cronjobs").
+		Body(cronJob).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a cronJob and updates it. Returns the server's representation of the cronJob, and an error, if there is any.
+func (c *cronJobs) Update(cronJob *v1beta1.CronJob) (result *v1beta1.CronJob, err error) {
+	result = &v1beta1.CronJob{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("cronjobs").
+		Name(cronJob.Name).
+		Body(cronJob).
+		Do().
+		Into(result)
+	return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+
+func (c *cronJobs) UpdateStatus(cronJob *v1beta1.CronJob) (result *v1beta1.CronJob, err error) {
+	result = &v1beta1.CronJob{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("cronjobs").
+		Name(cronJob.Name).
+		SubResource("status").
+		Body(cronJob).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes the name of the cronJob and deletes it. Returns an error if one occurs.
+func (c *cronJobs) Delete(name string, options *v1.DeleteOptions) error {
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("cronjobs").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *cronJobs) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("cronjobs").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched cronJob.
+func (c *cronJobs) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CronJob, err error) {
+	result = &v1beta1.CronJob{}
+	err = c.client.Patch(pt).
+		Namespace(c.ns).
+		Resource("cronjobs").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
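
Finally, a sketch of the List method above with a label selector (not part of the patch); the "app=reporting" label is hypothetical, and selectors are passed through ListOptions as for every list call in this package:

package example

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// printCronJobSchedules lists the CronJobs in "default" matching a
// hypothetical label and prints each name with its cron schedule.
func printCronJobSchedules(clientset *kubernetes.Clientset) error {
	cjs, err := clientset.BatchV1beta1().CronJobs("default").
		List(metav1.ListOptions{LabelSelector: "app=reporting"})
	if err != nil {
		return err
	}
	for _, cj := range cjs.Items {
		fmt.Printf("%s\t%s\n", cj.Name, cj.Spec.Schedule)
	}
	return nil
}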
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/doc.go
new file mode 100644
index 0000000..7711019
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1beta1
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/generated_expansion.go
new file mode 100644
index 0000000..145e14a
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/generated_expansion.go
@@ -0,0 +1,21 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+type CronJobExpansion interface{}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/batch_client.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/batch_client.go
new file mode 100644
index 0000000..e6c6306
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/batch_client.go
@@ -0,0 +1,90 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v2alpha1
+
+import (
+	v2alpha1 "k8s.io/api/batch/v2alpha1"
+	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+type BatchV2alpha1Interface interface {
+	RESTClient() rest.Interface
+	CronJobsGetter
+}
+
+// BatchV2alpha1Client is used to interact with features provided by the batch group.
+type BatchV2alpha1Client struct {
+	restClient rest.Interface
+}
+
+func (c *BatchV2alpha1Client) CronJobs(namespace string) CronJobInterface {
+	return newCronJobs(c, namespace)
+}
+
+// NewForConfig creates a new BatchV2alpha1Client for the given config.
+func NewForConfig(c *rest.Config) (*BatchV2alpha1Client, error) {
+	config := *c
+	if err := setConfigDefaults(&config); err != nil {
+		return nil, err
+	}
+	client, err := rest.RESTClientFor(&config)
+	if err != nil {
+		return nil, err
+	}
+	return &BatchV2alpha1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new BatchV2alpha1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *BatchV2alpha1Client {
+	client, err := NewForConfig(c)
+	if err != nil {
+		panic(err)
+	}
+	return client
+}
+
+// New creates a new BatchV2alpha1Client for the given RESTClient.
+func New(c rest.Interface) *BatchV2alpha1Client {
+	return &BatchV2alpha1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+	gv := v2alpha1.SchemeGroupVersion
+	config.GroupVersion = &gv
+	config.APIPath = "/apis"
+	config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
+
+	if config.UserAgent == "" {
+		config.UserAgent = rest.DefaultKubernetesUserAgent()
+	}
+
+	return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with the API server by this client implementation.
+func (c *BatchV2alpha1Client) RESTClient() rest.Interface {
+	if c == nil {
+		return nil
+	}
+	return c.restClient
+}
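
Every group client in this change follows the construction pattern above: copy the caller's rest.Config, pin the group/version defaults, then build a RESTClient. A sketch (not part of the vendored files), assuming the daemon runs in-cluster:

package sketch

import (
	batchv2alpha1 "k8s.io/client-go/kubernetes/typed/batch/v2alpha1"
	rest "k8s.io/client-go/rest"
)

// newBatchClient builds the v2alpha1 batch client from in-cluster credentials.
// rest.InClusterConfig only succeeds inside a pod with a service account mounted.
func newBatchClient() (*batchv2alpha1.BatchV2alpha1Client, error) {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		return nil, err
	}
	// NewForConfig copies cfg, then setConfigDefaults pins GroupVersion,
	// APIPath ("/apis") and the negotiated serializer before building the RESTClient.
	return batchv2alpha1.NewForConfig(cfg)
}
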
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go
new file mode 100644
index 0000000..19123b6
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go
@@ -0,0 +1,191 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v2alpha1
+
+import (
+	"time"
+
+	v2alpha1 "k8s.io/api/batch/v2alpha1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// CronJobsGetter has a method to return a CronJobInterface.
+// A group's client should implement this interface.
+type CronJobsGetter interface {
+	CronJobs(namespace string) CronJobInterface
+}
+
+// CronJobInterface has methods to work with CronJob resources.
+type CronJobInterface interface {
+	Create(*v2alpha1.CronJob) (*v2alpha1.CronJob, error)
+	Update(*v2alpha1.CronJob) (*v2alpha1.CronJob, error)
+	UpdateStatus(*v2alpha1.CronJob) (*v2alpha1.CronJob, error)
+	Delete(name string, options *v1.DeleteOptions) error
+	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
+	Get(name string, options v1.GetOptions) (*v2alpha1.CronJob, error)
+	List(opts v1.ListOptions) (*v2alpha1.CronJobList, error)
+	Watch(opts v1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2alpha1.CronJob, err error)
+	CronJobExpansion
+}
+
+// cronJobs implements CronJobInterface
+type cronJobs struct {
+	client rest.Interface
+	ns     string
+}
+
+// newCronJobs returns a CronJobs
+func newCronJobs(c *BatchV2alpha1Client, namespace string) *cronJobs {
+	return &cronJobs{
+		client: c.RESTClient(),
+		ns:     namespace,
+	}
+}
+
+// Get takes the name of the cronJob and returns the corresponding cronJob object, and an error if there is any.
+func (c *cronJobs) Get(name string, options v1.GetOptions) (result *v2alpha1.CronJob, err error) {
+	result = &v2alpha1.CronJob{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("cronjobs").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of CronJobs that match those selectors.
+func (c *cronJobs) List(opts v1.ListOptions) (result *v2alpha1.CronJobList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v2alpha1.CronJobList{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("cronjobs").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested cronJobs.
+func (c *cronJobs) Watch(opts v1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Namespace(c.ns).
+		Resource("cronjobs").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a cronJob and creates it.  Returns the server's representation of the cronJob, and an error, if there is any.
+func (c *cronJobs) Create(cronJob *v2alpha1.CronJob) (result *v2alpha1.CronJob, err error) {
+	result = &v2alpha1.CronJob{}
+	err = c.client.Post().
+		Namespace(c.ns).
+		Resource("cronjobs").
+		Body(cronJob).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a cronJob and updates it. Returns the server's representation of the cronJob, and an error, if there is any.
+func (c *cronJobs) Update(cronJob *v2alpha1.CronJob) (result *v2alpha1.CronJob, err error) {
+	result = &v2alpha1.CronJob{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("cronjobs").
+		Name(cronJob.Name).
+		Body(cronJob).
+		Do().
+		Into(result)
+	return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+
+func (c *cronJobs) UpdateStatus(cronJob *v2alpha1.CronJob) (result *v2alpha1.CronJob, err error) {
+	result = &v2alpha1.CronJob{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("cronjobs").
+		Name(cronJob.Name).
+		SubResource("status").
+		Body(cronJob).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes the name of the cronJob and deletes it. Returns an error if one occurs.
+func (c *cronJobs) Delete(name string, options *v1.DeleteOptions) error {
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("cronjobs").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *cronJobs) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("cronjobs").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched cronJob.
+func (c *cronJobs) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2alpha1.CronJob, err error) {
+	result = &v2alpha1.CronJob{}
+	err = c.client.Patch(pt).
+		Namespace(c.ns).
+		Resource("cronjobs").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/doc.go
new file mode 100644
index 0000000..3efe0d2
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v2alpha1
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/generated_expansion.go
new file mode 100644
index 0000000..34dafc4
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/generated_expansion.go
@@ -0,0 +1,21 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v2alpha1
+
+type CronJobExpansion interface{}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go
new file mode 100644
index 0000000..baac42e
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go
@@ -0,0 +1,90 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	v1beta1 "k8s.io/api/certificates/v1beta1"
+	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+type CertificatesV1beta1Interface interface {
+	RESTClient() rest.Interface
+	CertificateSigningRequestsGetter
+}
+
+// CertificatesV1beta1Client is used to interact with features provided by the certificates.k8s.io group.
+type CertificatesV1beta1Client struct {
+	restClient rest.Interface
+}
+
+func (c *CertificatesV1beta1Client) CertificateSigningRequests() CertificateSigningRequestInterface {
+	return newCertificateSigningRequests(c)
+}
+
+// NewForConfig creates a new CertificatesV1beta1Client for the given config.
+func NewForConfig(c *rest.Config) (*CertificatesV1beta1Client, error) {
+	config := *c
+	if err := setConfigDefaults(&config); err != nil {
+		return nil, err
+	}
+	client, err := rest.RESTClientFor(&config)
+	if err != nil {
+		return nil, err
+	}
+	return &CertificatesV1beta1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new CertificatesV1beta1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *CertificatesV1beta1Client {
+	client, err := NewForConfig(c)
+	if err != nil {
+		panic(err)
+	}
+	return client
+}
+
+// New creates a new CertificatesV1beta1Client for the given RESTClient.
+func New(c rest.Interface) *CertificatesV1beta1Client {
+	return &CertificatesV1beta1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+	gv := v1beta1.SchemeGroupVersion
+	config.GroupVersion = &gv
+	config.APIPath = "/apis"
+	config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
+
+	if config.UserAgent == "" {
+		config.UserAgent = rest.DefaultKubernetesUserAgent()
+	}
+
+	return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with the API server by this client implementation.
+func (c *CertificatesV1beta1Client) RESTClient() rest.Interface {
+	if c == nil {
+		return nil
+	}
+	return c.restClient
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go
new file mode 100644
index 0000000..712d3a0
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go
@@ -0,0 +1,180 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	"time"
+
+	v1beta1 "k8s.io/api/certificates/v1beta1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// CertificateSigningRequestsGetter has a method to return a CertificateSigningRequestInterface.
+// A group's client should implement this interface.
+type CertificateSigningRequestsGetter interface {
+	CertificateSigningRequests() CertificateSigningRequestInterface
+}
+
+// CertificateSigningRequestInterface has methods to work with CertificateSigningRequest resources.
+type CertificateSigningRequestInterface interface {
+	Create(*v1beta1.CertificateSigningRequest) (*v1beta1.CertificateSigningRequest, error)
+	Update(*v1beta1.CertificateSigningRequest) (*v1beta1.CertificateSigningRequest, error)
+	UpdateStatus(*v1beta1.CertificateSigningRequest) (*v1beta1.CertificateSigningRequest, error)
+	Delete(name string, options *v1.DeleteOptions) error
+	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
+	Get(name string, options v1.GetOptions) (*v1beta1.CertificateSigningRequest, error)
+	List(opts v1.ListOptions) (*v1beta1.CertificateSigningRequestList, error)
+	Watch(opts v1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CertificateSigningRequest, err error)
+	CertificateSigningRequestExpansion
+}
+
+// certificateSigningRequests implements CertificateSigningRequestInterface
+type certificateSigningRequests struct {
+	client rest.Interface
+}
+
+// newCertificateSigningRequests returns a CertificateSigningRequests
+func newCertificateSigningRequests(c *CertificatesV1beta1Client) *certificateSigningRequests {
+	return &certificateSigningRequests{
+		client: c.RESTClient(),
+	}
+}
+
+// Get takes the name of the certificateSigningRequest and returns the corresponding certificateSigningRequest object, and an error if there is any.
+func (c *certificateSigningRequests) Get(name string, options v1.GetOptions) (result *v1beta1.CertificateSigningRequest, err error) {
+	result = &v1beta1.CertificateSigningRequest{}
+	err = c.client.Get().
+		Resource("certificatesigningrequests").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of CertificateSigningRequests that match those selectors.
+func (c *certificateSigningRequests) List(opts v1.ListOptions) (result *v1beta1.CertificateSigningRequestList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1beta1.CertificateSigningRequestList{}
+	err = c.client.Get().
+		Resource("certificatesigningrequests").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested certificateSigningRequests.
+func (c *certificateSigningRequests) Watch(opts v1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Resource("certificatesigningrequests").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a certificateSigningRequest and creates it.  Returns the server's representation of the certificateSigningRequest, and an error, if there is any.
+func (c *certificateSigningRequests) Create(certificateSigningRequest *v1beta1.CertificateSigningRequest) (result *v1beta1.CertificateSigningRequest, err error) {
+	result = &v1beta1.CertificateSigningRequest{}
+	err = c.client.Post().
+		Resource("certificatesigningrequests").
+		Body(certificateSigningRequest).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a certificateSigningRequest and updates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any.
+func (c *certificateSigningRequests) Update(certificateSigningRequest *v1beta1.CertificateSigningRequest) (result *v1beta1.CertificateSigningRequest, err error) {
+	result = &v1beta1.CertificateSigningRequest{}
+	err = c.client.Put().
+		Resource("certificatesigningrequests").
+		Name(certificateSigningRequest.Name).
+		Body(certificateSigningRequest).
+		Do().
+		Into(result)
+	return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+
+func (c *certificateSigningRequests) UpdateStatus(certificateSigningRequest *v1beta1.CertificateSigningRequest) (result *v1beta1.CertificateSigningRequest, err error) {
+	result = &v1beta1.CertificateSigningRequest{}
+	err = c.client.Put().
+		Resource("certificatesigningrequests").
+		Name(certificateSigningRequest.Name).
+		SubResource("status").
+		Body(certificateSigningRequest).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes the name of the certificateSigningRequest and deletes it. Returns an error if one occurs.
+func (c *certificateSigningRequests) Delete(name string, options *v1.DeleteOptions) error {
+	return c.client.Delete().
+		Resource("certificatesigningrequests").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *certificateSigningRequests) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Resource("certificatesigningrequests").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched certificateSigningRequest.
+func (c *certificateSigningRequests) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CertificateSigningRequest, err error) {
+	result = &v1beta1.CertificateSigningRequest{}
+	err = c.client.Patch(pt).
+		Resource("certificatesigningrequests").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go
new file mode 100644
index 0000000..c63b806
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	certificates "k8s.io/api/certificates/v1beta1"
+)
+
+type CertificateSigningRequestExpansion interface {
+	UpdateApproval(certificateSigningRequest *certificates.CertificateSigningRequest) (result *certificates.CertificateSigningRequest, err error)
+}
+
+func (c *certificateSigningRequests) UpdateApproval(certificateSigningRequest *certificates.CertificateSigningRequest) (result *certificates.CertificateSigningRequest, err error) {
+	result = &certificates.CertificateSigningRequest{}
+	err = c.client.Put().
+		Resource("certificatesigningrequests").
+		Name(certificateSigningRequest.Name).
+		Body(certificateSigningRequest).
+		SubResource("approval").
+		Do().
+		Into(result)
+	return
+}
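
The approval flow implied by this expansion is: append an Approved condition to the CSR status, then PUT it to the "approval" subresource. A sketch (not part of the vendored files) with hypothetical reason and message strings:

package sketch

import (
	certificates "k8s.io/api/certificates/v1beta1"
	certsv1beta1 "k8s.io/client-go/kubernetes/typed/certificates/v1beta1"
)

// approveCSR marks a pending CSR approved and pushes it to the approval
// subresource via the UpdateApproval expansion above.
func approveCSR(c certsv1beta1.CertificatesV1beta1Interface, csr *certificates.CertificateSigningRequest) error {
	csr.Status.Conditions = append(csr.Status.Conditions, certificates.CertificateSigningRequestCondition{
		Type:    certificates.CertificateApproved,
		Reason:  "SketchApproved",                  // hypothetical reason
		Message: "approved in a usage sketch only", // hypothetical message
	})
	_, err := c.CertificateSigningRequests().UpdateApproval(csr)
	return err
}
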
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/doc.go
new file mode 100644
index 0000000..7711019
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1beta1
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/generated_expansion.go
new file mode 100644
index 0000000..f6df769
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/generated_expansion.go
@@ -0,0 +1,19 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/coordination_client.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/coordination_client.go
new file mode 100644
index 0000000..91a7648
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/coordination_client.go
@@ -0,0 +1,90 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	v1beta1 "k8s.io/api/coordination/v1beta1"
+	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+type CoordinationV1beta1Interface interface {
+	RESTClient() rest.Interface
+	LeasesGetter
+}
+
+// CoordinationV1beta1Client is used to interact with features provided by the coordination.k8s.io group.
+type CoordinationV1beta1Client struct {
+	restClient rest.Interface
+}
+
+func (c *CoordinationV1beta1Client) Leases(namespace string) LeaseInterface {
+	return newLeases(c, namespace)
+}
+
+// NewForConfig creates a new CoordinationV1beta1Client for the given config.
+func NewForConfig(c *rest.Config) (*CoordinationV1beta1Client, error) {
+	config := *c
+	if err := setConfigDefaults(&config); err != nil {
+		return nil, err
+	}
+	client, err := rest.RESTClientFor(&config)
+	if err != nil {
+		return nil, err
+	}
+	return &CoordinationV1beta1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new CoordinationV1beta1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *CoordinationV1beta1Client {
+	client, err := NewForConfig(c)
+	if err != nil {
+		panic(err)
+	}
+	return client
+}
+
+// New creates a new CoordinationV1beta1Client for the given RESTClient.
+func New(c rest.Interface) *CoordinationV1beta1Client {
+	return &CoordinationV1beta1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+	gv := v1beta1.SchemeGroupVersion
+	config.GroupVersion = &gv
+	config.APIPath = "/apis"
+	config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
+
+	if config.UserAgent == "" {
+		config.UserAgent = rest.DefaultKubernetesUserAgent()
+	}
+
+	return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with the API server by this client implementation.
+func (c *CoordinationV1beta1Client) RESTClient() rest.Interface {
+	if c == nil {
+		return nil
+	}
+	return c.restClient
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/doc.go
new file mode 100644
index 0000000..7711019
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1beta1
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/generated_expansion.go
new file mode 100644
index 0000000..dfd180d
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/generated_expansion.go
@@ -0,0 +1,21 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+type LeaseExpansion interface{}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go
new file mode 100644
index 0000000..490d815
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go
@@ -0,0 +1,174 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	"time"
+
+	v1beta1 "k8s.io/api/coordination/v1beta1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// LeasesGetter has a method to return a LeaseInterface.
+// A group's client should implement this interface.
+type LeasesGetter interface {
+	Leases(namespace string) LeaseInterface
+}
+
+// LeaseInterface has methods to work with Lease resources.
+type LeaseInterface interface {
+	Create(*v1beta1.Lease) (*v1beta1.Lease, error)
+	Update(*v1beta1.Lease) (*v1beta1.Lease, error)
+	Delete(name string, options *v1.DeleteOptions) error
+	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
+	Get(name string, options v1.GetOptions) (*v1beta1.Lease, error)
+	List(opts v1.ListOptions) (*v1beta1.LeaseList, error)
+	Watch(opts v1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Lease, err error)
+	LeaseExpansion
+}
+
+// leases implements LeaseInterface
+type leases struct {
+	client rest.Interface
+	ns     string
+}
+
+// newLeases returns a Leases
+func newLeases(c *CoordinationV1beta1Client, namespace string) *leases {
+	return &leases{
+		client: c.RESTClient(),
+		ns:     namespace,
+	}
+}
+
+// Get takes the name of the lease and returns the corresponding lease object, and an error if there is any.
+func (c *leases) Get(name string, options v1.GetOptions) (result *v1beta1.Lease, err error) {
+	result = &v1beta1.Lease{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("leases").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of Leases that match those selectors.
+func (c *leases) List(opts v1.ListOptions) (result *v1beta1.LeaseList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1beta1.LeaseList{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("leases").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested leases.
+func (c *leases) Watch(opts v1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Namespace(c.ns).
+		Resource("leases").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a lease and creates it.  Returns the server's representation of the lease, and an error, if there is any.
+func (c *leases) Create(lease *v1beta1.Lease) (result *v1beta1.Lease, err error) {
+	result = &v1beta1.Lease{}
+	err = c.client.Post().
+		Namespace(c.ns).
+		Resource("leases").
+		Body(lease).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a lease and updates it. Returns the server's representation of the lease, and an error, if there is any.
+func (c *leases) Update(lease *v1beta1.Lease) (result *v1beta1.Lease, err error) {
+	result = &v1beta1.Lease{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("leases").
+		Name(lease.Name).
+		Body(lease).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes the name of the lease and deletes it. Returns an error if one occurs.
+func (c *leases) Delete(name string, options *v1.DeleteOptions) error {
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("leases").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *leases) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("leases").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched lease.
+func (c *leases) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Lease, err error) {
+	result = &v1beta1.Lease{}
+	err = c.client.Patch(pt).
+		Namespace(c.ns).
+		Resource("leases").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
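
A get-or-create sketch over the Lease client above (not part of the vendored files; assumes k8s.io/apimachinery/pkg/api/errors is vendored for the IsNotFound check):

package sketch

import (
	coordinationv1beta1 "k8s.io/api/coordination/v1beta1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	coordclient "k8s.io/client-go/kubernetes/typed/coordination/v1beta1"
)

// claimLease fetches a Lease and rewrites its holder, creating it on first use.
func claimLease(c coordclient.CoordinationV1beta1Interface, ns, name, holder string) error {
	lease, err := c.Leases(ns).Get(name, metav1.GetOptions{})
	if apierrors.IsNotFound(err) {
		lease = &coordinationv1beta1.Lease{
			ObjectMeta: metav1.ObjectMeta{Name: name},
			Spec:       coordinationv1beta1.LeaseSpec{HolderIdentity: &holder},
		}
		_, err = c.Leases(ns).Create(lease)
		return err
	}
	if err != nil {
		return err
	}
	lease.Spec.HolderIdentity = &holder
	// Update carries the fetched ResourceVersion, so a concurrent writer
	// surfaces here as a Conflict error rather than a silent overwrite.
	_, err = c.Leases(ns).Update(lease)
	return err
}
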
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go
new file mode 100644
index 0000000..302b2fd
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go
@@ -0,0 +1,164 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	"time"
+
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// ComponentStatusesGetter has a method to return a ComponentStatusInterface.
+// A group's client should implement this interface.
+type ComponentStatusesGetter interface {
+	ComponentStatuses() ComponentStatusInterface
+}
+
+// ComponentStatusInterface has methods to work with ComponentStatus resources.
+type ComponentStatusInterface interface {
+	Create(*v1.ComponentStatus) (*v1.ComponentStatus, error)
+	Update(*v1.ComponentStatus) (*v1.ComponentStatus, error)
+	Delete(name string, options *metav1.DeleteOptions) error
+	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
+	Get(name string, options metav1.GetOptions) (*v1.ComponentStatus, error)
+	List(opts metav1.ListOptions) (*v1.ComponentStatusList, error)
+	Watch(opts metav1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ComponentStatus, err error)
+	ComponentStatusExpansion
+}
+
+// componentStatuses implements ComponentStatusInterface
+type componentStatuses struct {
+	client rest.Interface
+}
+
+// newComponentStatuses returns a ComponentStatuses
+func newComponentStatuses(c *CoreV1Client) *componentStatuses {
+	return &componentStatuses{
+		client: c.RESTClient(),
+	}
+}
+
+// Get takes the name of the componentStatus and returns the corresponding componentStatus object, and an error if there is any.
+func (c *componentStatuses) Get(name string, options metav1.GetOptions) (result *v1.ComponentStatus, err error) {
+	result = &v1.ComponentStatus{}
+	err = c.client.Get().
+		Resource("componentstatuses").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of ComponentStatuses that match those selectors.
+func (c *componentStatuses) List(opts metav1.ListOptions) (result *v1.ComponentStatusList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1.ComponentStatusList{}
+	err = c.client.Get().
+		Resource("componentstatuses").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested componentStatuses.
+func (c *componentStatuses) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Resource("componentstatuses").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a componentStatus and creates it.  Returns the server's representation of the componentStatus, and an error, if there is any.
+func (c *componentStatuses) Create(componentStatus *v1.ComponentStatus) (result *v1.ComponentStatus, err error) {
+	result = &v1.ComponentStatus{}
+	err = c.client.Post().
+		Resource("componentstatuses").
+		Body(componentStatus).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a componentStatus and updates it. Returns the server's representation of the componentStatus, and an error, if there is any.
+func (c *componentStatuses) Update(componentStatus *v1.ComponentStatus) (result *v1.ComponentStatus, err error) {
+	result = &v1.ComponentStatus{}
+	err = c.client.Put().
+		Resource("componentstatuses").
+		Name(componentStatus.Name).
+		Body(componentStatus).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes the name of the componentStatus and deletes it. Returns an error if one occurs.
+func (c *componentStatuses) Delete(name string, options *metav1.DeleteOptions) error {
+	return c.client.Delete().
+		Resource("componentstatuses").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *componentStatuses) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Resource("componentstatuses").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched componentStatus.
+func (c *componentStatuses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ComponentStatus, err error) {
+	result = &v1.ComponentStatus{}
+	err = c.client.Patch(pt).
+		Resource("componentstatuses").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go
new file mode 100644
index 0000000..18ce954
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go
@@ -0,0 +1,174 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	"time"
+
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// ConfigMapsGetter has a method to return a ConfigMapInterface.
+// A group's client should implement this interface.
+type ConfigMapsGetter interface {
+	ConfigMaps(namespace string) ConfigMapInterface
+}
+
+// ConfigMapInterface has methods to work with ConfigMap resources.
+type ConfigMapInterface interface {
+	Create(*v1.ConfigMap) (*v1.ConfigMap, error)
+	Update(*v1.ConfigMap) (*v1.ConfigMap, error)
+	Delete(name string, options *metav1.DeleteOptions) error
+	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
+	Get(name string, options metav1.GetOptions) (*v1.ConfigMap, error)
+	List(opts metav1.ListOptions) (*v1.ConfigMapList, error)
+	Watch(opts metav1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ConfigMap, err error)
+	ConfigMapExpansion
+}
+
+// configMaps implements ConfigMapInterface
+type configMaps struct {
+	client rest.Interface
+	ns     string
+}
+
+// newConfigMaps returns a ConfigMaps
+func newConfigMaps(c *CoreV1Client, namespace string) *configMaps {
+	return &configMaps{
+		client: c.RESTClient(),
+		ns:     namespace,
+	}
+}
+
+// Get takes the name of the configMap and returns the corresponding configMap object, and an error if there is any.
+func (c *configMaps) Get(name string, options metav1.GetOptions) (result *v1.ConfigMap, err error) {
+	result = &v1.ConfigMap{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("configmaps").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of ConfigMaps that match those selectors.
+func (c *configMaps) List(opts metav1.ListOptions) (result *v1.ConfigMapList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1.ConfigMapList{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("configmaps").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested configMaps.
+func (c *configMaps) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Namespace(c.ns).
+		Resource("configmaps").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a configMap and creates it.  Returns the server's representation of the configMap, and an error, if there is any.
+func (c *configMaps) Create(configMap *v1.ConfigMap) (result *v1.ConfigMap, err error) {
+	result = &v1.ConfigMap{}
+	err = c.client.Post().
+		Namespace(c.ns).
+		Resource("configmaps").
+		Body(configMap).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a configMap and updates it. Returns the server's representation of the configMap, and an error, if there is any.
+func (c *configMaps) Update(configMap *v1.ConfigMap) (result *v1.ConfigMap, err error) {
+	result = &v1.ConfigMap{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("configmaps").
+		Name(configMap.Name).
+		Body(configMap).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes the name of the configMap and deletes it. Returns an error if one occurs.
+func (c *configMaps) Delete(name string, options *metav1.DeleteOptions) error {
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("configmaps").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *configMaps) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("configmaps").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched configMap.
+func (c *configMaps) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ConfigMap, err error) {
+	result = &v1.ConfigMap{}
+	err = c.client.Patch(pt).
+		Namespace(c.ns).
+		Resource("configmaps").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
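
A small sketch of reading configuration through the ConfigMap client above (not part of the vendored files); the ConfigMap name and data key are hypothetical:

package sketch

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
)

// routingConfig pulls one key out of a ConfigMap; name and key are hypothetical.
func routingConfig(c corev1client.CoreV1Interface) (string, error) {
	cm, err := c.ConfigMaps("default").Get("affinity-router-config", metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	return cm.Data["config.yaml"], nil
}
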
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go
new file mode 100644
index 0000000..044a28e
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go
@@ -0,0 +1,165 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	v1 "k8s.io/api/core/v1"
+	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+type CoreV1Interface interface {
+	RESTClient() rest.Interface
+	ComponentStatusesGetter
+	ConfigMapsGetter
+	EndpointsGetter
+	EventsGetter
+	LimitRangesGetter
+	NamespacesGetter
+	NodesGetter
+	PersistentVolumesGetter
+	PersistentVolumeClaimsGetter
+	PodsGetter
+	PodTemplatesGetter
+	ReplicationControllersGetter
+	ResourceQuotasGetter
+	SecretsGetter
+	ServicesGetter
+	ServiceAccountsGetter
+}
+
+// CoreV1Client is used to interact with features provided by the core group.
+type CoreV1Client struct {
+	restClient rest.Interface
+}
+
+func (c *CoreV1Client) ComponentStatuses() ComponentStatusInterface {
+	return newComponentStatuses(c)
+}
+
+func (c *CoreV1Client) ConfigMaps(namespace string) ConfigMapInterface {
+	return newConfigMaps(c, namespace)
+}
+
+func (c *CoreV1Client) Endpoints(namespace string) EndpointsInterface {
+	return newEndpoints(c, namespace)
+}
+
+func (c *CoreV1Client) Events(namespace string) EventInterface {
+	return newEvents(c, namespace)
+}
+
+func (c *CoreV1Client) LimitRanges(namespace string) LimitRangeInterface {
+	return newLimitRanges(c, namespace)
+}
+
+func (c *CoreV1Client) Namespaces() NamespaceInterface {
+	return newNamespaces(c)
+}
+
+func (c *CoreV1Client) Nodes() NodeInterface {
+	return newNodes(c)
+}
+
+func (c *CoreV1Client) PersistentVolumes() PersistentVolumeInterface {
+	return newPersistentVolumes(c)
+}
+
+func (c *CoreV1Client) PersistentVolumeClaims(namespace string) PersistentVolumeClaimInterface {
+	return newPersistentVolumeClaims(c, namespace)
+}
+
+func (c *CoreV1Client) Pods(namespace string) PodInterface {
+	return newPods(c, namespace)
+}
+
+func (c *CoreV1Client) PodTemplates(namespace string) PodTemplateInterface {
+	return newPodTemplates(c, namespace)
+}
+
+func (c *CoreV1Client) ReplicationControllers(namespace string) ReplicationControllerInterface {
+	return newReplicationControllers(c, namespace)
+}
+
+func (c *CoreV1Client) ResourceQuotas(namespace string) ResourceQuotaInterface {
+	return newResourceQuotas(c, namespace)
+}
+
+func (c *CoreV1Client) Secrets(namespace string) SecretInterface {
+	return newSecrets(c, namespace)
+}
+
+func (c *CoreV1Client) Services(namespace string) ServiceInterface {
+	return newServices(c, namespace)
+}
+
+func (c *CoreV1Client) ServiceAccounts(namespace string) ServiceAccountInterface {
+	return newServiceAccounts(c, namespace)
+}
+
+// NewForConfig creates a new CoreV1Client for the given config.
+func NewForConfig(c *rest.Config) (*CoreV1Client, error) {
+	config := *c
+	if err := setConfigDefaults(&config); err != nil {
+		return nil, err
+	}
+	client, err := rest.RESTClientFor(&config)
+	if err != nil {
+		return nil, err
+	}
+	return &CoreV1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new CoreV1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *CoreV1Client {
+	client, err := NewForConfig(c)
+	if err != nil {
+		panic(err)
+	}
+	return client
+}
+
+// New creates a new CoreV1Client for the given RESTClient.
+func New(c rest.Interface) *CoreV1Client {
+	return &CoreV1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+	gv := v1.SchemeGroupVersion
+	config.GroupVersion = &gv
+	config.APIPath = "/api"
+	config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
+
+	if config.UserAgent == "" {
+		config.UserAgent = rest.DefaultKubernetesUserAgent()
+	}
+
+	return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with the API server by this client implementation.
+func (c *CoreV1Client) RESTClient() rest.Interface {
+	if c == nil {
+		return nil
+	}
+	return c.restClient
+}
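
The accessors above hand out one typed client per core resource. A sketch of consuming a watch stream from one of them (not part of the vendored files):

package sketch

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
)

// logEndpointEvents consumes one watch stream; event types are
// ADDED, MODIFIED, DELETED or ERROR.
func logEndpointEvents(c corev1client.CoreV1Interface, ns string) error {
	w, err := c.Endpoints(ns).Watch(metav1.ListOptions{})
	if err != nil {
		return err
	}
	defer w.Stop()
	for ev := range w.ResultChan() {
		fmt.Println("endpoints event:", ev.Type)
	}
	return nil
}
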
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/doc.go
new file mode 100644
index 0000000..3af5d05
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go
new file mode 100644
index 0000000..978a2a1
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go
@@ -0,0 +1,174 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	"time"
+
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// EndpointsGetter has a method to return an EndpointsInterface.
+// A group's client should implement this interface.
+type EndpointsGetter interface {
+	Endpoints(namespace string) EndpointsInterface
+}
+
+// EndpointsInterface has methods to work with Endpoints resources.
+type EndpointsInterface interface {
+	Create(*v1.Endpoints) (*v1.Endpoints, error)
+	Update(*v1.Endpoints) (*v1.Endpoints, error)
+	Delete(name string, options *metav1.DeleteOptions) error
+	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
+	Get(name string, options metav1.GetOptions) (*v1.Endpoints, error)
+	List(opts metav1.ListOptions) (*v1.EndpointsList, error)
+	Watch(opts metav1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Endpoints, err error)
+	EndpointsExpansion
+}
+
+// endpoints implements EndpointsInterface
+type endpoints struct {
+	client rest.Interface
+	ns     string
+}
+
+// newEndpoints returns an Endpoints
+func newEndpoints(c *CoreV1Client, namespace string) *endpoints {
+	return &endpoints{
+		client: c.RESTClient(),
+		ns:     namespace,
+	}
+}
+
+// Get takes the name of the endpoints, and returns the corresponding endpoints object, and an error if there is any.
+func (c *endpoints) Get(name string, options metav1.GetOptions) (result *v1.Endpoints, err error) {
+	result = &v1.Endpoints{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("endpoints").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of Endpoints that match those selectors.
+func (c *endpoints) List(opts metav1.ListOptions) (result *v1.EndpointsList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1.EndpointsList{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("endpoints").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested endpoints.
+func (c *endpoints) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Namespace(c.ns).
+		Resource("endpoints").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of an endpoints and creates it. Returns the server's representation of the endpoints, and an error, if there is any.
+func (c *endpoints) Create(endpoints *v1.Endpoints) (result *v1.Endpoints, err error) {
+	result = &v1.Endpoints{}
+	err = c.client.Post().
+		Namespace(c.ns).
+		Resource("endpoints").
+		Body(endpoints).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of an endpoints and updates it. Returns the server's representation of the endpoints, and an error, if there is any.
+func (c *endpoints) Update(endpoints *v1.Endpoints) (result *v1.Endpoints, err error) {
+	result = &v1.Endpoints{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("endpoints").
+		Name(endpoints.Name).
+		Body(endpoints).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes the name of the endpoints and deletes it. Returns an error if one occurs.
+func (c *endpoints) Delete(name string, options *metav1.DeleteOptions) error {
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("endpoints").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *endpoints) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("endpoints").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched endpoints.
+func (c *endpoints) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Endpoints, err error) {
+	result = &v1.Endpoints{}
+	err = c.client.Patch(pt).
+		Namespace(c.ns).
+		Resource("endpoints").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
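
As a usage sketch for the typed Endpoints client above (the namespace argument, function name, and client variable are assumptions, not taken from this change):

    import (
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
    )

    // watchEndpoints lists Endpoints once, then watches from the returned
    // resource version so no intermediate change is missed.
    func watchEndpoints(client *corev1.CoreV1Client, ns string) error {
        list, err := client.Endpoints(ns).List(metav1.ListOptions{})
        if err != nil {
            return err
        }
        w, err := client.Endpoints(ns).Watch(metav1.ListOptions{
            ResourceVersion: list.ResourceVersion,
        })
        if err != nil {
            return err
        }
        defer w.Stop()
        for ev := range w.ResultChan() {
            fmt.Println(ev.Type) // ADDED, MODIFIED, DELETED, ERROR
        }
        return nil
    }
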
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go
new file mode 100644
index 0000000..55cfa09
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go
@@ -0,0 +1,174 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	"time"
+
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// EventsGetter has a method to return an EventInterface.
+// A group's client should implement this interface.
+type EventsGetter interface {
+	Events(namespace string) EventInterface
+}
+
+// EventInterface has methods to work with Event resources.
+type EventInterface interface {
+	Create(*v1.Event) (*v1.Event, error)
+	Update(*v1.Event) (*v1.Event, error)
+	Delete(name string, options *metav1.DeleteOptions) error
+	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
+	Get(name string, options metav1.GetOptions) (*v1.Event, error)
+	List(opts metav1.ListOptions) (*v1.EventList, error)
+	Watch(opts metav1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Event, err error)
+	EventExpansion
+}
+
+// events implements EventInterface
+type events struct {
+	client rest.Interface
+	ns     string
+}
+
+// newEvents returns an Events
+func newEvents(c *CoreV1Client, namespace string) *events {
+	return &events{
+		client: c.RESTClient(),
+		ns:     namespace,
+	}
+}
+
+// Get takes the name of the event, and returns the corresponding event object, and an error if there is any.
+func (c *events) Get(name string, options metav1.GetOptions) (result *v1.Event, err error) {
+	result = &v1.Event{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("events").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of Events that match those selectors.
+func (c *events) List(opts metav1.ListOptions) (result *v1.EventList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1.EventList{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("events").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested events.
+func (c *events) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Namespace(c.ns).
+		Resource("events").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of an event and creates it. Returns the server's representation of the event, and an error, if there is any.
+func (c *events) Create(event *v1.Event) (result *v1.Event, err error) {
+	result = &v1.Event{}
+	err = c.client.Post().
+		Namespace(c.ns).
+		Resource("events").
+		Body(event).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of an event and updates it. Returns the server's representation of the event, and an error, if there is any.
+func (c *events) Update(event *v1.Event) (result *v1.Event, err error) {
+	result = &v1.Event{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("events").
+		Name(event.Name).
+		Body(event).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes the name of the event and deletes it. Returns an error if one occurs.
+func (c *events) Delete(name string, options *metav1.DeleteOptions) error {
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("events").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *events) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("events").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched event.
+func (c *events) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Event, err error) {
+	result = &v1.Event{}
+	err = c.client.Patch(pt).
+		Namespace(c.ns).
+		Resource("events").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go
new file mode 100644
index 0000000..6929ade
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go
@@ -0,0 +1,164 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"fmt"
+
+	"k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/fields"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
+	ref "k8s.io/client-go/tools/reference"
+)
+
+// The EventExpansion interface allows manually adding extra methods to the EventInterface.
+type EventExpansion interface {
+	// CreateWithEventNamespace is the same as Create, except that it sends the request to the event.Namespace.
+	CreateWithEventNamespace(event *v1.Event) (*v1.Event, error)
+	// UpdateWithEventNamespace is the same as Update, except that it sends the request to the event.Namespace.
+	UpdateWithEventNamespace(event *v1.Event) (*v1.Event, error)
+	PatchWithEventNamespace(event *v1.Event, data []byte) (*v1.Event, error)
+	// Search finds events about the specified object.
+	Search(scheme *runtime.Scheme, objOrRef runtime.Object) (*v1.EventList, error)
+	// Returns the appropriate field selector based on the API version being used to communicate with the server.
+	// The returned field selector can be used with List and Watch to filter desired events.
+	GetFieldSelector(involvedObjectName, involvedObjectNamespace, involvedObjectKind, involvedObjectUID *string) fields.Selector
+}
+
+// CreateWithEventNamespace makes a new event. Returns the copy of the event the server returns,
+// or an error. The namespace to create the event within is deduced from the
+// event; it must either match this event client's namespace, or this event
+// client must have been created with the "" namespace.
+func (e *events) CreateWithEventNamespace(event *v1.Event) (*v1.Event, error) {
+	if e.ns != "" && event.Namespace != e.ns {
+		return nil, fmt.Errorf("can't create an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns)
+	}
+	result := &v1.Event{}
+	err := e.client.Post().
+		NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0).
+		Resource("events").
+		Body(event).
+		Do().
+		Into(result)
+	return result, err
+}
+
+// UpdateWithEventNamespace modifies an existing event. It returns the copy of the event that the server returns,
+// or an error. The namespace and key to update the event within is deduced from the event. The
+// namespace must either match this event client's namespace, or this event client must have been
+// created with the "" namespace. Update also requires the ResourceVersion to be set in the event
+// object.
+func (e *events) UpdateWithEventNamespace(event *v1.Event) (*v1.Event, error) {
+	result := &v1.Event{}
+	err := e.client.Put().
+		NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0).
+		Resource("events").
+		Name(event.Name).
+		Body(event).
+		Do().
+		Into(result)
+	return result, err
+}
+
+// PatchWithEventNamespace modifies an existing event. It returns the copy of
+// the event that the server returns, or an error. The namespace and name of the
+// target event is deduced from the incompleteEvent. The namespace must either
+// match this event client's namespace, or this event client must have been
+// created with the "" namespace.
+func (e *events) PatchWithEventNamespace(incompleteEvent *v1.Event, data []byte) (*v1.Event, error) {
+	if e.ns != "" && incompleteEvent.Namespace != e.ns {
+		return nil, fmt.Errorf("can't patch an event with namespace '%v' in namespace '%v'", incompleteEvent.Namespace, e.ns)
+	}
+	result := &v1.Event{}
+	err := e.client.Patch(types.StrategicMergePatchType).
+		NamespaceIfScoped(incompleteEvent.Namespace, len(incompleteEvent.Namespace) > 0).
+		Resource("events").
+		Name(incompleteEvent.Name).
+		Body(data).
+		Do().
+		Into(result)
+	return result, err
+}
+
+// Search finds events about the specified object. The namespace of the
+// object must match this event client's namespace unless the event client
+// was made with the "" namespace.
+func (e *events) Search(scheme *runtime.Scheme, objOrRef runtime.Object) (*v1.EventList, error) {
+	ref, err := ref.GetReference(scheme, objOrRef)
+	if err != nil {
+		return nil, err
+	}
+	if e.ns != "" && ref.Namespace != e.ns {
+		return nil, fmt.Errorf("won't be able to find any events of namespace '%v' in namespace '%v'", ref.Namespace, e.ns)
+	}
+	stringRefKind := string(ref.Kind)
+	var refKind *string
+	if stringRefKind != "" {
+		refKind = &stringRefKind
+	}
+	stringRefUID := string(ref.UID)
+	var refUID *string
+	if stringRefUID != "" {
+		refUID = &stringRefUID
+	}
+	fieldSelector := e.GetFieldSelector(&ref.Name, &ref.Namespace, refKind, refUID)
+	return e.List(metav1.ListOptions{FieldSelector: fieldSelector.String()})
+}
+
+// GetFieldSelector returns the appropriate field selector based on the API version being used to communicate with the server.
+// The returned field selector can be used with List and Watch to filter desired events.
+func (e *events) GetFieldSelector(involvedObjectName, involvedObjectNamespace, involvedObjectKind, involvedObjectUID *string) fields.Selector {
+	apiVersion := e.client.APIVersion().String()
+	field := fields.Set{}
+	if involvedObjectName != nil {
+		field[GetInvolvedObjectNameFieldLabel(apiVersion)] = *involvedObjectName
+	}
+	if involvedObjectNamespace != nil {
+		field["involvedObject.namespace"] = *involvedObjectNamespace
+	}
+	if involvedObjectKind != nil {
+		field["involvedObject.kind"] = *involvedObjectKind
+	}
+	if involvedObjectUID != nil {
+		field["involvedObject.uid"] = *involvedObjectUID
+	}
+	return field.AsSelector()
+}
+
+// GetInvolvedObjectNameFieldLabel returns the appropriate field label to use for the name of the involved object as per the given API version.
+func GetInvolvedObjectNameFieldLabel(version string) string {
+	return "involvedObject.name"
+}
+
+// TODO: This is a temporary arrangement and will be removed once all clients are moved to use the clientset.
+type EventSinkImpl struct {
+	Interface EventInterface
+}
+
+func (e *EventSinkImpl) Create(event *v1.Event) (*v1.Event, error) {
+	return e.Interface.CreateWithEventNamespace(event)
+}
+
+func (e *EventSinkImpl) Update(event *v1.Event) (*v1.Event, error) {
+	return e.Interface.UpdateWithEventNamespace(event)
+}
+
+func (e *EventSinkImpl) Patch(event *v1.Event, data []byte) (*v1.Event, error) {
+	return e.Interface.PatchWithEventNamespace(event, data)
+}
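
A sketch of the expansion methods in use, finding the events recorded for a given pod (the function name is illustrative; the pod value is assumed to come from elsewhere in the daemon):

    import (
        "fmt"

        v1 "k8s.io/api/core/v1"
        "k8s.io/client-go/kubernetes/scheme"
        corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
    )

    func printEventsFor(client *corev1.CoreV1Client, pod *v1.Pod) error {
        // Search builds the involvedObject field selector from a reference
        // to the pod and lists the matching events in the pod's namespace.
        list, err := client.Events(pod.Namespace).Search(scheme.Scheme, pod)
        if err != nil {
            return err
        }
        for _, e := range list.Items {
            fmt.Printf("%s\t%s\t%s\n", e.Type, e.Reason, e.Message)
        }
        return nil
    }
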
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/generated_expansion.go
new file mode 100644
index 0000000..6e8591b
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/generated_expansion.go
@@ -0,0 +1,39 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+type ComponentStatusExpansion interface{}
+
+type ConfigMapExpansion interface{}
+
+type EndpointsExpansion interface{}
+
+type LimitRangeExpansion interface{}
+
+type PersistentVolumeExpansion interface{}
+
+type PersistentVolumeClaimExpansion interface{}
+
+type PodTemplateExpansion interface{}
+
+type ReplicationControllerExpansion interface{}
+
+type ResourceQuotaExpansion interface{}
+
+type SecretExpansion interface{}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go
new file mode 100644
index 0000000..2eeae11
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go
@@ -0,0 +1,174 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	"time"
+
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// LimitRangesGetter has a method to return a LimitRangeInterface.
+// A group's client should implement this interface.
+type LimitRangesGetter interface {
+	LimitRanges(namespace string) LimitRangeInterface
+}
+
+// LimitRangeInterface has methods to work with LimitRange resources.
+type LimitRangeInterface interface {
+	Create(*v1.LimitRange) (*v1.LimitRange, error)
+	Update(*v1.LimitRange) (*v1.LimitRange, error)
+	Delete(name string, options *metav1.DeleteOptions) error
+	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
+	Get(name string, options metav1.GetOptions) (*v1.LimitRange, error)
+	List(opts metav1.ListOptions) (*v1.LimitRangeList, error)
+	Watch(opts metav1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.LimitRange, err error)
+	LimitRangeExpansion
+}
+
+// limitRanges implements LimitRangeInterface
+type limitRanges struct {
+	client rest.Interface
+	ns     string
+}
+
+// newLimitRanges returns a LimitRanges
+func newLimitRanges(c *CoreV1Client, namespace string) *limitRanges {
+	return &limitRanges{
+		client: c.RESTClient(),
+		ns:     namespace,
+	}
+}
+
+// Get takes the name of the limitRange, and returns the corresponding limitRange object, and an error if there is any.
+func (c *limitRanges) Get(name string, options metav1.GetOptions) (result *v1.LimitRange, err error) {
+	result = &v1.LimitRange{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("limitranges").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of LimitRanges that match those selectors.
+func (c *limitRanges) List(opts metav1.ListOptions) (result *v1.LimitRangeList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1.LimitRangeList{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("limitranges").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested limitRanges.
+func (c *limitRanges) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Namespace(c.ns).
+		Resource("limitranges").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a limitRange and creates it. Returns the server's representation of the limitRange, and an error, if there is any.
+func (c *limitRanges) Create(limitRange *v1.LimitRange) (result *v1.LimitRange, err error) {
+	result = &v1.LimitRange{}
+	err = c.client.Post().
+		Namespace(c.ns).
+		Resource("limitranges").
+		Body(limitRange).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a limitRange and updates it. Returns the server's representation of the limitRange, and an error, if there is any.
+func (c *limitRanges) Update(limitRange *v1.LimitRange) (result *v1.LimitRange, err error) {
+	result = &v1.LimitRange{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("limitranges").
+		Name(limitRange.Name).
+		Body(limitRange).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes the name of the limitRange and deletes it. Returns an error if one occurs.
+func (c *limitRanges) Delete(name string, options *metav1.DeleteOptions) error {
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("limitranges").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *limitRanges) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("limitranges").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched limitRange.
+func (c *limitRanges) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.LimitRange, err error) {
+	result = &v1.LimitRange{}
+	err = c.client.Patch(pt).
+		Namespace(c.ns).
+		Resource("limitranges").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go
new file mode 100644
index 0000000..8a81fe8
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go
@@ -0,0 +1,164 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	"time"
+
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// NamespacesGetter has a method to return a NamespaceInterface.
+// A group's client should implement this interface.
+type NamespacesGetter interface {
+	Namespaces() NamespaceInterface
+}
+
+// NamespaceInterface has methods to work with Namespace resources.
+type NamespaceInterface interface {
+	Create(*v1.Namespace) (*v1.Namespace, error)
+	Update(*v1.Namespace) (*v1.Namespace, error)
+	UpdateStatus(*v1.Namespace) (*v1.Namespace, error)
+	Delete(name string, options *metav1.DeleteOptions) error
+	Get(name string, options metav1.GetOptions) (*v1.Namespace, error)
+	List(opts metav1.ListOptions) (*v1.NamespaceList, error)
+	Watch(opts metav1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Namespace, err error)
+	NamespaceExpansion
+}
+
+// namespaces implements NamespaceInterface
+type namespaces struct {
+	client rest.Interface
+}
+
+// newNamespaces returns a Namespaces
+func newNamespaces(c *CoreV1Client) *namespaces {
+	return &namespaces{
+		client: c.RESTClient(),
+	}
+}
+
+// Get takes the name of the namespace, and returns the corresponding namespace object, and an error if there is any.
+func (c *namespaces) Get(name string, options metav1.GetOptions) (result *v1.Namespace, err error) {
+	result = &v1.Namespace{}
+	err = c.client.Get().
+		Resource("namespaces").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of Namespaces that match those selectors.
+func (c *namespaces) List(opts metav1.ListOptions) (result *v1.NamespaceList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1.NamespaceList{}
+	err = c.client.Get().
+		Resource("namespaces").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested namespaces.
+func (c *namespaces) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Resource("namespaces").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a namespace and creates it. Returns the server's representation of the namespace, and an error, if there is any.
+func (c *namespaces) Create(namespace *v1.Namespace) (result *v1.Namespace, err error) {
+	result = &v1.Namespace{}
+	err = c.client.Post().
+		Resource("namespaces").
+		Body(namespace).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a namespace and updates it. Returns the server's representation of the namespace, and an error, if there is any.
+func (c *namespaces) Update(namespace *v1.Namespace) (result *v1.Namespace, err error) {
+	result = &v1.Namespace{}
+	err = c.client.Put().
+		Resource("namespaces").
+		Name(namespace.Name).
+		Body(namespace).
+		Do().
+		Into(result)
+	return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+
+func (c *namespaces) UpdateStatus(namespace *v1.Namespace) (result *v1.Namespace, err error) {
+	result = &v1.Namespace{}
+	err = c.client.Put().
+		Resource("namespaces").
+		Name(namespace.Name).
+		SubResource("status").
+		Body(namespace).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes the name of the namespace and deletes it. Returns an error if one occurs.
+func (c *namespaces) Delete(name string, options *metav1.DeleteOptions) error {
+	return c.client.Delete().
+		Resource("namespaces").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched namespace.
+func (c *namespaces) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Namespace, err error) {
+	result = &v1.Namespace{}
+	err = c.client.Patch(pt).
+		Resource("namespaces").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go
new file mode 100644
index 0000000..17effe2
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go
@@ -0,0 +1,31 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import "k8s.io/api/core/v1"
+
+// The NamespaceExpansion interface allows manually adding extra methods to the NamespaceInterface.
+type NamespaceExpansion interface {
+	Finalize(item *v1.Namespace) (*v1.Namespace, error)
+}
+
+// Finalize takes the representation of a namespace to update. Returns the server's representation of the namespace, and an error, if it occurs.
+func (c *namespaces) Finalize(namespace *v1.Namespace) (result *v1.Namespace, err error) {
+	result = &v1.Namespace{}
+	err = c.client.Put().Resource("namespaces").Name(namespace.Name).SubResource("finalize").Body(namespace).Do().Into(result)
+	return
+}
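
Finalize is the namespace-deletion escape hatch: it PUTs to the "finalize" subresource rather than to the namespace itself. A sketch under assumed names (the function and namespace name are illustrative):

    import (
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
    )

    // clearFinalizers fetches the namespace, drops its finalizers, and
    // submits it via the finalize subresource so a stuck deletion can complete.
    func clearFinalizers(client *corev1.CoreV1Client, name string) error {
        ns, err := client.Namespaces().Get(name, metav1.GetOptions{})
        if err != nil {
            return err
        }
        ns.Spec.Finalizers = nil
        _, err = client.Namespaces().Finalize(ns)
        return err
    }
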
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go
new file mode 100644
index 0000000..d19fab8
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go
@@ -0,0 +1,180 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	"time"
+
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// NodesGetter has a method to return a NodeInterface.
+// A group's client should implement this interface.
+type NodesGetter interface {
+	Nodes() NodeInterface
+}
+
+// NodeInterface has methods to work with Node resources.
+type NodeInterface interface {
+	Create(*v1.Node) (*v1.Node, error)
+	Update(*v1.Node) (*v1.Node, error)
+	UpdateStatus(*v1.Node) (*v1.Node, error)
+	Delete(name string, options *metav1.DeleteOptions) error
+	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
+	Get(name string, options metav1.GetOptions) (*v1.Node, error)
+	List(opts metav1.ListOptions) (*v1.NodeList, error)
+	Watch(opts metav1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Node, err error)
+	NodeExpansion
+}
+
+// nodes implements NodeInterface
+type nodes struct {
+	client rest.Interface
+}
+
+// newNodes returns a Nodes
+func newNodes(c *CoreV1Client) *nodes {
+	return &nodes{
+		client: c.RESTClient(),
+	}
+}
+
+// Get takes the name of the node, and returns the corresponding node object, and an error if there is any.
+func (c *nodes) Get(name string, options metav1.GetOptions) (result *v1.Node, err error) {
+	result = &v1.Node{}
+	err = c.client.Get().
+		Resource("nodes").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of Nodes that match those selectors.
+func (c *nodes) List(opts metav1.ListOptions) (result *v1.NodeList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1.NodeList{}
+	err = c.client.Get().
+		Resource("nodes").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested nodes.
+func (c *nodes) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Resource("nodes").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a node and creates it. Returns the server's representation of the node, and an error, if there is any.
+func (c *nodes) Create(node *v1.Node) (result *v1.Node, err error) {
+	result = &v1.Node{}
+	err = c.client.Post().
+		Resource("nodes").
+		Body(node).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a node and updates it. Returns the server's representation of the node, and an error, if there is any.
+func (c *nodes) Update(node *v1.Node) (result *v1.Node, err error) {
+	result = &v1.Node{}
+	err = c.client.Put().
+		Resource("nodes").
+		Name(node.Name).
+		Body(node).
+		Do().
+		Into(result)
+	return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+
+func (c *nodes) UpdateStatus(node *v1.Node) (result *v1.Node, err error) {
+	result = &v1.Node{}
+	err = c.client.Put().
+		Resource("nodes").
+		Name(node.Name).
+		SubResource("status").
+		Body(node).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes the name of the node and deletes it. Returns an error if one occurs.
+func (c *nodes) Delete(name string, options *metav1.DeleteOptions) error {
+	return c.client.Delete().
+		Resource("nodes").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *nodes) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Resource("nodes").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched node.
+func (c *nodes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Node, err error) {
+	result = &v1.Node{}
+	err = c.client.Patch(pt).
+		Resource("nodes").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go
new file mode 100644
index 0000000..5db29c3
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go
@@ -0,0 +1,43 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/types"
+)
+
+// The NodeExpansion interface allows manually adding extra methods to the NodeInterface.
+type NodeExpansion interface {
+	// PatchStatus modifies the status of an existing node. It returns the copy
+	// of the node that the server returns, or an error.
+	PatchStatus(nodeName string, data []byte) (*v1.Node, error)
+}
+
+// PatchStatus modifies the status of an existing node. It returns the copy of
+// the node that the server returns, or an error.
+func (c *nodes) PatchStatus(nodeName string, data []byte) (*v1.Node, error) {
+	result := &v1.Node{}
+	err := c.client.Patch(types.StrategicMergePatchType).
+		Resource("nodes").
+		Name(nodeName).
+		SubResource("status").
+		Body(data).
+		Do().
+		Into(result)
+	return result, err
+}
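
PatchStatus sends a strategic-merge patch to nodes/{name}/status; a sketch with an illustrative function name, node name, and payload (node status conditions merge by their "type" key):

    import (
        corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
    )

    func markNodeReady(client *corev1.CoreV1Client, nodeName string) error {
        // Strategic-merge patch bodies are plain JSON; only the listed
        // condition is merged, the rest of the status is left untouched.
        patch := []byte(`{"status":{"conditions":[{"type":"Ready","status":"True"}]}}`)
        _, err := client.Nodes().PatchStatus(nodeName, patch)
        return err
    }
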
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go
new file mode 100644
index 0000000..7451482
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go
@@ -0,0 +1,180 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	"time"
+
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// PersistentVolumesGetter has a method to return a PersistentVolumeInterface.
+// A group's client should implement this interface.
+type PersistentVolumesGetter interface {
+	PersistentVolumes() PersistentVolumeInterface
+}
+
+// PersistentVolumeInterface has methods to work with PersistentVolume resources.
+type PersistentVolumeInterface interface {
+	Create(*v1.PersistentVolume) (*v1.PersistentVolume, error)
+	Update(*v1.PersistentVolume) (*v1.PersistentVolume, error)
+	UpdateStatus(*v1.PersistentVolume) (*v1.PersistentVolume, error)
+	Delete(name string, options *metav1.DeleteOptions) error
+	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
+	Get(name string, options metav1.GetOptions) (*v1.PersistentVolume, error)
+	List(opts metav1.ListOptions) (*v1.PersistentVolumeList, error)
+	Watch(opts metav1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PersistentVolume, err error)
+	PersistentVolumeExpansion
+}
+
+// persistentVolumes implements PersistentVolumeInterface
+type persistentVolumes struct {
+	client rest.Interface
+}
+
+// newPersistentVolumes returns a PersistentVolumes
+func newPersistentVolumes(c *CoreV1Client) *persistentVolumes {
+	return &persistentVolumes{
+		client: c.RESTClient(),
+	}
+}
+
+// Get takes the name of the persistentVolume, and returns the corresponding persistentVolume object, and an error if there is any.
+func (c *persistentVolumes) Get(name string, options metav1.GetOptions) (result *v1.PersistentVolume, err error) {
+	result = &v1.PersistentVolume{}
+	err = c.client.Get().
+		Resource("persistentvolumes").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of PersistentVolumes that match those selectors.
+func (c *persistentVolumes) List(opts metav1.ListOptions) (result *v1.PersistentVolumeList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1.PersistentVolumeList{}
+	err = c.client.Get().
+		Resource("persistentvolumes").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested persistentVolumes.
+func (c *persistentVolumes) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Resource("persistentvolumes").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a persistentVolume and creates it. Returns the server's representation of the persistentVolume, and an error, if there is any.
+func (c *persistentVolumes) Create(persistentVolume *v1.PersistentVolume) (result *v1.PersistentVolume, err error) {
+	result = &v1.PersistentVolume{}
+	err = c.client.Post().
+		Resource("persistentvolumes").
+		Body(persistentVolume).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a persistentVolume and updates it. Returns the server's representation of the persistentVolume, and an error, if there is any.
+func (c *persistentVolumes) Update(persistentVolume *v1.PersistentVolume) (result *v1.PersistentVolume, err error) {
+	result = &v1.PersistentVolume{}
+	err = c.client.Put().
+		Resource("persistentvolumes").
+		Name(persistentVolume.Name).
+		Body(persistentVolume).
+		Do().
+		Into(result)
+	return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+
+func (c *persistentVolumes) UpdateStatus(persistentVolume *v1.PersistentVolume) (result *v1.PersistentVolume, err error) {
+	result = &v1.PersistentVolume{}
+	err = c.client.Put().
+		Resource("persistentvolumes").
+		Name(persistentVolume.Name).
+		SubResource("status").
+		Body(persistentVolume).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes the name of the persistentVolume and deletes it. Returns an error if one occurs.
+func (c *persistentVolumes) Delete(name string, options *metav1.DeleteOptions) error {
+	return c.client.Delete().
+		Resource("persistentvolumes").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *persistentVolumes) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Resource("persistentvolumes").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched persistentVolume.
+func (c *persistentVolumes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PersistentVolume, err error) {
+	result = &v1.PersistentVolume{}
+	err = c.client.Patch(pt).
+		Resource("persistentvolumes").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go
new file mode 100644
index 0000000..410ab37
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go
@@ -0,0 +1,191 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	"time"
+
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// PersistentVolumeClaimsGetter has a method to return a PersistentVolumeClaimInterface.
+// A group's client should implement this interface.
+type PersistentVolumeClaimsGetter interface {
+	PersistentVolumeClaims(namespace string) PersistentVolumeClaimInterface
+}
+
+// PersistentVolumeClaimInterface has methods to work with PersistentVolumeClaim resources.
+type PersistentVolumeClaimInterface interface {
+	Create(*v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error)
+	Update(*v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error)
+	UpdateStatus(*v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error)
+	Delete(name string, options *metav1.DeleteOptions) error
+	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
+	Get(name string, options metav1.GetOptions) (*v1.PersistentVolumeClaim, error)
+	List(opts metav1.ListOptions) (*v1.PersistentVolumeClaimList, error)
+	Watch(opts metav1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PersistentVolumeClaim, err error)
+	PersistentVolumeClaimExpansion
+}
+
+// persistentVolumeClaims implements PersistentVolumeClaimInterface
+type persistentVolumeClaims struct {
+	client rest.Interface
+	ns     string
+}
+
+// newPersistentVolumeClaims returns a PersistentVolumeClaims
+func newPersistentVolumeClaims(c *CoreV1Client, namespace string) *persistentVolumeClaims {
+	return &persistentVolumeClaims{
+		client: c.RESTClient(),
+		ns:     namespace,
+	}
+}
+
+// Get takes the name of the persistentVolumeClaim, and returns the corresponding persistentVolumeClaim object, and an error if there is any.
+func (c *persistentVolumeClaims) Get(name string, options metav1.GetOptions) (result *v1.PersistentVolumeClaim, err error) {
+	result = &v1.PersistentVolumeClaim{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("persistentvolumeclaims").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of PersistentVolumeClaims that match those selectors.
+func (c *persistentVolumeClaims) List(opts metav1.ListOptions) (result *v1.PersistentVolumeClaimList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1.PersistentVolumeClaimList{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("persistentvolumeclaims").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested persistentVolumeClaims.
+func (c *persistentVolumeClaims) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Namespace(c.ns).
+		Resource("persistentvolumeclaims").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a persistentVolumeClaim and creates it. Returns the server's representation of the persistentVolumeClaim, and an error, if there is any.
+func (c *persistentVolumeClaims) Create(persistentVolumeClaim *v1.PersistentVolumeClaim) (result *v1.PersistentVolumeClaim, err error) {
+	result = &v1.PersistentVolumeClaim{}
+	err = c.client.Post().
+		Namespace(c.ns).
+		Resource("persistentvolumeclaims").
+		Body(persistentVolumeClaim).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a persistentVolumeClaim and updates it. Returns the server's representation of the persistentVolumeClaim, and an error, if there is any.
+func (c *persistentVolumeClaims) Update(persistentVolumeClaim *v1.PersistentVolumeClaim) (result *v1.PersistentVolumeClaim, err error) {
+	result = &v1.PersistentVolumeClaim{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("persistentvolumeclaims").
+		Name(persistentVolumeClaim.Name).
+		Body(persistentVolumeClaim).
+		Do().
+		Into(result)
+	return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+
+func (c *persistentVolumeClaims) UpdateStatus(persistentVolumeClaim *v1.PersistentVolumeClaim) (result *v1.PersistentVolumeClaim, err error) {
+	result = &v1.PersistentVolumeClaim{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("persistentvolumeclaims").
+		Name(persistentVolumeClaim.Name).
+		SubResource("status").
+		Body(persistentVolumeClaim).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes the name of the persistentVolumeClaim and deletes it. Returns an error if one occurs.
+func (c *persistentVolumeClaims) Delete(name string, options *metav1.DeleteOptions) error {
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("persistentvolumeclaims").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *persistentVolumeClaims) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("persistentvolumeclaims").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched persistentVolumeClaim.
+func (c *persistentVolumeClaims) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PersistentVolumeClaim, err error) {
+	result = &v1.PersistentVolumeClaim{}
+	err = c.client.Patch(pt).
+		Namespace(c.ns).
+		Resource("persistentvolumeclaims").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
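
The generated Patch takes the patch type explicitly; a sketch of a JSON merge patch adding a label to a claim (the function name, namespace, claim name, and label are illustrative):

    import (
        types "k8s.io/apimachinery/pkg/types"
        corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
    )

    func labelClaim(client *corev1.CoreV1Client) error {
        // A merge patch that only touches metadata.labels.
        patch := []byte(`{"metadata":{"labels":{"affinity-router":"managed"}}}`)
        _, err := client.PersistentVolumeClaims("default").Patch(
            "my-claim", types.MergePatchType, patch)
        return err
    }
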
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go
new file mode 100644
index 0000000..8d6b6e8
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go
@@ -0,0 +1,191 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	"time"
+
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// PodsGetter has a method to return a PodInterface.
+// A group's client should implement this interface.
+type PodsGetter interface {
+	Pods(namespace string) PodInterface
+}
+
+// PodInterface has methods to work with Pod resources.
+type PodInterface interface {
+	Create(*v1.Pod) (*v1.Pod, error)
+	Update(*v1.Pod) (*v1.Pod, error)
+	UpdateStatus(*v1.Pod) (*v1.Pod, error)
+	Delete(name string, options *metav1.DeleteOptions) error
+	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
+	Get(name string, options metav1.GetOptions) (*v1.Pod, error)
+	List(opts metav1.ListOptions) (*v1.PodList, error)
+	Watch(opts metav1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Pod, err error)
+	PodExpansion
+}
+
+// pods implements PodInterface
+type pods struct {
+	client rest.Interface
+	ns     string
+}
+
+// newPods returns a Pods
+func newPods(c *CoreV1Client, namespace string) *pods {
+	return &pods{
+		client: c.RESTClient(),
+		ns:     namespace,
+	}
+}
+
+// Get takes the name of the pod, and returns the corresponding pod object, and an error if there is any.
+func (c *pods) Get(name string, options metav1.GetOptions) (result *v1.Pod, err error) {
+	result = &v1.Pod{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("pods").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of Pods that match those selectors.
+func (c *pods) List(opts metav1.ListOptions) (result *v1.PodList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1.PodList{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("pods").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested pods.
+func (c *pods) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Namespace(c.ns).
+		Resource("pods").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a pod and creates it. Returns the server's representation of the pod, and an error, if there is any.
+func (c *pods) Create(pod *v1.Pod) (result *v1.Pod, err error) {
+	result = &v1.Pod{}
+	err = c.client.Post().
+		Namespace(c.ns).
+		Resource("pods").
+		Body(pod).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any.
+func (c *pods) Update(pod *v1.Pod) (result *v1.Pod, err error) {
+	result = &v1.Pod{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("pods").
+		Name(pod.Name).
+		Body(pod).
+		Do().
+		Into(result)
+	return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+
+func (c *pods) UpdateStatus(pod *v1.Pod) (result *v1.Pod, err error) {
+	result = &v1.Pod{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("pods").
+		Name(pod.Name).
+		SubResource("status").
+		Body(pod).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes name of the pod and deletes it. Returns an error if one occurs.
+func (c *pods) Delete(name string, options *metav1.DeleteOptions) error {
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("pods").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *pods) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("pods").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched pod.
+func (c *pods) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Pod, err error) {
+	result = &v1.Pod{}
+	err = c.client.Patch(pt).
+		Namespace(c.ns).
+		Resource("pods").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
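
Usage sketch for the generated pod client above: consumers normally reach
PodInterface through the aggregate clientset rather than calling newPods
directly. A minimal example, assuming an illustrative kubeconfig path,
namespace, and label selector:

    package main

    import (
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/clientcmd"
    )

    func main() {
        // Illustrative path; rest.InClusterConfig() is the in-cluster alternative.
        config, err := clientcmd.BuildConfigFromFlags("", "/etc/kubernetes/kubeconfig")
        if err != nil {
            panic(err)
        }
        cs := kubernetes.NewForConfigOrDie(config)

        // PodsGetter -> PodInterface: List with a label selector (values assumed).
        pods, err := cs.CoreV1().Pods("default").List(metav1.ListOptions{
            LabelSelector: "app=example",
        })
        if err != nil {
            panic(err)
        }
        for _, p := range pods.Items {
            fmt.Println(p.Name, p.Status.PodIP)
        }
    }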
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go
new file mode 100644
index 0000000..ed876be
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go
@@ -0,0 +1,45 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"k8s.io/api/core/v1"
+	policy "k8s.io/api/policy/v1beta1"
+	"k8s.io/client-go/kubernetes/scheme"
+	restclient "k8s.io/client-go/rest"
+)
+
+// The PodExpansion interface allows manually adding extra methods to the PodInterface.
+type PodExpansion interface {
+	Bind(binding *v1.Binding) error
+	Evict(eviction *policy.Eviction) error
+	GetLogs(name string, opts *v1.PodLogOptions) *restclient.Request
+}
+
+// Bind applies the provided binding to the named pod in the current namespace (binding.Namespace is ignored).
+func (c *pods) Bind(binding *v1.Binding) error {
+	return c.client.Post().Namespace(c.ns).Resource("pods").Name(binding.Name).SubResource("binding").Body(binding).Do().Error()
+}
+
+func (c *pods) Evict(eviction *policy.Eviction) error {
+	return c.client.Post().Namespace(c.ns).Resource("pods").Name(eviction.Name).SubResource("eviction").Body(eviction).Do().Error()
+}
+
+// GetLogs constructs a request for getting the logs for a pod.
+func (c *pods) GetLogs(name string, opts *v1.PodLogOptions) *restclient.Request {
+	return c.client.Get().Namespace(c.ns).Name(name).Resource("pods").SubResource("log").VersionedParams(opts, scheme.ParameterCodec)
+}
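
GetLogs above returns a *rest.Request rather than executing the call; the
caller decides whether to buffer or stream the response. A minimal sketch,
assuming a clientset built as in the earlier example (namespace, pod name,
and tail length are illustrative):

    import (
        "io"
        "os"

        corev1 "k8s.io/api/core/v1"
        "k8s.io/client-go/kubernetes"
    )

    // tailPodLogs executes the request built by the GetLogs expansion method.
    func tailPodLogs(cs kubernetes.Interface, ns, pod string) error {
        tail := int64(100)
        req := cs.CoreV1().Pods(ns).GetLogs(pod, &corev1.PodLogOptions{TailLines: &tail})
        stream, err := req.Stream() // GET .../namespaces/<ns>/pods/<pod>/log
        if err != nil {
            return err
        }
        defer stream.Close()
        _, err = io.Copy(os.Stdout, stream)
        return err
    }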
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go
new file mode 100644
index 0000000..84d7c98
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go
@@ -0,0 +1,174 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	"time"
+
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// PodTemplatesGetter has a method to return a PodTemplateInterface.
+// A group's client should implement this interface.
+type PodTemplatesGetter interface {
+	PodTemplates(namespace string) PodTemplateInterface
+}
+
+// PodTemplateInterface has methods to work with PodTemplate resources.
+type PodTemplateInterface interface {
+	Create(*v1.PodTemplate) (*v1.PodTemplate, error)
+	Update(*v1.PodTemplate) (*v1.PodTemplate, error)
+	Delete(name string, options *metav1.DeleteOptions) error
+	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
+	Get(name string, options metav1.GetOptions) (*v1.PodTemplate, error)
+	List(opts metav1.ListOptions) (*v1.PodTemplateList, error)
+	Watch(opts metav1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PodTemplate, err error)
+	PodTemplateExpansion
+}
+
+// podTemplates implements PodTemplateInterface
+type podTemplates struct {
+	client rest.Interface
+	ns     string
+}
+
+// newPodTemplates returns a PodTemplates
+func newPodTemplates(c *CoreV1Client, namespace string) *podTemplates {
+	return &podTemplates{
+		client: c.RESTClient(),
+		ns:     namespace,
+	}
+}
+
+// Get takes name of the podTemplate, and returns the corresponding podTemplate object, and an error if there is any.
+func (c *podTemplates) Get(name string, options metav1.GetOptions) (result *v1.PodTemplate, err error) {
+	result = &v1.PodTemplate{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("podtemplates").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of PodTemplates that match those selectors.
+func (c *podTemplates) List(opts metav1.ListOptions) (result *v1.PodTemplateList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1.PodTemplateList{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("podtemplates").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested podTemplates.
+func (c *podTemplates) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Namespace(c.ns).
+		Resource("podtemplates").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a podTemplate and creates it.  Returns the server's representation of the podTemplate, and an error, if there is any.
+func (c *podTemplates) Create(podTemplate *v1.PodTemplate) (result *v1.PodTemplate, err error) {
+	result = &v1.PodTemplate{}
+	err = c.client.Post().
+		Namespace(c.ns).
+		Resource("podtemplates").
+		Body(podTemplate).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a podTemplate and updates it. Returns the server's representation of the podTemplate, and an error, if there is any.
+func (c *podTemplates) Update(podTemplate *v1.PodTemplate) (result *v1.PodTemplate, err error) {
+	result = &v1.PodTemplate{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("podtemplates").
+		Name(podTemplate.Name).
+		Body(podTemplate).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes name of the podTemplate and deletes it. Returns an error if one occurs.
+func (c *podTemplates) Delete(name string, options *metav1.DeleteOptions) error {
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("podtemplates").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *podTemplates) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("podtemplates").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched podTemplate.
+func (c *podTemplates) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PodTemplate, err error) {
+	result = &v1.PodTemplate{}
+	err = c.client.Patch(pt).
+		Namespace(c.ns).
+		Resource("podtemplates").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
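
Every generated client in this change exposes the same
Patch(name, patchType, data, subresources...) shape shown above. A sketch of
a strategic-merge patch against a pod template; the label key and value are
assumptions, not anything defined in this change:

    import (
        "k8s.io/apimachinery/pkg/types"
        "k8s.io/client-go/kubernetes"
    )

    // addLabel sends a strategic-merge patch that adds one metadata label.
    func addLabel(cs kubernetes.Interface, ns, name string) error {
        patch := []byte(`{"metadata":{"labels":{"example-key":"example-value"}}}`)
        _, err := cs.CoreV1().PodTemplates(ns).Patch(name, types.StrategicMergePatchType, patch)
        return err
    }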
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go
new file mode 100644
index 0000000..dd3182d
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go
@@ -0,0 +1,223 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	"time"
+
+	autoscalingv1 "k8s.io/api/autoscaling/v1"
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// ReplicationControllersGetter has a method to return a ReplicationControllerInterface.
+// A group's client should implement this interface.
+type ReplicationControllersGetter interface {
+	ReplicationControllers(namespace string) ReplicationControllerInterface
+}
+
+// ReplicationControllerInterface has methods to work with ReplicationController resources.
+type ReplicationControllerInterface interface {
+	Create(*v1.ReplicationController) (*v1.ReplicationController, error)
+	Update(*v1.ReplicationController) (*v1.ReplicationController, error)
+	UpdateStatus(*v1.ReplicationController) (*v1.ReplicationController, error)
+	Delete(name string, options *metav1.DeleteOptions) error
+	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
+	Get(name string, options metav1.GetOptions) (*v1.ReplicationController, error)
+	List(opts metav1.ListOptions) (*v1.ReplicationControllerList, error)
+	Watch(opts metav1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ReplicationController, err error)
+	GetScale(replicationControllerName string, options metav1.GetOptions) (*autoscalingv1.Scale, error)
+	UpdateScale(replicationControllerName string, scale *autoscalingv1.Scale) (*autoscalingv1.Scale, error)
+
+	ReplicationControllerExpansion
+}
+
+// replicationControllers implements ReplicationControllerInterface
+type replicationControllers struct {
+	client rest.Interface
+	ns     string
+}
+
+// newReplicationControllers returns a ReplicationControllers
+func newReplicationControllers(c *CoreV1Client, namespace string) *replicationControllers {
+	return &replicationControllers{
+		client: c.RESTClient(),
+		ns:     namespace,
+	}
+}
+
+// Get takes name of the replicationController, and returns the corresponding replicationController object, and an error if there is any.
+func (c *replicationControllers) Get(name string, options metav1.GetOptions) (result *v1.ReplicationController, err error) {
+	result = &v1.ReplicationController{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("replicationcontrollers").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of ReplicationControllers that match those selectors.
+func (c *replicationControllers) List(opts metav1.ListOptions) (result *v1.ReplicationControllerList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1.ReplicationControllerList{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("replicationcontrollers").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested replicationControllers.
+func (c *replicationControllers) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Namespace(c.ns).
+		Resource("replicationcontrollers").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a replicationController and creates it.  Returns the server's representation of the replicationController, and an error, if there is any.
+func (c *replicationControllers) Create(replicationController *v1.ReplicationController) (result *v1.ReplicationController, err error) {
+	result = &v1.ReplicationController{}
+	err = c.client.Post().
+		Namespace(c.ns).
+		Resource("replicationcontrollers").
+		Body(replicationController).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a replicationController and updates it. Returns the server's representation of the replicationController, and an error, if there is any.
+func (c *replicationControllers) Update(replicationController *v1.ReplicationController) (result *v1.ReplicationController, err error) {
+	result = &v1.ReplicationController{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("replicationcontrollers").
+		Name(replicationController.Name).
+		Body(replicationController).
+		Do().
+		Into(result)
+	return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+
+func (c *replicationControllers) UpdateStatus(replicationController *v1.ReplicationController) (result *v1.ReplicationController, err error) {
+	result = &v1.ReplicationController{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("replicationcontrollers").
+		Name(replicationController.Name).
+		SubResource("status").
+		Body(replicationController).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes name of the replicationController and deletes it. Returns an error if one occurs.
+func (c *replicationControllers) Delete(name string, options *metav1.DeleteOptions) error {
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("replicationcontrollers").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *replicationControllers) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("replicationcontrollers").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched replicationController.
+func (c *replicationControllers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ReplicationController, err error) {
+	result = &v1.ReplicationController{}
+	err = c.client.Patch(pt).
+		Namespace(c.ns).
+		Resource("replicationcontrollers").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
+
+// GetScale takes name of the replicationController, and returns the corresponding autoscalingv1.Scale object, and an error if there is any.
+func (c *replicationControllers) GetScale(replicationControllerName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) {
+	result = &autoscalingv1.Scale{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("replicationcontrollers").
+		Name(replicationControllerName).
+		SubResource("scale").
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any.
+func (c *replicationControllers) UpdateScale(replicationControllerName string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) {
+	result = &autoscalingv1.Scale{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("replicationcontrollers").
+		Name(replicationControllerName).
+		SubResource("scale").
+		Body(scale).
+		Do().
+		Into(result)
+	return
+}
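
GetScale and UpdateScale above target the scale subresource, so replicas can
be changed without round-tripping the whole ReplicationController object. A
minimal read-modify-write sketch:

    import (
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // scaleRC reads the scale subresource, mutates replicas, and writes it back.
    func scaleRC(cs kubernetes.Interface, ns, name string, replicas int32) error {
        scale, err := cs.CoreV1().ReplicationControllers(ns).GetScale(name, metav1.GetOptions{})
        if err != nil {
            return err
        }
        scale.Spec.Replicas = replicas
        _, err = cs.CoreV1().ReplicationControllers(ns).UpdateScale(name, scale)
        return err
    }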
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go
new file mode 100644
index 0000000..5a17899
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go
@@ -0,0 +1,191 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	"time"
+
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// ResourceQuotasGetter has a method to return a ResourceQuotaInterface.
+// A group's client should implement this interface.
+type ResourceQuotasGetter interface {
+	ResourceQuotas(namespace string) ResourceQuotaInterface
+}
+
+// ResourceQuotaInterface has methods to work with ResourceQuota resources.
+type ResourceQuotaInterface interface {
+	Create(*v1.ResourceQuota) (*v1.ResourceQuota, error)
+	Update(*v1.ResourceQuota) (*v1.ResourceQuota, error)
+	UpdateStatus(*v1.ResourceQuota) (*v1.ResourceQuota, error)
+	Delete(name string, options *metav1.DeleteOptions) error
+	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
+	Get(name string, options metav1.GetOptions) (*v1.ResourceQuota, error)
+	List(opts metav1.ListOptions) (*v1.ResourceQuotaList, error)
+	Watch(opts metav1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ResourceQuota, err error)
+	ResourceQuotaExpansion
+}
+
+// resourceQuotas implements ResourceQuotaInterface
+type resourceQuotas struct {
+	client rest.Interface
+	ns     string
+}
+
+// newResourceQuotas returns a ResourceQuotas
+func newResourceQuotas(c *CoreV1Client, namespace string) *resourceQuotas {
+	return &resourceQuotas{
+		client: c.RESTClient(),
+		ns:     namespace,
+	}
+}
+
+// Get takes name of the resourceQuota, and returns the corresponding resourceQuota object, and an error if there is any.
+func (c *resourceQuotas) Get(name string, options metav1.GetOptions) (result *v1.ResourceQuota, err error) {
+	result = &v1.ResourceQuota{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("resourcequotas").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of ResourceQuotas that match those selectors.
+func (c *resourceQuotas) List(opts metav1.ListOptions) (result *v1.ResourceQuotaList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1.ResourceQuotaList{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("resourcequotas").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested resourceQuotas.
+func (c *resourceQuotas) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Namespace(c.ns).
+		Resource("resourcequotas").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a resourceQuota and creates it.  Returns the server's representation of the resourceQuota, and an error, if there is any.
+func (c *resourceQuotas) Create(resourceQuota *v1.ResourceQuota) (result *v1.ResourceQuota, err error) {
+	result = &v1.ResourceQuota{}
+	err = c.client.Post().
+		Namespace(c.ns).
+		Resource("resourcequotas").
+		Body(resourceQuota).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a resourceQuota and updates it. Returns the server's representation of the resourceQuota, and an error, if there is any.
+func (c *resourceQuotas) Update(resourceQuota *v1.ResourceQuota) (result *v1.ResourceQuota, err error) {
+	result = &v1.ResourceQuota{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("resourcequotas").
+		Name(resourceQuota.Name).
+		Body(resourceQuota).
+		Do().
+		Into(result)
+	return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+
+func (c *resourceQuotas) UpdateStatus(resourceQuota *v1.ResourceQuota) (result *v1.ResourceQuota, err error) {
+	result = &v1.ResourceQuota{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("resourcequotas").
+		Name(resourceQuota.Name).
+		SubResource("status").
+		Body(resourceQuota).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes name of the resourceQuota and deletes it. Returns an error if one occurs.
+func (c *resourceQuotas) Delete(name string, options *metav1.DeleteOptions) error {
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("resourcequotas").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *resourceQuotas) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("resourcequotas").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched resourceQuota.
+func (c *resourceQuotas) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ResourceQuota, err error) {
+	result = &v1.ResourceQuota{}
+	err = c.client.Patch(pt).
+		Namespace(c.ns).
+		Resource("resourcequotas").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
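
DeleteCollection above forwards ListOptions to the server, so a label
selector and TimeoutSeconds bound what gets deleted and for how long the
call may run. A sketch with assumed selector and timeout values:

    import (
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // purgeQuotas deletes every ResourceQuota matching an illustrative label,
    // bounding the server-side call the same way the generated code does.
    func purgeQuotas(cs kubernetes.Interface, ns string) error {
        timeout := int64(30)
        return cs.CoreV1().ResourceQuotas(ns).DeleteCollection(
            &metav1.DeleteOptions{},
            metav1.ListOptions{
                LabelSelector:  "env=scratch",
                TimeoutSeconds: &timeout,
            },
        )
    }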
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go
new file mode 100644
index 0000000..85c143b
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go
@@ -0,0 +1,174 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	"time"
+
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// SecretsGetter has a method to return a SecretInterface.
+// A group's client should implement this interface.
+type SecretsGetter interface {
+	Secrets(namespace string) SecretInterface
+}
+
+// SecretInterface has methods to work with Secret resources.
+type SecretInterface interface {
+	Create(*v1.Secret) (*v1.Secret, error)
+	Update(*v1.Secret) (*v1.Secret, error)
+	Delete(name string, options *metav1.DeleteOptions) error
+	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
+	Get(name string, options metav1.GetOptions) (*v1.Secret, error)
+	List(opts metav1.ListOptions) (*v1.SecretList, error)
+	Watch(opts metav1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Secret, err error)
+	SecretExpansion
+}
+
+// secrets implements SecretInterface
+type secrets struct {
+	client rest.Interface
+	ns     string
+}
+
+// newSecrets returns a Secrets
+func newSecrets(c *CoreV1Client, namespace string) *secrets {
+	return &secrets{
+		client: c.RESTClient(),
+		ns:     namespace,
+	}
+}
+
+// Get takes name of the secret, and returns the corresponding secret object, and an error if there is any.
+func (c *secrets) Get(name string, options metav1.GetOptions) (result *v1.Secret, err error) {
+	result = &v1.Secret{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("secrets").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of Secrets that match those selectors.
+func (c *secrets) List(opts metav1.ListOptions) (result *v1.SecretList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1.SecretList{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("secrets").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested secrets.
+func (c *secrets) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Namespace(c.ns).
+		Resource("secrets").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a secret and creates it.  Returns the server's representation of the secret, and an error, if there is any.
+func (c *secrets) Create(secret *v1.Secret) (result *v1.Secret, err error) {
+	result = &v1.Secret{}
+	err = c.client.Post().
+		Namespace(c.ns).
+		Resource("secrets").
+		Body(secret).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a secret and updates it. Returns the server's representation of the secret, and an error, if there is any.
+func (c *secrets) Update(secret *v1.Secret) (result *v1.Secret, err error) {
+	result = &v1.Secret{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("secrets").
+		Name(secret.Name).
+		Body(secret).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes name of the secret and deletes it. Returns an error if one occurs.
+func (c *secrets) Delete(name string, options *metav1.DeleteOptions) error {
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("secrets").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *secrets) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("secrets").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched secret.
+func (c *secrets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Secret, err error) {
+	result = &v1.Secret{}
+	err = c.client.Patch(pt).
+		Namespace(c.ns).
+		Resource("secrets").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
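
A minimal sketch of Create for the secret client above; the secret name,
key, and value are illustrative only:

    import (
        corev1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // createSecret stores an opaque secret in the given namespace.
    func createSecret(cs kubernetes.Interface, ns string) error {
        _, err := cs.CoreV1().Secrets(ns).Create(&corev1.Secret{
            ObjectMeta: metav1.ObjectMeta{Name: "router-credentials"},
            Type:       corev1.SecretTypeOpaque,
            Data:       map[string][]byte{"api-key": []byte("s3cr3t")},
        })
        return err
    }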
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go
new file mode 100644
index 0000000..b0e0941
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go
@@ -0,0 +1,174 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	"time"
+
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// ServicesGetter has a method to return a ServiceInterface.
+// A group's client should implement this interface.
+type ServicesGetter interface {
+	Services(namespace string) ServiceInterface
+}
+
+// ServiceInterface has methods to work with Service resources.
+type ServiceInterface interface {
+	Create(*v1.Service) (*v1.Service, error)
+	Update(*v1.Service) (*v1.Service, error)
+	UpdateStatus(*v1.Service) (*v1.Service, error)
+	Delete(name string, options *metav1.DeleteOptions) error
+	Get(name string, options metav1.GetOptions) (*v1.Service, error)
+	List(opts metav1.ListOptions) (*v1.ServiceList, error)
+	Watch(opts metav1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Service, err error)
+	ServiceExpansion
+}
+
+// services implements ServiceInterface
+type services struct {
+	client rest.Interface
+	ns     string
+}
+
+// newServices returns a Services
+func newServices(c *CoreV1Client, namespace string) *services {
+	return &services{
+		client: c.RESTClient(),
+		ns:     namespace,
+	}
+}
+
+// Get takes name of the service, and returns the corresponding service object, and an error if there is any.
+func (c *services) Get(name string, options metav1.GetOptions) (result *v1.Service, err error) {
+	result = &v1.Service{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("services").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of Services that match those selectors.
+func (c *services) List(opts metav1.ListOptions) (result *v1.ServiceList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1.ServiceList{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("services").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested services.
+func (c *services) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Namespace(c.ns).
+		Resource("services").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a service and creates it.  Returns the server's representation of the service, and an error, if there is any.
+func (c *services) Create(service *v1.Service) (result *v1.Service, err error) {
+	result = &v1.Service{}
+	err = c.client.Post().
+		Namespace(c.ns).
+		Resource("services").
+		Body(service).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a service and updates it. Returns the server's representation of the service, and an error, if there is any.
+func (c *services) Update(service *v1.Service) (result *v1.Service, err error) {
+	result = &v1.Service{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("services").
+		Name(service.Name).
+		Body(service).
+		Do().
+		Into(result)
+	return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+
+func (c *services) UpdateStatus(service *v1.Service) (result *v1.Service, err error) {
+	result = &v1.Service{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("services").
+		Name(service.Name).
+		SubResource("status").
+		Body(service).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes name of the service and deletes it. Returns an error if one occurs.
+func (c *services) Delete(name string, options *metav1.DeleteOptions) error {
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("services").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched service.
+func (c *services) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Service, err error) {
+	result = &v1.Service{}
+	err = c.client.Patch(pt).
+		Namespace(c.ns).
+		Resource("services").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
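
The Update/UpdateStatus split above mirrors the API server's status
subresource: a plain Update cannot change .status, while UpdateStatus PUTs
to .../services/<name>/status. A sketch of a spec-side read-modify-write
(the label name is an assumption):

    import (
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // relabel updates only the main resource; .status changes would need UpdateStatus.
    func relabel(cs kubernetes.Interface, ns, name string) error {
        svc, err := cs.CoreV1().Services(ns).Get(name, metav1.GetOptions{})
        if err != nil {
            return err
        }
        if svc.Labels == nil {
            svc.Labels = map[string]string{}
        }
        svc.Labels["managed-by"] = "example-controller"
        _, err = cs.CoreV1().Services(ns).Update(svc)
        return err
    }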
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service_expansion.go
new file mode 100644
index 0000000..4937fd1
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service_expansion.go
@@ -0,0 +1,41 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"k8s.io/apimachinery/pkg/util/net"
+	restclient "k8s.io/client-go/rest"
+)
+
+// The ServiceExpansion interface allows manually adding extra methods to the ServiceInterface.
+type ServiceExpansion interface {
+	ProxyGet(scheme, name, port, path string, params map[string]string) restclient.ResponseWrapper
+}
+
+// ProxyGet returns a ResponseWrapper for the service; executing it issues a GET through the API server proxy.
+func (c *services) ProxyGet(scheme, name, port, path string, params map[string]string) restclient.ResponseWrapper {
+	request := c.client.Get().
+		Namespace(c.ns).
+		Resource("services").
+		SubResource("proxy").
+		Name(net.JoinSchemeNamePort(scheme, name, port)).
+		Suffix(path)
+	for k, v := range params {
+		request = request.Param(k, v)
+	}
+	return request
+}
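
ProxyGet builds the request but does not execute it; DoRaw on the returned
ResponseWrapper performs the GET. A sketch, where the service name, port,
and path are assumptions:

    import "k8s.io/client-go/kubernetes"

    // healthViaProxy GETs http://<service>:<port>/healthz through the API
    // server proxy subresource.
    func healthViaProxy(cs kubernetes.Interface, ns string) ([]byte, error) {
        return cs.CoreV1().Services(ns).
            ProxyGet("http", "example-svc", "8080", "healthz", nil).
            DoRaw()
    }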
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go
new file mode 100644
index 0000000..50af6a2
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go
@@ -0,0 +1,174 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	"time"
+
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// ServiceAccountsGetter has a method to return a ServiceAccountInterface.
+// A group's client should implement this interface.
+type ServiceAccountsGetter interface {
+	ServiceAccounts(namespace string) ServiceAccountInterface
+}
+
+// ServiceAccountInterface has methods to work with ServiceAccount resources.
+type ServiceAccountInterface interface {
+	Create(*v1.ServiceAccount) (*v1.ServiceAccount, error)
+	Update(*v1.ServiceAccount) (*v1.ServiceAccount, error)
+	Delete(name string, options *metav1.DeleteOptions) error
+	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
+	Get(name string, options metav1.GetOptions) (*v1.ServiceAccount, error)
+	List(opts metav1.ListOptions) (*v1.ServiceAccountList, error)
+	Watch(opts metav1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ServiceAccount, err error)
+	ServiceAccountExpansion
+}
+
+// serviceAccounts implements ServiceAccountInterface
+type serviceAccounts struct {
+	client rest.Interface
+	ns     string
+}
+
+// newServiceAccounts returns a ServiceAccounts
+func newServiceAccounts(c *CoreV1Client, namespace string) *serviceAccounts {
+	return &serviceAccounts{
+		client: c.RESTClient(),
+		ns:     namespace,
+	}
+}
+
+// Get takes name of the serviceAccount, and returns the corresponding serviceAccount object, and an error if there is any.
+func (c *serviceAccounts) Get(name string, options metav1.GetOptions) (result *v1.ServiceAccount, err error) {
+	result = &v1.ServiceAccount{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("serviceaccounts").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of ServiceAccounts that match those selectors.
+func (c *serviceAccounts) List(opts metav1.ListOptions) (result *v1.ServiceAccountList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1.ServiceAccountList{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("serviceaccounts").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested serviceAccounts.
+func (c *serviceAccounts) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Namespace(c.ns).
+		Resource("serviceaccounts").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a serviceAccount and creates it.  Returns the server's representation of the serviceAccount, and an error, if there is any.
+func (c *serviceAccounts) Create(serviceAccount *v1.ServiceAccount) (result *v1.ServiceAccount, err error) {
+	result = &v1.ServiceAccount{}
+	err = c.client.Post().
+		Namespace(c.ns).
+		Resource("serviceaccounts").
+		Body(serviceAccount).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a serviceAccount and updates it. Returns the server's representation of the serviceAccount, and an error, if there is any.
+func (c *serviceAccounts) Update(serviceAccount *v1.ServiceAccount) (result *v1.ServiceAccount, err error) {
+	result = &v1.ServiceAccount{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("serviceaccounts").
+		Name(serviceAccount.Name).
+		Body(serviceAccount).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes name of the serviceAccount and deletes it. Returns an error if one occurs.
+func (c *serviceAccounts) Delete(name string, options *metav1.DeleteOptions) error {
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("serviceaccounts").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *serviceAccounts) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("serviceaccounts").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched serviceAccount.
+func (c *serviceAccounts) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ServiceAccount, err error) {
+	result = &v1.ServiceAccount{}
+	err = c.client.Patch(pt).
+		Namespace(c.ns).
+		Resource("serviceaccounts").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount_expansion.go
new file mode 100644
index 0000000..eaf643f
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount_expansion.go
@@ -0,0 +1,41 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	authenticationv1 "k8s.io/api/authentication/v1"
+)
+
+// The ServiceAccountExpansion interface allows manually adding extra methods
+// to the ServiceAccountInterface.
+type ServiceAccountExpansion interface {
+	CreateToken(name string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error)
+}
+
+// CreateToken creates a new token for a serviceaccount.
+func (c *serviceAccounts) CreateToken(name string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
+	result := &authenticationv1.TokenRequest{}
+	err := c.client.Post().
+		Namespace(c.ns).
+		Resource("serviceaccounts").
+		SubResource("token").
+		Name(name).
+		Body(tr).
+		Do().
+		Into(result)
+	return result, err
+}
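
CreateToken above POSTs a TokenRequest to the serviceaccount's token
subresource. A sketch requesting a short-lived token; the audience and TTL
values are illustrative:

    import (
        authenticationv1 "k8s.io/api/authentication/v1"
        "k8s.io/client-go/kubernetes"
    )

    // requestToken asks for a token bound to the named service account.
    func requestToken(cs kubernetes.Interface, ns, sa string) (string, error) {
        ttl := int64(3600)
        tr, err := cs.CoreV1().ServiceAccounts(ns).CreateToken(sa, &authenticationv1.TokenRequest{
            Spec: authenticationv1.TokenRequestSpec{
                Audiences:         []string{"https://kubernetes.default.svc"},
                ExpirationSeconds: &ttl,
            },
        })
        if err != nil {
            return "", err
        }
        return tr.Status.Token, nil
    }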
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/doc.go
new file mode 100644
index 0000000..7711019
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1beta1
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go
new file mode 100644
index 0000000..143281b
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go
@@ -0,0 +1,174 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	"time"
+
+	v1beta1 "k8s.io/api/events/v1beta1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// EventsGetter has a method to return an EventInterface.
+// A group's client should implement this interface.
+type EventsGetter interface {
+	Events(namespace string) EventInterface
+}
+
+// EventInterface has methods to work with Event resources.
+type EventInterface interface {
+	Create(*v1beta1.Event) (*v1beta1.Event, error)
+	Update(*v1beta1.Event) (*v1beta1.Event, error)
+	Delete(name string, options *v1.DeleteOptions) error
+	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
+	Get(name string, options v1.GetOptions) (*v1beta1.Event, error)
+	List(opts v1.ListOptions) (*v1beta1.EventList, error)
+	Watch(opts v1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Event, err error)
+	EventExpansion
+}
+
+// events implements EventInterface
+type events struct {
+	client rest.Interface
+	ns     string
+}
+
+// newEvents returns an Events
+func newEvents(c *EventsV1beta1Client, namespace string) *events {
+	return &events{
+		client: c.RESTClient(),
+		ns:     namespace,
+	}
+}
+
+// Get takes name of the event, and returns the corresponding event object, and an error if there is any.
+func (c *events) Get(name string, options v1.GetOptions) (result *v1beta1.Event, err error) {
+	result = &v1beta1.Event{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("events").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of Events that match those selectors.
+func (c *events) List(opts v1.ListOptions) (result *v1beta1.EventList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1beta1.EventList{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("events").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested events.
+func (c *events) Watch(opts v1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Namespace(c.ns).
+		Resource("events").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of an event and creates it. Returns the server's representation of the event, and an error, if there is any.
+func (c *events) Create(event *v1beta1.Event) (result *v1beta1.Event, err error) {
+	result = &v1beta1.Event{}
+	err = c.client.Post().
+		Namespace(c.ns).
+		Resource("events").
+		Body(event).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of an event and updates it. Returns the server's representation of the event, and an error, if there is any.
+func (c *events) Update(event *v1beta1.Event) (result *v1beta1.Event, err error) {
+	result = &v1beta1.Event{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("events").
+		Name(event.Name).
+		Body(event).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes name of the event and deletes it. Returns an error if one occurs.
+func (c *events) Delete(name string, options *v1.DeleteOptions) error {
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("events").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *events) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("events").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched event.
+func (c *events) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Event, err error) {
+	result = &v1beta1.Event{}
+	err = c.client.Patch(pt).
+		Namespace(c.ns).
+		Resource("events").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go
new file mode 100644
index 0000000..fb59635
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go
@@ -0,0 +1,90 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	v1beta1 "k8s.io/api/events/v1beta1"
+	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+type EventsV1beta1Interface interface {
+	RESTClient() rest.Interface
+	EventsGetter
+}
+
+// EventsV1beta1Client is used to interact with features provided by the events.k8s.io group.
+type EventsV1beta1Client struct {
+	restClient rest.Interface
+}
+
+func (c *EventsV1beta1Client) Events(namespace string) EventInterface {
+	return newEvents(c, namespace)
+}
+
+// NewForConfig creates a new EventsV1beta1Client for the given config.
+func NewForConfig(c *rest.Config) (*EventsV1beta1Client, error) {
+	config := *c
+	if err := setConfigDefaults(&config); err != nil {
+		return nil, err
+	}
+	client, err := rest.RESTClientFor(&config)
+	if err != nil {
+		return nil, err
+	}
+	return &EventsV1beta1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new EventsV1beta1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *EventsV1beta1Client {
+	client, err := NewForConfig(c)
+	if err != nil {
+		panic(err)
+	}
+	return client
+}
+
+// New creates a new EventsV1beta1Client for the given RESTClient.
+func New(c rest.Interface) *EventsV1beta1Client {
+	return &EventsV1beta1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+	gv := v1beta1.SchemeGroupVersion
+	config.GroupVersion = &gv
+	config.APIPath = "/apis"
+	config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
+
+	if config.UserAgent == "" {
+		config.UserAgent = rest.DefaultKubernetesUserAgent()
+	}
+
+	return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with the API server by this client implementation.
+func (c *EventsV1beta1Client) RESTClient() rest.Interface {
+	if c == nil {
+		return nil
+	}
+	return c.restClient
+}
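
The typed group client above can also be used standalone: setConfigDefaults
fills in the group, version, API path, and codec, so the caller supplies
only connection details. A sketch, assuming a *rest.Config obtained as in
the earlier pod example:

    import (
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        eventsv1beta1 "k8s.io/client-go/kubernetes/typed/events/v1beta1"
        "k8s.io/client-go/rest"
    )

    // listEvents builds the events.k8s.io/v1beta1 client directly, without
    // the aggregate clientset, and lists events in one namespace.
    func listEvents(cfg *rest.Config, ns string) error {
        client, err := eventsv1beta1.NewForConfig(cfg)
        if err != nil {
            return err
        }
        events, err := client.Events(ns).List(metav1.ListOptions{})
        if err != nil {
            return err
        }
        for _, e := range events.Items {
            fmt.Println(e.Name, e.Reason)
        }
        return nil
    }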
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/generated_expansion.go
new file mode 100644
index 0000000..e27f693
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/generated_expansion.go
@@ -0,0 +1,21 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+type EventExpansion interface{}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go
new file mode 100644
index 0000000..93b1ae9
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go
@@ -0,0 +1,191 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	"time"
+
+	v1beta1 "k8s.io/api/extensions/v1beta1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// DaemonSetsGetter has a method to return a DaemonSetInterface.
+// A group's client should implement this interface.
+type DaemonSetsGetter interface {
+	DaemonSets(namespace string) DaemonSetInterface
+}
+
+// DaemonSetInterface has methods to work with DaemonSet resources.
+type DaemonSetInterface interface {
+	Create(*v1beta1.DaemonSet) (*v1beta1.DaemonSet, error)
+	Update(*v1beta1.DaemonSet) (*v1beta1.DaemonSet, error)
+	UpdateStatus(*v1beta1.DaemonSet) (*v1beta1.DaemonSet, error)
+	Delete(name string, options *v1.DeleteOptions) error
+	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
+	Get(name string, options v1.GetOptions) (*v1beta1.DaemonSet, error)
+	List(opts v1.ListOptions) (*v1beta1.DaemonSetList, error)
+	Watch(opts v1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.DaemonSet, err error)
+	DaemonSetExpansion
+}
+
+// daemonSets implements DaemonSetInterface
+type daemonSets struct {
+	client rest.Interface
+	ns     string
+}
+
+// newDaemonSets returns a DaemonSets
+func newDaemonSets(c *ExtensionsV1beta1Client, namespace string) *daemonSets {
+	return &daemonSets{
+		client: c.RESTClient(),
+		ns:     namespace,
+	}
+}
+
+// Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any.
+func (c *daemonSets) Get(name string, options v1.GetOptions) (result *v1beta1.DaemonSet, err error) {
+	result = &v1beta1.DaemonSet{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("daemonsets").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of DaemonSets that match those selectors.
+func (c *daemonSets) List(opts v1.ListOptions) (result *v1beta1.DaemonSetList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1beta1.DaemonSetList{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("daemonsets").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested daemonSets.
+func (c *daemonSets) Watch(opts v1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Namespace(c.ns).
+		Resource("daemonsets").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a daemonSet and creates it.  Returns the server's representation of the daemonSet, and an error, if there is any.
+func (c *daemonSets) Create(daemonSet *v1beta1.DaemonSet) (result *v1beta1.DaemonSet, err error) {
+	result = &v1beta1.DaemonSet{}
+	err = c.client.Post().
+		Namespace(c.ns).
+		Resource("daemonsets").
+		Body(daemonSet).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any.
+func (c *daemonSets) Update(daemonSet *v1beta1.DaemonSet) (result *v1beta1.DaemonSet, err error) {
+	result = &v1beta1.DaemonSet{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("daemonsets").
+		Name(daemonSet.Name).
+		Body(daemonSet).
+		Do().
+		Into(result)
+	return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+
+func (c *daemonSets) UpdateStatus(daemonSet *v1beta1.DaemonSet) (result *v1beta1.DaemonSet, err error) {
+	result = &v1beta1.DaemonSet{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("daemonsets").
+		Name(daemonSet.Name).
+		SubResource("status").
+		Body(daemonSet).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes name of the daemonSet and deletes it. Returns an error if one occurs.
+func (c *daemonSets) Delete(name string, options *v1.DeleteOptions) error {
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("daemonsets").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *daemonSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("daemonsets").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched daemonSet.
+func (c *daemonSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.DaemonSet, err error) {
+	result = &v1beta1.DaemonSet{}
+	err = c.client.Patch(pt).
+		Namespace(c.ns).
+		Resource("daemonsets").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
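A usage sketch for the DaemonSet client above; the namespace, label selector, and helper name are illustrative assumptions, with the client constructed as in the other sketches in this change.

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	extv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1"
)

func listDaemonSets(client extv1beta1.ExtensionsV1beta1Interface) error {
	dsList, err := client.DaemonSets("kube-system").List(metav1.ListOptions{
		LabelSelector: "k8s-app=fluentd", // illustrative selector
	})
	if err != nil {
		return err
	}
	for _, ds := range dsList.Items {
		fmt.Printf("%s: %d/%d ready\n", ds.Name, ds.Status.NumberReady, ds.Status.DesiredNumberScheduled)
	}
	return nil
}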
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go
new file mode 100644
index 0000000..5557b9f
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go
@@ -0,0 +1,222 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	"time"
+
+	v1beta1 "k8s.io/api/extensions/v1beta1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// DeploymentsGetter has a method to return a DeploymentInterface.
+// A group's client should implement this interface.
+type DeploymentsGetter interface {
+	Deployments(namespace string) DeploymentInterface
+}
+
+// DeploymentInterface has methods to work with Deployment resources.
+type DeploymentInterface interface {
+	Create(*v1beta1.Deployment) (*v1beta1.Deployment, error)
+	Update(*v1beta1.Deployment) (*v1beta1.Deployment, error)
+	UpdateStatus(*v1beta1.Deployment) (*v1beta1.Deployment, error)
+	Delete(name string, options *v1.DeleteOptions) error
+	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
+	Get(name string, options v1.GetOptions) (*v1beta1.Deployment, error)
+	List(opts v1.ListOptions) (*v1beta1.DeploymentList, error)
+	Watch(opts v1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error)
+	GetScale(deploymentName string, options v1.GetOptions) (*v1beta1.Scale, error)
+	UpdateScale(deploymentName string, scale *v1beta1.Scale) (*v1beta1.Scale, error)
+
+	DeploymentExpansion
+}
+
+// deployments implements DeploymentInterface
+type deployments struct {
+	client rest.Interface
+	ns     string
+}
+
+// newDeployments returns a Deployments
+func newDeployments(c *ExtensionsV1beta1Client, namespace string) *deployments {
+	return &deployments{
+		client: c.RESTClient(),
+		ns:     namespace,
+	}
+}
+
+// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any.
+func (c *deployments) Get(name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) {
+	result = &v1beta1.Deployment{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("deployments").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of Deployments that match those selectors.
+func (c *deployments) List(opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1beta1.DeploymentList{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("deployments").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested deployments.
+func (c *deployments) Watch(opts v1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Namespace(c.ns).
+		Resource("deployments").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a deployment and creates it.  Returns the server's representation of the deployment, and an error, if there is any.
+func (c *deployments) Create(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) {
+	result = &v1beta1.Deployment{}
+	err = c.client.Post().
+		Namespace(c.ns).
+		Resource("deployments").
+		Body(deployment).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any.
+func (c *deployments) Update(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) {
+	result = &v1beta1.Deployment{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("deployments").
+		Name(deployment.Name).
+		Body(deployment).
+		Do().
+		Into(result)
+	return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+
+func (c *deployments) UpdateStatus(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) {
+	result = &v1beta1.Deployment{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("deployments").
+		Name(deployment.Name).
+		SubResource("status").
+		Body(deployment).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes name of the deployment and deletes it. Returns an error if one occurs.
+func (c *deployments) Delete(name string, options *v1.DeleteOptions) error {
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("deployments").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *deployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("deployments").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched deployment.
+func (c *deployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error) {
+	result = &v1beta1.Deployment{}
+	err = c.client.Patch(pt).
+		Namespace(c.ns).
+		Resource("deployments").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
+
+// GetScale takes name of the deployment, and returns the corresponding v1beta1.Scale object, and an error if there is any.
+func (c *deployments) GetScale(deploymentName string, options v1.GetOptions) (result *v1beta1.Scale, err error) {
+	result = &v1beta1.Scale{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("deployments").
+		Name(deploymentName).
+		SubResource("scale").
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any.
+func (c *deployments) UpdateScale(deploymentName string, scale *v1beta1.Scale) (result *v1beta1.Scale, err error) {
+	result = &v1beta1.Scale{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("deployments").
+		Name(deploymentName).
+		SubResource("scale").
+		Body(scale).
+		Do().
+		Into(result)
+	return
+}
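Unlike the other verbs, GetScale/UpdateScale round-trip the scale subresource, so the update carries the resourceVersion read by the get and a concurrent resize surfaces as a conflict rather than being silently overwritten. A sketch, with the helper and resource names illustrative:

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	extv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1"
)

func scaleDeployment(client extv1beta1.ExtensionsV1beta1Interface, ns, name string, replicas int32) error {
	scale, err := client.Deployments(ns).GetScale(name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	scale.Spec.Replicas = replicas // desired count; resourceVersion is preserved from the get
	_, err = client.Deployments(ns).UpdateScale(name, scale)
	return err
}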
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go
new file mode 100644
index 0000000..24734be
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go
@@ -0,0 +1,29 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import "k8s.io/api/extensions/v1beta1"
+
+// The DeploymentExpansion interface allows manually adding extra methods to the DeploymentInterface.
+type DeploymentExpansion interface {
+	Rollback(*v1beta1.DeploymentRollback) error
+}
+
+// Rollback applies the provided DeploymentRollback to the named deployment in the current namespace.
+func (c *deployments) Rollback(deploymentRollback *v1beta1.DeploymentRollback) error {
+	return c.client.Post().Namespace(c.ns).Resource("deployments").Name(deploymentRollback.Name).SubResource("rollback").Body(deploymentRollback).Do().Error()
+}
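A sketch of calling the expansion method. The deployment name is illustrative, client is an ExtensionsV1beta1Interface built as in the sketches above, v1beta1 is k8s.io/api/extensions/v1beta1 as imported by this file, and per the upstream API a Revision of 0 requests the previous revision.

rollback := &v1beta1.DeploymentRollback{
	Name:       "nginx",                             // illustrative name
	RollbackTo: v1beta1.RollbackConfig{Revision: 0}, // 0 = previous revision
}
if err := client.Deployments("default").Rollback(rollback); err != nil {
	return err
}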
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/doc.go
new file mode 100644
index 0000000..7711019
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1beta1
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go
new file mode 100644
index 0000000..0e9edf5
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go
@@ -0,0 +1,110 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	v1beta1 "k8s.io/api/extensions/v1beta1"
+	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+type ExtensionsV1beta1Interface interface {
+	RESTClient() rest.Interface
+	DaemonSetsGetter
+	DeploymentsGetter
+	IngressesGetter
+	PodSecurityPoliciesGetter
+	ReplicaSetsGetter
+}
+
+// ExtensionsV1beta1Client is used to interact with features provided by the extensions group.
+type ExtensionsV1beta1Client struct {
+	restClient rest.Interface
+}
+
+func (c *ExtensionsV1beta1Client) DaemonSets(namespace string) DaemonSetInterface {
+	return newDaemonSets(c, namespace)
+}
+
+func (c *ExtensionsV1beta1Client) Deployments(namespace string) DeploymentInterface {
+	return newDeployments(c, namespace)
+}
+
+func (c *ExtensionsV1beta1Client) Ingresses(namespace string) IngressInterface {
+	return newIngresses(c, namespace)
+}
+
+func (c *ExtensionsV1beta1Client) PodSecurityPolicies() PodSecurityPolicyInterface {
+	return newPodSecurityPolicies(c)
+}
+
+func (c *ExtensionsV1beta1Client) ReplicaSets(namespace string) ReplicaSetInterface {
+	return newReplicaSets(c, namespace)
+}
+
+// NewForConfig creates a new ExtensionsV1beta1Client for the given config.
+func NewForConfig(c *rest.Config) (*ExtensionsV1beta1Client, error) {
+	config := *c
+	if err := setConfigDefaults(&config); err != nil {
+		return nil, err
+	}
+	client, err := rest.RESTClientFor(&config)
+	if err != nil {
+		return nil, err
+	}
+	return &ExtensionsV1beta1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new ExtensionsV1beta1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *ExtensionsV1beta1Client {
+	client, err := NewForConfig(c)
+	if err != nil {
+		panic(err)
+	}
+	return client
+}
+
+// New creates a new ExtensionsV1beta1Client for the given RESTClient.
+func New(c rest.Interface) *ExtensionsV1beta1Client {
+	return &ExtensionsV1beta1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+	gv := v1beta1.SchemeGroupVersion
+	config.GroupVersion = &gv
+	config.APIPath = "/apis"
+	config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
+
+	if config.UserAgent == "" {
+		config.UserAgent = rest.DefaultKubernetesUserAgent()
+	}
+
+	return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with the API server by this client implementation.
+func (c *ExtensionsV1beta1Client) RESTClient() rest.Interface {
+	if c == nil {
+		return nil
+	}
+	return c.restClient
+}
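The three constructors split by failure handling: NewForConfig returns the error, NewForConfigOrDie panics and suits process startup where a bad config is fatal, and New wraps an existing rest.Interface (useful with fakes in tests). An in-cluster sketch, assuming the code runs inside a pod with a service account:

import (
	extv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1"
	"k8s.io/client-go/rest"
)

func newExtensionsClient() *extv1beta1.ExtensionsV1beta1Client {
	cfg, err := rest.InClusterConfig() // reads the pod's service-account token and CA
	if err != nil {
		panic(err)
	}
	return extv1beta1.NewForConfigOrDie(cfg)
}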
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go
new file mode 100644
index 0000000..cfaeebd
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go
@@ -0,0 +1,27 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+type DaemonSetExpansion interface{}
+
+type IngressExpansion interface{}
+
+type PodSecurityPolicyExpansion interface{}
+
+type ReplicaSetExpansion interface{}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go
new file mode 100644
index 0000000..4da51c3
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go
@@ -0,0 +1,191 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	"time"
+
+	v1beta1 "k8s.io/api/extensions/v1beta1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// IngressesGetter has a method to return an IngressInterface.
+// A group's client should implement this interface.
+type IngressesGetter interface {
+	Ingresses(namespace string) IngressInterface
+}
+
+// IngressInterface has methods to work with Ingress resources.
+type IngressInterface interface {
+	Create(*v1beta1.Ingress) (*v1beta1.Ingress, error)
+	Update(*v1beta1.Ingress) (*v1beta1.Ingress, error)
+	UpdateStatus(*v1beta1.Ingress) (*v1beta1.Ingress, error)
+	Delete(name string, options *v1.DeleteOptions) error
+	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
+	Get(name string, options v1.GetOptions) (*v1beta1.Ingress, error)
+	List(opts v1.ListOptions) (*v1beta1.IngressList, error)
+	Watch(opts v1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Ingress, err error)
+	IngressExpansion
+}
+
+// ingresses implements IngressInterface
+type ingresses struct {
+	client rest.Interface
+	ns     string
+}
+
+// newIngresses returns an Ingresses
+func newIngresses(c *ExtensionsV1beta1Client, namespace string) *ingresses {
+	return &ingresses{
+		client: c.RESTClient(),
+		ns:     namespace,
+	}
+}
+
+// Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any.
+func (c *ingresses) Get(name string, options v1.GetOptions) (result *v1beta1.Ingress, err error) {
+	result = &v1beta1.Ingress{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("ingresses").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of Ingresses that match those selectors.
+func (c *ingresses) List(opts v1.ListOptions) (result *v1beta1.IngressList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1beta1.IngressList{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("ingresses").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested ingresses.
+func (c *ingresses) Watch(opts v1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Namespace(c.ns).
+		Resource("ingresses").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of an ingress and creates it.  Returns the server's representation of the ingress, and an error, if there is any.
+func (c *ingresses) Create(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) {
+	result = &v1beta1.Ingress{}
+	err = c.client.Post().
+		Namespace(c.ns).
+		Resource("ingresses").
+		Body(ingress).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of an ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any.
+func (c *ingresses) Update(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) {
+	result = &v1beta1.Ingress{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("ingresses").
+		Name(ingress.Name).
+		Body(ingress).
+		Do().
+		Into(result)
+	return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+
+func (c *ingresses) UpdateStatus(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) {
+	result = &v1beta1.Ingress{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("ingresses").
+		Name(ingress.Name).
+		SubResource("status").
+		Body(ingress).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes name of the ingress and deletes it. Returns an error if one occurs.
+func (c *ingresses) Delete(name string, options *v1.DeleteOptions) error {
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("ingresses").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *ingresses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("ingresses").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched ingress.
+func (c *ingresses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Ingress, err error) {
+	result = &v1beta1.Ingress{}
+	err = c.client.Patch(pt).
+		Namespace(c.ns).
+		Resource("ingresses").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
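Watch hands back a bare watch.Interface whose events must be drained and type-asserted. A sketch with an illustrative namespace (long-running consumers would normally layer an informer on top to get resync and reconnect behavior):

import (
	"fmt"

	apiv1beta1 "k8s.io/api/extensions/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	extv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1"
)

func printIngressEvents(client extv1beta1.ExtensionsV1beta1Interface) error {
	w, err := client.Ingresses("default").Watch(metav1.ListOptions{})
	if err != nil {
		return err
	}
	defer w.Stop()
	for event := range w.ResultChan() {
		ing, ok := event.Object.(*apiv1beta1.Ingress)
		if !ok {
			continue // e.g. a *metav1.Status carried by a watch error event
		}
		fmt.Println(event.Type, ing.Name)
	}
	return nil
}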
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go
new file mode 100644
index 0000000..a947a54
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go
@@ -0,0 +1,164 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	"time"
+
+	v1beta1 "k8s.io/api/extensions/v1beta1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// PodSecurityPoliciesGetter has a method to return a PodSecurityPolicyInterface.
+// A group's client should implement this interface.
+type PodSecurityPoliciesGetter interface {
+	PodSecurityPolicies() PodSecurityPolicyInterface
+}
+
+// PodSecurityPolicyInterface has methods to work with PodSecurityPolicy resources.
+type PodSecurityPolicyInterface interface {
+	Create(*v1beta1.PodSecurityPolicy) (*v1beta1.PodSecurityPolicy, error)
+	Update(*v1beta1.PodSecurityPolicy) (*v1beta1.PodSecurityPolicy, error)
+	Delete(name string, options *v1.DeleteOptions) error
+	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
+	Get(name string, options v1.GetOptions) (*v1beta1.PodSecurityPolicy, error)
+	List(opts v1.ListOptions) (*v1beta1.PodSecurityPolicyList, error)
+	Watch(opts v1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error)
+	PodSecurityPolicyExpansion
+}
+
+// podSecurityPolicies implements PodSecurityPolicyInterface
+type podSecurityPolicies struct {
+	client rest.Interface
+}
+
+// newPodSecurityPolicies returns a PodSecurityPolicies
+func newPodSecurityPolicies(c *ExtensionsV1beta1Client) *podSecurityPolicies {
+	return &podSecurityPolicies{
+		client: c.RESTClient(),
+	}
+}
+
+// Get takes name of the podSecurityPolicy, and returns the corresponding podSecurityPolicy object, and an error if there is any.
+func (c *podSecurityPolicies) Get(name string, options v1.GetOptions) (result *v1beta1.PodSecurityPolicy, err error) {
+	result = &v1beta1.PodSecurityPolicy{}
+	err = c.client.Get().
+		Resource("podsecuritypolicies").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of PodSecurityPolicies that match those selectors.
+func (c *podSecurityPolicies) List(opts v1.ListOptions) (result *v1beta1.PodSecurityPolicyList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1beta1.PodSecurityPolicyList{}
+	err = c.client.Get().
+		Resource("podsecuritypolicies").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested podSecurityPolicies.
+func (c *podSecurityPolicies) Watch(opts v1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Resource("podsecuritypolicies").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a podSecurityPolicy and creates it.  Returns the server's representation of the podSecurityPolicy, and an error, if there is any.
+func (c *podSecurityPolicies) Create(podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) {
+	result = &v1beta1.PodSecurityPolicy{}
+	err = c.client.Post().
+		Resource("podsecuritypolicies").
+		Body(podSecurityPolicy).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a podSecurityPolicy and updates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any.
+func (c *podSecurityPolicies) Update(podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) {
+	result = &v1beta1.PodSecurityPolicy{}
+	err = c.client.Put().
+		Resource("podsecuritypolicies").
+		Name(podSecurityPolicy.Name).
+		Body(podSecurityPolicy).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes name of the podSecurityPolicy and deletes it. Returns an error if one occurs.
+func (c *podSecurityPolicies) Delete(name string, options *v1.DeleteOptions) error {
+	return c.client.Delete().
+		Resource("podsecuritypolicies").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *podSecurityPolicies) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Resource("podsecuritypolicies").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched podSecurityPolicy.
+func (c *podSecurityPolicies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) {
+	result = &v1beta1.PodSecurityPolicy{}
+	err = c.client.Patch(pt).
+		Resource("podsecuritypolicies").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
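Note that podSecurityPolicies is the one client in this set with no ns field and no Namespace() call on its requests: PodSecurityPolicy is cluster-scoped. A listing sketch, with client built as in the sketches above:

psps, err := client.PodSecurityPolicies().List(metav1.ListOptions{})
if err != nil {
	return err
}
for _, psp := range psps.Items {
	fmt.Println(psp.Name, psp.Spec.Privileged)
}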
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go
new file mode 100644
index 0000000..4440290
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go
@@ -0,0 +1,222 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	"time"
+
+	v1beta1 "k8s.io/api/extensions/v1beta1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// ReplicaSetsGetter has a method to return a ReplicaSetInterface.
+// A group's client should implement this interface.
+type ReplicaSetsGetter interface {
+	ReplicaSets(namespace string) ReplicaSetInterface
+}
+
+// ReplicaSetInterface has methods to work with ReplicaSet resources.
+type ReplicaSetInterface interface {
+	Create(*v1beta1.ReplicaSet) (*v1beta1.ReplicaSet, error)
+	Update(*v1beta1.ReplicaSet) (*v1beta1.ReplicaSet, error)
+	UpdateStatus(*v1beta1.ReplicaSet) (*v1beta1.ReplicaSet, error)
+	Delete(name string, options *v1.DeleteOptions) error
+	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
+	Get(name string, options v1.GetOptions) (*v1beta1.ReplicaSet, error)
+	List(opts v1.ListOptions) (*v1beta1.ReplicaSetList, error)
+	Watch(opts v1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ReplicaSet, err error)
+	GetScale(replicaSetName string, options v1.GetOptions) (*v1beta1.Scale, error)
+	UpdateScale(replicaSetName string, scale *v1beta1.Scale) (*v1beta1.Scale, error)
+
+	ReplicaSetExpansion
+}
+
+// replicaSets implements ReplicaSetInterface
+type replicaSets struct {
+	client rest.Interface
+	ns     string
+}
+
+// newReplicaSets returns a ReplicaSets
+func newReplicaSets(c *ExtensionsV1beta1Client, namespace string) *replicaSets {
+	return &replicaSets{
+		client: c.RESTClient(),
+		ns:     namespace,
+	}
+}
+
+// Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any.
+func (c *replicaSets) Get(name string, options v1.GetOptions) (result *v1beta1.ReplicaSet, err error) {
+	result = &v1beta1.ReplicaSet{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("replicasets").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of ReplicaSets that match those selectors.
+func (c *replicaSets) List(opts v1.ListOptions) (result *v1beta1.ReplicaSetList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1beta1.ReplicaSetList{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("replicasets").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested replicaSets.
+func (c *replicaSets) Watch(opts v1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Namespace(c.ns).
+		Resource("replicasets").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a replicaSet and creates it.  Returns the server's representation of the replicaSet, and an error, if there is any.
+func (c *replicaSets) Create(replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) {
+	result = &v1beta1.ReplicaSet{}
+	err = c.client.Post().
+		Namespace(c.ns).
+		Resource("replicasets").
+		Body(replicaSet).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any.
+func (c *replicaSets) Update(replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) {
+	result = &v1beta1.ReplicaSet{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("replicasets").
+		Name(replicaSet.Name).
+		Body(replicaSet).
+		Do().
+		Into(result)
+	return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+
+func (c *replicaSets) UpdateStatus(replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) {
+	result = &v1beta1.ReplicaSet{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("replicasets").
+		Name(replicaSet.Name).
+		SubResource("status").
+		Body(replicaSet).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes name of the replicaSet and deletes it. Returns an error if one occurs.
+func (c *replicaSets) Delete(name string, options *v1.DeleteOptions) error {
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("replicasets").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *replicaSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("replicasets").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched replicaSet.
+func (c *replicaSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ReplicaSet, err error) {
+	result = &v1beta1.ReplicaSet{}
+	err = c.client.Patch(pt).
+		Namespace(c.ns).
+		Resource("replicasets").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
+
+// GetScale takes name of the replicaSet, and returns the corresponding v1beta1.Scale object, and an error if there is any.
+func (c *replicaSets) GetScale(replicaSetName string, options v1.GetOptions) (result *v1beta1.Scale, err error) {
+	result = &v1beta1.Scale{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("replicasets").
+		Name(replicaSetName).
+		SubResource("scale").
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any.
+func (c *replicaSets) UpdateScale(replicaSetName string, scale *v1beta1.Scale) (result *v1beta1.Scale, err error) {
+	result = &v1beta1.Scale{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("replicasets").
+		Name(replicaSetName).
+		SubResource("scale").
+		Body(scale).
+		Do().
+		Into(result)
+	return
+}
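A sketch of the Patch path using a strategic merge patch; the resource name and label are illustrative, client is built as above, and types is k8s.io/apimachinery/pkg/types as imported by this file.

patch := []byte(`{"metadata":{"labels":{"team":"infra"}}}`) // strategic merge patch body
rs, err := client.ReplicaSets("default").Patch("my-replicaset", types.StrategicMergePatchType, patch)
if err != nil {
	return err
}
fmt.Println(rs.Labels)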
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/doc.go
new file mode 100644
index 0000000..3af5d05
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/generated_expansion.go
new file mode 100644
index 0000000..7d77495
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/generated_expansion.go
@@ -0,0 +1,21 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+type NetworkPolicyExpansion interface{}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go
new file mode 100644
index 0000000..8684db4
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go
@@ -0,0 +1,90 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	v1 "k8s.io/api/networking/v1"
+	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+type NetworkingV1Interface interface {
+	RESTClient() rest.Interface
+	NetworkPoliciesGetter
+}
+
+// NetworkingV1Client is used to interact with features provided by the networking.k8s.io group.
+type NetworkingV1Client struct {
+	restClient rest.Interface
+}
+
+func (c *NetworkingV1Client) NetworkPolicies(namespace string) NetworkPolicyInterface {
+	return newNetworkPolicies(c, namespace)
+}
+
+// NewForConfig creates a new NetworkingV1Client for the given config.
+func NewForConfig(c *rest.Config) (*NetworkingV1Client, error) {
+	config := *c
+	if err := setConfigDefaults(&config); err != nil {
+		return nil, err
+	}
+	client, err := rest.RESTClientFor(&config)
+	if err != nil {
+		return nil, err
+	}
+	return &NetworkingV1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new NetworkingV1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *NetworkingV1Client {
+	client, err := NewForConfig(c)
+	if err != nil {
+		panic(err)
+	}
+	return client
+}
+
+// New creates a new NetworkingV1Client for the given RESTClient.
+func New(c rest.Interface) *NetworkingV1Client {
+	return &NetworkingV1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+	gv := v1.SchemeGroupVersion
+	config.GroupVersion = &gv
+	config.APIPath = "/apis"
+	config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
+
+	if config.UserAgent == "" {
+		config.UserAgent = rest.DefaultKubernetesUserAgent()
+	}
+
+	return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with the API server by this client implementation.
+func (c *NetworkingV1Client) RESTClient() rest.Interface {
+	if c == nil {
+		return nil
+	}
+	return c.restClient
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go
new file mode 100644
index 0000000..3f39be9
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go
@@ -0,0 +1,174 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	"time"
+
+	v1 "k8s.io/api/networking/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// NetworkPoliciesGetter has a method to return a NetworkPolicyInterface.
+// A group's client should implement this interface.
+type NetworkPoliciesGetter interface {
+	NetworkPolicies(namespace string) NetworkPolicyInterface
+}
+
+// NetworkPolicyInterface has methods to work with NetworkPolicy resources.
+type NetworkPolicyInterface interface {
+	Create(*v1.NetworkPolicy) (*v1.NetworkPolicy, error)
+	Update(*v1.NetworkPolicy) (*v1.NetworkPolicy, error)
+	Delete(name string, options *metav1.DeleteOptions) error
+	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
+	Get(name string, options metav1.GetOptions) (*v1.NetworkPolicy, error)
+	List(opts metav1.ListOptions) (*v1.NetworkPolicyList, error)
+	Watch(opts metav1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.NetworkPolicy, err error)
+	NetworkPolicyExpansion
+}
+
+// networkPolicies implements NetworkPolicyInterface
+type networkPolicies struct {
+	client rest.Interface
+	ns     string
+}
+
+// newNetworkPolicies returns a NetworkPolicies
+func newNetworkPolicies(c *NetworkingV1Client, namespace string) *networkPolicies {
+	return &networkPolicies{
+		client: c.RESTClient(),
+		ns:     namespace,
+	}
+}
+
+// Get takes name of the networkPolicy, and returns the corresponding networkPolicy object, and an error if there is any.
+func (c *networkPolicies) Get(name string, options metav1.GetOptions) (result *v1.NetworkPolicy, err error) {
+	result = &v1.NetworkPolicy{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("networkpolicies").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of NetworkPolicies that match those selectors.
+func (c *networkPolicies) List(opts metav1.ListOptions) (result *v1.NetworkPolicyList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1.NetworkPolicyList{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("networkpolicies").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested networkPolicies.
+func (c *networkPolicies) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Namespace(c.ns).
+		Resource("networkpolicies").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a networkPolicy and creates it.  Returns the server's representation of the networkPolicy, and an error, if there is any.
+func (c *networkPolicies) Create(networkPolicy *v1.NetworkPolicy) (result *v1.NetworkPolicy, err error) {
+	result = &v1.NetworkPolicy{}
+	err = c.client.Post().
+		Namespace(c.ns).
+		Resource("networkpolicies").
+		Body(networkPolicy).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a networkPolicy and updates it. Returns the server's representation of the networkPolicy, and an error, if there is any.
+func (c *networkPolicies) Update(networkPolicy *v1.NetworkPolicy) (result *v1.NetworkPolicy, err error) {
+	result = &v1.NetworkPolicy{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("networkpolicies").
+		Name(networkPolicy.Name).
+		Body(networkPolicy).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes name of the networkPolicy and deletes it. Returns an error if one occurs.
+func (c *networkPolicies) Delete(name string, options *metav1.DeleteOptions) error {
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("networkpolicies").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *networkPolicies) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("networkpolicies").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched networkPolicy.
+func (c *networkPolicies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.NetworkPolicy, err error) {
+	result = &v1.NetworkPolicy{}
+	err = c.client.Patch(pt).
+		Namespace(c.ns).
+		Resource("networkpolicies").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
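A sketch of creating a deny-all-ingress policy through this client; the policy and namespace names are illustrative, and client is a NetworkingV1Interface constructed as in the other sketches.

import (
	"fmt"

	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

denyAll := &networkingv1.NetworkPolicy{
	ObjectMeta: metav1.ObjectMeta{Name: "default-deny"},
	Spec: networkingv1.NetworkPolicySpec{
		PodSelector: metav1.LabelSelector{}, // empty selector matches every pod in the namespace
		PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress},
	},
}
created, err := client.NetworkPolicies("default").Create(denyAll)
if err != nil {
	return err
}
fmt.Println("created", created.Name)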
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/doc.go
new file mode 100644
index 0000000..7711019
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1beta1
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go
new file mode 100644
index 0000000..12e8e76
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go
@@ -0,0 +1,48 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	rest "k8s.io/client-go/rest"
+)
+
+// EvictionsGetter has a method to return an EvictionInterface.
+// A group's client should implement this interface.
+type EvictionsGetter interface {
+	Evictions(namespace string) EvictionInterface
+}
+
+// EvictionInterface has methods to work with Eviction resources.
+type EvictionInterface interface {
+	EvictionExpansion
+}
+
+// evictions implements EvictionInterface
+type evictions struct {
+	client rest.Interface
+	ns     string
+}
+
+// newEvictions returns an Evictions
+func newEvictions(c *PolicyV1beta1Client, namespace string) *evictions {
+	return &evictions{
+		client: c.RESTClient(),
+		ns:     namespace,
+	}
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go
new file mode 100644
index 0000000..40bad26
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go
@@ -0,0 +1,38 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	policy "k8s.io/api/policy/v1beta1"
+)
+
+// The EvictionExpansion interface allows manually adding extra methods to the EvictionInterface.
+type EvictionExpansion interface {
+	Evict(eviction *policy.Eviction) error
+}
+
+func (c *evictions) Evict(eviction *policy.Eviction) error {
+	return c.client.Post().
+		AbsPath("/api/v1").
+		Namespace(eviction.Namespace).
+		Resource("pods").
+		Name(eviction.Name).
+		SubResource("eviction").
+		Body(eviction).
+		Do().
+		Error()
+}
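Eviction is a write-only subresource of core pods, which is why Evict POSTs to /api/v1/.../pods/{name}/eviction rather than to a policy-group path. A sketch, assuming this package's policy_client.go (elsewhere in the change) wires up Evictions(namespace) as it does for the other resources; the pod name is illustrative and policyv1beta1 is k8s.io/api/policy/v1beta1.

eviction := &policyv1beta1.Eviction{
	ObjectMeta: metav1.ObjectMeta{Name: "my-pod", Namespace: "default"},
}
if err := client.Evictions("default").Evict(eviction); err != nil {
	// A 429 Too Many Requests here means a PodDisruptionBudget is currently blocking the eviction.
	return err
}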
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/generated_expansion.go
new file mode 100644
index 0000000..078c16d
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/generated_expansion.go
@@ -0,0 +1,23 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+type PodDisruptionBudgetExpansion interface{}
+
+type PodSecurityPolicyExpansion interface{}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go
new file mode 100644
index 0000000..864af9a
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go
@@ -0,0 +1,191 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	"time"
+
+	v1beta1 "k8s.io/api/policy/v1beta1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// PodDisruptionBudgetsGetter has a method to return a PodDisruptionBudgetInterface.
+// A group's client should implement this interface.
+type PodDisruptionBudgetsGetter interface {
+	PodDisruptionBudgets(namespace string) PodDisruptionBudgetInterface
+}
+
+// PodDisruptionBudgetInterface has methods to work with PodDisruptionBudget resources.
+type PodDisruptionBudgetInterface interface {
+	Create(*v1beta1.PodDisruptionBudget) (*v1beta1.PodDisruptionBudget, error)
+	Update(*v1beta1.PodDisruptionBudget) (*v1beta1.PodDisruptionBudget, error)
+	UpdateStatus(*v1beta1.PodDisruptionBudget) (*v1beta1.PodDisruptionBudget, error)
+	Delete(name string, options *v1.DeleteOptions) error
+	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
+	Get(name string, options v1.GetOptions) (*v1beta1.PodDisruptionBudget, error)
+	List(opts v1.ListOptions) (*v1beta1.PodDisruptionBudgetList, error)
+	Watch(opts v1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodDisruptionBudget, err error)
+	PodDisruptionBudgetExpansion
+}
+
+// podDisruptionBudgets implements PodDisruptionBudgetInterface
+type podDisruptionBudgets struct {
+	client rest.Interface
+	ns     string
+}
+
+// newPodDisruptionBudgets returns a PodDisruptionBudgets
+func newPodDisruptionBudgets(c *PolicyV1beta1Client, namespace string) *podDisruptionBudgets {
+	return &podDisruptionBudgets{
+		client: c.RESTClient(),
+		ns:     namespace,
+	}
+}
+
+// Get takes the name of the podDisruptionBudget, and returns the corresponding podDisruptionBudget object, and an error if there is any.
+func (c *podDisruptionBudgets) Get(name string, options v1.GetOptions) (result *v1beta1.PodDisruptionBudget, err error) {
+	result = &v1beta1.PodDisruptionBudget{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("poddisruptionbudgets").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of PodDisruptionBudgets that match those selectors.
+func (c *podDisruptionBudgets) List(opts v1.ListOptions) (result *v1beta1.PodDisruptionBudgetList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1beta1.PodDisruptionBudgetList{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("poddisruptionbudgets").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested podDisruptionBudgets.
+func (c *podDisruptionBudgets) Watch(opts v1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Namespace(c.ns).
+		Resource("poddisruptionbudgets").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a podDisruptionBudget and creates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any.
+func (c *podDisruptionBudgets) Create(podDisruptionBudget *v1beta1.PodDisruptionBudget) (result *v1beta1.PodDisruptionBudget, err error) {
+	result = &v1beta1.PodDisruptionBudget{}
+	err = c.client.Post().
+		Namespace(c.ns).
+		Resource("poddisruptionbudgets").
+		Body(podDisruptionBudget).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a podDisruptionBudget and updates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any.
+func (c *podDisruptionBudgets) Update(podDisruptionBudget *v1beta1.PodDisruptionBudget) (result *v1beta1.PodDisruptionBudget, err error) {
+	result = &v1beta1.PodDisruptionBudget{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("poddisruptionbudgets").
+		Name(podDisruptionBudget.Name).
+		Body(podDisruptionBudget).
+		Do().
+		Into(result)
+	return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+
+func (c *podDisruptionBudgets) UpdateStatus(podDisruptionBudget *v1beta1.PodDisruptionBudget) (result *v1beta1.PodDisruptionBudget, err error) {
+	result = &v1beta1.PodDisruptionBudget{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("poddisruptionbudgets").
+		Name(podDisruptionBudget.Name).
+		SubResource("status").
+		Body(podDisruptionBudget).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes the name of the podDisruptionBudget and deletes it. Returns an error if one occurs.
+func (c *podDisruptionBudgets) Delete(name string, options *v1.DeleteOptions) error {
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("poddisruptionbudgets").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *podDisruptionBudgets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("poddisruptionbudgets").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched podDisruptionBudget.
+func (c *podDisruptionBudgets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodDisruptionBudget, err error) {
+	result = &v1beta1.PodDisruptionBudget{}
+	err = c.client.Patch(pt).
+		Namespace(c.ns).
+		Resource("poddisruptionbudgets").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
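As a usage sketch (not part of the vendored code), listing PodDisruptionBudgets with the TimeoutSeconds handling shown in List above; the 30-second timeout and the namespace are placeholders:

    package main

    import (
    	"fmt"

    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/kubernetes"
    	"k8s.io/client-go/rest"
    )

    // listBudgets lists the PodDisruptionBudgets in a namespace, bounding
    // the request with TimeoutSeconds, which List converts to a client
    // timeout as shown above.
    func listBudgets(cfg *rest.Config, namespace string) error {
    	clientset, err := kubernetes.NewForConfig(cfg)
    	if err != nil {
    		return err
    	}
    	timeout := int64(30) // illustrative request bound, in seconds
    	pdbs, err := clientset.PolicyV1beta1().PodDisruptionBudgets(namespace).
    		List(metav1.ListOptions{TimeoutSeconds: &timeout})
    	if err != nil {
    		return err
    	}
    	for _, pdb := range pdbs.Items {
    		fmt.Printf("%s/%s expects %d pods\n", pdb.Namespace, pdb.Name, pdb.Status.ExpectedPods)
    	}
    	return nil
    }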
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go
new file mode 100644
index 0000000..d02096d
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go
@@ -0,0 +1,164 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	"time"
+
+	v1beta1 "k8s.io/api/policy/v1beta1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// PodSecurityPoliciesGetter has a method to return a PodSecurityPolicyInterface.
+// A group's client should implement this interface.
+type PodSecurityPoliciesGetter interface {
+	PodSecurityPolicies() PodSecurityPolicyInterface
+}
+
+// PodSecurityPolicyInterface has methods to work with PodSecurityPolicy resources.
+type PodSecurityPolicyInterface interface {
+	Create(*v1beta1.PodSecurityPolicy) (*v1beta1.PodSecurityPolicy, error)
+	Update(*v1beta1.PodSecurityPolicy) (*v1beta1.PodSecurityPolicy, error)
+	Delete(name string, options *v1.DeleteOptions) error
+	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
+	Get(name string, options v1.GetOptions) (*v1beta1.PodSecurityPolicy, error)
+	List(opts v1.ListOptions) (*v1beta1.PodSecurityPolicyList, error)
+	Watch(opts v1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error)
+	PodSecurityPolicyExpansion
+}
+
+// podSecurityPolicies implements PodSecurityPolicyInterface
+type podSecurityPolicies struct {
+	client rest.Interface
+}
+
+// newPodSecurityPolicies returns a PodSecurityPolicies
+func newPodSecurityPolicies(c *PolicyV1beta1Client) *podSecurityPolicies {
+	return &podSecurityPolicies{
+		client: c.RESTClient(),
+	}
+}
+
+// Get takes the name of the podSecurityPolicy, and returns the corresponding podSecurityPolicy object, and an error if there is any.
+func (c *podSecurityPolicies) Get(name string, options v1.GetOptions) (result *v1beta1.PodSecurityPolicy, err error) {
+	result = &v1beta1.PodSecurityPolicy{}
+	err = c.client.Get().
+		Resource("podsecuritypolicies").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of PodSecurityPolicies that match those selectors.
+func (c *podSecurityPolicies) List(opts v1.ListOptions) (result *v1beta1.PodSecurityPolicyList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1beta1.PodSecurityPolicyList{}
+	err = c.client.Get().
+		Resource("podsecuritypolicies").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested podSecurityPolicies.
+func (c *podSecurityPolicies) Watch(opts v1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Resource("podsecuritypolicies").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a podSecurityPolicy and creates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any.
+func (c *podSecurityPolicies) Create(podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) {
+	result = &v1beta1.PodSecurityPolicy{}
+	err = c.client.Post().
+		Resource("podsecuritypolicies").
+		Body(podSecurityPolicy).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a podSecurityPolicy and updates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any.
+func (c *podSecurityPolicies) Update(podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) {
+	result = &v1beta1.PodSecurityPolicy{}
+	err = c.client.Put().
+		Resource("podsecuritypolicies").
+		Name(podSecurityPolicy.Name).
+		Body(podSecurityPolicy).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes the name of the podSecurityPolicy and deletes it. Returns an error if one occurs.
+func (c *podSecurityPolicies) Delete(name string, options *v1.DeleteOptions) error {
+	return c.client.Delete().
+		Resource("podsecuritypolicies").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *podSecurityPolicies) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Resource("podsecuritypolicies").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched podSecurityPolicy.
+func (c *podSecurityPolicies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) {
+	result = &v1beta1.PodSecurityPolicy{}
+	err = c.client.Patch(pt).
+		Resource("podsecuritypolicies").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
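A short, hypothetical example of consuming the Watch method above, which sets opts.Watch and applies the same timeout conversion as List; the one-minute timeout is illustrative:

    package main

    import (
    	"fmt"

    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/kubernetes"
    	"k8s.io/client-go/rest"
    )

    // watchPolicies streams PodSecurityPolicy events until the watch
    // expires or the server closes the connection.
    func watchPolicies(cfg *rest.Config) error {
    	clientset, err := kubernetes.NewForConfig(cfg)
    	if err != nil {
    		return err
    	}
    	timeout := int64(60)
    	w, err := clientset.PolicyV1beta1().PodSecurityPolicies().
    		Watch(metav1.ListOptions{TimeoutSeconds: &timeout})
    	if err != nil {
    		return err
    	}
    	defer w.Stop()
    	for ev := range w.ResultChan() {
    		fmt.Printf("%s: %T\n", ev.Type, ev.Object)
    	}
    	return nil
    }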
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go
new file mode 100644
index 0000000..020e185
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go
@@ -0,0 +1,100 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	v1beta1 "k8s.io/api/policy/v1beta1"
+	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+type PolicyV1beta1Interface interface {
+	RESTClient() rest.Interface
+	EvictionsGetter
+	PodDisruptionBudgetsGetter
+	PodSecurityPoliciesGetter
+}
+
+// PolicyV1beta1Client is used to interact with features provided by the policy group.
+type PolicyV1beta1Client struct {
+	restClient rest.Interface
+}
+
+func (c *PolicyV1beta1Client) Evictions(namespace string) EvictionInterface {
+	return newEvictions(c, namespace)
+}
+
+func (c *PolicyV1beta1Client) PodDisruptionBudgets(namespace string) PodDisruptionBudgetInterface {
+	return newPodDisruptionBudgets(c, namespace)
+}
+
+func (c *PolicyV1beta1Client) PodSecurityPolicies() PodSecurityPolicyInterface {
+	return newPodSecurityPolicies(c)
+}
+
+// NewForConfig creates a new PolicyV1beta1Client for the given config.
+func NewForConfig(c *rest.Config) (*PolicyV1beta1Client, error) {
+	config := *c
+	if err := setConfigDefaults(&config); err != nil {
+		return nil, err
+	}
+	client, err := rest.RESTClientFor(&config)
+	if err != nil {
+		return nil, err
+	}
+	return &PolicyV1beta1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new PolicyV1beta1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *PolicyV1beta1Client {
+	client, err := NewForConfig(c)
+	if err != nil {
+		panic(err)
+	}
+	return client
+}
+
+// New creates a new PolicyV1beta1Client for the given RESTClient.
+func New(c rest.Interface) *PolicyV1beta1Client {
+	return &PolicyV1beta1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+	gv := v1beta1.SchemeGroupVersion
+	config.GroupVersion = &gv
+	config.APIPath = "/apis"
+	config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
+
+	if config.UserAgent == "" {
+		config.UserAgent = rest.DefaultKubernetesUserAgent()
+	}
+
+	return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with the API server by this client implementation.
+func (c *PolicyV1beta1Client) RESTClient() rest.Interface {
+	if c == nil {
+		return nil
+	}
+	return c.restClient
+}
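A sketch of constructing this typed client on its own, without the aggregated clientset; rest.InClusterConfig assumes the code runs inside a pod, and NewForConfig applies the defaults from setConfigDefaults (APIPath "/apis", the policy/v1beta1 GroupVersion, the shared codecs) before building the RESTClient:

    package main

    import (
    	policyv1beta1 "k8s.io/client-go/kubernetes/typed/policy/v1beta1"
    	"k8s.io/client-go/rest"
    )

    // newPolicyClient wires only the policy/v1beta1 typed client.
    func newPolicyClient() (*policyv1beta1.PolicyV1beta1Client, error) {
    	cfg, err := rest.InClusterConfig() // assumes in-cluster execution
    	if err != nil {
    		return nil, err
    	}
    	return policyv1beta1.NewForConfig(cfg)
    }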
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go
new file mode 100644
index 0000000..0a47c44
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go
@@ -0,0 +1,164 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	"time"
+
+	v1 "k8s.io/api/rbac/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// ClusterRolesGetter has a method to return a ClusterRoleInterface.
+// A group's client should implement this interface.
+type ClusterRolesGetter interface {
+	ClusterRoles() ClusterRoleInterface
+}
+
+// ClusterRoleInterface has methods to work with ClusterRole resources.
+type ClusterRoleInterface interface {
+	Create(*v1.ClusterRole) (*v1.ClusterRole, error)
+	Update(*v1.ClusterRole) (*v1.ClusterRole, error)
+	Delete(name string, options *metav1.DeleteOptions) error
+	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
+	Get(name string, options metav1.GetOptions) (*v1.ClusterRole, error)
+	List(opts metav1.ListOptions) (*v1.ClusterRoleList, error)
+	Watch(opts metav1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ClusterRole, err error)
+	ClusterRoleExpansion
+}
+
+// clusterRoles implements ClusterRoleInterface
+type clusterRoles struct {
+	client rest.Interface
+}
+
+// newClusterRoles returns a ClusterRoles
+func newClusterRoles(c *RbacV1Client) *clusterRoles {
+	return &clusterRoles{
+		client: c.RESTClient(),
+	}
+}
+
+// Get takes the name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any.
+func (c *clusterRoles) Get(name string, options metav1.GetOptions) (result *v1.ClusterRole, err error) {
+	result = &v1.ClusterRole{}
+	err = c.client.Get().
+		Resource("clusterroles").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of ClusterRoles that match those selectors.
+func (c *clusterRoles) List(opts metav1.ListOptions) (result *v1.ClusterRoleList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1.ClusterRoleList{}
+	err = c.client.Get().
+		Resource("clusterroles").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested clusterRoles.
+func (c *clusterRoles) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Resource("clusterroles").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any.
+func (c *clusterRoles) Create(clusterRole *v1.ClusterRole) (result *v1.ClusterRole, err error) {
+	result = &v1.ClusterRole{}
+	err = c.client.Post().
+		Resource("clusterroles").
+		Body(clusterRole).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any.
+func (c *clusterRoles) Update(clusterRole *v1.ClusterRole) (result *v1.ClusterRole, err error) {
+	result = &v1.ClusterRole{}
+	err = c.client.Put().
+		Resource("clusterroles").
+		Name(clusterRole.Name).
+		Body(clusterRole).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes the name of the clusterRole and deletes it. Returns an error if one occurs.
+func (c *clusterRoles) Delete(name string, options *metav1.DeleteOptions) error {
+	return c.client.Delete().
+		Resource("clusterroles").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *clusterRoles) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Resource("clusterroles").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched clusterRole.
+func (c *clusterRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ClusterRole, err error) {
+	result = &v1.ClusterRole{}
+	err = c.client.Patch(pt).
+		Resource("clusterroles").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
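To illustrate the Create path, a hedged example that defines a cluster-wide read-only role for pods; the "pod-reader" name and its rule set are invented for the sketch:

    package main

    import (
    	rbacv1 "k8s.io/api/rbac/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/kubernetes"
    	"k8s.io/client-go/rest"
    )

    // createPodReader creates a ClusterRole that can read pods.
    func createPodReader(cfg *rest.Config) (*rbacv1.ClusterRole, error) {
    	clientset, err := kubernetes.NewForConfig(cfg)
    	if err != nil {
    		return nil, err
    	}
    	role := &rbacv1.ClusterRole{
    		ObjectMeta: metav1.ObjectMeta{Name: "pod-reader"},
    		Rules: []rbacv1.PolicyRule{{
    			APIGroups: []string{""}, // the core API group
    			Resources: []string{"pods"},
    			Verbs:     []string{"get", "list", "watch"},
    		}},
    	}
    	return clientset.RbacV1().ClusterRoles().Create(role)
    }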
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go
new file mode 100644
index 0000000..c16ebc3
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go
@@ -0,0 +1,164 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	"time"
+
+	v1 "k8s.io/api/rbac/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// ClusterRoleBindingsGetter has a method to return a ClusterRoleBindingInterface.
+// A group's client should implement this interface.
+type ClusterRoleBindingsGetter interface {
+	ClusterRoleBindings() ClusterRoleBindingInterface
+}
+
+// ClusterRoleBindingInterface has methods to work with ClusterRoleBinding resources.
+type ClusterRoleBindingInterface interface {
+	Create(*v1.ClusterRoleBinding) (*v1.ClusterRoleBinding, error)
+	Update(*v1.ClusterRoleBinding) (*v1.ClusterRoleBinding, error)
+	Delete(name string, options *metav1.DeleteOptions) error
+	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
+	Get(name string, options metav1.GetOptions) (*v1.ClusterRoleBinding, error)
+	List(opts metav1.ListOptions) (*v1.ClusterRoleBindingList, error)
+	Watch(opts metav1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ClusterRoleBinding, err error)
+	ClusterRoleBindingExpansion
+}
+
+// clusterRoleBindings implements ClusterRoleBindingInterface
+type clusterRoleBindings struct {
+	client rest.Interface
+}
+
+// newClusterRoleBindings returns a ClusterRoleBindings
+func newClusterRoleBindings(c *RbacV1Client) *clusterRoleBindings {
+	return &clusterRoleBindings{
+		client: c.RESTClient(),
+	}
+}
+
+// Get takes the name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any.
+func (c *clusterRoleBindings) Get(name string, options metav1.GetOptions) (result *v1.ClusterRoleBinding, err error) {
+	result = &v1.ClusterRoleBinding{}
+	err = c.client.Get().
+		Resource("clusterrolebindings").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors.
+func (c *clusterRoleBindings) List(opts metav1.ListOptions) (result *v1.ClusterRoleBindingList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1.ClusterRoleBindingList{}
+	err = c.client.Get().
+		Resource("clusterrolebindings").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested clusterRoleBindings.
+func (c *clusterRoleBindings) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Resource("clusterrolebindings").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any.
+func (c *clusterRoleBindings) Create(clusterRoleBinding *v1.ClusterRoleBinding) (result *v1.ClusterRoleBinding, err error) {
+	result = &v1.ClusterRoleBinding{}
+	err = c.client.Post().
+		Resource("clusterrolebindings").
+		Body(clusterRoleBinding).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any.
+func (c *clusterRoleBindings) Update(clusterRoleBinding *v1.ClusterRoleBinding) (result *v1.ClusterRoleBinding, err error) {
+	result = &v1.ClusterRoleBinding{}
+	err = c.client.Put().
+		Resource("clusterrolebindings").
+		Name(clusterRoleBinding.Name).
+		Body(clusterRoleBinding).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes the name of the clusterRoleBinding and deletes it. Returns an error if one occurs.
+func (c *clusterRoleBindings) Delete(name string, options *metav1.DeleteOptions) error {
+	return c.client.Delete().
+		Resource("clusterrolebindings").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *clusterRoleBindings) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Resource("clusterrolebindings").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched clusterRoleBinding.
+func (c *clusterRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ClusterRoleBinding, err error) {
+	result = &v1.ClusterRoleBinding{}
+	err = c.client.Patch(pt).
+		Resource("clusterrolebindings").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
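Continuing the previous sketch, a hypothetical binding of that "pod-reader" role to a user via ClusterRoleBindings().Create; every name here is a placeholder:

    package main

    import (
    	rbacv1 "k8s.io/api/rbac/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/kubernetes"
    	"k8s.io/client-go/rest"
    )

    // grantPodReader binds the illustrative "pod-reader" ClusterRole to a user.
    func grantPodReader(cfg *rest.Config, user string) (*rbacv1.ClusterRoleBinding, error) {
    	clientset, err := kubernetes.NewForConfig(cfg)
    	if err != nil {
    		return nil, err
    	}
    	binding := &rbacv1.ClusterRoleBinding{
    		ObjectMeta: metav1.ObjectMeta{Name: "pod-reader-" + user},
    		Subjects: []rbacv1.Subject{{
    			Kind:     rbacv1.UserKind,
    			APIGroup: rbacv1.GroupName,
    			Name:     user,
    		}},
    		RoleRef: rbacv1.RoleRef{
    			APIGroup: rbacv1.GroupName,
    			Kind:     "ClusterRole",
    			Name:     "pod-reader",
    		},
    	}
    	return clientset.RbacV1().ClusterRoleBindings().Create(binding)
    }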
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/doc.go
new file mode 100644
index 0000000..3af5d05
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/generated_expansion.go
new file mode 100644
index 0000000..e3f1b02
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/generated_expansion.go
@@ -0,0 +1,27 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+type ClusterRoleExpansion interface{}
+
+type ClusterRoleBindingExpansion interface{}
+
+type RoleExpansion interface{}
+
+type RoleBindingExpansion interface{}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go
new file mode 100644
index 0000000..e3855bb
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go
@@ -0,0 +1,105 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	v1 "k8s.io/api/rbac/v1"
+	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+type RbacV1Interface interface {
+	RESTClient() rest.Interface
+	ClusterRolesGetter
+	ClusterRoleBindingsGetter
+	RolesGetter
+	RoleBindingsGetter
+}
+
+// RbacV1Client is used to interact with features provided by the rbac.authorization.k8s.io group.
+type RbacV1Client struct {
+	restClient rest.Interface
+}
+
+func (c *RbacV1Client) ClusterRoles() ClusterRoleInterface {
+	return newClusterRoles(c)
+}
+
+func (c *RbacV1Client) ClusterRoleBindings() ClusterRoleBindingInterface {
+	return newClusterRoleBindings(c)
+}
+
+func (c *RbacV1Client) Roles(namespace string) RoleInterface {
+	return newRoles(c, namespace)
+}
+
+func (c *RbacV1Client) RoleBindings(namespace string) RoleBindingInterface {
+	return newRoleBindings(c, namespace)
+}
+
+// NewForConfig creates a new RbacV1Client for the given config.
+func NewForConfig(c *rest.Config) (*RbacV1Client, error) {
+	config := *c
+	if err := setConfigDefaults(&config); err != nil {
+		return nil, err
+	}
+	client, err := rest.RESTClientFor(&config)
+	if err != nil {
+		return nil, err
+	}
+	return &RbacV1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new RbacV1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *RbacV1Client {
+	client, err := NewForConfig(c)
+	if err != nil {
+		panic(err)
+	}
+	return client
+}
+
+// New creates a new RbacV1Client for the given RESTClient.
+func New(c rest.Interface) *RbacV1Client {
+	return &RbacV1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+	gv := v1.SchemeGroupVersion
+	config.GroupVersion = &gv
+	config.APIPath = "/apis"
+	config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
+
+	if config.UserAgent == "" {
+		config.UserAgent = rest.DefaultKubernetesUserAgent()
+	}
+
+	return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with the API server by this client implementation.
+func (c *RbacV1Client) RESTClient() rest.Interface {
+	if c == nil {
+		return nil
+	}
+	return c.restClient
+}
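A small example of NewForConfigOrDie, which panics rather than returning an error and so suits initialization paths that cannot proceed without the client; the "kube-system" namespace is only illustrative:

    package main

    import (
    	"fmt"

    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	rbacv1client "k8s.io/client-go/kubernetes/typed/rbac/v1"
    	"k8s.io/client-go/rest"
    )

    // mustListRoles builds the typed RBAC client, panicking on a bad
    // config, then prints the Role names in a namespace.
    func mustListRoles(cfg *rest.Config) {
    	rbacClient := rbacv1client.NewForConfigOrDie(cfg)
    	roles, err := rbacClient.Roles("kube-system").List(metav1.ListOptions{})
    	if err != nil {
    		panic(err)
    	}
    	for _, r := range roles.Items {
    		fmt.Println(r.Name)
    	}
    }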
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go
new file mode 100644
index 0000000..a17d791
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go
@@ -0,0 +1,174 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	"time"
+
+	v1 "k8s.io/api/rbac/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// RolesGetter has a method to return a RoleInterface.
+// A group's client should implement this interface.
+type RolesGetter interface {
+	Roles(namespace string) RoleInterface
+}
+
+// RoleInterface has methods to work with Role resources.
+type RoleInterface interface {
+	Create(*v1.Role) (*v1.Role, error)
+	Update(*v1.Role) (*v1.Role, error)
+	Delete(name string, options *metav1.DeleteOptions) error
+	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
+	Get(name string, options metav1.GetOptions) (*v1.Role, error)
+	List(opts metav1.ListOptions) (*v1.RoleList, error)
+	Watch(opts metav1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Role, err error)
+	RoleExpansion
+}
+
+// roles implements RoleInterface
+type roles struct {
+	client rest.Interface
+	ns     string
+}
+
+// newRoles returns a Roles
+func newRoles(c *RbacV1Client, namespace string) *roles {
+	return &roles{
+		client: c.RESTClient(),
+		ns:     namespace,
+	}
+}
+
+// Get takes the name of the role, and returns the corresponding role object, and an error if there is any.
+func (c *roles) Get(name string, options metav1.GetOptions) (result *v1.Role, err error) {
+	result = &v1.Role{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("roles").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of Roles that match those selectors.
+func (c *roles) List(opts metav1.ListOptions) (result *v1.RoleList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1.RoleList{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("roles").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested roles.
+func (c *roles) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Namespace(c.ns).
+		Resource("roles").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any.
+func (c *roles) Create(role *v1.Role) (result *v1.Role, err error) {
+	result = &v1.Role{}
+	err = c.client.Post().
+		Namespace(c.ns).
+		Resource("roles").
+		Body(role).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any.
+func (c *roles) Update(role *v1.Role) (result *v1.Role, err error) {
+	result = &v1.Role{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("roles").
+		Name(role.Name).
+		Body(role).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes the name of the role and deletes it. Returns an error if one occurs.
+func (c *roles) Delete(name string, options *metav1.DeleteOptions) error {
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("roles").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *roles) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("roles").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched role.
+func (c *roles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Role, err error) {
+	result = &v1.Role{}
+	err = c.client.Patch(pt).
+		Namespace(c.ns).
+		Resource("roles").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
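A sketch of DeleteCollection as implemented above, scoped by a label selector; the "ephemeral=true" label is hypothetical:

    package main

    import (
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/kubernetes"
    	"k8s.io/client-go/rest"
    )

    // cleanupRoles removes every Role in the namespace that carries the
    // illustrative "ephemeral=true" label, exercising the listOptions
    // parameter of DeleteCollection.
    func cleanupRoles(cfg *rest.Config, namespace string) error {
    	clientset, err := kubernetes.NewForConfig(cfg)
    	if err != nil {
    		return err
    	}
    	return clientset.RbacV1().Roles(namespace).DeleteCollection(
    		&metav1.DeleteOptions{},
    		metav1.ListOptions{LabelSelector: "ephemeral=true"},
    	)
    }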
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go
new file mode 100644
index 0000000..c87e457
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go
@@ -0,0 +1,174 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	"time"
+
+	v1 "k8s.io/api/rbac/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// RoleBindingsGetter has a method to return a RoleBindingInterface.
+// A group's client should implement this interface.
+type RoleBindingsGetter interface {
+	RoleBindings(namespace string) RoleBindingInterface
+}
+
+// RoleBindingInterface has methods to work with RoleBinding resources.
+type RoleBindingInterface interface {
+	Create(*v1.RoleBinding) (*v1.RoleBinding, error)
+	Update(*v1.RoleBinding) (*v1.RoleBinding, error)
+	Delete(name string, options *metav1.DeleteOptions) error
+	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
+	Get(name string, options metav1.GetOptions) (*v1.RoleBinding, error)
+	List(opts metav1.ListOptions) (*v1.RoleBindingList, error)
+	Watch(opts metav1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.RoleBinding, err error)
+	RoleBindingExpansion
+}
+
+// roleBindings implements RoleBindingInterface
+type roleBindings struct {
+	client rest.Interface
+	ns     string
+}
+
+// newRoleBindings returns a RoleBindings
+func newRoleBindings(c *RbacV1Client, namespace string) *roleBindings {
+	return &roleBindings{
+		client: c.RESTClient(),
+		ns:     namespace,
+	}
+}
+
+// Get takes the name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any.
+func (c *roleBindings) Get(name string, options metav1.GetOptions) (result *v1.RoleBinding, err error) {
+	result = &v1.RoleBinding{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("rolebindings").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of RoleBindings that match those selectors.
+func (c *roleBindings) List(opts metav1.ListOptions) (result *v1.RoleBindingList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1.RoleBindingList{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("rolebindings").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested roleBindings.
+func (c *roleBindings) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Namespace(c.ns).
+		Resource("rolebindings").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any.
+func (c *roleBindings) Create(roleBinding *v1.RoleBinding) (result *v1.RoleBinding, err error) {
+	result = &v1.RoleBinding{}
+	err = c.client.Post().
+		Namespace(c.ns).
+		Resource("rolebindings").
+		Body(roleBinding).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any.
+func (c *roleBindings) Update(roleBinding *v1.RoleBinding) (result *v1.RoleBinding, err error) {
+	result = &v1.RoleBinding{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("rolebindings").
+		Name(roleBinding.Name).
+		Body(roleBinding).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes the name of the roleBinding and deletes it. Returns an error if one occurs.
+func (c *roleBindings) Delete(name string, options *metav1.DeleteOptions) error {
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("rolebindings").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *roleBindings) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("rolebindings").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched roleBinding.
+func (c *roleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.RoleBinding, err error) {
+	result = &v1.RoleBinding{}
+	err = c.client.Patch(pt).
+		Namespace(c.ns).
+		Resource("rolebindings").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
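Finally, a hedged example of the Patch method with a strategic-merge patch body; the label key and value are placeholders:

    package main

    import (
    	rbacv1 "k8s.io/api/rbac/v1"
    	"k8s.io/apimachinery/pkg/types"
    	"k8s.io/client-go/kubernetes"
    	"k8s.io/client-go/rest"
    )

    // relabelBinding attaches a label to an existing RoleBinding using a
    // strategic-merge patch, leaving the rest of the object untouched.
    func relabelBinding(cfg *rest.Config, namespace, name string) (*rbacv1.RoleBinding, error) {
    	clientset, err := kubernetes.NewForConfig(cfg)
    	if err != nil {
    		return nil, err
    	}
    	patch := []byte(`{"metadata":{"labels":{"team":"platform"}}}`)
    	return clientset.RbacV1().RoleBindings(namespace).
    		Patch(name, types.StrategicMergePatchType, patch)
    }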
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go
new file mode 100644
index 0000000..77e6687
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go
@@ -0,0 +1,164 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	"time"
+
+	v1alpha1 "k8s.io/api/rbac/v1alpha1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// ClusterRolesGetter has a method to return a ClusterRoleInterface.
+// A group's client should implement this interface.
+type ClusterRolesGetter interface {
+	ClusterRoles() ClusterRoleInterface
+}
+
+// ClusterRoleInterface has methods to work with ClusterRole resources.
+type ClusterRoleInterface interface {
+	Create(*v1alpha1.ClusterRole) (*v1alpha1.ClusterRole, error)
+	Update(*v1alpha1.ClusterRole) (*v1alpha1.ClusterRole, error)
+	Delete(name string, options *v1.DeleteOptions) error
+	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
+	Get(name string, options v1.GetOptions) (*v1alpha1.ClusterRole, error)
+	List(opts v1.ListOptions) (*v1alpha1.ClusterRoleList, error)
+	Watch(opts v1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRole, err error)
+	ClusterRoleExpansion
+}
+
+// clusterRoles implements ClusterRoleInterface
+type clusterRoles struct {
+	client rest.Interface
+}
+
+// newClusterRoles returns a ClusterRoles
+func newClusterRoles(c *RbacV1alpha1Client) *clusterRoles {
+	return &clusterRoles{
+		client: c.RESTClient(),
+	}
+}
+
+// Get takes the name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any.
+func (c *clusterRoles) Get(name string, options v1.GetOptions) (result *v1alpha1.ClusterRole, err error) {
+	result = &v1alpha1.ClusterRole{}
+	err = c.client.Get().
+		Resource("clusterroles").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of ClusterRoles that match those selectors.
+func (c *clusterRoles) List(opts v1.ListOptions) (result *v1alpha1.ClusterRoleList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1alpha1.ClusterRoleList{}
+	err = c.client.Get().
+		Resource("clusterroles").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested clusterRoles.
+func (c *clusterRoles) Watch(opts v1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Resource("clusterroles").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any.
+func (c *clusterRoles) Create(clusterRole *v1alpha1.ClusterRole) (result *v1alpha1.ClusterRole, err error) {
+	result = &v1alpha1.ClusterRole{}
+	err = c.client.Post().
+		Resource("clusterroles").
+		Body(clusterRole).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any.
+func (c *clusterRoles) Update(clusterRole *v1alpha1.ClusterRole) (result *v1alpha1.ClusterRole, err error) {
+	result = &v1alpha1.ClusterRole{}
+	err = c.client.Put().
+		Resource("clusterroles").
+		Name(clusterRole.Name).
+		Body(clusterRole).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes the name of the clusterRole and deletes it. Returns an error if one occurs.
+func (c *clusterRoles) Delete(name string, options *v1.DeleteOptions) error {
+	return c.client.Delete().
+		Resource("clusterroles").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *clusterRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Resource("clusterroles").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched clusterRole.
+func (c *clusterRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRole, err error) {
+	result = &v1alpha1.ClusterRole{}
+	err = c.client.Patch(pt).
+		Resource("clusterroles").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go
new file mode 100644
index 0000000..0d1b9d2
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go
@@ -0,0 +1,164 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	"time"
+
+	v1alpha1 "k8s.io/api/rbac/v1alpha1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// ClusterRoleBindingsGetter has a method to return a ClusterRoleBindingInterface.
+// A group's client should implement this interface.
+type ClusterRoleBindingsGetter interface {
+	ClusterRoleBindings() ClusterRoleBindingInterface
+}
+
+// ClusterRoleBindingInterface has methods to work with ClusterRoleBinding resources.
+type ClusterRoleBindingInterface interface {
+	Create(*v1alpha1.ClusterRoleBinding) (*v1alpha1.ClusterRoleBinding, error)
+	Update(*v1alpha1.ClusterRoleBinding) (*v1alpha1.ClusterRoleBinding, error)
+	Delete(name string, options *v1.DeleteOptions) error
+	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
+	Get(name string, options v1.GetOptions) (*v1alpha1.ClusterRoleBinding, error)
+	List(opts v1.ListOptions) (*v1alpha1.ClusterRoleBindingList, error)
+	Watch(opts v1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRoleBinding, err error)
+	ClusterRoleBindingExpansion
+}
+
+// clusterRoleBindings implements ClusterRoleBindingInterface
+type clusterRoleBindings struct {
+	client rest.Interface
+}
+
+// newClusterRoleBindings returns a ClusterRoleBindings
+func newClusterRoleBindings(c *RbacV1alpha1Client) *clusterRoleBindings {
+	return &clusterRoleBindings{
+		client: c.RESTClient(),
+	}
+}
+
+// Get takes the name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any.
+func (c *clusterRoleBindings) Get(name string, options v1.GetOptions) (result *v1alpha1.ClusterRoleBinding, err error) {
+	result = &v1alpha1.ClusterRoleBinding{}
+	err = c.client.Get().
+		Resource("clusterrolebindings").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors.
+func (c *clusterRoleBindings) List(opts v1.ListOptions) (result *v1alpha1.ClusterRoleBindingList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1alpha1.ClusterRoleBindingList{}
+	err = c.client.Get().
+		Resource("clusterrolebindings").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested clusterRoleBindings.
+func (c *clusterRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Resource("clusterrolebindings").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any.
+func (c *clusterRoleBindings) Create(clusterRoleBinding *v1alpha1.ClusterRoleBinding) (result *v1alpha1.ClusterRoleBinding, err error) {
+	result = &v1alpha1.ClusterRoleBinding{}
+	err = c.client.Post().
+		Resource("clusterrolebindings").
+		Body(clusterRoleBinding).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any.
+func (c *clusterRoleBindings) Update(clusterRoleBinding *v1alpha1.ClusterRoleBinding) (result *v1alpha1.ClusterRoleBinding, err error) {
+	result = &v1alpha1.ClusterRoleBinding{}
+	err = c.client.Put().
+		Resource("clusterrolebindings").
+		Name(clusterRoleBinding.Name).
+		Body(clusterRoleBinding).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes the name of the clusterRoleBinding and deletes it. Returns an error if one occurs.
+func (c *clusterRoleBindings) Delete(name string, options *v1.DeleteOptions) error {
+	return c.client.Delete().
+		Resource("clusterrolebindings").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *clusterRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Resource("clusterrolebindings").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched clusterRoleBinding.
+func (c *clusterRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRoleBinding, err error) {
+	result = &v1alpha1.ClusterRoleBinding{}
+	err = c.client.Patch(pt).
+		Resource("clusterrolebindings").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
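
Usage note (illustrative, not part of the vendored file): a minimal sketch of consuming the generated ClusterRoleBinding client above. The function name, the NotFound handling, and the output format are assumptions; client construction is covered by rbac_client.go further below.

package rbacexample

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	rbacv1alpha1 "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1"
)

// printBinding fetches one ClusterRoleBinding by name and treats
// NotFound as a normal, non-fatal outcome.
func printBinding(client rbacv1alpha1.RbacV1alpha1Interface, name string) error {
	crb, err := client.ClusterRoleBindings().Get(name, metav1.GetOptions{})
	if apierrors.IsNotFound(err) {
		fmt.Printf("clusterrolebinding %q not found\n", name)
		return nil
	}
	if err != nil {
		return err
	}
	fmt.Printf("%q grants role %q to %d subjects\n", crb.Name, crb.RoleRef.Name, len(crb.Subjects))
	return nil
}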
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/doc.go
new file mode 100644
index 0000000..df51baa
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package contains the automatically generated typed clients.
+package v1alpha1
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/generated_expansion.go
new file mode 100644
index 0000000..b8b5c78
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/generated_expansion.go
@@ -0,0 +1,27 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+type ClusterRoleExpansion interface{}
+
+type ClusterRoleBindingExpansion interface{}
+
+type RoleExpansion interface{}
+
+type RoleBindingExpansion interface{}
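
Usage note (illustrative, not part of the vendored file): the expansion interfaces above are deliberately empty hooks. A hand-written file in the same package can widen one of them with convenience methods on the unexported client type; the Exists helper below is a hypothetical sketch (the widened declaration would replace the empty generated one for that type).

package v1alpha1

import (
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// ClusterRoleBindingExpansion widens the generated client with a
// hypothetical convenience method.
type ClusterRoleBindingExpansion interface {
	Exists(name string) (bool, error)
}

// Exists reports whether the named binding is present, mapping a
// NotFound error to (false, nil).
func (c *clusterRoleBindings) Exists(name string) (bool, error) {
	_, err := c.Get(name, metav1.GetOptions{})
	if apierrors.IsNotFound(err) {
		return false, nil
	}
	return err == nil, err
}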
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go
new file mode 100644
index 0000000..de83531
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go
@@ -0,0 +1,105 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	v1alpha1 "k8s.io/api/rbac/v1alpha1"
+	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+type RbacV1alpha1Interface interface {
+	RESTClient() rest.Interface
+	ClusterRolesGetter
+	ClusterRoleBindingsGetter
+	RolesGetter
+	RoleBindingsGetter
+}
+
+// RbacV1alpha1Client is used to interact with features provided by the rbac.authorization.k8s.io group.
+type RbacV1alpha1Client struct {
+	restClient rest.Interface
+}
+
+func (c *RbacV1alpha1Client) ClusterRoles() ClusterRoleInterface {
+	return newClusterRoles(c)
+}
+
+func (c *RbacV1alpha1Client) ClusterRoleBindings() ClusterRoleBindingInterface {
+	return newClusterRoleBindings(c)
+}
+
+func (c *RbacV1alpha1Client) Roles(namespace string) RoleInterface {
+	return newRoles(c, namespace)
+}
+
+func (c *RbacV1alpha1Client) RoleBindings(namespace string) RoleBindingInterface {
+	return newRoleBindings(c, namespace)
+}
+
+// NewForConfig creates a new RbacV1alpha1Client for the given config.
+func NewForConfig(c *rest.Config) (*RbacV1alpha1Client, error) {
+	config := *c
+	if err := setConfigDefaults(&config); err != nil {
+		return nil, err
+	}
+	client, err := rest.RESTClientFor(&config)
+	if err != nil {
+		return nil, err
+	}
+	return &RbacV1alpha1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new RbacV1alpha1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *RbacV1alpha1Client {
+	client, err := NewForConfig(c)
+	if err != nil {
+		panic(err)
+	}
+	return client
+}
+
+// New creates a new RbacV1alpha1Client for the given RESTClient.
+func New(c rest.Interface) *RbacV1alpha1Client {
+	return &RbacV1alpha1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+	gv := v1alpha1.SchemeGroupVersion
+	config.GroupVersion = &gv
+	config.APIPath = "/apis"
+	config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
+
+	if config.UserAgent == "" {
+		config.UserAgent = rest.DefaultKubernetesUserAgent()
+	}
+
+	return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with the API server by this client implementation.
+func (c *RbacV1alpha1Client) RESTClient() rest.Interface {
+	if c == nil {
+		return nil
+	}
+	return c.restClient
+}
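
Usage note (illustrative, not part of the vendored file): a minimal sketch wiring NewForConfig above to a kubeconfig-based rest.Config. The kubeconfig path is an assumption; code running inside a pod would use rest.InClusterConfig() instead.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	rbacv1alpha1 "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a rest.Config from a kubeconfig on disk (path is illustrative).
	config, err := clientcmd.BuildConfigFromFlags("", "/root/.kube/config")
	if err != nil {
		panic(err)
	}
	client, err := rbacv1alpha1.NewForConfig(config)
	if err != nil {
		panic(err)
	}
	bindings, err := client.ClusterRoleBindings().List(metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("found %d rbac/v1alpha1 cluster role bindings\n", len(bindings.Items))
}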
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go
new file mode 100644
index 0000000..4a4b672
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go
@@ -0,0 +1,174 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	"time"
+
+	v1alpha1 "k8s.io/api/rbac/v1alpha1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// RolesGetter has a method to return a RoleInterface.
+// A group's client should implement this interface.
+type RolesGetter interface {
+	Roles(namespace string) RoleInterface
+}
+
+// RoleInterface has methods to work with Role resources.
+type RoleInterface interface {
+	Create(*v1alpha1.Role) (*v1alpha1.Role, error)
+	Update(*v1alpha1.Role) (*v1alpha1.Role, error)
+	Delete(name string, options *v1.DeleteOptions) error
+	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
+	Get(name string, options v1.GetOptions) (*v1alpha1.Role, error)
+	List(opts v1.ListOptions) (*v1alpha1.RoleList, error)
+	Watch(opts v1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Role, err error)
+	RoleExpansion
+}
+
+// roles implements RoleInterface
+type roles struct {
+	client rest.Interface
+	ns     string
+}
+
+// newRoles returns a Roles
+func newRoles(c *RbacV1alpha1Client, namespace string) *roles {
+	return &roles{
+		client: c.RESTClient(),
+		ns:     namespace,
+	}
+}
+
+// Get takes the name of the role and returns the corresponding role object, along with an error if one occurs.
+func (c *roles) Get(name string, options v1.GetOptions) (result *v1alpha1.Role, err error) {
+	result = &v1alpha1.Role{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("roles").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of Roles that match those selectors.
+func (c *roles) List(opts v1.ListOptions) (result *v1alpha1.RoleList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1alpha1.RoleList{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("roles").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested roles.
+func (c *roles) Watch(opts v1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Namespace(c.ns).
+		Resource("roles").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error if there is any.
+func (c *roles) Create(role *v1alpha1.Role) (result *v1alpha1.Role, err error) {
+	result = &v1alpha1.Role{}
+	err = c.client.Post().
+		Namespace(c.ns).
+		Resource("roles").
+		Body(role).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error if there is any.
+func (c *roles) Update(role *v1alpha1.Role) (result *v1alpha1.Role, err error) {
+	result = &v1alpha1.Role{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("roles").
+		Name(role.Name).
+		Body(role).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes the name of the role and deletes it. Returns an error if one occurs.
+func (c *roles) Delete(name string, options *v1.DeleteOptions) error {
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("roles").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *roles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("roles").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched role.
+func (c *roles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Role, err error) {
+	result = &v1alpha1.Role{}
+	err = c.client.Patch(pt).
+		Namespace(c.ns).
+		Resource("roles").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
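
Usage note (illustrative, not part of the vendored file): a sketch of the Patch method above using a strategic merge patch, which sends only the changed fields rather than the whole object. Namespace, role name, and label are assumptions.

package rbacexample

import (
	rbacv1alpha1 "k8s.io/api/rbac/v1alpha1"
	"k8s.io/apimachinery/pkg/types"
	typedrbacv1alpha1 "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1"
)

// labelRole adds one label to an existing Role without sending the
// full object, using a strategic merge patch.
func labelRole(client typedrbacv1alpha1.RbacV1alpha1Interface) (*rbacv1alpha1.Role, error) {
	patch := []byte(`{"metadata":{"labels":{"team":"platform"}}}`)
	return client.Roles("default").Patch("pod-reader", types.StrategicMergePatchType, patch)
}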
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go
new file mode 100644
index 0000000..bf4e5a1
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go
@@ -0,0 +1,174 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	"time"
+
+	v1alpha1 "k8s.io/api/rbac/v1alpha1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// RoleBindingsGetter has a method to return a RoleBindingInterface.
+// A group's client should implement this interface.
+type RoleBindingsGetter interface {
+	RoleBindings(namespace string) RoleBindingInterface
+}
+
+// RoleBindingInterface has methods to work with RoleBinding resources.
+type RoleBindingInterface interface {
+	Create(*v1alpha1.RoleBinding) (*v1alpha1.RoleBinding, error)
+	Update(*v1alpha1.RoleBinding) (*v1alpha1.RoleBinding, error)
+	Delete(name string, options *v1.DeleteOptions) error
+	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
+	Get(name string, options v1.GetOptions) (*v1alpha1.RoleBinding, error)
+	List(opts v1.ListOptions) (*v1alpha1.RoleBindingList, error)
+	Watch(opts v1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.RoleBinding, err error)
+	RoleBindingExpansion
+}
+
+// roleBindings implements RoleBindingInterface
+type roleBindings struct {
+	client rest.Interface
+	ns     string
+}
+
+// newRoleBindings returns a RoleBindings
+func newRoleBindings(c *RbacV1alpha1Client, namespace string) *roleBindings {
+	return &roleBindings{
+		client: c.RESTClient(),
+		ns:     namespace,
+	}
+}
+
+// Get takes the name of the roleBinding and returns the corresponding roleBinding object, along with an error if one occurs.
+func (c *roleBindings) Get(name string, options v1.GetOptions) (result *v1alpha1.RoleBinding, err error) {
+	result = &v1alpha1.RoleBinding{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("rolebindings").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of RoleBindings that match those selectors.
+func (c *roleBindings) List(opts v1.ListOptions) (result *v1alpha1.RoleBindingList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1alpha1.RoleBindingList{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("rolebindings").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested roleBindings.
+func (c *roleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Namespace(c.ns).
+		Resource("rolebindings").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error if there is any.
+func (c *roleBindings) Create(roleBinding *v1alpha1.RoleBinding) (result *v1alpha1.RoleBinding, err error) {
+	result = &v1alpha1.RoleBinding{}
+	err = c.client.Post().
+		Namespace(c.ns).
+		Resource("rolebindings").
+		Body(roleBinding).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error if there is any.
+func (c *roleBindings) Update(roleBinding *v1alpha1.RoleBinding) (result *v1alpha1.RoleBinding, err error) {
+	result = &v1alpha1.RoleBinding{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("rolebindings").
+		Name(roleBinding.Name).
+		Body(roleBinding).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes the name of the roleBinding and deletes it. Returns an error if one occurs.
+func (c *roleBindings) Delete(name string, options *v1.DeleteOptions) error {
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("rolebindings").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *roleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("rolebindings").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched roleBinding.
+func (c *roleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.RoleBinding, err error) {
+	result = &v1alpha1.RoleBinding{}
+	err = c.client.Patch(pt).
+		Namespace(c.ns).
+		Resource("rolebindings").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
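
Usage note (illustrative, not part of the vendored file): a sketch of consuming the Watch method above. A raw watch ends when the server closes the stream; long-running code would normally use an informer, which re-establishes dropped watches. The function and its arguments are assumptions.

package rbacexample

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"
	typedrbacv1alpha1 "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1"
)

// watchBindings prints RoleBinding events in one namespace until the
// server closes the watch.
func watchBindings(client typedrbacv1alpha1.RbacV1alpha1Interface, namespace string) error {
	w, err := client.RoleBindings(namespace).Watch(metav1.ListOptions{})
	if err != nil {
		return err
	}
	defer w.Stop()
	for event := range w.ResultChan() {
		if event.Type == watch.Error {
			return fmt.Errorf("watch failed: %v", event.Object)
		}
		fmt.Printf("%s: %T\n", event.Type, event.Object)
	}
	return nil
}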
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go
new file mode 100644
index 0000000..21d3cab
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go
@@ -0,0 +1,164 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	"time"
+
+	v1beta1 "k8s.io/api/rbac/v1beta1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// ClusterRolesGetter has a method to return a ClusterRoleInterface.
+// A group's client should implement this interface.
+type ClusterRolesGetter interface {
+	ClusterRoles() ClusterRoleInterface
+}
+
+// ClusterRoleInterface has methods to work with ClusterRole resources.
+type ClusterRoleInterface interface {
+	Create(*v1beta1.ClusterRole) (*v1beta1.ClusterRole, error)
+	Update(*v1beta1.ClusterRole) (*v1beta1.ClusterRole, error)
+	Delete(name string, options *v1.DeleteOptions) error
+	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
+	Get(name string, options v1.GetOptions) (*v1beta1.ClusterRole, error)
+	List(opts v1.ListOptions) (*v1beta1.ClusterRoleList, error)
+	Watch(opts v1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ClusterRole, err error)
+	ClusterRoleExpansion
+}
+
+// clusterRoles implements ClusterRoleInterface
+type clusterRoles struct {
+	client rest.Interface
+}
+
+// newClusterRoles returns a ClusterRoles
+func newClusterRoles(c *RbacV1beta1Client) *clusterRoles {
+	return &clusterRoles{
+		client: c.RESTClient(),
+	}
+}
+
+// Get takes the name of the clusterRole and returns the corresponding clusterRole object, along with an error if one occurs.
+func (c *clusterRoles) Get(name string, options v1.GetOptions) (result *v1beta1.ClusterRole, err error) {
+	result = &v1beta1.ClusterRole{}
+	err = c.client.Get().
+		Resource("clusterroles").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of ClusterRoles that match those selectors.
+func (c *clusterRoles) List(opts v1.ListOptions) (result *v1beta1.ClusterRoleList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1beta1.ClusterRoleList{}
+	err = c.client.Get().
+		Resource("clusterroles").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested clusterRoles.
+func (c *clusterRoles) Watch(opts v1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Resource("clusterroles").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error if there is any.
+func (c *clusterRoles) Create(clusterRole *v1beta1.ClusterRole) (result *v1beta1.ClusterRole, err error) {
+	result = &v1beta1.ClusterRole{}
+	err = c.client.Post().
+		Resource("clusterroles").
+		Body(clusterRole).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error if there is any.
+func (c *clusterRoles) Update(clusterRole *v1beta1.ClusterRole) (result *v1beta1.ClusterRole, err error) {
+	result = &v1beta1.ClusterRole{}
+	err = c.client.Put().
+		Resource("clusterroles").
+		Name(clusterRole.Name).
+		Body(clusterRole).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes the name of the clusterRole and deletes it. Returns an error if one occurs.
+func (c *clusterRoles) Delete(name string, options *v1.DeleteOptions) error {
+	return c.client.Delete().
+		Resource("clusterroles").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *clusterRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Resource("clusterroles").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched clusterRole.
+func (c *clusterRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ClusterRole, err error) {
+	result = &v1beta1.ClusterRole{}
+	err = c.client.Patch(pt).
+		Resource("clusterroles").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
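
Usage note (illustrative, not part of the vendored file): a sketch of Create with a populated v1beta1 ClusterRole; the role name and rule set are assumptions.

package rbacexample

import (
	rbacv1beta1 "k8s.io/api/rbac/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	typedrbacv1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1"
)

// createPodReader creates a cluster-wide, read-only role for pods.
func createPodReader(client typedrbacv1beta1.RbacV1beta1Interface) (*rbacv1beta1.ClusterRole, error) {
	role := &rbacv1beta1.ClusterRole{
		ObjectMeta: metav1.ObjectMeta{Name: "pod-reader"},
		Rules: []rbacv1beta1.PolicyRule{{
			APIGroups: []string{""}, // the empty string selects the core API group
			Resources: []string{"pods"},
			Verbs:     []string{"get", "list", "watch"},
		}},
	}
	return client.ClusterRoles().Create(role)
}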
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go
new file mode 100644
index 0000000..47eb9e4
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go
@@ -0,0 +1,164 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	"time"
+
+	v1beta1 "k8s.io/api/rbac/v1beta1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// ClusterRoleBindingsGetter has a method to return a ClusterRoleBindingInterface.
+// A group's client should implement this interface.
+type ClusterRoleBindingsGetter interface {
+	ClusterRoleBindings() ClusterRoleBindingInterface
+}
+
+// ClusterRoleBindingInterface has methods to work with ClusterRoleBinding resources.
+type ClusterRoleBindingInterface interface {
+	Create(*v1beta1.ClusterRoleBinding) (*v1beta1.ClusterRoleBinding, error)
+	Update(*v1beta1.ClusterRoleBinding) (*v1beta1.ClusterRoleBinding, error)
+	Delete(name string, options *v1.DeleteOptions) error
+	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
+	Get(name string, options v1.GetOptions) (*v1beta1.ClusterRoleBinding, error)
+	List(opts v1.ListOptions) (*v1beta1.ClusterRoleBindingList, error)
+	Watch(opts v1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ClusterRoleBinding, err error)
+	ClusterRoleBindingExpansion
+}
+
+// clusterRoleBindings implements ClusterRoleBindingInterface
+type clusterRoleBindings struct {
+	client rest.Interface
+}
+
+// newClusterRoleBindings returns a ClusterRoleBindings
+func newClusterRoleBindings(c *RbacV1beta1Client) *clusterRoleBindings {
+	return &clusterRoleBindings{
+		client: c.RESTClient(),
+	}
+}
+
+// Get takes the name of the clusterRoleBinding and returns the corresponding clusterRoleBinding object, along with an error if one occurs.
+func (c *clusterRoleBindings) Get(name string, options v1.GetOptions) (result *v1beta1.ClusterRoleBinding, err error) {
+	result = &v1beta1.ClusterRoleBinding{}
+	err = c.client.Get().
+		Resource("clusterrolebindings").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors.
+func (c *clusterRoleBindings) List(opts v1.ListOptions) (result *v1beta1.ClusterRoleBindingList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1beta1.ClusterRoleBindingList{}
+	err = c.client.Get().
+		Resource("clusterrolebindings").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested clusterRoleBindings.
+func (c *clusterRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Resource("clusterrolebindings").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error if there is any.
+func (c *clusterRoleBindings) Create(clusterRoleBinding *v1beta1.ClusterRoleBinding) (result *v1beta1.ClusterRoleBinding, err error) {
+	result = &v1beta1.ClusterRoleBinding{}
+	err = c.client.Post().
+		Resource("clusterrolebindings").
+		Body(clusterRoleBinding).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error if there is any.
+func (c *clusterRoleBindings) Update(clusterRoleBinding *v1beta1.ClusterRoleBinding) (result *v1beta1.ClusterRoleBinding, err error) {
+	result = &v1beta1.ClusterRoleBinding{}
+	err = c.client.Put().
+		Resource("clusterrolebindings").
+		Name(clusterRoleBinding.Name).
+		Body(clusterRoleBinding).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes the name of the clusterRoleBinding and deletes it. Returns an error if one occurs.
+func (c *clusterRoleBindings) Delete(name string, options *v1.DeleteOptions) error {
+	return c.client.Delete().
+		Resource("clusterrolebindings").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *clusterRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Resource("clusterrolebindings").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched clusterRoleBinding.
+func (c *clusterRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ClusterRoleBinding, err error) {
+	result = &v1beta1.ClusterRoleBinding{}
+	err = c.client.Patch(pt).
+		Resource("clusterrolebindings").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
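
Usage note (illustrative, not part of the vendored file): a sketch of DeleteCollection, which removes every matching object server-side in one call instead of a list-then-delete loop; the label selector is an assumption.

package rbacexample

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	typedrbacv1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1"
)

// deleteDemoBindings deletes all ClusterRoleBindings carrying the
// demo label in a single request.
func deleteDemoBindings(client typedrbacv1beta1.RbacV1beta1Interface) error {
	return client.ClusterRoleBindings().DeleteCollection(
		&metav1.DeleteOptions{},
		metav1.ListOptions{LabelSelector: "purpose=demo"},
	)
}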
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/doc.go
new file mode 100644
index 0000000..7711019
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package contains the automatically generated typed clients.
+package v1beta1
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/generated_expansion.go
new file mode 100644
index 0000000..e7be79f
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/generated_expansion.go
@@ -0,0 +1,27 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+type ClusterRoleExpansion interface{}
+
+type ClusterRoleBindingExpansion interface{}
+
+type RoleExpansion interface{}
+
+type RoleBindingExpansion interface{}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go
new file mode 100644
index 0000000..46718d7
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go
@@ -0,0 +1,105 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	v1beta1 "k8s.io/api/rbac/v1beta1"
+	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+type RbacV1beta1Interface interface {
+	RESTClient() rest.Interface
+	ClusterRolesGetter
+	ClusterRoleBindingsGetter
+	RolesGetter
+	RoleBindingsGetter
+}
+
+// RbacV1beta1Client is used to interact with features provided by the rbac.authorization.k8s.io group.
+type RbacV1beta1Client struct {
+	restClient rest.Interface
+}
+
+func (c *RbacV1beta1Client) ClusterRoles() ClusterRoleInterface {
+	return newClusterRoles(c)
+}
+
+func (c *RbacV1beta1Client) ClusterRoleBindings() ClusterRoleBindingInterface {
+	return newClusterRoleBindings(c)
+}
+
+func (c *RbacV1beta1Client) Roles(namespace string) RoleInterface {
+	return newRoles(c, namespace)
+}
+
+func (c *RbacV1beta1Client) RoleBindings(namespace string) RoleBindingInterface {
+	return newRoleBindings(c, namespace)
+}
+
+// NewForConfig creates a new RbacV1beta1Client for the given config.
+func NewForConfig(c *rest.Config) (*RbacV1beta1Client, error) {
+	config := *c
+	if err := setConfigDefaults(&config); err != nil {
+		return nil, err
+	}
+	client, err := rest.RESTClientFor(&config)
+	if err != nil {
+		return nil, err
+	}
+	return &RbacV1beta1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new RbacV1beta1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *RbacV1beta1Client {
+	client, err := NewForConfig(c)
+	if err != nil {
+		panic(err)
+	}
+	return client
+}
+
+// New creates a new RbacV1beta1Client for the given RESTClient.
+func New(c rest.Interface) *RbacV1beta1Client {
+	return &RbacV1beta1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+	gv := v1beta1.SchemeGroupVersion
+	config.GroupVersion = &gv
+	config.APIPath = "/apis"
+	config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
+
+	if config.UserAgent == "" {
+		config.UserAgent = rest.DefaultKubernetesUserAgent()
+	}
+
+	return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with the API server by this client implementation.
+func (c *RbacV1beta1Client) RESTClient() rest.Interface {
+	if c == nil {
+		return nil
+	}
+	return c.restClient
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go
new file mode 100644
index 0000000..2b61aad
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go
@@ -0,0 +1,174 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	"time"
+
+	v1beta1 "k8s.io/api/rbac/v1beta1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// RolesGetter has a method to return a RoleInterface.
+// A group's client should implement this interface.
+type RolesGetter interface {
+	Roles(namespace string) RoleInterface
+}
+
+// RoleInterface has methods to work with Role resources.
+type RoleInterface interface {
+	Create(*v1beta1.Role) (*v1beta1.Role, error)
+	Update(*v1beta1.Role) (*v1beta1.Role, error)
+	Delete(name string, options *v1.DeleteOptions) error
+	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
+	Get(name string, options v1.GetOptions) (*v1beta1.Role, error)
+	List(opts v1.ListOptions) (*v1beta1.RoleList, error)
+	Watch(opts v1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Role, err error)
+	RoleExpansion
+}
+
+// roles implements RoleInterface
+type roles struct {
+	client rest.Interface
+	ns     string
+}
+
+// newRoles returns a Roles
+func newRoles(c *RbacV1beta1Client, namespace string) *roles {
+	return &roles{
+		client: c.RESTClient(),
+		ns:     namespace,
+	}
+}
+
+// Get takes the name of the role and returns the corresponding role object, along with an error if one occurs.
+func (c *roles) Get(name string, options v1.GetOptions) (result *v1beta1.Role, err error) {
+	result = &v1beta1.Role{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("roles").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of Roles that match those selectors.
+func (c *roles) List(opts v1.ListOptions) (result *v1beta1.RoleList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1beta1.RoleList{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("roles").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested roles.
+func (c *roles) Watch(opts v1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Namespace(c.ns).
+		Resource("roles").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error if there is any.
+func (c *roles) Create(role *v1beta1.Role) (result *v1beta1.Role, err error) {
+	result = &v1beta1.Role{}
+	err = c.client.Post().
+		Namespace(c.ns).
+		Resource("roles").
+		Body(role).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error if there is any.
+func (c *roles) Update(role *v1beta1.Role) (result *v1beta1.Role, err error) {
+	result = &v1beta1.Role{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("roles").
+		Name(role.Name).
+		Body(role).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes the name of the role and deletes it. Returns an error if one occurs.
+func (c *roles) Delete(name string, options *v1.DeleteOptions) error {
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("roles").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *roles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("roles").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched role.
+func (c *roles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Role, err error) {
+	result = &v1beta1.Role{}
+	err = c.client.Patch(pt).
+		Namespace(c.ns).
+		Resource("roles").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
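
Usage note (illustrative, not part of the vendored file): Update replaces the whole object and fails with a 409 Conflict when the stored resourceVersion has moved on, so a common pattern wraps a fresh Get plus Update in retry.RetryOnConflict, as sketched below. Namespace, name, and label are assumptions.

package rbacexample

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	typedrbacv1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1"
	"k8s.io/client-go/util/retry"
)

// relabelRole re-reads the Role on every attempt so each Update is
// issued against the latest resourceVersion.
func relabelRole(client typedrbacv1beta1.RbacV1beta1Interface) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		role, err := client.Roles("default").Get("pod-reader", metav1.GetOptions{})
		if err != nil {
			return err
		}
		if role.Labels == nil {
			role.Labels = map[string]string{}
		}
		role.Labels["team"] = "platform"
		_, err = client.Roles("default").Update(role)
		return err
	})
}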
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go
new file mode 100644
index 0000000..0bd118f
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go
@@ -0,0 +1,174 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	"time"
+
+	v1beta1 "k8s.io/api/rbac/v1beta1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// RoleBindingsGetter has a method to return a RoleBindingInterface.
+// A group's client should implement this interface.
+type RoleBindingsGetter interface {
+	RoleBindings(namespace string) RoleBindingInterface
+}
+
+// RoleBindingInterface has methods to work with RoleBinding resources.
+type RoleBindingInterface interface {
+	Create(*v1beta1.RoleBinding) (*v1beta1.RoleBinding, error)
+	Update(*v1beta1.RoleBinding) (*v1beta1.RoleBinding, error)
+	Delete(name string, options *v1.DeleteOptions) error
+	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
+	Get(name string, options v1.GetOptions) (*v1beta1.RoleBinding, error)
+	List(opts v1.ListOptions) (*v1beta1.RoleBindingList, error)
+	Watch(opts v1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.RoleBinding, err error)
+	RoleBindingExpansion
+}
+
+// roleBindings implements RoleBindingInterface
+type roleBindings struct {
+	client rest.Interface
+	ns     string
+}
+
+// newRoleBindings returns a RoleBindings
+func newRoleBindings(c *RbacV1beta1Client, namespace string) *roleBindings {
+	return &roleBindings{
+		client: c.RESTClient(),
+		ns:     namespace,
+	}
+}
+
+// Get takes the name of the roleBinding and returns the corresponding roleBinding object, along with an error if one occurs.
+func (c *roleBindings) Get(name string, options v1.GetOptions) (result *v1beta1.RoleBinding, err error) {
+	result = &v1beta1.RoleBinding{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("rolebindings").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of RoleBindings that match those selectors.
+func (c *roleBindings) List(opts v1.ListOptions) (result *v1beta1.RoleBindingList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1beta1.RoleBindingList{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("rolebindings").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested roleBindings.
+func (c *roleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Namespace(c.ns).
+		Resource("rolebindings").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error if there is any.
+func (c *roleBindings) Create(roleBinding *v1beta1.RoleBinding) (result *v1beta1.RoleBinding, err error) {
+	result = &v1beta1.RoleBinding{}
+	err = c.client.Post().
+		Namespace(c.ns).
+		Resource("rolebindings").
+		Body(roleBinding).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error if there is any.
+func (c *roleBindings) Update(roleBinding *v1beta1.RoleBinding) (result *v1beta1.RoleBinding, err error) {
+	result = &v1beta1.RoleBinding{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("rolebindings").
+		Name(roleBinding.Name).
+		Body(roleBinding).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes the name of the roleBinding and deletes it. Returns an error if one occurs.
+func (c *roleBindings) Delete(name string, options *v1.DeleteOptions) error {
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("rolebindings").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *roleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("rolebindings").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched roleBinding.
+func (c *roleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.RoleBinding, err error) {
+	result = &v1beta1.RoleBinding{}
+	err = c.client.Patch(pt).
+		Namespace(c.ns).
+		Resource("rolebindings").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/doc.go
new file mode 100644
index 0000000..df51baa
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package contains the automatically generated typed clients.
+package v1alpha1
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/generated_expansion.go
new file mode 100644
index 0000000..52f81d8
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/generated_expansion.go
@@ -0,0 +1,21 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+type PriorityClassExpansion interface{}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go
new file mode 100644
index 0000000..29d646f
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go
@@ -0,0 +1,164 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	"time"
+
+	v1alpha1 "k8s.io/api/scheduling/v1alpha1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// PriorityClassesGetter has a method to return a PriorityClassInterface.
+// A group's client should implement this interface.
+type PriorityClassesGetter interface {
+	PriorityClasses() PriorityClassInterface
+}
+
+// PriorityClassInterface has methods to work with PriorityClass resources.
+type PriorityClassInterface interface {
+	Create(*v1alpha1.PriorityClass) (*v1alpha1.PriorityClass, error)
+	Update(*v1alpha1.PriorityClass) (*v1alpha1.PriorityClass, error)
+	Delete(name string, options *v1.DeleteOptions) error
+	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
+	Get(name string, options v1.GetOptions) (*v1alpha1.PriorityClass, error)
+	List(opts v1.ListOptions) (*v1alpha1.PriorityClassList, error)
+	Watch(opts v1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PriorityClass, err error)
+	PriorityClassExpansion
+}
+
+// priorityClasses implements PriorityClassInterface
+type priorityClasses struct {
+	client rest.Interface
+}
+
+// newPriorityClasses returns a PriorityClasses
+func newPriorityClasses(c *SchedulingV1alpha1Client) *priorityClasses {
+	return &priorityClasses{
+		client: c.RESTClient(),
+	}
+}
+
+// Get takes the name of the priorityClass and returns the corresponding priorityClass object, along with an error if one occurs.
+func (c *priorityClasses) Get(name string, options v1.GetOptions) (result *v1alpha1.PriorityClass, err error) {
+	result = &v1alpha1.PriorityClass{}
+	err = c.client.Get().
+		Resource("priorityclasses").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of PriorityClasses that match those selectors.
+func (c *priorityClasses) List(opts v1.ListOptions) (result *v1alpha1.PriorityClassList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1alpha1.PriorityClassList{}
+	err = c.client.Get().
+		Resource("priorityclasses").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested priorityClasses.
+func (c *priorityClasses) Watch(opts v1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Resource("priorityclasses").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a priorityClass and creates it. Returns the server's representation of the priorityClass, and an error if there is any.
+func (c *priorityClasses) Create(priorityClass *v1alpha1.PriorityClass) (result *v1alpha1.PriorityClass, err error) {
+	result = &v1alpha1.PriorityClass{}
+	err = c.client.Post().
+		Resource("priorityclasses").
+		Body(priorityClass).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error if there is any.
+func (c *priorityClasses) Update(priorityClass *v1alpha1.PriorityClass) (result *v1alpha1.PriorityClass, err error) {
+	result = &v1alpha1.PriorityClass{}
+	err = c.client.Put().
+		Resource("priorityclasses").
+		Name(priorityClass.Name).
+		Body(priorityClass).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes the name of the priorityClass and deletes it. Returns an error if one occurs.
+func (c *priorityClasses) Delete(name string, options *v1.DeleteOptions) error {
+	return c.client.Delete().
+		Resource("priorityclasses").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *priorityClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Resource("priorityclasses").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched priorityClass.
+func (c *priorityClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PriorityClass, err error) {
+	result = &v1alpha1.PriorityClass{}
+	err = c.client.Patch(pt).
+		Resource("priorityclasses").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
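
Usage note (illustrative, not part of the vendored file): a sketch of creating a cluster-scoped v1alpha1 PriorityClass that pods can then reference via spec.priorityClassName; the class name, value, and description are assumptions.

package schedexample

import (
	schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	typedschedulingv1alpha1 "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1"
)

// createHighPriority registers a PriorityClass whose value orders its
// pods ahead of lower-valued classes during scheduling and preemption.
func createHighPriority(client typedschedulingv1alpha1.SchedulingV1alpha1Interface) (*schedulingv1alpha1.PriorityClass, error) {
	pc := &schedulingv1alpha1.PriorityClass{
		ObjectMeta:  metav1.ObjectMeta{Name: "high-priority"},
		Value:       1000,
		Description: "illustrative class for latency-sensitive workloads",
	}
	return client.PriorityClasses().Create(pc)
}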
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go
new file mode 100644
index 0000000..375f41b
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go
@@ -0,0 +1,90 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	v1alpha1 "k8s.io/api/scheduling/v1alpha1"
+	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+type SchedulingV1alpha1Interface interface {
+	RESTClient() rest.Interface
+	PriorityClassesGetter
+}
+
+// SchedulingV1alpha1Client is used to interact with features provided by the scheduling.k8s.io group.
+type SchedulingV1alpha1Client struct {
+	restClient rest.Interface
+}
+
+func (c *SchedulingV1alpha1Client) PriorityClasses() PriorityClassInterface {
+	return newPriorityClasses(c)
+}
+
+// NewForConfig creates a new SchedulingV1alpha1Client for the given config.
+func NewForConfig(c *rest.Config) (*SchedulingV1alpha1Client, error) {
+	config := *c
+	if err := setConfigDefaults(&config); err != nil {
+		return nil, err
+	}
+	client, err := rest.RESTClientFor(&config)
+	if err != nil {
+		return nil, err
+	}
+	return &SchedulingV1alpha1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new SchedulingV1alpha1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *SchedulingV1alpha1Client {
+	client, err := NewForConfig(c)
+	if err != nil {
+		panic(err)
+	}
+	return client
+}
+
+// New creates a new SchedulingV1alpha1Client for the given RESTClient.
+func New(c rest.Interface) *SchedulingV1alpha1Client {
+	return &SchedulingV1alpha1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+	gv := v1alpha1.SchemeGroupVersion
+	config.GroupVersion = &gv
+	config.APIPath = "/apis"
+	config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
+
+	if config.UserAgent == "" {
+		config.UserAgent = rest.DefaultKubernetesUserAgent()
+	}
+
+	return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with the API server by this client implementation.
+func (c *SchedulingV1alpha1Client) RESTClient() rest.Interface {
+	if c == nil {
+		return nil
+	}
+	return c.restClient
+}
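
For reference, a minimal sketch of how the generated SchedulingV1alpha1 client above can be constructed and used. The kubeconfig path is an assumption for illustration; it is not part of the vendored change:

package main

import (
	"fmt"
	"log"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	schedv1alpha1 "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a rest.Config from a kubeconfig file (path is illustrative).
	config, err := clientcmd.BuildConfigFromFlags("", "/etc/kubernetes/kubeconfig")
	if err != nil {
		log.Fatal(err)
	}
	// NewForConfig fills in the group, version, and codec defaults shown in setConfigDefaults.
	client, err := schedv1alpha1.NewForConfig(config)
	if err != nil {
		log.Fatal(err)
	}
	// PriorityClasses are cluster-scoped, so no namespace is involved.
	pcs, err := client.PriorityClasses().List(metav1.ListOptions{})
	if err != nil {
		log.Fatal(err)
	}
	for _, pc := range pcs.Items {
		fmt.Printf("%s value=%d\n", pc.Name, pc.Value)
	}
}
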
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/doc.go
new file mode 100644
index 0000000..7711019
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1beta1
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/generated_expansion.go
new file mode 100644
index 0000000..3bab873
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/generated_expansion.go
@@ -0,0 +1,21 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+type PriorityClassExpansion interface{}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go
new file mode 100644
index 0000000..5e402f8
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go
@@ -0,0 +1,164 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	"time"
+
+	v1beta1 "k8s.io/api/scheduling/v1beta1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// PriorityClassesGetter has a method to return a PriorityClassInterface.
+// A group's client should implement this interface.
+type PriorityClassesGetter interface {
+	PriorityClasses() PriorityClassInterface
+}
+
+// PriorityClassInterface has methods to work with PriorityClass resources.
+type PriorityClassInterface interface {
+	Create(*v1beta1.PriorityClass) (*v1beta1.PriorityClass, error)
+	Update(*v1beta1.PriorityClass) (*v1beta1.PriorityClass, error)
+	Delete(name string, options *v1.DeleteOptions) error
+	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
+	Get(name string, options v1.GetOptions) (*v1beta1.PriorityClass, error)
+	List(opts v1.ListOptions) (*v1beta1.PriorityClassList, error)
+	Watch(opts v1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PriorityClass, err error)
+	PriorityClassExpansion
+}
+
+// priorityClasses implements PriorityClassInterface
+type priorityClasses struct {
+	client rest.Interface
+}
+
+// newPriorityClasses returns a PriorityClasses
+func newPriorityClasses(c *SchedulingV1beta1Client) *priorityClasses {
+	return &priorityClasses{
+		client: c.RESTClient(),
+	}
+}
+
+// Get takes the name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any.
+func (c *priorityClasses) Get(name string, options v1.GetOptions) (result *v1beta1.PriorityClass, err error) {
+	result = &v1beta1.PriorityClass{}
+	err = c.client.Get().
+		Resource("priorityclasses").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of PriorityClasses that match those selectors.
+func (c *priorityClasses) List(opts v1.ListOptions) (result *v1beta1.PriorityClassList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1beta1.PriorityClassList{}
+	err = c.client.Get().
+		Resource("priorityclasses").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested priorityClasses.
+func (c *priorityClasses) Watch(opts v1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Resource("priorityclasses").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a priorityClass and creates it. Returns the server's representation of the priorityClass, and an error, if there is any.
+func (c *priorityClasses) Create(priorityClass *v1beta1.PriorityClass) (result *v1beta1.PriorityClass, err error) {
+	result = &v1beta1.PriorityClass{}
+	err = c.client.Post().
+		Resource("priorityclasses").
+		Body(priorityClass).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any.
+func (c *priorityClasses) Update(priorityClass *v1beta1.PriorityClass) (result *v1beta1.PriorityClass, err error) {
+	result = &v1beta1.PriorityClass{}
+	err = c.client.Put().
+		Resource("priorityclasses").
+		Name(priorityClass.Name).
+		Body(priorityClass).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes the name of the priorityClass and deletes it. Returns an error if one occurs.
+func (c *priorityClasses) Delete(name string, options *v1.DeleteOptions) error {
+	return c.client.Delete().
+		Resource("priorityclasses").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *priorityClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Resource("priorityclasses").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched priorityClass.
+func (c *priorityClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PriorityClass, err error) {
+	result = &v1beta1.PriorityClass{}
+	err = c.client.Patch(pt).
+		Resource("priorityclasses").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
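
Create and Delete round-trip through the server's representation. A minimal sketch for the v1beta1 client; the class name, value, and description are illustrative:

package main

import (
	"log"

	schedulingv1beta1 "k8s.io/api/scheduling/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	schedclient "k8s.io/client-go/kubernetes/typed/scheduling/v1beta1"
)

// createThenDelete creates a PriorityClass and removes it again.
func createThenDelete(client *schedclient.SchedulingV1beta1Client) {
	pc := &schedulingv1beta1.PriorityClass{
		ObjectMeta:  metav1.ObjectMeta{Name: "batch-low"},
		Value:       1000,
		Description: "illustrative low-priority class",
	}
	created, err := client.PriorityClasses().Create(pc)
	if err != nil {
		log.Fatal(err)
	}
	// Delete takes explicit DeleteOptions; an empty struct requests the defaults.
	if err := client.PriorityClasses().Delete(created.Name, &metav1.DeleteOptions{}); err != nil {
		log.Fatal(err)
	}
}
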
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/scheduling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/scheduling_client.go
new file mode 100644
index 0000000..6feec4a
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/scheduling_client.go
@@ -0,0 +1,90 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	v1beta1 "k8s.io/api/scheduling/v1beta1"
+	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+type SchedulingV1beta1Interface interface {
+	RESTClient() rest.Interface
+	PriorityClassesGetter
+}
+
+// SchedulingV1beta1Client is used to interact with features provided by the scheduling.k8s.io group.
+type SchedulingV1beta1Client struct {
+	restClient rest.Interface
+}
+
+func (c *SchedulingV1beta1Client) PriorityClasses() PriorityClassInterface {
+	return newPriorityClasses(c)
+}
+
+// NewForConfig creates a new SchedulingV1beta1Client for the given config.
+func NewForConfig(c *rest.Config) (*SchedulingV1beta1Client, error) {
+	config := *c
+	if err := setConfigDefaults(&config); err != nil {
+		return nil, err
+	}
+	client, err := rest.RESTClientFor(&config)
+	if err != nil {
+		return nil, err
+	}
+	return &SchedulingV1beta1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new SchedulingV1beta1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *SchedulingV1beta1Client {
+	client, err := NewForConfig(c)
+	if err != nil {
+		panic(err)
+	}
+	return client
+}
+
+// New creates a new SchedulingV1beta1Client for the given RESTClient.
+func New(c rest.Interface) *SchedulingV1beta1Client {
+	return &SchedulingV1beta1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+	gv := v1beta1.SchemeGroupVersion
+	config.GroupVersion = &gv
+	config.APIPath = "/apis"
+	config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
+
+	if config.UserAgent == "" {
+		config.UserAgent = rest.DefaultKubernetesUserAgent()
+	}
+
+	return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with the API server by this client implementation.
+func (c *SchedulingV1beta1Client) RESTClient() rest.Interface {
+	if c == nil {
+		return nil
+	}
+	return c.restClient
+}
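
Watch sets opts.Watch itself and honours TimeoutSeconds as a server-side deadline. A minimal sketch that streams events until the server closes the watch; the 60-second timeout is an arbitrary example value:

package main

import (
	"log"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	schedclient "k8s.io/client-go/kubernetes/typed/scheduling/v1beta1"
)

// watchPriorityClasses logs watch events until the server ends the stream.
func watchPriorityClasses(client *schedclient.SchedulingV1beta1Client) error {
	timeout := int64(60) // server closes the watch after this many seconds
	w, err := client.PriorityClasses().Watch(metav1.ListOptions{TimeoutSeconds: &timeout})
	if err != nil {
		return err
	}
	defer w.Stop()
	for ev := range w.ResultChan() {
		log.Printf("event %s on %T", ev.Type, ev.Object)
	}
	return nil
}
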
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/doc.go
new file mode 100644
index 0000000..df51baa
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1alpha1
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/generated_expansion.go
new file mode 100644
index 0000000..23d9f94
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/generated_expansion.go
@@ -0,0 +1,21 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+type PodPresetExpansion interface{}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go
new file mode 100644
index 0000000..8fd6adc
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go
@@ -0,0 +1,174 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	"time"
+
+	v1alpha1 "k8s.io/api/settings/v1alpha1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// PodPresetsGetter has a method to return a PodPresetInterface.
+// A group's client should implement this interface.
+type PodPresetsGetter interface {
+	PodPresets(namespace string) PodPresetInterface
+}
+
+// PodPresetInterface has methods to work with PodPreset resources.
+type PodPresetInterface interface {
+	Create(*v1alpha1.PodPreset) (*v1alpha1.PodPreset, error)
+	Update(*v1alpha1.PodPreset) (*v1alpha1.PodPreset, error)
+	Delete(name string, options *v1.DeleteOptions) error
+	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
+	Get(name string, options v1.GetOptions) (*v1alpha1.PodPreset, error)
+	List(opts v1.ListOptions) (*v1alpha1.PodPresetList, error)
+	Watch(opts v1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PodPreset, err error)
+	PodPresetExpansion
+}
+
+// podPresets implements PodPresetInterface
+type podPresets struct {
+	client rest.Interface
+	ns     string
+}
+
+// newPodPresets returns a PodPresets
+func newPodPresets(c *SettingsV1alpha1Client, namespace string) *podPresets {
+	return &podPresets{
+		client: c.RESTClient(),
+		ns:     namespace,
+	}
+}
+
+// Get takes the name of the podPreset, and returns the corresponding podPreset object, and an error if there is any.
+func (c *podPresets) Get(name string, options v1.GetOptions) (result *v1alpha1.PodPreset, err error) {
+	result = &v1alpha1.PodPreset{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("podpresets").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of PodPresets that match those selectors.
+func (c *podPresets) List(opts v1.ListOptions) (result *v1alpha1.PodPresetList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1alpha1.PodPresetList{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("podpresets").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested podPresets.
+func (c *podPresets) Watch(opts v1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Namespace(c.ns).
+		Resource("podpresets").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a podPreset and creates it. Returns the server's representation of the podPreset, and an error, if there is any.
+func (c *podPresets) Create(podPreset *v1alpha1.PodPreset) (result *v1alpha1.PodPreset, err error) {
+	result = &v1alpha1.PodPreset{}
+	err = c.client.Post().
+		Namespace(c.ns).
+		Resource("podpresets").
+		Body(podPreset).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a podPreset and updates it. Returns the server's representation of the podPreset, and an error, if there is any.
+func (c *podPresets) Update(podPreset *v1alpha1.PodPreset) (result *v1alpha1.PodPreset, err error) {
+	result = &v1alpha1.PodPreset{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("podpresets").
+		Name(podPreset.Name).
+		Body(podPreset).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes the name of the podPreset and deletes it. Returns an error if one occurs.
+func (c *podPresets) Delete(name string, options *v1.DeleteOptions) error {
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("podpresets").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *podPresets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("podpresets").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched podPreset.
+func (c *podPresets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PodPreset, err error) {
+	result = &v1alpha1.PodPreset{}
+	err = c.client.Patch(pt).
+		Namespace(c.ns).
+		Resource("podpresets").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
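
Unlike the cluster-scoped clients above, podPresets carries a namespace, which newPodPresets captures and every request threads through Namespace(c.ns). A minimal sketch; the namespace and label selector are illustrative, and the client is assumed to be built as in the earlier construction sketch:

package main

import (
	"fmt"
	"log"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	settingsclient "k8s.io/client-go/kubernetes/typed/settings/v1alpha1"
)

// listPresets lists PodPresets in a single namespace, filtered by label.
func listPresets(client *settingsclient.SettingsV1alpha1Client) {
	presets, err := client.PodPresets("default").List(metav1.ListOptions{
		LabelSelector: "app=demo", // illustrative selector
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range presets.Items {
		fmt.Printf("%s/%s\n", p.Namespace, p.Name)
	}
}
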
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go
new file mode 100644
index 0000000..c2a03b9
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go
@@ -0,0 +1,90 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	v1alpha1 "k8s.io/api/settings/v1alpha1"
+	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+type SettingsV1alpha1Interface interface {
+	RESTClient() rest.Interface
+	PodPresetsGetter
+}
+
+// SettingsV1alpha1Client is used to interact with features provided by the settings.k8s.io group.
+type SettingsV1alpha1Client struct {
+	restClient rest.Interface
+}
+
+func (c *SettingsV1alpha1Client) PodPresets(namespace string) PodPresetInterface {
+	return newPodPresets(c, namespace)
+}
+
+// NewForConfig creates a new SettingsV1alpha1Client for the given config.
+func NewForConfig(c *rest.Config) (*SettingsV1alpha1Client, error) {
+	config := *c
+	if err := setConfigDefaults(&config); err != nil {
+		return nil, err
+	}
+	client, err := rest.RESTClientFor(&config)
+	if err != nil {
+		return nil, err
+	}
+	return &SettingsV1alpha1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new SettingsV1alpha1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *SettingsV1alpha1Client {
+	client, err := NewForConfig(c)
+	if err != nil {
+		panic(err)
+	}
+	return client
+}
+
+// New creates a new SettingsV1alpha1Client for the given RESTClient.
+func New(c rest.Interface) *SettingsV1alpha1Client {
+	return &SettingsV1alpha1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+	gv := v1alpha1.SchemeGroupVersion
+	config.GroupVersion = &gv
+	config.APIPath = "/apis"
+	config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
+
+	if config.UserAgent == "" {
+		config.UserAgent = rest.DefaultKubernetesUserAgent()
+	}
+
+	return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with the API server by this client implementation.
+func (c *SettingsV1alpha1Client) RESTClient() rest.Interface {
+	if c == nil {
+		return nil
+	}
+	return c.restClient
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/doc.go
new file mode 100644
index 0000000..3af5d05
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go
new file mode 100644
index 0000000..ccac161
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go
@@ -0,0 +1,23 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+type StorageClassExpansion interface{}
+
+type VolumeAttachmentExpansion interface{}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go
new file mode 100644
index 0000000..92378cf
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go
@@ -0,0 +1,95 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	v1 "k8s.io/api/storage/v1"
+	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+type StorageV1Interface interface {
+	RESTClient() rest.Interface
+	StorageClassesGetter
+	VolumeAttachmentsGetter
+}
+
+// StorageV1Client is used to interact with features provided by the storage.k8s.io group.
+type StorageV1Client struct {
+	restClient rest.Interface
+}
+
+func (c *StorageV1Client) StorageClasses() StorageClassInterface {
+	return newStorageClasses(c)
+}
+
+func (c *StorageV1Client) VolumeAttachments() VolumeAttachmentInterface {
+	return newVolumeAttachments(c)
+}
+
+// NewForConfig creates a new StorageV1Client for the given config.
+func NewForConfig(c *rest.Config) (*StorageV1Client, error) {
+	config := *c
+	if err := setConfigDefaults(&config); err != nil {
+		return nil, err
+	}
+	client, err := rest.RESTClientFor(&config)
+	if err != nil {
+		return nil, err
+	}
+	return &StorageV1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new StorageV1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *StorageV1Client {
+	client, err := NewForConfig(c)
+	if err != nil {
+		panic(err)
+	}
+	return client
+}
+
+// New creates a new StorageV1Client for the given RESTClient.
+func New(c rest.Interface) *StorageV1Client {
+	return &StorageV1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+	gv := v1.SchemeGroupVersion
+	config.GroupVersion = &gv
+	config.APIPath = "/apis"
+	config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
+
+	if config.UserAgent == "" {
+		config.UserAgent = rest.DefaultKubernetesUserAgent()
+	}
+
+	return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with the API server by this client implementation.
+func (c *StorageV1Client) RESTClient() rest.Interface {
+	if c == nil {
+		return nil
+	}
+	return c.restClient
+}
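
NewForConfigOrDie trades the error return for a panic, which fits process start-up paths. A minimal sketch using in-cluster credentials; it assumes the caller runs inside a pod with a mounted service account:

package main

import (
	storagev1client "k8s.io/client-go/kubernetes/typed/storage/v1"
	"k8s.io/client-go/rest"
)

// mustStorageClient builds a StorageV1Client or panics on a bad config.
func mustStorageClient() *storagev1client.StorageV1Client {
	// InClusterConfig reads the service-account token mounted into the pod.
	config, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	return storagev1client.NewForConfigOrDie(config)
}
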
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go
new file mode 100644
index 0000000..3f4c48f
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go
@@ -0,0 +1,164 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	"time"
+
+	v1 "k8s.io/api/storage/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// StorageClassesGetter has a method to return a StorageClassInterface.
+// A group's client should implement this interface.
+type StorageClassesGetter interface {
+	StorageClasses() StorageClassInterface
+}
+
+// StorageClassInterface has methods to work with StorageClass resources.
+type StorageClassInterface interface {
+	Create(*v1.StorageClass) (*v1.StorageClass, error)
+	Update(*v1.StorageClass) (*v1.StorageClass, error)
+	Delete(name string, options *metav1.DeleteOptions) error
+	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
+	Get(name string, options metav1.GetOptions) (*v1.StorageClass, error)
+	List(opts metav1.ListOptions) (*v1.StorageClassList, error)
+	Watch(opts metav1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.StorageClass, err error)
+	StorageClassExpansion
+}
+
+// storageClasses implements StorageClassInterface
+type storageClasses struct {
+	client rest.Interface
+}
+
+// newStorageClasses returns a StorageClasses
+func newStorageClasses(c *StorageV1Client) *storageClasses {
+	return &storageClasses{
+		client: c.RESTClient(),
+	}
+}
+
+// Get takes the name of the storageClass, and returns the corresponding storageClass object, and an error if there is any.
+func (c *storageClasses) Get(name string, options metav1.GetOptions) (result *v1.StorageClass, err error) {
+	result = &v1.StorageClass{}
+	err = c.client.Get().
+		Resource("storageclasses").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of StorageClasses that match those selectors.
+func (c *storageClasses) List(opts metav1.ListOptions) (result *v1.StorageClassList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1.StorageClassList{}
+	err = c.client.Get().
+		Resource("storageclasses").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested storageClasses.
+func (c *storageClasses) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Resource("storageclasses").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a storageClass and creates it. Returns the server's representation of the storageClass, and an error, if there is any.
+func (c *storageClasses) Create(storageClass *v1.StorageClass) (result *v1.StorageClass, err error) {
+	result = &v1.StorageClass{}
+	err = c.client.Post().
+		Resource("storageclasses").
+		Body(storageClass).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a storageClass and updates it. Returns the server's representation of the storageClass, and an error, if there is any.
+func (c *storageClasses) Update(storageClass *v1.StorageClass) (result *v1.StorageClass, err error) {
+	result = &v1.StorageClass{}
+	err = c.client.Put().
+		Resource("storageclasses").
+		Name(storageClass.Name).
+		Body(storageClass).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes the name of the storageClass and deletes it. Returns an error if one occurs.
+func (c *storageClasses) Delete(name string, options *metav1.DeleteOptions) error {
+	return c.client.Delete().
+		Resource("storageclasses").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *storageClasses) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Resource("storageclasses").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched storageClass.
+func (c *storageClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.StorageClass, err error) {
+	result = &v1.StorageClass{}
+	err = c.client.Patch(pt).
+		Resource("storageclasses").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
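
A minimal sketch of creating a StorageClass through the client above; the class name is illustrative, while kubernetes.io/no-provisioner is the conventional provisioner for statically provisioned local volumes:

package main

import (
	"log"

	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	storagev1client "k8s.io/client-go/kubernetes/typed/storage/v1"
)

// createLocalClass creates a StorageClass for statically provisioned volumes.
func createLocalClass(client *storagev1client.StorageV1Client) {
	sc := &storagev1.StorageClass{
		ObjectMeta:  metav1.ObjectMeta{Name: "local-manual"},
		Provisioner: "kubernetes.io/no-provisioner",
	}
	created, err := client.StorageClasses().Create(sc)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("created StorageClass %s", created.Name)
}
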
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go
new file mode 100644
index 0000000..0f45097
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go
@@ -0,0 +1,180 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	"time"
+
+	v1 "k8s.io/api/storage/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// VolumeAttachmentsGetter has a method to return a VolumeAttachmentInterface.
+// A group's client should implement this interface.
+type VolumeAttachmentsGetter interface {
+	VolumeAttachments() VolumeAttachmentInterface
+}
+
+// VolumeAttachmentInterface has methods to work with VolumeAttachment resources.
+type VolumeAttachmentInterface interface {
+	Create(*v1.VolumeAttachment) (*v1.VolumeAttachment, error)
+	Update(*v1.VolumeAttachment) (*v1.VolumeAttachment, error)
+	UpdateStatus(*v1.VolumeAttachment) (*v1.VolumeAttachment, error)
+	Delete(name string, options *metav1.DeleteOptions) error
+	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
+	Get(name string, options metav1.GetOptions) (*v1.VolumeAttachment, error)
+	List(opts metav1.ListOptions) (*v1.VolumeAttachmentList, error)
+	Watch(opts metav1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.VolumeAttachment, err error)
+	VolumeAttachmentExpansion
+}
+
+// volumeAttachments implements VolumeAttachmentInterface
+type volumeAttachments struct {
+	client rest.Interface
+}
+
+// newVolumeAttachments returns a VolumeAttachments
+func newVolumeAttachments(c *StorageV1Client) *volumeAttachments {
+	return &volumeAttachments{
+		client: c.RESTClient(),
+	}
+}
+
+// Get takes the name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any.
+func (c *volumeAttachments) Get(name string, options metav1.GetOptions) (result *v1.VolumeAttachment, err error) {
+	result = &v1.VolumeAttachment{}
+	err = c.client.Get().
+		Resource("volumeattachments").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors.
+func (c *volumeAttachments) List(opts metav1.ListOptions) (result *v1.VolumeAttachmentList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1.VolumeAttachmentList{}
+	err = c.client.Get().
+		Resource("volumeattachments").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested volumeAttachments.
+func (c *volumeAttachments) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Resource("volumeattachments").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any.
+func (c *volumeAttachments) Create(volumeAttachment *v1.VolumeAttachment) (result *v1.VolumeAttachment, err error) {
+	result = &v1.VolumeAttachment{}
+	err = c.client.Post().
+		Resource("volumeattachments").
+		Body(volumeAttachment).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any.
+func (c *volumeAttachments) Update(volumeAttachment *v1.VolumeAttachment) (result *v1.VolumeAttachment, err error) {
+	result = &v1.VolumeAttachment{}
+	err = c.client.Put().
+		Resource("volumeattachments").
+		Name(volumeAttachment.Name).
+		Body(volumeAttachment).
+		Do().
+		Into(result)
+	return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+
+func (c *volumeAttachments) UpdateStatus(volumeAttachment *v1.VolumeAttachment) (result *v1.VolumeAttachment, err error) {
+	result = &v1.VolumeAttachment{}
+	err = c.client.Put().
+		Resource("volumeattachments").
+		Name(volumeAttachment.Name).
+		SubResource("status").
+		Body(volumeAttachment).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes the name of the volumeAttachment and deletes it. Returns an error if one occurs.
+func (c *volumeAttachments) Delete(name string, options *metav1.DeleteOptions) error {
+	return c.client.Delete().
+		Resource("volumeattachments").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *volumeAttachments) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Resource("volumeattachments").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched volumeAttachment.
+func (c *volumeAttachments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.VolumeAttachment, err error) {
+	result = &v1.VolumeAttachment{}
+	err = c.client.Patch(pt).
+		Resource("volumeattachments").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
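
UpdateStatus above writes only the status subresource. A minimal sketch of the usual flow, fetching the object first so the PUT carries a current resourceVersion; the attachment name comes from the caller and the status change is illustrative:

package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	storagev1client "k8s.io/client-go/kubernetes/typed/storage/v1"
)

// markAttached flips Status.Attached on an existing VolumeAttachment.
func markAttached(client *storagev1client.StorageV1Client, name string) error {
	va, err := client.VolumeAttachments().Get(name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	va.Status.Attached = true
	// PUT goes to .../volumeattachments/<name>/status; spec edits are not applied here.
	_, err = client.VolumeAttachments().UpdateStatus(va)
	return err
}
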
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/doc.go
new file mode 100644
index 0000000..df51baa
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1alpha1
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/generated_expansion.go
new file mode 100644
index 0000000..cdb7ab2
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/generated_expansion.go
@@ -0,0 +1,21 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+type VolumeAttachmentExpansion interface{}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go
new file mode 100644
index 0000000..c52f630
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go
@@ -0,0 +1,90 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	v1alpha1 "k8s.io/api/storage/v1alpha1"
+	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+type StorageV1alpha1Interface interface {
+	RESTClient() rest.Interface
+	VolumeAttachmentsGetter
+}
+
+// StorageV1alpha1Client is used to interact with features provided by the storage.k8s.io group.
+type StorageV1alpha1Client struct {
+	restClient rest.Interface
+}
+
+func (c *StorageV1alpha1Client) VolumeAttachments() VolumeAttachmentInterface {
+	return newVolumeAttachments(c)
+}
+
+// NewForConfig creates a new StorageV1alpha1Client for the given config.
+func NewForConfig(c *rest.Config) (*StorageV1alpha1Client, error) {
+	config := *c
+	if err := setConfigDefaults(&config); err != nil {
+		return nil, err
+	}
+	client, err := rest.RESTClientFor(&config)
+	if err != nil {
+		return nil, err
+	}
+	return &StorageV1alpha1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new StorageV1alpha1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *StorageV1alpha1Client {
+	client, err := NewForConfig(c)
+	if err != nil {
+		panic(err)
+	}
+	return client
+}
+
+// New creates a new StorageV1alpha1Client for the given RESTClient.
+func New(c rest.Interface) *StorageV1alpha1Client {
+	return &StorageV1alpha1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+	gv := v1alpha1.SchemeGroupVersion
+	config.GroupVersion = &gv
+	config.APIPath = "/apis"
+	config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
+
+	if config.UserAgent == "" {
+		config.UserAgent = rest.DefaultKubernetesUserAgent()
+	}
+
+	return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with the API server by this client implementation.
+func (c *StorageV1alpha1Client) RESTClient() rest.Interface {
+	if c == nil {
+		return nil
+	}
+	return c.restClient
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go
new file mode 100644
index 0000000..7fef94e
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go
@@ -0,0 +1,180 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	"time"
+
+	v1alpha1 "k8s.io/api/storage/v1alpha1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// VolumeAttachmentsGetter has a method to return a VolumeAttachmentInterface.
+// A group's client should implement this interface.
+type VolumeAttachmentsGetter interface {
+	VolumeAttachments() VolumeAttachmentInterface
+}
+
+// VolumeAttachmentInterface has methods to work with VolumeAttachment resources.
+type VolumeAttachmentInterface interface {
+	Create(*v1alpha1.VolumeAttachment) (*v1alpha1.VolumeAttachment, error)
+	Update(*v1alpha1.VolumeAttachment) (*v1alpha1.VolumeAttachment, error)
+	UpdateStatus(*v1alpha1.VolumeAttachment) (*v1alpha1.VolumeAttachment, error)
+	Delete(name string, options *v1.DeleteOptions) error
+	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
+	Get(name string, options v1.GetOptions) (*v1alpha1.VolumeAttachment, error)
+	List(opts v1.ListOptions) (*v1alpha1.VolumeAttachmentList, error)
+	Watch(opts v1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.VolumeAttachment, err error)
+	VolumeAttachmentExpansion
+}
+
+// volumeAttachments implements VolumeAttachmentInterface
+type volumeAttachments struct {
+	client rest.Interface
+}
+
+// newVolumeAttachments returns a VolumeAttachments
+func newVolumeAttachments(c *StorageV1alpha1Client) *volumeAttachments {
+	return &volumeAttachments{
+		client: c.RESTClient(),
+	}
+}
+
+// Get takes the name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any.
+func (c *volumeAttachments) Get(name string, options v1.GetOptions) (result *v1alpha1.VolumeAttachment, err error) {
+	result = &v1alpha1.VolumeAttachment{}
+	err = c.client.Get().
+		Resource("volumeattachments").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors.
+func (c *volumeAttachments) List(opts v1.ListOptions) (result *v1alpha1.VolumeAttachmentList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1alpha1.VolumeAttachmentList{}
+	err = c.client.Get().
+		Resource("volumeattachments").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested volumeAttachments.
+func (c *volumeAttachments) Watch(opts v1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Resource("volumeattachments").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any.
+func (c *volumeAttachments) Create(volumeAttachment *v1alpha1.VolumeAttachment) (result *v1alpha1.VolumeAttachment, err error) {
+	result = &v1alpha1.VolumeAttachment{}
+	err = c.client.Post().
+		Resource("volumeattachments").
+		Body(volumeAttachment).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any.
+func (c *volumeAttachments) Update(volumeAttachment *v1alpha1.VolumeAttachment) (result *v1alpha1.VolumeAttachment, err error) {
+	result = &v1alpha1.VolumeAttachment{}
+	err = c.client.Put().
+		Resource("volumeattachments").
+		Name(volumeAttachment.Name).
+		Body(volumeAttachment).
+		Do().
+		Into(result)
+	return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+
+func (c *volumeAttachments) UpdateStatus(volumeAttachment *v1alpha1.VolumeAttachment) (result *v1alpha1.VolumeAttachment, err error) {
+	result = &v1alpha1.VolumeAttachment{}
+	err = c.client.Put().
+		Resource("volumeattachments").
+		Name(volumeAttachment.Name).
+		SubResource("status").
+		Body(volumeAttachment).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes the name of the volumeAttachment and deletes it. Returns an error if one occurs.
+func (c *volumeAttachments) Delete(name string, options *v1.DeleteOptions) error {
+	return c.client.Delete().
+		Resource("volumeattachments").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *volumeAttachments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Resource("volumeattachments").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched volumeAttachment.
+func (c *volumeAttachments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.VolumeAttachment, err error) {
+	result = &v1alpha1.VolumeAttachment{}
+	err = c.client.Patch(pt).
+		Resource("volumeattachments").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
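
DeleteCollection pairs DeleteOptions for the deletion itself with ListOptions to select what is deleted. A minimal sketch; the label selector is illustrative:

package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	storagev1alpha1client "k8s.io/client-go/kubernetes/typed/storage/v1alpha1"
)

// cleanupAttachments deletes every VolumeAttachment matching the selector.
func cleanupAttachments(client *storagev1alpha1client.StorageV1alpha1Client) error {
	return client.VolumeAttachments().DeleteCollection(
		&metav1.DeleteOptions{},
		metav1.ListOptions{LabelSelector: "managed-by=affinity-router"},
	)
}
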
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/doc.go
new file mode 100644
index 0000000..7711019
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1beta1
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go
new file mode 100644
index 0000000..559f88f
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go
@@ -0,0 +1,23 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+type StorageClassExpansion interface{}
+
+type VolumeAttachmentExpansion interface{}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go
new file mode 100644
index 0000000..4bdebb8
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go
@@ -0,0 +1,95 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	v1beta1 "k8s.io/api/storage/v1beta1"
+	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+type StorageV1beta1Interface interface {
+	RESTClient() rest.Interface
+	StorageClassesGetter
+	VolumeAttachmentsGetter
+}
+
+// StorageV1beta1Client is used to interact with features provided by the storage.k8s.io group.
+type StorageV1beta1Client struct {
+	restClient rest.Interface
+}
+
+func (c *StorageV1beta1Client) StorageClasses() StorageClassInterface {
+	return newStorageClasses(c)
+}
+
+func (c *StorageV1beta1Client) VolumeAttachments() VolumeAttachmentInterface {
+	return newVolumeAttachments(c)
+}
+
+// NewForConfig creates a new StorageV1beta1Client for the given config.
+func NewForConfig(c *rest.Config) (*StorageV1beta1Client, error) {
+	config := *c
+	if err := setConfigDefaults(&config); err != nil {
+		return nil, err
+	}
+	client, err := rest.RESTClientFor(&config)
+	if err != nil {
+		return nil, err
+	}
+	return &StorageV1beta1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new StorageV1beta1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *StorageV1beta1Client {
+	client, err := NewForConfig(c)
+	if err != nil {
+		panic(err)
+	}
+	return client
+}
+
+// New creates a new StorageV1beta1Client for the given RESTClient.
+func New(c rest.Interface) *StorageV1beta1Client {
+	return &StorageV1beta1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+	gv := v1beta1.SchemeGroupVersion
+	config.GroupVersion = &gv
+	config.APIPath = "/apis"
+	config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
+
+	if config.UserAgent == "" {
+		config.UserAgent = rest.DefaultKubernetesUserAgent()
+	}
+
+	return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with the API server by this client implementation.
+func (c *StorageV1beta1Client) RESTClient() rest.Interface {
+	if c == nil {
+		return nil
+	}
+	return c.restClient
+}
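
For reviewers unfamiliar with client-gen output, a minimal sketch of constructing and using this client from daemon code; the kubeconfig path is an illustrative placeholder, and List follows the pre-context-aware signatures vendored here:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	storagev1beta1 "k8s.io/client-go/kubernetes/typed/storage/v1beta1"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Load a rest.Config from a kubeconfig file (path is a placeholder).
	config, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	// NewForConfig copies the config and applies the storage.k8s.io/v1beta1
	// defaults from setConfigDefaults above.
	client, err := storagev1beta1.NewForConfig(config)
	if err != nil {
		panic(err)
	}
	list, err := client.StorageClasses().List(metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, sc := range list.Items {
		fmt.Println(sc.Name)
	}
}
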
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go
new file mode 100644
index 0000000..8a8f389
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go
@@ -0,0 +1,164 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	"time"
+
+	v1beta1 "k8s.io/api/storage/v1beta1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// StorageClassesGetter has a method to return a StorageClassInterface.
+// A group's client should implement this interface.
+type StorageClassesGetter interface {
+	StorageClasses() StorageClassInterface
+}
+
+// StorageClassInterface has methods to work with StorageClass resources.
+type StorageClassInterface interface {
+	Create(*v1beta1.StorageClass) (*v1beta1.StorageClass, error)
+	Update(*v1beta1.StorageClass) (*v1beta1.StorageClass, error)
+	Delete(name string, options *v1.DeleteOptions) error
+	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
+	Get(name string, options v1.GetOptions) (*v1beta1.StorageClass, error)
+	List(opts v1.ListOptions) (*v1beta1.StorageClassList, error)
+	Watch(opts v1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.StorageClass, err error)
+	StorageClassExpansion
+}
+
+// storageClasses implements StorageClassInterface
+type storageClasses struct {
+	client rest.Interface
+}
+
+// newStorageClasses returns a StorageClasses
+func newStorageClasses(c *StorageV1beta1Client) *storageClasses {
+	return &storageClasses{
+		client: c.RESTClient(),
+	}
+}
+
+// Get takes name of the storageClass, and returns the corresponding storageClass object, and an error if there is any.
+func (c *storageClasses) Get(name string, options v1.GetOptions) (result *v1beta1.StorageClass, err error) {
+	result = &v1beta1.StorageClass{}
+	err = c.client.Get().
+		Resource("storageclasses").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of StorageClasses that match those selectors.
+func (c *storageClasses) List(opts v1.ListOptions) (result *v1beta1.StorageClassList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1beta1.StorageClassList{}
+	err = c.client.Get().
+		Resource("storageclasses").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested storageClasses.
+func (c *storageClasses) Watch(opts v1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Resource("storageclasses").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a storageClass and creates it. Returns the server's representation of the storageClass, and an error, if there is any.
+func (c *storageClasses) Create(storageClass *v1beta1.StorageClass) (result *v1beta1.StorageClass, err error) {
+	result = &v1beta1.StorageClass{}
+	err = c.client.Post().
+		Resource("storageclasses").
+		Body(storageClass).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a storageClass and updates it. Returns the server's representation of the storageClass, and an error, if there is any.
+func (c *storageClasses) Update(storageClass *v1beta1.StorageClass) (result *v1beta1.StorageClass, err error) {
+	result = &v1beta1.StorageClass{}
+	err = c.client.Put().
+		Resource("storageclasses").
+		Name(storageClass.Name).
+		Body(storageClass).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes name of the storageClass and deletes it. Returns an error if one occurs.
+func (c *storageClasses) Delete(name string, options *v1.DeleteOptions) error {
+	return c.client.Delete().
+		Resource("storageclasses").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *storageClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Resource("storageclasses").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched storageClass.
+func (c *storageClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.StorageClass, err error) {
+	result = &v1beta1.StorageClass{}
+	err = c.client.Patch(pt).
+		Resource("storageclasses").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
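
Note that Patch takes pre-serialized bytes, so the caller chooses the patch strategy. A hedged sketch using a strategic-merge patch to mark a class as the cluster default; the class name "standard" is illustrative:

package example

import (
	types "k8s.io/apimachinery/pkg/types"
	storagev1beta1 "k8s.io/client-go/kubernetes/typed/storage/v1beta1"
)

// markDefault sets the well-known default-class annotation on a StorageClass.
func markDefault(client storagev1beta1.StorageV1beta1Interface) error {
	patch := []byte(`{"metadata":{"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}`)
	_, err := client.StorageClasses().Patch("standard", types.StrategicMergePatchType, patch)
	return err
}
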
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go
new file mode 100644
index 0000000..d319407
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go
@@ -0,0 +1,180 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	"time"
+
+	v1beta1 "k8s.io/api/storage/v1beta1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// VolumeAttachmentsGetter has a method to return a VolumeAttachmentInterface.
+// A group's client should implement this interface.
+type VolumeAttachmentsGetter interface {
+	VolumeAttachments() VolumeAttachmentInterface
+}
+
+// VolumeAttachmentInterface has methods to work with VolumeAttachment resources.
+type VolumeAttachmentInterface interface {
+	Create(*v1beta1.VolumeAttachment) (*v1beta1.VolumeAttachment, error)
+	Update(*v1beta1.VolumeAttachment) (*v1beta1.VolumeAttachment, error)
+	UpdateStatus(*v1beta1.VolumeAttachment) (*v1beta1.VolumeAttachment, error)
+	Delete(name string, options *v1.DeleteOptions) error
+	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
+	Get(name string, options v1.GetOptions) (*v1beta1.VolumeAttachment, error)
+	List(opts v1.ListOptions) (*v1beta1.VolumeAttachmentList, error)
+	Watch(opts v1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.VolumeAttachment, err error)
+	VolumeAttachmentExpansion
+}
+
+// volumeAttachments implements VolumeAttachmentInterface
+type volumeAttachments struct {
+	client rest.Interface
+}
+
+// newVolumeAttachments returns a VolumeAttachments
+func newVolumeAttachments(c *StorageV1beta1Client) *volumeAttachments {
+	return &volumeAttachments{
+		client: c.RESTClient(),
+	}
+}
+
+// Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any.
+func (c *volumeAttachments) Get(name string, options v1.GetOptions) (result *v1beta1.VolumeAttachment, err error) {
+	result = &v1beta1.VolumeAttachment{}
+	err = c.client.Get().
+		Resource("volumeattachments").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors.
+func (c *volumeAttachments) List(opts v1.ListOptions) (result *v1beta1.VolumeAttachmentList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1beta1.VolumeAttachmentList{}
+	err = c.client.Get().
+		Resource("volumeattachments").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested volumeAttachments.
+func (c *volumeAttachments) Watch(opts v1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Resource("volumeattachments").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch()
+}
+
+// Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any.
+func (c *volumeAttachments) Create(volumeAttachment *v1beta1.VolumeAttachment) (result *v1beta1.VolumeAttachment, err error) {
+	result = &v1beta1.VolumeAttachment{}
+	err = c.client.Post().
+		Resource("volumeattachments").
+		Body(volumeAttachment).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any.
+func (c *volumeAttachments) Update(volumeAttachment *v1beta1.VolumeAttachment) (result *v1beta1.VolumeAttachment, err error) {
+	result = &v1beta1.VolumeAttachment{}
+	err = c.client.Put().
+		Resource("volumeattachments").
+		Name(volumeAttachment.Name).
+		Body(volumeAttachment).
+		Do().
+		Into(result)
+	return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+
+func (c *volumeAttachments) UpdateStatus(volumeAttachment *v1beta1.VolumeAttachment) (result *v1beta1.VolumeAttachment, err error) {
+	result = &v1beta1.VolumeAttachment{}
+	err = c.client.Put().
+		Resource("volumeattachments").
+		Name(volumeAttachment.Name).
+		SubResource("status").
+		Body(volumeAttachment).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs.
+func (c *volumeAttachments) Delete(name string, options *v1.DeleteOptions) error {
+	return c.client.Delete().
+		Resource("volumeattachments").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *volumeAttachments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Resource("volumeattachments").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched volumeAttachment.
+func (c *volumeAttachments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.VolumeAttachment, err error) {
+	result = &v1beta1.VolumeAttachment{}
+	err = c.client.Patch(pt).
+		Resource("volumeattachments").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
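
For context: Watch returns a watch.Interface whose ResultChan delivers events until either side closes the watch, while UpdateStatus PUTs only the status subresource. A sketch of consuming the watch, with event handling trimmed to a print:

package example

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	storagev1beta1 "k8s.io/client-go/kubernetes/typed/storage/v1beta1"
)

// watchAttachments prints VolumeAttachment event types until the watch closes.
func watchAttachments(client storagev1beta1.StorageV1beta1Interface) error {
	w, err := client.VolumeAttachments().Watch(metav1.ListOptions{})
	if err != nil {
		return err
	}
	defer w.Stop()
	for ev := range w.ResultChan() {
		fmt.Println(ev.Type)
	}
	return nil
}
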
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS
new file mode 100644
index 0000000..3b7ea1b
--- /dev/null
+++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS
@@ -0,0 +1,7 @@
+# approval on api packages bubbles to api-approvers
+reviewers:
+- sig-auth-authenticators-approvers
+- sig-auth-authenticators-reviewers
+labels:
+- sig/auth
+
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go
new file mode 100644
index 0000000..b994597
--- /dev/null
+++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +groupName=client.authentication.k8s.io
+
+package clientauthentication // import "k8s.io/client-go/pkg/apis/clientauthentication"
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/register.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/register.go
new file mode 100644
index 0000000..e4fbc3e
--- /dev/null
+++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/register.go
@@ -0,0 +1,50 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package clientauthentication
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "client.authentication.k8s.io"
+
+// SchemeGroupVersion is the group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
+
+// Kind takes an unqualified kind and returns a Group qualified GroupKind
+func Kind(kind string) schema.GroupKind {
+	return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+	AddToScheme   = SchemeBuilder.AddToScheme
+)
+
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&ExecCredential{},
+	)
+	return nil
+}
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go
new file mode 100644
index 0000000..6fb53ce
--- /dev/null
+++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go
@@ -0,0 +1,77 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package clientauthentication
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ExecCredential is used by exec-based plugins to communicate credentials to
+// HTTP transports.
+type ExecCredential struct {
+	metav1.TypeMeta
+
+	// Spec holds information passed to the plugin by the transport. This contains
+	// request- and runtime-specific information, such as whether the session is interactive.
+	Spec ExecCredentialSpec
+
+	// Status is filled in by the plugin and holds the credentials that the transport
+	// should use to contact the API.
+	// +optional
+	Status *ExecCredentialStatus
+}
+
+// ExecCredentialSpec holds request- and runtime-specific information provided by
+// the transport.
+type ExecCredentialSpec struct {
+	// Response is populated when the transport encounters HTTP status codes, such as 401,
+	// suggesting previous credentials were invalid.
+	// +optional
+	Response *Response
+
+	// Interactive is true when the transport detects the command is being called from an
+	// interactive prompt.
+	// +optional
+	Interactive bool
+}
+
+// ExecCredentialStatus holds credentials for the transport to use.
+type ExecCredentialStatus struct {
+	// ExpirationTimestamp indicates a time when the provided credentials expire.
+	// +optional
+	ExpirationTimestamp *metav1.Time
+	// Token is a bearer token used by the client for request authentication.
+	// +optional
+	Token string
+	// PEM-encoded client TLS certificate.
+	// +optional
+	ClientCertificateData string
+	// PEM-encoded client TLS private key.
+	// +optional
+	ClientKeyData string
+}
+
+// Response defines metadata about a failed request, including HTTP status code and
+// response headers.
+type Response struct {
+	// Header holds HTTP headers returned by the server.
+	Header map[string][]string
+	// Code is the HTTP status code returned by the server.
+	Code int32
+}
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go
new file mode 100644
index 0000000..19ab776
--- /dev/null
+++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go
@@ -0,0 +1,24 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:conversion-gen=k8s.io/client-go/pkg/apis/clientauthentication
+// +k8s:openapi-gen=true
+// +k8s:defaulter-gen=TypeMeta
+
+// +groupName=client.authentication.k8s.io
+
+package v1alpha1 // import "k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1"
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/register.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/register.go
new file mode 100644
index 0000000..2acd13d
--- /dev/null
+++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/register.go
@@ -0,0 +1,55 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "client.authentication.k8s.io"
+
+// SchemeGroupVersion is the group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	SchemeBuilder      runtime.SchemeBuilder
+	localSchemeBuilder = &SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+)
+
+func init() {
+	// We only register manually written functions here. The registration of the
+	// generated functions takes place in the generated files. The separation
+	// makes the code compile even when the generated files are missing.
+	localSchemeBuilder.Register(addKnownTypes)
+}
+
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&ExecCredential{},
+	)
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
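
AddToScheme is the hook consumers use to wire this group into a runtime.Scheme. A sketch of encoding an ExecCredential (defined in types.go below) to versioned JSON with a codec factory; the helper name is hypothetical:

package example

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
	"k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1"
)

// encodeCredential serializes an ExecCredential as v1alpha1 JSON.
func encodeCredential(cred *v1alpha1.ExecCredential) ([]byte, error) {
	scheme := runtime.NewScheme()
	if err := v1alpha1.AddToScheme(scheme); err != nil {
		return nil, err
	}
	codecs := serializer.NewCodecFactory(scheme)
	info, ok := runtime.SerializerInfoForMediaType(codecs.SupportedMediaTypes(), runtime.ContentTypeJSON)
	if !ok {
		return nil, fmt.Errorf("no JSON serializer registered")
	}
	return runtime.Encode(codecs.EncoderForVersion(info.Serializer, v1alpha1.SchemeGroupVersion), cred)
}
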
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go
new file mode 100644
index 0000000..921f3a2
--- /dev/null
+++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go
@@ -0,0 +1,78 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ExecCredential is used by exec-based plugins to communicate credentials to
+// HTTP transports.
+type ExecCredential struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// Spec holds information passed to the plugin by the transport. This contains
+	// request- and runtime-specific information, such as whether the session is interactive.
+	Spec ExecCredentialSpec `json:"spec,omitempty"`
+
+	// Status is filled in by the plugin and holds the credentials that the transport
+	// should use to contact the API.
+	// +optional
+	Status *ExecCredentialStatus `json:"status,omitempty"`
+}
+
+// ExecCredentialSpec holds request- and runtime-specific information provided by
+// the transport.
+type ExecCredentialSpec struct {
+	// Response is populated when the transport encounters HTTP status codes, such as 401,
+	// suggesting previous credentials were invalid.
+	// +optional
+	Response *Response `json:"response,omitempty"`
+
+	// Interactive is true when the transport detects the command is being called from an
+	// interactive prompt.
+	// +optional
+	Interactive bool `json:"interactive,omitempty"`
+}
+
+// ExecCredentialStatus holds credentials for the transport to use.
+//
+// Token and ClientKeyData are sensitive fields. This data should only be
+// transmitted in memory between the client and the exec plugin process. The
+// exec plugin itself should at least be protected via file permissions.
+type ExecCredentialStatus struct {
+	// ExpirationTimestamp indicates a time when the provided credentials expire.
+	// +optional
+	ExpirationTimestamp *metav1.Time `json:"expirationTimestamp,omitempty"`
+	// Token is a bearer token used by the client for request authentication.
+	Token string `json:"token,omitempty"`
+	// PEM-encoded client TLS certificates (including intermediates, if any).
+	ClientCertificateData string `json:"clientCertificateData,omitempty"`
+	// PEM-encoded private key for the above certificate.
+	ClientKeyData string `json:"clientKeyData,omitempty"`
+}
+
+// Response defines metadata about a failed request, including HTTP status code and
+// response headers.
+type Response struct {
+	// Header holds HTTP headers returned by the server.
+	Header map[string][]string `json:"header,omitempty"`
+	// Code is the HTTP status code returned by the server.
+	Code int32 `json:"code,omitempty"`
+}
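
On the wire, an exec plugin typically hands back a status-only credential. A sketch of building one; the token value is a placeholder, and run through the encodeCredential sketch above it serializes with apiVersion "client.authentication.k8s.io/v1alpha1" and kind "ExecCredential":

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1"
)

// newCredential builds a status-only ExecCredential carrying a bearer token.
func newCredential(token string) *v1alpha1.ExecCredential {
	return &v1alpha1.ExecCredential{
		TypeMeta: metav1.TypeMeta{
			APIVersion: v1alpha1.SchemeGroupVersion.String(),
			Kind:       "ExecCredential",
		},
		Status: &v1alpha1.ExecCredentialStatus{Token: token},
	}
}
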
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go
new file mode 100644
index 0000000..461c20b
--- /dev/null
+++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go
@@ -0,0 +1,176 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by conversion-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	unsafe "unsafe"
+
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	conversion "k8s.io/apimachinery/pkg/conversion"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	clientauthentication "k8s.io/client-go/pkg/apis/clientauthentication"
+)
+
+func init() {
+	localSchemeBuilder.Register(RegisterConversions)
+}
+
+// RegisterConversions adds conversion functions to the given scheme.
+// Public to allow building arbitrary schemes.
+func RegisterConversions(s *runtime.Scheme) error {
+	if err := s.AddGeneratedConversionFunc((*ExecCredential)(nil), (*clientauthentication.ExecCredential)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_ExecCredential_To_clientauthentication_ExecCredential(a.(*ExecCredential), b.(*clientauthentication.ExecCredential), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*clientauthentication.ExecCredential)(nil), (*ExecCredential)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_clientauthentication_ExecCredential_To_v1alpha1_ExecCredential(a.(*clientauthentication.ExecCredential), b.(*ExecCredential), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ExecCredentialSpec)(nil), (*clientauthentication.ExecCredentialSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(a.(*ExecCredentialSpec), b.(*clientauthentication.ExecCredentialSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*clientauthentication.ExecCredentialSpec)(nil), (*ExecCredentialSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec(a.(*clientauthentication.ExecCredentialSpec), b.(*ExecCredentialSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ExecCredentialStatus)(nil), (*clientauthentication.ExecCredentialStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(a.(*ExecCredentialStatus), b.(*clientauthentication.ExecCredentialStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*clientauthentication.ExecCredentialStatus)(nil), (*ExecCredentialStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_clientauthentication_ExecCredentialStatus_To_v1alpha1_ExecCredentialStatus(a.(*clientauthentication.ExecCredentialStatus), b.(*ExecCredentialStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*Response)(nil), (*clientauthentication.Response)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_Response_To_clientauthentication_Response(a.(*Response), b.(*clientauthentication.Response), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*clientauthentication.Response)(nil), (*Response)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_clientauthentication_Response_To_v1alpha1_Response(a.(*clientauthentication.Response), b.(*Response), scope)
+	}); err != nil {
+		return err
+	}
+	return nil
+}
+
+func autoConvert_v1alpha1_ExecCredential_To_clientauthentication_ExecCredential(in *ExecCredential, out *clientauthentication.ExecCredential, s conversion.Scope) error {
+	if err := Convert_v1alpha1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(&in.Spec, &out.Spec, s); err != nil {
+		return err
+	}
+	out.Status = (*clientauthentication.ExecCredentialStatus)(unsafe.Pointer(in.Status))
+	return nil
+}
+
+// Convert_v1alpha1_ExecCredential_To_clientauthentication_ExecCredential is an autogenerated conversion function.
+func Convert_v1alpha1_ExecCredential_To_clientauthentication_ExecCredential(in *ExecCredential, out *clientauthentication.ExecCredential, s conversion.Scope) error {
+	return autoConvert_v1alpha1_ExecCredential_To_clientauthentication_ExecCredential(in, out, s)
+}
+
+func autoConvert_clientauthentication_ExecCredential_To_v1alpha1_ExecCredential(in *clientauthentication.ExecCredential, out *ExecCredential, s conversion.Scope) error {
+	if err := Convert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec(&in.Spec, &out.Spec, s); err != nil {
+		return err
+	}
+	out.Status = (*ExecCredentialStatus)(unsafe.Pointer(in.Status))
+	return nil
+}
+
+// Convert_clientauthentication_ExecCredential_To_v1alpha1_ExecCredential is an autogenerated conversion function.
+func Convert_clientauthentication_ExecCredential_To_v1alpha1_ExecCredential(in *clientauthentication.ExecCredential, out *ExecCredential, s conversion.Scope) error {
+	return autoConvert_clientauthentication_ExecCredential_To_v1alpha1_ExecCredential(in, out, s)
+}
+
+func autoConvert_v1alpha1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(in *ExecCredentialSpec, out *clientauthentication.ExecCredentialSpec, s conversion.Scope) error {
+	out.Response = (*clientauthentication.Response)(unsafe.Pointer(in.Response))
+	out.Interactive = in.Interactive
+	return nil
+}
+
+// Convert_v1alpha1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec is an autogenerated conversion function.
+func Convert_v1alpha1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(in *ExecCredentialSpec, out *clientauthentication.ExecCredentialSpec, s conversion.Scope) error {
+	return autoConvert_v1alpha1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(in, out, s)
+}
+
+func autoConvert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error {
+	out.Response = (*Response)(unsafe.Pointer(in.Response))
+	out.Interactive = in.Interactive
+	return nil
+}
+
+// Convert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec is an autogenerated conversion function.
+func Convert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error {
+	return autoConvert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec(in, out, s)
+}
+
+func autoConvert_v1alpha1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in *ExecCredentialStatus, out *clientauthentication.ExecCredentialStatus, s conversion.Scope) error {
+	out.ExpirationTimestamp = (*v1.Time)(unsafe.Pointer(in.ExpirationTimestamp))
+	out.Token = in.Token
+	out.ClientCertificateData = in.ClientCertificateData
+	out.ClientKeyData = in.ClientKeyData
+	return nil
+}
+
+// Convert_v1alpha1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus is an autogenerated conversion function.
+func Convert_v1alpha1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in *ExecCredentialStatus, out *clientauthentication.ExecCredentialStatus, s conversion.Scope) error {
+	return autoConvert_v1alpha1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in, out, s)
+}
+
+func autoConvert_clientauthentication_ExecCredentialStatus_To_v1alpha1_ExecCredentialStatus(in *clientauthentication.ExecCredentialStatus, out *ExecCredentialStatus, s conversion.Scope) error {
+	out.ExpirationTimestamp = (*v1.Time)(unsafe.Pointer(in.ExpirationTimestamp))
+	out.Token = in.Token
+	out.ClientCertificateData = in.ClientCertificateData
+	out.ClientKeyData = in.ClientKeyData
+	return nil
+}
+
+// Convert_clientauthentication_ExecCredentialStatus_To_v1alpha1_ExecCredentialStatus is an autogenerated conversion function.
+func Convert_clientauthentication_ExecCredentialStatus_To_v1alpha1_ExecCredentialStatus(in *clientauthentication.ExecCredentialStatus, out *ExecCredentialStatus, s conversion.Scope) error {
+	return autoConvert_clientauthentication_ExecCredentialStatus_To_v1alpha1_ExecCredentialStatus(in, out, s)
+}
+
+func autoConvert_v1alpha1_Response_To_clientauthentication_Response(in *Response, out *clientauthentication.Response, s conversion.Scope) error {
+	out.Header = *(*map[string][]string)(unsafe.Pointer(&in.Header))
+	out.Code = in.Code
+	return nil
+}
+
+// Convert_v1alpha1_Response_To_clientauthentication_Response is an autogenerated conversion function.
+func Convert_v1alpha1_Response_To_clientauthentication_Response(in *Response, out *clientauthentication.Response, s conversion.Scope) error {
+	return autoConvert_v1alpha1_Response_To_clientauthentication_Response(in, out, s)
+}
+
+func autoConvert_clientauthentication_Response_To_v1alpha1_Response(in *clientauthentication.Response, out *Response, s conversion.Scope) error {
+	out.Header = *(*map[string][]string)(unsafe.Pointer(&in.Header))
+	out.Code = in.Code
+	return nil
+}
+
+// Convert_clientauthentication_Response_To_v1alpha1_Response is an autogenerated conversion function.
+func Convert_clientauthentication_Response_To_v1alpha1_Response(in *clientauthentication.Response, out *Response, s conversion.Scope) error {
+	return autoConvert_clientauthentication_Response_To_v1alpha1_Response(in, out, s)
+}
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..a73d31b
--- /dev/null
+++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,128 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExecCredential) DeepCopyInto(out *ExecCredential) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.Spec.DeepCopyInto(&out.Spec)
+	if in.Status != nil {
+		in, out := &in.Status, &out.Status
+		*out = new(ExecCredentialStatus)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredential.
+func (in *ExecCredential) DeepCopy() *ExecCredential {
+	if in == nil {
+		return nil
+	}
+	out := new(ExecCredential)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ExecCredential) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExecCredentialSpec) DeepCopyInto(out *ExecCredentialSpec) {
+	*out = *in
+	if in.Response != nil {
+		in, out := &in.Response, &out.Response
+		*out = new(Response)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredentialSpec.
+func (in *ExecCredentialSpec) DeepCopy() *ExecCredentialSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ExecCredentialSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExecCredentialStatus) DeepCopyInto(out *ExecCredentialStatus) {
+	*out = *in
+	if in.ExpirationTimestamp != nil {
+		in, out := &in.ExpirationTimestamp, &out.ExpirationTimestamp
+		*out = (*in).DeepCopy()
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredentialStatus.
+func (in *ExecCredentialStatus) DeepCopy() *ExecCredentialStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ExecCredentialStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Response) DeepCopyInto(out *Response) {
+	*out = *in
+	if in.Header != nil {
+		in, out := &in.Header, &out.Header
+		*out = make(map[string][]string, len(*in))
+		for key, val := range *in {
+			var outVal []string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				in, out := &val, &outVal
+				*out = make([]string, len(*in))
+				copy(*out, *in)
+			}
+			(*out)[key] = outVal
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Response.
+func (in *Response) DeepCopy() *Response {
+	if in == nil {
+		return nil
+	}
+	out := new(Response)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.defaults.go
new file mode 100644
index 0000000..dd621a3
--- /dev/null
+++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.defaults.go
@@ -0,0 +1,32 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by defaulter-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// RegisterDefaults adds defaulter functions to the given scheme.
+// Public to allow building arbitrary schemes.
+// All generated defaulters are covering - they call all nested defaulters.
+func RegisterDefaults(scheme *runtime.Scheme) error {
+	return nil
+}
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/conversion.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/conversion.go
new file mode 100644
index 0000000..f543806
--- /dev/null
+++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/conversion.go
@@ -0,0 +1,26 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	conversion "k8s.io/apimachinery/pkg/conversion"
+	clientauthentication "k8s.io/client-go/pkg/apis/clientauthentication"
+)
+
+func Convert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error {
+	return nil
+}
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go
new file mode 100644
index 0000000..22d1c58
--- /dev/null
+++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go
@@ -0,0 +1,24 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:conversion-gen=k8s.io/client-go/pkg/apis/clientauthentication
+// +k8s:openapi-gen=true
+// +k8s:defaulter-gen=TypeMeta
+
+// +groupName=client.authentication.k8s.io
+
+package v1beta1 // import "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/register.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/register.go
new file mode 100644
index 0000000..0bb92f1
--- /dev/null
+++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/register.go
@@ -0,0 +1,55 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "client.authentication.k8s.io"
+
+// SchemeGroupVersion is the group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	SchemeBuilder      runtime.SchemeBuilder
+	localSchemeBuilder = &SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+)
+
+func init() {
+	// We only register manually written functions here. The registration of the
+	// generated functions takes place in the generated files. The separation
+	// makes the code compile even when the generated files are missing.
+	localSchemeBuilder.Register(addKnownTypes)
+}
+
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&ExecCredential{},
+	)
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go
new file mode 100644
index 0000000..d6e2674
--- /dev/null
+++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go
@@ -0,0 +1,59 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ExecCredential is used by exec-based plugins to communicate credentials to
+// HTTP transports.
+type ExecCredential struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// Spec holds information passed to the plugin by the transport. This contains
+	// request- and runtime-specific information, such as whether the session is interactive.
+	Spec ExecCredentialSpec `json:"spec,omitempty"`
+
+	// Status is filled in by the plugin and holds the credentials that the transport
+	// should use to contact the API.
+	// +optional
+	Status *ExecCredentialStatus `json:"status,omitempty"`
+}
+
+// ExecCredentialSpec holds request- and runtime-specific information provided by
+// the transport.
+type ExecCredentialSpec struct{}
+
+// ExecCredentialStatus holds credentials for the transport to use.
+//
+// Token and ClientKeyData are sensitive fields. This data should only be
+// transmitted in memory between the client and the exec plugin process. The
+// exec plugin itself should at least be protected via file permissions.
+type ExecCredentialStatus struct {
+	// ExpirationTimestamp indicates a time when the provided credentials expire.
+	// +optional
+	ExpirationTimestamp *metav1.Time `json:"expirationTimestamp,omitempty"`
+	// Token is a bearer token used by the client for request authentication.
+	Token string `json:"token,omitempty"`
+	// PEM-encoded client TLS certificates (including intermediates, if any).
+	ClientCertificateData string `json:"clientCertificateData,omitempty"`
+	// PEM-encoded private key for the above certificate.
+	ClientKeyData string `json:"clientKeyData,omitempty"`
+}
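
Note that v1beta1 drops Response and Interactive entirely (the spec is an empty struct), which is why conversion.go above carries a hand-written no-op conversion from the internal spec. A sketch of decoding plugin output into this version; the helper name is hypothetical:

package example

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
	"k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
)

// decodeCredential parses the JSON an exec plugin writes to stdout.
func decodeCredential(data []byte) (*v1beta1.ExecCredential, error) {
	scheme := runtime.NewScheme()
	if err := v1beta1.AddToScheme(scheme); err != nil {
		return nil, err
	}
	codecs := serializer.NewCodecFactory(scheme)
	obj, err := runtime.Decode(codecs.UniversalDecoder(v1beta1.SchemeGroupVersion), data)
	if err != nil {
		return nil, err
	}
	cred, ok := obj.(*v1beta1.ExecCredential)
	if !ok {
		return nil, fmt.Errorf("unexpected object %T", obj)
	}
	return cred, nil
}
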
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go
new file mode 100644
index 0000000..94ef4b7
--- /dev/null
+++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go
@@ -0,0 +1,142 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by conversion-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	unsafe "unsafe"
+
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	conversion "k8s.io/apimachinery/pkg/conversion"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	clientauthentication "k8s.io/client-go/pkg/apis/clientauthentication"
+)
+
+func init() {
+	localSchemeBuilder.Register(RegisterConversions)
+}
+
+// RegisterConversions adds conversion functions to the given scheme.
+// Public to allow building arbitrary schemes.
+func RegisterConversions(s *runtime.Scheme) error {
+	if err := s.AddGeneratedConversionFunc((*ExecCredential)(nil), (*clientauthentication.ExecCredential)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_ExecCredential_To_clientauthentication_ExecCredential(a.(*ExecCredential), b.(*clientauthentication.ExecCredential), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*clientauthentication.ExecCredential)(nil), (*ExecCredential)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_clientauthentication_ExecCredential_To_v1beta1_ExecCredential(a.(*clientauthentication.ExecCredential), b.(*ExecCredential), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ExecCredentialSpec)(nil), (*clientauthentication.ExecCredentialSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(a.(*ExecCredentialSpec), b.(*clientauthentication.ExecCredentialSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*clientauthentication.ExecCredentialSpec)(nil), (*ExecCredentialSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(a.(*clientauthentication.ExecCredentialSpec), b.(*ExecCredentialSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ExecCredentialStatus)(nil), (*clientauthentication.ExecCredentialStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(a.(*ExecCredentialStatus), b.(*clientauthentication.ExecCredentialStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*clientauthentication.ExecCredentialStatus)(nil), (*ExecCredentialStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_clientauthentication_ExecCredentialStatus_To_v1beta1_ExecCredentialStatus(a.(*clientauthentication.ExecCredentialStatus), b.(*ExecCredentialStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*clientauthentication.ExecCredentialSpec)(nil), (*ExecCredentialSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(a.(*clientauthentication.ExecCredentialSpec), b.(*ExecCredentialSpec), scope)
+	}); err != nil {
+		return err
+	}
+	return nil
+}
+
+func autoConvert_v1beta1_ExecCredential_To_clientauthentication_ExecCredential(in *ExecCredential, out *clientauthentication.ExecCredential, s conversion.Scope) error {
+	if err := Convert_v1beta1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(&in.Spec, &out.Spec, s); err != nil {
+		return err
+	}
+	out.Status = (*clientauthentication.ExecCredentialStatus)(unsafe.Pointer(in.Status))
+	return nil
+}
+
+// Convert_v1beta1_ExecCredential_To_clientauthentication_ExecCredential is an autogenerated conversion function.
+func Convert_v1beta1_ExecCredential_To_clientauthentication_ExecCredential(in *ExecCredential, out *clientauthentication.ExecCredential, s conversion.Scope) error {
+	return autoConvert_v1beta1_ExecCredential_To_clientauthentication_ExecCredential(in, out, s)
+}
+
+func autoConvert_clientauthentication_ExecCredential_To_v1beta1_ExecCredential(in *clientauthentication.ExecCredential, out *ExecCredential, s conversion.Scope) error {
+	if err := Convert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(&in.Spec, &out.Spec, s); err != nil {
+		return err
+	}
+	out.Status = (*ExecCredentialStatus)(unsafe.Pointer(in.Status))
+	return nil
+}
+
+// Convert_clientauthentication_ExecCredential_To_v1beta1_ExecCredential is an autogenerated conversion function.
+func Convert_clientauthentication_ExecCredential_To_v1beta1_ExecCredential(in *clientauthentication.ExecCredential, out *ExecCredential, s conversion.Scope) error {
+	return autoConvert_clientauthentication_ExecCredential_To_v1beta1_ExecCredential(in, out, s)
+}
+
+func autoConvert_v1beta1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(in *ExecCredentialSpec, out *clientauthentication.ExecCredentialSpec, s conversion.Scope) error {
+	return nil
+}
+
+// Convert_v1beta1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec is an autogenerated conversion function.
+func Convert_v1beta1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(in *ExecCredentialSpec, out *clientauthentication.ExecCredentialSpec, s conversion.Scope) error {
+	return autoConvert_v1beta1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(in, out, s)
+}
+
+func autoConvert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error {
+	// WARNING: in.Response requires manual conversion: does not exist in peer-type
+	// WARNING: in.Interactive requires manual conversion: does not exist in peer-type
+	return nil
+}
+
+func autoConvert_v1beta1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in *ExecCredentialStatus, out *clientauthentication.ExecCredentialStatus, s conversion.Scope) error {
+	out.ExpirationTimestamp = (*v1.Time)(unsafe.Pointer(in.ExpirationTimestamp))
+	out.Token = in.Token
+	out.ClientCertificateData = in.ClientCertificateData
+	out.ClientKeyData = in.ClientKeyData
+	return nil
+}
+
+// Convert_v1beta1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus is an autogenerated conversion function.
+func Convert_v1beta1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in *ExecCredentialStatus, out *clientauthentication.ExecCredentialStatus, s conversion.Scope) error {
+	return autoConvert_v1beta1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in, out, s)
+}
+
+func autoConvert_clientauthentication_ExecCredentialStatus_To_v1beta1_ExecCredentialStatus(in *clientauthentication.ExecCredentialStatus, out *ExecCredentialStatus, s conversion.Scope) error {
+	out.ExpirationTimestamp = (*v1.Time)(unsafe.Pointer(in.ExpirationTimestamp))
+	out.Token = in.Token
+	out.ClientCertificateData = in.ClientCertificateData
+	out.ClientKeyData = in.ClientKeyData
+	return nil
+}
+
+// Convert_clientauthentication_ExecCredentialStatus_To_v1beta1_ExecCredentialStatus is an autogenerated conversion function.
+func Convert_clientauthentication_ExecCredentialStatus_To_v1beta1_ExecCredentialStatus(in *clientauthentication.ExecCredentialStatus, out *ExecCredentialStatus, s conversion.Scope) error {
+	return autoConvert_clientauthentication_ExecCredentialStatus_To_v1beta1_ExecCredentialStatus(in, out, s)
+}
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..736b8cf
--- /dev/null
+++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.deepcopy.go
@@ -0,0 +1,92 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExecCredential) DeepCopyInto(out *ExecCredential) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.Spec = in.Spec
+	if in.Status != nil {
+		in, out := &in.Status, &out.Status
+		*out = new(ExecCredentialStatus)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredential.
+func (in *ExecCredential) DeepCopy() *ExecCredential {
+	if in == nil {
+		return nil
+	}
+	out := new(ExecCredential)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ExecCredential) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExecCredentialSpec) DeepCopyInto(out *ExecCredentialSpec) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredentialSpec.
+func (in *ExecCredentialSpec) DeepCopy() *ExecCredentialSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ExecCredentialSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExecCredentialStatus) DeepCopyInto(out *ExecCredentialStatus) {
+	*out = *in
+	if in.ExpirationTimestamp != nil {
+		in, out := &in.ExpirationTimestamp, &out.ExpirationTimestamp
+		*out = (*in).DeepCopy()
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredentialStatus.
+func (in *ExecCredentialStatus) DeepCopy() *ExecCredentialStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ExecCredentialStatus)
+	in.DeepCopyInto(out)
+	return out
+}
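A short usage sketch (fmt assumed imported): the generated DeepCopy returns a fully independent copy, so pointer fields such as Status are never shared with the original:

	cred := &v1beta1.ExecCredential{Status: &v1beta1.ExecCredentialStatus{Token: "original"}}
	clone := cred.DeepCopy()
	clone.Status.Token = "changed"
	fmt.Println(cred.Status.Token) // still "original": Status was copied, not aliased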
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.defaults.go
new file mode 100644
index 0000000..73e63fc
--- /dev/null
+++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.defaults.go
@@ -0,0 +1,32 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by defaulter-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// RegisterDefaults adds defaulters functions to the given scheme.
+// Public to allow building arbitrary schemes.
+// All generated defaulters are covering - they call all nested defaulters.
+func RegisterDefaults(scheme *runtime.Scheme) error {
+	return nil
+}
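RegisterDefaults is a no-op for this group (no defaulters are generated yet), but callers can still register it uniformly when assembling a scheme; a hedged sketch:

	scheme := runtime.NewScheme()
	// Safe even though this group currently declares no defaulting functions.
	if err := v1beta1.RegisterDefaults(scheme); err != nil {
		panic(err)
	}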
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go
new file mode 100644
index 0000000..c568a6f
--- /dev/null
+++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go
@@ -0,0 +1,128 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package clientauthentication
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExecCredential) DeepCopyInto(out *ExecCredential) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.Spec.DeepCopyInto(&out.Spec)
+	if in.Status != nil {
+		in, out := &in.Status, &out.Status
+		*out = new(ExecCredentialStatus)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredential.
+func (in *ExecCredential) DeepCopy() *ExecCredential {
+	if in == nil {
+		return nil
+	}
+	out := new(ExecCredential)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ExecCredential) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExecCredentialSpec) DeepCopyInto(out *ExecCredentialSpec) {
+	*out = *in
+	if in.Response != nil {
+		in, out := &in.Response, &out.Response
+		*out = new(Response)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredentialSpec.
+func (in *ExecCredentialSpec) DeepCopy() *ExecCredentialSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ExecCredentialSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExecCredentialStatus) DeepCopyInto(out *ExecCredentialStatus) {
+	*out = *in
+	if in.ExpirationTimestamp != nil {
+		in, out := &in.ExpirationTimestamp, &out.ExpirationTimestamp
+		*out = (*in).DeepCopy()
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredentialStatus.
+func (in *ExecCredentialStatus) DeepCopy() *ExecCredentialStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ExecCredentialStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Response) DeepCopyInto(out *Response) {
+	*out = *in
+	if in.Header != nil {
+		in, out := &in.Header, &out.Header
+		*out = make(map[string][]string, len(*in))
+		for key, val := range *in {
+			var outVal []string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				in, out := &val, &outVal
+				*out = make([]string, len(*in))
+				copy(*out, *in)
+			}
+			(*out)[key] = outVal
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Response.
+func (in *Response) DeepCopy() *Response {
+	if in == nil {
+		return nil
+	}
+	out := new(Response)
+	in.DeepCopyInto(out)
+	return out
+}
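The per-key loop in Response's DeepCopyInto exists because map[string][]string needs a fresh slice for every key; copying the map header alone would alias the value slices. A sketch of the behavior this buys (fmt assumed imported):

	r := &clientauthentication.Response{Header: map[string][]string{"Accept": {"application/json"}}}
	c := r.DeepCopy()
	c.Header["Accept"][0] = "text/plain"
	fmt.Println(r.Header["Accept"][0]) // still "application/json": each value slice was copied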
diff --git a/vendor/k8s.io/client-go/pkg/version/.gitattributes b/vendor/k8s.io/client-go/pkg/version/.gitattributes
new file mode 100644
index 0000000..7e349ef
--- /dev/null
+++ b/vendor/k8s.io/client-go/pkg/version/.gitattributes
@@ -0,0 +1 @@
+base.go export-subst
diff --git a/vendor/k8s.io/client-go/pkg/version/base.go b/vendor/k8s.io/client-go/pkg/version/base.go
new file mode 100644
index 0000000..9b4c79f
--- /dev/null
+++ b/vendor/k8s.io/client-go/pkg/version/base.go
@@ -0,0 +1,63 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package version
+
+// Base version information.
+//
+// This is the fallback data used when version information from git is not
+// provided via go ldflags. It provides an approximation of the Kubernetes
+// version for ad-hoc builds (e.g. `go build`) that cannot get the version
+// information from git.
+//
+// If you are looking at these fields in the git tree, they look
+// strange. They are modified on the fly by the build process. The
+// in-tree values are dummy values used for "git archive", which also
+// works for GitHub tar downloads.
+//
+// When releasing a new Kubernetes version, this file is updated by
+// build/mark_new_version.sh to reflect the new version, and then a
+// git annotated tag (using format vX.Y where X == Major version and Y
+// == Minor version) is created to point to the commit that updates
+// pkg/version/base.go
+var (
+	// TODO: Deprecate gitMajor and gitMinor, use only gitVersion
+	// instead. First step in deprecation, keep the fields but make
+	// them irrelevant. (Next we'll take it out, which may muck with
+	// scripts consuming the kubectl version output - but most of
+	// these should be looking at gitVersion already anyway.)
+	gitMajor string = "" // major version, always numeric
+	gitMinor string = "" // minor version, numeric possibly followed by "+"
+
+	// semantic version, derived by build scripts (see
+	// https://git.k8s.io/community/contributors/design-proposals/release/versioning.md
+	// for a detailed discussion of this field)
+	//
+	// TODO: This field is still called "gitVersion" for legacy
+	// reasons. For prerelease versions, the build metadata on the
+	// semantic version is a git hash, but the version itself is no
+	// longer the direct output of "git describe", but a slight
+	// translation to be semver compliant.
+
+	// NOTE: The $Format strings are replaced during 'git archive' thanks to the
+	// companion .gitattributes file containing 'export-subst' in this same
+	// directory.  See also https://git-scm.com/docs/gitattributes
+	gitVersion   string = "v0.0.0-master+$Format:%h$"
+	gitCommit    string = "$Format:%H$" // sha1 from git, output of $(git rev-parse HEAD)
+	gitTreeState string = ""            // state of git tree, either "clean" or "dirty"
+
+	buildDate string = "1970-01-01T00:00:00Z" // build date in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ')
+)
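These fallbacks are meant to be overridden by stamping the package variables at link time with -ldflags. A hedged example of the kind of invocation involved (the version string is illustrative, not from this diff):

	go build -ldflags "-X k8s.io/client-go/pkg/version.gitVersion=v1.13.0 -X k8s.io/client-go/pkg/version.gitCommit=$(git rev-parse HEAD)"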
diff --git a/vendor/k8s.io/client-go/pkg/version/def.bzl b/vendor/k8s.io/client-go/pkg/version/def.bzl
new file mode 100644
index 0000000..9c018a4
--- /dev/null
+++ b/vendor/k8s.io/client-go/pkg/version/def.bzl
@@ -0,0 +1,38 @@
+# Copyright 2017 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Implements hack/lib/version.sh's kube::version::ldflags() for Bazel.
+def version_x_defs():
+  # This should match the list of packages in kube::version::ldflag
+  stamp_pkgs = [
+      "k8s.io/kubernetes/pkg/version",
+      # In hack/lib/version.sh this path has a vendor/ prefix; that prefix is not needed here.
+      "k8s.io/client-go/pkg/version",
+      ]
+  # This should match the list of vars in kube::version::ldflags
+  # It should also match the list of vars set in hack/print-workspace-status.sh.
+  stamp_vars = [
+      "buildDate",
+      "gitCommit",
+      "gitMajor",
+      "gitMinor",
+      "gitTreeState",
+      "gitVersion",
+  ]
+  # Generate the cross-product.
+  x_defs = {}
+  for pkg in stamp_pkgs:
+    for var in stamp_vars:
+      x_defs["%s.%s" % (pkg, var)] = "{%s}" % var
+  return x_defs
diff --git a/vendor/k8s.io/client-go/pkg/version/doc.go b/vendor/k8s.io/client-go/pkg/version/doc.go
new file mode 100644
index 0000000..05e997e
--- /dev/null
+++ b/vendor/k8s.io/client-go/pkg/version/doc.go
@@ -0,0 +1,21 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:openapi-gen=true
+
+// Package version supplies version information collected at build time to
+// kubernetes components.
+package version // import "k8s.io/client-go/pkg/version"
diff --git a/vendor/k8s.io/client-go/pkg/version/version.go b/vendor/k8s.io/client-go/pkg/version/version.go
new file mode 100644
index 0000000..8c8350d
--- /dev/null
+++ b/vendor/k8s.io/client-go/pkg/version/version.go
@@ -0,0 +1,42 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package version
+
+import (
+	"fmt"
+	"runtime"
+
+	apimachineryversion "k8s.io/apimachinery/pkg/version"
+)
+
+// Get returns the overall codebase version. It's for detecting
+// what code a binary was built from.
+func Get() apimachineryversion.Info {
+	// These variables typically come from -ldflags settings and in
+	// their absence fallback to the settings in pkg/version/base.go
+	return apimachineryversion.Info{
+		Major:        gitMajor,
+		Minor:        gitMinor,
+		GitVersion:   gitVersion,
+		GitCommit:    gitCommit,
+		GitTreeState: gitTreeState,
+		BuildDate:    buildDate,
+		GoVersion:    runtime.Version(),
+		Compiler:     runtime.Compiler,
+		Platform:     fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH),
+	}
+}
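A short usage sketch of Get (fmt assumed imported; the Info fields come from k8s.io/apimachinery/pkg/version):

	info := version.Get()
	fmt.Printf("client-go %s (commit %s), built %s, %s\n",
		info.GitVersion, info.GitCommit, info.BuildDate, info.Platform)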
diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go b/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go
new file mode 100644
index 0000000..4d72526
--- /dev/null
+++ b/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go
@@ -0,0 +1,361 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package exec
+
+import (
+	"bytes"
+	"context"
+	"crypto/tls"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"os"
+	"os/exec"
+	"reflect"
+	"sync"
+	"time"
+
+	"golang.org/x/crypto/ssh/terminal"
+	"k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/runtime/serializer"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/client-go/pkg/apis/clientauthentication"
+	"k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1"
+	"k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
+	"k8s.io/client-go/tools/clientcmd/api"
+	"k8s.io/client-go/transport"
+	"k8s.io/client-go/util/connrotation"
+	"k8s.io/klog"
+)
+
+const execInfoEnv = "KUBERNETES_EXEC_INFO"
+
+var scheme = runtime.NewScheme()
+var codecs = serializer.NewCodecFactory(scheme)
+
+func init() {
+	v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"})
+	utilruntime.Must(v1alpha1.AddToScheme(scheme))
+	utilruntime.Must(v1beta1.AddToScheme(scheme))
+	utilruntime.Must(clientauthentication.AddToScheme(scheme))
+}
+
+var (
+	// Since transports can be constantly re-initialized by programs like kubectl,
+	// keep a cache of initialized authenticators keyed by a hash of their config.
+	globalCache = newCache()
+	// The list of API versions we accept.
+	apiVersions = map[string]schema.GroupVersion{
+		v1alpha1.SchemeGroupVersion.String(): v1alpha1.SchemeGroupVersion,
+		v1beta1.SchemeGroupVersion.String():  v1beta1.SchemeGroupVersion,
+	}
+)
+
+func newCache() *cache {
+	return &cache{m: make(map[string]*Authenticator)}
+}
+
+func cacheKey(c *api.ExecConfig) string {
+	return fmt.Sprintf("%#v", c)
+}
+
+type cache struct {
+	mu sync.Mutex
+	m  map[string]*Authenticator
+}
+
+func (c *cache) get(s string) (*Authenticator, bool) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	a, ok := c.m[s]
+	return a, ok
+}
+
+// put inserts an authenticator into the cache. If an authenticator is already
+// associated with the key, the existing one is returned instead.
+func (c *cache) put(s string, a *Authenticator) *Authenticator {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	existing, ok := c.m[s]
+	if ok {
+		return existing
+	}
+	c.m[s] = a
+	return a
+}
+
+// GetAuthenticator returns an exec-based plugin for providing client credentials.
+func GetAuthenticator(config *api.ExecConfig) (*Authenticator, error) {
+	return newAuthenticator(globalCache, config)
+}
+
+func newAuthenticator(c *cache, config *api.ExecConfig) (*Authenticator, error) {
+	key := cacheKey(config)
+	if a, ok := c.get(key); ok {
+		return a, nil
+	}
+
+	gv, ok := apiVersions[config.APIVersion]
+	if !ok {
+		return nil, fmt.Errorf("exec plugin: invalid apiVersion %q", config.APIVersion)
+	}
+
+	a := &Authenticator{
+		cmd:   config.Command,
+		args:  config.Args,
+		group: gv,
+
+		stdin:       os.Stdin,
+		stderr:      os.Stderr,
+		interactive: terminal.IsTerminal(int(os.Stdout.Fd())),
+		now:         time.Now,
+		environ:     os.Environ,
+	}
+
+	for _, env := range config.Env {
+		a.env = append(a.env, env.Name+"="+env.Value)
+	}
+
+	return c.put(key, a), nil
+}
+
+// Authenticator is a client credential provider that rotates credentials by executing a plugin.
+// The plugin input and output are defined by the API group client.authentication.k8s.io.
+type Authenticator struct {
+	// Set by the config
+	cmd   string
+	args  []string
+	group schema.GroupVersion
+	env   []string
+
+	// Stubbable for testing
+	stdin       io.Reader
+	stderr      io.Writer
+	interactive bool
+	now         func() time.Time
+	environ     func() []string
+
+	// Cached results.
+	//
+	// The mutex also guards calling the plugin. Since the plugin could be
+	// interactive we want to make sure it's only called once.
+	mu          sync.Mutex
+	cachedCreds *credentials
+	exp         time.Time
+
+	onRotate func()
+}
+
+type credentials struct {
+	token string
+	cert  *tls.Certificate
+}
+
+// UpdateTransportConfig updates the transport.Config to use credentials
+// returned by the plugin.
+func (a *Authenticator) UpdateTransportConfig(c *transport.Config) error {
+	wt := c.WrapTransport
+	c.WrapTransport = func(rt http.RoundTripper) http.RoundTripper {
+		if wt != nil {
+			rt = wt(rt)
+		}
+		return &roundTripper{a, rt}
+	}
+
+	if c.TLS.GetCert != nil {
+		return errors.New("can't add TLS certificate callback: transport.Config.TLS.GetCert already set")
+	}
+	c.TLS.GetCert = a.cert
+
+	var dial func(ctx context.Context, network, addr string) (net.Conn, error)
+	if c.Dial != nil {
+		dial = c.Dial
+	} else {
+		dial = (&net.Dialer{Timeout: 30 * time.Second, KeepAlive: 30 * time.Second}).DialContext
+	}
+	d := connrotation.NewDialer(dial)
+	a.onRotate = d.CloseAll
+	c.Dial = d.DialContext
+
+	return nil
+}
+
+type roundTripper struct {
+	a    *Authenticator
+	base http.RoundTripper
+}
+
+func (r *roundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+	// If a user has already set credentials, use that. This makes commands like
+	// "kubectl get --token (token) pods" work.
+	if req.Header.Get("Authorization") != "" {
+		return r.base.RoundTrip(req)
+	}
+
+	creds, err := r.a.getCreds()
+	if err != nil {
+		return nil, fmt.Errorf("getting credentials: %v", err)
+	}
+	if creds.token != "" {
+		req.Header.Set("Authorization", "Bearer "+creds.token)
+	}
+
+	res, err := r.base.RoundTrip(req)
+	if err != nil {
+		return nil, err
+	}
+	if res.StatusCode == http.StatusUnauthorized {
+		resp := &clientauthentication.Response{
+			Header: res.Header,
+			Code:   int32(res.StatusCode),
+		}
+		if err := r.a.maybeRefreshCreds(creds, resp); err != nil {
+			klog.Errorf("refreshing credentials: %v", err)
+		}
+	}
+	return res, nil
+}
+
+func (a *Authenticator) credsExpired() bool {
+	if a.exp.IsZero() {
+		return false
+	}
+	return a.now().After(a.exp)
+}
+
+func (a *Authenticator) cert() (*tls.Certificate, error) {
+	creds, err := a.getCreds()
+	if err != nil {
+		return nil, err
+	}
+	return creds.cert, nil
+}
+
+func (a *Authenticator) getCreds() (*credentials, error) {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+	if a.cachedCreds != nil && !a.credsExpired() {
+		return a.cachedCreds, nil
+	}
+
+	if err := a.refreshCredsLocked(nil); err != nil {
+		return nil, err
+	}
+	return a.cachedCreds, nil
+}
+
+// maybeRefreshCreds executes the plugin to force a rotation of the
+// credentials, unless they were rotated already.
+func (a *Authenticator) maybeRefreshCreds(creds *credentials, r *clientauthentication.Response) error {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+
+	// Since we're not making a new pointer to a.cachedCreds in getCreds, no
+	// need to do deep comparison.
+	if creds != a.cachedCreds {
+		// Credentials already rotated.
+		return nil
+	}
+
+	return a.refreshCredsLocked(r)
+}
+
+// refreshCredsLocked executes the plugin and reads the credentials from
+// stdout. It must be called while holding the Authenticator's mutex.
+func (a *Authenticator) refreshCredsLocked(r *clientauthentication.Response) error {
+	cred := &clientauthentication.ExecCredential{
+		Spec: clientauthentication.ExecCredentialSpec{
+			Response:    r,
+			Interactive: a.interactive,
+		},
+	}
+
+	env := append(a.environ(), a.env...)
+	if a.group == v1alpha1.SchemeGroupVersion {
+		// Input spec disabled for beta due to lack of use. Possibly re-enable this later if
+		// someone wants it back.
+		//
+		// See: https://github.com/kubernetes/kubernetes/issues/61796
+		data, err := runtime.Encode(codecs.LegacyCodec(a.group), cred)
+		if err != nil {
+			return fmt.Errorf("encode ExecCredentials: %v", err)
+		}
+		env = append(env, fmt.Sprintf("%s=%s", execInfoEnv, data))
+	}
+
+	stdout := &bytes.Buffer{}
+	cmd := exec.Command(a.cmd, a.args...)
+	cmd.Env = env
+	cmd.Stderr = a.stderr
+	cmd.Stdout = stdout
+	if a.interactive {
+		cmd.Stdin = a.stdin
+	}
+
+	if err := cmd.Run(); err != nil {
+		return fmt.Errorf("exec: %v", err)
+	}
+
+	_, gvk, err := codecs.UniversalDecoder(a.group).Decode(stdout.Bytes(), nil, cred)
+	if err != nil {
+		return fmt.Errorf("decoding stdout: %v", err)
+	}
+	if gvk.Group != a.group.Group || gvk.Version != a.group.Version {
+		return fmt.Errorf("exec plugin is configured to use API version %s, plugin returned version %s",
+			a.group, schema.GroupVersion{Group: gvk.Group, Version: gvk.Version})
+	}
+
+	if cred.Status == nil {
+		return fmt.Errorf("exec plugin didn't return a status field")
+	}
+	if cred.Status.Token == "" && cred.Status.ClientCertificateData == "" && cred.Status.ClientKeyData == "" {
+		return fmt.Errorf("exec plugin didn't return a token or cert/key pair")
+	}
+	if (cred.Status.ClientCertificateData == "") != (cred.Status.ClientKeyData == "") {
+		return fmt.Errorf("exec plugin returned only certificate or key, not both")
+	}
+
+	if cred.Status.ExpirationTimestamp != nil {
+		a.exp = cred.Status.ExpirationTimestamp.Time
+	} else {
+		a.exp = time.Time{}
+	}
+
+	newCreds := &credentials{
+		token: cred.Status.Token,
+	}
+	if cred.Status.ClientKeyData != "" && cred.Status.ClientCertificateData != "" {
+		cert, err := tls.X509KeyPair([]byte(cred.Status.ClientCertificateData), []byte(cred.Status.ClientKeyData))
+		if err != nil {
+			return fmt.Errorf("failed parsing client key/certificate: %v", err)
+		}
+		newCreds.cert = &cert
+	}
+
+	oldCreds := a.cachedCreds
+	a.cachedCreds = newCreds
+	// Only close all connections when TLS cert rotates. Token rotation doesn't
+	// need the extra noise.
+	if a.onRotate != nil && oldCreds != nil && !reflect.DeepEqual(oldCreds.cert, a.cachedCreds.cert) {
+		a.onRotate()
+	}
+	return nil
+}
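End to end, the plugin above is wired into a transport roughly as follows; a hedged sketch in which the command name, arguments, and env var are illustrative, not part of this diff:

	import (
		"k8s.io/client-go/plugin/pkg/client/auth/exec"
		"k8s.io/client-go/tools/clientcmd/api"
		"k8s.io/client-go/transport"
	)

	func wire() (*transport.Config, error) {
		cfg := &api.ExecConfig{
			Command:    "example-credential-helper", // hypothetical plugin binary
			Args:       []string{"get-token"},
			APIVersion: "client.authentication.k8s.io/v1beta1",
			Env:        []api.ExecEnvVar{{Name: "CLUSTER", Value: "prod"}},
		}
		a, err := exec.GetAuthenticator(cfg)
		if err != nil {
			return nil, err
		}
		tc := &transport.Config{}
		// Installs the bearer-token round tripper, the TLS client-cert
		// callback, and the connection-rotating dialer described above.
		if err := a.UpdateTransportConfig(tc); err != nil {
			return nil, err
		}
		return tc, nil
	}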
diff --git a/vendor/k8s.io/client-go/rest/OWNERS b/vendor/k8s.io/client-go/rest/OWNERS
new file mode 100755
index 0000000..8d97da0
--- /dev/null
+++ b/vendor/k8s.io/client-go/rest/OWNERS
@@ -0,0 +1,24 @@
+reviewers:
+- thockin
+- smarterclayton
+- caesarxuchao
+- wojtek-t
+- deads2k
+- brendandburns
+- liggitt
+- nikhiljindal
+- gmarek
+- erictune
+- sttts
+- luxas
+- dims
+- errordeveloper
+- hongchaodeng
+- krousey
+- resouer
+- cjcullen
+- rmmh
+- lixiaobing10051267
+- asalkeld
+- juanvallejo
+- lojies
diff --git a/vendor/k8s.io/client-go/rest/client.go b/vendor/k8s.io/client-go/rest/client.go
new file mode 100644
index 0000000..927403c
--- /dev/null
+++ b/vendor/k8s.io/client-go/rest/client.go
@@ -0,0 +1,258 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rest
+
+import (
+	"fmt"
+	"mime"
+	"net/http"
+	"net/url"
+	"os"
+	"strconv"
+	"strings"
+	"time"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/client-go/util/flowcontrol"
+)
+
+const (
+	// Environment variables: note that the duration should be long enough that the backoff
+	// persists for a reasonable time (e.g. 120 seconds). The typical base might be "1".
+	envBackoffBase     = "KUBE_CLIENT_BACKOFF_BASE"
+	envBackoffDuration = "KUBE_CLIENT_BACKOFF_DURATION"
+)
+
+// Interface captures the set of operations for generically interacting with Kubernetes REST APIs.
+type Interface interface {
+	GetRateLimiter() flowcontrol.RateLimiter
+	Verb(verb string) *Request
+	Post() *Request
+	Put() *Request
+	Patch(pt types.PatchType) *Request
+	Get() *Request
+	Delete() *Request
+	APIVersion() schema.GroupVersion
+}
+
+// RESTClient imposes common Kubernetes API conventions on a set of resource paths.
+// The baseURL is expected to point to an HTTP or HTTPS path that is the parent
+// of one or more resources.  The server should return a decodable API resource
+// object, or an api.Status object which contains information about the reason for
+// any failure.
+//
+// Most consumers should use client.New() to get a Kubernetes API client.
+type RESTClient struct {
+	// base is the root URL for all invocations of the client
+	base *url.URL
+	// versionedAPIPath is a path segment connecting the base URL to the resource root
+	versionedAPIPath string
+
+	// contentConfig is the information used to communicate with the server.
+	contentConfig ContentConfig
+
+	// serializers contain all serializers for underlying content type.
+	serializers Serializers
+
+	// creates BackoffManager that is passed to requests.
+	createBackoffMgr func() BackoffManager
+
+	// TODO extract this into a wrapper interface via the RESTClient interface in kubectl.
+	Throttle flowcontrol.RateLimiter
+
+	// Set specific behavior of the client. If not set, http.DefaultClient will be used.
+	Client *http.Client
+}
+
+type Serializers struct {
+	Encoder             runtime.Encoder
+	Decoder             runtime.Decoder
+	StreamingSerializer runtime.Serializer
+	Framer              runtime.Framer
+	RenegotiatedDecoder func(contentType string, params map[string]string) (runtime.Decoder, error)
+}
+
+// NewRESTClient creates a new RESTClient. This client performs generic REST functions
+// such as Get, Put, Post, and Delete on specified paths.  Codec controls encoding and
+// decoding of responses from the server.
+func NewRESTClient(baseURL *url.URL, versionedAPIPath string, config ContentConfig, maxQPS float32, maxBurst int, rateLimiter flowcontrol.RateLimiter, client *http.Client) (*RESTClient, error) {
+	base := *baseURL
+	if !strings.HasSuffix(base.Path, "/") {
+		base.Path += "/"
+	}
+	base.RawQuery = ""
+	base.Fragment = ""
+
+	if config.GroupVersion == nil {
+		config.GroupVersion = &schema.GroupVersion{}
+	}
+	if len(config.ContentType) == 0 {
+		config.ContentType = "application/json"
+	}
+	serializers, err := createSerializers(config)
+	if err != nil {
+		return nil, err
+	}
+
+	var throttle flowcontrol.RateLimiter
+	if maxQPS > 0 && rateLimiter == nil {
+		throttle = flowcontrol.NewTokenBucketRateLimiter(maxQPS, maxBurst)
+	} else if rateLimiter != nil {
+		throttle = rateLimiter
+	}
+	return &RESTClient{
+		base:             &base,
+		versionedAPIPath: versionedAPIPath,
+		contentConfig:    config,
+		serializers:      *serializers,
+		createBackoffMgr: readExpBackoffConfig,
+		Throttle:         throttle,
+		Client:           client,
+	}, nil
+}
+
+// GetRateLimiter returns the rate limiter for a given client, or nil if it's called on a nil client.
+func (c *RESTClient) GetRateLimiter() flowcontrol.RateLimiter {
+	if c == nil {
+		return nil
+	}
+	return c.Throttle
+}
+
+// readExpBackoffConfig determines which backoff policy to use based on the
+// environment. If no configuration is available, it defaults to NoBackoff.
+// TODO Generalize this see #17727 .
+func readExpBackoffConfig() BackoffManager {
+	backoffBase := os.Getenv(envBackoffBase)
+	backoffDuration := os.Getenv(envBackoffDuration)
+
+	backoffBaseInt, errBase := strconv.ParseInt(backoffBase, 10, 64)
+	backoffDurationInt, errDuration := strconv.ParseInt(backoffDuration, 10, 64)
+	if errBase != nil || errDuration != nil {
+		return &NoBackoff{}
+	}
+	return &URLBackoff{
+		Backoff: flowcontrol.NewBackOff(
+			time.Duration(backoffBaseInt)*time.Second,
+			time.Duration(backoffDurationInt)*time.Second)}
+}
+
+// createSerializers creates all necessary serializers for given contentType.
+// TODO: the negotiated serializer passed to this method should probably return
+//   serializers that control decoding and versioning without this package
+//   being aware of the types. Depends on whether RESTClient must deal with
+//   generic infrastructure.
+func createSerializers(config ContentConfig) (*Serializers, error) {
+	mediaTypes := config.NegotiatedSerializer.SupportedMediaTypes()
+	contentType := config.ContentType
+	mediaType, _, err := mime.ParseMediaType(contentType)
+	if err != nil {
+		return nil, fmt.Errorf("the content type specified in the client configuration is not recognized: %v", err)
+	}
+	info, ok := runtime.SerializerInfoForMediaType(mediaTypes, mediaType)
+	if !ok {
+		if len(contentType) != 0 || len(mediaTypes) == 0 {
+			return nil, fmt.Errorf("no serializers registered for %s", contentType)
+		}
+		info = mediaTypes[0]
+	}
+
+	internalGV := schema.GroupVersions{
+		{
+			Group:   config.GroupVersion.Group,
+			Version: runtime.APIVersionInternal,
+		},
+		// always include the legacy group as a decoding target to handle non-error `Status` return types
+		{
+			Group:   "",
+			Version: runtime.APIVersionInternal,
+		},
+	}
+
+	s := &Serializers{
+		Encoder: config.NegotiatedSerializer.EncoderForVersion(info.Serializer, *config.GroupVersion),
+		Decoder: config.NegotiatedSerializer.DecoderToVersion(info.Serializer, internalGV),
+
+		RenegotiatedDecoder: func(contentType string, params map[string]string) (runtime.Decoder, error) {
+			info, ok := runtime.SerializerInfoForMediaType(mediaTypes, contentType)
+			if !ok {
+				return nil, fmt.Errorf("serializer for %s not registered", contentType)
+			}
+			return config.NegotiatedSerializer.DecoderToVersion(info.Serializer, internalGV), nil
+		},
+	}
+	if info.StreamSerializer != nil {
+		s.StreamingSerializer = info.StreamSerializer.Serializer
+		s.Framer = info.StreamSerializer.Framer
+	}
+
+	return s, nil
+}
+
+// Verb begins a request with a verb (GET, POST, PUT, DELETE).
+//
+// Example usage of RESTClient's request building interface:
+// c, err := NewRESTClient(...)
+// if err != nil { ... }
+// result := c.Verb("GET").
+//  Resource("pods").
+//  Param("labelSelector", "area=staging").
+//  Timeout(10*time.Second).
+//  Do()
+// list, err := result.Get()
+// if err != nil { ... }
+//
+func (c *RESTClient) Verb(verb string) *Request {
+	backoff := c.createBackoffMgr()
+
+	if c.Client == nil {
+		return NewRequest(nil, verb, c.base, c.versionedAPIPath, c.contentConfig, c.serializers, backoff, c.Throttle, 0)
+	}
+	return NewRequest(c.Client, verb, c.base, c.versionedAPIPath, c.contentConfig, c.serializers, backoff, c.Throttle, c.Client.Timeout)
+}
+
+// Post begins a POST request. Short for c.Verb("POST").
+func (c *RESTClient) Post() *Request {
+	return c.Verb("POST")
+}
+
+// Put begins a PUT request. Short for c.Verb("PUT").
+func (c *RESTClient) Put() *Request {
+	return c.Verb("PUT")
+}
+
+// Patch begins a PATCH request. Short for c.Verb("PATCH").
+func (c *RESTClient) Patch(pt types.PatchType) *Request {
+	return c.Verb("PATCH").SetHeader("Content-Type", string(pt))
+}
+
+// Get begins a GET request. Short for c.Verb("GET").
+func (c *RESTClient) Get() *Request {
+	return c.Verb("GET")
+}
+
+// Delete begins a DELETE request. Short for c.Verb("DELETE").
+func (c *RESTClient) Delete() *Request {
+	return c.Verb("DELETE")
+}
+
+// APIVersion returns the APIVersion this RESTClient is expected to use.
+func (c *RESTClient) APIVersion() schema.GroupVersion {
+	return *c.contentConfig.GroupVersion
+}
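The backoff above is driven entirely by the two environment variables; a sketch of what readExpBackoffConfig effectively builds when KUBE_CLIENT_BACKOFF_BASE=1 and KUBE_CLIENT_BACKOFF_DURATION=120 are set (URLBackoff and flowcontrol are the types referenced in this file):

	backoff := &rest.URLBackoff{
		Backoff: flowcontrol.NewBackOff(1*time.Second, 120*time.Second),
	}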
diff --git a/vendor/k8s.io/client-go/rest/config.go b/vendor/k8s.io/client-go/rest/config.go
new file mode 100644
index 0000000..438eb3b
--- /dev/null
+++ b/vendor/k8s.io/client-go/rest/config.go
@@ -0,0 +1,466 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rest
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"os"
+	"path/filepath"
+	gruntime "runtime"
+	"strings"
+	"time"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/pkg/version"
+	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
+	certutil "k8s.io/client-go/util/cert"
+	"k8s.io/client-go/util/flowcontrol"
+	"k8s.io/klog"
+)
+
+const (
+	DefaultQPS   float32 = 5.0
+	DefaultBurst int     = 10
+)
+
+var ErrNotInCluster = errors.New("unable to load in-cluster configuration, KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT must be defined")
+
+// Config holds the common attributes that can be passed to a Kubernetes client on
+// initialization.
+type Config struct {
+	// Host must be a host string, a host:port pair, or a URL to the base of the apiserver.
+	// If a URL is given then the (optional) Path of that URL represents a prefix that must
+	// be appended to all request URIs used to access the apiserver. This allows a frontend
+	// proxy to easily relocate all of the apiserver endpoints.
+	Host string
+	// APIPath is a sub-path that points to an API root.
+	APIPath string
+
+	// ContentConfig contains settings that affect how objects are transformed when
+	// sent to the server.
+	ContentConfig
+
+	// Server requires Basic authentication
+	Username string
+	Password string
+
+	// Server requires Bearer authentication. This client will not attempt to use
+	// refresh tokens for an OAuth2 flow.
+	// TODO: demonstrate an OAuth2 compatible client.
+	BearerToken string
+
+	// Impersonate is the configuration that RESTClient will use for impersonation.
+	Impersonate ImpersonationConfig
+
+	// Server requires plugin-specified authentication.
+	AuthProvider *clientcmdapi.AuthProviderConfig
+
+	// Callback to persist config for AuthProvider.
+	AuthConfigPersister AuthProviderConfigPersister
+
+	// Exec-based authentication provider.
+	ExecProvider *clientcmdapi.ExecConfig
+
+	// TLSClientConfig contains settings to enable transport layer security
+	TLSClientConfig
+
+	// UserAgent is an optional field that specifies the caller of this request.
+	UserAgent string
+
+	// Transport may be used for custom HTTP behavior. This attribute may not
+	// be specified with the TLS client certificate options. Use WrapTransport
+	// for most client level operations.
+	Transport http.RoundTripper
+	// WrapTransport will be invoked for custom HTTP behavior after the underlying
+	// transport is initialized (either the transport created from TLSClientConfig,
+	// Transport, or http.DefaultTransport). The config may layer other RoundTrippers
+	// on top of the returned RoundTripper.
+	WrapTransport func(rt http.RoundTripper) http.RoundTripper
+
+	// QPS indicates the maximum QPS to the master from this client.
+	// If it's zero, the created RESTClient will use DefaultQPS: 5
+	QPS float32
+
+	// Maximum burst for throttle.
+	// If it's zero, the created RESTClient will use DefaultBurst: 10.
+	Burst int
+
+	// Rate limiter for limiting connections to the master from this client. If present overwrites QPS/Burst
+	RateLimiter flowcontrol.RateLimiter
+
+	// The maximum length of time to wait before giving up on a server request. A value of zero means no timeout.
+	Timeout time.Duration
+
+	// Dial specifies the dial function for creating unencrypted TCP connections.
+	Dial func(ctx context.Context, network, address string) (net.Conn, error)
+
+	// Version forces a specific version to be used (if registered)
+	// Do we need this?
+	// Version string
+}
+
+// ImpersonationConfig has all the available impersonation options
+type ImpersonationConfig struct {
+	// UserName is the username to impersonate on each request.
+	UserName string
+	// Groups are the groups to impersonate on each request.
+	Groups []string
+	// Extra is a free-form field which can be used to link some authentication information
+	// to authorization information.  This field allows you to impersonate it.
+	Extra map[string][]string
+}
+
+// +k8s:deepcopy-gen=true
+// TLSClientConfig contains settings to enable transport layer security
+type TLSClientConfig struct {
+	// Server should be accessed without verifying the TLS certificate. For testing only.
+	Insecure bool
+	// ServerName is passed to the server for SNI and is used in the client to check server
+	// certificates against. If ServerName is empty, the hostname used to contact the
+	// server is used.
+	ServerName string
+
+	// Server requires TLS client certificate authentication
+	CertFile string
+	// Server requires TLS client certificate authentication
+	KeyFile string
+	// Trusted root certificates for server
+	CAFile string
+
+	// CertData holds PEM-encoded bytes (typically read from a client certificate file).
+	// CertData takes precedence over CertFile
+	CertData []byte
+	// KeyData holds PEM-encoded bytes (typically read from a client certificate key file).
+	// KeyData takes precedence over KeyFile
+	KeyData []byte
+	// CAData holds PEM-encoded bytes (typically read from a root certificates bundle).
+	// CAData takes precedence over CAFile
+	CAData []byte
+}
+
+type ContentConfig struct {
+	// AcceptContentTypes specifies the types the client will accept and is optional.
+	// If not set, ContentType will be used to define the Accept header
+	AcceptContentTypes string
+	// ContentType specifies the wire format used to communicate with the server.
+	// This value will be set as the Accept header on requests made to the server, and
+	// as the default content type on any object sent to the server. If not set,
+	// "application/json" is used.
+	ContentType string
+	// GroupVersion is the API version to talk to. Must be provided when initializing
+	// a RESTClient directly. When initializing a Client, will be set with the default
+	// code version.
+	GroupVersion *schema.GroupVersion
+	// NegotiatedSerializer is used for obtaining encoders and decoders for multiple
+	// supported media types.
+	NegotiatedSerializer runtime.NegotiatedSerializer
+}
+
+// RESTClientFor returns a RESTClient that satisfies the requested attributes on a client Config
+// object. Note that a RESTClient may require fields that are optional when initializing a Client.
+// A RESTClient created by this method is generic - it expects to operate on an API that follows
+// the Kubernetes conventions, but may not be the Kubernetes API.
+func RESTClientFor(config *Config) (*RESTClient, error) {
+	if config.GroupVersion == nil {
+		return nil, fmt.Errorf("GroupVersion is required when initializing a RESTClient")
+	}
+	if config.NegotiatedSerializer == nil {
+		return nil, fmt.Errorf("NegotiatedSerializer is required when initializing a RESTClient")
+	}
+	qps := config.QPS
+	if config.QPS == 0.0 {
+		qps = DefaultQPS
+	}
+	burst := config.Burst
+	if config.Burst == 0 {
+		burst = DefaultBurst
+	}
+
+	baseURL, versionedAPIPath, err := defaultServerUrlFor(config)
+	if err != nil {
+		return nil, err
+	}
+
+	transport, err := TransportFor(config)
+	if err != nil {
+		return nil, err
+	}
+
+	var httpClient *http.Client
+	if transport != http.DefaultTransport {
+		httpClient = &http.Client{Transport: transport}
+		if config.Timeout > 0 {
+			httpClient.Timeout = config.Timeout
+		}
+	}
+
+	return NewRESTClient(baseURL, versionedAPIPath, config.ContentConfig, qps, burst, config.RateLimiter, httpClient)
+}
+
+// UnversionedRESTClientFor is the same as RESTClientFor, except that it allows
+// the config.Version to be empty.
+func UnversionedRESTClientFor(config *Config) (*RESTClient, error) {
+	if config.NegotiatedSerializer == nil {
+		return nil, fmt.Errorf("NegotiatedSerializer is required when initializing a RESTClient")
+	}
+
+	baseURL, versionedAPIPath, err := defaultServerUrlFor(config)
+	if err != nil {
+		return nil, err
+	}
+
+	transport, err := TransportFor(config)
+	if err != nil {
+		return nil, err
+	}
+
+	var httpClient *http.Client
+	if transport != http.DefaultTransport {
+		httpClient = &http.Client{Transport: transport}
+		if config.Timeout > 0 {
+			httpClient.Timeout = config.Timeout
+		}
+	}
+
+	versionConfig := config.ContentConfig
+	if versionConfig.GroupVersion == nil {
+		v := metav1.SchemeGroupVersion
+		versionConfig.GroupVersion = &v
+	}
+
+	return NewRESTClient(baseURL, versionedAPIPath, versionConfig, config.QPS, config.Burst, config.RateLimiter, httpClient)
+}
+
+// SetKubernetesDefaults sets default values on the provided client config for accessing the
+// Kubernetes API or returns an error if any of the defaults are impossible or invalid.
+func SetKubernetesDefaults(config *Config) error {
+	if len(config.UserAgent) == 0 {
+		config.UserAgent = DefaultKubernetesUserAgent()
+	}
+	return nil
+}
+
+// adjustCommit returns sufficient significant figures of the commit's git hash.
+func adjustCommit(c string) string {
+	if len(c) == 0 {
+		return "unknown"
+	}
+	if len(c) > 7 {
+		return c[:7]
+	}
+	return c
+}
+
+// adjustVersion strips "alpha", "beta", etc. from version in form
+// major.minor.patch-[alpha|beta|etc].
+func adjustVersion(v string) string {
+	if len(v) == 0 {
+		return "unknown"
+	}
+	seg := strings.SplitN(v, "-", 2)
+	return seg[0]
+}
+
+// adjustCommand returns the last component of the
+// OS-specific command path for use in User-Agent.
+func adjustCommand(p string) string {
+	// Unlikely, but better than returning "".
+	if len(p) == 0 {
+		return "unknown"
+	}
+	return filepath.Base(p)
+}
+
+// buildUserAgent builds a User-Agent string from given args.
+func buildUserAgent(command, version, os, arch, commit string) string {
+	return fmt.Sprintf(
+		"%s/%s (%s/%s) kubernetes/%s", command, version, os, arch, commit)
+}
+
+// DefaultKubernetesUserAgent returns a User-Agent string built from static global vars.
+func DefaultKubernetesUserAgent() string {
+	return buildUserAgent(
+		adjustCommand(os.Args[0]),
+		adjustVersion(version.Get().GitVersion),
+		gruntime.GOOS,
+		gruntime.GOARCH,
+		adjustCommit(version.Get().GitCommit))
+}
+
+// InClusterConfig returns a config object which uses the service account
+// kubernetes gives to pods. It's intended for clients that expect to be
+// running inside a pod running on kubernetes. It will return ErrNotInCluster
+// if called from a process not running in a kubernetes environment.
+func InClusterConfig() (*Config, error) {
+	const (
+		tokenFile  = "/var/run/secrets/kubernetes.io/serviceaccount/token"
+		rootCAFile = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
+	)
+	host, port := os.Getenv("KUBERNETES_SERVICE_HOST"), os.Getenv("KUBERNETES_SERVICE_PORT")
+	if len(host) == 0 || len(port) == 0 {
+		return nil, ErrNotInCluster
+	}
+
+	ts := NewCachedFileTokenSource(tokenFile)
+
+	if _, err := ts.Token(); err != nil {
+		return nil, err
+	}
+
+	tlsClientConfig := TLSClientConfig{}
+
+	if _, err := certutil.NewPool(rootCAFile); err != nil {
+		klog.Errorf("Expected to load root CA config from %s, but got err: %v", rootCAFile, err)
+	} else {
+		tlsClientConfig.CAFile = rootCAFile
+	}
+
+	return &Config{
+		// TODO: switch to using cluster DNS.
+		Host:            "https://" + net.JoinHostPort(host, port),
+		TLSClientConfig: tlsClientConfig,
+		WrapTransport:   TokenSourceWrapTransport(ts),
+	}, nil
+}
+
+// IsConfigTransportTLS returns true if and only if the provided
+// config will result in a protected connection to the server when it
+// is passed to restclient.RESTClientFor().  Use to determine when to
+// send credentials over the wire.
+//
+// Note: the Insecure flag is ignored when testing for this value, so MITM attacks are
+// still possible.
+func IsConfigTransportTLS(config Config) bool {
+	baseURL, _, err := defaultServerUrlFor(&config)
+	if err != nil {
+		return false
+	}
+	return baseURL.Scheme == "https"
+}
+
+// LoadTLSFiles copies the data from the CertFile, KeyFile, and CAFile fields into the CertData,
+// KeyData, and CAData fields, or returns an error. If no error is returned, all three fields are
+// either populated or were empty to start.
+func LoadTLSFiles(c *Config) error {
+	var err error
+	c.CAData, err = dataFromSliceOrFile(c.CAData, c.CAFile)
+	if err != nil {
+		return err
+	}
+
+	c.CertData, err = dataFromSliceOrFile(c.CertData, c.CertFile)
+	if err != nil {
+		return err
+	}
+
+	c.KeyData, err = dataFromSliceOrFile(c.KeyData, c.KeyFile)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// dataFromSliceOrFile returns data from the slice (if non-empty), or from the file,
+// or an error if an error occurred reading the file
+func dataFromSliceOrFile(data []byte, file string) ([]byte, error) {
+	if len(data) > 0 {
+		return data, nil
+	}
+	if len(file) > 0 {
+		fileData, err := ioutil.ReadFile(file)
+		if err != nil {
+			return []byte{}, err
+		}
+		return fileData, nil
+	}
+	return nil, nil
+}
+
+func AddUserAgent(config *Config, userAgent string) *Config {
+	fullUserAgent := DefaultKubernetesUserAgent() + "/" + userAgent
+	config.UserAgent = fullUserAgent
+	return config
+}
+
+// AnonymousClientConfig returns a copy of the given config with all user credentials (cert/key, bearer token, and username/password) removed
+func AnonymousClientConfig(config *Config) *Config {
+	// copy only known safe fields
+	return &Config{
+		Host:          config.Host,
+		APIPath:       config.APIPath,
+		ContentConfig: config.ContentConfig,
+		TLSClientConfig: TLSClientConfig{
+			Insecure:   config.Insecure,
+			ServerName: config.ServerName,
+			CAFile:     config.TLSClientConfig.CAFile,
+			CAData:     config.TLSClientConfig.CAData,
+		},
+		RateLimiter:   config.RateLimiter,
+		UserAgent:     config.UserAgent,
+		Transport:     config.Transport,
+		WrapTransport: config.WrapTransport,
+		QPS:           config.QPS,
+		Burst:         config.Burst,
+		Timeout:       config.Timeout,
+		Dial:          config.Dial,
+	}
+}
+
+// CopyConfig returns a copy of the given config
+func CopyConfig(config *Config) *Config {
+	return &Config{
+		Host:          config.Host,
+		APIPath:       config.APIPath,
+		ContentConfig: config.ContentConfig,
+		Username:      config.Username,
+		Password:      config.Password,
+		BearerToken:   config.BearerToken,
+		Impersonate: ImpersonationConfig{
+			Groups:   config.Impersonate.Groups,
+			Extra:    config.Impersonate.Extra,
+			UserName: config.Impersonate.UserName,
+		},
+		AuthProvider:        config.AuthProvider,
+		AuthConfigPersister: config.AuthConfigPersister,
+		ExecProvider:        config.ExecProvider,
+		TLSClientConfig: TLSClientConfig{
+			Insecure:   config.TLSClientConfig.Insecure,
+			ServerName: config.TLSClientConfig.ServerName,
+			CertFile:   config.TLSClientConfig.CertFile,
+			KeyFile:    config.TLSClientConfig.KeyFile,
+			CAFile:     config.TLSClientConfig.CAFile,
+			CertData:   config.TLSClientConfig.CertData,
+			KeyData:    config.TLSClientConfig.KeyData,
+			CAData:     config.TLSClientConfig.CAData,
+		},
+		UserAgent:     config.UserAgent,
+		Transport:     config.Transport,
+		WrapTransport: config.WrapTransport,
+		QPS:           config.QPS,
+		Burst:         config.Burst,
+		RateLimiter:   config.RateLimiter,
+		Timeout:       config.Timeout,
+		Dial:          config.Dial,
+	}
+}
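Putting the pieces of config.go together, a hedged sketch of constructing a client in-cluster; scheme.Codecs stands in for whatever serializer.CodecFactory the caller owns:

	cfg, err := rest.InClusterConfig()
	if err != nil {
		log.Fatal(err)
	}
	cfg.APIPath = "/api"
	cfg.GroupVersion = &schema.GroupVersion{Version: "v1"}
	cfg.NegotiatedSerializer = scheme.Codecs // assumption: the caller's codec factory
	client, err := rest.RESTClientFor(cfg)
	if err != nil {
		log.Fatal(err)
	}
	_ = client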
diff --git a/vendor/k8s.io/client-go/rest/plugin.go b/vendor/k8s.io/client-go/rest/plugin.go
new file mode 100644
index 0000000..83ef5ae
--- /dev/null
+++ b/vendor/k8s.io/client-go/rest/plugin.go
@@ -0,0 +1,73 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rest
+
+import (
+	"fmt"
+	"net/http"
+	"sync"
+
+	"k8s.io/klog"
+
+	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
+)
+
+type AuthProvider interface {
+	// WrapTransport allows the plugin to create a modified RoundTripper that
+	// attaches authorization headers (or other info) to requests.
+	WrapTransport(http.RoundTripper) http.RoundTripper
+	// Login allows the plugin to initialize its configuration. It must not
+	// require direct user interaction.
+	Login() error
+}
+
+// Factory generates an AuthProvider plugin.
+//  clusterAddress is the address of the current cluster.
+//  config is the initial configuration for this plugin.
+//  persister allows the plugin to save updated configuration.
+type Factory func(clusterAddress string, config map[string]string, persister AuthProviderConfigPersister) (AuthProvider, error)
+
+// AuthProviderConfigPersister allows a plugin to persist configuration info
+// for just itself.
+type AuthProviderConfigPersister interface {
+	Persist(map[string]string) error
+}
+
+// All registered auth provider plugins.
+var pluginsLock sync.Mutex
+var plugins = make(map[string]Factory)
+
+func RegisterAuthProviderPlugin(name string, plugin Factory) error {
+	pluginsLock.Lock()
+	defer pluginsLock.Unlock()
+	if _, found := plugins[name]; found {
+		return fmt.Errorf("Auth Provider Plugin %q was registered twice", name)
+	}
+	klog.V(4).Infof("Registered Auth Provider Plugin %q", name)
+	plugins[name] = plugin
+	return nil
+}
+
+func GetAuthProvider(clusterAddress string, apc *clientcmdapi.AuthProviderConfig, persister AuthProviderConfigPersister) (AuthProvider, error) {
+	pluginsLock.Lock()
+	defer pluginsLock.Unlock()
+	p, ok := plugins[apc.Name]
+	if !ok {
+		return nil, fmt.Errorf("No Auth Provider found for name %q", apc.Name)
+	}
+	return p(clusterAddress, apc.Config, persister)
+}
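Providers register themselves at init time; a minimal hedged sketch of a plugin satisfying the Factory signature above (all names hypothetical):

	type staticTokenProvider struct{ token string }

	// A real provider would inject p.token into request headers here.
	func (p *staticTokenProvider) WrapTransport(rt http.RoundTripper) http.RoundTripper { return rt }
	func (p *staticTokenProvider) Login() error                                         { return nil }

	func init() {
		rest.RegisterAuthProviderPlugin("static-token", func(addr string, cfg map[string]string, persister rest.AuthProviderConfigPersister) (rest.AuthProvider, error) {
			return &staticTokenProvider{token: cfg["token"]}, nil
		})
	}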
diff --git a/vendor/k8s.io/client-go/rest/request.go b/vendor/k8s.io/client-go/rest/request.go
new file mode 100644
index 0000000..64901fb
--- /dev/null
+++ b/vendor/k8s.io/client-go/rest/request.go
@@ -0,0 +1,1201 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rest
+
+import (
+	"bytes"
+	"context"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"mime"
+	"net/http"
+	"net/url"
+	"path"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+
+	"golang.org/x/net/http2"
+	"k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/runtime/serializer/streaming"
+	"k8s.io/apimachinery/pkg/util/net"
+	"k8s.io/apimachinery/pkg/watch"
+	restclientwatch "k8s.io/client-go/rest/watch"
+	"k8s.io/client-go/tools/metrics"
+	"k8s.io/client-go/util/flowcontrol"
+	"k8s.io/klog"
+)
+
+var (
+	// longThrottleLatency defines the threshold for logging throttled requests. All
+	// requests throttled for longer than longThrottleLatency will be logged.
+	longThrottleLatency = 50 * time.Millisecond
+)
+
+// HTTPClient is an interface for testing a request object.
+type HTTPClient interface {
+	Do(req *http.Request) (*http.Response, error)
+}
+
+// ResponseWrapper is an interface for getting a response.
+// The response may be accessed either as raw data (the whole output is read into memory) or as a stream.
+type ResponseWrapper interface {
+	DoRaw() ([]byte, error)
+	Stream() (io.ReadCloser, error)
+}
+
+// RequestConstructionError is returned when there's an error assembling a request.
+type RequestConstructionError struct {
+	Err error
+}
+
+// Error returns a textual description of 'r'.
+func (r *RequestConstructionError) Error() string {
+	return fmt.Sprintf("request construction error: '%v'", r.Err)
+}
+
+// Request allows for building up a request to a server in a chained fashion.
+// Any errors are stored until the end of your call, so you only have to
+// check once.
+type Request struct {
+	// required
+	client HTTPClient
+	verb   string
+
+	baseURL     *url.URL
+	content     ContentConfig
+	serializers Serializers
+
+	// generic components accessible via method setters
+	pathPrefix string
+	subpath    string
+	params     url.Values
+	headers    http.Header
+
+	// structural elements of the request that are part of the Kubernetes API conventions
+	namespace    string
+	namespaceSet bool
+	resource     string
+	resourceName string
+	subresource  string
+	timeout      time.Duration
+
+	// output
+	err  error
+	body io.Reader
+
+	// This is only used for per-request timeouts, deadlines, and cancellations.
+	ctx context.Context
+
+	backoffMgr BackoffManager
+	throttle   flowcontrol.RateLimiter
+}
+
+// NewRequest creates a new request helper object for accessing runtime.Objects on a server.
+func NewRequest(client HTTPClient, verb string, baseURL *url.URL, versionedAPIPath string, content ContentConfig, serializers Serializers, backoff BackoffManager, throttle flowcontrol.RateLimiter, timeout time.Duration) *Request {
+	if backoff == nil {
+		klog.V(2).Infof("Not implementing request backoff strategy.")
+		backoff = &NoBackoff{}
+	}
+
+	pathPrefix := "/"
+	if baseURL != nil {
+		pathPrefix = path.Join(pathPrefix, baseURL.Path)
+	}
+	r := &Request{
+		client:      client,
+		verb:        verb,
+		baseURL:     baseURL,
+		pathPrefix:  path.Join(pathPrefix, versionedAPIPath),
+		content:     content,
+		serializers: serializers,
+		backoffMgr:  backoff,
+		throttle:    throttle,
+		timeout:     timeout,
+	}
+	switch {
+	case len(content.AcceptContentTypes) > 0:
+		r.SetHeader("Accept", content.AcceptContentTypes)
+	case len(content.ContentType) > 0:
+		r.SetHeader("Accept", content.ContentType+", */*")
+	}
+	return r
+}
+
+// Prefix adds segments to the beginning of the request path. These items
+// will be placed before the optional Namespace, Resource, or Name sections.
+// Setting AbsPath will clear any previously set Prefix segments.
+func (r *Request) Prefix(segments ...string) *Request {
+	if r.err != nil {
+		return r
+	}
+	r.pathPrefix = path.Join(r.pathPrefix, path.Join(segments...))
+	return r
+}
+
+// Suffix appends segments to the end of the path. These items will be placed after the prefix and optional
+// Namespace, Resource, or Name sections.
+func (r *Request) Suffix(segments ...string) *Request {
+	if r.err != nil {
+		return r
+	}
+	r.subpath = path.Join(r.subpath, path.Join(segments...))
+	return r
+}
+
+// Resource sets the resource to access (<resource>/[ns/<namespace>/]<name>)
+func (r *Request) Resource(resource string) *Request {
+	if r.err != nil {
+		return r
+	}
+	if len(r.resource) != 0 {
+		r.err = fmt.Errorf("resource already set to %q, cannot change to %q", r.resource, resource)
+		return r
+	}
+	if msgs := IsValidPathSegmentName(resource); len(msgs) != 0 {
+		r.err = fmt.Errorf("invalid resource %q: %v", resource, msgs)
+		return r
+	}
+	r.resource = resource
+	return r
+}
+
+// BackOff sets the request's backoff manager to the one specified,
+// or defaults to the stub implementation if nil is provided
+func (r *Request) BackOff(manager BackoffManager) *Request {
+	if manager == nil {
+		r.backoffMgr = &NoBackoff{}
+		return r
+	}
+
+	r.backoffMgr = manager
+	return r
+}
+
+// Throttle receives a rate-limiter and sets or replaces an existing request limiter
+func (r *Request) Throttle(limiter flowcontrol.RateLimiter) *Request {
+	r.throttle = limiter
+	return r
+}
+
+// SubResource sets a sub-resource path which can be multiple segments after the resource
+// name but before the suffix.
+func (r *Request) SubResource(subresources ...string) *Request {
+	if r.err != nil {
+		return r
+	}
+	subresource := path.Join(subresources...)
+	if len(r.subresource) != 0 {
+		r.err = fmt.Errorf("subresource already set to %q, cannot change to %q", r.resource, subresource)
+		return r
+	}
+	for _, s := range subresources {
+		if msgs := IsValidPathSegmentName(s); len(msgs) != 0 {
+			r.err = fmt.Errorf("invalid subresource %q: %v", s, msgs)
+			return r
+		}
+	}
+	r.subresource = subresource
+	return r
+}
+
+// Name sets the name of a resource to access (<resource>/[ns/<namespace>/]<name>)
+func (r *Request) Name(resourceName string) *Request {
+	if r.err != nil {
+		return r
+	}
+	if len(resourceName) == 0 {
+		r.err = fmt.Errorf("resource name may not be empty")
+		return r
+	}
+	if len(r.resourceName) != 0 {
+		r.err = fmt.Errorf("resource name already set to %q, cannot change to %q", r.resourceName, resourceName)
+		return r
+	}
+	if msgs := IsValidPathSegmentName(resourceName); len(msgs) != 0 {
+		r.err = fmt.Errorf("invalid resource name %q: %v", resourceName, msgs)
+		return r
+	}
+	r.resourceName = resourceName
+	return r
+}
+
+// Namespace applies the namespace scope to a request (<resource>/[ns/<namespace>/]<name>)
+func (r *Request) Namespace(namespace string) *Request {
+	if r.err != nil {
+		return r
+	}
+	if r.namespaceSet {
+		r.err = fmt.Errorf("namespace already set to %q, cannot change to %q", r.namespace, namespace)
+		return r
+	}
+	if msgs := IsValidPathSegmentName(namespace); len(msgs) != 0 {
+		r.err = fmt.Errorf("invalid namespace %q: %v", namespace, msgs)
+		return r
+	}
+	r.namespaceSet = true
+	r.namespace = namespace
+	return r
+}
+
+// NamespaceIfScoped is a convenience function to set a namespace if scoped is true
+func (r *Request) NamespaceIfScoped(namespace string, scoped bool) *Request {
+	if scoped {
+		return r.Namespace(namespace)
+	}
+	return r
+}
+
+// AbsPath overwrites an existing path with the segments provided. Trailing slashes are preserved
+// when a single segment is passed.
+func (r *Request) AbsPath(segments ...string) *Request {
+	if r.err != nil {
+		return r
+	}
+	r.pathPrefix = path.Join(r.baseURL.Path, path.Join(segments...))
+	if len(segments) == 1 && (len(r.baseURL.Path) > 1 || len(segments[0]) > 1) && strings.HasSuffix(segments[0], "/") {
+		// preserve any trailing slashes for legacy behavior
+		r.pathPrefix += "/"
+	}
+	return r
+}
+
+// RequestURI overwrites existing path and parameters with the value of the provided server relative
+// URI.
+func (r *Request) RequestURI(uri string) *Request {
+	if r.err != nil {
+		return r
+	}
+	locator, err := url.Parse(uri)
+	if err != nil {
+		r.err = err
+		return r
+	}
+	r.pathPrefix = locator.Path
+	if len(locator.Query()) > 0 {
+		if r.params == nil {
+			r.params = make(url.Values)
+		}
+		for k, v := range locator.Query() {
+			r.params[k] = v
+		}
+	}
+	return r
+}
+
+// Param creates a query parameter with the given string value.
+func (r *Request) Param(paramName, s string) *Request {
+	if r.err != nil {
+		return r
+	}
+	return r.setParam(paramName, s)
+}
+
+// VersionedParams will take the provided object, serialize it to a map[string][]string using the
+// implicit RESTClient API version and the default parameter codec, and then add those as parameters
+// to the request. Use this to provide versioned query parameters from client libraries.
+// VersionedParams will not write query parameters that have omitempty set and are empty. If a
+// parameter has already been set it is appended to (Params and VersionedParams are additive).
+func (r *Request) VersionedParams(obj runtime.Object, codec runtime.ParameterCodec) *Request {
+	return r.SpecificallyVersionedParams(obj, codec, *r.content.GroupVersion)
+}
+
+func (r *Request) SpecificallyVersionedParams(obj runtime.Object, codec runtime.ParameterCodec, version schema.GroupVersion) *Request {
+	if r.err != nil {
+		return r
+	}
+	params, err := codec.EncodeParameters(obj, version)
+	if err != nil {
+		r.err = err
+		return r
+	}
+	for k, v := range params {
+		if r.params == nil {
+			r.params = make(url.Values)
+		}
+		r.params[k] = append(r.params[k], v...)
+	}
+	return r
+}
+
+func (r *Request) setParam(paramName, value string) *Request {
+	if r.params == nil {
+		r.params = make(url.Values)
+	}
+	r.params[paramName] = append(r.params[paramName], value)
+	return r
+}
+
+func (r *Request) SetHeader(key string, values ...string) *Request {
+	if r.headers == nil {
+		r.headers = http.Header{}
+	}
+	r.headers.Del(key)
+	for _, value := range values {
+		r.headers.Add(key, value)
+	}
+	return r
+}
+
+// Timeout makes the request use the given duration as an overall timeout for the
+// request. Additionally, if set, the value is passed as a "timeout" parameter in the URL.
+func (r *Request) Timeout(d time.Duration) *Request {
+	if r.err != nil {
+		return r
+	}
+	r.timeout = d
+	return r
+}
+
+// Body makes the request use obj as the body. Optional.
+// If obj is a string, try to read a file of that name.
+// If obj is a []byte, send it directly.
+// If obj is an io.Reader, use it directly.
+// If obj is a runtime.Object, marshal it correctly, and set Content-Type header.
+// If obj is a runtime.Object and nil, do nothing.
+// Otherwise, set an error.
+func (r *Request) Body(obj interface{}) *Request {
+	if r.err != nil {
+		return r
+	}
+	switch t := obj.(type) {
+	case string:
+		data, err := ioutil.ReadFile(t)
+		if err != nil {
+			r.err = err
+			return r
+		}
+		glogBody("Request Body", data)
+		r.body = bytes.NewReader(data)
+	case []byte:
+		glogBody("Request Body", t)
+		r.body = bytes.NewReader(t)
+	case io.Reader:
+		r.body = t
+	case runtime.Object:
+		// callers may pass typed interface pointers, therefore we must check nil with reflection
+		if reflect.ValueOf(t).IsNil() {
+			return r
+		}
+		data, err := runtime.Encode(r.serializers.Encoder, t)
+		if err != nil {
+			r.err = err
+			return r
+		}
+		glogBody("Request Body", data)
+		r.body = bytes.NewReader(data)
+		r.SetHeader("Content-Type", r.content.ContentType)
+	default:
+		r.err = fmt.Errorf("unknown type used for body: %+v", obj)
+	}
+	return r
+}
+
+// Context adds a context to the request. Contexts are only used for
+// timeouts, deadlines, and cancellations.
+func (r *Request) Context(ctx context.Context) *Request {
+	r.ctx = ctx
+	return r
+}
+
+// URL returns the current working URL.
+func (r *Request) URL() *url.URL {
+	p := r.pathPrefix
+	if r.namespaceSet && len(r.namespace) > 0 {
+		p = path.Join(p, "namespaces", r.namespace)
+	}
+	if len(r.resource) != 0 {
+		p = path.Join(p, strings.ToLower(r.resource))
+	}
+	// Join trims trailing slashes, so preserve r.pathPrefix's trailing slash for backwards compatibility if nothing was changed
+	if len(r.resourceName) != 0 || len(r.subpath) != 0 || len(r.subresource) != 0 {
+		p = path.Join(p, r.resourceName, r.subresource, r.subpath)
+	}
+
+	finalURL := &url.URL{}
+	if r.baseURL != nil {
+		*finalURL = *r.baseURL
+	}
+	finalURL.Path = p
+
+	query := url.Values{}
+	for key, values := range r.params {
+		for _, value := range values {
+			query.Add(key, value)
+		}
+	}
+
+	// timeout is handled specially here.
+	if r.timeout != 0 {
+		query.Set("timeout", r.timeout.String())
+	}
+	finalURL.RawQuery = query.Encode()
+	return finalURL
+}
+
+// finalURLTemplate is similar to URL(), but will make all specific parameter values equal
+// - instead of name or namespace, "{name}" and "{namespace}" will be used, and all query
+// parameters will be reset. This creates a copy of the url so as not to change the
+// underlying object.
+func (r Request) finalURLTemplate() url.URL {
+	newParams := url.Values{}
+	v := []string{"{value}"}
+	for k := range r.params {
+		newParams[k] = v
+	}
+	r.params = newParams
+	url := r.URL()
+	segments := strings.Split(r.URL().Path, "/")
+	groupIndex := 0
+	index := 0
+	if r.URL() != nil && r.baseURL != nil && strings.Contains(r.URL().Path, r.baseURL.Path) {
+		groupIndex += len(strings.Split(r.baseURL.Path, "/"))
+	}
+	if groupIndex >= len(segments) {
+		return *url
+	}
+
+	const CoreGroupPrefix = "api"
+	const NamedGroupPrefix = "apis"
+	isCoreGroup := segments[groupIndex] == CoreGroupPrefix
+	isNamedGroup := segments[groupIndex] == NamedGroupPrefix
+	if isCoreGroup {
+		// checking the case of core group with /api/v1/... format
+		index = groupIndex + 2
+	} else if isNamedGroup {
+		// checking the case of named group with /apis/apps/v1/... format
+		index = groupIndex + 3
+	} else {
+		// this should not happen, since the only two possibilities are /api... and /apis...;
+		// this is an outlet in case more API group prefixes are ever added:
+		// https://kubernetes.io/docs/concepts/overview/kubernetes-api/#api-groups
+		// if an unrecognized API group prefix is encountered, return {prefix} as url.Path
+		url.Path = "/{prefix}"
+		url.RawQuery = ""
+		return *url
+	}
+	switch {
+	// len(segments)-index == 1 is a bare resource (no name); nothing to replace.
+	case len(segments)-index == 2:
+		// /$RESOURCE/$NAME: replace $NAME with {name}
+		segments[index+1] = "{name}"
+	case len(segments)-index == 3:
+		if segments[index+2] == "finalize" || segments[index+2] == "status" {
+			// /$RESOURCE/$NAME/$SUBRESOURCE: replace $NAME with {name}
+			segments[index+1] = "{name}"
+		} else {
+			// /namespace/$NAMESPACE/$RESOURCE: replace $NAMESPACE with {namespace}
+			segments[index+1] = "{namespace}"
+		}
+	case len(segments)-index >= 4:
+		segments[index+1] = "{namespace}"
+		// /namespace/$NAMESPACE/$RESOURCE/$NAME: replace $NAMESPACE with {namespace},  $NAME with {name}
+		if segments[index+3] != "finalize" && segments[index+3] != "status" {
+			// /$RESOURCE/$NAME/$SUBRESOURCE: replace $NAME with {name}
+			segments[index+3] = "{name}"
+		}
+	}
+	url.Path = path.Join(segments...)
+	return *url
+}
+
+func (r *Request) tryThrottle() {
+	now := time.Now()
+	if r.throttle != nil {
+		r.throttle.Accept()
+	}
+	if latency := time.Since(now); latency > longThrottleLatency {
+		klog.V(4).Infof("Throttling request took %v, request: %s:%s", latency, r.verb, r.URL().String())
+	}
+}
+
+// Watch attempts to begin watching the requested location.
+// Returns a watch.Interface, or an error.
+func (r *Request) Watch() (watch.Interface, error) {
+	return r.WatchWithSpecificDecoders(
+		func(body io.ReadCloser) streaming.Decoder {
+			framer := r.serializers.Framer.NewFrameReader(body)
+			return streaming.NewDecoder(framer, r.serializers.StreamingSerializer)
+		},
+		r.serializers.Decoder,
+	)
+}
+
+// WatchWithSpecificDecoders attempts to begin watching the requested location with a *different* decoder.
+// You typically want one "standard" decoder for the outer watch event and a separate decoder for the embedded content.
+// Returns a watch.Interface, or an error.
+func (r *Request) WatchWithSpecificDecoders(wrapperDecoderFn func(io.ReadCloser) streaming.Decoder, embeddedDecoder runtime.Decoder) (watch.Interface, error) {
+	// We specifically don't want to rate limit watches, so we
+	// don't use r.throttle here.
+	if r.err != nil {
+		return nil, r.err
+	}
+	if r.serializers.Framer == nil {
+		return nil, fmt.Errorf("watching resources is not possible with this client (content-type: %s)", r.content.ContentType)
+	}
+
+	url := r.URL().String()
+	req, err := http.NewRequest(r.verb, url, r.body)
+	if err != nil {
+		return nil, err
+	}
+	if r.ctx != nil {
+		req = req.WithContext(r.ctx)
+	}
+	req.Header = r.headers
+	client := r.client
+	if client == nil {
+		client = http.DefaultClient
+	}
+	r.backoffMgr.Sleep(r.backoffMgr.CalculateBackoff(r.URL()))
+	resp, err := client.Do(req)
+	updateURLMetrics(r, resp, err)
+	if r.baseURL != nil {
+		if err != nil {
+			r.backoffMgr.UpdateBackoff(r.baseURL, err, 0)
+		} else {
+			r.backoffMgr.UpdateBackoff(r.baseURL, err, resp.StatusCode)
+		}
+	}
+	if err != nil {
+		// The watch stream mechanism handles many common partial data errors, so closed
+		// connections can be retried in many cases.
+		if net.IsProbableEOF(err) {
+			return watch.NewEmptyWatch(), nil
+		}
+		return nil, err
+	}
+	if resp.StatusCode != http.StatusOK {
+		defer resp.Body.Close()
+		if result := r.transformResponse(resp, req); result.err != nil {
+			return nil, result.err
+		}
+		return nil, fmt.Errorf("for request '%+v', got status: %v", url, resp.StatusCode)
+	}
+	wrapperDecoder := wrapperDecoderFn(resp.Body)
+	return watch.NewStreamWatcher(restclientwatch.NewDecoder(wrapperDecoder, embeddedDecoder)), nil
+}
+
+// updateURLMetrics is a convenience function for pushing metrics.
+// It also handles corner cases for incomplete/invalid request data.
+func updateURLMetrics(req *Request, resp *http.Response, err error) {
+	url := "none"
+	if req.baseURL != nil {
+		url = req.baseURL.Host
+	}
+
+	// Errors can be arbitrary strings. Unbound label cardinality is not suitable for a metric
+	// system so we just report them as `<error>`.
+	if err != nil {
+		metrics.RequestResult.Increment("<error>", req.verb, url)
+	} else {
+		// Metrics for response result codes
+		metrics.RequestResult.Increment(strconv.Itoa(resp.StatusCode), req.verb, url)
+	}
+}
+
+// Stream formats and executes the request, and offers streaming of the response.
+// Returns an io.ReadCloser that can be used to stream the response, or an error.
+// Any non-2xx http status code causes an error. If we get a non-2xx code, we try to convert the body into an APIStatus object.
+// If we can, we return that as an error. Otherwise, we create an error that lists the http status and the content of the response.
+func (r *Request) Stream() (io.ReadCloser, error) {
+	if r.err != nil {
+		return nil, r.err
+	}
+
+	r.tryThrottle()
+
+	url := r.URL().String()
+	req, err := http.NewRequest(r.verb, url, nil)
+	if err != nil {
+		return nil, err
+	}
+	if r.ctx != nil {
+		req = req.WithContext(r.ctx)
+	}
+	req.Header = r.headers
+	client := r.client
+	if client == nil {
+		client = http.DefaultClient
+	}
+	r.backoffMgr.Sleep(r.backoffMgr.CalculateBackoff(r.URL()))
+	resp, err := client.Do(req)
+	updateURLMetrics(r, resp, err)
+	if r.baseURL != nil {
+		if err != nil {
+			r.backoffMgr.UpdateBackoff(r.URL(), err, 0)
+		} else {
+			r.backoffMgr.UpdateBackoff(r.URL(), err, resp.StatusCode)
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	switch {
+	case (resp.StatusCode >= 200) && (resp.StatusCode < 300):
+		return resp.Body, nil
+
+	default:
+		// ensure we close the body before returning the error
+		defer resp.Body.Close()
+
+		result := r.transformResponse(resp, req)
+		err := result.Error()
+		if err == nil {
+			err = fmt.Errorf("%d while accessing %v: %s", result.statusCode, url, string(result.body))
+		}
+		return nil, err
+	}
+}
+
+// request connects to the server and invokes the provided function when a server response is
+// received. It handles retry behavior and up front validation of requests. It will invoke
+// fn at most once. It will return an error if a problem occurred prior to connecting to the
+// server - the provided function is responsible for handling server errors.
+func (r *Request) request(fn func(*http.Request, *http.Response)) error {
+	//Metrics for total request latency
+	start := time.Now()
+	defer func() {
+		metrics.RequestLatency.Observe(r.verb, r.finalURLTemplate(), time.Since(start))
+	}()
+
+	if r.err != nil {
+		klog.V(4).Infof("Error in request: %v", r.err)
+		return r.err
+	}
+
+	// TODO: added to catch programmer errors (invoking operations with an object with an empty namespace)
+	if (r.verb == "GET" || r.verb == "PUT" || r.verb == "DELETE") && r.namespaceSet && len(r.resourceName) > 0 && len(r.namespace) == 0 {
+		return fmt.Errorf("an empty namespace may not be set when a resource name is provided")
+	}
+	if (r.verb == "POST") && r.namespaceSet && len(r.namespace) == 0 {
+		return fmt.Errorf("an empty namespace may not be set during creation")
+	}
+
+	client := r.client
+	if client == nil {
+		client = http.DefaultClient
+	}
+
+	// Right now we make about ten retry attempts if we get a Retry-After response.
+	maxRetries := 10
+	retries := 0
+	for {
+		url := r.URL().String()
+		req, err := http.NewRequest(r.verb, url, r.body)
+		if err != nil {
+			return err
+		}
+		if r.timeout > 0 {
+			if r.ctx == nil {
+				r.ctx = context.Background()
+			}
+			var cancelFn context.CancelFunc
+			r.ctx, cancelFn = context.WithTimeout(r.ctx, r.timeout)
+			defer cancelFn()
+		}
+		if r.ctx != nil {
+			req = req.WithContext(r.ctx)
+		}
+		req.Header = r.headers
+
+		r.backoffMgr.Sleep(r.backoffMgr.CalculateBackoff(r.URL()))
+		if retries > 0 {
+			// We are retrying a request that we have already sent to the
+			// apiserver at least once before.
+			// This request should also be throttled with the client-internal throttler.
+			r.tryThrottle()
+		}
+		resp, err := client.Do(req)
+		updateURLMetrics(r, resp, err)
+		if err != nil {
+			r.backoffMgr.UpdateBackoff(r.URL(), err, 0)
+		} else {
+			r.backoffMgr.UpdateBackoff(r.URL(), err, resp.StatusCode)
+		}
+		if err != nil {
+			// "Connection reset by peer" is usually a transient error.
+			// Thus in case of "GET" operations, we simply retry it.
+			// We are not automatically retrying "write" operations, as
+			// they are not idempotent.
+			if !net.IsConnectionReset(err) || r.verb != "GET" {
+				return err
+			}
+			// For the purpose of retry, we set the artificial "retry-after" response.
+			// TODO: Should we clean the original response if it exists?
+			resp = &http.Response{
+				StatusCode: http.StatusInternalServerError,
+				Header:     http.Header{"Retry-After": []string{"1"}},
+				Body:       ioutil.NopCloser(bytes.NewReader([]byte{})),
+			}
+		}
+
+		done := func() bool {
+			// Ensure the response body is fully read and closed
+			// before we reconnect, so that we reuse the same TCP
+			// connection.
+			defer func() {
+				const maxBodySlurpSize = 2 << 10
+				if resp.ContentLength <= maxBodySlurpSize {
+					io.Copy(ioutil.Discard, &io.LimitedReader{R: resp.Body, N: maxBodySlurpSize})
+				}
+				resp.Body.Close()
+			}()
+
+			retries++
+			if seconds, wait := checkWait(resp); wait && retries < maxRetries {
+				if seeker, ok := r.body.(io.Seeker); ok && r.body != nil {
+					_, err := seeker.Seek(0, 0)
+					if err != nil {
+						klog.V(4).Infof("Could not retry request, can't Seek() back to beginning of body for %T", r.body)
+						fn(req, resp)
+						return true
+					}
+				}
+
+				klog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v", seconds, retries, url)
+				r.backoffMgr.Sleep(time.Duration(seconds) * time.Second)
+				return false
+			}
+			fn(req, resp)
+			return true
+		}()
+		if done {
+			return nil
+		}
+	}
+}
+
+// Do formats and executes the request. Returns a Result object for easy response
+// processing.
+//
+// Error type:
+//  * If the request can't be constructed, or an error happened earlier while building its
+//    arguments: *RequestConstructionError
+//  * If the server responds with a status: *errors.StatusError or *errors.UnexpectedObjectError
+//  * http.Client.Do errors are returned directly.
+func (r *Request) Do() Result {
+	r.tryThrottle()
+
+	var result Result
+	err := r.request(func(req *http.Request, resp *http.Response) {
+		result = r.transformResponse(resp, req)
+	})
+	if err != nil {
+		return Result{err: err}
+	}
+	return result
+}
+
+// DoRaw executes the request but does not process the response body.
+func (r *Request) DoRaw() ([]byte, error) {
+	r.tryThrottle()
+
+	var result Result
+	err := r.request(func(req *http.Request, resp *http.Response) {
+		result.body, result.err = ioutil.ReadAll(resp.Body)
+		glogBody("Response Body", result.body)
+		if resp.StatusCode < http.StatusOK || resp.StatusCode > http.StatusPartialContent {
+			result.err = r.transformUnstructuredResponseError(resp, req, result.body)
+		}
+	})
+	if err != nil {
+		return nil, err
+	}
+	return result.body, result.err
+}
+
+// transformResponse converts an API response into a structured API object
+func (r *Request) transformResponse(resp *http.Response, req *http.Request) Result {
+	var body []byte
+	if resp.Body != nil {
+		data, err := ioutil.ReadAll(resp.Body)
+		switch err.(type) {
+		case nil:
+			body = data
+		case http2.StreamError:
+			// This is trying to catch the scenario that the server may close the connection when sending the
+			// response body. This can be caused by server timeout due to a slow network connection.
+			// TODO: Add test for this. Steps may be:
+			// 1. client-go (or kubectl) sends a GET request.
+			// 2. Apiserver sends back the headers and then part of the body
+			// 3. Apiserver closes connection.
+			// 4. client-go should catch this and return an error.
+			klog.V(2).Infof("Stream error %#v when reading response body, may be caused by closed connection.", err)
+			streamErr := fmt.Errorf("Stream error %#v when reading response body, may be caused by closed connection. Please retry.", err)
+			return Result{
+				err: streamErr,
+			}
+		default:
+			klog.Errorf("Unexpected error when reading response body: %#v", err)
+			unexpectedErr := fmt.Errorf("Unexpected error %#v when reading response body. Please retry.", err)
+			return Result{
+				err: unexpectedErr,
+			}
+		}
+	}
+
+	glogBody("Response Body", body)
+
+	// verify the content type is accurate
+	contentType := resp.Header.Get("Content-Type")
+	decoder := r.serializers.Decoder
+	if len(contentType) > 0 && (decoder == nil || (len(r.content.ContentType) > 0 && contentType != r.content.ContentType)) {
+		mediaType, params, err := mime.ParseMediaType(contentType)
+		if err != nil {
+			return Result{err: errors.NewInternalError(err)}
+		}
+		decoder, err = r.serializers.RenegotiatedDecoder(mediaType, params)
+		if err != nil {
+			// if we fail to negotiate a decoder, treat this as an unstructured error
+			switch {
+			case resp.StatusCode == http.StatusSwitchingProtocols:
+				// no-op, we've been upgraded
+			case resp.StatusCode < http.StatusOK || resp.StatusCode > http.StatusPartialContent:
+				return Result{err: r.transformUnstructuredResponseError(resp, req, body)}
+			}
+			return Result{
+				body:        body,
+				contentType: contentType,
+				statusCode:  resp.StatusCode,
+			}
+		}
+	}
+
+	switch {
+	case resp.StatusCode == http.StatusSwitchingProtocols:
+		// no-op, we've been upgraded
+	case resp.StatusCode < http.StatusOK || resp.StatusCode > http.StatusPartialContent:
+		// calculate an unstructured error from the response which the Result object may use if the caller
+		// did not return a structured error.
+		retryAfter, _ := retryAfterSeconds(resp)
+		err := r.newUnstructuredResponseError(body, isTextResponse(resp), resp.StatusCode, req.Method, retryAfter)
+		return Result{
+			body:        body,
+			contentType: contentType,
+			statusCode:  resp.StatusCode,
+			decoder:     decoder,
+			err:         err,
+		}
+	}
+
+	return Result{
+		body:        body,
+		contentType: contentType,
+		statusCode:  resp.StatusCode,
+		decoder:     decoder,
+	}
+}
+
+// truncateBody decides if the body should be truncated, based on the klog verbosity.
+func truncateBody(body string) string {
+	max := 0
+	switch {
+	case bool(klog.V(10)):
+		return body
+	case bool(klog.V(9)):
+		max = 10240
+	case bool(klog.V(8)):
+		max = 1024
+	}
+
+	if len(body) <= max {
+		return body
+	}
+
+	return body[:max] + fmt.Sprintf(" [truncated %d chars]", len(body)-max)
+}
+
+// glogBody logs a body output that could be either JSON or protobuf. It explicitly guards against
+// allocating a new string for the body output unless necessary. Uses a simple heuristic to determine
+// whether the body is printable.
+func glogBody(prefix string, body []byte) {
+	if klog.V(8) {
+		if bytes.IndexFunc(body, func(r rune) bool {
+			return r < 0x0a
+		}) != -1 {
+			klog.Infof("%s:\n%s", prefix, truncateBody(hex.Dump(body)))
+		} else {
+			klog.Infof("%s: %s", prefix, truncateBody(string(body)))
+		}
+	}
+}
+
+// maxUnstructuredResponseTextBytes is an upper bound on how much output to include in the unstructured error.
+const maxUnstructuredResponseTextBytes = 2048
+
+// transformUnstructuredResponseError handles an error from the server that is not in a structured form.
+// It is expected to transform any response that is not recognizable as a clear server-sent error from the
+// K8S API using the information provided with the request. In practice, HTTP proxies and client libraries
+// introduce a level of uncertainty into the responses returned by servers, which in common use results in
+// unexpected responses. The rough structure is:
+//
+// 1. Assume the server sends you something sane - JSON + well defined error objects + proper codes
+//    - this is the happy path
+//    - when you get this output, trust what the server sends
+// 2. Guard against empty fields / bodies in received JSON and attempt to cull sufficient info from them to
+//    generate a reasonable facsimile of the original failure.
+//    - Be sure to use a distinct error type or flag that allows a client to distinguish between this and error 1 above
+// 3. Handle true disconnect failures / completely malformed data by moving up to a more generic client error
+// 4. Distinguish between various connection failures like SSL certificates, timeouts, proxy errors, unexpected
+//    initial contact, the presence of mismatched body contents from posted content types
+//    - Give these a separate distinct error type and capture as much as possible of the original message
+//
+// TODO: introduce transformation of generic http.Client.Do() errors that separates 4.
+func (r *Request) transformUnstructuredResponseError(resp *http.Response, req *http.Request, body []byte) error {
+	if body == nil && resp.Body != nil {
+		if data, err := ioutil.ReadAll(&io.LimitedReader{R: resp.Body, N: maxUnstructuredResponseTextBytes}); err == nil {
+			body = data
+		}
+	}
+	retryAfter, _ := retryAfterSeconds(resp)
+	return r.newUnstructuredResponseError(body, isTextResponse(resp), resp.StatusCode, req.Method, retryAfter)
+}
+
+// newUnstructuredResponseError instantiates the appropriate generic error for the provided input. It also logs the body.
+func (r *Request) newUnstructuredResponseError(body []byte, isTextResponse bool, statusCode int, method string, retryAfter int) error {
+	// cap the amount of output we create
+	if len(body) > maxUnstructuredResponseTextBytes {
+		body = body[:maxUnstructuredResponseTextBytes]
+	}
+
+	message := "unknown"
+	if isTextResponse {
+		message = strings.TrimSpace(string(body))
+	}
+	var groupResource schema.GroupResource
+	if len(r.resource) > 0 {
+		groupResource.Group = r.content.GroupVersion.Group
+		groupResource.Resource = r.resource
+	}
+	return errors.NewGenericServerResponse(
+		statusCode,
+		method,
+		groupResource,
+		r.resourceName,
+		message,
+		retryAfter,
+		true,
+	)
+}
+
+// isTextResponse returns true if the response appears to be a textual media type.
+func isTextResponse(resp *http.Response) bool {
+	contentType := resp.Header.Get("Content-Type")
+	if len(contentType) == 0 {
+		return true
+	}
+	media, _, err := mime.ParseMediaType(contentType)
+	if err != nil {
+		return false
+	}
+	return strings.HasPrefix(media, "text/")
+}
+
+// checkWait returns true along with a number of seconds if the server instructed us to wait
+// before retrying.
+func checkWait(resp *http.Response) (int, bool) {
+	switch r := resp.StatusCode; {
+	// 429 or any 5xx status code can trigger a wait
+	case r == http.StatusTooManyRequests, r >= 500:
+	default:
+		return 0, false
+	}
+	i, ok := retryAfterSeconds(resp)
+	return i, ok
+}
+
+// retryAfterSeconds returns the value of the Retry-After header and true, or 0 and false if
+// the header was missing or not a valid number.
+func retryAfterSeconds(resp *http.Response) (int, bool) {
+	if h := resp.Header.Get("Retry-After"); len(h) > 0 {
+		if i, err := strconv.Atoi(h); err == nil {
+			return i, true
+		}
+	}
+	return 0, false
+}
+
+// Result contains the result of calling Request.Do().
+type Result struct {
+	body        []byte
+	contentType string
+	err         error
+	statusCode  int
+
+	decoder runtime.Decoder
+}
+
+// Raw returns the raw result.
+func (r Result) Raw() ([]byte, error) {
+	return r.body, r.err
+}
+
+// Get returns the result as an object, which means it passes through the decoder.
+// If the returned object is of type Status and has .Status != StatusSuccess, the
+// additional information in Status will be used to enrich the error.
+func (r Result) Get() (runtime.Object, error) {
+	if r.err != nil {
+		// Check whether the result has a Status object in the body and prefer that.
+		return nil, r.Error()
+	}
+	if r.decoder == nil {
+		return nil, fmt.Errorf("serializer for %s doesn't exist", r.contentType)
+	}
+
+	// decode, but if the result is Status return that as an error instead.
+	out, _, err := r.decoder.Decode(r.body, nil, nil)
+	if err != nil {
+		return nil, err
+	}
+	switch t := out.(type) {
+	case *metav1.Status:
+		// any status besides StatusSuccess is considered an error.
+		if t.Status != metav1.StatusSuccess {
+			return nil, errors.FromObject(t)
+		}
+	}
+	return out, nil
+}
+
+// StatusCode returns the HTTP status code of the response. (Only valid if no
+// error was returned.)
+func (r Result) StatusCode(statusCode *int) Result {
+	*statusCode = r.statusCode
+	return r
+}
+
+// Into stores the result into obj, if possible. If obj is nil it is ignored.
+// If the returned object is of type Status and has .Status != StatusSuccess, the
+// additional information in Status will be used to enrich the error.
+func (r Result) Into(obj runtime.Object) error {
+	if r.err != nil {
+		// Check whether the result has a Status object in the body and prefer that.
+		return r.Error()
+	}
+	if r.decoder == nil {
+		return fmt.Errorf("serializer for %s doesn't exist", r.contentType)
+	}
+	if len(r.body) == 0 {
+		return fmt.Errorf("0-length response")
+	}
+
+	out, _, err := r.decoder.Decode(r.body, nil, obj)
+	if err != nil || out == obj {
+		return err
+	}
+	// if a different object is returned, see if it is Status and avoid double decoding
+	// the object.
+	switch t := out.(type) {
+	case *metav1.Status:
+		// any status besides StatusSuccess is considered an error.
+		if t.Status != metav1.StatusSuccess {
+			return errors.FromObject(t)
+		}
+	}
+	return nil
+}
+
+// WasCreated updates the provided bool pointer to whether the server returned
+// 201 created or a different response.
+func (r Result) WasCreated(wasCreated *bool) Result {
+	*wasCreated = r.statusCode == http.StatusCreated
+	return r
+}
+
+// Error returns the error executing the request, nil if no error occurred.
+// If the returned object is of type Status and has Status != StatusSuccess, the
+// additional information in Status will be used to enrich the error.
+// See the Request.Do() comment for what errors you might get.
+func (r Result) Error() error {
+	// if we have received an unexpected server error, and we have a body and decoder, we can try to extract
+	// a Status object.
+	if r.err == nil || !errors.IsUnexpectedServerError(r.err) || len(r.body) == 0 || r.decoder == nil {
+		return r.err
+	}
+
+	// attempt to convert the body into a Status object
+	// to be backwards compatible with old servers that do not return a version, default to "v1"
+	out, _, err := r.decoder.Decode(r.body, &schema.GroupVersionKind{Version: "v1"}, nil)
+	if err != nil {
+		klog.V(5).Infof("body was not decodable (unable to check for Status): %v", err)
+		return r.err
+	}
+	switch t := out.(type) {
+	case *metav1.Status:
+		// because we default the kind, we *must* check for StatusFailure
+		if t.Status == metav1.StatusFailure {
+			return errors.FromObject(t)
+		}
+	}
+	return r.err
+}
+
+// NameMayNotBe specifies strings that cannot be used as names specified as path segments (like the REST API or etcd store)
+var NameMayNotBe = []string{".", ".."}
+
+// NameMayNotContain specifies substrings that cannot be used in names specified as path segments (like the REST API or etcd store)
+var NameMayNotContain = []string{"/", "%"}
+
+// IsValidPathSegmentName validates the name can be safely encoded as a path segment
+func IsValidPathSegmentName(name string) []string {
+	for _, illegalName := range NameMayNotBe {
+		if name == illegalName {
+			return []string{fmt.Sprintf(`may not be '%s'`, illegalName)}
+		}
+	}
+
+	var errors []string
+	for _, illegalContent := range NameMayNotContain {
+		if strings.Contains(name, illegalContent) {
+			errors = append(errors, fmt.Sprintf(`may not contain '%s'`, illegalContent))
+		}
+	}
+
+	return errors
+}
+
+// IsValidPathSegmentPrefix validates the name can be used as a prefix for a name which will be encoded as a path segment
+// It does not check for exact matches with disallowed names, since an arbitrary suffix might make the name valid
+func IsValidPathSegmentPrefix(name string) []string {
+	var errors []string
+	for _, illegalContent := range NameMayNotContain {
+		if strings.Contains(name, illegalContent) {
+			errors = append(errors, fmt.Sprintf(`may not contain '%s'`, illegalContent))
+		}
+	}
+
+	return errors
+}
+
+// ValidatePathSegmentName validates the name can be safely encoded as a path segment
+func ValidatePathSegmentName(name string, prefix bool) []string {
+	if prefix {
+		return IsValidPathSegmentPrefix(name)
+	} else {
+		return IsValidPathSegmentName(name)
+	}
+}
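+
+// Example (illustrative sketch; assumes a configured *RESTClient named c and
+// the Pod type from k8s.io/api/core/v1, neither of which is defined here):
+// the fluent builder defers all error checking to the terminal Do call.
+//
+//	pod := &v1.Pod{}
+//	err := c.Get().
+//		Namespace("default").
+//		Resource("pods").
+//		Name("example").
+//		Do().
+//		Into(pod)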
diff --git a/vendor/k8s.io/client-go/rest/token_source.go b/vendor/k8s.io/client-go/rest/token_source.go
new file mode 100644
index 0000000..c251b5e
--- /dev/null
+++ b/vendor/k8s.io/client-go/rest/token_source.go
@@ -0,0 +1,140 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rest
+
+import (
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"strings"
+	"sync"
+	"time"
+
+	"golang.org/x/oauth2"
+	"k8s.io/klog"
+)
+
+// TokenSourceWrapTransport returns a WrapTransport that injects bearer token
+// authentication from an oauth2.TokenSource.
+func TokenSourceWrapTransport(ts oauth2.TokenSource) func(http.RoundTripper) http.RoundTripper {
+	return func(rt http.RoundTripper) http.RoundTripper {
+		return &tokenSourceTransport{
+			base: rt,
+			ort: &oauth2.Transport{
+				Source: ts,
+				Base:   rt,
+			},
+		}
+	}
+}
+
+// NewCachedFileTokenSource returns an oauth2.TokenSource that reads a token
+// from a file at the specified path and periodically reloads it.
+func NewCachedFileTokenSource(path string) oauth2.TokenSource {
+	return &cachingTokenSource{
+		now:    time.Now,
+		leeway: 1 * time.Minute,
+		base: &fileTokenSource{
+			path: path,
+			// This period was picked because it is half of the minimum validity
+			// duration for a token provisioned by the TokenRequest API. This is
+			// unsophisticated and should induce rotation at a frequency that should
+			// work with the token volume source.
+			period: 5 * time.Minute,
+		},
+	}
+}
+
+type tokenSourceTransport struct {
+	base http.RoundTripper
+	ort  http.RoundTripper
+}
+
+func (tst *tokenSourceTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+	// This is to allow --token to override other bearer token providers.
+	if req.Header.Get("Authorization") != "" {
+		return tst.base.RoundTrip(req)
+	}
+	return tst.ort.RoundTrip(req)
+}
+
+type fileTokenSource struct {
+	path   string
+	period time.Duration
+}
+
+var _ = oauth2.TokenSource(&fileTokenSource{})
+
+func (ts *fileTokenSource) Token() (*oauth2.Token, error) {
+	tokb, err := ioutil.ReadFile(ts.path)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read token file %q: %v", ts.path, err)
+	}
+	tok := strings.TrimSpace(string(tokb))
+	if len(tok) == 0 {
+		return nil, fmt.Errorf("read empty token from file %q", ts.path)
+	}
+
+	return &oauth2.Token{
+		AccessToken: tok,
+		Expiry:      time.Now().Add(ts.period),
+	}, nil
+}
+
+type cachingTokenSource struct {
+	base   oauth2.TokenSource
+	leeway time.Duration
+
+	sync.RWMutex
+	tok *oauth2.Token
+
+	// for testing
+	now func() time.Time
+}
+
+var _ = oauth2.TokenSource(&cachingTokenSource{})
+
+func (ts *cachingTokenSource) Token() (*oauth2.Token, error) {
+	now := ts.now()
+	// fast path
+	ts.RLock()
+	tok := ts.tok
+	ts.RUnlock()
+
+	if tok != nil && tok.Expiry.Add(-1*ts.leeway).After(now) {
+		return tok, nil
+	}
+
+	// slow path
+	ts.Lock()
+	defer ts.Unlock()
+	if tok := ts.tok; tok != nil && tok.Expiry.Add(-1*ts.leeway).After(now) {
+		return tok, nil
+	}
+
+	tok, err := ts.base.Token()
+	if err != nil {
+		if ts.tok == nil {
+			return nil, err
+		}
+		klog.Errorf("Unable to rotate token: %v", err)
+		return ts.tok, nil
+	}
+
+	ts.tok = tok
+	return tok, nil
+}
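+
+// Example (illustrative sketch; the token path is the conventional service
+// account mount and is an assumption here): wiring the cached file token
+// source into a Config so the bearer token is re-read as it rotates.
+//
+//	ts := NewCachedFileTokenSource("/var/run/secrets/kubernetes.io/serviceaccount/token")
+//	config.WrapTransport = TokenSourceWrapTransport(ts)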
diff --git a/vendor/k8s.io/client-go/rest/transport.go b/vendor/k8s.io/client-go/rest/transport.go
new file mode 100644
index 0000000..25c1801
--- /dev/null
+++ b/vendor/k8s.io/client-go/rest/transport.go
@@ -0,0 +1,116 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rest
+
+import (
+	"crypto/tls"
+	"errors"
+	"net/http"
+
+	"k8s.io/client-go/plugin/pkg/client/auth/exec"
+	"k8s.io/client-go/transport"
+)
+
+// TLSConfigFor returns a tls.Config that will provide the transport level security defined
+// by the provided Config. Will return nil if no transport level security is requested.
+func TLSConfigFor(config *Config) (*tls.Config, error) {
+	cfg, err := config.TransportConfig()
+	if err != nil {
+		return nil, err
+	}
+	return transport.TLSConfigFor(cfg)
+}
+
+// TransportFor returns an http.RoundTripper that will provide the authentication
+// or transport level security defined by the provided Config. Will return the
+// default http.DefaultTransport if no special case behavior is needed.
+func TransportFor(config *Config) (http.RoundTripper, error) {
+	cfg, err := config.TransportConfig()
+	if err != nil {
+		return nil, err
+	}
+	return transport.New(cfg)
+}
+
+// HTTPWrappersForConfig wraps a round tripper with any relevant layered behavior from the
+// config. Exposed to allow more clients that need HTTP-like behavior but then must hijack
+// the underlying connection (like WebSocket or HTTP2 clients). Pure HTTP clients should use
+// the higher level TransportFor or RESTClientFor methods.
+func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTripper, error) {
+	cfg, err := config.TransportConfig()
+	if err != nil {
+		return nil, err
+	}
+	return transport.HTTPWrappersForConfig(cfg, rt)
+}
+
+// TransportConfig converts a client config to an appropriate transport config.
+func (c *Config) TransportConfig() (*transport.Config, error) {
+	conf := &transport.Config{
+		UserAgent:     c.UserAgent,
+		Transport:     c.Transport,
+		WrapTransport: c.WrapTransport,
+		TLS: transport.TLSConfig{
+			Insecure:   c.Insecure,
+			ServerName: c.ServerName,
+			CAFile:     c.CAFile,
+			CAData:     c.CAData,
+			CertFile:   c.CertFile,
+			CertData:   c.CertData,
+			KeyFile:    c.KeyFile,
+			KeyData:    c.KeyData,
+		},
+		Username:    c.Username,
+		Password:    c.Password,
+		BearerToken: c.BearerToken,
+		Impersonate: transport.ImpersonationConfig{
+			UserName: c.Impersonate.UserName,
+			Groups:   c.Impersonate.Groups,
+			Extra:    c.Impersonate.Extra,
+		},
+		Dial: c.Dial,
+	}
+
+	if c.ExecProvider != nil && c.AuthProvider != nil {
+		return nil, errors.New("execProvider and authProvider cannot be used in combination")
+	}
+
+	if c.ExecProvider != nil {
+		provider, err := exec.GetAuthenticator(c.ExecProvider)
+		if err != nil {
+			return nil, err
+		}
+		if err := provider.UpdateTransportConfig(conf); err != nil {
+			return nil, err
+		}
+	}
+	if c.AuthProvider != nil {
+		provider, err := GetAuthProvider(c.Host, c.AuthProvider, c.AuthConfigPersister)
+		if err != nil {
+			return nil, err
+		}
+		wt := conf.WrapTransport
+		if wt != nil {
+			conf.WrapTransport = func(rt http.RoundTripper) http.RoundTripper {
+				return provider.WrapTransport(wt(rt))
+			}
+		} else {
+			conf.WrapTransport = provider.WrapTransport
+		}
+	}
+	return conf, nil
+}
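+
+// Example (illustrative sketch; the host and CA file are assumptions): building
+// a bare http.RoundTripper from a Config when the full REST client is not needed.
+//
+//	cfg := &Config{
+//		Host:            "https://cluster.example:6443",
+//		TLSClientConfig: TLSClientConfig{CAFile: "/etc/kubernetes/ca.crt"},
+//	}
+//	rt, err := TransportFor(cfg)
+//	if err != nil {
+//		// handle error
+//	}
+//	httpClient := &http.Client{Transport: rt}
+//	_ = httpClient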
diff --git a/vendor/k8s.io/client-go/rest/url_utils.go b/vendor/k8s.io/client-go/rest/url_utils.go
new file mode 100644
index 0000000..a56d183
--- /dev/null
+++ b/vendor/k8s.io/client-go/rest/url_utils.go
@@ -0,0 +1,97 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rest
+
+import (
+	"fmt"
+	"net/url"
+	"path"
+
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// DefaultServerURL converts a host, host:port, or URL string to the default base server API path
+// to use with a Client at a given API version following the standard conventions for a
+// Kubernetes API.
+func DefaultServerURL(host, apiPath string, groupVersion schema.GroupVersion, defaultTLS bool) (*url.URL, string, error) {
+	if host == "" {
+		return nil, "", fmt.Errorf("host must be a URL or a host:port pair")
+	}
+	base := host
+	hostURL, err := url.Parse(base)
+	if err != nil || hostURL.Scheme == "" || hostURL.Host == "" {
+		scheme := "http://"
+		if defaultTLS {
+			scheme = "https://"
+		}
+		hostURL, err = url.Parse(scheme + base)
+		if err != nil {
+			return nil, "", err
+		}
+		if hostURL.Path != "" && hostURL.Path != "/" {
+			return nil, "", fmt.Errorf("host must be a URL or a host:port pair: %q", base)
+		}
+	}
+
+	// hostURL.Path is optional; a non-empty Path is treated as a prefix that is to be applied to
+	// all URIs used to access the host. this is useful when there's a proxy in front of the
+	// apiserver that has relocated the apiserver endpoints, forwarding all requests from, for
+	// example, /a/b/c to the apiserver. in this case the Path should be /a/b/c.
+	//
+	// if running without a frontend proxy (that changes the location of the apiserver), then
+	// hostURL.Path should be blank.
+	//
+	// versionedAPIPath, a path relative to baseURL.Path, points to a versioned API base
+	versionedAPIPath := DefaultVersionedAPIPath(apiPath, groupVersion)
+
+	return hostURL, versionedAPIPath, nil
+}
+
+// DefaultVersionedAPIPath constructs the default path for the given group version, assuming the given
+// API path, following the standard conventions of the Kubernetes API.
+func DefaultVersionedAPIPath(apiPath string, groupVersion schema.GroupVersion) string {
+	versionedAPIPath := path.Join("/", apiPath)
+
+	// Add the version to the end of the path
+	if len(groupVersion.Group) > 0 {
+		versionedAPIPath = path.Join(versionedAPIPath, groupVersion.Group, groupVersion.Version)
+
+	} else {
+		versionedAPIPath = path.Join(versionedAPIPath, groupVersion.Version)
+	}
+
+	return versionedAPIPath
+}
+
+// defaultServerUrlFor is shared between IsConfigTransportTLS and RESTClientFor. It
+// requires Host and Version to be set prior to being called.
+func defaultServerUrlFor(config *Config) (*url.URL, string, error) {
+	// TODO: move the default to secure when the apiserver supports TLS by default
+	// config.Insecure is taken to mean "I want HTTPS but don't bother checking the certs against a CA."
+	hasCA := len(config.CAFile) != 0 || len(config.CAData) != 0
+	hasCert := len(config.CertFile) != 0 || len(config.CertData) != 0
+	defaultTLS := hasCA || hasCert || config.Insecure
+	host := config.Host
+	if host == "" {
+		host = "localhost"
+	}
+
+	if config.GroupVersion != nil {
+		return DefaultServerURL(host, config.APIPath, *config.GroupVersion, defaultTLS)
+	}
+	return DefaultServerURL(host, config.APIPath, schema.GroupVersion{}, defaultTLS)
+}
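+
+// Example (illustrative sketch; the host is an assumption): DefaultServerURL
+// splits a bare host:port plus group/version into the base URL and the
+// versioned API path.
+//
+//	u, p, err := DefaultServerURL("cluster.example:6443", "/apis",
+//		schema.GroupVersion{Group: "apps", Version: "v1"}, true)
+//	// err == nil, u.String() == "https://cluster.example:6443", p == "/apis/apps/v1"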
diff --git a/vendor/k8s.io/client-go/rest/urlbackoff.go b/vendor/k8s.io/client-go/rest/urlbackoff.go
new file mode 100644
index 0000000..d00e42f
--- /dev/null
+++ b/vendor/k8s.io/client-go/rest/urlbackoff.go
@@ -0,0 +1,107 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rest
+
+import (
+	"net/url"
+	"time"
+
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/client-go/util/flowcontrol"
+	"k8s.io/klog"
+)
+
+// Set of response codes that we back off for.
+// In general these should be errors that indicate a server is overloaded.
+// These shouldn't be configured by any user; we set them based on common
+// HTTP conventions.
+var serverIsOverloadedSet = sets.NewInt(429)
+var maxResponseCode = 499
+
+type BackoffManager interface {
+	UpdateBackoff(actualUrl *url.URL, err error, responseCode int)
+	CalculateBackoff(actualUrl *url.URL) time.Duration
+	Sleep(d time.Duration)
+}
+
+// URLBackoff struct implements the semantics on top of Backoff which
+// we need for URL specific exponential backoff.
+type URLBackoff struct {
+	// Uses backoff as underlying implementation.
+	Backoff *flowcontrol.Backoff
+}
+
+// NoBackoff is a stub implementation, can be used for mocking or else as a default.
+type NoBackoff struct {
+}
+
+func (n *NoBackoff) UpdateBackoff(actualUrl *url.URL, err error, responseCode int) {
+	// do nothing.
+}
+
+func (n *NoBackoff) CalculateBackoff(actualUrl *url.URL) time.Duration {
+	return 0 * time.Second
+}
+
+func (n *NoBackoff) Sleep(d time.Duration) {
+	time.Sleep(d)
+}
+
+// Disable makes the backoff trivial, i.e., sets it to zero.  This might be used
+// by tests which want to run 1000s of mock requests without slowing down.
+func (b *URLBackoff) Disable() {
+	klog.V(4).Infof("Disabling backoff strategy")
+	b.Backoff = flowcontrol.NewBackOff(0*time.Second, 0*time.Second)
+}
+
+// baseUrlKey returns the key that URLs are mapped to.
+// For example, 127.0.0.1:8080/api/v2/abcde -> 127.0.0.1:8080.
+func (b *URLBackoff) baseUrlKey(rawurl *url.URL) string {
+	// Simple implementation for now, just the host.
+	// We may backoff specific paths (i.e. "pods") differentially
+	// in the future.
+	host, err := url.Parse(rawurl.String())
+	if err != nil {
+		klog.V(4).Infof("Error extracting url: %v", rawurl)
+		panic("bad url!")
+	}
+	return host.Host
+}
+
+// UpdateBackoff updates backoff metadata
+func (b *URLBackoff) UpdateBackoff(actualUrl *url.URL, err error, responseCode int) {
+	// range for retry counts that we store is [0,13]
+	if responseCode > maxResponseCode || serverIsOverloadedSet.Has(responseCode) {
+		b.Backoff.Next(b.baseUrlKey(actualUrl), b.Backoff.Clock.Now())
+		return
+	} else if responseCode >= 300 || err != nil {
+		klog.V(4).Infof("Client is returning errors: code %v, error %v", responseCode, err)
+	}
+
+	//If we got this far, there is no backoff required for this URL anymore.
+	b.Backoff.Reset(b.baseUrlKey(actualUrl))
+}
+
+// CalculateBackoff takes a URL and backs off exponentially,
+// based on its knowledge of existing failures.
+func (b *URLBackoff) CalculateBackoff(actualUrl *url.URL) time.Duration {
+	return b.Backoff.Get(b.baseUrlKey(actualUrl))
+}
+
+func (b *URLBackoff) Sleep(d time.Duration) {
+	b.Backoff.Clock.Sleep(d)
+}
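+
+// Example (illustrative sketch; the durations and URL are assumptions): a
+// URLBackoff seeded with a 1s base and a 10s cap. Overload responses for a
+// host grow the computed delay toward the cap; a success resets it.
+//
+//	b := &URLBackoff{Backoff: flowcontrol.NewBackOff(1*time.Second, 10*time.Second)}
+//	u, _ := url.Parse("https://cluster.example:6443")
+//	b.UpdateBackoff(u, nil, 503) // overload: increase backoff for this host
+//	d := b.CalculateBackoff(u)   // non-zero delay to sleep before retrying
+//	b.UpdateBackoff(u, nil, 200) // success: reset backoff for this host
+//	_ = d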
diff --git a/vendor/k8s.io/client-go/rest/watch/decoder.go b/vendor/k8s.io/client-go/rest/watch/decoder.go
new file mode 100644
index 0000000..73bb63a
--- /dev/null
+++ b/vendor/k8s.io/client-go/rest/watch/decoder.go
@@ -0,0 +1,72 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package versioned
+
+import (
+	"fmt"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/serializer/streaming"
+	"k8s.io/apimachinery/pkg/watch"
+)
+
+// Decoder implements the watch.Decoder interface for io.ReadClosers that
+// have contents which consist of a series of watchEvent objects encoded
+// with the given streaming decoder. The internal objects will then be
+// decoded by the embedded decoder.
+type Decoder struct {
+	decoder         streaming.Decoder
+	embeddedDecoder runtime.Decoder
+}
+
+// NewDecoder creates a Decoder from the given streaming decoder and embedded object decoder.
+func NewDecoder(decoder streaming.Decoder, embeddedDecoder runtime.Decoder) *Decoder {
+	return &Decoder{
+		decoder:         decoder,
+		embeddedDecoder: embeddedDecoder,
+	}
+}
+
+// Decode blocks until it can return the next object in the reader. Returns an error
+// if the reader is closed or an object can't be decoded.
+func (d *Decoder) Decode() (watch.EventType, runtime.Object, error) {
+	var got metav1.WatchEvent
+	res, _, err := d.decoder.Decode(nil, &got)
+	if err != nil {
+		return "", nil, err
+	}
+	if res != &got {
+		return "", nil, fmt.Errorf("unable to decode to metav1.Event")
+	}
+	switch got.Type {
+	case string(watch.Added), string(watch.Modified), string(watch.Deleted), string(watch.Error):
+	default:
+		return "", nil, fmt.Errorf("got invalid watch event type: %v", got.Type)
+	}
+
+	obj, err := runtime.Decode(d.embeddedDecoder, got.Object.Raw)
+	if err != nil {
+		return "", nil, fmt.Errorf("unable to decode watch event: %v", err)
+	}
+	return watch.EventType(got.Type), obj, nil
+}
+
+// Close closes the underlying decoder.
+func (d *Decoder) Close() {
+	d.decoder.Close()
+}
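+
+// Example (illustrative sketch; assumes a serializer s and the Framer from
+// k8s.io/apimachinery/pkg/runtime/serializer/json, plus a watch response
+// body): composing the streaming decoder with the embedded object decoder,
+// much as Request.Watch does.
+//
+//	frameReader := json.Framer.NewFrameReader(body)
+//	streamingDecoder := streaming.NewDecoder(frameReader, s)
+//	watcher := watch.NewStreamWatcher(NewDecoder(streamingDecoder, s))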
diff --git a/vendor/k8s.io/client-go/rest/watch/encoder.go b/vendor/k8s.io/client-go/rest/watch/encoder.go
new file mode 100644
index 0000000..e55aa12
--- /dev/null
+++ b/vendor/k8s.io/client-go/rest/watch/encoder.go
@@ -0,0 +1,56 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package versioned
+
+import (
+	"encoding/json"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/serializer/streaming"
+	"k8s.io/apimachinery/pkg/watch"
+)
+
+// Encoder serializes watch.Events into io.Writer. The internal objects
+// are encoded using embedded encoder, and the outer Event is serialized
+// using encoder.
+// TODO: this type is only used by tests
+type Encoder struct {
+	encoder         streaming.Encoder
+	embeddedEncoder runtime.Encoder
+}
+
+func NewEncoder(encoder streaming.Encoder, embeddedEncoder runtime.Encoder) *Encoder {
+	return &Encoder{
+		encoder:         encoder,
+		embeddedEncoder: embeddedEncoder,
+	}
+}
+
+// Encode writes an event to the writer. Returns an error
+// if the writer is closed or an object can't be encoded.
+func (e *Encoder) Encode(event *watch.Event) error {
+	data, err := runtime.Encode(e.embeddedEncoder, event.Object)
+	if err != nil {
+		return err
+	}
+	// FIXME: get rid of json.RawMessage.
+	return e.encoder.Encode(&metav1.WatchEvent{
+		Type:   string(event.Type),
+		Object: runtime.RawExtension{Raw: json.RawMessage(data)},
+	})
+}
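A hedged end-to-end sketch: encoding one watch event to stdout with this Encoder, using the JSON serializer and framer from apimachinery (the exact json.NewSerializer constructor signature is an assumption about the vendored apimachinery version):

package main

import (
	"os"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer/json"
	"k8s.io/apimachinery/pkg/runtime/serializer/streaming"
	"k8s.io/apimachinery/pkg/watch"
	restwatch "k8s.io/client-go/rest/watch"
)

func main() {
	scheme := runtime.NewScheme()
	_ = v1.AddToScheme(scheme)

	// One JSON codec serves both the outer WatchEvent and the embedded object.
	codec := json.NewSerializer(json.DefaultMetaFactory, scheme, scheme, false)
	frames := json.Framer.NewFrameWriter(os.Stdout)
	enc := restwatch.NewEncoder(streaming.NewEncoder(frames, codec), codec)

	pod := &v1.Pod{
		TypeMeta:   metav1.TypeMeta{APIVersion: "v1", Kind: "Pod"},
		ObjectMeta: metav1.ObjectMeta{Name: "demo"},
	}
	_ = enc.Encode(&watch.Event{Type: watch.Added, Object: pod})
}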
diff --git a/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go
new file mode 100644
index 0000000..c1ab45f
--- /dev/null
+++ b/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go
@@ -0,0 +1,52 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package rest
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TLSClientConfig) DeepCopyInto(out *TLSClientConfig) {
+	*out = *in
+	if in.CertData != nil {
+		in, out := &in.CertData, &out.CertData
+		*out = make([]byte, len(*in))
+		copy(*out, *in)
+	}
+	if in.KeyData != nil {
+		in, out := &in.KeyData, &out.KeyData
+		*out = make([]byte, len(*in))
+		copy(*out, *in)
+	}
+	if in.CAData != nil {
+		in, out := &in.CAData, &out.CAData
+		*out = make([]byte, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSClientConfig.
+func (in *TLSClientConfig) DeepCopy() *TLSClientConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(TLSClientConfig)
+	in.DeepCopyInto(out)
+	return out
+}
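These generated helpers matter because TLSClientConfig carries []byte fields; a plain struct copy would alias them. A small sketch:

package main

import (
	"fmt"

	"k8s.io/client-go/rest"
)

func main() {
	orig := rest.TLSClientConfig{CertData: []byte("cert"), KeyData: []byte("key")}
	cp := orig.DeepCopy() // copies the byte slices, not just the struct header
	cp.CertData[0] = 'X'
	fmt.Println(string(orig.CertData)) // still "cert": no aliasing with cp
}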
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go b/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go
new file mode 100644
index 0000000..5871575
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+
+package api
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go b/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go
new file mode 100644
index 0000000..65a3693
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go
@@ -0,0 +1,188 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package api
+
+import (
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path"
+	"path/filepath"
+)
+
+func init() {
+	sDec, _ := base64.StdEncoding.DecodeString("REDACTED+")
+	redactedBytes = []byte(string(sDec))
+	sDec, _ = base64.StdEncoding.DecodeString("DATA+OMITTED")
+	dataOmittedBytes = []byte(string(sDec))
+}
+
+// IsConfigEmpty returns true if the config is empty.
+func IsConfigEmpty(config *Config) bool {
+	return len(config.AuthInfos) == 0 && len(config.Clusters) == 0 && len(config.Contexts) == 0 &&
+		len(config.CurrentContext) == 0 &&
+		len(config.Preferences.Extensions) == 0 && !config.Preferences.Colors &&
+		len(config.Extensions) == 0
+}
+
+// MinifyConfig reads the current context and uses that to keep only the relevant pieces of config.
+// This is useful for making secrets based on kubeconfig files.
+func MinifyConfig(config *Config) error {
+	if len(config.CurrentContext) == 0 {
+		return errors.New("current-context must exist in order to minify")
+	}
+
+	currContext, exists := config.Contexts[config.CurrentContext]
+	if !exists {
+		return fmt.Errorf("cannot locate context %v", config.CurrentContext)
+	}
+
+	newContexts := map[string]*Context{}
+	newContexts[config.CurrentContext] = currContext
+
+	newClusters := map[string]*Cluster{}
+	if len(currContext.Cluster) > 0 {
+		if _, exists := config.Clusters[currContext.Cluster]; !exists {
+			return fmt.Errorf("cannot locate cluster %v", currContext.Cluster)
+		}
+
+		newClusters[currContext.Cluster] = config.Clusters[currContext.Cluster]
+	}
+
+	newAuthInfos := map[string]*AuthInfo{}
+	if len(currContext.AuthInfo) > 0 {
+		if _, exists := config.AuthInfos[currContext.AuthInfo]; !exists {
+			return fmt.Errorf("cannot locate user %v", currContext.AuthInfo)
+		}
+
+		newAuthInfos[currContext.AuthInfo] = config.AuthInfos[currContext.AuthInfo]
+	}
+
+	config.AuthInfos = newAuthInfos
+	config.Clusters = newClusters
+	config.Contexts = newContexts
+
+	return nil
+}
+
+var (
+	redactedBytes    []byte
+	dataOmittedBytes []byte
+)
+
+// ShortenConfig redacts raw data entries from the config object for a human-readable view.
+func ShortenConfig(config *Config) {
+	// trick json encoder into printing a human readable string in the raw data
+	// by base64 decoding what we want to print. Relies on implementation of
+	// http://golang.org/pkg/encoding/json/#Marshal using base64 to encode []byte
+	for key, authInfo := range config.AuthInfos {
+		if len(authInfo.ClientKeyData) > 0 {
+			authInfo.ClientKeyData = redactedBytes
+		}
+		if len(authInfo.ClientCertificateData) > 0 {
+			authInfo.ClientCertificateData = redactedBytes
+		}
+		config.AuthInfos[key] = authInfo
+	}
+	for key, cluster := range config.Clusters {
+		if len(cluster.CertificateAuthorityData) > 0 {
+			cluster.CertificateAuthorityData = dataOmittedBytes
+		}
+		config.Clusters[key] = cluster
+	}
+}
+
+// FlattenConfig changes the config object into a self-contained config (useful for making secrets).
+func FlattenConfig(config *Config) error {
+	for key, authInfo := range config.AuthInfos {
+		baseDir, err := MakeAbs(path.Dir(authInfo.LocationOfOrigin), "")
+		if err != nil {
+			return err
+		}
+
+		if err := FlattenContent(&authInfo.ClientCertificate, &authInfo.ClientCertificateData, baseDir); err != nil {
+			return err
+		}
+		if err := FlattenContent(&authInfo.ClientKey, &authInfo.ClientKeyData, baseDir); err != nil {
+			return err
+		}
+
+		config.AuthInfos[key] = authInfo
+	}
+	for key, cluster := range config.Clusters {
+		baseDir, err := MakeAbs(path.Dir(cluster.LocationOfOrigin), "")
+		if err != nil {
+			return err
+		}
+
+		if err := FlattenContent(&cluster.CertificateAuthority, &cluster.CertificateAuthorityData, baseDir); err != nil {
+			return err
+		}
+
+		config.Clusters[key] = cluster
+	}
+
+	return nil
+}
+
+func FlattenContent(path *string, contents *[]byte, baseDir string) error {
+	if len(*path) != 0 {
+		if len(*contents) > 0 {
+			return errors.New("cannot have values for both path and contents")
+		}
+
+		var err error
+		absPath := ResolvePath(*path, baseDir)
+		*contents, err = ioutil.ReadFile(absPath)
+		if err != nil {
+			return err
+		}
+
+		*path = ""
+	}
+
+	return nil
+}
+
+// ResolvePath returns the path as an absolute path, resolved relative to the given base directory
+func ResolvePath(path string, base string) string {
+	// Don't resolve empty paths
+	if len(path) > 0 {
+		// Don't resolve absolute paths
+		if !filepath.IsAbs(path) {
+			return filepath.Join(base, path)
+		}
+	}
+
+	return path
+}
+
+func MakeAbs(path, base string) (string, error) {
+	if filepath.IsAbs(path) {
+		return path, nil
+	}
+	if len(base) == 0 {
+		cwd, err := os.Getwd()
+		if err != nil {
+			return "", err
+		}
+		base = cwd
+	}
+	return filepath.Join(base, path), nil
+}
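Putting these helpers together, a sketch that trims a kubeconfig to its current context and makes it self-contained (inline CA data is used so FlattenConfig has no files to read):

package main

import (
	"fmt"

	"k8s.io/client-go/tools/clientcmd/api"
)

func main() {
	cfg := api.NewConfig()
	cfg.Clusters["prod"] = &api.Cluster{
		Server:                   "https://prod.example:6443",
		CertificateAuthorityData: []byte("...PEM..."),
	}
	cfg.Clusters["dev"] = &api.Cluster{Server: "https://dev.example:6443"}
	cfg.AuthInfos["alice"] = &api.AuthInfo{Token: "sometoken"}
	cfg.Contexts["prod"] = &api.Context{Cluster: "prod", AuthInfo: "alice"}
	cfg.CurrentContext = "prod"

	// Keep only what the current context references...
	if err := api.MinifyConfig(cfg); err != nil {
		panic(err)
	}
	// ...then inline any file-based certs/keys so the config stands alone.
	if err := api.FlattenConfig(cfg); err != nil {
		panic(err)
	}
	fmt.Println(len(cfg.Clusters), len(cfg.AuthInfos)) // 1 1: "dev" was dropped
}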
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/register.go b/vendor/k8s.io/client-go/tools/clientcmd/api/register.go
new file mode 100644
index 0000000..2eec388
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/clientcmd/api/register.go
@@ -0,0 +1,46 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package api
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// SchemeGroupVersion is group version used to register these objects
+// TODO this should be in the "kubeconfig" group
+var SchemeGroupVersion = schema.GroupVersion{Group: "", Version: runtime.APIVersionInternal}
+
+var (
+	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+	AddToScheme   = SchemeBuilder.AddToScheme
+)
+
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&Config{},
+	)
+	return nil
+}
+
+func (obj *Config) GetObjectKind() schema.ObjectKind { return obj }
+func (obj *Config) SetGroupVersionKind(gvk schema.GroupVersionKind) {
+	obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind()
+}
+func (obj *Config) GroupVersionKind() schema.GroupVersionKind {
+	return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind)
+}
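A short sketch of what this registration buys: once AddToScheme has run, the scheme can answer type queries for Config under the internal version:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/tools/clientcmd/api"
)

func main() {
	scheme := runtime.NewScheme()
	if err := api.AddToScheme(scheme); err != nil {
		panic(err)
	}
	gvks, _, err := scheme.ObjectKinds(&api.Config{})
	if err != nil {
		panic(err)
	}
	fmt.Println(gvks) // e.g. [/__internal, Kind=Config]
}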
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/types.go b/vendor/k8s.io/client-go/tools/clientcmd/api/types.go
new file mode 100644
index 0000000..1391df7
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/clientcmd/api/types.go
@@ -0,0 +1,218 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package api
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+// Where possible, json tags match the cli argument names.
+// Top level config objects and all values required for proper functioning are not "omitempty".  Any truly optional piece of config is allowed to be omitted.
+
+// Config holds the information needed to connect to remote kubernetes clusters as a given user
+// IMPORTANT if you add fields to this struct, please update IsConfigEmpty()
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type Config struct {
+	// Legacy field from pkg/api/types.go TypeMeta.
+	// TODO(jlowdermilk): remove this after eliminating downstream dependencies.
+	// +optional
+	Kind string `json:"kind,omitempty"`
+	// Legacy field from pkg/api/types.go TypeMeta.
+	// TODO(jlowdermilk): remove this after eliminating downstream dependencies.
+	// +optional
+	APIVersion string `json:"apiVersion,omitempty"`
+	// Preferences holds general information to be used for cli interactions
+	Preferences Preferences `json:"preferences"`
+	// Clusters is a map of referencable names to cluster configs
+	Clusters map[string]*Cluster `json:"clusters"`
+	// AuthInfos is a map of referencable names to user configs
+	AuthInfos map[string]*AuthInfo `json:"users"`
+	// Contexts is a map of referencable names to context configs
+	Contexts map[string]*Context `json:"contexts"`
+	// CurrentContext is the name of the context that you would like to use by default
+	CurrentContext string `json:"current-context"`
+	// Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
+	// +optional
+	Extensions map[string]runtime.Object `json:"extensions,omitempty"`
+}
+
+// IMPORTANT if you add fields to this struct, please update IsConfigEmpty()
+type Preferences struct {
+	// +optional
+	Colors bool `json:"colors,omitempty"`
+	// Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
+	// +optional
+	Extensions map[string]runtime.Object `json:"extensions,omitempty"`
+}
+
+// Cluster contains information about how to communicate with a kubernetes cluster
+type Cluster struct {
+	// LocationOfOrigin indicates where this object came from.  It is used for round tripping config post-merge, but never serialized.
+	LocationOfOrigin string
+	// Server is the address of the kubernetes cluster (https://hostname:port).
+	Server string `json:"server"`
+	// InsecureSkipTLSVerify skips the validity check for the server's certificate. This will make your HTTPS connections insecure.
+	// +optional
+	InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"`
+	// CertificateAuthority is the path to a cert file for the certificate authority.
+	// +optional
+	CertificateAuthority string `json:"certificate-authority,omitempty"`
+	// CertificateAuthorityData contains PEM-encoded certificate authority certificates. Overrides CertificateAuthority
+	// +optional
+	CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"`
+	// Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
+	// +optional
+	Extensions map[string]runtime.Object `json:"extensions,omitempty"`
+}
+
+// AuthInfo contains information that describes identity information.  This is used to tell the kubernetes cluster who you are.
+type AuthInfo struct {
+	// LocationOfOrigin indicates where this object came from.  It is used for round tripping config post-merge, but never serialized.
+	LocationOfOrigin string
+	// ClientCertificate is the path to a client cert file for TLS.
+	// +optional
+	ClientCertificate string `json:"client-certificate,omitempty"`
+	// ClientCertificateData contains PEM-encoded data from a client cert file for TLS. Overrides ClientCertificate
+	// +optional
+	ClientCertificateData []byte `json:"client-certificate-data,omitempty"`
+	// ClientKey is the path to a client key file for TLS.
+	// +optional
+	ClientKey string `json:"client-key,omitempty"`
+	// ClientKeyData contains PEM-encoded data from a client key file for TLS. Overrides ClientKey
+	// +optional
+	ClientKeyData []byte `json:"client-key-data,omitempty"`
+	// Token is the bearer token for authentication to the kubernetes cluster.
+	// +optional
+	Token string `json:"token,omitempty"`
+	// TokenFile is a pointer to a file that contains a bearer token (as described above).  If both Token and TokenFile are present, Token takes precedence.
+	// +optional
+	TokenFile string `json:"tokenFile,omitempty"`
+	// Impersonate is the username to act-as.
+	// +optional
+	Impersonate string `json:"act-as,omitempty"`
+	// ImpersonateGroups is the groups to impersonate.
+	// +optional
+	ImpersonateGroups []string `json:"act-as-groups,omitempty"`
+	// ImpersonateUserExtra contains additional information for impersonated user.
+	// +optional
+	ImpersonateUserExtra map[string][]string `json:"act-as-user-extra,omitempty"`
+	// Username is the username for basic authentication to the kubernetes cluster.
+	// +optional
+	Username string `json:"username,omitempty"`
+	// Password is the password for basic authentication to the kubernetes cluster.
+	// +optional
+	Password string `json:"password,omitempty"`
+	// AuthProvider specifies a custom authentication plugin for the kubernetes cluster.
+	// +optional
+	AuthProvider *AuthProviderConfig `json:"auth-provider,omitempty"`
+	// Exec specifies a custom exec-based authentication plugin for the kubernetes cluster.
+	// +optional
+	Exec *ExecConfig `json:"exec,omitempty"`
+	// Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
+	// +optional
+	Extensions map[string]runtime.Object `json:"extensions,omitempty"`
+}
+
+// Context is a tuple of references to a cluster (how do I communicate with a kubernetes cluster), a user (how do I identify myself), and a namespace (what subset of resources do I want to work with)
+type Context struct {
+	// LocationOfOrigin indicates where this object came from.  It is used for round tripping config post-merge, but never serialized.
+	LocationOfOrigin string
+	// Cluster is the name of the cluster for this context
+	Cluster string `json:"cluster"`
+	// AuthInfo is the name of the authInfo for this context
+	AuthInfo string `json:"user"`
+	// Namespace is the default namespace to use on unspecified requests
+	// +optional
+	Namespace string `json:"namespace,omitempty"`
+	// Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
+	// +optional
+	Extensions map[string]runtime.Object `json:"extensions,omitempty"`
+}
+
+// AuthProviderConfig holds the configuration for a specified auth provider.
+type AuthProviderConfig struct {
+	Name string `json:"name"`
+	// +optional
+	Config map[string]string `json:"config,omitempty"`
+}
+
+// ExecConfig specifies a command to provide client credentials. The command is exec'd
+// and writes structured output holding credentials to stdout.
+//
+// See the client.authentication.k8s.io API group for specifications of the exact input
+// and output format.
+type ExecConfig struct {
+	// Command to execute.
+	Command string `json:"command"`
+	// Arguments to pass to the command when executing it.
+	// +optional
+	Args []string `json:"args"`
+	// Env defines additional environment variables to expose to the process. These
+	// are unioned with the host's environment, as well as variables client-go uses
+	// to pass arguments to the plugin.
+	// +optional
+	Env []ExecEnvVar `json:"env"`
+
+	// Preferred input version of the ExecInfo. The returned ExecCredentials MUST use
+	// the same encoding version as the input.
+	APIVersion string `json:"apiVersion,omitempty"`
+}
+
+// ExecEnvVar is used for setting environment variables when executing an exec-based
+// credential plugin.
+type ExecEnvVar struct {
+	Name  string `json:"name"`
+	Value string `json:"value"`
+}
+
+// NewConfig is a convenience function that returns a new Config object with non-nil maps
+func NewConfig() *Config {
+	return &Config{
+		Preferences: *NewPreferences(),
+		Clusters:    make(map[string]*Cluster),
+		AuthInfos:   make(map[string]*AuthInfo),
+		Contexts:    make(map[string]*Context),
+		Extensions:  make(map[string]runtime.Object),
+	}
+}
+
+// NewContext is a convenience function that returns a new Context
+// object with non-nil maps
+func NewContext() *Context {
+	return &Context{Extensions: make(map[string]runtime.Object)}
+}
+
+// NewCluster is a convenience function that returns a new Cluster
+// object with non-nil maps
+func NewCluster() *Cluster {
+	return &Cluster{Extensions: make(map[string]runtime.Object)}
+}
+
+// NewAuthInfo is a convenience function that returns a new AuthInfo
+// object with non-nil maps
+func NewAuthInfo() *AuthInfo {
+	return &AuthInfo{
+		Extensions:           make(map[string]runtime.Object),
+		ImpersonateUserExtra: make(map[string][]string),
+	}
+}
+
+// NewPreferences is a convenience function that returns a new
+// Preferences object with non-nil maps
+func NewPreferences() *Preferences {
+	return &Preferences{Extensions: make(map[string]runtime.Object)}
+}
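Because clusters, users, and contexts are joined only by the map keys, a sketch of wiring a minimal Config by hand:

package main

import (
	"fmt"

	"k8s.io/client-go/tools/clientcmd/api"
)

func main() {
	cfg := api.NewConfig()
	cfg.Clusters["dev"] = &api.Cluster{Server: "https://127.0.0.1:6443", InsecureSkipTLSVerify: true}
	cfg.AuthInfos["dev-user"] = &api.AuthInfo{Token: "sometoken"}
	// The context ties a cluster name and a user name together by reference.
	cfg.Contexts["dev"] = &api.Context{Cluster: "dev", AuthInfo: "dev-user", Namespace: "default"}
	cfg.CurrentContext = "dev"
	fmt.Println(api.IsConfigEmpty(cfg)) // false
}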
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go
new file mode 100644
index 0000000..3240a7a
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go
@@ -0,0 +1,324 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package api
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuthInfo) DeepCopyInto(out *AuthInfo) {
+	*out = *in
+	if in.ClientCertificateData != nil {
+		in, out := &in.ClientCertificateData, &out.ClientCertificateData
+		*out = make([]byte, len(*in))
+		copy(*out, *in)
+	}
+	if in.ClientKeyData != nil {
+		in, out := &in.ClientKeyData, &out.ClientKeyData
+		*out = make([]byte, len(*in))
+		copy(*out, *in)
+	}
+	if in.ImpersonateGroups != nil {
+		in, out := &in.ImpersonateGroups, &out.ImpersonateGroups
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.ImpersonateUserExtra != nil {
+		in, out := &in.ImpersonateUserExtra, &out.ImpersonateUserExtra
+		*out = make(map[string][]string, len(*in))
+		for key, val := range *in {
+			var outVal []string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				in, out := &val, &outVal
+				*out = make([]string, len(*in))
+				copy(*out, *in)
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.AuthProvider != nil {
+		in, out := &in.AuthProvider, &out.AuthProvider
+		*out = new(AuthProviderConfig)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Exec != nil {
+		in, out := &in.Exec, &out.Exec
+		*out = new(ExecConfig)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Extensions != nil {
+		in, out := &in.Extensions, &out.Extensions
+		*out = make(map[string]runtime.Object, len(*in))
+		for key, val := range *in {
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				(*out)[key] = val.DeepCopyObject()
+			}
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthInfo.
+func (in *AuthInfo) DeepCopy() *AuthInfo {
+	if in == nil {
+		return nil
+	}
+	out := new(AuthInfo)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuthProviderConfig) DeepCopyInto(out *AuthProviderConfig) {
+	*out = *in
+	if in.Config != nil {
+		in, out := &in.Config, &out.Config
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthProviderConfig.
+func (in *AuthProviderConfig) DeepCopy() *AuthProviderConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(AuthProviderConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Cluster) DeepCopyInto(out *Cluster) {
+	*out = *in
+	if in.CertificateAuthorityData != nil {
+		in, out := &in.CertificateAuthorityData, &out.CertificateAuthorityData
+		*out = make([]byte, len(*in))
+		copy(*out, *in)
+	}
+	if in.Extensions != nil {
+		in, out := &in.Extensions, &out.Extensions
+		*out = make(map[string]runtime.Object, len(*in))
+		for key, val := range *in {
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				(*out)[key] = val.DeepCopyObject()
+			}
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster.
+func (in *Cluster) DeepCopy() *Cluster {
+	if in == nil {
+		return nil
+	}
+	out := new(Cluster)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Config) DeepCopyInto(out *Config) {
+	*out = *in
+	in.Preferences.DeepCopyInto(&out.Preferences)
+	if in.Clusters != nil {
+		in, out := &in.Clusters, &out.Clusters
+		*out = make(map[string]*Cluster, len(*in))
+		for key, val := range *in {
+			var outVal *Cluster
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				in, out := &val, &outVal
+				*out = new(Cluster)
+				(*in).DeepCopyInto(*out)
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.AuthInfos != nil {
+		in, out := &in.AuthInfos, &out.AuthInfos
+		*out = make(map[string]*AuthInfo, len(*in))
+		for key, val := range *in {
+			var outVal *AuthInfo
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				in, out := &val, &outVal
+				*out = new(AuthInfo)
+				(*in).DeepCopyInto(*out)
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Contexts != nil {
+		in, out := &in.Contexts, &out.Contexts
+		*out = make(map[string]*Context, len(*in))
+		for key, val := range *in {
+			var outVal *Context
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				in, out := &val, &outVal
+				*out = new(Context)
+				(*in).DeepCopyInto(*out)
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Extensions != nil {
+		in, out := &in.Extensions, &out.Extensions
+		*out = make(map[string]runtime.Object, len(*in))
+		for key, val := range *in {
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				(*out)[key] = val.DeepCopyObject()
+			}
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config.
+func (in *Config) DeepCopy() *Config {
+	if in == nil {
+		return nil
+	}
+	out := new(Config)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Config) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Context) DeepCopyInto(out *Context) {
+	*out = *in
+	if in.Extensions != nil {
+		in, out := &in.Extensions, &out.Extensions
+		*out = make(map[string]runtime.Object, len(*in))
+		for key, val := range *in {
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				(*out)[key] = val.DeepCopyObject()
+			}
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Context.
+func (in *Context) DeepCopy() *Context {
+	if in == nil {
+		return nil
+	}
+	out := new(Context)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExecConfig) DeepCopyInto(out *ExecConfig) {
+	*out = *in
+	if in.Args != nil {
+		in, out := &in.Args, &out.Args
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Env != nil {
+		in, out := &in.Env, &out.Env
+		*out = make([]ExecEnvVar, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecConfig.
+func (in *ExecConfig) DeepCopy() *ExecConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(ExecConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExecEnvVar) DeepCopyInto(out *ExecEnvVar) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecEnvVar.
+func (in *ExecEnvVar) DeepCopy() *ExecEnvVar {
+	if in == nil {
+		return nil
+	}
+	out := new(ExecEnvVar)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Preferences) DeepCopyInto(out *Preferences) {
+	*out = *in
+	if in.Extensions != nil {
+		in, out := &in.Extensions, &out.Extensions
+		*out = make(map[string]runtime.Object, len(*in))
+		for key, val := range *in {
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				(*out)[key] = val.DeepCopyObject()
+			}
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Preferences.
+func (in *Preferences) DeepCopy() *Preferences {
+	if in == nil {
+		return nil
+	}
+	out := new(Preferences)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/vendor/k8s.io/client-go/tools/metrics/OWNERS b/vendor/k8s.io/client-go/tools/metrics/OWNERS
new file mode 100755
index 0000000..ff51798
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/metrics/OWNERS
@@ -0,0 +1,7 @@
+reviewers:
+- wojtek-t
+- eparis
+- krousey
+- jayunit100
+- fgrzadkowski
+- tmrts
diff --git a/vendor/k8s.io/client-go/tools/metrics/metrics.go b/vendor/k8s.io/client-go/tools/metrics/metrics.go
new file mode 100644
index 0000000..a01306c
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/metrics/metrics.go
@@ -0,0 +1,61 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package metrics provides abstractions for registering which metrics
+// to record.
+package metrics
+
+import (
+	"net/url"
+	"sync"
+	"time"
+)
+
+var registerMetrics sync.Once
+
+// LatencyMetric observes client latency partitioned by verb and url.
+type LatencyMetric interface {
+	Observe(verb string, u url.URL, latency time.Duration)
+}
+
+// ResultMetric counts response codes partitioned by method and host.
+type ResultMetric interface {
+	Increment(code string, method string, host string)
+}
+
+var (
+	// RequestLatency is the latency metric that rest clients will update.
+	RequestLatency LatencyMetric = noopLatency{}
+	// RequestResult is the result metric that rest clients will update.
+	RequestResult ResultMetric = noopResult{}
+)
+
+// Register registers metrics for the rest client to use. This can
+// only be called once.
+func Register(lm LatencyMetric, rm ResultMetric) {
+	registerMetrics.Do(func() {
+		RequestLatency = lm
+		RequestResult = rm
+	})
+}
+
+type noopLatency struct{}
+
+func (noopLatency) Observe(string, url.URL, time.Duration) {}
+
+type noopResult struct{}
+
+func (noopResult) Increment(string, string, string) {}
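A sketch of plugging in real implementations; Register is guarded by sync.Once, so only the first call per process takes effect:

package main

import (
	"fmt"
	"net/url"
	"time"

	"k8s.io/client-go/tools/metrics"
)

type printLatency struct{}

func (printLatency) Observe(verb string, u url.URL, latency time.Duration) {
	fmt.Printf("%s %s took %v\n", verb, u.Path, latency)
}

type printResult struct{}

func (printResult) Increment(code, method, host string) {
	fmt.Printf("%s %s -> %s\n", method, host, code)
}

func main() {
	metrics.Register(printLatency{}, printResult{})
	// The rest client would normally drive these; invoke directly to demo.
	metrics.RequestResult.Increment("200", "GET", "apiserver.example")
}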
diff --git a/vendor/k8s.io/client-go/tools/reference/ref.go b/vendor/k8s.io/client-go/tools/reference/ref.go
new file mode 100644
index 0000000..573d948
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/reference/ref.go
@@ -0,0 +1,126 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package reference
+
+import (
+	"errors"
+	"fmt"
+	"net/url"
+	"strings"
+
+	"k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+var (
+	// Errors that could be returned by GetReference.
+	ErrNilObject  = errors.New("can't reference a nil object")
+	ErrNoSelfLink = errors.New("selfLink was empty, can't make reference")
+)
+
+// GetReference returns an ObjectReference which refers to the given
+// object, or an error if the object doesn't follow the conventions
+// that would allow this.
+// TODO: should take a meta.Interface see http://issue.k8s.io/7127
+func GetReference(scheme *runtime.Scheme, obj runtime.Object) (*v1.ObjectReference, error) {
+	if obj == nil {
+		return nil, ErrNilObject
+	}
+	if ref, ok := obj.(*v1.ObjectReference); ok {
+		// Don't make a reference to a reference.
+		return ref, nil
+	}
+
+	gvk := obj.GetObjectKind().GroupVersionKind()
+
+	// if the object referenced is actually persisted, we can just get kind from meta
+	// if we are building an object reference to something not yet persisted, we should fall back to the scheme
+	kind := gvk.Kind
+	if len(kind) == 0 {
+		// TODO: this is wrong
+		gvks, _, err := scheme.ObjectKinds(obj)
+		if err != nil {
+			return nil, err
+		}
+		kind = gvks[0].Kind
+	}
+
+	// An object that implements only List has enough metadata to build a reference
+	var listMeta metav1.Common
+	objectMeta, err := meta.Accessor(obj)
+	if err != nil {
+		listMeta, err = meta.CommonAccessor(obj)
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		listMeta = objectMeta
+	}
+
+	// if the object referenced is actually persisted, we can also get version from meta
+	version := gvk.GroupVersion().String()
+	if len(version) == 0 {
+		selfLink := listMeta.GetSelfLink()
+		if len(selfLink) == 0 {
+			return nil, ErrNoSelfLink
+		}
+		selfLinkUrl, err := url.Parse(selfLink)
+		if err != nil {
+			return nil, err
+		}
+		// example paths: /<prefix>/<version>/*
+		parts := strings.Split(selfLinkUrl.Path, "/")
+		if len(parts) < 4 {
+			return nil, fmt.Errorf("unexpected self link format: '%v'; got version '%v'", selfLink, version)
+		}
+		if parts[1] == "api" {
+			version = parts[2]
+		} else {
+			version = parts[2] + "/" + parts[3]
+		}
+	}
+
+	// only has list metadata
+	if objectMeta == nil {
+		return &v1.ObjectReference{
+			Kind:            kind,
+			APIVersion:      version,
+			ResourceVersion: listMeta.GetResourceVersion(),
+		}, nil
+	}
+
+	return &v1.ObjectReference{
+		Kind:            kind,
+		APIVersion:      version,
+		Name:            objectMeta.GetName(),
+		Namespace:       objectMeta.GetNamespace(),
+		UID:             objectMeta.GetUID(),
+		ResourceVersion: objectMeta.GetResourceVersion(),
+	}, nil
+}
+
+// GetPartialReference is exactly like GetReference, but allows you to set the FieldPath.
+func GetPartialReference(scheme *runtime.Scheme, obj runtime.Object, fieldPath string) (*v1.ObjectReference, error) {
+	ref, err := GetReference(scheme, obj)
+	if err != nil {
+		return nil, err
+	}
+	ref.FieldPath = fieldPath
+	return ref, nil
+}
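A sketch of GetReference on an object whose TypeMeta is populated, so the selfLink fallback above is never exercised; v1.AddToScheme is assumed from the vendored k8s.io/api/core/v1 package:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/tools/reference"
)

func main() {
	scheme := runtime.NewScheme()
	_ = v1.AddToScheme(scheme)

	pod := &v1.Pod{
		// TypeMeta set explicitly: GetReference then takes kind/version from
		// the object itself instead of consulting selfLink.
		TypeMeta:   metav1.TypeMeta{APIVersion: "v1", Kind: "Pod"},
		ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"},
	}
	ref, err := reference.GetReference(scheme, pod)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s %s %s/%s\n", ref.APIVersion, ref.Kind, ref.Namespace, ref.Name) // v1 Pod default/demo
}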
diff --git a/vendor/k8s.io/client-go/transport/OWNERS b/vendor/k8s.io/client-go/transport/OWNERS
new file mode 100755
index 0000000..bf0ba5b
--- /dev/null
+++ b/vendor/k8s.io/client-go/transport/OWNERS
@@ -0,0 +1,7 @@
+reviewers:
+- smarterclayton
+- wojtek-t
+- deads2k
+- liggitt
+- krousey
+- caesarxuchao
diff --git a/vendor/k8s.io/client-go/transport/cache.go b/vendor/k8s.io/client-go/transport/cache.go
new file mode 100644
index 0000000..7cffe2a
--- /dev/null
+++ b/vendor/k8s.io/client-go/transport/cache.go
@@ -0,0 +1,117 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package transport
+
+import (
+	"fmt"
+	"net"
+	"net/http"
+	"sync"
+	"time"
+
+	utilnet "k8s.io/apimachinery/pkg/util/net"
+)
+
+// tlsTransportCache caches TLS http.RoundTrippers for different configurations. The
+// same RoundTripper will be returned for configs with identical TLS options. If
+// the config has no custom TLS options, http.DefaultTransport is returned.
+type tlsTransportCache struct {
+	mu         sync.Mutex
+	transports map[tlsCacheKey]*http.Transport
+}
+
+const idleConnsPerHost = 25
+
+var tlsCache = &tlsTransportCache{transports: make(map[tlsCacheKey]*http.Transport)}
+
+type tlsCacheKey struct {
+	insecure   bool
+	caData     string
+	certData   string
+	keyData    string
+	getCert    string
+	serverName string
+	dial       string
+}
+
+func (t tlsCacheKey) String() string {
+	keyText := "<none>"
+	if len(t.keyData) > 0 {
+		keyText = "<redacted>"
+	}
+	return fmt.Sprintf("insecure:%v, caData:%#v, certData:%#v, keyData:%s, getCert: %s, serverName:%s, dial:%s", t.insecure, t.caData, t.certData, keyText, t.getCert, t.serverName, t.dial)
+}
+
+func (c *tlsTransportCache) get(config *Config) (http.RoundTripper, error) {
+	key, err := tlsConfigKey(config)
+	if err != nil {
+		return nil, err
+	}
+
+	// Ensure we only create a single transport for the given TLS options
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	// See if we already have a custom transport for this config
+	if t, ok := c.transports[key]; ok {
+		return t, nil
+	}
+
+	// Get the TLS options for this client config
+	tlsConfig, err := TLSConfigFor(config)
+	if err != nil {
+		return nil, err
+	}
+	// The options didn't require a custom TLS config
+	if tlsConfig == nil && config.Dial == nil {
+		return http.DefaultTransport, nil
+	}
+
+	dial := config.Dial
+	if dial == nil {
+		dial = (&net.Dialer{
+			Timeout:   30 * time.Second,
+			KeepAlive: 30 * time.Second,
+		}).DialContext
+	}
+	// Cache a single transport for these options
+	c.transports[key] = utilnet.SetTransportDefaults(&http.Transport{
+		Proxy:               http.ProxyFromEnvironment,
+		TLSHandshakeTimeout: 10 * time.Second,
+		TLSClientConfig:     tlsConfig,
+		MaxIdleConnsPerHost: idleConnsPerHost,
+		DialContext:         dial,
+	})
+	return c.transports[key], nil
+}
+
+// tlsConfigKey returns a unique key for tls.Config objects returned from TLSConfigFor
+func tlsConfigKey(c *Config) (tlsCacheKey, error) {
+	// Make sure ca/key/cert content is loaded
+	if err := loadTLSFiles(c); err != nil {
+		return tlsCacheKey{}, err
+	}
+	return tlsCacheKey{
+		insecure:   c.TLS.Insecure,
+		caData:     string(c.TLS.CAData),
+		certData:   string(c.TLS.CertData),
+		keyData:    string(c.TLS.KeyData),
+		getCert:    fmt.Sprintf("%p", c.TLS.GetCert),
+		serverName: c.TLS.ServerName,
+		dial:       fmt.Sprintf("%p", c.Dial),
+	}, nil
+}
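The observable effect of this cache, sketched with transport.New, the package's public constructor defined in a sibling file not shown in this hunk (an assumption about the vendored package surface): two configs with identical TLS options share one transport.

package main

import (
	"fmt"

	"k8s.io/client-go/transport"
)

func main() {
	mk := func() *transport.Config {
		return &transport.Config{TLS: transport.TLSConfig{Insecure: true, ServerName: "apiserver.example"}}
	}
	rt1, _ := transport.New(mk())
	rt2, _ := transport.New(mk())
	// Identical tlsCacheKey fields land on the same cached *http.Transport.
	fmt.Println(rt1 == rt2) // true
}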
diff --git a/vendor/k8s.io/client-go/transport/config.go b/vendor/k8s.io/client-go/transport/config.go
new file mode 100644
index 0000000..4081c23
--- /dev/null
+++ b/vendor/k8s.io/client-go/transport/config.go
@@ -0,0 +1,110 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package transport
+
+import (
+	"context"
+	"crypto/tls"
+	"net"
+	"net/http"
+)
+
+// Config holds various options for establishing a transport.
+type Config struct {
+	// UserAgent is an optional field that specifies the caller of this
+	// request.
+	UserAgent string
+
+	// The base TLS configuration for this transport.
+	TLS TLSConfig
+
+	// Username and password for basic authentication
+	Username string
+	Password string
+
+	// Bearer token for authentication
+	BearerToken string
+
+	// Impersonate is the config that this Config will impersonate using
+	Impersonate ImpersonationConfig
+
+	// Transport may be used for custom HTTP behavior. This attribute may
+	// not be specified with the TLS client certificate options. Use
+	// WrapTransport for most client level operations.
+	Transport http.RoundTripper
+
+	// WrapTransport will be invoked for custom HTTP behavior after the
+	// underlying transport is initialized (either the transport created
+	// from TLSClientConfig, Transport, or http.DefaultTransport). The
+	// config may layer other RoundTrippers on top of the returned
+	// RoundTripper.
+	WrapTransport func(rt http.RoundTripper) http.RoundTripper
+
+	// Dial specifies the dial function for creating unencrypted TCP connections.
+	Dial func(ctx context.Context, network, address string) (net.Conn, error)
+}
+
+// ImpersonationConfig has all the available impersonation options
+type ImpersonationConfig struct {
+	// UserName matches user.Info.GetName()
+	UserName string
+	// Groups matches user.Info.GetGroups()
+	Groups []string
+	// Extra matches user.Info.GetExtra()
+	Extra map[string][]string
+}
+
+// HasCA returns whether the configuration has a certificate authority or not.
+func (c *Config) HasCA() bool {
+	return len(c.TLS.CAData) > 0 || len(c.TLS.CAFile) > 0
+}
+
+// HasBasicAuth returns whether the configuration has basic authentication or not.
+func (c *Config) HasBasicAuth() bool {
+	return len(c.Username) != 0
+}
+
+// HasTokenAuth returns whether the configuration has token authentication or not.
+func (c *Config) HasTokenAuth() bool {
+	return len(c.BearerToken) != 0
+}
+
+// HasCertAuth returns whether the configuration has certificate authentication or not.
+func (c *Config) HasCertAuth() bool {
+	return (len(c.TLS.CertData) != 0 || len(c.TLS.CertFile) != 0) && (len(c.TLS.KeyData) != 0 || len(c.TLS.KeyFile) != 0)
+}
+
+// HasCertCallback returns whether the configuration has a certificate callback or not.
+func (c *Config) HasCertCallback() bool {
+	return c.TLS.GetCert != nil
+}
+
+// TLSConfig holds the information needed to set up a TLS transport.
+type TLSConfig struct {
+	CAFile   string // Path of the PEM-encoded server trusted root certificates.
+	CertFile string // Path of the PEM-encoded client certificate.
+	KeyFile  string // Path of the PEM-encoded client key.
+
+	Insecure   bool   // Server should be accessed without verifying the certificate. For testing only.
+	ServerName string // Override for the server name passed to the server for SNI and used to verify certificates.
+
+	CAData   []byte // Bytes of the PEM-encoded server trusted root certificates. Supersedes CAFile.
+	CertData []byte // Bytes of the PEM-encoded client certificate. Supersedes CertFile.
+	KeyData  []byte // Bytes of the PEM-encoded client key. Supersedes KeyFile.
+
+	GetCert func() (*tls.Certificate, error) // Callback that returns a TLS client certificate. CertData, CertFile, KeyData and KeyFile supersede this field.
+}
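A sketch of filling in the Config above and checking it with the Has* helpers:

package main

import (
	"fmt"

	"k8s.io/client-go/transport"
)

func main() {
	cfg := &transport.Config{
		UserAgent:   "affinity-routerd/0.1", // hypothetical caller name
		BearerToken: "sometoken",
		TLS: transport.TLSConfig{
			CAData:     []byte("...PEM..."),
			ServerName: "kubernetes.default.svc",
		},
	}
	fmt.Println(cfg.HasCA(), cfg.HasTokenAuth(), cfg.HasBasicAuth()) // true true false
}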
diff --git a/vendor/k8s.io/client-go/transport/round_trippers.go b/vendor/k8s.io/client-go/transport/round_trippers.go
new file mode 100644
index 0000000..da417cf
--- /dev/null
+++ b/vendor/k8s.io/client-go/transport/round_trippers.go
@@ -0,0 +1,531 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package transport
+
+import (
+	"fmt"
+	"net/http"
+	"strings"
+	"time"
+
+	"k8s.io/klog"
+
+	utilnet "k8s.io/apimachinery/pkg/util/net"
+)
+
+// HTTPWrappersForConfig wraps a round tripper with any relevant layered
+// behavior from the config. Exposed to allow clients that need HTTP-like
+// behavior but then must hijack the underlying connection (like WebSocket or
+// HTTP2 clients). Pure HTTP clients should use the RoundTripper returned from
+// New.
+func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTripper, error) {
+	if config.WrapTransport != nil {
+		rt = config.WrapTransport(rt)
+	}
+
+	rt = DebugWrappers(rt)
+
+	// Set authentication wrappers
+	switch {
+	case config.HasBasicAuth() && config.HasTokenAuth():
+		return nil, fmt.Errorf("username/password or bearer token may be set, but not both")
+	case config.HasTokenAuth():
+		rt = NewBearerAuthRoundTripper(config.BearerToken, rt)
+	case config.HasBasicAuth():
+		rt = NewBasicAuthRoundTripper(config.Username, config.Password, rt)
+	}
+	if len(config.UserAgent) > 0 {
+		rt = NewUserAgentRoundTripper(config.UserAgent, rt)
+	}
+	if len(config.Impersonate.UserName) > 0 ||
+		len(config.Impersonate.Groups) > 0 ||
+		len(config.Impersonate.Extra) > 0 {
+		rt = NewImpersonatingRoundTripper(config.Impersonate, rt)
+	}
+	return rt, nil
+}
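A sketch of the wrapper chain this function assembles; with a bearer token, user agent, and impersonation set, the returned RoundTripper layers all three over the base transport:

package main

import (
	"fmt"
	"net/http"

	"k8s.io/client-go/transport"
)

func main() {
	cfg := &transport.Config{
		UserAgent:   "demo/0.1",
		BearerToken: "sometoken",
		Impersonate: transport.ImpersonationConfig{UserName: "alice"},
	}
	rt, err := transport.HTTPWrappersForConfig(cfg, http.DefaultTransport)
	if err != nil {
		panic(err)
	}
	// Impersonation is applied last, so it is the outermost wrapper.
	fmt.Printf("%T\n", rt)
}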
+
+// DebugWrappers wraps a round tripper and logs based on the current log level.
+func DebugWrappers(rt http.RoundTripper) http.RoundTripper {
+	switch {
+	case bool(klog.V(9)):
+		rt = newDebuggingRoundTripper(rt, debugCurlCommand, debugURLTiming, debugResponseHeaders)
+	case bool(klog.V(8)):
+		rt = newDebuggingRoundTripper(rt, debugJustURL, debugRequestHeaders, debugResponseStatus, debugResponseHeaders)
+	case bool(klog.V(7)):
+		rt = newDebuggingRoundTripper(rt, debugJustURL, debugRequestHeaders, debugResponseStatus)
+	case bool(klog.V(6)):
+		rt = newDebuggingRoundTripper(rt, debugURLTiming)
+	}
+
+	return rt
+}
+
+type requestCanceler interface {
+	CancelRequest(*http.Request)
+}
+
+type authProxyRoundTripper struct {
+	username string
+	groups   []string
+	extra    map[string][]string
+
+	rt http.RoundTripper
+}
+
+// NewAuthProxyRoundTripper provides a roundtripper which will add auth proxy fields to requests,
+// for use with authentication-terminating proxy cases.
+// Assuming you pull the user from the context:
+// username is the user.Info.GetName() of the user
+// groups is the user.Info.GetGroups() of the user
+// extra is the user.Info.GetExtra() of the user
+// extra can contain any additional information that the authenticator
+// thought was interesting, for example authorization scopes.
+// In order to faithfully round-trip through an impersonation flow, these keys
+// MUST be lowercase.
+func NewAuthProxyRoundTripper(username string, groups []string, extra map[string][]string, rt http.RoundTripper) http.RoundTripper {
+	return &authProxyRoundTripper{
+		username: username,
+		groups:   groups,
+		extra:    extra,
+		rt:       rt,
+	}
+}
+
+func (rt *authProxyRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+	req = utilnet.CloneRequest(req)
+	SetAuthProxyHeaders(req, rt.username, rt.groups, rt.extra)
+
+	return rt.rt.RoundTrip(req)
+}
+
+// SetAuthProxyHeaders stomps the auth proxy header fields.  It mutates its argument.
+func SetAuthProxyHeaders(req *http.Request, username string, groups []string, extra map[string][]string) {
+	req.Header.Del("X-Remote-User")
+	req.Header.Del("X-Remote-Group")
+	for key := range req.Header {
+		if strings.HasPrefix(strings.ToLower(key), strings.ToLower("X-Remote-Extra-")) {
+			req.Header.Del(key)
+		}
+	}
+
+	req.Header.Set("X-Remote-User", username)
+	for _, group := range groups {
+		req.Header.Add("X-Remote-Group", group)
+	}
+	for key, values := range extra {
+		for _, value := range values {
+			req.Header.Add("X-Remote-Extra-"+headerKeyEscape(key), value)
+		}
+	}
+}
+
+func (rt *authProxyRoundTripper) CancelRequest(req *http.Request) {
+	if canceler, ok := rt.rt.(requestCanceler); ok {
+		canceler.CancelRequest(req)
+	} else {
+		klog.Errorf("CancelRequest not implemented by %T", rt.rt)
+	}
+}
+
+func (rt *authProxyRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt }
+
+type userAgentRoundTripper struct {
+	agent string
+	rt    http.RoundTripper
+}
+
+func NewUserAgentRoundTripper(agent string, rt http.RoundTripper) http.RoundTripper {
+	return &userAgentRoundTripper{agent, rt}
+}
+
+func (rt *userAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+	if len(req.Header.Get("User-Agent")) != 0 {
+		return rt.rt.RoundTrip(req)
+	}
+	req = utilnet.CloneRequest(req)
+	req.Header.Set("User-Agent", rt.agent)
+	return rt.rt.RoundTrip(req)
+}
+
+func (rt *userAgentRoundTripper) CancelRequest(req *http.Request) {
+	if canceler, ok := rt.rt.(requestCanceler); ok {
+		canceler.CancelRequest(req)
+	} else {
+		klog.Errorf("CancelRequest not implemented by %T", rt.rt)
+	}
+}
+
+func (rt *userAgentRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt }
+
+type basicAuthRoundTripper struct {
+	username string
+	password string
+	rt       http.RoundTripper
+}
+
+// NewBasicAuthRoundTripper will apply a BASIC auth authorization header to a
+// request unless it has already been set.
+func NewBasicAuthRoundTripper(username, password string, rt http.RoundTripper) http.RoundTripper {
+	return &basicAuthRoundTripper{username, password, rt}
+}
+
+func (rt *basicAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+	if len(req.Header.Get("Authorization")) != 0 {
+		return rt.rt.RoundTrip(req)
+	}
+	req = utilnet.CloneRequest(req)
+	req.SetBasicAuth(rt.username, rt.password)
+	return rt.rt.RoundTrip(req)
+}
+
+func (rt *basicAuthRoundTripper) CancelRequest(req *http.Request) {
+	if canceler, ok := rt.rt.(requestCanceler); ok {
+		canceler.CancelRequest(req)
+	} else {
+		klog.Errorf("CancelRequest not implemented by %T", rt.rt)
+	}
+}
+
+func (rt *basicAuthRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt }
+
+// These correspond to the headers used in pkg/apis/authentication.  We don't want the package dependency,
+// but you must not change the values.
+const (
+	// ImpersonateUserHeader is used to impersonate a particular user during an API server request
+	ImpersonateUserHeader = "Impersonate-User"
+
+	// ImpersonateGroupHeader is used to impersonate a particular group during an API server request.
+	// It can be repeated multiple times for multiple groups.
+	ImpersonateGroupHeader = "Impersonate-Group"
+
+	// ImpersonateUserExtraHeaderPrefix is a prefix for a header used to impersonate an entry in the
+	// extra map[string][]string for user.Info.  The key for the `extra` map is suffix.
+	// The same key can be repeated multiple times to have multiple elements in the slice under a single key.
+	// For instance:
+	// Impersonate-Extra-Foo: one
+	// Impersonate-Extra-Foo: two
+	// results in extra["Foo"] = []string{"one", "two"}
+	ImpersonateUserExtraHeaderPrefix = "Impersonate-Extra-"
+)
+
+type impersonatingRoundTripper struct {
+	impersonate ImpersonationConfig
+	delegate    http.RoundTripper
+}
+
+// NewImpersonatingRoundTripper will add an Act-As header to a request unless it has already been set.
+func NewImpersonatingRoundTripper(impersonate ImpersonationConfig, delegate http.RoundTripper) http.RoundTripper {
+	return &impersonatingRoundTripper{impersonate, delegate}
+}
+
+func (rt *impersonatingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+	// use the user header as a marker for the rest.
+	if len(req.Header.Get(ImpersonateUserHeader)) != 0 {
+		return rt.delegate.RoundTrip(req)
+	}
+	req = utilnet.CloneRequest(req)
+	req.Header.Set(ImpersonateUserHeader, rt.impersonate.UserName)
+
+	for _, group := range rt.impersonate.Groups {
+		req.Header.Add(ImpersonateGroupHeader, group)
+	}
+	for k, vv := range rt.impersonate.Extra {
+		for _, v := range vv {
+			req.Header.Add(ImpersonateUserExtraHeaderPrefix+headerKeyEscape(k), v)
+		}
+	}
+
+	return rt.delegate.RoundTrip(req)
+}
+
+func (rt *impersonatingRoundTripper) CancelRequest(req *http.Request) {
+	if canceler, ok := rt.delegate.(requestCanceler); ok {
+		canceler.CancelRequest(req)
+	} else {
+		klog.Errorf("CancelRequest not implemented by %T", rt.delegate)
+	}
+}
+
+func (rt *impersonatingRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.delegate }
+
+type bearerAuthRoundTripper struct {
+	bearer string
+	rt     http.RoundTripper
+}
+
+// NewBearerAuthRoundTripper adds the provided bearer token to a request
+// unless the authorization header has already been set.
+func NewBearerAuthRoundTripper(bearer string, rt http.RoundTripper) http.RoundTripper {
+	return &bearerAuthRoundTripper{bearer, rt}
+}
+
+func (rt *bearerAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+	if len(req.Header.Get("Authorization")) != 0 {
+		return rt.rt.RoundTrip(req)
+	}
+
+	req = utilnet.CloneRequest(req)
+	req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", rt.bearer))
+	return rt.rt.RoundTrip(req)
+}
+
+func (rt *bearerAuthRoundTripper) CancelRequest(req *http.Request) {
+	if canceler, ok := rt.rt.(requestCanceler); ok {
+		canceler.CancelRequest(req)
+	} else {
+		klog.Errorf("CancelRequest not implemented by %T", rt.rt)
+	}
+}
+
+func (rt *bearerAuthRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt }
+
+// requestInfo keeps track of information about a request/response combination
+type requestInfo struct {
+	RequestHeaders http.Header
+	RequestVerb    string
+	RequestURL     string
+
+	ResponseStatus  string
+	ResponseHeaders http.Header
+	ResponseErr     error
+
+	Duration time.Duration
+}
+
+// newRequestInfo creates a new requestInfo based on an http request
+func newRequestInfo(req *http.Request) *requestInfo {
+	return &requestInfo{
+		RequestURL:     req.URL.String(),
+		RequestVerb:    req.Method,
+		RequestHeaders: req.Header,
+	}
+}
+
+// complete adds information about the response to the requestInfo
+func (r *requestInfo) complete(response *http.Response, err error) {
+	if err != nil {
+		r.ResponseErr = err
+		return
+	}
+	r.ResponseStatus = response.Status
+	r.ResponseHeaders = response.Header
+}
+
+// toCurl returns a string that can be run as a command in a terminal (minus the body)
+func (r *requestInfo) toCurl() string {
+	headers := ""
+	for key, values := range r.RequestHeaders {
+		for _, value := range values {
+			headers += fmt.Sprintf(` -H %q`, fmt.Sprintf("%s: %s", key, value))
+		}
+	}
+
+	return fmt.Sprintf("curl -k -v -X%s %s '%s'", r.RequestVerb, headers, r.RequestURL)
+}
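+
+// For example (illustrative; URL and header are hypothetical), a GET with a
+// single Accept header renders as:
+//
+//	curl -k -v -XGET  -H "Accept: application/json" 'https://example.invalid/api'
+//
+// (the double space comes from the leading space each -H flag carries).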
+
+// debuggingRoundTripper will display information about the requests passing
+// through it based on what is configured
+type debuggingRoundTripper struct {
+	delegatedRoundTripper http.RoundTripper
+
+	levels map[debugLevel]bool
+}
+
+type debugLevel int
+
+const (
+	debugJustURL debugLevel = iota
+	debugURLTiming
+	debugCurlCommand
+	debugRequestHeaders
+	debugResponseStatus
+	debugResponseHeaders
+)
+
+func newDebuggingRoundTripper(rt http.RoundTripper, levels ...debugLevel) *debuggingRoundTripper {
+	drt := &debuggingRoundTripper{
+		delegatedRoundTripper: rt,
+		levels:                make(map[debugLevel]bool, len(levels)),
+	}
+	for _, v := range levels {
+		drt.levels[v] = true
+	}
+	return drt
+}
+
+func (rt *debuggingRoundTripper) CancelRequest(req *http.Request) {
+	if canceler, ok := rt.delegatedRoundTripper.(requestCanceler); ok {
+		canceler.CancelRequest(req)
+	} else {
+		klog.Errorf("CancelRequest not implemented by %T", rt.delegatedRoundTripper)
+	}
+}
+
+func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+	reqInfo := newRequestInfo(req)
+
+	if rt.levels[debugJustURL] {
+		klog.Infof("%s %s", reqInfo.RequestVerb, reqInfo.RequestURL)
+	}
+	if rt.levels[debugCurlCommand] {
+		klog.Infof("%s", reqInfo.toCurl())
+
+	}
+	if rt.levels[debugRequestHeaders] {
+		klog.Infof("Request Headers:")
+		for key, values := range reqInfo.RequestHeaders {
+			for _, value := range values {
+				klog.Infof("    %s: %s", key, value)
+			}
+		}
+	}
+
+	startTime := time.Now()
+	response, err := rt.delegatedRoundTripper.RoundTrip(req)
+	reqInfo.Duration = time.Since(startTime)
+
+	reqInfo.complete(response, err)
+
+	if rt.levels[debugURLTiming] {
+		klog.Infof("%s %s %s in %d milliseconds", reqInfo.RequestVerb, reqInfo.RequestURL, reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond))
+	}
+	if rt.levels[debugResponseStatus] {
+		klog.Infof("Response Status: %s in %d milliseconds", reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond))
+	}
+	if rt.levels[debugResponseHeaders] {
+		klog.Infof("Response Headers:")
+		for key, values := range reqInfo.ResponseHeaders {
+			for _, value := range values {
+				klog.Infof("    %s: %s", key, value)
+			}
+		}
+	}
+
+	return response, err
+}
+
+func (rt *debuggingRoundTripper) WrappedRoundTripper() http.RoundTripper {
+	return rt.delegatedRoundTripper
+}
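+
+// Illustrative sketch (editor addition): logging the equivalent curl command
+// and timing for every request that passes through a wrapped transport.
+//
+//	rt := newDebuggingRoundTripper(http.DefaultTransport, debugCurlCommand, debugURLTiming)
+//	client := &http.Client{Transport: rt}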
+
+func legalHeaderByte(b byte) bool {
+	return int(b) < len(legalHeaderKeyBytes) && legalHeaderKeyBytes[b]
+}
+
+func shouldEscape(b byte) bool {
+	// url.PathUnescape() returns an error if any '%' is not followed by two
+	// hexadecimal digits, so we'll intentionally encode it.
+	return !legalHeaderByte(b) || b == '%'
+}
+
+func headerKeyEscape(key string) string {
+	buf := strings.Builder{}
+	for i := 0; i < len(key); i++ {
+		b := key[i]
+		if shouldEscape(b) {
+			// %-encode bytes that should be escaped:
+			// https://tools.ietf.org/html/rfc3986#section-2.1
+			fmt.Fprintf(&buf, "%%%02X", b)
+			continue
+		}
+		buf.WriteByte(b)
+	}
+	return buf.String()
+}
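+
+// For example (illustrative): headerKeyEscape("my key%") returns
+// "my%20key%25", since the space is not a legal header-key byte and '%' is
+// always encoded so that unescaping round-trips.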
+
+// legalHeaderKeyBytes was copied from net/http/lex.go's isTokenTable.
+// See https://httpwg.github.io/specs/rfc7230.html#rule.token.separators
+var legalHeaderKeyBytes = [127]bool{
+	'%':  true,
+	'!':  true,
+	'#':  true,
+	'$':  true,
+	'&':  true,
+	'\'': true,
+	'*':  true,
+	'+':  true,
+	'-':  true,
+	'.':  true,
+	'0':  true,
+	'1':  true,
+	'2':  true,
+	'3':  true,
+	'4':  true,
+	'5':  true,
+	'6':  true,
+	'7':  true,
+	'8':  true,
+	'9':  true,
+	'A':  true,
+	'B':  true,
+	'C':  true,
+	'D':  true,
+	'E':  true,
+	'F':  true,
+	'G':  true,
+	'H':  true,
+	'I':  true,
+	'J':  true,
+	'K':  true,
+	'L':  true,
+	'M':  true,
+	'N':  true,
+	'O':  true,
+	'P':  true,
+	'Q':  true,
+	'R':  true,
+	'S':  true,
+	'T':  true,
+	'U':  true,
+	'V':  true,
+	'W':  true,
+	'X':  true,
+	'Y':  true,
+	'Z':  true,
+	'^':  true,
+	'_':  true,
+	'`':  true,
+	'a':  true,
+	'b':  true,
+	'c':  true,
+	'd':  true,
+	'e':  true,
+	'f':  true,
+	'g':  true,
+	'h':  true,
+	'i':  true,
+	'j':  true,
+	'k':  true,
+	'l':  true,
+	'm':  true,
+	'n':  true,
+	'o':  true,
+	'p':  true,
+	'q':  true,
+	'r':  true,
+	's':  true,
+	't':  true,
+	'u':  true,
+	'v':  true,
+	'w':  true,
+	'x':  true,
+	'y':  true,
+	'z':  true,
+	'|':  true,
+	'~':  true,
+}
diff --git a/vendor/k8s.io/client-go/transport/transport.go b/vendor/k8s.io/client-go/transport/transport.go
new file mode 100644
index 0000000..c19739f
--- /dev/null
+++ b/vendor/k8s.io/client-go/transport/transport.go
@@ -0,0 +1,169 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package transport
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+)
+
+// New returns an http.RoundTripper that will provide the authentication
+// or transport level security defined by the provided Config.
+func New(config *Config) (http.RoundTripper, error) {
+	// Set transport level security
+	if config.Transport != nil && (config.HasCA() || config.HasCertAuth() || config.HasCertCallback() || config.TLS.Insecure) {
+		return nil, fmt.Errorf("using a custom transport with TLS certificate options or the insecure flag is not allowed")
+	}
+
+	var (
+		rt  http.RoundTripper
+		err error
+	)
+
+	if config.Transport != nil {
+		rt = config.Transport
+	} else {
+		rt, err = tlsCache.get(config)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return HTTPWrappersForConfig(config, rt)
+}
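+
+// Illustrative sketch (editor addition; assumes the Config and TLSConfig
+// types defined in this package's config.go, and a hypothetical CA path):
+// building a CA-verified transport for an http.Client.
+//
+//	rt, err := New(&Config{
+//		TLS: TLSConfig{CAFile: "/etc/ssl/ca.crt"},
+//	})
+//	if err != nil {
+//		// handle error
+//	}
+//	client := &http.Client{Transport: rt}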
+
+// TLSConfigFor returns a tls.Config that will provide the transport level security defined
+// by the provided Config. Will return nil if no transport level security is requested.
+func TLSConfigFor(c *Config) (*tls.Config, error) {
+	if !(c.HasCA() || c.HasCertAuth() || c.HasCertCallback() || c.TLS.Insecure || len(c.TLS.ServerName) > 0) {
+		return nil, nil
+	}
+	if c.HasCA() && c.TLS.Insecure {
+		return nil, fmt.Errorf("specifying a root certificates file with the insecure flag is not allowed")
+	}
+	if err := loadTLSFiles(c); err != nil {
+		return nil, err
+	}
+
+	tlsConfig := &tls.Config{
+		// Can't use SSLv3 because of POODLE and BEAST
+		// Can't use TLSv1.0 because of POODLE and BEAST using CBC cipher
+		// Can't use TLSv1.1 because of RC4 cipher usage
+		MinVersion:         tls.VersionTLS12,
+		InsecureSkipVerify: c.TLS.Insecure,
+		ServerName:         c.TLS.ServerName,
+	}
+
+	if c.HasCA() {
+		tlsConfig.RootCAs = rootCertPool(c.TLS.CAData)
+	}
+
+	var staticCert *tls.Certificate
+	if c.HasCertAuth() {
+		// If key/cert were provided, verify them before setting up
+		// tlsConfig.GetClientCertificate.
+		cert, err := tls.X509KeyPair(c.TLS.CertData, c.TLS.KeyData)
+		if err != nil {
+			return nil, err
+		}
+		staticCert = &cert
+	}
+
+	if c.HasCertAuth() || c.HasCertCallback() {
+		tlsConfig.GetClientCertificate = func(*tls.CertificateRequestInfo) (*tls.Certificate, error) {
+			// Note: static key/cert data always take precedence over cert
+			// callback.
+			if staticCert != nil {
+				return staticCert, nil
+			}
+			if c.HasCertCallback() {
+				cert, err := c.TLS.GetCert()
+				if err != nil {
+					return nil, err
+				}
+				// GetCert may return empty value, meaning no cert.
+				if cert != nil {
+					return cert, nil
+				}
+			}
+
+			// Both c.TLS.CertData/KeyData were unset and GetCert didn't return
+			// anything. Return an empty tls.Certificate, no client cert will
+			// be sent to the server.
+			return &tls.Certificate{}, nil
+		}
+	}
+
+	return tlsConfig, nil
+}
+
+// loadTLSFiles copies the data from the CertFile, KeyFile, and CAFile fields into the CertData,
+// KeyData, and CAData fields, or returns an error. If no error is returned, all three fields are
+// either populated or were empty to start.
+func loadTLSFiles(c *Config) error {
+	var err error
+	c.TLS.CAData, err = dataFromSliceOrFile(c.TLS.CAData, c.TLS.CAFile)
+	if err != nil {
+		return err
+	}
+
+	c.TLS.CertData, err = dataFromSliceOrFile(c.TLS.CertData, c.TLS.CertFile)
+	if err != nil {
+		return err
+	}
+
+	c.TLS.KeyData, err = dataFromSliceOrFile(c.TLS.KeyData, c.TLS.KeyFile)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// dataFromSliceOrFile returns data from the slice (if non-empty), or from the file,
+// or an error if an error occurred reading the file
+func dataFromSliceOrFile(data []byte, file string) ([]byte, error) {
+	if len(data) > 0 {
+		return data, nil
+	}
+	if len(file) > 0 {
+		fileData, err := ioutil.ReadFile(file)
+		if err != nil {
+			return []byte{}, err
+		}
+		return fileData, nil
+	}
+	return nil, nil
+}
+
+// rootCertPool returns nil if caData is empty.  When passed along, this will mean "use system CAs".
+// When caData is not empty, it will be the ONLY information used in the CertPool.
+func rootCertPool(caData []byte) *x509.CertPool {
+	// What we really want is a copy of x509.systemRootsPool, but that isn't exposed.  It's difficult to build (see the go
+	// code for a look at the platform specific insanity), so we'll use the fact that RootCAs == nil gives us the system values
+	// It doesn't allow trusting either/or, but hopefully that won't be an issue
+	if len(caData) == 0 {
+		return nil
+	}
+
+	// if we have caData, use it
+	certPool := x509.NewCertPool()
+	certPool.AppendCertsFromPEM(caData)
+	return certPool
+}
diff --git a/vendor/k8s.io/client-go/util/cert/OWNERS b/vendor/k8s.io/client-go/util/cert/OWNERS
new file mode 100644
index 0000000..470b7a1
--- /dev/null
+++ b/vendor/k8s.io/client-go/util/cert/OWNERS
@@ -0,0 +1,7 @@
+approvers:
+- sig-auth-certificates-approvers
+reviewers:
+- sig-auth-certificates-reviewers
+labels:
+- sig/auth
+
diff --git a/vendor/k8s.io/client-go/util/cert/cert.go b/vendor/k8s.io/client-go/util/cert/cert.go
new file mode 100644
index 0000000..3429c82
--- /dev/null
+++ b/vendor/k8s.io/client-go/util/cert/cert.go
@@ -0,0 +1,269 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cert
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"crypto/rand"
+	cryptorand "crypto/rand"
+	"crypto/rsa"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"math"
+	"math/big"
+	"net"
+	"path"
+	"strings"
+	"time"
+)
+
+const (
+	rsaKeySize   = 2048
+	duration365d = time.Hour * 24 * 365
+)
+
+// Config contains the basic fields required for creating a certificate
+type Config struct {
+	CommonName   string
+	Organization []string
+	AltNames     AltNames
+	Usages       []x509.ExtKeyUsage
+}
+
+// AltNames contains the domain names and IP addresses that will be added
+// to the API Server's x509 certificate as subject alternative names (SANs).
+// The values will be passed directly to the x509.Certificate object.
+type AltNames struct {
+	DNSNames []string
+	IPs      []net.IP
+}
+
+// NewPrivateKey creates an RSA private key
+func NewPrivateKey() (*rsa.PrivateKey, error) {
+	return rsa.GenerateKey(cryptorand.Reader, rsaKeySize)
+}
+
+// NewSelfSignedCACert creates a CA certificate
+func NewSelfSignedCACert(cfg Config, key crypto.Signer) (*x509.Certificate, error) {
+	now := time.Now()
+	tmpl := x509.Certificate{
+		SerialNumber: new(big.Int).SetInt64(0),
+		Subject: pkix.Name{
+			CommonName:   cfg.CommonName,
+			Organization: cfg.Organization,
+		},
+		NotBefore:             now.UTC(),
+		NotAfter:              now.Add(duration365d * 10).UTC(),
+		KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
+		BasicConstraintsValid: true,
+		IsCA:                  true,
+	}
+
+	certDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &tmpl, &tmpl, key.Public(), key)
+	if err != nil {
+		return nil, err
+	}
+	return x509.ParseCertificate(certDERBytes)
+}
+
+// NewSignedCert creates a signed certificate using the given CA certificate and key
+func NewSignedCert(cfg Config, key crypto.Signer, caCert *x509.Certificate, caKey crypto.Signer) (*x509.Certificate, error) {
+	serial, err := cryptorand.Int(cryptorand.Reader, new(big.Int).SetInt64(math.MaxInt64))
+	if err != nil {
+		return nil, err
+	}
+	if len(cfg.CommonName) == 0 {
+		return nil, errors.New("must specify a CommonName")
+	}
+	if len(cfg.Usages) == 0 {
+		return nil, errors.New("must specify at least one ExtKeyUsage")
+	}
+
+	certTmpl := x509.Certificate{
+		Subject: pkix.Name{
+			CommonName:   cfg.CommonName,
+			Organization: cfg.Organization,
+		},
+		DNSNames:     cfg.AltNames.DNSNames,
+		IPAddresses:  cfg.AltNames.IPs,
+		SerialNumber: serial,
+		NotBefore:    caCert.NotBefore,
+		NotAfter:     time.Now().Add(duration365d).UTC(),
+		KeyUsage:     x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
+		ExtKeyUsage:  cfg.Usages,
+	}
+	certDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &certTmpl, caCert, key.Public(), caKey)
+	if err != nil {
+		return nil, err
+	}
+	return x509.ParseCertificate(certDERBytes)
+}
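+
+// Illustrative sketch (editor addition; names and usages are hypothetical):
+// creating a self-signed CA and using it to sign a serving certificate.
+//
+//	caKey, _ := NewPrivateKey()
+//	caCert, _ := NewSelfSignedCACert(Config{CommonName: "example-ca"}, caKey)
+//	srvKey, _ := NewPrivateKey()
+//	srvCert, _ := NewSignedCert(Config{
+//		CommonName: "example-server",
+//		Usages:     []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
+//	}, srvKey, caCert, caKey)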
+
+// MakeEllipticPrivateKeyPEM creates an ECDSA private key
+func MakeEllipticPrivateKeyPEM() ([]byte, error) {
+	privateKey, err := ecdsa.GenerateKey(elliptic.P256(), cryptorand.Reader)
+	if err != nil {
+		return nil, err
+	}
+
+	derBytes, err := x509.MarshalECPrivateKey(privateKey)
+	if err != nil {
+		return nil, err
+	}
+
+	privateKeyPemBlock := &pem.Block{
+		Type:  ECPrivateKeyBlockType,
+		Bytes: derBytes,
+	}
+	return pem.EncodeToMemory(privateKeyPemBlock), nil
+}
+
+// GenerateSelfSignedCertKey creates a self-signed certificate and key for the given host.
+// Host may be an IP or a DNS name
+// You may also specify additional subject alt names (either ip or dns names) for the certificate.
+func GenerateSelfSignedCertKey(host string, alternateIPs []net.IP, alternateDNS []string) ([]byte, []byte, error) {
+	return GenerateSelfSignedCertKeyWithFixtures(host, alternateIPs, alternateDNS, "")
+}
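+
+// Illustrative sketch (editor addition; host and SAN are hypothetical):
+//
+//	certPEM, keyPEM, err := GenerateSelfSignedCertKey("localhost",
+//		[]net.IP{net.ParseIP("127.0.0.1")}, nil)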
+
+// GenerateSelfSignedCertKeyWithFixtures creates a self-signed certificate and key for the given host.
+// Host may be an IP or a DNS name. You may also specify additional subject alt names (either ip or dns names)
+// for the certificate.
+//
+// If fixtureDirectory is non-empty, it is a directory path which can contain pre-generated certs. The format is:
+// <host>_<ip>-<ip>_<alternateDNS>-<alternateDNS>.crt
+// <host>_<ip>-<ip>_<alternateDNS>-<alternateDNS>.key
+// Certs/keys not existing in that directory are created.
+func GenerateSelfSignedCertKeyWithFixtures(host string, alternateIPs []net.IP, alternateDNS []string, fixtureDirectory string) ([]byte, []byte, error) {
+	validFrom := time.Now().Add(-time.Hour) // valid an hour earlier to avoid flakes due to clock skew
+	maxAge := time.Hour * 24 * 365          // one year self-signed certs
+
+	baseName := fmt.Sprintf("%s_%s_%s", host, strings.Join(ipsToStrings(alternateIPs), "-"), strings.Join(alternateDNS, "-"))
+	certFixturePath := path.Join(fixtureDirectory, baseName+".crt")
+	keyFixturePath := path.Join(fixtureDirectory, baseName+".key")
+	if len(fixtureDirectory) > 0 {
+		cert, err := ioutil.ReadFile(certFixturePath)
+		if err == nil {
+			key, err := ioutil.ReadFile(keyFixturePath)
+			if err == nil {
+				return cert, key, nil
+			}
+			return nil, nil, fmt.Errorf("cert %s can be read, but key %s cannot: %v", certFixturePath, keyFixturePath, err)
+		}
+		maxAge = 100 * time.Hour * 24 * 365 // 100 years fixtures
+	}
+
+	caKey, err := rsa.GenerateKey(cryptorand.Reader, 2048)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	caTemplate := x509.Certificate{
+		SerialNumber: big.NewInt(1),
+		Subject: pkix.Name{
+			CommonName: fmt.Sprintf("%s-ca@%d", host, time.Now().Unix()),
+		},
+		NotBefore: validFrom,
+		NotAfter:  validFrom.Add(maxAge),
+
+		KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
+		BasicConstraintsValid: true,
+		IsCA:                  true,
+	}
+
+	caDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &caTemplate, &caTemplate, &caKey.PublicKey, caKey)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	caCertificate, err := x509.ParseCertificate(caDERBytes)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	priv, err := rsa.GenerateKey(cryptorand.Reader, 2048)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	template := x509.Certificate{
+		SerialNumber: big.NewInt(2),
+		Subject: pkix.Name{
+			CommonName: fmt.Sprintf("%s@%d", host, time.Now().Unix()),
+		},
+		NotBefore: validFrom,
+		NotAfter:  validFrom.Add(maxAge),
+
+		KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
+		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
+		BasicConstraintsValid: true,
+	}
+
+	if ip := net.ParseIP(host); ip != nil {
+		template.IPAddresses = append(template.IPAddresses, ip)
+	} else {
+		template.DNSNames = append(template.DNSNames, host)
+	}
+
+	template.IPAddresses = append(template.IPAddresses, alternateIPs...)
+	template.DNSNames = append(template.DNSNames, alternateDNS...)
+
+	derBytes, err := x509.CreateCertificate(cryptorand.Reader, &template, caCertificate, &priv.PublicKey, caKey)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// Generate cert, followed by ca
+	certBuffer := bytes.Buffer{}
+	if err := pem.Encode(&certBuffer, &pem.Block{Type: CertificateBlockType, Bytes: derBytes}); err != nil {
+		return nil, nil, err
+	}
+	if err := pem.Encode(&certBuffer, &pem.Block{Type: CertificateBlockType, Bytes: caDERBytes}); err != nil {
+		return nil, nil, err
+	}
+
+	// Generate key
+	keyBuffer := bytes.Buffer{}
+	if err := pem.Encode(&keyBuffer, &pem.Block{Type: RSAPrivateKeyBlockType, Bytes: x509.MarshalPKCS1PrivateKey(priv)}); err != nil {
+		return nil, nil, err
+	}
+
+	if len(fixtureDirectory) > 0 {
+		if err := ioutil.WriteFile(certFixturePath, certBuffer.Bytes(), 0644); err != nil {
+			return nil, nil, fmt.Errorf("failed to write cert fixture to %s: %v", certFixturePath, err)
+		}
+		if err := ioutil.WriteFile(keyFixturePath, keyBuffer.Bytes(), 0644); err != nil {
+			return nil, nil, fmt.Errorf("failed to write key fixture to %s: %v", certFixturePath, err)
+		}
+	}
+
+	return certBuffer.Bytes(), keyBuffer.Bytes(), nil
+}
+
+func ipsToStrings(ips []net.IP) []string {
+	ss := make([]string, 0, len(ips))
+	for _, ip := range ips {
+		ss = append(ss, ip.String())
+	}
+	return ss
+}
diff --git a/vendor/k8s.io/client-go/util/cert/csr.go b/vendor/k8s.io/client-go/util/cert/csr.go
new file mode 100644
index 0000000..39a6751
--- /dev/null
+++ b/vendor/k8s.io/client-go/util/cert/csr.go
@@ -0,0 +1,75 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cert
+
+import (
+	cryptorand "crypto/rand"
+	"crypto/rsa"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/pem"
+	"net"
+)
+
+// MakeCSR generates a PEM-encoded CSR using the supplied private key, subject, and SANs.
+// All key types that are implemented via crypto.Signer are supported (This includes *rsa.PrivateKey and *ecdsa.PrivateKey.)
+func MakeCSR(privateKey interface{}, subject *pkix.Name, dnsSANs []string, ipSANs []net.IP) (csr []byte, err error) {
+	template := &x509.CertificateRequest{
+		Subject:     *subject,
+		DNSNames:    dnsSANs,
+		IPAddresses: ipSANs,
+	}
+
+	return MakeCSRFromTemplate(privateKey, template)
+}
+
+// MakeCSRFromTemplate generates a PEM-encoded CSR using the supplied private
+// key and certificate request as a template. All key types that are
+// implemented via crypto.Signer are supported (This includes *rsa.PrivateKey
+// and *ecdsa.PrivateKey.)
+func MakeCSRFromTemplate(privateKey interface{}, template *x509.CertificateRequest) ([]byte, error) {
+	t := *template
+	t.SignatureAlgorithm = sigType(privateKey)
+
+	csrDER, err := x509.CreateCertificateRequest(cryptorand.Reader, &t, privateKey)
+	if err != nil {
+		return nil, err
+	}
+
+	csrPemBlock := &pem.Block{
+		Type:  CertificateRequestBlockType,
+		Bytes: csrDER,
+	}
+
+	return pem.EncodeToMemory(csrPemBlock), nil
+}
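+
+// Illustrative sketch (editor addition; subject and SAN are hypothetical):
+//
+//	key, _ := rsa.GenerateKey(cryptorand.Reader, 2048)
+//	csrPEM, err := MakeCSR(key, &pkix.Name{CommonName: "svc.example"},
+//		[]string{"svc.example"}, nil)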
+
+func sigType(privateKey interface{}) x509.SignatureAlgorithm {
+	// Customize the signature for RSA keys, depending on the key size
+	if privateKey, ok := privateKey.(*rsa.PrivateKey); ok {
+		keySize := privateKey.N.BitLen()
+		switch {
+		case keySize >= 4096:
+			return x509.SHA512WithRSA
+		case keySize >= 3072:
+			return x509.SHA384WithRSA
+		default:
+			return x509.SHA256WithRSA
+		}
+	}
+	return x509.UnknownSignatureAlgorithm
+}
diff --git a/vendor/k8s.io/client-go/util/cert/io.go b/vendor/k8s.io/client-go/util/cert/io.go
new file mode 100644
index 0000000..a57bf09
--- /dev/null
+++ b/vendor/k8s.io/client-go/util/cert/io.go
@@ -0,0 +1,193 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cert
+
+import (
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/pem"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+)
+
+// CanReadCertAndKey returns true if the certificate and key files already exist,
+// otherwise it returns false. If only one of the pair is readable, an error is returned.
+func CanReadCertAndKey(certPath, keyPath string) (bool, error) {
+	certReadable := canReadFile(certPath)
+	keyReadable := canReadFile(keyPath)
+
+	if certReadable == false && keyReadable == false {
+		return false, nil
+	}
+
+	if certReadable == false {
+		return false, fmt.Errorf("error reading %s, certificate and key must be supplied as a pair", certPath)
+	}
+
+	if keyReadable == false {
+		return false, fmt.Errorf("error reading %s, certificate and key must be supplied as a pair", keyPath)
+	}
+
+	return true, nil
+}
+
+// canReadFile returns true if the file represented by path exists and is
+// readable; otherwise it returns false.
+func canReadFile(path string) bool {
+	f, err := os.Open(path)
+	if err != nil {
+		return false
+	}
+
+	defer f.Close()
+
+	return true
+}
+
+// WriteCert writes the pem-encoded certificate data to certPath.
+// The certificate file will be created with file mode 0644.
+// If the certificate file already exists, it will be overwritten.
+// The parent directory of the certPath will be created as needed with file mode 0755.
+func WriteCert(certPath string, data []byte) error {
+	if err := os.MkdirAll(filepath.Dir(certPath), os.FileMode(0755)); err != nil {
+		return err
+	}
+	return ioutil.WriteFile(certPath, data, os.FileMode(0644))
+}
+
+// WriteKey writes the pem-encoded key data to keyPath.
+// The key file will be created with file mode 0600.
+// If the key file already exists, it will be overwritten.
+// The parent directory of the keyPath will be created as needed with file mode 0755.
+func WriteKey(keyPath string, data []byte) error {
+	if err := os.MkdirAll(filepath.Dir(keyPath), os.FileMode(0755)); err != nil {
+		return err
+	}
+	return ioutil.WriteFile(keyPath, data, os.FileMode(0600))
+}
+
+// LoadOrGenerateKeyFile looks for a key in the file at the given path. If it
+// can't find one, it will generate a new key and store it there.
+func LoadOrGenerateKeyFile(keyPath string) (data []byte, wasGenerated bool, err error) {
+	loadedData, err := ioutil.ReadFile(keyPath)
+	// Call verifyKeyData to ensure the file wasn't empty/corrupt.
+	if err == nil && verifyKeyData(loadedData) {
+		return loadedData, false, err
+	}
+	if !os.IsNotExist(err) {
+		return nil, false, fmt.Errorf("error loading key from %s: %v", keyPath, err)
+	}
+
+	generatedData, err := MakeEllipticPrivateKeyPEM()
+	if err != nil {
+		return nil, false, fmt.Errorf("error generating key: %v", err)
+	}
+	if err := WriteKey(keyPath, generatedData); err != nil {
+		return nil, false, fmt.Errorf("error writing key to %s: %v", keyPath, err)
+	}
+	return generatedData, true, nil
+}
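+
+// Illustrative sketch (editor addition; the path is hypothetical): callers
+// can treat a pre-existing and a freshly generated key identically.
+//
+//	keyPEM, generated, err := LoadOrGenerateKeyFile("/var/lib/app/key.pem")
+//	if err != nil {
+//		// handle error
+//	}
+//	if generated {
+//		// first run: a new ECDSA key was created and written to disk
+//	}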
+
+// MarshalPrivateKeyToPEM converts a known private key type of RSA or ECDSA to
+// a PEM encoded block or returns an error.
+func MarshalPrivateKeyToPEM(privateKey crypto.PrivateKey) ([]byte, error) {
+	switch t := privateKey.(type) {
+	case *ecdsa.PrivateKey:
+		derBytes, err := x509.MarshalECPrivateKey(t)
+		if err != nil {
+			return nil, err
+		}
+		privateKeyPemBlock := &pem.Block{
+			Type:  ECPrivateKeyBlockType,
+			Bytes: derBytes,
+		}
+		return pem.EncodeToMemory(privateKeyPemBlock), nil
+	case *rsa.PrivateKey:
+		return EncodePrivateKeyPEM(t), nil
+	default:
+		return nil, fmt.Errorf("private key is not a recognized type: %T", privateKey)
+	}
+}
+
+// NewPool returns an x509.CertPool containing the certificates in the given PEM-encoded file.
+// Returns an error if the file could not be read, a certificate could not be parsed, or if the file does not contain any certificates
+func NewPool(filename string) (*x509.CertPool, error) {
+	certs, err := CertsFromFile(filename)
+	if err != nil {
+		return nil, err
+	}
+	pool := x509.NewCertPool()
+	for _, cert := range certs {
+		pool.AddCert(cert)
+	}
+	return pool, nil
+}
+
+// CertsFromFile returns the x509.Certificates contained in the given PEM-encoded file.
+// Returns an error if the file could not be read, a certificate could not be parsed, or if the file does not contain any certificates
+func CertsFromFile(file string) ([]*x509.Certificate, error) {
+	pemBlock, err := ioutil.ReadFile(file)
+	if err != nil {
+		return nil, err
+	}
+	certs, err := ParseCertsPEM(pemBlock)
+	if err != nil {
+		return nil, fmt.Errorf("error reading %s: %s", file, err)
+	}
+	return certs, nil
+}
+
+// PrivateKeyFromFile returns the private key in rsa.PrivateKey or ecdsa.PrivateKey format from a given PEM-encoded file.
+// Returns an error if the file could not be read or if the private key could not be parsed.
+func PrivateKeyFromFile(file string) (interface{}, error) {
+	data, err := ioutil.ReadFile(file)
+	if err != nil {
+		return nil, err
+	}
+	key, err := ParsePrivateKeyPEM(data)
+	if err != nil {
+		return nil, fmt.Errorf("error reading private key file %s: %v", file, err)
+	}
+	return key, nil
+}
+
+// PublicKeysFromFile returns the public keys in rsa.PublicKey or ecdsa.PublicKey format from a given PEM-encoded file.
+// Reads public keys from both public and private key files.
+func PublicKeysFromFile(file string) ([]interface{}, error) {
+	data, err := ioutil.ReadFile(file)
+	if err != nil {
+		return nil, err
+	}
+	keys, err := ParsePublicKeysPEM(data)
+	if err != nil {
+		return nil, fmt.Errorf("error reading public key file %s: %v", file, err)
+	}
+	return keys, nil
+}
+
+// verifyKeyData returns true if the provided data appears to be a valid private key.
+func verifyKeyData(data []byte) bool {
+	if len(data) == 0 {
+		return false
+	}
+	_, err := ParsePrivateKeyPEM(data)
+	return err == nil
+}
diff --git a/vendor/k8s.io/client-go/util/cert/pem.go b/vendor/k8s.io/client-go/util/cert/pem.go
new file mode 100644
index 0000000..b99e366
--- /dev/null
+++ b/vendor/k8s.io/client-go/util/cert/pem.go
@@ -0,0 +1,269 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cert
+
+import (
+	"crypto/ecdsa"
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/pem"
+	"errors"
+	"fmt"
+)
+
+const (
+	// ECPrivateKeyBlockType is a possible value for pem.Block.Type.
+	ECPrivateKeyBlockType = "EC PRIVATE KEY"
+	// RSAPrivateKeyBlockType is a possible value for pem.Block.Type.
+	RSAPrivateKeyBlockType = "RSA PRIVATE KEY"
+	// PrivateKeyBlockType is a possible value for pem.Block.Type.
+	PrivateKeyBlockType = "PRIVATE KEY"
+	// PublicKeyBlockType is a possible value for pem.Block.Type.
+	PublicKeyBlockType = "PUBLIC KEY"
+	// CertificateBlockType is a possible value for pem.Block.Type.
+	CertificateBlockType = "CERTIFICATE"
+	// CertificateRequestBlockType is a possible value for pem.Block.Type.
+	CertificateRequestBlockType = "CERTIFICATE REQUEST"
+)
+
+// EncodePublicKeyPEM returns PEM-encoded public data
+func EncodePublicKeyPEM(key *rsa.PublicKey) ([]byte, error) {
+	der, err := x509.MarshalPKIXPublicKey(key)
+	if err != nil {
+		return []byte{}, err
+	}
+	block := pem.Block{
+		Type:  PublicKeyBlockType,
+		Bytes: der,
+	}
+	return pem.EncodeToMemory(&block), nil
+}
+
+// EncodePrivateKeyPEM returns PEM-encoded private key data
+func EncodePrivateKeyPEM(key *rsa.PrivateKey) []byte {
+	block := pem.Block{
+		Type:  RSAPrivateKeyBlockType,
+		Bytes: x509.MarshalPKCS1PrivateKey(key),
+	}
+	return pem.EncodeToMemory(&block)
+}
+
+// EncodeCertPEM returns PEM-encoded certificate data
+func EncodeCertPEM(cert *x509.Certificate) []byte {
+	block := pem.Block{
+		Type:  CertificateBlockType,
+		Bytes: cert.Raw,
+	}
+	return pem.EncodeToMemory(&block)
+}
+
+// ParsePrivateKeyPEM returns a private key parsed from a PEM block in the supplied data.
+// Recognizes PEM blocks for "EC PRIVATE KEY", "RSA PRIVATE KEY", or "PRIVATE KEY"
+func ParsePrivateKeyPEM(keyData []byte) (interface{}, error) {
+	var privateKeyPemBlock *pem.Block
+	for {
+		privateKeyPemBlock, keyData = pem.Decode(keyData)
+		if privateKeyPemBlock == nil {
+			break
+		}
+
+		switch privateKeyPemBlock.Type {
+		case ECPrivateKeyBlockType:
+			// ECDSA Private Key in ASN.1 format
+			if key, err := x509.ParseECPrivateKey(privateKeyPemBlock.Bytes); err == nil {
+				return key, nil
+			}
+		case RSAPrivateKeyBlockType:
+			// RSA Private Key in PKCS#1 format
+			if key, err := x509.ParsePKCS1PrivateKey(privateKeyPemBlock.Bytes); err == nil {
+				return key, nil
+			}
+		case PrivateKeyBlockType:
+			// RSA or ECDSA Private Key in unencrypted PKCS#8 format
+			if key, err := x509.ParsePKCS8PrivateKey(privateKeyPemBlock.Bytes); err == nil {
+				return key, nil
+			}
+		}
+
+		// tolerate non-key PEM blocks for compatibility with things like "EC PARAMETERS" blocks
+		// originally, only the first PEM block was parsed and expected to be a key block
+	}
+
+	// we read all the PEM blocks and didn't recognize one
+	return nil, fmt.Errorf("data does not contain a valid RSA or ECDSA private key")
+}
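+
+// Illustrative sketch (editor addition): the caller type-switches on the
+// result, since both RSA and ECDSA keys are possible.
+//
+//	key, err := ParsePrivateKeyPEM(pemBytes) // pemBytes is assumed to hold a key
+//	switch key.(type) {
+//	case *rsa.PrivateKey: // PKCS#1 or PKCS#8
+//	case *ecdsa.PrivateKey: // SEC 1 or PKCS#8
+//	}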
+
+// ParsePublicKeysPEM is a helper function for reading an array of rsa.PublicKey or ecdsa.PublicKey from a PEM-encoded byte array.
+// Reads public keys from both public and private key files.
+func ParsePublicKeysPEM(keyData []byte) ([]interface{}, error) {
+	var block *pem.Block
+	keys := []interface{}{}
+	for {
+		// read the next block
+		block, keyData = pem.Decode(keyData)
+		if block == nil {
+			break
+		}
+
+		// test block against parsing functions
+		if privateKey, err := parseRSAPrivateKey(block.Bytes); err == nil {
+			keys = append(keys, &privateKey.PublicKey)
+			continue
+		}
+		if publicKey, err := parseRSAPublicKey(block.Bytes); err == nil {
+			keys = append(keys, publicKey)
+			continue
+		}
+		if privateKey, err := parseECPrivateKey(block.Bytes); err == nil {
+			keys = append(keys, &privateKey.PublicKey)
+			continue
+		}
+		if publicKey, err := parseECPublicKey(block.Bytes); err == nil {
+			keys = append(keys, publicKey)
+			continue
+		}
+
+		// tolerate non-key PEM blocks for backwards compatibility
+		// originally, only the first PEM block was parsed and expected to be a key block
+	}
+
+	if len(keys) == 0 {
+		return nil, fmt.Errorf("data does not contain any valid RSA or ECDSA public keys")
+	}
+	return keys, nil
+}
+
+// ParseCertsPEM returns the x509.Certificates contained in the given PEM-encoded byte array
+// Returns an error if a certificate could not be parsed, or if the data does not contain any certificates
+func ParseCertsPEM(pemCerts []byte) ([]*x509.Certificate, error) {
+	ok := false
+	certs := []*x509.Certificate{}
+	for len(pemCerts) > 0 {
+		var block *pem.Block
+		block, pemCerts = pem.Decode(pemCerts)
+		if block == nil {
+			break
+		}
+		// Only use PEM "CERTIFICATE" blocks without extra headers
+		if block.Type != CertificateBlockType || len(block.Headers) != 0 {
+			continue
+		}
+
+		cert, err := x509.ParseCertificate(block.Bytes)
+		if err != nil {
+			return certs, err
+		}
+
+		certs = append(certs, cert)
+		ok = true
+	}
+
+	if !ok {
+		return certs, errors.New("data does not contain any valid RSA or ECDSA certificates")
+	}
+	return certs, nil
+}
+
+// parseRSAPublicKey parses a single RSA public key from the provided data
+func parseRSAPublicKey(data []byte) (*rsa.PublicKey, error) {
+	var err error
+
+	// Parse the key
+	var parsedKey interface{}
+	if parsedKey, err = x509.ParsePKIXPublicKey(data); err != nil {
+		if cert, err := x509.ParseCertificate(data); err == nil {
+			parsedKey = cert.PublicKey
+		} else {
+			return nil, err
+		}
+	}
+
+	// Test if parsed key is an RSA Public Key
+	var pubKey *rsa.PublicKey
+	var ok bool
+	if pubKey, ok = parsedKey.(*rsa.PublicKey); !ok {
+		return nil, fmt.Errorf("data doesn't contain valid RSA Public Key")
+	}
+
+	return pubKey, nil
+}
+
+// parseRSAPrivateKey parses a single RSA private key from the provided data
+func parseRSAPrivateKey(data []byte) (*rsa.PrivateKey, error) {
+	var err error
+
+	// Parse the key
+	var parsedKey interface{}
+	if parsedKey, err = x509.ParsePKCS1PrivateKey(data); err != nil {
+		if parsedKey, err = x509.ParsePKCS8PrivateKey(data); err != nil {
+			return nil, err
+		}
+	}
+
+	// Test if parsed key is an RSA Private Key
+	var privKey *rsa.PrivateKey
+	var ok bool
+	if privKey, ok = parsedKey.(*rsa.PrivateKey); !ok {
+		return nil, fmt.Errorf("data doesn't contain valid RSA Private Key")
+	}
+
+	return privKey, nil
+}
+
+// parseECPublicKey parses a single ECDSA public key from the provided data
+func parseECPublicKey(data []byte) (*ecdsa.PublicKey, error) {
+	var err error
+
+	// Parse the key
+	var parsedKey interface{}
+	if parsedKey, err = x509.ParsePKIXPublicKey(data); err != nil {
+		if cert, err := x509.ParseCertificate(data); err == nil {
+			parsedKey = cert.PublicKey
+		} else {
+			return nil, err
+		}
+	}
+
+	// Test if parsed key is an ECDSA Public Key
+	var pubKey *ecdsa.PublicKey
+	var ok bool
+	if pubKey, ok = parsedKey.(*ecdsa.PublicKey); !ok {
+		return nil, fmt.Errorf("data doesn't contain valid ECDSA Public Key")
+	}
+
+	return pubKey, nil
+}
+
+// parseECPrivateKey parses a single ECDSA private key from the provided data
+func parseECPrivateKey(data []byte) (*ecdsa.PrivateKey, error) {
+	var err error
+
+	// Parse the key
+	var parsedKey interface{}
+	if parsedKey, err = x509.ParseECPrivateKey(data); err != nil {
+		return nil, err
+	}
+
+	// Test if parsed key is an ECDSA Private Key
+	var privKey *ecdsa.PrivateKey
+	var ok bool
+	if privKey, ok = parsedKey.(*ecdsa.PrivateKey); !ok {
+		return nil, fmt.Errorf("data doesn't contain valid ECDSA Private Key")
+	}
+
+	return privKey, nil
+}
diff --git a/vendor/k8s.io/client-go/util/connrotation/connrotation.go b/vendor/k8s.io/client-go/util/connrotation/connrotation.go
new file mode 100644
index 0000000..235a9e0
--- /dev/null
+++ b/vendor/k8s.io/client-go/util/connrotation/connrotation.go
@@ -0,0 +1,105 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package connrotation implements a connection dialer that tracks and can close
+// all created connections.
+//
+// This is used for credential rotation of long-lived connections, when there's
+// no way to re-authenticate on a live connection.
+package connrotation
+
+import (
+	"context"
+	"net"
+	"sync"
+)
+
+// DialFunc is a shorthand for signature of net.DialContext.
+type DialFunc func(ctx context.Context, network, address string) (net.Conn, error)
+
+// Dialer opens connections through Dial and tracks them.
+type Dialer struct {
+	dial DialFunc
+
+	mu    sync.Mutex
+	conns map[*closableConn]struct{}
+}
+
+// NewDialer creates a new Dialer instance.
+//
+// If dial is not nil, it will be used to create new underlying connections.
+// Otherwise net.DialContext is used.
+func NewDialer(dial DialFunc) *Dialer {
+	if dial == nil {
+		// Fall back to the standard dialer so the behavior documented
+		// above actually holds.
+		dial = (&net.Dialer{}).DialContext
+	}
+	return &Dialer{
+		dial:  dial,
+		conns: make(map[*closableConn]struct{}),
+	}
+}
+
+// CloseAll forcibly closes all tracked connections.
+//
+// Note: new connections may get created before CloseAll returns.
+func (d *Dialer) CloseAll() {
+	d.mu.Lock()
+	conns := d.conns
+	d.conns = make(map[*closableConn]struct{})
+	d.mu.Unlock()
+
+	for conn := range conns {
+		conn.Close()
+	}
+}
+
+// Dial creates a new tracked connection.
+func (d *Dialer) Dial(network, address string) (net.Conn, error) {
+	return d.DialContext(context.Background(), network, address)
+}
+
+// DialContext creates a new tracked connection.
+func (d *Dialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) {
+	conn, err := d.dial(ctx, network, address)
+	if err != nil {
+		return nil, err
+	}
+
+	closable := &closableConn{Conn: conn}
+
+	// Start tracking the connection
+	d.mu.Lock()
+	d.conns[closable] = struct{}{}
+	d.mu.Unlock()
+
+	// When the connection is closed, remove it from the map. This will
+	// be no-op if the connection isn't in the map, e.g. if CloseAll()
+	// is called.
+	closable.onClose = func() {
+		d.mu.Lock()
+		delete(d.conns, closable)
+		d.mu.Unlock()
+	}
+
+	return closable, nil
+}
+
+type closableConn struct {
+	onClose func()
+	net.Conn
+}
+
+func (c *closableConn) Close() error {
+	go c.onClose()
+	return c.Conn.Close()
+}
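+
+// Illustrative sketch (editor addition; the net/http wiring is assumed):
+// using the dialer so that all of a client's connections can be severed
+// after a credential rotation.
+//
+//	d := NewDialer((&net.Dialer{}).DialContext)
+//	transport := &http.Transport{DialContext: d.DialContext}
+//	// ... later, after rotating client certificates:
+//	d.CloseAll()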
diff --git a/vendor/k8s.io/client-go/util/flowcontrol/backoff.go b/vendor/k8s.io/client-go/util/flowcontrol/backoff.go
new file mode 100644
index 0000000..71d442a
--- /dev/null
+++ b/vendor/k8s.io/client-go/util/flowcontrol/backoff.go
@@ -0,0 +1,149 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package flowcontrol
+
+import (
+	"sync"
+	"time"
+
+	"k8s.io/apimachinery/pkg/util/clock"
+	"k8s.io/client-go/util/integer"
+)
+
+type backoffEntry struct {
+	backoff    time.Duration
+	lastUpdate time.Time
+}
+
+type Backoff struct {
+	sync.Mutex
+	Clock           clock.Clock
+	defaultDuration time.Duration
+	maxDuration     time.Duration
+	perItemBackoff  map[string]*backoffEntry
+}
+
+func NewFakeBackOff(initial, max time.Duration, tc *clock.FakeClock) *Backoff {
+	return &Backoff{
+		perItemBackoff:  map[string]*backoffEntry{},
+		Clock:           tc,
+		defaultDuration: initial,
+		maxDuration:     max,
+	}
+}
+
+func NewBackOff(initial, max time.Duration) *Backoff {
+	return &Backoff{
+		perItemBackoff:  map[string]*backoffEntry{},
+		Clock:           clock.RealClock{},
+		defaultDuration: initial,
+		maxDuration:     max,
+	}
+}
+
+// Get returns the current backoff Duration for the given id, or zero if none is recorded.
+func (p *Backoff) Get(id string) time.Duration {
+	p.Lock()
+	defer p.Unlock()
+	var delay time.Duration
+	entry, ok := p.perItemBackoff[id]
+	if ok {
+		delay = entry.backoff
+	}
+	return delay
+}
+
+// Next moves the backoff for the given id to the next mark, doubling it and capping at maxDuration.
+func (p *Backoff) Next(id string, eventTime time.Time) {
+	p.Lock()
+	defer p.Unlock()
+	entry, ok := p.perItemBackoff[id]
+	if !ok || hasExpired(eventTime, entry.lastUpdate, p.maxDuration) {
+		entry = p.initEntryUnsafe(id)
+	} else {
+		delay := entry.backoff * 2 // exponential
+		entry.backoff = time.Duration(integer.Int64Min(int64(delay), int64(p.maxDuration)))
+	}
+	entry.lastUpdate = p.Clock.Now()
+}
+
+// Reset forces clearing of all backoff data for a given key.
+func (p *Backoff) Reset(id string) {
+	p.Lock()
+	defer p.Unlock()
+	delete(p.perItemBackoff, id)
+}
+
+// IsInBackOffSince returns true if the elapsed time since eventTime is smaller than the current backoff window.
+func (p *Backoff) IsInBackOffSince(id string, eventTime time.Time) bool {
+	p.Lock()
+	defer p.Unlock()
+	entry, ok := p.perItemBackoff[id]
+	if !ok {
+		return false
+	}
+	if hasExpired(eventTime, entry.lastUpdate, p.maxDuration) {
+		return false
+	}
+	return p.Clock.Now().Sub(eventTime) < entry.backoff
+}
+
+// IsInBackOffSinceUpdate returns true if the time since the last update is less than the current backoff window.
+func (p *Backoff) IsInBackOffSinceUpdate(id string, eventTime time.Time) bool {
+	p.Lock()
+	defer p.Unlock()
+	entry, ok := p.perItemBackoff[id]
+	if !ok {
+		return false
+	}
+	if hasExpired(eventTime, entry.lastUpdate, p.maxDuration) {
+		return false
+	}
+	return eventTime.Sub(entry.lastUpdate) < entry.backoff
+}
+
+// GC garbage collects records that have aged past 2*maxDuration. Backoff users are expected
+// to invoke this periodically.
+func (p *Backoff) GC() {
+	p.Lock()
+	defer p.Unlock()
+	now := p.Clock.Now()
+	for id, entry := range p.perItemBackoff {
+		if now.Sub(entry.lastUpdate) > p.maxDuration*2 {
+			// GC when entry has not been updated for 2*maxDuration
+			delete(p.perItemBackoff, id)
+		}
+	}
+}
+
+func (p *Backoff) DeleteEntry(id string) {
+	p.Lock()
+	defer p.Unlock()
+	delete(p.perItemBackoff, id)
+}
+
+// initEntryUnsafe must be called with the lock on *Backoff held.
+func (p *Backoff) initEntryUnsafe(id string) *backoffEntry {
+	entry := &backoffEntry{backoff: p.defaultDuration}
+	p.perItemBackoff[id] = entry
+	return entry
+}
+
+// After 2*maxDuration we restart the backoff factor to the beginning
+func hasExpired(eventTime time.Time, lastUpdate time.Time, maxDuration time.Duration) bool {
+	return eventTime.Sub(lastUpdate) > maxDuration*2 // consider stable if it's ok for twice the maxDuration
+}
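+
+// Illustrative sketch (editor addition; the durations, item ID, and doWork
+// are hypothetical): a per-item retry loop.
+//
+//	b := NewBackOff(time.Second, 32*time.Second)
+//	id := "example-item"
+//	if b.IsInBackOffSinceUpdate(id, b.Clock.Now()) {
+//		return // still inside the backoff window; retry later
+//	}
+//	if err := doWork(); err != nil {
+//		b.Next(id, b.Clock.Now()) // widen the window: 1s, 2s, 4s, ... capped at 32s
+//	} else {
+//		b.Reset(id)
+//	}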
diff --git a/vendor/k8s.io/client-go/util/flowcontrol/throttle.go b/vendor/k8s.io/client-go/util/flowcontrol/throttle.go
new file mode 100644
index 0000000..e671c04
--- /dev/null
+++ b/vendor/k8s.io/client-go/util/flowcontrol/throttle.go
@@ -0,0 +1,143 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package flowcontrol
+
+import (
+	"sync"
+	"time"
+
+	"golang.org/x/time/rate"
+)
+
+type RateLimiter interface {
+	// TryAccept returns true if a token is taken immediately. Otherwise,
+	// it returns false.
+	TryAccept() bool
+	// Accept returns once a token becomes available.
+	Accept()
+	// Stop stops the rate limiter; subsequent calls to TryAccept will return false.
+	Stop()
+	// QPS returns QPS of this rate limiter
+	QPS() float32
+}
+
+type tokenBucketRateLimiter struct {
+	limiter *rate.Limiter
+	clock   Clock
+	qps     float32
+}
+
+// NewTokenBucketRateLimiter creates a rate limiter which implements a token bucket approach.
+// The rate limiter allows bursts of up to 'burst' to exceed the QPS, while still maintaining a
+// smoothed qps rate of 'qps'.
+// The bucket is initially filled with 'burst' tokens, and refills at a rate of 'qps'.
+// The maximum number of tokens in the bucket is capped at 'burst'.
+func NewTokenBucketRateLimiter(qps float32, burst int) RateLimiter {
+	limiter := rate.NewLimiter(rate.Limit(qps), burst)
+	return newTokenBucketRateLimiter(limiter, realClock{}, qps)
+}
+
+// Clock is an injectable, mockable clock interface.
+type Clock interface {
+	Now() time.Time
+	Sleep(time.Duration)
+}
+
+type realClock struct{}
+
+func (realClock) Now() time.Time {
+	return time.Now()
+}
+func (realClock) Sleep(d time.Duration) {
+	time.Sleep(d)
+}
+
+// NewTokenBucketRateLimiterWithClock is identical to NewTokenBucketRateLimiter
+// but allows an injectable clock, for testing.
+func NewTokenBucketRateLimiterWithClock(qps float32, burst int, c Clock) RateLimiter {
+	limiter := rate.NewLimiter(rate.Limit(qps), burst)
+	return newTokenBucketRateLimiter(limiter, c, qps)
+}
+
+func newTokenBucketRateLimiter(limiter *rate.Limiter, c Clock, qps float32) RateLimiter {
+	return &tokenBucketRateLimiter{
+		limiter: limiter,
+		clock:   c,
+		qps:     qps,
+	}
+}
+
+func (t *tokenBucketRateLimiter) TryAccept() bool {
+	return t.limiter.AllowN(t.clock.Now(), 1)
+}
+
+// Accept will block until a token becomes available
+func (t *tokenBucketRateLimiter) Accept() {
+	now := t.clock.Now()
+	t.clock.Sleep(t.limiter.ReserveN(now, 1).DelayFrom(now))
+}
+
+func (t *tokenBucketRateLimiter) Stop() {
+}
+
+func (t *tokenBucketRateLimiter) QPS() float32 {
+	return t.qps
+}
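+
+// Illustrative sketch (editor addition; work and process are hypothetical):
+// pacing calls at a sustained 5 QPS with bursts of up to 10.
+//
+//	limiter := NewTokenBucketRateLimiter(5.0, 10)
+//	for _, item := range work {
+//		limiter.Accept() // blocks until a token is available
+//		process(item)
+//	}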
+
+type fakeAlwaysRateLimiter struct{}
+
+func NewFakeAlwaysRateLimiter() RateLimiter {
+	return &fakeAlwaysRateLimiter{}
+}
+
+func (t *fakeAlwaysRateLimiter) TryAccept() bool {
+	return true
+}
+
+func (t *fakeAlwaysRateLimiter) Stop() {}
+
+func (t *fakeAlwaysRateLimiter) Accept() {}
+
+func (t *fakeAlwaysRateLimiter) QPS() float32 {
+	return 1
+}
+
+type fakeNeverRateLimiter struct {
+	wg sync.WaitGroup
+}
+
+func NewFakeNeverRateLimiter() RateLimiter {
+	rl := fakeNeverRateLimiter{}
+	rl.wg.Add(1)
+	return &rl
+}
+
+func (t *fakeNeverRateLimiter) TryAccept() bool {
+	return false
+}
+
+func (t *fakeNeverRateLimiter) Stop() {
+	t.wg.Done()
+}
+
+func (t *fakeNeverRateLimiter) Accept() {
+	t.wg.Wait()
+}
+
+func (t *fakeNeverRateLimiter) QPS() float32 {
+	return 1
+}
diff --git a/vendor/k8s.io/client-go/util/integer/integer.go b/vendor/k8s.io/client-go/util/integer/integer.go
new file mode 100644
index 0000000..c6ea106
--- /dev/null
+++ b/vendor/k8s.io/client-go/util/integer/integer.go
@@ -0,0 +1,67 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package integer
+
+func IntMax(a, b int) int {
+	if b > a {
+		return b
+	}
+	return a
+}
+
+func IntMin(a, b int) int {
+	if b < a {
+		return b
+	}
+	return a
+}
+
+func Int32Max(a, b int32) int32 {
+	if b > a {
+		return b
+	}
+	return a
+}
+
+func Int32Min(a, b int32) int32 {
+	if b < a {
+		return b
+	}
+	return a
+}
+
+func Int64Max(a, b int64) int64 {
+	if b > a {
+		return b
+	}
+	return a
+}
+
+func Int64Min(a, b int64) int64 {
+	if b < a {
+		return b
+	}
+	return a
+}
+
+// RoundToInt32 rounds floats into integer numbers.
+func RoundToInt32(a float64) int32 {
+	if a < 0 {
+		return int32(a - 0.5)
+	}
+	return int32(a + 0.5)
+}
diff --git a/vendor/k8s.io/klog/.travis.yml b/vendor/k8s.io/klog/.travis.yml
new file mode 100644
index 0000000..fc0d2ca
--- /dev/null
+++ b/vendor/k8s.io/klog/.travis.yml
@@ -0,0 +1,14 @@
+language: go
+dist: xenial
+go:
+  - 1.9.x
+  - 1.10.x
+  - 1.11.x
+script:
+  - go get -t -v ./...
+  - diff -u <(echo -n) <(gofmt -d .)
+  - diff -u <(echo -n) <(golint $(go list -e ./...))
+  - go tool vet .
+  - go test -v -race ./...
+install:
+  - go get golang.org/x/lint/golint
diff --git a/vendor/k8s.io/klog/CONTRIBUTING.md b/vendor/k8s.io/klog/CONTRIBUTING.md
new file mode 100644
index 0000000..de47115
--- /dev/null
+++ b/vendor/k8s.io/klog/CONTRIBUTING.md
@@ -0,0 +1,31 @@
+# Contributing Guidelines
+
+Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://github.com/kubernetes/community)! The Kubernetes community abides by the CNCF [code of conduct](code-of-conduct.md). Here is an excerpt:
+
+_As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._
+
+## Getting Started
+
+We have full documentation on how to get started contributing here:
+
+<!---
+If your repo has certain guidelines for contribution, put them here ahead of the general k8s resources
+-->
+
+- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests
+- [Kubernetes Contributor Guide](http://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](http://git.k8s.io/community/contributors/guide#contributing)
+- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet.md) - Common resources for existing developers
+
+## Mentorship
+
+- [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers!
+
+<!---
+Custom Information - if you're copying this template for the first time you can add custom content here, for example:
+
+## Contact Information
+
+- [Slack channel](https://kubernetes.slack.com/messages/kubernetes-users) - Replace `kubernetes-users` with your slack channel string; this will send users directly to your channel.
+- [Mailing list](URL)
+
+-->
diff --git a/vendor/k8s.io/klog/LICENSE b/vendor/k8s.io/klog/LICENSE
new file mode 100644
index 0000000..37ec93a
--- /dev/null
+++ b/vendor/k8s.io/klog/LICENSE
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/k8s.io/klog/OWNERS b/vendor/k8s.io/klog/OWNERS
new file mode 100644
index 0000000..56b0eb0
--- /dev/null
+++ b/vendor/k8s.io/klog/OWNERS
@@ -0,0 +1,11 @@
+# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md
+
+approvers:
+  - dims
+  - thockin
+  - justinsb
+  - tallclair
+  - piosz
+  - brancz
+  - DirectXMan12
+  - lavalamp
diff --git a/vendor/k8s.io/klog/README.md b/vendor/k8s.io/klog/README.md
new file mode 100644
index 0000000..a747f53
--- /dev/null
+++ b/vendor/k8s.io/klog/README.md
@@ -0,0 +1,51 @@
+klog
+====
+
+klog is a permanent fork of https://github.com/golang/glog. The original README from glog is below.
+
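+A minimal usage sketch (a hypothetical program; klog registers its flags via
+InitFlags and buffers output until Flush):
+
+	package main
+
+	import (
+		"flag"
+
+		"k8s.io/klog"
+	)
+
+	func main() {
+		klog.InitFlags(nil) // register klog's flags on flag.CommandLine
+		flag.Parse()
+		defer klog.Flush() // write out any buffered log I/O before exit
+		klog.Info("hello from klog")
+	}
+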
+----
+
+glog
+====
+
+Leveled execution logs for Go.
+
+This is an efficient pure Go implementation of leveled logs in the
+manner of the open source C++ package
+	https://github.com/google/glog
+
+By binding methods to booleans it is possible to use the log package
+without paying the expense of evaluating the arguments to the log.
+Through the -vmodule flag, the package also provides fine-grained
+control over logging at the file level.
+
+The comment from glog.go introduces the ideas:
+
+	Package glog implements logging analogous to the Google-internal
+	C++ INFO/ERROR/V setup.  It provides functions Info, Warning,
+	Error, Fatal, plus formatting variants such as Infof. It
+	also provides V-style logging controlled by the -v and
+	-vmodule=file=2 flags.
+	
+	Basic examples:
+	
+		glog.Info("Prepare to repel boarders")
+	
+		glog.Fatalf("Initialization failed: %s", err)
+	
+	See the documentation for the V function for an explanation
+	of these examples:
+	
+		if glog.V(2) {
+			glog.Info("Starting transaction...")
+		}
+	
+		glog.V(2).Infoln("Processed", nItems, "elements")
+
+
+The repository contains an open source version of the log package
+used inside Google. The master copy of the source lives inside
+Google, not here. The code in this repo is for export only and is not itself
+under development. Feature requests will be ignored.
+
+Send bug reports to golang-nuts@googlegroups.com.
diff --git a/vendor/k8s.io/klog/RELEASE.md b/vendor/k8s.io/klog/RELEASE.md
new file mode 100644
index 0000000..b53eb96
--- /dev/null
+++ b/vendor/k8s.io/klog/RELEASE.md
@@ -0,0 +1,9 @@
+# Release Process
+
+`klog` is released on an as-needed basis. The process is as follows:
+
+1. An issue is filed proposing a new release, with a changelog since the last release
+1. All [OWNERS](OWNERS) must LGTM this release
+1. An OWNER runs `git tag -s $VERSION`, inserts the changelog, and pushes the tag with `git push origin $VERSION`
+1. The release issue is closed
+1. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] klog $VERSION is released`
diff --git a/vendor/k8s.io/klog/SECURITY_CONTACTS b/vendor/k8s.io/klog/SECURITY_CONTACTS
new file mode 100644
index 0000000..520ddb5
--- /dev/null
+++ b/vendor/k8s.io/klog/SECURITY_CONTACTS
@@ -0,0 +1,20 @@
+# Defined below are the security contacts for this repo.
+#
+# They are the contact point for the Product Security Team to reach out
+# to for triaging and handling of incoming issues.
+#
+# The below names agree to abide by the
+# [Embargo Policy](https://github.com/kubernetes/sig-release/blob/master/security-release-process-documentation/security-release-process.md#embargo-policy)
+# and will be removed and replaced if they violate that agreement.
+#
+# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
+# INSTRUCTIONS AT https://kubernetes.io/security/
+
+dims
+thockin
+justinsb
+tallclair
+piosz
+brancz
+DirectXMan12
+lavalamp
diff --git a/vendor/k8s.io/klog/klog.go b/vendor/k8s.io/klog/klog.go
new file mode 100644
index 0000000..13bcc81
--- /dev/null
+++ b/vendor/k8s.io/klog/klog.go
@@ -0,0 +1,1239 @@
+// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
+//
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package klog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup.
+// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as
+// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags.
+//
+// Basic examples:
+//
+//	klog.Info("Prepare to repel boarders")
+//
+//	klog.Fatalf("Initialization failed: %s", err)
+//
+// See the documentation for the V function for an explanation of these examples:
+//
+//	if klog.V(2) {
+//		klog.Info("Starting transaction...")
+//	}
+//
+//	klog.V(2).Infoln("Processed", nItems, "elements")
+//
+// Log output is buffered and written periodically using Flush. Programs
+// should call Flush before exiting to guarantee all log output is written.
+//
+// By default, all log statements write to files in a temporary directory.
+// This package provides several flags that modify this behavior.
+// As a result, flag.Parse must be called before any logging is done.
+//
+//	-logtostderr=false
+//		Logs are written to standard error instead of to files.
+//	-alsologtostderr=false
+//		Logs are written to standard error as well as to files.
+//	-stderrthreshold=ERROR
+//		Log events at or above this severity are logged to standard
+//		error as well as to files.
+//	-log_dir=""
+//		Log files will be written to this directory instead of the
+//		default temporary directory.
+//
+//	Other flags provide aids to debugging.
+//
+//	-log_backtrace_at=""
+//		When set to a file and line number holding a logging statement,
+//		such as
+//			-log_backtrace_at=gopherflakes.go:234
+//		a stack trace will be written to the Info log whenever execution
+//		hits that statement. (Unlike with -vmodule, the ".go" must be
+//		present.)
+//	-v=0
+//		Enable V-leveled logging at the specified level.
+//	-vmodule=""
+//		The syntax of the argument is a comma-separated list of pattern=N,
+//		where pattern is a literal file name (minus the ".go" suffix) or
+//		"glob" pattern and N is a V level. For instance,
+//			-vmodule=gopher*=3
+//		sets the V level to 3 in all Go files whose names begin "gopher".
+//
+package klog
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"flag"
+	"fmt"
+	"io"
+	stdLog "log"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+// severity identifies the sort of log: info, warning etc. It also implements
+// the flag.Value interface. The -stderrthreshold flag is of type severity and
+// should be modified only through the flag.Value interface. The values match
+// the corresponding constants in C++.
+type severity int32 // sync/atomic int32
+
+// These constants identify the log levels in order of increasing severity.
+// A message written to a high-severity log file is also written to each
+// lower-severity log file.
+const (
+	infoLog severity = iota
+	warningLog
+	errorLog
+	fatalLog
+	numSeverity = 4
+)
+
+const severityChar = "IWEF"
+
+var severityName = []string{
+	infoLog:    "INFO",
+	warningLog: "WARNING",
+	errorLog:   "ERROR",
+	fatalLog:   "FATAL",
+}
+
+// get returns the value of the severity.
+func (s *severity) get() severity {
+	return severity(atomic.LoadInt32((*int32)(s)))
+}
+
+// set sets the value of the severity.
+func (s *severity) set(val severity) {
+	atomic.StoreInt32((*int32)(s), int32(val))
+}
+
+// String is part of the flag.Value interface.
+func (s *severity) String() string {
+	return strconv.FormatInt(int64(*s), 10)
+}
+
+// Get is part of the flag.Value interface.
+func (s *severity) Get() interface{} {
+	return *s
+}
+
+// Set is part of the flag.Value interface.
+func (s *severity) Set(value string) error {
+	var threshold severity
+	// Is it a known name?
+	if v, ok := severityByName(value); ok {
+		threshold = v
+	} else {
+		v, err := strconv.Atoi(value)
+		if err != nil {
+			return err
+		}
+		threshold = severity(v)
+	}
+	logging.stderrThreshold.set(threshold)
+	return nil
+}
+
+func severityByName(s string) (severity, bool) {
+	s = strings.ToUpper(s)
+	for i, name := range severityName {
+		if name == s {
+			return severity(i), true
+		}
+	}
+	return 0, false
+}
+
+// OutputStats tracks the number of output lines and bytes written.
+type OutputStats struct {
+	lines int64
+	bytes int64
+}
+
+// Lines returns the number of lines written.
+func (s *OutputStats) Lines() int64 {
+	return atomic.LoadInt64(&s.lines)
+}
+
+// Bytes returns the number of bytes written.
+func (s *OutputStats) Bytes() int64 {
+	return atomic.LoadInt64(&s.bytes)
+}
+
+// Stats tracks the number of lines of output and number of bytes
+// per severity level. Values must be read with atomic.LoadInt64.
+var Stats struct {
+	Info, Warning, Error OutputStats
+}
+
+var severityStats = [numSeverity]*OutputStats{
+	infoLog:    &Stats.Info,
+	warningLog: &Stats.Warning,
+	errorLog:   &Stats.Error,
+}
+
+// Level is exported because it appears in the arguments to V and is
+// the type of the v flag, which can be set programmatically.
+// It's a distinct type because we want to discriminate it from logType.
+// Variables of type level are only changed under logging.mu.
+// The -v flag is read only with atomic ops, so the state of the logging
+// module is consistent.
+
+// Level is treated as a sync/atomic int32.
+
+// Level specifies a level of verbosity for V logs. *Level implements
+// flag.Value; the -v flag is of type Level and should be modified
+// only through the flag.Value interface.
+type Level int32
+
+// get returns the value of the Level.
+func (l *Level) get() Level {
+	return Level(atomic.LoadInt32((*int32)(l)))
+}
+
+// set sets the value of the Level.
+func (l *Level) set(val Level) {
+	atomic.StoreInt32((*int32)(l), int32(val))
+}
+
+// String is part of the flag.Value interface.
+func (l *Level) String() string {
+	return strconv.FormatInt(int64(*l), 10)
+}
+
+// Get is part of the flag.Value interface.
+func (l *Level) Get() interface{} {
+	return *l
+}
+
+// Set is part of the flag.Value interface.
+func (l *Level) Set(value string) error {
+	v, err := strconv.Atoi(value)
+	if err != nil {
+		return err
+	}
+	logging.mu.Lock()
+	defer logging.mu.Unlock()
+	logging.setVState(Level(v), logging.vmodule.filter, false)
+	return nil
+}
+
+// moduleSpec represents the setting of the -vmodule flag.
+type moduleSpec struct {
+	filter []modulePat
+}
+
+// modulePat contains a filter for the -vmodule flag.
+// It holds a verbosity level and a file pattern to match.
+type modulePat struct {
+	pattern string
+	literal bool // The pattern is a literal string
+	level   Level
+}
+
+// match reports whether the file matches the pattern. It uses a string
+// comparison if the pattern contains no metacharacters.
+func (m *modulePat) match(file string) bool {
+	if m.literal {
+		return file == m.pattern
+	}
+	match, _ := filepath.Match(m.pattern, file)
+	return match
+}
+
+func (m *moduleSpec) String() string {
+	// Lock because the type is not atomic. TODO: clean this up.
+	logging.mu.Lock()
+	defer logging.mu.Unlock()
+	var b bytes.Buffer
+	for i, f := range m.filter {
+		if i > 0 {
+			b.WriteRune(',')
+		}
+		fmt.Fprintf(&b, "%s=%d", f.pattern, f.level)
+	}
+	return b.String()
+}
+
+// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the
+// struct is not exported.
+func (m *moduleSpec) Get() interface{} {
+	return nil
+}
+
+var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N")
+
+// Syntax: -vmodule=recordio=2,file=1,gfs*=3
+func (m *moduleSpec) Set(value string) error {
+	var filter []modulePat
+	for _, pat := range strings.Split(value, ",") {
+		if len(pat) == 0 {
+			// Empty strings such as from a trailing comma can be ignored.
+			continue
+		}
+		patLev := strings.Split(pat, "=")
+		if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 {
+			return errVmoduleSyntax
+		}
+		pattern := patLev[0]
+		v, err := strconv.Atoi(patLev[1])
+		if err != nil {
+			return errVmoduleSyntax
+		}
+		if v < 0 {
+			return errors.New("negative value for vmodule level")
+		}
+		if v == 0 {
+			continue // Ignore. It's harmless but no point in paying the overhead.
+		}
+		// TODO: check syntax of filter?
+		filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)})
+	}
+	logging.mu.Lock()
+	defer logging.mu.Unlock()
+	logging.setVState(logging.verbosity, filter, true)
+	return nil
+}
+
+// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters
+// that require filepath.Match to be called to match the pattern.
+func isLiteral(pattern string) bool {
+	return !strings.ContainsAny(pattern, `\*?[]`)
+}
+
+// traceLocation represents the setting of the -log_backtrace_at flag.
+type traceLocation struct {
+	file string
+	line int
+}
+
+// isSet reports whether the trace location has been specified.
+// logging.mu is held.
+func (t *traceLocation) isSet() bool {
+	return t.line > 0
+}
+
+// match reports whether the specified file and line matches the trace location.
+// The argument file name is the full path, not the basename specified in the flag.
+// logging.mu is held.
+func (t *traceLocation) match(file string, line int) bool {
+	if t.line != line {
+		return false
+	}
+	if i := strings.LastIndex(file, "/"); i >= 0 {
+		file = file[i+1:]
+	}
+	return t.file == file
+}
+
+func (t *traceLocation) String() string {
+	// Lock because the type is not atomic. TODO: clean this up.
+	logging.mu.Lock()
+	defer logging.mu.Unlock()
+	return fmt.Sprintf("%s:%d", t.file, t.line)
+}
+
+// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the
+// struct is not exported.
+func (t *traceLocation) Get() interface{} {
+	return nil
+}
+
+var errTraceSyntax = errors.New("syntax error: expect file.go:234")
+
+// Syntax: -log_backtrace_at=gopherflakes.go:234
+// Note that unlike vmodule the file extension is included here.
+func (t *traceLocation) Set(value string) error {
+	if value == "" {
+		// Unset.
+		t.line = 0
+		t.file = ""
+	}
+	fields := strings.Split(value, ":")
+	if len(fields) != 2 {
+		return errTraceSyntax
+	}
+	file, line := fields[0], fields[1]
+	if !strings.Contains(file, ".") {
+		return errTraceSyntax
+	}
+	v, err := strconv.Atoi(line)
+	if err != nil {
+		return errTraceSyntax
+	}
+	if v <= 0 {
+		return errors.New("negative or zero value for level")
+	}
+	logging.mu.Lock()
+	defer logging.mu.Unlock()
+	t.line = v
+	t.file = file
+	return nil
+}
+
+// flushSyncWriter is the interface satisfied by logging destinations.
+type flushSyncWriter interface {
+	Flush() error
+	Sync() error
+	io.Writer
+}
+
+func init() {
+	// Default stderrThreshold is ERROR.
+	logging.stderrThreshold = errorLog
+
+	logging.setVState(0, nil, false)
+	go logging.flushDaemon()
+}
+
+// InitFlags is for explicitly initializing the flags
+func InitFlags(flagset *flag.FlagSet) {
+	if flagset == nil {
+		flagset = flag.CommandLine
+	}
+	flagset.StringVar(&logging.logDir, "log_dir", "", "If non-empty, write log files in this directory")
+	flagset.StringVar(&logging.logFile, "log_file", "", "If non-empty, use this log file")
+	flagset.BoolVar(&logging.toStderr, "logtostderr", false, "log to standard error instead of files")
+	flagset.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files")
+	flagset.Var(&logging.verbosity, "v", "log level for V logs")
+	flagset.BoolVar(&logging.skipHeaders, "skip_headers", false, "If true, avoid header prefixes in the log messages")
+	flagset.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr")
+	flagset.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging")
+	flagset.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace")
+}
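+
+// For example (an illustrative invocation, not from the original source), a
+// binary that called InitFlags(nil) before flag.Parse could be run as:
+//
+//	./server -logtostderr=true -v=2 -vmodule=gopher*=3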
+
+// Flush flushes all pending log I/O.
+func Flush() {
+	logging.lockAndFlushAll()
+}
+
+// loggingT collects all the global state of the logging setup.
+type loggingT struct {
+	// Boolean flags. Not handled atomically because the flag.Value interface
+	// does not let us avoid the =true, and that shorthand is necessary for
+	// compatibility. TODO: does this matter enough to fix? Seems unlikely.
+	toStderr     bool // The -logtostderr flag.
+	alsoToStderr bool // The -alsologtostderr flag.
+
+	// Level flag. Handled atomically.
+	stderrThreshold severity // The -stderrthreshold flag.
+
+	// freeList is a list of byte buffers, maintained under freeListMu.
+	freeList *buffer
+	// freeListMu maintains the free list. It is separate from the main mutex
+	// so buffers can be grabbed and printed to without holding the main lock,
+	// for better parallelization.
+	freeListMu sync.Mutex
+
+	// mu protects the remaining elements of this structure and is
+	// used to synchronize logging.
+	mu sync.Mutex
+	// file holds writer for each of the log types.
+	file [numSeverity]flushSyncWriter
+	// pcs is used in V to avoid an allocation when computing the caller's PC.
+	pcs [1]uintptr
+	// vmap is a cache of the V Level for each V() call site, identified by PC.
+	// It is wiped whenever the vmodule flag changes state.
+	vmap map[uintptr]Level
+	// filterLength stores the length of the vmodule filter chain. If greater
+	// than zero, it means vmodule is enabled. It may be read safely
+	// using sync.LoadInt32, but is only modified under mu.
+	filterLength int32
+	// traceLocation is the state of the -log_backtrace_at flag.
+	traceLocation traceLocation
+	// These flags are modified only under lock, although verbosity may be fetched
+	// safely using atomic.LoadInt32.
+	vmodule   moduleSpec // The state of the -vmodule flag.
+	verbosity Level      // V logging level, the value of the -v flag.
+
+	// If non-empty, overrides the choice of directory in which to write logs.
+	// See createLogDirs for the full list of possible destinations.
+	logDir string
+
+	// If non-empty, specifies the path of the file to write logs. mutually exclusive
+	// with the log-dir option.
+	logFile string
+
+	// If true, do not add the prefix headers, useful when used with SetOutput
+	skipHeaders bool
+}
+
+// buffer holds a byte Buffer for reuse. The zero value is ready for use.
+type buffer struct {
+	bytes.Buffer
+	tmp  [64]byte // temporary byte array for creating headers.
+	next *buffer
+}
+
+var logging loggingT
+
+// setVState sets a consistent state for V logging.
+// l.mu is held.
+func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool) {
+	// Turn verbosity off so V will not fire while we are in transition.
+	logging.verbosity.set(0)
+	// Ditto for filter length.
+	atomic.StoreInt32(&logging.filterLength, 0)
+
+	// Set the new filters and wipe the pc->Level map if the filter has changed.
+	if setFilter {
+		logging.vmodule.filter = filter
+		logging.vmap = make(map[uintptr]Level)
+	}
+
+	// Things are consistent now, so enable filtering and verbosity.
+	// They are enabled in order opposite to that in V.
+	atomic.StoreInt32(&logging.filterLength, int32(len(filter)))
+	logging.verbosity.set(verbosity)
+}
+
+// getBuffer returns a new, ready-to-use buffer.
+func (l *loggingT) getBuffer() *buffer {
+	l.freeListMu.Lock()
+	b := l.freeList
+	if b != nil {
+		l.freeList = b.next
+	}
+	l.freeListMu.Unlock()
+	if b == nil {
+		b = new(buffer)
+	} else {
+		b.next = nil
+		b.Reset()
+	}
+	return b
+}
+
+// putBuffer returns a buffer to the free list.
+func (l *loggingT) putBuffer(b *buffer) {
+	if b.Len() >= 256 {
+		// Let big buffers die a natural death.
+		return
+	}
+	l.freeListMu.Lock()
+	b.next = l.freeList
+	l.freeList = b
+	l.freeListMu.Unlock()
+}
+
+var timeNow = time.Now // Stubbed out for testing.
+
+/*
+header formats a log header as defined by the C++ implementation.
+It returns a buffer containing the formatted header and the user's file and line number.
+The depth specifies how many stack frames above this call lives the source line that is identified in the log message.
+
+Log lines have this form:
+	Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg...
+where the fields are defined as follows:
+	L                A single character, representing the log level (eg 'I' for INFO)
+	mm               The month (zero padded; ie May is '05')
+	dd               The day (zero padded)
+	hh:mm:ss.uuuuuu  Time in hours, minutes and fractional seconds
+	threadid         The space-padded thread ID as returned by GetTID()
+	file             The file name
+	line             The line number
+	msg              The user-supplied message
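+
+For example (an illustrative line, not from the original source), an INFO
+message logged from main.go line 10 by pid 1234 would render as:
+	I0102 15:04:05.000000    1234 main.go:10] hello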
+*/
+func (l *loggingT) header(s severity, depth int) (*buffer, string, int) {
+	_, file, line, ok := runtime.Caller(3 + depth)
+	if !ok {
+		file = "???"
+		line = 1
+	} else {
+		slash := strings.LastIndex(file, "/")
+		if slash >= 0 {
+			file = file[slash+1:]
+		}
+	}
+	return l.formatHeader(s, file, line), file, line
+}
+
+// formatHeader formats a log header using the provided file name and line number.
+func (l *loggingT) formatHeader(s severity, file string, line int) *buffer {
+	now := timeNow()
+	if line < 0 {
+		line = 0 // not a real line number, but acceptable to someDigits
+	}
+	if s > fatalLog {
+		s = infoLog // for safety.
+	}
+	buf := l.getBuffer()
+	if l.skipHeaders {
+		return buf
+	}
+
+	// Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand.
+	// It's worth about 3X. Fprintf is hard.
+	_, month, day := now.Date()
+	hour, minute, second := now.Clock()
+	// Lmmdd hh:mm:ss.uuuuuu threadid file:line]
+	buf.tmp[0] = severityChar[s]
+	buf.twoDigits(1, int(month))
+	buf.twoDigits(3, day)
+	buf.tmp[5] = ' '
+	buf.twoDigits(6, hour)
+	buf.tmp[8] = ':'
+	buf.twoDigits(9, minute)
+	buf.tmp[11] = ':'
+	buf.twoDigits(12, second)
+	buf.tmp[14] = '.'
+	buf.nDigits(6, 15, now.Nanosecond()/1000, '0')
+	buf.tmp[21] = ' '
+	buf.nDigits(7, 22, pid, ' ') // TODO: should be TID
+	buf.tmp[29] = ' '
+	buf.Write(buf.tmp[:30])
+	buf.WriteString(file)
+	buf.tmp[0] = ':'
+	n := buf.someDigits(1, line)
+	buf.tmp[n+1] = ']'
+	buf.tmp[n+2] = ' '
+	buf.Write(buf.tmp[:n+3])
+	return buf
+}
+
+// Some custom tiny helper functions to print the log header efficiently.
+
+const digits = "0123456789"
+
+// twoDigits formats a zero-prefixed two-digit integer at buf.tmp[i].
+func (buf *buffer) twoDigits(i, d int) {
+	buf.tmp[i+1] = digits[d%10]
+	d /= 10
+	buf.tmp[i] = digits[d%10]
+}
+
+// nDigits formats an n-digit integer at buf.tmp[i],
+// padding with pad on the left.
+// It assumes d >= 0.
+func (buf *buffer) nDigits(n, i, d int, pad byte) {
+	j := n - 1
+	for ; j >= 0 && d > 0; j-- {
+		buf.tmp[i+j] = digits[d%10]
+		d /= 10
+	}
+	for ; j >= 0; j-- {
+		buf.tmp[i+j] = pad
+	}
+}
+
+// someDigits formats a variable-width integer at buf.tmp[i] and returns its width.
+func (buf *buffer) someDigits(i, d int) int {
+	// Print into the top, then copy down. We know there's space for at least
+	// a 10-digit number.
+	j := len(buf.tmp)
+	for {
+		j--
+		buf.tmp[j] = digits[d%10]
+		d /= 10
+		if d == 0 {
+			break
+		}
+	}
+	return copy(buf.tmp[i:], buf.tmp[j:])
+}
+
+func (l *loggingT) println(s severity, args ...interface{}) {
+	buf, file, line := l.header(s, 0)
+	fmt.Fprintln(buf, args...)
+	l.output(s, buf, file, line, false)
+}
+
+func (l *loggingT) print(s severity, args ...interface{}) {
+	l.printDepth(s, 1, args...)
+}
+
+func (l *loggingT) printDepth(s severity, depth int, args ...interface{}) {
+	buf, file, line := l.header(s, depth)
+	fmt.Fprint(buf, args...)
+	if buf.Bytes()[buf.Len()-1] != '\n' {
+		buf.WriteByte('\n')
+	}
+	l.output(s, buf, file, line, false)
+}
+
+func (l *loggingT) printf(s severity, format string, args ...interface{}) {
+	buf, file, line := l.header(s, 0)
+	fmt.Fprintf(buf, format, args...)
+	if buf.Bytes()[buf.Len()-1] != '\n' {
+		buf.WriteByte('\n')
+	}
+	l.output(s, buf, file, line, false)
+}
+
+// printWithFileLine behaves like print but uses the provided file and line number.  If
+// alsoToStderr is true, the log message always appears on standard error; it
+// will also appear in the log file unless -logtostderr is set.
+func (l *loggingT) printWithFileLine(s severity, file string, line int, alsoToStderr bool, args ...interface{}) {
+	buf := l.formatHeader(s, file, line)
+	fmt.Fprint(buf, args...)
+	if buf.Bytes()[buf.Len()-1] != '\n' {
+		buf.WriteByte('\n')
+	}
+	l.output(s, buf, file, line, alsoToStderr)
+}
+
+// redirectBuffer is used to set an alternate destination for the logs
+type redirectBuffer struct {
+	w io.Writer
+}
+
+func (rb *redirectBuffer) Sync() error {
+	return nil
+}
+
+func (rb *redirectBuffer) Flush() error {
+	return nil
+}
+
+func (rb *redirectBuffer) Write(bytes []byte) (n int, err error) {
+	return rb.w.Write(bytes)
+}
+
+// SetOutput sets the output destination for all severities
+func SetOutput(w io.Writer) {
+	for s := fatalLog; s >= infoLog; s-- {
+		rb := &redirectBuffer{
+			w: w,
+		}
+		logging.file[s] = rb
+	}
+}
+
+// SetOutputBySeverity sets the output destination for specific severity
+func SetOutputBySeverity(name string, w io.Writer) {
+	sev, ok := severityByName(name)
+	if !ok {
+		panic(fmt.Sprintf("SetOutputBySeverity(%q): unrecognized severity name", name))
+	}
+	rb := &redirectBuffer{
+		w: w,
+	}
+	logging.file[sev] = rb
+}
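+
+// For example (an illustrative sketch), tests can capture log output in
+// memory instead of files:
+//
+//	var buf bytes.Buffer
+//	klog.SetOutput(&buf)
+//	klog.Info("captured in buf rather than a log file")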
+
+// output writes the data to the log files and releases the buffer.
+func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoToStderr bool) {
+	l.mu.Lock()
+	if l.traceLocation.isSet() {
+		if l.traceLocation.match(file, line) {
+			buf.Write(stacks(false))
+		}
+	}
+	data := buf.Bytes()
+	if l.toStderr {
+		os.Stderr.Write(data)
+	} else {
+		if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() {
+			os.Stderr.Write(data)
+		}
+		if l.file[s] == nil {
+			if err := l.createFiles(s); err != nil {
+				os.Stderr.Write(data) // Make sure the message appears somewhere.
+				l.exit(err)
+			}
+		}
+		switch s {
+		case fatalLog:
+			l.file[fatalLog].Write(data)
+			fallthrough
+		case errorLog:
+			l.file[errorLog].Write(data)
+			fallthrough
+		case warningLog:
+			l.file[warningLog].Write(data)
+			fallthrough
+		case infoLog:
+			l.file[infoLog].Write(data)
+		}
+	}
+	if s == fatalLog {
+		// If we got here via Exit rather than Fatal, print no stacks.
+		if atomic.LoadUint32(&fatalNoStacks) > 0 {
+			l.mu.Unlock()
+			timeoutFlush(10 * time.Second)
+			os.Exit(1)
+		}
+		// Dump all goroutine stacks before exiting.
+		// First, make sure we see the trace for the current goroutine on standard error.
+		// If -logtostderr has been specified, the loop below will do that anyway
+		// as the first stack in the full dump.
+		if !l.toStderr {
+			os.Stderr.Write(stacks(false))
+		}
+		// Write the stack trace for all goroutines to the files.
+		trace := stacks(true)
+		logExitFunc = func(error) {} // If we get a write error, we'll still exit below.
+		for log := fatalLog; log >= infoLog; log-- {
+			if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set.
+				f.Write(trace)
+			}
+		}
+		l.mu.Unlock()
+		timeoutFlush(10 * time.Second)
+		os.Exit(255) // C++ uses -1, which is silly because it's anded with 255 anyway.
+	}
+	l.putBuffer(buf)
+	l.mu.Unlock()
+	if stats := severityStats[s]; stats != nil {
+		atomic.AddInt64(&stats.lines, 1)
+		atomic.AddInt64(&stats.bytes, int64(len(data)))
+	}
+}
+
+// timeoutFlush calls Flush and returns when it completes or after timeout
+// elapses, whichever happens first.  This is needed because the hooks invoked
+// by Flush may deadlock when glog.Fatal is called from a hook that holds
+// a lock.
+func timeoutFlush(timeout time.Duration) {
+	done := make(chan bool, 1)
+	go func() {
+		Flush() // calls logging.lockAndFlushAll()
+		done <- true
+	}()
+	select {
+	case <-done:
+	case <-time.After(timeout):
+		fmt.Fprintln(os.Stderr, "glog: Flush took longer than", timeout)
+	}
+}
+
+// stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines.
+func stacks(all bool) []byte {
+	// We don't know how big the traces are, so grow a few times if they don't fit. Start large, though.
+	n := 10000
+	if all {
+		n = 100000
+	}
+	var trace []byte
+	for i := 0; i < 5; i++ {
+		trace = make([]byte, n)
+		nbytes := runtime.Stack(trace, all)
+		if nbytes < len(trace) {
+			return trace[:nbytes]
+		}
+		n *= 2
+	}
+	return trace
+}
+
+// logExitFunc provides a simple mechanism to override the default behavior
+// of exiting on error. Used in testing and to guarantee we reach a required exit
+// for fatal logs. Exit could instead be a function rather than a method, but that
+// would make its use clumsier.
+var logExitFunc func(error)
+
+// exit is called if there is trouble creating or writing log files.
+// It flushes the logs and exits the program; there's no point in hanging around.
+// l.mu is held.
+func (l *loggingT) exit(err error) {
+	fmt.Fprintf(os.Stderr, "log: exiting because of error: %s\n", err)
+	// If logExitFunc is set, we do that instead of exiting.
+	if logExitFunc != nil {
+		logExitFunc(err)
+		return
+	}
+	l.flushAll()
+	os.Exit(2)
+}
+
+// syncBuffer joins a bufio.Writer to its underlying file, providing access to the
+// file's Sync method and providing a wrapper for the Write method that provides log
+// file rotation. There are conflicting methods, so the file cannot be embedded.
+// l.mu is held for all its methods.
+type syncBuffer struct {
+	logger *loggingT
+	*bufio.Writer
+	file   *os.File
+	sev    severity
+	nbytes uint64 // The number of bytes written to this file
+}
+
+func (sb *syncBuffer) Sync() error {
+	return sb.file.Sync()
+}
+
+func (sb *syncBuffer) Write(p []byte) (n int, err error) {
+	if sb.nbytes+uint64(len(p)) >= MaxSize {
+		if err := sb.rotateFile(time.Now()); err != nil {
+			sb.logger.exit(err)
+		}
+	}
+	n, err = sb.Writer.Write(p)
+	sb.nbytes += uint64(n)
+	if err != nil {
+		sb.logger.exit(err)
+	}
+	return
+}
+
+// rotateFile closes the syncBuffer's file and starts a new one.
+func (sb *syncBuffer) rotateFile(now time.Time) error {
+	if sb.file != nil {
+		sb.Flush()
+		sb.file.Close()
+	}
+	var err error
+	sb.file, _, err = create(severityName[sb.sev], now)
+	sb.nbytes = 0
+	if err != nil {
+		return err
+	}
+
+	sb.Writer = bufio.NewWriterSize(sb.file, bufferSize)
+
+	// Write header.
+	var buf bytes.Buffer
+	fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05"))
+	fmt.Fprintf(&buf, "Running on machine: %s\n", host)
+	fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH)
+	fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n")
+	n, err := sb.file.Write(buf.Bytes())
+	sb.nbytes += uint64(n)
+	return err
+}
+
+// bufferSize sizes the buffer associated with each log file. It's large
+// so that log records can accumulate without the logging thread blocking
+// on disk I/O. The flushDaemon will block instead.
+const bufferSize = 256 * 1024
+
+// createFiles creates all the log files for severity from sev down to infoLog.
+// l.mu is held.
+func (l *loggingT) createFiles(sev severity) error {
+	now := time.Now()
+	// Files are created in decreasing severity order, so as soon as we find one
+	// has already been created, we can stop.
+	for s := sev; s >= infoLog && l.file[s] == nil; s-- {
+		sb := &syncBuffer{
+			logger: l,
+			sev:    s,
+		}
+		if err := sb.rotateFile(now); err != nil {
+			return err
+		}
+		l.file[s] = sb
+	}
+	return nil
+}
+
+const flushInterval = 30 * time.Second
+
+// flushDaemon periodically flushes the log file buffers.
+func (l *loggingT) flushDaemon() {
+	for range time.NewTicker(flushInterval).C {
+		l.lockAndFlushAll()
+	}
+}
+
+// lockAndFlushAll is like flushAll but locks l.mu first.
+func (l *loggingT) lockAndFlushAll() {
+	l.mu.Lock()
+	l.flushAll()
+	l.mu.Unlock()
+}
+
+// flushAll flushes all the logs and attempts to "sync" their data to disk.
+// l.mu is held.
+func (l *loggingT) flushAll() {
+	// Flush from fatal down, in case there's trouble flushing.
+	for s := fatalLog; s >= infoLog; s-- {
+		file := l.file[s]
+		if file != nil {
+			file.Flush() // ignore error
+			file.Sync()  // ignore error
+		}
+	}
+}
+
+// CopyStandardLogTo arranges for messages written to the Go "log" package's
+// default logs to also appear in the Google logs for the named and lower
+// severities.  Subsequent changes to the standard log's default output location
+// or format may break this behavior.
+//
+// Valid names are "INFO", "WARNING", "ERROR", and "FATAL".  If the name is not
+// recognized, CopyStandardLogTo panics.
+func CopyStandardLogTo(name string) {
+	sev, ok := severityByName(name)
+	if !ok {
+		panic(fmt.Sprintf("log.CopyStandardLogTo(%q): unrecognized severity name", name))
+	}
+	// Set a log format that captures the user's file and line:
+	//   d.go:23: message
+	stdLog.SetFlags(stdLog.Lshortfile)
+	stdLog.SetOutput(logBridge(sev))
+}
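+
+// For example (illustrative):
+//
+//	klog.CopyStandardLogTo("INFO")
+//	stdLog.Print("routed into the INFO log") // via the logBridge below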
+
+// logBridge provides the Write method that enables CopyStandardLogTo to connect
+// Go's standard logs to the logs provided by this package.
+type logBridge severity
+
+// Write parses the standard logging line and passes its components to the
+// logger for severity(lb).
+func (lb logBridge) Write(b []byte) (n int, err error) {
+	var (
+		file = "???"
+		line = 1
+		text string
+	)
+	// Split "d.go:23: message" into "d.go", "23", and "message".
+	if parts := bytes.SplitN(b, []byte{':'}, 3); len(parts) != 3 || len(parts[0]) < 1 || len(parts[2]) < 1 {
+		text = fmt.Sprintf("bad log format: %s", b)
+	} else {
+		file = string(parts[0])
+		text = string(parts[2][1:]) // skip leading space
+		line, err = strconv.Atoi(string(parts[1]))
+		if err != nil {
+			text = fmt.Sprintf("bad line number: %s", b)
+			line = 1
+		}
+	}
+	// printWithFileLine with alsoToStderr=true, so standard log messages
+	// always appear on standard error.
+	logging.printWithFileLine(severity(lb), file, line, true, text)
+	return len(b), nil
+}
+
+// setV computes and remembers the V level for a given PC
+// when vmodule is enabled.
+// File pattern matching takes the basename of the file, stripped
+// of its .go suffix, and uses filepath.Match, which is a little more
+// general than the *? matching used in C++.
+// l.mu is held.
+func (l *loggingT) setV(pc uintptr) Level {
+	fn := runtime.FuncForPC(pc)
+	file, _ := fn.FileLine(pc)
+	// The file is something like /a/b/c/d.go. We want just the d.
+	if strings.HasSuffix(file, ".go") {
+		file = file[:len(file)-3]
+	}
+	if slash := strings.LastIndex(file, "/"); slash >= 0 {
+		file = file[slash+1:]
+	}
+	for _, filter := range l.vmodule.filter {
+		if filter.match(file) {
+			l.vmap[pc] = filter.level
+			return filter.level
+		}
+	}
+	l.vmap[pc] = 0
+	return 0
+}
+
+// Verbose is a boolean type that implements Infof (like Printf) etc.
+// See the documentation of V for more information.
+type Verbose bool
+
+// V reports whether verbosity at the call site is at least the requested level.
+// The returned value is a boolean of type Verbose, which implements Info, Infoln
+// and Infof. These methods will write to the Info log if called.
+// Thus, one may write either
+//	if glog.V(2) { glog.Info("log this") }
+// or
+//	glog.V(2).Info("log this")
+// The second form is shorter but the first is cheaper if logging is off because it does
+// not evaluate its arguments.
+//
+// Whether an individual call to V generates a log record depends on the setting of
+// the -v and --vmodule flags; both are off by default. If the level in the call to
+// V is at least the value of -v, or of -vmodule for the source file containing the
+// call, the V call will log.
+func V(level Level) Verbose {
+	// This function tries hard to be cheap unless there's work to do.
+	// The fast path is two atomic loads and compares.
+
+	// Here is a cheap but safe test to see if V logging is enabled globally.
+	if logging.verbosity.get() >= level {
+		return Verbose(true)
+	}
+
+	// It's off globally but vmodule may still be set.
+	// Here is another cheap but safe test to see if vmodule is enabled.
+	if atomic.LoadInt32(&logging.filterLength) > 0 {
+		// Now we need a proper lock to use the logging structure. The pcs field
+		// is shared so we must lock before accessing it. This is fairly expensive,
+		// but if V logging is enabled we're slow anyway.
+		logging.mu.Lock()
+		defer logging.mu.Unlock()
+		if runtime.Callers(2, logging.pcs[:]) == 0 {
+			return Verbose(false)
+		}
+		v, ok := logging.vmap[logging.pcs[0]]
+		if !ok {
+			v = logging.setV(logging.pcs[0])
+		}
+		return Verbose(v >= level)
+	}
+	return Verbose(false)
+}
+
+// Info is equivalent to the global Info function, guarded by the value of v.
+// See the documentation of V for usage.
+func (v Verbose) Info(args ...interface{}) {
+	if v {
+		logging.print(infoLog, args...)
+	}
+}
+
+// Infoln is equivalent to the global Infoln function, guarded by the value of v.
+// See the documentation of V for usage.
+func (v Verbose) Infoln(args ...interface{}) {
+	if v {
+		logging.println(infoLog, args...)
+	}
+}
+
+// Infof is equivalent to the global Infof function, guarded by the value of v.
+// See the documentation of V for usage.
+func (v Verbose) Infof(format string, args ...interface{}) {
+	if v {
+		logging.printf(infoLog, format, args...)
+	}
+}
+
+// Info logs to the INFO log.
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Info(args ...interface{}) {
+	logging.print(infoLog, args...)
+}
+
+// InfoDepth acts as Info but uses depth to determine which call frame to log.
+// InfoDepth(0, "msg") is the same as Info("msg").
+func InfoDepth(depth int, args ...interface{}) {
+	logging.printDepth(infoLog, depth, args...)
+}
+
+// Infoln logs to the INFO log.
+// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
+func Infoln(args ...interface{}) {
+	logging.println(infoLog, args...)
+}
+
+// Infof logs to the INFO log.
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Infof(format string, args ...interface{}) {
+	logging.printf(infoLog, format, args...)
+}
+
+// Warning logs to the WARNING and INFO logs.
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Warning(args ...interface{}) {
+	logging.print(warningLog, args...)
+}
+
+// WarningDepth acts as Warning but uses depth to determine which call frame to log.
+// WarningDepth(0, "msg") is the same as Warning("msg").
+func WarningDepth(depth int, args ...interface{}) {
+	logging.printDepth(warningLog, depth, args...)
+}
+
+// Warningln logs to the WARNING and INFO logs.
+// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
+func Warningln(args ...interface{}) {
+	logging.println(warningLog, args...)
+}
+
+// Warningf logs to the WARNING and INFO logs.
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Warningf(format string, args ...interface{}) {
+	logging.printf(warningLog, format, args...)
+}
+
+// Error logs to the ERROR, WARNING, and INFO logs.
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Error(args ...interface{}) {
+	logging.print(errorLog, args...)
+}
+
+// ErrorDepth acts as Error but uses depth to determine which call frame to log.
+// ErrorDepth(0, "msg") is the same as Error("msg").
+func ErrorDepth(depth int, args ...interface{}) {
+	logging.printDepth(errorLog, depth, args...)
+}
+
+// Errorln logs to the ERROR, WARNING, and INFO logs.
+// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
+func Errorln(args ...interface{}) {
+	logging.println(errorLog, args...)
+}
+
+// Errorf logs to the ERROR, WARNING, and INFO logs.
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Errorf(format string, args ...interface{}) {
+	logging.printf(errorLog, format, args...)
+}
+
+// Fatal logs to the FATAL, ERROR, WARNING, and INFO logs,
+// including a stack trace of all running goroutines, then calls os.Exit(255).
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Fatal(args ...interface{}) {
+	logging.print(fatalLog, args...)
+}
+
+// FatalDepth acts as Fatal but uses depth to determine which call frame to log.
+// FatalDepth(0, "msg") is the same as Fatal("msg").
+func FatalDepth(depth int, args ...interface{}) {
+	logging.printDepth(fatalLog, depth, args...)
+}
+
+// Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs,
+// including a stack trace of all running goroutines, then calls os.Exit(255).
+// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
+func Fatalln(args ...interface{}) {
+	logging.println(fatalLog, args...)
+}
+
+// Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs,
+// including a stack trace of all running goroutines, then calls os.Exit(255).
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Fatalf(format string, args ...interface{}) {
+	logging.printf(fatalLog, format, args...)
+}
+
+// fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks.
+// It allows Exit and relatives to use the Fatal logs.
+var fatalNoStacks uint32
+
+// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Exit(args ...interface{}) {
+	atomic.StoreUint32(&fatalNoStacks, 1)
+	logging.print(fatalLog, args...)
+}
+
+// ExitDepth acts as Exit but uses depth to determine which call frame to log.
+// ExitDepth(0, "msg") is the same as Exit("msg").
+func ExitDepth(depth int, args ...interface{}) {
+	atomic.StoreUint32(&fatalNoStacks, 1)
+	logging.printDepth(fatalLog, depth, args...)
+}
+
+// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
+func Exitln(args ...interface{}) {
+	atomic.StoreUint32(&fatalNoStacks, 1)
+	logging.println(fatalLog, args...)
+}
+
+// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Exitf(format string, args ...interface{}) {
+	atomic.StoreUint32(&fatalNoStacks, 1)
+	logging.printf(fatalLog, format, args...)
+}
diff --git a/vendor/k8s.io/klog/klog_file.go b/vendor/k8s.io/klog/klog_file.go
new file mode 100644
index 0000000..b76a4e1
--- /dev/null
+++ b/vendor/k8s.io/klog/klog_file.go
@@ -0,0 +1,126 @@
+// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
+//
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// File I/O for logs.
+
+package klog
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"os/user"
+	"path/filepath"
+	"strings"
+	"sync"
+	"time"
+)
+
+// MaxSize is the maximum size of a log file in bytes.
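+// Importers may override it before any log file is created, e.g.
+// (illustrative): klog.MaxSize = 100 * 1024 * 1024 // rotate at ~100 MiB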
+var MaxSize uint64 = 1024 * 1024 * 1800
+
+// logDirs lists the candidate directories for new log files.
+var logDirs []string
+
+func createLogDirs() {
+	if logging.logDir != "" {
+		logDirs = append(logDirs, logging.logDir)
+	}
+	logDirs = append(logDirs, os.TempDir())
+}
+
+var (
+	pid      = os.Getpid()
+	program  = filepath.Base(os.Args[0])
+	host     = "unknownhost"
+	userName = "unknownuser"
+)
+
+func init() {
+	h, err := os.Hostname()
+	if err == nil {
+		host = shortHostname(h)
+	}
+
+	current, err := user.Current()
+	if err == nil {
+		userName = current.Username
+	}
+
+	// Sanitize userName since it may contain filepath separators on Windows.
+	userName = strings.Replace(userName, `\`, "_", -1)
+}
+
+// shortHostname returns its argument, truncating at the first period.
+// For instance, given "www.google.com" it returns "www".
+func shortHostname(hostname string) string {
+	if i := strings.Index(hostname, "."); i >= 0 {
+		return hostname[:i]
+	}
+	return hostname
+}
+
+// logName returns a new log file name containing tag, with start time t, and
+// the name for the symlink for tag.
+func logName(tag string, t time.Time) (name, link string) {
+	name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d",
+		program,
+		host,
+		userName,
+		tag,
+		t.Year(),
+		t.Month(),
+		t.Day(),
+		t.Hour(),
+		t.Minute(),
+		t.Second(),
+		pid)
+	return name, program + "." + tag
+}
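+
+// For example (an illustrative name, not from the original source), a binary
+// "server" run by user "alice" on host "web1" with pid 1234 would produce
+// names like:
+//
+//	server.web1.alice.log.INFO.20190102-150405.1234
+//
+// and the symlink name "server.INFO".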
+
+var onceLogDirs sync.Once
+
+// create creates a new log file and returns the file and its filename, which
+// contains tag ("INFO", "FATAL", etc.) and t.  If the file is created
+// successfully, create also attempts to update the symlink for that tag, ignoring
+// errors.
+func create(tag string, t time.Time) (f *os.File, filename string, err error) {
+	if logging.logFile != "" {
+		f, err := os.Create(logging.logFile)
+		if err == nil {
+			return f, logging.logFile, nil
+		}
+		return nil, "", fmt.Errorf("log: unable to create log: %v", err)
+	}
+	onceLogDirs.Do(createLogDirs)
+	if len(logDirs) == 0 {
+		return nil, "", errors.New("log: no log dirs")
+	}
+	name, link := logName(tag, t)
+	var lastErr error
+	for _, dir := range logDirs {
+		fname := filepath.Join(dir, name)
+		f, err := os.Create(fname)
+		if err == nil {
+			symlink := filepath.Join(dir, link)
+			os.Remove(symlink)        // ignore err
+			os.Symlink(name, symlink) // ignore err
+			return f, fname, nil
+		}
+		lastErr = err
+	}
+	return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr)
+}
diff --git a/vendor/sigs.k8s.io/yaml/.gitignore b/vendor/sigs.k8s.io/yaml/.gitignore
new file mode 100644
index 0000000..e256a31
--- /dev/null
+++ b/vendor/sigs.k8s.io/yaml/.gitignore
@@ -0,0 +1,20 @@
+# OSX leaves these everywhere on SMB shares
+._*
+
+# Eclipse files
+.classpath
+.project
+.settings/**
+
+# Emacs save files
+*~
+
+# Vim-related files
+[._]*.s[a-w][a-z]
+[._]s[a-w][a-z]
+*.un~
+Session.vim
+.netrwhist
+
+# Go test binaries
+*.test
diff --git a/vendor/sigs.k8s.io/yaml/.travis.yml b/vendor/sigs.k8s.io/yaml/.travis.yml
new file mode 100644
index 0000000..03ddc73
--- /dev/null
+++ b/vendor/sigs.k8s.io/yaml/.travis.yml
@@ -0,0 +1,14 @@
+language: go
+dist: xenial
+go:
+  - 1.9.x
+  - 1.10.x
+  - 1.11.x
+script:
+  - go get -t -v ./...
+  - diff -u <(echo -n) <(gofmt -d .)
+  - diff -u <(echo -n) <(golint $(go list -e ./...) | grep -v YAMLToJSON)
+  - go tool vet .
+  - go test -v -race ./...
+install:
+  - go get golang.org/x/lint/golint
diff --git a/vendor/sigs.k8s.io/yaml/CONTRIBUTING.md b/vendor/sigs.k8s.io/yaml/CONTRIBUTING.md
new file mode 100644
index 0000000..de47115
--- /dev/null
+++ b/vendor/sigs.k8s.io/yaml/CONTRIBUTING.md
@@ -0,0 +1,31 @@
+# Contributing Guidelines
+
+Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://github.com/kubernetes/community)! The Kubernetes community abides by the CNCF [code of conduct](code-of-conduct.md). Here is an excerpt:
+
+_As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._
+
+## Getting Started
+
+We have full documentation on how to get started contributing here:
+
+<!---
+If your repo has certain guidelines for contribution, put them here ahead of the general k8s resources
+-->
+
+- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests
+- [Kubernetes Contributor Guide](http://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](http://git.k8s.io/community/contributors/guide#contributing)
+- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet.md) - Common resources for existing developers
+
+## Mentorship
+
+- [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers!
+
+<!---
+Custom Information - if you're copying this template for the first time you can add custom content here, for example:
+
+## Contact Information
+
+- [Slack channel](https://kubernetes.slack.com/messages/kubernetes-users) - Replace `kubernetes-users` with your slack channel string, this will send users directly to your channel. 
+- [Mailing list](URL)
+
+-->
diff --git a/vendor/sigs.k8s.io/yaml/LICENSE b/vendor/sigs.k8s.io/yaml/LICENSE
new file mode 100644
index 0000000..7805d36
--- /dev/null
+++ b/vendor/sigs.k8s.io/yaml/LICENSE
@@ -0,0 +1,50 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Sam Ghods
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/sigs.k8s.io/yaml/OWNERS b/vendor/sigs.k8s.io/yaml/OWNERS
new file mode 100644
index 0000000..11ad7ce
--- /dev/null
+++ b/vendor/sigs.k8s.io/yaml/OWNERS
@@ -0,0 +1,25 @@
+approvers:
+- dims
+- lavalamp
+- smarterclayton
+- deads2k
+- sttts
+- liggitt
+- caesarxuchao
+reviewers:
+- dims
+- thockin
+- lavalamp
+- smarterclayton
+- wojtek-t
+- deads2k
+- derekwaynecarr
+- caesarxuchao
+- mikedanese
+- liggitt
+- gmarek
+- sttts
+- ncdc
+- tallclair
+labels:
+- sig/api-machinery
diff --git a/vendor/sigs.k8s.io/yaml/README.md b/vendor/sigs.k8s.io/yaml/README.md
new file mode 100644
index 0000000..0200f75
--- /dev/null
+++ b/vendor/sigs.k8s.io/yaml/README.md
@@ -0,0 +1,121 @@
+# YAML marshaling and unmarshaling support for Go
+
+[![Build Status](https://travis-ci.org/ghodss/yaml.svg)](https://travis-ci.org/ghodss/yaml)
+
+## Introduction
+
+A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs.
+
+In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that, unlike go-yaml, it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON`. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/).
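+
+To make this concrete, here is a minimal sketch (the `Timeout` wrapper type and the `timeout` field are our own illustration, not part of the library) showing a custom `UnmarshalJSON` method being honored while decoding YAML:
+
+```go
+package main
+
+import (
+	"fmt"
+	"strconv"
+	"time"
+
+	"github.com/ghodss/yaml"
+)
+
+// Timeout is a hypothetical wrapper type carrying a custom JSON unmarshaler.
+type Timeout struct {
+	time.Duration
+}
+
+// UnmarshalJSON is honored during YAML decoding because the YAML input is
+// first converted to JSON.
+func (t *Timeout) UnmarshalJSON(b []byte) error {
+	s, err := strconv.Unquote(string(b))
+	if err != nil {
+		return err
+	}
+	t.Duration, err = time.ParseDuration(s)
+	return err
+}
+
+func main() {
+	var cfg struct {
+		Timeout Timeout `json:"timeout"`
+	}
+	if err := yaml.Unmarshal([]byte("timeout: 1m30s"), &cfg); err != nil {
+		fmt.Printf("err: %v\n", err)
+		return
+	}
+	fmt.Println(cfg.Timeout.Duration) // 1m30s
+}
+```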
+
+## Compatibility
+
+This package uses [go-yaml](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility).
+
+## Caveats
+
+**Caveat #1:** When using `yaml.Marshal` and `yaml.Unmarshal`, binary data should NOT be preceded by the `!!binary` YAML tag. If it is, go-yaml will convert the binary data from base64 to native binary data, which is not compatible with JSON. You can still use binary data in your YAML files, though: just store it without the `!!binary` tag and decode the base64 in your code (e.g. in the custom JSON methods `MarshalJSON` and `UnmarshalJSON`). This also has the benefit that your YAML and your JSON binary data will be decoded in exactly the same way. As an example:
+
+```
+BAD:
+	exampleKey: !!binary gIGC
+
+GOOD:
+	exampleKey: gIGC
+... and decode the base64 data in your code.
+```
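+
+As a sketch of the "decode the base64 in your code" approach (the `Blob` type here is hypothetical):
+
+```go
+package main
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+
+	"github.com/ghodss/yaml"
+)
+
+// Blob stores raw bytes; its wire form is plain base64 without the !!binary tag.
+type Blob struct {
+	Data []byte
+}
+
+// UnmarshalJSON decodes the base64 ourselves, so YAML and JSON inputs are
+// treated identically.
+func (b *Blob) UnmarshalJSON(j []byte) error {
+	var s string
+	if err := json.Unmarshal(j, &s); err != nil {
+		return err
+	}
+	data, err := base64.StdEncoding.DecodeString(s)
+	if err != nil {
+		return err
+	}
+	b.Data = data
+	return nil
+}
+
+func main() {
+	var doc struct {
+		ExampleKey Blob `json:"exampleKey"`
+	}
+	if err := yaml.Unmarshal([]byte("exampleKey: gIGC"), &doc); err != nil {
+		fmt.Printf("err: %v\n", err)
+		return
+	}
+	fmt.Printf("%x\n", doc.ExampleKey.Data) // 808182
+}
+```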
+
+**Caveat #2:** When using `YAMLToJSON` directly, maps whose keys are themselves maps will result in an error, since this is not supported by JSON. The same error occurs in `Unmarshal`, since you can't unmarshal into map keys anyway: struct fields can't be keys.
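+
+For example, YAML like the following uses a map as a key and falls under this caveat:
+
+```
+? {name: inner}
+: value
+```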
+
+## Installation and usage
+
+To install, run:
+
+```
+$ go get github.com/ghodss/yaml
+```
+
+And import using:
+
+```
+import "github.com/ghodss/yaml"
+```
+
+Usage is very similar to the JSON library:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/ghodss/yaml"
+)
+
+type Person struct {
+	Name string `json:"name"` // Affects YAML field names too.
+	Age  int    `json:"age"`
+}
+
+func main() {
+	// Marshal a Person struct to YAML.
+	p := Person{"John", 30}
+	y, err := yaml.Marshal(p)
+	if err != nil {
+		fmt.Printf("err: %v\n", err)
+		return
+	}
+	fmt.Println(string(y))
+	/* Output:
+	age: 30
+	name: John
+	*/
+
+	// Unmarshal the YAML back into a Person struct.
+	var p2 Person
+	err = yaml.Unmarshal(y, &p2)
+	if err != nil {
+		fmt.Printf("err: %v\n", err)
+		return
+	}
+	fmt.Println(p2)
+	/* Output:
+	{John 30}
+	*/
+}
+```
+
+The `yaml.YAMLToJSON` and `yaml.JSONToYAML` functions are also available:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/ghodss/yaml"
+)
+
+func main() {
+	j := []byte(`{"name": "John", "age": 30}`)
+	y, err := yaml.JSONToYAML(j)
+	if err != nil {
+		fmt.Printf("err: %v\n", err)
+		return
+	}
+	fmt.Println(string(y))
+	/* Output:
+	name: John
+	age: 30
+	*/
+	j2, err := yaml.YAMLToJSON(y)
+	if err != nil {
+		fmt.Printf("err: %v\n", err)
+		return
+	}
+	fmt.Println(string(j2))
+	/* Output:
+	{"age":30,"name":"John"}
+	*/
+}
+```
diff --git a/vendor/sigs.k8s.io/yaml/RELEASE.md b/vendor/sigs.k8s.io/yaml/RELEASE.md
new file mode 100644
index 0000000..6b64246
--- /dev/null
+++ b/vendor/sigs.k8s.io/yaml/RELEASE.md
@@ -0,0 +1,9 @@
+# Release Process
+
+The `yaml` project is released on an as-needed basis. The process is as follows:
+
+1. An issue is filed proposing a new release, with a changelog since the last release
+1. All [OWNERS](OWNERS) must LGTM this release
+1. An OWNER runs `git tag -s $VERSION`, inserts the changelog into the tag message, and pushes the tag with `git push origin $VERSION`
+1. The release issue is closed
+1. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] yaml $VERSION is released`
diff --git a/vendor/sigs.k8s.io/yaml/SECURITY_CONTACTS b/vendor/sigs.k8s.io/yaml/SECURITY_CONTACTS
new file mode 100644
index 0000000..0648a8e
--- /dev/null
+++ b/vendor/sigs.k8s.io/yaml/SECURITY_CONTACTS
@@ -0,0 +1,17 @@
+# Defined below are the security contacts for this repo.
+#
+# They are the contact point for the Product Security Team to reach out
+# to for triaging and handling of incoming issues.
+#
+# The below names agree to abide by the
+# [Embargo Policy](https://github.com/kubernetes/sig-release/blob/master/security-release-process-documentation/security-release-process.md#embargo-policy)
+# and will be removed and replaced if they violate that agreement.
+#
+# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
+# INSTRUCTIONS AT https://kubernetes.io/security/
+
+cjcullen
+jessfraz
+liggitt
+philips
+tallclair
diff --git a/vendor/sigs.k8s.io/yaml/code-of-conduct.md b/vendor/sigs.k8s.io/yaml/code-of-conduct.md
new file mode 100644
index 0000000..0d15c00
--- /dev/null
+++ b/vendor/sigs.k8s.io/yaml/code-of-conduct.md
@@ -0,0 +1,3 @@
+# Kubernetes Community Code of Conduct
+
+Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md)
diff --git a/vendor/sigs.k8s.io/yaml/fields.go b/vendor/sigs.k8s.io/yaml/fields.go
new file mode 100644
index 0000000..235b7f2
--- /dev/null
+++ b/vendor/sigs.k8s.io/yaml/fields.go
@@ -0,0 +1,502 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package yaml
+
+import (
+	"bytes"
+	"encoding"
+	"encoding/json"
+	"reflect"
+	"sort"
+	"strings"
+	"sync"
+	"unicode"
+	"unicode/utf8"
+)
+
+// indirect walks down v allocating pointers as needed,
+// until it gets to a non-pointer.
+// if it encounters an Unmarshaler, indirect stops and returns that.
+// if decodingNull is true, indirect stops at the last pointer so it can be set to nil.
+func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
+	// If v is a named type and is addressable,
+	// start with its address, so that if the type has pointer methods,
+	// we find them.
+	if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
+		v = v.Addr()
+	}
+	for {
+		// Load value from interface, but only if the result will be
+		// usefully addressable.
+		if v.Kind() == reflect.Interface && !v.IsNil() {
+			e := v.Elem()
+			if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
+				v = e
+				continue
+			}
+		}
+
+		if v.Kind() != reflect.Ptr {
+			break
+		}
+
+		if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
+			break
+		}
+		if v.IsNil() {
+			if v.CanSet() {
+				v.Set(reflect.New(v.Type().Elem()))
+			} else {
+				v = reflect.New(v.Type().Elem())
+			}
+		}
+		if v.Type().NumMethod() > 0 {
+			if u, ok := v.Interface().(json.Unmarshaler); ok {
+				return u, nil, reflect.Value{}
+			}
+			if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
+				return nil, u, reflect.Value{}
+			}
+		}
+		v = v.Elem()
+	}
+	return nil, nil, v
+}
+
+// A field represents a single field found in a struct.
+type field struct {
+	name      string
+	nameBytes []byte                 // []byte(name)
+	equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent
+
+	tag       bool
+	index     []int
+	typ       reflect.Type
+	omitEmpty bool
+	quoted    bool
+}
+
+func fillField(f field) field {
+	f.nameBytes = []byte(f.name)
+	f.equalFold = foldFunc(f.nameBytes)
+	return f
+}
+
+// byName sorts fields by name, breaking ties with depth,
+// then breaking ties with "name came from json tag", then
+// breaking ties with index sequence.
+type byName []field
+
+func (x byName) Len() int { return len(x) }
+
+func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byName) Less(i, j int) bool {
+	if x[i].name != x[j].name {
+		return x[i].name < x[j].name
+	}
+	if len(x[i].index) != len(x[j].index) {
+		return len(x[i].index) < len(x[j].index)
+	}
+	if x[i].tag != x[j].tag {
+		return x[i].tag
+	}
+	return byIndex(x).Less(i, j)
+}
+
+// byIndex sorts fields by index sequence.
+type byIndex []field
+
+func (x byIndex) Len() int { return len(x) }
+
+func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byIndex) Less(i, j int) bool {
+	for k, xik := range x[i].index {
+		if k >= len(x[j].index) {
+			return false
+		}
+		if xik != x[j].index[k] {
+			return xik < x[j].index[k]
+		}
+	}
+	return len(x[i].index) < len(x[j].index)
+}
+
+// typeFields returns a list of fields that JSON should recognize for the given type.
+// The algorithm is breadth-first search over the set of structs to include - the top struct
+// and then any reachable anonymous structs.
+func typeFields(t reflect.Type) []field {
+	// Anonymous fields to explore at the current level and the next.
+	current := []field{}
+	next := []field{{typ: t}}
+
+	// Count of queued names for current level and the next.
+	count := map[reflect.Type]int{}
+	nextCount := map[reflect.Type]int{}
+
+	// Types already visited at an earlier level.
+	visited := map[reflect.Type]bool{}
+
+	// Fields found.
+	var fields []field
+
+	for len(next) > 0 {
+		current, next = next, current[:0]
+		count, nextCount = nextCount, map[reflect.Type]int{}
+
+		for _, f := range current {
+			if visited[f.typ] {
+				continue
+			}
+			visited[f.typ] = true
+
+			// Scan f.typ for fields to include.
+			for i := 0; i < f.typ.NumField(); i++ {
+				sf := f.typ.Field(i)
+				if sf.PkgPath != "" { // unexported
+					continue
+				}
+				tag := sf.Tag.Get("json")
+				if tag == "-" {
+					continue
+				}
+				name, opts := parseTag(tag)
+				if !isValidTag(name) {
+					name = ""
+				}
+				index := make([]int, len(f.index)+1)
+				copy(index, f.index)
+				index[len(f.index)] = i
+
+				ft := sf.Type
+				if ft.Name() == "" && ft.Kind() == reflect.Ptr {
+					// Follow pointer.
+					ft = ft.Elem()
+				}
+
+				// Record found field and index sequence.
+				if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
+					tagged := name != ""
+					if name == "" {
+						name = sf.Name
+					}
+					fields = append(fields, fillField(field{
+						name:      name,
+						tag:       tagged,
+						index:     index,
+						typ:       ft,
+						omitEmpty: opts.Contains("omitempty"),
+						quoted:    opts.Contains("string"),
+					}))
+					if count[f.typ] > 1 {
+						// If there were multiple instances, add a second,
+						// so that the annihilation code will see a duplicate.
+						// It only cares about the distinction between 1 or 2,
+						// so don't bother generating any more copies.
+						fields = append(fields, fields[len(fields)-1])
+					}
+					continue
+				}
+
+				// Record new anonymous struct to explore in next round.
+				nextCount[ft]++
+				if nextCount[ft] == 1 {
+					next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft}))
+				}
+			}
+		}
+	}
+
+	sort.Sort(byName(fields))
+
+	// Delete all fields that are hidden by the Go rules for embedded fields,
+	// except that fields with JSON tags are promoted.
+
+	// The fields are sorted in primary order of name, secondary order
+	// of field index length. Loop over names; for each name, delete
+	// hidden fields by choosing the one dominant field that survives.
+	out := fields[:0]
+	for advance, i := 0, 0; i < len(fields); i += advance {
+		// One iteration per name.
+		// Find the sequence of fields with the name of this first field.
+		fi := fields[i]
+		name := fi.name
+		for advance = 1; i+advance < len(fields); advance++ {
+			fj := fields[i+advance]
+			if fj.name != name {
+				break
+			}
+		}
+		if advance == 1 { // Only one field with this name
+			out = append(out, fi)
+			continue
+		}
+		dominant, ok := dominantField(fields[i : i+advance])
+		if ok {
+			out = append(out, dominant)
+		}
+	}
+
+	fields = out
+	sort.Sort(byIndex(fields))
+
+	return fields
+}
+
+// dominantField looks through the fields, all of which are known to
+// have the same name, to find the single field that dominates the
+// others using Go's embedding rules, modified by the presence of
+// JSON tags. If there are multiple top-level fields, the boolean
+// will be false: This condition is an error in Go and we skip all
+// the fields.
+func dominantField(fields []field) (field, bool) {
+	// The fields are sorted in increasing index-length order. The winner
+	// must therefore be one with the shortest index length. Drop all
+	// longer entries, which is easy: just truncate the slice.
+	length := len(fields[0].index)
+	tagged := -1 // Index of first tagged field.
+	for i, f := range fields {
+		if len(f.index) > length {
+			fields = fields[:i]
+			break
+		}
+		if f.tag {
+			if tagged >= 0 {
+				// Multiple tagged fields at the same level: conflict.
+				// Return no field.
+				return field{}, false
+			}
+			tagged = i
+		}
+	}
+	if tagged >= 0 {
+		return fields[tagged], true
+	}
+	// All remaining fields have the same length. If there's more than one,
+	// we have a conflict (two fields named "X" at the same level) and we
+	// return no field.
+	if len(fields) > 1 {
+		return field{}, false
+	}
+	return fields[0], true
+}
+
+var fieldCache struct {
+	sync.RWMutex
+	m map[reflect.Type][]field
+}
+
+// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
+func cachedTypeFields(t reflect.Type) []field {
+	fieldCache.RLock()
+	f := fieldCache.m[t]
+	fieldCache.RUnlock()
+	if f != nil {
+		return f
+	}
+
+	// Compute fields without lock.
+	// Might duplicate effort but won't hold other computations back.
+	f = typeFields(t)
+	if f == nil {
+		f = []field{}
+	}
+
+	fieldCache.Lock()
+	if fieldCache.m == nil {
+		fieldCache.m = map[reflect.Type][]field{}
+	}
+	fieldCache.m[t] = f
+	fieldCache.Unlock()
+	return f
+}
+
+func isValidTag(s string) bool {
+	if s == "" {
+		return false
+	}
+	for _, c := range s {
+		switch {
+		case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
+			// Backslash and quote chars are reserved, but
+			// otherwise any punctuation chars are allowed
+			// in a tag name.
+		default:
+			if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+const (
+	caseMask     = ^byte(0x20) // Mask to ignore case in ASCII.
+	kelvin       = '\u212a'
+	smallLongEss = '\u017f'
+)
+
+// foldFunc returns one of four different case folding equivalence
+// functions, from most general (and slow) to fastest:
+//
+// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
+// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
+// 3) asciiEqualFold, no special, but includes non-letters (including _)
+// 4) simpleLetterEqualFold, no specials, no non-letters.
+//
+// The letters S and K are special because they map to 3 runes, not just 2:
+//  * S maps to s and to U+017F 'ſ' Latin small letter long s
+//  * k maps to K and to U+212A 'K' Kelvin sign
+// See http://play.golang.org/p/tTxjOc0OGo
+//
+// The returned function is specialized for matching against s and
+// should only be given s. It's not curried for performance reasons.
+func foldFunc(s []byte) func(s, t []byte) bool {
+	nonLetter := false
+	special := false // special letter
+	for _, b := range s {
+		if b >= utf8.RuneSelf {
+			return bytes.EqualFold
+		}
+		upper := b & caseMask
+		if upper < 'A' || upper > 'Z' {
+			nonLetter = true
+		} else if upper == 'K' || upper == 'S' {
+			// See above for why these letters are special.
+			special = true
+		}
+	}
+	if special {
+		return equalFoldRight
+	}
+	if nonLetter {
+		return asciiEqualFold
+	}
+	return simpleLetterEqualFold
+}
+
+// equalFoldRight is a specialization of bytes.EqualFold when s is
+// known to be all ASCII (including punctuation), but contains an 's',
+// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
+// See comments on foldFunc.
+func equalFoldRight(s, t []byte) bool {
+	for _, sb := range s {
+		if len(t) == 0 {
+			return false
+		}
+		tb := t[0]
+		if tb < utf8.RuneSelf {
+			if sb != tb {
+				sbUpper := sb & caseMask
+				if 'A' <= sbUpper && sbUpper <= 'Z' {
+					if sbUpper != tb&caseMask {
+						return false
+					}
+				} else {
+					return false
+				}
+			}
+			t = t[1:]
+			continue
+		}
+		// sb is ASCII and t is not. t must be either kelvin
+		// sign or long s; sb must be s, S, k, or K.
+		tr, size := utf8.DecodeRune(t)
+		switch sb {
+		case 's', 'S':
+			if tr != smallLongEss {
+				return false
+			}
+		case 'k', 'K':
+			if tr != kelvin {
+				return false
+			}
+		default:
+			return false
+		}
+		t = t[size:]
+
+	}
+	if len(t) > 0 {
+		return false
+	}
+	return true
+}
+
+// asciiEqualFold is a specialization of bytes.EqualFold for use when
+// s is all ASCII (but may contain non-letters) and contains no
+// special-folding letters.
+// See comments on foldFunc.
+func asciiEqualFold(s, t []byte) bool {
+	if len(s) != len(t) {
+		return false
+	}
+	for i, sb := range s {
+		tb := t[i]
+		if sb == tb {
+			continue
+		}
+		if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
+			if sb&caseMask != tb&caseMask {
+				return false
+			}
+		} else {
+			return false
+		}
+	}
+	return true
+}
+
+// simpleLetterEqualFold is a specialization of bytes.EqualFold for
+// use when s is all ASCII letters (no underscores, etc) and also
+// doesn't contain 'k', 'K', 's', or 'S'.
+// See comments on foldFunc.
+func simpleLetterEqualFold(s, t []byte) bool {
+	if len(s) != len(t) {
+		return false
+	}
+	for i, b := range s {
+		if b&caseMask != t[i]&caseMask {
+			return false
+		}
+	}
+	return true
+}
+
+// tagOptions is the string following a comma in a struct field's "json"
+// tag, or the empty string. It does not include the leading comma.
+type tagOptions string
+
+// parseTag splits a struct field's json tag into its name and
+// comma-separated options.
+func parseTag(tag string) (string, tagOptions) {
+	if idx := strings.Index(tag, ","); idx != -1 {
+		return tag[:idx], tagOptions(tag[idx+1:])
+	}
+	return tag, tagOptions("")
+}
+
+// Contains reports whether a comma-separated list of options
+// contains the given optionName. The option must be surrounded by a
+// string boundary or commas.
+func (o tagOptions) Contains(optionName string) bool {
+	if len(o) == 0 {
+		return false
+	}
+	s := string(o)
+	for s != "" {
+		var next string
+		i := strings.Index(s, ",")
+		if i >= 0 {
+			s, next = s[:i], s[i+1:]
+		}
+		if s == optionName {
+			return true
+		}
+		s = next
+	}
+	return false
+}
diff --git a/vendor/sigs.k8s.io/yaml/yaml.go b/vendor/sigs.k8s.io/yaml/yaml.go
new file mode 100644
index 0000000..0245961
--- /dev/null
+++ b/vendor/sigs.k8s.io/yaml/yaml.go
@@ -0,0 +1,319 @@
+package yaml
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"reflect"
+	"strconv"
+
+	"gopkg.in/yaml.v2"
+)
+
+// Marshal marshals the object into JSON then converts JSON to YAML and returns the
+// YAML.
+func Marshal(o interface{}) ([]byte, error) {
+	j, err := json.Marshal(o)
+	if err != nil {
+		return nil, fmt.Errorf("error marshaling into JSON: %v", err)
+	}
+
+	y, err := JSONToYAML(j)
+	if err != nil {
+		return nil, fmt.Errorf("error converting JSON to YAML: %v", err)
+	}
+
+	return y, nil
+}
+
+// JSONOpt is a decoding option for decoding from JSON format.
+type JSONOpt func(*json.Decoder) *json.Decoder
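+
+// As an illustration, a caller-supplied option (hypothetical, not part of
+// this package) that keeps numbers as json.Number could look like:
+//
+//	func useNumber(d *json.Decoder) *json.Decoder { d.UseNumber(); return d }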
+
+// Unmarshal converts YAML to JSON then uses JSON to unmarshal into an object,
+// optionally configuring the behavior of the JSON unmarshal.
+func Unmarshal(y []byte, o interface{}, opts ...JSONOpt) error {
+	return yamlUnmarshal(y, o, false, opts...)
+}
+
+// UnmarshalStrict strictly converts YAML to JSON then uses JSON to unmarshal
+// into an object, optionally configuring the behavior of the JSON unmarshal.
+func UnmarshalStrict(y []byte, o interface{}, opts ...JSONOpt) error {
+	return yamlUnmarshal(y, o, true, append(opts, DisallowUnknownFields)...)
+}
+
+// yamlUnmarshal unmarshals the given YAML byte stream into the given interface,
+// optionally performing the unmarshaling strictly.
+func yamlUnmarshal(y []byte, o interface{}, strict bool, opts ...JSONOpt) error {
+	vo := reflect.ValueOf(o)
+	unmarshalFn := yaml.Unmarshal
+	if strict {
+		unmarshalFn = yaml.UnmarshalStrict
+	}
+	j, err := yamlToJSON(y, &vo, unmarshalFn)
+	if err != nil {
+		return fmt.Errorf("error converting YAML to JSON: %v", err)
+	}
+
+	err = jsonUnmarshal(bytes.NewReader(j), o, opts...)
+	if err != nil {
+		return fmt.Errorf("error unmarshaling JSON: %v", err)
+	}
+
+	return nil
+}
+
+// jsonUnmarshal unmarshals the JSON byte stream from the given reader into the
+// object, optionally applying decoder options prior to decoding.  We are not
+// using json.Unmarshal directly as we want the chance to pass in non-default
+// options.
+func jsonUnmarshal(r io.Reader, o interface{}, opts ...JSONOpt) error {
+	d := json.NewDecoder(r)
+	for _, opt := range opts {
+		d = opt(d)
+	}
+	if err := d.Decode(&o); err != nil {
+		return fmt.Errorf("while decoding JSON: %v", err)
+	}
+	return nil
+}
+
+// JSONToYAML converts JSON to YAML.
+func JSONToYAML(j []byte) ([]byte, error) {
+	// Convert the JSON to an object.
+	var jsonObj interface{}
+	// We are using yaml.Unmarshal here (instead of json.Unmarshal) because the
+	// Go JSON library doesn't try to pick the right number type (int, float,
+	// etc.) when unmarshalling to interface{}, it just picks float64
+	// universally. go-yaml does go through the effort of picking the right
+	// number type, so we can preserve number type throughout this process.
+	err := yaml.Unmarshal(j, &jsonObj)
+	if err != nil {
+		return nil, err
+	}
+
+	// Marshal this object into YAML.
+	return yaml.Marshal(jsonObj)
+}
+
+// YAMLToJSON converts YAML to JSON. Since JSON is a subset of YAML,
+// passing JSON through this method should be a no-op.
+//
+// Things YAML can do that are not supported by JSON:
+// * In YAML you can have binary and null keys in your maps. These are invalid
+//   in JSON. (int and float keys are converted to strings.)
+// * Binary data in YAML with the !!binary tag is not supported. If you want to
+//   use binary data with this library, encode the data as base64 as usual but do
+//   not use the !!binary tag in your YAML. This will ensure the original base64
+//   encoded data makes it all the way through to the JSON.
+//
+// For strict decoding of YAML, use YAMLToJSONStrict.
+func YAMLToJSON(y []byte) ([]byte, error) {
+	return yamlToJSON(y, nil, yaml.Unmarshal)
+}
+
+// YAMLToJSONStrict is like YAMLToJSON but enables strict YAML decoding,
+// returning an error on any duplicate field names.
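+//
+// For instance (a sketch of the behavior, via go-yaml's UnmarshalStrict),
+// input such as "a: 1\na: 2" should fail here while being accepted by
+// YAMLToJSON.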
+func YAMLToJSONStrict(y []byte) ([]byte, error) {
+	return yamlToJSON(y, nil, yaml.UnmarshalStrict)
+}
+
+func yamlToJSON(y []byte, jsonTarget *reflect.Value, yamlUnmarshal func([]byte, interface{}) error) ([]byte, error) {
+	// Convert the YAML to an object.
+	var yamlObj interface{}
+	err := yamlUnmarshal(y, &yamlObj)
+	if err != nil {
+		return nil, err
+	}
+
+	// YAML objects are not completely compatible with JSON objects (e.g. you
+	// can have non-string keys in YAML). So, convert the YAML-compatible object
+	// to a JSON-compatible object, failing with an error if irrecoverable
+	// incompatibilities happen along the way.
+	jsonObj, err := convertToJSONableObject(yamlObj, jsonTarget)
+	if err != nil {
+		return nil, err
+	}
+
+	// Convert this object to JSON and return the data.
+	return json.Marshal(jsonObj)
+}
+
+func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) {
+	var err error
+
+	// Resolve jsonTarget to a concrete value (i.e. not a pointer or an
+	// interface). We pass decodingNull as false because we're not actually
+	// decoding into the value, we're just checking if the ultimate target is a
+	// string.
+	if jsonTarget != nil {
+		ju, tu, pv := indirect(*jsonTarget, false)
+		// We have a JSON or Text Unmarshaler at this level, so we can't be trying
+		// to decode into a string.
+		if ju != nil || tu != nil {
+			jsonTarget = nil
+		} else {
+			jsonTarget = &pv
+		}
+	}
+
+	// If yamlObj is a number or a boolean, check if jsonTarget is a string;
+	// if so, coerce. Otherwise, return it unchanged.
+	// If yamlObj is a map or array, find the field that each key is
+	// unmarshaling to, and when you recurse pass the reflect.Value for that
+	// field back into this function.
+	switch typedYAMLObj := yamlObj.(type) {
+	case map[interface{}]interface{}:
+		// JSON does not support arbitrary keys in a map, so we must convert
+		// these keys to strings.
+		//
+		// From my reading of go-yaml v2 (specifically the resolve function),
+		// keys can only have the types string, int, int64, float64, binary
+		// (unsupported), or null (unsupported).
+		strMap := make(map[string]interface{})
+		for k, v := range typedYAMLObj {
+			// Resolve the key to a string first.
+			var keyString string
+			switch typedKey := k.(type) {
+			case string:
+				keyString = typedKey
+			case int:
+				keyString = strconv.Itoa(typedKey)
+			case int64:
+				// go-yaml will only return an int64 as a key if the system
+				// architecture is 32-bit and the key's value does not fit in
+				// a 32-bit int. Otherwise the key type will simply be int.
+				keyString = strconv.FormatInt(typedKey, 10)
+			case float64:
+				// Stolen from go-yaml to use the same conversion to string as
+				// the go-yaml library uses to convert float to string when
+				// Marshaling.
+				s := strconv.FormatFloat(typedKey, 'g', -1, 32)
+				switch s {
+				case "+Inf":
+					s = ".inf"
+				case "-Inf":
+					s = "-.inf"
+				case "NaN":
+					s = ".nan"
+				}
+				keyString = s
+			case bool:
+				if typedKey {
+					keyString = "true"
+				} else {
+					keyString = "false"
+				}
+			default:
+				return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v",
+					reflect.TypeOf(k), k, v)
+			}
+
+			// jsonTarget should be a struct or a map. If it's a struct, find
+			// the field it's going to map to and pass its reflect.Value. If
+			// it's a map, find the element type of the map and pass the
+			// reflect.Value created from that type. If it's neither, just pass
+			// nil - JSON conversion will error for us if it's a real issue.
+			if jsonTarget != nil {
+				t := *jsonTarget
+				if t.Kind() == reflect.Struct {
+					keyBytes := []byte(keyString)
+					// Find the field that the JSON library would use.
+					var f *field
+					fields := cachedTypeFields(t.Type())
+					for i := range fields {
+						ff := &fields[i]
+						if bytes.Equal(ff.nameBytes, keyBytes) {
+							f = ff
+							break
+						}
+						// Do case-insensitive comparison.
+						if f == nil && ff.equalFold(ff.nameBytes, keyBytes) {
+							f = ff
+						}
+					}
+					if f != nil {
+						// Find the reflect.Value of the most preferential
+						// struct field.
+						jtf := t.Field(f.index[0])
+						strMap[keyString], err = convertToJSONableObject(v, &jtf)
+						if err != nil {
+							return nil, err
+						}
+						continue
+					}
+				} else if t.Kind() == reflect.Map {
+					// Create a zero value of the map's element type to use as
+					// the JSON target.
+					jtv := reflect.Zero(t.Type().Elem())
+					strMap[keyString], err = convertToJSONableObject(v, &jtv)
+					if err != nil {
+						return nil, err
+					}
+					continue
+				}
+			}
+			strMap[keyString], err = convertToJSONableObject(v, nil)
+			if err != nil {
+				return nil, err
+			}
+		}
+		return strMap, nil
+	case []interface{}:
+		// We need to recurse into arrays in case there are any
+		// map[interface{}]interface{}'s inside and to convert any
+		// numbers to strings.
+
+		// If jsonTarget is a slice (which it really should be), find the
+		// thing it's going to map to. If it's not a slice, just pass nil
+		// - JSON conversion will error for us if it's a real issue.
+		var jsonSliceElemValue *reflect.Value
+		if jsonTarget != nil {
+			t := *jsonTarget
+			if t.Kind() == reflect.Slice {
+				// By default slices point to nil, but we need a reflect.Value
+				// pointing to a value of the slice type, so we create one here.
+				ev := reflect.Indirect(reflect.New(t.Type().Elem()))
+				jsonSliceElemValue = &ev
+			}
+		}
+
+		// Make and use a new array.
+		arr := make([]interface{}, len(typedYAMLObj))
+		for i, v := range typedYAMLObj {
+			arr[i], err = convertToJSONableObject(v, jsonSliceElemValue)
+			if err != nil {
+				return nil, err
+			}
+		}
+		return arr, nil
+	default:
+		// If the target type is a string and the YAML value is a number or
+		// a boolean, convert it to a string.
+		if jsonTarget != nil && (*jsonTarget).Kind() == reflect.String {
+			// Based on my reading of go-yaml, it may return int, int64,
+			// float64, or uint64.
+			var s string
+			switch typedVal := typedYAMLObj.(type) {
+			case int:
+				s = strconv.FormatInt(int64(typedVal), 10)
+			case int64:
+				s = strconv.FormatInt(typedVal, 10)
+			case float64:
+				s = strconv.FormatFloat(typedVal, 'g', -1, 32)
+			case uint64:
+				s = strconv.FormatUint(typedVal, 10)
+			case bool:
+				if typedVal {
+					s = "true"
+				} else {
+					s = "false"
+				}
+			}
+			if len(s) > 0 {
+				yamlObj = interface{}(s)
+			}
+		}
+		return yamlObj, nil
+	}
+}
diff --git a/vendor/sigs.k8s.io/yaml/yaml_go110.go b/vendor/sigs.k8s.io/yaml/yaml_go110.go
new file mode 100644
index 0000000..ab3e06a
--- /dev/null
+++ b/vendor/sigs.k8s.io/yaml/yaml_go110.go
@@ -0,0 +1,14 @@
+// This file contains changes that are only compatible with go 1.10 and onwards.
+
+// +build go1.10
+
+package yaml
+
+import "encoding/json"
+
+// DisallowUnknownFields configures the JSON decoder to error out if unknown
+// fields come along, instead of dropping them by default.
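+//
+// A typical use (sketch): yaml.Unmarshal(data, &obj, yaml.DisallowUnknownFields).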
+func DisallowUnknownFields(d *json.Decoder) *json.Decoder {
+	d.DisallowUnknownFields()
+	return d
+}